diff --git a/.circleci/config.yml b/.circleci/config.yml
index 3edc86341ea3f..937d78fb6a86f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -3,39 +3,177 @@ version: 2.1
 parameters:
   ubuntu-amd64-machine-image:
     type: string
-    default: "ubuntu-2004:2022.04.1"
+    default: "ubuntu-2204:2023.02.1"
   ubuntu-arm64-machine-image:
     type: string
-    default: "ubuntu-2004:2022.04.1"
+    default: "ubuntu-2204:2023.02.1"
+  PYTEST_LOGLEVEL:
+    type: string
+    default: "WARNING"
+  skip_test_selection:
+    type: boolean
+    default: false
+  randomize-aws-credentials:
+    type: boolean
+    default: false
+  only-acceptance-tests:
+    type: boolean
+    default: false
 
 executors:
   ubuntu-machine-amd64:
     machine:
       image: << pipeline.parameters.ubuntu-amd64-machine-image >>
 
+commands:
+  prepare-acceptance-tests:
+    steps:
+      - run:
+          name: Check if only Acceptance Tests are running
+          command: |
+            only_acceptance_tests="<< pipeline.parameters.only-acceptance-tests >>"
+            trigger_source="<< pipeline.trigger_source >>"
+            git_branch="<< pipeline.git.branch >>"
+            echo "only-acceptance-tests: $only_acceptance_tests"
+            # GitHub event: webhook, Scheduled run: scheduled_pipeline, Manual run: api
+            echo "trigger_source: $trigger_source"
+            echo "git branch: $git_branch"
+
+            # Function to set environment variables
+            set_env_vars() {
+              echo "export ONLY_ACCEPTANCE_TESTS=$1" >> $BASH_ENV
+              echo "export DEFAULT_TAG='$2'" >> $BASH_ENV
+              echo "$3"
+            }
+
+            if [[ "$only_acceptance_tests" == "true" ]]; then
+              set_env_vars "true" "latest" "Only acceptance tests run, the default tag is 'latest'"
+            elif [[ "$git_branch" == "master" ]] && [[ "$trigger_source" == "webhook" ]]; then
+              set_env_vars "true" "latest" "Regular push run to master means only acceptance test run, the default tag is 'latest'"
+            else
+              set_env_vars "false" "latest" "All tests run, the default tag is 'latest'"
+            fi
+
+            source $BASH_ENV
+
+  prepare-testselection:
+    steps:
+      - unless:
+          condition: << pipeline.parameters.skip_test_selection >>
+          steps:
+            - run:
+                name: Setup test selection environment variable
+                command: |
+                  if [[ -n "$CI_PULL_REQUEST" ]] ; then
+                    echo "export TESTSELECTION_PYTEST_ARGS='--path-filter=target/testselection/test-selection.txt '" >> $BASH_ENV
+                  fi
+
+  prepare-pytest-tinybird:
+    steps:
+      - run:
+          name: Setup Environment Variables
+          command: |
+            if [[ $CIRCLE_BRANCH == "master" ]] ; then
+              echo "export TINYBIRD_PYTEST_ARGS='--report-to-tinybird '" >> $BASH_ENV
+            fi
+            if << pipeline.parameters.randomize-aws-credentials >> ; then
+              echo "export TINYBIRD_DATASOURCE=community_tests_circleci_ma_mr" >> $BASH_ENV
+            elif [[ $ONLY_ACCEPTANCE_TESTS == "true" ]] ; then
+              echo "export TINYBIRD_DATASOURCE=community_tests_circleci_acceptance" >> $BASH_ENV
+            else
+              echo "export TINYBIRD_DATASOURCE=community_tests_circleci" >> $BASH_ENV
+            fi
+            echo "export TINYBIRD_TOKEN=${TINYBIRD_CI_TOKEN}" >> $BASH_ENV
+            echo "export CI_COMMIT_BRANCH=${CIRCLE_BRANCH}" >> $BASH_ENV
+            echo "export CI_COMMIT_SHA=${CIRCLE_SHA1}" >> $BASH_ENV
+            echo "export CI_JOB_URL=${CIRCLE_BUILD_URL}" >> $BASH_ENV
+            # workflow ID as the job name to associate the tests with workflows in TB
+            echo "export CI_JOB_NAME=${CIRCLE_WORKFLOW_ID}" >> $BASH_ENV
+            echo "export CI_JOB_ID=${CIRCLE_JOB}" >> $BASH_ENV
+            source $BASH_ENV
+
+  prepare-account-region-randomization:
+    steps:
+      - when:
+          condition: << pipeline.parameters.randomize-aws-credentials >>
+          steps:
+            - run:
+                name: Generate Random AWS Account ID
+                command: |
+                  # Generate a random 12-digit number for TEST_AWS_ACCOUNT_ID
+                  export TEST_AWS_ACCOUNT_ID=$(LC_ALL=C tr -dc '0-9' < /dev/urandom | fold -w 12 | head -n 1)
+                  export TEST_AWS_ACCESS_KEY_ID=$TEST_AWS_ACCOUNT_ID
+                  # Set TEST_AWS_REGION_NAME to a random AWS region other than us-east-1
+                  export AWS_REGIONS=("us-east-2" "us-west-1" "us-west-2" "ap-southeast-2" "ap-northeast-1" "eu-central-1" "eu-west-1")
+                  export TEST_AWS_REGION_NAME=${AWS_REGIONS[$RANDOM % ${#AWS_REGIONS[@]}]}
+                  echo "export TEST_AWS_REGION_NAME=${TEST_AWS_REGION_NAME}" >> $BASH_ENV
+                  echo "export TEST_AWS_ACCESS_KEY_ID=${TEST_AWS_ACCESS_KEY_ID}" >> $BASH_ENV
+                  echo "export TEST_AWS_ACCOUNT_ID=${TEST_AWS_ACCOUNT_ID}" >> $BASH_ENV
+                  source $BASH_ENV
+
 jobs:
+  ################
+  ## Build Jobs ##
+  ################
+  docker-build:
+    parameters:
+      platform:
+        description: "Platform to build for"
+        default: "amd64"
+        type: string
+      machine_image:
+        description: "CircleCI machine image to run on"
+        default: << pipeline.parameters.ubuntu-amd64-machine-image >>
+        type: string
+      resource_class:
+        description: "CircleCI resource class to run on"
+        default: "medium"
+        type: string
+    machine:
+      image: << parameters.machine_image >>
+    resource_class: << parameters.resource_class >>
+    working_directory: /tmp/workspace/repo
+    environment:
+      IMAGE_NAME: "localstack/localstack"
+      PLATFORM: "<< parameters.platform >>"
+    steps:
+      - prepare-acceptance-tests
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Install global python dependencies
+          command: |
+            pip install --upgrade setuptools setuptools_scm
+      - run:
+          name: Build community docker image
+          command: ./bin/docker-helper.sh build
+      - run:
+          name: Save docker image
+          working_directory: target
+          command: ../bin/docker-helper.sh save
+      - persist_to_workspace:
+          root:
+            /tmp/workspace
+          paths:
+            - repo/target/
+
   install:
     executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
     steps:
       - checkout
       - restore_cache:
-          key: python-requirements-{{ checksum "setup.cfg" }}
-      - run:
-          name: Install prerequisites
-          command: |
-            # fix for: https://discuss.circleci.com/t/heroku-gpg-issues-in-ubuntu-images/43834/3
-            sudo rm -rf /etc/apt/sources.list.d/heroku.list
-            sudo apt-get update
-            sudo apt-get install -y libsasl2-dev
+          key: python-requirements-{{ checksum "requirements-typehint.txt" }}
       - run:
           name: Setup environment
           command: |
+            make install-dev-types
             make install
             mkdir -p target/reports
             mkdir -p target/coverage
       - save_cache:
-          key: python-requirements-{{ checksum "setup.cfg" }}
+          key: python-requirements-{{ checksum "requirements-typehint.txt" }}
           paths:
             - "~/.cache/pip"
       - persist_to_workspace:
@@ -44,6 +182,10 @@ jobs:
         paths:
           - repo
 
+
+  ##########################
+  ## Acceptance Test Jobs ##
+  ##########################
   preflight:
     executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
@@ -53,6 +195,38 @@ jobs:
       - run:
           name: Linting
           command: make lint
+      - run:
+          name: Checking AWS compatibility markers
+          command: make check-aws-markers
+
+  # can't completely skip it since we need the dependency from other tasks => conditional in run step
+  test-selection:
+    executor: ubuntu-machine-amd64
+    working_directory: /tmp/workspace/repo
+    steps:
+      - attach_workspace:
+          at: /tmp/workspace
+      - unless:
+          condition: << pipeline.parameters.skip_test_selection >>
+          steps:
+            - run:
+                # script expects an environment variable $GITHUB_API_TOKEN to be set to fetch PR details
+                name: Generate test selection filters from changed files
+                command: |
+                  if [[ -z "$CI_PULL_REQUEST" ]] ; then
+                    echo "Skipping test selection"
+                    circleci-agent step halt
+                  else
+                    source .venv/bin/activate
+                    PYTHONPATH=localstack-core python -m localstack.testing.testselection.scripts.generate_test_selection /tmp/workspace/repo target/testselection/test-selection.txt --pr-url $CI_PULL_REQUEST
+                    cat target/testselection/test-selection.txt
+                  fi
+
+      - persist_to_workspace:
+          root:
+            /tmp/workspace
+          paths:
+            - repo/target/testselection/
 
   unit-tests:
     executor: ubuntu-machine-amd64
@@ -60,97 +234,191 @@ jobs:
     steps:
       - attach_workspace:
           at: /tmp/workspace
+      - prepare-pytest-tinybird
+      - prepare-account-region-randomization
       - run:
           name: Unit tests
           environment:
             TEST_PATH: "tests/unit"
-            PYTEST_ARGS: "--junitxml=target/reports/unit-tests.xml -o junit_suite_name=unit-tests"
             COVERAGE_ARGS: "-p"
-          command: make test-coverage
+          command: |
+            COVERAGE_FILE="target/coverage/.coverage.unit.${CIRCLE_NODE_INDEX}" \
+            PYTEST_ARGS="${TINYBIRD_PYTEST_ARGS}--junitxml=target/reports/unit-tests.xml -o junit_suite_name=unit-tests" \
+            make test-coverage
       - store_test_results:
           path: target/reports/
-      - run:
-          name: Store coverage results
-          command: mv .coverage.* target/coverage/
       - persist_to_workspace:
           root:
             /tmp/workspace
           paths:
             - repo/target/coverage/
 
-  itest-lambda-docker:
-    executor: ubuntu-machine-amd64
+  acceptance-tests:
+    parameters:
+      platform:
+        description: "Platform to run on"
+        default: "amd64"
+        type: string
+      resource_class:
+        description: "CircleCI resource class to run on"
+        default: "medium"
+        type: string
+      machine_image:
+        description: "CircleCI machine image to run on"
+        default: << pipeline.parameters.ubuntu-amd64-machine-image >>
+        type: string
+    machine:
+      image: << parameters.machine_image >>
+    resource_class: << parameters.resource_class >>
     working_directory: /tmp/workspace/repo
+    environment:
+      PYTEST_LOGLEVEL: << pipeline.parameters.PYTEST_LOGLEVEL >>
+      IMAGE_NAME: "localstack/localstack"
+      PLATFORM: "<< parameters.platform >>"
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
       - run:
-          name: Pull Lambda runtimes
-          command: |
-            sudo useradd localstack -s /bin/bash
-            docker pull -q lambci/lambda:nodejs12.x
-            docker pull -q localstack/lambda-js:nodejs14.x
-            docker pull -q lambci/lambda:ruby2.7
-            docker pull -q lambci/lambda:python3.6
-            docker pull -q lambci/lambda:python3.7
-            docker pull -q lambci/lambda:python3.8
-            docker pull -q mlupin/docker-lambda:python3.9
-            docker pull -q lambci/lambda:dotnetcore3.1
-            docker pull -q mlupin/docker-lambda:dotnet6
-            docker pull -q lambci/lambda:provided
-            docker pull -q lambci/lambda:java8
-            docker pull -q lambci/lambda:java8.al2
-            docker pull -q lambci/lambda:java11
-      - run:
-          name: Test Docker client
-          environment:
-            TEST_PATH: "tests/integration/docker_utils"
-            TEST_SKIP_LOCALSTACK_START: 1
-            PYTEST_ARGS: "--reruns 2 --junitxml=target/reports/docker-client.xml -o junit_suite_name='docker-client'"
-            COVERAGE_ARGS: "-p"
-          command: make test-coverage
-      - run:
-          name: Test 'docker' Lambda executor
-          environment:
-            LAMBDA_EXECUTOR: "docker"
-            USE_SSL: 1
-            TEST_PATH: "tests/integration/awslambda/ tests/integration/test_integration.py tests/integration/test_apigateway.py"
-            PYTEST_ARGS: "--reruns 2 --junitxml=target/reports/lambda-docker.xml -o junit_suite_name='lambda-docker'"
-            COVERAGE_ARGS: "-p"
-          command: make test-coverage
+          name: Load docker image
+          working_directory: target
+          command: ../bin/docker-helper.sh load
+      - prepare-pytest-tinybird
+      - prepare-account-region-randomization
       - run:
-          name: Test 'docker-reuse' Lambda executor
+          name: Acceptance tests
           environment:
-            LAMBDA_EXECUTOR: "docker-reuse"
-            TEST_PATH: "tests/integration/awslambda/ tests/integration/test_integration.py tests/integration/test_apigateway.py"
-            PYTEST_ARGS: "--reruns 2 --junitxml=target/reports/lambda-docker-reuse.xml -o junit_suite_name='lambda-docker-reuse'"
+            TEST_PATH: "tests/aws/"
             COVERAGE_ARGS: "-p"
-          command: make test-coverage
-      - run:
-          name: Store coverage results
-          command: mv .coverage.* target/coverage/
+            COVERAGE_FILE: "target/coverage/.coverage.acceptance.<< parameters.platform >>"
+            PYTEST_ARGS: "${TINYBIRD_PYTEST_ARGS}--reruns 3 -m acceptance_test --junitxml=target/reports/acceptance-test-report-<< parameters.platform >>-${CIRCLE_NODE_INDEX}.xml -o junit_suite_name='acceptance_test'"
+            LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC: 1
+            DEBUG: 1
+          command: |
+            make docker-run-tests
+      - store_test_results:
+          path: target/reports/
       - persist_to_workspace:
           root:
             /tmp/workspace
           paths:
+            - repo/target/reports/
+            - repo/target/metric_reports/
             - repo/target/coverage/
+
+
+  ###########################
+  ## Integration Test Jobs ##
+  ###########################
+  integration-tests:
+    parameters:
+      platform:
+        description: "Platform to build for"
+        default: "amd64"
+        type: string
+      resource_class:
+        description: "CircleCI resource class to run on"
+        default: "medium"
+        type: string
+      machine_image:
+        description: "CircleCI machine image to run on"
+        default: << pipeline.parameters.ubuntu-amd64-machine-image >>
+        type: string
+    machine:
+      image: << parameters.machine_image >>
+    resource_class: << parameters.resource_class >>
+    working_directory: /tmp/workspace/repo
+    parallelism: 4
+    environment:
+      PYTEST_LOGLEVEL: << pipeline.parameters.PYTEST_LOGLEVEL >>
+      IMAGE_NAME: "localstack/localstack"
+      PLATFORM: "<< parameters.platform >>"
+    steps:
+      - prepare-acceptance-tests
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Load docker image
+          working_directory: target
+          command: ../bin/docker-helper.sh load
+      # Prebuild and cache Lambda multiruntime test functions, supporting both architectures: amd64 and arm64
+      # Currently, all runners prebuild the Lambda functions, not just the one(s) executing Lambda multiruntime tests.
+      - run:
+          name: Compute Lambda build hashes
+          # Any change in the Lambda function source code (i.e., **/src/**) or build process (i.e., **/Makefile) invalidates the cache
+          command: |
+            find tests/aws/services/lambda_/functions/common -type f \( -path '**/src/**' -o -path '**/Makefile' \) | xargs sha256sum > /tmp/common-functions-checksums
+      - restore_cache:
+          key: common-functions-<< parameters.platform >>-{{ checksum "/tmp/common-functions-checksums" }}
+      - run:
+          name: Pre-build Lambda common test packages
+          command: ./scripts/build_common_test_functions.sh `pwd`/tests/aws/services/lambda_/functions/common
+      - save_cache:
+          key: common-functions-<< parameters.platform >>-{{ checksum "/tmp/common-functions-checksums" }}
+          paths:
+            - "tests/aws/services/lambda_/functions/common"
+      - prepare-testselection
+      - prepare-pytest-tinybird
+      - prepare-account-region-randomization
+      - run:
+          name: Run integration tests
+          # circleci split returns newline separated list, so `tr` is necessary to prevent problems in the Makefile
+          # if we're performing a test selection, we need to filter the list of files before splitting by timings
+          command: |
+            if [ -z $TESTSELECTION_PYTEST_ARGS ] ; then
+              TEST_FILES=$(circleci tests glob "tests/aws/**/test_*.py" "tests/integration/**/test_*.py" | circleci tests split --verbose --split-by=timings | tr '\n' ' ')
+            else
+              TEST_FILES=$(circleci tests glob "tests/aws/**/test_*.py" "tests/integration/**/test_*.py" | PYTHONPATH=localstack-core python -m localstack.testing.testselection.scripts.filter_by_test_selection target/testselection/test-selection.txt | circleci tests split --verbose --split-by=timings | tr '\n' ' ')
            fi
+            echo $TEST_FILES
+            if [[ -z "$TEST_FILES" ]] ; then
+              echo "Skipping test execution because no tests were selected"
+              circleci-agent step halt
+            else
+              PYTEST_ARGS="${TINYBIRD_PYTEST_ARGS}${TESTSELECTION_PYTEST_ARGS}-o junit_family=legacy --junitxml=target/reports/test-report-<< parameters.platform >>-${CIRCLE_NODE_INDEX}.xml" \
+              COVERAGE_FILE="target/coverage/.coverage.<< parameters.platform >>.${CIRCLE_NODE_INDEX}" \
+              TEST_PATH=$TEST_FILES \
+              DEBUG=1 \
+              make docker-run-tests
+            fi
+      - store_test_results:
+          path: target/reports/
+      - store_artifacts:
+          path: target/reports/
+      - persist_to_workspace:
+          root:
+            /tmp/workspace
+          paths:
+            - repo/target/reports/
+            - repo/target/coverage/
+            - repo/target/metric_reports
 
-  itest-lambda-provider:
+  bootstrap-tests:
     executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
+    environment:
+      PYTEST_LOGLEVEL: << pipeline.parameters.PYTEST_LOGLEVEL >>
+      IMAGE_NAME: "localstack/localstack"
+      PLATFORM: "amd64"
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
       - run:
-          name: Test ASF Lambda provider
+          name: Load docker image
+          working_directory: target
+          command: ../bin/docker-helper.sh load
+      - prepare-pytest-tinybird
+      - prepare-account-region-randomization
+      - run:
+          name: Run bootstrap tests
           environment:
-            PROVIDER_OVERRIDE_LAMBDA: "asf"
-            TEST_PATH: "tests/integration/awslambda/test_lambda.py tests/integration/awslambda/test_lambda_api.py tests/integration/awslambda/test_lambda_common.py tests/integration/awslambda/test_lambda_integration_sqs.py tests/integration/cloudformation/resources/test_lambda.py tests/integration/awslambda/test_lambda_integration_dynamodbstreams.py tests/integration/awslambda/test_lambda_integration_kinesis.py tests/integration/awslambda/test_lambda_developer_tools.py"
-            PYTEST_ARGS: "--reruns 3 --junitxml=target/reports/lambda_asf.xml -o junit_suite_name='lambda_asf'"
+            TEST_PATH: "tests/bootstrap"
             COVERAGE_ARGS: "-p"
-          command: make test-coverage
+          command: |
+            PYTEST_ARGS="${TINYBIRD_PYTEST_ARGS}--junitxml=target/reports/bootstrap-tests.xml -o junit_suite_name=bootstrap-tests" make test-coverage
+      - store_test_results:
+          path: target/reports/
       - run:
           name: Store coverage results
           command: mv .coverage.* target/coverage/
@@ -159,26 +427,33 @@
            /tmp/workspace
          paths:
            - repo/target/coverage/
-      - store_test_results:
-          path: target/reports/
 
-  itest-s3-asf-provider:
+
+  ######################
+  ## Custom Test Jobs ##
+  ######################
+  itest-cloudwatch-v1-provider:
     executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
+    environment:
+      PYTEST_LOGLEVEL: << pipeline.parameters.PYTEST_LOGLEVEL >>
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
+      - prepare-testselection
+      - prepare-pytest-tinybird
+      - prepare-account-region-randomization
       - run:
-          name: Test ASF S3 provider
+          name: Test CloudWatch v1 provider
           environment:
-            PROVIDER_OVERRIDE_S3: "asf"
-            TEST_PATH: "tests/integration/s3/"
-            PYTEST_ARGS: "--reruns 3 --junitxml=target/reports/s3_asf.xml -o junit_suite_name='s3_asf'"
+            PROVIDER_OVERRIDE_CLOUDWATCH: "v1"
+            TEST_PATH: "tests/aws/services/cloudwatch/"
             COVERAGE_ARGS: "-p"
-          command: make test-coverage
-      - run:
-          name: Store coverage results
-          command: mv .coverage.* target/coverage/
+          command: |
+            COVERAGE_FILE="target/coverage/.coverage.cloudwatchV1.${CIRCLE_NODE_INDEX}" \
+            PYTEST_ARGS="${TINYBIRD_PYTEST_ARGS}${TESTSELECTION_PYTEST_ARGS}--reruns 3 --junitxml=target/reports/cloudwatch_v1.xml -o junit_suite_name='cloudwatch_v1'" \
+            make test-coverage
       - persist_to_workspace:
           root:
             /tmp/workspace
@@ -187,23 +462,29 @@
           paths:
             - repo/target/coverage/
       - store_test_results:
           path: target/reports/
 
-  itest-sfn-v2-provider:
+  # TODO: remove legacy v1 provider in future 4.x release
+  itest-events-v1-provider:
     executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
+    environment:
+      PYTEST_LOGLEVEL: << pipeline.parameters.PYTEST_LOGLEVEL >>
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
+      - prepare-testselection
+      - prepare-pytest-tinybird
+      - prepare-account-region-randomization
       - run:
-          name: Test SFN V2 provider
+          name: Test EventBridge v1 provider
           environment:
-            PROVIDER_OVERRIDE_STEPFUNCTIONS: "v2"
-            TEST_PATH: "tests/integration/stepfunctions/v2/"
-            PYTEST_ARGS: "--reruns 3 --junitxml=target/reports/sfn_v2.xml -o junit_suite_name='sfn_v2'"
+            PROVIDER_OVERRIDE_EVENTS: "v1"
+            TEST_PATH: "tests/aws/services/events/"
             COVERAGE_ARGS: "-p"
-          command: make test-coverage
-      - run:
-          name: Store coverage results
-          command: mv .coverage.* target/coverage/
+          command: |
+            COVERAGE_FILE="target/coverage/.coverage.eventsV1.${CIRCLE_NODE_INDEX}" \
+            PYTEST_ARGS="${TINYBIRD_PYTEST_ARGS}${TESTSELECTION_PYTEST_ARGS}--reruns 3 --junitxml=target/reports/events_v1.xml -o junit_suite_name='events_v1'" \
+            make test-coverage
       - persist_to_workspace:
           root:
             /tmp/workspace
@@ -212,127 +493,96 @@
           paths:
             - repo/target/coverage/
       - store_test_results:
           path: target/reports/
 
-  docker-build:
-    parameters:
-      platform:
-        description: "Platform to build for"
-        default: "amd64"
-        type: string
-      machine_image:
-        description: "CircleCI machine type to run at"
-        default: "ubuntu-2004:202107-02"
-        type: string
-      resource_class:
-        description: "CircleCI machine type to run at"
-        default: "medium"
-        type: string
-    machine:
-      image: << parameters.machine_image >>
-      # TODO re-enable docker layer caching after file system problems are fixed
-      # docker_layer_caching: true
-    resource_class: << parameters.resource_class >>
+  itest-ddb-v2-provider:
+    executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
+    environment:
+      PYTEST_LOGLEVEL: << pipeline.parameters.PYTEST_LOGLEVEL >>
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
+      - prepare-testselection
+      - prepare-pytest-tinybird
+      - prepare-account-region-randomization
       - run:
-          name: Build full docker image
-          command: make docker-build-full
-      - run:
-          name: Build community docker image
-          command: make docker-build
-      - run:
-          name: Build light docker image
-          command: make docker-build-light
-      - run:
-          name: Build pro docker image
-          command: make docker-build-pro
-      - run:
-          name: Save docker images
-          command: PLATFORM="<< parameters.platform >>" make docker-save-images
-      - when:
-          condition:
-            equal: [ master, << pipeline.git.branch >> ]
-          steps:
-            - run:
-                name: Run pre-release smoke tests
-                command: echo "make ci-pro-smoke-tests"
+          name: Test DynamoDB(Streams) v2 provider
+          environment:
+            PROVIDER_OVERRIDE_DYNAMODB: "v2"
+            TEST_PATH: "tests/aws/services/dynamodb/ tests/aws/services/dynamodbstreams/ tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py"
+            COVERAGE_ARGS: "-p"
+          command: |
+            COVERAGE_FILE="target/coverage/.coverage.dynamodb_v2.${CIRCLE_NODE_INDEX}" \
+            PYTEST_ARGS="${TINYBIRD_PYTEST_ARGS}${TESTSELECTION_PYTEST_ARGS}--reruns 3 --junitxml=target/reports/dynamodb_v2.xml -o junit_suite_name='dynamodb_v2'" \
+            make test-coverage
       - persist_to_workspace:
           root:
             /tmp/workspace
           paths:
-            - repo/target/
+            - repo/target/coverage/
+      - store_test_results:
+          path: target/reports/
 
-  docker-test:
-    parameters:
-      platform:
-        description: "Platform to build for"
-        default: "amd64"
-        type: string
-      resource_class:
-        description: "CircleCI machine type to run at"
-        default: "medium"
-        type: string
-      machine_image:
-        description: "CircleCI machine type to run at"
-        default: << pipeline.parameters.ubuntu-amd64-machine-image >>
-        type: string
-    machine:
-      image: << parameters.machine_image >>
-      resource_class: << parameters.resource_class >>
+  itest-cfn-v2-engine-provider:
+    executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
-    parallelism: 2
+    environment:
+      PYTEST_LOGLEVEL: << pipeline.parameters.PYTEST_LOGLEVEL >>
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
+      - prepare-testselection
+      - prepare-pytest-tinybird
+      - prepare-account-region-randomization
       - run:
-          name: Load docker localstack-full image
-          command: docker load -i target/localstack-docker-images-<< parameters.platform >>.tar
-      - run:
-          name: Run integration tests
-          # circleci split returns newline separated list, so `tr` is necessary to prevent problems in the Makefile
+          name: Test CloudFormation Engine v2
+          environment:
+            PROVIDER_OVERRIDE_CLOUDFORMATION: "engine-v2"
+            TEST_PATH: "tests/aws/services/cloudformation/v2"
+            COVERAGE_ARGS: "-p"
+          # TODO: use docker-run-tests
           command: |
-            TEST_FILES=$(circleci tests glob "tests/integration/**/test_*.py" | circleci tests split --split-by=timings | tr '\n' ' ')
-            PYTEST_ARGS="-o junit_family=legacy --junitxml=target/reports/test-report-<< parameters.platform >>-${CIRCLE_NODE_INDEX}.xml" \
-            COVERAGE_FILE="target/coverage/.coverage.<< parameters.platform >>.${CIRCLE_NODE_INDEX}" \
-            TEST_PATH=$TEST_FILES \
-            make docker-run-tests
-      - store_test_results:
-          path: target/reports/
+            COVERAGE_FILE="target/coverage/.coverage.cloudformation_v2.${CIRCLE_NODE_INDEX}" \
+            PYTEST_ARGS="${TINYBIRD_PYTEST_ARGS}${TESTSELECTION_PYTEST_ARGS}--reruns 3 --junitxml=target/reports/cloudformation_v2.xml -o junit_suite_name='cloudformation_v2'" \
+            make test-coverage
       - persist_to_workspace:
           root:
             /tmp/workspace
           paths:
-            - repo/target/reports/
             - repo/target/coverage/
-            - repo/target/metric_reports
+      - store_test_results:
+          path: target/reports/
 
+  #########################
+  ## Parity Metrics Jobs ##
+  #########################
   capture-not-implemented:
-    parameters:
-      pro:
-        description: "Run tests against Pro?"
-        default: false
-        type: boolean
     executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
+    environment:
+      IMAGE_NAME: "localstack/localstack"
+      PLATFORM: "amd64"
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
       - run:
-          name: Load docker localstack-full image
-          command: docker load -i target/localstack-docker-images-amd64.tar
+          name: Load docker image
+          working_directory: target
+          command: ../bin/docker-helper.sh load
       - run:
          name: Run localstack
          command: |
-            <<#parameters.pro>>LOCALSTACK_API_KEY=$TEST_LOCALSTACK_API_KEY<</parameters.pro>> DEBUG=1 DISABLE_EVENTS="1" IMAGE_NAME="localstack/localstack-full:latest" bin/localstack start -d
-            bin/localstack wait -t 120 || (bin/localstack logs && false)
+            source .venv/bin/activate
+            DEBUG=1 DISABLE_EVENTS="1" IMAGE_NAME="localstack/localstack:latest" localstack start -d
+            localstack wait -t 120 || (python -m localstack.cli.main logs && false)
       - run:
          name: Run capture-not-implemented
          command: |
            source .venv/bin/activate
            cd scripts
-            <<#parameters.pro>>mkdir -p pro<</parameters.pro>>
-            python -m capture_notimplemented_responses <<#parameters.pro>>./pro<</parameters.pro>>
+            python -m capture_notimplemented_responses
       - run:
          name: Print the logs
          command: |
@@ -347,15 +597,31 @@
          root:
            /tmp/workspace
          paths:
-            - repo/scripts/<<#parameters.pro>>pro/<</parameters.pro>>implementation_coverage_aggregated.csv
-            - repo/scripts/<<#parameters.pro>>pro/<</parameters.pro>>implementation_coverage_full.csv
+            - repo/scripts/implementation_coverage_aggregated.csv
+            - repo/scripts/implementation_coverage_full.csv
+
+  ############################
+  ## Result Publishing Jobs ##
+  ############################
   report:
     executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
+      - run:
+          name: Collect isolated acceptance coverage
+          command: |
+            source .venv/bin/activate
+            mkdir target/coverage/acceptance
+            cp target/coverage/.coverage.acceptance.* target/coverage/acceptance
+            cd target/coverage/acceptance
+            coverage combine
+            mv .coverage ../../../.coverage.acceptance
+      - store_artifacts:
+          path: .coverage.acceptance
       - run:
           name: Collect coverage
           command: |
@@ -367,26 +633,71 @@
       - run:
           name: Report coverage statistics
           command: |
-            source .venv/bin/activate
-            coverage report || true
-            coverage html || true
-            coveralls || true
+            if [ -z "${CI_PULL_REQUEST}" ]; then
+              source .venv/bin/activate
+              coverage report || true
+              coverage html || true
+              coveralls || true
+            else
+              echo "Skipping coverage reporting for pull request."
+            fi
+      - run:
+          name: Store acceptance parity metrics
+          command: |
+            mkdir acceptance_parity_metrics
+            mv target/metric_reports/metric-report*acceptance* acceptance_parity_metrics/
+      - run:
+          name: Upload test metrics and implemented coverage data to tinybird
+          command: |
+            if [ -z "$CIRCLE_PR_REPONAME" ] ; then
+              # check if a fork-only env var is set (https://circleci.com/docs/variables/)
+              source .venv/bin/activate
+              mkdir parity_metrics && mv target/metric_reports/metric-report-raw-data-*amd64*.csv parity_metrics
+              METRIC_REPORT_DIR_PATH=parity_metrics \
+              IMPLEMENTATION_COVERAGE_FILE=scripts/implementation_coverage_full.csv \
+              SOURCE_TYPE=community \
+              python -m scripts.tinybird.upload_raw_test_metrics_and_coverage
+            else
+              echo "Skipping parity reporting to tinybird (no credentials, running on fork)..."
+            fi
+
       - run:
-          name: Parity metric aggregation
+          name: Create Coverage Diff (Code Coverage)
+          # pycobertura diff will return with exit code 0-3 -> we currently expect 2 (2: the changes worsened the overall coverage),
+          # but we still want CircleCI to continue with the tasks, so we return 0.
+          # From the docs:
+          # Upon exit, the diff command may return various exit codes:
+          # 0: all changes are covered, no new uncovered statements have been introduced
+          # 1: some exception occurred (likely due to inappropriate usage or a bug in pycobertura)
+          # 2: the changes worsened the overall coverage
+          # 3: the changes introduced uncovered statements but the overall coverage is still better than before
           command: |
             source .venv/bin/activate
-            python -m scripts.metric_aggregator . amd64
+            pip install pycobertura
+            coverage xml --data-file=.coverage -o all.coverage.report.xml --include="localstack-core/localstack/services/*/**" --omit="*/**/__init__.py"
+            coverage xml --data-file=.coverage.acceptance -o acceptance.coverage.report.xml --include="localstack-core/localstack/services/*/**" --omit="*/**/__init__.py"
+            pycobertura show --format html acceptance.coverage.report.xml -o coverage-acceptance.html
+            bash -c "pycobertura diff --format html all.coverage.report.xml acceptance.coverage.report.xml -o coverage-diff.html; if [[ \$? -eq 1 ]] ; then exit 1 ; else exit 0 ; fi"
       - run:
-          name: Upload test metrics and implemented coverage data to tinybird
+          name: Create Metric Coverage Diff (API Coverage)
+          environment:
+            COVERAGE_DIR_ALL: "parity_metrics"
+            COVERAGE_DIR_ACCEPTANCE: "acceptance_parity_metrics"
+            OUTPUT_DIR: "api-coverage"
           command: |
             source .venv/bin/activate
-            METRIC_REPORT_FILE=$(find parity_metrics -type f -iname "metric-report-raw-data-all-*.csv")
-            METRIC_REPORT_PATH=$METRIC_REPORT_FILE \
-            COMMUNITY_IMPL_COV_PATH=scripts/implementation_coverage_full.csv \
-            PRO_IMPL_COV_PATH=scripts/pro/implementation_coverage_full.csv \
-            python -m scripts.tinybird.upload_raw_test_metrics_and_coverage
+            mkdir api-coverage
+            python -m scripts.metrics_coverage.diff_metrics_coverage
+      - store_artifacts:
+          path: api-coverage/
+      - store_artifacts:
+          path: coverage-acceptance.html
+      - store_artifacts:
+          path: coverage-diff.html
       - store_artifacts:
           path: parity_metrics/
+      - store_artifacts:
+          path: acceptance_parity_metrics/
       - store_artifacts:
           path: scripts/implementation_coverage_aggregated.csv
           destination: community/implementation_coverage_aggregated.csv
@@ -394,81 +705,219 @@
           path: scripts/implementation_coverage_full.csv
           destination: community/implementation_coverage_full.csv
       - store_artifacts:
-          path: scripts/pro/implementation_coverage_aggregated.csv
-          destination: pro/implementation_coverage_aggregated.csv
-      - store_artifacts:
-          path: scripts/pro/implementation_coverage_full.csv
-          destination: pro/implementation_coverage_full.csv
+          path: .coverage
 
-  docker-push:
+  push:
     executor: ubuntu-machine-amd64
     working_directory: /tmp/workspace/repo
+    environment:
+      IMAGE_NAME: "localstack/localstack"
     steps:
+      - prepare-acceptance-tests
       - attach_workspace:
           at: /tmp/workspace
       - run:
-          name: Load docker images - amd64
+          name: Install global python dependencies
           command: |
-            # Load all images for AMD64
-            docker load -i target/localstack-docker-images-amd64.tar
+            pip install --upgrade setuptools setuptools_scm
+      - run:
+          name: Load docker image - amd64
+          working_directory: target
+          environment:
+            PLATFORM: amd64
+          command: ../bin/docker-helper.sh load
       - run:
           name: Log in to ECR registry
           command: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws
       - run:
-          name: Push docker images - amd64
+          name: Push docker image - amd64
+          environment:
+            PLATFORM: amd64
           command: |
             # Push to Docker Hub
-            PLATFORM="amd64" make docker-push-master-all
+            ./bin/docker-helper.sh push
             # Push to Amazon Public ECR
-            PLATFORM="amd64" SOURCE_IMAGE_NAME="localstack/localstack" TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack" make docker-push-master
-            PLATFORM="amd64" SOURCE_IMAGE_NAME="localstack/localstack-pro" TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack-pro" make docker-push-master
+            TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push
       # Load and push per architecture (load overwrites the previous ones)
       - run:
           name: Load docker image - arm64
-          command: |
-            # Load all images for AMD64
-            docker load -i target/localstack-docker-images-arm64.tar
+          working_directory: target
+          environment:
+            PLATFORM: arm64
+          command: ../bin/docker-helper.sh load
       - run:
-          name: Push docker images - arm64
+          name: Push docker image - arm64
+          environment:
+            PLATFORM: arm64
          command: |
            # Push to Docker Hub
-            PLATFORM="arm64" make docker-push-master-all
+            ./bin/docker-helper.sh push
            # Push to Amazon Public ECR
-            PLATFORM="arm64" SOURCE_IMAGE_NAME="localstack/localstack" TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack" make docker-push-master
-            PLATFORM="arm64" SOURCE_IMAGE_NAME="localstack/localstack-pro" TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack-pro" make docker-push-master
+            TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push
       - run:
-          name: Create multi-platform manifests - full
+          name: Create multi-platform manifests
           command: |
             # Push to Docker Hub
-            MANIFEST_IMAGE_NAME="localstack/localstack-full" make docker-create-push-manifests
+            ./bin/docker-helper.sh push-manifests
+            # Push to Amazon Public ECR
+            IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push-manifests
       - run:
-          name: Create multi-platform manifests - light
+          name: Publish a dev release
           command: |
-            # Push to Docker Hub
-            make docker-create-push-manifests-light
-            # Push to Amazon Public ECR
-            MANIFEST_IMAGE_NAME="public.ecr.aws/localstack/localstack" make docker-create-push-manifests
-            MANIFEST_IMAGE_NAME="public.ecr.aws/localstack/localstack-pro" make docker-create-push-manifests
+            if git describe --exact-match --tags >/dev/null 2>&1; then
+              echo "not publishing a dev release as this is a tagged commit"
+            else
+              source .venv/bin/activate
+              make publish || echo "dev release failed (maybe it is already published)"
+            fi
+
+  push-to-tinybird:
+    executor: ubuntu-machine-amd64
+    working_directory: /tmp/workspace/repo
+    steps:
+      - prepare-acceptance-tests
+      - run:
+          name: Wait for the workflow to complete
+          command: |
+            # Record the time this step started
+            START_TIME=$(date +%s)
+
+            # Determine if reporting the workflow is even necessary and what the workflow variant is
+            if [[ << pipeline.parameters.randomize-aws-credentials >> == "true" ]] && [[ $ONLY_ACCEPTANCE_TESTS == "true" ]] ; then
+              echo "Don't report only-acceptance-test workflows with randomized aws credentials"
+              circleci-agent step halt
+            elif [[ << pipeline.parameters.randomize-aws-credentials >> == "true" ]] ; then
+              TINYBIRD_WORKFLOW=tests_circleci_ma_mr
+            elif [[ $ONLY_ACCEPTANCE_TESTS == "true" ]] ; then
+              TINYBIRD_WORKFLOW=tests_circleci_acceptance
+            else
+              TINYBIRD_WORKFLOW=tests_circleci
+            fi
+
+
+            # wait for the workflow to be done
+            while [[ $(curl --location --request GET "https://circleci.com/api/v2/workflow/$CIRCLE_WORKFLOW_ID/job"| jq -r '.items[]|select(.name != "push-to-tinybird" and .name != "push" and .name != "report")|.status' | grep -c "running") -gt 0 ]]; do
+              sleep 10
+            done
+
+            # check if a step failed / determine the outcome
+            FAILED_COUNT=$(curl --location --request GET "https://circleci.com/api/v2/workflow/$CIRCLE_WORKFLOW_ID/job" | jq -r '.items[]|.status' | grep -c "failed") || true
+            echo "failed count: $FAILED_COUNT"
+            if [[ $FAILED_COUNT -eq 0 ]]; then
+              OUTCOME="success"
+            else
+              OUTCOME="failure"
+            fi
+            echo "outcome: $OUTCOME"
+
+            # Record the time this step is done
+            END_TIME=$(date +%s)
+
+            # Build the payload
+            echo '{"workflow": "'$TINYBIRD_WORKFLOW'", "attempt": 1, "run_id": "'$CIRCLE_WORKFLOW_ID'", "start": '$START_TIME', "end": '$END_TIME', "commit": "'$CIRCLE_SHA1'", "branch": "'$CIRCLE_BRANCH'", "repository": "'$CIRCLE_PROJECT_USERNAME'/'$CIRCLE_PROJECT_REPONAME'", "outcome": "'$OUTCOME'", "workflow_url": "'$CIRCLE_BUILD_URL'"}' > stats.json
+            echo 'Sending: '$(cat stats.json)
+
+            # Send the data to Tinybird
+            curl -X POST "https://api.tinybird.co/v0/events?name=ci_workflows" -H "Authorization: Bearer $TINYBIRD_CI_TOKEN" -d @stats.json
+            # Fail this step depending on the success to trigger a rerun of this step together with others in case of a "rerun failed"
+            [[ $OUTCOME = "success" ]] && exit 0 || exit 1
+
+
+####################
+## Workflow setup ##
+####################
 workflows:
-  main:
+  acceptance-only-run:
+    # this workflow only runs when only-acceptance-tests is explicitly set
+    # or when the pipeline is running on the master branch but is neither scheduled nor a manual run
+    # (basically the opposite of the full-run workflow)
+    when:
+      or:
+        - << pipeline.parameters.only-acceptance-tests >>
+        - and:
+            - equal: [ master, << pipeline.git.branch>> ]
+            - equal: [ webhook, << pipeline.trigger_source >> ]
+    jobs:
+      - push-to-tinybird:
+          filters:
+            branches:
+              only: master
+      - install
+      - preflight:
+          requires:
+            - install
+      - unit-tests:
+          requires:
+            - preflight
+      - docker-build:
+          name: docker-build-amd64
+          platform: amd64
+          machine_image: << pipeline.parameters.ubuntu-amd64-machine-image >>
+          resource_class: medium
+          requires:
+            - preflight
+      - docker-build:
+          name: docker-build-arm64
+          platform: arm64
+          # The latest version of ubuntu is not yet supported for ARM:
+          # https://circleci.com/docs/2.0/arm-resources/
+          machine_image: << pipeline.parameters.ubuntu-arm64-machine-image >>
+          resource_class: arm.medium
+          requires:
+            - preflight
+      - acceptance-tests:
+          name: acceptance-tests-arm64
+          platform: arm64
+          resource_class: arm.medium
+          machine_image: << pipeline.parameters.ubuntu-arm64-machine-image >>
+          requires:
+            - docker-build-arm64
+      - acceptance-tests:
+          name: acceptance-tests-amd64
+          platform: amd64
+          machine_image: << pipeline.parameters.ubuntu-amd64-machine-image >>
+          resource_class: medium
+          requires:
+            - docker-build-amd64
+  full-run:
+    # this workflow only runs when only-acceptance-tests is not explicitly set (the default)
+    # and the pipeline is not running on the master branch because of a GitHub event (webhook)
+    # (basically the opposite of the acceptance-only-run workflow)
+    unless:
+      or:
+        - << pipeline.parameters.only-acceptance-tests >>
+        - and:
+            - equal: [ master, << pipeline.git.branch>> ]
+            - equal: [ webhook, << pipeline.trigger_source >> ]
     jobs:
+      - push-to-tinybird:
+          filters:
+            branches:
+              only: master
       - install
       - preflight:
           requires:
             - install
-      - itest-lambda-docker:
+      - test-selection:
+          requires:
+            - install
+      - itest-cloudwatch-v1-provider:
           requires:
             - preflight
+            - test-selection
-      - itest-lambda-provider:
+      - itest-events-v1-provider:
           requires:
             - preflight
+            - test-selection
-      - itest-s3-asf-provider:
+      - itest-ddb-v2-provider:
           requires:
             - preflight
+            - test-selection
-      - itest-sfn-v2-provider:
+      - itest-cfn-v2-engine-provider:
           requires:
             - preflight
+            - test-selection
       - unit-tests:
           requires:
             - preflight
@@ -488,49 +937,40 @@ workflows:
           resource_class: arm.medium
           requires:
            - preflight
-      - docker-test:
-          name: docker-test-arm64
+      - acceptance-tests:
+          name: acceptance-tests-arm64
+          platform: arm64
+          resource_class: arm.medium
+          machine_image: << pipeline.parameters.ubuntu-arm64-machine-image >>
+          requires:
+            - docker-build-arm64
+      - acceptance-tests:
+          name: acceptance-tests-amd64
+          platform: amd64
+          machine_image: << pipeline.parameters.ubuntu-amd64-machine-image >>
+          resource_class: medium
+          requires:
+            - docker-build-amd64
+      - integration-tests:
+          name: integration-tests-arm64
           platform: arm64
           resource_class: arm.medium
           machine_image: << pipeline.parameters.ubuntu-arm64-machine-image >>
          requires:
            - docker-build-arm64
+            - test-selection
-      - docker-test:
-          name: docker-test-amd64
+      - integration-tests:
+          name: integration-tests-amd64
          platform: amd64
          resource_class: medium
          machine_image: << pipeline.parameters.ubuntu-amd64-machine-image >>
          requires:
            - docker-build-amd64
-      - capture-not-implemented:
-          name: collect-not-implemented-community
+            - test-selection
+      - bootstrap-tests:
          requires:
            - docker-build-amd64
       - capture-not-implemented:
-          name: collect-not-implemented-pro
-          pro: true
+          name: collect-not-implemented
          requires:
            - docker-build-amd64
-      - report:
-          requires:
-            - itest-lambda-docker
-            - itest-lambda-provider
-            - itest-s3-asf-provider
-            - itest-sfn-v2-provider
-            - docker-test-amd64
-            - docker-test-arm64
-            - collect-not-implemented-community
-            - collect-not-implemented-pro
-            - unit-tests
-      - docker-push:
-          filters:
-            branches:
-              only: master
-          requires:
-            - itest-lambda-docker
-            - itest-lambda-provider
-            - itest-s3-asf-provider
-            - itest-sfn-v2-provider
-            - docker-test-amd64
-            - docker-test-arm64
-            - unit-tests
diff --git a/.dockerignore b/.dockerignore
index 23661895a0302..840fcde68d78b 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,11 +1,11 @@
-.git
 .venv*
 .filesystem
+**/.filesystem
 
 # ignore files generated in CI build
-tests/integration/**/node_modules
-tests/integration/**/.terraform
+tests/aws/**/node_modules
+tests/aws/**/.terraform
 **/__pycache__
 target/
 htmlcov/
diff --git a/.env-exporter b/.env-exporter
deleted file mode 100644
index 16c6a918f6f73..0000000000000
--- a/.env-exporter
+++ /dev/null
@@ -1,12 +0,0 @@
-# ~/.env-exporter
-# about zlib
-export CFLAGS="-I$(xcrun --show-sdk-path)/usr/include"
-# about readline
-export CFLAGS="-I$(brew --prefix readline)/include $CFLAGS"
-export LDFLAGS="-L$(brew --prefix readline)/lib $LDFLAGS"
-# about openssl
-export CFLAGS="-I$(brew --prefix openssl)/include $CFLAGS"
-export LDFLAGS="-L$(brew --prefix openssl)/lib $LDFLAGS"
-# about SQLite (maybe not necessary)
-export CFLAGS="-I$(brew --prefix sqlite)/include $CFLAGS"
-export LDFLAGS="-L$(brew --prefix sqlite)/lib $LDFLAGS"
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
index 8c7922f005c22..16e4bab6fbb37 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -57,10 +57,21 @@ body:
       description: |
         examples:
           - **OS**: Ubuntu 20.04
-          - **LocalStack**: latest
+          - **LocalStack**:
+            You can find this information in the logs when starting localstack
+
+              LocalStack version: 3.4.1.dev
+              LocalStack Docker image sha: sha256:f02ab8ef73f66b0ab26bb3d24a165e1066a714355f79a42bf8aa1a336d5722e7
+              LocalStack build date: 2024-05-14
+              LocalStack build git hash: ecd7dc879
+
       value: |
         - OS:
         - LocalStack:
+          LocalStack version:
+          LocalStack Docker image sha:
+          LocalStack build date:
+          LocalStack build git hash:
       render: markdown
     validations:
       required: false
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index ce9229616970d..47868d7b130ac 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,11 +1,8 @@
 blank_issues_enabled: true
 contact_links:
-  - name: ❓ Question
-    url: https://discuss.localstack.cloud
-    about: Ask a question about LocalStack
   - name: 📖 LocalStack Documentation
     url: https://localstack.cloud/docs/getting-started/overview/
     about: The LocalStack documentation may answer your questions!
   - name: 💬 LocalStack Community Support (Slack)
-    url: https://localstack-community.slack.com
+    url: https://localstack.cloud/slack
     about: Please ask and answer questions here.
diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml
index dc62466dd15eb..747700eb3bca9 100644
--- a/.github/ISSUE_TEMPLATE/feature-request.yml
+++ b/.github/ISSUE_TEMPLATE/feature-request.yml
@@ -1,4 +1,4 @@
-name: 💡 Feature request
+name: ✨ Feature request
 description: Request a new feature
 title: "feature request: "
 labels: ["type: feature", "status: triage needed"]
@@ -31,7 +31,6 @@ body:
       label: Anything else?
       description: |
         Links? References? Anything that will give us more context about the issue you are encountering!
-        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
     validations:
       required: false
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 367c67bd7f151..ee3b8eecf5459 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1 +1,24 @@
-**Please refer to the contribution guidelines in the README when submitting PRs.**
+<!-- Please refer to the contribution guidelines before raising a PR: https://github.com/localstack/localstack/blob/master/docs/CONTRIBUTING.md -->
+
+<!-- Why am I raising this PR? Add context such as related issues, PRs, or documentation. -->
+## Motivation
+
+
+<!-- What changes does this PR make? How does LocalStack behave differently now? -->
+## Changes
+
+<!-- Optional section: How to test these changes? -->
+<!--
+## Testing
+
+-->
+
+<!-- Optional section: What's left to do before it can be merged? -->
+<!--
+## TODO
+
+What's left to do:
+
+- [ ] ...
+- [ ] ...
+-->
diff --git a/.github/actions/build-image/action.yml b/.github/actions/build-image/action.yml
new file mode 100644
index 0000000000000..eeb8832cb4494
--- /dev/null
+++ b/.github/actions/build-image/action.yml
@@ -0,0 +1,63 @@
+name: 'Build Image'
+description: 'Composite action which combines all steps necessary to build the LocalStack Community image.'
+inputs:
+  dockerhubPullUsername:
+    description: 'Username to log in to DockerHub to mitigate rate limiting issues with DockerHub.'
+    required: false
+  dockerhubPullToken:
+    description: 'API token to log in to DockerHub to mitigate rate limiting issues with DockerHub.'
+    required: false
+  disableCaching:
+    description: 'Disable Caching'
+    required: false
+outputs:
+  image-artifact-name:
+    description: "Name of the artifact containing the built docker image"
+    value: ${{ steps.image-artifact-name.outputs.image-artifact-name }}
+runs:
+  using: "composite"
+  # This GH Action requires localstack repo in 'localstack' dir + full git history (fetch-depth: 0)
+  steps:
+    - name: Set up Python
+      uses: actions/setup-python@v5
+      with:
+        python-version-file: 'localstack/.python-version'
+
+    - name: Install docker helper dependencies
+      shell: bash
+      run: pip install --upgrade setuptools setuptools_scm
+
+    - name: Login to Docker Hub
+      # login to DockerHub to avoid rate limiting issues on custom runners
+      uses: docker/login-action@v3
+      if: ${{ inputs.dockerHubPullUsername != '' && inputs.dockerHubPullToken != '' }}
+      with:
+        username: ${{ inputs.dockerhubPullUsername }}
+        password: ${{ inputs.dockerhubPullToken }}
+
+    - name: Build Docker Image
+      id: build-image
+      shell: bash
+      env:
+        DOCKER_BUILD_FLAGS: "--load ${{ inputs.disableCaching == 'true' && '--no-cache' || '' }}"
+        PLATFORM: ${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}
+        DOCKERFILE: ../Dockerfile
+        DOCKER_BUILD_CONTEXT: ..
+        IMAGE_NAME: "localstack/localstack"
+      working-directory: localstack/localstack-core
+      run: |
+        ../bin/docker-helper.sh build
+        ../bin/docker-helper.sh save
+
+    - name: Store Docker Image as Artifact
+      uses: actions/upload-artifact@v4
+      with:
+        name: localstack-docker-image-${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}
+        # the path is defined by the "save" command of the docker-helper, which sets a GitHub output "IMAGE_FILENAME"
+        path: localstack/localstack-core/${{ steps.build-image.outputs.IMAGE_FILENAME || steps.build-test-image.outputs.IMAGE_FILENAME}}
+        retention-days: 1
+
+    - name: Set image artifact name as output
+      id: image-artifact-name
+      shell: bash
+      run: echo "image-artifact-name=localstack-docker-image-${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_OUTPUT
diff --git a/.github/actions/load-localstack-docker-from-artifacts/action.yml b/.github/actions/load-localstack-docker-from-artifacts/action.yml
new file mode 100644
index 0000000000000..cb22c52682734
--- /dev/null
+++ b/.github/actions/load-localstack-docker-from-artifacts/action.yml
@@ -0,0 +1,31 @@
+name: 'Load Localstack Docker image'
+description: 'Composite action that loads a LocalStack Docker image from a tar archive stored in GitHub Workflow Artifacts into the local Docker image cache'
+inputs:
+  platform:
+    required: false
+    description: Target architecture for running the Docker image
+    default: "amd64"
+runs:
+  using: "composite"
+  steps:
+    - name: Download Docker Image
+      uses: actions/download-artifact@v4
+      with:
+        name: localstack-docker-image-${{ inputs.platform }}
+
+    - name: Set up Python
+      uses: actions/setup-python@v5
+      with:
+        python-version-file: '.python-version'
+        cache: 'pip'
+        cache-dependency-path: 'requirements-typehint.txt'
+
+    - name: Install docker helper dependencies
+      shell: bash
+      run: pip install --upgrade setuptools setuptools_scm
+
+    - name: Load Docker Image
+      shell: bash
+      env:
+        PLATFORM: ${{ inputs.platform }}
+      run: bin/docker-helper.sh load
diff --git a/.github/actions/setup-tests-env/action.yml b/.github/actions/setup-tests-env/action.yml
new file mode 100644
index 0000000000000..95cd7fe359787
--- /dev/null
+++ b/.github/actions/setup-tests-env/action.yml
@@ -0,0 +1,22 @@
+name: 'Setup Test Environment'
+description: 'Composite action which combines all steps necessary to setup the runner for test execution'
+runs:
+  using: "composite"
+  steps:
+    - name: Set up Python
+      uses: actions/setup-python@v5
+      with:
+        python-version-file: '.python-version'
+        cache: 'pip'
+        cache-dependency-path: 'requirements-typehint.txt'
+
+    - name: Install Community Dependencies
+      shell: bash
+      run: make install-dev-types
+
+    - name: Setup environment
+      shell: bash
+      run: |
+        make install
+        mkdir -p target/reports
+        mkdir -p target/coverage
diff --git a/.github/bot_templates/MARKER_REPORT_ISSUE.md.j2 b/.github/bot_templates/MARKER_REPORT_ISSUE.md.j2
new file mode 100644
index 0000000000000..75aaab56924b9
--- /dev/null
+++ b/.github/bot_templates/MARKER_REPORT_ISSUE.md.j2
@@ -0,0 +1,36 @@
+---
+title: AWS Marker Report
+---
+# AWS Marker Report
+
+- Repository: {{ data.meta.repo_url }}
+- Reference Commit: `{{ data.meta.commit_sha }}`
+- Timestamp: `{{ data.meta.timestamp }}`
+
+This is an autogenerated report on our pytest marker usage with a special focus on our AWS compatibility markers, i.e. the ones prefixed with `aws_`.
+
+## Overview
+
+```text
+{% for name, count in data.aggregated.items() -%}
+{{ name }} : {{ count }}
+{% endfor -%}
+```
+
+Both `aws_unknown` and `aws_needs_fixing` should be reduced to `0` over time.
+If you have some spare capacity please take one of these tests, try it against AWS and see if it works. Replace `aws_unknown` with the correct marker.
+To avoid the case where two people are concurrently working on one test case, please tick the box to "claim" a case when you want to work on it.
+
+_Note_: The individual assignments here are based on the entries in the [CODEOWNERS]({{ data.meta.repo_url }}/blob/{{ data.meta.commit_sha }}/CODEOWNERS) file.
+
+## unknown ({{ data.aggregated['aws_unknown'] }})
+
+{% for item in data.owners_aws_unknown -%}
+- [ ] `{{ item.pytest_node_id }}` {{ " ".join(item.owners) }}
+{% endfor %}
+
+## needs_fixing ({{ data.aggregated['aws_needs_fixing'] }})
+
+{% for item in data.owners_aws_needs_fixing -%}
+- [ ] `{{ item.pytest_node_id }}` {{ " ".join(item.owners) }}
+{% endfor %}
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000000..e2d4b7fd95167
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,35 @@
+version: 2
+updates:
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    reviewers:
+      - "silv-io"
+      - "alexrashed"
+    ignore:
+      - dependency-name: "python"
+        update-types: ["version-update:semver-major", "version-update:semver-minor"]
+      - dependency-name: "eclipse-temurin"
+        update-types: ["version-update:semver-major"]
+    labels:
+      - "area: dependencies"
+      - "semver: patch"
+    groups:
+      docker-base-images:
+        patterns:
+          - "*"
+  - package-ecosystem: github-actions
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    reviewers:
+      - "silv-io"
+      - "alexrashed"
+    labels:
+      - "area: dependencies"
+      - "semver: patch"
+    groups:
+      github-actions:
+        patterns:
+          - "*"
diff --git a/.github/release.yml b/.github/release.yml
new file mode 100644
index 0000000000000..37ac2b7325bc0
--- /dev/null
+++ b/.github/release.yml
@@ -0,0 +1,15 @@
+# configuration for automatically generated GitHub release notes
+changelog:
+  exclude:
+    labels:
+      - "area: dependencies"
+  categories:
+    - title: Breaking Changes 🛠
+      labels:
+        - "semver: major"
+    - title: Exciting New Features 🎉
+      labels:
+        - "semver: minor"
+    - title: Other Changes
+      labels:
+        - "*"
diff --git a/.github/renovate.json b/.github/renovate.json
deleted file mode 100644
index 262195574aa39..0000000000000
--- a/.github/renovate.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-  "extends": [
-    "schedule:earlyMondays",
-    ":semanticCommitsDisabled"
-  ],
-  "enabledManagers": ["dockerfile"],
-  "docker": {
-    "pinDigests": true,
-    "ignorePaths": ["bin/lambda/Dockerfile.nodejs14x", "Dockerfile.rh"]
-  },
-  "major": {
-    "enabled": false
-  },
-  "minor": {
-    "enabled": false
-  },
-  "packageRules": [
-    {
-      "managers": ["dockerfile"],
-      "commitMessageAction": "update",
-      "commitMessageTopic": "Docker base image ({{{depName}}}) tag",
-      "digest": {
-        "commitMessageTopic": "Docker base image ({{{depName}}}{{#if currentValue}}:{{{currentValue}}}{{/if}}) digest"
-      }
-    }
-  ]
-}
diff --git a/.github/workflows/asf-updates.yml b/.github/workflows/asf-updates.yml
index 2186eeb59a00c..69bf11a17e754 100644
--- a/.github/workflows/asf-updates.yml
+++ b/.github/workflows/asf-updates.yml
@@ -10,25 +10,29 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Open Source
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
       - name: Set up system wide dependencies
         run: |
-          sudo apt-get install libsasl2-dev jq
+          sudo apt-get update
+          sudo apt-get install jq
 
-      - name: Set up Python 3.8
+      - name: Set up Python 3.11
         id: setup-python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v5
         with:
-          python-version: 3.8
+          python-version: '3.11'
+
+      - name: Install release helper dependencies
+        run: pip install --upgrade setuptools setuptools_scm
 
       - name: Cache LocalStack community dependencies (venv)
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: .venv
-          key: ${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('setup.cfg') }}
+          key: ${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('requirements-dev.txt') }}
 
       - name: Install dependencies
         run: make install-dev
@@ -43,6 +47,13 @@ jobs:
           source .venv/bin/activate
           python3 -m localstack.aws.scaffold upgrade
 
+      - name: Format code
+        run: |
+          source .venv/bin/activate
+          # explicitly perform an unsafe fix to remove unused imports in the generated ASF APIs
+          ruff check --select F401 --unsafe-fixes --fix . --config "lint.preview = true"
+          make format-modified
+
       - name: Check for changes
         id: check-for-changes
         run: |
@@ -50,11 +61,32 @@ jobs:
           # Check against the PR branch if it exists, otherwise against the master
           # Store the result in target/diff-check.log and store the diff count in the GitHub Action output "diff-count"
           mkdir -p target
-          (git diff --name-only origin/asf-auto-updates localstack/aws/api/ 2>/dev/null || git diff --name-only origin/master localstack/aws/api/ 2>/dev/null) | tee target/diff-check.log
-          echo "::set-output name=diff-count::$(cat target/diff-check.log | wc -l)"
+          (git diff --name-only origin/asf-auto-updates localstack-core/localstack/aws/api/ 2>/dev/null || git diff --name-only origin/master localstack-core/localstack/aws/api/ 2>/dev/null) | tee target/diff-check.log
+          echo "diff-count=$(cat target/diff-check.log | wc -l)" >> $GITHUB_OUTPUT
 
           # Store a (multiline-sanitized) list of changed services (compared to the master) in the GitHub Action output "changed-services"
-          echo "::set-output name=changed-services::$(git diff --name-only origin/master localstack/aws/api/ | sed 's#localstack/aws/api/#- #g' | sed 's#/__init__.py##g' | sed 's/_/-/g' | sed -z 's/\n/%0A/g' | sed -z 's/\r/%0D/g')"
+          echo "changed-services<<EOF" >> $GITHUB_OUTPUT
+          echo "$(git diff --name-only origin/master localstack-core/localstack/aws/api/ | sed 's#localstack-core/localstack/aws/api/#- #g' | sed 's#/__init__.py##g' | sed 's/_/-/g')" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT
+
+      - name: Update botocore and transitive pins
+        # only update the pin if we have updates in the ASF code
+        if: ${{ success() && steps.check-for-changes.outputs.diff-count != '0' && steps.check-for-changes.outputs.diff-count != '' }}
+        run: |
+          source .venv/bin/activate
+          # determine botocore version in venv
+          BOTOCORE_VERSION=$(python -c "import botocore; print(botocore.__version__)");
+          echo "Pinning botocore, boto3, and boto3-stubs to version $BOTOCORE_VERSION"
+          bin/release-helper.sh set-dep-ver botocore "==$BOTOCORE_VERSION"
+          bin/release-helper.sh set-dep-ver boto3 "==$BOTOCORE_VERSION"
+
+          # upgrade the requirements files only for the botocore package
+          pip install pip-tools
+          pip-compile --strip-extras --upgrade-package "botocore==$BOTOCORE_VERSION" --upgrade-package "boto3==$BOTOCORE_VERSION" --extra base-runtime -o requirements-base-runtime.txt pyproject.toml
"botocore==$BOTOCORE_VERSION" --upgrade-package "boto3==$BOTOCORE_VERSION" --upgrade-package "awscli" --extra runtime -o requirements-runtime.txt pyproject.toml + pip-compile --strip-extras --upgrade-package "botocore==$BOTOCORE_VERSION" --upgrade-package "boto3==$BOTOCORE_VERSION" --upgrade-package "awscli" --extra test -o requirements-test.txt pyproject.toml + pip-compile --strip-extras --upgrade-package "botocore==$BOTOCORE_VERSION" --upgrade-package "boto3==$BOTOCORE_VERSION" --upgrade-package "awscli" --extra dev -o requirements-dev.txt pyproject.toml + pip-compile --strip-extras --upgrade-package "botocore==$BOTOCORE_VERSION" --upgrade-package "boto3==$BOTOCORE_VERSION" --upgrade-package "awscli" --extra typehint -o requirements-typehint.txt pyproject.toml - name: Read PR markdown template if: ${{ success() && steps.check-for-changes.outputs.diff-count != '0' && steps.check-for-changes.outputs.diff-count != '' }} @@ -66,14 +98,14 @@ jobs: - name: Add changed services to template if: ${{ success() && steps.check-for-changes.outputs.diff-count != '0' && steps.check-for-changes.outputs.diff-count != '' }} id: markdown - uses: mad9000/actions-find-and-replace-string@2 + uses: mad9000/actions-find-and-replace-string@5 with: source: ${{ steps.template.outputs.content }} find: '{{ SERVICES }}' replace: ${{ steps.check-for-changes.outputs.changed-services }} - name: Create PR - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v7 if: ${{ success() && steps.check-for-changes.outputs.diff-count != '0' && steps.check-for-changes.outputs.diff-count != '' }} with: title: "Update ASF APIs" @@ -82,6 +114,6 @@ jobs: author: "LocalStack Bot <localstack-bot@users.noreply.github.com>" committer: "LocalStack Bot <localstack-bot@users.noreply.github.com>" commit-message: "update generated ASF APIs to latest version" - labels: "area: asf" + labels: "area: asf, area: dependencies, semver: patch" token: ${{ secrets.PRO_ACCESS_TOKEN }} - reviewers: alexrashed + reviewers: silv-io,alexrashed diff --git a/.github/workflows/aws-main.yml b/.github/workflows/aws-main.yml new file mode 100644 index 0000000000000..4a20111727b0f --- /dev/null +++ b/.github/workflows/aws-main.yml @@ -0,0 +1,301 @@ +name: AWS / Build, Test, Push + +on: + schedule: + - cron: 0 2 * * MON-FRI + push: + paths: + - '**' + - '!.github/**' + - '.github/actions/**' + - '.github/workflows/aws-main.yml' + - '.github/workflows/aws-tests.yml' + - '!CODEOWNERS' + - '!README.md' + - '!.gitignore' + - '!.git-blame-ignore-revs' + - '!docs/**' + branches: + - master + pull_request: + paths: + - '**' + - '!.github/**' + - '.github/actions/**' + - '.github/workflows/aws-main.yml' + - '.github/workflows/aws-tests.yml' + - '!CODEOWNERS' + - '!README.md' + - '!.gitignore' + - '!.git-blame-ignore-revs' + - '!docs/**' + workflow_dispatch: + inputs: + onlyAcceptanceTests: + description: 'Only run acceptance tests' + required: false + type: boolean + default: false + forceARMTests: + description: 'Run the ARM tests' + required: false + type: boolean + default: false + enableTestSelection: + description: 'Enable Test Selection' + required: false + type: boolean + default: false + disableCaching: + description: 'Disable Caching' + required: false + type: boolean + default: false + PYTEST_LOGLEVEL: + type: choice + description: Loglevel for PyTest + options: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + default: WARNING + +env: + # Docker Image name and default tag used by docker-helper.sh + IMAGE_NAME: 
"localstack/localstack" + DEFAULT_TAG: "latest" + PLATFORM_NAME_AMD64: "amd64" + PLATFORM_NAME_ARM64: "arm64" + + +jobs: + test: + name: "Run integration tests" + uses: ./.github/workflows/aws-tests.yml + with: + # onlyAcceptance test is either explicitly set, or it's a push event. + # otherwise it's false (schedule event, workflow_dispatch event without setting it to true) + onlyAcceptanceTests: ${{ inputs.onlyAcceptanceTests == true || github.event_name == 'push' }} + # default "disableCaching" to `false` if it's a push or schedule event + disableCaching: ${{ inputs.disableCaching == true }} + # default "disableTestSelection" to `true` if it's a push or schedule event + disableTestSelection: ${{ (inputs.enableTestSelection != '' && inputs.enableTestSelection) || github.event_name == 'push' }} + PYTEST_LOGLEVEL: ${{ inputs.PYTEST_LOGLEVEL }} + forceARMTests: ${{ inputs.forceARMTests == true }} + secrets: + DOCKERHUB_PULL_USERNAME: ${{ secrets.DOCKERHUB_PULL_USERNAME }} + DOCKERHUB_PULL_TOKEN: ${{ secrets.DOCKERHUB_PULL_TOKEN }} + TINYBIRD_CI_TOKEN: ${{ secrets.TINYBIRD_CI_TOKEN }} + + report: + name: "Publish coverage and parity metrics" + runs-on: ubuntu-latest + needs: + - test + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version-file: '.python-version' + cache: 'pip' + cache-dependency-path: 'requirements-dev.txt' + + - name: Install Community Dependencies + shell: bash + run: make install-dev + + - name: Load all test results + uses: actions/download-artifact@v4 + with: + pattern: test-results-* + path: target/coverage/ + merge-multiple: true + + - name: Combine coverage results from acceptance tests + run: | + source .venv/bin/activate + mkdir target/coverage/acceptance + cp target/coverage/.coverage.acceptance* target/coverage/acceptance + cd target/coverage/acceptance + coverage combine + mv .coverage ../../../.coverage.acceptance + + - name: Combine all coverage results + run: | + source .venv/bin/activate + cd target/coverage + ls -la + coverage combine + mv .coverage ../../ + + - name: Report coverage statistics + env: + COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} + run: | + source .venv/bin/activate + coverage report || true + coverage html || true + coveralls || true + + - name: Create Coverage Diff (Code Coverage) + # pycobertura diff will return with exit code 0-3 -> we currently expect 2 (2: the changes worsened the overall coverage), + # but we still want cirecleci to continue with the tasks, so we return 0. 
+ # From the docs: + # Upon exit, the diff command may return various exit codes: + # 0: all changes are covered, no new uncovered statements have been introduced + # 1: some exception occurred (likely due to inappropriate usage or a bug in pycobertura) + # 2: the changes worsened the overall coverage + # 3: the changes introduced uncovered statements but the overall coverage is still better than before + run: | + source .venv/bin/activate + pip install pycobertura + coverage xml --data-file=.coverage -o all.coverage.report.xml --include="localstack-core/localstack/services/*/**" --omit="*/**/__init__.py" + coverage xml --data-file=.coverage.acceptance -o acceptance.coverage.report.xml --include="localstack-core/localstack/services/*/**" --omit="*/**/__init__.py" + pycobertura show --format html acceptance.coverage.report.xml -o coverage-acceptance.html + bash -c "pycobertura diff --format html all.coverage.report.xml acceptance.coverage.report.xml -o coverage-diff.html; if [[ \$? -eq 1 ]] ; then exit 1 ; else exit 0 ; fi" + + - name: Create Metric Coverage Diff (API Coverage) + env: + COVERAGE_DIR_ALL: "parity_metrics" + COVERAGE_DIR_ACCEPTANCE: "acceptance_parity_metrics" + OUTPUT_DIR: "api-coverage" + run: | + source .venv/bin/activate + mkdir $OUTPUT_DIR + python -m scripts.metrics_coverage.diff_metrics_coverage + + - name: Archive coverage and parity metrics + uses: actions/upload-artifact@v4 + with: + name: coverage-and-parity-metrics + path: | + .coverage + api-coverage/ + coverage-acceptance.html + coverage-diff.html + parity_metrics/ + acceptance_parity_metrics/ + scripts/implementation_coverage_aggregated.csv + scripts/implementation_coverage_full.csv + retention-days: 7 + + push: + name: "Push images" + runs-on: ubuntu-latest + # push image on master, target branch not set, and the dependent steps were either successful or skipped + if: github.ref == 'refs/heads/master' && !failure() && !cancelled() && github.repository == 'localstack/localstack' + needs: + # all tests need to be successful for the image to be pushed + - test + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + # setuptools_scm requires the git history (at least until the last tag) to determine the version + fetch-depth: 0 + + - name: Load Localstack ${{ env.PLATFORM_NAME_AMD64 }} Docker Image + uses: ./.github/actions/load-localstack-docker-from-artifacts + with: + platform: ${{ env.PLATFORM_NAME_AMD64 }} + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + with: + registry-type: public + + - name: Push ${{ env.PLATFORM_NAME_AMD64 }} Docker Image + env: + DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + PLATFORM: ${{ env.PLATFORM_NAME_AMD64 }} + run: | + # Push to Docker Hub + ./bin/docker-helper.sh push + # Push to Amazon Public ECR + TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push + + - name: Load Localstack ${{ env.PLATFORM_NAME_ARM64 }} Docker Image + uses: ./.github/actions/load-localstack-docker-from-artifacts + with: + platform: ${{ env.PLATFORM_NAME_ARM64 }} + + - name: Push ${{ env.PLATFORM_NAME_ARM64 }} Docker Image + env: + DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + 
PLATFORM: ${{ env.PLATFORM_NAME_ARM64 }} + run: | + # Push to Docker Hub + ./bin/docker-helper.sh push + # Push to Amazon Public ECR + TARGET_IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push + + - name: Push Multi-Arch Manifest + env: + DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + run: | + # Push to Docker Hub + ./bin/docker-helper.sh push-manifests + # Push to Amazon Public ECR + IMAGE_NAME="public.ecr.aws/localstack/localstack" ./bin/docker-helper.sh push-manifests + + - name: Publish dev release + env: + TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }} + TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} + run: | + if git describe --exact-match --tags >/dev/null 2>&1; then + echo "not publishing a dev release as this is a tagged commit" + else + make install-runtime publish || echo "dev release failed (maybe it is already published)" + fi + + push-to-tinybird: + name: Push Workflow Status to Tinybird + if: always() && github.ref == 'refs/heads/master' && github.repository == 'localstack/localstack' + runs-on: ubuntu-latest + needs: + - test + steps: + - name: Push to Tinybird + uses: localstack/tinybird-workflow-push@v3 + with: + # differentiate between "acceptance only" and "proper / full" runs + workflow_id: ${{ (inputs.onlyAcceptanceTests == true || github.event_name == 'push') && 'tests_acceptance' || 'tests_full' }} + tinybird_token: ${{ secrets.TINYBIRD_CI_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + tinybird_datasource: "ci_workflows" + # determine the output only for the jobs that are direct dependencies of this job (to avoid issues with workflow_call embeddings) + outcome: ${{ ((contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')) && 'failure') || 'success' }} + + cleanup: + name: "Cleanup" + runs-on: ubuntu-latest + # only remove the image artifacts if the build was successful + # (this allows a re-build of failed jobs until for the time of the retention period) + if: always() && !failure() && !cancelled() + needs: push + steps: + - uses: geekyeggo/delete-artifact@v5 + with: + # delete the docker images shared within the jobs (storage on GitHub is expensive) + name: | + localstack-docker-image-* + lambda-common-* + failOnError: false + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/aws-tests-mamr.yml b/.github/workflows/aws-tests-mamr.yml new file mode 100644 index 0000000000000..8bb24681d9bcf --- /dev/null +++ b/.github/workflows/aws-tests-mamr.yml @@ -0,0 +1,83 @@ +name: AWS / MA/MR tests + +on: + schedule: + - cron: 0 1 * * MON-FRI + pull_request: + paths: + - '.github/workflows/aws-mamr.yml' + - '.github/workflows/aws-tests.yml' + - '.github/actions/**' + workflow_dispatch: + inputs: + disableCaching: + description: 'Disable Caching' + required: false + type: boolean + default: false + PYTEST_LOGLEVEL: + type: choice + description: Loglevel for PyTest + options: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + default: WARNING + +env: + IMAGE_NAME: "localstack/localstack" + + + +jobs: + generate-random-creds: + name: "Generate random AWS credentials" + runs-on: ubuntu-latest + outputs: + region: ${{ steps.generate-aws-values.outputs.region }} + account_id: ${{ steps.generate-aws-values.outputs.account_id }} + steps: + - name: Generate values + id: generate-aws-values + run: | + # Generate a random 12-digit number for TEST_AWS_ACCOUNT_ID + ACCOUNT_ID=$(shuf -i 100000000000-999999999999 -n 1) + echo "account_id=$ACCOUNT_ID" >> 
$GITHUB_OUTPUT + # Set TEST_AWS_REGION_NAME to a random AWS region other than us-east-1 + REGIONS=("us-east-2" "us-west-1" "us-west-2" "ap-southeast-2" "ap-northeast-1" "eu-central-1" "eu-west-1") + REGION=${REGIONS[RANDOM % ${#REGIONS[@]}]} + echo "region=$REGION" >> $GITHUB_OUTPUT + + test-ma-mr: + name: "Run integration tests" + needs: generate-random-creds + uses: ./.github/workflows/aws-tests.yml + with: + disableCaching: ${{ inputs.disableCaching == true }} + PYTEST_LOGLEVEL: ${{ inputs.PYTEST_LOGLEVEL }} + testAWSRegion: ${{ needs.generate-random-creds.outputs.region }} + testAWSAccountId: ${{ needs.generate-random-creds.outputs.account_id }} + testAWSAccessKeyId: ${{ needs.generate-random-creds.outputs.account_id }} + secrets: + DOCKERHUB_PULL_USERNAME: ${{ secrets.DOCKERHUB_PULL_USERNAME }} + DOCKERHUB_PULL_TOKEN: ${{ secrets.DOCKERHUB_PULL_TOKEN }} + TINYBIRD_CI_TOKEN: ${{ secrets.TINYBIRD_CI_TOKEN }} + + push-to-tinybird: + name: Push Workflow Status to Tinybird + if: always() && github.ref == 'refs/heads/master' && github.repository == 'localstack/localstack' + runs-on: ubuntu-latest + needs: + - test-ma-mr + steps: + - name: Push to Tinybird + uses: localstack/tinybird-workflow-push@v3 + with: + workflow_id: ${{ 'tests_mamr' }} + tinybird_token: ${{ secrets.TINYBIRD_CI_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + tinybird_datasource: "ci_workflows" + # determine the output only for the jobs that are direct dependencies of this job (to avoid issues with workflow_call embeddings) + outcome: ${{ ((contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')) && 'failure') || 'success' }} diff --git a/.github/workflows/aws-tests.yml b/.github/workflows/aws-tests.yml new file mode 100644 index 0000000000000..49d763255ca36 --- /dev/null +++ b/.github/workflows/aws-tests.yml @@ -0,0 +1,902 @@ +name: AWS Integration Tests + +on: + workflow_dispatch: + inputs: + disableCaching: + description: 'Disable Caching' + required: false + type: boolean + default: false + PYTEST_LOGLEVEL: + type: choice + description: Loglevel for PyTest + options: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + default: WARNING + disableTestSelection: + description: 'Disable Test Selection' + required: false + type: boolean + default: false + randomize-aws-credentials: + description: 'Randomize AWS credentials' + default: false + required: false + type: boolean + onlyAcceptanceTests: + description: 'Run only acceptance tests' + default: false + required: false + type: boolean + forceARMTests: + description: 'Run the ARM64 tests' + default: false + required: false + type: boolean + testAWSRegion: + description: 'AWS test region' + required: false + type: string + default: 'us-east-1' + testAWSAccountId: + description: 'AWS test account ID' + required: false + type: string + default: '000000000000' + testAWSAccessKeyId: + description: 'AWS test access key ID' + required: false + type: string + default: 'test' + workflow_call: + inputs: + disableCaching: + description: 'Disable Caching' + required: false + type: boolean + default: false + PYTEST_LOGLEVEL: + type: string + required: false + description: Loglevel for PyTest + default: WARNING + disableTestSelection: + description: 'Disable Test Selection' + required: false + type: boolean + default: false + randomize-aws-credentials: + description: "Randomize AWS credentials" + default: false + required: false + type: boolean + onlyAcceptanceTests: + description: "Run only acceptance tests" + default: false + required: false + type: 
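The credential randomization in generate-random-creds amounts to the following sketch (the region allowlist is abbreviated here); note that downstream the generated account ID doubles as the access key ID via testAWSAccessKeyId:

    # random 12-digit account id and a region other than the default us-east-1
    ACCOUNT_ID=$(shuf -i 100000000000-999999999999 -n 1)
    REGIONS=("us-east-2" "us-west-1" "us-west-2" "eu-central-1" "eu-west-1")
    REGION=${REGIONS[RANDOM % ${#REGIONS[@]}]}
    echo "account_id=$ACCOUNT_ID" >> "$GITHUB_OUTPUT"
    echo "region=$REGION" >> "$GITHUB_OUTPUT"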
boolean + forceARMTests: + description: 'Run the ARM64 tests' + default: false + required: false + type: boolean + testAWSRegion: + description: 'AWS test region' + required: false + type: string + default: 'us-east-1' + testAWSAccountId: + description: 'AWS test account ID' + required: false + type: string + default: '000000000000' + testAWSAccessKeyId: + description: 'AWS test access key ID' + required: false + type: string + default: 'test' + secrets: + DOCKERHUB_PULL_USERNAME: + description: 'A DockerHub username - Used to avoid rate limiting issues.' + required: true + DOCKERHUB_PULL_TOKEN: + description: 'A DockerHub token - Used to avoid rate limiting issues.' + required: true + TINYBIRD_CI_TOKEN: + description: 'Token for accessing our tinybird ci analytics workspace.' + required: true + +env: + PYTEST_LOGLEVEL: ${{ inputs.PYTEST_LOGLEVEL || 'WARNING' }} + IMAGE_NAME: "localstack/localstack" + TESTSELECTION_PYTEST_ARGS: "${{ !inputs.disableTestSelection && '--path-filter=dist/testselection/test-selection.txt ' || '' }}" + TEST_AWS_REGION_NAME: ${{ inputs.testAWSRegion }} + TEST_AWS_ACCOUNT_ID: ${{ inputs.testAWSAccountId }} + TEST_AWS_ACCESS_KEY_ID: ${{ inputs.testAWSAccessKeyId }} + # Set non-job-specific environment variables for pytest-tinybird + TINYBIRD_URL: https://api.tinybird.co + TINYBIRD_DATASOURCE: raw_tests + TINYBIRD_TOKEN: ${{ secrets.TINYBIRD_CI_TOKEN }} + TINYBIRD_TIMEOUT: 5 + CI_REPOSITORY_NAME: localstack/localstack + # differentiate between "acceptance", "mamr" and "full" runs + CI_WORKFLOW_NAME: ${{ inputs.onlyAcceptanceTests && 'tests_acceptance' + || inputs.testAWSAccountId != '000000000000' && 'tests_mamr' + || 'tests_full' }} + CI_COMMIT_BRANCH: ${{ github.head_ref || github.ref_name }} + CI_COMMIT_SHA: ${{ github.sha }} + CI_JOB_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }} + # report to tinybird if executed on master + TINYBIRD_PYTEST_ARGS: "${{ github.ref == 'refs/heads/master' && '--report-to-tinybird ' || '' }}" + + + +jobs: + build: + name: "Build Docker Image (${{ contains(matrix.runner, 'arm') && 'ARM64' || 'AMD64' }})" + needs: + - test-preflight + strategy: + matrix: + runner: + - ubuntu-latest + - ubuntu-24.04-arm + exclude: + # skip the ARM integration tests in case we are not on the master and not on the upgrade-dependencies branch and forceARMTests is not set to true + - runner: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'ubuntu-24.04-arm' || ''}} + fail-fast: false + runs-on: ${{ matrix.runner }} + steps: + - name: Determine Runner Architecture + shell: bash + run: echo "PLATFORM=${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v4 + with: + path: localstack + # setuptools_scm requires the git history (at least until the last tag) to determine the version + fetch-depth: 0 + + - name: Build Image + uses: localstack/localstack/.github/actions/build-image@master + with: + disableCaching: ${{ inputs.disableCaching == true && 'true' || 'false' }} + dockerhubPullUsername: ${{ secrets.DOCKERHUB_PULL_USERNAME }} + dockerhubPullToken: ${{ secrets.DOCKERHUB_PULL_TOKEN }} + + - name: Restore Lambda common runtime packages + id: cached-lambda-common-restore + if: inputs.disableCaching != true + uses: actions/cache/restore@v4 + with: + path: localstack/tests/aws/services/lambda_/functions/common + key: 
common-it-${{ runner.os }}-${{ runner.arch }}-lambda-common-${{ hashFiles('localstack/tests/aws/services/lambda_/functions/common/**/src/*', 'localstack/tests/aws/services/lambda_/functions/common/**/Makefile') }} + + - name: Prebuild lambda common packages + run: ./localstack/scripts/build_common_test_functions.sh `pwd`/localstack/tests/aws/services/lambda_/functions/common + + - name: Save Lambda common runtime packages + if: inputs.disableCaching != true + uses: actions/cache/save@v4 + with: + path: localstack/tests/aws/services/lambda_/functions/common + key: ${{ steps.cached-lambda-common-restore.outputs.cache-primary-key }} + + - name: Archive Lambda common packages + uses: actions/upload-artifact@v4 + with: + name: lambda-common-${{ env.PLATFORM }} + path: | + localstack/tests/aws/services/lambda_/functions/common + retention-days: 1 + + + test-preflight: + name: "Preflight & Unit-Tests" + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + # setuptools_scm requires the git history (at least until the last tag) to determine the version + fetch-depth: 0 + + - name: Prepare Local Test Environment + uses: ./.github/actions/setup-tests-env + + - name: Linting + run: make lint + + - name: Check AWS compatibility markers + run: make check-aws-markers + + - name: Determine Test Selection + if: ${{ env.TESTSELECTION_PYTEST_ARGS }} + run: | + source .venv/bin/activate + if [ -z "${{ github.event.pull_request.base.sha }}" ]; then + echo "Do test selection based on branch name" + else + echo "Do test selection based on Pull Request event" + SCRIPT_OPTS="--base-commit-sha ${{ github.event.pull_request.base.sha }} --head-commit-sha ${{ github.event.pull_request.head.sha }}" + fi + source .venv/bin/activate + python -m localstack.testing.testselection.scripts.generate_test_selection $(pwd) dist/testselection/test-selection.txt $SCRIPT_OPTS || (mkdir -p dist/testselection && echo "SENTINEL_ALL_TESTS" >> dist/testselection/test-selection.txt) + echo "Test selection:" + cat dist/testselection/test-selection.txt + + - name: Archive Test Selection + if: ${{ env.TESTSELECTION_PYTEST_ARGS }} + uses: actions/upload-artifact@v4 + with: + name: test-selection + path: | + dist/testselection/test-selection.txt + retention-days: 1 + + - name: Run Unit Tests + timeout-minutes: 8 + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DEBUG: 1 + TEST_PATH: "tests/unit" + JUNIT_REPORTS_FILE: "pytest-junit-unit.xml" + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} -o junit_suite_name=unit-tests" + COVERAGE_FILE: ".coverage.unit" + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }}-unit + CI_JOB_ID: ${{ github.job }}-unit + run: make test-coverage + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-preflight + include-hidden-files: true + path: | + pytest-junit-unit.xml + .coverage.unit + retention-days: 30 + + publish-preflight-test-results: + name: Publish Preflight- & Unit-Test Results + needs: test-preflight + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + contents: read + issues: read + # execute on success or failure, but not if the workflow is cancelled or any of the dependencies has been skipped + if: always() && !cancelled() && !contains(needs.*.result, 'skipped') + steps: + - name: Download Artifacts + uses: actions/download-artifact@v4 + with: + pattern: 
test-results-preflight + + - name: Publish Preflight- & Unit-Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + if: success() || failure() + with: + files: | + test-results-preflight/*.xml + check_name: "Test Results ${{ inputs.testAWSAccountId != '000000000000' && '(MA/MR) ' || ''}}- Preflight, Unit" + test_file_prefix: "-/opt/code/localstack/" + action_fail_on_inconclusive: true + + + test-integration: + name: "Integration Tests (${{ contains(matrix.runner, 'arm') && 'ARM64' || 'AMD64' }} - ${{ matrix.group }})" + if: ${{ !inputs.onlyAcceptanceTests }} + needs: + - build + - test-preflight + strategy: + matrix: + group: [ 1, 2, 3, 4 ] + runner: + - ubuntu-latest + - ubuntu-24.04-arm + exclude: + # skip the ARM integration tests in case we are not on the master and not on the upgrade-dependencies branch and forceARMTests is not set to true + - runner: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'ubuntu-24.04-arm' || ''}} + fail-fast: false + runs-on: ${{ matrix.runner }} + env: + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }}-${{ contains(matrix.runner, 'arm') && 'arm' || 'amd' }} + CI_JOB_ID: ${{ github.job }}-${{ contains(matrix.runner, 'arm') && 'arm' || 'amd' }} + steps: + - name: Determine Runner Architecture + shell: bash + run: echo "PLATFORM=${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_ENV + + - name: Login to Docker Hub + # login to DockerHub to avoid rate limiting issues on custom runners + if: github.repository_owner == 'localstack' + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_PULL_USERNAME }} + password: ${{ secrets.DOCKERHUB_PULL_TOKEN }} + + - name: Set environment + if: ${{ inputs.testEnvironmentVariables != ''}} + shell: bash + run: | + echo "${{ inputs.testEnvironmentVariables }}" | sed "s/;/\n/" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v4 + with: + # setuptools_scm requires the git history (at least until the last tag) to determine the version + fetch-depth: 0 + + - name: Download Lambda Common packages + uses: actions/download-artifact@v4 + with: + name: lambda-common-${{ env.PLATFORM }} + path: | + tests/aws/services/lambda_/functions/common + + - name: Load Localstack Docker Image + uses: ./.github/actions/load-localstack-docker-from-artifacts + with: + platform: "${{ env.PLATFORM }}" + + - name: Download Test Selection + if: ${{ env.TESTSELECTION_PYTEST_ARGS }} + uses: actions/download-artifact@v4 + with: + name: test-selection + path: dist/testselection/ + + - name: Run Integration Tests + timeout-minutes: 120 + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }}${{ env.TESTSELECTION_PYTEST_ARGS }} --splits 4 --group ${{ matrix.group }} --store-durations --clean-durations --ignore=tests/unit/ --ignore=tests/bootstrap" + COVERAGE_FILE: "target/.coverage.integration-${{ env.PLATFORM }}-${{ matrix.group }}" + JUNIT_REPORTS_FILE: "target/pytest-junit-integration-${{ env.PLATFORM }}-${{ matrix.group }}.xml" + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_PULL_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PULL_TOKEN }} + # increase Docker SDK timeout to avoid timeouts on BuildJet runners - https://github.com/docker/docker-py/issues/2266 + DOCKER_SDK_DEFAULT_TIMEOUT_SECONDS: 300 + run: make docker-run-tests + + - name: Archive Test 
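For reference, the 4-way shard invocation driven by the integration matrix reduces to this sketch (pytest-split provides --splits/--group; the group number is illustrative):

    # run shard 2 of 4; --store-durations refreshes .test_durations so that
    # future splits stay balanced, --clean-durations drops stale entries
    python -m pytest tests/aws \
      --splits 4 --group 2 \
      --store-durations --clean-durations \
      --ignore=tests/unit --ignore=tests/bootstrap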
Durations + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: pytest-split-durations-${{ env.PLATFORM }}-${{ matrix.group }} + path: .test_durations + include-hidden-files: true + retention-days: 5 + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-integration-${{ env.PLATFORM }}-${{ matrix.group }} + include-hidden-files: true + path: | + target/pytest-junit-integration-${{ env.PLATFORM }}-${{ matrix.group }}.xml + target/.coverage.integration-${{ env.PLATFORM }}-${{ matrix.group }} + retention-days: 30 + + test-bootstrap: + name: Test Bootstrap + if: ${{ !inputs.onlyAcceptanceTests }} + runs-on: ubuntu-latest + needs: + - test-preflight + - build + timeout-minutes: 60 + env: + PLATFORM: 'amd64' + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }} + CI_JOB_ID: ${{ github.job }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + # setuptools_scm requires the git history (at least until the last tag) to determine the version + fetch-depth: 0 + + - name: Prepare Local Test Environment + uses: ./.github/actions/setup-tests-env + + - name: Load Localstack Docker Image + uses: ./.github/actions/load-localstack-docker-from-artifacts + with: + platform: "${{ env.PLATFORM }}" + + - name: Run Bootstrap Tests + timeout-minutes: 30 + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TEST_PATH: "tests/bootstrap" + COVERAGE_FILE: ".coverage.bootstrap" + JUNIT_REPORTS_FILE: "pytest-junit-bootstrap.xml" + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} -o junit_suite_name=bootstrap-tests" + run: make test-coverage + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-bootstrap + include-hidden-files: true + path: | + pytest-junit-bootstrap.xml + .coverage.bootstrap + retention-days: 30 + + publish-test-results: + name: Publish Test Results + strategy: + matrix: + arch: + - amd64 + - arm64 + exclude: + # skip the ARM integration tests in case we are not on the master and not on the upgrade-dependencies branch and forceARMTests is not set to true + - arch: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'arm64' || ''}} + needs: + - test-integration + - test-bootstrap + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + contents: read + issues: read + # execute on success or failure, but not if the workflow is cancelled or any of the dependencies has been skipped + if: always() && !cancelled() && !contains(needs.*.result, 'skipped') + steps: + - name: Download Bootstrap Artifacts + uses: actions/download-artifact@v4 + if: ${{ matrix.arch == 'amd64' }} + with: + pattern: test-results-bootstrap + + - name: Download Integration Artifacts + uses: actions/download-artifact@v4 + with: + pattern: test-results-integration-${{ matrix.arch }}-* + + - name: Publish Bootstrap and Integration Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + if: success() || failure() + with: + files: | + **/pytest-junit-*.xml + check_name: "Test Results (${{ matrix.arch }}${{ inputs.testAWSAccountId != '000000000000' && ', MA/MR' || ''}}) - Integration${{ matrix.arch == 'amd64' && ', Bootstrap' || ''}}" + test_file_prefix: "-/opt/code/localstack/" + action_fail_on_inconclusive: true + + + test-acceptance: + name: "Acceptance Tests (${{ 
contains(matrix.runner, 'arm') && 'ARM64' || 'AMD64' }})" + needs: + - build + strategy: + matrix: + runner: + - ubuntu-latest + - ubuntu-24.04-arm + exclude: + # skip the ARM integration tests in case we are not on the master and not on the upgrade-dependencies branch and forceARMTests is not set to true + - runner: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'ubuntu-24.04-arm' || ''}} + fail-fast: false + runs-on: ${{ matrix.runner }} + env: + # Acceptance tests are executed for all test cases, without any test selection + TESTSELECTION_PYTEST_ARGS: "" + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }}-${{ contains(matrix.runner, 'arm') && 'arm' || 'amd' }} + CI_JOB_ID: ${{ github.job }}-${{ contains(matrix.runner, 'arm') && 'arm' || 'amd' }} + steps: + - name: Determine Runner Architecture + shell: bash + run: echo "PLATFORM=${{ (runner.arch == 'X64' && 'amd64') || (runner.arch == 'ARM64' && 'arm64') || '' }}" >> $GITHUB_ENV + + - name: Login to Docker Hub + # login to DockerHub to avoid rate limiting issues on custom runners + if: github.repository_owner == 'localstack' + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_PULL_USERNAME }} + password: ${{ secrets.DOCKERHUB_PULL_TOKEN }} + + - name: Set environment + if: ${{ inputs.testEnvironmentVariables != ''}} + shell: bash + run: | + echo "${{ inputs.testEnvironmentVariables }}" | sed "s/;/\n/" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v4 + with: + # setuptools_scm requires the git history (at least until the last tag) to determine the version + fetch-depth: 0 + + - name: Load Localstack Docker Image + uses: ./.github/actions/load-localstack-docker-from-artifacts + with: + platform: "${{ env.PLATFORM }}" + + - name: Run Acceptance Tests + timeout-minutes: 120 + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DEBUG: 1 + LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC: 1 + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }}${{ env.TESTSELECTION_PYTEST_ARGS }} --reruns 3 -m acceptance_test -o junit_suite_name='acceptance_test'" + COVERAGE_FILE: "target/.coverage.acceptance-${{ env.PLATFORM }}" + JUNIT_REPORTS_FILE: "target/pytest-junit-acceptance-${{ env.PLATFORM }}.xml" + TEST_PATH: "tests/aws/" + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_PULL_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PULL_TOKEN }} + run: make docker-run-tests + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-acceptance-${{ env.PLATFORM }} + include-hidden-files: true + path: | + target/pytest-junit-acceptance-${{ env.PLATFORM }}.xml + target/.coverage.acceptance-${{ env.PLATFORM }} + retention-days: 30 + + publish-acceptance-test-results: + name: Publish Acceptance Test Results + strategy: + matrix: + arch: + - amd64 + - arm64 + exclude: + # skip the ARM integration tests in case we are not on the master and not on the upgrade-dependencies branch and forceARMTests is not set to true + - arch: ${{ (github.ref != 'refs/heads/master' && github.ref != 'upgrade-dependencies' && inputs.forceARMTests == false) && 'arm64' || ''}} + needs: + - test-acceptance + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + contents: read + issues: read + # execute on success or failure, but not if the workflow is cancelled or any of the dependencies has been skipped + 
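The acceptance suite above selects tests by marker and retries flaky failures; a sketch of the underlying pytest invocation (pytest-rerunfailures provides --reruns):

    # run only tests marked acceptance_test, retrying failures up to 3 times
    python -m pytest tests/aws -m acceptance_test --reruns 3 \
      -o junit_suite_name=acceptance_test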
if: always() && !cancelled() && !contains(needs.*.result, 'skipped') + steps: + - name: Download Acceptance Artifacts + uses: actions/download-artifact@v4 + with: + pattern: test-results-acceptance-${{ matrix.arch }} + + - name: Publish Acceptance Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + if: success() || failure() + with: + files: | + **/pytest-junit-*.xml + check_name: "Test Results (${{ matrix.arch }}${{ inputs.testAWSAccountId != '000000000000' && ', MA/MR' || ''}}) - Acceptance" + test_file_prefix: "-/opt/code/localstack/" + action_fail_on_inconclusive: true + + test-cloudwatch-v1: + name: Test CloudWatch V1 + if: ${{ !inputs.onlyAcceptanceTests }} + runs-on: ubuntu-latest + needs: + - test-preflight + - build + timeout-minutes: 60 + env: + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }} + CI_JOB_ID: ${{ github.job }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Prepare Local Test Environment + uses: ./.github/actions/setup-tests-env + + - name: Run Cloudwatch v1 Provider Tests + timeout-minutes: 30 + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DEBUG: 1 + COVERAGE_FILE: ".coverage.cloudwatch_v1" + TEST_PATH: "tests/aws/services/cloudwatch/" + JUNIT_REPORTS_FILE: "pytest-junit-cloudwatch-v1.xml" + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} --reruns 3 -o junit_suite_name=cloudwatch_v1" + PROVIDER_OVERRIDE_CLOUDWATCH: "v1" + run: make test-coverage + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-cloudwatch-v1 + include-hidden-files: true + path: | + pytest-junit-cloudwatch-v1.xml + .coverage.cloudwatch_v1 + retention-days: 30 + + test-ddb-v2: + name: Test DynamoDB(Streams) v2 + if: ${{ !inputs.onlyAcceptanceTests }} + runs-on: ubuntu-latest + needs: + - test-preflight + - build + timeout-minutes: 60 + env: + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }} + CI_JOB_ID: ${{ github.job }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Prepare Local Test Environment + uses: ./.github/actions/setup-tests-env + + - name: Download Test Selection + if: ${{ env.TESTSELECTION_PYTEST_ARGS }} + uses: actions/download-artifact@v4 + with: + name: test-selection + path: dist/testselection/ + + - name: Run DynamoDB(Streams) v2 Provider Tests + timeout-minutes: 30 + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + COVERAGE_FILE: ".coverage.dynamodb_v2" + TEST_PATH: "tests/aws/services/dynamodb/ tests/aws/services/dynamodbstreams/ tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py" + JUNIT_REPORTS_FILE: "pytest-junit-dynamodb-v2.xml" + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} --reruns 3 -o junit_suite_name=dynamodb_v2" + PROVIDER_OVERRIDE_DYNAMODB: "v2" + run: make test-coverage + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-dynamodb-v2 + include-hidden-files: true + path: | + pytest-junit-dynamodb-v2.xml + .coverage.dynamodb_v2 + retention-days: 30 + + test-events-v1: + name: Test EventBridge v1 + if: ${{ !inputs.onlyAcceptanceTests }} + runs-on: ubuntu-latest + needs: + - test-preflight + - build + timeout-minutes: 60 + env: + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job 
}} + CI_JOB_ID: ${{ github.job }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Prepare Local Test Environment + uses: ./.github/actions/setup-tests-env + + - name: Download Test Selection + if: ${{ env.TESTSELECTION_PYTEST_ARGS }} + uses: actions/download-artifact@v4 + with: + name: test-selection + path: dist/testselection/ + + - name: Run EventBridge v1 Provider Tests + timeout-minutes: 30 + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DEBUG: 1 + COVERAGE_FILE: ".coverage.events_v1" + TEST_PATH: "tests/aws/services/events/" + JUNIT_REPORTS_FILE: "pytest-junit-events-v1.xml" + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} --reruns 3 -o junit_suite_name=events_v1" + PROVIDER_OVERRIDE_EVENTS: "v1" + run: make test-coverage + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-events-v1 + path: | + pytest-junit-events-v1.xml + .coverage.events_v1 + retention-days: 30 + + test-cfn-v2-engine: + name: Test CloudFormation Engine v2 + if: ${{ !inputs.onlyAcceptanceTests }} + runs-on: ubuntu-latest + needs: + - test-preflight + - build + timeout-minutes: 60 + env: + COVERAGE_FILE: ".coverage.cloudformation_v2" + JUNIT_REPORTS_FILE: "pytest-junit-cloudformation-v2.xml" + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }} + CI_JOB_ID: ${{ github.job }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Prepare Local Test Environment + uses: ./.github/actions/setup-tests-env + + - name: Download Test Selection + if: ${{ env.TESTSELECTION_PYTEST_ARGS }} + uses: actions/download-artifact@v4 + with: + name: test-selection + path: dist/testselection/ + + - name: Run CloudFormation Engine v2 Tests + timeout-minutes: 30 + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TEST_PATH: "tests/aws/services/cloudformation/v2" + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }} --reruns 3 -o junit_suite_name='cloudformation_v2'" + PROVIDER_OVERRIDE_CLOUDFORMATION: "engine-v2" + run: make test-coverage + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-cloudformation-v2 + include-hidden-files: true + path: | + ${{ env.COVERAGE_FILE }} + ${{ env.JUNIT_REPORTS_FILE }} + retention-days: 30 + + publish-alternative-provider-test-results: + name: Publish Alternative Provider Test Results + needs: + - test-cfn-v2-engine + - test-events-v1 + - test-ddb-v2 + - test-cloudwatch-v1 + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + contents: read + issues: read + # execute on success or failure, but not if the workflow is cancelled or any of the dependencies has been skipped + if: always() && !cancelled() && !contains(needs.*.result, 'skipped') + steps: + - name: Download Cloudformation v2 Artifacts + uses: actions/download-artifact@v4 + with: + pattern: test-results-cloudformation-v2 + + - name: Download EventBridge v1 Artifacts + uses: actions/download-artifact@v4 + with: + pattern: test-results-events-v1 + + - name: Download DynamoDB v2 Artifacts + uses: actions/download-artifact@v4 + with: + pattern: test-results-dynamodb-v2 + + - name: Download CloudWatch v1 Artifacts + uses: actions/download-artifact@v4 + with: + pattern: test-results-cloudwatch-v1 + + - name: Publish Bootstrap and Integration Test Results + uses: 
EnricoMi/publish-unit-test-result-action@v2 + if: success() || failure() + with: + files: | + **/pytest-junit-*.xml + check_name: "Test Results ${{ inputs.testAWSAccountId != '000000000000' && '(MA/MR) ' || ''}}- Alternative Providers" + test_file_prefix: "-/opt/code/localstack/" + action_fail_on_inconclusive: true + + capture-not-implemented: + name: "Capture Not Implemented" + if: ${{ !inputs.onlyAcceptanceTests && github.ref == 'refs/heads/master' }} + runs-on: ubuntu-latest + needs: build + env: + PLATFORM: 'amd64' + steps: + - name: Login to Docker Hub + # login to DockerHub to avoid rate limiting issues on custom runners + if: github.repository_owner == 'localstack' + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_PULL_USERNAME }} + password: ${{ secrets.DOCKERHUB_PULL_TOKEN }} + + - name: Checkout + uses: actions/checkout@v4 + with: + # setuptools_scm requires the git history (at least until the last tag) to determine the version + fetch-depth: 0 + + - name: Load Localstack Docker Image + uses: ./.github/actions/load-localstack-docker-from-artifacts + with: + platform: "${{ env.PLATFORM }}" + + - name: Install Community Dependencies + run: make install-dev + + - name: Start LocalStack + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DISABLE_EVENTS: "1" + DEBUG: 1 + IMAGE_NAME: "localstack/localstack:latest" + run: | + source .venv/bin/activate + localstack start -d + localstack wait -t 120 || (localstack logs && false) + + - name: Run capture-not-implemented + run: | + source .venv/bin/activate + cd scripts + mkdir ../results + python -m capture_notimplemented_responses ../results/ + + - name: Print the logs + run: | + source .venv/bin/activate + localstack logs + + - name: Stop localstack + run: | + source .venv/bin/activate + localstack stop + + - name: Archive Capture-Not-Implemented Results + uses: actions/upload-artifact@v4 + with: + name: capture-notimplemented + path: results/ + retention-days: 30 diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml deleted file mode 100644 index 59b94aea82f4f..0000000000000 --- a/.github/workflows/cla.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: "CLA Assistant" - -on: - issue_comment: - types: - - "created" - pull_request_target: - types: - - "opened" - - "closed" - - "synchronize" - -jobs: - cla-assistant: - runs-on: "ubuntu-latest" - steps: - - name: "CLA Assistant" - if: "(github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'" - uses: "cla-assistant/github-action@v2.1.3-beta" - env: - GITHUB_TOKEN: "${{ secrets.PRO_ACCESS_TOKEN }}" - PERSONAL_ACCESS_TOKEN: "${{ secrets.PRO_ACCESS_TOKEN }}" - with: - remote-organization-name: "localstack" - remote-repository-name: "localstack" - path-to-signatures: "etc/cla-signatures/signatures.json" - path-to-document: "https://github.com/localstack/localstack/blob/master/.github/CLA.md" - branch: "cla-signatures" - allowlist: "localstack-bot,renovate-bot,renovate" - lock-pullrequest-aftermerge: false diff --git a/.github/workflows/cli-tests.yml b/.github/workflows/cli-tests.yml deleted file mode 100644 index eea65e99f1ba1..0000000000000 --- a/.github/workflows/cli-tests.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: cli-tests -on: - workflow_dispatch: - pull_request: - paths: - - ".github/workflows/cli-tests.yml" - - "localstack/**" - - "tests/**" - - "setup.py" - - "pyproject.toml" - - 
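The capture-not-implemented job above relies on the CLI's boot-and-wait pattern; in isolation:

    # start LocalStack detached, then block until it is healthy,
    # or dump the logs and fail once the timeout expires
    localstack start -d
    localstack wait -t 120 || (localstack logs && false)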
"setup.cfg" - - "Dockerfile" - - "Dockerfile.rh" - - "docker-compose.yml" - - "bin/**" - branches: - - master - push: - paths: - - ".github/workflows/cli-tests.yml" - - "localstack/**" - - "tests/**" - - "setup.py" - - "pyproject.toml" - - "setup.cfg" - - "Dockerfile" - - "Dockerfile.rh" - - "docker-compose.yml" - - "bin/**" - branches: - - master - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - cli-tests: - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - python-version: [ "3.7", "3.8", "3.9", "3.10", "3.11" ] - timeout-minutes: 10 - steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Set up Python - id: setup-python - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Install CLI test dependencies - run: | - python -m pip install --upgrade pip wheel setuptools - pip install -e . - pip install pytest - - name: Run CLI tests - env: - PYTEST_ADDOPTS: "-p no:localstack.testing.pytest.fixtures -p no:localstack.testing.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -s" - run: | - python -m pytest tests/bootstrap/ diff --git a/.github/workflows/create_artifact_with_features_files.yml b/.github/workflows/create_artifact_with_features_files.yml new file mode 100644 index 0000000000000..30e87074a19c0 --- /dev/null +++ b/.github/workflows/create_artifact_with_features_files.yml @@ -0,0 +1,14 @@ +name: AWS / Archive feature files + +on: + schedule: + - cron: 0 9 * * TUE + workflow_dispatch: + +jobs: + validate-features-files: + name: Create artifact with features files + uses: localstack/meta/.github/workflows/create-artifact-with-features-files.yml@main + with: + artifact_name: 'features-files' + aws_services_path: 'localstack-core/localstack/services' diff --git a/.github/workflows/dockerhub-description.yml b/.github/workflows/dockerhub-description.yml index 12a5fd16842ce..68f30c7ef9c4a 100644 --- a/.github/workflows/dockerhub-description.yml +++ b/.github/workflows/dockerhub-description.yml @@ -5,7 +5,7 @@ on: branches: - master paths: - - README.md + - DOCKER.md - .github/workflows/dockerhub-description.yml jobs: @@ -13,20 +13,13 @@ jobs: name: Sync DockerHub Description runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v3 + uses: peter-evans/dockerhub-description@v4 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} repository: localstack/localstack short-description: ${{ github.event.repository.description }} - - - name: Docker Hub Description - Pro - uses: peter-evans/dockerhub-description@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - repository: localstack/localstack-pro - short-description: ${{ github.event.repository.description }} + readme-filepath: DOCKER.md diff --git a/.github/workflows/marker-report.yml b/.github/workflows/marker-report.yml new file mode 100644 index 0000000000000..6992be9827954 --- /dev/null +++ b/.github/workflows/marker-report.yml @@ -0,0 +1,120 @@ +name: Generate pytest marker report / Open marker report GH issue +on: + workflow_dispatch: + inputs: + dryRun: + description: 'Execute a Dry-Run? 
A Dry-Run will not create any issues and only print the issue content in the logs instead' + required: false + type: boolean + default: false + updateExistingIssue: + description: 'Select the empty string "" to open duplicate issues, "true" to update duplicate issues and "false" to skip duplicate issues' + required: false + type: choice + default: '' + options: + - '' + - 'false' + - 'true' + createIssue: + description: 'Open marker report github issue' + required: false + type: boolean + default: false + + push: + paths: + - "tests/**" + branches: + - master + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + marker-report: + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python + id: setup-python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Cache LocalStack community dependencies (venv) + uses: actions/cache@v4 + with: + path: .venv + key: ${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('requirements-dev.txt') }} + + - name: Install dependencies + run: make install-dev + + - name: Collect marker report + if: ${{ !inputs.createIssue }} + env: + PYTEST_ADDOPTS: "-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -p no:localstack.testing.pytest.cloudformation.fixtures -s --co --disable-warnings --marker-report --marker-report-tinybird-upload" + MARKER_REPORT_PROJECT_NAME: localstack + MARKER_REPORT_TINYBIRD_TOKEN: ${{ secrets.MARKER_REPORT_TINYBIRD_TOKEN }} + MARKER_REPORT_COMMIT_SHA: ${{ github.sha }} + run: | + . ./.venv/bin/activate + python -m pytest tests/aws/ + + # makes use of the marker report plugin localstack.testing.pytest.marker_report + - name: Generate marker report + env: + PYTEST_ADDOPTS: "-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -p no:localstack.testing.pytest.cloudformation.fixtures -p no: -s --co --disable-warnings --marker-report --marker-report-path './target'" + MARKER_REPORT_PROJECT_NAME: localstack + MARKER_REPORT_COMMIT_SHA: ${{ github.sha }} + run: | + . ./.venv/bin/activate + pip install codeowners + python -m pytest tests/aws/ + mv ./target/marker-report*.json ./target/marker-report.json + + - name: Enrich and render marker report + if: ${{ inputs.createIssue }} + env: + MARKER_REPORT_PATH: ./target/marker-report.json + CODEOWNERS_PATH: ./CODEOWNERS + TEMPLATE_PATH: ./.github/bot_templates/MARKER_REPORT_ISSUE.md.j2 + OUTPUT_PATH: ./target/MARKER_REPORT_ISSUE.md + GITHUB_REPO: ${{ github.repository }} + COMMIT_SHA: ${{ github.sha }} + run: | + . 
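Both marker-report steps above run pytest in collect-only mode, so no test code executes; stripped of the plugin deselection, the core invocation is:

    # --co (collect-only) gathers the test items and their markers without running them
    python -m pytest tests/aws/ --co -q --disable-warnings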
./.venv/bin/activate + pip install codeowners + python scripts/render_marker_report.py + + - name: Print generated markdown + if: ${{ inputs.createIssue }} + run: | + cat ./target/MARKER_REPORT_ISSUE.md + + - name: Upload generated markdown + if: ${{ inputs.createIssue }} + uses: actions/upload-artifact@v4 + with: + path: ./target/MARKER_REPORT_ISSUE.md + + - name: Create GH issue from template + if: inputs.dryRun != true && inputs.createIssue == true + uses: JasonEtco/create-an-issue@v2 + env: + GITHUB_TOKEN: ${{ secrets.PRO_ACCESS_TOKEN }} + with: + # `update_existing` actually has 3 possible values: + # 1. not set => will always open duplicates + # 2. false => will not update and will not open duplicates (NOOP if title conflict detected) + # 3. true => will update an existing one if conflict detected + update_existing: ${{ inputs.updateExistingIssue || '' }} +# search_existing: open + filename: ./target/MARKER_REPORT_ISSUE.md diff --git a/.github/workflows/pr-cla.yml b/.github/workflows/pr-cla.yml new file mode 100644 index 0000000000000..b81b0786207e5 --- /dev/null +++ b/.github/workflows/pr-cla.yml @@ -0,0 +1,30 @@ +name: "CLA Assistant" + +on: + issue_comment: + types: + - "created" + pull_request_target: + types: + - "opened" + - "closed" + - "synchronize" + +jobs: + cla-assistant: + runs-on: "ubuntu-latest" + steps: + - name: "CLA Assistant" + if: "(github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'" + uses: "cla-assistant/github-action@v2.6.1" + env: + GITHUB_TOKEN: "${{ secrets.PRO_ACCESS_TOKEN }}" + PERSONAL_ACCESS_TOKEN: "${{ secrets.PRO_ACCESS_TOKEN }}" + with: + remote-organization-name: "localstack" + remote-repository-name: "localstack" + path-to-signatures: "etc/cla-signatures/signatures.json" + path-to-document: "https://github.com/localstack/localstack/blob/master/.github/CLA.md" + branch: "cla-signatures" + allowlist: "localstack-bot,*[bot]" + lock-pullrequest-aftermerge: false diff --git a/.github/workflows/pr-enforce-no-major-master.yml b/.github/workflows/pr-enforce-no-major-master.yml new file mode 100644 index 0000000000000..d2efcf8f93191 --- /dev/null +++ b/.github/workflows/pr-enforce-no-major-master.yml @@ -0,0 +1,17 @@ +name: Enforce no major on master + +on: + pull_request_target: + types: [labeled, unlabeled, opened, edited, synchronize] + # only enforce for PRs targeting the master branch + branches: + - master + +jobs: + enforce-no-major: + permissions: + issues: write + pull-requests: write + uses: localstack/meta/.github/workflows/pr-enforce-no-major.yml@main + secrets: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} diff --git a/.github/workflows/pr-enforce-no-major-minor-master.yml b/.github/workflows/pr-enforce-no-major-minor-master.yml new file mode 100644 index 0000000000000..60fbd79b4108e --- /dev/null +++ b/.github/workflows/pr-enforce-no-major-minor-master.yml @@ -0,0 +1,17 @@ +name: Enforce no major or minor on master + +on: + pull_request_target: + types: [labeled, unlabeled, opened, edited, synchronize] + # only enforce for PRs targeting the master branch + branches: + - master + +jobs: + enforce-no-major-minor: + permissions: + issues: write + pull-requests: write + uses: localstack/meta/.github/workflows/pr-enforce-no-major-minor.yml@main + secrets: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} diff --git a/.github/workflows/pr-enforce-pr-labels.yml b/.github/workflows/pr-enforce-pr-labels.yml new file mode 100644 
index 0000000000000..b316c45845360 --- /dev/null +++ b/.github/workflows/pr-enforce-pr-labels.yml @@ -0,0 +1,11 @@ +name: Enforce PR Labels + +on: + pull_request_target: + types: [labeled, unlabeled, opened, edited, synchronize] + +jobs: + enforce-semver-labels: + uses: localstack/meta/.github/workflows/pr-enforce-semver-labels.yml@main + secrets: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} diff --git a/.github/workflows/pr-validate-features-files.yml b/.github/workflows/pr-validate-features-files.yml new file mode 100644 index 0000000000000..d62d2b5ffaa77 --- /dev/null +++ b/.github/workflows/pr-validate-features-files.yml @@ -0,0 +1,14 @@ +name: Validate AWS features files + +on: + pull_request: + paths: + - localstack-core/localstack/services/** + branches: + - master + +jobs: + validate-features-files: + uses: localstack/meta/.github/workflows/pr-validate-features-files.yml@main + with: + aws_services_path: 'localstack-core/localstack/services' diff --git a/.github/workflows/pr-welcome-first-time-contributors.yml b/.github/workflows/pr-welcome-first-time-contributors.yml new file mode 100644 index 0000000000000..c01b376ececde --- /dev/null +++ b/.github/workflows/pr-welcome-first-time-contributors.yml @@ -0,0 +1,81 @@ +name: Welcome First Time Contributors ✨ + +on: + pull_request_target: + types: + - opened + issues: + types: + - opened + +jobs: + welcome: + runs-on: ubuntu-latest + steps: + - uses: actions/github-script@v7 + with: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} + script: | + const issueMessage = `Welcome to LocalStack! Thanks for reporting your first issue and our team will be working towards fixing the issue for you or reach out for more background information. We recommend joining our [Slack Community](https://localstack.cloud/slack/) for real-time help and drop a message to [LocalStack Support](https://docs.localstack.cloud/getting-started/help-and-support/) if you are a licensed user! If you are willing to contribute towards fixing this issue, please have a look at our [contributing guidelines](https://github.com/localstack/.github/blob/main/CONTRIBUTING.md).`; + const prMessage = `Welcome to LocalStack! Thanks for raising your first Pull Request and landing in your contributions. Our team will reach out with any reviews or feedbacks that we have shortly. We recommend joining our [Slack Community](https://localstack.cloud/slack/) and share your PR on the **#community** channel to share your contributions with us. Please make sure you are following our [contributing guidelines](https://github.com/localstack/.github/blob/main/CONTRIBUTING.md) and our [Code of Conduct](https://github.com/localstack/.github/blob/main/CODE_OF_CONDUCT.md).`; + + if (!issueMessage && !prMessage) { + throw new Error('Action should have either issueMessage or prMessage set'); + } + + const isIssue = !!context.payload.issue; + let isFirstContribution; + if (isIssue) { + const query = `query($owner:String!, $name:String!, $contributor:String!) { + repository(owner:$owner, name:$name){ + issues(first: 1, filterBy: {createdBy:$contributor}){ + totalCount + } + } + }`; + + const variables = { + owner: context.repo.owner, + name: context.repo.repo, + contributor: context.payload.sender.login + }; + + const { repository: { issues: { totalCount } } } = await github.graphql(query, variables); + isFirstContribution = totalCount === 1; + } else { + const query = `query($qstr: String!) 
{ + search(query: $qstr, type: ISSUE, first: 1) { + issueCount + } + }`; + const variables = { + "qstr": `repo:${context.repo.owner}/${context.repo.repo} type:pr author:${context.payload.sender.login}`, + }; + const { search: { issueCount } } = await github.graphql(query, variables); + isFirstContribution = issueCount === 1; + } + + if (!isFirstContribution) { + return; + } + + const message = isIssue ? issueMessage : prMessage; + if (isIssue) { + const issueNumber = context.payload.issue.number; + await github.rest.issues.createComment({ + owner: context.payload.repository.owner.login, + repo: context.payload.repository.name, + issue_number: issueNumber, + body: message + }); + } + else { + const pullNumber = context.payload.pull_request.number; + await github.rest.pulls.createReview({ + owner: context.payload.repository.owner.login, + repo: context.payload.repository.name, + pull_number: pullNumber, + body: message, + event: 'COMMENT' + }); + } diff --git a/.github/workflows/pro-integration.yml b/.github/workflows/pro-integration.yml deleted file mode 100644 index 5821282af16b2..0000000000000 --- a/.github/workflows/pro-integration.yml +++ /dev/null @@ -1,225 +0,0 @@ -name: integration-tests-against-pro -on: - workflow_dispatch: - inputs: - targetRef: - description: 'LocalStack Pro Ref to test with' - required: false - pull_request: - paths: - - ".github/workflows/pro-integration.yml" - - "localstack/**" - - "tests/**" - - "setup.py" - - "pyproject.toml" - - "setup.cfg" - - "Dockerfile" - - "Dockerfile.rh" - - "docker-compose.yml" - - "bin/**" - branches: - - master - - 'v[0-9]+' - push: - paths: - - ".github/workflows/pro-integration.yml" - - "localstack/**" - - "tests/**" - - "setup.py" - - "pyproject.toml" - - "setup.cfg" - - "Dockerfile" - - "Dockerfile.rh" - - "docker-compose.yml" - - "bin/**" - branches: - - master - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - run-integration-tests: - runs-on: ubuntu-latest - timeout-minutes: 110 - defaults: - run: - working-directory: localstack-ext - environment: localstack-ext-tests - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository # skip job if fork PR - steps: - - name: Determine companion-ref - id: determine-companion-ref - uses: actions/github-script@v6 - with: - github-token: ${{ secrets.PRO_ACCESS_TOKEN }} - result-encoding: string - script: | - if (context.payload.inputs && context.payload.inputs.targetRef) { - console.log("Using manually set target reference: ", context.payload.inputs.targetRef) - return context.payload.inputs.targetRef - } - - const DEFAULT_REF = "refs/heads/master" - - async function isCompanionRefExisting(refName) { - try { - // strip the leading "refs/" for the API call - const apiRef = refName.substr(5) - console.log("Checking if companion repo has ref: ", apiRef) - await github.rest.git.getRef({owner: "localstack", repo: "localstack-ext", ref: apiRef}) - return true - } catch (error) { - if (error.status == 404) { - return false - } else { - // another (unexpected) error occurred, raise the error - throw new Error(`Fetching companion refs failed: ${error}`) - } - } - } - - let ref = context.ref - let baseRef = null - if (context.payload.pull_request) { - // pull requests have their own refs (f.e. 
'refs/pull/1/merge') - // use the PR head ref instead - ref = `refs/heads/${context.payload.pull_request.head.ref}` - baseRef = `refs/heads/${context.payload.pull_request.base.ref}` - } - - if (ref == DEFAULT_REF) { - console.log("Current ref is default ref. Using the same for ext repo: ", DEFAULT_REF) - return DEFAULT_REF - } - - if (await isCompanionRefExisting(ref)) { - console.log("Using companion ref in ext repo: ", ref) - return ref - } else if (baseRef && baseRef != DEFAULT_REF && (await isCompanionRefExisting(baseRef))) { - console.log("Using PR base companion ref in ext repo: ", baseRef) - return baseRef - } - - // the companion repo does not have a companion ref, use the default - console.log("Ext repo does not have a companion ref. Using default: ", DEFAULT_REF) - return DEFAULT_REF - - name: Checkout Pro - uses: actions/checkout@v2 - with: - repository: localstack/localstack-ext - ref: ${{steps.determine-companion-ref.outputs.result}} - token: ${{ secrets.PRO_ACCESS_TOKEN }} - path: localstack-ext - - name: Checkout Open Source - uses: actions/checkout@v2 - with: - path: localstack - - name: Set up Python 3.10 - id: setup-python - uses: actions/setup-python@v2 - with: - python-version: '3.10' - - name: Set up Node 14.x - uses: actions/setup-node@v2 - with: - node-version: 14.x - - name: Set up JDK 11 - uses: actions/setup-java@v2 - with: - java-version: '11' - distribution: 'temurin' - - uses: hashicorp/setup-terraform@v1 - with: - terraform_version: 0.13.7 - - name: Set up system wide dependencies - run: | - sudo apt-get install libsasl2-dev jq - - name: Cache LocalStack-ext dependencies (venv) - uses: actions/cache@v2 - id: ext-cache - with: - path: localstack-ext/.venv - key: ${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('localstack-ext/setup.cfg', 'localstack-ext/pyproject.toml', 'localstack/localstack/services/install.py', 'localstack/setup.cfg', 'localstack/localstack/constants.py') }} - - name: Install Python Dependencies for LocalStack Pro - run: make install - - name: Cache LocalStack community dependencies (venv, static libs) - uses: actions/cache@v2 - id: os-cache - with: - path: | - localstack/.venv - localstack/.filesystem/usr/lib/localstack - key: ${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('localstack-ext/setup.cfg', 'localstack-ext/pyproject.toml', 'localstack/localstack/services/install.py', 'localstack/setup.cfg', 'localstack/localstack/constants.py') }} - - name: Install Dependencies for LocalStack Community # lambda tests look for libraries in this virtualenv - working-directory: localstack - run: | - make install - - name: Link community LocalStack into Pro venv - run: | - source .venv/bin/activate - pip install -e ../localstack[runtime,test] - - name: Create Pro Entrypoints - # Entrypoints need to be generated _after_ the community edition has been linked into the venv - run: make entrypoints - - name: Test LocalStack Pro startup - env: - DEBUG: 1 - DNS_ADDRESS: 0 - LOCALSTACK_API_KEY: "test" - run: | - source .venv/bin/activate - bin/test_localstack_pro.sh - - name: Run community integration tests - env: - DEBUG: 1 - DNS_ADDRESS: 0 - LAMBDA_EXECUTOR: "local" - LOCALSTACK_API_KEY: "test" - AWS_SECRET_ACCESS_KEY: "test" - AWS_ACCESS_KEY_ID: "test" - AWS_DEFAULT_REGION: "us-east-1" - HOST_TMP_FOLDER: /tmp/localstack - PYTEST_LOGLEVEL: debug - run: | - # Remove the host tmp folder (might contain remnant files with different permissions) - sudo rm -rf 
../localstack/.filesystem/var/lib/localstack/tmp - source .venv/bin/activate - python -m pytest --capture=no --reruns 2 --durations=10 --junitxml=target/reports/pytest.xml ../localstack/tests/integration/ - - name: Run Lambda Tests for lambda executor docker - env: - DEBUG: 0 - DNS_ADDRESS: 0 - LAMBDA_EXECUTOR: "docker" - LOCALSTACK_API_KEY: "test" - HOST_TMP_FOLDER: /tmp/localstack - run: | - # Remove the host tmp folder (might contain remnant files with different permissions) - sudo rm -rf ../localstack/.filesystem/var/lib/localstack/tmp - source .venv/bin/activate - python -m pytest --reruns 2 --durations=10 --show-capture=no --junitxml=target/reports/lambda-docker.xml -o junit_suite_name='lambda-docker' \ - ../localstack/tests/integration/awslambda/ \ - ../localstack/tests/integration/test_integration.py \ - ../localstack/tests/integration/test_apigateway.py - - name: Run Lambda Tests for lambda executor docker-reuse - env: - DEBUG: 1 - DNS_ADDRESS: 0 - LAMBDA_EXECUTOR: "docker-reuse" - LOCALSTACK_API_KEY: "test" - HOST_TMP_FOLDER: /tmp/localstack - run: | - # Remove the host tmp folder (might contain remnant files with different permissions) - sudo rm -rf ../localstack/.filesystem/var/lib/localstack/tmp - source .venv/bin/activate - python -m pytest --reruns 2 --durations=10 --show-capture=no --junitxml=target/reports/lambda-docker-reuse.xml -o junit_suite_name='lambda-docker-reuse' \ - ../localstack/tests/integration/awslambda/ \ - ../localstack/tests/integration/test_integration.py \ - ../localstack/tests/integration/test_apigateway.py - - name: Publish LocalStack Community Integration Test Results - uses: EnricoMi/publish-unit-test-result-action@v1 - if: always() - with: - files: localstack-ext/target/reports/*.xml - check_name: LocalStack integration with Pro diff --git a/.github/workflows/rebase-release-prs.yml b/.github/workflows/rebase-release-prs.yml index 3725d28343299..44f9ba9397c06 100644 --- a/.github/workflows/rebase-release-prs.yml +++ b/.github/workflows/rebase-release-prs.yml @@ -1,8 +1,8 @@ -name: Rebase release PRs +name: Rebase Release PRs on: - push: - branches: - - master + workflow_dispatch: + schedule: + - cron: 0 5 * * MON-FRI jobs: find-release-branches: runs-on: ubuntu-latest @@ -10,7 +10,7 @@ jobs: steps: - name: find release branches id: find-release-branches - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: script: | // find all refs in the repo starting with "release/" @@ -28,7 +28,7 @@ jobs: matrix: head: ${{ fromJson(needs.find-release-branches.outputs.matrix) }} steps: - - uses: peter-evans/rebase@v2 + - uses: peter-evans/rebase@v3 with: token: ${{ secrets.PRO_ACCESS_TOKEN }} head: ${{ matrix.head }} diff --git a/.github/workflows/rebase-release-targeting-prs.yml b/.github/workflows/rebase-release-targeting-prs.yml index 034b043953386..d92480c9a5ef6 100644 --- a/.github/workflows/rebase-release-targeting-prs.yml +++ b/.github/workflows/rebase-release-targeting-prs.yml @@ -1,4 +1,4 @@ -name: Rebase PRs targeting release branches +name: Rebase PRs targeting Release Branches on: push: branches: @@ -9,13 +9,13 @@ jobs: steps: - name: Determine base ref id: determine-base-ref - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: result-encoding: string script: | // remove the ref prefx "refs/heads/" return context.payload.ref.substr(11) - - uses: peter-evans/rebase@v2 + - uses: peter-evans/rebase@v3 with: token: ${{ secrets.PRO_ACCESS_TOKEN }} base: ${{steps.determine-base-ref.outputs.result}} diff --git 
a/.github/workflows/rh-support.yml b/.github/workflows/rh-support.yml deleted file mode 100644 index a10ffb4da696e..0000000000000 --- a/.github/workflows/rh-support.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: smoke-tests-against-redhat -on: - schedule: - - cron: '0 7 * * 1' # every Monday morning - workflow_dispatch: - -permissions: - contents: read - -jobs: - test-redhat: - runs-on: ubuntu-latest - steps: - - name: Checkout Open Source - uses: actions/checkout@v2 - - - name: Set up Python 3.8 - id: setup-python - uses: actions/setup-python@v2 - with: - python-version: 3.8 - - - name: Build RH Docker Image - run: | - docker build -t localstack-redhat -f ./Dockerfile.rh \ - --build-arg LOCALSTACK_BUILD_GIT_HASH=$(shell git rev-parse --short HEAD) \ - --build-arg=LOCALSTACK_BUILD_DATE=$(shell date -u +"%Y-%m-%d") \ - . - - - name: Execute Smoke Tests - env: - TEST_LOCALSTACK_API_KEY: ${{ secrets.TEST_LOCALSTACK_API_KEY }} - run: | - CI_SMOKE_IMAGE_NAME="localstack-redhat" make ci-pro-smoke-tests diff --git a/.github/workflows/stale-bot.yml b/.github/workflows/stale-bot.yml index dc9c7f79545e1..b55bca386e155 100644 --- a/.github/workflows/stale-bot.yml +++ b/.github/workflows/stale-bot.yml @@ -1,4 +1,4 @@ -name: "Triage Stale issues" +name: Triage Stale Issues on: schedule: @@ -6,44 +6,7 @@ on: workflow_dispatch: jobs: - issue-cleanup: - runs-on: ubuntu-latest - name: Triage Stale Issues - steps: - - uses: aws-actions/stale-issue-cleanup@v4 - with: - issue-types: issues - ancient-issue-message: Hello 👋! It looks like this issue hasn’t been active in longer - than five months. We encourage you to check if this is still an issue in the latest release. - In the absence of more information, we will be closing this issue soon. - If you find that this is still a problem, please feel free to provide a comment or upvote - with a reaction on the initial post to prevent automatic closure. If the issue is already closed, - please feel free to open a new one. - stale-issue-message: Hello 👋! It looks like this issue hasn’t been active in longer than two months. - We encourage you to check if this is still an issue in the latest release. - In the absence of more information, we will be closing this issue soon. - If you find that this is still a problem, please feel free to provide a comment or upvote - with a reaction on the initial post to prevent automatic closure. If the issue is already closed, - please feel free to open a new one. - # These labels are required - stale-issue-label: "status: stale" - exempt-issue-labels: "status: triage needed,status: confirmed,status: accepted" - response-requested-label: "status: response required" - - # Don't set closed-for-staleness label to skip closing very old issues - # regardless of label - closed-for-staleness-label: "status: resolved/stale" - - # Issue timing - days-before-stale: 60 - days-before-close: 14 - days-before-ancient: 150 - - # If you don't want to mark a issue as being ancient based on a - # threshold of "upvotes", you can set this here. An "upvote" is - # the total number of +1, heart, hooray, and rocket reactions - # on an issue. 
- minimum-upvotes-to-exempt: 2 - - repo-token: ${{ secrets.PRO_ACCESS_TOKEN }} - loglevel: DEBUG + sync-with-project: + uses: localstack/meta/.github/workflows/stale-bot.yml@main + secrets: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} diff --git a/.github/workflows/sync-labels.yml b/.github/workflows/sync-labels.yml new file mode 100644 index 0000000000000..f9835bf171c39 --- /dev/null +++ b/.github/workflows/sync-labels.yml @@ -0,0 +1,15 @@ +name: Sync Labels + +on: + schedule: + # once a day at midnight + - cron: "0 0 * * *" + workflow_dispatch: + +jobs: + sync-labels: + uses: localstack/meta/.github/workflows/sync-labels.yml@main + with: + categories: status,aws,semver + secrets: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} diff --git a/.github/workflows/sync-project.yml b/.github/workflows/sync-project.yml new file mode 100644 index 0000000000000..2bc927284129c --- /dev/null +++ b/.github/workflows/sync-project.yml @@ -0,0 +1,14 @@ +name: Sync Project Cards + +on: + issues: + types: + - labeled + - unlabeled + - opened + +jobs: + sync-with-project: + uses: localstack/meta/.github/workflows/sync-project.yml@main + secrets: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} diff --git a/.github/workflows/tests-bin.yml b/.github/workflows/tests-bin.yml new file mode 100644 index 0000000000000..4da8063a78600 --- /dev/null +++ b/.github/workflows/tests-bin.yml @@ -0,0 +1,58 @@ +name: Helper Script Tests +on: + workflow_dispatch: + pull_request: + paths: + - 'bin/docker-helper.sh' + - '.github/workflows/tests-bin.yml' + - 'tests/bin/*.bats' + push: + paths: + - 'bin/docker-helper.sh' + - '.github/workflows/tests-bin.yml' + - 'tests/bin/*.bats' + branches: + - master + - release/* + +jobs: + script-tests: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup BATS + run: | + git clone https://github.com/bats-core/bats-core.git "$HOME"/bats-core + cd "$HOME"/bats-core + sudo ./install.sh /usr/local + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install helper script dependencies + run: pip install --upgrade setuptools setuptools_scm + + - name: Run bats tests + run: | + bats --report-formatter junit -r tests/bin/ --output . 
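+ # bats-core's "--report-formatter junit" writes its report as "report.xml" into the --output directory, + # hence the rename below so the file matches the "tests-junit-*.xml" glob used for archiving and publishing.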
+ mv report.xml tests-junit-base.xml + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-tests-bin + path: tests-junit-*.xml + retention-days: 30 + + - name: Publish Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + if: success() || failure() + with: + files: tests-junit-*.xml + check_name: "Helper Script Tests" + action_fail_on_inconclusive: true diff --git a/.github/workflows/tests-cli.yml b/.github/workflows/tests-cli.yml new file mode 100644 index 0000000000000..9dda7f376e9d1 --- /dev/null +++ b/.github/workflows/tests-cli.yml @@ -0,0 +1,118 @@ +name: CLI Tests +on: + workflow_dispatch: + inputs: + PYTEST_LOGLEVEL: + type: choice + description: Loglevel for PyTest + options: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + default: WARNING + pull_request: + paths: + - '**' + - '!.github/**' + - '.github/workflows/tests-cli.yml' + - '!docs/**' + - '!scripts/**' + - '!.dockerignore' + - '!.git-blame-ignore-revs' + - '!CODE_OF_CONDUCT.md' + - '!CODEOWNERS' + - '!CONTRIBUTING.md' + - '!docker-compose.yml' + - '!docker-compose-pro.yml' + - '!Dockerfile*' + - '!LICENSE.txt' + - '!README.md' + push: + paths: + - '**' + - '!.github/**' + - '.github/workflows/tests-cli.yml' + - '!docs/**' + - '!scripts/**' + - '!.dockerignore' + - '!.git-blame-ignore-revs' + - '!CODE_OF_CONDUCT.md' + - '!CODEOWNERS' + - '!CONTRIBUTING.md' + - '!docker-compose.yml' + - '!docker-compose-pro.yml' + - '!Dockerfile*' + - '!LICENSE.txt' + - '!README.md' + branches: + - master + - release/* + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +env: + # Configure PyTest log level + PYTEST_LOGLEVEL: "${{ inputs.PYTEST_LOGLEVEL || 'WARNING' }}" + # Set non-job-specific environment variables for pytest-tinybird + TINYBIRD_URL: https://api.tinybird.co + TINYBIRD_DATASOURCE: community_tests_cli + TINYBIRD_TOKEN: ${{ secrets.TINYBIRD_CI_TOKEN }} + CI_COMMIT_BRANCH: ${{ github.head_ref || github.ref_name }} + CI_COMMIT_SHA: ${{ github.sha }} + CI_JOB_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }} + # report to tinybird if executed on master + TINYBIRD_PYTEST_ARGS: "${{ github.ref == 'refs/heads/master' && '--report-to-tinybird ' || '' }}" + +permissions: + contents: read # checkout the repository + +jobs: + cli-tests: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [ "3.9", "3.10", "3.11", "3.12", "3.13" ] + timeout-minutes: 10 + env: + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }}-${{ matrix.python-version }} + CI_JOB_ID: ${{ github.job }}-${{ matrix.python-version }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set up Python + id: setup-python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install CLI test dependencies + run: | + make venv + source .venv/bin/activate + pip install -e . 
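+ # beyond the editable install of the CLI itself, these tests only need pytest and the pytest-tinybird reporter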
+ pip install pytest pytest-tinybird + - name: Run CLI tests + env: + PYTEST_ADDOPTS: "${{ env.TINYBIRD_PYTEST_ARGS }}-p no:localstack.testing.pytest.fixtures -p no:localstack_snapshot.pytest.snapshot -p no:localstack.testing.pytest.filters -p no:localstack.testing.pytest.fixture_conflicts -p no:localstack.testing.pytest.validation_tracking -p no:localstack.testing.pytest.path_filter -p no:tests.fixtures -p no:localstack.testing.pytest.stepfunctions.fixtures -p no:localstack.testing.pytest.cloudformation.fixtures -s" + TEST_PATH: "tests/cli/" + run: make test + + push-to-tinybird: + if: always() && github.ref == 'refs/heads/master' && github.repository == 'localstack/localstack' + runs-on: ubuntu-latest + needs: cli-tests + permissions: + actions: read + steps: + - name: Push to Tinybird + uses: localstack/tinybird-workflow-push@v3 + with: + workflow_id: "tests_cli" + tinybird_token: ${{ secrets.TINYBIRD_CI_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + tinybird_datasource: "ci_workflows" diff --git a/.github/workflows/tests-podman.yml b/.github/workflows/tests-podman.yml new file mode 100644 index 0000000000000..edd8aeb2926d7 --- /dev/null +++ b/.github/workflows/tests-podman.yml @@ -0,0 +1,90 @@ +name: Podman Docker Client Tests + +on: + workflow_dispatch: + inputs: + PYTEST_LOGLEVEL: + type: choice + description: Loglevel for PyTest + options: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + default: WARNING + +env: + # Configure PyTest log level + PYTEST_LOGLEVEL: "${{ inputs.PYTEST_LOGLEVEL || 'WARNING' }}" + # Set non-job-specific environment variables for pytest-tinybird + TINYBIRD_URL: https://api.tinybird.co + TINYBIRD_DATASOURCE: community_tests_podman + TINYBIRD_TOKEN: ${{ secrets.TINYBIRD_CI_TOKEN }} + CI_COMMIT_BRANCH: ${{ github.head_ref || github.ref_name }} + CI_COMMIT_SHA: ${{ github.sha }} + CI_JOB_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }} + # report to tinybird if executed on master + TINYBIRD_PYTEST_ARGS: "${{ github.ref == 'refs/heads/master' && '--report-to-tinybird ' || '' }}" + +jobs: + podman-tests: + runs-on: ubuntu-latest + timeout-minutes: 20 + env: + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }} + CI_JOB_ID: ${{ github.job }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install podman and test dependencies + run: | + make install-test & + + # install podman + sudo apt update + sudo apt install -y podman + podman ps + podman system info + + # disable Docker, to ensure the tests are running against Podman only + docker ps + sudo mv /var/run/docker.sock /var/run/docker.sock.bk + docker ps && exit 1 + dockerCmd=$(which docker) + sudo mv $dockerCmd $dockerCmd".bk" + + # wait for async installation process to finish + wait + + - name: Run Podman Docker client tests + env: + DOCKER_CMD: "podman" + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }}" + TEST_PATH: "tests/integration/docker_utils" + DEBUG: "1" + run: | + # determine path of podman socket + podmanSocket=$(podman system info --format json | jq -r '.host.remoteSocket.path') + echo "Running tests against local podman socket $podmanSocket" + DOCKER_HOST=$podmanSocket make test + + push-to-tinybird: + if: always() && github.ref == 'refs/heads/master' + runs-on: ubuntu-latest + needs: podman-tests + steps: + - name: Push to Tinybird + uses: 
localstack/tinybird-workflow-push@v3 + with: + workflow_id: "tests_podman" + tinybird_token: ${{ secrets.TINYBIRD_CI_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + tinybird_datasource: "ci_workflows" diff --git a/.github/workflows/tests-pro-integration.yml b/.github/workflows/tests-pro-integration.yml new file mode 100644 index 0000000000000..466a470956538 --- /dev/null +++ b/.github/workflows/tests-pro-integration.yml @@ -0,0 +1,412 @@ +name: Community Integration Tests against Pro +on: + workflow_call: + inputs: + disableCaching: + description: 'Disable Caching' + required: false + type: boolean + default: false + disableTestSelection: + description: 'Disable Test Selection' + required: false + type: boolean + default: false + targetRef: + description: 'LocalStack Pro Ref' + required: false + type: string + PYTEST_LOGLEVEL: + type: string + description: Loglevel for PyTest + default: WARNING + workflow_dispatch: + inputs: + disableCaching: + description: 'Disable Caching' + required: false + type: boolean + default: false + disableTestSelection: + description: 'Disable Test Selection' + required: false + type: boolean + default: false + targetRef: + description: 'LocalStack Pro Ref' + required: false + type: string + PYTEST_LOGLEVEL: + type: choice + description: Loglevel for PyTest + options: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + default: WARNING + pull_request: + paths: + - '**' + - '!.github/**' + - '.github/workflows/tests-pro-integration.yml' + - '!docs/**' + - '!scripts/**' + - './scripts/build_common_test_functions.sh' + - '!.dockerignore' + - '!.git-blame-ignore-revs' + - '!CODE_OF_CONDUCT.md' + - '!CODEOWNERS' + - '!CONTRIBUTING.md' + - '!docker-compose.yml' + - '!docker-compose-pro.yml' + - '!Dockerfile*' + - '!LICENSE.txt' + - '!README.md' + schedule: + - cron: '15 4 * * *' # run once a day at 4:15 AM UTC + push: + paths: + - '**' + - '!.github/**' + - '.github/workflows/tests-pro-integration.yml' + - '!docs/**' + - '!scripts/**' + - './scripts/build_common_test_functions.sh' + - '!.dockerignore' + - '!.git-blame-ignore-revs' + - '!CODE_OF_CONDUCT.md' + - '!CODEOWNERS' + - '!CONTRIBUTING.md' + - '!docker-compose.yml' + - '!docker-compose-pro.yml' + - '!Dockerfile*' + - '!LICENSE.txt' + - '!README.md' + branches: + - master + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +env: + # Configure PyTest log level + PYTEST_LOGLEVEL: "${{ inputs.PYTEST_LOGLEVEL || 'WARNING' }}" + # Set non-job-specific environment variables for pytest-tinybird + TINYBIRD_URL: https://api.tinybird.co + TINYBIRD_DATASOURCE: community_tests_pro_integration + TINYBIRD_TOKEN: ${{ secrets.TINYBIRD_CI_TOKEN }} + CI_COMMIT_BRANCH: ${{ github.head_ref || github.ref_name }} + CI_COMMIT_SHA: ${{ github.sha }} + CI_JOB_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }} + # report to tinybird if executed on master on community AND pro (targetRef not set) + TINYBIRD_PYTEST_ARGS: "${{ github.ref == 'refs/heads/master' && inputs.targetRef == '' && '--report-to-tinybird ' || '' }}" + # enable test selection if not running on master and test selection is not explicitly disabled + TESTSELECTION_PYTEST_ARGS: "${{ !inputs.disableTestSelection && '--path-filter=../../localstack/target/testselection/test-selection.txt ' || '' }}" + +jobs: + test-pro: + name: "Community Integration Tests against Pro" + # If this is triggered by a pull_request, make sure the PR 
head repo name is the same as the target repo name + # (i.e. do not execute job for workflows coming from forks) + if: >- + ( + github.event_name != 'pull_request' || + github.event.pull_request.head.repo.full_name == github.repository + ) + runs-on: ubuntu-latest + timeout-minutes: 90 + strategy: + matrix: + group: [ 1, 2 ] + fail-fast: false + env: + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }} + CI_JOB_ID: ${{ github.job }} + steps: + - name: Free Disk Space + uses: jlumbroso/free-disk-space@v1.3.1 + with: + # don't perform all optimizations to decrease action execution time + large-packages: false + docker-images: false + + - name: Checkout Community + uses: actions/checkout@v4 + with: + path: localstack + fetch-depth: 0 # we need the additional commits to figure out the merge base for test selection + + - name: "Determine Companion Ref" + id: determine-companion-ref + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} + result-encoding: string + script: | + if (context.payload.inputs && context.payload.inputs.targetRef) { + console.log("Using manually set target reference: ", context.payload.inputs.targetRef) + return context.payload.inputs.targetRef + } + + const DEFAULT_REF = "refs/heads/master" + + async function isCompanionRefExisting(refName) { + try { + // strip the leading "refs/" for the API call + const apiRef = refName.substr(5) + console.log("Checking if companion repo has ref: ", apiRef) + await github.rest.git.getRef({owner: "localstack", repo: "localstack-ext", ref: apiRef}) + return true + } catch (error) { + if (error.status == 404) { + return false + } else { + // another (unexpected) error occurred, raise the error + throw new Error(`Fetching companion refs failed: ${error}`) + } + } + } + + let ref = context.ref + let baseRef = null + if (context.payload.pull_request) { + // pull requests have their own refs (f.e. 'refs/pull/1/merge') + // use the PR head ref instead + ref = `refs/heads/${context.payload.pull_request.head.ref}` + baseRef = `refs/heads/${context.payload.pull_request.base.ref}` + } + + if (ref == DEFAULT_REF) { + console.log("Current ref is default ref. Using the same for ext repo: ", DEFAULT_REF) + return DEFAULT_REF + } + + if (await isCompanionRefExisting(ref)) { + console.log("Using companion ref in ext repo: ", ref) + return ref + } else if (baseRef && baseRef != DEFAULT_REF && (await isCompanionRefExisting(baseRef))) { + console.log("Using PR base companion ref in ext repo: ", baseRef) + return baseRef + } + + // the companion repo does not have a companion ref, use the default + console.log("Ext repo does not have a companion ref. 
Using default: ", DEFAULT_REF) + return DEFAULT_REF + + - name: Checkout Pro + uses: actions/checkout@v4 + with: + repository: localstack/localstack-ext + ref: ${{steps.determine-companion-ref.outputs.result}} + token: ${{ secrets.PRO_ACCESS_TOKEN }} + path: localstack-ext + + - name: Set up Python 3.11 + id: setup-python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Set up Node 18.x + uses: actions/setup-node@v4 + with: + node-version: 18.x + + - name: Set up JDK 11 + uses: actions/setup-java@v4 + with: + java-version: '11' + distribution: 'temurin' + + - name: Set up Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 0.13.7 + + - name: Install OS packages + run: | + sudo apt-get update + sudo apt-get install -y --allow-downgrades libsnappy-dev jq libvirt-dev + + - name: Cache Ext Dependencies (venv) + if: inputs.disableCaching != true + uses: actions/cache@v4 + with: + path: | + localstack-ext/.venv + # include the matrix group (to re-use the venv) + key: community-it-${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('localstack-ext/localstack-pro-core/requirements-test.txt') }}-${{steps.determine-companion-ref.outputs.result}} + restore-keys: | + community-it-${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-venv-${{ hashFiles('localstack-ext/localstack-pro-core/requirements-test.txt') }}-refs/heads/master + + - name: Cache Ext Dependencies (libs) + if: inputs.disableCaching != true + uses: actions/cache@v4 + with: + path: | + localstack/localstack-core/.filesystem/var/lib/localstack + # include the matrix group (to re-use the var-libs used in the specific test group) + key: community-it-${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-libs-${{ hashFiles('**/packages.py', '**/packages/*') }}-${{steps.determine-companion-ref.outputs.result}}-group-${{ matrix.group }} + restore-keys: | + community-it-${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-libs-${{ hashFiles('**/packages.py', '**/packages/*') }}-refs/heads/master-group-${{ matrix.group }} + + - name: Restore Lambda common runtime packages + id: cached-lambda-common-restore + if: inputs.disableCaching != true + uses: actions/cache/restore@v4 + with: + path: | + localstack/tests/aws/services/lambda_/functions/common + key: community-it-${{ runner.os }}-${{ runner.arch }}-lambda-common-${{ hashFiles('localstack/tests/aws/services/lambda_/functions/common/**/src/*', 'localstack/tests/aws/services/lambda_/functions/common/**/Makefile') }} + + - name: Prebuild lambda common packages + working-directory: localstack + run: ./scripts/build_common_test_functions.sh `pwd`/tests/aws/services/lambda_/functions/common + + - name: Save Lambda common runtime packages + if: inputs.disableCaching != true + uses: actions/cache/save@v4 + with: + path: | + localstack/tests/aws/services/lambda_/functions/common + key: ${{ steps.cached-lambda-common-restore.outputs.cache-primary-key }} + + - name: Install Python Dependencies for Pro + working-directory: localstack-ext + run: make install-ci + + - name: Link Community into Pro venv + working-directory: localstack-ext + run: | + source .venv/bin/activate + pip install -e ../localstack[runtime,test] + + - name: Create Community Entrypoints + working-directory: localstack + # Entrypoints need to be generated _after_ the community edition has been linked into the venv + run: | + VENV_DIR="../localstack-ext/.venv" make entrypoints + 
../localstack-ext/.venv/bin/python -m plux show + + - name: Create Pro Entrypoints + working-directory: localstack-ext + # Entrypoints need to be generated _after_ the community edition has been linked into the venv + run: | + make entrypoints + cd localstack-pro-core + ../.venv/bin/python -m plux show + + - name: Test Pro Startup + env: + DEBUG: 1 + DNS_ADDRESS: 0 + LOCALSTACK_AUTH_TOKEN: "test" + working-directory: localstack-ext + run: | + source .venv/bin/activate + bin/test_localstack_pro.sh + + - name: Determine Test Selection + if: ${{ env.TESTSELECTION_PYTEST_ARGS }} + working-directory: localstack + run: | + if [ -z "${{ github.event.pull_request.base.sha }}" ]; then + echo "Do test selection based on Push event" + else + echo "Do test selection based on Pull Request event" + SCRIPT_OPTS="--base-commit-sha ${{ github.event.pull_request.base.sha }} --head-commit-sha ${{ github.event.pull_request.head.sha }}" + fi + . ../localstack-ext/.venv/bin/activate + python -m localstack.testing.testselection.scripts.generate_test_selection $(pwd) target/testselection/test-selection.txt $SCRIPT_OPTS || (mkdir -p target/testselection && echo "SENTINEL_ALL_TESTS" >> target/testselection/test-selection.txt) + echo "Resulting Test Selection file:" + cat target/testselection/test-selection.txt + + - name: Run Community Integration Tests + env: + # add the GitHub API token to avoid rate limit issues + GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DEBUG: 1 + DISABLE_BOTO_RETRIES: 1 + DNS_ADDRESS: 0 + LAMBDA_EXECUTOR: "local" + LOCALSTACK_AUTH_TOKEN: "test" + AWS_SECRET_ACCESS_KEY: "test" + AWS_ACCESS_KEY_ID: "test" + AWS_DEFAULT_REGION: "us-east-1" + JUNIT_REPORTS_FILE: "pytest-junit-community-${{ matrix.group }}.xml" + TEST_PATH: "../../localstack/tests/aws/" # TODO: run tests in tests/integration + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }}${{ env.TESTSELECTION_PYTEST_ARGS }}--splits ${{ strategy.job-total }} --group ${{ matrix.group }} --durations-path ../../localstack/.test_durations --store-durations" + working-directory: localstack-ext + run: | + # Remove the host tmp folder (might contain remnant files with different permissions) + sudo rm -rf ../localstack/localstack-core/.filesystem/var/lib/localstack/tmp + make test + + - name: Archive Test Durations + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: pytest-split-durations-community-${{ matrix.group }} + path: | + localstack/.test_durations + retention-days: 5 + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-community-${{ matrix.group }} + path: | + localstack-ext/localstack-pro-core/pytest-junit-community-${{ matrix.group }}.xml + retention-days: 30 + + publish-pro-test-results: + name: "Publish Community Tests against Pro Results" + needs: test-pro + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + contents: read + issues: read + # If this is triggered by a pull_request, make sure the PR head repo name is the same as the target repo name + # (i.e. 
do not execute job for workflows coming from forks) + if: >- + (success() || failure()) && ( + github.event_name != 'pull_request' || + github.event.pull_request.head.repo.full_name == github.repository + ) + steps: + - name: Download Artifacts 1 + uses: actions/download-artifact@v4 + with: + name: test-results-community-1 + + - name: Download Artifacts 2 + uses: actions/download-artifact@v4 + with: + name: test-results-community-2 + + - name: Publish Community Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + with: + files: "pytest-junit-community-*.xml" + check_name: "LocalStack Community integration with Pro" + action_fail_on_inconclusive: true + + push-to-tinybird: + if: always() && github.ref == 'refs/heads/master' + runs-on: ubuntu-latest + needs: publish-pro-test-results + steps: + - name: Push to Tinybird + uses: localstack/tinybird-workflow-push@v3 + with: + workflow_id: "tests_pro_integration" + tinybird_token: ${{ secrets.TINYBIRD_CI_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + tinybird_datasource: "ci_workflows" diff --git a/.github/workflows/tests-s3-image.yml b/.github/workflows/tests-s3-image.yml new file mode 100644 index 0000000000000..7d233f79c8aa7 --- /dev/null +++ b/.github/workflows/tests-s3-image.yml @@ -0,0 +1,281 @@ +name: S3 Image Integration Tests + +on: + push: + paths: + - .github/workflows/tests-s3-image.yml + - localstack-core/localstack/aws/*.py + - localstack-core/localstack/aws/handlers/** + - localstack-core/localstack/aws/protocol/** + - localstack-core/localstack/aws/serving/** + - localstack-core/localstack/aws/api/s3/** + - localstack-core/localstack/http/** + - localstack-core/localstack/runtime/** + - localstack-core/localstack/services/s3/** + - localstack-core/localstack/*.py + - tests/aws/services/s3/** + - Dockerfile.s3 + - requirements-*.txt + - setup.cfg + - Makefile + branches: + - master + pull_request: + paths: + - .github/workflows/tests-s3-image.yml + - localstack-core/localstack/aws/*.py + - localstack-core/localstack/aws/handlers/** + - localstack-core/localstack/aws/protocol/** + - localstack-core/localstack/aws/serving/** + - localstack-core/localstack/aws/api/s3/** + - localstack-core/localstack/http/** + - localstack-core/localstack/runtime/** + - localstack-core/localstack/services/s3/** + - localstack-core/localstack/*.py + - tests/aws/services/s3/** + - Dockerfile.s3 + - requirements-*.txt + - setup.cfg + - Makefile + workflow_dispatch: + inputs: + publishDockerImage: + description: 'Publish S3-only images on Docker Hub' + required: false + type: boolean + default: false + PYTEST_LOGLEVEL: + type: choice + description: Loglevel for PyTest + options: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + default: WARNING + +# Only one pull-request triggered run should be executed at a time +# (head_ref is only set for PR events, otherwise fallback to run_id which differs for every run). 
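+# For example (hypothetical values): a pull-request run resolves the group to "S3 Image Integration Tests-my-feature-branch", +# while a push run resolves it to "S3 Image Integration Tests-9123456789" (its run_id), so push runs never cancel one another.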
+concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +env: + # Configure PyTest log level + PYTEST_LOGLEVEL: "${{ inputs.PYTEST_LOGLEVEL || 'WARNING' }}" + # Set non-job-specific environment variables for pytest-tinybird + TINYBIRD_URL: https://api.tinybird.co + TINYBIRD_DATASOURCE: community_tests_s3_image + TINYBIRD_TOKEN: ${{ secrets.TINYBIRD_CI_TOKEN }} + CI_COMMIT_BRANCH: ${{ github.head_ref || github.ref_name }} + CI_COMMIT_SHA: ${{ github.sha }} + CI_JOB_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }} + # report to tinybird if executed on master + TINYBIRD_PYTEST_ARGS: "${{ github.ref == 'refs/heads/master' && '--report-to-tinybird ' || '' }}" + + +jobs: + build-test-s3: + strategy: + matrix: + include: + - arch: amd64 + runner: ubuntu-latest + - arch: arm64 + runner: buildjet-2vcpu-ubuntu-2204-arm + name: "Build and Test S3 image" + env: + PLATFORM: ${{ matrix.arch }} + IMAGE_NAME: "localstack/localstack" + DEFAULT_TAG: "s3-latest" + # Set job-specific environment variables for pytest-tinybird + CI_JOB_NAME: ${{ github.job }} + CI_JOB_ID: ${{ github.job }} + runs-on: ${{ matrix.runner }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + # setuptools_scm requires the git history (at least until the last tag) to determine the version + fetch-depth: 0 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + # If this is triggered by a pull_request, make sure the PR head repo name is the same as the target repo name + # (i.e. do not execute job for workflows coming from forks) + if: >- + ( + github.event_name != 'pull_request' || + github.event.pull_request.head.repo.full_name == github.repository + ) + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install docker build dependencies + run: pip install --upgrade setuptools setuptools_scm + + - name: Build S3 Docker Image + env: + DOCKERFILE: "./Dockerfile.s3" + run: ./bin/docker-helper.sh build + + - name: Run S3 Image tests + timeout-minutes: 10 + env: + PYTEST_ARGS: "${{ env.TINYBIRD_PYTEST_ARGS }}-o junit_family=legacy --junitxml=target/pytest-junit-s3-image-${{ matrix.arch }}.xml" + TEST_PATH: "tests/aws/services/s3" + DEBUG: 1 + run: | + mkdir target + make docker-run-tests-s3-only + + - name: Archive Test Results + uses: actions/upload-artifact@v4 + if: success() || failure() + with: + name: test-results-s3-image-${{ matrix.arch }} + path: target/pytest-junit-s3-image-${{ matrix.arch }}.xml + retention-days: 30 + + - name: Save the S3 image + id: save-image + run: ./bin/docker-helper.sh save + + - name: Store Docker image as artifact + uses: actions/upload-artifact@v4 + with: + name: localstack-s3-image-${{ matrix.arch }} + path: ${{ steps.save-image.outputs.IMAGE_FILENAME }} + retention-days: 1 + + publish-test-results: + name: "Publish S3 Image Test Results" + needs: build-test-s3 + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + contents: read + issues: read + # If this is triggered by a pull_request, make sure the PR head repo name is the same as the target repo name + # (i.e. 
do not execute job for workflows coming from forks) + if: >- + (success() || failure()) && ( + github.event_name != 'pull_request' || + github.event.pull_request.head.repo.full_name == github.repository + ) + steps: + - name: Download AMD64 Results + uses: actions/download-artifact@v4 + with: + name: test-results-s3-image-amd64 + + - name: Download ARM64 Results + uses: actions/download-artifact@v4 + with: + name: test-results-s3-image-arm64 + + - name: Publish S3 Image Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + with: + files: pytest-junit-*.xml + check_name: "S3 Image Test Results (AMD64 / ARM64)" + action_fail_on_inconclusive: true + + push-s3-image: + name: "Push S3 images and manifest" + runs-on: ubuntu-latest + needs: + - build-test-s3 + if: inputs.publishDockerImage + env: + IMAGE_NAME: "localstack/localstack" + DEFAULT_TAG: "s3-latest" + steps: + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install docker build dependencies + run: pip install --upgrade setuptools setuptools_scm + + - name: Download AMD64 image + uses: actions/download-artifact@v4 + with: + name: localstack-s3-image-amd64 + + - name: Download ARM64 image + uses: actions/download-artifact@v4 + with: + name: localstack-s3-image-arm64 + + - name: Load AMD64 image + env: + PLATFORM: amd64 + run: ./bin/docker-helper.sh load + + - name: Push AMD64 image + env: + DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + PLATFORM: amd64 + run: ./bin/docker-helper.sh push + + - name: Load ARM64 image + env: + PLATFORM: arm64 + run: ./bin/docker-helper.sh load + + - name: Push ARM64 image + env: + DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + PLATFORM: arm64 + run: ./bin/docker-helper.sh push + + - name: Create and push manifest + env: + DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + run: ./bin/docker-helper.sh push-manifests + + cleanup: + name: "Clean up" + runs-on: ubuntu-latest + if: success() + needs: push-s3-image + steps: + - uses: geekyeggo/delete-artifact@v5 + with: + name: localstack-s3-image-* + failOnError: false + + push-to-tinybird: + if: always() && github.ref == 'refs/heads/master' + runs-on: ubuntu-latest + needs: push-s3-image + steps: + - name: Push to Tinybird + uses: localstack/tinybird-workflow-push@v3 + with: + workflow_id: "tests_s3_image" + tinybird_token: ${{ secrets.TINYBIRD_CI_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + tinybird_datasource: "ci_workflows" diff --git a/.github/workflows/update_openapi_spec.yml b/.github/workflows/update_openapi_spec.yml new file mode 100644 index 0000000000000..07d28dee8eccf --- /dev/null +++ b/.github/workflows/update_openapi_spec.yml @@ -0,0 +1,25 @@ +name: Update OpenAPI Spec + +on: + push: + branches: + - master + paths: + - '**/*openapi.yaml' + - '**/*openapi.yml' + workflow_dispatch: + +jobs: + update-openapi-spec: + runs-on: ubuntu-latest + + steps: + # This step dispatches a workflow in the OpenAPI repo, updating the spec and opening a PR. 
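+ # The dispatched workflow can read this payload via github.event.client_payload, e.g. in a (hypothetical) consumer step: + #   run: echo "Update spec for ${{ github.event.client_payload.repo }} at ${{ github.event.client_payload.ref }}"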
+ - name: Dispatch update spec workflow + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.PRO_ACCESS_TOKEN }} + repository: localstack/openapi + event-type: openapi-update + # A git reference is needed when we want to dispatch the workflow from another branch. + client-payload: '{"ref": "${{ github.ref }}", "repo": "${{ github.repository }}"}' diff --git a/.github/workflows/upgrade-python-dependencies.yml b/.github/workflows/upgrade-python-dependencies.yml new file mode 100644 index 0000000000000..83b26043bd8c7 --- /dev/null +++ b/.github/workflows/upgrade-python-dependencies.yml @@ -0,0 +1,13 @@ +name: Upgrade Pinned Python Dependencies + +on: + schedule: + - cron: 0 5 * * TUE + workflow_dispatch: + + +jobs: + upgrade-dependencies: + uses: localstack/meta/.github/workflows/upgrade-python-dependencies.yml@main + secrets: + github-token: ${{ secrets.PRO_ACCESS_TOKEN }} diff --git a/.github/workflows/validate-codeowners.yml b/.github/workflows/validate-codeowners.yml new file mode 100644 index 0000000000000..2f0b19cfd7ca0 --- /dev/null +++ b/.github/workflows/validate-codeowners.yml @@ -0,0 +1,22 @@ +name: LocalStack - Validate Codeowners + +on: + push: + branches: + - master + pull_request: + branches: + - master + +jobs: + validate-codeowners: + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Validate codeowners + uses: mszostok/codeowners-validator@v0.7.4 + with: + checks: "files,duppatterns,syntax" + experimental_checks: "avoid-shadowing" diff --git a/.github/workflows/welcome-first-time-contributors.yml b/.github/workflows/welcome-first-time-contributors.yml deleted file mode 100644 index 5d4d4517ca15d..0000000000000 --- a/.github/workflows/welcome-first-time-contributors.yml +++ /dev/null @@ -1,81 +0,0 @@ -name: Welcome First Time Contributors ✨ - -on: - pull_request_target: - types: - - opened - issues: - types: - - opened - -jobs: - welcome: - runs-on: ubuntu-latest - steps: - - uses: actions/github-script@v3 - with: - github-token: ${{ secrets.PRO_ACCESS_TOKEN }} - script: | - const issueMessage = `Welcome to LocalStack! Thanks for reporting your first issue and our team will be working towards fixing the issue for you or reach out for more background information. We recommend joining our [Slack Community](https://localstack.cloud/contact/) for real-time help and drop a message to LocalStack Pro Support if you are a Pro user! If you are willing to contribute towards fixing this issue, please have a look at our [contributing guidelines](https://github.com/localstack/.github/blob/main/CONTRIBUTING.md) and our [contributing guide](https://docs.localstack.cloud/contributing/).`; - const prMessage = `Welcome to LocalStack! Thanks for raising your first Pull Request and landing in your contributions. Our team will reach out with any reviews or feedbacks that we have shortly. We recommend joining our [Slack Community](https://localstack.cloud/contact/) and share your PR on the **#community** channel to share your contributions with us. 
Please make sure you are following our [contributing guidelines](https://github.com/localstack/.github/blob/main/CONTRIBUTING.md) and our [Code of Conduct](https://github.com/localstack/.github/blob/main/CODE_OF_CONDUCT.md).`; - - if (!issueMessage && !prMessage) { - throw new Error('Action should have either issueMessage or prMessage set'); - } - - const isIssue = !!context.payload.issue; - let isFirstContribution; - if (isIssue) { - const query = `query($owner:String!, $name:String!, $contributer:String!) { - repository(owner:$owner, name:$name){ - issues(first: 1, filterBy: {createdBy:$contributer}){ - totalCount - } - } - }`; - - const variables = { - owner: context.repo.owner, - name: context.repo.repo, - contributer: context.payload.sender.login - }; - - const { repository: { issues: { totalCount } } } = await github.graphql(query, variables); - isFirstContribution = totalCount === 1; - } else { - const query = `query($qstr: String!) { - search(query: $qstr, type: ISSUE, first: 1) { - issueCount - } - }`; - const variables = { - "qstr": `repo:${context.repo.owner}/${context.repo.repo} type:pr author:${context.payload.sender.login}`, - }; - const { search: { issueCount } } = await github.graphql(query, variables); - isFirstContribution = issueCount === 1; - } - - if (!isFirstContribution) { - return; - } - - const message = isIssue ? issueMessage : prMessage; - if (isIssue) { - const issueNumber = context.payload.issue.number; - await github.issues.createComment({ - owner: context.payload.repository.owner.login, - repo: context.payload.repository.name, - issue_number: issueNumber, - body: message - }); - } - else { - const pullNumber = context.payload.pull_request.number; - await github.pulls.createReview({ - owner: context.payload.repository.owner.login, - repo: context.payload.repository.name, - pull_number: pullNumber, - body: message, - event: 'COMMENT' - }); - } diff --git a/.gitignore b/.gitignore index dbcd48aac4132..548059743cd07 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,10 @@ *.iml .coverage.* htmlcov +*.orig + +# ignore .vs files that store temporary cache of Visual Studio workspace settings +.vs .cache .filesystem @@ -26,7 +30,6 @@ venv *.egg-info/ .eggs/ .vagrant/ -*.sw* ~* *~ @@ -52,16 +55,23 @@ requirements.copy.txt *tfplan *.terraform.lock.hcl -.python-version - venv api_states -/integration/lambdas/golang/handler.zip -/tests/integration/lambdas/golang/handler.zip +/aws/lambdas/golang/handler.zip +/tests/aws/lambdas/golang/handler.zip tmp/ volume/ # ANTLR4 ID plugin. gen/ + +# hypothesis pytest plugin +.hypothesis + +# RAW snapshots +*.raw.snapshot.json + +# setuptools_scm version.py +*/*/version.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2cb395b4c8b42..52bdb9e2f0fee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,33 +1,36 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - - repo: https://github.com/psf/black - rev: 22.3.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.11.13 hooks: - - id: black + - id: ruff + args: [--fix, --exit-non-zero-on-fix] + # Run the formatter. 
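+ # (ruff-format replaces the removed black hook, just as the ruff hook above replaces isort and flake8.)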
+ - id: ruff-format - - repo: https://github.com/pycqa/isort - rev: 5.12.0 + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.16.0 hooks: - - id: isort - name: isort (python) - - id: isort - name: isort (cython) - types: [cython] - - id: isort - name: isort (pyi) - types: [pyi] - - - repo: https://github.com/pycqa/flake8 - rev: 6.0.0 - hooks: - - id: flake8 - entry: pflake8 - additional_dependencies: - - pyproject-flake8 + - id: mypy + entry: bash -c 'cd localstack-core && mypy --install-types --non-interactive' + additional_dependencies: ['botocore-stubs', 'rolo'] - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 + rev: v5.0.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace + + - repo: https://github.com/localstack/pre-commit-hooks + rev: v1.2.1 + hooks: + - id: check-pinned-deps-for-needed-upgrade + + - repo: https://github.com/python-openapi/openapi-spec-validator + rev: 0.8.0b1 + hooks: + - id: openapi-spec-validator + files: .*openapi.*\.(json|yaml|yml) + exclude: ^(tests/|.github/workflows/) diff --git a/.python-version b/.python-version new file mode 100644 index 0000000000000..2c0733315e415 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.11 diff --git a/.test_durations b/.test_durations new file mode 100644 index 0000000000000..08c2d52ba5f8b --- /dev/null +++ b/.test_durations @@ -0,0 +1,4799 @@ +{ + "tests/aws/scenario/bookstore/test_bookstore.py::TestBookstoreApplication::test_lambda_dynamodb": 1.8306775939999795, + "tests/aws/scenario/bookstore/test_bookstore.py::TestBookstoreApplication::test_opensearch_crud": 3.4609082799999555, + "tests/aws/scenario/bookstore/test_bookstore.py::TestBookstoreApplication::test_search_books": 60.73045058200003, + "tests/aws/scenario/bookstore/test_bookstore.py::TestBookstoreApplication::test_setup": 93.035824546, + "tests/aws/scenario/kinesis_firehose/test_kinesis_firehose.py::TestKinesisFirehoseScenario::test_kinesis_firehose_s3": 0.012874760000045171, + "tests/aws/scenario/lambda_destination/test_lambda_destination_scenario.py::TestLambdaDestinationScenario::test_destination_sns": 5.604998042999966, + "tests/aws/scenario/lambda_destination/test_lambda_destination_scenario.py::TestLambdaDestinationScenario::test_infra": 13.293676268000013, + "tests/aws/scenario/loan_broker/test_loan_broker.py::TestLoanBrokerScenario::test_prefill_dynamodb_table": 30.708885502999976, + "tests/aws/scenario/loan_broker/test_loan_broker.py::TestLoanBrokerScenario::test_stepfunctions_input_recipient_list[step_function_input0-SUCCEEDED]": 3.9920143719999714, + "tests/aws/scenario/loan_broker/test_loan_broker.py::TestLoanBrokerScenario::test_stepfunctions_input_recipient_list[step_function_input1-SUCCEEDED]": 2.8442309750000163, + "tests/aws/scenario/loan_broker/test_loan_broker.py::TestLoanBrokerScenario::test_stepfunctions_input_recipient_list[step_function_input2-FAILED]": 0.9125211790000094, + "tests/aws/scenario/loan_broker/test_loan_broker.py::TestLoanBrokerScenario::test_stepfunctions_input_recipient_list[step_function_input3-FAILED]": 0.6815302869999869, + "tests/aws/scenario/loan_broker/test_loan_broker.py::TestLoanBrokerScenario::test_stepfunctions_input_recipient_list[step_function_input4-FAILED]": 0.5234817729999577, + "tests/aws/scenario/mythical_mysfits/test_mythical_misfits.py::TestMythicalMisfitsScenario::test_deployed_infra_state": 0.0026562400000216257, + "tests/aws/scenario/mythical_mysfits/test_mythical_misfits.py::TestMythicalMisfitsScenario::test_populate_data": 0.001706896999962737, + 
"tests/aws/scenario/mythical_mysfits/test_mythical_misfits.py::TestMythicalMisfitsScenario::test_user_clicks_are_stored": 0.001825970000027155, + "tests/aws/scenario/note_taking/test_note_taking.py::TestNoteTakingScenario::test_notes_rest_api": 4.533551982999995, + "tests/aws/scenario/note_taking/test_note_taking.py::TestNoteTakingScenario::test_validate_infra_setup": 34.25136890600004, + "tests/aws/services/acm/test_acm.py::TestACM::test_boto_wait_for_certificate_validation": 1.2082726290000778, + "tests/aws/services/acm/test_acm.py::TestACM::test_certificate_for_subdomain_wildcard": 2.2963858519999576, + "tests/aws/services/acm/test_acm.py::TestACM::test_create_certificate_for_multiple_alternative_domains": 11.193298594999987, + "tests/aws/services/acm/test_acm.py::TestACM::test_domain_validation": 0.24601558099999465, + "tests/aws/services/acm/test_acm.py::TestACM::test_import_certificate": 1.0255106409999826, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiAuthorizer::test_authorizer_crud_no_api": 0.03166019600001846, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiDocumentationPart::test_doc_parts_crud_no_api": 0.032727379999982986, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiDocumentationPart::test_documentation_part_lifecycle": 0.0691414789999385, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiDocumentationPart::test_import_documentation_parts": 0.1256413830000156, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiDocumentationPart::test_invalid_create_documentation_part_operations": 0.03903247199997395, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiDocumentationPart::test_invalid_delete_documentation_part": 0.05247172699995417, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiDocumentationPart::test_invalid_get_documentation_part": 0.04592164300004242, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiDocumentationPart::test_invalid_get_documentation_parts": 0.01543583700004092, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiDocumentationPart::test_invalid_update_documentation_part": 0.05624032400004353, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiMethod::test_method_lifecycle": 0.07278596300000117, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiMethod::test_method_request_parameters": 0.048786912000025495, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiMethod::test_put_method_model": 0.27825750099992774, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiMethod::test_put_method_validation": 0.06997209599995813, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiMethod::test_update_method": 0.07143810400003758, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiMethod::test_update_method_validation": 0.13446594900000264, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiModels::test_model_lifecycle": 0.06994636100000662, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiModels::test_model_validation": 0.0998845069999561, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiModels::test_update_model": 0.06950375700006362, + 
"tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRequestValidator::test_create_request_validator_invalid_api_id": 0.01457731400000739, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRequestValidator::test_invalid_delete_request_validator": 0.04277879500000381, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRequestValidator::test_invalid_get_request_validator": 0.04435944799996605, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRequestValidator::test_invalid_get_request_validators": 0.013965928000061467, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRequestValidator::test_invalid_update_request_validator_operations": 0.06056790800005274, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRequestValidator::test_request_validator_lifecycle": 0.09003225700007533, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRequestValidator::test_validators_crud_no_api": 0.03163985399999092, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiResource::test_create_proxy_resource": 0.11610522900002707, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiResource::test_create_proxy_resource_validation": 0.07638203299995894, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiResource::test_create_resource_parent_invalid": 0.03004573599997684, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiResource::test_delete_resource": 0.06689559799991684, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiResource::test_resource_lifecycle": 0.10818160799999532, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiResource::test_update_resource_behaviour": 0.14371496300003628, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_create_rest_api_with_binary_media_types": 0.024210735999986355, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_create_rest_api_with_optional_params": 0.07354736299998876, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_create_rest_api_with_tags": 0.04265929800004642, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_get_api_case_insensitive": 0.001909983999951237, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_list_and_delete_apis": 0.08450191100001803, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_update_rest_api_behaviour": 0.05373609100001886, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_update_rest_api_compression": 0.09015043199997308, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_update_rest_api_invalid_api_id": 0.014741459999981998, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayApiRestApi::test_update_rest_api_operation_add_remove": 0.05077170300000944, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayGatewayResponse::test_gateway_response_crud": 0.09927309100004322, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayGatewayResponse::test_gateway_response_put": 0.09819665999998506, + 
"tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayGatewayResponse::test_gateway_response_validation": 0.10285701900005506, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApiGatewayGatewayResponse::test_update_gateway_response": 0.1202379620000329, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApigatewayIntegration::test_put_integration_request_parameter_bool_type": 0.001880082000013772, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApigatewayIntegration::test_put_integration_response_validation": 0.07398715900001207, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApigatewayIntegration::test_put_integration_wrong_type": 0.040084112999977606, + "tests/aws/services/apigateway/test_apigateway_api.py::TestApigatewayTestInvoke::test_invoke_test_method": 0.18814763500000709, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_account": 0.04275905000008606, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_authorizer_crud": 0.0019337829999699352, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_handle_domain_name": 0.24127724000010176, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_http_integration_with_path_request_parameter": 0.002243820000046526, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_lambda_asynchronous_invocation": 1.3724172019999514, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_lambda_integration_aws_type": 7.87926492400004, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_lambda_proxy_integration[/lambda/foo1]": 0.0017643870000370043, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_lambda_proxy_integration[/lambda/{test_param1}]": 0.0017631050000090909, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_lambda_proxy_integration_any_method": 0.0018373920000271937, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_lambda_proxy_integration_any_method_with_path_param": 0.0018090690000462928, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_lambda_proxy_integration_with_is_base_64_encoded": 0.0018574299999727373, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_gateway_mock_integration": 0.06236799600003451, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_api_mock_integration_response_params": 0.00182911500002092, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_apigateway_with_custom_authorization_method": 15.373010333000025, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_apigw_stage_variables[dev]": 1.6355085530000224, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_apigw_stage_variables[local]": 1.60974901000003, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_apigw_test_invoke_method_api": 2.2329229609999857, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_base_path_mapping": 0.17674683599994978, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_base_path_mapping_root": 0.1587198959999796, + 
"tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_create_rest_api_with_custom_id[host_based_url]": 0.06318539300002612, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_create_rest_api_with_custom_id[localstack_path_based_url]": 0.06370467900006815, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_create_rest_api_with_custom_id[path_based_url]": 0.06739443399999345, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_delete_rest_api_with_invalid_id": 0.012518495999984225, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://allowed-False-UrlType.HOST_BASED]": 0.07240681199999699, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://allowed-False-UrlType.LS_PATH_BASED]": 0.07386598499994079, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://allowed-False-UrlType.PATH_BASED]": 0.07220304300005864, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://allowed-True-UrlType.HOST_BASED]": 0.0934850259999962, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://allowed-True-UrlType.LS_PATH_BASED]": 0.07008028300003843, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://allowed-True-UrlType.PATH_BASED]": 0.06892254500002082, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://denied-False-UrlType.HOST_BASED]": 0.07596852300002865, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://denied-False-UrlType.LS_PATH_BASED]": 0.07553099599999769, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://denied-False-UrlType.PATH_BASED]": 0.0795212809999839, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://denied-True-UrlType.HOST_BASED]": 0.070245946, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://denied-True-UrlType.LS_PATH_BASED]": 0.07018232599995144, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_invoke_endpoint_cors_headers[http://denied-True-UrlType.PATH_BASED]": 0.06969378799999504, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_multiple_api_keys_validate": 27.627488958000015, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_put_integration_dynamodb_proxy_validation_with_request_template": 0.0017333970000095178, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_put_integration_dynamodb_proxy_validation_without_request_template": 0.0017808249999688996, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_response_headers_invocation_with_apigw": 1.7913326810000285, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestAPIGateway::test_update_rest_api_deployment": 0.07151520300004677, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_api_gateway_http_integrations[custom]": 0.0017788210000162508, + 
"tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_api_gateway_http_integrations[proxy]": 0.0018017259999965063, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[NEVER-UrlType.HOST_BASED-GET]": 0.09223141100000021, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[NEVER-UrlType.HOST_BASED-POST]": 0.09137564200005954, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[NEVER-UrlType.PATH_BASED-GET]": 0.09194332000004124, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[NEVER-UrlType.PATH_BASED-POST]": 0.09345741000004182, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[WHEN_NO_MATCH-UrlType.HOST_BASED-GET]": 0.09004916399999274, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[WHEN_NO_MATCH-UrlType.HOST_BASED-POST]": 0.09237544999996317, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[WHEN_NO_MATCH-UrlType.PATH_BASED-GET]": 0.0936843439999393, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[WHEN_NO_MATCH-UrlType.PATH_BASED-POST]": 0.09075120699992567, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[WHEN_NO_TEMPLATES-UrlType.HOST_BASED-GET]": 0.09080804499990336, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[WHEN_NO_TEMPLATES-UrlType.HOST_BASED-POST]": 0.0925549039999396, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[WHEN_NO_TEMPLATES-UrlType.PATH_BASED-GET]": 0.0920941589999984, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestIntegrations::test_mock_integration_response[WHEN_NO_TEMPLATES-UrlType.PATH_BASED-POST]": 0.09429839000000584, + "tests/aws/services/apigateway/test_apigateway_basic.py::TestTagging::test_tag_api": 0.06903556700001445, + "tests/aws/services/apigateway/test_apigateway_basic.py::test_apigw_call_api_with_aws_endpoint_url": 0.013104117999944265, + "tests/aws/services/apigateway/test_apigateway_basic.py::test_rest_api_multi_region[UrlType.HOST_BASED-ANY]": 3.409972497999945, + "tests/aws/services/apigateway/test_apigateway_basic.py::test_rest_api_multi_region[UrlType.HOST_BASED-GET]": 3.4051879659999713, + "tests/aws/services/apigateway/test_apigateway_basic.py::test_rest_api_multi_region[path_based_url-ANY]": 3.460962833999986, + "tests/aws/services/apigateway/test_apigateway_basic.py::test_rest_api_multi_region[path_based_url-GET]": 9.564439794000009, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApiGatewayCommon::test_api_gateway_request_validator": 2.4409245159999955, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApiGatewayCommon::test_api_gateway_request_validator_with_ref_models": 0.16870720400004302, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApiGatewayCommon::test_api_gateway_request_validator_with_ref_one_ofmodels": 0.17983501700001625, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApiGatewayCommon::test_input_body_formatting": 3.420733691999942, + 
"tests/aws/services/apigateway/test_apigateway_common.py::TestApiGatewayCommon::test_input_path_template_formatting": 0.45992290800006685, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApiGatewayCommon::test_integration_request_parameters_mapping": 0.10416832400011344, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApiGatewayCommon::test_invocation_trace_id": 3.0983578250000505, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApigatewayRouting::test_api_not_existing": 0.023232739999969, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApigatewayRouting::test_proxy_routing_with_hardcoded_resource_sibling": 0.24598954499992942, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApigatewayRouting::test_routing_not_found": 0.11063040700003057, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApigatewayRouting::test_routing_with_custom_api_id": 0.0979436220000025, + "tests/aws/services/apigateway/test_apigateway_common.py::TestApigatewayRouting::test_routing_with_hardcoded_resource_sibling_order": 0.2238018699999884, + "tests/aws/services/apigateway/test_apigateway_common.py::TestDeployments::test_create_delete_deployments[False]": 0.4042067909999787, + "tests/aws/services/apigateway/test_apigateway_common.py::TestDeployments::test_create_delete_deployments[True]": 0.43591541299997516, + "tests/aws/services/apigateway/test_apigateway_common.py::TestDeployments::test_create_update_deployments": 0.33989664600005653, + "tests/aws/services/apigateway/test_apigateway_common.py::TestDocumentations::test_documentation_parts_and_versions": 0.10777894000005972, + "tests/aws/services/apigateway/test_apigateway_common.py::TestStages::test_create_update_stages": 0.33117468599994027, + "tests/aws/services/apigateway/test_apigateway_common.py::TestStages::test_update_stage_remove_wildcard": 0.31000804499996093, + "tests/aws/services/apigateway/test_apigateway_common.py::TestUsagePlans::test_api_key_required_for_methods": 0.19657758199997488, + "tests/aws/services/apigateway/test_apigateway_common.py::TestUsagePlans::test_usage_plan_crud": 0.18532574699997895, + "tests/aws/services/apigateway/test_apigateway_custom_ids.py::test_apigateway_custom_ids": 0.06122678899998846, + "tests/aws/services/apigateway/test_apigateway_dynamodb.py::test_error_aws_proxy_not_supported": 0.19541213900004095, + "tests/aws/services/apigateway/test_apigateway_dynamodb.py::test_rest_api_to_dynamodb_integration[PutItem]": 0.4285630210000022, + "tests/aws/services/apigateway/test_apigateway_dynamodb.py::test_rest_api_to_dynamodb_integration[Query]": 0.4974042979999922, + "tests/aws/services/apigateway/test_apigateway_dynamodb.py::test_rest_api_to_dynamodb_integration[Scan]": 0.4045932010000115, + "tests/aws/services/apigateway/test_apigateway_eventbridge.py::test_apigateway_to_eventbridge": 0.2640418709999608, + "tests/aws/services/apigateway/test_apigateway_extended.py::TestApigatewayApiKeysCrud::test_get_api_keys": 0.16527851800003646, + "tests/aws/services/apigateway/test_apigateway_extended.py::TestApigatewayApiKeysCrud::test_get_usage_plan_api_keys": 14.565750925999907, + "tests/aws/services/apigateway/test_apigateway_extended.py::test_create_domain_names": 0.07502431400001797, + "tests/aws/services/apigateway/test_apigateway_extended.py::test_export_oas30_openapi[TEST_IMPORT_PETSTORE_SWAGGER]": 0.40482689199996, + "tests/aws/services/apigateway/test_apigateway_extended.py::test_export_oas30_openapi[TEST_IMPORT_PETS]": 
0.3155974850000689, + "tests/aws/services/apigateway/test_apigateway_extended.py::test_export_swagger_openapi[TEST_IMPORT_PETSTORE_SWAGGER]": 0.4057825730000104, + "tests/aws/services/apigateway/test_apigateway_extended.py::test_export_swagger_openapi[TEST_IMPORT_PETS]": 0.31219083899992484, + "tests/aws/services/apigateway/test_apigateway_extended.py::test_get_domain_name": 0.0702157470000202, + "tests/aws/services/apigateway/test_apigateway_extended.py::test_get_domain_names": 0.07257708399998819, + "tests/aws/services/apigateway/test_apigateway_http.py::test_http_integration_invoke_status_code_passthrough[HTTP]": 1.7505127640000637, + "tests/aws/services/apigateway/test_apigateway_http.py::test_http_integration_invoke_status_code_passthrough[HTTP_PROXY]": 1.7139310940000314, + "tests/aws/services/apigateway/test_apigateway_http.py::test_http_integration_method[HTTP]": 2.0455031299998154, + "tests/aws/services/apigateway/test_apigateway_http.py::test_http_integration_method[HTTP_PROXY]": 2.0076948039998115, + "tests/aws/services/apigateway/test_apigateway_http.py::test_http_integration_with_lambda[HTTP]": 2.170325814999842, + "tests/aws/services/apigateway/test_apigateway_http.py::test_http_integration_with_lambda[HTTP_PROXY]": 2.1961897050000516, + "tests/aws/services/apigateway/test_apigateway_http.py::test_http_proxy_integration_request_data_mappings": 1.9682702049999534, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_and_validate_rest_api[openapi.spec.tf.json]": 0.3610307600000624, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_and_validate_rest_api[swagger-mock-cors.json]": 0.43036976299993057, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_api": 0.06505610099998194, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_api_with_base_path_oas30[ignore]": 0.8735227780000514, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_api_with_base_path_oas30[prepend]": 0.8828890079998928, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_api_with_base_path_oas30[split]": 0.8768331999999646, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[ignore]": 0.6047385670000267, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[prepend]": 0.5968512279999914, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_rest_apis_with_base_path_swagger[split]": 0.6075581729999158, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_swagger_api": 0.7781904249999343, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_with_circular_models": 0.2820640759998696, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_with_circular_models_and_request_validation": 0.38850159200012513, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_with_cognito_auth_identity_source": 0.3859413489999497, + 
"tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_with_global_api_key_authorizer": 0.2785899169998629, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_with_http_method_integration": 1.1279415980000067, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_with_integer_http_status_code": 0.17820217300004515, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_import_with_stage_variables": 1.6937769980000894, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_put_rest_api_mode_binary_media_types[merge]": 0.33803668999996717, + "tests/aws/services/apigateway/test_apigateway_import.py::TestApiGatewayImportRestApi::test_put_rest_api_mode_binary_media_types[overwrite]": 0.3404845199999045, + "tests/aws/services/apigateway/test_apigateway_integrations.py::TestApiGatewayHeaderRemapping::test_apigateway_header_remapping_aws[AWS]": 2.445959500000072, + "tests/aws/services/apigateway/test_apigateway_integrations.py::TestApiGatewayHeaderRemapping::test_apigateway_header_remapping_aws[AWS_PROXY]": 2.4442483880000054, + "tests/aws/services/apigateway/test_apigateway_integrations.py::TestApiGatewayHeaderRemapping::test_apigateway_header_remapping_http[HTTP]": 0.8168622240000332, + "tests/aws/services/apigateway/test_apigateway_integrations.py::TestApiGatewayHeaderRemapping::test_apigateway_header_remapping_http[HTTP_PROXY]": 0.8228991810000252, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_create_execute_api_vpc_endpoint": 5.51742650999995, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_http_integration_status_code_selection": 0.11978421399999206, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_integration_mock_with_path_param": 0.0945010299999467, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_integration_mock_with_request_overrides_in_response_template": 0.11433569400003307, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_integration_mock_with_response_override_in_request_template[False]": 0.0865096940000285, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_integration_mock_with_response_override_in_request_template[True]": 0.08682158499993875, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_put_integration_response_with_response_template": 1.2003933639999786, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_put_integration_responses": 0.16711714799987476, + "tests/aws/services/apigateway/test_apigateway_integrations.py::test_put_integration_validation": 0.20105003500009389, + "tests/aws/services/apigateway/test_apigateway_kinesis.py::test_apigateway_to_kinesis[PutRecord]": 1.158917444999929, + "tests/aws/services/apigateway/test_apigateway_kinesis.py::test_apigateway_to_kinesis[PutRecords]": 1.1512200410001014, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_aws_proxy_binary_response": 3.7535720319999655, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_aws_proxy_response_payload_format_validation": 4.91831154700003, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_aws_integration": 1.7392398289999846, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_aws_integration_response_with_mapping_templates": 
1.9345927329999313, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_aws_integration_with_request_template": 1.8528378119999616, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_aws_proxy_integration": 4.085220950999997, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_aws_proxy_integration_non_post_method": 1.3365963490000468, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_aws_proxy_integration_request_data_mapping": 2.878076430999954, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_aws_proxy_response_format": 2.0462573719999, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_rust_proxy_integration": 1.7800068810000766, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_lambda_selection_patterns": 2.0459635729999945, + "tests/aws/services/apigateway/test_apigateway_lambda.py::test_put_integration_aws_proxy_uri": 1.3439045939999232, + "tests/aws/services/apigateway/test_apigateway_lambda_cfn.py::TestApigatewayLambdaIntegration::test_scenario_validate_infra": 7.642166891000102, + "tests/aws/services/apigateway/test_apigateway_s3.py::TestApiGatewayS3BinarySupport::test_apigw_s3_binary_support_request[CONVERT_TO_TEXT]": 0.5738120400000071, + "tests/aws/services/apigateway/test_apigateway_s3.py::TestApiGatewayS3BinarySupport::test_apigw_s3_binary_support_request[None]": 0.5696421239999836, + "tests/aws/services/apigateway/test_apigateway_s3.py::TestApiGatewayS3BinarySupport::test_apigw_s3_binary_support_request_convert_to_binary": 0.5192244249999476, + "tests/aws/services/apigateway/test_apigateway_s3.py::TestApiGatewayS3BinarySupport::test_apigw_s3_binary_support_request_convert_to_binary_with_request_template": 0.3489556839998613, + "tests/aws/services/apigateway/test_apigateway_s3.py::TestApiGatewayS3BinarySupport::test_apigw_s3_binary_support_response_convert_to_binary": 0.5803233399999499, + "tests/aws/services/apigateway/test_apigateway_s3.py::TestApiGatewayS3BinarySupport::test_apigw_s3_binary_support_response_convert_to_binary_with_request_template": 0.3874819389999402, + "tests/aws/services/apigateway/test_apigateway_s3.py::TestApiGatewayS3BinarySupport::test_apigw_s3_binary_support_response_convert_to_text": 0.5868586699999696, + "tests/aws/services/apigateway/test_apigateway_s3.py::TestApiGatewayS3BinarySupport::test_apigw_s3_binary_support_response_no_content_handling": 0.601561848000074, + "tests/aws/services/apigateway/test_apigateway_s3.py::test_apigateway_s3_any": 0.4845440180000651, + "tests/aws/services/apigateway/test_apigateway_s3.py::test_apigateway_s3_method_mapping": 0.5188475739998921, + "tests/aws/services/apigateway/test_apigateway_sqs.py::test_sqs_amz_json_protocol": 1.079206910000039, + "tests/aws/services/apigateway/test_apigateway_sqs.py::test_sqs_aws_integration": 1.2227715730000455, + "tests/aws/services/apigateway/test_apigateway_sqs.py::test_sqs_aws_integration_with_message_attribute[MessageAttribute]": 0.31224277800004074, + "tests/aws/services/apigateway/test_apigateway_sqs.py::test_sqs_aws_integration_with_message_attribute[MessageAttributes]": 0.3740195999999969, + "tests/aws/services/apigateway/test_apigateway_sqs.py::test_sqs_request_and_response_xml_templates_integration": 0.40435537899998053, + "tests/aws/services/apigateway/test_apigateway_ssm.py::test_get_parameter_query_protocol": 0.0018512760000248818, + "tests/aws/services/apigateway/test_apigateway_ssm.py::test_ssm_aws_integration": 
0.28434512699993775, + "tests/aws/services/apigateway/test_apigateway_stepfunctions.py::TestApigatewayStepfunctions::test_apigateway_with_step_function_integration[DeleteStateMachine]": 1.4979481660000147, + "tests/aws/services/apigateway/test_apigateway_stepfunctions.py::TestApigatewayStepfunctions::test_apigateway_with_step_function_integration[StartExecution]": 1.5652199209999935, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceApi::test_api_exceptions": 0.001882635000015398, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceApi::test_create_exceptions": 0.0017371530000218627, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceApi::test_create_invalid_desiredstate": 0.0021124749999898995, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceApi::test_double_create_with_client_token": 0.0017514999999548309, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceApi::test_lifecycle": 0.0020145920000231854, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceApi::test_list_resources": 0.0018738190000249233, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceApi::test_list_resources_with_resource_model": 0.0017670600000201375, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceApi::test_update": 0.00176567599999089, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceRequestApi::test_cancel_edge_cases[FAIL]": 0.0019104479999896284, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceRequestApi::test_cancel_edge_cases[SUCCESS]": 0.00176282099994296, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceRequestApi::test_cancel_request": 0.0017456990000255246, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceRequestApi::test_get_request_status": 0.0017219849999037251, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceRequestApi::test_invalid_request_token_exc": 0.001783229000011488, + "tests/aws/services/cloudcontrol/test_cloudcontrol_api.py::TestCloudControlResourceRequestApi::test_list_request_status": 0.0017618890000221654, + "tests/aws/services/cloudformation/api/test_changesets.py::TestUpdates::test_deleting_resource": 0.0017535739999630096, + "tests/aws/services/cloudformation/api/test_changesets.py::TestUpdates::test_simple_update_single_resource": 4.196529728000087, + "tests/aws/services/cloudformation/api/test_changesets.py::TestUpdates::test_simple_update_two_resources": 0.0018102889999909166, + "tests/aws/services/cloudformation/api/test_changesets.py::test_autoexpand_capability_requirement": 0.052010827999993126, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_and_then_remove_non_supported_resource_change_set": 20.378889379000043, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_and_then_remove_supported_resource_change_set": 21.948498336000057, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_and_then_update_refreshes_template_metadata": 2.146562182000139, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_change_set_create_existing": 0.0018562049999673036, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_change_set_invalid_params": 
0.015462386000081096, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_change_set_missing_stackname": 0.004812114000060319, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_change_set_update_nonexisting": 0.017314134000002923, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_change_set_update_without_parameters": 0.0017991189999975177, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_change_set_with_ssm_parameter": 1.1564679650000471, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_change_set_without_parameters": 0.08757776800007377, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_changeset_with_stack_id": 0.23805133400003342, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_delete_create": 2.1519617240001025, + "tests/aws/services/cloudformation/api/test_changesets.py::test_create_while_in_review": 0.0017839809999031786, + "tests/aws/services/cloudformation/api/test_changesets.py::test_delete_change_set_exception": 0.02096512899993286, + "tests/aws/services/cloudformation/api/test_changesets.py::test_deleted_changeset": 0.049388910000061514, + "tests/aws/services/cloudformation/api/test_changesets.py::test_describe_change_set_nonexisting": 0.012799164000171004, + "tests/aws/services/cloudformation/api/test_changesets.py::test_describe_change_set_with_similarly_named_stacks": 0.04916203100003713, + "tests/aws/services/cloudformation/api/test_changesets.py::test_empty_changeset": 1.3261853109999038, + "tests/aws/services/cloudformation/api/test_changesets.py::test_execute_change_set": 0.0017581129999371115, + "tests/aws/services/cloudformation/api/test_changesets.py::test_multiple_create_changeset": 0.3539821809999921, + "tests/aws/services/cloudformation/api/test_changesets.py::test_name_conflicts": 1.920425201999933, + "tests/aws/services/cloudformation/api/test_drift_detection.py::test_drift_detection_on_lambda": 0.00176567599999089, + "tests/aws/services/cloudformation/api/test_extensions_api.py::TestExtensionsApi::test_crud_extension[HOOK-LocalStack::Testing::TestHook-hooks/localstack-testing-testhook.zip]": 0.0017668899999989662, + "tests/aws/services/cloudformation/api/test_extensions_api.py::TestExtensionsApi::test_crud_extension[MODULE-LocalStack::Testing::TestModule::MODULE-modules/localstack-testing-testmodule-module.zip]": 0.0016984799999590905, + "tests/aws/services/cloudformation/api/test_extensions_api.py::TestExtensionsApi::test_crud_extension[RESOURCE-LocalStack::Testing::TestResource-resourcetypes/localstack-testing-testresource.zip]": 0.0017344190000585513, + "tests/aws/services/cloudformation/api/test_extensions_api.py::TestExtensionsApi::test_extension_not_complete": 0.0017399890000433516, + "tests/aws/services/cloudformation/api/test_extensions_api.py::TestExtensionsApi::test_extension_type_configuration": 0.0019829820000722975, + "tests/aws/services/cloudformation/api/test_extensions_api.py::TestExtensionsApi::test_extension_versioning": 0.0017593559999795616, + "tests/aws/services/cloudformation/api/test_extensions_hooks.py::TestExtensionsHooks::test_hook_deployment[FAIL]": 0.0017226470000650806, + "tests/aws/services/cloudformation/api/test_extensions_hooks.py::TestExtensionsHooks::test_hook_deployment[WARN]": 0.001798459000042385, + "tests/aws/services/cloudformation/api/test_extensions_modules.py::TestExtensionsModules::test_module_usage": 0.0018429900000000998, + 
"tests/aws/services/cloudformation/api/test_extensions_resourcetypes.py::TestExtensionsResourceTypes::test_deploy_resource_type": 0.0018800809999675039, + "tests/aws/services/cloudformation/api/test_nested_stacks.py::test_deletion_of_failed_nested_stack": 15.350770300999898, + "tests/aws/services/cloudformation/api/test_nested_stacks.py::test_lifecycle_nested_stack": 0.0021995160000187752, + "tests/aws/services/cloudformation/api/test_nested_stacks.py::test_nested_output_in_params": 12.641020147000177, + "tests/aws/services/cloudformation/api/test_nested_stacks.py::test_nested_stack": 6.2213530239999955, + "tests/aws/services/cloudformation/api/test_nested_stacks.py::test_nested_stack_output_refs": 6.2761850989999175, + "tests/aws/services/cloudformation/api/test_nested_stacks.py::test_nested_stacks_conditions": 6.2567680759999575, + "tests/aws/services/cloudformation/api/test_nested_stacks.py::test_nested_with_nested_stack": 12.337127311999893, + "tests/aws/services/cloudformation/api/test_reference_resolving.py::test_nested_getatt_ref[TopicArn]": 2.1014168770001334, + "tests/aws/services/cloudformation/api/test_reference_resolving.py::test_nested_getatt_ref[TopicName]": 2.101390884999887, + "tests/aws/services/cloudformation/api/test_reference_resolving.py::test_reference_unsupported_resource": 2.098655693000069, + "tests/aws/services/cloudformation/api/test_reference_resolving.py::test_sub_resolving": 2.0988583500000004, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_create_stack_with_policy": 0.002271279000069626, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_different_action_attribute": 0.0017548660000556993, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_different_principal_attribute": 0.0017775479999500021, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_empty_policy": 0.0017465700000229845, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_not_json_policy": 0.001994643999978507, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_policy_during_update": 0.0017814069999531057, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_policy_lifecycle": 0.0018537520001018493, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_prevent_deletion[resource0]": 0.0018573380000361794, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_prevent_deletion[resource1]": 0.0018445540000584515, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_prevent_modifying_with_policy_specifying_resource_id": 0.0018868720000000394, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_prevent_replacement": 0.0018853090000447992, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_prevent_resource_deletion": 0.0017691330000388916, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_prevent_stack_update": 0.001864780999994764, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_prevent_update[AWS::S3::Bucket]": 0.001758482000013828, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_prevent_update[AWS::SNS::Topic]": 0.0018557650000730064, + 
"tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_set_empty_policy_with_url": 0.0017498570000498148, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_set_invalid_policy_with_url": 0.0017550960000107807, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_set_policy_both_policy_and_url": 0.001786254999956327, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_set_policy_with_update_operation": 0.0018222019999711847, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_set_policy_with_url": 0.0017320429999472253, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_update_with_empty_policy": 0.0017254020000336823, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_update_with_overlapping_policies[False]": 0.0017582519999450597, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_update_with_overlapping_policies[True]": 0.0017700239999385303, + "tests/aws/services/cloudformation/api/test_stack_policies.py::TestStackPolicy::test_update_with_policy": 0.0017607180000140943, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_create_stack_with_custom_id": 1.0559966970000687, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_failure_options_for_stack_creation[False-0]": 0.0018337939999355513, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_failure_options_for_stack_creation[True-1]": 0.0017637629999853743, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_failure_options_for_stack_update[False-2]": 0.001741630999958943, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_failure_options_for_stack_update[True-1]": 0.0017651050000040414, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_get_template_using_changesets[json]": 2.105596587999912, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_get_template_using_changesets[yaml]": 2.1046691750000264, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_get_template_using_create_stack[json]": 1.0526410160000523, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_get_template_using_create_stack[yaml]": 1.0544800710000573, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_list_events_after_deployment": 2.1765725430000202, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_list_stack_resources_for_removed_resource": 19.326889136999966, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_stack_description_special_chars": 2.2758553199998914, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_stack_lifecycle": 4.352378526999928, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_stack_name_creation": 0.08427486799996586, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_stack_update_resources": 4.437022683000009, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_update_stack_actual_update": 4.1820604830001, + "tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_update_stack_with_same_template_withoutchange": 2.0940700359999482, + 
"tests/aws/services/cloudformation/api/test_stacks.py::TestStacksApi::test_update_stack_with_same_template_withoutchange_transformation": 2.268722933000049, + "tests/aws/services/cloudformation/api/test_stacks.py::test_blocked_stack_deletion": 0.0018409469998914574, + "tests/aws/services/cloudformation/api/test_stacks.py::test_describe_stack_events_errors": 0.022376310000026933, + "tests/aws/services/cloudformation/api/test_stacks.py::test_events_resource_types": 2.1492800989999523, + "tests/aws/services/cloudformation/api/test_stacks.py::test_linting_error_during_creation": 0.00192160699998567, + "tests/aws/services/cloudformation/api/test_stacks.py::test_list_parameter_type": 2.1055781279999337, + "tests/aws/services/cloudformation/api/test_stacks.py::test_name_conflicts": 2.3835621590000073, + "tests/aws/services/cloudformation/api/test_stacks.py::test_no_echo_parameter": 3.873515685999905, + "tests/aws/services/cloudformation/api/test_stacks.py::test_notifications": 0.0016597890000866755, + "tests/aws/services/cloudformation/api/test_stacks.py::test_stack_deploy_order[A-B-C]": 2.383064138000009, + "tests/aws/services/cloudformation/api/test_stacks.py::test_stack_deploy_order[A-C-B]": 2.380424416999972, + "tests/aws/services/cloudformation/api/test_stacks.py::test_stack_deploy_order[B-A-C]": 2.3827266380000083, + "tests/aws/services/cloudformation/api/test_stacks.py::test_stack_deploy_order[B-C-A]": 2.3803099059999795, + "tests/aws/services/cloudformation/api/test_stacks.py::test_stack_deploy_order[C-A-B]": 2.3788139250000313, + "tests/aws/services/cloudformation/api/test_stacks.py::test_stack_deploy_order[C-B-A]": 2.3846599639999795, + "tests/aws/services/cloudformation/api/test_stacks.py::test_stack_resource_not_found": 2.098759487000166, + "tests/aws/services/cloudformation/api/test_stacks.py::test_update_termination_protection": 2.1327872100000604, + "tests/aws/services/cloudformation/api/test_stacks.py::test_updating_an_updated_stack_sets_status": 6.363665179999998, + "tests/aws/services/cloudformation/api/test_templates.py::test_create_stack_from_s3_template_url[http_host]": 1.1344928610001261, + "tests/aws/services/cloudformation/api/test_templates.py::test_create_stack_from_s3_template_url[http_invalid]": 0.08959796399994957, + "tests/aws/services/cloudformation/api/test_templates.py::test_create_stack_from_s3_template_url[http_path]": 1.1345376420000548, + "tests/aws/services/cloudformation/api/test_templates.py::test_create_stack_from_s3_template_url[s3_url]": 0.09286828100005096, + "tests/aws/services/cloudformation/api/test_templates.py::test_get_template_summary": 2.25313603699999, + "tests/aws/services/cloudformation/api/test_templates.py::test_validate_invalid_json_template_should_fail": 0.09109408300002997, + "tests/aws/services/cloudformation/api/test_templates.py::test_validate_template": 0.09100720799995088, + "tests/aws/services/cloudformation/api/test_transformers.py::test_duplicate_resources": 2.3662468609999223, + "tests/aws/services/cloudformation/api/test_transformers.py::test_transformer_individual_resource_level": 3.194674485000064, + "tests/aws/services/cloudformation/api/test_transformers.py::test_transformer_property_level": 2.284263168999928, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_basic_update": 3.125625504000027, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_diff_after_update": 3.1404758170000378, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_no_parameters_update": 3.124363280000125, 
+ "tests/aws/services/cloudformation/api/test_update_stack.py::test_no_template_error": 0.0019216560000359095, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_set_notification_arn_with_update": 0.001721301999964453, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_tags": 0.00171792700007245, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_using_template_url": 3.203632647999939, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_with_capabilities[capability0]": 0.0017446150000068883, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_with_capabilities[capability1]": 0.0017631810000011683, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_with_invalid_rollback_configuration_errors": 0.001744726000083574, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_with_previous_parameter_value": 3.124280088999967, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_with_previous_template": 0.0018315590000383963, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_with_resource_types": 0.0017438440000887567, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_with_role_without_permissions": 0.0018293130000301971, + "tests/aws/services/cloudformation/api/test_update_stack.py::test_update_with_rollback_configuration": 0.001732110999910219, + "tests/aws/services/cloudformation/api/test_validations.py::test_invalid_output_structure[missing-def]": 0.0018139359999622684, + "tests/aws/services/cloudformation/api/test_validations.py::test_invalid_output_structure[multiple-nones]": 0.001792424000086612, + "tests/aws/services/cloudformation/api/test_validations.py::test_invalid_output_structure[none-value]": 0.0017990090000239434, + "tests/aws/services/cloudformation/api/test_validations.py::test_missing_resources_block": 0.0017251100000521546, + "tests/aws/services/cloudformation/api/test_validations.py::test_resources_blocks[invalid-key]": 0.0017480929999464934, + "tests/aws/services/cloudformation/api/test_validations.py::test_resources_blocks[missing-type]": 0.0017456279999805702, + "tests/aws/services/cloudformation/engine/test_attributes.py::TestResourceAttributes::test_dependency_on_attribute_with_dot_notation": 2.112357287000009, + "tests/aws/services/cloudformation/engine/test_attributes.py::TestResourceAttributes::test_invalid_getatt_fails": 0.0019695250001632303, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_condition_on_outputs": 2.1153574870000966, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_conditional_att_to_conditional_resources[create]": 2.1353610309998885, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_conditional_att_to_conditional_resources[no-create]": 2.1248854049998727, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_conditional_in_conditional[dev-us-west-2]": 2.1032438119999597, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_conditional_in_conditional[production-us-east-1]": 2.1032077810000374, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_conditional_with_select": 2.1520674490001284, + 
"tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_dependency_in_non_evaluated_if_branch[None-FallbackParamValue]": 2.1277103380000426, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_dependency_in_non_evaluated_if_branch[false-DefaultParamValue]": 2.130122330999825, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_dependency_in_non_evaluated_if_branch[true-FallbackParamValue]": 2.1288366360000737, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_dependent_ref": 0.0019138630000270496, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_dependent_ref_intrinsic_fn_condition": 0.0017891790000703622, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_dependent_ref_with_macro": 0.0018284929999481392, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_nested_conditions[prod-bucket-policy]": 0.0018843970001398702, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_nested_conditions[prod-nobucket-nopolicy]": 0.0016492589999188567, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_nested_conditions[test-bucket-nopolicy]": 0.0016613720000577814, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_nested_conditions[test-nobucket-nopolicy]": 0.0018229629999950703, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_output_reference_to_skipped_resource": 0.0017153110001117966, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_simple_condition_evaluation_deploys_resource": 2.103323440000054, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_simple_condition_evaluation_doesnt_deploy_resource": 0.08259069799998997, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_simple_intrinsic_fn_condition_evaluation[nope]": 2.090143433999856, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_simple_intrinsic_fn_condition_evaluation[yep]": 2.0917195000000675, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_sub_in_conditions": 2.120622045999994, + "tests/aws/services/cloudformation/engine/test_conditions.py::TestCloudFormationConditions::test_update_conditions": 4.222562855000092, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_async_mapping_error_first_level": 2.0750502189999906, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_async_mapping_error_second_level": 2.0752019890001066, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_aws_refs_in_mappings": 2.0958832930000426, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_mapping_maximum_nesting_depth": 0.001891971999953057, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_mapping_minimum_nesting_depth": 0.001748793999922782, + 
"tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_mapping_ref_map_key[should-deploy]": 2.11758444599991, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_mapping_ref_map_key[should-not-deploy]": 2.094160716999909, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_mapping_with_invalid_refs": 0.001843891999897096, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_mapping_with_nonexisting_key": 0.0018103990000781778, + "tests/aws/services/cloudformation/engine/test_mappings.py::TestCloudFormationMappings::test_simple_mapping_working": 2.107490318000032, + "tests/aws/services/cloudformation/engine/test_references.py::TestDependsOn::test_depends_on_with_missing_reference": 0.0019252039999173576, + "tests/aws/services/cloudformation/engine/test_references.py::TestFnSub::test_fn_sub_cases": 2.1138797079998994, + "tests/aws/services/cloudformation/engine/test_references.py::TestFnSub::test_non_string_parameter_in_sub": 2.1105347760001223, + "tests/aws/services/cloudformation/engine/test_references.py::test_resolve_transitive_placeholders_in_strings": 2.125672932000043, + "tests/aws/services/cloudformation/engine/test_references.py::test_useful_error_when_invalid_ref": 0.01652718200000436, + "tests/aws/services/cloudformation/resource_providers/ec2/aws_ec2_networkacl/test_basic.py::TestBasicCRD::test_black_box": 2.5685301569998273, + "tests/aws/services/cloudformation/resource_providers/ec2/test_ec2.py::test_deploy_instance_with_key_pair": 2.405798582999978, + "tests/aws/services/cloudformation/resource_providers/ec2/test_ec2.py::test_deploy_prefix_list": 7.200325427999928, + "tests/aws/services/cloudformation/resource_providers/ec2/test_ec2.py::test_deploy_security_group_with_tags": 2.1093900880000547, + "tests/aws/services/cloudformation/resource_providers/ec2/test_ec2.py::test_deploy_vpc_endpoint": 2.520110026999987, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_basic.py::TestBasicCRD::test_autogenerated_values": 2.0992482510000627, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_basic.py::TestBasicCRD::test_black_box": 2.137619917000052, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_basic.py::TestBasicCRD::test_getatt": 2.1388723600000503, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_basic.py::TestUpdates::test_update_without_replacement": 0.0019185800000514064, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_exploration.py::TestAttributeAccess::test_getatt[Arn]": 0.0017445659999566487, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_exploration.py::TestAttributeAccess::test_getatt[Id]": 0.0018389420000630707, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_exploration.py::TestAttributeAccess::test_getatt[Path]": 0.001958625999918695, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_exploration.py::TestAttributeAccess::test_getatt[PermissionsBoundary]": 0.00184952200004318, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_exploration.py::TestAttributeAccess::test_getatt[UserName]": 0.0018481409998685194, + "tests/aws/services/cloudformation/resource_providers/iam/aws_iam_user/test_parity.py::TestParity::test_create_with_full_properties": 
2.2165678659998775, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_cfn_handle_iam_role_resource_no_role_name": 2.144215430999907, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_delete_role_detaches_role_policy": 4.211569410000038, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_iam_user_access_key": 4.227820584000028, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_iam_username_defaultname": 2.1740530550000585, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_managed_policy_with_empty_resource": 2.4698332770000206, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_policy_attachments": 2.390306246000023, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_server_certificate": 2.2643900259998873, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_update_inline_policy": 4.313741876000222, + "tests/aws/services/cloudformation/resource_providers/iam/test_iam.py::test_updating_stack_with_iam_role": 12.258247151999967, + "tests/aws/services/cloudformation/resource_providers/opensearch/test_domain.py::TestAttributeAccess::test_getattr[Arn]": 0.0018629469999495996, + "tests/aws/services/cloudformation/resource_providers/opensearch/test_domain.py::TestAttributeAccess::test_getattr[DomainArn]": 0.0017269339999756994, + "tests/aws/services/cloudformation/resource_providers/opensearch/test_domain.py::TestAttributeAccess::test_getattr[DomainEndpoint]": 0.0018614240000260907, + "tests/aws/services/cloudformation/resource_providers/opensearch/test_domain.py::TestAttributeAccess::test_getattr[DomainName]": 0.002206458000046041, + "tests/aws/services/cloudformation/resource_providers/opensearch/test_domain.py::TestAttributeAccess::test_getattr[EngineVersion]": 0.0018871130000661651, + "tests/aws/services/cloudformation/resource_providers/opensearch/test_domain.py::TestAttributeAccess::test_getattr[Id]": 0.0018559239999831334, + "tests/aws/services/cloudformation/resource_providers/scheduler/test_scheduler.py::test_schedule_and_group": 2.517786729999898, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter.py::TestBasicCRD::test_black_box": 0.002046890000087842, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter.py::TestUpdates::test_update_without_replacement": 0.001878295999972579, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[AllowedPattern]": 0.0017524399999047091, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[DataType]": 0.0017280450000498604, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[Description]": 0.0017853519999562195, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[Id]": 0.0018674349998946127, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[Name]": 0.001911548000066432, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[Policies]": 0.0019131010000137394, + 
"tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[Tier]": 0.0024335220000466506, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[Type]": 0.0018974309999748584, + "tests/aws/services/cloudformation/resource_providers/ssm/test_parameter_getatt_exploration.py::TestAttributeAccess::test_getattr[Value]": 0.0018926419999161226, + "tests/aws/services/cloudformation/resources/test_acm.py::test_cfn_acm_certificate": 2.098762726000132, + "tests/aws/services/cloudformation/resources/test_apigateway.py::TestServerlessApigwLambda::test_serverless_like_deployment_with_update": 14.49226775999989, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_account": 2.153776676999996, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_api_gateway_with_policy_as_dict": 2.1061227029999827, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_apigateway_aws_integration": 2.3175775240000576, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_apigateway_rest_api": 2.3077696399999468, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_apigateway_swagger_import": 2.326086852999879, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_deploy_apigateway_from_s3_swagger": 2.6908184880001045, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_deploy_apigateway_integration": 2.22699852300002, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_deploy_apigateway_models": 2.303220736000185, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_cfn_with_apigateway_resources": 2.3367225529999587, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_rest_api_serverless_ref_resolving": 9.935634518000143, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_update_apigateway_stage": 4.533738938000056, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_update_usage_plan": 4.481719889000033, + "tests/aws/services/cloudformation/resources/test_apigateway.py::test_url_output": 2.1886793730000136, + "tests/aws/services/cloudformation/resources/test_cdk.py::TestCdkInit::test_cdk_bootstrap[10]": 8.637828602000013, + "tests/aws/services/cloudformation/resources/test_cdk.py::TestCdkInit::test_cdk_bootstrap[11]": 8.654450194999981, + "tests/aws/services/cloudformation/resources/test_cdk.py::TestCdkInit::test_cdk_bootstrap[12]": 8.646710715000154, + "tests/aws/services/cloudformation/resources/test_cdk.py::TestCdkInit::test_cdk_bootstrap_redeploy": 5.6206864220000625, + "tests/aws/services/cloudformation/resources/test_cdk.py::TestCdkSampleApp::test_cdk_sample": 2.4335871789999146, + "tests/aws/services/cloudformation/resources/test_cloudformation.py::test_create_macro": 3.2008875340002305, + "tests/aws/services/cloudformation/resources/test_cloudformation.py::test_waitcondition": 2.2037635749998117, + "tests/aws/services/cloudformation/resources/test_cloudwatch.py::test_alarm_creation": 2.0897039149999728, + "tests/aws/services/cloudformation/resources/test_cloudwatch.py::test_alarm_ext_statistic": 2.1282291350000833, + "tests/aws/services/cloudformation/resources/test_cloudwatch.py::test_composite_alarm_creation": 2.41192090599975, + 
"tests/aws/services/cloudformation/resources/test_dynamodb.py::test_billing_mode_as_conditional[PAY_PER_REQUEST]": 2.486900565000269, + "tests/aws/services/cloudformation/resources/test_dynamodb.py::test_billing_mode_as_conditional[PROVISIONED]": 2.4783113580001555, + "tests/aws/services/cloudformation/resources/test_dynamodb.py::test_default_name_for_table": 2.4844214730001113, + "tests/aws/services/cloudformation/resources/test_dynamodb.py::test_deploy_stack_with_dynamodb_table": 2.2221644940002534, + "tests/aws/services/cloudformation/resources/test_dynamodb.py::test_global_table": 2.473494157999994, + "tests/aws/services/cloudformation/resources/test_dynamodb.py::test_global_table_with_ttl_and_sse": 2.1470889489996807, + "tests/aws/services/cloudformation/resources/test_dynamodb.py::test_globalindex_read_write_provisioned_throughput_dynamodb_table": 2.190173730000197, + "tests/aws/services/cloudformation/resources/test_dynamodb.py::test_table_with_ttl_and_sse": 2.1644178489998467, + "tests/aws/services/cloudformation/resources/test_dynamodb.py::test_ttl_cdk": 1.2528888779997942, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_cfn_update_ec2_instance_type": 0.0018424890001824679, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_cfn_with_multiple_route_table_associations": 2.478730465999888, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_cfn_with_multiple_route_tables": 2.2018559599998753, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_dhcp_options": 2.317622851999886, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_ec2_security_group_id_with_vpc": 2.1502568599998995, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_internet_gateway_ref_and_attr": 2.3001288079997266, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_keypair_create_import": 2.2231779139999617, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_simple_route_table_creation": 2.2253968999998506, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_simple_route_table_creation_without_vpc": 2.230643093000026, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_transit_gateway_attachment": 2.832816616000173, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_vpc_creates_default_sg": 2.3920583259998693, + "tests/aws/services/cloudformation/resources/test_ec2.py::test_vpc_with_route_table": 3.318580466999947, + "tests/aws/services/cloudformation/resources/test_elasticsearch.py::test_cfn_handle_elasticsearch_domain": 4.3498726689999785, + "tests/aws/services/cloudformation/resources/test_events.py::test_cfn_event_api_destination_resource": 16.384498511999936, + "tests/aws/services/cloudformation/resources/test_events.py::test_cfn_event_bus_resource": 2.1521314340000117, + "tests/aws/services/cloudformation/resources/test_events.py::test_event_rule_creation_without_target": 2.111316350999914, + "tests/aws/services/cloudformation/resources/test_events.py::test_event_rule_to_logs": 2.2256746699999894, + "tests/aws/services/cloudformation/resources/test_events.py::test_eventbus_policies": 13.487565034000227, + "tests/aws/services/cloudformation/resources/test_events.py::test_eventbus_policy_statement": 2.1132986090001395, + "tests/aws/services/cloudformation/resources/test_events.py::test_rule_pattern_transformation": 2.1338719820000733, + "tests/aws/services/cloudformation/resources/test_events.py::test_rule_properties": 2.1355004029999236, + 
"tests/aws/services/cloudformation/resources/test_firehose.py::test_firehose_stack_with_kinesis_as_source": 35.56209035200004, + "tests/aws/services/cloudformation/resources/test_integration.py::test_events_sqs_sns_lambda": 19.844008825000174, + "tests/aws/services/cloudformation/resources/test_kinesis.py::test_cfn_handle_kinesis_firehose_resources": 11.388530570000285, + "tests/aws/services/cloudformation/resources/test_kinesis.py::test_default_parameters_kinesis": 11.323054620999983, + "tests/aws/services/cloudformation/resources/test_kinesis.py::test_describe_template": 0.1332682659999591, + "tests/aws/services/cloudformation/resources/test_kinesis.py::test_dynamodb_stream_response_with_cf": 11.343470431999776, + "tests/aws/services/cloudformation/resources/test_kinesis.py::test_kinesis_stream_consumer_creations": 17.28398102699998, + "tests/aws/services/cloudformation/resources/test_kinesis.py::test_stream_creation": 11.333802713000068, + "tests/aws/services/cloudformation/resources/test_kms.py::test_cfn_with_kms_resources": 2.138225700000021, + "tests/aws/services/cloudformation/resources/test_kms.py::test_deploy_stack_with_kms": 2.118486591999954, + "tests/aws/services/cloudformation/resources/test_kms.py::test_kms_key_disabled": 2.1124727069998244, + "tests/aws/services/cloudformation/resources/test_lambda.py::TestCfnLambdaDestinations::test_generic_destination_routing[sqs-sqs]": 19.64750861499988, + "tests/aws/services/cloudformation/resources/test_lambda.py::TestCfnLambdaIntegrations::test_cfn_lambda_dynamodb_source": 11.855835591000186, + "tests/aws/services/cloudformation/resources/test_lambda.py::TestCfnLambdaIntegrations::test_cfn_lambda_kinesis_source": 21.492579937000073, + "tests/aws/services/cloudformation/resources/test_lambda.py::TestCfnLambdaIntegrations::test_cfn_lambda_permissions": 7.868035635999831, + "tests/aws/services/cloudformation/resources/test_lambda.py::TestCfnLambdaIntegrations::test_cfn_lambda_sqs_source": 8.277830662999804, + "tests/aws/services/cloudformation/resources/test_lambda.py::TestCfnLambdaIntegrations::test_lambda_dynamodb_event_filter": 7.457770513000014, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_cfn_function_url": 7.509122528000034, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_event_invoke_config": 6.2687003840001125, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_alias": 12.507668946999956, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_cfn_dead_letter_config_async_invocation": 11.059224843000266, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_cfn_run": 6.589569919000041, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_cfn_run_with_empty_string_replacement_deny_list": 6.181614047000039, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_cfn_run_with_non_empty_string_replacement_deny_list": 6.184070529999644, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_code_signing_config": 2.1988878289998866, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_function_tags": 6.5545346639999025, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_layer_crud": 6.268580348999876, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_logging_config": 6.207673469999918, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_version": 6.777706895999927, + 
"tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_version_provisioned_concurrency": 12.573623398000109, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_vpc": 0.0020456120000744704, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_w_dynamodb_event_filter": 11.456557058000044, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_lambda_w_dynamodb_event_filter_update": 12.669036426000048, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_multiple_lambda_permissions_for_singlefn": 6.2270152460000645, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_python_lambda_code_deployed_via_s3": 6.673998299999994, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_update_lambda_function": 8.283082080000213, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_update_lambda_function_name": 12.321207600999742, + "tests/aws/services/cloudformation/resources/test_lambda.py::test_update_lambda_permissions": 10.30459449, + "tests/aws/services/cloudformation/resources/test_logs.py::test_cfn_handle_log_group_resource": 2.4024696149999727, + "tests/aws/services/cloudformation/resources/test_logs.py::test_logstream": 2.1257465450000836, + "tests/aws/services/cloudformation/resources/test_opensearch.py::test_domain": 0.001924993999864455, + "tests/aws/services/cloudformation/resources/test_opensearch.py::test_domain_with_alternative_types": 17.474204570999973, + "tests/aws/services/cloudformation/resources/test_redshift.py::test_redshift_cluster": 2.12974065100002, + "tests/aws/services/cloudformation/resources/test_resource_groups.py::test_group_defaults": 2.263483554000004, + "tests/aws/services/cloudformation/resources/test_route53.py::test_create_health_check": 2.2625588040000366, + "tests/aws/services/cloudformation/resources/test_route53.py::test_create_record_set_via_id": 2.1883058050000272, + "tests/aws/services/cloudformation/resources/test_route53.py::test_create_record_set_via_name": 2.191496281999889, + "tests/aws/services/cloudformation/resources/test_route53.py::test_create_record_set_without_resource_record": 2.1765871050001806, + "tests/aws/services/cloudformation/resources/test_s3.py::test_bucket_autoname": 2.1080159870000443, + "tests/aws/services/cloudformation/resources/test_s3.py::test_bucket_versioning": 2.1148543650001557, + "tests/aws/services/cloudformation/resources/test_s3.py::test_bucketpolicy": 22.380472424000118, + "tests/aws/services/cloudformation/resources/test_s3.py::test_cfn_handle_s3_notification_configuration": 2.166626836999967, + "tests/aws/services/cloudformation/resources/test_s3.py::test_cors_configuration": 2.5143907990000116, + "tests/aws/services/cloudformation/resources/test_s3.py::test_object_lock_configuration": 2.5105491490000986, + "tests/aws/services/cloudformation/resources/test_s3.py::test_website_configuration": 2.491483969000001, + "tests/aws/services/cloudformation/resources/test_sam.py::test_cfn_handle_serverless_api_resource": 6.626443895000193, + "tests/aws/services/cloudformation/resources/test_sam.py::test_sam_policies": 6.330364576999955, + "tests/aws/services/cloudformation/resources/test_sam.py::test_sam_sqs_event": 13.458627152999952, + "tests/aws/services/cloudformation/resources/test_sam.py::test_sam_template": 6.623674772999948, + "tests/aws/services/cloudformation/resources/test_secretsmanager.py::test_cdk_deployment_generates_secret_value_if_no_value_is_provided": 1.2643974790000811, + 
"tests/aws/services/cloudformation/resources/test_secretsmanager.py::test_cfn_handle_secretsmanager_secret": 2.2789812999999413, + "tests/aws/services/cloudformation/resources/test_secretsmanager.py::test_cfn_secret_policy[default]": 2.1201289959999485, + "tests/aws/services/cloudformation/resources/test_secretsmanager.py::test_cfn_secret_policy[true]": 2.12221052599989, + "tests/aws/services/cloudformation/resources/test_secretsmanager.py::test_cfn_secretsmanager_gen_secret": 2.269230380000181, + "tests/aws/services/cloudformation/resources/test_sns.py::test_deploy_stack_with_sns_topic": 2.138133092999851, + "tests/aws/services/cloudformation/resources/test_sns.py::test_sns_subscription": 2.1216770040000483, + "tests/aws/services/cloudformation/resources/test_sns.py::test_sns_subscription_region": 2.1463481320001847, + "tests/aws/services/cloudformation/resources/test_sns.py::test_sns_topic_fifo_with_deduplication": 2.3422328910000942, + "tests/aws/services/cloudformation/resources/test_sns.py::test_sns_topic_fifo_without_suffix_fails": 2.084280650999972, + "tests/aws/services/cloudformation/resources/test_sns.py::test_sns_topic_with_attributes": 1.218109590999802, + "tests/aws/services/cloudformation/resources/test_sns.py::test_update_subscription": 4.246193758000118, + "tests/aws/services/cloudformation/resources/test_sqs.py::test_cfn_handle_sqs_resource": 2.137934492000113, + "tests/aws/services/cloudformation/resources/test_sqs.py::test_sqs_fifo_queue_generates_valid_name": 2.1252054359999875, + "tests/aws/services/cloudformation/resources/test_sqs.py::test_sqs_non_fifo_queue_generates_valid_name": 2.1065768149999258, + "tests/aws/services/cloudformation/resources/test_sqs.py::test_sqs_queue_policy": 2.141209419000006, + "tests/aws/services/cloudformation/resources/test_sqs.py::test_update_queue_no_change": 4.237422981000009, + "tests/aws/services/cloudformation/resources/test_sqs.py::test_update_sqs_queuepolicy": 4.219287260999863, + "tests/aws/services/cloudformation/resources/test_ssm.py::test_deploy_patch_baseline": 2.267168947000073, + "tests/aws/services/cloudformation/resources/test_ssm.py::test_maintenance_window": 2.1746278509999684, + "tests/aws/services/cloudformation/resources/test_ssm.py::test_parameter_defaults": 2.295981041000232, + "tests/aws/services/cloudformation/resources/test_ssm.py::test_update_ssm_parameter_tag": 4.198255159000155, + "tests/aws/services/cloudformation/resources/test_ssm.py::test_update_ssm_parameters": 4.185427828000002, + "tests/aws/services/cloudformation/resources/test_stack_sets.py::test_create_stack_set_with_stack_instances": 1.1310859690001962, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_apigateway_invoke": 9.557532390000233, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_apigateway_invoke_localhost": 9.581219253999734, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_apigateway_invoke_localhost_with_path": 15.732532609999907, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_apigateway_invoke_with_path": 15.657945656999573, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_cfn_statemachine_default_s3_location": 4.8114790120000634, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_cfn_statemachine_with_dependencies": 2.1811768110003413, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_nested_statemachine_with_sync2": 15.517835608000041, + 
"tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_retry_and_catch": 0.0026324739999381563, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_statemachine_create_with_logging_configuration": 2.6860395779999635, + "tests/aws/services/cloudformation/resources/test_stepfunctions.py::test_statemachine_definitionsubstitution": 7.329174582000178, + "tests/aws/services/cloudformation/test_cloudformation_ui.py::TestCloudFormationUi::test_get_cloudformation_ui": 0.06807541500006664, + "tests/aws/services/cloudformation/test_cloudtrail_trace.py::test_cloudtrail_trace_example": 0.0017879920001178107, + "tests/aws/services/cloudformation/test_template_engine.py::TestImportValues::test_cfn_with_exports": 2.1138366009997753, + "tests/aws/services/cloudformation/test_template_engine.py::TestImportValues::test_import_values_across_stacks": 4.211213168000086, + "tests/aws/services/cloudformation/test_template_engine.py::TestImports::test_stack_imports": 4.2440654379997795, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_and_or_functions[Fn::And-0-0-False]": 0.08343559300033121, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_and_or_functions[Fn::And-0-1-False]": 0.08199512299984235, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_and_or_functions[Fn::And-1-0-False]": 0.08608951199994408, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_and_or_functions[Fn::And-1-1-True]": 2.1222193590001552, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_and_or_functions[Fn::Or-0-0-False]": 0.08740726199994242, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_and_or_functions[Fn::Or-0-1-True]": 2.118739531000074, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_and_or_functions[Fn::Or-1-0-True]": 2.12064257899965, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_and_or_functions[Fn::Or-1-1-True]": 2.1242548129998795, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_base64_sub_and_getatt_functions": 2.1098992900001576, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_cfn_template_with_short_form_fn_sub": 2.103283777999877, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_cidr_function": 0.0018178129998887016, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_find_map_function": 2.1033613459999287, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_get_azs_function[ap-northeast-1]": 2.110484255000074, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_get_azs_function[ap-southeast-2]": 2.111640680999926, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_get_azs_function[eu-central-1]": 2.1183154960001502, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_get_azs_function[eu-west-1]": 2.1158879589997923, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_get_azs_function[us-east-1]": 2.1006706389996452, + 
"tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_get_azs_function[us-east-2]": 2.1282096619995627, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_get_azs_function[us-west-1]": 2.1162471860000096, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_get_azs_function[us-west-2]": 2.113929403999691, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_join_no_value_construct": 2.1123614599998746, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_split_length_and_join_functions": 2.1526951109999573, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_sub_not_ready": 2.1231209659997603, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_sub_number_type": 2.102127057999951, + "tests/aws/services/cloudformation/test_template_engine.py::TestIntrinsicFunctions::test_to_json_functions": 0.0018358570000600594, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_attribute_uses_macro": 5.735985774000028, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_capabilities_requirements": 5.315919531999953, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_error_pass_macro_as_reference": 0.02531915100007609, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_failed_state[raise_error.py]": 3.6651639330000307, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_failed_state[return_invalid_template.py]": 3.6395147020000422, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_failed_state[return_unsuccessful_with_message.py]": 3.6578095429997575, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_failed_state[return_unsuccessful_without_message.py]": 3.6497722999997677, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_functions_and_references_during_transformation": 4.6440179409996745, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_global_scope": 5.157553655999891, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_macro_deployment": 3.217075732000012, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_pyplate_param_type_list": 8.722933005999948, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_scope_order_and_parameters": 0.0020229159999871626, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_snipped_scope[transformation_snippet_topic.json]": 5.7310024369999155, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_snipped_scope[transformation_snippet_topic.yml]": 5.7371070310000505, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_to_validate_template_limit_for_macro": 3.7732982930001526, + "tests/aws/services/cloudformation/test_template_engine.py::TestMacros::test_validate_lambda_internals": 5.207202487000131, + "tests/aws/services/cloudformation/test_template_engine.py::TestPreviousValues::test_parameter_usepreviousvalue_behavior": 0.0018823330001396243, + "tests/aws/services/cloudformation/test_template_engine.py::TestPseudoParameters::test_stack_id": 2.1179285240002628, + 
"tests/aws/services/cloudformation/test_template_engine.py::TestSecretsManagerParameters::test_resolve_secretsmanager[resolve_secretsmanager.yaml]": 2.10832378699979, + "tests/aws/services/cloudformation/test_template_engine.py::TestSecretsManagerParameters::test_resolve_secretsmanager[resolve_secretsmanager_full.yaml]": 2.115484737000088, + "tests/aws/services/cloudformation/test_template_engine.py::TestSecretsManagerParameters::test_resolve_secretsmanager[resolve_secretsmanager_partial.yaml]": 2.1134552200001053, + "tests/aws/services/cloudformation/test_template_engine.py::TestSsmParameters::test_create_change_set_with_ssm_parameter_list": 2.165984060000028, + "tests/aws/services/cloudformation/test_template_engine.py::TestSsmParameters::test_create_stack_with_ssm_parameters": 2.1709209520001878, + "tests/aws/services/cloudformation/test_template_engine.py::TestSsmParameters::test_resolve_ssm": 2.124466400999836, + "tests/aws/services/cloudformation/test_template_engine.py::TestSsmParameters::test_resolve_ssm_secure": 2.1261801399998603, + "tests/aws/services/cloudformation/test_template_engine.py::TestSsmParameters::test_resolve_ssm_with_version": 2.157614990999946, + "tests/aws/services/cloudformation/test_template_engine.py::TestSsmParameters::test_ssm_nested_with_nested_stack": 6.244212256000083, + "tests/aws/services/cloudformation/test_template_engine.py::TestStackEvents::test_invalid_stack_deploy": 2.3772469789998922, + "tests/aws/services/cloudformation/test_template_engine.py::TestTypes::test_implicit_type_conversion": 2.1600746910000908, + "tests/aws/services/cloudformation/test_unsupported.py::test_unsupported": 2.0943378629999643, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_acm.py::test_cfn_acm_certificate": 2.10233101599988, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::TestServerlessApigwLambda::test_serverless_like_deployment_with_update": 0.00200961899986396, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_account": 0.001725080000142043, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_api_gateway_with_policy_as_dict": 0.0017991069998970488, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_cfn_apigateway_aws_integration": 0.0018477080000138812, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_cfn_apigateway_rest_api": 0.0018644489998678182, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_cfn_apigateway_swagger_import": 0.00184930099999292, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_cfn_deploy_apigateway_from_s3_swagger": 0.0019742039996799576, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_cfn_deploy_apigateway_integration": 0.0017334949998257798, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_cfn_deploy_apigateway_models": 0.0017409689996839006, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_cfn_with_apigateway_resources": 0.0018320580002182396, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_rest_api_serverless_ref_resolving": 0.00182958300001701, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_update_apigateway_stage": 0.001749664000044504, + 
"tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_update_usage_plan": 0.0017710150000311842, + "tests/aws/services/cloudformation/v2/ported_from_v1/resources/test_apigateway.py::test_url_output": 0.0017774869997992937, + "tests/aws/services/cloudformation/v2/test_change_set_conditions.py::TestChangeSetConditions::test_condition_add_new_negative_condition_to_existent_resource": 0.0018329499998799292, + "tests/aws/services/cloudformation/v2/test_change_set_conditions.py::TestChangeSetConditions::test_condition_add_new_positive_condition_to_existent_resource": 0.001876010999922073, + "tests/aws/services/cloudformation/v2/test_change_set_conditions.py::TestChangeSetConditions::test_condition_update_adds_resource": 0.001869437999857837, + "tests/aws/services/cloudformation/v2/test_change_set_conditions.py::TestChangeSetConditions::test_condition_update_removes_resource": 0.0018861799999285722, + "tests/aws/services/cloudformation/v2/test_change_set_depends_on.py::TestChangeSetDependsOn::test_multiple_dependencies_addition": 0.0018484389997865946, + "tests/aws/services/cloudformation/v2/test_change_set_depends_on.py::TestChangeSetDependsOn::test_multiple_dependencies_deletion": 0.001761878000024808, + "tests/aws/services/cloudformation/v2/test_change_set_depends_on.py::TestChangeSetDependsOn::test_update_depended_resource": 0.001873685999726149, + "tests/aws/services/cloudformation/v2/test_change_set_depends_on.py::TestChangeSetDependsOn::test_update_depended_resource_list": 0.0019989200000054552, + "tests/aws/services/cloudformation/v2/test_change_set_fn_get_attr.py::TestChangeSetFnGetAttr::test_direct_attribute_value_change": 0.0018331700000544515, + "tests/aws/services/cloudformation/v2/test_change_set_fn_get_attr.py::TestChangeSetFnGetAttr::test_direct_attribute_value_change_in_get_attr_chain": 0.0017451159997108334, + "tests/aws/services/cloudformation/v2/test_change_set_fn_get_attr.py::TestChangeSetFnGetAttr::test_direct_attribute_value_change_with_dependent_addition": 0.0019097939998573565, + "tests/aws/services/cloudformation/v2/test_change_set_fn_get_attr.py::TestChangeSetFnGetAttr::test_immutable_property_update_causes_resource_replacement": 0.0017883470000015222, + "tests/aws/services/cloudformation/v2/test_change_set_fn_get_attr.py::TestChangeSetFnGetAttr::test_resource_addition": 0.0018732769999587617, + "tests/aws/services/cloudformation/v2/test_change_set_fn_get_attr.py::TestChangeSetFnGetAttr::test_resource_deletion": 0.0018501220001780894, + "tests/aws/services/cloudformation/v2/test_change_set_fn_join.py::TestChangeSetFnJoin::test_indirect_update_refence_argument": 0.0017313309999735793, + "tests/aws/services/cloudformation/v2/test_change_set_fn_join.py::TestChangeSetFnJoin::test_update_refence_argument": 0.0018505240002468781, + "tests/aws/services/cloudformation/v2/test_change_set_fn_join.py::TestChangeSetFnJoin::test_update_string_literal_argument": 0.0018277520000538061, + "tests/aws/services/cloudformation/v2/test_change_set_fn_join.py::TestChangeSetFnJoin::test_update_string_literal_arguments_empty": 0.0016952229998423718, + "tests/aws/services/cloudformation/v2/test_change_set_fn_join.py::TestChangeSetFnJoin::test_update_string_literal_delimiter": 0.0018080150002788287, + "tests/aws/services/cloudformation/v2/test_change_set_fn_join.py::TestChangeSetFnJoin::test_update_string_literal_delimiter_empty": 0.0017082580000078451, + 
"tests/aws/services/cloudformation/v2/test_change_set_mappings.py::TestChangeSetMappings::test_mapping_addition_with_resource": 0.0016650679999656859, + "tests/aws/services/cloudformation/v2/test_change_set_mappings.py::TestChangeSetMappings::test_mapping_deletion_with_resource_remap": 0.0017001220001020556, + "tests/aws/services/cloudformation/v2/test_change_set_mappings.py::TestChangeSetMappings::test_mapping_key_addition_with_resource": 0.0017040289999386005, + "tests/aws/services/cloudformation/v2/test_change_set_mappings.py::TestChangeSetMappings::test_mapping_key_deletion_with_resource_remap": 0.0018867299997964437, + "tests/aws/services/cloudformation/v2/test_change_set_mappings.py::TestChangeSetMappings::test_mapping_key_update": 0.0017021860001023015, + "tests/aws/services/cloudformation/v2/test_change_set_mappings.py::TestChangeSetMappings::test_mapping_leaf_update": 0.001736801000106425, + "tests/aws/services/cloudformation/v2/test_change_set_parameters.py::TestChangeSetParameters::test_update_parameter_default_value": 0.0017071849999865663, + "tests/aws/services/cloudformation/v2/test_change_set_parameters.py::TestChangeSetParameters::test_update_parameter_default_value_with_dynamic_overrides": 0.0017946590000974538, + "tests/aws/services/cloudformation/v2/test_change_set_parameters.py::TestChangeSetParameters::test_update_parameter_with_added_default_value": 0.0016956030001438194, + "tests/aws/services/cloudformation/v2/test_change_set_parameters.py::TestChangeSetParameters::test_update_parameter_with_removed_default_value": 0.0018518859997129766, + "tests/aws/services/cloudformation/v2/test_change_set_ref.py::TestChangeSetRef::test_direct_attribute_value_change": 0.0022207030001482053, + "tests/aws/services/cloudformation/v2/test_change_set_ref.py::TestChangeSetRef::test_direct_attribute_value_change_in_ref_chain": 0.0018306270003449754, + "tests/aws/services/cloudformation/v2/test_change_set_ref.py::TestChangeSetRef::test_direct_attribute_value_change_with_dependent_addition": 0.001839482999912434, + "tests/aws/services/cloudformation/v2/test_change_set_ref.py::TestChangeSetRef::test_immutable_property_update_causes_resource_replacement": 0.001823271000148452, + "tests/aws/services/cloudformation/v2/test_change_set_ref.py::TestChangeSetRef::test_resource_addition": 0.001844641999923624, + "tests/aws/services/cloudformation/v2/test_change_set_ref.py::TestChangeSetRef::test_supported_pseudo_parameter": 0.0018045269998765434, + "tests/aws/services/cloudformation/v2/test_change_set_values.py::TestChangeSetValues::test_property_empy_list": 0.0018414460000713007, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_base_dynamic_parameter_scenarios[change_dynamic]": 0.0017058729999916977, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_base_dynamic_parameter_scenarios[change_parameter_for_condition_create_resource]": 0.0017133169999397069, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_base_dynamic_parameter_scenarios[change_unrelated_property]": 0.0017007940000439703, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_base_dynamic_parameter_scenarios[change_unrelated_property_not_create_only]": 0.0017330740001852973, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_base_mapping_scenarios[update_string_referencing_resource]": 0.0016935700000431098, + 
"tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_conditions": 0.0017063939999388822, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_direct_update": 0.0018126229999779753, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_dynamic_update": 0.001887060999933965, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_execute_with_ref": 0.0017103210000186664, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_mappings_with_parameter_lookup": 0.0017087489998175442, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_mappings_with_static_fields": 0.0017003929999646061, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_parameter_changes": 0.001718406000009054, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_unrelated_changes_requires_replacement": 0.0018811210002240841, + "tests/aws/services/cloudformation/v2/test_change_sets.py::TestCaptureUpdateProcess::test_unrelated_changes_update_propagation": 0.0017523310000342462, + "tests/aws/services/cloudformation/v2/test_change_sets.py::test_single_resource_static_update": 0.0018459540001458663, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_alarm_lambda_target": 1.6562972769997941, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_anomaly_detector_lifecycle": 0.0017303390000051877, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_aws_sqs_metrics_created": 2.3561451519999537, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_breaching_alarm_actions": 5.315173837999964, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_create_metric_stream": 0.0017867550000119081, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_dashboard_lifecycle": 0.13976651100028903, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_default_ordering": 0.11865985300005377, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_delete_alarm": 0.08834847999992235, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_describe_alarms_converts_date_format_correctly": 0.07610015199998088, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_describe_minimal_metric_alarm": 0.07895568599974467, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_enable_disable_alarm_actions": 10.252386452999872, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data": 2.0664516419999472, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_different_units_no_unit_in_query[metric_data0]": 0.001755468000055771, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_different_units_no_unit_in_query[metric_data1]": 0.0018299750001915527, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_different_units_no_unit_in_query[metric_data2]": 0.0017313620001004892, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_for_multiple_metrics": 1.0501973240000098, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_pagination": 
2.1895978130000913, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_stats[Average]": 0.03697230799980389, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_stats[Maximum]": 0.03341490999991947, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_stats[Minimum]": 0.03451932399980251, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_stats[SampleCount]": 0.03471026800002619, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_stats[Sum]": 0.03285757200023909, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_with_different_units": 0.025698459999830447, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_with_dimensions": 0.04028229799996552, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_data_with_zero_and_labels": 0.036694415000056324, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_statistics": 0.174754799999846, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_with_no_results": 0.055523277000020244, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_get_metric_with_null_dimensions": 0.029771298000014212, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_handle_different_units": 0.028799445999993623, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_insight_rule": 0.0017249300001367374, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_invalid_amount_of_datapoints": 0.5659814650002772, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_invalid_dashboard_name": 0.01653278699996008, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_label_generation[input_pairs0]": 0.03048673700004656, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_label_generation[input_pairs1]": 0.030162031999907413, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_label_generation[input_pairs2]": 0.030369012999699407, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_label_generation[input_pairs3]": 0.03048409799998808, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_label_generation[input_pairs4]": 0.03251482299992858, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_label_generation[input_pairs5]": 0.029859354000109306, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_label_generation[input_pairs6]": 0.035607351999942694, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_list_metrics_pagination": 5.071531530999891, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_list_metrics_uniqueness": 2.0565704170001027, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_list_metrics_with_filters": 4.076339343999962, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_metric_widget": 0.001732204000290949, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_multiple_dimensions": 2.1120178590001615, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_multiple_dimensions_statistics": 0.05055155900004138, + 
"tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_parallel_put_metric_data_list_metrics": 0.2461613790001138, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_put_composite_alarm_describe_alarms": 0.08745154299958813, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_put_metric_alarm": 10.625812600000017, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_put_metric_alarm_escape_character": 0.06964230500011581, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_put_metric_data_gzip": 0.023371349999933955, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_put_metric_data_validation": 0.04073372800007746, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_put_metric_data_values_list": 0.033487205999790604, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_put_metric_uses_utc": 0.031193712999993295, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_raw_metric_data": 0.023490482999932283, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_set_alarm": 2.3453569420000804, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_set_alarm_invalid_input": 0.08128622100002758, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_store_tags": 0.11573074500006442, + "tests/aws/services/cloudwatch/test_cloudwatch.py::TestCloudwatch::test_trigger_composite_alarm": 4.6893139920002795, + "tests/aws/services/cloudwatch/test_cloudwatch_metrics.py::TestCloudWatchLambdaMetrics::test_lambda_invoke_error": 2.5403738699999394, + "tests/aws/services/cloudwatch/test_cloudwatch_metrics.py::TestCloudWatchLambdaMetrics::test_lambda_invoke_successful": 2.5082642019997365, + "tests/aws/services/cloudwatch/test_cloudwatch_metrics.py::TestSQSMetrics::test_alarm_number_of_messages_sent": 61.247869282000465, + "tests/aws/services/cloudwatch/test_cloudwatch_metrics.py::TestSqsApproximateMetrics::test_sqs_approximate_metrics": 51.88358154000002, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_batch_write_binary": 0.09177013299949976, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_batch_write_items": 0.09424574400009078, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_batch_write_items_streaming": 1.1564454270001079, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_batch_write_not_existing_table": 0.21025506300020425, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_batch_write_not_matching_schema": 0.10724185499975647, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_binary_data_with_stream": 0.761922931999834, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_continuous_backup_update": 0.28909025299981295, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_create_duplicate_table": 0.09464845999991667, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_data_encoding_consistency": 0.9118225570005052, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_delete_table": 0.10892664799985141, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_batch_execute_statement": 0.1304326510003193, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_create_table_with_class": 0.1566640109999753, + 
"tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_create_table_with_partial_sse_specification": 0.11557827299975543, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_create_table_with_sse_specification": 0.06654137499981516, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_execute_statement_empy_parameter": 0.1046366560003662, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_execute_transaction": 0.1720096419994661, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_get_batch_items": 0.07983857100043679, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_idempotent_writing": 0.1361674580007275, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_partiql_missing": 0.11271423600055641, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_pay_per_request": 0.039187814999877446, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_stream_records_with_update_item": 0.0019804359999398002, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_stream_shard_iterator": 0.8397359150003467, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_stream_stream_view_type": 1.3205458950005777, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_streams_describe_with_exclusive_start_shard_id": 0.7749845120001737, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_streams_shard_iterator_format": 2.866550348000146, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_update_table_without_sse_specification_change": 0.10528765599974577, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_dynamodb_with_kinesis_stream": 1.4520149569998466, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_empty_and_binary_values": 0.07746314900032303, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_global_tables": 0.10020428899997569, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_global_tables_version_2019": 0.4411483870003394, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_gsi_with_billing_mode[PAY_PER_REQUEST]": 0.3438232949997655, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_gsi_with_billing_mode[PROVISIONED]": 0.32098216400027013, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_invalid_query_index": 0.06514302199957456, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_large_data_download": 0.35469851700008803, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_list_tags_of_resource": 0.0804153889994268, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_more_than_20_global_secondary_indexes": 0.18373203600003762, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_multiple_update_expressions": 0.1347740079995674, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_non_ascii_chars": 0.13263165700027457, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_nosql_workbench_localhost_region": 0.07289402399965184, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_query_on_deleted_resource": 0.13519505499971274, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_return_values_in_put_item": 0.11949022500039064, + 
"tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_return_values_on_conditions_check_failure": 0.2235762930004057, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_stream_destination_records": 11.875390118999803, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_streams_on_global_tables": 1.2194378479998704, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_time_to_live": 0.23857906800049022, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_time_to_live_deletion": 0.41164486099978603, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_transact_get_items": 0.10089816400022755, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_transact_write_items_streaming": 1.2731577180006752, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_transact_write_items_streaming_for_different_tables": 1.1924018999998225, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_transaction_write_binary_data": 0.08746691499982262, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_transaction_write_canceled": 0.10035997100021632, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_transaction_write_items": 0.10124066400021547, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_valid_local_secondary_index": 0.12044929400008186, + "tests/aws/services/dynamodb/test_dynamodb.py::TestDynamoDB::test_valid_query_index": 0.07351502400024401, + "tests/aws/services/dynamodbstreams/test_dynamodb_streams.py::TestDynamoDBStreams::test_enable_kinesis_streaming_destination": 0.0021923620006418787, + "tests/aws/services/dynamodbstreams/test_dynamodb_streams.py::TestDynamoDBStreams::test_non_existent_stream": 0.02157774499937659, + "tests/aws/services/dynamodbstreams/test_dynamodb_streams.py::TestDynamoDBStreams::test_stream_spec_and_region_replacement": 2.284901014999832, + "tests/aws/services/dynamodbstreams/test_dynamodb_streams.py::TestDynamoDBStreams::test_table_v2_stream": 1.5296037000002798, + "tests/aws/services/ec2/test_ec2.py::TestEc2FlowLogs::test_ec2_flow_logs_s3": 0.6261745950000659, + "tests/aws/services/ec2/test_ec2.py::TestEc2FlowLogs::test_ec2_flow_logs_s3_validation": 0.20289563699998325, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_route_table_association": 1.4424575839998397, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_security_group_with_custom_id[False-id_manager]": 0.0643671389998417, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_security_group_with_custom_id[False-tag]": 0.06609410300006857, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_security_group_with_custom_id[True-id_manager]": 0.054568017999827134, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_security_group_with_custom_id[True-tag]": 0.056784187000175734, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_subnet_with_custom_id": 0.055776436000087415, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_subnet_with_custom_id_and_vpc_id": 0.05558282899983169, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_subnet_with_tags": 0.046136478000335046, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_vpc_endpoint": 0.12393221499996798, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_create_vpc_with_custom_id": 
0.04365278700061026, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_describe_vpc_endpoints_with_filter": 0.4085332259992356, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_describe_vpn_gateways_filter_by_vpc": 0.45407857400005014, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_get_security_groups_for_vpc": 0.32327668900006756, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_modify_launch_template[id]": 0.06806467800015525, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_modify_launch_template[name]": 0.052498016000299685, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_reserved_instance_api": 0.03173591499989925, + "tests/aws/services/ec2/test_ec2.py::TestEc2Integrations::test_vcp_peering_difference_regions": 1.1522469299998193, + "tests/aws/services/ec2/test_ec2.py::test_create_specific_vpc_id": 0.027142632000959566, + "tests/aws/services/ec2/test_ec2.py::test_describe_availability_zones_filter_with_zone_ids": 0.3183840209999289, + "tests/aws/services/ec2/test_ec2.py::test_describe_availability_zones_filter_with_zone_names": 0.31069076099993254, + "tests/aws/services/ec2/test_ec2.py::test_describe_availability_zones_filters": 0.3105533560005824, + "tests/aws/services/ec2/test_ec2.py::test_pickle_ec2_backend": 6.753004867000072, + "tests/aws/services/ec2/test_ec2.py::test_raise_create_volume_without_size": 0.01854568399994605, + "tests/aws/services/ec2/test_ec2.py::test_raise_duplicate_launch_template_name": 0.036855320000540814, + "tests/aws/services/ec2/test_ec2.py::test_raise_invalid_launch_template_name": 0.012870111000211182, + "tests/aws/services/ec2/test_ec2.py::test_raise_modify_to_invalid_default_version": 0.03441840100049376, + "tests/aws/services/ec2/test_ec2.py::test_raise_when_launch_template_data_missing": 0.014335335999930976, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_create_domain": 0.0017180360000565997, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_create_existing_domain_causes_exception": 0.0017167949999929988, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_describe_domains": 0.001966601000276569, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_domain_version": 0.0017161330001727038, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_get_compatible_version_for_domain": 0.001792185000340396, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_get_compatible_versions": 0.001757519999955548, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_list_versions": 0.0018160890003855457, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_path_endpoint_strategy": 0.003560866000043461, + "tests/aws/services/es/test_es.py::TestElasticsearchProvider::test_update_domain_config": 0.0018106989996340417, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeApiDestinations::test_api_destinations[auth0]": 0.10497536200000468, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeApiDestinations::test_api_destinations[auth1]": 0.09172505000015008, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeApiDestinations::test_api_destinations[auth2]": 0.09275838500025202, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeApiDestinations::test_create_api_destination_invalid_parameters": 
0.014739471000211779, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeApiDestinations::test_create_api_destination_name_validation": 0.044317429999864544, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_connection_secrets[api-key]": 0.05362064100063435, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_connection_secrets[basic]": 0.05430716399996527, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_connection_secrets[oauth]": 0.054552220000005036, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_create_connection": 0.04780620800011093, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_create_connection_invalid_parameters": 0.014339193000523665, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_create_connection_name_validation": 0.014927361999980349, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_create_connection_with_auth[auth_params0]": 0.04642291699974521, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_create_connection_with_auth[auth_params1]": 0.04870649999975285, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_create_connection_with_auth[auth_params2]": 0.047930428000199754, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_delete_connection": 0.08697680500017668, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_list_connections": 0.04682411500016315, + "tests/aws/services/events/test_api_destinations_and_connection.py::TestEventBridgeConnections::test_update_connection": 0.08871798900008798, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_create_archive_error_duplicate[custom]": 0.08656890800057226, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_create_archive_error_duplicate[default]": 0.06000677500014717, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_create_archive_error_unknown_event_bus": 0.014690129999962664, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_create_list_describe_update_delete_archive[custom]": 0.11999544499985859, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_create_list_describe_update_delete_archive[default]": 0.09399019500006034, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_delete_archive_error_unknown_archive": 0.015460305999567936, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_describe_archive_error_unknown_archive": 0.013643455000419635, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_error_unknown_source_arn": 0.013840231999893149, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_state_enabled[custom]": 0.08754905700016025, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_state_enabled[default]": 0.06098668899994664, + 
"tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_with_events[False-custom]": 0.5490299370003413, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_with_events[False-default]": 0.5190136659998643, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_with_events[True-custom]": 0.5465816740002083, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_with_events[True-default]": 0.5391682560002664, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_with_name_prefix[custom]": 0.10474025299981804, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_with_name_prefix[default]": 0.07315604899986283, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_with_source_arn[custom]": 0.08801144299968655, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_list_archive_with_source_arn[default]": 0.05940677099988534, + "tests/aws/services/events/test_archive_and_replay.py::TestArchive::test_update_archive_error_unknown_archive": 0.001770274000136851, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_describe_replay_error_unknown_replay": 0.014375452999502158, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_list_replay_with_limit": 0.2105469480006832, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_list_replays_with_event_source_arn": 0.10008236999965447, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_list_replays_with_prefix": 0.1544209079997927, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_start_list_describe_canceled_replay[custom]": 0.0018586490004963707, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_start_list_describe_canceled_replay[default]": 0.0018564450006124389, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_start_replay_error_duplicate_different_archive": 0.11742461699986961, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_start_replay_error_duplicate_name_same_archive": 0.07102074700014782, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_start_replay_error_invalid_end_time[0]": 0.06306003099962254, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_start_replay_error_invalid_end_time[10]": 0.06310144499911985, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_start_replay_error_unknown_archive": 0.014163215000280616, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::test_start_replay_error_unknown_event_bus": 0.09424151799976244, + "tests/aws/services/events/test_archive_and_replay.py::TestReplay::tests_concurrency_error_too_many_active_replays": 0.0018376089997218514, + "tests/aws/services/events/test_events.py::TestEventBus::test_create_list_describe_delete_custom_event_buses[False-regions0]": 0.041058904999772494, + "tests/aws/services/events/test_events.py::TestEventBus::test_create_list_describe_delete_custom_event_buses[False-regions1]": 0.11359104000030129, + "tests/aws/services/events/test_events.py::TestEventBus::test_create_list_describe_delete_custom_event_buses[True-regions0]": 0.0439350619999459, + 
"tests/aws/services/events/test_events.py::TestEventBus::test_create_list_describe_delete_custom_event_buses[True-regions1]": 0.13147821900020062, + "tests/aws/services/events/test_events.py::TestEventBus::test_create_multiple_event_buses_same_name": 0.042483622999952786, + "tests/aws/services/events/test_events.py::TestEventBus::test_delete_default_event_bus": 0.013493736000327772, + "tests/aws/services/events/test_events.py::TestEventBus::test_describe_delete_not_existing_event_bus": 0.02241585799947643, + "tests/aws/services/events/test_events.py::TestEventBus::test_list_event_buses_with_limit": 0.2337072220002483, + "tests/aws/services/events/test_events.py::TestEventBus::test_list_event_buses_with_prefix": 0.08371832699958759, + "tests/aws/services/events/test_events.py::TestEventBus::test_put_events_bus_to_bus[domain]": 0.4679558650004765, + "tests/aws/services/events/test_events.py::TestEventBus::test_put_events_bus_to_bus[path]": 0.38720547500088287, + "tests/aws/services/events/test_events.py::TestEventBus::test_put_events_bus_to_bus[standard]": 0.42474314300034166, + "tests/aws/services/events/test_events.py::TestEventBus::test_put_events_nonexistent_event_bus": 0.1613012190000518, + "tests/aws/services/events/test_events.py::TestEventBus::test_put_events_to_default_eventbus_for_custom_eventbus": 1.1305221899997377, + "tests/aws/services/events/test_events.py::TestEventBus::test_put_permission[custom]": 0.2807020720001674, + "tests/aws/services/events/test_events.py::TestEventBus::test_put_permission[default]": 0.09424568200029171, + "tests/aws/services/events/test_events.py::TestEventBus::test_put_permission_non_existing_event_bus": 0.014297827999598667, + "tests/aws/services/events/test_events.py::TestEventBus::test_remove_permission[custom]": 0.08592664199977662, + "tests/aws/services/events/test_events.py::TestEventBus::test_remove_permission[default]": 0.06366937599977973, + "tests/aws/services/events/test_events.py::TestEventBus::test_remove_permission_non_existing_sid[False-custom]": 0.0443572630001654, + "tests/aws/services/events/test_events.py::TestEventBus::test_remove_permission_non_existing_sid[False-default]": 0.02382721199955995, + "tests/aws/services/events/test_events.py::TestEventBus::test_remove_permission_non_existing_sid[True-custom]": 0.0559759259999737, + "tests/aws/services/events/test_events.py::TestEventBus::test_remove_permission_non_existing_sid[True-default]": 0.03218266800013225, + "tests/aws/services/events/test_events.py::TestEventPattern::test_put_events_pattern_nested": 10.23310065699934, + "tests/aws/services/events/test_events.py::TestEventPattern::test_put_events_pattern_with_values_in_array": 5.2905671160001475, + "tests/aws/services/events/test_events.py::TestEventRule::test_delete_rule_with_targets": 0.06829100599998128, + "tests/aws/services/events/test_events.py::TestEventRule::test_describe_nonexistent_rule": 0.015559552000013355, + "tests/aws/services/events/test_events.py::TestEventRule::test_disable_re_enable_rule[custom]": 0.08714107599962517, + "tests/aws/services/events/test_events.py::TestEventRule::test_disable_re_enable_rule[default]": 0.05974590999994689, + "tests/aws/services/events/test_events.py::TestEventRule::test_list_rule_names_by_target[custom]": 0.21872128799986967, + "tests/aws/services/events/test_events.py::TestEventRule::test_list_rule_names_by_target[default]": 0.16141433800066807, + "tests/aws/services/events/test_events.py::TestEventRule::test_list_rule_names_by_target_no_matches[custom]": 0.12437036699975579, + 
"tests/aws/services/events/test_events.py::TestEventRule::test_list_rule_names_by_target_no_matches[default]": 0.09590354300007675, + "tests/aws/services/events/test_events.py::TestEventRule::test_list_rule_names_by_target_with_limit[custom]": 0.3012586889999511, + "tests/aws/services/events/test_events.py::TestEventRule::test_list_rule_names_by_target_with_limit[default]": 0.26739966099967205, + "tests/aws/services/events/test_events.py::TestEventRule::test_list_rule_with_limit": 0.21756955800037758, + "tests/aws/services/events/test_events.py::TestEventRule::test_process_pattern_to_single_matching_rules_single_target": 7.361631890999888, + "tests/aws/services/events/test_events.py::TestEventRule::test_process_to_multiple_matching_rules_different_targets": 0.5192189300000791, + "tests/aws/services/events/test_events.py::TestEventRule::test_process_to_multiple_matching_rules_single_target": 4.3265443200002665, + "tests/aws/services/events/test_events.py::TestEventRule::test_process_to_single_matching_rules_single_target": 10.438614465999763, + "tests/aws/services/events/test_events.py::TestEventRule::test_put_list_with_prefix_describe_delete_rule[custom]": 0.0823999090007419, + "tests/aws/services/events/test_events.py::TestEventRule::test_put_list_with_prefix_describe_delete_rule[default]": 0.056042330999844125, + "tests/aws/services/events/test_events.py::TestEventRule::test_put_multiple_rules_with_same_name": 0.07862212099962562, + "tests/aws/services/events/test_events.py::TestEventRule::test_update_rule_with_targets": 0.09162942299963106, + "tests/aws/services/events/test_events.py::TestEventTarget::test_add_exceed_fife_targets_per_rule": 0.09443016599971088, + "tests/aws/services/events/test_events.py::TestEventTarget::test_list_target_by_rule_limit": 0.141175540000404, + "tests/aws/services/events/test_events.py::TestEventTarget::test_put_list_remove_target[custom]": 0.11029364500018346, + "tests/aws/services/events/test_events.py::TestEventTarget::test_put_list_remove_target[default]": 0.07932587200048147, + "tests/aws/services/events/test_events.py::TestEventTarget::test_put_multiple_targets_with_same_arn_across_different_rules": 0.11062044599975707, + "tests/aws/services/events/test_events.py::TestEventTarget::test_put_multiple_targets_with_same_arn_single_rule": 0.07958796199955032, + "tests/aws/services/events/test_events.py::TestEventTarget::test_put_multiple_targets_with_same_id_across_different_rules": 0.1093770990005396, + "tests/aws/services/events/test_events.py::TestEventTarget::test_put_multiple_targets_with_same_id_single_rule": 0.07687321699995664, + "tests/aws/services/events/test_events.py::TestEventTarget::test_put_target_id_validation": 0.08936333799965723, + "tests/aws/services/events/test_events.py::TestEvents::test_create_connection_validations": 0.013928547999967122, + "tests/aws/services/events/test_events.py::TestEvents::test_events_written_to_disk_are_timestamp_prefixed_for_chronological_ordering": 0.0017446070000914915, + "tests/aws/services/events/test_events.py::TestEvents::test_put_event_malformed_detail[ARRAY]": 0.01445282499980749, + "tests/aws/services/events/test_events.py::TestEvents::test_put_event_malformed_detail[MALFORMED_JSON]": 0.01535081999918475, + "tests/aws/services/events/test_events.py::TestEvents::test_put_event_malformed_detail[SERIALIZED_STRING]": 0.014503579999654903, + "tests/aws/services/events/test_events.py::TestEvents::test_put_event_malformed_detail[STRING]": 0.015173661000517313, + 
"tests/aws/services/events/test_events.py::TestEvents::test_put_event_with_too_big_detail": 0.0189416639996125, + "tests/aws/services/events/test_events.py::TestEvents::test_put_event_without_detail": 0.013359425000089686, + "tests/aws/services/events/test_events.py::TestEvents::test_put_event_without_detail_type": 0.013376565000271512, + "tests/aws/services/events/test_events.py::TestEvents::test_put_events_exceed_limit_ten_entries[custom]": 0.0447677940005633, + "tests/aws/services/events/test_events.py::TestEvents::test_put_events_exceed_limit_ten_entries[default]": 0.016592216999924858, + "tests/aws/services/events/test_events.py::TestEvents::test_put_events_response_entries_order": 0.29709257099966635, + "tests/aws/services/events/test_events.py::TestEvents::test_put_events_time": 0.3087999519998448, + "tests/aws/services/events/test_events.py::TestEvents::test_put_events_with_target_delivery_failure": 1.1529659890002222, + "tests/aws/services/events/test_events.py::TestEvents::test_put_events_with_time_field": 0.19011642400027995, + "tests/aws/services/events/test_events.py::TestEvents::test_put_events_without_source": 0.013693557000351575, + "tests/aws/services/events/test_events_cross_account_region.py::TestEventsCrossAccountRegion::test_put_events[custom-account]": 0.154568842000117, + "tests/aws/services/events/test_events_cross_account_region.py::test_event_bus_to_event_bus_cross_account_region[custom-account]": 0.5313974360001339, + "tests/aws/services/events/test_events_cross_account_region.py::test_event_bus_to_event_bus_cross_account_region[custom-region]": 0.5293333190002159, + "tests/aws/services/events/test_events_cross_account_region.py::test_event_bus_to_event_bus_cross_account_region[custom-region_account]": 0.5935376539991921, + "tests/aws/services/events/test_events_cross_account_region.py::test_event_bus_to_event_bus_cross_account_region[default-account]": 0.6588710400005766, + "tests/aws/services/events/test_events_cross_account_region.py::test_event_bus_to_event_bus_cross_account_region[default-region]": 0.5707647809995251, + "tests/aws/services/events/test_events_cross_account_region.py::test_event_bus_to_event_bus_cross_account_region[default-region_account]": 0.5647931890002837, + "tests/aws/services/events/test_events_inputs.py::TestInputPath::test_put_events_with_input_path": 0.19062565200010795, + "tests/aws/services/events/test_events_inputs.py::TestInputPath::test_put_events_with_input_path_max_level_depth": 0.18728926299991144, + "tests/aws/services/events/test_events_inputs.py::TestInputPath::test_put_events_with_input_path_multiple_targets": 0.2904533500004618, + "tests/aws/services/events/test_events_inputs.py::TestInputPath::test_put_events_with_input_path_nested[event_detail0]": 0.18644244000006438, + "tests/aws/services/events/test_events_inputs.py::TestInputPath::test_put_events_with_input_path_nested[event_detail1]": 0.18782627599966872, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[\"<listmulti> multiple list items\"]": 0.23253303399997094, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[\"<listsingle> single list item <listmulti> multiple list items <systemstring> system account id <payload> payload <userId> user id\"]": 0.2891287659995214, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[\"<listsingle> single list item\"]": 
0.23068815000033283, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[\"Payload of <payload> with path users-service/users/<userId> and <userId>\"]": 0.23035630200047308, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[{\"id\" : \"<userId>\"}]": 0.23625393800011807, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[{\"id\" : <userId>}]": 0.22930544199971337, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[{\"method\": \"PUT\", \"nested\": {\"level1\": {\"level2\": {\"level3\": \"users-service/users/<userId>\"} } }, \"bod\": \"<userId>\"}]": 0.22854297200001383, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[{\"method\": \"PUT\", \"path\": \"users-service/users/<userId>\", \"bod\": <payload>}]": 0.22698753199983912, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[{\"method\": \"PUT\", \"path\": \"users-service/users/<userId>\", \"bod\": [<userId>, \"hardcoded\"]}]": 0.23164883400022518, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[{\"method\": \"PUT\", \"path\": \"users-service/users/<userId>\", \"id\": <userId>, \"body\": <payload>}]": 0.22737938499994925, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[{\"multi_replacement\": \"users/<userId>/second/<userId>\"}]": 0.262962398000127, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement[{\"singlelistitem\": <listsingle>}]": 0.2644530110001142, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement_not_valid[{\"not_valid\": \"users-service/users/<payload>\", \"bod\": <payload>}]": 5.151510896000218, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement_not_valid[{\"payload\": \"<payload>\"}]": 5.16533248199994, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_nested_keys_replacement_not_valid[{\"singlelistitem\": \"<listsingle>\"}]": 5.160441324000203, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_predefined_variables[\"Message containing all pre defined variables <aws.events.rule-arn> <aws.events.rule-name> <aws.events.event.ingestion-time>\"]": 0.215058503000364, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_input_transformer_predefined_variables[{\"originalEvent\": <aws.events.event>, \"originalEventJson\": <aws.events.event.json>}]": 0.2300798230003238, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_put_events_with_input_transformer_input_template_json": 0.40584139500015226, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_put_events_with_input_transformer_input_template_string[\"Event of <detail-type> type, at time <timestamp>, info extracted from detail <command>\"]": 0.4091627820002941, + 
"tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_put_events_with_input_transformer_input_template_string[\"{[/Check with special starting characters for event of <detail-type> type\"]": 0.41758063400038736, + "tests/aws/services/events/test_events_inputs.py::TestInputTransformer::test_put_events_with_input_transformer_missing_keys": 0.10544481099987024, + "tests/aws/services/events/test_events_inputs.py::test_put_event_input_path_and_input_transformer": 0.10090126600061922, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_array_event_payload": 0.024163090999962833, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[arrays]": 0.014366328000050999, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[arrays_NEG]": 0.0151099360000444, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[arrays_empty_EXC]": 0.0883018709996577, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[arrays_empty_null_NEG]": 0.014507431000311044, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[boolean]": 0.014003691000652907, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[boolean_NEG]": 0.018200722000074165, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[complex_many_rules]": 0.01509119100001044, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[complex_multi_match]": 0.01660753800024395, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[complex_multi_match_NEG]": 0.01576589399974182, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[complex_or]": 0.014712684000187437, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[complex_or_NEG]": 0.015046812999571557, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_ignorecase]": 0.014955588000248099, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_ignorecase_EXC]": 0.09441616100002648, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_ignorecase_NEG]": 0.014506498999708128, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_ignorecase_list]": 0.014255311999932019, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_ignorecase_list_EXC]": 0.09954963700010921, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_ignorecase_list_NEG]": 0.01459531499995137, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_number]": 0.017142863000572106, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_number_NEG]": 0.019750936000036745, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_number_list]": 0.014563537999947584, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_number_list_NEG]": 
0.016350050999790255, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_number_zero]": 0.014620442999785155, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_string]": 0.014361530000314815, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_string_NEG]": 0.014473861000169563, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_string_list]": 0.015617373999702977, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_string_list_NEG]": 0.017018526999891037, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_but_string_null]": 0.016268472999854566, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_prefix]": 0.014246215000184748, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_prefix_NEG]": 0.015357249999851774, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_prefix_empty_EXC]": 0.09162877100015976, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_prefix_ignorecase_EXC]": 0.09029513899986341, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_prefix_int_EXC]": 0.08941883399938888, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_prefix_list]": 0.01632266799970239, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_prefix_list_NEG]": 0.014961357999254687, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_prefix_list_type_EXC]": 0.09603154299975358, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_suffix]": 0.01404967700045745, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_suffix_NEG]": 0.014552737000030902, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_suffix_empty_EXC]": 0.09075959800020428, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_suffix_ignorecase_EXC]": 0.09895774400001756, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_suffix_int_EXC]": 0.09087590099943554, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_suffix_list]": 0.014350929000556789, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_suffix_list_NEG]": 0.01438755500021216, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_suffix_list_type_EXC]": 0.08821051700033422, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_wildcard]": 0.015964863000590412, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_wildcard_NEG]": 
0.014593642000363616, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_wildcard_empty]": 0.014877718999741774, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_wildcard_list]": 0.015403785000216885, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_wildcard_list_NEG]": 0.014523671000461036, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_wildcard_list_type_EXC]": 0.08891777799999545, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_anything_wildcard_type_EXC]": 0.1005086389995995, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_exists]": 0.018940203999591176, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_exists_NEG]": 0.014712234999933571, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_exists_false]": 0.014818493000348099, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_exists_false_NEG]": 0.017383874999723048, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ignorecase]": 0.014360126000156015, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ignorecase_EXC]": 0.0904330060002394, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ignorecase_NEG]": 0.014466587000697473, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ignorecase_empty]": 0.014270400999976118, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ignorecase_empty_NEG]": 0.014511764999951993, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ignorecase_list_EXC]": 0.09074027899987414, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address]": 0.014668252999854303, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address_EXC]": 0.09324683600016215, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address_NEG]": 0.01463065500047378, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address_bad_ip_EXC]": 0.08950988300011886, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address_bad_mask_EXC]": 0.09005268600003546, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address_type_EXC]": 0.09098421299995607, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address_v6]": 0.015589851000186172, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address_v6_NEG]": 0.01508403800016822, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_ip_address_v6_bad_ip_EXC]": 0.09354183200048283, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_numeric_EXC]": 
0.09040531499977078, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_numeric_and]": 0.01520829999981288, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_numeric_and_NEG]": 0.014678198000183329, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_numeric_number_EXC]": 0.08982489799927862, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_numeric_operatorcasing_EXC]": 0.08962194199966689, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_numeric_syntax_EXC]": 0.0892361390001497, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_prefix]": 0.0148785190003764, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_prefix_NEG]": 0.014535805000377877, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_prefix_empty]": 0.014453622000019095, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_prefix_ignorecase]": 0.01476574000025721, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_prefix_int_EXC]": 0.08957038599919542, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_prefix_list_EXC]": 0.09717926100074692, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_suffix]": 0.017292940000061208, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_suffix_NEG]": 0.015598793999743066, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_suffix_empty]": 0.4864429810004367, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_suffix_ignorecase]": 0.014630471000145917, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_suffix_ignorecase_NEG]": 0.014445436999722006, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_suffix_int_EXC]": 0.09193720199982636, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_suffix_list_EXC]": 0.08950159399955737, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_complex_EXC]": 0.09009113799947954, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_empty_NEG]": 0.01447798800018063, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_int_EXC]": 0.089753218000169, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_list_EXC]": 0.08950578599979053, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_nonrepeating]": 0.014594204000331956, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_nonrepeating_NEG]": 0.015809605999947962, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_repeating]": 0.014596408000215888, + 
"tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_repeating_NEG]": 0.014158491999751277, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_repeating_star_EXC]": 0.08902763400010372, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[content_wildcard_simplified]": 0.014567866000106733, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[dot_joining_event]": 0.01830412699973749, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[dot_joining_event_NEG]": 0.014533519999531563, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[dot_joining_pattern]": 0.014424317999782943, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[dot_joining_pattern_NEG]": 0.014286480000464508, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[dynamodb]": 0.015795414999956847, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[exists_dynamodb]": 0.01439097499996933, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[exists_dynamodb_NEG]": 0.01471696399994471, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[exists_list_empty_NEG]": 0.014547034999395692, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[int_nolist_EXC]": 0.09011327200005326, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[key_case_sensitive_NEG]": 0.014654464999694028, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[list_within_dict]": 0.014604102000248531, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[minimal]": 0.014487335000467283, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[nested_json_NEG]": 0.014577422999082046, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[null_value]": 0.015336198999648332, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[null_value_NEG]": 0.015005141000074218, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[number_comparison_float]": 0.01454752700010431, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[numeric-int-float]": 0.014677869000479404, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[numeric-null_NEG]": 0.01464140200005204, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[numeric-string_NEG]": 0.014529282999774296, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[operator_case_sensitive_EXC]": 1.3203036390000307, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[operator_multiple_list]": 0.014457429000231059, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[or-anything-but]": 0.015076505000251927, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[or-exists-parent]": 0.01650718200016854, + 
"tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[or-exists]": 0.01472511299971302, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[or-numeric-anything-but]": 0.015244353000070987, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[or-numeric-anything-but_NEG]": 0.01640343799999755, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[prefix]": 0.014789898999424622, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[sample1]": 0.014226488000076642, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[string]": 0.014969643999847904, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[string_empty]": 0.014602770000237797, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern[string_nolist_EXC]": 0.09042796200083103, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern_source": 0.1926675950000174, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern_with_escape_characters": 0.023069314000025543, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_pattern_with_multi_key": 0.5657805140000107, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_event_with_large_and_complex_payload": 0.048353817999981175, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_invalid_event_payload": 0.025385768000035114, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_invalid_json_event_pattern[[\"not\", \"a\", \"dict\", \"but valid json\"]]": 0.09987239899996325, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_invalid_json_event_pattern[this is valid json but not a dict]": 0.13852553500001363, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_invalid_json_event_pattern[{\"not\": closed mark\"]": 0.09962711399995783, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_invalid_json_event_pattern[{'bad': 'quotation'}]": 0.10898782900002857, + "tests/aws/services/events/test_events_patterns.py::TestEventPattern::test_plain_string_payload": 0.02502368000000388, + "tests/aws/services/events/test_events_patterns.py::TestRuleWithPattern::test_put_event_with_content_base_rule_in_pattern": 0.3600304810000239, + "tests/aws/services/events/test_events_patterns.py::TestRuleWithPattern::test_put_events_with_rule_pattern_anything_but": 5.926419674999977, + "tests/aws/services/events/test_events_patterns.py::TestRuleWithPattern::test_put_events_with_rule_pattern_exists_false": 5.399349487999984, + "tests/aws/services/events/test_events_patterns.py::TestRuleWithPattern::test_put_events_with_rule_pattern_exists_true": 5.281080843000012, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::test_schedule_cron_target_sqs": 0.0016724649999844132, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_invalid_schedule_cron[cron(0 1 * * * *)]": 0.013317363000027171, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_invalid_schedule_cron[cron(0 dummy ? 
* MON-FRI *)]": 0.013041198000024679, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_invalid_schedule_cron[cron(7 20 * * NOT *)]": 0.013944086999998717, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_invalid_schedule_cron[cron(71 8 1 * ? *)]": 0.012949215999981334, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_invalid_schedule_cron[cron(INVALID)]": 0.013403435000014952, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(* * ? * SAT#3 *)]": 0.0367952629999877, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0 10 * * ? *)]": 0.03588348100001326, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0 12 * * ? *)]": 0.03580996199997344, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0 18 ? * MON-FRI *)]": 0.035646597999999585, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0 2 ? * SAT *)]": 0.03633224699999005, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0 2 ? * SAT#3 *)]": 0.03661005799997952, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0 8 1 * ? *)]": 0.03553979699998422, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0/10 * ? * MON-FRI *)]": 0.0356632180000247, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0/15 * * * ? *)]": 0.03553851499998473, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0/30 0-2 ? * MON-FRI *)]": 0.03616909199999441, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0/30 20-23 ? * MON-FRI *)]": 0.03696543100002714, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0/5 5 ? JAN 1-5 2022)]": 0.03598665300003745, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(0/5 8-17 ? * MON-FRI *)]": 0.03921749900001714, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(15 10 ? * 6L 2002-2005)]": 0.036535440000022845, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(15 12 * * ? *)]": 0.03544156399999565, + "tests/aws/services/events/test_events_schedule.py::TestScheduleCron::tests_put_rule_with_schedule_cron[cron(5,35 14 * * ? 
*)]": 0.035832505999962905, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[ rate(10 minutes)]": 0.021905833999994684, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate( 10 minutes )]": 0.018352662000012288, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate()]": 0.018937344999983452, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(-10 minutes)]": 0.019241194000017003, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(0 minutes)]": 0.02163773499998456, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(1 days)]": 0.020529962000011892, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(1 hours)]": 0.01793091400000435, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(1 minutes)]": 0.019627125000027945, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(10 MINUTES)]": 0.042094459999987066, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(10 day)]": 0.016751596000005975, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(10 hour)]": 0.020734223999994583, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(10 minute)]": 0.036397849999985965, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(10 minutess)]": 0.021380859999965196, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(10 seconds)]": 0.018660197999963657, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(10 years)]": 0.015076919000023281, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(10)]": 0.018818713000001708, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_invalid_schedule_rate[rate(foo minutes)]": 0.0330008980000116, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_put_rule_with_schedule_rate": 0.06742087999998603, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::test_scheduled_rule_logs": 0.0016907489999766767, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::tests_put_rule_with_schedule_custom_event_bus": 0.07864557599998534, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::tests_schedule_rate_custom_input_target_sqs": 60.200367938, + "tests/aws/services/events/test_events_schedule.py::TestScheduleRate::tests_schedule_rate_target_sqs": 0.0028407709999953568, + "tests/aws/services/events/test_events_tags.py::TestEventBusTags::test_create_event_bus_with_tags": 0.04281578200001945, + "tests/aws/services/events/test_events_tags.py::TestEventBusTags::test_list_tags_for_deleted_event_bus": 0.035389196000011225, + 
"tests/aws/services/events/test_events_tags.py::TestRuleTags::test_list_tags_for_deleted_rule": 0.06261820700001408, + "tests/aws/services/events/test_events_tags.py::TestRuleTags::test_put_rule_with_tags": 0.0650481660000537, + "tests/aws/services/events/test_events_tags.py::test_recreate_tagged_resource_without_tags[event_bus-event_bus_custom]": 0.06747288599999024, + "tests/aws/services/events/test_events_tags.py::test_recreate_tagged_resource_without_tags[event_bus-event_bus_default]": 0.019651499000019612, + "tests/aws/services/events/test_events_tags.py::test_recreate_tagged_resource_without_tags[rule-event_bus_custom]": 0.09323643200002607, + "tests/aws/services/events/test_events_tags.py::test_recreate_tagged_resource_without_tags[rule-event_bus_default]": 0.06048868499999571, + "tests/aws/services/events/test_events_tags.py::tests_tag_list_untag_not_existing_resource[not_existing_event_bus]": 0.027392001999970716, + "tests/aws/services/events/test_events_tags.py::tests_tag_list_untag_not_existing_resource[not_existing_rule]": 0.028400630999982468, + "tests/aws/services/events/test_events_tags.py::tests_tag_untag_resource[event_bus-event_bus_custom]": 0.07042766700001835, + "tests/aws/services/events/test_events_tags.py::tests_tag_untag_resource[event_bus-event_bus_default]": 0.04449530999997364, + "tests/aws/services/events/test_events_tags.py::tests_tag_untag_resource[rule-event_bus_custom]": 0.09103599900001313, + "tests/aws/services/events/test_events_tags.py::tests_tag_untag_resource[rule-event_bus_default]": 0.06379281299999207, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetApiDestination::test_put_events_to_target_api_destinations[auth0]": 0.21254704500000798, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetApiDestination::test_put_events_to_target_api_destinations[auth1]": 0.10834632899999974, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetApiDestination::test_put_events_to_target_api_destinations[auth2]": 0.11475955200000953, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetApiGateway::test_put_events_with_target_api_gateway": 24.167259080000008, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetCloudWatchLogs::test_put_events_with_target_cloudwatch_logs": 0.20370585999998525, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetEvents::test_put_events_with_target_events[bus_combination0]": 0.320737658999974, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetEvents::test_put_events_with_target_events[bus_combination1]": 0.34837638999999854, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetEvents::test_put_events_with_target_events[bus_combination2]": 0.3235109530000102, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetFirehose::test_put_events_with_target_firehose": 1.0781683589999602, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetKinesis::test_put_events_with_target_kinesis": 2.3475366209999606, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetLambda::test_put_events_with_target_lambda": 4.228319082000013, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetLambda::test_put_events_with_target_lambda_list_entries_partial_match": 4.316509587000041, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetLambda::test_put_events_with_target_lambda_list_entry": 4.274450865000006, + 
"tests/aws/services/events/test_events_targets.py::TestEventsTargetSns::test_put_events_with_target_sns[domain]": 0.2224870139999382, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetSns::test_put_events_with_target_sns[path]": 0.23126015200000438, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetSns::test_put_events_with_target_sns[standard]": 0.5043940230000317, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetSqs::test_put_events_with_target_sqs": 0.17410735500001806, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetSqs::test_put_events_with_target_sqs_event_detail_match": 5.219334032000006, + "tests/aws/services/events/test_events_targets.py::TestEventsTargetStepFunctions::test_put_events_with_target_statefunction_machine": 4.301268860999983, + "tests/aws/services/events/test_x_ray_trace_propagation.py::test_xray_trace_propagation_events_api_gateway": 5.782144335999988, + "tests/aws/services/events/test_x_ray_trace_propagation.py::test_xray_trace_propagation_events_events[bus_combination0]": 4.432974792000039, + "tests/aws/services/events/test_x_ray_trace_propagation.py::test_xray_trace_propagation_events_events[bus_combination1]": 4.465967387999967, + "tests/aws/services/events/test_x_ray_trace_propagation.py::test_xray_trace_propagation_events_events[bus_combination2]": 4.429581522000035, + "tests/aws/services/events/test_x_ray_trace_propagation.py::test_xray_trace_propagation_events_lambda": 4.245061544000009, + "tests/aws/services/firehose/test_firehose.py::TestFirehoseIntegration::test_kinesis_firehose_elasticsearch_s3_backup": 0.001859577000061563, + "tests/aws/services/firehose/test_firehose.py::TestFirehoseIntegration::test_kinesis_firehose_kinesis_as_source": 37.31791386800006, + "tests/aws/services/firehose/test_firehose.py::TestFirehoseIntegration::test_kinesis_firehose_kinesis_as_source_multiple_delivery_streams": 68.78578922000003, + "tests/aws/services/firehose/test_firehose.py::TestFirehoseIntegration::test_kinesis_firehose_opensearch_s3_backup[domain]": 0.0017180130000156169, + "tests/aws/services/firehose/test_firehose.py::TestFirehoseIntegration::test_kinesis_firehose_opensearch_s3_backup[path]": 0.0017173510000247916, + "tests/aws/services/firehose/test_firehose.py::TestFirehoseIntegration::test_kinesis_firehose_opensearch_s3_backup[port]": 0.0017864300000383082, + "tests/aws/services/firehose/test_firehose.py::TestFirehoseIntegration::test_kinesis_firehose_s3_as_destination_with_file_extension": 1.2255300639999973, + "tests/aws/services/firehose/test_firehose.py::test_kinesis_firehose_http[False]": 0.07193922099992278, + "tests/aws/services/firehose/test_firehose.py::test_kinesis_firehose_http[True]": 1.5787196120001, + "tests/aws/services/iam/test_iam.py::TestIAMExtensions::test_create_role_with_malformed_assume_role_policy_document": 0.01908091799992917, + "tests/aws/services/iam/test_iam.py::TestIAMExtensions::test_create_user_add_permission_boundary_afterwards": 0.1091912630000138, + "tests/aws/services/iam/test_iam.py::TestIAMExtensions::test_create_user_with_permission_boundary": 0.1019029330000194, + "tests/aws/services/iam/test_iam.py::TestIAMExtensions::test_get_user_without_username_as_role": 0.16335394100002532, + "tests/aws/services/iam/test_iam.py::TestIAMExtensions::test_get_user_without_username_as_root": 0.04053984199998695, + "tests/aws/services/iam/test_iam.py::TestIAMExtensions::test_get_user_without_username_as_user": 0.22668798600005857, + 
"tests/aws/services/iam/test_iam.py::TestIAMExtensions::test_role_with_path_lifecycle": 0.13767578900007038, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_attach_detach_role_policy": 0.13019357500002116, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_attach_iam_role_to_new_iam_user": 0.09810707999992019, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_create_describe_role": 0.15578965299999936, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_create_role_with_assume_role_policy": 0.26821167099996046, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_create_user_with_tags": 0.030298220000020137, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_delete_non_existent_policy_returns_no_such_entity": 0.014730707999945025, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_instance_profile_tags": 0.1795794550000096, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_list_roles_with_permission_boundary": 0.14082706799996458, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_recreate_iam_role": 0.10532210399998121, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_role_attach_policy": 0.3854157429999532, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_service_linked_role_name_should_match_aws[ecs.amazonaws.com-AWSServiceRoleForECS]": 0.0018936910000206808, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_service_linked_role_name_should_match_aws[eks.amazonaws.com-AWSServiceRoleForAmazonEKS]": 0.001711490999923626, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_simulate_principle_policy[group]": 0.19148357299997087, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_simulate_principle_policy[role]": 0.2657201519999717, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_simulate_principle_policy[user]": 0.22818100600005664, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_update_assume_role_policy": 0.08320352599997705, + "tests/aws/services/iam/test_iam.py::TestIAMIntegrations::test_user_attach_policy": 0.35281626099998675, + "tests/aws/services/iam/test_iam.py::TestIAMPolicyEncoding::test_put_group_policy_encoding": 0.06450350499994784, + "tests/aws/services/iam/test_iam.py::TestIAMPolicyEncoding::test_put_role_policy_encoding": 0.19202468800006045, + "tests/aws/services/iam/test_iam.py::TestIAMPolicyEncoding::test_put_user_policy_encoding": 0.09203308399997923, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_already_exists": 0.03146810000004052, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_deletion": 8.185213659999988, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[accountdiscovery.ssm.amazonaws.com]": 0.2503151570000455, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[acm.amazonaws.com]": 0.24826324800000066, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[appmesh.amazonaws.com]": 0.25143184699993526, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[autoscaling-plans.amazonaws.com]": 0.25113668599993844, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[autoscaling.amazonaws.com]": 0.24329662100001315, + 
"tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[backup.amazonaws.com]": 0.24825244400000201, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[batch.amazonaws.com]": 0.2538547659999608, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[cassandra.application-autoscaling.amazonaws.com]": 0.24560335400002486, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[cks.kms.amazonaws.com]": 0.2529298829999789, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[cloudtrail.amazonaws.com]": 0.24672017000000324, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[codestar-notifications.amazonaws.com]": 0.2488984720000076, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[config.amazonaws.com]": 0.2505461739999646, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[connect.amazonaws.com]": 0.24960557499991864, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[dms-fleet-advisor.amazonaws.com]": 0.25124973900000214, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[dms.amazonaws.com]": 0.2500574430000029, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[docdb-elastic.amazonaws.com]": 0.24541355500002737, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ec2-instance-connect.amazonaws.com]": 0.2460029650000024, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ec2.application-autoscaling.amazonaws.com]": 0.2468373069999643, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ecr.amazonaws.com]": 0.24660436199997093, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ecs.amazonaws.com]": 0.25588165099992466, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[eks-connector.amazonaws.com]": 0.26263512799999944, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[eks-fargate.amazonaws.com]": 0.25252039499997636, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[eks-nodegroup.amazonaws.com]": 0.250703996000027, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[eks.amazonaws.com]": 0.2463002430000074, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[elasticache.amazonaws.com]": 0.24749133299997084, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[elasticbeanstalk.amazonaws.com]": 0.24774551900003416, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[elasticfilesystem.amazonaws.com]": 0.24729713599998604, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[elasticloadbalancing.amazonaws.com]": 0.2511354310000229, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[email.cognito-idp.amazonaws.com]": 0.24491904500001738, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[emr-containers.amazonaws.com]": 0.2483637290000047, + 
"tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[emrwal.amazonaws.com]": 0.2535895820000178, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[fis.amazonaws.com]": 0.24476532300002418, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[grafana.amazonaws.com]": 0.9849745239999947, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[imagebuilder.amazonaws.com]": 0.24767623800005367, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[iotmanagedintegrations.amazonaws.com]": 0.3210199620000367, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[kafka.amazonaws.com]": 0.24990994300003422, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[kafkaconnect.amazonaws.com]": 0.2516197950000105, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[lakeformation.amazonaws.com]": 0.2532271979999905, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[lex.amazonaws.com]": 0.32130032999992864, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[lexv2.amazonaws.com]": 0.24943478900001992, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[lightsail.amazonaws.com]": 0.24866275200002974, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[m2.amazonaws.com]": 0.25177049100000204, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[memorydb.amazonaws.com]": 0.2533998669999846, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[mq.amazonaws.com]": 0.2511052829999585, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[mrk.kms.amazonaws.com]": 0.24900876800001015, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[notifications.amazonaws.com]": 0.2512501570000154, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[observability.aoss.amazonaws.com]": 0.2590940570000271, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[opensearchservice.amazonaws.com]": 0.24787942299991528, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ops.apigateway.amazonaws.com]": 0.25255256299999473, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ops.emr-serverless.amazonaws.com]": 0.2523570970000151, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[opsdatasync.ssm.amazonaws.com]": 0.2511042909999901, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[opsinsights.ssm.amazonaws.com]": 0.2490185259999862, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[pullthroughcache.ecr.amazonaws.com]": 0.2482416710000166, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ram.amazonaws.com]": 0.24693440599997984, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[rds.amazonaws.com]": 0.2588130089999936, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[redshift.amazonaws.com]": 0.2542601660000514, + 
"tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[replication.cassandra.amazonaws.com]": 0.2545088280000414, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[replication.ecr.amazonaws.com]": 0.25897045600004276, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[repository.sync.codeconnections.amazonaws.com]": 0.27914542299998857, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[resource-explorer-2.amazonaws.com]": 0.2599244529999396, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[rolesanywhere.amazonaws.com]": 0.2523633899999709, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[s3-outposts.amazonaws.com]": 0.25398562499998434, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ses.amazonaws.com]": 0.2605283490000829, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[shield.amazonaws.com]": 0.2504347550000716, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ssm-incidents.amazonaws.com]": 0.2511386610000841, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ssm-quicksetup.amazonaws.com]": 0.25504782199999454, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[ssm.amazonaws.com]": 0.2507565669999394, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[sso.amazonaws.com]": 0.2513790909999898, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[vpcorigin.cloudfront.amazonaws.com]": 0.2491454529999828, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[waf.amazonaws.com]": 0.25117880799990644, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle[wafv2.amazonaws.com]": 0.25279129100005093, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix[autoscaling.amazonaws.com]": 0.09972745700002861, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix[connect.amazonaws.com]": 0.09943000700002358, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix[lexv2.amazonaws.com]": 0.09926826199995276, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[accountdiscovery.ssm.amazonaws.com]": 0.015672055000038654, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[acm.amazonaws.com]": 0.015390606999972078, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[appmesh.amazonaws.com]": 0.015015048000009301, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[autoscaling-plans.amazonaws.com]": 0.014573630999962006, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[backup.amazonaws.com]": 0.015678739000009045, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[batch.amazonaws.com]": 0.015273754000077133, + 
"tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[cassandra.application-autoscaling.amazonaws.com]": 0.015609188999974322, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[cks.kms.amazonaws.com]": 0.014976123999986157, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[cloudtrail.amazonaws.com]": 0.014931561000025795, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[codestar-notifications.amazonaws.com]": 0.014890413999978591, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[config.amazonaws.com]": 0.015052108000020326, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[dms-fleet-advisor.amazonaws.com]": 0.014964502000054836, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[dms.amazonaws.com]": 0.015224176999993233, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[docdb-elastic.amazonaws.com]": 0.0147535969999808, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ec2-instance-connect.amazonaws.com]": 0.01575034299997924, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ec2.application-autoscaling.amazonaws.com]": 0.01457196999996313, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ecr.amazonaws.com]": 0.015176085999996758, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ecs.amazonaws.com]": 0.01565656799999715, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[eks-connector.amazonaws.com]": 0.014800042999979723, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[eks-fargate.amazonaws.com]": 0.014543751999951837, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[eks-nodegroup.amazonaws.com]": 0.01872053700003562, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[eks.amazonaws.com]": 0.015366405000008854, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[elasticache.amazonaws.com]": 0.014820551999946474, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[elasticbeanstalk.amazonaws.com]": 0.01513644400006342, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[elasticfilesystem.amazonaws.com]": 0.014867078999941441, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[elasticloadbalancing.amazonaws.com]": 0.015028170999983104, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[email.cognito-idp.amazonaws.com]": 0.01489119600006461, + 
"tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[emr-containers.amazonaws.com]": 0.01463460800005123, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[emrwal.amazonaws.com]": 0.014460800999984258, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[fis.amazonaws.com]": 0.014850252999963232, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[grafana.amazonaws.com]": 0.015608948999954464, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[imagebuilder.amazonaws.com]": 0.01567979899994043, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[iotmanagedintegrations.amazonaws.com]": 0.016285100000004604, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[kafka.amazonaws.com]": 0.016921149999973295, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[kafkaconnect.amazonaws.com]": 0.0147939839999367, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[lakeformation.amazonaws.com]": 0.01619699800005492, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[lex.amazonaws.com]": 0.014613504999999805, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[lightsail.amazonaws.com]": 0.016529978999926698, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[m2.amazonaws.com]": 0.01672951199992667, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[memorydb.amazonaws.com]": 0.015849497000033352, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[mq.amazonaws.com]": 0.01488383899999235, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[mrk.kms.amazonaws.com]": 0.01506386700003759, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[notifications.amazonaws.com]": 0.014825512000015806, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[observability.aoss.amazonaws.com]": 0.015109153000025799, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[opensearchservice.amazonaws.com]": 0.015986723000082748, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ops.apigateway.amazonaws.com]": 0.015186040000003231, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ops.emr-serverless.amazonaws.com]": 0.015306652000049326, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[opsdatasync.ssm.amazonaws.com]": 0.015226611999992201, + 
"tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[opsinsights.ssm.amazonaws.com]": 0.017403544999979204, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[pullthroughcache.ecr.amazonaws.com]": 0.015247772000009263, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ram.amazonaws.com]": 0.015155189000040536, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[rds.amazonaws.com]": 0.0164425629999414, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[redshift.amazonaws.com]": 0.014593929999989541, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[replication.cassandra.amazonaws.com]": 0.015560126999901058, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[replication.ecr.amazonaws.com]": 0.015098052000041662, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[repository.sync.codeconnections.amazonaws.com]": 0.014964693000024454, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[resource-explorer-2.amazonaws.com]": 0.014934847999938938, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[rolesanywhere.amazonaws.com]": 0.014645407000045907, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[s3-outposts.amazonaws.com]": 0.014840410999966025, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ses.amazonaws.com]": 0.01559427800003732, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[shield.amazonaws.com]": 0.015449418000002879, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ssm-incidents.amazonaws.com]": 0.01478175300002249, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ssm-quicksetup.amazonaws.com]": 0.014619625000023007, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[ssm.amazonaws.com]": 0.015304750999916905, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[sso.amazonaws.com]": 0.015551802000061343, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[vpcorigin.cloudfront.amazonaws.com]": 0.015150870999946164, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[waf.amazonaws.com]": 0.014506033000031948, + "tests/aws/services/iam/test_iam.py::TestIAMServiceRoles::test_service_role_lifecycle_custom_suffix_not_allowed[wafv2.amazonaws.com]": 0.014900775999990401, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_create_service_specific_credential_invalid_service": 0.07945431500002087, + 
"tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_create_service_specific_credential_invalid_user": 0.0240474879998942, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_delete_user_after_service_credential_created": 0.07808046999997487, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_id_match_user_mismatch": 0.09510834000002433, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_invalid_update_parameters": 0.08102366799994343, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_list_service_specific_credential_different_service": 0.07782901699994227, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_service_specific_credential_lifecycle[cassandra.amazonaws.com]": 0.10360224399994422, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_service_specific_credential_lifecycle[codecommit.amazonaws.com]": 0.11067704699996739, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_user_match_id_mismatch[satisfiesregexbutstillinvalid]": 0.09622222500001953, + "tests/aws/services/iam/test_iam.py::TestIAMServiceSpecificCredentials::test_user_match_id_mismatch[totally-wrong-credential-id-with-hyphens]": 0.09377804599989759, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_add_tags_to_stream": 0.6752841809999381, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_cbor_blob_handling": 0.6632380830000102, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_create_stream_without_shard_count": 0.6681083039999862, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_create_stream_without_stream_name_raises": 0.03939180400004716, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_get_records": 0.7486135160000345, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_get_records_empty_stream": 0.660961430000043, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_get_records_next_shard_iterator": 0.6695270460000415, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_get_records_shard_iterator_with_surrounding_quotes": 0.6669586800000502, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_record_lifecycle_data_integrity": 0.8726582599999801, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_stream_consumers": 1.3228941919999784, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_subscribe_to_shard": 4.5397800560000405, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_subscribe_to_shard_cbor_at_timestamp": 4.345565760999989, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_subscribe_to_shard_timeout": 6.304834170999982, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_subscribe_to_shard_with_at_timestamp": 4.517233273000045, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_subscribe_to_shard_with_at_timestamp_cbor": 0.6462730840000859, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesis::test_subscribe_to_shard_with_sequence_number_as_iterator": 4.583152622, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisJavaSDK::test_subscribe_to_shard_with_java_sdk_v2_lambda": 9.602098788999967, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_add_tags_to_stream": 0.6603676790000463, + 
"tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_cbor_blob_handling": 0.6641956660000119, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_create_stream_without_shard_count": 0.6537117530000387, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_create_stream_without_stream_name_raises": 0.04420862000000625, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_get_records": 0.7233153559999437, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_get_records_empty_stream": 0.6639757489999738, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_get_records_next_shard_iterator": 0.6733806220000247, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_get_records_shard_iterator_with_surrounding_quotes": 0.671002165999937, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_record_lifecycle_data_integrity": 0.9045747150000238, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_stream_consumers": 1.2881085079999366, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_subscribe_to_shard": 4.464565977999996, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_subscribe_to_shard_cbor_at_timestamp": 1.3078095669999925, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_subscribe_to_shard_timeout": 6.3155584989999625, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_subscribe_to_shard_with_at_timestamp": 4.468366357999969, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_subscribe_to_shard_with_at_timestamp_cbor": 0.6353966270000342, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisMockScala::test_subscribe_to_shard_with_sequence_number_as_iterator": 4.471479870000053, + "tests/aws/services/kinesis/test_kinesis.py::TestKinesisPythonClient::test_run_kcl": 21.198555870000007, + "tests/aws/services/kms/test_kms.py::TestKMS::test_all_types_of_key_id_can_be_used_for_encryption": 0.06882434000010562, + "tests/aws/services/kms/test_kms.py::TestKMS::test_cant_delete_deleted_key": 0.033398734999991575, + "tests/aws/services/kms/test_kms.py::TestKMS::test_cant_use_disabled_or_deleted_keys": 0.0532975119999719, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_alias": 0.21628013199995166, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_custom_key_asymmetric": 0.03718806799986396, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_grant_with_invalid_key": 0.0234872800000403, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_grant_with_same_name_two_keys": 0.06173549199979789, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_grant_with_valid_key": 0.04165101300020524, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_key": 0.13529521300006309, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_key_custom_id": 0.02653041599990047, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_key_custom_key_material_hmac": 0.03497891800009256, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_key_custom_key_material_symmetric_decrypt": 0.02917435300003035, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_key_with_invalid_tag_key[lowercase_prefix]": 0.08731881000005615, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_key_with_invalid_tag_key[too_long_key]": 0.08721343999991404, + 
"tests/aws/services/kms/test_kms.py::TestKMS::test_create_key_with_invalid_tag_key[uppercase_prefix]": 0.08816220300002442, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_key_with_tag_and_untag": 0.15710906999981944, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_key_with_too_many_tags_raises_error": 0.09089254000002711, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_list_delete_alias": 0.06014152600005218, + "tests/aws/services/kms/test_kms.py::TestKMS::test_create_multi_region_key": 0.1729672330000085, + "tests/aws/services/kms/test_kms.py::TestKMS::test_derive_shared_secret": 0.20512131800012412, + "tests/aws/services/kms/test_kms.py::TestKMS::test_describe_and_list_sign_key": 0.04224582299991653, + "tests/aws/services/kms/test_kms.py::TestKMS::test_disable_and_enable_key": 0.055155246000026636, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_decrypt[RSA_2048-RSAES_OAEP_SHA_256]": 0.10684238599992568, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_decrypt[SYMMETRIC_DEFAULT-SYMMETRIC_DEFAULT]": 0.03326282900013666, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_decrypt_encryption_context": 0.192759780000074, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_validate_plaintext_size_per_key_type[RSA_2048-RSAES_OAEP_SHA_1]": 0.18369019700014633, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_validate_plaintext_size_per_key_type[RSA_2048-RSAES_OAEP_SHA_256]": 0.13942732999998952, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_validate_plaintext_size_per_key_type[RSA_3072-RSAES_OAEP_SHA_1]": 0.14611602899992704, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_validate_plaintext_size_per_key_type[RSA_3072-RSAES_OAEP_SHA_256]": 0.2567056150001008, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_validate_plaintext_size_per_key_type[RSA_4096-RSAES_OAEP_SHA_1]": 0.3409618790000195, + "tests/aws/services/kms/test_kms.py::TestKMS::test_encrypt_validate_plaintext_size_per_key_type[RSA_4096-RSAES_OAEP_SHA_256]": 0.2962016170000652, + "tests/aws/services/kms/test_kms.py::TestKMS::test_error_messaging_for_invalid_keys": 0.1956505540000535, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_and_verify_mac[HMAC_224-HMAC_SHA_224]": 0.12264651800001047, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_and_verify_mac[HMAC_256-HMAC_SHA_256]": 0.12527270099997168, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_and_verify_mac[HMAC_384-HMAC_SHA_384]": 0.12465172199983954, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_and_verify_mac[HMAC_512-HMAC_SHA_512]": 0.12703361599994878, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_random[1024]": 0.08568717500008916, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_random[12]": 0.0868620529998907, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_random[1]": 0.08640832499986573, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_random[44]": 0.08461749299988242, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_random[91]": 0.08601101200008543, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_random_invalid_number_of_bytes[0]": 0.08707461099993452, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_random_invalid_number_of_bytes[1025]": 0.0865907260001677, + "tests/aws/services/kms/test_kms.py::TestKMS::test_generate_random_invalid_number_of_bytes[None]": 0.09500209800012271, + 
"tests/aws/services/kms/test_kms.py::TestKMS::test_get_key_does_not_exist": 0.11944896300008168, + "tests/aws/services/kms/test_kms.py::TestKMS::test_get_key_in_different_region": 0.13502488299991455, + "tests/aws/services/kms/test_kms.py::TestKMS::test_get_key_invalid_uuid": 0.8772165550000182, + "tests/aws/services/kms/test_kms.py::TestKMS::test_get_parameters_for_import": 0.5383217609999065, + "tests/aws/services/kms/test_kms.py::TestKMS::test_get_public_key": 0.07742839999991702, + "tests/aws/services/kms/test_kms.py::TestKMS::test_get_put_list_key_policies": 0.04767327499996554, + "tests/aws/services/kms/test_kms.py::TestKMS::test_hmac_create_key": 0.11864194800000405, + "tests/aws/services/kms/test_kms.py::TestKMS::test_hmac_create_key_invalid_operations": 0.10198916100000588, + "tests/aws/services/kms/test_kms.py::TestKMS::test_import_key_asymmetric": 0.2486944610000137, + "tests/aws/services/kms/test_kms.py::TestKMS::test_import_key_symmetric": 0.33896871800016015, + "tests/aws/services/kms/test_kms.py::TestKMS::test_invalid_generate_mac[HMAC_224-HMAC_SHA_256]": 0.10179681199997503, + "tests/aws/services/kms/test_kms.py::TestKMS::test_invalid_generate_mac[HMAC_256-INVALID]": 0.10167004400000224, + "tests/aws/services/kms/test_kms.py::TestKMS::test_invalid_key_usage": 0.6011124310000469, + "tests/aws/services/kms/test_kms.py::TestKMS::test_invalid_verify_mac[HMAC_256-HMAC_SHA_256-some different important message]": 0.1821894339999517, + "tests/aws/services/kms/test_kms.py::TestKMS::test_invalid_verify_mac[HMAC_256-HMAC_SHA_512-some important message]": 0.1830470539999851, + "tests/aws/services/kms/test_kms.py::TestKMS::test_invalid_verify_mac[HMAC_256-INVALID-some important message]": 0.1807466679999834, + "tests/aws/services/kms/test_kms.py::TestKMS::test_key_enable_rotation_status[180]": 0.10810678399991502, + "tests/aws/services/kms/test_kms.py::TestKMS::test_key_enable_rotation_status[90]": 0.10811265100005585, + "tests/aws/services/kms/test_kms.py::TestKMS::test_key_rotation_status": 0.05635859099993468, + "tests/aws/services/kms/test_kms.py::TestKMS::test_key_rotations_encryption_decryption": 0.1308526500000653, + "tests/aws/services/kms/test_kms.py::TestKMS::test_key_rotations_limits": 0.226590943999895, + "tests/aws/services/kms/test_kms.py::TestKMS::test_key_with_long_tag_value_raises_error": 0.08871406700006901, + "tests/aws/services/kms/test_kms.py::TestKMS::test_list_aliases_of_key": 0.0651076159999775, + "tests/aws/services/kms/test_kms.py::TestKMS::test_list_grants_with_invalid_key": 0.013692503000015677, + "tests/aws/services/kms/test_kms.py::TestKMS::test_list_keys": 0.028060510999921462, + "tests/aws/services/kms/test_kms.py::TestKMS::test_list_retirable_grants": 0.06852108099985799, + "tests/aws/services/kms/test_kms.py::TestKMS::test_non_multi_region_keys_should_not_have_multi_region_properties": 0.1693470589999606, + "tests/aws/services/kms/test_kms.py::TestKMS::test_plaintext_size_for_encrypt": 0.10044427100001485, + "tests/aws/services/kms/test_kms.py::TestKMS::test_replicate_key": 0.5171905270000252, + "tests/aws/services/kms/test_kms.py::TestKMS::test_retire_grant_with_grant_id_and_key_id": 0.05559706200006076, + "tests/aws/services/kms/test_kms.py::TestKMS::test_retire_grant_with_grant_token": 0.05700450400001955, + "tests/aws/services/kms/test_kms.py::TestKMS::test_revoke_grant": 0.0570737669997925, + "tests/aws/services/kms/test_kms.py::TestKMS::test_rotate_key_on_demand_modifies_key_material": 0.11398178799993275, + 
"tests/aws/services/kms/test_kms.py::TestKMS::test_rotate_key_on_demand_raises_error_given_key_is_disabled": 0.6874510360000841, + "tests/aws/services/kms/test_kms.py::TestKMS::test_rotate_key_on_demand_raises_error_given_key_that_does_not_exist": 0.08736365599997953, + "tests/aws/services/kms/test_kms.py::TestKMS::test_rotate_key_on_demand_raises_error_given_key_with_imported_key_material": 0.10144614499995441, + "tests/aws/services/kms/test_kms.py::TestKMS::test_rotate_key_on_demand_raises_error_given_non_symmetric_key": 0.562883392000117, + "tests/aws/services/kms/test_kms.py::TestKMS::test_rotate_key_on_demand_with_symmetric_key_and_automatic_rotation_disabled": 0.11789540500001294, + "tests/aws/services/kms/test_kms.py::TestKMS::test_rotate_key_on_demand_with_symmetric_key_and_automatic_rotation_enabled": 0.13391452400003345, + "tests/aws/services/kms/test_kms.py::TestKMS::test_schedule_and_cancel_key_deletion": 0.04717978100006803, + "tests/aws/services/kms/test_kms.py::TestKMS::test_sign_verify[ECC_NIST_P256-ECDSA_SHA_256]": 0.30471233699995537, + "tests/aws/services/kms/test_kms.py::TestKMS::test_sign_verify[ECC_NIST_P384-ECDSA_SHA_384]": 0.3106582499999604, + "tests/aws/services/kms/test_kms.py::TestKMS::test_sign_verify[ECC_SECG_P256K1-ECDSA_SHA_256]": 0.3111983410000221, + "tests/aws/services/kms/test_kms.py::TestKMS::test_sign_verify[RSA_2048-RSASSA_PSS_SHA_256]": 0.68845701500004, + "tests/aws/services/kms/test_kms.py::TestKMS::test_sign_verify[RSA_2048-RSASSA_PSS_SHA_384]": 0.7201158150000992, + "tests/aws/services/kms/test_kms.py::TestKMS::test_sign_verify[RSA_2048-RSASSA_PSS_SHA_512]": 0.7328560440000729, + "tests/aws/services/kms/test_kms.py::TestKMS::test_sign_verify[RSA_4096-RSASSA_PKCS1_V1_5_SHA_256]": 3.2564057839999805, + "tests/aws/services/kms/test_kms.py::TestKMS::test_sign_verify[RSA_4096-RSASSA_PKCS1_V1_5_SHA_512]": 3.7212225820001095, + "tests/aws/services/kms/test_kms.py::TestKMS::test_symmetric_encrypt_offline_decrypt_online[RSA_2048-RSAES_OAEP_SHA_1]": 0.09630264200006877, + "tests/aws/services/kms/test_kms.py::TestKMS::test_symmetric_encrypt_offline_decrypt_online[RSA_2048-RSAES_OAEP_SHA_256]": 0.12804407299995546, + "tests/aws/services/kms/test_kms.py::TestKMS::test_symmetric_encrypt_offline_decrypt_online[RSA_3072-RSAES_OAEP_SHA_1]": 0.39022589099988636, + "tests/aws/services/kms/test_kms.py::TestKMS::test_symmetric_encrypt_offline_decrypt_online[RSA_3072-RSAES_OAEP_SHA_256]": 0.19546406499989644, + "tests/aws/services/kms/test_kms.py::TestKMS::test_symmetric_encrypt_offline_decrypt_online[RSA_4096-RSAES_OAEP_SHA_1]": 1.496203234999939, + "tests/aws/services/kms/test_kms.py::TestKMS::test_symmetric_encrypt_offline_decrypt_online[RSA_4096-RSAES_OAEP_SHA_256]": 0.47949251899990486, + "tests/aws/services/kms/test_kms.py::TestKMS::test_tag_existing_key_and_untag": 0.1336913289999302, + "tests/aws/services/kms/test_kms.py::TestKMS::test_tag_existing_key_with_invalid_tag_key": 0.10076715400009562, + "tests/aws/services/kms/test_kms.py::TestKMS::test_tag_key_with_duplicate_tag_keys_raises_error": 0.1022515200000953, + "tests/aws/services/kms/test_kms.py::TestKMS::test_untag_key_partially": 0.11641112000006615, + "tests/aws/services/kms/test_kms.py::TestKMS::test_update_alias": 0.06893375099991772, + "tests/aws/services/kms/test_kms.py::TestKMS::test_update_and_add_tags_on_tagged_key": 0.11751015900006223, + "tests/aws/services/kms/test_kms.py::TestKMS::test_update_key_description": 0.0403989220000085, + 
"tests/aws/services/kms/test_kms.py::TestKMS::test_verify_salt_length[ECC_NIST_P256-ECDSA_SHA_256]": 0.04064752900001167, + "tests/aws/services/kms/test_kms.py::TestKMS::test_verify_salt_length[ECC_NIST_P384-ECDSA_SHA_384]": 0.04231501400010984, + "tests/aws/services/kms/test_kms.py::TestKMS::test_verify_salt_length[ECC_SECG_P256K1-ECDSA_SHA_256]": 0.04243452299999717, + "tests/aws/services/kms/test_kms.py::TestKMS::test_verify_salt_length[RSA_2048-RSASSA_PSS_SHA_256]": 0.13781162799989488, + "tests/aws/services/kms/test_kms.py::TestKMS::test_verify_salt_length[RSA_2048-RSASSA_PSS_SHA_384]": 0.15267230100005236, + "tests/aws/services/kms/test_kms.py::TestKMS::test_verify_salt_length[RSA_2048-RSASSA_PSS_SHA_512]": 0.18987592500002393, + "tests/aws/services/kms/test_kms.py::TestKMS::test_verify_salt_length[RSA_4096-RSASSA_PKCS1_V1_5_SHA_256]": 0.8823382750001656, + "tests/aws/services/kms/test_kms.py::TestKMS::test_verify_salt_length[RSA_4096-RSASSA_PKCS1_V1_5_SHA_512]": 1.1594449030000078, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_encryption_context_generate_data_key": 0.18915193599991653, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_encryption_context_generate_data_key_pair": 0.1523963139999296, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_encryption_context_generate_data_key_pair_without_plaintext": 0.16959266600008505, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_encryption_context_generate_data_key_without_plaintext": 0.1900157699999454, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_generate_data_key": 0.03815599799997926, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_generate_data_key_pair": 0.0973009719999709, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_generate_data_key_pair_dry_run": 0.03006914900004176, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_generate_data_key_pair_without_plaintext": 0.12797566100005042, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_generate_data_key_pair_without_plaintext_dry_run": 0.06234219199996005, + "tests/aws/services/kms/test_kms.py::TestKMSGenerateKeys::test_generate_data_key_without_plaintext": 0.030817501999990782, + "tests/aws/services/kms/test_kms.py::TestKMSMultiAccounts::test_cross_accounts_access": 1.765722727999787, + "tests/aws/services/lambda_/event_source_mapping/test_cfn_resource.py::test_adding_tags": 17.638575529999912, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_deletion_event_source_mapping_with_dynamodb": 6.170832594000103, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_disabled_dynamodb_event_source_mapping": 12.31730443299989, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_duplicate_event_source_mappings": 5.586305728999946, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[content_filter_type]": 12.848958127999936, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[content_multiple_filters]": 0.006968690000007882, + 
"tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[content_or_filter]": 12.85321188599994, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[date_time_conversion]": 12.831042073999924, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[exists_false_filter]": 13.688327391999906, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[exists_filter_type]": 12.771388665000018, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[insert_same_entry_twice]": 12.790709150000112, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[numeric_filter]": 12.810148600000161, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_filter[prefix_filter]": 12.789718105999896, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_source_mapping": 15.676845379999918, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_source_mapping_with_on_failure_destination_config": 11.397867447999943, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_source_mapping_with_s3_on_failure_destination": 11.571674562999988, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_event_source_mapping_with_sns_on_failure_destination_config": 11.426628025000127, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_invalid_event_filter[[{\"eventName\": [\"INSERT\"=123}]]": 4.537032810000028, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_invalid_event_filter[single-string]": 4.580779370999949, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_failure_scenarios[empty_string_item_identifier_failure]": 14.817250082999976, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_failure_scenarios[invalid_key_foo_failure]": 14.799305921000041, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_failure_scenarios[invalid_key_foo_null_value_failure]": 14.843591520000018, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_failure_scenarios[item_identifier_not_present_failure]": 
14.808220079999955, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_failure_scenarios[null_item_identifier_failure]": 14.800821747999976, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_failure_scenarios[unhandled_exception_in_function]": 14.858337190999919, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_failures": 15.25717239800008, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_success_scenarios[empty_batch_item_failure_success]": 9.787422263000053, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_success_scenarios[empty_dict_success]": 9.728968930999827, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_success_scenarios[empty_list_success]": 9.748021979999862, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_success_scenarios[null_batch_item_failure_success]": 9.763338308000016, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_dynamodb_report_batch_item_success_scenarios[null_success]": 9.79778437999994, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_dynamodbstreams.py::TestDynamoDBEventSourceMapping::test_esm_with_not_existing_dynamodb_stream": 1.851036315999977, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisEventFiltering::test_kinesis_event_filtering_json_pattern": 9.31211859699988, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_create_kinesis_event_source_mapping": 12.14590697799997, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_create_kinesis_event_source_mapping_multiple_lambdas_single_kinesis_event_stream": 19.421371248000014, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_disable_kinesis_event_source_mapping": 29.265496796999855, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_duplicate_event_source_mappings": 3.4017732680000563, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_esm_with_not_existing_kinesis_stream": 1.425101042999927, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_empty_provided": 9.260025009999708, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_event_source_mapping_with_async_invocation": 20.193658653999705, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_event_source_mapping_with_on_failure_destination_config": 
9.262561967000238, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_event_source_mapping_with_s3_on_failure_destination": 9.320659097999851, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_event_source_mapping_with_sns_on_failure_destination_config": 9.27424224299989, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_event_source_trim_horizon": 26.303340179999964, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_maximum_record_age_exceeded[expire-before-ingestion]": 14.344580303000157, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_maximum_record_age_exceeded[expire-while-retrying]": 9.299457927000049, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_maximum_record_age_exceeded_discard_records": 19.419136923999986, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_failure_scenarios[empty_string_item_identifier_failure]": 13.075377776000323, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_failure_scenarios[invalid_key_foo_failure]": 12.179363073999866, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_failure_scenarios[invalid_key_foo_null_value_failure]": 12.192900912000141, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_failure_scenarios[item_identifier_not_present_failure]": 12.172738373999891, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_failure_scenarios[null_item_identifier_failure]": 12.18095697700005, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_failure_scenarios[unhandled_exception_in_function]": 12.180816703999653, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_failures": 17.368877388000328, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_success_scenarios[empty_batch_item_failure_success]": 7.11514787100009, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_success_scenarios[empty_dict_success]": 7.135200515000179, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_success_scenarios[empty_list_success]": 7.132734821000213, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_success_scenarios[empty_string_success]": 7.127354783999635, + 
"tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_success_scenarios[null_batch_item_failure_success]": 7.138279181000144, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_kinesis.py::TestKinesisSource::test_kinesis_report_batch_item_success_scenarios[null_success]": 7.1128590410000925, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_duplicate_event_source_mappings": 2.6105943840000236, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_event_source_mapping_default_batch_size": 3.460284768000065, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[and]": 6.428960865999898, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[exists]": 6.44431293599996, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[numeric-bigger]": 6.4406876029997875, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[numeric-range]": 6.439136788000042, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[numeric-smaller]": 6.427905940999835, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[or]": 6.424111809999658, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[plain-string-filter]": 0.002015228000118441, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[plain-string-matching]": 0.002816222000092239, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[prefix]": 6.445062804999907, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[single]": 6.448771473999841, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_filter[valid-json-filter]": 6.438209316000211, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping": 6.372904106000078, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batch_size[10000]": 9.557955466000067, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batch_size[1000]": 9.576256149999836, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batch_size[100]": 9.555571795999867, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batch_size[15]": 9.559413996000103, + 
"tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batch_size_override[10000]": 50.166936663000115, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batch_size_override[1000]": 9.84782876700001, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batch_size_override[100]": 6.637811828000167, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batch_size_override[20]": 6.435833736000177, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batching_reserved_concurrency": 8.700370003999979, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_batching_window_size_override": 27.60035112200012, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_event_source_mapping_update": 11.682036982, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_invalid_event_filter[None]": 1.2600650990000304, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_invalid_event_filter[invalid_filter2]": 1.257216680000056, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_invalid_event_filter[invalid_filter3]": 1.2344059659999402, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::TestSQSEventSourceMapping::test_sqs_invalid_event_filter[simple string]": 1.2343287780004175, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_esm_with_not_existing_sqs_queue": 1.1894653759998164, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_failing_lambda_retries_after_visibility_timeout": 17.85165343099993, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_fifo_message_group_parallelism": 63.495032326, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_message_body_and_attributes_passed_correctly": 6.768202993000386, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_redrive_policy_with_failing_lambda": 18.47574258999998, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_report_batch_item_failures": 23.23761417300011, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_report_batch_item_failures_empty_json_batch_succeeds": 9.935343049999801, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_report_batch_item_failures_invalid_result_json_batch_fails": 15.868418494000025, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_report_batch_item_failures_on_lambda_error": 8.381487519000075, + "tests/aws/services/lambda_/event_source_mapping/test_lambda_integration_sqs.py::test_sqs_queue_as_lambda_dead_letter_queue": 6.267675840000038, + 
"tests/aws/services/lambda_/test_lambda.py::TestLambdaAliases::test_alias_routingconfig": 3.2918021880000197, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaAliases::test_lambda_alias_moving": 3.410876468999959, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBaseFeatures::test_assume_role[1]": 3.4328520960000333, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBaseFeatures::test_assume_role[2]": 3.39216228999976, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBaseFeatures::test_function_state": 1.700168821999796, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBaseFeatures::test_lambda_different_iam_keys_environment": 5.921658551000064, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBaseFeatures::test_lambda_large_response": 2.93001124899979, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBaseFeatures::test_lambda_too_large_response": 3.562876879999976, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBaseFeatures::test_lambda_too_large_response_but_with_custom_limit": 2.835244094000018, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBaseFeatures::test_large_payloads": 3.2608969230000184, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_ignore_architecture": 2.9581719330001306, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_lambda_cache_local[nodejs]": 9.4063971449998, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_lambda_cache_local[python]": 3.4075550420000127, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_lambda_init_environment": 6.831238581999969, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_lambda_invoke_no_timeout": 5.196857975000057, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_lambda_invoke_timed_out_environment_reuse": 0.0321427629999107, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_lambda_invoke_with_timeout": 5.795239934999927, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_mixed_architecture": 0.025358354999980293, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_runtime_introspection_arm": 0.017498378999789566, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_runtime_introspection_x86": 3.5531777740002326, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaBehavior::test_runtime_ulimits": 3.1285540659998787, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaCleanup::test_delete_lambda_during_sync_invoke": 0.0017440170001918887, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaCleanup::test_recreate_function": 3.4161329300000034, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_lambda_concurrency_block": 17.364219408999816, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_lambda_concurrency_crud": 1.2346419329996934, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_lambda_concurrency_update": 2.296258390000048, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_lambda_provisioned_concurrency_moves_with_alias": 0.003036435000012716, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_lambda_provisioned_concurrency_scheduling": 8.516381729000159, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_provisioned_concurrency": 2.898677791999944, + 
"tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_provisioned_concurrency_on_alias": 2.93960954399995, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_reserved_concurrency": 16.405836706000173, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_reserved_concurrency_async_queue": 3.9214294349999363, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaConcurrency::test_reserved_provisioned_overlap": 5.227813992999927, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_handler_error": 1.595507729000019, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_handler_exit": 0.002719772999853376, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_invoke_payload_encoding_error[body-n\\x87r\\x9e\\xe9\\xb5\\xd7I\\xee\\x9bmt]": 1.3708861630000229, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_invoke_payload_encoding_error[message-\\x99\\xeb,j\\x07\\xa1zYh]": 1.3723986510001396, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_runtime_error": 7.694419942999957, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_runtime_exit": 0.0017995330001667753, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_runtime_exit_segfault": 0.0016608819998964464, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_runtime_startup_error": 1.6028169619999062, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_runtime_startup_timeout": 41.81982029200003, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaErrors::test_lambda_runtime_wrapper_not_found": 0.0021420109999326087, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_dry_run[nodejs16.x]": 0.0029489390001344873, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_dry_run[python3.10]": 0.0030282859997896594, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_event[nodejs16.x]": 2.281449830999918, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_event[python3.10]": 2.295798087000094, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_event_error": 0.002092257999947833, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_no_return_payload[nodejs-Event]": 2.294005343000208, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_no_return_payload[nodejs-RequestResponse]": 8.68759877399998, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_no_return_payload[python-Event]": 2.2996057009997912, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_no_return_payload[python-RequestResponse]": 2.61801069299986, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_request_response[nodejs16.x]": 2.7696100320001733, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_type_request_response[python3.10]": 1.6132022680001228, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_with_logs[nodejs16.x]": 17.465238027999703, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_with_logs[python3.10]": 9.380066224999837, + 
"tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invocation_with_qualifier": 1.8426856979999684, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_invoke_exceptions": 0.3093109880001066, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_lambda_with_context": 0.0025391830001808557, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaFeatures::test_upload_lambda_from_s3": 2.200014587999931, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_delete_function": 1.1548962980000397, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_function_alias": 1.1588942490000136, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_function_concurrency": 1.1523486229998525, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_function_invocation": 1.558888128000035, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_function_tags": 1.1541276440002548, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_get_function": 1.1455450769999516, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_get_function_configuration": 1.144430131000263, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_get_lambda_layer": 0.20711446500013153, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_list_versions_by_function": 1.1493176050003058, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaMultiAccounts::test_publish_version": 1.1956735449998632, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaPermissions::test_lambda_permission_url_invocation": 0.025083545000143204, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_update_function_url_config": 2.152666870000303, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_echo_http_fixture_default": 3.9822867999998834, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_echo_http_fixture_trim_x_headers": 3.891536488999918, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_echo_invoke[BUFFERED]": 3.607789464999769, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_echo_invoke[None]": 3.494224434000216, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_echo_invoke[RESPONSE_STREAM]": 0.14303240100002768, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_form_payload": 4.400915970000369, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_headers_and_status": 2.9954989220002517, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invalid_invoke_mode": 2.0611565080000673, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation[boolean]": 4.575936626000157, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation[dict]": 3.6651487589997487, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation[float]": 3.580048043000261, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation[http-response-json]": 3.4686210530001063, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation[http-response]": 3.671196407000025, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation[integer]": 
3.526429435999944, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation[list-mixed]": 3.570913467999844, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation[string]": 3.7383310850002545, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation_custom_id": 2.888942402999646, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation_custom_id_aliased": 3.131158947999893, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_invocation_exception": 3.7414350939998258, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_non_existing_url": 0.16399357500017686, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaURL::test_lambda_url_persists_after_alias_delete": 5.697754753000254, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaVersions::test_async_invoke_queue_upon_function_update": 98.75175207500024, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaVersions::test_function_update_during_invoke": 0.002173371999788287, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaVersions::test_lambda_handler_update": 2.2258366930000193, + "tests/aws/services/lambda_/test_lambda.py::TestLambdaVersions::test_lambda_versions_with_code_changes": 5.560721653999963, + "tests/aws/services/lambda_/test_lambda.py::TestRequestIdHandling::test_request_id_async_invoke_with_retry": 11.274393451999913, + "tests/aws/services/lambda_/test_lambda.py::TestRequestIdHandling::test_request_id_format": 0.02900219699995432, + "tests/aws/services/lambda_/test_lambda.py::TestRequestIdHandling::test_request_id_invoke": 3.6768631099998856, + "tests/aws/services/lambda_/test_lambda.py::TestRequestIdHandling::test_request_id_invoke_url": 3.595541804000277, + "tests/aws/services/lambda_/test_lambda_api.py::TestCodeSigningConfig::test_code_signing_not_found_excs": 1.3382098199999746, + "tests/aws/services/lambda_/test_lambda_api.py::TestCodeSigningConfig::test_function_code_signing_config": 1.2807681620001858, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaAccountSettings::test_account_settings": 0.0957911140003489, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaAccountSettings::test_account_settings_total_code_size": 1.4477420340003846, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaAccountSettings::test_account_settings_total_code_size_config_update": 1.3024633039995024, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaAlias::test_alias_lifecycle": 1.5311286540004403, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaAlias::test_alias_naming": 1.6426531739998609, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaAlias::test_non_existent_alias_deletion": 1.2091474189996916, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaAlias::test_non_existent_alias_update": 1.2119617649996144, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaAlias::test_notfound_and_invalid_routingconfigs": 1.4384066560000974, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventInvokeConfig::test_lambda_eventinvokeconfig_exceptions": 2.7691686680000203, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventInvokeConfig::test_lambda_eventinvokeconfig_lifecycle": 1.3731996350002191, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventSourceMappings::test_create_event_filter_criteria_validation": 3.5121567929995763, + 
"tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventSourceMappings::test_create_event_source_self_managed": 0.001897559000099136, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventSourceMappings::test_create_event_source_validation": 3.3937178969999877, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventSourceMappings::test_create_event_source_validation_kinesis": 1.8873261889998503, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventSourceMappings::test_event_source_mapping_exceptions": 0.15674208799964617, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventSourceMappings::test_event_source_mapping_lifecycle": 4.194944569000199, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventSourceMappings::test_event_source_mapping_lifecycle_delete_function": 6.06619584200007, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaEventSourceMappings::test_function_name_variations": 16.075417395000386, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_create_lambda_exceptions": 0.1629552599993076, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_delete_on_nonexisting_version": 1.2552365810001902, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_arns": 2.5610976499997378, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_lifecycle": 2.4647596569993766, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[full_arn_and_qualifier_too_long_and_invalid_region-create_function]": 0.10542240799986757, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[full_arn_and_qualifier_too_long_and_invalid_region-delete_function]": 0.09121035499993013, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[full_arn_and_qualifier_too_long_and_invalid_region-get_function]": 0.09177350199979628, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[full_arn_and_qualifier_too_long_and_invalid_region-invoke]": 0.09068421899974055, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[full_arn_with_multiple_qualifiers-create_function]": 0.1075875120004639, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[full_arn_with_multiple_qualifiers-delete_function]": 0.09169966299987209, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[full_arn_with_multiple_qualifiers-get_function]": 0.09162844000002224, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[full_arn_with_multiple_qualifiers-invoke]": 0.09312551799939683, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_is_single_invalid-create_function]": 0.10493958399911207, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_is_single_invalid-delete_function]": 0.09097538800006078, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_is_single_invalid-get_function]": 
0.09035388199981753, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_is_single_invalid-invoke]": 0.0914392339996084, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_too_long-create_function]": 0.10661792800010517, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_too_long-delete_function]": 0.09543345800011593, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_too_long-get_function]": 0.09408235700038858, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_too_long-invoke]": 0.009586961000422889, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_too_long_and_invalid_region-create_function]": 0.10619075700014946, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_too_long_and_invalid_region-delete_function]": 0.09056839899994884, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_too_long_and_invalid_region-get_function]": 0.09142040100050508, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[function_name_too_long_and_invalid_region-invoke]": 0.09067390700056421, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[incomplete_arn-create_function]": 0.008148833999712224, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[incomplete_arn-delete_function]": 0.09738297800004148, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[incomplete_arn-get_function]": 0.0917599340000379, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[incomplete_arn-invoke]": 0.009680809000201407, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_account_id_in_partial_arn-create_function]": 0.10928267799999958, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_account_id_in_partial_arn-delete_function]": 0.0954810630000793, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_account_id_in_partial_arn-get_function]": 0.10553466599958483, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_account_id_in_partial_arn-invoke]": 0.11368783799935045, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_characters_in_function_name-create_function]": 0.1059799669997119, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_characters_in_function_name-delete_function]": 0.09127622999994855, + 
"tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_characters_in_function_name-get_function]": 0.09500060800019128, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_characters_in_function_name-invoke]": 0.09242355800006408, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_characters_in_qualifier-create_function]": 0.10736567499952798, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_characters_in_qualifier-delete_function]": 0.089982315999805, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_characters_in_qualifier-get_function]": 0.09092404599959991, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_characters_in_qualifier-invoke]": 0.09161254199989344, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_region_in_arn-create_function]": 0.10760217600000033, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_region_in_arn-delete_function]": 0.09327655600009166, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_region_in_arn-get_function]": 0.09180636400014919, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[invalid_region_in_arn-invoke]": 0.09168878299988137, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[latest_version_with_additional_qualifier-create_function]": 0.10677013700023963, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[latest_version_with_additional_qualifier-delete_function]": 0.09517563199960932, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[latest_version_with_additional_qualifier-get_function]": 0.09299611600044955, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[latest_version_with_additional_qualifier-invoke]": 0.0957649400002083, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[lowercase_latest_qualifier-create_function]": 0.11067506800009141, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[lowercase_latest_qualifier-delete_function]": 0.00979453900072258, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[lowercase_latest_qualifier-get_function]": 0.09434537400011322, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[lowercase_latest_qualifier-invoke]": 0.09167496600048253, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[missing_account_id_in_arn-create_function]": 0.10372553300021536, + 
"tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[missing_account_id_in_arn-delete_function]": 0.0909642880001229, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[missing_account_id_in_arn-get_function]": 0.09568574500008253, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[missing_account_id_in_arn-invoke]": 0.08953327200015337, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[missing_region_in_arn-create_function]": 0.10678431099995578, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[missing_region_in_arn-delete_function]": 0.09070618799978547, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[missing_region_in_arn-get_function]": 0.09530334499913806, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[missing_region_in_arn-invoke]": 0.08970098100007817, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[misspelled_latest_in_arn-create_function]": 0.10281141400037086, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[misspelled_latest_in_arn-delete_function]": 0.09155749599995033, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[misspelled_latest_in_arn-get_function]": 0.09352978300012182, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[misspelled_latest_in_arn-invoke]": 0.08972116400036612, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[non_lambda_arn-create_function]": 0.10474713499979771, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[non_lambda_arn-delete_function]": 0.09121160099994086, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[non_lambda_arn-get_function]": 0.09238412300010168, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[non_lambda_arn-invoke]": 0.09070389199996498, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[partial_arn_with_extra_qualifier-create_function]": 0.1075452300001416, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[partial_arn_with_extra_qualifier-delete_function]": 0.0917112739998629, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[partial_arn_with_extra_qualifier-get_function]": 0.09053356200001872, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[partial_arn_with_extra_qualifier-invoke]": 0.09123025499957294, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[qualifier_too_long-create_function]": 0.12172845899931417, + 
"tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[qualifier_too_long-delete_function]": 0.0899813180003548, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[qualifier_too_long-get_function]": 0.09238021700002719, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_function_name_and_qualifier_validation[qualifier_too_long-invoke]": 0.10163692300056937, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_get_function_wrong_region[delete_function]": 1.208929459000501, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_get_function_wrong_region[get_function]": 1.207991779999702, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_get_function_wrong_region[get_function_code_signing_config]": 1.2138526149997233, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_get_function_wrong_region[get_function_concurrency]": 1.246716725999704, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_get_function_wrong_region[get_function_configuration]": 1.216159682999205, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_get_function_wrong_region[get_function_event_invoke_config]": 1.2136000079999576, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_get_function_wrong_region[get_function_url_config]": 1.214201263999712, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_get_function_wrong_region[invoke]": 1.226063807000628, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_invalid_invoke": 1.1215012580000803, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_invalid_vpc_config_security_group": 0.002020827000251302, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_invalid_vpc_config_subnet": 0.633147975999691, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_lambda_code_location_s3": 1.4669219440002053, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_lambda_code_location_zipfile": 2.384736683999563, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_lambda_concurrent_code_updates": 2.3004994600000828, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_lambda_concurrent_config_updates": 2.263344257999961, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_list_functions": 2.4700689490000514, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_fn[delete_function]": 0.09266082800013464, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_fn[get_function]": 0.09119824400067955, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_fn[get_function_code_signing_config]": 0.09201760500036471, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_fn[get_function_concurrency]": 0.09064667200073018, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_fn[get_function_configuration]": 0.09445719499990446, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_fn[get_function_event_invoke_config]": 0.08943433700005698, + 
"tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_fn[get_function_url_config]": 0.09202277400027015, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_version[get_function]": 1.197288696000669, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_version[get_function_configuration]": 1.2172399539999788, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_on_nonexisting_version[get_function_event_invoke_config]": 1.2195386999997027, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_with_arn_qualifier_mismatch[delete_function]": 0.10233985900049447, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_with_arn_qualifier_mismatch[get_function]": 0.10111153599973477, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_ops_with_arn_qualifier_mismatch[get_function_configuration]": 0.10047562299951096, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_redundant_updates": 1.315357160000076, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_update_lambda_exceptions": 1.2173563470000772, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaFunction::test_vpc_config": 3.313852114999918, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaImages::test_lambda_image_and_image_config_crud": 0.465917726999578, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaImages::test_lambda_image_crud": 3.0020065359994987, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaImages::test_lambda_image_versions": 0.5501115460006076, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaImages::test_lambda_zip_file_to_image": 1.3746185909994892, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_compatibilities[runtimes0]": 0.1323196829994231, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_compatibilities[runtimes1]": 0.13564235499961796, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_deterministic_version": 0.06057928400014134, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_exceptions": 0.2958905169998616, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_function_exceptions": 17.49226157799967, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_function_quota_exception": 16.382776365000154, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_lifecycle": 1.4527833710003506, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_policy_exceptions": 0.23518577200002255, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_policy_lifecycle": 0.1754178950000096, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaLayer::test_layer_s3_content": 0.21353340099994966, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaPermissions::test_add_lambda_permission_aws": 1.2445636669999658, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaPermissions::test_add_lambda_permission_fields": 1.3054153600000973, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaPermissions::test_create_multiple_lambda_permissions": 1.2320565539998825, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaPermissions::test_lambda_permission_fn_versioning": 
1.4047097590000703, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaPermissions::test_permission_exceptions": 1.3277942289996645, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaPermissions::test_remove_multi_permissions": 1.2803662670007725, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaProvisionedConcurrency::test_lambda_provisioned_lifecycle": 2.4635976090007716, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaProvisionedConcurrency::test_provisioned_concurrency_exceptions": 1.3849405759997353, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaProvisionedConcurrency::test_provisioned_concurrency_limits": 1.263370051000038, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaRecursion::test_put_function_recursion_config_allow": 1.2293864019998182, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaRecursion::test_put_function_recursion_config_default_terminate": 1.2154534869991949, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaRecursion::test_put_function_recursion_config_invalid_value": 1.21867625699997, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaReservedConcurrency::test_function_concurrency": 1.2519868189997396, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaReservedConcurrency::test_function_concurrency_exceptions": 1.2256378880001648, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaReservedConcurrency::test_function_concurrency_limits": 1.2400140240001747, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaRevisions::test_function_revisions_basic": 15.68819863699946, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaRevisions::test_function_revisions_permissions": 1.2829905199996574, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaRevisions::test_function_revisions_version_and_alias": 1.3540807560002577, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSizeLimits::test_lambda_envvars_near_limit_succeeds": 1.292404870000155, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSizeLimits::test_large_environment_fails_multiple_keys": 16.2169111850003, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSizeLimits::test_large_environment_variables_fails": 16.22153647999994, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSizeLimits::test_large_lambda": 12.793866835000244, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSizeLimits::test_oversized_request_create_lambda": 3.6983165540004848, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSizeLimits::test_oversized_unzipped_lambda": 4.872836943000493, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSizeLimits::test_oversized_zipped_create_lambda": 1.8063840369995887, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_exceptions": 0.10625641799970253, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_lifecycle[dotnet8]": 4.308500883999841, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_lifecycle[java11]": 3.3020375400005832, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_lifecycle[java17]": 1.2817461300001014, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_lifecycle[java21]": 2.300159917000201, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_lifecycle[python3.12]": 1.2680537219998769, 
+ "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_lifecycle[python3.13]": 1.256969361999836, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_update_function_configuration[dotnet8]": 1.2271871210000427, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_update_function_configuration[java11]": 1.2262587579994033, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_update_function_configuration[java17]": 1.252001521999773, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_update_function_configuration[java21]": 1.2419912030004525, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_update_function_configuration[python3.12]": 1.233809797000049, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaSnapStart::test_snapstart_update_function_configuration[python3.13]": 1.2344359409994468, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTag::test_create_tag_on_esm_create": 1.362441009000122, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTag::test_create_tag_on_fn_create": 1.2314037439996355, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTag::test_tag_exceptions[event_source_mapping]": 0.12472489300034795, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTag::test_tag_exceptions[lambda_function]": 0.1255774520000159, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTag::test_tag_lifecycle[event_source_mapping]": 1.419460906000495, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTag::test_tag_lifecycle[lambda_function]": 1.3042137789998378, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTag::test_tag_nonexisting_resource": 1.2478810700004033, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTags::test_tag_exceptions": 1.3193846259996462, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTags::test_tag_lifecycle": 1.3720121429996652, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTags::test_tag_limits": 1.4009362379997583, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaTags::test_tag_versions": 1.2744509009994545, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaUrl::test_create_url_config_custom_id_tag": 1.1367676539998683, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaUrl::test_create_url_config_custom_id_tag_alias": 3.4105405720001727, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaUrl::test_create_url_config_custom_id_tag_invalid_id": 1.1345518109997101, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaUrl::test_url_config_deletion_without_qualifier": 1.355174494000039, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaUrl::test_url_config_exceptions": 1.5725105500005157, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaUrl::test_url_config_lifecycle": 1.3130491750002875, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaUrl::test_url_config_list_paging": 1.3855393809999441, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaVersions::test_publish_version_on_create": 3.268549532000179, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaVersions::test_publish_with_update": 1.3698806679999507, + "tests/aws/services/lambda_/test_lambda_api.py::TestLambdaVersions::test_publish_with_wrong_sha256": 1.2586902369998825, + 
"tests/aws/services/lambda_/test_lambda_api.py::TestLambdaVersions::test_version_lifecycle": 2.4837877310001204, + "tests/aws/services/lambda_/test_lambda_api.py::TestLoggingConfig::test_advanced_logging_configuration_format_switch": 1.3314498630002163, + "tests/aws/services/lambda_/test_lambda_api.py::TestLoggingConfig::test_function_advanced_logging_configuration": 1.2893146979999983, + "tests/aws/services/lambda_/test_lambda_api.py::TestLoggingConfig::test_function_partial_advanced_logging_configuration_update[partial_config0]": 2.297276863999741, + "tests/aws/services/lambda_/test_lambda_api.py::TestLoggingConfig::test_function_partial_advanced_logging_configuration_update[partial_config1]": 1.316500466999969, + "tests/aws/services/lambda_/test_lambda_api.py::TestLoggingConfig::test_function_partial_advanced_logging_configuration_update[partial_config2]": 1.2908780370003115, + "tests/aws/services/lambda_/test_lambda_api.py::TestLoggingConfig::test_function_partial_advanced_logging_configuration_update[partial_config3]": 2.3045099720002327, + "tests/aws/services/lambda_/test_lambda_api.py::TestPartialARNMatching::test_cross_region_arn_function_access": 1.1494904910000514, + "tests/aws/services/lambda_/test_lambda_api.py::TestPartialARNMatching::test_update_function_configuration_full_arn": 1.2352551269991636, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_disabled": 15.200725250000687, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_enabled[dotnetcore3.1]": 0.10496614300063811, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_enabled[go1.x]": 0.10590375699985088, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_enabled[java8]": 0.10713243700047315, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_enabled[nodejs12.x]": 0.1082241799995245, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_enabled[nodejs14.x]": 0.11058480099973167, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_enabled[provided]": 0.10660791600048469, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_enabled[python3.7]": 0.10678785400068591, + "tests/aws/services/lambda_/test_lambda_api.py::TestRuntimeValidation::test_create_deprecated_function_runtime_with_validation_enabled[ruby2.7]": 0.10672012700024425, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[dotnet6]": 3.06352365799998, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[dotnet8]": 4.894699070999934, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[java11]": 6.886372956999992, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[java17]": 6.045630055999936, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[java21]": 
6.073045132999994, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[java8.al2]": 5.987005355999997, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[nodejs16.x]": 8.86302933899998, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[nodejs18.x]": 6.797295017999971, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[nodejs20.x]": 6.737347848999946, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[nodejs22.x]": 1.649954269000034, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[python3.10]": 1.6996792870000377, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[python3.11]": 7.772360727000006, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[python3.12]": 1.7074608609999586, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[python3.13]": 1.7068148949999795, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[python3.8]": 1.7247304230000395, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[python3.9]": 6.764890964000017, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[ruby3.2]": 2.3380247210000107, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[ruby3.3]": 2.0157003520000103, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaCallingLocalstack::test_manual_endpoint_injection[ruby3.4]": 2.0860391279999817, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[dotnet6]": 3.5172545260002153, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[dotnet8]": 2.4834033889997045, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[java11]": 2.4450301440001567, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[java17]": 2.3982822569996642, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[java21]": 2.415967052000724, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[java8.al2]": 5.492264927999713, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[nodejs16.x]": 2.424582855999688, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[nodejs18.x]": 2.464710259999265, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[nodejs20.x]": 3.4792536249997283, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[nodejs22.x]": 7.456292094000219, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[provided.al2023]": 3.2485664700002417, + 
"tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[provided.al2]": 4.096848258000136, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[python3.10]": 2.493533157999991, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[python3.11]": 2.5446987099999205, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[python3.12]": 2.53268901499996, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[python3.13]": 2.5572105699998247, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[python3.8]": 2.7412445979998665, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[python3.9]": 6.593621324000196, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[ruby3.2]": 8.505687551999927, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[ruby3.3]": 8.599851978999595, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_echo_invoke[ruby3.4]": 10.594274528999904, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[dotnet6]": 3.643416814000375, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[dotnet8]": 3.610863486999733, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[java11]": 3.7023230979998516, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[java17]": 3.617223881999962, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[java21]": 3.6980749199997263, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[java8.al2]": 3.9022132559994134, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[nodejs16.x]": 3.5389829720002126, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[nodejs18.x]": 3.4956912620000367, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[nodejs20.x]": 3.48594349699988, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[nodejs22.x]": 3.48520464000012, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[provided.al2023]": 3.54740929899981, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[provided.al2]": 3.5130286399999022, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[python3.10]": 3.553406668000207, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[python3.11]": 4.680043130000286, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[python3.12]": 3.487204077000115, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[python3.13]": 3.480162788999678, + 
"tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[python3.8]": 3.5251213839997035, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[python3.9]": 3.5234952969994993, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[ruby3.2]": 4.788849405999827, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[ruby3.3]": 3.6056958599997415, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_introspection_invoke[ruby3.4]": 3.5955227210001794, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[java11]": 2.304434307000065, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[java21]": 1.8318432669998401, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[java8.al2]": 9.241412450000013, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[nodejs16.x]": 1.7096603129998584, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[nodejs18.x]": 1.7307644819998131, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[nodejs20.x]": 1.688494534000256, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[nodejs22.x]": 18.49859306799999, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[python3.10]": 6.73107275000001, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[python3.11]": 1.6557294540007206, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[python3.12]": 8.969660962000006, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[python3.13]": 12.428941578999996, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[python3.8]": 11.831267334000017, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[python3.9]": 1.675758656999733, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[ruby3.2]": 7.834567620999991, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[ruby3.3]": 10.270646988000038, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_runtime_wrapper_invoke[ruby3.4]": 7.8768723939999745, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[dotnet6]": 1.848842147999676, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[dotnet8]": 1.8224175709997326, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[java11]": 2.018225147999601, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[java17]": 1.838537517000077, + 
"tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[java21]": 3.045464458999959, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[java8.al2]": 2.100969334000183, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[nodejs16.x]": 1.7112399080001524, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[nodejs18.x]": 1.7403566210000463, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[nodejs20.x]": 1.7062675109996235, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[nodejs22.x]": 1.697925266999846, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[provided.al2023]": 1.746255986000051, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[provided.al2]": 1.7340155249999043, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[python3.10]": 1.7122417780001342, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[python3.11]": 1.6905850059997647, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[python3.12]": 1.7078953320001347, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[python3.13]": 1.697866336000061, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[python3.8]": 1.7039431319999494, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[python3.9]": 1.6750110779998977, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[ruby3.2]": 1.7591462340001272, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[ruby3.3]": 1.767142593999779, + "tests/aws/services/lambda_/test_lambda_common.py::TestLambdaRuntimesCommon::test_uncaught_exception_invoke[ruby3.4]": 1.7665523680007027, + "tests/aws/services/lambda_/test_lambda_destinations.py::TestLambdaDLQ::test_dead_letter_queue": 20.82205074700005, + "tests/aws/services/lambda_/test_lambda_destinations.py::TestLambdaDestinationEventbridge::test_invoke_lambda_eventbridge": 15.639352481000003, + "tests/aws/services/lambda_/test_lambda_destinations.py::TestLambdaDestinationSqs::test_assess_lambda_destination_invocation[payload0]": 1.8589550149999923, + "tests/aws/services/lambda_/test_lambda_destinations.py::TestLambdaDestinationSqs::test_assess_lambda_destination_invocation[payload1]": 1.8565729229999306, + "tests/aws/services/lambda_/test_lambda_destinations.py::TestLambdaDestinationSqs::test_lambda_destination_default_retries": 18.198687576000054, + "tests/aws/services/lambda_/test_lambda_destinations.py::TestLambdaDestinationSqs::test_maxeventage": 63.680891202, + "tests/aws/services/lambda_/test_lambda_destinations.py::TestLambdaDestinationSqs::test_retries": 22.489840888999993, + "tests/aws/services/lambda_/test_lambda_developer_tools.py::TestDockerFlags::test_additional_docker_flags": 1.5487824900000078, + 
"tests/aws/services/lambda_/test_lambda_developer_tools.py::TestDockerFlags::test_lambda_docker_networks": 5.633000799999991, + "tests/aws/services/lambda_/test_lambda_developer_tools.py::TestHotReloading::test_hot_reloading[nodejs20.x]": 3.396451893999995, + "tests/aws/services/lambda_/test_lambda_developer_tools.py::TestHotReloading::test_hot_reloading[python3.12]": 3.358170538999957, + "tests/aws/services/lambda_/test_lambda_developer_tools.py::TestHotReloading::test_hot_reloading_environment_placeholder": 0.4042654529999936, + "tests/aws/services/lambda_/test_lambda_developer_tools.py::TestHotReloading::test_hot_reloading_error_path_not_absolute": 0.027339747000041825, + "tests/aws/services/lambda_/test_lambda_developer_tools.py::TestHotReloading::test_hot_reloading_publish_version": 1.098378410999885, + "tests/aws/services/lambda_/test_lambda_developer_tools.py::TestLambdaDNS::test_lambda_localhost_localstack_cloud_connectivity": 1.5464117399999964, + "tests/aws/services/lambda_/test_lambda_integration_xray.py::test_traceid_outside_handler[Active]": 2.5713510069999757, + "tests/aws/services/lambda_/test_lambda_integration_xray.py::test_traceid_outside_handler[PassThrough]": 2.569453714999952, + "tests/aws/services/lambda_/test_lambda_integration_xray.py::test_xray_trace_propagation": 1.512666779999961, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestCloudwatchLogs::test_multi_line_prints": 3.5940208359999133, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestGoProvidedRuntimes::test_manual_endpoint_injection[provided.al2023]": 1.8460767790001, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestGoProvidedRuntimes::test_manual_endpoint_injection[provided.al2]": 1.8306617480000114, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestGoProvidedRuntimes::test_uncaught_exception_invoke[provided.al2023]": 2.944431734999853, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestGoProvidedRuntimes::test_uncaught_exception_invoke[provided.al2]": 2.9872691719999693, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_java_custom_handler_method_specification[cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom-INTERFACE]": 2.9969425369999954, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_java_custom_handler_method_specification[cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom::handleRequest-INTERFACE]": 3.000272019000022, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_java_custom_handler_method_specification[cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom::handleRequestCustom-CUSTOM]": 2.993511850999994, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_java_lambda_subscribe_sns_topic": 8.821029194000062, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_java_runtime_with_lib": 5.582461848999969, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_serializable_input_object[java11]": 2.632787815000029, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_serializable_input_object[java17]": 2.5455394519999572, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_serializable_input_object[java21]": 2.7563983769999822, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_serializable_input_object[java8.al2]": 2.785033191000025, + 
"tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_stream_handler[java11]": 1.720159057999922, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_stream_handler[java17]": 1.6748605850001468, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_stream_handler[java21]": 1.7263662630000454, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestJavaRuntimes::test_stream_handler[java8.al2]": 1.7039376080001603, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestNodeJSRuntimes::test_invoke_nodejs_es6_lambda[nodejs16.x]": 4.687620160999984, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestNodeJSRuntimes::test_invoke_nodejs_es6_lambda[nodejs18.x]": 4.6949735459999715, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestNodeJSRuntimes::test_invoke_nodejs_es6_lambda[nodejs20.x]": 5.295323750000023, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestNodeJSRuntimes::test_invoke_nodejs_es6_lambda[nodejs22.x]": 4.654764755000031, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_handler_in_submodule[python3.10]": 1.626008560999935, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_handler_in_submodule[python3.11]": 1.624617876000002, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_handler_in_submodule[python3.12]": 1.6280238340000324, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_handler_in_submodule[python3.13]": 1.6293802899999719, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_handler_in_submodule[python3.8]": 1.644360182000014, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_handler_in_submodule[python3.9]": 1.6594063140000799, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_python_runtime_correct_versions[python3.10]": 1.594542821999994, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_python_runtime_correct_versions[python3.11]": 1.4873011860000815, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_python_runtime_correct_versions[python3.12]": 1.521954580000056, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_python_runtime_correct_versions[python3.13]": 1.5288136179999583, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_python_runtime_correct_versions[python3.8]": 1.5343686050000542, + "tests/aws/services/lambda_/test_lambda_runtimes.py::TestPythonRuntimes::test_python_runtime_correct_versions[python3.9]": 1.5117436019999104, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_create_and_delete_log_group": 0.09708713800011992, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_create_and_delete_log_stream": 0.378011698000023, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_delivery_logs_for_sns": 1.083698217999995, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_filter_log_events_response_header": 0.05299736099993879, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_list_tags_log_group": 0.1806159589999652, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_metric_filters": 0.0018212810000477475, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_put_events_multi_bytes_msg": 0.05479535400002078, + 
"tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_put_subscription_filter_firehose": 1.277328060000059, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_put_subscription_filter_kinesis": 3.9893233719999444, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_put_subscription_filter_lambda": 1.9045418090000794, + "tests/aws/services/logs/test_logs.py::TestCloudWatchLogs::test_resource_does_not_exist": 0.03757785800007696, + "tests/aws/services/opensearch/test_opensearch.py::TestCustomBackendManager::test_custom_backend": 0.13274039899999934, + "tests/aws/services/opensearch/test_opensearch.py::TestCustomBackendManager::test_custom_backend_with_custom_endpoint": 0.15808766400016339, + "tests/aws/services/opensearch/test_opensearch.py::TestEdgeProxiedOpensearchCluster::test_custom_endpoint": 9.944805999999971, + "tests/aws/services/opensearch/test_opensearch.py::TestEdgeProxiedOpensearchCluster::test_custom_endpoint_disabled": 9.937540264999939, + "tests/aws/services/opensearch/test_opensearch.py::TestEdgeProxiedOpensearchCluster::test_route_through_edge": 9.830893729000081, + "tests/aws/services/opensearch/test_opensearch.py::TestMultiClusterManager::test_multi_cluster": 15.089933439000106, + "tests/aws/services/opensearch/test_opensearch.py::TestMultiplexingClusterManager::test_multiplexing_cluster": 10.66446813399989, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_cloudformation_deployment": 12.242749403999937, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_create_domain": 8.935557865999954, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_create_domain_with_invalid_custom_endpoint": 0.01987989799999923, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_create_domain_with_invalid_name": 0.03136097399999471, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_create_existing_domain_causes_exception": 9.903089958999999, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_create_indices": 11.424432070999956, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_describe_domains": 10.475285815999996, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_domain_version": 9.946349076999809, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_endpoint_strategy_path": 10.437236412000061, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_endpoint_strategy_port": 9.871450047000053, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_exception_header_field": 0.011959559000047193, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_get_compatible_version_for_domain": 8.899670849999893, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_get_compatible_versions": 0.019791766999901483, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_get_document": 10.748720251000009, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_gzip_responses": 10.066440099000033, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_list_versions": 0.09416587600003368, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_search": 10.679478274000076, + 
"tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_security_plugin": 14.391264812999907, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_sql_plugin": 13.578097041999968, + "tests/aws/services/opensearch/test_opensearch.py::TestOpensearchProvider::test_update_domain_config": 9.9371915669999, + "tests/aws/services/opensearch/test_opensearch.py::TestSingletonClusterManager::test_endpoint_strategy_port_singleton_cluster": 10.261006193999947, + "tests/aws/services/redshift/test_redshift.py::TestRedshift::test_cluster_security_groups": 0.03353517200002898, + "tests/aws/services/redshift/test_redshift.py::TestRedshift::test_create_clusters": 0.2733527260000983, + "tests/aws/services/resource_groups/test_resource_groups.py::TestResourceGroups::test_cloudformation_query": 0.001642066000044906, + "tests/aws/services/resource_groups/test_resource_groups.py::TestResourceGroups::test_create_group": 5.851496790999931, + "tests/aws/services/resource_groups/test_resource_groups.py::TestResourceGroups::test_resource_groups_different_region": 0.0016634960001056243, + "tests/aws/services/resource_groups/test_resource_groups.py::TestResourceGroups::test_resource_groups_tag_query": 0.0018483210001249972, + "tests/aws/services/resource_groups/test_resource_groups.py::TestResourceGroups::test_resource_type_filters": 0.0016722530000379265, + "tests/aws/services/resource_groups/test_resource_groups.py::TestResourceGroups::test_search_resources": 0.0016380289999915476, + "tests/aws/services/resourcegroupstaggingapi/test_rgsa.py::TestRGSAIntegrations::test_get_resources": 1.50012173000016, + "tests/aws/services/route53/test_route53.py::TestRoute53::test_associate_vpc_with_hosted_zone": 0.34709645600003114, + "tests/aws/services/route53/test_route53.py::TestRoute53::test_create_hosted_zone": 0.5788926570000967, + "tests/aws/services/route53/test_route53.py::TestRoute53::test_create_hosted_zone_in_non_existent_vpc": 0.2219548369999984, + "tests/aws/services/route53/test_route53.py::TestRoute53::test_create_private_hosted_zone": 0.6881752939999615, + "tests/aws/services/route53/test_route53.py::TestRoute53::test_crud_health_check": 0.1219182910000427, + "tests/aws/services/route53/test_route53.py::TestRoute53::test_reusable_delegation_sets": 0.12226797400012401, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_associate_and_disassociate_resolver_rule": 0.5049689869999838, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_create_resolver_endpoint[INBOUND-5]": 0.7740201259999822, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_create_resolver_endpoint[OUTBOUND-10]": 0.29455660999985867, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_create_resolver_query_log_config": 0.2807152430000315, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_create_resolver_rule": 0.3883273060000647, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_create_resolver_rule_with_invalid_direction": 0.3157731880000938, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_delete_non_existent_resolver_endpoint": 0.08848305600008644, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_delete_non_existent_resolver_query_log_config": 0.16200261599999521, + 
"tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_delete_non_existent_resolver_rule": 0.0888068189999558, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_delete_resolver_endpoint": 0.29264798099995915, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_disassociate_non_existent_association": 0.0930443509998895, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_list_firewall_domain_lists": 0.18944751499998347, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_list_firewall_rules": 0.3201784730000554, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_list_firewall_rules_for_empty_rule_group": 0.10579139300000406, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_list_firewall_rules_for_missing_rule_group": 0.16260305999992397, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_multipe_create_resolver_rule": 0.45431946400003653, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_multiple_create_resolver_endpoint_with_same_req_id": 0.30327258499983145, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_route53resolver_bad_create_endpoint_security_groups": 0.19378388799998447, + "tests/aws/services/route53resolver/test_route53resolver.py::TestRoute53Resolver::test_update_resolver_endpoint": 0.3084415219999528, + "tests/aws/services/s3/test_s3.py::TestS3::test_access_bucket_different_region": 0.0017885900000464972, + "tests/aws/services/s3/test_s3.py::TestS3::test_bucket_availability": 0.03205274500010091, + "tests/aws/services/s3/test_s3.py::TestS3::test_bucket_does_not_exist": 0.44705748100000164, + "tests/aws/services/s3/test_s3.py::TestS3::test_bucket_exists": 0.24444104999997762, + "tests/aws/services/s3/test_s3.py::TestS3::test_bucket_name_with_dots": 0.5746830500000897, + "tests/aws/services/s3/test_s3.py::TestS3::test_bucket_operation_between_regions": 0.47619582000004357, + "tests/aws/services/s3/test_s3.py::TestS3::test_complete_multipart_parts_order": 0.4796090819999108, + "tests/aws/services/s3/test_s3.py::TestS3::test_copy_in_place_with_bucket_encryption": 0.13717745700012074, + "tests/aws/services/s3/test_s3.py::TestS3::test_copy_object_kms": 0.6655479559999549, + "tests/aws/services/s3/test_s3.py::TestS3::test_copy_object_special_character": 0.6620787100001735, + "tests/aws/services/s3/test_s3.py::TestS3::test_copy_object_special_character_plus_for_space": 0.09397022600012406, + "tests/aws/services/s3/test_s3.py::TestS3::test_create_bucket_head_bucket": 0.6826085360000889, + "tests/aws/services/s3/test_s3.py::TestS3::test_create_bucket_via_host_name": 0.03776567900013106, + "tests/aws/services/s3/test_s3.py::TestS3::test_create_bucket_with_existing_name": 0.4453400769999689, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_bucket_no_such_bucket": 0.01810665500011055, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_bucket_policy": 0.09701827899993987, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_bucket_policy_expected_bucket_owner": 0.10726119300011305, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_bucket_with_content": 0.7516324309999618, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_keys_in_versioned_bucket": 0.5457367810000733, + 
"tests/aws/services/s3/test_s3.py::TestS3::test_delete_non_existing_keys": 0.07811742399997001, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_non_existing_keys_in_non_existing_bucket": 0.02090906199998699, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_non_existing_keys_quiet": 0.07687170800011245, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_object_tagging": 0.10821172699979797, + "tests/aws/services/s3/test_s3.py::TestS3::test_delete_objects_encoding": 0.10816376599984778, + "tests/aws/services/s3/test_s3.py::TestS3::test_different_location_constraint": 0.6071483189999753, + "tests/aws/services/s3/test_s3.py::TestS3::test_download_fileobj_multiple_range_requests": 1.0801981570000407, + "tests/aws/services/s3/test_s3.py::TestS3::test_empty_bucket_fixture": 0.13972666800009392, + "tests/aws/services/s3/test_s3.py::TestS3::test_etag_on_get_object_call": 0.4767460140001276, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_bucket_notification_configuration_no_such_bucket": 0.01908925900011127, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_bucket_policy": 0.11971104799999921, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_bucket_policy_invalid_account_id[0000000000020]": 0.06827678699994522, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_bucket_policy_invalid_account_id[0000]": 0.06676561099993705, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_bucket_policy_invalid_account_id[aa000000000$]": 0.06537631200001215, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_bucket_policy_invalid_account_id[abcd]": 0.06618240599993896, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_bucket_versioning_order": 0.5216435680000586, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_after_deleted_in_versioned_bucket": 0.1191548709999779, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_attributes": 0.3090210720000641, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_attributes_versioned": 0.5129822259999628, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_attributes_with_space": 0.0944196269999793, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_content_length_with_virtual_host[False]": 0.09177258700003676, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_content_length_with_virtual_host[True]": 0.09031660799996644, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_no_such_bucket": 0.02095023399999718, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_part": 0.2272735130000001, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_object_with_anon_credentials": 0.49376050199987276, + "tests/aws/services/s3/test_s3.py::TestS3::test_get_range_object_headers": 0.08774943899993559, + "tests/aws/services/s3/test_s3.py::TestS3::test_head_object_fields": 0.10001195899997128, + "tests/aws/services/s3/test_s3.py::TestS3::test_invalid_range_error": 0.08723202399994534, + "tests/aws/services/s3/test_s3.py::TestS3::test_metadata_header_character_decoding": 0.4546980440001107, + "tests/aws/services/s3/test_s3.py::TestS3::test_multipart_and_list_parts": 0.18064903300000879, + "tests/aws/services/s3/test_s3.py::TestS3::test_multipart_complete_multipart_too_small": 0.10435315200004425, + "tests/aws/services/s3/test_s3.py::TestS3::test_multipart_complete_multipart_wrong_part": 0.09621364500003438, + "tests/aws/services/s3/test_s3.py::TestS3::test_multipart_copy_object_etag": 0.13136524900005497, + 
"tests/aws/services/s3/test_s3.py::TestS3::test_multipart_no_such_upload": 0.08644713399996817, + "tests/aws/services/s3/test_s3.py::TestS3::test_multipart_overwrite_key": 0.11739243499994245, + "tests/aws/services/s3/test_s3.py::TestS3::test_object_with_slashes_in_key[False]": 0.1832219680001117, + "tests/aws/services/s3/test_s3.py::TestS3::test_object_with_slashes_in_key[True]": 0.1809875760000068, + "tests/aws/services/s3/test_s3.py::TestS3::test_precondition_failed_error": 0.09724338800003807, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_and_get_object_with_content_language_disposition": 0.9336989019999464, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_and_get_object_with_hash_prefix": 0.46208280500002274, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_and_get_object_with_utf8_key": 0.46086802199988597, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_bucket_inventory_config_order": 0.15280903799998669, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_bucket_policy": 0.08965071400007218, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_bucket_policy_expected_bucket_owner": 0.2559648050001897, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_bucket_policy_invalid_account_id[0000000000020]": 0.0667482100000143, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_bucket_policy_invalid_account_id[0000]": 0.06896639400008553, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_bucket_policy_invalid_account_id[aa000000000$]": 0.067638653999893, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_bucket_policy_invalid_account_id[abcd]": 0.06894104299999526, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_single_character_trailing_slash": 0.14746239099997638, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_special_character[a/%F0%9F%98%80/]": 0.4838840310000023, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_special_character[file%2Fname]": 0.4727280060000112, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_special_character[test key//]": 0.5028983299998799, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_special_character[test key/]": 0.48334355099984805, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_special_character[test%123/]": 0.4697144519999483, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_special_character[test%123]": 0.4677152219999243, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_special_character[test%percent]": 0.4765962839999247, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_get_object_special_character[test@key/]": 1.3728039369999578, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_acl_on_delete_marker": 0.5369344969999474, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_chunked_checksum": 0.09996022099983293, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_chunked_content_encoding": 0.11309315300013623, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_chunked_newlines": 0.08243060700010574, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_chunked_newlines_no_sig": 0.0962269799998694, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_chunked_newlines_no_sig_empty_body": 0.08518394800012175, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_chunked_newlines_with_trailing_checksum": 0.1069947860000866, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class[DEEP_ARCHIVE-False]": 
0.09836518300005537, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class[GLACIER-False]": 0.09812508599998182, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class[GLACIER_IR-True]": 0.0975631340000973, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class[INTELLIGENT_TIERING-True]": 0.10281046299996888, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class[ONEZONE_IA-True]": 0.09732538700006899, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class[REDUCED_REDUNDANCY-True]": 0.09938520699995479, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class[STANDARD-True]": 0.10212461599996914, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class[STANDARD_IA-True]": 0.10282870100002128, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_storage_class_outposts": 0.07947890699995241, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_tagging_empty_list": 0.12388491099989096, + "tests/aws/services/s3/test_s3.py::TestS3::test_put_object_with_md5_and_chunk_signature": 0.08092732300008265, + "tests/aws/services/s3/test_s3.py::TestS3::test_putobject_with_multiple_keys": 0.45434921499997927, + "tests/aws/services/s3/test_s3.py::TestS3::test_range_header_body_length": 0.10391930600007981, + "tests/aws/services/s3/test_s3.py::TestS3::test_range_key_not_exists": 0.06891879199986306, + "tests/aws/services/s3/test_s3.py::TestS3::test_region_header_exists_outside_us_east_1": 0.5616825910000216, + "tests/aws/services/s3/test_s3.py::TestS3::test_response_structure": 0.16138114099999257, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_analytics_configurations": 0.21578360099999827, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_batch_delete_objects": 0.49507477700001346, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_batch_delete_objects_using_requests_with_acl": 0.001825118999931874, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_batch_delete_public_objects_using_requests": 0.4803460170001017, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_bucket_acl": 0.15159280399996078, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_bucket_acl_exceptions": 0.192852190999929, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_content_type_and_metadata": 0.5315559199999598, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_metadata_directive_copy": 0.4851788639999768, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_metadata_replace": 0.4858983870001339, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_in_place": 0.5423984249999876, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_in_place_metadata_directive": 1.4504762060000758, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_in_place_storage_class": 0.5024884460000294, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_in_place_suspended_only": 0.57903971799999, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_in_place_versioned": 0.6340627789999189, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_in_place_website_redirect_location": 0.48164891599992643, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_in_place_with_encryption": 0.7894213350000427, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_preconditions": 3.537738044999969, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_storage_class": 
0.5094772470000635, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_checksum[CRC32C]": 0.49900077400002374, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_checksum[CRC32]": 0.4921227249999447, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_checksum[CRC64NVME]": 0.49542128799987495, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_checksum[SHA1]": 0.499934082999971, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_checksum[SHA256]": 0.4927889440000399, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_default_checksum[CRC32C]": 0.5072066860000177, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_default_checksum[CRC32]": 0.5032877149999422, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_default_checksum[CRC64NVME]": 0.5054753309999569, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_default_checksum[SHA1]": 0.5150518080000666, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_with_default_checksum[SHA256]": 0.5232877370000324, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_object_wrong_format": 0.4306888549999712, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_tagging_directive[COPY]": 0.4961443169999029, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_tagging_directive[None]": 0.5058024989998557, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_tagging_directive[REPLACE]": 0.49722797100002936, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_tagging_directive_versioned[COPY]": 0.5921784089999846, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_tagging_directive_versioned[None]": 0.6195109850000335, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_copy_tagging_directive_versioned[REPLACE]": 0.5964300869998169, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_delete_object_with_version_id": 0.5218227929999557, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_delete_objects_trailing_slash": 0.07247084599998743, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_download_object_with_lambda": 4.239480436999884, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_get_object_header_overrides": 0.09062230200004251, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_get_object_headers": 0.15785621300005914, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_get_object_preconditions[get_object]": 3.563771599999882, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_get_object_preconditions[head_object]": 3.5464970869999206, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_hostname_with_subdomain": 0.018562334999955965, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_intelligent_tier_config": 0.15776487200014344, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_invalid_content_md5": 11.013065570000094, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_inventory_report_crud": 0.17120473399995717, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_lambda_integration": 10.460847323000053, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_multipart_upload_acls": 0.20364180199999282, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_multipart_upload_sse": 1.088049592999937, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_object_acl": 0.16983092400005262, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_object_acl_exceptions": 0.2300909350000211, + 
"tests/aws/services/s3/test_s3.py::TestS3::test_s3_object_expiry": 3.5588915170000064, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_put_inventory_report_exceptions": 0.16006295700003648, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_put_more_than_1000_items": 13.04194113699998, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_put_object_versioned": 0.6600110919999906, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_raw_request_routing": 0.10314055799983635, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_request_payer": 0.078118475999986, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_request_payer_exceptions": 0.07857127099998706, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_sse_bucket_key_default": 0.23111654399997406, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_sse_default_kms_key": 0.001837382999951842, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_sse_validate_kms_key": 0.270988513999896, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_sse_validate_kms_key_state": 0.30048308999982964, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_timestamp_precision": 0.10385880299998007, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_upload_download_gzip": 0.09252132699998583, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_uppercase_bucket_name": 1.3087115099999664, + "tests/aws/services/s3/test_s3.py::TestS3::test_s3_uppercase_key_names": 0.09707521400002861, + "tests/aws/services/s3/test_s3.py::TestS3::test_set_external_hostname": 0.13612426400004551, + "tests/aws/services/s3/test_s3.py::TestS3::test_upload_big_file": 0.6113825510001334, + "tests/aws/services/s3/test_s3.py::TestS3::test_upload_file_multipart": 0.5772937980000279, + "tests/aws/services/s3/test_s3.py::TestS3::test_upload_file_with_xml_preamble": 0.4548747510001476, + "tests/aws/services/s3/test_s3.py::TestS3::test_upload_part_chunked_cancelled_valid_etag": 0.14080931500006955, + "tests/aws/services/s3/test_s3.py::TestS3::test_upload_part_chunked_newlines_valid_etag": 0.09833168499994827, + "tests/aws/services/s3/test_s3.py::TestS3::test_url_encoded_key[False]": 0.14192265999997744, + "tests/aws/services/s3/test_s3.py::TestS3::test_url_encoded_key[True]": 0.14373728499992922, + "tests/aws/services/s3/test_s3.py::TestS3::test_virtual_host_proxy_does_not_decode_gzip": 0.09866926900008366, + "tests/aws/services/s3/test_s3.py::TestS3::test_virtual_host_proxying_headers": 0.09037830300007954, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_bucket_lifecycle_configuration_date": 0.07618389100002787, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_bucket_lifecycle_configuration_object_expiry": 0.11599016200011647, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_bucket_lifecycle_configuration_object_expiry_versioned": 0.16947959900005571, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_bucket_lifecycle_multiple_rules": 0.1213936049999802, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_bucket_lifecycle_object_size_rules": 0.12056028899996818, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_bucket_lifecycle_tag_rules": 0.18915143100002751, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_delete_bucket_lifecycle_configuration": 0.10916020700005902, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_delete_lifecycle_configuration_on_bucket_deletion": 0.11382781999998315, + 
"tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_lifecycle_expired_object_delete_marker": 0.10827164300008008, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_object_expiry_after_bucket_lifecycle_configuration": 0.12880666799992468, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_put_bucket_lifecycle_conf_exc": 0.13669959200012727, + "tests/aws/services/s3/test_s3.py::TestS3BucketLifecycle::test_s3_transition_default_minimum_object_size": 0.12029828799995812, + "tests/aws/services/s3/test_s3.py::TestS3BucketLogging::test_put_bucket_logging": 0.14388900799997373, + "tests/aws/services/s3/test_s3.py::TestS3BucketLogging::test_put_bucket_logging_accept_wrong_grants": 0.12971892200005186, + "tests/aws/services/s3/test_s3.py::TestS3BucketLogging::test_put_bucket_logging_cross_locations": 0.16366970600006425, + "tests/aws/services/s3/test_s3.py::TestS3BucketLogging::test_put_bucket_logging_wrong_target": 0.12043380499994782, + "tests/aws/services/s3/test_s3.py::TestS3BucketReplication::test_replication_config": 0.7409251609999501, + "tests/aws/services/s3/test_s3.py::TestS3BucketReplication::test_replication_config_without_filter": 0.7165504760000658, + "tests/aws/services/s3/test_s3.py::TestS3DeepArchive::test_s3_get_deep_archive_object_restore": 0.5476337010001089, + "tests/aws/services/s3/test_s3.py::TestS3DeepArchive::test_storage_class_deep_archive": 0.1623936920000233, + "tests/aws/services/s3/test_s3.py::TestS3MultiAccounts::test_cross_account_access": 0.1337059480000562, + "tests/aws/services/s3/test_s3.py::TestS3MultiAccounts::test_cross_account_copy_object": 0.09151060600004257, + "tests/aws/services/s3/test_s3.py::TestS3MultiAccounts::test_shared_bucket_namespace": 0.06617687099992509, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_composite[CRC32C]": 0.46805822700002864, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_composite[CRC32]": 0.4787641639998128, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_composite[SHA1]": 0.4886673530002099, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_composite[SHA256]": 0.4896884859999773, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_default": 0.21399080900005174, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_full_object[CRC32C]": 0.615525101999765, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_full_object[CRC32]": 0.5632634639998741, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_full_object[CRC64NVME]": 0.5750817720002033, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_complete_multipart_parts_checksum_full_object_default": 0.12975049599981503, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[COMPOSITE-CRC32C]": 0.0696272879999924, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[COMPOSITE-CRC32]": 0.07159759199998916, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[COMPOSITE-CRC64NVME]": 0.06798300800005563, + 
"tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[COMPOSITE-SHA1]": 0.07135627999991812, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[COMPOSITE-SHA256]": 0.0693396889998894, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[FULL_OBJECT-CRC32C]": 0.06840707900005327, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[FULL_OBJECT-CRC32]": 0.06763015899991842, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[FULL_OBJECT-CRC64NVME]": 0.07023146500000621, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[FULL_OBJECT-SHA1]": 0.06831016800015277, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_compatibility[FULL_OBJECT-SHA256]": 0.07014687900004901, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_default_for_checksum[CRC32C]": 0.06785457199998746, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_default_for_checksum[CRC32]": 0.06678958399993462, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_default_for_checksum[CRC64NVME]": 0.06769007000002603, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_default_for_checksum[SHA1]": 0.06769925699995838, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_checksum_type_default_for_checksum[SHA256]": 0.06704842599970107, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_parts_checksum_exceptions_composite": 9.24093647399991, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_parts_checksum_exceptions_full_object": 33.146282508999775, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_size_validation": 0.12038114899996799, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_upload_part_checksum_exception[CRC32C]": 6.379795164999905, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_upload_part_checksum_exception[CRC32]": 8.60838399599993, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_upload_part_checksum_exception[CRC64NVME]": 7.317066115999751, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_upload_part_checksum_exception[SHA1]": 9.91704858800017, + "tests/aws/services/s3/test_s3.py::TestS3MultipartUploadChecksum::test_multipart_upload_part_checksum_exception[SHA256]": 6.125142803000017, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockLegalHold::test_delete_locked_object": 0.12091738799995255, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockLegalHold::test_put_get_object_legal_hold": 0.13208817599991107, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockLegalHold::test_put_object_legal_hold_exc": 0.16399912099984704, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockLegalHold::test_put_object_with_legal_hold": 0.10528819499995734, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockLegalHold::test_s3_copy_object_legal_hold": 0.5067295910000666, + 
"tests/aws/services/s3/test_s3.py::TestS3ObjectLockLegalHold::test_s3_legal_hold_lock_versioned": 0.5460817800000086, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockRetention::test_bucket_config_default_retention": 0.1311676789998728, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockRetention::test_object_lock_delete_markers": 0.12394048599981033, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockRetention::test_object_lock_extend_duration": 0.12435282200010533, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockRetention::test_s3_copy_object_retention_lock": 0.5126640740000994, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockRetention::test_s3_object_retention": 6.168016758999897, + "tests/aws/services/s3/test_s3.py::TestS3ObjectLockRetention::test_s3_object_retention_exc": 0.2423591750000469, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_default_checksum": 0.08904902199992648, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_policy_casing[s3]": 0.10192880900012824, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_policy_casing[s3v4]": 0.09814932699998735, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_policy_conditions_validation_eq": 0.33128157300006933, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_policy_conditions_validation_starts_with": 0.29175308399999267, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_policy_validation_size": 0.22562913700005538, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_file_as_string": 0.33509955399995306, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_files": 0.08985079699994003, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_metadata": 0.12602507100007188, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_storage_class": 0.33992777000014485, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_tags[invalid]": 1.1073294839999335, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_tags[list]": 0.16549929199982216, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_tags[notxml]": 0.18885971700001392, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_tags[single]": 0.16770689499992386, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_object_with_wrong_content_type": 0.1402285159999792, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_request_expires": 3.149206551000134, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_request_malformed_policy[s3]": 0.15236783999989711, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_request_malformed_policy[s3v4]": 0.1538662599999725, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_request_missing_fields[s3]": 0.16519374599988623, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_request_missing_fields[s3v4]": 0.16635775300005662, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_request_missing_signature[s3]": 0.15178323199995702, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_post_request_missing_signature[s3v4]": 0.15794395899990832, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_presigned_post_with_different_user_credentials": 
0.26074631699998463, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_s3_presigned_post_success_action_redirect": 0.09627915099997608, + "tests/aws/services/s3/test_s3.py::TestS3PresignedPost::test_s3_presigned_post_success_action_status_201_response": 0.08327630999997382, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_delete_has_empty_content_length_header": 0.09684576099994047, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_get_object_ignores_request_body": 0.08847620200003803, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_get_request_expires_ignored_if_validation_disabled": 3.108592306000105, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_head_has_correct_content_length_header": 0.0907802570000058, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_pre_signed_url_forward_slash_bucket": 0.0960320019999017, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_pre_signed_url_if_match": 0.09681477000003724, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_pre_signed_url_if_none_match": 0.09428241900002376, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presign_check_signature_validation_for_port_permutation": 0.10228652200009947, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presign_with_additional_query_params": 0.11112677200003418, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_double_encoded_credentials": 0.17126531899987185, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication[s3-False]": 0.2174588280000762, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication[s3-True]": 0.21783440299986978, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication[s3v4-False]": 0.22733316400012882, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication[s3v4-True]": 0.22959002499987946, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication_expired[s3-False]": 2.182121216999917, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication_expired[s3-True]": 2.1754841810000016, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication_expired[s3v4-False]": 2.1739774490000627, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication_expired[s3v4-True]": 2.1767032400000517, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication_multi_part[s3-False]": 0.11350193299983857, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication_multi_part[s3-True]": 0.11433681100015747, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication_multi_part[s3v4-False]": 0.11663568599999508, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_signature_authentication_multi_part[s3v4-True]": 0.1150663319999694, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_v4_signed_headers_in_qs": 1.9102292120002176, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_v4_x_amz_in_qs": 8.18852579199995, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_with_different_user_credentials": 
0.2505322410000872, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_presigned_url_with_session_token": 0.11057153599983849, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_object": 0.4606241810000711, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_object_with_md5_and_chunk_signature_bad_headers[s3-False]": 0.093007512999975, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_object_with_md5_and_chunk_signature_bad_headers[s3-True]": 0.16838667399997576, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_object_with_md5_and_chunk_signature_bad_headers[s3v4-False]": 0.08986168700005237, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_object_with_md5_and_chunk_signature_bad_headers[s3v4-True]": 0.1655122999999321, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_url_metadata_with_sig_s3[False]": 0.5597614089999752, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_url_metadata_with_sig_s3[True]": 0.5522727649999979, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_url_metadata_with_sig_s3v4[False]": 0.5682459640000843, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_put_url_metadata_with_sig_s3v4[True]": 0.5819345799999383, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_copy_md5": 0.11185535000004165, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_get_response_case_sensitive_headers": 0.08252604999995583, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_get_response_content_type_same_as_upload_and_range": 0.09227117099987936, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_get_response_default_content_type": 0.08302412200009712, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_get_response_header_overrides[s3]": 0.09426893100010147, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_get_response_header_overrides[s3v4]": 0.09401309399993352, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_ignored_special_headers": 0.1209086409999145, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_presign_url_encoding[s3]": 0.09356509799999913, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_presign_url_encoding[s3v4]": 0.09580009100000098, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_presigned_url_expired[s3]": 3.1917944769999167, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_presigned_url_expired[s3v4]": 3.195220457000005, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_put_presigned_url_missing_sig_param[s3]": 0.17348154300009355, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_put_presigned_url_missing_sig_param[s3v4]": 0.17201115899990782, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_put_presigned_url_same_header_and_qs_parameter": 0.18312040699993304, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_put_presigned_url_with_different_headers[s3]": 1.310644095999919, + "tests/aws/services/s3/test_s3.py::TestS3PresignedUrl::test_s3_put_presigned_url_with_different_headers[s3v4]": 0.21134955900004115, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_put_object_checksum[CRC32C]": 5.201910881000003, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_put_object_checksum[CRC32]": 5.305968125000049, + 
"tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_put_object_checksum[CRC64NVME]": 10.549652295999977, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_put_object_checksum[SHA1]": 14.021193208999875, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_put_object_checksum[SHA256]": 12.465482541000029, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_checksum_no_algorithm": 0.1168862749998425, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_checksum_no_automatic_sdk_calculation": 0.25327124400018874, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_checksum_with_content_encoding": 0.10944395600017742, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_get_object_checksum[CRC32C]": 0.14029297599995516, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_get_object_checksum[CRC32]": 0.12116925899999842, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_get_object_checksum[CRC64NVME]": 0.13496736899992356, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_get_object_checksum[None]": 0.1355099830002473, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_get_object_checksum[SHA1]": 0.14435870100010106, + "tests/aws/services/s3/test_s3.py::TestS3PutObjectChecksum::test_s3_get_object_checksum[SHA256]": 0.12897446600004514, + "tests/aws/services/s3/test_s3.py::TestS3Routing::test_access_favicon_via_aws_endpoints[s3.amazonaws.com-False]": 0.09053808100009064, + "tests/aws/services/s3/test_s3.py::TestS3Routing::test_access_favicon_via_aws_endpoints[s3.amazonaws.com-True]": 0.09105075800005125, + "tests/aws/services/s3/test_s3.py::TestS3Routing::test_access_favicon_via_aws_endpoints[s3.us-west-2.amazonaws.com-False]": 0.09184225400008472, + "tests/aws/services/s3/test_s3.py::TestS3Routing::test_access_favicon_via_aws_endpoints[s3.us-west-2.amazonaws.com-True]": 0.09116254099990329, + "tests/aws/services/s3/test_s3.py::TestS3SSECEncryption::test_copy_object_with_sse_c": 0.22637970899995707, + "tests/aws/services/s3/test_s3.py::TestS3SSECEncryption::test_multipart_upload_sse_c": 0.45320781199995963, + "tests/aws/services/s3/test_s3.py::TestS3SSECEncryption::test_multipart_upload_sse_c_validation": 0.19378975800009357, + "tests/aws/services/s3/test_s3.py::TestS3SSECEncryption::test_object_retrieval_sse_c": 0.2522065909997764, + "tests/aws/services/s3/test_s3.py::TestS3SSECEncryption::test_put_object_default_checksum_with_sse_c": 0.19012682000004588, + "tests/aws/services/s3/test_s3.py::TestS3SSECEncryption::test_put_object_lifecycle_with_sse_c": 0.18114233699998294, + "tests/aws/services/s3/test_s3.py::TestS3SSECEncryption::test_put_object_validation_sse_c": 0.21026624899980106, + "tests/aws/services/s3/test_s3.py::TestS3SSECEncryption::test_sse_c_with_versioning": 0.2325339389999499, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_crud_website_configuration": 0.10675842099976762, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_object_website_redirect_location": 0.28853335500002686, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_routing_rules_conditions": 0.5538015839999844, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_routing_rules_empty_replace_prefix": 0.4218117580001035, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_routing_rules_order": 0.24566422099996998, + 
"tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_routing_rules_redirects": 0.15418536399999994, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_s3_static_website_hosting": 0.5326743449999185, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_s3_static_website_index": 0.14098655499981305, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_validate_website_configuration": 0.21079172700001436, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_website_hosting_404": 0.23581469900011598, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_website_hosting_http_methods": 0.13805479300015122, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_website_hosting_index_lookup": 0.26538665000009587, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_website_hosting_no_such_website": 0.12971963100005723, + "tests/aws/services/s3/test_s3.py::TestS3StaticWebsiteHosting::test_website_hosting_redirect_all": 0.3008390950000148, + "tests/aws/services/s3/test_s3.py::TestS3TerraformRawRequests::test_terraform_request_sequence": 0.056355477999886716, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketAccelerateConfiguration::test_bucket_acceleration_configuration_crud": 0.09775074600020162, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketAccelerateConfiguration::test_bucket_acceleration_configuration_exc": 0.13171436600009656, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketCRUD::test_delete_bucket_with_objects": 0.4439311880000787, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketCRUD::test_delete_versioned_bucket_with_objects": 0.47611263499993584, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketEncryption::test_s3_bucket_encryption_sse_kms": 0.22821970900008637, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketEncryption::test_s3_bucket_encryption_sse_kms_aws_managed_key": 0.27376531200025056, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketEncryption::test_s3_bucket_encryption_sse_s3": 0.10672454899963668, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketEncryption::test_s3_default_bucket_encryption": 0.08906281300005503, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketEncryption::test_s3_default_bucket_encryption_exc": 0.48615996800003813, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketObjectTagging::test_bucket_tagging_crud": 0.1398659509998197, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketObjectTagging::test_bucket_tagging_exc": 0.08230643900014911, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketObjectTagging::test_object_tagging_crud": 0.17728362899993044, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketObjectTagging::test_object_tagging_exc": 0.22738538999988123, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketObjectTagging::test_object_tagging_versioned": 0.22540512499972465, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketObjectTagging::test_object_tags_delete_or_overwrite_object": 0.13468218399998477, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketObjectTagging::test_put_object_with_tags": 0.2008917159998873, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketObjectTagging::test_tagging_validation": 0.18187249199968392, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketOwnershipControls::test_bucket_ownership_controls_exc": 0.10597805799989146, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketOwnershipControls::test_crud_bucket_ownership_controls": 
0.15845459900015157, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketPolicy::test_bucket_policy_crud": 0.11764532200004396, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketPolicy::test_bucket_policy_exc": 0.09594770200010316, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketVersioning::test_bucket_versioning_crud": 0.15372339300006388, + "tests/aws/services/s3/test_s3_api.py::TestS3BucketVersioning::test_object_version_id_format": 0.09473242800027037, + "tests/aws/services/s3/test_s3_api.py::TestS3Multipart::test_upload_part_copy_no_copy_source_range": 0.18377705399984734, + "tests/aws/services/s3/test_s3_api.py::TestS3Multipart::test_upload_part_copy_range": 0.3336242160000893, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_delete_object": 0.09109001700016961, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_delete_object_on_suspended_bucket": 0.5858443820000048, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_delete_object_versioned": 0.5704800819999036, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_delete_objects": 0.08501625799999601, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_delete_objects_versioned": 0.4963833659996908, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_get_object_range": 0.2887471030001052, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_get_object_with_version_unversioned_bucket": 0.46709645299984004, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_list_object_versions_order_unversioned": 0.49798668100015675, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectCRUD::test_put_object_on_suspended_bucket": 0.6245519540002533, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectLock::test_delete_object_with_no_locking": 0.10015097800010153, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectLock::test_disable_versioning_on_locked_bucket": 0.06885807800017574, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectLock::test_get_object_lock_configuration_exc": 0.07211001099994974, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectLock::test_get_put_object_lock_configuration": 0.0927555130001565, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectLock::test_put_object_lock_configuration_exc": 0.10475707799992051, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectLock::test_put_object_lock_configuration_on_existing_bucket": 0.11324670199974207, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_multipart_if_match_etag": 0.145411164999814, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_multipart_if_match_with_delete": 0.1377557609998803, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_multipart_if_match_with_put": 0.1574362670003211, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_multipart_if_match_with_put_identical": 0.1489100280000457, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_multipart_if_none_match_with_delete": 0.14991892500006543, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_multipart_if_none_match_with_put": 0.10574941100003343, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_put_object_if_match": 0.1260302099999535, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_put_object_if_match_and_if_none_match_validation": 0.06800320200022725, + 
"tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_put_object_if_match_validation": 0.08781395200003317, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_put_object_if_match_versioned_bucket": 0.16666263499996603, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_put_object_if_none_match": 0.10531259399999726, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_put_object_if_none_match_validation": 0.08690508699987731, + "tests/aws/services/s3/test_s3_api.py::TestS3ObjectWritePrecondition::test_put_object_if_none_match_versioned_bucket": 0.13878056299995478, + "tests/aws/services/s3/test_s3_api.py::TestS3PublicAccessBlock::test_crud_public_access_block": 0.10442721799995525, + "tests/aws/services/s3/test_s3_concurrency.py::TestParallelBucketCreation::test_parallel_bucket_creation": 0.4091629090000879, + "tests/aws/services/s3/test_s3_concurrency.py::TestParallelBucketCreation::test_parallel_object_creation_and_listing": 0.3298581260000901, + "tests/aws/services/s3/test_s3_concurrency.py::TestParallelBucketCreation::test_parallel_object_creation_and_read": 1.4871105049999187, + "tests/aws/services/s3/test_s3_concurrency.py::TestParallelBucketCreation::test_parallel_object_read_range": 2.3781634839997423, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_expose_headers": 0.26299601999994593, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_http_get_no_config": 0.11059570299994448, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_http_options_no_config": 0.19529026800000793, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_http_options_non_existent_bucket": 0.16047766800011232, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_http_options_non_existent_bucket_ls_allowed": 0.07630728799995268, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_list_buckets": 0.08087982799997917, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_match_headers": 0.7954543320001903, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_match_methods": 0.791354389000162, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_match_origins": 0.6468330230002266, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_no_config_localstack_allowed": 0.10572714799968708, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_options_fails_partial_origin": 0.46664108200002374, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_cors_options_match_partial_origin": 0.16238156700023865, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_delete_cors": 0.18879966000008608, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_get_cors": 0.16890101500007404, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_put_cors": 0.1612013199996909, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_put_cors_default_values": 0.49030464499992377, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_put_cors_empty_origin": 0.16651709100005974, + "tests/aws/services/s3/test_s3_cors.py::TestS3Cors::test_put_cors_invalid_rules": 0.16392229900020538, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListBuckets::test_list_buckets_by_bucket_region": 0.5753617619998295, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListBuckets::test_list_buckets_by_prefix_with_case_sensitivity": 0.4877304020003521, + 
"tests/aws/services/s3/test_s3_list_operations.py::TestS3ListBuckets::test_list_buckets_when_continuation_token_is_empty": 0.4774720579998757, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListBuckets::test_list_buckets_with_continuation_token": 0.5392010909999954, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListBuckets::test_list_buckets_with_max_buckets": 0.4702490340002896, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListMultipartUploads::test_list_multipart_uploads_marker_common_prefixes": 0.500815153000076, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListMultipartUploads::test_list_multiparts_next_marker": 0.6280706240002019, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListMultipartUploads::test_list_multiparts_with_prefix_and_delimiter": 0.5066884009997921, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListMultipartUploads::test_s3_list_multiparts_timestamp_precision": 0.07271794400003273, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectVersions::test_list_object_versions_pagination_common_prefixes": 0.579160321000245, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectVersions::test_list_objects_versions_markers": 0.6866955700002109, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectVersions::test_list_objects_versions_with_prefix": 0.6085627460001888, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectVersions::test_list_objects_versions_with_prefix_only_and_pagination": 0.6143905960002485, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectVersions::test_list_objects_versions_with_prefix_only_and_pagination_many_versions": 1.0610067210000125, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectVersions::test_s3_list_object_versions_timestamp_precision": 0.10010286999977325, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjects::test_list_objects_marker_common_prefixes": 0.549131609999904, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjects::test_list_objects_next_marker": 0.5247440110001662, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjects::test_list_objects_with_prefix[%2F]": 0.4620072339998842, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjects::test_list_objects_with_prefix[/]": 0.4578297090001797, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjects::test_list_objects_with_prefix[]": 0.4594256899999891, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjects::test_s3_list_objects_empty_marker": 0.43054569100013396, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjects::test_s3_list_objects_timestamp_precision[ListObjectsV2]": 0.08514955700024984, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjects::test_s3_list_objects_timestamp_precision[ListObjects]": 0.08230598800014377, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectsV2::test_list_objects_v2_continuation_common_prefixes": 0.56343386799972, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectsV2::test_list_objects_v2_continuation_start_after": 0.6733004320001328, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectsV2::test_list_objects_v2_with_prefix": 0.5296367320001991, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListObjectsV2::test_list_objects_v2_with_prefix_and_delimiter": 0.5071900099997038, + 
"tests/aws/services/s3/test_s3_list_operations.py::TestS3ListParts::test_list_parts_empty_part_number_marker": 0.10055853099993328, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListParts::test_list_parts_pagination": 0.13538326100024278, + "tests/aws/services/s3/test_s3_list_operations.py::TestS3ListParts::test_s3_list_parts_timestamp_precision": 0.08032150999997612, + "tests/aws/services/s3/test_s3_notifications_eventbridge.py::TestS3NotificationsToEventBridge::test_object_created_put": 1.850579843999867, + "tests/aws/services/s3/test_s3_notifications_eventbridge.py::TestS3NotificationsToEventBridge::test_object_created_put_in_different_region": 1.859392261000039, + "tests/aws/services/s3/test_s3_notifications_eventbridge.py::TestS3NotificationsToEventBridge::test_object_created_put_versioned": 5.227331998999944, + "tests/aws/services/s3/test_s3_notifications_eventbridge.py::TestS3NotificationsToEventBridge::test_object_put_acl": 2.344023051000022, + "tests/aws/services/s3/test_s3_notifications_eventbridge.py::TestS3NotificationsToEventBridge::test_restore_object": 1.193712018000042, + "tests/aws/services/s3/test_s3_notifications_lambda.py::TestS3NotificationsToLambda::test_create_object_by_presigned_request_via_dynamodb": 6.219591825999942, + "tests/aws/services/s3/test_s3_notifications_lambda.py::TestS3NotificationsToLambda::test_create_object_put_via_dynamodb": 4.834613696999895, + "tests/aws/services/s3/test_s3_notifications_lambda.py::TestS3NotificationsToLambda::test_invalid_lambda_arn": 0.44969479099995624, + "tests/aws/services/s3/test_s3_notifications_sns.py::TestS3NotificationsToSns::test_bucket_not_exist": 0.3844694500000969, + "tests/aws/services/s3/test_s3_notifications_sns.py::TestS3NotificationsToSns::test_bucket_notifications_with_filter": 1.654540071999918, + "tests/aws/services/s3/test_s3_notifications_sns.py::TestS3NotificationsToSns::test_invalid_topic_arn": 0.2610798559999239, + "tests/aws/services/s3/test_s3_notifications_sns.py::TestS3NotificationsToSns::test_object_created_put": 1.7721063370001957, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_bucket_notification_with_invalid_filter_rules": 0.26806965000014316, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_delete_objects": 0.8091075930003626, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_filter_rules_case_insensitive": 0.09599278599989702, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_invalid_sqs_arn": 0.40466162800021266, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_key_encoding": 0.6360358449999239, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_multiple_invalid_sqs_arns": 1.6823811089998344, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_notifications_with_filter": 0.7374661579999611, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_created_and_object_removed": 0.8377570879999894, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_created_complete_multipart_upload": 0.6632929200002309, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_created_copy": 0.6879901029999473, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_created_put": 0.7117907859999377, + 
"tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_created_put_versioned": 1.0795005669999682, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_created_put_with_presigned_url_upload": 0.9020984859996588, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_put_acl": 0.8210032889999184, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_tagging_delete_event": 0.6923809959998835, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_object_tagging_put_event": 0.675230984000109, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_restore_object": 0.8077008180000576, + "tests/aws/services/s3/test_s3_notifications_sqs.py::TestS3NotificationsToSQS::test_xray_header": 1.6257866280000144, + "tests/aws/services/s3control/test_s3control.py::TestLegacyS3Control::test_lifecycle_public_access_block": 0.2650822660002632, + "tests/aws/services/s3control/test_s3control.py::TestLegacyS3Control::test_public_access_block_validations": 0.030718367999497787, + "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_already_exists": 0.0016026309999688237, + "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_bucket_not_exists": 0.001582073000008677, + "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_lifecycle": 0.001585088999945583, + "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_name_validation": 0.0015785870000399882, + "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_pagination": 0.0015803010001036455, + "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_public_access_block_configuration": 0.0018258380000588659, + "tests/aws/services/s3control/test_s3control.py::TestS3ControlPublicAccessBlock::test_crud_public_access_block": 0.001616847999912352, + "tests/aws/services/s3control/test_s3control.py::TestS3ControlPublicAccessBlock::test_empty_public_access_block": 0.0015959299998939969, + "tests/aws/services/scheduler/test_scheduler.py::test_list_schedules": 0.06473654300020826, + "tests/aws/services/scheduler/test_scheduler.py::test_tag_resource": 0.03454401999988477, + "tests/aws/services/scheduler/test_scheduler.py::test_untag_resource": 0.029484995999837338, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[ rate(10 minutes)]": 0.014520168999979433, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[at(2021-12-31)]": 0.01406694300021627, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[at(2021-12-31T23:59:59Z)]": 0.014427985000111221, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[cron()]": 0.014644978999967861, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[cron(0 1 * * * *)]": 0.016722698999956265, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[cron(0 dummy ? 
* MON-FRI *)]": 0.014415511000152037, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[cron(7 20 * * NOT *)]": 0.014060159999871757, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[cron(71 8 1 * ? *)]": 0.014412186000072325, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[cron(INVALID)]": 0.014088298000160648, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[rate( 10 minutes )]": 0.014142964000257052, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[rate()]": 0.014129424999964613, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[rate(-10 minutes)]": 0.01426772699983303, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[rate(10 minutess)]": 0.014296861000048011, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[rate(10 seconds)]": 0.01541575000010198, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[rate(10 years)]": 0.01397083100027885, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[rate(10)]": 0.014251185999910376, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_invalid_schedule_expression[rate(foo minutes)]": 0.014347674000191546, + "tests/aws/services/scheduler/test_scheduler.py::tests_create_schedule_with_valid_schedule_expression": 0.17735617000016646, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_call_lists_secrets_multiple_times": 0.05588091200024792, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_call_lists_secrets_multiple_times_snapshots": 0.0017102839999552089, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_can_recreate_delete_secret": 0.05320144200004506, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_create_and_update_secret[Valid/_+=.@-Name-a1b2]": 0.08582245899992813, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_create_and_update_secret[Valid/_+=.@-Name-a1b2c3-]": 0.08400914399999238, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_create_and_update_secret[Valid/_+=.@-Name]": 0.08342649799988067, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_create_and_update_secret[s-c64bdc03]": 0.10617051899976104, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_create_multi_secrets": 0.10075378999999884, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_create_multi_secrets_snapshot": 0.0016527470002074551, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_create_secret_version_from_empty_secret": 0.0384154750001926, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_create_secret_with_custom_id": 0.02371935599990138, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_delete_non_existent_secret_returns_as_if_secret_exists": 0.01980243199977849, + 
"tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_deprecated_secret_version": 0.8921311730000525, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_deprecated_secret_version_stage": 0.19598640299977887, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_exp_raised_on_creation_of_secret_scheduled_for_deletion": 0.04052524700000504, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_first_rotate_secret_with_missing_lambda_arn": 0.03415313899972716, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_force_delete_deleted_secret": 0.06343453800013776, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_get_random_exclude_characters_and_symbols": 0.014892497999881016, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_get_secret_value": 0.07929810399991766, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_get_secret_value_errors": 0.04186563900020701, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_http_put_secret_value_custom_client_request_token_new_version_stages": 0.052725326999734534, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_http_put_secret_value_duplicate_req": 0.04800665799984927, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_http_put_secret_value_null_client_request_token_new_version_stages": 0.05607259899989003, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_http_put_secret_value_with_duplicate_client_request_token": 0.04856977800000095, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_http_put_secret_value_with_non_provided_client_request_token": 0.048856093000040346, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_invalid_secret_name[ Inv *?!]Name\\\\-]": 0.0909093599998414, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_invalid_secret_name[ Inv Name]": 0.0884453230003146, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_invalid_secret_name[ Inv*Name? 
]": 0.08812485299995387, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_invalid_secret_name[Inv Name]": 0.0910302340000726, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_last_accessed_date": 0.055901790999541845, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_last_updated_date": 0.0812705659996027, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_list_secrets_filtering": 0.19445825799994054, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_no_client_request_token[CreateSecret]": 0.021968806000131735, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_no_client_request_token[PutSecretValue]": 0.02215613599992139, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_no_client_request_token[RotateSecret]": 0.022228556000072786, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_no_client_request_token[UpdateSecret]": 0.021936485999731303, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_non_versioning_version_stages_no_replacement": 0.2212101539998912, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_non_versioning_version_stages_replacement": 0.21189784899979713, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_put_secret_value_with_new_custom_client_request_token": 0.04821817199990619, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_put_secret_value_with_version_stages": 0.09866570299982413, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_resource_policy": 0.04875536800022928, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_rotate_secret_invalid_lambda_arn": 0.22239830100011204, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_rotate_secret_multiple_times_with_lambda_success": 2.8074591529998543, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_rotate_secret_with_lambda_success[None]": 2.324457150999933, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_rotate_secret_with_lambda_success[True]": 2.3816204770000695, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_secret_exists": 0.047205203999965306, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_secret_exists_snapshots": 0.04743087700012438, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_secret_not_found": 0.02683596899987606, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_secret_restore": 0.0487191580000399, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_secret_tags": 0.12996591800015267, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_secret_version_not_found": 0.044084173999863197, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_description": 0.10208813200028999, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_version_stages_current_pending": 0.23170256100024744, + 
"tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_version_stages_current_pending_cycle": 0.2815906990001622, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_version_stages_current_pending_cycle_custom_stages_1": 0.2815802969998913, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_version_stages_current_pending_cycle_custom_stages_2": 0.3018662529998437, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_version_stages_current_pending_cycle_custom_stages_3": 0.26345162399979927, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_version_stages_current_previous": 0.2167801920002148, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_version_stages_return_type": 0.04996867699969698, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManager::test_update_secret_with_non_provided_client_request_token": 0.04588817600006223, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManagerMultiAccounts::test_cross_account_access": 0.17556910000030257, + "tests/aws/services/secretsmanager/test_secretsmanager.py::TestSecretsManagerMultiAccounts::test_cross_account_access_non_default_key": 0.11041321000016069, + "tests/aws/services/ses/test_ses.py::TestSES::test_cannot_create_event_for_no_topic": 0.03867402200012293, + "tests/aws/services/ses/test_ses.py::TestSES::test_clone_receipt_rule_set": 0.7420435919998454, + "tests/aws/services/ses/test_ses.py::TestSES::test_creating_event_destination_without_configuration_set": 0.06147218899991458, + "tests/aws/services/ses/test_ses.py::TestSES::test_delete_template": 0.055291385000145965, + "tests/aws/services/ses/test_ses.py::TestSES::test_deleting_non_existent_configuration_set": 0.014689517000078922, + "tests/aws/services/ses/test_ses.py::TestSES::test_deleting_non_existent_configuration_set_event_destination": 0.029288249000046562, + "tests/aws/services/ses/test_ses.py::TestSES::test_get_identity_verification_attributes_for_domain": 0.011184211999989202, + "tests/aws/services/ses/test_ses.py::TestSES::test_get_identity_verification_attributes_for_email": 0.02469231699978991, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[-]": 0.014504962999808413, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[-test]": 0.014120386000058716, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[test-]": 0.014140259999976479, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[test-test_invalid_value:123]": 0.014377645000195116, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[test_invalid_name:123-test]": 0.014859668000099191, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[test_invalid_name:123-test_invalid_value:123]": 0.014124752999805423, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[test_invalid_name_len]": 0.014895663000061177, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[test_invalid_value_len]": 0.014644154999814418, + "tests/aws/services/ses/test_ses.py::TestSES::test_invalid_tags_send_email[test_priority_name_value]": 0.01433585699987816, + "tests/aws/services/ses/test_ses.py::TestSES::test_list_templates": 
0.12444623799979126, + "tests/aws/services/ses/test_ses.py::TestSES::test_sending_to_deleted_topic": 0.4519943149998653, + "tests/aws/services/ses/test_ses.py::TestSES::test_sent_message_counter": 0.11887601800003722, + "tests/aws/services/ses/test_ses.py::TestSES::test_ses_sns_topic_integration_send_email": 1.5249144230001548, + "tests/aws/services/ses/test_ses.py::TestSES::test_ses_sns_topic_integration_send_raw_email": 1.4808552409997446, + "tests/aws/services/ses/test_ses.py::TestSES::test_ses_sns_topic_integration_send_templated_email": 1.5348211740001716, + "tests/aws/services/ses/test_ses.py::TestSES::test_special_tags_send_email[ses:feedback-id-a-this-marketing-campaign]": 0.015946234000011827, + "tests/aws/services/ses/test_ses.py::TestSES::test_special_tags_send_email[ses:feedback-id-b-that-campaign]": 0.014738378999936685, + "tests/aws/services/ses/test_ses.py::TestSES::test_trying_to_delete_event_destination_from_non_existent_configuration_set": 0.0895421500001703, + "tests/aws/services/ses/test_ses.py::TestSESRetrospection::test_send_email_can_retrospect": 1.5535481340000388, + "tests/aws/services/ses/test_ses.py::TestSESRetrospection::test_send_templated_email_can_retrospect": 0.07046403099980125, + "tests/aws/services/sns/test_sns.py::TestSNSCertEndpoint::test_cert_endpoint_host[]": 0.1878086090000579, + "tests/aws/services/sns/test_sns.py::TestSNSCertEndpoint::test_cert_endpoint_host[sns.us-east-1.amazonaws.com]": 0.13135641799976838, + "tests/aws/services/sns/test_sns.py::TestSNSMultiAccounts::test_cross_account_access": 0.10840783799994824, + "tests/aws/services/sns/test_sns.py::TestSNSMultiAccounts::test_cross_account_publish_to_sqs": 0.45179786500011687, + "tests/aws/services/sns/test_sns.py::TestSNSPlatformEndpoint::test_create_platform_endpoint_check_idempotency": 0.0017134989998339734, + "tests/aws/services/sns/test_sns.py::TestSNSPlatformEndpoint::test_publish_disabled_endpoint": 0.1046066119997704, + "tests/aws/services/sns/test_sns.py::TestSNSPlatformEndpoint::test_publish_to_gcm": 0.0017196020000938006, + "tests/aws/services/sns/test_sns.py::TestSNSPlatformEndpoint::test_publish_to_platform_endpoint_is_dispatched": 0.14217512200048077, + "tests/aws/services/sns/test_sns.py::TestSNSPlatformEndpoint::test_subscribe_platform_endpoint": 0.14423287799991158, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_empty_sns_message": 0.08678343000019595, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_message_structure_json_exc": 0.06404937399997834, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_publish_batch_too_long_message": 0.072661442000026, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_publish_by_path_parameters": 0.13352023399988866, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_publish_message_before_subscribe_topic": 0.1414173689997824, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_publish_message_by_target_arn": 0.19437532699998883, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_publish_non_existent_target": 0.031349337999699856, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_publish_too_long_message": 0.07178752200002236, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_publish_with_empty_subject": 0.03905522100035341, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_publish_wrong_arn_format": 0.03202460600004997, + 
"tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_topic_publish_another_region": 0.054487076999976125, + "tests/aws/services/sns/test_sns.py::TestSNSPublishCrud::test_unknown_topic_publish": 0.03910054600009971, + "tests/aws/services/sns/test_sns.py::TestSNSPublishDelivery::test_delivery_lambda": 2.2127511040000627, + "tests/aws/services/sns/test_sns.py::TestSNSRetrospectionEndpoints::test_publish_sms_can_retrospect": 0.2469280330001311, + "tests/aws/services/sns/test_sns.py::TestSNSRetrospectionEndpoints::test_publish_to_platform_endpoint_can_retrospect": 0.20070034400009718, + "tests/aws/services/sns/test_sns.py::TestSNSRetrospectionEndpoints::test_subscription_tokens_can_retrospect": 1.096987512999931, + "tests/aws/services/sns/test_sns.py::TestSNSSMS::test_publish_sms": 0.014801035000118645, + "tests/aws/services/sns/test_sns.py::TestSNSSMS::test_publish_sms_endpoint": 0.15681767200021568, + "tests/aws/services/sns/test_sns.py::TestSNSSMS::test_publish_wrong_phone_format": 0.04975588399997832, + "tests/aws/services/sns/test_sns.py::TestSNSSMS::test_subscribe_sms_endpoint": 0.04861617599976853, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_create_subscriptions_with_attributes": 0.099858033999908, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_list_subscriptions": 0.350389370999892, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_list_subscriptions_by_topic_pagination": 1.5278777490000266, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_not_found_error_on_set_subscription_attributes": 0.3210580019997451, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_sns_confirm_subscription_wrong_token": 0.1249307759999283, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_subscribe_idempotency": 0.1070053400001143, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_subscribe_with_invalid_protocol": 0.04329845199981719, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_subscribe_with_invalid_topic": 0.04962113800002044, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_unsubscribe_from_non_existing_subscription": 0.11622247699983745, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_unsubscribe_idempotency": 0.08891710200009584, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_unsubscribe_wrong_arn_format": 0.032772241999964535, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionCrud::test_validate_set_sub_attributes": 0.26919723600008183, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionFirehose::test_publish_to_firehose_with_s3": 1.4317156449999402, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_dlq_external_http_endpoint[False]": 2.6617399490000935, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_dlq_external_http_endpoint[True]": 2.670507843999758, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_http_subscription_response": 0.07583789999989676, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_multiple_subscriptions_http_endpoint": 1.7040643949999321, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_redrive_policy_http_subscription": 1.1431565089999367, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_subscribe_external_http_endpoint[False]": 1.6252535150001677, + 
"tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_subscribe_external_http_endpoint[True]": 1.6278609450000658, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_subscribe_external_http_endpoint_content_type[False]": 1.6047981459996663, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_subscribe_external_http_endpoint_content_type[True]": 1.6102829020001082, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionHttp::test_subscribe_external_http_endpoint_lambda_url_sig_validation": 2.0652085029998943, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionLambda::test_publish_lambda_verify_signature[1]": 4.21052824100002, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionLambda::test_publish_lambda_verify_signature[2]": 4.220908935999887, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionLambda::test_python_lambda_subscribe_sns_topic": 4.201105062999886, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionLambda::test_redrive_policy_lambda_subscription": 2.281751922000012, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionLambda::test_sns_topic_as_lambda_dead_letter_queue": 2.3690748289998282, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSES::test_email_sender": 2.1062701610001113, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSES::test_topic_email_subscription_confirmation": 0.0613843190001262, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_attribute_raw_subscribe": 0.14043716200012568, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_empty_or_wrong_message_attributes": 0.30405770299989854, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_message_attributes_not_missing": 0.2165908980000495, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_message_attributes_prefixes": 0.16983573100037574, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_message_structure_json_to_sqs": 0.20383019400014746, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_publish_batch_exceptions": 0.06677737200016054, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_publish_batch_messages_from_sns_to_sqs": 0.718092976999742, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_publish_batch_messages_without_topic": 0.03269055300029322, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_publish_sqs_from_sns": 0.2729365349998716, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_publish_sqs_from_sns_with_xray_propagation": 0.13533737500006282, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_publish_sqs_verify_signature[1]": 0.1442182679995767, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_publish_sqs_verify_signature[2]": 0.14241411000011794, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_publish_unicode_chars": 0.13244771899985608, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_redrive_policy_sqs_queue_subscription[False]": 0.19105041500006337, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_redrive_policy_sqs_queue_subscription[True]": 0.2012307749998854, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_sqs_topic_subscription_confirmation": 0.07600934200013398, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_subscribe_sqs_queue": 0.17572893500005193, + 
"tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_subscribe_to_sqs_with_queue_url": 0.04586082099990563, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQS::test_subscription_after_failure_to_deliver": 1.5154996649998793, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_fifo_topic_to_regular_sqs[False]": 0.2703268760001265, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_fifo_topic_to_regular_sqs[True]": 0.2743641579997984, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_message_to_fifo_sqs[False]": 1.1747691789998953, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_message_to_fifo_sqs[True]": 1.193225558999984, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_message_to_fifo_sqs_ordering": 2.6259913149999647, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_publish_batch_messages_from_fifo_topic_to_fifo_queue[False]": 3.6269530710001163, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_publish_batch_messages_from_fifo_topic_to_fifo_queue[True]": 3.670615330000146, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_publish_fifo_messages_to_dlq[False]": 1.57372833300019, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_publish_fifo_messages_to_dlq[True]": 1.5611228119998941, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_publish_to_fifo_topic_deduplication_on_topic_level": 1.6716114720002224, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_publish_to_fifo_topic_to_sqs_queue_no_content_dedup[False]": 0.270900101000052, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_publish_to_fifo_topic_to_sqs_queue_no_content_dedup[True]": 0.2791799069998433, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_publish_to_fifo_with_target_arn": 0.0314391759998216, + "tests/aws/services/sns/test_sns.py::TestSNSSubscriptionSQSFifo::test_validations_for_fifo": 0.2316779709999537, + "tests/aws/services/sns/test_sns.py::TestSNSTopicCrud::test_create_duplicate_topic_check_idempotency": 0.08675210199999128, + "tests/aws/services/sns/test_sns.py::TestSNSTopicCrud::test_create_duplicate_topic_with_more_tags": 0.03345725600024707, + "tests/aws/services/sns/test_sns.py::TestSNSTopicCrud::test_create_topic_after_delete_with_new_tags": 0.05247338799995305, + "tests/aws/services/sns/test_sns.py::TestSNSTopicCrud::test_create_topic_test_arn": 0.3040474520000771, + "tests/aws/services/sns/test_sns.py::TestSNSTopicCrud::test_create_topic_with_attributes": 0.2718563550001818, + "tests/aws/services/sns/test_sns.py::TestSNSTopicCrud::test_tags": 0.08366482899987204, + "tests/aws/services/sns/test_sns.py::TestSNSTopicCrud::test_topic_delivery_policy_crud": 0.00167188100022031, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyAttributes::test_exists_filter_policy": 0.3149271800000406, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyAttributes::test_exists_filter_policy_attributes_array": 4.2838500000000295, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyAttributes::test_filter_policy": 5.322538917999736, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_empty_array_payload": 0.1681889940000474, + 
"tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_for_batch": 3.3817883949998304, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_ip_address_condition": 0.34092294700008097, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_large_complex_payload": 0.1901719229997525, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_on_message_body[False]": 5.328785093000079, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_on_message_body[True]": 5.335835105999649, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_on_message_body_array_attributes": 0.6143658079997749, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_on_message_body_array_of_object_attributes": 0.3447897080000075, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_on_message_body_dot_attribute": 5.5711469849995865, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyBody::test_filter_policy_on_message_body_or_attribute": 0.8270905840001888, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyConditions::test_policy_complexity": 0.0531667440000092, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyConditions::test_policy_complexity_with_or": 0.056513677999873835, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyConditions::test_validate_policy": 0.12628794700026447, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyConditions::test_validate_policy_exists_operator": 0.11934459000008246, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyConditions::test_validate_policy_nested_anything_but_operator": 0.16385757700004433, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyConditions::test_validate_policy_numeric_operator": 0.22346381299985296, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyConditions::test_validate_policy_string_operators": 0.22188522400028887, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyCrud::test_set_subscription_filter_policy_scope": 0.12184251700000459, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyCrud::test_sub_filter_policy_nested_property": 0.10428378799997517, + "tests/aws/services/sns/test_sns_filter_policy.py::TestSNSFilterPolicyCrud::test_sub_filter_policy_nested_property_constraints": 0.1783609770000112, + "tests/aws/services/sqs/test_sqs.py::TestSQSMultiAccounts::test_cross_account_access[domain]": 0.0885092009998516, + "tests/aws/services/sqs/test_sqs.py::TestSQSMultiAccounts::test_cross_account_access[path]": 0.08965208599988728, + "tests/aws/services/sqs/test_sqs.py::TestSQSMultiAccounts::test_cross_account_access[standard]": 0.09186907399998745, + "tests/aws/services/sqs/test_sqs.py::TestSQSMultiAccounts::test_cross_account_get_queue_url[domain]": 0.028839548999940234, + "tests/aws/services/sqs/test_sqs.py::TestSQSMultiAccounts::test_cross_account_get_queue_url[path]": 0.028285440000217932, + "tests/aws/services/sqs/test_sqs.py::TestSQSMultiAccounts::test_cross_account_get_queue_url[standard]": 0.029450716000155808, + "tests/aws/services/sqs/test_sqs.py::TestSQSMultiAccounts::test_delete_queue_multi_account[sqs]": 
0.08494125000015629, + "tests/aws/services/sqs/test_sqs.py::TestSQSMultiAccounts::test_delete_queue_multi_account[sqs_query]": 0.08715545899985955, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_approximate_number_of_messages_delayed[sqs]": 3.1285736219999762, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_approximate_number_of_messages_delayed[sqs_query]": 3.129290998999977, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_batch_send_with_invalid_char_should_succeed[sqs]": 0.1270908670001063, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_batch_send_with_invalid_char_should_succeed[sqs_query]": 0.22059923300002993, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_change_message_visibility_after_visibility_timeout_expiration[sqs]": 2.1004230799999277, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_change_message_visibility_after_visibility_timeout_expiration[sqs_query]": 2.104264710000052, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_change_message_visibility_batch_with_too_large_batch[sqs]": 0.6533817720003299, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_change_message_visibility_batch_with_too_large_batch[sqs_query]": 0.666144739000174, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_change_message_visibility_not_permanent[sqs]": 0.09872305599969877, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_change_message_visibility_not_permanent[sqs_query]": 0.10121720500001175, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_change_visibility_on_deleted_message_raises_invalid_parameter_value[sqs]": 0.09164700400015136, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_change_visibility_on_deleted_message_raises_invalid_parameter_value[sqs_query]": 0.09225993399991239, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_and_send_to_fifo_queue[sqs]": 0.0639245849999952, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_and_send_to_fifo_queue[sqs_query]": 0.06466771899999912, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_and_update_queue_attributes[sqs]": 0.08166042800053219, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_and_update_queue_attributes[sqs_query]": 0.0852419220000229, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_fifo_queue_with_different_attributes_raises_error[sqs]": 0.14237455399984356, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_fifo_queue_with_different_attributes_raises_error[sqs_query]": 0.14584794799975498, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_fifo_queue_with_same_attributes_is_idempotent": 0.03638546800016229, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_after_internal_attributes_changes_works[sqs]": 0.08461451900006978, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_after_internal_attributes_changes_works[sqs_query]": 0.08387791699988156, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_after_modified_attributes[sqs]": 0.0018473369998446287, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_after_modified_attributes[sqs_query]": 0.001742742999795155, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_after_send[sqs]": 0.1156049849998908, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_after_send[sqs_query]": 
0.11399804399979985, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_and_get_attributes[sqs]": 0.0300713070000711, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_and_get_attributes[sqs_query]": 0.030713987000126508, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_recently_deleted[sqs]": 0.03490848999990703, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_recently_deleted[sqs_query]": 0.03759918199989443, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_recently_deleted_cache[sqs]": 1.5617488079999475, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_recently_deleted_cache[sqs_query]": 1.5537983030001215, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_recently_deleted_can_be_disabled[sqs]": 0.0437312290000591, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_recently_deleted_can_be_disabled[sqs_query]": 0.04178748599974824, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_with_default_arguments_works_with_modified_attributes[sqs]": 0.0017319819999102037, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_with_default_arguments_works_with_modified_attributes[sqs_query]": 0.0017637509999985923, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_with_default_attributes_is_idempotent": 0.037087251000230026, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_with_different_attributes_raises_exception[sqs]": 0.19841330899998866, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_with_different_attributes_raises_exception[sqs_query]": 0.19759350800018183, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_with_same_attributes_is_idempotent": 0.03681480100021872, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_with_tags[sqs]": 0.02771976100007123, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_with_tags[sqs_query]": 0.028128327000104036, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_queue_without_attributes_is_idempotent": 0.03457833800007393, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_standard_queue_with_fifo_attribute_raises_error[sqs]": 0.07816115999958129, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_create_standard_queue_with_fifo_attribute_raises_error[sqs_query]": 0.07853448499986371, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_chain[sqs]": 0.0016762899999775982, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_chain[sqs_query]": 0.001691298999958235, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_config": 0.035292416999936904, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_execution_lambda_mapping_preserves_id[sqs]": 0.0017832300002282864, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_execution_lambda_mapping_preserves_id[sqs_query]": 0.001706326000203262, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_list_sources[sqs]": 0.05690664400003698, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_list_sources[sqs_query]": 0.059426621000284285, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_max_receive_count[sqs]": 0.12249692500017773, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_max_receive_count[sqs_query]": 0.12670814600005542, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_message_attributes": 0.7547270870002194, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_with_fifo_and_content_based_deduplication[sqs]": 0.1716114559997095, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_dead_letter_queue_with_fifo_and_content_based_deduplication[sqs_query]": 0.17039014400006636, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_deduplication_interval[sqs]": 0.0018160620002163341, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_deduplication_interval[sqs_query]": 0.0016980219998004031, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_after_visibility_timeout[sqs]": 1.1238981879998846, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_after_visibility_timeout[sqs_query]": 1.12396465300003, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_from_lambda[sqs]": 0.001824156000111543, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_from_lambda[sqs_query]": 0.001676069000041025, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_invalid_msg_id[sqs-]": 0.10434132299974408, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_invalid_msg_id[sqs-invalid:id]": 0.09337309200009258, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_invalid_msg_id[sqs-testLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongId]": 0.0851970149999488, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_invalid_msg_id[sqs_query-]": 0.08075875300005464, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_invalid_msg_id[sqs_query-invalid:id]": 0.0700619810002081, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_invalid_msg_id[sqs_query-testLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongIdtestLongId]": 0.08519973500006017, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_with_too_large_batch[sqs]": 0.6403513139996448, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_batch_with_too_large_batch[sqs_query]": 0.6511461169998256, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_deletes_with_change_visibility_timeout[sqs]": 0.12780724100048246, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_deletes_with_change_visibility_timeout[sqs_query]": 0.130821350999895, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_with_deleted_receipt_handle[sqs]": 0.10773744100015392, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_with_deleted_receipt_handle[sqs_query]": 0.11022989699995378, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_with_illegal_receipt_handle[sqs]": 0.030625260999840975, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_delete_message_with_illegal_receipt_handle[sqs_query]": 0.029475284999989526, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_disallow_queue_name_with_slashes": 0.0017510960001345666, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_extend_message_visibility_timeout_set_in_queue[sqs]": 6.1763415559998975, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_extend_message_visibility_timeout_set_in_queue[sqs_query]": 6.99896832599984, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_external_endpoint[sqs]": 0.13332469899955868, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_external_endpoint[sqs_query]": 0.06357867199994871, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_external_host_via_header_complete_message_lifecycle": 0.08966200499980914, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_external_hostname_via_host_header": 0.030336982000108037, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_approx_number_of_messages[sqs]": 0.2440396839999721, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_approx_number_of_messages[sqs_query]": 0.24859574299989617, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_change_to_high_throughput_after_creation[sqs]": 0.3252113610001288, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_change_to_high_throughput_after_creation[sqs_query]": 0.3285393009998643, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_change_to_regular_throughput_after_creation[sqs]": 0.22915429699992274, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_change_to_regular_throughput_after_creation[sqs_query]": 0.23396986999978253, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_content_based_message_deduplication_arrives_once[sqs]": 1.096609318999981, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_content_based_message_deduplication_arrives_once[sqs_query]": 1.0993522509998002, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_deduplication_arrives_once_after_delete[sqs-False]": 1.1530038319999676, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_deduplication_arrives_once_after_delete[sqs-True]": 1.142957158999934, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_deduplication_arrives_once_after_delete[sqs_query-False]": 1.1538362839999081, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_deduplication_arrives_once_after_delete[sqs_query-True]": 1.1508683340000516, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_deduplication_not_on_message_group_id[sqs-False]": 1.1273222679999435, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_deduplication_not_on_message_group_id[sqs-True]": 1.125189490999901, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_deduplication_not_on_message_group_id[sqs_query-False]": 1.136232012000164, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_deduplication_not_on_message_group_id[sqs_query-True]": 1.1410946129999502, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_delete_after_visibility_timeout[sqs]": 1.1900849189996734, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_delete_after_visibility_timeout[sqs_query]": 1.1868489870003032, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_delete_message_with_expired_receipt_handle[sqs]": 0.0016842699999415345, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_delete_message_with_expired_receipt_handle[sqs_query]": 0.0016181760001927614, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_empty_message_groups_added_back_to_queue[sqs]": 0.18696485400005258, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_empty_message_groups_added_back_to_queue[sqs_query]": 0.20153134300016973, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_high_throughput_ordering[sqs]": 0.1605448459999934, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_high_throughput_ordering[sqs_query]": 0.1618325450001521, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_attributes[sqs]": 0.15777157200000147, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_attributes[sqs_query]": 0.15983810199986692, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility": 2.116639133000035, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility_after_change_message_visibility[sqs]": 2.1119719250000344, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility_after_change_message_visibility[sqs_query]": 2.1323065569999926, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility_after_delete[sqs]": 0.2861931179995736, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility_after_delete[sqs_query]": 0.2792689949999385, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility_after_partial_delete[sqs]": 0.2666657039999336, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility_after_partial_delete[sqs_query]": 0.26278220699987287, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility_after_terminate_visibility_timeout[sqs]": 0.1317856970001685, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_message_group_visibility_after_terminate_visibility_timeout[sqs_query]": 0.13611670099999174, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_messages_in_order_after_timeout[sqs]": 2.1200272959999893, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_messages_in_order_after_timeout[sqs_query]": 2.110146029999896, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_queue_requires_suffix": 0.014419339000141917, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_queue_send_message_with_delay_on_queue_works[sqs]": 4.1073976669999865, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_queue_send_message_with_delay_on_queue_works[sqs_query]": 4.103934449999997, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_queue_send_message_with_delay_seconds_fails[sqs]": 0.15873957599978894, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_queue_send_message_with_delay_seconds_fails[sqs_query]": 0.15844080500028213, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_queue_send_multiple_messages_multiple_single_receives[sqs]": 0.23905085400019743, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_queue_send_multiple_messages_multiple_single_receives[sqs_query]": 0.24499686600006498, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_receive_message_group_id_ordering[sqs]": 0.130339586999753, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_receive_message_group_id_ordering[sqs_query]": 0.13560452899992015, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_receive_message_visibility_timeout_shared_in_group[sqs]": 2.166630939000015, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_receive_message_visibility_timeout_shared_in_group[sqs_query]": 2.1870424009998715, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_receive_message_with_zero_visibility_timeout[sqs]": 0.17436443499968846, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_receive_message_with_zero_visibility_timeout[sqs_query]": 0.177297438000096, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_sequence_number_increases[sqs]": 0.09306234799987578, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_sequence_number_increases[sqs_query]": 0.09600766899984592, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_set_content_based_deduplication_strategy[sqs]": 0.08447075000003679, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_fifo_set_content_based_deduplication_strategy[sqs_query]": 0.08740954700010661, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_get_list_queues_with_query_auth": 0.020054235999623415, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_get_queue_url_contains_localstack_host[sqs]": 0.029148404000125083, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_get_queue_url_contains_localstack_host[sqs_query]": 0.03736458600019432, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_get_queue_url_multi_region[domain]": 0.05032721400016271, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_get_queue_url_multi_region[path]": 0.04982286899985411, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_get_queue_url_multi_region[standard]": 0.05133654500014018, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_get_specific_queue_attribute_response[sqs]": 0.05487656100035565, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_get_specific_queue_attribute_response[sqs_query]": 0.05602282099994227, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_inflight_message_requeue": 4.594766348000121, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_invalid_batch_id[sqs]": 0.14218042199991032, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_invalid_batch_id[sqs_query]": 0.14021204800019405, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_invalid_dead_letter_arn_rejected_before_lookup": 0.0017659989998719539, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_invalid_receipt_handle_should_return_error_message[sqs]": 0.034065012999690225, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_invalid_receipt_handle_should_return_error_message[sqs_query]": 0.04001393200042003, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_invalid_string_attributes_cause_invalid_parameter_value_error[sqs]": 0.02937050199989244, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_invalid_string_attributes_cause_invalid_parameter_value_error[sqs_query]": 0.029052900000351656, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_list_queue_tags[sqs]": 0.03548649400022441, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_list_queue_tags[sqs_query]": 0.03545024400023067, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_list_queues": 0.09656643399989662, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_list_queues_multi_region_with_endpoint_strategy_domain": 0.06551443199987261, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_list_queues_multi_region_with_endpoint_strategy_standard": 0.05901915499998722, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_list_queues_multi_region_without_endpoint_strategy": 0.06714502799991351, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_list_queues_pagination": 0.273624343000165, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_marker_serialization_json_protocol[\"{\\\\\"foo\\\\\": \\\\\"ba\\\\rr\\\\\"}\"]": 0.0782920600001944, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_marker_serialization_json_protocol[{\"foo\": \"ba\\rr\", \"foo2\": \"ba"r"\"}]": 0.0781768069998634, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_deduplication_id_too_long": 0.16585574699979588, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_group_id_too_long": 0.16494935300011093, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_retention": 3.079372381000212, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_retention_fifo": 3.06956698099998, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_retention_with_inflight": 5.607780848000175, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_system_attribute_names_with_attribute_names[sqs]": 0.12120940499994504, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_system_attribute_names_with_attribute_names[sqs_query]": 0.12016031999996812, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_with_attributes_should_be_enqueued[sqs]": 0.07769416399992224, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_with_attributes_should_be_enqueued[sqs_query]": 0.07211073899998155, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_with_carriage_return[sqs]": 0.06224843200016039, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_message_with_carriage_return[sqs_query]": 0.0630495200000496, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_non_existent_queue": 0.2103003729998818, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_posting_to_fifo_requires_deduplicationid_group_id[sqs]": 0.23616783300008137, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_posting_to_fifo_requires_deduplicationid_group_id[sqs_query]": 0.23538207599972338, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_posting_to_queue_via_queue_name[sqs]": 0.04631885000026159, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_posting_to_queue_via_queue_name[sqs_query]": 0.046134704000451165, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_publish_get_delete_message[sqs]": 0.09566594600005374, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_publish_get_delete_message[sqs_query]": 0.09304840200002218, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_publish_get_delete_message_batch[sqs]": 0.2508977639997738, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_publish_get_delete_message_batch[sqs_query]": 0.2473337899998569, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_purge_queue[sqs]": 1.2130526089999876, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_purge_queue[sqs_query]": 1.2271326029997454, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_purge_queue_clears_fifo_deduplication_cache[sqs]": 0.09395793500016225, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_purge_queue_clears_fifo_deduplication_cache[sqs_query]": 0.09408482699996057, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_purge_queue_deletes_delayed_messages[sqs]": 3.1578335469996546, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_purge_queue_deletes_delayed_messages[sqs_query]": 3.150445915000091, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_purge_queue_deletes_inflight_messages[sqs]": 4.239921435000042, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_purge_queue_deletes_inflight_messages[sqs_query]": 4.259177208999972, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_queue_list_nonexistent_tags[sqs]": 0.02744931699976405, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_queue_list_nonexistent_tags[sqs_query]": 0.03053503500041188, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_after_visibility_timeout[sqs]": 1.7357519829997727, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_after_visibility_timeout[sqs_query]": 1.9993890450000436, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_empty_queue[sqs]": 1.0934699649999402, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_empty_queue[sqs_query]": 1.0937296019999394, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_attribute_names_filters[sqs]": 0.23568316499972752, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_attribute_names_filters[sqs_query]": 0.23579489400026432, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_attributes_timestamp_types[sqs]": 0.0641738990000249, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_attributes_timestamp_types[sqs_query]": 0.06364502499991431, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_message_attribute_names_filters[sqs]": 0.260924138999826, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_message_attribute_names_filters[sqs_query]": 0.26576942599990616, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_message_system_attribute_names_filters[sqs]": 0.15556281399994987, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_message_system_attribute_names_filters[sqs_query]": 0.15865734800013342, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_wait_time_seconds_and_max_number_of_messages_does_not_block[sqs]": 0.09221760599984918, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_wait_time_seconds_and_max_number_of_messages_does_not_block[sqs_query]": 0.10019635200001176, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_with_visibility_timeout_updates_timeout[sqs]": 0.08942372500018791, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_message_with_visibility_timeout_updates_timeout[sqs_query]": 0.09210011599998325, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_terminate_visibility_timeout[sqs]": 0.09255143700011104, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_receive_terminate_visibility_timeout[sqs_query]": 0.09471740000003592, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_redrive_policy_attribute_validity[sqs]": 0.0016427070002009714, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_redrive_policy_attribute_validity[sqs_query]": 0.0016145750000760017, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_remove_message_with_old_receipt_handle[sqs]": 2.079922900999918, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_remove_message_with_old_receipt_handle[sqs_query]": 2.0788629090000086, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_batch_message_size": 0.2304874590001873, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_batch_missing_deduplication_id_for_fifo_queue[sqs]": 0.14121915499981696, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_batch_missing_deduplication_id_for_fifo_queue[sqs_query]": 0.1398020109998015, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_batch_missing_message_group_id_for_fifo_queue[sqs]": 0.14123457600021538, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_batch_missing_message_group_id_for_fifo_queue[sqs_query]": 0.14369791599983728, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_batch_receive_multiple[sqs]": 0.10470392299998821, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_batch_receive_multiple[sqs_query]": 0.10610120000001189, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_delay_and_wait_time[sqs]": 1.83068551700012, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_delay_and_wait_time[sqs_query]": 1.9986929220001457, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_empty_message[sqs]": 0.1411468659998718, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_empty_message[sqs_query]": 0.14524913099990044, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_batch[sqs]": 0.11198394099983489, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_batch[sqs_query]": 0.11284279600022273, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_batch_with_empty_list[sqs]": 0.029128603999879488, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_batch_with_empty_list[sqs_query]": 0.029053352000119048, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_batch_with_oversized_contents[sqs]": 0.14656391800031088, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_batch_with_oversized_contents[sqs_query]": 0.15372318899972015, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_batch_with_oversized_contents_with_updated_maximum_message_size[sqs]": 0.11996059400007653, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_batch_with_oversized_contents_with_updated_maximum_message_size[sqs_query]": 0.11482575600007294, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_to_standard_queue_with_empty_message_group_id": 0.0843245849998766, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_attributes[sqs]": 0.0619033830000717, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_attributes[sqs_query]": 0.06354190699994433, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_binary_attributes[sqs]": 0.1032633539998642, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_binary_attributes[sqs_query]": 0.10865792499998861, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_delay_0_works_for_fifo[sqs]": 0.06116502800023227, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_delay_0_works_for_fifo[sqs_query]": 0.06282179100003304, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_empty_string_attribute[sqs]": 0.1409212819999084, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_empty_string_attribute[sqs_query]": 0.1419540169999891, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_invalid_fifo_parameters[sqs]": 0.001704833000076178, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_invalid_fifo_parameters[sqs_query]": 0.0015828660000352102, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_invalid_payload_characters[sqs]": 0.028361683999946763, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_invalid_payload_characters[sqs_query]": 0.029091920999690046, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_invalid_string_attributes[sqs]": 0.12951364099967577, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_invalid_string_attributes[sqs_query]": 0.13666826300004686, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_updated_maximum_message_size[sqs]": 0.17548180400012825, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_message_with_updated_maximum_message_size[sqs_query]": 0.1780025170000954, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_oversized_message[sqs]": 0.14872975399998722, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_oversized_message[sqs_query]": 0.15144463799970254, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_max_number_of_messages[sqs]": 0.16603263400020296, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_max_number_of_messages[sqs_query]": 0.1647934919999443, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_message[sqs]": 0.06343816400021751, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_message[sqs_query]": 0.06542828500005271, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_message_encoded_content[sqs]": 0.06171695800003363, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_message_encoded_content[sqs_query]": 0.062285409999958574, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_message_multiple_queues": 0.08913700200014318, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_wait_time_seconds[sqs]": 0.23018267099973855, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_send_receive_wait_time_seconds[sqs_query]": 0.23131491300000562, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sent_message_retains_attributes_after_receive[sqs]": 0.09148719300014818, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sent_message_retains_attributes_after_receive[sqs_query]": 0.08191667899995991, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sequence_number[sqs]": 0.0846661140001288, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sequence_number[sqs_query]": 0.08630673999982719, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_empty_queue_policy[sqs]": 0.06252180099977522, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_empty_queue_policy[sqs_query]": 0.06527362600036213, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_empty_redrive_policy[sqs]": 0.06911295999975664, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_empty_redrive_policy[sqs_query]": 0.07062320800014277, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_queue_policy[sqs]": 0.04103634700004477, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_queue_policy[sqs_query]": 0.04448703799994291, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_unsupported_attribute_fifo[sqs]": 0.24487677100000838, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_unsupported_attribute_fifo[sqs_query]": 0.24417458199968678, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_unsupported_attribute_standard[sqs]": 0.21971754900027918, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_set_unsupported_attribute_standard[sqs_query]": 0.2244783090002329, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sqs_fifo_message_group_scope_no_throughput_setting[sqs]": 0.15856955499998548, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sqs_fifo_message_group_scope_no_throughput_setting[sqs_query]": 0.1586945190001643, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sqs_fifo_same_dedup_id_different_message_groups[sqs]": 0.15871796900000845, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sqs_fifo_same_dedup_id_different_message_groups[sqs_query]": 0.1584513949999291, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sqs_permission_lifecycle[sqs]": 0.2439868670001033, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sqs_permission_lifecycle[sqs_query]": 0.2773598139997375, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sse_kms_and_sqs_are_mutually_exclusive[sqs]": 0.0017553459999817278, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sse_kms_and_sqs_are_mutually_exclusive[sqs_query]": 0.0016104659998745774, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sse_queue_attributes[sqs]": 0.10239072200033661, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_sse_queue_attributes[sqs_query]": 0.10263982600008603, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_standard_queue_cannot_have_fifo_suffix": 0.013417597000170645, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_successive_purge_calls_fail[sqs]": 0.1480879369999002, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_successive_purge_calls_fail[sqs_query]": 0.14974695700016127, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_system_attributes_have_no_effect_on_attr_md5[sqs]": 0.07050675699997555, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_system_attributes_have_no_effect_on_attr_md5[sqs_query]": 0.07163318099992466, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_tag_queue_overwrites_existing_tag[sqs]": 0.040126752999867676, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_tag_queue_overwrites_existing_tag[sqs_query]": 
0.04116827699976966, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_tag_untag_queue[sqs]": 0.10257306300013624, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_tag_untag_queue[sqs_query]": 0.10596567800007506, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_tags_case_sensitive[sqs]": 0.034960533000003124, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_tags_case_sensitive[sqs_query]": 0.0358595640002477, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_terminate_visibility_timeout_after_receive[sqs]": 0.09747036500016293, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_terminate_visibility_timeout_after_receive[sqs_query]": 0.10153015799983223, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_too_many_entries_in_batch_request[sqs]": 1.202883624999913, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_too_many_entries_in_batch_request[sqs_query]": 0.1418133290003425, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_untag_queue_ignores_non_existing_tag[sqs]": 0.04163470799994684, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_untag_queue_ignores_non_existing_tag[sqs_query]": 0.042441385000302034, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_wait_time_seconds_queue_attribute_waits_correctly[sqs]": 1.06035360199985, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_wait_time_seconds_queue_attribute_waits_correctly[sqs_query]": 1.060866674999943, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_wait_time_seconds_waits_correctly[sqs]": 1.0629236440001932, + "tests/aws/services/sqs/test_sqs.py::TestSqsProvider::test_wait_time_seconds_waits_correctly[sqs_query]": 1.0625854879997405, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_endpoint_strategy_with_multi_region[domain]": 0.12016148999987308, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_endpoint_strategy_with_multi_region[off]": 0.117180031000089, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_endpoint_strategy_with_multi_region[path]": 0.11707227099986994, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_endpoint_strategy_with_multi_region[standard]": 0.16994413800011898, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_create_queue_fails": 0.03213934900009008, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_delete_queue[domain]": 0.044069312999909016, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_delete_queue[path]": 0.04841333699982897, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_delete_queue[standard]": 0.044473791999962486, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_list_queues_fails": 0.03081042299982073, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_list_queues_fails_json_format": 0.0018364479999490868, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_on_deleted_queue_fails[sqs]": 0.05216581900003803, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_on_deleted_queue_fails[sqs_query]": 0.053054862999943, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_attributes_all": 0.049868779000007635, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_attributes_json_format": 0.0017271340000206692, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_attributes_of_fifo_queue": 0.03838333699991381, + 
"tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_attributes_with_invalid_arg_returns_error": 0.038590367999859154, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_attributes_with_query_args": 0.03770597600009751, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_attributes_works_without_authparams[domain]": 0.03759939299970938, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_attributes_works_without_authparams[path]": 0.039019493000068906, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_attributes_works_without_authparams[standard]": 0.03723196200030543, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_url_work_for_different_queue[domain]": 0.053079875999856085, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_url_work_for_different_queue[path]": 0.053170555000178865, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_url_work_for_different_queue[standard]": 0.05345216099999561, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_url_works_for_same_queue[domain]": 0.038537932999815894, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_url_works_for_same_queue[path]": 0.04194190200018966, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_queue_url_works_for_same_queue[standard]": 0.039632486000073186, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_send_and_receive_messages": 0.11578557900020314, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_without_query_json_format_returns_returns_xml": 0.028619582000146693, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_get_without_query_returns_unknown_operation": 0.029183132000071055, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_invalid_action_raises_exception": 0.030459909999990487, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_overwrite_queue_url_in_params": 0.05209754800011979, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_queue_url_format_path_strategy": 0.02189198700034467, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_send_message_via_queue_url_with_json_protocol": 1.08785309100017, + "tests/aws/services/sqs/test_sqs.py::TestSqsQueryApi::test_valid_action_with_missing_parameter_raises_exception": 0.030162009999685324, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_fifo_list_messages_as_botocore_endpoint_url[json-domain]": 0.09995854500016321, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_fifo_list_messages_as_botocore_endpoint_url[json-path]": 0.09882505799987484, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_fifo_list_messages_as_botocore_endpoint_url[json-standard]": 0.10015845800012357, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_fifo_list_messages_as_botocore_endpoint_url[query-domain]": 0.1019700090000697, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_fifo_list_messages_as_botocore_endpoint_url[query-path]": 0.10289015600005769, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_fifo_list_messages_as_botocore_endpoint_url[query-standard]": 0.1001785179998933, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_botocore_endpoint_url[json-domain]": 0.07675527799983684, 
+ "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_botocore_endpoint_url[json-path]": 0.07649979699999676, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_botocore_endpoint_url[json-standard]": 0.08343940200006728, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_botocore_endpoint_url[query-domain]": 0.07989415000020017, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_botocore_endpoint_url[query-path]": 0.0782456640001783, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_botocore_endpoint_url[query-standard]": 0.08314052700006869, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_json[domain]": 0.07769091800014394, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_json[path]": 0.07524303300010615, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_as_json[standard]": 0.07476482099991699, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_has_no_side_effects[domain]": 0.0969828409999991, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_has_no_side_effects[path]": 0.09637490599993725, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_has_no_side_effects[standard]": 0.09739994500000648, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_delayed_messages[domain]": 0.10563322199982395, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_delayed_messages[path]": 0.10420602699969095, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_delayed_messages[standard]": 0.10341498599996157, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_action_raises_error[json-domain]": 0.02774219699972491, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_action_raises_error[json-path]": 0.02775501099995381, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_action_raises_error[json-standard]": 0.0293243999999504, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_action_raises_error[query-domain]": 0.02788348900003257, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_action_raises_error[query-path]": 0.028325274999588146, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_action_raises_error[query-standard]": 0.03040905499983637, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_queue_url[domain]": 0.01781294199986405, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_queue_url[path]": 0.017977343000211476, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invalid_queue_url[standard]": 0.019290456999897287, + 
"tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invisible_messages[domain]": 0.12260086700007378, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invisible_messages[path]": 0.12060631200006355, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_invisible_messages[standard]": 0.12049991100025181, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_non_existent_queue[domain]": 0.02259441500018511, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_non_existent_queue[path]": 0.02256781199980651, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_non_existent_queue[standard]": 0.02256634400009716, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_queue_url_in_path[domain]": 0.0806629950000115, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_queue_url_in_path[path]": 0.0794753430000128, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_with_queue_url_in_path[standard]": 0.07870387499997378, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_without_queue_url[domain]": 0.017130400999803896, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_without_queue_url[path]": 0.016935114000034446, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsDeveloperEndpoints::test_list_messages_without_queue_url[standard]": 0.017896442000164825, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsOverrideHeaders::test_receive_message_override_max_number_of_messages": 0.4730863470001623, + "tests/aws/services/sqs/test_sqs_backdoor.py::TestSqsOverrideHeaders::test_receive_message_override_message_wait_time_seconds": 25.236502244999883, + "tests/aws/services/sqs/test_sqs_move_task.py::test_basic_move_task_workflow": 1.8188541050001277, + "tests/aws/services/sqs/test_sqs_move_task.py::test_cancel_with_invalid_source_arn_in_task_handle": 0.050512659000105486, + "tests/aws/services/sqs/test_sqs_move_task.py::test_cancel_with_invalid_task_handle": 0.054027649000090605, + "tests/aws/services/sqs/test_sqs_move_task.py::test_cancel_with_invalid_task_id_in_task_handle": 0.07478179100007765, + "tests/aws/services/sqs/test_sqs_move_task.py::test_destination_needs_to_exist": 0.10798726200005149, + "tests/aws/services/sqs/test_sqs_move_task.py::test_move_task_cancel": 1.830104909000056, + "tests/aws/services/sqs/test_sqs_move_task.py::test_move_task_delete_destination_queue_while_running": 1.872047072999976, + "tests/aws/services/sqs/test_sqs_move_task.py::test_move_task_with_throughput_limit": 3.3854972010001347, + "tests/aws/services/sqs/test_sqs_move_task.py::test_move_task_workflow_with_default_destination": 1.8002463529999204, + "tests/aws/services/sqs/test_sqs_move_task.py::test_move_task_workflow_with_multiple_sources_as_default_destination": 2.486964338999769, + "tests/aws/services/sqs/test_sqs_move_task.py::test_source_needs_redrive_policy": 0.09210000499979287, + "tests/aws/services/sqs/test_sqs_move_task.py::test_start_multiple_move_tasks": 0.6980477259999134, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_describe_parameters": 0.01527112999997371, + 
"tests/aws/services/ssm/test_ssm.py::TestSSM::test_get_inexistent_maintenance_window": 0.015372413000022789, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_get_inexistent_secret": 0.035112855000079435, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_get_parameter_by_arn": 0.059525974000052884, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_get_parameters_and_secrets": 0.12538370300012502, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_get_parameters_by_path_and_filter_by_labels": 0.06377923099989857, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_get_secret_parameter": 0.06594547800000328, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_hierarchical_parameter[/<param>//b//c]": 0.06198107999989588, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_hierarchical_parameter[<param>/b/c]": 0.062209626000139906, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_parameters_with_path": 0.15899454299983518, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_put_parameters": 0.07763973700025417, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_trigger_event_on_systems_manager_change[domain]": 0.11642610900003092, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_trigger_event_on_systems_manager_change[path]": 0.11526392100017802, + "tests/aws/services/ssm/test_ssm.py::TestSSM::test_trigger_event_on_systems_manager_change[standard]": 0.12459161699985088, + "tests/aws/services/stepfunctions/v2/activities/test_activities.py::TestActivities::test_activity_task": 2.2069181659996957, + "tests/aws/services/stepfunctions/v2/activities/test_activities.py::TestActivities::test_activity_task_failure": 2.009260590000167, + "tests/aws/services/stepfunctions/v2/activities/test_activities.py::TestActivities::test_activity_task_no_worker_name": 1.9579327390001708, + "tests/aws/services/stepfunctions/v2/activities/test_activities.py::TestActivities::test_activity_task_on_deleted": 0.6052219810001134, + "tests/aws/services/stepfunctions/v2/activities/test_activities.py::TestActivities::test_activity_task_start_timeout": 7.105871753999736, + "tests/aws/services/stepfunctions/v2/activities/test_activities.py::TestActivities::test_activity_task_with_heartbeat": 6.267222732000164, + "tests/aws/services/stepfunctions/v2/arguments/test_arguments.py::TestArgumentsBase::test_base_cases[BASE_LAMBDA_EMPTY]": 2.353788852000207, + "tests/aws/services/stepfunctions/v2/arguments/test_arguments.py::TestArgumentsBase::test_base_cases[BASE_LAMBDA_EMPTY_GLOBAL_QL_JSONATA]": 2.427913883999963, + "tests/aws/services/stepfunctions/v2/arguments/test_arguments.py::TestArgumentsBase::test_base_cases[BASE_LAMBDA_EXPRESSION]": 6.2894217159996515, + "tests/aws/services/stepfunctions/v2/arguments/test_arguments.py::TestArgumentsBase::test_base_cases[BASE_LAMBDA_LITERALS]": 2.520435701999986, + "tests/aws/services/stepfunctions/v2/assign/test_assign_base.py::TestAssignBase::test_assign_in_choice[CONDITION_FALSE]": 0.8612657080002464, + "tests/aws/services/stepfunctions/v2/assign/test_assign_base.py::TestAssignBase::test_assign_in_choice[CONDITION_TRUE]": 1.1017684560001726, + "tests/aws/services/stepfunctions/v2/assign/test_assign_base.py::TestAssignBase::test_base_cases[BASE_CONSTANT_LITERALS]": 1.3073441670001102, + "tests/aws/services/stepfunctions/v2/assign/test_assign_base.py::TestAssignBase::test_base_cases[BASE_EMPTY]": 0.8401979770001162, + "tests/aws/services/stepfunctions/v2/assign/test_assign_base.py::TestAssignBase::test_base_cases[BASE_PATHS]": 1.1562338660000933, + 
"tests/aws/services/stepfunctions/v2/assign/test_assign_base.py::TestAssignBase::test_base_cases[BASE_SCOPE_MAP]": 1.1777725639997243, + "tests/aws/services/stepfunctions/v2/assign/test_assign_base.py::TestAssignBase::test_base_cases[BASE_VAR]": 1.4245392959999208, + "tests/aws/services/stepfunctions/v2/assign/test_assign_base.py::TestAssignBase::test_base_parallel_cases[BASE_SCOPE_PARALLEL]": 1.2446496620002563, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_assign_from_value[BASE_ASSIGN_FROM_INTRINSIC_FUNCTION]": 2.0346741050002493, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_assign_from_value[BASE_ASSIGN_FROM_PARAMETERS]": 1.0748879340001167, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_assign_from_value[BASE_ASSIGN_FROM_RESULT]": 1.0496478920001664, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_assign_in_catch_state": 2.4808853889996954, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_assign_in_choice_state[CORRECT]": 1.1540624289998505, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_assign_in_choice_state[INCORRECT]": 1.077054416999772, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_assign_in_wait_state": 0.832074423999984, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_assign[BASE_REFERENCE_IN_CHOICE]": 1.1471047669999734, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_assign[BASE_REFERENCE_IN_FAIL]": 1.076427927000168, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_assign[BASE_REFERENCE_IN_INPUTPATH]": 1.0688054990000637, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_assign[BASE_REFERENCE_IN_INTRINSIC_FUNCTION]": 1.3410011650000797, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_assign[BASE_REFERENCE_IN_ITERATOR_OUTER_SCOPE]": 3.1601409840000088, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_assign[BASE_REFERENCE_IN_OUTPUTPATH]": 1.0945666530003564, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_assign[BASE_REFERENCE_IN_PARAMETERS]": 1.1011698049999268, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_assign[BASE_REFERENCE_IN_WAIT]": 1.1009564239998326, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_in_map_state[MAP_STATE_REFERENCE_IN_INTRINSIC_FUNCTION]": 1.3825175860001764, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_in_map_state[MAP_STATE_REFERENCE_IN_ITEMS_PATH]": 1.4121712150001713, + 
"tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_in_map_state[MAP_STATE_REFERENCE_IN_ITEM_SELECTOR]": 1.4507190999997874, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_in_map_state[MAP_STATE_REFERENCE_IN_MAX_CONCURRENCY_PATH]": 1.111834492000071, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_in_map_state[MAP_STATE_REFERENCE_IN_TOLERATED_FAILURE_PATH]": 1.2003370810000433, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_in_map_state_max_items_path[MAP_STATE_REFERENCE_IN_MAX_ITEMS_PATH]": 1.2720929830002206, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_reference_in_map_state_max_items_path[MAP_STATE_REFERENCE_IN_MAX_PER_BATCH_PATH]": 0.0019308650000766647, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_state_assign_evaluation_order[BASE_EVALUATION_ORDER_PASS_STATE]": 0.0018191870003647637, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_undefined_reference[BASE_UNDEFINED_ARGUMENTS]": 0.0017252619998089358, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_undefined_reference[BASE_UNDEFINED_ARGUMENTS_FIELD]": 0.0017576720001670765, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_undefined_reference[BASE_UNDEFINED_ASSIGN]": 1.355792971000028, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_undefined_reference[BASE_UNDEFINED_OUTPUT]": 1.3681261239999003, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_undefined_reference[BASE_UNDEFINED_OUTPUT_FIELD]": 1.3864648230000967, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_undefined_reference[BASE_UNDEFINED_OUTPUT_MULTIPLE_STATES]": 1.4375884309997673, + "tests/aws/services/stepfunctions/v2/assign/test_assign_reference_variables.py::TestAssignReferenceVariables::test_variables_in_lambda_task[BASE_ASSIGN_FROM_LAMBDA_TASK_RESULT]": 2.8240290300000197, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_decl_version_1_0": 0.6193016049999187, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_event_bridge_events_base": 2.7387259809997886, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_event_bridge_events_failure": 0.001994564000142418, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_execution_dateformat": 0.5361818819997097, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_access[$.items[0]]": 0.8096660480002811, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_access[$.items[10]]": 0.8072513950000939, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.item.items[*]]": 0.7903397309999036, + 
"tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.item.items[1:5].itemValue]": 0.7916253759999563, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.item.items[1:5]]": 0.8081619759998375, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.item.items[1:]]": 0.8279964570001539, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.item.items[:1]]": 0.8262483379999139, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.items[*].itemValue]": 0.8237168020000354, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.items[*]]": 0.8208552739999959, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.items[1:].itemValue]": 0.7995492919999379, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.items[1:]]": 0.820396096000195, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.items[:1].itemValue]": 0.788394543999857, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$.items[:1]]": 0.8160532519998469, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_json_path_array_wildcard_or_slice_with_no_input[$[*]]": 0.7389435919999414, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_query_context_object_values": 1.7147601939998367, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_fail": 0.8095693520001532, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_fail_empty": 0.7599169009997695, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_fail_intrinsic": 0.8054115949998959, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_fail_path": 0.8316051590002189, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_pass_regex_json_path": 0.0018942460001198924, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_pass_regex_json_path_base": 0.8442440769999848, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_pass_result": 1.7905188640002052, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_pass_result_jsonpaths": 0.5977238909999869, + "tests/aws/services/stepfunctions/v2/base/test_base.py::TestSnfBase::test_state_pass_result_null_input_output_paths": 0.8402912740002648, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_base_wait_seconds_path[-1.5]": 0.8337240989999373, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_base_wait_seconds_path[-1]": 0.788070499000014, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_base_wait_seconds_path[0]": 0.8214760999999271, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_base_wait_seconds_path[1.5]": 0.830697267000005, + 
"tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_base_wait_seconds_path[1]": 1.6550992539998788, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_timestamp_too_far_in_future_boundary[24855]": 0.0017789209998682054, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_timestamp_too_far_in_future_boundary[24856]": 0.0016297619999932067, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_wait_timestamppath[.000000Z]": 0.8150028149998434, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_wait_timestamppath[.000000]": 0.8130979939999179, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_wait_timestamppath[.00Z]": 0.78193606800005, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_wait_timestamppath[Z]": 0.6375744360000226, + "tests/aws/services/stepfunctions/v2/base/test_wait.py::TestSfnWait::test_wait_timestamppath[]": 0.820800853000037, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_multiple_executions_and_heartbeat_notifications": 0.0019874310000886908, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_multiple_heartbeat_notifications": 0.0028255380002519814, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sns_publish_wait_for_task_token": 1.3860369020001144, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sqs_failure_in_wait_for_task_tok_no_error_field[SQS_PARALLEL_WAIT_FOR_TASK_TOKEN]": 0.009760211000184427, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sqs_failure_in_wait_for_task_tok_no_error_field[SQS_WAIT_FOR_TASK_TOKEN_CATCH]": 1.7981461099998342, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sqs_failure_in_wait_for_task_token": 2.6496874140000273, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sqs_wait_for_task_tok_with_heartbeat": 7.785913083000196, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sqs_wait_for_task_token": 2.720705597000233, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sqs_wait_for_task_token_call_chain": 4.384848078999994, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sqs_wait_for_task_token_no_token_parameter": 5.876278859000195, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sqs_wait_for_task_token_timeout": 5.9503942679998545, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_start_execution_sync": 2.5623033689996646, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_start_execution_sync2": 1.3660323009999047, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_start_execution_sync_delegate_failure": 1.3405132890000004, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_start_execution_sync_delegate_timeout": 7.816472562999934, + "tests/aws/services/stepfunctions/v2/callback/test_callback.py::TestCallback::test_sync_with_task_token": 3.355114002000164, + "tests/aws/services/stepfunctions/v2/choice_operators/test_boolean_equals.py::TestBooleanEquals::test_boolean_equals": 14.099266057999785, + 
"tests/aws/services/stepfunctions/v2/choice_operators/test_boolean_equals.py::TestBooleanEquals::test_boolean_equals_path": 15.191257033999818, + "tests/aws/services/stepfunctions/v2/choice_operators/test_is_operators.py::TestIsOperators::test_is_boolean": 13.987185100000033, + "tests/aws/services/stepfunctions/v2/choice_operators/test_is_operators.py::TestIsOperators::test_is_null": 13.964927731999978, + "tests/aws/services/stepfunctions/v2/choice_operators/test_is_operators.py::TestIsOperators::test_is_numeric": 14.216242634999844, + "tests/aws/services/stepfunctions/v2/choice_operators/test_is_operators.py::TestIsOperators::test_is_present": 14.172772205000228, + "tests/aws/services/stepfunctions/v2/choice_operators/test_is_operators.py::TestIsOperators::test_is_string": 15.423709084000166, + "tests/aws/services/stepfunctions/v2/choice_operators/test_is_operators.py::TestIsOperators::test_is_timestamp": 0.003717301000278894, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_equals": 21.372023953000053, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_equals_path": 22.623127365000073, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_greater_than": 2.688769020999871, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_greater_than_equals": 2.7447612799999206, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_greater_than_equals_path": 2.7538474220000353, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_greater_than_path": 2.7752832389999185, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_less_than": 2.7625646209999104, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_less_than_equals": 2.719124019999981, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_less_than_equals_path": 2.715019416000132, + "tests/aws/services/stepfunctions/v2/choice_operators/test_numeric.py::TestNumerics::test_numeric_less_than_path": 2.6882484799998565, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_equals": 6.5561934049997035, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_equals_path": 1.6279018410002664, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_greater_than": 1.991654545000074, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_greater_than_equals": 1.606562115000088, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_greater_than_equals_path": 1.626479125999822, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_greater_than_path": 2.006198316000109, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_less_than": 1.550570131999848, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_less_than_equals": 1.6097117110002728, + 
"tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_less_than_equals_path": 1.6334538640001028, + "tests/aws/services/stepfunctions/v2/choice_operators/test_string_operators.py::TestStrings::test_string_less_than_path": 1.6235052429999541, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_equals": 8.246174140999983, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_equals_path": 1.6654537479996634, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_greater_than": 1.6617902560001312, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_greater_than_equals": 1.5686742270002014, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_greater_than_equals_path": 0.8551340800001981, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_greater_than_path": 0.8599438880000889, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_less_than": 1.6227170140000453, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_less_than_equals": 1.5980968149999626, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_less_than_equals_path": 0.8222504139998819, + "tests/aws/services/stepfunctions/v2/choice_operators/test_timestamp_operators.py::TestTimestamps::test_timestamp_less_than_path": 0.863516964000155, + "tests/aws/services/stepfunctions/v2/comments/test_comments.py::TestComments::test_comment_in_parameters": 0.6430240790000425, + "tests/aws/services/stepfunctions/v2/comments/test_comments.py::TestComments::test_comments_as_per_docs": 7.72462464799969, + "tests/aws/services/stepfunctions/v2/context_object/test_context_object.py::TestSnfBase::test_error_cause_path": 1.197135784999773, + "tests/aws/services/stepfunctions/v2/context_object/test_context_object.py::TestSnfBase::test_input_path[$$.Execution.Input]": 1.2151304920000712, + "tests/aws/services/stepfunctions/v2/context_object/test_context_object.py::TestSnfBase::test_input_path[$$]": 0.9216381520002415, + "tests/aws/services/stepfunctions/v2/context_object/test_context_object.py::TestSnfBase::test_output_path[$$.Execution.Input]": 1.1726846319998003, + "tests/aws/services/stepfunctions/v2/context_object/test_context_object.py::TestSnfBase::test_output_path[$$]": 1.0104526379998333, + "tests/aws/services/stepfunctions/v2/context_object/test_context_object.py::TestSnfBase::test_result_selector": 2.860743783000089, + "tests/aws/services/stepfunctions/v2/context_object/test_context_object.py::TestSnfBase::test_variable": 1.2223827179996078, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_cross_account_lambda_task": 2.813157218000242, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_cross_account_service_lambda_invoke": 2.818888806999894, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_cross_account_service_lambda_invoke_retry": 6.2258848760002365, + 
"tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_cross_account_states_start_sync_execution[SFN_START_EXECUTION_SYNC_ROLE_ARN_INTRINSIC]": 3.3932750619997023, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_cross_account_states_start_sync_execution[SFN_START_EXECUTION_SYNC_ROLE_ARN_JSONATA]": 1.8747454959998322, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_cross_account_states_start_sync_execution[SFN_START_EXECUTION_SYNC_ROLE_ARN_PATH]": 1.9509954210002434, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_cross_account_states_start_sync_execution[SFN_START_EXECUTION_SYNC_ROLE_ARN_PATH_CONTEXT]": 2.048667544999944, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_cross_account_states_start_sync_execution[SFN_START_EXECUTION_SYNC_ROLE_ARN_VARIABLE]": 2.1001691599999504, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_invalid_credentials_field[EMPTY_CREDENTIALS]": 1.0820302629997514, + "tests/aws/services/stepfunctions/v2/credentials/test_credentials_base.py::TestCredentialsBase::test_invalid_credentials_field[INVALID_CREDENTIALS_FIELD]": 1.0739671599999383, + "tests/aws/services/stepfunctions/v2/error_handling/test_aws_sdk.py::TestAwsSdk::test_dynamodb_invalid_param": 0.0018381909997060575, + "tests/aws/services/stepfunctions/v2/error_handling/test_aws_sdk.py::TestAwsSdk::test_dynamodb_put_item_no_such_table": 0.9602138170000671, + "tests/aws/services/stepfunctions/v2/error_handling/test_aws_sdk.py::TestAwsSdk::test_invalid_secret_name": 0.9445192999999108, + "tests/aws/services/stepfunctions/v2/error_handling/test_aws_sdk.py::TestAwsSdk::test_no_such_bucket": 0.8815493249999236, + "tests/aws/services/stepfunctions/v2/error_handling/test_aws_sdk.py::TestAwsSdk::test_s3_no_such_key": 0.9166455300000962, + "tests/aws/services/stepfunctions/v2/error_handling/test_states_errors.py::TestStatesErrors::test_service_task_lambada_catch_state_all_data_limit_exceeded_on_large_utf8_response": 2.4703153720001865, + "tests/aws/services/stepfunctions/v2/error_handling/test_states_errors.py::TestStatesErrors::test_service_task_lambada_data_limit_exceeded_on_large_utf8_response": 2.5278753219997725, + "tests/aws/services/stepfunctions/v2/error_handling/test_states_errors.py::TestStatesErrors::test_start_large_input": 4.956773299000133, + "tests/aws/services/stepfunctions/v2/error_handling/test_states_errors.py::TestStatesErrors::test_task_lambda_catch_state_all_data_limit_exceeded_on_large_utf8_response": 2.516982377999966, + "tests/aws/services/stepfunctions/v2/error_handling/test_states_errors.py::TestStatesErrors::test_task_lambda_data_limit_exceeded_on_large_utf8_response": 2.502299553999819, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_lambda.py::TestTaskLambda::test_no_such_function": 2.5434937800000625, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_lambda.py::TestTaskLambda::test_no_such_function_catch": 2.7376378790002036, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_lambda.py::TestTaskLambda::test_raise_custom_exception": 2.384553780000033, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_lambda.py::TestTaskLambda::test_raise_exception": 2.6669237020000764, + 
"tests/aws/services/stepfunctions/v2/error_handling/test_task_lambda.py::TestTaskLambda::test_raise_exception_catch": 2.684178824000128, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_dynamodb.py::TestTaskServiceDynamoDB::test_invalid_param": 0.9483351620001486, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_dynamodb.py::TestTaskServiceDynamoDB::test_put_item_invalid_table_name": 1.072161375000178, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_dynamodb.py::TestTaskServiceDynamoDB::test_put_item_no_such_table": 0.9329111299996384, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_invoke_timeout": 7.129512716000136, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_no_such_function": 2.14087731599966, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_no_such_function_catch": 3.3048075300002893, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_raise_custom_exception": 2.496753290000015, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_raise_exception": 2.491760032000002, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_raise_exception_catch": 2.6652558500002215, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_raise_exception_catch_output_path[$.Payload]": 2.5394133249999413, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_raise_exception_catch_output_path[$.no.such.path]": 2.509736076000081, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_lambda.py::TestTaskServiceLambda::test_raise_exception_catch_output_path[None]": 2.483388404999914, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_sfn.py::TestTaskServiceSfn::test_start_execution_no_such_arn": 1.3236926299998686, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_sqs.py::TestTaskServiceSqs::test_send_message_empty_body": 0.0017719979998673807, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_sqs.py::TestTaskServiceSqs::test_send_message_no_such_queue": 1.3316761549999683, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_sqs.py::TestTaskServiceSqs::test_send_message_no_such_queue_no_catch": 1.2701044060002005, + "tests/aws/services/stepfunctions/v2/error_handling/test_task_service_sqs.py::TestTaskServiceSqs::test_sqs_failure_in_wait_for_task_tok": 2.8847892620001403, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map[ITEMS]": 1.5124981090000347, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map[ITEMS_DOUBLE_QUOTES]": 1.3074590539997644, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map[MAX_CONCURRENCY]": 1.2635960329998852, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map[TOLERATED_FAILURE_COUNT]": 5.982920723999996, + 
"tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map[TOLERATED_FAILURE_PERCENTAGE]": 1.2492021509999631, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map_from_input[ITEMS]": 2.8936900949999824, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map_from_input[MAX_CONCURRENCY]": 2.364839197000009, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map_from_input[TOLERATED_FAILURE_COUNT]": 2.3520260450000308, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_map_from_input[TOLERATED_FAILURE_PERCENTAGE]": 2.864340380000016, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_task[HEARTBEAT_SECONDS]": 2.6900220970003375, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_task[TIMEOUT_SECONDS]": 0.002943756000149733, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_task_from_input[HEARTBEAT_SECONDS]": 19.219125480999992, + "tests/aws/services/stepfunctions/v2/evaluate_jsonata/test_base_evaluate_expressions.py::TestBaseEvaluateJsonata::test_base_task_from_input[TIMEOUT_SECONDS]": 0.00261609899999371, + "tests/aws/services/stepfunctions/v2/express/test_express_async.py::TestExpressAsync::test_base[BASE_PASS_RESULT]": 1.385742117999996, + "tests/aws/services/stepfunctions/v2/express/test_express_async.py::TestExpressAsync::test_base[BASE_RAISE_FAILURE]": 1.3235642460000179, + "tests/aws/services/stepfunctions/v2/express/test_express_async.py::TestExpressAsync::test_catch": 3.1208249539999997, + "tests/aws/services/stepfunctions/v2/express/test_express_async.py::TestExpressAsync::test_query_runtime_memory": 2.3189038050000192, + "tests/aws/services/stepfunctions/v2/express/test_express_async.py::TestExpressAsync::test_retry": 10.220456748999993, + "tests/aws/services/stepfunctions/v2/express/test_express_sync.py::TestExpressSync::test_base[BASE_PASS_RESULT]": 0.6488946609999857, + "tests/aws/services/stepfunctions/v2/express/test_express_sync.py::TestExpressSync::test_base[BASE_RAISE_FAILURE]": 0.5810687780000308, + "tests/aws/services/stepfunctions/v2/express/test_express_sync.py::TestExpressSync::test_catch": 2.3031642089999877, + "tests/aws/services/stepfunctions/v2/express/test_express_sync.py::TestExpressSync::test_query_runtime_memory": 1.4983592529999896, + "tests/aws/services/stepfunctions/v2/express/test_express_sync.py::TestExpressSync::test_retry": 9.518933849000007, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array.py::TestArray::test_array_0": 0.7112540700000238, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array.py::TestArray::test_array_2": 3.539180582, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array.py::TestArray::test_array_contains": 3.274367464000022, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array.py::TestArray::test_array_get_item": 0.7470074790000183, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array.py::TestArray::test_array_length": 0.7250492139999949, + 
"tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array.py::TestArray::test_array_partition": 8.268853872000022, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array.py::TestArray::test_array_range": 1.6389364770000157, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array.py::TestArray::test_array_unique": 0.7151993699999935, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array_jsonata.py::TestArrayJSONata::test_array_partition": 6.578431593999994, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_array_jsonata.py::TestArrayJSONata::test_array_range": 1.9384576600000116, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_encode_decode.py::TestEncodeDecode::test_base_64_decode": 1.0561480449999578, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_encode_decode.py::TestEncodeDecode::test_base_64_encode": 1.0773645479999914, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_generic.py::TestGeneric::test_context_json_path": 0.7347040760000141, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_generic.py::TestGeneric::test_escape_sequence": 0.4899449289999893, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_generic.py::TestGeneric::test_format_1": 2.57576478499999, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_generic.py::TestGeneric::test_format_2": 2.888833508999994, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_generic.py::TestGeneric::test_nested_calls_1": 0.7221621589999927, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_generic.py::TestGeneric::test_nested_calls_2": 0.7433459490000018, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_hash_calculations.py::TestHashCalculations::test_hash": 2.0014806189999774, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_json_manipulation.py::TestJsonManipulation::test_json_merge": 0.7392130110000039, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_json_manipulation.py::TestJsonManipulation::test_json_merge_escaped_argument": 0.7730180359999679, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_json_manipulation.py::TestJsonManipulation::test_json_to_string": 2.8707337919999816, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_json_manipulation.py::TestJsonManipulation::test_string_to_json": 3.5274148839999384, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_json_manipulation_jsonata.py::TestJsonManipulationJSONata::test_parse": 2.2068313350000324, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_math_operations.py::TestMathOperations::test_math_add": 7.682675031999992, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_math_operations.py::TestMathOperations::test_math_random": 1.4768367960000148, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_math_operations.py::TestMathOperations::test_math_random_seeded": 0.8083927330000051, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_math_operations_jsonata.py::TestMathOperationsJSONata::test_math_random_seeded": 0.0022870579999789697, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_string_operations.py::TestStringOperations::test_string_split": 2.6245983920000526, + "tests/aws/services/stepfunctions/v2/intrinsic_functions/test_string_operations.py::TestStringOperations::test_string_split_context_object": 0.7277200239999502, + 
"tests/aws/services/stepfunctions/v2/intrinsic_functions/test_unique_id_generation.py::TestUniqueIdGeneration::test_uuid": 0.5273850570000036, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_base[pass_result.json5_ALL_False]": 1.0991152099999795, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_base[pass_result.json5_ALL_True]": 1.1203211529999635, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_base[raise_failure.json5_ALL_False]": 1.0969936850000295, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_base[raise_failure.json5_ALL_True]": 1.122685502999957, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_base[wait_seconds_path.json5_ALL_False]": 1.1241086410000776, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_base[wait_seconds_path.json5_ALL_True]": 1.091840260999959, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_deleted_log_group": 1.112391792999972, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_log_group_with_multiple_runs": 1.6973937529999716, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[pass_result.json5_ERROR_False]": 0.8344910069999969, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[pass_result.json5_ERROR_True]": 1.0060469539999985, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[pass_result.json5_FATAL_False]": 0.8023937910000427, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[pass_result.json5_FATAL_True]": 0.8060499129999812, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[pass_result.json5_OFF_False]": 0.9939169280000328, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[pass_result.json5_OFF_True]": 0.7853987110000276, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[raise_failure.json5_ERROR_False]": 1.0878679320000515, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[raise_failure.json5_ERROR_True]": 1.0768040160000396, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[raise_failure.json5_FATAL_False]": 0.8679731189999984, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[raise_failure.json5_FATAL_True]": 1.524016210999946, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[raise_failure.json5_OFF_False]": 0.7912509780000505, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[raise_failure.json5_OFF_True]": 0.7940549290000263, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[wait_seconds_path.json5_ERROR_False]": 1.067117301000053, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[wait_seconds_path.json5_ERROR_True]": 1.0814539109999828, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[wait_seconds_path.json5_FATAL_False]": 1.093506686000012, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[wait_seconds_path.json5_FATAL_True]": 0.9094880190000367, + 
"tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[wait_seconds_path.json5_OFF_False]": 1.003353427000036, + "tests/aws/services/stepfunctions/v2/logs/test_logs.py::TestLogs::test_partial_log_levels[wait_seconds_path.json5_OFF_True]": 1.000033411000004, + "tests/aws/services/stepfunctions/v2/mocking/test_aws_scenarios.py::TestBaseScenarios::test_lambda_sqs_integration_happy_path": 0.4298779609999883, + "tests/aws/services/stepfunctions/v2/mocking/test_aws_scenarios.py::TestBaseScenarios::test_lambda_sqs_integration_hybrid_path": 0.5882886379999945, + "tests/aws/services/stepfunctions/v2/mocking/test_aws_scenarios.py::TestBaseScenarios::test_lambda_sqs_integration_retry_path": 7.255779894, + "tests/aws/services/stepfunctions/v2/mocking/test_base_callbacks.py::TestBaseScenarios::test_sfn_start_execution_sync[SFN_SYNC2]": 2.508866942999987, + "tests/aws/services/stepfunctions/v2/mocking/test_base_callbacks.py::TestBaseScenarios::test_sfn_start_execution_sync[SFN_SYNC]": 1.8060861090000344, + "tests/aws/services/stepfunctions/v2/mocking/test_base_callbacks.py::TestBaseScenarios::test_sqs_wait_for_task_token": 1.6629305239999894, + "tests/aws/services/stepfunctions/v2/mocking/test_base_callbacks.py::TestBaseScenarios::test_sqs_wait_for_task_token_task_failure": 1.7959237549999898, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_dynamodb_put_get_item": 1.0967360960000292, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_events_put_events": 0.9760736130000396, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_lambda_invoke": 0.9727351920000729, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_lambda_invoke_retries": 3.4049586710000312, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_lambda_service_invoke": 1.0442268720000243, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_map_state_lambda": 1.6208897540000748, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_parallel_state_lambda": 1.323694504000116, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_sns_publish_base": 1.028937396999936, + "tests/aws/services/stepfunctions/v2/mocking/test_base_scenarios.py::TestBaseScenarios::test_sqs_send_message": 1.089692545000048, + "tests/aws/services/stepfunctions/v2/mocking/test_mock_config_file.py::TestMockConfigFile::test_is_mock_config_flag_detected_set": 0.0047302640001021246, + "tests/aws/services/stepfunctions/v2/mocking/test_mock_config_file.py::TestMockConfigFile::test_is_mock_config_flag_detected_unset": 0.006179448000011689, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_cases[BASE_DIRECT_EXPR]": 1.0610666489999971, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_cases[BASE_EMPTY]": 0.7541225579999491, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_cases[BASE_EXPR]": 1.1091091870000582, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_cases[BASE_LITERALS]": 0.9570830819999401, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_lambda[BASE_LAMBDA]": 3.5011377289999928, 
+ "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_output_any_non_dict[BOOL]": 0.9843170040000473, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_output_any_non_dict[FLOAT]": 0.9961758139999688, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_output_any_non_dict[INT]": 0.7578635230000259, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_output_any_non_dict[JSONATA_EXPR]": 0.9450328740000487, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_output_any_non_dict[LIST_EMPY]": 0.7390954169999304, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_output_any_non_dict[LIST_RICH]": 0.9663390250000248, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_output_any_non_dict[NULL]": 0.7527677370000561, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_output_any_non_dict[STR_LIT]": 0.9869880260000059, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_base_task_lambda[BASE_TASK_LAMBDA]": 3.0032340090000957, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_output_in_choice[CONDITION_FALSE]": 0.7934537460000115, + "tests/aws/services/stepfunctions/v2/outputdecl/test_output.py::TestArgumentsBase::test_output_in_choice[CONDITION_TRUE]": 0.8168202179999753, + "tests/aws/services/stepfunctions/v2/query_language/test_base_query_language.py::TestBaseQueryLanguage::test_base_query_language_field[JSONATA]": 0.5132882810000297, + "tests/aws/services/stepfunctions/v2/query_language/test_base_query_language.py::TestBaseQueryLanguage::test_base_query_language_field[JSON_PATH]": 0.5158672260000117, + "tests/aws/services/stepfunctions/v2/query_language/test_base_query_language.py::TestBaseQueryLanguage::test_jsonata_query_language_field_downgrade_exception": 0.0017928640000377527, + "tests/aws/services/stepfunctions/v2/query_language/test_base_query_language.py::TestBaseQueryLanguage::test_query_language_field_override[JSONATA_OVERRIDE]": 0.49603257400002576, + "tests/aws/services/stepfunctions/v2/query_language/test_base_query_language.py::TestBaseQueryLanguage::test_query_language_field_override[JSONATA_OVERRIDE_DEFAULT]": 0.703305677000003, + "tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_lambda_task_resource_data_flow[TASK_LAMBDA_LEGACY_RESOURCE_JSONATA_TO_JSONPATH]": 2.5336430170000313, + "tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_lambda_task_resource_data_flow[TASK_LAMBDA_LEGACY_RESOURCE_JSONPATH_TO_JSONATA]": 3.0987200990000474, + "tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_lambda_task_resource_data_flow[TASK_LAMBDA_SDK_RESOURCE_JSONATA_TO_JSONPATH]": 2.3309422820000236, + "tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_lambda_task_resource_data_flow[TASK_LAMBDA_SDK_RESOURCE_JSONPATH_TO_JSONATA]": 2.289668968000001, + "tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_output_to_state[JSONATA_OUTPUT_TO_JSONPATH]": 0.9140576300000589, + 
"tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_output_to_state[JSONPATH_OUTPUT_TO_JSONATA]": 0.9424557529999902, + "tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_task_dataflow_to_state": 2.57403901400005, + "tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_variable_sampling[JSONATA_ASSIGN_JSONPATH_REF]": 0.9069866300000058, + "tests/aws/services/stepfunctions/v2/query_language/test_mixed_query_language.py::TestMixedQueryLanguageFlow::test_variable_sampling[JSONPATH_ASSIGN_JSONATA_REF]": 0.897081490000005, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_catch_empty": 2.127353888000016, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_catch_states_runtime": 2.4533762840001145, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_aws_docs_scenario[CHOICE_STATE_AWS_SCENARIO]": 0.9118220770000107, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_aws_docs_scenario[CHOICE_STATE_AWS_SCENARIO_JSONATA]": 0.856970581999974, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_condition_constant_jsonata": 1.2965167919999772, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_singleton_composite[CHOICE_STATE_SINGLETON_COMPOSITE]": 0.8027653020000685, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_singleton_composite[CHOICE_STATE_SINGLETON_COMPOSITE_JSONATA]": 0.8040931949999504, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_singleton_composite[CHOICE_STATE_SINGLETON_COMPOSITE_LITERAL_JSONATA]": 0.6208838049999486, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_unsorted_parameters_negative[CHOICE_STATE_UNSORTED_CHOICE_PARAMETERS]": 0.8631936680000081, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_unsorted_parameters_negative[CHOICE_STATE_UNSORTED_CHOICE_PARAMETERS_JSONATA]": 0.7992374809999774, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_unsorted_parameters_positive[CHOICE_STATE_UNSORTED_CHOICE_PARAMETERS]": 1.0150197530000469, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_choice_unsorted_parameters_positive[CHOICE_STATE_UNSORTED_CHOICE_PARAMETERS_JSONATA]": 0.8753280299999915, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_escape_sequence_parsing[ESCAPE_SEQUENCES_JSONATA_COMPARISON_ASSIGN]": 0.7874183390000553, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_escape_sequence_parsing[ESCAPE_SEQUENCES_JSONATA_COMPARISON_OUTPUT]": 0.7899693780000234, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_escape_sequence_parsing[ESCAPE_SEQUENCES_JSONPATH]": 0.8209627549999823, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_escape_sequence_parsing[ESCAPE_SEQUENCES_STRING_LITERALS]": 
0.9027063820000194, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_fail_cause_jsonata": 0.7872426849999101, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_fail_error_jsonata": 0.8101193979999834, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_illegal_escapes[ESCAPE_SEQUENCES_ILLEGAL_INTRINSIC_FUNCTION]": 0.0020480299999690033, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_illegal_escapes[ESCAPE_SEQUENCES_ILLEGAL_INTRINSIC_FUNCTION_2]": 0.0015909170000441009, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_invalid_jsonpath[INVALID_JSONPATH_IN_ERRORPATH]": 0.7843457290000515, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_invalid_jsonpath[INVALID_JSONPATH_IN_STRING_EXPR_CONTEXTPATH]": 0.7890845709998757, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_invalid_jsonpath[INVALID_JSONPATH_IN_STRING_EXPR_JSONPATH]": 0.7637312040000097, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_invalid_jsonpath[ST.INVALID_JSONPATH_IN_CAUSEPATH]": 0.7857035229998246, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_invalid_jsonpath[ST.INVALID_JSONPATH_IN_HEARTBEATSECONDSPATH]": 0.0016274670000484548, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_invalid_jsonpath[ST.INVALID_JSONPATH_IN_INPUTPATH]": 0.7786313049998626, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_invalid_jsonpath[ST.INVALID_JSONPATH_IN_OUTPUTPATH]": 0.7969828709999547, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_invalid_jsonpath[ST.INVALID_JSONPATH_IN_TIMEOUTSECONDSPATH]": 0.0017937459999757266, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_lambda_empty_retry": 2.3361986039999465, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_lambda_invoke_with_retry_base": 9.685449987000027, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_lambda_invoke_with_retry_extended_input": 9.816028315999915, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_lambda_service_invoke_with_retry_extended_input": 10.068122916999982, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_batching_base_json_max_per_batch_jsonata": 0.0020172430000116037, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_csv_headers_decl": 0.9342256750000502, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_csv_headers_first_line": 0.9259451659999627, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_json": 0.8799753989999886, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_json_max_items": 0.8908522689999927, + 
"tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_json_max_items_jsonata": 0.9775742319999949, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_json_with_items_path[INVALID_ITEMS_PATH]": 1.1868574659999922, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_json_with_items_path[VALID_ITEMS_PATH_FROM_ITEM_READER]": 1.104373690999978, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_json_with_items_path[VALID_ITEMS_PATH_FROM_PREVIOUS]": 1.7620481210000207, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_base_list_objects_v2": 0.920342317999939, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_first_row_extra_fields": 0.8956037490000313, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_headers_decl_duplicate_headers": 0.880209648999994, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_headers_decl_extra_fields": 0.909124845000008, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_headers_first_row_typed_headers": 0.893443278999996, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items[0]": 0.8930396150000774, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items[100000000]": 0.9011406380000722, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items[2]": 0.9068365100000619, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items_paths[-1]": 0.901848957000027, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items_paths[0]": 1.122109933000047, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items_paths[1.5]": 0.021534945999974298, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items_paths[100000000]": 1.1244338209999682, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items_paths[100000001]": 0.9100951719999557, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_csv_max_items_paths[2]": 0.8587545689999274, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_item_reader_json_no_json_list_object": 0.9138318239999421, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state": 0.9021676799999909, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_break_condition": 0.9319269180000447, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_break_condition_legacy": 
0.9219132339999305, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_catch": 1.5828382560000023, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_catch_empty_fail": 0.8348867340000083, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_catch_legacy": 0.816178389000072, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_config_distributed_item_selector": 0.8726636060000033, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_config_distributed_item_selector_parameters": 1.1619875570000318, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_config_distributed_items_path_from_previous": 0.8817333270000063, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_config_distributed_parameters": 0.9005068129999927, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_config_distributed_reentrant": 1.7203029819999642, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_config_distributed_reentrant_lambda": 2.9714986159999626, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_config_inline_item_selector": 0.8665574150001021, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_config_inline_parameters": 0.9104926609999779, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_item_selector[MAP_STATE_ITEM_SELECTOR]": 2.01728774999998, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_item_selector[MAP_STATE_ITEM_SELECTOR_JSONATA]": 0.823910264999995, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_item_selector_parameters": 1.1018301940000015, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_item_selector_singleton": 1.379092350999997, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata[empty]": 0.7443364420000194, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata[mixed]": 0.7585215819999576, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata[singleton]": 0.7431942860000618, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_fail[boolean]": 0.8233383040000035, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_fail[function]": 0.0019108139999843843, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_fail[null]": 1.5184472420000361, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_fail[number]": 0.6202659220000442, + 
"tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_fail[object]": 0.6193094079999923, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_fail[string]": 0.7979415620000054, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_variable_sampling_fail[boolean]": 0.8803093770000601, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_variable_sampling_fail[null]": 0.8207496560000322, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_variable_sampling_fail[number]": 0.8729903220000779, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_variable_sampling_fail[object]": 1.478492378999988, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_eval_jsonata_variable_sampling_fail[string]": 0.9278627799999981, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_input_array[empty]": 0.7715039409999349, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_input_array[mixed]": 0.7937926080000466, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_input_array[singleton]": 0.7719049730000052, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_input_types[boolean]": 1.0055935899999895, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_input_types[null]": 1.0376942850000432, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_input_types[number]": 1.0144505949999711, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_input_types[object]": 1.0150065149999818, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_input_types[string]": 1.0126865450000082, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_variable_sampling[boolean]": 0.8491785250000703, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_variable_sampling[null]": 0.8553098920000366, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_variable_sampling[number]": 0.8535434150000469, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_variable_sampling[object]": 0.8686161370000036, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_items_variable_sampling[string]": 0.8184311840000191, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_label": 0.7752743809999743, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_legacy": 0.9245326360000377, + 
"tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_legacy_config_distributed": 0.8512868679999883, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_legacy_config_distributed_item_selector": 0.8858682039999621, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_legacy_config_distributed_parameters": 1.6015615369999523, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_legacy_config_inline": 0.8759511889999771, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_legacy_config_inline_item_selector": 0.8891641520000348, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_legacy_config_inline_parameters": 0.9217837090000103, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_legacy_reentrant": 1.7292490389999102, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_nested": 0.9526129700000183, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_nested_config_distributed": 0.9301677050000308, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_nested_config_distributed_no_max_max_concurrency": 10.49850474699997, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_no_processor_config": 0.8461344010000857, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_parameters_legacy": 1.9987706910000043, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_parameters_singleton_legacy": 1.3964725090000343, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_result_writer": 1.1968832370000655, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_retry": 3.7905137230000037, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_retry_legacy": 3.771064236999962, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_retry_multiple_retriers": 7.797399253000037, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_count_path[-1]": 0.7564521570000124, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_count_path[0]": 0.7770182189999559, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_count_path[1]": 0.7938085790000287, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_count_path[NoNumber]": 0.7770945209999809, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_count_path[tolerated_failure_count_value0]": 0.7890256630000181, + 
"tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[-1.1]": 0.7790876360000425, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[-1]": 0.7841271589999224, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[0]": 0.7986977800000545, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[1.1]": 0.7926881330000128, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[100.1]": 1.4472125569999434, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[100]": 0.8202620320000165, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[1]": 0.7906418620000295, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[NoNumber]": 0.807930521000003, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_percentage_path[tolerated_failure_percentage_value0]": 0.8311843440000075, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_values[count_literal]": 0.7971971620000318, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_map_state_tolerated_failure_values[percentage_literal]": 0.7898400449999485, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_max_concurrency_path[0]": 0.8114140269999552, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_max_concurrency_path[1]": 0.7718252359999269, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_max_concurrency_path[NoNumber]": 0.7933895819999748, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_max_concurrency_path[max_concurrency_value0]": 0.7749181920000865, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_max_concurrency_path_negative": 0.8360408509999502, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_parallel_state[PARALLEL_STATE]": 0.8670177540000168, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_parallel_state[PARALLEL_STATE_PARAMETERS]": 0.7991589989999852, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_parallel_state_catch": 0.8015887449999468, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_parallel_state_fail": 0.7295432119999532, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_parallel_state_nested": 1.0682879039999875, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_parallel_state_order": 0.8956581660000893, + 
"tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_parallel_state_retry": 3.7150253539999767, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_retry_interval_features": 6.854689269000005, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_retry_interval_features_jitter_none": 4.4824249079999845, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_retry_interval_features_max_attempts_zero": 2.4179820459999064, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_seconds_jsonata": 0.5606232650001175, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp[NANOSECONDS]": 0.5494174629999407, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp[SECONDS]": 0.5685417750000852, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_invalid[INVALID_DATE]": 0.47077886199986096, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_invalid[INVALID_ISO]": 0.4925717659999691, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_invalid[INVALID_TIME]": 0.5009299390000024, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_invalid[JSONATA]": 0.49709355000004507, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_invalid[NO_T]": 0.5341191719999188, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_invalid[NO_Z]": 1.2602082749999681, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_jsonata[INVALID_DATE]": 0.0016176479999785442, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_jsonata[INVALID_ISO]": 0.001612166999962028, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_jsonata[INVALID_TIME]": 0.0016243189999158858, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_jsonata[NANOSECONDS]": 0.764752772000179, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_jsonata[NO_T]": 0.0018718020000960678, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_jsonata[NO_Z]": 0.0016137190000335977, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_jsonata[SECONDS]": 0.7603305319998981, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_path[INVALID_DATE]": 0.7800436889999673, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_path[INVALID_ISO]": 0.7845425670001305, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_path[INVALID_TIME]": 0.7794426140001178, + 
"tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_path[NANOSECONDS]": 0.7812388810000357, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_path[NO_T]": 0.7792963299999656, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_path[NO_Z]": 0.7701200210000252, + "tests/aws/services/stepfunctions/v2/scenarios/test_base_scenarios.py::TestBaseScenarios::test_wait_timestamp_path[SECONDS]": 0.7850010410001005, + "tests/aws/services/stepfunctions/v2/scenarios/test_sfn_scenarios.py::TestFundamental::test_path_based_on_data": 7.1810407560001295, + "tests/aws/services/stepfunctions/v2/scenarios/test_sfn_scenarios.py::TestFundamental::test_step_functions_calling_api_gateway": 11.47213118000002, + "tests/aws/services/stepfunctions/v2/scenarios/test_sfn_scenarios.py::TestFundamental::test_wait_for_callback": 19.634785126999873, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_base": 3.2372750849999647, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_error": 3.2119770099999414, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_with_body_post[HelloWorld]": 3.3159384650000447, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_with_body_post[None]": 4.302116198000135, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_with_body_post[]": 3.2289084220000177, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_with_body_post[request_body3]": 3.273048779000078, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_with_headers[custom_header1]": 3.3185169459999315, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_with_headers[custom_header2]": 3.316922142999829, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_with_headers[singleStringHeader]": 0.0032847590000528726, + "tests/aws/services/stepfunctions/v2/services/test_apigetway_task_service.py::TestTaskApiGateway::test_invoke_with_query_parameters": 3.637560958999984, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_dynamodb_put_delete_item": 2.063889763000134, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_dynamodb_put_get_item": 2.930973487999836, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_dynamodb_put_update_get_item": 1.4410318709999501, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_list_secrets": 1.0707105860000183, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_get_object[binary]": 1.3676194119999536, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_get_object[bytearray]": 1.2578233740000542, + 
"tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_get_object[empty_binary]": 1.2743115000001808, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_get_object[empty_str]": 1.2727675630000022, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_get_object[str]": 1.2479577740000423, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_put_object[bool]": 1.3542705929999101, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_put_object[dict]": 1.3002291529999184, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_put_object[list]": 1.325375301000122, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_put_object[num]": 1.3771391090000407, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_s3_put_object[str]": 1.2955481290000534, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_sfn_send_task_outcome_with_no_such_token[state_machine_template0]": 1.062097733000087, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_sfn_send_task_outcome_with_no_such_token[state_machine_template1]": 1.027511271999856, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_sfn_start_execution": 1.2197346759999164, + "tests/aws/services/stepfunctions/v2/services/test_aws_sdk_task_service.py::TestTaskServiceAwsSdk::test_sfn_start_execution_implicit_json_serialisation": 1.237098405000097, + "tests/aws/services/stepfunctions/v2/services/test_dynamodb_task_service.py::TestTaskServiceDynamoDB::test_base_integrations[DYNAMODB_PUT_DELETE_ITEM]": 1.3658884149999722, + "tests/aws/services/stepfunctions/v2/services/test_dynamodb_task_service.py::TestTaskServiceDynamoDB::test_base_integrations[DYNAMODB_PUT_GET_ITEM]": 1.3603615540000646, + "tests/aws/services/stepfunctions/v2/services/test_dynamodb_task_service.py::TestTaskServiceDynamoDB::test_base_integrations[DYNAMODB_PUT_QUERY]": 1.3715085019999833, + "tests/aws/services/stepfunctions/v2/services/test_dynamodb_task_service.py::TestTaskServiceDynamoDB::test_base_integrations[DYNAMODB_PUT_UPDATE_GET_ITEM]": 2.688038919000064, + "tests/aws/services/stepfunctions/v2/services/test_dynamodb_task_service.py::TestTaskServiceDynamoDB::test_invalid_integration": 0.6785854890000564, + "tests/aws/services/stepfunctions/v2/services/test_ecs_task_service.py::TestTaskServiceECS::test_run_task": 0.0018661310000425146, + "tests/aws/services/stepfunctions/v2/services/test_ecs_task_service.py::TestTaskServiceECS::test_run_task_raise_failure": 0.0018429689999948096, + "tests/aws/services/stepfunctions/v2/services/test_ecs_task_service.py::TestTaskServiceECS::test_run_task_sync": 0.0017047599999386875, + "tests/aws/services/stepfunctions/v2/services/test_ecs_task_service.py::TestTaskServiceECS::test_run_task_sync_raise_failure": 0.0017425200001071062, + "tests/aws/services/stepfunctions/v2/services/test_events_task_service.py::TestTaskServiceEvents::test_put_events_base": 2.1330921109999963, + 
"tests/aws/services/stepfunctions/v2/services/test_events_task_service.py::TestTaskServiceEvents::test_put_events_malformed_detail": 1.0477688399998897, + "tests/aws/services/stepfunctions/v2/services/test_events_task_service.py::TestTaskServiceEvents::test_put_events_mixed_malformed_detail": 1.0572384390000025, + "tests/aws/services/stepfunctions/v2/services/test_events_task_service.py::TestTaskServiceEvents::test_put_events_no_source": 31.922309869999935, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_bytes_payload": 2.1567627590001166, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_json_values[0.0]": 2.1660646919999635, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_json_values[0_0]": 2.1574068959999977, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_json_values[0_1]": 2.1662054089999856, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_json_values[HelloWorld]": 2.1930020790001663, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_json_values[True]": 2.143112401999929, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_json_values[json_value5]": 2.158147387999975, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_json_values[json_value6]": 2.198048435999908, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_pipe": 3.809157419999906, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_invoke_string_payload": 2.161412453999901, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task.py::TestTaskLambda::test_lambda_task_filter_parameters_input": 2.437066474999938, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke": 2.617546779999884, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_bytes_payload": 2.4323566979999214, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_json_values[0.0]": 3.6030644189999066, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_json_values[0_0]": 2.632834134999939, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_json_values[0_1]": 2.6459750610000583, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_json_values[HelloWorld]": 2.5823866819998784, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_json_values[True]": 2.663581646999887, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_json_values[json_value5]": 2.6687649639999336, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_json_values[json_value6]": 2.6483745560000216, + "tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_invoke_unsupported_param": 2.6298553699998592, + 
"tests/aws/services/stepfunctions/v2/services/test_lambda_task_service.py::TestTaskServiceLambda::test_list_functions": 0.0027924789999360655, + "tests/aws/services/stepfunctions/v2/services/test_sfn_task_service.py::TestTaskServiceSfn::test_start_execution": 1.2591937419999795, + "tests/aws/services/stepfunctions/v2/services/test_sfn_task_service.py::TestTaskServiceSfn::test_start_execution_input_json": 1.2256195539999908, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_fifo_message_attribute[input_params0-True]": 1.3244576369999095, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_fifo_message_attribute[input_params1-False]": 1.0515114160000394, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_base[1]": 0.9827651380001043, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_base[HelloWorld]": 1.0639517439999508, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_base[None]": 1.0646824670000115, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_base[True]": 1.0150817599998163, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_base[]": 1.092534255999908, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_base[message1]": 1.010055293999926, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_base_error_topic_arn": 1.042933230000017, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_message_attributes[\"HelloWorld\"]": 1.3483258469998418, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_message_attributes[HelloWorld]": 1.2223543759999984, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_message_attributes[message_value3]": 2.2614442440000175, + "tests/aws/services/stepfunctions/v2/services/test_sns_task_service.py::TestTaskServiceSns::test_publish_message_attributes[{}]": 1.1499791620000224, + "tests/aws/services/stepfunctions/v2/services/test_sqs_task_service.py::TestTaskServiceSqs::test_send_message": 1.2444930250001107, + "tests/aws/services/stepfunctions/v2/services/test_sqs_task_service.py::TestTaskServiceSqs::test_send_message_attributes": 1.552836147999983, + "tests/aws/services/stepfunctions/v2/services/test_sqs_task_service.py::TestTaskServiceSqs::test_send_message_unsupported_parameters": 1.2420051190001686, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_catch_error_variable_sampling[TASK_CATCH_ERROR_VARIABLE_SAMPLING]": 2.6772251750001033, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_catch_error_variable_sampling[TASK_CATCH_ERROR_VARIABLE_SAMPLING_TO_JSONPATH]": 2.52065858200001, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_map_catch_error[MAP_CATCH_ERROR_OUTPUT]": 0.0028028579999954673, + 
"tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_map_catch_error[MAP_CATCH_ERROR_OUTPUT_WITH_RETRY]": 0.0017257890000337284, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_map_catch_error[MAP_CATCH_ERROR_VARIABLE_SAMPLING]": 0.0017162020000114353, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_parallel_catch_error[PARALLEL_CATCH_ERROR_OUTPUT]": 0.0017203690000542338, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_parallel_catch_error[PARALLEL_CATCH_ERROR_OUTPUT_WITH_RETRY]": 0.0018743660000382079, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_parallel_catch_error[PARALLEL_CATCH_ERROR_VARIABLE_SAMPLING]": 0.0018528449999166696, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_task_catch_error_output[TASK_CATCH_ERROR_OUTPUT]": 2.441055682999945, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_task_catch_error_output[TASK_CATCH_ERROR_OUTPUT_TO_JSONPATH]": 2.4661862060000885, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_task_catch_error_with_retry[TASK_CATCH_ERROR_OUTPUT_WITH_RETRY]": 3.7022690969999985, + "tests/aws/services/stepfunctions/v2/states_variables/test_error_output.py::TestStateVariablesTemplate::test_task_catch_error_with_retry[TASK_CATCH_ERROR_OUTPUT_WITH_RETRY_TO_JSONPATH]": 3.6969504429999915, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_cloudformation_definition_create_describe[dump]": 1.5881436349999376, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_cloudformation_definition_create_describe[dumps]": 1.576368971000079, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_cloudformation_definition_string_create_describe[dump]": 1.553797421000013, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_cloudformation_definition_string_create_describe[dumps]": 1.5605832370000599, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_delete_invalid_sm": 0.6961708160000626, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_delete_valid_sm": 1.6919566930000656, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_duplicate_definition_format_sm": 0.5611284749999186, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_duplicate_sm_name": 0.602154779999978, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_exact_duplicate_sm": 0.6265577960000428, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_update_state_machine_base_definition": 0.6139535479999267, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_update_state_machine_base_definition_and_role": 0.9179003170000897, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_update_state_machine_base_role_arn": 0.9523478289999048, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_update_state_machine_base_update_none": 0.5811059049999585, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_create_update_state_machine_same_parameters": 0.803074760999948, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_delete_nonexistent_sm": 0.5612622969999848, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_describe_execution": 0.8532308579998471, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_describe_execution_arn_containing_punctuation": 0.8378504150000481, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_describe_execution_invalid_arn": 0.42182849099992836, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_describe_execution_no_such_state_machine": 0.8125546520001308, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_describe_invalid_arn_sm": 0.4286800000000994, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_describe_nonexistent_sm": 0.5518591449999803, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_describe_sm_arn_containing_punctuation": 0.5658289559999048, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_describe_state_machine_for_execution": 0.6573217249999743, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_get_execution_history_invalid_arn": 0.435370929999749, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_get_execution_history_no_such_execution": 0.6082120300001179, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_get_execution_history_reversed": 0.6439360819998683, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_invalid_start_execution_arn": 0.5699820519998866, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_invalid_start_execution_input": 0.9259551009999996, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_list_execution_invalid_arn": 0.42492491699999846, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_list_execution_no_such_state_machine": 0.5414465860000064, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_list_executions_pagination": 2.3908951109999634, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_list_executions_versions_pagination": 2.032395019999967, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_list_sms": 1.7774182319999454, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_list_sms_pagination": 1.0339373209999394, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_start_execution": 0.7167468789999702, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_start_execution_idempotent": 1.4154493189998902, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_start_sync_execution": 0.5716178250000894, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_state_machine_status_filter": 0.7285088309999992, + "tests/aws/services/stepfunctions/v2/test_sfn_api.py::TestSnfApi::test_stop_execution": 0.6345933199999081, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[\\x00activity]": 0.34831221500007814, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity name]": 1.4067500859999882, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity\"name]": 0.344950960999995, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity#name]": 0.345598306999932, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity$name]": 0.362341008000044, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity%name]": 0.3649620259998301, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity&name]": 0.3531558590000259, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity*name]": 0.34858276499994645, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity,name]": 0.34580451300007553, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity/name]": 0.3450719850001178, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity:name]": 0.33823142400012784, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity;name]": 0.341224994000072, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity<name]": 0.3389105019999761, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity>name]": 0.3462244620000092, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity?name]": 0.35119658899998285, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity[name]": 0.34867460700002084, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity\\\\name]": 0.37689796099994055, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity\\x1f]": 0.34322929200004637, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity\\x7f]": 0.35415321700008917, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity]name]": 0.33920537700009845, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity^name]": 0.34953446700001223, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity`name]": 0.35634314399987943, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity{name]": 0.33655065799985096, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity|name]": 0.3370144479998771, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity}name]": 0.3410622929999363, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_activity_invalid_name[activity~name]": 0.35655057299993587, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[ACTIVITY_NAME_ABC]": 0.43021359899989875, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[Activity1]": 0.4208897789999355, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]": 0.41945780699995794, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[activity-name.1]": 0.41681878199995026, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[activity-name_123]": 0.4096636390002004, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[activity.name.v2]": 0.41834955300009824, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[activity.name]": 0.4197662550001269, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[activityName.with.dots]": 0.41866850200005956, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_create_describe_delete_activity[activity_123.name]": 0.4175606719999223, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_describe_activity_invalid_arn": 0.44914019299994834, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_describe_deleted_activity": 0.35904049100008706, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_get_activity_task_deleted": 0.3707353500000181, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_get_activity_task_invalid_arn": 0.43628677600008814, + "tests/aws/services/stepfunctions/v2/test_sfn_api_activities.py::TestSnfApiActivities::test_list_activities": 0.3721023289999721, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_base_create_alias_single_router_config": 0.8076415270001007, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_base_lifecycle_create_delete_list": 0.9577133400000548, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_base_lifecycle_create_invoke_describe_list": 1.1402098880000722, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_base_lifecycle_create_update_describe": 0.878736133999837, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_delete_no_such_alias_arn": 0.8725599350000266, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_delete_revision_with_alias": 1.9021881990000793, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_delete_version_with_alias": 0.8622258359997659, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_error_create_alias_invalid_name": 0.8431407589999935, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_error_create_alias_invalid_router_configs": 0.9765689229998316, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_error_create_alias_not_idempotent": 0.8493711649998659, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_error_create_alias_with_state_machine_arn": 0.7923267229999738, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_idempotent_create_alias": 0.8727633209999794, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_list_state_machine_aliases_pagination_invalid_next_token": 0.8502278880000631, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_list_state_machine_aliases_pagination_max_results[0]": 0.9624754760001224, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_list_state_machine_aliases_pagination_max_results[1]": 0.9358237839998083, + "tests/aws/services/stepfunctions/v2/test_sfn_api_aliasing.py::TestSfnApiAliasing::test_update_no_such_alias_arn": 0.8257441810001183, + "tests/aws/services/stepfunctions/v2/test_sfn_api_express.py::TestSfnApiExpress::test_create_describe_delete": 0.8811360439999589, + "tests/aws/services/stepfunctions/v2/test_sfn_api_express.py::TestSfnApiExpress::test_illegal_activity_task": 1.0678905810000288, + "tests/aws/services/stepfunctions/v2/test_sfn_api_express.py::TestSfnApiExpress::test_illegal_callbacks[SYNC]": 0.9871877439999253, + "tests/aws/services/stepfunctions/v2/test_sfn_api_express.py::TestSfnApiExpress::test_illegal_callbacks[WAIT_FOR_TASK_TOKEN]": 1.0379468339999676, + "tests/aws/services/stepfunctions/v2/test_sfn_api_express.py::TestSfnApiExpress::test_start_async_describe_history_execution": 1.5414078940000309, + "tests/aws/services/stepfunctions/v2/test_sfn_api_express.py::TestSfnApiExpress::test_start_sync_execution": 0.9411656310001035, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_deleted_log_group": 0.7490657249999231, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_incomplete_logging_configuration[logging_configuration0]": 0.5569680209999888, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_incomplete_logging_configuration[logging_configuration1]": 0.5898548929999379, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_invalid_logging_configuration[logging_configuration0]": 0.5535597949999556, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_invalid_logging_configuration[logging_configuration1]": 0.542750725000019, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_invalid_logging_configuration[logging_configuration2]": 0.5267719570000509, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_logging_configuration[ALL-False]": 0.5959662820000631, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_logging_configuration[ALL-True]": 0.5937091839999766, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_logging_configuration[ERROR-False]": 0.6398123120000037, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_logging_configuration[ERROR-True]": 0.6105114679999133, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_logging_configuration[FATAL-False]": 0.6068447910000714, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_logging_configuration[FATAL-True]": 0.6250527820001253, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_logging_configuration[OFF-False]": 0.5994350810000242, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_logging_configuration[OFF-True]": 0.5889836359999663, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_multiple_destinations": 0.5746409659998335, + "tests/aws/services/stepfunctions/v2/test_sfn_api_logs.py::TestSnfApiLogs::test_update_logging_configuration": 0.7206022350000012, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_list_map_runs_and_describe_map_run": 0.913529161999918, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_empty_fail": 0.4624437969999917, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[ ]": 0.4390262320000602, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\"]": 0.43546999200009395, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[#]": 0.4114230699998416, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[$]": 0.44152975200006495, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[%]": 0.44155923599998914, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[&]": 0.4205980180000779, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[*]": 0.44624152700009745, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[,]": 1.539660063000042, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[:]": 0.45531433300004664, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[;]": 0.44653031899997586, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[<]": 0.44204617099990173, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[>]": 0.47471668399998634, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[?]": 0.44316829199988206, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[[]": 0.44151932700003726, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\\\]": 
0.41022915099995316, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\n]": 0.4405837980000342, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\r]": 0.4353016129999787, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\t]": 0.43393141800004287, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x00]": 0.4424964040000532, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x01]": 0.44293309700003647, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x02]": 0.4467426019998584, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x03]": 0.44607877899989035, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x04]": 0.4743552710000358, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x05]": 0.5065495460000875, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x06]": 0.4411645290000479, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x07]": 0.4126873129999922, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x08]": 0.439054062000082, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x0b]": 0.4364733190000152, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x0c]": 0.4049070139999458, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x0e]": 0.41900308599997516, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x0f]": 0.41564771700006986, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x10]": 0.4421944460001441, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x11]": 0.4444982950001304, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x12]": 0.44182774099988364, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x13]": 0.44433604999994714, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x14]": 0.440342008000016, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x15]": 0.43890373899989754, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x16]": 0.4412134500000775, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x17]": 0.4380454860000782, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x18]": 0.44175520700002835, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x19]": 0.4413641909999342, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x1a]": 0.44511620099990523, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x1b]": 0.4497449329999199, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x1c]": 0.4728601509999635, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x1d]": 0.4994777059998796, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x1e]": 0.45321194600012404, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x1f]": 0.42449061399986476, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x7f]": 0.42654749099995115, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x80]": 0.4458500760000561, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x81]": 0.44860443599998234, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x82]": 0.4664083999999775, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x83]": 0.45917724200000976, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x84]": 0.4645919560000493, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x85]": 0.45990498800017576, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x86]": 0.45739347800008545, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x87]": 0.44846143699999175, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x88]": 0.46399504500004696, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x89]": 0.45173733700005414, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x8a]": 0.458943391000048, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x8b]": 0.4523218510001925, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x8c]": 0.447000700999979, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x8d]": 0.440195969999877, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x8e]": 0.4399009539999952, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x8f]": 0.41626861599991116, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x90]": 0.44992605799996, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x91]": 0.4527835879998747, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x92]": 0.4659891389999302, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x93]": 1.679945506000081, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x94]": 0.4449015430000145, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x95]": 0.4563248739999608, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x96]": 0.45332371500001045, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x97]": 0.44187052299992047, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x98]": 0.4450446960000818, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x99]": 0.4487664750000704, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x9a]": 0.4559016600001087, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x9b]": 0.4403175749999946, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x9c]": 0.44285795099995084, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x9d]": 0.45596737400001075, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x9e]": 0.4835678890000281, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[\\x9f]": 0.4825932950001288, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[]]": 0.4456575550001389, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[^]": 0.4467860520001068, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[`]": 0.4074593200000436, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[{]": 0.48703024300004927, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[|]": 0.4468120539999063, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[}]": 0.469523590000108, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_invalid_char_fail[~]": 0.4475361450000719, + "tests/aws/services/stepfunctions/v2/test_sfn_api_map_run.py::TestSnfApiMapRun::test_map_state_label_too_long_fail": 0.4621160809999765, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_create_state_machine": 0.47990492199994605, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_invalid_state_machine[None]": 0.5027888899999198, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_invalid_state_machine[tag_list1]": 0.46188935499992567, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_invalid_state_machine[tag_list2]": 0.47276361000001543, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_invalid_state_machine[tag_list3]": 0.44665474999999333, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_state_machine[tag_list0]": 0.47957637400008934, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_state_machine[tag_list1]": 0.4644884990000264, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_state_machine[tag_list2]": 0.47781529600013073, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_state_machine[tag_list3]": 0.47895795600015845, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_state_machine[tag_list4]": 0.4866146739999522, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_tag_state_machine_version": 0.4972205030001078, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_untag_state_machine[tag_keys0]": 0.5025914579998698, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_untag_state_machine[tag_keys1]": 0.5058975549999332, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_untag_state_machine[tag_keys2]": 0.4908802639999976, + "tests/aws/services/stepfunctions/v2/test_sfn_api_tagging.py::TestSnfApiTagging::test_untag_state_machine[tag_keys3]": 0.4919675250000637, + "tests/aws/services/stepfunctions/v2/test_sfn_api_validation.py::TestSfnApiValidation::test_validate_state_machine_definition_not_a_definition[EMPTY_DICT]": 0.34137656400002925, + "tests/aws/services/stepfunctions/v2/test_sfn_api_validation.py::TestSfnApiValidation::test_validate_state_machine_definition_not_a_definition[EMPTY_STRING]": 0.36556510700006584, + "tests/aws/services/stepfunctions/v2/test_sfn_api_validation.py::TestSfnApiValidation::test_validate_state_machine_definition_not_a_definition[NOT_A_DEF]": 0.35117432099991674, + "tests/aws/services/stepfunctions/v2/test_sfn_api_validation.py::TestSfnApiValidation::test_validate_state_machine_definition_type_express[ILLEGAL_WFTT]": 0.3634003939999957, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_validation.py::TestSfnApiValidation::test_validate_state_machine_definition_type_express[INVALID_BASE_NO_STARTAT]": 0.3474091620000763, + "tests/aws/services/stepfunctions/v2/test_sfn_api_validation.py::TestSfnApiValidation::test_validate_state_machine_definition_type_express[VALID_BASE_PASS]": 0.35285891499995614, + "tests/aws/services/stepfunctions/v2/test_sfn_api_validation.py::TestSfnApiValidation::test_validate_state_machine_definition_type_standard[INVALID_BASE_NO_STARTAT]": 0.3454583300000422, + "tests/aws/services/stepfunctions/v2/test_sfn_api_validation.py::TestSfnApiValidation::test_validate_state_machine_definition_type_standard[VALID_BASE_PASS]": 0.35159758299994337, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_ASSIGN_FROM_INTRINSIC_FUNCTION]": 2.291611816999989, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_ASSIGN_FROM_PARAMETERS]": 1.0574095979999356, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_ASSIGN_FROM_RESULT]": 1.024733347999927, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_EVALUATION_ORDER_PASS_STATE]": 1.1028663140001527, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_REFERENCE_IN_CHOICE]": 1.0781535400000166, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_REFERENCE_IN_FAIL]": 1.015891508999971, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_REFERENCE_IN_INPUTPATH]": 1.0027356359998976, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_REFERENCE_IN_INTRINSIC_FUNCTION]": 2.4251618270001245, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_REFERENCE_IN_ITERATOR_OUTER_SCOPE]": 1.7125802279999789, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_REFERENCE_IN_OUTPUTPATH]": 1.0773119740001675, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_REFERENCE_IN_PARAMETERS]": 1.0521146780000663, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[BASE_REFERENCE_IN_WAIT]": 1.0354283080000641, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[MAP_STATE_REFERENCE_IN_INTRINSIC_FUNCTION]": 1.3152867479999486, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[MAP_STATE_REFERENCE_IN_ITEMS_PATH]": 1.3520402379999723, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[MAP_STATE_REFERENCE_IN_ITEM_SELECTOR]": 1.0939213630000495, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[MAP_STATE_REFERENCE_IN_MAX_CONCURRENCY_PATH]": 1.0530996720000303, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[MAP_STATE_REFERENCE_IN_MAX_ITEMS_PATH]": 1.0533667500000092, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_assign_templates[MAP_STATE_REFERENCE_IN_TOLERATED_FAILURE_PATH]": 1.063767640999913, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_jsonata_template[CHOICE_CONDITION_CONSTANT_JSONATA]": 0.6561670259999346, + "tests/aws/services/stepfunctions/v2/test_sfn_api_variable_references.py::TestSfnApiVariableReferences::test_base_variable_references_in_jsonata_template[CHOICE_STATE_UNSORTED_CHOICE_PARAMETERS_JSONATA]": 0.7128758829999242, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_create_express_with_publish": 0.515161820000003, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_create_publish_describe_no_version_description": 0.6032489780000105, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_create_publish_describe_with_version_description": 0.6001900450000903, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_create_with_publish": 0.5545050370000126, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_create_with_version_description_no_publish": 0.5372420310000052, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_describe_state_machine_for_execution_of_version": 0.7051018650000742, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_describe_state_machine_for_execution_of_version_with_revision": 0.6689285399999108, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_empty_revision_with_publish_and_no_publish_on_creation": 0.5746610789998385, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_empty_revision_with_publish_and_publish_on_creation": 0.5932220470001539, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_idempotent_publish": 0.6239598270000215, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_list_delete_version": 0.6387162180001269, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_list_state_machine_versions_pagination": 1.0638426849999405, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_publish_state_machine_version": 0.6900166440001385, + 
"tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_publish_state_machine_version_invalid_arn": 0.4360606100000268, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_publish_state_machine_version_no_such_machine": 0.5734867620000159, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_start_version_execution": 0.7279803089999177, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_update_state_machine": 0.6165976949999958, + "tests/aws/services/stepfunctions/v2/test_sfn_api_versioning.py::TestSnfApiVersioning::test_version_ids_between_deletions": 0.606336242999987, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_debug[BASE_CHOICE_STATE]": 1.1330601689999185, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_debug[BASE_FAIL_STATE]": 0.9457227380000859, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_debug[BASE_PASS_STATE]": 0.9389019269999608, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_debug[BASE_RESULT_PASS_STATE]": 0.946125130999917, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_debug[BASE_SUCCEED_STATE]": 0.9089541430000736, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_debug[IO_PASS_STATE]": 1.0390565709999464, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_debug[IO_RESULT_PASS_STATE]": 1.1096661049999739, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_info[BASE_CHOICE_STATE]": 0.8013421480000034, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_info[BASE_FAIL_STATE]": 0.6252068369999506, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_info[BASE_PASS_STATE]": 0.6085578259999238, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_info[BASE_RESULT_PASS_STATE]": 0.6346525859999019, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_info[BASE_SUCCEED_STATE]": 0.6039706509999405, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_info[IO_PASS_STATE]": 0.7100111859999743, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_info[IO_RESULT_PASS_STATE]": 1.9820721479999293, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_trace[BASE_CHOICE_STATE]": 1.121766505999858, + 
"tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_trace[BASE_FAIL_STATE]": 0.9811297159999413, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_trace[BASE_PASS_STATE]": 0.9325923319998992, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_trace[BASE_RESULT_PASS_STATE]": 0.9571003349999501, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_trace[BASE_SUCCEED_STATE]": 0.9232873800001471, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_trace[IO_PASS_STATE]": 1.0535959229998753, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_inspection_level_trace[IO_RESULT_PASS_STATE]": 1.0591145270000197, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_lambda_service_task_state[DEBUG]": 3.75871285300002, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_lambda_service_task_state[INFO]": 2.6254382270001315, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_lambda_service_task_state[TRACE]": 2.523958900000025, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_lambda_task_state[DEBUG]": 2.5842549470000904, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_lambda_task_state[INFO]": 2.547250692000034, + "tests/aws/services/stepfunctions/v2/test_state/test_test_state_scenarios.py::TestStateCaseScenarios::test_base_lambda_task_state[TRACE]": 2.536257831999933, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::TestStateMachine::test_create_choice_state_machine": 2.8739770209998596, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::TestStateMachine::test_create_run_map_state_machine": 1.1731952490000594, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::TestStateMachine::test_create_run_state_machine": 1.5738201579999895, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::TestStateMachine::test_create_state_machines_in_parallel": 2.0677397490003386, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::TestStateMachine::test_events_state_machine": 0.001791517999890857, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::TestStateMachine::test_intrinsic_functions": 1.254514391999919, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::TestStateMachine::test_try_catch_state_machine": 10.161825717999818, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::test_aws_sdk_task": 1.3621733940001377, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::test_default_logging_configuration": 0.1995053390000976, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::test_multiregion_nested[statemachine_definition0-eu-central-1]": 0.0016608579999228823, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::test_multiregion_nested[statemachine_definition0-eu-west-1]": 0.001660855999944033, + 
"tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::test_multiregion_nested[statemachine_definition0-us-east-1]": 0.0025518289999126864, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::test_multiregion_nested[statemachine_definition0-us-east-2]": 0.0017199870001149975, + "tests/aws/services/stepfunctions/v2/test_stepfunctions_v2.py::test_run_aws_sdk_secrets_manager": 3.3415291080000316, + "tests/aws/services/stepfunctions/v2/timeouts/test_heartbeats.py::TestHeartbeats::test_heartbeat_no_timeout": 6.095898601000272, + "tests/aws/services/stepfunctions/v2/timeouts/test_heartbeats.py::TestHeartbeats::test_heartbeat_path_timeout": 6.205913035000094, + "tests/aws/services/stepfunctions/v2/timeouts/test_heartbeats.py::TestHeartbeats::test_heartbeat_timeout": 6.3017243029996735, + "tests/aws/services/stepfunctions/v2/timeouts/test_timeouts.py::TestTimeouts::test_fixed_timeout_lambda": 6.949242629999844, + "tests/aws/services/stepfunctions/v2/timeouts/test_timeouts.py::TestTimeouts::test_fixed_timeout_service_lambda": 6.996302237999998, + "tests/aws/services/stepfunctions/v2/timeouts/test_timeouts.py::TestTimeouts::test_fixed_timeout_service_lambda_with_path": 7.050686655999698, + "tests/aws/services/stepfunctions/v2/timeouts/test_timeouts.py::TestTimeouts::test_global_timeout": 5.714374802999828, + "tests/aws/services/stepfunctions/v2/timeouts/test_timeouts.py::TestTimeouts::test_service_lambda_map_timeout": 0.003185119999898234, + "tests/aws/services/sts/test_sts.py::TestSTSAssumeRoleTagging::test_assume_role_tag_validation": 0.20799444199997197, + "tests/aws/services/sts/test_sts.py::TestSTSAssumeRoleTagging::test_iam_role_chaining_override_transitive_tags": 0.39890159300011874, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_assume_non_existent_role": 0.016097511999987546, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_assume_role": 0.26473025500013136, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_assume_role_with_saml": 0.0519469629998639, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_assume_role_with_web_identity": 0.04102754699988509, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_expiration_date_format": 0.01801258700015751, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_get_caller_identity_role_access_key[False]": 0.19947775199989337, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_get_caller_identity_role_access_key[True]": 0.22457528900008583, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_get_caller_identity_root": 0.015528662000178883, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_get_caller_identity_user_access_key[False]": 0.07802176199970745, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_get_caller_identity_user_access_key[True]": 0.3180290329999025, + "tests/aws/services/sts/test_sts.py::TestSTSIntegrations::test_get_federation_token": 0.1302918789999694, + "tests/aws/services/support/test_support.py::TestConfigService::test_support_case_lifecycle": 0.06899514799988538, + "tests/aws/services/swf/test_swf.py::TestSwf::test_run_workflow": 0.20529056400005175, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_failing_deletion": 0.16679914500014092, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_failing_start_transcription_job": 0.3312099540003146, + 
"tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_get_transcription_job": 2.2873154829999294, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_list_transcription_jobs": 2.3577102979998017, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_error_invalid_length": 32.02200791899986, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_error_speaker_labels": 0.001696116000175607, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_happy_path": 3.5277294709999296, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_speaker_diarization": 0.002241413000092507, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_start_job[None-None]": 2.412294025999927, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_start_job[test-output-bucket-2-None]": 4.612917553999978, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_start_job[test-output-bucket-3-test-output]": 4.94986339199977, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_start_job[test-output-bucket-4-test-output.json]": 4.973612471000024, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_start_job[test-output-bucket-5-test-files/test-output.json]": 4.935605785000234, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_start_job[test-output-bucket-6-test-files/test-output]": 4.951679161999891, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_start_job_same_name": 2.308895062999909, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_supported_media_formats[../../files/en-gb.amr-hello my name is]": 2.1630361349998566, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_supported_media_formats[../../files/en-gb.flac-hello my name is]": 2.1742246039998463, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_supported_media_formats[../../files/en-gb.mp3-hello my name is]": 2.1606591110003137, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_supported_media_formats[../../files/en-gb.mp4-hello my name is]": 2.180706547999989, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_supported_media_formats[../../files/en-gb.ogg-hello my name is]": 2.1736241880003035, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_supported_media_formats[../../files/en-gb.webm-hello my name is]": 2.2034048839998377, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_supported_media_formats[../../files/en-us_video.mkv-one of the most vital]": 2.189157536000039, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_supported_media_formats[../../files/en-us_video.mp4-one of the most vital]": 2.1696221879999484, + "tests/aws/services/transcribe/test_transcribe.py::TestTranscribe::test_transcribe_unsupported_media_format_failure": 3.189110919000086, + "tests/aws/test_error_injection.py::TestErrorInjection::test_dynamodb_error_injection": 25.73772035700017, + "tests/aws/test_error_injection.py::TestErrorInjection::test_dynamodb_read_error_injection": 25.73765976100003, + 
"tests/aws/test_error_injection.py::TestErrorInjection::test_dynamodb_write_error_injection": 51.374003802999596, + "tests/aws/test_error_injection.py::TestErrorInjection::test_kinesis_error_injection": 2.0712776349998876, + "tests/aws/test_integration.py::TestIntegration::test_firehose_extended_s3": 0.19859066200001507, + "tests/aws/test_integration.py::TestIntegration::test_firehose_kinesis_to_s3": 21.337352958999645, + "tests/aws/test_integration.py::TestIntegration::test_firehose_s3": 0.3486078760001874, + "tests/aws/test_integration.py::TestIntegration::test_lambda_streams_batch_and_transactions": 41.787630144999866, + "tests/aws/test_integration.py::TestIntegration::test_scheduled_lambda": 51.37142296699972, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_put_item_to_dynamodb[python3.10]": 1.9085521249996873, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_put_item_to_dynamodb[python3.11]": 1.8927056700001685, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_put_item_to_dynamodb[python3.12]": 1.8963745969999763, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_put_item_to_dynamodb[python3.13]": 1.8995574890002445, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_put_item_to_dynamodb[python3.8]": 1.9571991299999354, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_put_item_to_dynamodb[python3.9]": 1.9070963090000532, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_send_message_to_sqs[python3.10]": 7.832791531999874, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_send_message_to_sqs[python3.11]": 7.796739921000153, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_send_message_to_sqs[python3.12]": 1.8249469110000973, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_send_message_to_sqs[python3.13]": 7.846692878999875, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_send_message_to_sqs[python3.8]": 15.880032444000335, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_send_message_to_sqs[python3.9]": 1.838076887999705, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_start_stepfunctions_execution[python3.10]": 3.9461678759998904, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_start_stepfunctions_execution[python3.11]": 3.908566732000054, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_start_stepfunctions_execution[python3.12]": 3.948684726000238, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_start_stepfunctions_execution[python3.13]": 3.9198617689999082, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_start_stepfunctions_execution[python3.8]": 3.9683208619999277, + "tests/aws/test_integration.py::TestLambdaOutgoingSdkCalls::test_lambda_start_stepfunctions_execution[python3.9]": 3.924296864000098, + "tests/aws/test_integration.py::test_kinesis_lambda_forward_chain": 0.0033961459998863575, + "tests/aws/test_moto.py::test_call_include_response_metadata": 0.007640673999958381, + "tests/aws/test_moto.py::test_call_multi_region_backends": 0.020316019000119923, + "tests/aws/test_moto.py::test_call_non_implemented_operation": 0.04215984699976616, + "tests/aws/test_moto.py::test_call_s3_with_streaming_trait[IO[bytes]]": 0.025185122999801024, + 
"tests/aws/test_moto.py::test_call_s3_with_streaming_trait[bytes]": 0.024729422999826056, + "tests/aws/test_moto.py::test_call_s3_with_streaming_trait[str]": 0.051790354000104344, + "tests/aws/test_moto.py::test_call_sqs_invalid_call_raises_http_exception": 0.007976038000151675, + "tests/aws/test_moto.py::test_call_with_es_creates_state_correctly": 0.06390081699987604, + "tests/aws/test_moto.py::test_call_with_modified_request": 0.010939796000229762, + "tests/aws/test_moto.py::test_call_with_sns_with_full_uri": 0.005396828000129972, + "tests/aws/test_moto.py::test_call_with_sqs_creates_state_correctly": 3.2202947519999725, + "tests/aws/test_moto.py::test_call_with_sqs_invalid_call_raises_exception": 0.008190798999976323, + "tests/aws/test_moto.py::test_call_with_sqs_modifies_state_in_moto_backend": 0.009705065000161994, + "tests/aws/test_moto.py::test_call_with_sqs_returns_service_response": 0.007269841999686832, + "tests/aws/test_moto.py::test_moto_fallback_dispatcher": 0.0122353260003365, + "tests/aws/test_moto.py::test_moto_fallback_dispatcher_error_handling": 0.033808257999908164, + "tests/aws/test_moto.py::test_request_with_response_header_location_fields": 0.10541210499991394, + "tests/aws/test_multi_accounts.py::TestMultiAccounts::test_account_id_namespacing_for_localstack_backends": 0.1606827789998988, + "tests/aws/test_multi_accounts.py::TestMultiAccounts::test_account_id_namespacing_for_moto_backends": 1.6339140149998457, + "tests/aws/test_multi_accounts.py::TestMultiAccounts::test_multi_accounts_dynamodb": 0.3124309600000288, + "tests/aws/test_multi_accounts.py::TestMultiAccounts::test_multi_accounts_kinesis": 1.5109277660001226, + "tests/aws/test_multiregion.py::TestMultiRegion::test_multi_region_api_gateway": 0.5312057230000846, + "tests/aws/test_multiregion.py::TestMultiRegion::test_multi_region_sns": 0.08440524200000254, + "tests/aws/test_network_configuration.py::TestLambda::test_function_url": 1.1566875719997824, + "tests/aws/test_network_configuration.py::TestLambda::test_http_api_for_function_url": 0.0018730360000063229, + "tests/aws/test_network_configuration.py::TestOpenSearch::test_default_strategy": 10.292635048999955, + "tests/aws/test_network_configuration.py::TestOpenSearch::test_path_strategy": 10.532582949000016, + "tests/aws/test_network_configuration.py::TestOpenSearch::test_port_strategy": 10.44684026799996, + "tests/aws/test_network_configuration.py::TestS3::test_201_response": 0.09599747600009323, + "tests/aws/test_network_configuration.py::TestS3::test_multipart_upload": 0.11986569600026087, + "tests/aws/test_network_configuration.py::TestS3::test_non_us_east_1_location": 0.07766316499987624, + "tests/aws/test_network_configuration.py::TestSQS::test_domain_based_strategies[domain]": 0.024913650999906167, + "tests/aws/test_network_configuration.py::TestSQS::test_domain_based_strategies[standard]": 0.030973199999834833, + "tests/aws/test_network_configuration.py::TestSQS::test_off_strategy_with_external_port": 0.02685814900019068, + "tests/aws/test_network_configuration.py::TestSQS::test_off_strategy_without_external_port": 0.03286701500019262, + "tests/aws/test_network_configuration.py::TestSQS::test_path_strategy": 0.02213916200003041, + "tests/aws/test_notifications.py::TestNotifications::test_sns_to_sqs": 0.16352553900014755, + "tests/aws/test_notifications.py::TestNotifications::test_sqs_queue_names": 0.022554671000079907, + "tests/aws/test_serverless.py::TestServerless::test_apigateway_deployed": 0.034714342000143006, + 
"tests/aws/test_serverless.py::TestServerless::test_dynamodb_stream_handler_deployed": 0.04022864099965773, + "tests/aws/test_serverless.py::TestServerless::test_event_rules_deployed": 101.9997040730002, + "tests/aws/test_serverless.py::TestServerless::test_kinesis_stream_handler_deployed": 0.0018369959998381091, + "tests/aws/test_serverless.py::TestServerless::test_lambda_with_configs_deployed": 0.020771460999867486, + "tests/aws/test_serverless.py::TestServerless::test_queue_handler_deployed": 0.03538207700012208, + "tests/aws/test_serverless.py::TestServerless::test_s3_bucket_deployed": 27.6064715550001, + "tests/aws/test_terraform.py::TestTerraform::test_acm": 0.005597220000026937, + "tests/aws/test_terraform.py::TestTerraform::test_apigateway": 0.0016514100000222243, + "tests/aws/test_terraform.py::TestTerraform::test_apigateway_escaped_policy": 0.0017271409999466414, + "tests/aws/test_terraform.py::TestTerraform::test_bucket_exists": 0.004697059999898556, + "tests/aws/test_terraform.py::TestTerraform::test_dynamodb": 0.0016905429999951593, + "tests/aws/test_terraform.py::TestTerraform::test_event_source_mapping": 0.001681966999967699, + "tests/aws/test_terraform.py::TestTerraform::test_lambda": 0.0017064940000182105, + "tests/aws/test_terraform.py::TestTerraform::test_route53": 0.0016686429999026586, + "tests/aws/test_terraform.py::TestTerraform::test_security_groups": 0.0017573279999396618, + "tests/aws/test_terraform.py::TestTerraform::test_sqs": 0.0016967450001175166, + "tests/aws/test_validate.py::TestMissingParameter::test_elasticache": 0.0017614659998343996, + "tests/aws/test_validate.py::TestMissingParameter::test_opensearch": 0.0017614060000141762, + "tests/aws/test_validate.py::TestMissingParameter::test_sns": 0.0017908310001075733, + "tests/aws/test_validate.py::TestMissingParameter::test_sqs_create_queue": 0.00309020400004556, + "tests/aws/test_validate.py::TestMissingParameter::test_sqs_send_message": 0.0018044470000404544, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_container_starts_non_root": 0.0016721790000246983, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_custom_docker_flags": 0.0017374119997839443, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_logs": 0.003083592000166391, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_pulling_image_message": 0.001757678999865675, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_restart": 0.001692167000101108, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_start_already_running": 0.0016777790001469839, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_start_cli_within_container": 0.0016707860002043162, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_start_wait_stop": 0.0017819550000695017, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_status_services": 0.001756225999997696, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_volume_dir_mounted_correctly": 0.0016421529999206541, + "tests/cli/test_cli.py::TestCliContainerLifecycle::test_wait_timeout_raises_exception": 0.0016565590001391683, + "tests/cli/test_cli.py::TestDNSServer::test_dns_port_not_published_by_default": 0.00171745399984502, + "tests/cli/test_cli.py::TestDNSServer::test_dns_port_published_with_flag": 0.0030766299998958857, + "tests/cli/test_cli.py::TestHooks::test_prepare_host_hook_called_with_correct_dirs": 0.5608951789999992, + "tests/cli/test_cli.py::TestImports::test_import_venv": 0.007298142999843549, + 
"tests/integration/aws/test_app.py::TestExceptionHandlers::test_404_unfortunately_detected_as_s3_request": 0.030348488000299767, + "tests/integration/aws/test_app.py::TestExceptionHandlers::test_internal_failure_handler_http_errors": 0.019404805000249326, + "tests/integration/aws/test_app.py::TestExceptionHandlers::test_router_handler_get_http_errors": 0.0018957150000460388, + "tests/integration/aws/test_app.py::TestExceptionHandlers::test_router_handler_get_unexpected_errors": 0.0019758860000820277, + "tests/integration/aws/test_app.py::TestExceptionHandlers::test_router_handler_patch_http_errors": 0.10676300399995853, + "tests/integration/aws/test_app.py::TestHTTP2Support::test_http2_http": 0.10145176300011371, + "tests/integration/aws/test_app.py::TestHTTP2Support::test_http2_https": 0.10086322000006476, + "tests/integration/aws/test_app.py::TestHTTP2Support::test_http2_https_localhost": 0.06285744200022236, + "tests/integration/aws/test_app.py::TestHttps::test_default_cert_works": 0.0673779859998831, + "tests/integration/aws/test_app.py::TestWebSocketIntegration::test_return_response": 0.0018011490001299535, + "tests/integration/aws/test_app.py::TestWebSocketIntegration::test_ssl_websockets": 0.001830263999863746, + "tests/integration/aws/test_app.py::TestWebSocketIntegration::test_websocket_reject_through_edge_router": 0.0017720240000471676, + "tests/integration/aws/test_app.py::TestWebSocketIntegration::test_websockets_served_through_edge_router": 0.0018670520000796387, + "tests/integration/aws/test_app.py::TestWerkzeugIntegration::test_chunked_request_streaming": 0.11195998900006998, + "tests/integration/aws/test_app.py::TestWerkzeugIntegration::test_chunked_response_streaming": 0.13382102300010956, + "tests/integration/aws/test_app.py::TestWerkzeugIntegration::test_raw_header_handling": 0.10087241599967456, + "tests/integration/aws/test_app.py::TestWerkzeugIntegration::test_response_close_handlers_called_with_router": 0.10282093799992253, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_build_image[CmdDockerClient-False-False]": 0.004141584999842962, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_build_image[CmdDockerClient-False-True]": 0.0020053299999744922, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_build_image[CmdDockerClient-True-False]": 0.0019890599999143888, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_build_image[CmdDockerClient-True-True]": 0.0019258629999967525, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_build_image[SdkDockerClient-False-False]": 2.995095264000156, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_build_image[SdkDockerClient-False-True]": 3.001316029000236, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_build_image[SdkDockerClient-True-False]": 2.9942436089997955, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_build_image[SdkDockerClient-True-True]": 3.0290174179999667, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_container_lifecycle_commands[CmdDockerClient]": 0.001895646000320994, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_container_lifecycle_commands[SdkDockerClient]": 20.80559452999978, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_directory_content_into_container[CmdDockerClient]": 0.0019062849999045284, + 
"tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_directory_content_into_container[SdkDockerClient]": 0.28907256799993775, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_directory_into_container[CmdDockerClient]": 0.0020184760001029645, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_directory_into_container[SdkDockerClient]": 0.20604141800004072, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_directory_structure_into_container[CmdDockerClient]": 0.0018867689998387505, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_directory_structure_into_container[SdkDockerClient]": 0.24904862900007174, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_from_container[CmdDockerClient]": 0.001918528999794944, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_from_container[SdkDockerClient]": 0.23689324099996156, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_from_container_into_directory[CmdDockerClient]": 0.004004520000080447, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_from_container_into_directory[SdkDockerClient]": 0.2496660790000078, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_from_container_to_different_file[CmdDockerClient]": 0.0020005310000215104, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_from_container_to_different_file[SdkDockerClient]": 0.24526184199976342, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_from_non_existent_container[CmdDockerClient]": 0.0019712460002665466, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_from_non_existent_container[SdkDockerClient]": 0.008152447999918877, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_into_container[CmdDockerClient]": 0.0020944460000009713, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_into_container[SdkDockerClient]": 0.20222857999988264, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_into_container_with_existing_target[CmdDockerClient]": 0.0021972090000872413, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_into_container_with_existing_target[SdkDockerClient]": 0.3398601520000284, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_into_container_without_target_filename[CmdDockerClient]": 0.001930561999870406, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_into_container_without_target_filename[SdkDockerClient]": 0.21186606000014763, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_into_non_existent_container[CmdDockerClient]": 0.0018674430000373832, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_copy_into_non_existent_container[SdkDockerClient]": 0.007534474000067348, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_container_non_existing_image[CmdDockerClient]": 0.0019359509999503643, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_container_non_existing_image[SdkDockerClient]": 0.08028640200018344, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_container_remove_removes_container[CmdDockerClient]": 0.0018963280001571547, + 
"tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_container_remove_removes_container[SdkDockerClient]": 1.192338739999741, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_container_with_init[CmdDockerClient]": 0.0018959760000143433, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_container_with_init[SdkDockerClient]": 0.025711641000043528, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_container_with_max_env_vars[CmdDockerClient]": 0.001958433000027071, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_container_with_max_env_vars[SdkDockerClient]": 0.23330542200005766, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_file_in_container[CmdDockerClient]": 0.0019150020002598467, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_file_in_container[SdkDockerClient]": 0.2064487929999359, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_start_container_with_stdin_to_file[CmdDockerClient-False]": 0.001958411999794407, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_start_container_with_stdin_to_file[CmdDockerClient-True]": 0.0018951740000829886, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_start_container_with_stdin_to_file[SdkDockerClient-False]": 0.1932389339999645, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_start_container_with_stdin_to_file[SdkDockerClient-True]": 0.20847888000002968, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_start_container_with_stdin_to_stdout[CmdDockerClient-False]": 0.0018283599999904254, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_start_container_with_stdin_to_stdout[CmdDockerClient-True]": 0.001999570999714706, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_start_container_with_stdin_to_stdout[SdkDockerClient-False]": 0.192862851999962, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_start_container_with_stdin_to_stdout[SdkDockerClient-True]": 0.20766703200024494, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_with_exposed_ports[CmdDockerClient]": 0.0018740469997737819, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_with_exposed_ports[SdkDockerClient]": 0.0045728799998414615, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_with_host_network[CmdDockerClient]": 0.0021264059998884477, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_with_host_network[SdkDockerClient]": 0.03267042900006345, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_with_port_mapping[CmdDockerClient]": 0.002283560000023499, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_with_port_mapping[SdkDockerClient]": 0.02535695300025509, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_with_volume[CmdDockerClient]": 0.0017768430000160151, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_create_with_volume[SdkDockerClient]": 0.001931533000060881, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_docker_image_names[CmdDockerClient]": 0.0019805239999186597, + 
"tests/integration/docker_utils/test_docker.py::TestDockerClient::test_docker_image_names[SdkDockerClient]": 0.6015952650000145, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_docker_not_available[CmdDockerClient]": 0.006613075000132085, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_docker_not_available[SdkDockerClient]": 0.0058547410001210665, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_error_in_container[CmdDockerClient]": 0.001980975000151375, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_error_in_container[SdkDockerClient]": 0.2888057469999694, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container[CmdDockerClient]": 0.001963542000112284, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container[SdkDockerClient]": 0.24005105400010507, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_not_running_raises_exception[CmdDockerClient]": 0.0019789809998655983, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_not_running_raises_exception[SdkDockerClient]": 0.031903200999977344, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_env[CmdDockerClient]": 0.001992335000068124, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_env[SdkDockerClient]": 0.24737434400003622, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_env_deletion[CmdDockerClient]": 0.0018659290001323825, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_env_deletion[SdkDockerClient]": 0.31071927799985133, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_stdin[CmdDockerClient]": 0.0037020659999598138, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_stdin[SdkDockerClient]": 0.23487851600020804, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_stdin_stdout_stderr[CmdDockerClient]": 0.002056535999827247, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_stdin_stdout_stderr[SdkDockerClient]": 0.23832517799996822, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_workdir[CmdDockerClient]": 0.0038977100000465725, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_exec_in_container_with_workdir[SdkDockerClient]": 0.2431706130000748, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_command[CmdDockerClient]": 0.0019820170000457438, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_command[SdkDockerClient]": 0.006033835000152976, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_command_non_existing_image[CmdDockerClient]": 0.0018499900002098002, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_command_non_existing_image[SdkDockerClient]": 0.07695360699995035, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_command_not_pulled_image[CmdDockerClient]": 0.001981275999924037, + 
"tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_command_not_pulled_image[SdkDockerClient]": 0.4624340860002576, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_entrypoint[CmdDockerClient]": 0.0018947250000564964, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_entrypoint[SdkDockerClient]": 0.007305868000003102, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_entrypoint_non_existing_image[CmdDockerClient]": 0.003700824000134162, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_entrypoint_non_existing_image[SdkDockerClient]": 0.06580134099999668, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_entrypoint_not_pulled_image[CmdDockerClient]": 0.002031350000152088, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_entrypoint_not_pulled_image[SdkDockerClient]": 0.44566275000011046, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_id[CmdDockerClient]": 0.001965656000038507, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_id[SdkDockerClient]": 0.20011871399992742, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_id_not_existing[CmdDockerClient]": 0.0019611180002812034, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_id_not_existing[SdkDockerClient]": 0.006851654999763923, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip[CmdDockerClient]": 0.001957091000122091, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip[SdkDockerClient]": 0.20120309900016764, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_for_host_network[CmdDockerClient]": 0.0019346589999713615, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_for_host_network[SdkDockerClient]": 0.040100477999658324, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_for_network[CmdDockerClient]": 0.001907997999978761, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_for_network[SdkDockerClient]": 0.4493121250002332, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_for_network_non_existent_network[CmdDockerClient]": 0.0036490459999640734, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_for_network_non_existent_network[SdkDockerClient]": 0.19542863900005614, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_for_network_wrong_network[CmdDockerClient]": 0.0019978059999630204, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_for_network_wrong_network[SdkDockerClient]": 0.34893864700006816, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_non_existing_container[CmdDockerClient]": 0.0019222959999751765, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_ip_non_existing_container[SdkDockerClient]": 0.006023245000278621, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_name[CmdDockerClient]": 0.0038048779999826365, + 
"tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_name[SdkDockerClient]": 0.2145788709997305, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_name_not_existing[CmdDockerClient]": 0.0019839809999666613, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_container_name_not_existing[SdkDockerClient]": 0.007237971999984438, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_logs[CmdDockerClient]": 0.0019817750001038803, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_logs[SdkDockerClient]": 0.18366636899986588, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_logs_non_existent_container[CmdDockerClient]": 0.001964915000144174, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_logs_non_existent_container[SdkDockerClient]": 0.007136531999776707, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_network[CmdDockerClient]": 0.0020014930000797904, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_network[SdkDockerClient]": 0.02922286200009694, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_network_multiple_networks[CmdDockerClient]": 0.0018801580001763796, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_network_multiple_networks[SdkDockerClient]": 0.41844548600010967, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_network_non_existing_container[CmdDockerClient]": 0.001899923999872044, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_network_non_existing_container[SdkDockerClient]": 0.0066304879999279365, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_system_id[CmdDockerClient]": 0.0018595989999994345, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_system_id[SdkDockerClient]": 0.021663999000338663, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_system_info[CmdDockerClient]": 0.00364002000014807, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_get_system_info[SdkDockerClient]": 0.02722969399997055, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_container[CmdDockerClient]": 0.0020047400000748894, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_container[SdkDockerClient]": 0.2032476729998507, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_container_volumes[CmdDockerClient]": 0.0017758530000264727, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_container_volumes[SdkDockerClient]": 0.006013086999928419, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_container_volumes_with_no_volumes[CmdDockerClient]": 0.003788795999980721, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_container_volumes_with_no_volumes[SdkDockerClient]": 0.18806482099989807, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_image[CmdDockerClient]": 0.003796299999976327, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_image[SdkDockerClient]": 0.02786660199990365, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_network[CmdDockerClient]": 0.0019734910001716344, 
+ "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_network[SdkDockerClient]": 0.12972114700005477, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_network_non_existent_network[CmdDockerClient]": 0.0020103100000596896, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_inspect_network_non_existent_network[SdkDockerClient]": 0.007151316999852497, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_is_container_running[CmdDockerClient]": 0.0018547189999935654, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_is_container_running[SdkDockerClient]": 20.412909238999873, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers[CmdDockerClient]": 0.004508859999759807, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers[SdkDockerClient]": 0.08931729799996901, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers_filter[CmdDockerClient]": 0.001924890000054802, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers_filter[SdkDockerClient]": 0.08665848399982679, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers_filter_illegal_filter[CmdDockerClient]": 0.0018766499997582287, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers_filter_illegal_filter[SdkDockerClient]": 0.006156785000257514, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers_filter_non_existing[CmdDockerClient]": 0.0019199509999907605, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers_filter_non_existing[SdkDockerClient]": 0.006631272000049648, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers_with_podman_image_ref_format[CmdDockerClient]": 0.001906095000094865, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_list_containers_with_podman_image_ref_format[SdkDockerClient]": 0.23721227999999428, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pause_non_existing_container[CmdDockerClient]": 0.001938556000141034, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pause_non_existing_container[SdkDockerClient]": 0.0056407550000585616, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pull_docker_image[CmdDockerClient]": 0.0019696140000178275, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pull_docker_image[SdkDockerClient]": 0.32487481900011517, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pull_docker_image_with_hash[CmdDockerClient]": 0.0035774230000242824, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pull_docker_image_with_hash[SdkDockerClient]": 0.32382332200018027, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pull_docker_image_with_tag[CmdDockerClient]": 0.0018453820000559062, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pull_docker_image_with_tag[SdkDockerClient]": 0.4061120740000206, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pull_non_existent_docker_image[CmdDockerClient]": 0.0018666419996407058, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_pull_non_existent_docker_image[SdkDockerClient]": 
0.07649629700017613, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_push_access_denied[CmdDockerClient]": 0.001912308000100893, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_push_access_denied[SdkDockerClient]": 0.2895498749999206, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_push_invalid_registry[CmdDockerClient]": 0.0019241889999648265, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_push_invalid_registry[SdkDockerClient]": 0.014607293999915782, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_push_non_existent_docker_image[CmdDockerClient]": 0.002009979999911593, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_push_non_existent_docker_image[SdkDockerClient]": 0.00724829199953092, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_remove_non_existing_container[CmdDockerClient]": 0.0020509739999852172, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_remove_non_existing_container[SdkDockerClient]": 0.005821060000016587, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_restart_non_existing_container[CmdDockerClient]": 0.002022061000161557, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_restart_non_existing_container[SdkDockerClient]": 0.005878531999769621, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container[CmdDockerClient]": 0.004093345000001136, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container[SdkDockerClient]": 0.19326594199969804, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_automatic_pull[CmdDockerClient]": 0.0018643680000423046, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_automatic_pull[SdkDockerClient]": 0.6158057180000469, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_error[CmdDockerClient]": 0.0020197670000925427, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_error[SdkDockerClient]": 0.11478227399993557, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_non_existent_image[CmdDockerClient]": 0.0019726789998912864, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_non_existent_image[SdkDockerClient]": 0.0897285739997642, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_with_init[CmdDockerClient]": 0.0018673419999686303, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_with_init[SdkDockerClient]": 0.19143987100028426, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_with_stdin[CmdDockerClient]": 0.001975474999881044, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_container_with_stdin[SdkDockerClient]": 0.17708290300015506, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_detached_with_logs[CmdDockerClient]": 0.0020140160002029006, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_run_detached_with_logs[SdkDockerClient]": 0.19486077000010482, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_running_container_names[CmdDockerClient]": 0.0018338009999752103, + 
"tests/integration/docker_utils/test_docker.py::TestDockerClient::test_running_container_names[SdkDockerClient]": 10.634625412000105, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_set_container_entrypoint[CmdDockerClient-echo]": 0.0019157530000484257, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_set_container_entrypoint[CmdDockerClient-entrypoint1]": 0.001887940999949933, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_set_container_entrypoint[SdkDockerClient-echo]": 0.20084265300010884, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_set_container_entrypoint[SdkDockerClient-entrypoint1]": 0.19764563499984433, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_start_non_existing_container[CmdDockerClient]": 0.0019863159998294577, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_start_non_existing_container[SdkDockerClient]": 0.0055717730001560994, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_stop_non_existing_container[CmdDockerClient]": 0.0020056109999586624, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_stop_non_existing_container[SdkDockerClient]": 0.0064888039996731095, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_stream_logs[CmdDockerClient]": 0.0018738250000751577, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_stream_logs[SdkDockerClient]": 0.19955086599975402, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_stream_logs_non_existent_container[CmdDockerClient]": 0.0036469620001753356, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_stream_logs_non_existent_container[SdkDockerClient]": 0.005818804000000455, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_tag_image[CmdDockerClient]": 0.003789048000044204, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_tag_image[SdkDockerClient]": 0.15794090200006394, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_tag_non_existing_image[CmdDockerClient]": 0.0019169859999692562, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_tag_non_existing_image[SdkDockerClient]": 0.008020030999659866, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_unpause_non_existing_container[CmdDockerClient]": 0.004957625999850279, + "tests/integration/docker_utils/test_docker.py::TestDockerClient::test_unpause_non_existing_container[SdkDockerClient]": 0.005480785000145261, + "tests/integration/docker_utils/test_docker.py::TestDockerImages::test_commit_creates_image_from_running_container[CmdDockerClient]": 0.0034776570003032248, + "tests/integration/docker_utils/test_docker.py::TestDockerImages::test_commit_creates_image_from_running_container[SdkDockerClient]": 0.5167643710001357, + "tests/integration/docker_utils/test_docker.py::TestDockerImages::test_commit_image_raises_for_nonexistent_container[CmdDockerClient]": 0.0019051140002375178, + "tests/integration/docker_utils/test_docker.py::TestDockerImages::test_commit_image_raises_for_nonexistent_container[SdkDockerClient]": 0.006317845000012312, + "tests/integration/docker_utils/test_docker.py::TestDockerImages::test_remove_image_raises_for_nonexistent_image[CmdDockerClient]": 0.001990052000110154, + 
"tests/integration/docker_utils/test_docker.py::TestDockerImages::test_remove_image_raises_for_nonexistent_image[SdkDockerClient]": 0.006786487999988822, + "tests/integration/docker_utils/test_docker.py::TestDockerLabels::test_create_container_with_labels[CmdDockerClient]": 0.003449254999850382, + "tests/integration/docker_utils/test_docker.py::TestDockerLabels::test_create_container_with_labels[SdkDockerClient]": 0.04260951800006296, + "tests/integration/docker_utils/test_docker.py::TestDockerLabels::test_get_container_stats[CmdDockerClient]": 0.0018848269999125478, + "tests/integration/docker_utils/test_docker.py::TestDockerLabels::test_get_container_stats[SdkDockerClient]": 1.2000897049999821, + "tests/integration/docker_utils/test_docker.py::TestDockerLabels::test_list_containers_with_labels[CmdDockerClient]": 0.0019074580000051355, + "tests/integration/docker_utils/test_docker.py::TestDockerLabels::test_list_containers_with_labels[SdkDockerClient]": 0.2057743580000988, + "tests/integration/docker_utils/test_docker.py::TestDockerLabels::test_run_container_with_labels[CmdDockerClient]": 0.0019015580000996124, + "tests/integration/docker_utils/test_docker.py::TestDockerLabels::test_run_container_with_labels[SdkDockerClient]": 0.19313940300003196, + "tests/integration/docker_utils/test_docker.py::TestDockerLogging::test_docker_logging_fluentbit[CmdDockerClient]": 0.0018733750000592408, + "tests/integration/docker_utils/test_docker.py::TestDockerLogging::test_docker_logging_fluentbit[SdkDockerClient]": 2.990178680999861, + "tests/integration/docker_utils/test_docker.py::TestDockerLogging::test_docker_logging_none_disables_logs[CmdDockerClient]": 0.0032958169997527875, + "tests/integration/docker_utils/test_docker.py::TestDockerLogging::test_docker_logging_none_disables_logs[SdkDockerClient]": 0.19901383199999145, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_container_to_network[CmdDockerClient]": 0.005871623000302861, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_container_to_network[SdkDockerClient]": 0.43204041600029086, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_container_to_network_with_alias_and_disconnect[CmdDockerClient]": 0.0019517499999892607, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_container_to_network_with_alias_and_disconnect[SdkDockerClient]": 0.864502899000172, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_container_to_network_with_link_local_address[CmdDockerClient]": 0.002074599000025046, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_container_to_network_with_link_local_address[SdkDockerClient]": 0.18537330200001634, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_container_to_nonexistent_network[CmdDockerClient]": 0.0020503549999375537, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_container_to_nonexistent_network[SdkDockerClient]": 0.22841053799970723, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_nonexistent_container_to_network[CmdDockerClient]": 0.0019197220001387905, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_connect_nonexistent_container_to_network[SdkDockerClient]": 0.16316636100032156, + 
"tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_disconnect_container_from_nonexistent_network[CmdDockerClient]": 0.0018649190001269744, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_disconnect_container_from_nonexistent_network[SdkDockerClient]": 0.20242660400003842, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_disconnect_nonexistent_container_from_network[CmdDockerClient]": 0.0019547859999420325, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_disconnect_nonexistent_container_from_network[SdkDockerClient]": 0.15831655999977556, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_docker_sdk_no_retries": 0.026616520000061428, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_docker_sdk_retries_after_init": 1.0671151779999946, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_docker_sdk_retries_on_init": 1.1294517810001707, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_docker_sdk_timeout_seconds": 0.020414975000448976, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_get_container_ip_with_network[CmdDockerClient]": 0.0019897120000678115, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_get_container_ip_with_network[SdkDockerClient]": 0.3574971589998768, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_network_lifecycle[CmdDockerClient]": 0.00334271700012323, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_network_lifecycle[SdkDockerClient]": 0.1595030199998746, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_set_container_workdir[CmdDockerClient]": 0.0019761270000344666, + "tests/integration/docker_utils/test_docker.py::TestDockerNetworking::test_set_container_workdir[SdkDockerClient]": 0.18252414799985672, + "tests/integration/docker_utils/test_docker.py::TestDockerPermissions::test_container_with_cap_add[CmdDockerClient]": 0.003574037999896973, + "tests/integration/docker_utils/test_docker.py::TestDockerPermissions::test_container_with_cap_add[SdkDockerClient]": 0.4320345759999782, + "tests/integration/docker_utils/test_docker.py::TestDockerPermissions::test_container_with_cap_drop[CmdDockerClient]": 0.0019017579998035217, + "tests/integration/docker_utils/test_docker.py::TestDockerPermissions::test_container_with_cap_drop[SdkDockerClient]": 0.3700782690000324, + "tests/integration/docker_utils/test_docker.py::TestDockerPermissions::test_container_with_sec_opt[CmdDockerClient]": 0.00187565999999606, + "tests/integration/docker_utils/test_docker.py::TestDockerPermissions::test_container_with_sec_opt[SdkDockerClient]": 0.02765618000012182, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_container_port_can_be_bound[CmdDockerClient-None]": 0.0019256020002558216, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_container_port_can_be_bound[CmdDockerClient-tcp]": 0.0018887050000557792, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_container_port_can_be_bound[CmdDockerClient-udp]": 0.0018985119997978472, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_container_port_can_be_bound[SdkDockerClient-None]": 1.4848546409998562, + 
"tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_container_port_can_be_bound[SdkDockerClient-tcp]": 1.4984671460001664, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_container_port_can_be_bound[SdkDockerClient-udp]": 1.4964931449999312, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_reserve_container_port[CmdDockerClient-None]": 0.0033610099999350496, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_reserve_container_port[CmdDockerClient-tcp]": 0.001973280999891358, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_reserve_container_port[CmdDockerClient-udp]": 0.002002886999889597, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_reserve_container_port[SdkDockerClient-None]": 2.601268305000076, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_reserve_container_port[SdkDockerClient-tcp]": 2.611378226999932, + "tests/integration/docker_utils/test_docker.py::TestDockerPorts::test_reserve_container_port[SdkDockerClient-udp]": 2.8516050480000104, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments[CmdDockerClient]": 0.003584026000226004, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments[SdkDockerClient]": 0.3908667119999336, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_add_dns[CmdDockerClient-False]": 0.002028122999945481, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_add_dns[CmdDockerClient-True]": 0.0020554349998747057, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_add_dns[SdkDockerClient-False]": 0.1266307850000885, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_add_dns[SdkDockerClient-True]": 0.12432772000011028, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_add_host[CmdDockerClient]": 0.0018726929999957065, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_add_host[SdkDockerClient]": 0.18973624999989624, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_env_files[CmdDockerClient]": 0.001917736999985209, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_env_files[SdkDockerClient]": 0.7136641140000393, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_random_port[CmdDockerClient]": 0.0020228539999607165, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_additional_arguments_random_port[SdkDockerClient]": 0.2593425549998756, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_ulimit[CmdDockerClient]": 0.0019318839999868942, + "tests/integration/docker_utils/test_docker.py::TestRunWithAdditionalArgs::test_run_with_ulimit[SdkDockerClient]": 0.17863703600028202, + "tests/integration/services/test_internal.py::TestHealthResource::test_get": 0.021054101999880004, + "tests/integration/services/test_internal.py::TestHealthResource::test_head": 0.018252955999969345, + 
"tests/integration/services/test_internal.py::TestInfoEndpoint::test_get": 0.05460279399994761, + "tests/integration/services/test_internal.py::TestInitScriptsResource::test_query_individual_stage_completed[boot-True]": 0.020364598999776717, + "tests/integration/services/test_internal.py::TestInitScriptsResource::test_query_individual_stage_completed[ready-True]": 0.024893586999951367, + "tests/integration/services/test_internal.py::TestInitScriptsResource::test_query_individual_stage_completed[shutdown-False]": 0.019863297000256352, + "tests/integration/services/test_internal.py::TestInitScriptsResource::test_query_individual_stage_completed[start-True]": 0.0305466830000114, + "tests/integration/services/test_internal.py::TestInitScriptsResource::test_query_nonexisting_stage": 0.019501690999959465, + "tests/integration/services/test_internal.py::TestInitScriptsResource::test_stages_have_completed": 1.550047032999828, + "tests/integration/test_config_endpoint.py::test_config_endpoint": 0.048597999000094205, + "tests/integration/test_config_service.py::TestConfigService::test_put_configuration_recorder": 0.3496834580000723, + "tests/integration/test_config_service.py::TestConfigService::test_put_delivery_channel": 0.3099454869998226, + "tests/integration/test_forwarder.py::test_forwarding_fallback_dispatcher": 0.0063461790002747875, + "tests/integration/test_forwarder.py::test_forwarding_fallback_dispatcher_avoid_fallback": 0.004403954999816051, + "tests/integration/test_security.py::TestCSRF::test_CSRF": 0.09931153799993808, + "tests/integration/test_security.py::TestCSRF::test_additional_allowed_origins": 0.01958251399969413, + "tests/integration/test_security.py::TestCSRF::test_cors_apigw_not_applied": 0.048958045000063066, + "tests/integration/test_security.py::TestCSRF::test_cors_s3_override": 0.08057531500003279, + "tests/integration/test_security.py::TestCSRF::test_default_cors_headers": 0.015996739999991405, + "tests/integration/test_security.py::TestCSRF::test_disable_cors_checks": 0.016058246999818948, + "tests/integration/test_security.py::TestCSRF::test_disable_cors_headers": 0.019197955999970873, + "tests/integration/test_security.py::TestCSRF::test_internal_route_cors_headers[/_localstack/health]": 0.011076430999764852, + "tests/integration/test_security.py::TestCSRF::test_no_cors_without_origin_header": 0.01053124600002775, + "tests/integration/test_stores.py::test_nonstandard_regions": 0.14873483800010945, + "tests/integration/utils/test_diagnose.py::test_diagnose_resource": 0.23227979500029505 +} diff --git a/CODEOWNERS b/CODEOWNERS index e3d1d1e72d2e1..e165d6d3cc5d3 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -3,176 +3,250 @@ ###################### # CODEOWNERS -/CODEOWNERS @thrau @dominikschubert +/CODEOWNERS @thrau @dominikschubert @alexrashed + +# README / Docs +/docs/ @thrau @HarshCasper +/README.md @HarshCasper +/CODE_OF_CONDUCT.md @HarshCasper +/LICENSE.txt @HarshCasper @alexrashed + +# Docker +/bin/docker-entrypoint.sh @thrau @alexrashed +/.dockerignore @alexrashed +/Dockerfile @alexrashed + +# Git, Pipelines, GitHub config +/.circleci @alexrashed @dfangl @dominikschubert @silv-io @k-a-il +/.github @alexrashed @dfangl @dominikschubert @silv-io @k-a-il +/.test_durations @alexrashed +/.git-blame-ignore-revs @alexrashed @thrau +/bin/release-dev.sh @thrau @alexrashed +/bin/release-helper.sh @thrau @alexrashed # ASF -/localstack/aws/ @thrau +/localstack-core/localstack/aws/ @thrau /tests/unit/aws/ @thrau # ASF parsers and serializers -/localstack/aws/protocol @alexrashed 
+/localstack-core/localstack/aws/protocol @alexrashed # left empty (without owner) because the generated APIs belong to the specific service owners # you can overwrite this for single services afterwards -/localstack/aws/api/ +/localstack-core/localstack/aws/api/ + +# CLI +/localstack-core/localstack/cli/ @thrau @alexrashed +/tests/unit/cli/ @thrau @alexrashed +/tests/cli/ @thrau @alexrashed -# CLI/plugins -/localstack/cli/ @thrau -/localstack/plugins.py @thrau -/localstack/config.py @thrau -/tests/unit/cli/ @thrau +# Plugins +/localstack-core/localstack/plugins.py @thrau +/localstack-core/localstack/config.py @thrau /tests/unit/services/test_internal.py @thrau +# Extensions +/localstack-core/localstack/extensions/ @thrau + # Container utils -/localstack/utils/container_utils/ @dfangl @dominikschubert -/localstack/utils/docker_utils.py @dfangl @dominikschubert -/tests/integration/docker_utils/ @dfangl @dominikschubert +/localstack-core/localstack/utils/container_utils/ @dfangl @dominikschubert +/localstack-core/localstack/utils/docker_utils.py @dfangl @dominikschubert +/tests/unit/test_docker_utils.py @dfangl @dominikschubert /tests/unit/test_dockerclient.py @dfangl @dominikschubert +# Package Installers +/localstack-core/localstack/packages/ @alexrashed +/localstack-core/localstack/services/kinesis/packages.py @alexrashed + +# DNS server +/localstack-core/localstack/dns @simonrw @dfangl + # HTTP framework -/localstack/http/ @thrau +/localstack-core/localstack/http/ @thrau +/tests/unit/http_/ @thrau -# Stores -/localstack/services/stores.py @viren-nadkarni -/tests/unit/test_stores.py @viren-nadkarni +# Runtime +/localstack-core/localstack/runtime/ @thrau -# Dockerfile -/Dockerfile @alexrashed -/Dockerfile.rh @alexrashed +# Logging +/localstack-core/localstack/logging/ @dfangl @alexrashed @dominikschubert -# Circle CI pipeline -/.circleci/config.yml @thrau @alexrashed @dfangl +# Stores +/localstack-core/localstack/services/stores.py @viren-nadkarni +/tests/unit/test_stores.py @viren-nadkarni # Analytics client -/localstack/utils/analytics/ @thrau +/localstack-core/localstack/utils/analytics/ @thrau +/tests/unit/utils/analytics/ @thrau # Snapshot testing -/localstack/testing/snapshots/ @dominikschubert @steffyP -/localstack/testing/pytest/ @dominikschubert -/localstack/testing/pytest/snapshot.py @dominikschubert @steffyP +/localstack-core/localstack/testing/snapshots/ @dominikschubert @steffyP +/localstack-core/localstack/testing/pytest/ @dominikschubert + +# Scenario testing +/localstack-core/localstack/testing/scenario/ @dominikschubert @steffyP + +# Bootstrap tests +/tests/bootstrap @simonrw +/localstack-core/localstack/testing/pytest/container.py @dominikschubert @simonrw + +# Test Selection +/localstack-core/localstack/testing/testselection @dominikschubert @alexrashed @silv-io ###################### ### SERVICE OWNERS ### ###################### - -# ACM -/localstack/aws/api/acm/ @alexrashed -/localstack/services/acm/ @alexrashed -/tests/integration/test_acm.py @alexrashed - -# API Gateway -/localstack/aws/api/apigateway/ @calvernaz -/localstack/services/apigateway/ @calvernaz -/localstack/services/cloudformation/models/apigateway.py @calvernaz -/tests/integration/test_apigateway*.py @calvernaz -/tests/unit/test_apigateway.py @calvernaz - -# Cloudformation -/localstack/aws/api/cloudformation/ @dominikschubert -/localstack/services/cloudformation/ @dominikschubert -/localstack/utils/cloudformation/ @dominikschubert -/tests/integration/cloudformation/ @dominikschubert 
-/tests/unit/test_cloudformation.py @dominikschubert -# left empty (without owner) because the models belong to the specific service owners -# you can overwrite this for single services afterwards -/localstack/services/cloudformation/models/ -/localstack/services/cloudformation/models/cloudformation.py @dominikschubert - -# Cloudwatch -/localstack/aws/api/cloudwatch/ @steffyP -/localstack/services/cloudwatch/ @steffyP -/localstack/services/cloudformation/models/cloudwatch.py @steffyP -/tests/integration/test_cloudwatch.py @steffyP - -# EC2 -/localstack/aws/api/ec2/ @viren-nadkarni -/localstack/services/ec2/ @viren-nadkarni -/localstack/services/cloudformation/models/ec2.py @viren-nadkarni -/tests/integration/test_ec2.py @viren-nadkarni - -# ElasticSearch -/localstack/aws/api/es/ @alexrashed -/localstack/services/es/ @alexrashed -/localstack/services/cloudformation/models/elasticsearch.py @alexrashed -/tests/integration/test_es.py @alexrashed - -# Events / EventBridge -/localstack/aws/api/events/ @dominikschubert @dfangl -/localstack/services/events/ @dominikschubert @dfangl -/localstack/services/cloudformation/models/events.py @dominikschubert @dfangl -/tests/integration/test_events.py @dominikschubert @dfangl - -# IAM -/localstack/aws/api/iam/ @dfangl -/localstack/services/iam/ @dfangl -/localstack/services/cloudformation/models/iam.py @dfangl -/tests/integration/test_iam.py @dfangl - -# Lambda -/localstack/aws/api/lambda_/ @dfangl @dominikschubert -/localstack/services/awslambda/ @dfangl @dominikschubert -/localstack/services/cloudformation/models/awslambda.py @dfangl @dominikschubert -/tests/integration/awslambda/ @dfangl @dominikschubert - -# Logs -/localstack/aws/api/logs/ @steffyP -/localstack/services/logs/ @steffyP -/localstack/services/cloudformation/models/logs.py @steffyP -/tests/integration/test_logs.py @steffyP -/tests/unit/test_logs.py @steffyP - -# OpenSearch -/localstack/aws/api/opensearch/ @alexrashed -/localstack/services/opensearch/ @alexrashed -/localstack/services/cloudformation/models/opensearch.py @alexrashed -/tests/integration/test_opensearch.py @alexrashed -/tests/unit/services/opensearch/ @alexrashed - -# Route53 -/localstack/aws/api/route53/ @giograno -/localstack/services/route53/ @giograno -/localstack/services/cloudformation/models/route53.py @giograno -/tests/integration/test_route53.py @giograno - -# S3 -/localstack/aws/api/s3/ @bentsku @macnev2013 -/localstack/services/s3/ @bentsku @macnev2013 -/localstack/services/cloudformation/models/s3.py @bentsku @macnev2013 -/tests/integration/test_s3.py @bentsku @macnev2013 -/tests/unit/test_s3.py @bentsku @macnev2013 - -# Secretsmanager -/localstack/aws/api/secretsmanager/ @dominikschubert -/localstack/services/secretsmanager/ @dominikschubert -/localstack/services/cloudformation/models/secretsmanager.py @dominikschubert -/tests/integration/test_secretsmanager.py @dominikschubert - -# SES -/localstack/aws/api/ses/ @viren-nadkarni -/localstack/services/ses/ @viren-nadkarni -/tests/integration/test_ses.py @viren-nadkarni - -# SNS -/localstack/aws/api/sns/ @bentsku -/localstack/services/sns/ @bentsku -/localstack/services/cloudformation/models/sns.py @bentsku -/tests/integration/test_sns.py @bentsku -/tests/unit/test_sns.py @bentsku - -# SQS -/localstack/aws/api/sqs/ @thrau @baermat -/localstack/services/sqs/ @thrau @baermat -/localstack/services/cloudformation/models/sqs.py @thrau @baermat -/tests/integration/test_sqs.py @thrau @baermat -/tests/unit/test_sqs.py @thrau @baermat -/tests/unit/test_sqs_backdoor.py @thrau 
@baermat - -# SSM -/localstack/aws/api/ssm/ @dominikschubert -/localstack/services/ssm/ @dominikschubert -/localstack/services/cloudformation/models/ssm.py @dominikschubert -/tests/integration/test_ssm.py @dominikschubert - -# Stepfunctions -/localstack/aws/api/stepfunctions/ @dominikschubert -/localstack/services/stepfunctions/ @dominikschubert -/localstack/services/cloudformation/models/stepfunctions.py @dominikschubert -/tests/integration/test_stepfunctions.py @dominikschubert +# DO NOT modify anything below! +# Everything below is _autogenerated_ and any manual changes will be overwritten. + + +# acm +/localstack-core/localstack/aws/api/acm/ @alexrashed +/localstack-core/localstack/services/acm/ @alexrashed +/tests/aws/services/acm/ @alexrashed + +# apigateway +/localstack-core/localstack/aws/api/apigateway/ @bentsku @cloutierMat +/localstack-core/localstack/services/apigateway/ @bentsku @cloutierMat +/tests/aws/services/apigateway/ @bentsku @cloutierMat +/tests/unit/services/apigateway/ @bentsku @cloutierMat + +# cloudcontrol +/localstack-core/localstack/aws/api/cloudcontrol/ @simonrw +/tests/aws/services/cloudcontrol/ @simonrw + +# cloudformation +/localstack-core/localstack/aws/api/cloudformation/ @dominikschubert @pinzon @simonrw +/localstack-core/localstack/services/cloudformation/ @dominikschubert @pinzon @simonrw +/tests/aws/services/cloudformation/ @dominikschubert @pinzon @simonrw +/tests/unit/services/cloudformation/ @dominikschubert @pinzon @simonrw + +# cloudwatch +/localstack-core/localstack/aws/api/cloudwatch/ @pinzon @steffyP +/localstack-core/localstack/services/cloudwatch/ @pinzon @steffyP +/tests/aws/services/cloudwatch/ @pinzon @steffyP +/tests/unit/services/cloudwatch/ @pinzon @steffyP + +# dynamodb +/localstack-core/localstack/aws/api/dynamodb/ @viren-nadkarni @giograno +/localstack-core/localstack/services/dynamodb/ @viren-nadkarni @giograno +/tests/aws/services/dynamodb/ @viren-nadkarni @giograno +/tests/unit/services/dynamodb/ @viren-nadkarni @giograno + +# ec2 +/localstack-core/localstack/aws/api/ec2/ @viren-nadkarni @macnev2013 +/localstack-core/localstack/services/ec2/ @viren-nadkarni @macnev2013 +/tests/aws/services/ec2/ @viren-nadkarni @macnev2013 + +# ecr +/localstack-core/localstack/services/ecr/ @dfangl + +# es +/localstack-core/localstack/aws/api/es/ @alexrashed @silv-io +/localstack-core/localstack/services/es/ @alexrashed @silv-io +/tests/aws/services/es/ @alexrashed @silv-io + +# events +/localstack-core/localstack/aws/api/events/ @maxhoheiser @bentsku +/localstack-core/localstack/services/events/ @maxhoheiser @bentsku +/tests/aws/services/events/ @maxhoheiser @bentsku +/tests/unit/services/events/ @maxhoheiser @bentsku + +# firehose +/localstack-core/localstack/aws/api/firehose/ @pinzon +/localstack-core/localstack/services/firehose/ @pinzon +/tests/aws/services/firehose/ @pinzon + +# iam +/localstack-core/localstack/aws/api/iam/ @dfangl @pinzon +/localstack-core/localstack/services/iam/ @dfangl @pinzon +/tests/aws/services/iam/ @dfangl @pinzon + +# kms +/localstack-core/localstack/aws/api/kms/ @sannya-singal +/localstack-core/localstack/services/kms/ @sannya-singal +/tests/aws/services/kms/ @sannya-singal +/tests/unit/services/kms/ @sannya-singal + +# lambda +/localstack-core/localstack/aws/api/lambda_/ @joe4dev @dominikschubert @dfangl @gregfurman +/localstack-core/localstack/services/lambda_/ @joe4dev @dominikschubert @dfangl @gregfurman +/tests/aws/services/lambda_/ @joe4dev @dominikschubert @dfangl @gregfurman +/tests/unit/services/lambda_/ 
@joe4dev @dominikschubert @dfangl @gregfurman + +# logs +/localstack-core/localstack/aws/api/logs/ @pinzon @steffyP +/localstack-core/localstack/services/logs/ @pinzon @steffyP +/tests/aws/services/logs/ @pinzon @steffyP +/tests/unit/services/logs/ @pinzon @steffyP + +# opensearch +/localstack-core/localstack/aws/api/opensearch/ @alexrashed @silv-io +/localstack-core/localstack/services/opensearch/ @alexrashed @silv-io +/tests/aws/services/opensearch/ @alexrashed @silv-io +/tests/unit/services/opensearch/ @alexrashed @silv-io + +# pipes +/localstack-core/localstack/aws/api/pipes/ @tiurin @gregfurman @joe4dev + +# route53 +/localstack-core/localstack/aws/api/route53/ @giograno +/localstack-core/localstack/services/route53/ @giograno +/tests/aws/services/route53/ @giograno + +# route53resolver +/localstack-core/localstack/aws/api/route53resolver/ @macnev2013 @sannya-singal +/localstack-core/localstack/services/route53resolver/ @macnev2013 @sannya-singal +/tests/aws/services/route53resolver/ @macnev2013 @sannya-singal + +# s3 +/localstack-core/localstack/aws/api/s3/ @bentsku +/localstack-core/localstack/services/s3/ @bentsku +/tests/aws/services/s3/ @bentsku +/tests/unit/services/s3/ @bentsku + +# s3control +/localstack-core/localstack/aws/api/s3control/ @bentsku +/localstack-core/localstack/services/s3control/ @bentsku +/tests/aws/services/s3control/ @bentsku + +# secretsmanager +/localstack-core/localstack/aws/api/secretsmanager/ @dominikschubert @macnev2013 @MEPalma +/localstack-core/localstack/services/secretsmanager/ @dominikschubert @macnev2013 @MEPalma +/tests/aws/services/secretsmanager/ @dominikschubert @macnev2013 @MEPalma + +# ses +/localstack-core/localstack/aws/api/ses/ @viren-nadkarni +/localstack-core/localstack/services/ses/ @viren-nadkarni +/tests/aws/services/ses/ @viren-nadkarni + +# sns +/localstack-core/localstack/aws/api/sns/ @bentsku @baermat +/localstack-core/localstack/services/sns/ @bentsku @baermat +/tests/aws/services/sns/ @bentsku @baermat +/tests/unit/services/sns/ @bentsku @baermat + +# sqs +/localstack-core/localstack/aws/api/sqs/ @thrau @baermat @gregfurman +/localstack-core/localstack/services/sqs/ @thrau @baermat @gregfurman +/tests/aws/services/sqs/ @thrau @baermat @gregfurman +/tests/unit/services/sqs/ @thrau @baermat @gregfurman + +# ssm +/localstack-core/localstack/aws/api/ssm/ @dominikschubert +/localstack-core/localstack/services/ssm/ @dominikschubert +/tests/aws/services/ssm/ @dominikschubert + +# stepfunctions +/localstack-core/localstack/aws/api/stepfunctions/ @MEPalma @joe4dev @gregfurman +/localstack-core/localstack/services/stepfunctions/ @MEPalma @joe4dev @gregfurman +/tests/aws/services/stepfunctions/ @MEPalma @joe4dev @gregfurman +/tests/unit/services/stepfunctions/ @MEPalma @joe4dev @gregfurman + +# sts +/localstack-core/localstack/aws/api/sts/ @pinzon @dfangl +/localstack-core/localstack/services/sts/ @pinzon @dfangl +/tests/aws/services/sts/ @pinzon @dfangl + +# transcribe +/localstack-core/localstack/aws/api/transcribe/ @sannya-singal +/localstack-core/localstack/services/transcribe/ @sannya-singal +/tests/aws/services/transcribe/ @sannya-singal diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 4ce45fc176f1b..0000000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,15 +0,0 @@ -# Contributing - -We welcome feedback, bug reports, and pull requests! 
- -For pull requests (PRs), please stick to the following guidelines: - -* Before submitting a PR, verify that [an issue](https://github.com/localstack/localstack/issues) exists that describes the bug fix or feature you want to contribute. If there's no issue yet, please [create one](https://github.com/localstack/localstack/issues/new/choose). -* Fork localstack on your GitHub user account, make code changes there, and then create a PR against main localstack repository. -* Add tests for any new features or bug fixes. Ideally, each PR increases the test coverage. Please read our [integration testing](https://docs.localstack.cloud/contributing/integration-tests/) and [parity testing](https://docs.localstack.cloud/contributing/parity-testing/) guides on how to write tests for AWS services. -* Follow the existing code style. Run `make format` and `make lint` before checking in your code. - * Refer to [Development Environment Setup](https://docs.localstack.cloud/contributing/development-environment-setup/) if your local testing environment is not yet properly set up. -* Document newly introduced methods and classes with pydoc, and add inline comments to code that is not self-documenting. -* Separate unrelated changes into multiple PRs. - -Please note that by contributing any code or documentation to this repository (by raising PRs, or otherwise) you explicitly agree to the [**Contributor License Agreement**](.github/CLA.md). diff --git a/DOCKER.md b/DOCKER.md new file mode 100644 index 0000000000000..9d102b1a0e942 --- /dev/null +++ b/DOCKER.md @@ -0,0 +1,144 @@ +<p align="center"> + <img src="https://raw.githubusercontent.com/localstack/localstack/master/docs/localstack-readme-banner.svg" alt="LocalStack - A fully functional local cloud stack"> +</p> + +<p align="center"> + <a href="https://github.com/localstack/localstack/actions/workflows/aws-main.yml?query=branch%3Amaster"><img alt="GitHub Actions" src="https://github.com/localstack/localstack/actions/workflows/aws-main.yml/badge.svg?branch=master"></a> + <a href="https://coveralls.io/github/localstack/localstack?branch=master"><img alt="Coverage Status" src="https://coveralls.io/repos/github/localstack/localstack/badge.svg?branch=master"></a> + <a href="https://pypi.org/project/localstack/"><img alt="PyPI Version" src="https://img.shields.io/pypi/v/localstack?color=blue"></a> + <a href="https://hub.docker.com/r/localstack/localstack"><img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/localstack/localstack"></a> + <a href="https://pypi.org/project/localstack"><img alt="PyPi downloads" src="https://static.pepy.tech/badge/localstack"></a> + <a href="#backers"><img alt="Backers on Open Collective" src="https://opencollective.com/localstack/backers/badge.svg"></a> + <a href="#sponsors"><img alt="Sponsors on Open Collective" src="https://opencollective.com/localstack/sponsors/badge.svg"></a> + <a href="https://img.shields.io/pypi/l/localstack.svg"><img alt="PyPI License" src="https://img.shields.io/pypi/l/localstack.svg"></a> + <a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a> + <a href="https://github.com/astral-sh/ruff"><img alt="Ruff" src="https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json"></a> + <a href="https://twitter.com/localstack"><img alt="Twitter" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"></a> +</p> + +# What is LocalStack? 
+
+[LocalStack](https://localstack.cloud) is a cloud service emulator that runs in a single container on your laptop or in your CI environment. With LocalStack, you can run your AWS applications or Lambdas entirely on your local machine without connecting to a remote cloud provider! Whether you are testing complex CDK applications or Terraform configurations, or just beginning to learn about AWS services, LocalStack helps speed up and simplify your testing and development workflow.
+
+LocalStack supports a growing number of AWS services, like AWS Lambda, S3, DynamoDB, Kinesis, SQS, SNS, and many more! You can find a comprehensive list of supported APIs on our [☑️ Feature Coverage](https://docs.localstack.cloud/user-guide/aws/feature-coverage/) page.
+
+LocalStack also provides additional features to make your life as a cloud developer easier! Check out LocalStack's [User Guides](https://docs.localstack.cloud/user-guide/) for more information.
+
+## Usage
+
+Please make sure that you have a working [Docker environment](https://docs.docker.com/get-docker/) on your machine before moving on. You can check if Docker is correctly configured on your machine by executing `docker info` in your terminal. If it does not report an error (but shows information on your Docker system), you’re good to go.
+
+### Docker CLI
+
+You can directly start the LocalStack container using the Docker CLI. This method requires more manual steps and configuration, but it gives you more control over the container settings.
+
+You can start the Docker container by executing the following `docker run` command:
+
+```console
+$ docker run --rm -it -p 4566:4566 -p 4510-4559:4510-4559 localstack/localstack
+```
+
+Create an S3 bucket with LocalStack's [`awslocal`](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#localstack-aws-cli-awslocal) CLI:
+
+```console
+$ awslocal s3api create-bucket --bucket sample-bucket
+$ awslocal s3api list-buckets
+```
+
+**Notes**
+
+- This command reuses the image if it’s already on your machine, i.e. it will **not** pull the latest image automatically from Docker Hub.
+
+- This command does not bind all ports that are potentially used by LocalStack, nor does it mount any volumes. When using Docker to manually start LocalStack, you will have to configure the container on your own (see [`docker-compose.yml`](https://github.com/localstack/localstack/blob/master/docker-compose.yml) and [Configuration](https://docs.localstack.cloud/references/configuration/)). This could be seen as the “expert mode” of starting LocalStack. If you are looking for a simpler method of starting LocalStack, please use the [LocalStack CLI](https://docs.localstack.cloud/getting-started/installation/#localstack-cli).
+
+### Docker Compose
+
+You can start LocalStack with [Docker Compose](https://docs.docker.com/compose/) by configuring a `docker-compose.yml` file. Currently, docker-compose version 1.9.0+ is supported.
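+
+As a quick sanity check, you can print the installed Compose version first (a minimal example; the exact output differs between Compose v1 and v2):
+
+```console
+$ docker-compose --version
+```
+
+The following minimal `docker-compose.yml` can serve as a starting point: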
+
+```yaml
+version: "3.8"
+
+services:
+  localstack:
+    container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}"
+    image: localstack/localstack
+    ports:
+      - "127.0.0.1:4566:4566"            # LocalStack Gateway
+      - "127.0.0.1:4510-4559:4510-4559"  # external services port range
+    environment:
+      # LocalStack configuration: https://docs.localstack.cloud/references/configuration/
+      - DEBUG=${DEBUG:-0}
+    volumes:
+      - "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack"
+      - "/var/run/docker.sock:/var/run/docker.sock"
+```
+
+Start the container by running the following command:
+
+```console
+$ docker-compose up
+```
+
+Create a queue using SQS with LocalStack's [`awslocal`](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#localstack-aws-cli-awslocal) CLI:
+
+```console
+$ awslocal sqs create-queue --queue-name test-queue
+$ awslocal sqs list-queues
+```
+
+**Notes**
+
+- This command pulls the current nightly build from the `master` branch (if you don’t have the image locally) and **not** the latest supported version. If you want to use a specific version, set the appropriate localstack image tag at `services.localstack.image` in the `docker-compose.yml` file (for example `localstack/localstack:<version>`).
+
+- This command reuses the image if it’s already on your machine, i.e. it will **not** pull the latest image automatically from Docker Hub.
+
+- Mounting the Docker socket `/var/run/docker.sock` as a volume is required for the Lambda service. Check out the [Lambda providers](https://docs.localstack.cloud/user-guide/aws/lambda/) documentation for more information.
+
+Please note that there are a few pitfalls when configuring your stack manually via docker-compose (e.g., required container name, Docker network, volume mounts, and environment variables). We recommend using the LocalStack CLI to validate your configuration, which will print warning messages in case it detects any potential misconfigurations:
+
+```console
+$ localstack config validate
+```
+
+## Base Image Tags
+
+We push a set of different image tags for the LocalStack Docker images. When using LocalStack, you can decide which tag you want to use. These tags have different semantics and will be updated on different occasions (a short example of pinning a tag follows the list):
+
+- `latest` (default)
+  - This is our default tag. It refers to the latest commit which has been fully tested using our extensive integration test suite.
+  - This also entails changes that are part of major releases, which means that this tag can contain breaking changes.
+  - This tag should be used if you want to stay up-to-date with the latest changes.
+- `stable`
+  - This tag refers to the latest tagged release. It will be updated with every release of LocalStack.
+  - This also entails major releases, which means that this tag can contain breaking changes.
+  - This tag should be used if you want to stay up-to-date with releases, but don't necessarily need the latest and greatest changes right away.
+- `<major>` (e.g. `3`)
+  - These tags can be used to refer to the latest release of a specific major release. It will be updated with every minor and patch release within this major release.
+  - This tag should be used if you want to avoid any potential breaking changes.
+- `<major>.<minor>` (e.g. `3.0`)
+  - These tags can be used to refer to the latest release of a specific minor release. It will be updated with every patch release within this minor release.
+  - This tag can be used if you want to avoid any bigger changes, like new features, but still want to update to the latest bugfix release.
+- `<major>.<minor>.<patch>` (e.g. `3.0.2`)
+  - These tags can be used if you want to use a very specific release. It will not be updated.
+  - This tag can be used if you really want to avoid any changes to the image (not even minimal bug fixes).
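+
+For example, to track a minor release line instead of `latest`, pull and start the image with an explicit tag (a minimal illustration; the tag `3.0` is just the example from the list above, substitute whatever fits your needs):
+
+```console
+$ docker pull localstack/localstack:3.0
+$ docker run --rm -it -p 4566:4566 -p 4510-4559:4510-4559 localstack/localstack:3.0
+```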
+
+## Where to get help
+
+Get in touch with the LocalStack Team to report 🐞 [issues](https://github.com/localstack/localstack/issues/new/choose), upvote 👍 [feature requests](https://github.com/localstack/localstack/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc+), 🙋🏽 ask [support questions](https://docs.localstack.cloud/getting-started/help-and-support/), or 🗣️ discuss local cloud development:
+
+- [LocalStack Slack Community](https://localstack.cloud/contact/)
+- [LocalStack GitHub Issue tracker](https://github.com/localstack/localstack/issues)
+- [Getting Started - FAQ](https://docs.localstack.cloud/getting-started/faq/)
+
+## License
+
+Copyright (c) 2017-2024 LocalStack maintainers and contributors.
+
+Copyright (c) 2016 Atlassian and others.
+
+This version of LocalStack is released under the Apache License, Version 2.0 (see [LICENSE](https://github.com/localstack/localstack/blob/master/LICENSE.txt)). By downloading and using this software you agree to the [End-User License Agreement (EULA)](https://github.com/localstack/localstack/blob/master/doc/end_user_license_agreement).
diff --git a/Dockerfile b/Dockerfile
index aef08cf02dd9c..ecabcde459554 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,115 +1,83 @@
-ARG IMAGE_TYPE=community
-
-# java-builder: Stage to build a custom JRE (with jlink)
-FROM python:3.10.8-slim-buster@sha256:6f0a9332035a0268cdca0bfecb509ec17db855e3d079d134373b3bf5bfb9e98f as java-builder
-ARG TARGETARCH
-
-# install OpenJDK 11
-RUN apt-get update && apt-get install -y openjdk-11-jdk-headless
-
-ENV JAVA_HOME /usr/lib/jvm/java-11-openjdk-${TARGETARCH}
-
-# create a custom, minimized JRE via jlink
-RUN jlink --add-modules \
-# include required modules
-java.base,java.desktop,java.instrument,java.management,java.naming,java.scripting,java.sql,java.xml,jdk.compiler,\
-# jdk.unsupported contains sun.misc.Unsafe which is required by certain dependencies
-jdk.unsupported,\
-# add additional cipher suites
-jdk.crypto.cryptoki,\
-# add ability to open ZIP/JAR files
-jdk.zipfs,\
-# OpenSearch needs some jdk modules
-jdk.httpserver,jdk.management,\
-# MQ Broker requires management agent
-jdk.management.agent,\
-# required for Spark/Hadoop
-java.security.jgss,jdk.security.auth,\
-# Elasticsearch 7+ crashes without Thai Segmentation support
-jdk.localedata --include-locales en,th \
-    --compress 2 --strip-debug --no-header-files --no-man-pages --output /usr/lib/jvm/java-11 && \
-    cp ${JAVA_HOME}/bin/javac /usr/lib/jvm/java-11/bin/javac && \
-    cp -r ${JAVA_HOME}/include /usr/lib/jvm/java-11/include && \
-    mv /usr/lib/jvm/java-11/lib/modules /usr/lib/jvm/java-11/lib/modules.bk; \
-    cp -r ${JAVA_HOME}/lib/* /usr/lib/jvm/java-11/lib/; \
-    mv /usr/lib/jvm/java-11/lib/modules.bk /usr/lib/jvm/java-11/lib/modules; \
-    rm -rf /usr/bin/java ${JAVA_HOME} && ln -s /usr/lib/jvm/java-11/bin/java /usr/bin/java
-
-
-
-# base: Stage which installs necessary runtime dependencies (OS packages, java, maven,...)
-FROM python:3.10.8-slim-buster@sha256:6f0a9332035a0268cdca0bfecb509ec17db855e3d079d134373b3bf5bfb9e98f as base
+#
+# base: Stage which installs necessary runtime dependencies (OS packages, etc.)
+# +FROM python:3.11.13-slim-bookworm@sha256:7a3ed1226224bcc1fe5443262363d42f48cf832a540c1836ba8ccbeaadf8637c AS base ARG TARGETARCH # Install runtime OS package dependencies -RUN apt-get update && \ +RUN --mount=type=cache,target=/var/cache/apt \ + apt-get update && \ # Install dependencies to add additional repos - apt-get install -y --no-install-recommends ca-certificates curl && \ - # Setup Node 18 Repo - curl -sL https://deb.nodesource.com/setup_18.x | bash - && \ - # Install Packages - apt-get update && \ apt-get install -y --no-install-recommends \ # Runtime packages (groff-base is necessary for AWS CLI help) - git make openssl tar pixz zip unzip groff-base iputils-ping nss-passwords \ - # Postgres - postgresql postgresql-client postgresql-plpython3 \ - # NodeJS - nodejs && \ - apt-get clean && rm -rf /var/lib/apt/lists/* + ca-certificates curl gnupg git make openssl tar pixz zip unzip groff-base iputils-ping nss-passwords procps iproute2 xz-utils libatomic1 binutils && \ + # patch for CVE-2024-45490, CVE-2024-45491, CVE-2024-45492 + apt-get install --only-upgrade libexpat1 + +# FIXME Node 18 actually shouldn't be necessary in Community, but we assume its presence in lots of tests +# Install nodejs package from the dist release server. Note: we're installing from dist binaries, and not via +# `apt-get`, to avoid installing `python3.9` into the image (which otherwise comes as a dependency of nodejs). +# See https://github.com/nodejs/docker-node/blob/main/18/bullseye/Dockerfile +RUN ARCH= && dpkgArch="$(dpkg --print-architecture)" \ + && case "${dpkgArch##*-}" in \ + amd64) ARCH='x64';; \ + arm64) ARCH='arm64';; \ + *) echo "unsupported architecture"; exit 1 ;; \ + esac \ + # gpg keys listed at https://github.com/nodejs/node#release-keys + && set -ex \ + && for key in \ + C0D6248439F1D5604AAFFB4021D900FFDB233756 \ + DD792F5973C6DE52C432CBDAC77ABFA00DDBF2B7 \ + CC68F5A3106FF448322E48ED27F5E38D5B0A215F \ + 8FCCA13FEF1D0C2E91008E09770F7A9A5AE15600 \ + 890C08DB8579162FEE0DF9DB8BEAB4DFCF555EF4 \ + C82FA3AE1CBEDC6BE46B9360C43CEC45C17AB93C \ + 108F52B48DB57BB0CC439B2997B01419BD92F80A \ + A363A499291CBBC940DD62E41F10027AF002F8B0 \ + ; do \ + gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys "$key" || \ + gpg --batch --keyserver keyserver.ubuntu.com --recv-keys "$key" ; \ + done \ + && curl -LO https://nodejs.org/dist/latest-v18.x/SHASUMS256.txt \ + && LATEST_VERSION_FILENAME=$(cat SHASUMS256.txt | grep -o "node-v.*-linux-$ARCH" | sort | uniq) \ + && rm SHASUMS256.txt \ + && curl -fsSLO --compressed "https://nodejs.org/dist/latest-v18.x/$LATEST_VERSION_FILENAME.tar.xz" \ + && curl -fsSLO --compressed "https://nodejs.org/dist/latest-v18.x/SHASUMS256.txt.asc" \ + && gpg --batch --decrypt --output SHASUMS256.txt SHASUMS256.txt.asc \ + && grep " $LATEST_VERSION_FILENAME.tar.xz\$" SHASUMS256.txt | sha256sum -c - \ + && tar -xJf "$LATEST_VERSION_FILENAME.tar.xz" -C /usr/local --strip-components=1 --no-same-owner \ + && rm "$LATEST_VERSION_FILENAME.tar.xz" SHASUMS256.txt.asc SHASUMS256.txt \ + && ln -s /usr/local/bin/node /usr/local/bin/nodejs \ + # upgrade npm to the latest version + && npm upgrade -g npm \ + # smoke tests + && node --version \ + && npm --version \ + && test ! 
$(which python3.9) SHELL [ "/bin/bash", "-c" ] - -# Install Java 11 ENV LANG C.UTF-8 -RUN { \ - echo '#!/bin/sh'; echo 'set -e'; echo; \ - echo 'dirname "$(dirname "$(readlink -f "$(which javac || which java)")")"'; \ - } > /usr/local/bin/docker-java-home \ - && chmod +x /usr/local/bin/docker-java-home -COPY --from=java-builder /usr/lib/jvm/java-11 /usr/lib/jvm/java-11 -COPY --from=java-builder /etc/ssl/certs/java /etc/ssl/certs/java -COPY --from=java-builder /etc/java-11-openjdk/security /etc/java-11-openjdk/security -RUN ln -s /usr/lib/jvm/java-11/bin/java /usr/bin/java -ENV JAVA_HOME /usr/lib/jvm/java-11 -ENV PATH "${PATH}:${JAVA_HOME}/bin" - -# Install Maven - taken from official repo: -# https://github.com/carlossg/docker-maven/blob/master/openjdk-11/Dockerfile) -ARG MAVEN_VERSION=3.6.3 -ARG USER_HOME_DIR="/root" -ARG MAVEN_SHA=26ad91d751b3a9a53087aefa743f4e16a17741d3915b219cf74112bf87a438c5 -ARG MAVEN_BASE_URL=https://apache.osuosl.org/maven/maven-3/${MAVEN_VERSION}/binaries -RUN mkdir -p /usr/share/maven /usr/share/maven/ref \ - && curl -fsSL -o /tmp/apache-maven.tar.gz ${MAVEN_BASE_URL}/apache-maven-$MAVEN_VERSION-bin.tar.gz \ - && echo "${MAVEN_SHA} /tmp/apache-maven.tar.gz" | sha256sum -c - \ - && tar -xzf /tmp/apache-maven.tar.gz -C /usr/share/maven --strip-components=1 \ - && rm -f /tmp/apache-maven.tar.gz \ - && ln -s /usr/share/maven/bin/mvn /usr/bin/mvn -ENV MAVEN_HOME /usr/share/maven -ENV MAVEN_CONFIG "$USER_HOME_DIR/.m2" -ADD https://raw.githubusercontent.com/carlossg/docker-maven/9d82eaf48ee8b14ac15a36c431ba28b735e99c92/openjdk-11/settings-docker.xml /usr/share/maven/ref/ # set workdir RUN mkdir -p /opt/code/localstack +RUN mkdir /opt/code/localstack/localstack-core WORKDIR /opt/code/localstack/ -# create filesystem hierarchy -RUN mkdir -p /var/lib/localstack && \ - mkdir -p /usr/lib/localstack -# backwards compatibility with LEGACY_DIRECTORIES (TODO: deprecate and remove) -RUN mkdir -p /opt/code/localstack/localstack && \ - ln -s /usr/lib/localstack /opt/code/localstack/localstack/infra && \ +# create localstack user and filesystem hierarchy, perform some permission fixes +RUN chmod 777 . 
&& \ + useradd -ms /bin/bash localstack && \ + mkdir -p /var/lib/localstack && \ + chmod -R 777 /var/lib/localstack && \ + mkdir -p /usr/lib/localstack && \ mkdir /tmp/localstack && \ chmod -R 777 /tmp/localstack && \ touch /tmp/localstack/.marker && \ - chmod -R 777 /usr/lib/localstack - -# install basic (global) tools to final image -RUN pip install --no-cache-dir --upgrade supervisor virtualenv + mkdir -p /.npm && \ + chmod 755 /root && \ + chmod -R 777 /.npm -# install supervisor config file and entrypoint script -ADD bin/supervisord.conf /etc/supervisord.conf +# install the entrypoint script ADD bin/docker-entrypoint.sh /usr/local/bin/ # add the shipped hosts file to prevent performance degredation in windows container mode on windows # (where hosts file is not mounted) See https://github.com/localstack/localstack/issues/5178 @@ -118,185 +86,108 @@ ADD bin/hosts /etc/hosts # expose default environment # Set edge bind host so localstack can be reached by other containers # set library path and default LocalStack hostname -ENV MAVEN_CONFIG=/opt/code/localstack -ENV LD_LIBRARY_PATH=/usr/lib/jvm/java-11/lib:/usr/lib/jvm/java-11/lib/server ENV USER=localstack ENV PYTHONUNBUFFERED=1 -ENV EDGE_BIND_HOST=0.0.0.0 -ENV LOCALSTACK_HOSTNAME=localhost - -RUN mkdir /root/.serverless; chmod -R 777 /root/.serverless +# Install the latest version of awslocal globally +RUN --mount=type=cache,target=/root/.cache \ + pip3 install --upgrade awscli awscli-local requests -# builder: Stage which installs/builds the dependencies and infra-components of LocalStack -FROM base as builder +# +# builder: Stage which installs the dependencies of LocalStack Community +# +FROM base AS builder ARG TARGETARCH # Install build dependencies to base -RUN apt-get update && apt-get install -y autoconf automake cmake libsasl2-dev \ - g++ gcc libffi-dev libkrb5-dev libssl-dev \ - postgresql-server-dev-11 libpq-dev - -# Install timescaledb into postgresql -RUN (cd /tmp && git clone https://github.com/timescale/timescaledb.git) && \ - (cd /tmp/timescaledb && git checkout 2.3.1 && ./bootstrap -DREGRESS_CHECKS=OFF && \ - cd build && make && make install) - -# init environment and cache some dependencies -ARG DYNAMODB_ZIP_URL=https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip -RUN mkdir -p /usr/lib/localstack/dynamodb && \ - curl -L -o /tmp/localstack.ddb.zip ${DYNAMODB_ZIP_URL} && \ - (cd /usr/lib/localstack/dynamodb && unzip -q /tmp/localstack.ddb.zip && rm /tmp/localstack.ddb.zip) +RUN --mount=type=cache,target=/var/cache/apt \ + apt-get update && \ + # Install dependencies to add additional repos + # g++ is a workaround to fix the JPype1 compile error on ARM Linux "gcc: fatal error: cannot execute ‘cc1plus’" + apt-get install -y gcc g++ # upgrade python build tools -RUN (virtualenv .venv && . .venv/bin/activate && pip3 install --upgrade pip wheel setuptools) +RUN --mount=type=cache,target=/root/.cache \ + (python -m venv .venv && . .venv/bin/activate && pip3 install --upgrade pip wheel setuptools) -# add files necessary to install all dependencies -ADD Makefile setup.py setup.cfg pyproject.toml ./ -# add the root package init to invalidate docker layers with version bumps -ADD localstack/__init__.py localstack/ +# add files necessary to install runtime dependencies +ADD Makefile pyproject.toml requirements-runtime.txt ./ # add the localstack start scripts (necessary for the installation of the runtime dependencies, i.e. 
`pip install -e .`) -ADD bin/localstack bin/localstack.bat bin/ - -# install dependencies to run the localstack runtime and save which ones were installed -RUN make install-runtime -RUN make freeze > requirements-runtime.txt -# link the extensions virtual environment into the localstack venv -RUN echo /var/lib/localstack/lib/extensions/python_venv/lib/python3.10/site-packages > localstack-extensions-venv.pth && \ - mv localstack-extensions-venv.pth .venv/lib/python*/site-packages/ +ADD bin/localstack bin/localstack.bat bin/localstack-supervisor bin/ +# Install dependencies for running the LocalStack runtime +RUN --mount=type=cache,target=/root/.cache\ + . .venv/bin/activate && pip3 install -r requirements-runtime.txt -# intermediate step which creates a working localstack image without the entrypoint and the version marker -FROM base as unmarked -LABEL authors="LocalStack Contributors" -LABEL maintainer="LocalStack Team (info@localstack.cloud)" -LABEL description="LocalStack Docker image" +# +# final stage: Builds upon base stage and copies resources from builder stages +# +FROM base +COPY --from=builder /opt/code/localstack/.venv /opt/code/localstack/.venv +# The build version is set in the docker-helper.sh script to be the output of setuptools_scm +ARG LOCALSTACK_BUILD_VERSION -# Copy the build dependencies -COPY --from=builder /opt/code/localstack/ /opt/code/localstack/ +# add project files necessary to install all dependencies +ADD Makefile pyproject.toml ./ +# add the localstack start scripts (necessary for the installation of the runtime dependencies, i.e. `pip install -e .`) +ADD bin/localstack bin/localstack.bat bin/localstack-supervisor bin/ -# Copy in postgresql extensions -COPY --from=builder /usr/share/postgresql/11/extension /usr/share/postgresql/11/extension -COPY --from=builder /usr/lib/postgresql/11/lib /usr/lib/postgresql/11/lib +# add the code as late as possible +ADD localstack-core/ /opt/code/localstack/localstack-core -RUN if [ -e /usr/bin/aws ]; then mv /usr/bin/aws /usr/bin/aws.bk; fi; ln -s /opt/code/localstack/.venv/bin/aws /usr/bin/aws +# Install LocalStack Community and generate the version file while doing so +RUN --mount=type=cache,target=/root/.cache \ + . .venv/bin/activate && \ + SETUPTOOLS_SCM_PRETEND_VERSION_FOR_LOCALSTACK_CORE=${LOCALSTACK_BUILD_VERSION} \ + pip install -e .[runtime] -# fix some permissions and create local user -RUN mkdir -p /.npm && \ - chmod 777 . && \ - chmod 755 /root && \ - chmod -R 777 /.npm && \ - chmod -R 777 /var/lib/localstack && \ - useradd -ms /bin/bash localstack && \ - ln -s `pwd` /tmp/localstack_install_dir +# Generate the plugin entrypoints +RUN SETUPTOOLS_SCM_PRETEND_VERSION_FOR_LOCALSTACK_CORE=${LOCALSTACK_BUILD_VERSION} \ + make entrypoints -# Install the latest version of awslocal globally -RUN pip3 install --upgrade awscli awscli-local requests - -# Add the code in the last step -ADD localstack/ localstack/ - -# Install the latest version of localstack-ext and generate the plugin entrypoints. -# If this is a pre-release build, also include dev releases of these packages. -ARG LOCALSTACK_PRE_RELEASE=1 -RUN (PIP_ARGS=$([[ "$LOCALSTACK_PRE_RELEASE" == "1" ]] && echo "--pre" || true); \ - virtualenv .venv && . .venv/bin/activate && \ - pip3 install --upgrade ${PIP_ARGS} localstack-ext[runtime]) -RUN make entrypoints +# Generate service catalog cache in static libs dir +RUN . 
.venv/bin/activate && python3 -m localstack.aws.spec # Install packages which should be shipped by default -RUN source .venv/bin/activate && \ - python -m localstack.cli.lpm install --parallel 4 \ - cloudformation-libs \ - dynamodb-local \ - iot-rule-engine \ - kinesis-mock \ - lambda-java-libs \ - local-kms \ - mqtt \ - postgres \ - redis \ - stepfunctions \ - stepfunctions-local \ - timescaledb && \ - rm -rf /tmp/localstack/* && \ - rm -rf /var/lib/localstack/cache/* && \ +RUN --mount=type=cache,target=/root/.cache \ + --mount=type=cache,target=/var/lib/localstack/cache \ + source .venv/bin/activate && \ + python -m localstack.cli.lpm install \ + lambda-runtime \ + jpype-jsonata \ + dynamodb-local && \ chown -R localstack:localstack /usr/lib/localstack && \ chmod -R 777 /usr/lib/localstack - -# Add the build date and git hash at last (changes everytime) -ARG LOCALSTACK_BUILD_DATE -ARG LOCALSTACK_BUILD_GIT_HASH -ARG LOCALSTACK_BUILD_VERSION -ENV LOCALSTACK_BUILD_DATE=${LOCALSTACK_BUILD_DATE} -ENV LOCALSTACK_BUILD_GIT_HASH=${LOCALSTACK_BUILD_GIT_HASH} -ENV LOCALSTACK_BUILD_VERSION=${LOCALSTACK_BUILD_VERSION} - -# clean up some libs (e.g., Maven should be no longer required after initial installation has completed) -RUN rm -rf /usr/share/maven +# link the python package installer virtual environments into the localstack venv +RUN echo /var/lib/localstack/lib/python-packages/lib/python3.11/site-packages > localstack-var-python-packages-venv.pth && \ + mv localstack-var-python-packages-venv.pth .venv/lib/python*/site-packages/ +RUN echo /usr/lib/localstack/python-packages/lib/python3.11/site-packages > localstack-static-python-packages-venv.pth && \ + mv localstack-static-python-packages-venv.pth .venv/lib/python*/site-packages/ # expose edge service, external service ports, and debugpy EXPOSE 4566 4510-4559 5678 -HEALTHCHECK --interval=10s --start-period=15s --retries=5 --timeout=5s CMD ./bin/localstack status services --format=json +HEALTHCHECK --interval=10s --start-period=15s --retries=5 --timeout=10s CMD /opt/code/localstack/.venv/bin/localstack status services --format=json # default volume directory VOLUME /var/lib/localstack +# mark the image version +RUN touch /usr/lib/localstack/.community-version -# base-community: Stage which will contain the community-version starting from 2.0.0 -FROM unmarked as unmarked-community - -# base-pro: Stage which will contain the pro-version starting from 2.0.0 -FROM unmarked as unmarked-pro - -# base-light: Stage which does not add additional dependencies (like elasticsearch) -# FIXME deprecated -FROM unmarked as unmarked-light - -# base-full: Stage which adds additional dependencies to avoid installing them at runtime (f.e. 
elasticsearch)
-# FIXME deprecated
-FROM unmarked as unmarked-full
-
-# Install Elasticsearch
-# https://github.com/pires/docker-elasticsearch/issues/56
-ENV ES_TMPDIR /tmp
-
-ENV ES_BASE_DIR=/usr/lib/localstack/elasticsearch/Elasticsearch_7.10
-ENV ES_JAVA_HOME /usr/lib/jvm/java-11
-RUN TARGETARCH_SYNONYM=$([[ "$TARGETARCH" == "amd64" ]] && echo "x86_64" || echo "aarch64"); \
-    curl -L -o /tmp/localstack.es.tar.gz \
-        https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.0-linux-${TARGETARCH_SYNONYM}.tar.gz && \
-    (cd /tmp && tar -xf localstack.es.tar.gz && \
-        mkdir -p $ES_BASE_DIR && mv elasticsearch*/* $ES_BASE_DIR && rm /tmp/localstack.es.tar.gz) && \
-    (cd $ES_BASE_DIR && \
-        bin/elasticsearch-plugin install analysis-icu && \
-        bin/elasticsearch-plugin install ingest-attachment --batch && \
-        bin/elasticsearch-plugin install analysis-kuromoji && \
-        bin/elasticsearch-plugin install mapper-murmur3 && \
-        bin/elasticsearch-plugin install mapper-size && \
-        bin/elasticsearch-plugin install analysis-phonetic && \
-        bin/elasticsearch-plugin install analysis-smartcn && \
-        bin/elasticsearch-plugin install analysis-stempel && \
-        bin/elasticsearch-plugin install analysis-ukrainian) && \
-    ( rm -rf $ES_BASE_DIR/jdk/ ) && \
-    ( mkdir -p $ES_BASE_DIR/data && \
-        mkdir -p $ES_BASE_DIR/logs && \
-        chmod -R 777 $ES_BASE_DIR/config && \
-        chmod -R 777 $ES_BASE_DIR/data && \
-        chmod -R 777 $ES_BASE_DIR/logs) && \
-    ( rm -rf $ES_BASE_DIR/modules/x-pack-ml/platform && \
-        rm -rf $ES_BASE_DIR/modules/ingest-geoip)
-
-
-FROM unmarked-${IMAGE_TYPE}
-ARG IMAGE_TYPE
+LABEL authors="LocalStack Contributors"
+LABEL maintainer="LocalStack Team (info@localstack.cloud)"
+LABEL description="LocalStack Docker image"
-# mark the image version
-RUN touch /usr/lib/localstack/.${IMAGE_TYPE}-version
+# Add the build date and git hash at last (changes every time)
+ARG LOCALSTACK_BUILD_DATE
+ARG LOCALSTACK_BUILD_GIT_HASH
+ENV LOCALSTACK_BUILD_DATE=${LOCALSTACK_BUILD_DATE}
+ENV LOCALSTACK_BUILD_GIT_HASH=${LOCALSTACK_BUILD_GIT_HASH}
+ENV LOCALSTACK_BUILD_VERSION=${LOCALSTACK_BUILD_VERSION}
 # define command at startup
 ENTRYPOINT ["docker-entrypoint.sh"]
diff --git a/Dockerfile.rh b/Dockerfile.rh
deleted file mode 100644
index db3cb210ff068..0000000000000
--- a/Dockerfile.rh
+++ /dev/null
@@ -1,68 +0,0 @@
-# Disclaimer: This Dockerfile is used to regularly test compatibility with RedHat based images.
-# It is _not_ the usually Dockerfile and currently _not_ officially supported.
-
-FROM redhat/ubi8
-
-LABEL authors="LocalStack Contributors"
-LABEL maintainer="LocalStack Team (info@localstack.cloud)"
-LABEL description="LocalStack Docker image"
-
-RUN dnf install -y cyrus-sasl-devel gcc gcc-c++ git iputils make npm openssl-devel procps zip \
-    && dnf clean all \
-    && rm -rf /var/cache/yum
-
-RUN dnf install -y bzip2-devel sqlite-devel libffi-devel \
-    && curl https://www.python.org/ftp/python/3.10.8/Python-3.10.8.tgz -o Python-3.10.8.tgz \
-    && tar xzf Python-3.10.8.tgz \
-    && cd Python-3.10.8 \
-    && ./configure \
-    && make -j $(nproc) \
-    && make install \
-    && cd ..
\ - && rm -rf Python-3.10.8 \ - && rm Python-3.10.8.tgz \ - && dnf remove -y bzip2-devel sqlite-devel libffi-devel \ - && dnf clean all \ - && rm -rf /var/cache/yum - -RUN python3 -m pip install -U setuptools pip wheel supervisor - -# Create a localstack user -RUN useradd -ms /bin/bash localstack - -# install entrypoint script -ADD bin/docker-entrypoint.sh /usr/local/bin/ -# add the script to start LocalStack, the supervisor.d config, and the tmp dir marker -RUN mkdir -p /opt/code/localstack/bin/ && mkdir -p /tmp/localstack && touch /tmp/localstack/.marker -WORKDIR /opt/code/localstack/ -ADD bin/localstack /opt/code/localstack/bin/ -ADD bin/supervisord.conf /etc/supervisord.conf - -# Create the venv -RUN python3 -m venv .venv -# Install python packages -RUN source .venv/bin/activate && python3 -m pip install supervisor awscli awscli-local --upgrade --no-cache-dir -# Install localstack dev packages -RUN source .venv/bin/activate && python3 -m pip install 'localstack[full]>=1.2.0' 'localstack-ext[full]>=1.2.0' --pre --upgrade --no-cache-dir -# Install the basic libraries -RUN source .venv/bin/activate && python3 -m localstack.services.install libs - -# Set default settings -ENV USER=localstack -ENV PYTHONUNBUFFERED=1 -ENV EDGE_BIND_HOST=0.0.0.0 -ENV LOCALSTACK_HOSTNAME=localhost - -# Add the build date and git hash at last (changes everytime) -ARG LOCALSTACK_BUILD_DATE -ARG LOCALSTACK_BUILD_GIT_HASH -ENV LOCALSTACK_BUILD_DATE=${LOCALSTACK_BUILD_DATE} -ENV LOCALSTACK_BUILD_GIT_HASH=${LOCALSTACK_BUILD_GIT_HASH} - -# expose ports -EXPOSE 4510-4559 4566 - -HEALTHCHECK --interval=10s --start-period=15s --retries=5 --timeout=5s CMD ./bin/localstack status services - -# define command at startup -ENTRYPOINT ["docker-entrypoint.sh"] diff --git a/Dockerfile.s3 b/Dockerfile.s3 new file mode 100644 index 0000000000000..3f377c27dc4bd --- /dev/null +++ b/Dockerfile.s3 @@ -0,0 +1,133 @@ +# base: Stage which installs necessary runtime dependencies (OS packages, filesystem...) +FROM python:3.11.13-slim-bookworm@sha256:7a3ed1226224bcc1fe5443262363d42f48cf832a540c1836ba8ccbeaadf8637c AS base +ARG TARGETARCH + +# set workdir +RUN mkdir -p /opt/code/localstack +RUN mkdir /opt/code/localstack/localstack-core +WORKDIR /opt/code/localstack/ + +# Install runtime OS package dependencies +RUN --mount=type=cache,target=/var/cache/apt \ + apt-get update && \ + # Install dependencies to add additional repos + apt-get install -y --no-install-recommends \ + # Runtime packages (groff-base is necessary for AWS CLI help) + ca-certificates curl make openssl +# TODO: add this if we need the DNS server: iputils-ping iproute2 + +SHELL [ "/bin/bash", "-c" ] + +# create localstack user and filesystem hierarchy, perform some permission fixes +RUN chmod 777 . 
&& \
+    useradd -ms /bin/bash localstack && \
+    mkdir -p /var/lib/localstack && \
+    chmod -R 777 /var/lib/localstack && \
+    mkdir -p /usr/lib/localstack && \
+    mkdir /tmp/localstack && \
+    chmod -R 777 /tmp/localstack && \
+    touch /tmp/localstack/.marker
+
+# install the entrypoint script
+ADD bin/docker-entrypoint.sh /usr/local/bin/
+# add the shipped hosts file to prevent performance degradation in windows container mode on windows
+# (where hosts file is not mounted) See https://github.com/localstack/localstack/issues/5178
+ADD bin/hosts /etc/hosts
+
+# expose default environment
+# Set edge bind host so localstack can be reached by other containers
+# set library path and default LocalStack hostname
+ENV USER=localstack
+ENV PYTHONUNBUFFERED=1
+
+
+# builder: Stage which installs the dependencies of LocalStack Community
+FROM base AS builder
+ARG TARGETARCH
+
+# Install build dependencies to base
+RUN --mount=type=cache,target=/var/cache/apt \
+    apt-get update && \
+        # Install dependencies to add additional repos
+        apt-get install -y gcc
+
+# upgrade python build tools
+RUN --mount=type=cache,target=/root/.cache \
+    (python3 -m venv .venv && . .venv/bin/activate && pip3 install --upgrade pip wheel setuptools setuptools_scm build)
+
+# add files necessary to install all dependencies
+ADD Makefile pyproject.toml requirements-base-runtime.txt ./
+# add the localstack start scripts (necessary for the installation of the runtime dependencies, i.e. `pip install -e .`)
+ADD bin/localstack bin/localstack.bat bin/localstack-supervisor bin/
+
+# Install dependencies for running the LocalStack base runtime (for S3)
+RUN --mount=type=cache,target=/root/.cache \
+    . .venv/bin/activate && pip3 install -r requirements-base-runtime.txt
+
+# delete the botocore specs for other services (>80mb)
+# TODO: well now it's compressed and it's much lighter: 20mb maybe not worth it
+RUN find .venv/lib/python3.11/site-packages/botocore/data/ -mindepth 1 -maxdepth 1 -type d -not -name s3 -exec rm -rf '{}' \;
+
+
+# final stage: Builds upon base stage and copies resources from builder stages
+FROM base
+COPY --from=builder /opt/code/localstack/.venv /opt/code/localstack/.venv
+# The build version is set in the docker-helper.sh script to be the output of setuptools_scm
+ARG LOCALSTACK_BUILD_VERSION
+
+# add project files necessary to install all dependencies
+ADD Makefile pyproject.toml requirements-base-runtime.txt ./
+# add the localstack start scripts (necessary for the installation of the runtime dependencies, i.e. `pip install -e .`)
+ADD bin/localstack bin/localstack.bat bin/localstack-supervisor bin/
+
+# add the code as late as possible
+ADD localstack-core/ /opt/code/localstack/localstack-core
+
+# Install LocalStack Community and generate the version file while doing so
+RUN --mount=type=cache,target=/root/.cache \
+    . .venv/bin/activate && \
+    SETUPTOOLS_SCM_PRETEND_VERSION_FOR_LOCALSTACK_CORE=${LOCALSTACK_BUILD_VERSION} \
+    pip install -e .[base-runtime]
+
+# Generate the plugin entrypoints
+RUN SETUPTOOLS_SCM_PRETEND_VERSION_FOR_LOCALSTACK_CORE=${LOCALSTACK_BUILD_VERSION} \
+    make entrypoints
+
+# Generate service catalog cache in static libs dir
+RUN . .venv/bin/activate && python3 -m localstack.aws.spec
+
+# link the python package installer virtual environments into the localstack venv
+RUN echo /var/lib/localstack/lib/python-packages/lib/python3.11/site-packages > localstack-var-python-packages-venv.pth && \
+    mv localstack-var-python-packages-venv.pth .venv/lib/python*/site-packages/
+RUN echo /usr/lib/localstack/python-packages/lib/python3.11/site-packages > localstack-static-python-packages-venv.pth && \
+    mv localstack-static-python-packages-venv.pth .venv/lib/python*/site-packages/
+
+# expose edge service and debugpy
+EXPOSE 4566 5678
+
+HEALTHCHECK --interval=10s --start-period=15s --retries=5 --timeout=10s CMD /opt/code/localstack/.venv/bin/localstack status services --format=json
+
+# default volume directory
+VOLUME /var/lib/localstack
+
+# mark the image version
+RUN touch /usr/lib/localstack/.s3-version
+
+LABEL authors="LocalStack Contributors"
+LABEL maintainer="LocalStack Team (info@localstack.cloud)"
+LABEL description="LocalStack S3 Docker image"
+
+# Add the build date and git hash at last (changes every time)
+ARG LOCALSTACK_BUILD_DATE
+ARG LOCALSTACK_BUILD_GIT_HASH
+ENV LOCALSTACK_BUILD_DATE=${LOCALSTACK_BUILD_DATE}
+ENV LOCALSTACK_BUILD_GIT_HASH=${LOCALSTACK_BUILD_GIT_HASH}
+ENV LOCALSTACK_BUILD_VERSION=${LOCALSTACK_BUILD_VERSION}
+ENV EAGER_SERVICE_LOADING=1
+ENV SERVICES=s3
+ENV GATEWAY_SERVER=twisted
+# TODO: do we need DNS for the S3 image?
+ENV DNS_ADDRESS=false
+
+# define command at startup
+ENTRYPOINT ["docker-entrypoint.sh"]
diff --git a/MANIFEST.in b/MANIFEST.in
index 47720f78d45d3..07442c11a993f 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,10 @@
+exclude .github/**
+exclude .circleci/**
+exclude docs/**
+exclude tests/**
+exclude .test_durations
+exclude .gitignore
+exclude .pre-commit-config.yaml
+exclude .python-version
 include Makefile
 include LICENSE.txt
-recursive-include localstack/ext *.java
-recursive-include localstack/ext pom.xml
-recursive-include localstack/utils/kinesis *.java
-recursive-include localstack/utils/kinesis *.py
diff --git a/Makefile b/Makefile
index d9df9e18c191f..4f926170e9272 100644
--- a/Makefile
+++ b/Makefile
@@ -1,18 +1,18 @@
 IMAGE_NAME ?= localstack/localstack
-IMAGE_NAME_PRO ?= $(IMAGE_NAME)-pro
-IMAGE_NAME_LIGHT ?= $(IMAGE_NAME)-light
-IMAGE_NAME_FULL ?= $(IMAGE_NAME)-full
-IMAGE_TAG ?= $(shell cat localstack/__init__.py | grep '^__version__ =' | sed "s/__version__ = ['\"]\(.*\)['\"].*/\1/")
+DEFAULT_TAG ?= latest
 VENV_BIN ?= python3 -m venv
 VENV_DIR ?= .venv
 PIP_CMD ?= pip3
 TEST_PATH ?= .
-PYTEST_LOGLEVEL ?=
-MAIN_CONTAINER_NAME ?= localstack_main
+TEST_EXEC ?= python -m
+PYTEST_LOGLEVEL ?= warning
-MAJOR_VERSION = $(shell echo ${IMAGE_TAG} | cut -d '.' -f1)
-MINOR_VERSION = $(shell echo ${IMAGE_TAG} | cut -d '.' -f2)
-PATCH_VERSION = $(shell echo ${IMAGE_TAG} | cut -d '.' -f3)
+uname_m := $(shell uname -m)
+ifeq ($(uname_m),x86_64)
+platform = amd64
+else
+platform = arm64
+endif
 ifeq ($(OS), Windows_NT)
 	VENV_ACTIVATE = $(VENV_DIR)/Scripts/activate
@@ -23,9 +23,9 @@ endif
 VENV_RUN = .
$(VENV_ACTIVATE) usage: ## Show this help - @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/:.*##\s*/##/g' | awk -F'##' '{ printf "%-25s %s\n", $$1, $$2 }' + @grep -Fh "##" $(MAKEFILE_LIST) | grep -Fv fgrep | sed -e 's/:.*##\s*/##/g' | awk -F'##' '{ printf "%-25s %s\n", $$1, $$2 }' -$(VENV_ACTIVATE): setup.py setup.cfg +$(VENV_ACTIVATE): pyproject.toml test -d $(VENV_DIR) || $(VENV_BIN) $(VENV_DIR) $(VENV_RUN); $(PIP_CMD) install --upgrade pip setuptools wheel plux touch $(VENV_ACTIVATE) @@ -35,30 +35,53 @@ venv: $(VENV_ACTIVATE) ## Create a new (empty) virtual environment freeze: ## Run pip freeze -l in the virtual environment @$(VENV_RUN); pip freeze -l +upgrade-pinned-dependencies: venv + $(VENV_RUN); $(PIP_CMD) install --upgrade pip-tools pre-commit + $(VENV_RUN); pip-compile --strip-extras --upgrade -o requirements-basic.txt pyproject.toml + $(VENV_RUN); pip-compile --strip-extras --upgrade --extra runtime -o requirements-runtime.txt pyproject.toml + $(VENV_RUN); pip-compile --strip-extras --upgrade --extra test -o requirements-test.txt pyproject.toml + $(VENV_RUN); pip-compile --strip-extras --upgrade --extra dev -o requirements-dev.txt pyproject.toml + $(VENV_RUN); pip-compile --strip-extras --upgrade --extra typehint -o requirements-typehint.txt pyproject.toml + $(VENV_RUN); pip-compile --strip-extras --upgrade --extra base-runtime -o requirements-base-runtime.txt pyproject.toml + $(VENV_RUN); pre-commit autoupdate + install-basic: venv ## Install basic dependencies for CLI usage into venv - $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[cli]" + $(VENV_RUN); $(PIP_CMD) install -r requirements-basic.txt + $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e . install-runtime: venv ## Install dependencies for the localstack runtime into venv - $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[cli,runtime]" + $(VENV_RUN); $(PIP_CMD) install -r requirements-runtime.txt + $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[runtime]" install-test: venv ## Install requirements to run tests into venv - $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[cli,runtime,test]" - -install-test-only: venv + $(VENV_RUN); $(PIP_CMD) install -r requirements-test.txt $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[test]" install-dev: venv ## Install developer requirements into venv - $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[cli,runtime,test,dev]" + $(VENV_RUN); $(PIP_CMD) install -r requirements-dev.txt + $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[dev]" + +install-dev-types: venv ## Install developer requirements incl. type hints into venv + $(VENV_RUN); $(PIP_CMD) install -r requirements-typehint.txt + $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[typehint]" + +install-s3: venv ## Install dependencies for the localstack runtime for s3-only into venv + $(VENV_RUN); $(PIP_CMD) install -r requirements-base-runtime.txt + $(VENV_RUN); $(PIP_CMD) install $(PIP_OPTS) -e ".[base-runtime]" install: install-dev entrypoints ## Install full dependencies into venv -entrypoints: ## Run setup.py develop to build entry points - $(VENV_RUN); python setup.py plugins egg_info +entrypoints: ## Run plux to build entry points + $(VENV_RUN); python3 -c "from setuptools import setup; setup()" plugins egg_info + @# make sure that the entrypoints were correctly created and are non-empty + @test -s localstack-core/localstack_core.egg-info/entry_points.txt || (echo "Entrypoints were not correctly created! Aborting!" 

 dist: entrypoints ## Build source and built (wheel) distributions of the current version
-	$(VENV_RUN); pip install --upgrade twine; python setup.py sdist bdist_wheel
+	$(VENV_RUN); pip install --upgrade twine; python -m build

 publish: clean-dist dist ## Publish the library to the central PyPi repository
+	# make sure the dist archive contains a non-empty entry_points.txt file before uploading
+	tar --wildcards --to-stdout -xf dist/localstack?core*.tar.gz "localstack?core*/localstack-core/localstack_core.egg-info/entry_points.txt" | grep . > /dev/null 2>&1 || (echo "Refusing upload, localstack-core dist does not contain entrypoints." && exit 1)
 	$(VENV_RUN); twine upload dist/*

 coveralls: ## Publish coveralls metrics
@@ -67,131 +90,18 @@ coveralls: ## Publish coveralls metrics
 start: ## Manually start the local infrastructure for testing
 	($(VENV_RUN); exec bin/localstack start --host)

-docker-image-stats: ## TODO remove when image size is acceptable
-	docker image inspect $(IMAGE_NAME_FULL) --format='{{.Size}}'
-	docker history $(IMAGE_NAME_FULL)
-
-# By default we export the full image
-TAGS ?= $(IMAGE_NAME) $(IMAGE_NAME_PRO) $(IMAGE_NAME_LIGHT) $(IMAGE_NAME_FULL)
-docker-save-images: ## Export the built Docker image
-	docker save -o target/localstack-docker-images-$(PLATFORM).tar $(TAGS)
-
-# By default we export the community image
-TAG ?= $(IMAGE_NAME)
-# By default we load the result to the docker daemon
-DOCKER_BUILD_FLAGS ?= "--load"
-DOCKERFILE ?= "./Dockerfile"
-docker-build: ## Build Docker image
-	# start build
-	# --add-host: Fix for Centos host OS
-	# --build-arg BUILDKIT_INLINE_CACHE=1: Instruct buildkit to inline the caching information into the image
-	# --cache-from: Use the inlined caching information when building the image
-	DOCKER_BUILDKIT=1 docker buildx build --pull --progress=plain \
-		--cache-from $(TAG) --build-arg BUILDKIT_INLINE_CACHE=1 \
-		--build-arg LOCALSTACK_PRE_RELEASE=$(shell cat localstack/__init__.py | grep '^__version__ =' | grep -v '.dev' >> /dev/null && echo "0" || echo "1") \
-		--build-arg LOCALSTACK_BUILD_GIT_HASH=$(shell git rev-parse --short HEAD) \
-		--build-arg=LOCALSTACK_BUILD_DATE=$(shell date -u +"%Y-%m-%d") \
-		--build-arg=LOCALSTACK_BUILD_VERSION=$(IMAGE_TAG) \
-		--add-host="localhost.localdomain:127.0.0.1" \
-		-t $(TAG) $(DOCKER_BUILD_FLAGS) . -f $(DOCKERFILE)
-
-docker-build-light: ## Build Light Docker image
-	make DOCKER_BUILD_FLAGS="--build-arg IMAGE_TYPE=light --load" \
-		TAG=$(IMAGE_NAME_LIGHT) docker-build
-
-docker-build-full: ## Build Full Docker image
-	make DOCKER_BUILD_FLAGS="--build-arg IMAGE_TYPE=full --load" \
-		TAG=$(IMAGE_NAME_FULL) docker-build
-
-docker-build-pro: ## Build Pro Docker image
-	make DOCKER_BUILD_FLAGS="--build-arg IMAGE_TYPE=pro --load" \
-		TAG=$(IMAGE_NAME_PRO) docker-build
-
-docker-build-multiarch: ## Build the Multi-Arch Full Docker Image
-	# Make sure to prepare your environment for cross-platform docker builds! (see doc/developer_guides/README.md)
-	# Multi-Platform builds cannot be loaded to the docker daemon from buildx, so we can't add "--load". 
- make DOCKER_BUILD_FLAGS="--platform linux/amd64,linux/arm64" docker-build - -SOURCE_IMAGE_NAME ?= $(IMAGE_NAME) -TARGET_IMAGE_NAME ?= $(IMAGE_NAME) -docker-push-master: ## Push a single platform-specific Docker image to registry IF we are currently on the master branch - (CURRENT_BRANCH=`(git rev-parse --abbrev-ref HEAD | grep '^master$$' || ((git branch -a | grep 'HEAD detached at [0-9a-zA-Z]*)') && git branch -a)) | grep '^[* ]*master$$' | sed 's/[* ]//g' || true`; \ - test "$$CURRENT_BRANCH" != 'master' && echo "Not on master branch.") || \ - ((test "$$DOCKER_USERNAME" = '' || test "$$DOCKER_PASSWORD" = '' ) && \ - echo "Skipping docker push as no credentials are provided.") || \ - (REMOTE_ORIGIN="`git remote -v | grep '/localstack' | grep origin | grep push | awk '{print $$2}'`"; \ - test "$$REMOTE_ORIGIN" != 'https://github.com/localstack/localstack.git' && \ - test "$$REMOTE_ORIGIN" != 'git@github.com:localstack/localstack.git' && \ - echo "This is a fork and not the main repo.") || \ - ( \ - docker info | grep Username || docker login -u $$DOCKER_USERNAME -p $$DOCKER_PASSWORD; \ - docker tag $(SOURCE_IMAGE_NAME):latest $(TARGET_IMAGE_NAME):latest-$(PLATFORM) && \ - ((! (git diff HEAD~1 localstack/__init__.py | grep '^+__version__ =' | grep -v '.dev') && \ - echo "Only pushing tag 'latest' as version has not changed.") || \ - (docker tag $(TARGET_IMAGE_NAME):latest-$(PLATFORM) $(TARGET_IMAGE_NAME):$(IMAGE_TAG)-$(PLATFORM) && \ - docker tag $(TARGET_IMAGE_NAME):latest-$(PLATFORM) $(TARGET_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION)-$(PLATFORM) && \ - docker tag $(TARGET_IMAGE_NAME):latest-$(PLATFORM) $(TARGET_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION).$(PATCH_VERSION)-$(PLATFORM) && \ - docker push $(TARGET_IMAGE_NAME):$(IMAGE_TAG)-$(PLATFORM) && \ - docker push $(TARGET_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION)-$(PLATFORM) && \ - docker push $(TARGET_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION).$(PATCH_VERSION)-$(PLATFORM) \ - )) && \ - docker push $(TARGET_IMAGE_NAME):latest-$(PLATFORM) \ - ) - -docker-push-master-all: ## Push Docker images of localstack, localstack-pro, localstack-light, and localstack-full - make SOURCE_IMAGE_NAME=$(IMAGE_NAME) TARGET_IMAGE_NAME=$(IMAGE_NAME) docker-push-master - make SOURCE_IMAGE_NAME=$(IMAGE_NAME_PRO) TARGET_IMAGE_NAME=$(IMAGE_NAME_PRO) docker-push-master - make SOURCE_IMAGE_NAME=$(IMAGE_NAME_LIGHT) TARGET_IMAGE_NAME=$(IMAGE_NAME_LIGHT) docker-push-master - make SOURCE_IMAGE_NAME=$(IMAGE_NAME_FULL) TARGET_IMAGE_NAME=$(IMAGE_NAME_FULL) docker-push-master - -MANIFEST_IMAGE_NAME ?= $(IMAGE_NAME) -docker-create-push-manifests: ## Create and push manifests for a docker image (default: community) - (CURRENT_BRANCH=`(git rev-parse --abbrev-ref HEAD | grep '^master$$' || ((git branch -a | grep 'HEAD detached at [0-9a-zA-Z]*)') && git branch -a)) | grep '^[* ]*master$$' | sed 's/[* ]//g' || true`; \ - test "$$CURRENT_BRANCH" != 'master' && echo "Not on master branch.") || \ - ((test "$$DOCKER_USERNAME" = '' || test "$$DOCKER_PASSWORD" = '' ) && \ - echo "Skipping docker manifest push as no credentials are provided.") || \ - (REMOTE_ORIGIN="`git remote -v | grep '/localstack' | grep origin | grep push | awk '{print $$2}'`"; \ - test "$$REMOTE_ORIGIN" != 'https://github.com/localstack/localstack.git' && \ - test "$$REMOTE_ORIGIN" != 'git@github.com:localstack/localstack.git' && \ - echo "This is a fork and not the main repo.") || \ - ( \ - docker info | grep Username || docker login -u $$DOCKER_USERNAME -p $$DOCKER_PASSWORD; \ - docker manifest 
create $(MANIFEST_IMAGE_NAME):latest --amend $(MANIFEST_IMAGE_NAME):latest-amd64 --amend $(MANIFEST_IMAGE_NAME):latest-arm64 && \ - ((! (git diff HEAD~1 localstack/__init__.py | grep '^+__version__ =' | grep -v '.dev') && \ - echo "Only pushing tag 'latest' as version has not changed.") || \ - (docker manifest create $(MANIFEST_IMAGE_NAME):$(IMAGE_TAG) \ - --amend $(MANIFEST_IMAGE_NAME):$(IMAGE_TAG)-amd64 \ - --amend $(MANIFEST_IMAGE_NAME):$(IMAGE_TAG)-arm64 && \ - docker manifest create $(MANIFEST_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION) \ - --amend $(MANIFEST_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION)-amd64 \ - --amend $(MANIFEST_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION)-arm64 && \ - docker manifest create $(MANIFEST_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION).$(PATCH_VERSION) \ - --amend $(MANIFEST_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION).$(PATCH_VERSION)-amd64 \ - --amend $(MANIFEST_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION).$(PATCH_VERSION)-arm64 && \ - docker manifest push $(MANIFEST_IMAGE_NAME):$(IMAGE_TAG) && \ - docker manifest push $(MANIFEST_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION) && \ - docker manifest push $(MANIFEST_IMAGE_NAME):$(MAJOR_VERSION).$(MINOR_VERSION).$(PATCH_VERSION))) && \ - docker manifest push $(MANIFEST_IMAGE_NAME):latest \ - ) - -docker-create-push-manifests-light: ## Create and push manifests for all light docker images - make MANIFEST_IMAGE_NAME=$(IMAGE_NAME) docker-create-push-manifests - make MANIFEST_IMAGE_NAME=$(IMAGE_NAME_PRO) docker-create-push-manifests - make MANIFEST_IMAGE_NAME=$(IMAGE_NAME_LIGHT) docker-create-push-manifests - docker-run-tests: ## Initializes the test environment and runs the tests in a docker container - # Remove argparse and dataclasses to fix https://github.com/pytest-dev/pytest/issues/5594 - # Note: running "install-test-only" below, to avoid pulling in [runtime] extras from transitive dependencies - docker run -e LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC=1 --entrypoint= -v `pwd`/tests/:/opt/code/localstack/tests/ -v `pwd`/target/:/opt/code/localstack/target/ \ - $(IMAGE_NAME_FULL) \ - bash -c "make install-test-only && pip uninstall -y argparse dataclasses && DEBUG=$(DEBUG) LAMBDA_EXECUTOR=local PYTEST_LOGLEVEL=debug PYTEST_ARGS='$(PYTEST_ARGS)' COVERAGE_FILE='$(COVERAGE_FILE)' TEST_PATH='$(TEST_PATH)' make test-coverage" + docker run -e LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC=1 --entrypoint= -v `pwd`/.git:/opt/code/localstack/.git -v `pwd`/requirements-test.txt:/opt/code/localstack/requirements-test.txt -v `pwd`/.test_durations:/opt/code/localstack/.test_durations -v `pwd`/tests/:/opt/code/localstack/tests/ -v `pwd`/dist/:/opt/code/localstack/dist/ -v `pwd`/target/:/opt/code/localstack/target/ -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/localstack:/var/lib/localstack \ + $(IMAGE_NAME):$(DEFAULT_TAG) \ + bash -c "make install-test && DEBUG=$(DEBUG) PYTEST_LOGLEVEL=$(PYTEST_LOGLEVEL) PYTEST_ARGS='$(PYTEST_ARGS)' COVERAGE_FILE='$(COVERAGE_FILE)' JUNIT_REPORTS_FILE=$(JUNIT_REPORTS_FILE) TEST_PATH='$(TEST_PATH)' LAMBDA_IGNORE_ARCHITECTURE=1 LAMBDA_INIT_POST_INVOKE_WAIT_MS=50 TINYBIRD_PYTEST_ARGS='$(TINYBIRD_PYTEST_ARGS)' TINYBIRD_DATASOURCE='$(TINYBIRD_DATASOURCE)' TINYBIRD_TOKEN='$(TINYBIRD_TOKEN)' TINYBIRD_URL='$(TINYBIRD_URL)' CI_REPOSITORY_NAME='$(CI_REPOSITORY_NAME)' CI_WORKFLOW_NAME='$(CI_WORKFLOW_NAME)' CI_COMMIT_BRANCH='$(CI_COMMIT_BRANCH)' CI_COMMIT_SHA='$(CI_COMMIT_SHA)' CI_JOB_URL='$(CI_JOB_URL)' CI_JOB_NAME='$(CI_JOB_NAME)' CI_JOB_ID='$(CI_JOB_ID)' CI='$(CI)' 
TEST_AWS_REGION_NAME='${TEST_AWS_REGION_NAME}' TEST_AWS_ACCESS_KEY_ID='${TEST_AWS_ACCESS_KEY_ID}' TEST_AWS_ACCOUNT_ID='${TEST_AWS_ACCOUNT_ID}' make test-coverage" -docker-run: ## Run Docker image locally - ($(VENV_RUN); bin/localstack start) +docker-run-tests-s3-only: ## Initializes the test environment and runs the tests in a docker container for the S3 only image + # TODO: We need node as it's a dependency of the InfraProvisioner at import time, remove when we do not need it anymore + # g++ is a workaround to fix the JPype1 compile error on ARM Linux "gcc: fatal error: cannot execute ‘cc1plus’" because the test dependencies include the runtime dependencies. + docker run -e LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC=1 --entrypoint= -v `pwd`/.git:/opt/code/localstack/.git -v `pwd`/requirements-test.txt:/opt/code/localstack/requirements-test.txt -v `pwd`/tests/:/opt/code/localstack/tests/ -v `pwd`/target/:/opt/code/localstack/target/ -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/localstack:/var/lib/localstack \ + $(IMAGE_NAME):$(DEFAULT_TAG) \ + bash -c "apt-get update && apt-get install -y g++ git && make install-test && apt-get install -y --no-install-recommends gnupg && mkdir -p /etc/apt/keyrings && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && echo \"deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_18.x nodistro main\" > /etc/apt/sources.list.d/nodesource.list && apt-get update && apt-get install -y --no-install-recommends nodejs && DEBUG=$(DEBUG) PYTEST_LOGLEVEL=$(PYTEST_LOGLEVEL) PYTEST_ARGS='$(PYTEST_ARGS)' TEST_PATH='$(TEST_PATH)' TINYBIRD_PYTEST_ARGS='$(TINYBIRD_PYTEST_ARGS)' TINYBIRD_DATASOURCE='$(TINYBIRD_DATASOURCE)' TINYBIRD_TOKEN='$(TINYBIRD_TOKEN)' TINYBIRD_URL='$(TINYBIRD_URL)' CI_COMMIT_BRANCH='$(CI_COMMIT_BRANCH)' CI_COMMIT_SHA='$(CI_COMMIT_SHA)' CI_JOB_URL='$(CI_JOB_URL)' CI_JOB_NAME='$(CI_JOB_NAME)' CI_JOB_ID='$(CI_JOB_ID)' CI='$(CI)' make test" -docker-mount-run: - MOTO_DIR=$$(echo $$(pwd)/.venv/lib/python*/site-packages/moto | awk '{print $$NF}'); echo MOTO_DIR $$MOTO_DIR; \ - DOCKER_FLAGS="$(DOCKER_FLAGS) -v `pwd`/localstack/constants.py:/opt/code/localstack/localstack/constants.py -v `pwd`/localstack/config.py:/opt/code/localstack/localstack/config.py -v `pwd`/localstack/plugins.py:/opt/code/localstack/localstack/plugins.py -v `pwd`/localstack/plugin:/opt/code/localstack/localstack/plugin -v `pwd`/localstack/runtime:/opt/code/localstack/localstack/runtime -v `pwd`/localstack/utils:/opt/code/localstack/localstack/utils -v `pwd`/localstack/services:/opt/code/localstack/localstack/services -v `pwd`/localstack/http:/opt/code/localstack/localstack/http -v `pwd`/localstack/contrib:/opt/code/localstack/localstack/contrib -v `pwd`/tests:/opt/code/localstack/tests -v $$MOTO_DIR:/opt/code/localstack/.venv/lib/python3.10/site-packages/moto/" make docker-run docker-cp-coverage: @echo 'Extracting .coverage file from Docker image'; \ @@ -200,98 +110,48 @@ docker-cp-coverage: docker rm -v $$id test: ## Run automated tests - ($(VENV_RUN); DEBUG=$(DEBUG) pytest --durations=10 --log-cli-level=$(PYTEST_LOGLEVEL) -s $(PYTEST_ARGS) $(TEST_PATH)) - -test-coverage: ## Run automated tests and create coverage report - ($(VENV_RUN); python -m coverage --version; \ - DEBUG=$(DEBUG) \ - LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC=1 \ - python -m coverage run $(COVERAGE_ARGS) -m \ - pytest --durations=10 --log-cli-level=$(PYTEST_LOGLEVEL) -s $(PYTEST_ARGS) $(TEST_PATH)) - -test-docker: - 
DOCKER_FLAGS="--entrypoint= $(DOCKER_FLAGS)" CMD="make test" make docker-run - -test-docker-mount: ## Run automated tests in Docker (mounting local code) - # TODO: find a cleaner way to mount/copy the dependencies into the container... - VENV_DIR=$$(pwd)/.venv/; \ - PKG_DIR=$$(echo $$VENV_DIR/lib/python*/site-packages | awk '{print $$NF}'); \ - PKG_DIR_CON=/opt/code/localstack/.venv/lib/python3.10/site-packages; \ - echo "#!/usr/bin/env python" > /tmp/pytest.ls.bin; cat $$VENV_DIR/bin/pytest >> /tmp/pytest.ls.bin; chmod +x /tmp/pytest.ls.bin; \ - DOCKER_FLAGS="-v `pwd`/tests:/opt/code/localstack/tests -v /tmp/pytest.ls.bin:/opt/code/localstack/.venv/bin/pytest -v $$PKG_DIR/deepdiff:$$PKG_DIR_CON/deepdiff -v $$PKG_DIR/ordered_set:$$PKG_DIR_CON/ordered_set -v $$PKG_DIR/py:$$PKG_DIR_CON/py -v $$PKG_DIR/pluggy:$$PKG_DIR_CON/pluggy -v $$PKG_DIR/iniconfig:$$PKG_DIR_CON/iniconfig -v $$PKG_DIR/jsonpath_ng:$$PKG_DIR_CON/jsonpath_ng -v $$PKG_DIR/packaging:$$PKG_DIR_CON/packaging -v $$PKG_DIR/pytest:$$PKG_DIR_CON/pytest -v $$PKG_DIR/pytest_httpserver:$$PKG_DIR_CON/pytest_httpserver -v $$PKG_DIR/_pytest:$$PKG_DIR_CON/_pytest -v $$PKG_DIR/_pytest:$$PKG_DIR_CON/orjson" make test-docker-mount-code - -test-docker-mount-code: - PACKAGES_DIR=$$(echo $$(pwd)/.venv/lib/python*/site-packages | awk '{print $$NF}'); \ - DOCKER_FLAGS="$(DOCKER_FLAGS) --entrypoint= -v `pwd`/localstack/config.py:/opt/code/localstack/localstack/config.py -v `pwd`/localstack/constants.py:/opt/code/localstack/localstack/constants.py -v `pwd`/localstack/utils:/opt/code/localstack/localstack/utils -v `pwd`/localstack/services:/opt/code/localstack/localstack/services -v `pwd`/localstack/aws:/opt/code/localstack/localstack/aws -v `pwd`/Makefile:/opt/code/localstack/Makefile -v $$PACKAGES_DIR/moto:/opt/code/localstack/.venv/lib/python3.10/site-packages/moto/ -e TEST_PATH=\\'$(TEST_PATH)\\' -e LAMBDA_JAVA_OPTS=$(LAMBDA_JAVA_OPTS) $(ENTRYPOINT)" CMD="make test" make docker-run - -# Note: the ci-* targets below should only be used in CI builds! 
- -CI_SMOKE_IMAGE_NAME ?= $(IMAGE_NAME_LIGHT) -ci-pro-smoke-tests: - pip3 install --upgrade awscli-local - pip3 install --upgrade localstack - IMAGE_NAME=$(CI_SMOKE_IMAGE_NAME) LOCALSTACK_API_KEY=$(TEST_LOCALSTACK_API_KEY) DNS_ADDRESS=0 DEBUG=1 localstack start -d - docker logs -f $(MAIN_CONTAINER_NAME) & - localstack wait -t 120 - awslocal amplify list-apps - awslocal apigatewayv2 get-apis - awslocal appsync list-graphql-apis - awslocal athena list-data-catalogs - awslocal batch describe-job-definitions - awslocal cloudfront list-distributions - awslocal cloudtrail list-trails - awslocal cognito-idp list-user-pools --max-results 10 - awslocal docdb describe-db-clusters - awslocal ecr describe-repositories - awslocal ecs list-clusters - awslocal emr list-clusters - awslocal elasticache describe-cache-clusters - awslocal glue get-databases - awslocal iot list-things - awslocal kafka list-clusters - awslocal lambda list-layers - awslocal mediastore list-containers - awslocal mwaa list-environments - awslocal qldb list-ledgers - awslocal rds create-db-cluster --db-cluster-identifier test-cluster --engine aurora-postgresql --database-name test --master-username master --master-user-password secret99 --db-subnet-group-name mysubnetgroup - awslocal rds describe-db-instances - awslocal s3 mb s3://test-bucket - awslocal timestream-write create-database --database-name db1 - awslocal xray get-trace-summaries --start-time 2020-01-01 --end-time 2030-12-31 - localstack stop - -lint: ## Run code linter to check code style - ($(VENV_RUN); python -m pflake8 --show-source) - -lint-modified: ## Run code linter on modified files - ($(VENV_RUN); python -m pflake8 --show-source `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs` ) - -format: ## Run black and isort code formatter - ($(VENV_RUN); python -m isort localstack tests; python -m black localstack tests ) - -format-modified: ## Run black and isort code formatter on modified files - ($(VENV_RUN); python -m isort `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs`; python -m black `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs` ) + ($(VENV_RUN); $(TEST_EXEC) pytest --durations=10 --log-cli-level=$(PYTEST_LOGLEVEL) --junitxml=$(JUNIT_REPORTS_FILE) $(PYTEST_ARGS) $(TEST_PATH)) + +test-coverage: LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC = 1 +test-coverage: TEST_EXEC = python -m coverage run $(COVERAGE_ARGS) -m +test-coverage: test ## Run automated tests and create coverage report + +lint: ## Run code linter to check code style, check if formatter would make changes and check if dependency pins need to be updated + @[ -f localstack-core/localstack/__init__.py ] && echo "localstack-core/localstack/__init__.py will break packaging." && exit 1 || : + ($(VENV_RUN); python -m ruff check --output-format=full . && python -m ruff format --check --diff .) 
+	$(VENV_RUN); pre-commit run check-pinned-deps-for-needed-upgrade --files pyproject.toml # run pre-commit hook manually here to ensure that this check runs in CI as well
+	$(VENV_RUN); openapi-spec-validator localstack-core/localstack/openapi.yaml
+	$(VENV_RUN); cd localstack-core && mypy --install-types --non-interactive
+
+lint-modified: ## Run code linter to check code style, check if formatter would make changes on modified files, and check if dependency pins need to be updated because of modified files
+	($(VENV_RUN); python -m ruff check --output-format=full `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs` && python -m ruff format --check `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs`)
+	$(VENV_RUN); pre-commit run check-pinned-deps-for-needed-upgrade --files $$(git diff master --name-only) # run pre-commit hook manually here to ensure that this check runs in CI as well
+
+check-aws-markers: ## Lightweight check to ensure all AWS tests have proper compatibility markers set
+	($(VENV_RUN); python -m pytest --co tests/aws/)
+
+format: ## Run ruff to format the whole codebase
+	($(VENV_RUN); python -m ruff format .; python -m ruff check --output-format=full --fix .)
+
+format-modified: ## Run ruff to format only modified code
+	($(VENV_RUN); python -m ruff format `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs`; python -m ruff check --output-format=full --fix `git diff --diff-filter=d --name-only HEAD | grep '\.py$$' | xargs`)

 init-precommit: ## install the pre-commit hook into your local git repository
 	($(VENV_RUN); pre-commit install)

+docker-build:
+	IMAGE_NAME=$(IMAGE_NAME) PLATFORM=$(platform) ./bin/docker-helper.sh build
+
 clean: ## Clean up (npm dependencies, downloaded infrastructure code, compiled Java classes)
 	rm -rf .filesystem
-	# TODO: remove localstack/infra/ as it's no longer used
-	rm -rf localstack/infra/amazon-kinesis-client
-	rm -rf localstack/infra/elasticsearch
-	rm -rf localstack/infra/elasticmq
-	rm -rf localstack/infra/dynamodb
-	rm -rf localstack/infra/node_modules
-	rm -rf localstack/node_modules
 	rm -rf build/
 	rm -rf dist/
 	rm -rf *.egg-info
+	rm -rf localstack-core/*.egg-info
 	rm -rf $(VENV_DIR)
-	rm -f localstack/utils/kinesis/java/com/atlassian/*.class

 clean-dist: ## Clean up python distribution directories
 	rm -rf dist/ build/
-	rm -rf *.egg-info
+	rm -rf localstack-core/*.egg-info

-.PHONY: usage venv freeze install-basic install-runtime install-test install-dev install entrypoints dist publish coveralls start docker-save-images docker-build docker-build-light docker-build-multi-platform docker-push-master docker-push-master-all docker-create-push-manifests docker-create-push-manifests-light docker-run-tests docker-run docker-mount-run docker-build-lambdas docker-cp-coverage test test-coverage test-docker test-docker-mount test-docker-mount-code ci-pro-smoke-tests lint lint-modified format format-modified init-precommit clean clean-dist vagrant-start vagrant-stop infra
+.PHONY: usage freeze install-basic install-runtime install-test install-dev install entrypoints dist publish coveralls start docker-run-tests docker-cp-coverage test test-coverage lint lint-modified format format-modified init-precommit clean clean-dist upgrade-pinned-dependencies
diff --git a/README.md b/README.md
index cceb3a3799073..a2e28869759a7 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,13 @@
 <p align="center">
-:zap: We are thrilled to announce <a 
href="https://discuss.localstack.cloud/t/localstack-release-v1-4-0/214">LocalStack 1.4</a> which brings new features, enhancements and bugfixes :zap: +:zap: We are thrilled to announce the release of <a href="https://blog.localstack.cloud/localstack-release-v-4-5-0/">LocalStack 4.5</a> :zap: </p> <p align="center"> - <img src="https://raw.githubusercontent.com/localstack/localstack/master/doc/localstack-readme-banner.svg" alt="LocalStack - A fully functional local cloud stack"> + <img src="https://raw.githubusercontent.com/localstack/localstack/master/docs/localstack-readme-banner.svg" alt="LocalStack - A fully functional local cloud stack"> </p> <p align="center"> - <a href="https://circleci.com/gh/localstack/localstack"><img alt="CircleCI" src="https://img.shields.io/circleci/build/gh/localstack/localstack/master?logo=circleci"></a> + <a href="https://github.com/localstack/localstack/actions/workflows/aws-main.yml?query=branch%3Amaster"><img alt="GitHub Actions" src="https://github.com/localstack/localstack/actions/workflows/aws-main.yml/badge.svg?branch=master"></a> <a href="https://coveralls.io/github/localstack/localstack?branch=master"><img alt="Coverage Status" src="https://coveralls.io/repos/github/localstack/localstack/badge.svg?branch=master"></a> <a href="https://pypi.org/project/localstack/"><img alt="PyPI Version" src="https://img.shields.io/pypi/v/localstack?color=blue"></a> <a href="https://hub.docker.com/r/localstack/localstack"><img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/localstack/localstack"></a> @@ -16,17 +16,18 @@ <a href="#sponsors"><img alt="Sponsors on Open Collective" src="https://opencollective.com/localstack/sponsors/badge.svg"></a> <a href="https://img.shields.io/pypi/l/localstack.svg"><img alt="PyPI License" src="https://img.shields.io/pypi/l/localstack.svg"></a> <a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a> - <a href="https://twitter.com/_localstack"><img alt="Twitter" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"></a> + <a href="https://github.com/astral-sh/ruff"><img alt="Ruff" src="https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json"></a> + <a href="https://twitter.com/localstack"><img alt="Twitter" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"></a> </p> <p align="center"> - LocalStack provides an easy-to-use test/mocking framework for developing cloud applications. + LocalStack is a cloud software development framework to develop and test your AWS applications locally. </p> <p align="center"> <a href="#overview">Overview</a> • - <a href="#installing">Install</a> • - <a href="#example">Example</a> • + <a href="#install">Install</a> • + <a href="#quickstart">Quickstart</a> • <a href="#running">Run</a> • <a href="#usage">Usage</a> • <a href="#releases">Releases</a> • @@ -34,7 +35,7 @@ <br/> <a href="https://docs.localstack.cloud" target="_blank">📖 Docs</a> • <a href="https://app.localstack.cloud" target="_blank">💻 Pro version</a> • - <a href="https://docs.localstack.cloud/user-guide/aws/feature-coverage/" target="_blank">☑️ Feature coverage</a> + <a href="https://docs.localstack.cloud/references/coverage/" target="_blank">☑️ LocalStack coverage</a> </p> --- @@ -43,33 +44,47 @@ [LocalStack](https://localstack.cloud) is a cloud service emulator that runs in a single container on your laptop or in your CI environment. 
With LocalStack, you can run your AWS applications or Lambdas entirely on your local machine without connecting to a remote cloud provider! Whether you are testing complex CDK applications or Terraform configurations, or just beginning to learn about AWS services, LocalStack helps speed up and simplify your testing and development workflow. -LocalStack supports a growing number of AWS services, like AWS Lambda, S3, Dynamodb, Kinesis, SQS, SNS, and many more! The [Pro version of LocalStack](https://localstack.cloud/pricing) supports additional APIs and advanced features. You can find a comprehensive list of supported APIs on our [☑️ Feature Coverage](https://docs.localstack.cloud/user-guide/aws/feature-coverage/) page. +LocalStack supports a growing number of AWS services, like AWS Lambda, S3, DynamoDB, Kinesis, SQS, SNS, and many more! The [Pro version of LocalStack](https://localstack.cloud/pricing) supports additional APIs and advanced features. You can find a comprehensive list of supported APIs on our [☑️ Feature Coverage](https://docs.localstack.cloud/user-guide/aws/feature-coverage/) page. -LocalStack also provides additional features to make your life as a cloud developer easier! Check out LocalStack's [Cloud Developer Tools](https://docs.localstack.cloud/user-guide/tools/) for more information. +LocalStack also provides additional features to make your life as a cloud developer easier! Check out LocalStack's [User Guides](https://docs.localstack.cloud/user-guide/) for more information. -## Requirements +## Install -* `python` (Python 3.7 up to 3.11 supported) -* `pip` (Python package manager) -* `Docker` +The quickest way to get started with LocalStack is by using the LocalStack CLI. It enables you to start and manage the LocalStack Docker container directly through your command line. Ensure that your machine has a functional [`docker` environment](https://docs.docker.com/get-docker/) installed before proceeding. -## Installing +### Brew (macOS or Linux with Homebrew) -The easiest way to install LocalStack is via `pip`: +Install the LocalStack CLI through our [official LocalStack Brew Tap](https://github.com/localstack/homebrew-tap): +```bash +brew install localstack/tap/localstack-cli ``` -pip install localstack + +### Binary download (macOS, Linux, Windows) + +If Brew is not installed on your machine, you can download the pre-built LocalStack CLI binary directly: + +- Visit [localstack/localstack-cli](https://github.com/localstack/localstack-cli/releases/latest) and download the latest release for your platform. +- Extract the downloaded archive to a directory included in your `PATH` variable: + - For macOS/Linux, use the command: `sudo tar xvzf ~/Downloads/localstack-cli-*-darwin-*-onefile.tar.gz -C /usr/local/bin` + +### PyPI (macOS, Linux, Windows) + +LocalStack is developed using Python. To install the LocalStack CLI using `pip`, run the following command: + +```bash +python3 -m pip install localstack ``` -This installs the `localstack-cli` which is used to run the Docker image that hosts the LocalStack runtime. +The `localstack-cli` installation enables you to run the Docker image containing the LocalStack runtime. To interact with the local AWS services, you need to install the `awslocal` CLI separately. For installation guidelines, refer to the [`awslocal` documentation](https://docs.localstack.cloud/user-guide/integrations/aws-cli/#localstack-aws-cli-awslocal). > **Important**: Do not use `sudo` or run as `root` user. 
LocalStack must be installed and started entirely under a local non-root user. If you have problems with permissions in macOS High Sierra, install with `pip install --user localstack`.

-## Example
+## Quickstart

 Start LocalStack inside a Docker container by running:

-```
+```bash
 % localstack start -d

      __                     _______ __             __
     / /   ____  _________ _/ / ___// /_____ ______/ /__
    / /   / __ \/ ___/ __ `/ /\__ \/ __/ __ `/ ___/ //_/
@@ -78,15 +93,20 @@ Start LocalStack inside a Docker container by running:
  / /___/ /_/ / /__/ /_/ / /___/ / /_/ /_/ / /__/ ,<
 /_____/\____/\___/\__,_/_//____/\__/\__,_/\___/_/|_|

- 💻 LocalStack CLI 1.4.0
+- LocalStack CLI: 4.5.0
+- Profile: default
+- App: https://app.localstack.cloud

-[20:22:20] starting LocalStack in Docker mode 🐳
-[20:22:21] detaching
+[17:00:15] starting LocalStack in Docker mode 🐳               localstack.py:512
+           preparing environment                               bootstrap.py:1322
+           configuring container                               bootstrap.py:1330
+           starting container                                  bootstrap.py:1340
+[17:00:16] detaching                                           bootstrap.py:1344
 ```

 You can query the status of respective services on LocalStack by running:

-```
+```bash
 % localstack status services
 ┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓
 ┃ Service                  ┃ Status      ┃
@@ -105,11 +125,11 @@ To use SQS, a fully managed distributed message queuing service, on LocalStack,

 ```shell
 % awslocal sqs create-queue --queue-name sample-queue
 {
-    "QueueUrl": "http://localhost:4566/000000000000/sample-queue"
+    "QueueUrl": "http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/sample-queue"
 }
 ```

-Learn more about [LocalStack AWS services](https://docs.localstack.cloud/user-guide/aws/feature-coverage/) and using them with LocalStack's `awslocal` CLI.
+Learn more about [LocalStack AWS services](https://docs.localstack.cloud/references/coverage/) and using them with LocalStack's `awslocal` CLI.
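Building on the quickstart above, sending and receiving a message on the new queue works the same way; the queue URL is the one returned by `create-queue`, and the message body is arbitrary:

```shell
% awslocal sqs send-message \
    --queue-url http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/sample-queue \
    --message-body "Hello LocalStack!"
% awslocal sqs receive-message \
    --queue-url http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/sample-queue
```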
 ## Running

@@ -122,40 +142,44 @@ You can run LocalStack through the following options:

 ## Usage

-To start using LocalStack, check out our documentation at <https://docs.localstack.cloud>.
+To start using LocalStack, check out our [documentation](https://docs.localstack.cloud).

 - [LocalStack Configuration](https://docs.localstack.cloud/references/configuration/)
 - [LocalStack in CI](https://docs.localstack.cloud/user-guide/ci/)
 - [LocalStack Integrations](https://docs.localstack.cloud/user-guide/integrations/)
 - [LocalStack Tools](https://docs.localstack.cloud/user-guide/tools/)
 - [Understanding LocalStack](https://docs.localstack.cloud/references/)
-- [Troubleshoot](doc/troubleshoot/README.md)
+- [Frequently Asked Questions](https://docs.localstack.cloud/getting-started/faq/)

 To use LocalStack with a graphical user interface, you can use the following UI clients:

-* [Commandeer desktop app](https://getcommandeer.com)
-* [DynamoDB Admin Web UI](https://www.npmjs.com/package/dynamodb-admin)
+* [LocalStack Web Application](https://app.localstack.cloud)
+* [LocalStack Desktop](https://docs.localstack.cloud/user-guide/tools/localstack-desktop/)
+* [LocalStack Docker Extension](https://docs.localstack.cloud/user-guide/tools/localstack-docker-extension/)

 ## Releases

-Please refer to [GitHub releases](https://github.com/localstack/localstack/releases) to see the complete list of changes for each release. For extended release notes, please refer to the [LocalStack Discuss](https://discuss.localstack.cloud/c/announcement/5).
+Please refer to [GitHub releases](https://github.com/localstack/localstack/releases) to see the complete list of changes for each release. For extended release notes, please refer to the [changelog](https://docs.localstack.cloud/references/changelog/).

 ## Contributing

 If you are interested in contributing to LocalStack:

-- Start by reading our [contributing guide](CONTRIBUTING.md).
-- Check out our [developer guide](https://docs.localstack.cloud/contributing/).
+- Start by reading our [contributing guide](docs/CONTRIBUTING.md).
+- Check out our [development environment setup guide](docs/development-environment-setup/README.md).
 - Navigate our codebase and [open issues](https://github.com/localstack/localstack/issues).

 We are thankful for all the contributions and feedback we receive.

 ## Get in touch

-To get in touch with LocalStack team for bugs/feature requests, support questions or general discussions, please use:
+Get in touch with the LocalStack Team to
+report 🐞 [issues](https://github.com/localstack/localstack/issues/new/choose),
+upvote 👍 [feature requests](https://github.com/localstack/localstack/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc+),
+🙋🏽 ask [support questions](https://docs.localstack.cloud/getting-started/help-and-support/),
+or 🗣️ discuss local cloud development:

 - [LocalStack Slack Community](https://localstack.cloud/contact/)
-- [LocalStack Discussion Page](https://discuss.localstack.cloud/)
 - [LocalStack GitHub Issue tracker](https://github.com/localstack/localstack/issues)

 ### Contributors
@@ -187,8 +211,8 @@ You can also support this project by becoming a sponsor on [Open Collective](htt

 ## License

-Copyright (c) 2017-2023 LocalStack maintainers and contributors.
+Copyright (c) 2017-2025 LocalStack maintainers and contributors.

 Copyright (c) 2016 Atlassian and others.

-This version of LocalStack is released under the Apache License, Version 2.0 (see [LICENSE](LICENSE.txt)). By downloading and using this software you agree to the [End-User License Agreement (EULA)](doc/end_user_license_agreement). To know about the external software we use, look at our [third party software tools](doc/third-party-software-tools/README.md) page.
+This version of LocalStack is released under the Apache License, Version 2.0 (see [LICENSE](LICENSE.txt)). By downloading and using this software you agree to the [End-User License Agreement (EULA)](docs/end_user_license_agreement).
diff --git a/bin/docker-entrypoint.sh b/bin/docker-entrypoint.sh
index 0accce7fb9f3f..1c4a297fd1fa1 100755
--- a/bin/docker-entrypoint.sh
+++ b/bin/docker-entrypoint.sh
@@ -2,151 +2,38 @@
 set -eo pipefail
 shopt -s nullglob

-if [[ ! $EDGE_PORT ]]
-then
-    EDGE_PORT=4566
-fi

-# FIXME: remove with 2.0
-# the Dockerfile creates .pro-version file for the pro image. When trying to activate pro features with any other
-# version, an error is printed.
-if [[ $LOCALSTACK_API_KEY ]] && [[ ! -f /usr/lib/localstack/.pro-version ]]; then
+# When trying to activate pro features in the community version, raise a warning
+if [[ -n $LOCALSTACK_API_KEY || -n $LOCALSTACK_AUTH_TOKEN ]]; then
     echo "WARNING"
     echo "============================================================================"
-    echo "  It seems you are using the LocalStack Pro version without using the"
-    echo "  dedicated Pro image."
-    echo "  Future versions will only support running LocalStack Pro with the"
-    echo "  dedicated image."
+    echo "  It seems you are trying to use the LocalStack Pro version without using "
+    echo "  the dedicated Pro image."
+    echo "  LocalStack will only start with community services enabled."
     echo "  To fix this warning, use localstack/localstack-pro instead."
echo "" - echo " See: https://github.com/localstack/localstack/issues/7257" - echo "============================================================================" - echo "" -elif [[ -f /usr/lib/localstack/.light-version ]] || [[ -f /usr/lib/localstack/.full-version ]]; then - echo "WARNING" - echo "============================================================================" - echo " It seems you are using a deprecated image (localstack/localstack-light" - echo " or localstack/localstack-full)." - echo " These images are deprecated and will be removed in the future." - echo " To fix this warning, use localstack/localstack instead." - echo "" - echo " See: https://github.com/localstack/localstack/issues/7257" + echo " See: https://github.com/localstack/localstack/issues/7882" echo "============================================================================" echo "" fi -# FIXME: remove with 2.0 -# the Dockerfile creates .marker file that will be overwritten if a volume is mounted into /tmp/localstack -if [ ! -f /tmp/localstack/.marker ]; then - # unless LEGACY_DIRECTORIES is explicitly set to 1, print an error message and exit with a non-zero exit code - if [[ -z ${LEGACY_DIRECTORIES} ]] || [[ ${LEGACY_DIRECTORIES} == "0" ]]; then - echo "ERROR" - echo "============================================================================" - echo " It seems you are mounting the LocalStack volume into /tmp/localstack." - echo " This will break the LocalStack container! Please update your volume mount" - echo " destination to /var/lib/localstack." - echo " You can suppress this error by setting LEGACY_DIRECTORIES=1." - echo "" - echo " See: https://github.com/localstack/localstack/issues/6398" - echo "============================================================================" - echo "" - exit 1 - fi -fi - -# This stores the PID of supervisord for us after forking -suppid=0 - -# Setup the SIGTERM-handler function -term_handler() { - send_sig="-$1" - if [ $suppid -ne 0 ]; then - echo "Sending $send_sig to supervisord" - kill ${send_sig} "$suppid" - wait "$suppid" - fi - exit 0; # 128 + 15 = 143 -- SIGTERM, but 0 is expected if proper shutdown takes place -} - -# Strip `LOCALSTACK_` prefix in environment variables name (except LOCALSTACK_HOSTNAME) +# Strip `LOCALSTACK_` prefix in environment variables name; except LOCALSTACK_HOST and LOCALSTACK_HOSTNAME (deprecated) source <( env | grep -v -e '^LOCALSTACK_HOSTNAME' | + grep -v -e '^LOCALSTACK_HOST' | grep -v -e '^LOCALSTACK_[[:digit:]]' | # See issue #1387 sed -ne 's/^LOCALSTACK_\([^=]\+\)=.*/export \1=${LOCALSTACK_\1}/p' ) -# Setup trap handler(s) -if [ "$DISABLE_TERM_HANDLER" == "" ]; then - # Catch all the main - trap 'kill -1 ${!}; term_handler 1' SIGHUP - trap 'kill -2 ${!}; term_handler 2' SIGINT - trap 'kill -3 ${!}; term_handler 3' SIGQUIT - trap 'kill -15 ${!}; term_handler 15' SIGTERM - trap 'kill -31 ${!}; term_handler 31' SIGUSR2 -fi - LOG_DIR=/var/lib/localstack/logs test -d ${LOG_DIR} || mkdir -p ${LOG_DIR} -cat /dev/null > ${LOG_DIR}/localstack_infra.log -cat /dev/null > ${LOG_DIR}/localstack_infra.err - -# FIXME for backwards compatibility with LEGACY_DIRECTORIES=1 -test -f /tmp/localstack_infra.log || ln -s ${LOG_DIR}/localstack_infra.log /tmp/localstack_infra.log -test -f /tmp/localstack_infra.err || ln -s ${LOG_DIR}/localstack_infra.err /tmp/localstack_infra.err - -# run modern runtime init scripts before starting localstack -test -d /etc/localstack/init/boot.d && /opt/code/localstack/.venv/bin/python -m localstack.runtime.init BOOT - 
-supervisord -c /etc/supervisord.conf & -suppid="$!" +# activate the virtual environment +source /opt/code/localstack/.venv/bin/activate +# run runtime init hooks BOOT stage before starting localstack +test -d /etc/localstack/init/boot.d && python3 -m localstack.runtime.init BOOT -# FIXME: remove with 2.0 -if [[ ! $INIT_SCRIPTS_PATH ]] -then - INIT_SCRIPTS_PATH=/docker-entrypoint-initaws.d -fi -# check if the init script directory exists (by default it does not) -if [ -d "$INIT_SCRIPTS_PATH" ]; then - # unless LEGACY_INIT_DIR is explicitly set to 1 print a prominent warning - if [[ -z ${LEGACY_INIT_DIR} ]] || [[ ${LEGACY_INIT_DIR} == "0" ]]; then - echo "WARNING" - echo "============================================================================" - echo " It seems you are using an init script in $INIT_SCRIPTS_PATH." - echo " The INIT_SCRIPTS_PATH have been deprecated with v1.1.0 and will be removed in future releases" - echo " of LocalStack. Please use /etc/localstack/init/ready.d instead." - echo " You can suppress this warning by setting LEGACY_INIT_DIR=1." - echo "" - echo " See: https://github.com/localstack/localstack/issues/7257" - echo "============================================================================" - echo "" - fi -fi -function run_startup_scripts { - until grep -q '^Ready.' ${LOG_DIR}/localstack_infra.log >/dev/null 2>&1 ; do - echo "Waiting for all LocalStack services to be ready" - sleep 7 - done - - curl -XPUT -s -H "Content-Type: application/json" -d '{"features:initScripts":"initializing"}' "http://localhost:$EDGE_PORT/_localstack/health" > /dev/null - for f in $INIT_SCRIPTS_PATH/*; do - case "$f" in - *.sh) echo "$0: running $f"; . "$f" ;; - *) echo "$0: ignoring $f" ;; - esac - echo - done - curl -XPUT -s -H "Content-Type: application/json" -d '{"features:initScripts":"initialized"}' "http://localhost:$EDGE_PORT/_localstack/health" > /dev/null -} -run_startup_scripts & - -# Run tail on the localstack log files forever until we are told to terminate -if [ "$DISABLE_TERM_HANDLER" == "" ]; then - while true; do - tail -qF ${LOG_DIR}/localstack_infra.log ${LOG_DIR}/localstack_infra.err & wait ${!} - done -else - tail -qF ${LOG_DIR}/localstack_infra.log ${LOG_DIR}/localstack_infra.err -fi +# run the localstack supervisor. 
it must run via `exec`, without pipes, so signals are handled correctly
+exec localstack-supervisor
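For downstream users, a sketch of wiring up a BOOT-stage init hook as executed by the entrypoint above; the host directory and script names are illustrative, and the exact hook semantics are documented in the LocalStack init-hook docs:

```bash
# scripts placed in /etc/localstack/init/boot.d run before the runtime starts
mkdir -p boot-hooks
cat > boot-hooks/10-hello.sh <<'EOF'
#!/bin/sh
echo "running before LocalStack boots"
EOF
chmod +x boot-hooks/10-hello.sh

docker run --rm -v "$PWD/boot-hooks:/etc/localstack/init/boot.d" localstack/localstack
```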
diff --git a/bin/docker-helper.sh b/bin/docker-helper.sh
new file mode 100755
index 0000000000000..2b21a0f1ce4ce
--- /dev/null
+++ b/bin/docker-helper.sh
@@ -0,0 +1,302 @@
+#!/usr/bin/env bash
+
+set -eo pipefail
+# set -x
+shopt -s nullglob
+
+# global defaults
+DOCKERFILE=${DOCKERFILE-"Dockerfile"}
+DEFAULT_TAG=${DEFAULT_TAG-"latest"}
+DOCKER_BUILD_CONTEXT=${DOCKER_BUILD_CONTEXT-"."}
+
+function usage() {
+    echo "A set of commands that facilitate building and pushing versioned Docker images"
+    echo ""
+    echo "USAGE"
+    echo "  docker-helper <command> [options]"
+    echo ""
+    echo "Commands:"
+    echo "  build"
+    echo "    Build a platform-specific Docker image for the project in the working directory"
+    echo ""
+    echo "  save"
+    echo "    Save the Docker image to disk (to transfer it to other runners / machines)"
+    echo ""
+    echo "  load"
+    echo "    Load a previously saved Docker image from disk"
+    echo ""
+    echo "  push"
+    echo "    Push the platform-specific Docker image for the project"
+    echo ""
+    echo "  push-manifests"
+    echo "    Create and push the multi-arch Docker manifests for already pushed platform-specific images"
+    echo ""
+    echo "  help"
+    echo "    Show this message"
+}
+
+
+
+#############
+## Helpers ##
+#############
+
+function _fail {
+    # send error message to stderr
+    printf '%s\n' "$1" >&2
+    # exit with error code, $2 or by default 1
+    exit "${2-1}"
+}
+
+function _get_current_version() {
+    # check if setuptools_scm is installed, if not prompt to install. python3 is expected to be present
+    if ! python3 -m pip -qqq show setuptools_scm > /dev/null ; then
+      echo "ERROR: setuptools_scm is not installed. Run 'pip install --upgrade setuptools setuptools_scm'" >&2
+      exit 1
+    fi
+    python3 -m setuptools_scm
+}
+
+function _is_release_commit() {
+    [[ $(_get_current_version) =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]
+}
+
+function _get_current_branch() {
+    git branch --show-current
+}
+
+function _enforce_image_name() {
+    if [ -z "$IMAGE_NAME" ]; then _fail "Mandatory parameter IMAGE_NAME missing."; fi
+}
+
+function _enforce_main_branch() {
+    MAIN_BRANCH=${MAIN_BRANCH-"master"}
+    CURRENT_BRANCH=$(_get_current_branch)
+    echo "Current git branch: '$CURRENT_BRANCH'"
+    test "$CURRENT_BRANCH" == "$MAIN_BRANCH" || _fail "Current branch ($CURRENT_BRANCH) is not $MAIN_BRANCH."
+}
+
+function _enforce_no_fork() {
+    REMOTE_ORIGIN=$(git remote -v | grep 'localstack/' | grep origin | grep push | awk '{print $2}')
+    if [[ "$REMOTE_ORIGIN" != 'https://github.com/localstack/'* ]] && [[ "$REMOTE_ORIGIN" != 'git@github.com:localstack/'* ]]; then
+      _fail "This is a fork and not the main repo."
+    fi
+}
+
+function _enforce_docker_credentials() {
+    if [ -z "$DOCKER_USERNAME" ] || [ -z "$DOCKER_PASSWORD" ]; then _fail "Mandatory Docker credentials are missing."; fi
+}
+
+function _enforce_platform() {
+    if [ -z "$PLATFORM" ]; then _fail "Mandatory parameter PLATFORM is missing."; fi
+}
+
+function _set_version_defaults() {
+    # determine major/minor/patch versions
+    if [ -z "$IMAGE_TAG" ]; then
+      IMAGE_TAG=$(_get_current_version)
+    fi
+    if [ -z "$MAJOR_VERSION" ]; then MAJOR_VERSION=$(echo ${IMAGE_TAG} | cut -d '.' -f1); fi
+    if [ -z "$MINOR_VERSION" ]; then MINOR_VERSION=$(echo ${IMAGE_TAG} | cut -d '.' -f2); fi
+    if [ -z "$PATCH_VERSION" ]; then PATCH_VERSION=$(echo ${IMAGE_TAG} | cut -d '.' -f3); fi
+}
+
+
+
+##############
+## Commands ##
+##############
+
+function cmd-build() {
+    # start build of a platform-specific image (this target will get called for multiple archs like AMD64/ARM64)
+    _enforce_image_name
+    _set_version_defaults
+
+    if [ ! -f "pyproject.toml" ]; then
+      echo "No pyproject.toml found, setuptools_scm will not be able to retrieve configuration."
+    fi
+    if [ -z "$DOCKERFILE" ]; then DOCKERFILE=Dockerfile; fi
+    # by default we load the result to the docker daemon
+    if [ "$DOCKER_BUILD_FLAGS" = "" ]; then DOCKER_BUILD_FLAGS="--load"; fi
+
+    # --add-host: Fix for Centos host OS
+    # --build-arg BUILDKIT_INLINE_CACHE=1: Instruct buildkit to inline the caching information into the image
+    # --cache-from: Use the inlined caching information when building the image
+    DOCKER_BUILDKIT=1 docker buildx build --pull --progress=plain \
+      --cache-from "$IMAGE_NAME" --build-arg BUILDKIT_INLINE_CACHE=1 \
+      --build-arg LOCALSTACK_PRE_RELEASE=$(_is_release_commit && echo "0" || echo "1") \
+      --build-arg LOCALSTACK_BUILD_GIT_HASH=$(git rev-parse --short HEAD) \
+      --build-arg=LOCALSTACK_BUILD_DATE=$(date -u +"%Y-%m-%d") \
+      --build-arg=LOCALSTACK_BUILD_VERSION=$IMAGE_TAG \
+      --add-host="localhost.localdomain:127.0.0.1" \
+      -t "$IMAGE_NAME:$DEFAULT_TAG" $DOCKER_BUILD_FLAGS $DOCKER_BUILD_CONTEXT -f $DOCKERFILE
+}
+
+function cmd-save() {
+    _enforce_image_name
+
+    if [ -z "$IMAGE_FILENAME" ]; then
+      _enforce_platform
+      IMAGE_FILENAME="localstack-docker-image-$PLATFORM.tar.gz";
+    fi
+
+    docker save $IMAGE_NAME:$DEFAULT_TAG | gzip > $IMAGE_FILENAME
+    # set the filename as github output if it's available
+    if [ -n "$GITHUB_OUTPUT" ]; then
+      echo "IMAGE_FILENAME=$IMAGE_FILENAME" >> "$GITHUB_OUTPUT"
+    fi
+}
+
+function cmd-load() {
+    if [ -z "$IMAGE_FILENAME" ]; then
+      _enforce_platform
+      IMAGE_FILENAME="localstack-docker-image-$PLATFORM.tar.gz";
+    fi
+
+    docker load -i $IMAGE_FILENAME
+}
+
+function cmd-push() {
+    _enforce_image_name
+    _enforce_main_branch
+    _enforce_no_fork
+    _enforce_docker_credentials
+    _enforce_platform
+    _set_version_defaults
+
+    if [ -z "$TARGET_IMAGE_NAME" ]; then TARGET_IMAGE_NAME=$IMAGE_NAME; fi
+
+    # login to DockerHub
+    docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD"
+
+    # create the platform specific default tag
+    docker tag $IMAGE_NAME:$DEFAULT_TAG $TARGET_IMAGE_NAME:$DEFAULT_TAG-$PLATFORM
+
+    # push default tag
+    docker push $TARGET_IMAGE_NAME:$DEFAULT_TAG-$PLATFORM
+
+    function _push_versioned_tags() {
+      # create explicitly set image tag (via $IMAGE_TAG)
+      docker tag $TARGET_IMAGE_NAME:$DEFAULT_TAG-$PLATFORM $TARGET_IMAGE_NAME:$IMAGE_TAG-$PLATFORM
+
+      # always create "latest" tag on version push
+      docker tag $TARGET_IMAGE_NAME:$DEFAULT_TAG-$PLATFORM $TARGET_IMAGE_NAME:latest-$PLATFORM
+
+      # create "stable" tag
+      docker tag $TARGET_IMAGE_NAME:$DEFAULT_TAG-$PLATFORM $TARGET_IMAGE_NAME:stable-$PLATFORM
+
+      # create <major> tag (e.g. 4)
+      docker tag $TARGET_IMAGE_NAME:$DEFAULT_TAG-$PLATFORM $TARGET_IMAGE_NAME:$MAJOR_VERSION-$PLATFORM
+
+      # create <major>.<minor> (e.g. 4.0)
+      docker tag $TARGET_IMAGE_NAME:$DEFAULT_TAG-$PLATFORM $TARGET_IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION-$PLATFORM
+
+      # create <major>.<minor>.<patch> (e.g. 
4.0.0)
+      docker tag $TARGET_IMAGE_NAME:$DEFAULT_TAG-$PLATFORM $TARGET_IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION.$PATCH_VERSION-$PLATFORM
+
+      # push all the created tags
+      docker push $TARGET_IMAGE_NAME:stable-$PLATFORM
+      docker push $TARGET_IMAGE_NAME:latest-$PLATFORM
+      docker push $TARGET_IMAGE_NAME:$IMAGE_TAG-$PLATFORM
+      docker push $TARGET_IMAGE_NAME:$MAJOR_VERSION-$PLATFORM
+      docker push $TARGET_IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION-$PLATFORM
+      docker push $TARGET_IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION.$PATCH_VERSION-$PLATFORM
+    }
+
+    if _is_release_commit; then
+      echo "Pushing version tags, we're building the commit of a version tag."
+      _push_versioned_tags
+    else
+      echo "Not pushing any other tags, we're not building a version-tagged commit."
+    fi
+}
+
+function cmd-push-manifests() {
+    _enforce_image_name
+    _enforce_main_branch
+    _enforce_no_fork
+    _enforce_docker_credentials
+    _set_version_defaults
+
+    # login to DockerHub
+    docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
+
+    # create the multiarch manifest
+    docker manifest create $IMAGE_NAME:$DEFAULT_TAG --amend $IMAGE_NAME:$DEFAULT_TAG-amd64 --amend $IMAGE_NAME:$DEFAULT_TAG-arm64
+
+    # push default tag
+    docker manifest push $IMAGE_NAME:$DEFAULT_TAG
+
+    function _push_versioned_tags() {
+      # create explicitly set image tag (via $IMAGE_TAG)
+      docker manifest create $IMAGE_NAME:$IMAGE_TAG \
+        --amend $IMAGE_NAME:$IMAGE_TAG-amd64 \
+        --amend $IMAGE_NAME:$IMAGE_TAG-arm64
+
+      # always create "latest" tag on version push
+      docker manifest create $IMAGE_NAME:latest \
+        --amend $IMAGE_NAME:latest-amd64 \
+        --amend $IMAGE_NAME:latest-arm64
+
+      # create "stable" tag
+      docker manifest create $IMAGE_NAME:stable \
+        --amend $IMAGE_NAME:stable-amd64 \
+        --amend $IMAGE_NAME:stable-arm64
+
+      # create <major> tag (e.g. 4)
+      docker manifest create $IMAGE_NAME:$MAJOR_VERSION \
+        --amend $IMAGE_NAME:$MAJOR_VERSION-amd64 \
+        --amend $IMAGE_NAME:$MAJOR_VERSION-arm64
+
+      # create <major>.<minor> (e.g. 4.0)
+      docker manifest create $IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION \
+        --amend $IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION-amd64 \
+        --amend $IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION-arm64
+
+      # create <major>.<minor>.<patch> (e.g. 4.0.0)
+      docker manifest create $IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION.$PATCH_VERSION \
+        --amend $IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION.$PATCH_VERSION-amd64 \
+        --amend $IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION.$PATCH_VERSION-arm64
+
+      # push all the created tags
+      docker manifest push $IMAGE_NAME:$IMAGE_TAG
+      docker manifest push $IMAGE_NAME:stable
+      docker manifest push $IMAGE_NAME:latest
+      docker manifest push $IMAGE_NAME:$MAJOR_VERSION
+      docker manifest push $IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION
+      docker manifest push $IMAGE_NAME:$MAJOR_VERSION.$MINOR_VERSION.$PATCH_VERSION
+    }
+
+    if _is_release_commit; then
+      echo "Pushing version tags, we're building the commit of a version tag."
+      _push_versioned_tags
+    else
+      echo "Not pushing any other tags, we're not building a version-tagged commit."
+    fi
+}
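End to end, a release run would invoke the helper once per platform and then stitch the manifests together; a sketch assuming Docker credentials are set and a release-tagged commit of the main repo is checked out:

```bash
# on each platform runner (amd64 shown; repeat with PLATFORM=arm64)
IMAGE_NAME=localstack/localstack PLATFORM=amd64 ./bin/docker-helper.sh build
IMAGE_NAME=localstack/localstack PLATFORM=amd64 ./bin/docker-helper.sh push

# once, after all platform-specific images are pushed
IMAGE_NAME=localstack/localstack ./bin/docker-helper.sh push-manifests
```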
+
+
+
+##########
+## Main ##
+##########
+
+function main() {
+    [[ $# -lt 1 ]] && { usage; exit 1; }
+
+    command_name=$1
+    shift
+
+    # invoke command
+    case $command_name in
+      "build")          cmd-build "$@" ;;
+      "save")           cmd-save "$@" ;;
+      "load")           cmd-load "$@" ;;
+      "push")           cmd-push "$@" ;;
+      "push-manifests") cmd-push-manifests "$@" ;;
+      "help")           usage && exit 0 ;;
+      *)                usage && exit 1 ;;
+    esac
+}
+
+main "$@"
diff --git a/bin/localstack-start-docker-dev.sh b/bin/localstack-start-docker-dev.sh
deleted file mode 100755
index ed1ee923f12cd..0000000000000
--- a/bin/localstack-start-docker-dev.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-source .venv/bin/activate
-
-export LOCALSTACK_VOLUME_DIR=$(pwd)/.filesystem/var/lib/localstack
-export DOCKER_FLAGS="${DOCKER_FLAGS}
--v $(pwd)/localstack:/opt/code/localstack/localstack
--v $(pwd)/localstack.egg-info:/opt/code/localstack/localstack.egg-info
--v $(pwd)/.filesystem/etc/localstack:/etc/localstack
--v $(pwd)/bin/docker-entrypoint.sh:/usr/local/bin/docker-entrypoint.sh"
-
-exec python -m localstack.cli.main start "$@"
diff --git a/bin/localstack-supervisor b/bin/localstack-supervisor
new file mode 100755
index 0000000000000..f0943b4447781
--- /dev/null
+++ b/bin/localstack-supervisor
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+"""Supervisor script for managing localstack processes, acting like a mini init system tailored to
+localstack. This can be used on the host or in the docker-entrypoint.sh.
+
+The supervisor behaves as follows:
+* SIGUSR1 to the supervisor will terminate the localstack instance and then start a new process
+* SIGTERM to the supervisor will terminate the localstack instance and then return
+* if the localstack instance exits, then the supervisor exits with the same exit code.
+
+The methods ``waitpid_reap_other_children`` and ``stop_child_process`` were adapted from baseimage-docker
+licensed under MIT: https://github.com/phusion/baseimage-docker/blob/rel-0.9.16/image/bin/my_init"""
+
+import errno
+import os
+import signal
+import subprocess
+import sys
+import threading
+from typing import Optional
+
+DEBUG = os.getenv("DEBUG", "").strip().lower() in ["1", "true"]
+
+# configurable process shutdown timeout, to allow for longer shutdown procedures
+DEFAULT_SHUTDOWN_TIMEOUT = int(os.getenv("SHUTDOWN_TIMEOUT", "").strip() or 5)
+
+
+class AlarmException(Exception):
+    """Special exception raised if SIGALRM is received."""
+
+    pass
+
+
+def get_localstack_command() -> list[str]:
+    """
+    Allow modification of the command to start LocalStack
+    :return: Command to start LocalStack
+    """
+    import shlex
+
+    command = os.environ.get("LOCALSTACK_SUPERVISOR_COMMAND")
+    if not command:
+        return [sys.executable, "-m", "localstack.runtime.main"]
+    return shlex.split(command)
+
+
+def log(message: str):
+    """Prints the given message to stdout with a logging prefix."""
+    if not DEBUG:
+        return
+    print(f"LocalStack supervisor: {message}")
+
+
+_terminated_child_processes = {}
+
+
+def waitpid_reap_other_children(pid: int) -> Optional[int]:
+    """
+    Waits for the child process with the given PID, while at the same time reaping any other child
+    processes that have exited (e.g. adopted child processes that have terminated). 
+ + :param pid: the pid of the process + :returns: the status of the process + """ + global _terminated_child_processes + + status = _terminated_child_processes.get(pid) + if status: + # A previous call to waitpid_reap_other_children(), + # with an argument not equal to the current argument, + # already waited for this process. Return the status + # that was obtained back then. + del _terminated_child_processes[pid] + return status + + done = False + status = None + while not done: + try: + this_pid, status = os.waitpid(-1, 0) + + if this_pid == pid: + done = True + else: + # Save status for later. + _terminated_child_processes[this_pid] = status + except OSError as e: + if e.errno == errno.ECHILD or e.errno == errno.ESRCH: + return None + else: + raise + return status + + +def stop_child_process(name: str, pid: int, sig: int = signal.SIGTERM, timeout: int | None = None): + """ + Sends a signal to the given process and then waits for all child processes to avoid zombie processes. + + :param name: readable process name to log + :param pid: the pid to terminate + :param sig: the signal to send to the process + :param timeout: the wait timeout + :return: + """ + log(f"Shutting down {name} (PID {pid})...") + try: + os.kill(pid, sig) + except OSError: + pass + timeout = timeout or DEFAULT_SHUTDOWN_TIMEOUT + signal.alarm(timeout) + try: + waitpid_reap_other_children(pid) + except OSError: + pass + except AlarmException: + log(f"{name} (PID {pid}) did not shut down in time. Forcing it to exit.") + try: + os.kill(pid, signal.SIGKILL) + except OSError: + pass + try: + waitpid_reap_other_children(pid) + except OSError: + pass + finally: + signal.alarm(0) + + +def main(): + # the localstack process + process: Optional[subprocess.Popen] = None + + # signal handlers set these events which further determine which actions should be taken in the main loop + should_restart = threading.Event() + + # signal handlers + + def _raise_alarm_exception(signum, frame): + raise AlarmException() + + def _terminate_localstack(signum, frame): + if not process: + return + stop_child_process("localstack", process.pid, signal.SIGTERM) + + def _restart_localstack(signum, frame): + # this handler terminates localstack but leaves the supervisor in a state to restart it + if not process: + return + should_restart.set() + stop_child_process("localstack", process.pid, signal.SIGTERM) + + signal.signal(signal.SIGALRM, _raise_alarm_exception) + signal.signal(signal.SIGTERM, _terminate_localstack) + # TODO investigate: when we tried to forward SIGINT to LS, for some reason SIGINT was raised twice in LS + # yet setting this to a no-op also worked. since we couldn't really figure out what was going on, we just + # translate SIGINT to SIGTERM for the localstack process. 
+    signal.signal(signal.SIGINT, _terminate_localstack)
+    signal.signal(signal.SIGUSR1, _restart_localstack)
+
+    # sets the supervisor PID so localstack can signal to it more easily
+    os.environ["SUPERVISOR_PID"] = str(os.getpid())
+
+    exit_code = 0
+    try:
+        log("starting")
+        while True:
+            # clear force event indicators
+            should_restart.clear()
+
+            # start a new localstack process
+            process = subprocess.Popen(
+                get_localstack_command(),
+                stdout=sys.stdout,
+                stderr=subprocess.STDOUT,
+            )
+            log(f"localstack process (PID {process.pid}) starting")
+
+            # wait for the localstack process to return
+            exit_code = process.wait()
+            log(f"localstack process (PID {process.pid}) returned with exit code {exit_code}")
+
+            # make sure that, if the localstack process terminates on its own accord, that we still reap all
+            # child processes
+            waitpid_reap_other_children(process.pid)
+
+            if should_restart.is_set():
+                continue
+            else:
+                break
+    finally:
+        log("exiting")
+
+    sys.exit(exit_code)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/bin/release-dev.sh b/bin/release-dev.sh
index 983ce584ce545..60c8a9c808936 100755
--- a/bin/release-dev.sh
+++ b/bin/release-dev.sh
@@ -3,11 +3,9 @@
 set -e
 
 # use UTC timestamp as version
-ver=$(date -u +%Y%m%d%H%M%S)
+timestamp=$(date -u +%Y%m%d%H%M%S)
+sed -i -r "s/^([0-9]+\.[0-9]+\.[0-9]+\.dev).*/\1${timestamp}/" VERSION
 
-sed -i -r "s/^__version__ = \"(.*\.dev)\"/__version__ = \"\1${ver}\"/" localstack/__init__.py
-
-
-echo "release $(cat localstack/__init__.py | grep '__version__')? (press CTRL+C to abort)"
+echo "release $(cat VERSION)? (press CTRL+C to abort)"
 read
 make publish
diff --git a/bin/release-helper.sh b/bin/release-helper.sh
new file mode 100755
index 0000000000000..b0ca988df4c73
--- /dev/null
+++ b/bin/release-helper.sh
@@ -0,0 +1,208 @@
+#!/usr/bin/env bash
+
+# set -x
+set -euo pipefail
+shopt -s nullglob
+shopt -s globstar
+
+DEPENDENCY_FILE=${DEPENDENCY_FILE:-pyproject.toml}
+
+function usage() {
+    echo "A set of commands that facilitate release automation"
+    echo ""
+    echo "USAGE"
+    echo "  release-helper <command> [options]"
+    echo ""
+    echo "Commands:"
+    echo "  github-outputs <patch|minor|major>"
+    echo "    print version number outputs for github actions"
+    echo ""
+    echo "  explain-steps <patch|minor|major>"
+    echo "    print a list of steps that should be executed for the release type"
+    echo ""
+    echo "  get-ver"
+    echo "    prints the current version number in the version file"
+    echo ""
+    echo "  set-dep-ver <dep> <range>"
+    echo "    set the dependency version in the dependency file"
+    echo "    example: set-dep-ver 'localstack-ext' '==0.15.0'"
+    echo ""
+    echo "  pip-download-retry <dep> <version>"
+    echo "    blocks until the given version of the given dependency becomes downloadable by pip"
+    echo "    example: pip-download-retry 'localstack-ext' '0.15.0'"
+    echo ""
+    echo "  git-commit-release <version>"
+    echo "    creates a tag and release commit for the given version"
+    echo ""
+    echo "  git-commit-increment"
+    echo "    creates a commit for the next development iteration"
+    echo ""
+    echo "  help"
+    echo "    show this message"
+}
+
+function get_current_version() {
+    # check if setuptools_scm is installed, if not prompt to install. python3 is expected to be present
+    if ! python3 -m pip -qqq show setuptools_scm > /dev/null ; then
+        echo "ERROR: setuptools_scm is not installed. Run 'pip install --upgrade setuptools setuptools_scm'" >&2
+        exit 1
+    fi
+    python3 -m setuptools_scm
+}
+
+function remove_ver_suffix() {
+    awk -F. '{ print $1 "." $2 "." $3 }'
+}
+
+function add_dev_suffix() {
+    awk -F. '{ print $1 "." $2 "." $3 ".dev" }'
+}
+
+function increment_patch() {
+    awk -F. '{ print $1 "." $2 "." $3 + 1 }'
+}
+
+function increment_minor() {
+    awk -F. '{ print $1 "." $2 + 1 "." 0 }'
+}
+
+function increment_major() {
+    awk -F. '{ print $1 + 1 "." 0 "." 0 }'
+}
+
+function verify_valid_version() {
+    read ver
+    echo $ver | egrep "^([0-9]+)\.([0-9]+)(\.[0-9]+)?" > /dev/null || { echo "invalid version string '$ver'"; exit 1; }
+}
+
+function release_env_compute() {
+    case $1 in
+        "patch")
+            RELEASE_VER=$(get_current_version | remove_ver_suffix)
+            ;;
+        "minor")
+            RELEASE_VER=$(get_current_version | increment_minor)
+            ;;
+        "major")
+            RELEASE_VER=$(get_current_version | increment_major)
+            ;;
+        *)
+            echo "unknown release type '$1'"
+            exit 1
+            ;;
+    esac
+
+    export CURRENT_VER=$(get_current_version)
+    export RELEASE_VER=${RELEASE_VER}
+    export DEVELOP_VER=$(echo ${RELEASE_VER} | increment_patch | add_dev_suffix)
+    # uses only the minor version. for 1.0.1 -> patch the boundary would be 1.1
+    export BOUNDARY_VER=$(echo ${DEVELOP_VER} | increment_minor | cut -d'.' -f-2)
+
+    release_env_validate || { echo "invalid release environment"; exit 1; }
+}
+
+function release_env_validate() {
+    echo ${CURRENT_VER} | verify_valid_version
+    echo ${RELEASE_VER} | verify_valid_version
+    echo ${DEVELOP_VER} | verify_valid_version
+    echo ${BOUNDARY_VER} | verify_valid_version
+}
+
+function explain_release_steps() {
+    echo "- perform release"
+    echo "  - set synced dependencies to ==${RELEASE_VER}"
+    echo "  - git commit -a -m 'Release version ${RELEASE_VER}'"
+    echo "  - git tag -a 'v${RELEASE_VER}' -m 'Release version ${RELEASE_VER}'"
+    echo "  - make publish"
+    echo "  - git push && git push --tags"
+    echo "- prepare development iteration"
+    echo "  - set synced dependencies to >=${DEVELOP_VER},<${BOUNDARY_VER}"
+    echo "  - git commit -a -m 'Prepare next development iteration'"
+    echo "  - git push"
+}
+
+function print_github_outputs() {
+    echo "current=${CURRENT_VER}" >> $GITHUB_OUTPUT
+    echo "release=${RELEASE_VER}" >> $GITHUB_OUTPUT
+    echo "develop=${DEVELOP_VER}" >> $GITHUB_OUTPUT
+    echo "boundary=${BOUNDARY_VER}" >> $GITHUB_OUTPUT
+}
+
+# commands
+
+function cmd-get-ver() {
+    [[ $# -eq 0 ]] || { usage; exit 1; }
+    get_current_version
+}
+
+function cmd-set-dep-ver() {
+    [[ $# -eq 2 ]] || { usage; exit 1; }
+
+    dep=$1
+    ver=$2
+
+    egrep -h "^(\s*\"?)(${dep})(\[[a-zA-Z0-9,\-]+\])?(>=|==|<=)([^\"]*)(\")?(,)?$" ${DEPENDENCY_FILE} || { echo "dependency ${dep} not found in ${DEPENDENCY_FILE}"; return 1; }
+    sed -i -r "s/^(\s*\"?)(${dep})(\[[a-zA-Z0-9,\-]+\])?(>=|==|<=)([^\"]*)(\")?(,)?$/\1\2\3${ver}\6\7/g" ${DEPENDENCY_FILE}
+}
+
+function cmd-github-outputs() {
+    release_env_compute $1
+    print_github_outputs
+}
+
+function cmd-explain-steps() {
+    release_env_compute $1
+    explain_release_steps
+}
+
+function cmd-pip-download-retry() {
+    [[ $# -eq 2 ]] || { usage; exit 1; }
+
+    dep=$1
+    ver=$2
+
+    export pip_download_tmpdir="$(mktemp -d)"
+    trap 'rm -rf -- "$pip_download_tmpdir"' EXIT
+
+    while ! python3 -m pip download -d ${pip_download_tmpdir} --no-deps --pre "${dep}==${ver}" &> /dev/null; do
+        sleep 5
+    done
+}
+
+function cmd-git-commit-release() {
+    [[ $# -eq 1 ]] || { usage; exit 1; }
+
+    echo $1 | verify_valid_version
+
+    git add "${DEPENDENCY_FILE}"
+    # allow empty commit here as the community version might not have any changes, but we still need a commit for a tag
+    git commit --allow-empty -m "release version ${1}"
+    git tag -a "v${1}" -m "Release version ${1}"
+}
+
+function cmd-git-commit-increment() {
+    git add "${DEPENDENCY_FILE}"
+    git commit --allow-empty -m "prepare next development iteration"
+}
+
+function main() {
+    [[ $# -lt 1 ]] && { usage; exit 1; }
+
+    command_name=$1
+    shift
+
+    # invoke command
+    case $command_name in
+        "get-ver") cmd-get-ver "$@" ;;
+        "set-dep-ver") cmd-set-dep-ver "$@" ;;
+        "github-outputs") cmd-github-outputs "$@" ;;
+        "explain-steps") cmd-explain-steps "$@" ;;
+        "pip-download-retry") cmd-pip-download-retry "$@" ;;
+        "git-commit-release") cmd-git-commit-release "$@" ;;
+        "git-commit-increment") cmd-git-commit-increment "$@" ;;
+        "help") usage && exit 0 ;;
+        *) usage && exit 1 ;;
    esac
+}
+
+main "$@"
diff --git a/bin/supervisord.conf b/bin/supervisord.conf
deleted file mode 100644
index ab57621962555..0000000000000
--- a/bin/supervisord.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-[supervisord]
-nodaemon=true
-logfile=/tmp/supervisord.log
-
-[program:infra]
-directory=/opt/code/localstack
-command=/opt/code/localstack/.venv/bin/python -m localstack.cli.main start --host --no-banner
-environment=
-    LOCALSTACK_INFRA_PROCESS=1
-autostart=true
-autorestart=true
-stdout_logfile=/var/lib/localstack/logs/localstack_infra.log
-stderr_logfile=/var/lib/localstack/logs/localstack_infra.err
diff --git a/doc/external_services_integration/kafka_self_managed_cluster/HOWTO.md b/doc/external_services_integration/kafka_self_managed_cluster/HOWTO.md
deleted file mode 100644
index f9902a16ad35d..0000000000000
--- a/doc/external_services_integration/kafka_self_managed_cluster/HOWTO.md
+++ /dev/null
@@ -1,99 +0,0 @@
-## Using Localstack lambda with self-managed kafka cluster
-
-This is a guide to use your own custom Kafka cluster endpoint, please note this is not a how-to configure or setup Kafka, to do that refer to the [official documentation](https://kafka.apache.org/documentation/)
-
-## Why is this useful?
-
-Localstack OSS does not currently support AWS MSK out of the box, but you can run your own self-managed kafka cluster and integrate it with your own applications.
-
-## How to run it?
-
-You can find the [example docker compose](docker-compose.yml) file which contains a single-noded zookeeper and kafka cluster and a simple localstack setup as well as [kowl](https://github.com/cloudhut/kowl), an Apache Kafka Web UI.
-
-1. Run docker compose:
-```
-$ docker-compose up -d
-```
-
-2. Create the lambda function:
-```
-$ awslocal lambda create-function \
-    --function-name fun1 \
-    --handler lambda.handler \
-    --runtime python3.8 \
-    --role r1 \
-    --zip-file fileb://lambda.zip
-{
-    "FunctionName": "fun1",
-    "FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:fun1",
-    "Runtime": "python3.8",
-    "Role": "r1",
-    "Handler": "lambda.handler",
-    "CodeSize": 294,
-    "Description": "",
-    "Timeout": 3,
-    "LastModified": "2021-05-19T02:01:06.617+0000",
-    "CodeSha256": "/GPsiNXaq4tBA4QpxPCwgpeVfP7j+1tTH6zdkJ3jiU4=",
-    "Version": "$LATEST",
-    "VpcConfig": {},
-    "TracingConfig": {
-        "Mode": "PassThrough"
-    },
-    "RevisionId": "d85469d2-8558-4d75-bc0e-5926f373e12c",
-    "State": "Active",
-    "LastUpdateStatus": "Successful",
-    "PackageType": "Zip"
-}
-```
-
-3. Create an example secret:
-```
-$ awslocal secretsmanager create-secret --name localstack
-{
-    "ARN": "arn:aws:secretsmanager:us-east-1:000000000000:secret:localstack-TDIuI",
-    "Name": "localstack",
-    "VersionId": "32bbb8e2-46ee-4322-b3d5-b6459d54513b"
-}
-```
-
-4. Create an example kafka topic:
-```
-$ docker exec -ti kafka kafka-topics --zookeeper zookeeper:2181 --create --replication-factor 1 --partitions 1 --topic t1
-Created topic t1.
-```
-
-5. Create the event source mapping to your local kafka cluster:
-```
-$ awslocal lambda create-event-source-mapping \
-    --topics t1 \
-    --source-access-configuration Type=SASL_SCRAM_512_AUTH,URI=arn:aws:secretsmanager:us-east-1:000000000000:secret:localstack-TDIuI \
-    --function-name arn:aws:lambda:us-east-1:000000000000:function:fun1 \
-    --self-managed-event-source '{"Endpoints":{"KAFKA_BOOTSTRAP_SERVERS":["localhost:9092"]}}'
-{
-    "UUID": "4a2b0ea6-960c-4847-8684-465876dd6dbd",
-    "BatchSize": 100,
-    "FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:fun1",
-    "LastModified": "2021-05-19T04:02:49+02:00",
-    "LastProcessingResult": "OK",
-    "State": "Enabled",
-    "StateTransitionReason": "User action",
-    "Topics": [
-        "t1"
-    ],
-    "SourceAccessConfigurations": [
-        {
-            "Type": "SASL_SCRAM_512_AUTH",
-            "URI": "arn:aws:secretsmanager:us-east-1:000000000000:secret:localstack-TDIuI"
-        }
-    ],
-    "SelfManagedEventSource": {
-        "Endpoints": {
-            "KAFKA_BOOTSTRAP_SERVERS": [
-                "localhost:9092"
-            ]
-        }
-    }
-}
-```
-
-6. Aditionally check `http://localhost:8080` for kowl's UI.
\ No newline at end of file
diff --git a/doc/external_services_integration/kafka_self_managed_cluster/docker-compose.yml b/doc/external_services_integration/kafka_self_managed_cluster/docker-compose.yml
deleted file mode 100644
index 94870a4db1209..0000000000000
--- a/doc/external_services_integration/kafka_self_managed_cluster/docker-compose.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-version: '3.9'
-services:
-  zookeeper:
-    image: confluentinc/cp-zookeeper:6.0.2
-    container_name: zookeeper
-    hostname: zookeeper
-    ports:
-      - "2181:2181"
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-
-  kafka:
-    image: confluentinc/cp-kafka:6.0.2
-    container_name: kafka
-    hostname: kafka
-    restart: always
-    depends_on:
-      - zookeeper
-    ports:
-      - "9092:9092"
-      - "9101:9101"
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
-      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
-      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
-      KAFKA_JMX_PORT: 9101
-      KAFKA_JMX_HOSTNAME: localhost
-      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:29092
-      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
-      CONFLUENT_METRICS_ENABLE: 'true'
-      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
-
-  kowl:
-    image: quay.io/cloudhut/kowl:v1.3.1
-    container_name: kowl
-    restart: always
-    ports:
-      - "8080:8080"
-    depends_on:
-      - kafka
-    environment:
-      - KAFKA_BROKERS=kafka:29092
-
-  localstack:
-    container_name: "${LOCALSTACK_DOCKER_NAME-localstack_main}"
-    image: localstack/localstack
-    network_mode: bridge
-    ports:
-      - "4566:4566"
-    depends_on:
-      - kafka
-      - kowl
-    environment:
-      - SERVICES=lambda,secretsmanager
-      - DEBUG=${DEBUG- }
-      - DATA_DIR=${DATA_DIR- }
-      - LAMBDA_EXECUTOR=${LAMBDA_EXECUTOR- }
-      - KINESIS_ERROR_PROBABILITY=${KINESIS_ERROR_PROBABILITY- }
-      - DOCKER_HOST=unix:///var/run/docker.sock
-      - HOST_TMP_FOLDER=${TMPDIR}
-    volumes:
-      - "${TMPDIR:-/tmp/localstack}:/tmp/localstack"
-      - "/var/run/docker.sock:/var/run/docker.sock"
\ No newline at end of file
diff --git a/doc/interaction/README.md b/doc/interaction/README.md
deleted file mode 100644
index 1f49e0a63b34f..0000000000000
--- a/doc/interaction/README.md
+++ /dev/null
@@ -1,115 +0,0 @@
-# Interact with LocalStack
-
-There are a number of ways you or your applications can interact with LocalStack. To try LocalStack, the AWS CLI is a good starting point, however you can also use Terraform, [CDK](https://github.com/localstack/aws-cdk-local), AWS client libraries, and many other tools from the AWS ecosystem.
-
-## AWS CLI
-
-You can point your `aws` CLI (and other similar tools) to use LocalStack by configuring the service endpoint, for example:
-
-```shell
-aws --endpoint-url=http://localhost:4566 kinesis list-streams
-{
-    "StreamNames": []
-}
-```
-
-Use the below command to install `aws CLI`, if not installed already.
-
-```shell
-pip install awscli
-```
-
-## Setting up local region and credentials to run LocalStack
-
-aws requires the region and the credentials to be set in order to run the aws commands. Create the default configuration & the credentials. Below key will ask for the Access key id, secret Access Key, region & output format.
-
-```shell
-aws configure --profile default
-
-# Config & credential file will be created under ~/.aws folder
-```
-
-**NOTE**: Please use `test` as Access key id and secret Access Key to make S3 presign url work. We have added presign url signature verification algorithm to validate the presign url and its expiration. You can configure credentials into the system environment using `export` command in the linux/Mac system. You also can add credentials in `~/.aws/credentials` file directly.
-
-```shell
-export AWS_ACCESS_KEY_ID=test
-export AWS_SECRET_ACCESS_KEY=test
-```
-
-## awslocal
-
-[awslocal](https://github.com/localstack/awscli-local) is a thin CLI wrapper that runs commands directly against LocalStack (no need to specify `--endpoint-url` anymore). Install it via `pip install awscli-local`, and then use it as follows:
-
-```shell
-awslocal kinesis list-streams
-{
-    "StreamNames": []
-}
-```
-
-**UPDATE**: Use the environment variable `$LOCALSTACK_HOSTNAME` to determine the target host inside your Lambda function. See [Configuration](#configuration) section for more details.
-
-## AWS CLI v2 with Docker and LocalStack
-
-By default, the container running [amazon/aws-cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-docker.html) is isolated from `0.0.0.0:4566` on the host machine, that means that aws-cli cannot reach localstack through your shell.
-
-To ensure that the two docker containers can communicate create a network on the docker engine:
-
-```bash
-$ docker network create localstack
-0c9cb3d37b0ea1bfeb6b77ade0ce5525e33c7929d69f49c3e5ed0af457bdf123
-```
-Then modify the `docker-compose.yml` specifying the network to use:
-
-```yml
-networks:
-  default:
-    external:
-      name: "localstack"
-```
-
-Run AWS Cli v2 docker container using this network (example):
-
-```shell
-$ docker run --network localstack --rm -it amazon/aws-cli --endpoint-url=http://localstack:4566 lambda list-functions
-{
-    "Functions": []
-}
-```
-
-If you use AWS CLI v2 from a docker container often, create an alias:
-
-```shell
-$ alias laws='docker run --network localstack --rm -it amazon/aws-cli --endpoint-url=http://localstack:4566'
-```
-
-So you can type:
-
-```shell
-$ laws lambda list-functions
-{
-    "Functions": []
-}
-```
-
-## Client Libraries
-
-* Python: https://github.com/localstack/localstack-python-client
-  * alternatively, you can also use `boto3` and use the `endpoint_url` parameter when creating a connection
-* .NET: https://github.com/localstack-dotnet/localstack-dotnet-client
-  * alternatively, you can also use `AWS SDK for .NET` and change `ClientConfig` properties when creating a service client.
-* (more coming soon...)
-
-## Invoking API Gateway
-
-To invoke the path `/my/path` of an API Gateway with ID `id123` in stage `prod`, you can use the special hostname/URL syntax below:
-
-```shell
-$ curl http://id123.execute-api.localhost.localstack.cloud:4566/prod/my/path
-```
-
-Alternatively, if your system is facing issues resolving the custom DNS name, you can use this URL pattern instead:
-
-```shell
-$ curl http://localhost:4566/restapis/id123/prod/_user_request_/my/path
-```
diff --git a/doc/third-party-software-tools/README.md b/doc/third-party-software-tools/README.md
deleted file mode 100644
index 6d152ccffe324..0000000000000
--- a/doc/third-party-software-tools/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Third Party Software Tools
-
-We build on a number of third-party software tools, including the following:
-
-Third-Party software      | License
---------------------------|-----------------------
-**Python/pip modules:**   |
-airspeed                  | BSD License
-amazon_kclpy              | Amazon Software License
-boto3                     | Apache License 2.0
-coverage                  | Apache License 2.0
-docopt                    | MIT License
-elasticsearch             | Apache License 2.0
-flask                     | BSD License
-flask_swagger             | MIT License
-jsonpath-rw               | Apache License 2.0
-moto                      | Apache License 2.0
-requests                  | Apache License 2.0
-subprocess32              | PSF License
-**Other tools:**          |
-Elasticsearch             | Apache License 2.0
-local-kms                 | MIT License
-kinesis-mock              | MIT License
diff --git a/doc/troubleshoot/README.md b/doc/troubleshoot/README.md
deleted file mode 100644
index 71e37143bc815..0000000000000
--- a/doc/troubleshoot/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Troubleshoot
-
-* If you're using AWS Java libraries with Kinesis, please, refer to [CBOR protocol issues with the Java SDK guide](https://github.com/mhart/kinesalite#cbor-protocol-issues-with-the-java-sdk) how to disable CBOR protocol which is not supported by kinesalite.
-
-* Accessing local S3: To avoid domain name resolution issues, you need to enable **path style access** on your S3 SDK client. Most AWS SDKs provide a config to achieve that, e.g., for Java:
-
-```shell
-s3.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
-// There is also an option to do this if you're using any of the client builder classes:
-AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard();
-builder.withPathStyleAccessEnabled(true);
-...
-```
-
-* Mounting the temp. directory: Note that on MacOS you may have to run `TMPDIR=/private$TMPDIR docker-compose up` if
-`$TMPDIR` contains a symbolic link that cannot be mounted by Docker.
-(See details here: https://bitbucket.org/atlassian/localstack/issues/40/getting-mounts-failed-on-docker-compose-up)
-
-* If you're seeing Lambda errors like `Cannot find module ...` when using `LAMBDA_REMOTE_DOCKER=false`, make sure to properly set the `HOST_TMP_FOLDER` environment variable and mount the temporary folder from the host into the LocalStack container.
-
-* If you run into file permission issues on `pip install` under Mac OS (e.g., `Permission denied: '/Library/Python/2.7/site-packages/six.py'`), then you may have to re-install `pip` via Homebrew (see [this discussion thread](https://github.com/localstack/localstack/issues/260#issuecomment-334458631)). Alternatively, try installing
-with the `--user` flag: `pip install --user localstack`
-
-* If you are deploying within OpenShift, please be aware: the pod must run as `root`, and the user must have capabilities added to the running pod, in order to allow Elasticsearch to be run as the non-root `localstack` user.
-
-* If you are experiencing slow performance with Lambdas in Mac OS, you could either (1) try [mounting local code directly into the Lambda container](https://github.com/localstack/localstack#using-local-code-with-lambda), or (2) disable mounting the temporary directory into the LocalStack container in docker-compose. (See also https://github.com/localstack/localstack/issues/2515)
-
-* The environment variable `no_proxy` is rewritten by LocalStack. (Internal requests will go straight via localhost, bypassing any proxy configuration).
-
-* For troubleshooting LocalStack start issues, you can check debug logs by running `DEBUG=1 localstack start`
-
-* In case you get errors related to node/nodejs, you may find (this issue comment: https://github.com/localstack/localstack/issues/227#issuecomment-319938530) helpful.
-
-* If you are using AWS Java libraries and need to disable SSL certificate checking, add `-Dcom.amazonaws.sdk.disableCertChecking` to the java invocation.
-
-* If you are using LAMBDA_REMOTE_DOCKER=true and running in a docker container in CI, do NOT set `DOCKER_HOST` as an environment variable passed into the localstack container. Any calls to lambda CLI operations will fail (https://github.com/localstack/localstack/issues/4801)
diff --git a/docker-compose-pro.yml b/docker-compose-pro.yml
index 1d51b9a02252d..98061c285824a 100644
--- a/docker-compose-pro.yml
+++ b/docker-compose-pro.yml
@@ -1,21 +1,17 @@
-version: "3.8"
-
 services:
   localstack:
-    container_name: "${LOCALSTACK_DOCKER_NAME-localstack_main}"
+    container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}"
     image: localstack/localstack-pro  # required for Pro
     ports:
       - "127.0.0.1:4566:4566"            # LocalStack Gateway
       - "127.0.0.1:4510-4559:4510-4559"  # external services port range
-      - "127.0.0.1:53:53"                # DNS config (required for Pro)
-      - "127.0.0.1:53:53/udp"            # DNS config (required for Pro)
-      - "127.0.0.1:443:443"              # LocalStack HTTPS Gateway (required for Pro)
+      - "127.0.0.1:443:443"              # LocalStack HTTPS Gateway (Pro)
     environment:
-      - DEBUG=${DEBUG-}
-      - PERSISTENCE=${PERSISTENCE-}
-      - LAMBDA_EXECUTOR=${LAMBDA_EXECUTOR-}
-      - LOCALSTACK_API_KEY=${LOCALSTACK_API_KEY-}  # required for Pro
-      - DOCKER_HOST=unix:///var/run/docker.sock
+      # Activate LocalStack Pro: https://docs.localstack.cloud/getting-started/auth-token/
+      - LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:?}  # required for Pro
+      # LocalStack configuration: https://docs.localstack.cloud/references/configuration/
+      - DEBUG=${DEBUG:-0}
+      - PERSISTENCE=${PERSISTENCE:-0}
     volumes:
       - "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack"
       - "/var/run/docker.sock:/var/run/docker.sock"
diff --git a/docker-compose.yml b/docker-compose.yml
index afe7983d8cd8c..6d70da64e2e06 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,16 +1,13 @@
-version: "3.8"
-
 services:
   localstack:
-    container_name: "${LOCALSTACK_DOCKER_NAME-localstack_main}"
+    container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}"
     image: localstack/localstack
     ports:
       - "127.0.0.1:4566:4566"            # LocalStack Gateway
       - "127.0.0.1:4510-4559:4510-4559"  # external services port range
     environment:
-      - DEBUG=${DEBUG-}
-      - LAMBDA_EXECUTOR=${LAMBDA_EXECUTOR-}
-      - DOCKER_HOST=unix:///var/run/docker.sock
+      # LocalStack configuration: https://docs.localstack.cloud/references/configuration/
+      - DEBUG=${DEBUG:-0}
     volumes:
       - "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack"
      - "/var/run/docker.sock:/var/run/docker.sock"
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
new file mode 100644
index 0000000000000..35af4e8333b5d
--- /dev/null
+++ b/docs/CONTRIBUTING.md
@@ -0,0 +1,30 @@
+# Contributing
+
+We welcome contributions to LocalStack! Please refer to the following sections to better understand how LocalStack works internally, how to set up local development environments, and how to contribute to the codebase.
+
+## Sections
+
+- [Contribution Guidelines](#contribution-guidelines)
+- [Development Environment Setup](development-environment-setup/README.md)
+- [LocalStack Concepts](localstack-concepts/README.md)
+- [Testing](testing/README.md)
+
+## Contribution Guidelines
+
+We welcome feedback, bug reports, and pull requests!
+
+For pull requests (PRs), please stick to the following guidelines:
+
+* Before submitting a PR, verify that [an issue](https://github.com/localstack/localstack/issues) exists that describes the bug fix or feature you want to contribute. If there's no issue yet, please [create one](https://github.com/localstack/localstack/issues/new/choose).
+* Fork localstack on your GitHub user account, make code changes there, and then create a PR against the main localstack repository.
+* Add tests for any new features or bug fixes. Ideally, each PR increases the test coverage. Please read our [integration testing](testing/integration-tests/README.md) and [parity testing](testing/parity-testing/README.md) guides on how to write tests for AWS services.
+* Follow the existing code style. Run `make format` and `make lint` before checking in your code.
+  * Refer to [Development Environment Setup](development-environment-setup/README.md) if your local testing environment is not yet properly set up.
+* Document newly introduced methods and classes with pydoc, and add inline comments to code that is not self-documenting.
+* Separate unrelated changes into multiple PRs.
+* When creating a PR, classify the size of your change by setting a semver label:
+  * `semver: patch`: Small, non-breaking changes.
+  * `semver: minor`: Bigger, non-breaking changes (like features or bigger refactorings).
+  * `semver: major`: Breaking changes (no matter how big).
+
+Please note that by contributing any code or documentation to this repository (by raising PRs, or otherwise) you explicitly agree to the [**Contributor License Agreement**](../.github/CLA.md).
diff --git a/docs/development-environment-setup/README.md b/docs/development-environment-setup/README.md
new file mode 100644
index 0000000000000..81284d433a263
--- /dev/null
+++ b/docs/development-environment-setup/README.md
@@ -0,0 +1,114 @@
+# Development Environment Setup
+
+Before you get started with contributing to LocalStack, make sure you’ve familiarized yourself with LocalStack from the perspective of a user.
+You can follow our [getting started guide](https://docs.localstack.cloud/get-started/).
+Once LocalStack runs in your Docker environment and you’ve played around with the LocalStack and `awslocal` CLI, you can move forward to set up your developer environment.
+
+## Development requirements
+
+You will need the following tools for the local development of LocalStack.
+
+* [Python](https://www.python.org/downloads/) and `pip`
+  * We recommend using a Python version management tool like [`pyenv`](https://github.com/pyenv/pyenv/).
+    This way you will always use the correct Python version as defined in `.python-version`.
+* [Node.js & npm](https://nodejs.org/en/download/)
+* [Docker](https://docs.docker.com/desktop/)
+
+We recommend installing the above tools individually using your favorite package manager.
+For example, on macOS, you can use [Homebrew](https://brew.sh/) to install the above tools.
+
+### Setting up the Development Environment
+
+To make contributions to LocalStack, you need to be able to run LocalStack in host mode from your IDE, and be able to attach a debugger to the running LocalStack instance.
+We have a basic tutorial covering how you can do that.
+
+The basic steps include:
+
+1. Fork the localstack repository on GitHub [https://github.com/localstack/localstack/](https://github.com/localstack/localstack/)
+2. Clone the forked localstack repository `git clone git@github.com:<GITHUB_USERNAME>/localstack.git`
+3. Ensure you have `python`, `pip`, `node`, and `npm` installed.
+> [!NOTE]
+> You might also need `java` for some emulated services.
+4. Install the Python dependencies using `make install`.
+> [!NOTE]
+> This will install the required pip dependencies in a local Python 3 `venv` directory called `.venv` (your global Python packages will remain untouched).
+> Depending on your system, some `pip` modules may require additional native libs installed.
+
+> [!NOTE]
+> Consider running `make install-dev-types` to enable type hinting for efficient [integration tests](../testing/integration-tests/README.md) development.
+5. Start localstack in host mode using `make start`
+
+<div align="left">
+      <a href="https://youtu.be/XHLBy6VKuCM">
+         <img src="https://img.youtube.com/vi/XHLBy6VKuCM/0.jpg" style="width:100%;">
+      </a>
+</div>
+
+### Building the Docker image for Development
+
+We generally recommend using this command to build the `localstack/localstack` Docker image locally (works on Linux/macOS):
+
+```bash
+IMAGE_NAME="localstack/localstack" ./bin/docker-helper.sh build
+```
+
+### Additional Dependencies for running LocalStack in Host Mode
+
+In host mode, additional dependencies (e.g., Java) are required for developing certain AWS-emulated services (e.g., DynamoDB).
+The required dependencies vary depending on the service, [Configuration](https://docs.localstack.cloud/references/configuration/), operating system, and system architecture (i.e., x86 vs ARM).
+Refer to our official [Dockerfile](https://github.com/localstack/localstack/blob/master/Dockerfile) and our [package installer LPM](Concepts/index.md#packages-and-installers) for more details.
+
+#### Root Permissions
+
+LocalStack runs its own [DNS server](https://docs.localstack.cloud/user-guide/tools/dns-server/) which listens for requests on port 53. This requires root permission. When LocalStack starts in host mode, it runs the DNS server with sudo, so a prompt is triggered asking for the sudo password. This is annoying during local development, so to disable this functionality, use `DNS_ADDRESS=0`.
+
+> [!NOTE]
+> We don't recommend disabling the DNS server in general (e.g. in Docker) because the [DNS server](https://docs.localstack.cloud/user-guide/tools/dns-server/) enables seamless connectivity to LocalStack from different environments via the domain name `localhost.localstack.cloud`.
+
+
+#### Python Dependencies
+
+* [JPype1](https://pypi.org/project/JPype1/) might require `g++` to fix a compile error on ARM Linux `gcc: fatal error: cannot execute ‘cc1plus’`
+  * Used in EventBridge, EventBridge Pipes, and Lambda Event Source Mapping for a Java-based event ruler via the opt-in configuration `EVENT_RULE_ENGINE=java`
+  * Introduced in [#10615](https://github.com/localstack/localstack/pull/10615)
+
+#### Test Dependencies
+
+* Node.js is required for running LocalStack tests because the test fixture for CDK-based tests needs Node.js
+
+#### DynamoDB
+
+* [OpenJDK](https://openjdk.org/install/)
+
+#### Kinesis
+
+* [NodeJS & npm](https://nodejs.org/en/download/)
+
+#### Lambda
+
+* macOS users need to configure `LAMBDA_DEV_PORT_EXPOSE=1` such that the host can reach Lambda containers via IPv4 in bridge mode (see [#7367](https://github.com/localstack/localstack/pull/7367)).
+
+### Changing our fork of moto
+
+1. Fork our moto repository on GitHub [https://github.com/localstack/moto](https://github.com/localstack/moto)
+2. Clone the forked moto repository `git clone git@github.com:<GITHUB_USERNAME>/moto.git` (using the `localstack` branch)
+3. Within the localstack repository, install moto in **editable** mode:
+
+```sh
+# Assuming the following directory structure:
+#.
+#├── localstack
+#└── moto
+
+cd localstack
+source .venv/bin/activate
+
+pip install -e ../moto
+```
+
+### Tips
+
+* If `virtualenv` chooses system python installations before your pyenv installations, manually initialize `virtualenv` before running `make install`: `virtualenv -p ~/.pyenv/shims/python3.10 .venv`.
+* Terraform currently needs version <0.14 to work. Use [`tfenv`](https://github.com/tfutils/tfenv) to manage Terraform versions comfortably. Quick start: `tfenv install 0.13.7 && tfenv use 0.13.7`
+* Set env variable `LS_LOG='trace'` to print every HTTP request sent to localstack and the corresponding responses. It is useful for debugging certain issues.
+* Catch linter or format errors early by installing Git pre-commit hooks via `pre-commit install`. [pre-commit](https://pre-commit.com/) installation: `pip install pre-commit` or `brew install pre-commit`.
diff --git a/doc/end_user_license_agreement/README.md b/docs/end_user_license_agreement/README.md
similarity index 100%
rename from doc/end_user_license_agreement/README.md
rename to docs/end_user_license_agreement/README.md
diff --git a/docs/localstack-concepts/README.md b/docs/localstack-concepts/README.md
new file mode 100644
index 0000000000000..53f15bcc2d632
--- /dev/null
+++ b/docs/localstack-concepts/README.md
@@ -0,0 +1,248 @@
+# LocalStack Concepts
+
+When you first start working on LocalStack, you will most likely start working on AWS providers, either fixing bugs or adding features. In that case, you probably care mostly about [Services](#service), and, depending on the service and how it interacts with the [Gateway](#gateway), also **custom request handlers** and edge **routes**.
+
+If you are adding new service providers, then you’ll want to know how [Plugins](#plugins) work, and how to expose a service provider as a [service plugin](#service-plugin). This guide will give you a comprehensive overview of various core architectural concepts of LocalStack.
+
+## AWS Server Framework (ASF)
+
+AWS is essentially a Remote Procedure Call (RPC) system, and ASF is our server-side implementation of that system. Its principal components are:
+
+- Service specifications
+- Stub generation
+- Remote objects (service implementations)
+- Marshalling
+- Skeleton
+
+### Service specifications
+
+AWS developed a specification language, [Smithy](https://awslabs.github.io/smithy/), which they use internally to define their APIs in a declarative way. They use these specs to generate client SDKs and client documentation. All these specifications are available, among other repositories, in the [botocore repository](https://github.com/boto/botocore/tree/develop/botocore/data). Botocore provides the internals of the AWS Python SDK, which allows ASF to interpret and operate on the service specifications. Take a look at an example, [the `Invoke` operation of the `lambda` API](https://github.com/boto/botocore/blob/474e7a23d0fd178790579638cec9123d7e92d10b/botocore/data/lambda/2015-03-31/service-2.json#L564-L573):
+
+```json
+    "Invoke":{
+      "name":"Invoke",
+      "http":{
+        "method":"POST",
+        "requestUri":"/2015-03-31/functions/{FunctionName}/invocations"
+      },
+      "input":{"shape":"InvocationRequest"},
+      "output":{"shape":"InvocationResponse"},
+      "errors":[
+        {"shape":"ServiceException"},
+        ...
+```
+
+### Scaffold - Generating AWS API stubs
+
+We use these specifications to generate server-side API stubs using our scaffold script. The stubs comprise Python representations of _Shapes_ (type definitions), and an `<Service>Api` class that contains all the operations as function definitions. Notice the `@handler` decorator, which binds the function to the particular AWS operation. This is how we know where to dispatch the request to.
+
+<img src="asf-code-generation.png" width="800px" alt="Generating AWS API stubs via ASF" />
+
+You can try it using this command in the LocalStack repository:
+
+```bash
+python -m localstack.aws.scaffold generate <service> --save [--doc]
+```
+
+### Service providers
+
+A service provider is an implementation of an AWS service API. Service providers are the remote objects in the RPC terminology. You will find the modern ASF provider implementations in `localstack/services/<service>/provider.py`.
+
+### Marshalling
+
+A server-side protocol implementation requires a marshaller (a parser for incoming requests, and a serializer for outgoing responses).
+
+- Our [protocol parser](https://github.com/localstack/localstack/blob/master/localstack-core/localstack/aws/protocol/parser.py) translates AWS HTTP requests into objects that can be used to call the respective function of the service provider.
+- Our [protocol serializer](https://github.com/localstack/localstack/blob/master/localstack-core/localstack/aws/protocol/serializer.py) translates response objects coming from service provider functions into HTTP responses.
+
+## Service
+
+Most services are AWS providers, i.e., implementations of AWS APIs, but they don’t necessarily have to be.
+
+### Provider
+
+Here’s the anatomy of an AWS service implementation. It implements the API stub generated by the scaffold.
+
+<img src="service-implementation.png" width="800px" alt="Anatomy of an AWS service implementation" />
+
+### Stores
+
+All data processed by the providers is retained by in-memory structures called Stores. Think of them as an in-memory database for the providers to store state. Stores are written in a declarative manner similar to how one would write SQLAlchemy models.
+
+Stores support namespacing based on AWS Account ID and Regions, which allows emulation of multi-tenant setups and data isolation between regions, respectively.
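+
+To illustrate, here is a minimal sketch of a declarative store, assuming the helpers from `localstack.services.stores` (the store name and attributes are hypothetical, and exact signatures may differ):
+
+```python
+from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute
+
+
+class ExampleStore(BaseStore):
+    # one dict per (account, region) pair, created lazily on first access
+    queues: dict[str, dict] = LocalAttribute(default=dict)
+
+
+# bundle that namespaces ExampleStore instances by account ID and region
+example_stores = AccountRegionBundle("example", ExampleStore)
+
+# a provider would then look up the store for the current request, e.g.:
+store = example_stores["000000000000"]["us-east-1"]
+store.queues["my-queue"] = {"CreatedTimestamp": 1700000000}
+```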
+
+LocalStack has a feature called persistence, where the states of all providers are restored when the LocalStack instance is restarted. This is achieved by pickling and unpickling the provider stores.
+
+### `call_moto`
+
+Many LocalStack service providers use [`moto`](https://github.com/spulec/moto) as a backend. Moto is an open-source library that provides mocking for Python tests that use Boto, the Python AWS SDK. We re-use a lot of moto’s internal functionality, which provides mostly CRUD and some basic emulation for AWS services. We often extend services in Moto with additional functionality. Moto plays such a fundamental role for many LocalStack services that we have introduced our own tooling around it, specifically to make requests directly to moto.
+
+To add functionality on top of `moto`, you can use `call_moto(context: RequestContext)` to forward the given request to `moto`. When used in a service provider `@handler` method, it will dispatch the request to the correct `moto` implementation of the operation, if it exists, and return the parsed AWS response.
+
+The `MotoFallbackDispatcher` generalizes the behavior for an entire API. You can wrap any provider with it, and it will forward any request that returns a `NotImplementedError` to moto instead and hope for the best.
+
+Sometimes we also use `moto` code directly, for example importing and accessing `moto` backend dicts (state storage for services).
+
+### `@patch`
+
+[The patch utility](https://github.com/localstack/localstack/blob/master/localstack-core/localstack/utils/patch.py) enables easy [monkey patching](https://en.wikipedia.org/wiki/Monkey_patch) of external functionality. We often use this to modify internal moto functionality. Sometimes it is easier to patch internals than to wrap the entire API method with the custom functionality.
+
+### Server
+
+[Server](<https://github.com/localstack/localstack/blob/master/localstack-core/localstack/utils/serving.py>) is an abstract class that provides a basis for serving other backends that run in a separate process. For example, our Kinesis implementation uses [kinesis-mock](https://github.com/etspaceman/kinesis-mock/) as a backend that implements the Kinesis AWS API and also emulates its behavior.
+
+The provider [starts the kinesis-mock binary in a `Server`](https://github.com/localstack/localstack/blob/2e1e8b4e3e98965a7e99cd58ccdeaa6350a2a414/localstack/services/kinesis/kinesis_mock_server.py), and then forwards all incoming requests to it using `forward_request`. This is a similar construct to `call_moto`, only generalized to arbitrary HTTP AWS backends.
+
+A server is reachable through some URL (not necessarily HTTP), and the abstract class implements the lifecycle of the process (start, stop, is_started, is_running, etc). To create a new server, you only need to override either `do_run` or `do_start_thread` with custom logic to start the binary.
+
+There are some existing useful utilities and specializations of `Server` which can be found across the codebase. For example, `DockerContainerServer` spins up a Docker container on a specific port, and `ProxiedDockerContainerServer` adds an additional TCP/HTTP proxy server (running inside the LocalStack container) that tunnels requests to the container.
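+
+As a rough sketch (assuming the `Server` base class from `localstack.utils.serving` and `ShellCommandThread` from `localstack.utils.run`; the backend binary and its flags are hypothetical, and exact signatures may differ), a custom server could look like this:
+
+```python
+from localstack.utils.run import ShellCommandThread
+from localstack.utils.serving import Server
+
+
+class ExampleBackendServer(Server):
+    """Runs a hypothetical 'example-backend' binary as a managed child process."""
+
+    def do_start_thread(self):
+        # start the backend in a background thread; lifecycle methods such as
+        # start/shutdown are provided by the Server base class
+        thread = ShellCommandThread(["example-backend", "--port", str(self.port)])
+        thread.start()
+        return thread
+
+
+# usage sketch: server = ExampleBackendServer(port=4511); server.start()
+```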
+
+### External service ports
+
+Some services create additional user-facing resources. For example, the RDS service starts a PostgreSQL server, and the ElastiCache service starts a Redis server, that users then directly connect to.
+
+These resources are not hidden behind the service API, and need to be exposed through an available network port. This is what the [external service port range](https://docs.localstack.cloud/references/external-ports/) is for. We expose this port range by default in the docker-compose template, or via the CLI.
+
+### Service plugin
+
+A service provider has to be exposed as a service plugin for our code loading framework to pick it up.
+
+## Gateway
+
+The Gateway is a simple interface: `process(Request, Response)`. It receives an HTTP request and a response that it should populate. To that end, the Gateway uses a `HandlerChain` to process the request.
+
+An adapter exposes the gateway as something that can be served by a web server. By default, we use Hypercorn, an ASGI web server, and expose the Gateway as an ASGI app through our WSGI/ASGI bridge.
+
+The gateway creates a `RequestContext` object for each request, which is passed through the handler chain.
+
+All components of our HTTP framework build heavily on [Werkzeug](https://github.com/pallets/werkzeug/), an HTTP server library, which makes our app WSGI compatible.
+
+<img src="gateway-overview.png" width="800px" alt="LocalStack Gateway overview" />
+
+### Handler Chain
+
+The handler chain implements a variant of the [chain-of-responsibility pattern](https://en.wikipedia.org/wiki/Chain-of-responsibility_pattern), not unlike [the javax.servlet API](https://docs.oracle.com/javaee/7/api/javax/servlet/package-summary.html). The handler chain knows about three different handlers: Request Handlers, Response Handlers, and Exception Handlers. Request and response handlers have the same interface; they only differ in how they are invoked by the handler chain.
+
+A handler chain can be _running_, _stopped_ or _terminated_. If a request handler stops the chain using `chain.stop()`, the chain stops invoking the remaining request handlers, and jumps straight to the response handlers. If the chain is _terminated_, then even response handlers are skipped.
+
+If an exception occurs during the execution of a request handler, no other request handlers are executed, and instead the chain calls the exception handlers, and then all response handlers. Exceptions during response handlers are logged, but they do not interrupt the handler chain flow.
+
+### LocalStack AWS Gateway
+
+Here is a figure of the handler chain underlying the `LocalstackAwsGateway`, which every HTTP request to `:4566` goes through.
+
+Some handlers are designed to be extended dynamically at runtime by other services. For example, a service can add HTTP routes to the edge router, which can then process the request differently. OpenSearch, for example, uses this to register HTTP routes to [cluster endpoints](https://docs.localstack.cloud/user-guide/aws/opensearch/#interact-with-the-cluster) that are proxied through `:4566` to the cluster backend.
+
+<img src="localstack-handler-chain.png" width="800px" alt="LocalStack Handler chain" />
+
+## Plugins
+
+Plugins provided by [https://github.com/localstack/plux](https://github.com/localstack/plux) are how we load:
+
+- Service providers
+- Hooks
+- Extensions
+
+The key point to understand is that plugins use [Python entry points, which are part of the PyPA specification](https://packaging.python.org/en/latest/specifications/entry-points/). Entry points are discovered from the code during a build step rather than defined manually (this is the main differentiator of Plux from other code loading tools).
+In LocalStack, the `make entrypoints` make target does that, which is also part of `make install`.
+
+When you add new hooks or service providers, or any other plugin, make sure to run `make entrypoints`.
+
+When writing plugins, it is important to understand that any code that sits in the same module as the plugin will be imported when the plugin is _resolved_. That is, _before_ it is loaded. Resolving a plugin simply means discovering the entry points and loading the code the underlying entry point points to. This is why you will often see imports deferred to the actual loading of the plugin.
+
+## Config
+
+The LocalStack configuration is currently simply a set of well-known environment variables that we parse into Python values in `localstack/config.py`. When LocalStack is started via the CLI, we also need to pass those environment variables to the container, which is why we keep [a list of the environment variables we consider to be LocalStack configuration](https://github.com/localstack/localstack/blob/7e3045dcdca255e01c0fbd5dbf0228e500e8f42e/localstack/config.py#L693-L700).
+
+## Hooks
+
+Hooks are functions exposed as plugins that are collected and executed at specific points during the LocalStack lifecycle. This can be both in the runtime (executed in the container) and the CLI (executed on the host).
+
+### **Host/CLI hooks**
+
+These hooks are relevant only to invocations of the CLI. If you use, for example, a docker-compose file to start LocalStack, these are not used.
+
+- `@hooks.prepare_host` Hooks to prepare the host that's starting LocalStack. Executed on the host when invoking the CLI.
+- `@hooks.configure_localstack_container` Hooks to configure the LocalStack container before it starts. Executed on the host when invoking the CLI. This hook receives the `LocalstackContainer` object, which can be used to instrument the `docker run` command that starts LocalStack.
+
+### **Runtime hooks**
+
+- `@hooks.on_infra_start` Executed when LocalStack runtime components (previously known as _infrastructure_) are started.
+- `@hooks.on_infra_ready` Executed when LocalStack is ready to serve HTTP requests.
+
+## Runtime
+
+The components necessary to run the LocalStack server application are collectively referred to as the _runtime_. This includes the Gateway, scheduled worker threads, etc. The runtime is distinct from the CLI, which runs on the host. Currently, there is no clear separation between the two; you will notice this, for example, in the configuration, where some config variables are used for both the CLI and the runtime. Similarly, there is code used by both. Separating the two is an ongoing process.
+
+
+## Packages and installers
+
+Whenever we rely on certain third party software, we install it using our package installation framework, which consists of packages and installers.
+
+A package defines a specific kind of software we need for certain services, for example [dynamodb-local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html).
+It also encapsulates general information like name, available versions, etc., and manages the access to the actual installer that is used.
+
+The installer manages all installation-related information: the destination, the actual installation routine, etc.
+There are various types of installers available as base classes that try to minimize the required effort to install software, depending on what we need to install (executables, jar files, GitHub assets,...).
+So before you start reinventing the wheel, please check if there is a suitable class to extend.
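+
+To get a feel for the shape of this API, here is a hedged sketch of a package with a single installer (assuming the `Package` and `PackageInstaller` base classes from `localstack.packages`; the names and exact signatures are illustrative):
+
+```python
+import os
+
+from localstack.packages import Package, PackageInstaller
+
+
+class ExampleToolInstaller(PackageInstaller):
+    def __init__(self, version: str):
+        super().__init__("example-tool", version)
+
+    def _get_install_marker_path(self, install_dir: str) -> str:
+        # file whose existence signals that the installation is complete
+        return os.path.join(install_dir, "example-tool")
+
+    def _install(self, target) -> None:
+        ...  # download and unpack the binary into the target directory
+
+
+class ExampleToolPackage(Package):
+    def __init__(self):
+        super().__init__(name="example-tool", default_version="1.0.0")
+
+    def get_versions(self) -> list[str]:
+        return ["1.0.0"]
+
+    def _get_installer(self, version: str) -> PackageInstaller:
+        return ExampleToolInstaller(version)
+```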
+
+Packages and installers can usually be found in `packages.py` in the `localstack.services.<service>` module of the service that requires the dependency.
+Dependencies that are required by multiple services are located in `localstack.packages`.
+
+Additionally, there is the _LocalStack Package Manager (LPM)_.
+`lpm` is a module located in `localstack.cli` that provides a [Click](https://click.palletsprojects.com/)-powered CLI interface to trigger installations.
+It uses the [Plugin mechanism](#plugins) to discover packages.
+_LPM_ can be used directly as a module, and if called without a specific command it prints an extensive description of its available commands:
+
+```bash
+source .venv/bin/activate
+python -m localstack.cli.lpm
+```
+
+### Versions
+As dependencies exist in different versions, we need to reflect this in our process.
+Every version of a package needs to be explicitly supported by an installer implementation.
+The package needs to manage the different installers for the different versions.
+Each installer for a specific version should only have one instance (due to lock handling).
+Resources that do not use versions (e.g. because there is only a link to the newest one) generally use `latest` as version name.
+
+### Installation targets
+To keep things nice and clean, packages are installed in two locations, `static_libs` and `var_libs`.
+
+`static_libs` is used for packages installed at build time.
+When building the docker container, the packages are installed to a folder which will not be overwritten by a host-mounted volume.
+The `static_libs` directory should not be modified at container runtime, as it will be reset when the container is recreated.
+This is the default target if a package is installed in the aforementioned way via `python -m localstack.cli.lpm install`.
+
+`var_libs` is the main and default location used for packages installed at runtime.
+When starting the docker container, a host-volume is mounted at `var_libs`.
+The content of the directory will persist across multiple containers.
+
+### Installation life-cycle
+The installer base class provides two methods to manage potentially necessary side tasks for the installation: `_prepare_installation` and `_post_process`.
+These methods simply `pass` by default and need to be overwritten should they be needed.
+
+### Package discovery
+For LPM to be able to discover a package, we expose it via the package plugin mechanism.
+This is usually done by writing a function in `plugins.py` that loads a package instance by using the `@package` decorator.
+
+### `lpm` commands
+The available `lpm` commands are:
+
+- `python -m localstack.cli.lpm list`
+- `python -m localstack.cli.lpm install [OPTIONS] PACKAGE...`
+
+For help with the specific commands, use `python -m localstack.cli.lpm <command> --help`.
+
+## Utilities
+
+The codebase contains a wealth of utility functions for various common tasks like handling strings, JSON/XML, threads/processes, collections, date/time conversions, and much more.
+
+The utilities are grouped into multiple util modules inside the [localstack.utils](<https://github.com/localstack/localstack/tree/master/localstack-core/localstack/utils>) package. Some of the most commonly used utils modules include:
+
+- `.files` - file handling utilities (e.g., `load_file`, `save_file`, or `mkdir`)
+- `.json` - handle JSON content (e.g., `json_safe`, or `canonical_json`)
+- `.net` - network ports (e.g., `wait_for_port_open`, or `is_ip_address`)
+- `.run` - run external commands (e.g., `run`, or `ShellCommandThread`)
+- `.strings` - string/bytes manipulation (e.g., `to_str`, `to_bytes`, or `short_uid`)
+- `.sync` - concurrency synchronization (e.g., `poll_condition`, or `retry`)
+- `.threads` - manage threads and processes (e.g., `FuncThread`)
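+
+For instance, here is a short sketch of how some of these helpers are typically combined in tests (the utility functions are real, the scenario is made up):
+
+```python
+from localstack.utils.strings import short_uid
+from localstack.utils.sync import poll_condition
+
+# randomized names keep tests idempotent and parallelizable
+bucket_name = f"test-bucket-{short_uid()}"
+
+
+def bucket_exists() -> bool:
+    return True  # placeholder; e.g., check via an S3 client
+
+
+# wait reactively instead of sleeping a fixed amount of time
+poll_condition(bucket_exists, timeout=30)
+```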
diff --git a/docs/localstack-concepts/asf-code-generation.png b/docs/localstack-concepts/asf-code-generation.png
new file mode 100644
index 0000000000000..c93e4301fd2af
Binary files /dev/null and b/docs/localstack-concepts/asf-code-generation.png differ
diff --git a/docs/localstack-concepts/gateway-overview.png b/docs/localstack-concepts/gateway-overview.png
new file mode 100644
index 0000000000000..4cb50fa64a24a
Binary files /dev/null and b/docs/localstack-concepts/gateway-overview.png differ
diff --git a/docs/localstack-concepts/localstack-handler-chain.png b/docs/localstack-concepts/localstack-handler-chain.png
new file mode 100644
index 0000000000000..ea79a7dfacffa
Binary files /dev/null and b/docs/localstack-concepts/localstack-handler-chain.png differ
diff --git a/docs/localstack-concepts/service-implementation.png b/docs/localstack-concepts/service-implementation.png
new file mode 100644
index 0000000000000..93f8840255e6d
Binary files /dev/null and b/docs/localstack-concepts/service-implementation.png differ
diff --git a/doc/localstack-readme-banner.svg b/docs/localstack-readme-banner.svg
similarity index 100%
rename from doc/localstack-readme-banner.svg
rename to docs/localstack-readme-banner.svg
diff --git a/docs/testing/README.md b/docs/testing/README.md
new file mode 100644
index 0000000000000..ca9a027bf9088
--- /dev/null
+++ b/docs/testing/README.md
@@ -0,0 +1,427 @@
+# Testing in LocalStack
+
+- [Test Types](test-types/README.md)
+- [Integration Tests](integration-tests/README.md)
+- [Parity Testing](parity-testing/README.md)
+- [Multi-account and Multi-region Testing](multi-account-region-testing/README.md)
+- [Terraform Tests](terraform-tests/README.md)
+
+## Rules for stable tests
+
+Through experience, we encountered some guiding principles and rules when it comes to testing LocalStack.
+These aim to ensure a stable pipeline, keeping flakes minimal and reducing maintenance effort.
+Any newly added test and feature should keep these in mind!
+
+| **ID** | **Rule** |
+|--------|----------|
+| [R01](#r01) | Inform code owners and/or test authors about flaky tests by creating a PR skipping them (reason: flaky), so that they can be fixed ASAP. |
+| [R02](#r02) | Do not assume external dependencies are indefinitely available at the same location. They can move, and we need to be able to adapt when they do. |
+| [R03](#r03) | Where possible, tests should be in control of the resources they use and re-create them if removed. |
+| [R04](#r04) | If on-demand creation is not possible, opt for a fail-fast approach and make retrieval failures clearly visible for further investigation. |
+| [R05](#r05) | Add mechanisms to avoid access failures caused by rate limiting. |
+| [R06](#r06) | Do not wait a set amount of time but instead opt for a reactive approach using notification systems or polling for asynchronous (long-lasting) operations. |
+| [R07](#r07) | For tests with multiple steps, handle waits separately and start each wait in the correct state. |
+| [R08](#r08) | Ensure features interacting with account numbers work with arbitrary account numbers and multiple accounts simultaneously. |
+| [R09](#r09) | Make sure that your tests are idempotent and could theoretically run in parallel, by using randomized IDs and not re-using IDs across tests. |
+| [R10](#r10) | Ensure deterministic responses for anything that reaches an assertion or a snapshot match. |
+| [R11](#r11) | Be vigilant about changes happening to dependencies that can affect stability of your added features and tests. |
+| [R12](#r12) | Ensure all dependencies are available and functional on both AMD64 and ARM64 architectures. If a dependency is exclusive to one architecture, mark the corresponding test accordingly. |
+| [R13](#r13) | After the test run, make sure that the created resources are cleaned up properly. |
+| [R14](#r14) | Utilize fixture scopes for ensuring created resources exist as long as they should. |
+
+
+### R01
+
+Inform code owners and/or test authors about flaky tests by creating a PR skipping them (reason: flaky), so that they can be fixed ASAP.
+This way, flaky tests do not block the pipeline and can be fixed in a separate PR.
+We also set the test author and/or service owner as reviewer to ensure that the test is fixed in a timely manner.
+
+#### Anti-pattern
+
+1. Noticing a flake
+2. Ignoring it
+
+#### Best practice
+
+1. Noticing a flake
+2. Creating a new PR skipping the test and marking it as flaky
+
+```python
+
+@pytest.mark.skip(reason="flaky")
+def test_xyz():
+    pass
+```
+
+3. Setting test author and/or service owner as reviewer
+
+### R02
+
+Do not assume external dependencies (AWS resources, files, packages, images, licenses) are indefinitely available at the same location.
+They can move, and we need to be able to adapt when they do.
+This can be done by checking the status code of the response and reacting accordingly.
+Ideally, the test should be able to guide anyone on how to find the new location.
+
+#### Anti-pattern
+
+```python
+response = requests.get("http://resource.com/my-resource.tar.gz")
+use_resource(response.content)
+```
+
+#### Best practice
+
+```python
+response = requests.get("http://resource.com/my-resource.tar.gz")
+if response.status_code == 404:
+    further_steps()  # e.g. clear error message, potential documentation on where to find a new location, etc.
+use_resource(response.content)
+```
+
+### R03
+
+Where possible, tests should be in control of the resources they use and re-create them if removed (e.g., S3 buckets, roles).
+
+#### Anti-pattern
+
+```python
+bucket = s3_client.get_bucket("test-bucket")
+use_bucket(bucket)
+```
+
+#### Best practice
+
+```python
+buckets = s3_client.list_buckets()
+if "test-bucket" not in buckets:
+    s3_client.create_bucket("test-bucket")
+
+bucket = s3_client.get_bucket("test-bucket")
+use_bucket(bucket)
+```
+
+### R04
+
+If on-demand creation is not possible, opt for a fail-fast approach and make retrieval failures clearly visible for further investigation.
+We should not proceed with the test if the resource is not available.
+Proceeding anyway can lead to long-lasting loops with long log files and unclear error messages.
+
+#### Anti-pattern
+
+```python
+bucket = s3_client.get_bucket("test-bucket")
+use_bucket(bucket)
+```
+
+#### Best practice
+
+```python
+buckets = s3_client.list_buckets()
+if "test-bucket" not in buckets:
+    pytest.fail("Expected test-bucket to exist - it doesn't")
+```
+
+### R05
+
+Add mechanisms to avoid access failures caused by rate limiting.
+This can be done by adding exponential backoff or caching mechanisms.
+In some cases, rate limits can be avoided by using an authenticated request.
+
+#### Anti-pattern
+
+```python
+while True:
+    response = requests.get("http://resource.com")
+    if response.status_code == 429:  # Too many requests
+        pass  # immediately try again
+    else:
+        use(response)
+```
+
+#### Best practice
+
+```python
+cache = TTLCache(maxsize=128, ttl=60)
+
+
+@cached(cache)
+def get_resource(url, token, retries=10):
+    for retry in range(retries):
+        response = authenticated_request(url, token)
+        if response.status_code == 429:
+            time.sleep(2 ** retry)  # exponential backoff
+        else:
+            return response
+    raise Exception(f"Could not fetch {url} after {retries} attempts")
+
+
+resource = get_resource("http://resource.com", "abdfabdf")
+use(resource)
+```
+
+### R06
+
+Do not wait a set amount of time, but instead opt for a reactive approach using notification systems or polling for asynchronous (long-lasting) operations.
+Waiting a set amount of time can lead to long test runs and flaky tests, as the time needed for the operation can vary.
+
+#### Anti-pattern
+
+```python
+create_resource()
+time.sleep(300)
+use_resource()
+```
+
+#### Best practice
+
+```python
+create_resource()
+poll_condition(resource_exists, timeout=60)
+use_resource()
+```
+
+### R07
+
+For tests with multiple steps, handle waits separately and start each wait in the correct state.
+This way, the test can be more reactive and not wait for a set amount of time.
+
+#### Anti-pattern
+
+```python
+create_resource()
+deploy_resource()
+use_resource()
+```
+
+or
+
+```python
+create_resource()
+deploy_resource()
+poll_condition(resource_deployed, timeout=60)
+use_resource()
+```
+
+#### Best practice
+
+```python
+create_resource()
+poll_condition(resource_exists, timeout=20)
+deploy_resource()
+poll_condition(resource_deployed, timeout=60)
+use_resource()
+```
+
+### R08
+
+Ensure features interacting with account numbers work with arbitrary account numbers and multiple accounts simultaneously.
+See [here](multi-account-region-testing/README.md) for further documentation on multi-account/region testing.
+
+#### Anti-pattern
+
+1. Add new feature
+2. Use it with fixed account number
+3. Works -> done
+
+#### Best practice
+
+1. Add new feature
+2. Use it with fixed account number
+3. Works
+4. Try with randomized account numbers (as in the [documentation](multi-account-region-testing/README.md))
+5. Works -> done
+
+### R09
+
+Make sure that your tests are idempotent and could theoretically run in parallel, by using randomized IDs and not re-using IDs across tests.
+This also means that tests should not depend on each other and should be able to run in any order.
+
+#### Anti-pattern
+
+```python
+def test_something():
+    key = "test-bucket"
+    create_bucket(key)
+
+def test_something_else():
+    key = "test-bucket"
+    create_bucket(key)
+```
+
+#### Best practice
+
+```python
+def test_something():
+    key = f"test-bucket-{short_uid()}"
+    create_bucket(key)
+
+def test_something_else():
+    key = f"test-bucket-{short_uid()}"
+    create_bucket(key)
+```
+
+### R10
+
+Ensure deterministic responses for anything that reaches an assertion or a snapshot match.
+This is especially important when you have randomized IDs in your tests as per [R09](#r09).
+You can achieve this by using proper transformations.
+See [here](parity-testing/README.md) for further documentation on parity testing and how to use transformers.
+
+#### Anti-pattern
+
+```python
+recorded = {"key": "key-asdfasdf"}  # the recorded snapshot content, shown as a dict for presentation purposes
+
+
+def test_something(snapshot):
+    key = f"key-{short_uid()}"
+    snapshot.match("key", {"key": key})
+```
+
+#### Best practice
+
+```python
+recorded = {"key": "<key:1>"}  # the recorded snapshot content, shown as a dict for presentation purposes
+
+
+def test_something(snapshot):
+    snapshot.add_transformer(...)  # add appropriate transformers
+    key = f"key-{short_uid()}"
+    snapshot.match("key", {"key": key})
+```
+
+### R11
+
+Be vigilant about changes happening to dependencies (Python dependencies and others) that can affect the stability of your added features and tests.
+
+#### Anti-pattern
+
+1. Add dependency
+2. Forget about it
+3. Dependency adds instability
+4. Disregard
+
+#### Best practice
+
+1. Add dependency
+2. Check the weekly Python upgrade PR for upgrades to the dependency
+3. Keep track of relevant changes from the changelog
+
+### R12
+
+Ensure all dependencies are available and functional on both AMD64 and ARM64 architectures.
+If a dependency is exclusive to one architecture, mark the corresponding test accordingly.
+However, if possible, try to use multi-platform resources.
+
+#### Anti-pattern
+
+```python
+def test_docker():
+    docker.run(image="amd64-only-image")
+```
+
+#### Best practice
+
+```python
+def test_docker():
+    docker.run(image="multi-platform-image")
+```
+
+If the above is not possible, then:
+
+```python
+@markers.only_on_amd64
+def test_docker():
+    docker.run(image="amd64-only-image")
+```
+
+### R13
+
+After the test run, make sure that the created resources are cleaned up properly.
+This can easily be achieved by using fixtures with a yield statement.
+This way, the resources are cleaned up after the test run, even if the test fails.
+Furthermore, you could use factory fixtures to create resources on demand and then clean them up together.
+
+#### Anti-pattern
+
+```python
+def test_something():
+    key = f"test-{short_uid()}"
+    s3_client.create_bucket(key)
+    use_bucket(key)
+    # bucket still exists after test run
+```
+
+#### Best practice
+
+```python
+@pytest.fixture
+def bucket():
+    key = f"test-{short_uid()}"
+    s3_client.create_bucket(key)
+    yield key
+    s3_client.delete_bucket(key)
+
+def test_something(bucket):
+    use_bucket(bucket)
+    # bucket is deleted after test run
+```
+
+### R14
+
+Utilize fixture scopes to ensure created resources exist exactly as long as they should.
+For example, if a resource should exist for the duration of the test run, use the `session` scope.
+If a resource should exist for the duration of the test, use the `function` scope.
+
+#### Anti-pattern
+
+```python
+@pytest.fixture(scope="function")  # function scope is default
+def database_server():
+    server = start_database_server()
+    yield server
+    stop_database_server()
+
+@pytest.fixture(scope="function")  # function scope is default
+def database_connection(database_server):
+    conn = connect_to_database(database_server)
+    yield conn
+    conn.close()
+
+def test_insert_data(database_connection):
+    insert_data(database_connection)
+    # The database server is started and stopped for each test function,
+    # leading to increased overhead and potential performance issues.
+
+def test_query_data(database_connection):
+    query_data(database_connection)
+    # Similar issue here, the server is started and stopped for each test.
+```
+
+#### Best practice
+
+```python
+@pytest.fixture(scope="session")
+def database_server():
+    server = start_database_server()
+    yield server
+    stop_database_server()
+
+@pytest.fixture(scope="function")  # function scope is default
+def database_connection(database_server):
+    conn = connect_to_database(database_server)
+    yield conn
+    conn.close()
+
+def test_insert_data(database_connection):
+    insert_data(database_connection)
+
+def test_query_data(database_connection):
+    query_data(database_connection)
+```
+
+## Test markers
+
+For tests, we offer additional markers, which can be found in [localstack/testing/pytest/marking.py](../../localstack-core/localstack/testing/pytest/marking.py).
+
diff --git a/docs/testing/integration-tests/README.md b/docs/testing/integration-tests/README.md
new file mode 100644
index 0000000000000..99e2f40795d58
--- /dev/null
+++ b/docs/testing/integration-tests/README.md
@@ -0,0 +1,182 @@
+# Integration tests
+
+LocalStack has an extensive set of [integration tests](https://github.com/localstack/localstack/tree/master/tests/integration). This document describes how to run and write integration tests.
+
+## Writing integration tests
+
+The following guiding principles apply to writing integration tests, in addition to the [general rules](../README.md):
+
+- Tests should pass when running against AWS:
+  - Don't make assumptions about the time it takes to create resources. If you do asserts after creating resources, use `poll_condition`, `retry`, or one of the waiters included in the boto3 library to wait for the resource to be created.
+  - Make sure your tests always clean up AWS resources, even if your test fails! Prefer existing factory fixtures (like `sqs_create_queue`). Introduce try/finally blocks if necessary.
+- Tests should be runnable concurrently:
+  - Protect your tests against side effects. Example: never assert on global state that could be modified by a concurrently running test (like `assert len(sqs.list_queues()) == 1`; may not hold!).
+  - Make sure your tests are side-effect free. Avoid creating top-level resources with constant names. Prefer using generated unique names (like `short_uid`).
+- Tests should not be clever. It should be plain to see what they are doing by looking at the test. This means avoiding creating functions, loops, or abstractions, even for repeated behavior (like groups of asserts), and instead preferring a bit of code duplication.
+- Group tests logically using classes.
+- Avoid injecting more than 2-3 fixtures in a test (unless you are testing complex integrations where your test requires several different clients).
+- Create factory fixtures only for top-level resources (like Queues, Topics, Lambdas, Tables).
+- Avoid sleeps! Use `poll_condition`, `retry`, or `threading.Event` internally to control concurrent flows.
+
+We use [pytest](https://docs.pytest.org) as our testing framework.
+Older tests were written using the unittest framework, but its use is discouraged.
+
+If your test matches the pattern `tests/integration/**/test_*.py` or `tests/aws/**/test_*.py` it will be picked up by the integration test suite.
+Any test targeting one or more AWS services should go into `tests/aws/**` in the corresponding service package.
+Every test in `tests/aws/**/test_*.py` must be marked by exactly one pytest marker, e.g. `@markers.aws.validated`.
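+
+For illustration, here is a hedged sketch of a test that follows these principles, assuming the `sqs_create_queue` factory fixture and the `retry` helper from `localstack.utils.sync` mentioned above (the queue name and message body are hypothetical):
+
+```python
+from localstack.testing.pytest import markers
+from localstack.utils.strings import short_uid
+from localstack.utils.sync import retry
+
+
+@markers.aws.validated
+def test_send_receive_message(aws_client, sqs_create_queue):
+    # randomized name, so the test can run concurrently and repeatedly (R09)
+    queue_url = sqs_create_queue(QueueName=f"queue-{short_uid()}")
+    aws_client.sqs.send_message(QueueUrl=queue_url, MessageBody="hello")
+
+    def _receive():
+        # poll instead of sleeping (R06); retried until the message arrives
+        messages = aws_client.sqs.receive_message(QueueUrl=queue_url).get("Messages", [])
+        assert messages
+        return messages
+
+    messages = retry(_receive, retries=10, sleep=1)
+    assert messages[0]["Body"] == "hello"
+```
+
+The factory fixture takes care of deleting the queue after the test, so no explicit cleanup code is needed.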
+
+### Functional-style tests
+
+You can write functional-style tests by defining a function with the prefix `test_` and basic asserts:
+
+```python
+def test_something():
+    assert True is not False
+```
+
+### Class-style tests
+
+Or you can write class-style tests by grouping tests that logically belong together in a class:
+
+```python
+class TestMyThing:
+    def test_something(self):
+        assert True is not False
+```
+
+### Fixtures
+
+We use the pytest fixture concept, and provide several fixtures you can use when writing AWS tests. For example, to inject a boto client factory for all services, you can specify the `aws_client` fixture in your test method and access a client from it:
+
+```python
+class TestMyThing:
+    def test_something(self, aws_client):
+        assert len(aws_client.sqs.list_queues()["QueueUrls"]) == 0
+```
+
+We also provide fixtures for certain disposable resources, like buckets:
+
+```python
+def test_something_on_a_bucket(s3_bucket):
+    # s3_bucket refers to an S3 bucket that is created before
+    # the test runs, and removed after it returns
+    use_bucket(s3_bucket)
+```
+
+Another pattern we use is the [factory as fixture](https://docs.pytest.org/en/6.2.x/fixture.html#factories-as-fixtures) pattern:
+
+```python
+def test_something_on_multiple_buckets(s3_create_bucket):
+    bucket1 = s3_create_bucket()
+    bucket2 = s3_create_bucket()
+    # both buckets will be deleted after the test returns
+```
+
+You can find the list of available fixtures in the [fixtures.py](https://github.com/localstack/localstack/blob/master/localstack-core/localstack/testing/pytest/fixtures.py) file.
+
+
+## Running the test suite
+
+To run the tests, you can use the make target and set the `TEST_PATH` variable:
+
+```bash
+TEST_PATH="tests/integration" make test
+```
+
+or run it manually within the virtual environment:
+
+```bash
+python -m pytest --log-cli-level=INFO tests/integration
+```
+
+### Running individual tests
+
+You can further specify the file and test class you want to run in the test path:
+
+```bash
+TEST_PATH="tests/integration/docker_utils/test_docker.py::TestDockerClient" make test
+```
+
+### Test against a running LocalStack instance
+
+When you run the integration tests, LocalStack is automatically started (via the pytest conftest mechanism in [tests/integration/conftest.py](https://github.com/localstack/localstack/blob/master/tests/integration/conftest.py)).
+You can disable this behavior by setting the environment variable `TEST_SKIP_LOCALSTACK_START=1`.
+
+### Test against Amazon Web Services
+
+Ideally, every integration test is also tested against real AWS. To run the integration tests, we prefer you to use an AWS sandbox account, so that you don't accidentally run tests against your production account.
+
+#### Creating an AWS sandbox account
+
+1. Log in with your credentials into your AWS sandbox account with `AWSAdministratorAccess`.
+2. Type **IAM** in the top bar and navigate to the **IAM** service.
+3. Navigate to `Users` and create a new user (**Add Users**).
+    1. Add the username as `localstack-testing`.
+    2. Keep the **Provide user access to the AWS Management Console - optional** box unchecked.
+4. Attach existing policies directly.
+5. Check **AdministratorAccess** and click **Next**/**Create user** until done.
+6. Go to the newly created user under `IAM/Users`, go to the `Security Credentials` tab, and click on **Create access key** within the `Access Keys` section.
+7. Pick the **Local code** option and check the **I understand the above recommendation and want to proceed to create an access key** box.
+8. Click on **Create access key** and copy the Access Key ID and the Secret access key immediately.
+9. Run `aws configure --profile ls-sandbox` and enter the Access Key ID and the Secret access key when prompted.
+10. Verify that the profile is set up correctly by running `aws sts get-caller-identity --profile ls-sandbox`.
+
+Here is what `~/.aws/credentials` should look like:
+
+```bash
+[ls-sandbox]
+aws_access_key_id = <your-key-id>
+aws_secret_access_key = <your-secret-key>
+```
+
+The `~/.aws/config` file should look like this:
+
+```bash
+[ls-sandbox]
+region=eu-central-1
+# .... you can add additional configuration options for AWS clients here
+```
+
+#### Running integration tests against AWS
+
+- Set the environment variable: `TEST_TARGET=AWS_CLOUD`.
+- Use the client `fixtures` and other fixtures for resource creation instead of methods from `aws_stack.py`.
+  - While using the environment variable `TEST_TARGET=AWS_CLOUD`, the boto client will be automatically configured to target AWS instead of LocalStack.
+- Configure your AWS profile/credentials:
+  - When running the test, set the environment variable `AWS_PROFILE` to the profile name you chose in the previous step. Example: `AWS_PROFILE=ls-sandbox`
+- Ensure that all resources are cleaned up, even when the test fails and even when other fixture cleanup operations fail!
+- Testing against AWS might require additional roles and policies.
+
+Here is what a useful environment configuration for testing against AWS could look like:
+
+```bash
+DEBUG=1;  # enables debug logging
+TEST_DISABLE_RETRIES_AND_TIMEOUTS=1;
+TEST_TARGET=AWS_CLOUD;
+AWS_DEFAULT_REGION=us-east-1;
+AWS_PROFILE=ls-sandbox
+```
+
+Once you're confident your test is reliably working against AWS, you can add the pytest marker `@markers.aws.validated`.
+
+#### Create a snapshot test
+
+Once you have verified that your test runs against AWS, you can record snapshots for the test run. A snapshot records the response from AWS and can later be used to compare it against the response from LocalStack.
+
+Snapshot tests help to increase parity with AWS and raise the confidence in the service implementations. Therefore, snapshot tests are preferred over normal integration tests.
+
+Please check our subsequent guide on [Parity Testing](../parity-testing/README.md) for a detailed explanation on how to write AWS-validated snapshot tests.
+
+#### Force the start of a local instance
+
+When running tests with `TEST_TARGET=AWS_CLOUD`, no LocalStack instance is created by default. This can be bypassed by also setting `TEST_FORCE_LOCALSTACK_START=1`.
+
+Note that the `aws_client` fixture will keep pointing at the AWS instance, so you will need to create your own client factory using `aws_client_factory`:
+
+```python
+local_client = aws_client_factory(
+    endpoint_url=f"http://{localstack_host()}",
+    aws_access_key_id="test",
+    aws_secret_access_key="test",
+)
+```
diff --git a/docs/testing/multi-account-region-testing/README.md b/docs/testing/multi-account-region-testing/README.md
new file mode 100644
index 0000000000000..323643cbc8a97
--- /dev/null
+++ b/docs/testing/multi-account-region-testing/README.md
@@ -0,0 +1,55 @@
+# Multi-account and Multi-region Testing
+
+LocalStack has multi-account and multi-region support. This document contains some tips to make sure that your contributions are compatible with this functionality.
+
+## Overview
+
+For cross-account inter-service access, specify a role whose permissions the source service uses to make requests to the target service and access the other service's resource.
+This role should be in the source account.
+When writing an AWS-validated test case, you need to properly configure IAM roles.
+
+For example:
+The test case [`test_apigateway_with_step_function_integration`](https://github.com/localstack/localstack/blob/628b96b44a4fc63d880a4c1238a4f15f5803a3f2/tests/aws/services/apigateway/test_apigateway_basic.py#L999) specifies a [role](https://github.com/localstack/localstack/blob/628b96b44a4fc63d880a4c1238a4f15f5803a3f2/tests/aws/services/apigateway/test_apigateway_basic.py#L1029-L1034) which has permissions to access the target step function account.
+
+```python
+role_arn = create_iam_role_with_policy(
+    RoleName=f"sfn_role-{short_uid()}",
+    PolicyName=f"sfn-role-policy-{short_uid()}",
+    RoleDefinition=STEPFUNCTIONS_ASSUME_ROLE_POLICY,
+    PolicyDefinition=APIGATEWAY_LAMBDA_POLICY,
+)
+```
+
+For cross-account inter-service access, you can create the client using `connect_to.with_assumed_role(...)`.
+For example:
+
+```python
+connect_to.with_assumed_role(
+    role_arn="role-arn",
+    service_principal=ServicePrincipal.service_name,
+    region_name=region_name,
+).lambda_
+```
+
+When there is no role specified, you should conceptually use the source ARN if cross-account access is allowed.
+This can be seen in a case where the `account_id` was [added](https://github.com/localstack/localstack/blob/ae31f63bb6d8254edc0c85a66e3c36cd0c7dc7b0/localstack/utils/aws/message_forwarding.py#L42) to [send events to the target](https://github.com/localstack/localstack/blob/ae31f63bb6d8254edc0c85a66e3c36cd0c7dc7b0/localstack/utils/aws/message_forwarding.py#L31) service like SQS, SNS, Lambda, etc.
+
+Always refer to the official AWS documentation and investigate how the services communicate with each other.
+For example, here are the [AWS Firehose docs](https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#cross-account-delivery-s3) explaining the Firehose and S3 integration.
+
+
+## Test changes in CI with random credentials
+
+We regularly run the test suite on GitHub Actions to verify compatibility with multi-account and multi-region features.
+
+A [scheduled GitHub Actions workflow](https://github.com/localstack/localstack/actions/workflows/aws-tests-mamr.yml) runs on working days at 01:00 UTC, executing the tests with randomized account IDs and regions.
+If you have the necessary permissions, you can also manually trigger the [workflow](https://github.com/localstack/localstack/actions/workflows/aws-tests-mamr.yml) directly from GitHub.
+
+## Test changes locally with random credentials
+
+To test changes locally for multi-account and multi-region compatibility, set the environment config values as follows:
+
+- `TEST_AWS_ACCOUNT_ID` (any value except `000000000000`)
+- `TEST_AWS_ACCESS_KEY_ID` (any value except `000000000000`)
+- `TEST_AWS_REGION` (any value except `us-east-1`)
+
+Note that within all tests you must use the `account_id`, `secondary_account_id`, `region_name`, and `secondary_region_name` fixtures.
+Importing and using `localstack.constants.TEST_` values is not advised.
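+
+As a hedged sketch, a test that stays compatible with randomized accounts and regions derives its expectations from these fixtures instead of hard-coding values (the queue name and ARN layout shown here are illustrative):
+
+```python
+from localstack.utils.strings import short_uid
+
+
+def test_queue_arn_uses_current_account(aws_client, account_id, region_name):
+    queue_name = f"queue-{short_uid()}"
+    queue_url = aws_client.sqs.create_queue(QueueName=queue_name)["QueueUrl"]
+    queue_arn = aws_client.sqs.get_queue_attributes(
+        QueueUrl=queue_url, AttributeNames=["QueueArn"]
+    )["Attributes"]["QueueArn"]
+    # derive the expected ARN from the fixtures, not from fixed constants
+    assert queue_arn == f"arn:aws:sqs:{region_name}:{account_id}:{queue_name}"
+```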
diff --git a/docs/testing/parity-testing/README.md b/docs/testing/parity-testing/README.md
new file mode 100644
index 0000000000000..9127dc5794b45
--- /dev/null
+++ b/docs/testing/parity-testing/README.md
@@ -0,0 +1,259 @@
+# Parity Testing
+
+Parity tests (also called snapshot tests) are a special form of integration tests that verify and improve the correctness of LocalStack compared to AWS.
+
+Initially, the integration test is executed against AWS and collects responses of interest. Those responses are called "snapshots" and will later be used to compare the results from AWS with the ones from LocalStack.
+These responses, aka "snapshots", are stored in a **snapshot.json** file.
+
+Once the snapshot is recorded, the test can be executed against LocalStack. During this "normal" test execution, the test runs against LocalStack and compares the LocalStack responses with the recorded content.
+
+In theory, every integration test can be converted to a parity-conforming snapshot test.
+
+This guide assumes you are already familiar with writing [integration tests](../integration-tests/README.md) for LocalStack in general.
+
+## How to write Parity tests
+
+In a nutshell, the necessary steps include:
+
+1. Make sure that the test works against AWS.
+    * Check out our [Integration Test Guide](../integration-tests/README.md#running-integration-tests-against-aws) for tips on how to run integration tests against AWS.
+2. Add the `snapshot` fixture to your test and identify which responses you want to collect and compare against LocalStack.
+    * Use `snapshot.match("identifier", result)` to mark the result of interest. It will be recorded and stored in a file with the name `<testfile-name>.snapshot.json`.
+    * The **identifier** can be freely selected, but ideally it gives a hint on what is recorded - so typically the name of the function. The **result** is expected to be a `dict`.
+    * Run the test against AWS: use the parameter `--snapshot-update` (or the environment variable `SNAPSHOT_UPDATE=1`) and set the environment variable `TEST_TARGET=AWS_CLOUD`.
+    * Check the recorded result in `<testfile-name>.snapshot.json` and consider [using transformers](#using-transformers) to make the result comparable.
+3. Run the test against LocalStack.
+    * Hint: Ensure that `AWS_CLOUD` is not set as a test target and that the parameter `--snapshot-update` is removed.
+    * If you used the environment variable, make sure to delete it or reset the value, e.g. `SNAPSHOT_UPDATE=0`.
+
+Here is an example of a parity test:
+
+```python
+def test_invocation(self, aws_client, snapshot):
+    # add transformers to make the results comparable
+    snapshot.add_transformer(snapshot.transform.lambda_api())
+
+    result = aws_client.lambda_.invoke(
+        ....
+    )
+    # records the 'result' using the identifier 'invoke'
+    snapshot.match("invoke", result)
+```
+
+
+## The Snapshot
+
+When an integration test is executed against AWS with the `--snapshot-update` flag, the response will automatically be updated in the snapshot file.
+
+**The file is automatically created if it doesn't exist yet.** The naming pattern is `<filename>.snapshot.json`, where `<filename>` is the name of the file where the test is located.
+One file can contain several snapshot recordings, e.g. the results from several tests.
+
+The snapshot file is a JSON file, and each JSON object on the root level represents one test.
+E.g., imagine the test file name is `test_lambda_api.py` (the example is outlined in ['Reference Replacement'](#reference-replacement)), with the class `TestLambda`.
+
+Running the test `test_basic_invoke` will create a JSON object `test_lambda_api.py::TestLambda::test_basic_invoke`.
+
+Each recorded snapshot contains:
+ * `recorded-date`: the timestamp when this test was last updated
+ * `recorded-content`: contains all `identifiers` as keys, with the `response` as values, from the test's `snapshot.match(identifier, response)` calls
+
+Note that all JSON strings of a response will automatically be parsed to JSON. This makes the comparison, transformation, and exclusion of certain keys easier (string vs JSON object).
+
+**Snapshot files should never be modified manually.** If one or more snapshots need to be updated, simply execute the test against AWS, and [use transformers](#using-transformers) to make the recorded responses comparable.
+
+## Using Transformers
+
+In order to make results comparable, some parts of the response might need to be adapted before storing the record as a snapshot.
+For example, AWS responses could contain special IDs, usernames, timestamps, etc.
+
+Transformers should bring the AWS response into a comparable form by replacing any request-specific parameters. Replacements require thoughtful handling so that important information is not lost in translation.
+
+The `snapshot` fixture uses some basic transformations by default, including:
+
+- Trimming MetaData (we only keep the `HTTPStatusCode` and the `content-type`, if set).
+- Replacing all UUIDs (that match a regex) with [reference replacement](#reference-replacement).
+- Replacing everything that matches the ISO8601 pattern with "date".
+- Replacing any value with datatype `datetime` with "datetime".
+- Replacing all values where the key contains "timestamp" with "timestamp".
+- Regex replacement of the `account-id`.
+- Regex replacement of the location.
+
+## API Transformer
+
+APIs for one service often require similar transformations. Therefore, we introduced some utilities that collect common transformations, grouped by service.
+
+Ideally, the service transformation already includes every transformation that is required.
+The [TransformerUtility](https://github.com/localstack/localstack/blob/master/localstack-core/localstack/testing/snapshots/transformer_utility.py) already provides some collections of transformers for specific service APIs.
+
+For example, to add common transformers for Lambda, you can use `snapshot.add_transformer(snapshot.transform.lambda_api())`.
+
+## Transformer Types
+
+The parity testing framework currently includes some basic transformer types:
+
+- `KeyValueBasedTransformer` replaces a value directly, or by reference, based on key-value evaluation.
+- `JsonPathTransformer` replaces the JSON path value directly, or by reference. [jsonpath-ng](https://pypi.org/project/jsonpath-ng/) is used for the JSON path evaluation.
+- `RegexTransformer` replaces the regex pattern globally. Please be aware that this is applied to the JSON string: the JSON will be serialized to a string, and the replacement happens globally - use it with care.
+
+Hint: There are also some simplified transformers in [TransformerUtility](https://github.com/localstack/localstack/blob/master/localstack-core/localstack/testing/snapshots/transformer_utility.py).
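+
+For instance, a hedged sketch of a global regex replacement using the simplified helpers, assuming `snapshot.transform.regex` is available on the `TransformerUtility` and using a generated bucket name as the value to mask:
+
+```python
+bucket_name = f"bucket-{short_uid()}"
+# replaces every occurrence of the bucket name in the serialized snapshot
+snapshot.add_transformer(snapshot.transform.regex(bucket_name, "<bucket-name>"))
+```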
+
+### Examples
+
+A transformer that replaces the key `logGroupName` only if the value matches `log_group_name`:
+
+```python
+snapshot.add_transformer(
+    KeyValueBasedTransformer(
+        lambda k, v: v if k == "logGroupName" and v == log_group_name else None,
+        replacement="log-group",
+    )
+)
+```
+
+If you only want to check for the key name, a simplified transformer could look like this:
+
+```python
+snapshot.add_transformer(snapshot.transform.key_value("logGroupName"))
+```
+
+## Reference Replacement
+
+Parameters can be replaced by reference. In contrast to a "direct" replacement, the value to be replaced is **registered, and later replaced as a regex pattern**. This has the advantage of keeping information when the same reference is used in several recordings in one test.
+
+Consider the following example:
+
+```python
+def test_basic_invoke(
+    self, aws_client, create_lambda, snapshot
+):
+    # custom transformers
+    snapshot.add_transformer(snapshot.transform.lambda_api())
+
+    # predefined names for functions
+    fn_name = f"ls-fn-{short_uid()}"
+    fn_name_2 = f"ls-fn-{short_uid()}"
+
+    # create function 1
+    response = create_lambda(FunctionName=fn_name, ... )
+    snapshot.match("lambda_create_fn", response)
+
+    # create function 2
+    response = create_lambda(FunctionName=fn_name_2, ... )
+    snapshot.match("lambda_create_fn_2", response)
+
+    # get function 1
+    get_fn_result = aws_client.lambda_.get_function(FunctionName=fn_name)
+    snapshot.match("lambda_get_fn", get_fn_result)
+
+    # get function 2
+    get_fn_result_2 = aws_client.lambda_.get_function(FunctionName=fn_name_2)
+    snapshot.match("lambda_get_fn_2", get_fn_result_2)
+```
+
+The information that the function name of the first recording (`lambda_create_fn`) is the same as in the recording for `lambda_get_fn` is important.
+
+Using reference replacement, this information is preserved in the `snapshot.json`. The reference replacement automatically appends an ascending number, to ensure that different values can be differentiated.
+
+```json
+{
+    "test_lambda_api.py::TestLambda::test_basic_invoke": {
+        "recorded-date": ...,
+        "recorded-content": {
+            "lambda_create_fn": {
+                ...
+                "FunctionName": "<function-name:1>",
+                "FunctionArn": "arn:aws:lambda:<region>:111111111111:function:<function-name:1>",
+                "Runtime": "python3.9",
+                "Role": "arn:aws:iam::111111111111:role/<resource:1>",
+                ...
+            },
+            "lambda_create_fn_2": {
+                ...
+                "FunctionName": "<function-name:2>",
+                "FunctionArn": "arn:aws:lambda:<region>:111111111111:function:<function-name:2>",
+                "Runtime": "python3.9",
+                "Role": "arn:aws:iam::111111111111:role/<resource:1>",
+                ...
+            },
+            "lambda_get_fn": {
+                ...
+                "Configuration": {
+                    "FunctionName": "<function-name:1>",
+                    "FunctionArn": "arn:aws:lambda:<region>:111111111111:function:<function-name:1>",
+                    "Runtime": "python3.9",
+                    "Role": "arn:aws:iam::111111111111:role/<resource:1>",
+                    ...
+                },
+            },
+            "lambda_get_fn_2": {
+                ...
+                "Configuration": {
+                    "FunctionName": "<function-name:2>",
+                    "FunctionArn": "arn:aws:lambda:<region>:111111111111:function:<function-name:2>",
+                    "Role": "arn:aws:iam::111111111111:role/<resource:1>",
+                    ...
+                },
+            },
+        }
+    }
+}
+```
+
+## Tips and Tricks for Transformers
+
+Getting the transformations right can be a tricky task, and we appreciate the time you spend on writing parity snapshot tests for LocalStack! We are aware that it might be challenging to implement transformers that work for both AWS and LocalStack responses.
+
+In general, we are interested in transformers that work for AWS.
Therefore, we recommend also running the tests and testing the transformers against AWS itself.
+
+Meaning: after you have executed the test with the `--snapshot-update` flag and recorded the snapshot, you can run the test without the update flag against the `AWS_CLOUD` test target. If the test passes, we can be quite certain that the transformers work in general. Any deviations with LocalStack might then be due to missing parity.
+
+You do not have to fix any deviations right away, even though we would appreciate this very much! It is also possible to exclude single test cases from snapshot verification, or to exclude specific json-paths of the snapshot.
+
+### Skipping verification of snapshot tests
+
+Snapshot verification is enabled by default. If for some reason you want to skip any snapshot verification, you can set the parameter `--snapshot-skip-all`.
+
+If you want to skip verification for a single test case, you can set the pytest marker `skip_snapshot_verify`. If you set the marker without a parameter, the verification will be skipped entirely for this test case.
+
+Additionally, you can exclude certain paths from the verification only.
+Simply include a list of json-paths. Those paths will then be excluded from the comparison:
+
+```python
+@pytest.mark.skip_snapshot_verify(
+    paths=["$..LogResult", "$..Payload.context.memory_limit_in_mb"]
+)
+def test_something_that_does_not_work_completely_yet(self, aws_client, snapshot):
+    snapshot.add_transformer(snapshot.transform.lambda_api())
+    result = aws_client.lambda_....
+    snapshot.match("invoke-result", result)
+```
+
+> [!NOTE]
+> Generally, [transformers](#using-transformers) should be used wherever possible to make responses comparable.
+> If specific paths are skipped from the verification, it means LocalStack does not have parity yet.
+
+### Debugging the Transformers
+
+Sometimes different transformers might interfere; especially regex transformers and reference transformations can be tricky. We added debug logs so that each replacement step is visible in the output, to help locate any unexpected behavior. You can enable the debug logs by setting the env variable `DEBUG_SNAPSHOT=1`.
+
+```bash
+localstack.testing.snapshots.transformer: Registering regex pattern '000000000000' in snapshot with '111111111111'
+localstack.testing.snapshots.transformer: Registering regex pattern 'us-east-1' in snapshot with '<region>'
+localstack.testing.snapshots.transformer: Replacing JsonPath '$.json_encoded_delivery..Body.Signature' in snapshot with '<signature>'
+localstack.testing.snapshots.transformer: Registering reference replacement for value: '1ad533b5-ac54-4354-a273-3ea885f0d59d' -> '<uuid:1>'
+localstack.testing.snapshots.transformer: Replacing JsonPath '$.json_encoded_delivery..MD5OfBody' in snapshot with '<md5-hash>'
+localstack.testing.snapshots.transformer: Replacing regex '000000000000' with '111111111111'
+localstack.testing.snapshots.transformer: Replacing regex 'us-east-1' with '<region>'
+localstack.testing.snapshots.transformer: Replacing '1ad533b5-ac54-4354-a273-3ea885f0d59d' in snapshot with '<uuid:1>'
+```
+
+### Test duration recording
+
+When a test runs successfully against AWS, its last validation date and duration are recorded in a corresponding `*.validation.json` file.
+The validation date is recorded precisely, while test durations can vary between runs.
+For example, test setup time may differ depending on whether a test runs in isolation or as part of a class test suite with class-level fixtures.
+The recorded durations should be treated as approximate indicators of test execution time rather than precise measurements.
+The goal of duration recording is to give _an idea_ about execution times.
+If no duration is present in the validation file, it means the test has not been re-validated against AWS since duration recording was implemented.
diff --git a/docs/testing/terraform-tests/README.md b/docs/testing/terraform-tests/README.md
new file mode 100644
index 0000000000000..1a79f76ac51c0
--- /dev/null
+++ b/docs/testing/terraform-tests/README.md
@@ -0,0 +1,3 @@
+# Terraform test suite
+
+We regularly run the test suite of the Terraform AWS provider against LocalStack to test the compatibility of LocalStack with Terraform. To achieve that, we have a dedicated [GitHub action](https://github.com/localstack/localstack-terraform-test/blob/main/.github/workflows/main.yml) on [LocalStack](https://github.com/localstack/localstack), which executes the allow-listed set of tests of [hashicorp/terraform-provider-aws](https://github.com/hashicorp/terraform-provider-aws/).
diff --git a/docs/testing/test-types/README.md b/docs/testing/test-types/README.md
new file mode 100644
index 0000000000000..2cf9a8ca9a168
--- /dev/null
+++ b/docs/testing/test-types/README.md
@@ -0,0 +1,65 @@
+# Test Types
+
+In the LocalStack codebase, we differentiate between the following test types:
+
+- Unit tests
+- Acceptance tests
+- Integration tests
+
+Depending on the workflow and its trigger, not all of these tests are executed at once.
+For ordinary pushes to `master`, we only want to execute the unit and acceptance tests.
+On a regular schedule, however, we want to execute all tests to get as much coverage of our logic as possible.
+This differentiation also reflects what we expect from the different types of tests.
+
+## Unit tests
+
+As the name suggests, these tests are performed on smaller units of logic to check that they are sound and perform the operations they claim to.
+Such a unit is most often a helper function inside a larger procedure.
+These tests should complete their execution very quickly, so they never contain any interaction with infrastructure.
+If you need some kind of waiting mechanism in your unit test, you are most likely not writing a unit test.
+
+A good example of a unit test is `tests.unit.testing.testselection.test_matching.test_service_dependency_resolving_with_dependencies`.
+It tests whether an algorithm implemented inside a bigger implementation performs as expected.
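+
+As a hedged illustration, a unit test in this spirit exercises a small helper directly, with no infrastructure involved (using `canonical_json` from the utils modules mentioned in these docs; the key-order independence asserted here is an assumption about its behavior):
+
+```python
+from localstack.utils.json import canonical_json
+
+
+def test_canonical_json_ignores_key_order():
+    # canonical serialization should not depend on dict insertion order
+    assert canonical_json({"a": 1, "b": 2}) == canonical_json({"b": 2, "a": 1})
+```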
+
+## Acceptance tests
+
+We use acceptance tests to gain a quick understanding of whether the recently pushed commit to `master` fulfils minimally viable quality criteria.
+This means that these tests do not aim at maximum coverage but instead should test that the most important functionality works.
+In general, this covers the entire serving infrastructure and the main features of the most used services.
+
+As these tests are executed very often, we need them to be as stable, fast, and relevant as possible.
+We ensure this through the following criteria:
+
+- It shows some kind of real-world usage. This is usually a scenario or architectural pattern with multiple services.
+  - When composing these scenarios, the services should not overlap too much with already existing acceptance tests. We want to avoid redundancy where possible. At the same time, we want our primary services and typical use cases to be covered.
+  - Existing samples (from [our samples organization](https://github.com/localstack-samples)) might serve as a starting point for constructing such a scenario.
+    However, keep in mind that we want to use many interacting resources in these tests, so the samples might need further expansion.
+- It perfectly conforms to all the testing rules laid out [here](../README.md).
+- It does not contain long wait times (e.g., for resources to spin up).
+  The acceptance tests need to be fast.
+  Whether they are fast enough is evaluated on a case-by-case basis (e.g., depending on the amount of confidence they provide).
+- It is fully parallelizable.
+  If certain acceptance tests need to run together (e.g., in a scenario), they need to be added to the same test class.
+- The test needs to be perfectly stable and only fail because of real issues with the implementation under test.
+  - Should an acceptance test turn flaky, it will be skipped until it is fixed ([as we already state in our testing rules](../README.md)).
+- It needs to be validated against the targeted cloud provider if it is purely testing parity with that cloud provider.
+  - See [the documentation on parity tests for further information](../parity-testing/README.md).
+  - This effectively means that the test should not carry the markers `aws.unknown` or `needs_fixing`.
+
+Note that some criteria are still not concrete and will evolve over time.
+For cases where it is unclear whether a test fulfils a criterion, reviewers will need to decide whether it fits the general goals laid out here.
+With growing maturity, however, the criteria will become more concrete (and strict).
+
+The first acceptance test we added to our suite, which serves as an example, is `tests.aws.scenario.bookstore.test_bookstore.TestBookstoreApplication`.
+It implements an entire application involving multiple services and tests their interaction with each other.
+
+## Integration tests
+
+These tests are quite similar to the acceptance tests, but are less restrictive.
+Any acceptance test can be demoted to an integration test should it no longer satisfy the needs of the acceptance test suite.
+However, this does not mean that integration tests do not have quality requirements in their own right.
+Flaky integration tests can (and will) still be skipped until their flakiness is resolved.
+Also, they should still all conform to the testing rules.
+
+An example of a good integration test that could not be an acceptance test is `tests.aws.services.s3.test_s3.TestS3.test_object_with_slashes_in_key`.
+It tests a concrete feature of the S3 implementation while not being part of a scenario with other services.
diff --git a/localstack/aws/__init__.py b/localstack-core/localstack/aws/__init__.py similarity index 100% rename from localstack/aws/__init__.py rename to localstack-core/localstack/aws/__init__.py diff --git a/localstack-core/localstack/aws/accounts.py b/localstack-core/localstack/aws/accounts.py new file mode 100644 index 0000000000000..0308daf468209 --- /dev/null +++ b/localstack-core/localstack/aws/accounts.py @@ -0,0 +1,81 @@ +"""Functionality related to AWS Accounts""" + +import base64 +import binascii +import logging +import re + +from localstack import config +from localstack.constants import DEFAULT_AWS_ACCOUNT_ID + +LOG = logging.getLogger(__name__) + +# Account id offset for id extraction +# generated from int.from_bytes(base64.b32decode(b"QAAAAAAA"), byteorder="big") (user id 000000000000) +ACCOUNT_OFFSET = 549755813888 + +# Basically the base32 alphabet, for better access as constant here +AWS_ACCESS_KEY_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" + + +def extract_account_id_from_access_key_id(access_key_id: str) -> str: + """ + Extract account id from access key id + + Example: + "ASIAQAAAAAAAGMKEM7X5" => "000000000000" + "AKIARZPUZDIKGB2VALC4" => "123456789012" + :param access_key_id: Access key id. Must start with either ASIA or AKIA and has at least 20 characters + :return: Account ID (as string), 12 digits + """ + account_id_part = access_key_id[4:12] + # decode account id part + try: + account_id_part_int = int.from_bytes(base64.b32decode(account_id_part), byteorder="big") + except binascii.Error: + LOG.warning( + "Invalid Access Key Id format. Falling back to default id: %s", DEFAULT_AWS_ACCOUNT_ID + ) + return DEFAULT_AWS_ACCOUNT_ID + + account_id = 2 * (account_id_part_int - ACCOUNT_OFFSET) + try: + if AWS_ACCESS_KEY_ALPHABET.index(access_key_id[12]) >= 16: + account_id += 1 + except ValueError: + LOG.warning( + "Char at index 12 not from base32 alphabet. Falling back to default id: %s", + DEFAULT_AWS_ACCOUNT_ID, + ) + return DEFAULT_AWS_ACCOUNT_ID + if account_id < 0 or account_id > 999999999999: + LOG.warning( + "Extracted account id not between 000000000000 and 999999999999. Falling back to default id: %s", + DEFAULT_AWS_ACCOUNT_ID, + ) + return DEFAULT_AWS_ACCOUNT_ID + return f"{account_id:012}" + + +def get_account_id_from_access_key_id(access_key_id: str) -> str: + """Return the Account ID associated the Access Key ID.""" + + # If AWS_ACCESS_KEY_ID has a 12-digit integer value, use it as the account ID + if re.match(r"\d{12}", access_key_id): + return access_key_id + + elif len(access_key_id) >= 20: + if not config.PARITY_AWS_ACCESS_KEY_ID: + # If AWS_ACCESS_KEY_ID has production AWS credentials, ignore them + if access_key_id.startswith("ASIA") or access_key_id.startswith("AKIA"): + LOG.debug( + "Ignoring production AWS credentials provided to LocalStack. Falling back to default account ID." 
+ ) + + elif access_key_id.startswith("LSIA") or access_key_id.startswith("LKIA"): + return extract_account_id_from_access_key_id(access_key_id) + else: + if access_key_id.startswith("ASIA") or access_key_id.startswith("AKIA"): + return extract_account_id_from_access_key_id(access_key_id) + + return DEFAULT_AWS_ACCOUNT_ID diff --git a/localstack-core/localstack/aws/api/__init__.py b/localstack-core/localstack/aws/api/__init__.py new file mode 100644 index 0000000000000..ab4e6ce81b7f5 --- /dev/null +++ b/localstack-core/localstack/aws/api/__init__.py @@ -0,0 +1,17 @@ +from .core import ( + CommonServiceException, + RequestContext, + ServiceException, + ServiceRequest, + ServiceResponse, + handler, +) + +__all__ = [ + "RequestContext", + "ServiceException", + "CommonServiceException", + "ServiceRequest", + "ServiceResponse", + "handler", +] diff --git a/localstack-core/localstack/aws/api/acm/__init__.py b/localstack-core/localstack/aws/api/acm/__init__.py new file mode 100644 index 0000000000000..9971a0d3ab338 --- /dev/null +++ b/localstack-core/localstack/aws/api/acm/__init__.py @@ -0,0 +1,658 @@ +from datetime import datetime +from enum import StrEnum +from typing import List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +Arn = str +AvailabilityErrorMessage = str +CertificateBody = str +CertificateChain = str +DomainNameString = str +IdempotencyToken = str +MaxItems = int +NextToken = str +NullableBoolean = bool +PcaArn = str +PositiveInteger = int +PrivateKey = str +ServiceErrorMessage = str +String = str +TagKey = str +TagValue = str +ValidationExceptionMessage = str + + +class CertificateManagedBy(StrEnum): + CLOUDFRONT = "CLOUDFRONT" + + +class CertificateStatus(StrEnum): + PENDING_VALIDATION = "PENDING_VALIDATION" + ISSUED = "ISSUED" + INACTIVE = "INACTIVE" + EXPIRED = "EXPIRED" + VALIDATION_TIMED_OUT = "VALIDATION_TIMED_OUT" + REVOKED = "REVOKED" + FAILED = "FAILED" + + +class CertificateTransparencyLoggingPreference(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class CertificateType(StrEnum): + IMPORTED = "IMPORTED" + AMAZON_ISSUED = "AMAZON_ISSUED" + PRIVATE = "PRIVATE" + + +class DomainStatus(StrEnum): + PENDING_VALIDATION = "PENDING_VALIDATION" + SUCCESS = "SUCCESS" + FAILED = "FAILED" + + +class ExtendedKeyUsageName(StrEnum): + TLS_WEB_SERVER_AUTHENTICATION = "TLS_WEB_SERVER_AUTHENTICATION" + TLS_WEB_CLIENT_AUTHENTICATION = "TLS_WEB_CLIENT_AUTHENTICATION" + CODE_SIGNING = "CODE_SIGNING" + EMAIL_PROTECTION = "EMAIL_PROTECTION" + TIME_STAMPING = "TIME_STAMPING" + OCSP_SIGNING = "OCSP_SIGNING" + IPSEC_END_SYSTEM = "IPSEC_END_SYSTEM" + IPSEC_TUNNEL = "IPSEC_TUNNEL" + IPSEC_USER = "IPSEC_USER" + ANY = "ANY" + NONE = "NONE" + CUSTOM = "CUSTOM" + + +class FailureReason(StrEnum): + NO_AVAILABLE_CONTACTS = "NO_AVAILABLE_CONTACTS" + ADDITIONAL_VERIFICATION_REQUIRED = "ADDITIONAL_VERIFICATION_REQUIRED" + DOMAIN_NOT_ALLOWED = "DOMAIN_NOT_ALLOWED" + INVALID_PUBLIC_DOMAIN = "INVALID_PUBLIC_DOMAIN" + DOMAIN_VALIDATION_DENIED = "DOMAIN_VALIDATION_DENIED" + CAA_ERROR = "CAA_ERROR" + PCA_LIMIT_EXCEEDED = "PCA_LIMIT_EXCEEDED" + PCA_INVALID_ARN = "PCA_INVALID_ARN" + PCA_INVALID_STATE = "PCA_INVALID_STATE" + PCA_REQUEST_FAILED = "PCA_REQUEST_FAILED" + PCA_NAME_CONSTRAINTS_VALIDATION = "PCA_NAME_CONSTRAINTS_VALIDATION" + PCA_RESOURCE_NOT_FOUND = "PCA_RESOURCE_NOT_FOUND" + PCA_INVALID_ARGS = "PCA_INVALID_ARGS" + PCA_INVALID_DURATION = "PCA_INVALID_DURATION" + PCA_ACCESS_DENIED = "PCA_ACCESS_DENIED" + SLR_NOT_FOUND = 
"SLR_NOT_FOUND" + OTHER = "OTHER" + + +class KeyAlgorithm(StrEnum): + RSA_1024 = "RSA_1024" + RSA_2048 = "RSA_2048" + RSA_3072 = "RSA_3072" + RSA_4096 = "RSA_4096" + EC_prime256v1 = "EC_prime256v1" + EC_secp384r1 = "EC_secp384r1" + EC_secp521r1 = "EC_secp521r1" + + +class KeyUsageName(StrEnum): + DIGITAL_SIGNATURE = "DIGITAL_SIGNATURE" + NON_REPUDIATION = "NON_REPUDIATION" + KEY_ENCIPHERMENT = "KEY_ENCIPHERMENT" + DATA_ENCIPHERMENT = "DATA_ENCIPHERMENT" + KEY_AGREEMENT = "KEY_AGREEMENT" + CERTIFICATE_SIGNING = "CERTIFICATE_SIGNING" + CRL_SIGNING = "CRL_SIGNING" + ENCIPHER_ONLY = "ENCIPHER_ONLY" + DECIPHER_ONLY = "DECIPHER_ONLY" + ANY = "ANY" + CUSTOM = "CUSTOM" + + +class RecordType(StrEnum): + CNAME = "CNAME" + + +class RenewalEligibility(StrEnum): + ELIGIBLE = "ELIGIBLE" + INELIGIBLE = "INELIGIBLE" + + +class RenewalStatus(StrEnum): + PENDING_AUTO_RENEWAL = "PENDING_AUTO_RENEWAL" + PENDING_VALIDATION = "PENDING_VALIDATION" + SUCCESS = "SUCCESS" + FAILED = "FAILED" + + +class RevocationReason(StrEnum): + UNSPECIFIED = "UNSPECIFIED" + KEY_COMPROMISE = "KEY_COMPROMISE" + CA_COMPROMISE = "CA_COMPROMISE" + AFFILIATION_CHANGED = "AFFILIATION_CHANGED" + SUPERCEDED = "SUPERCEDED" + SUPERSEDED = "SUPERSEDED" + CESSATION_OF_OPERATION = "CESSATION_OF_OPERATION" + CERTIFICATE_HOLD = "CERTIFICATE_HOLD" + REMOVE_FROM_CRL = "REMOVE_FROM_CRL" + PRIVILEGE_WITHDRAWN = "PRIVILEGE_WITHDRAWN" + A_A_COMPROMISE = "A_A_COMPROMISE" + + +class SortBy(StrEnum): + CREATED_AT = "CREATED_AT" + + +class SortOrder(StrEnum): + ASCENDING = "ASCENDING" + DESCENDING = "DESCENDING" + + +class ValidationMethod(StrEnum): + EMAIL = "EMAIL" + DNS = "DNS" + HTTP = "HTTP" + + +class AccessDeniedException(ServiceException): + code: str = "AccessDeniedException" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidArgsException(ServiceException): + code: str = "InvalidArgsException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidArnException(ServiceException): + code: str = "InvalidArnException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDomainValidationOptionsException(ServiceException): + code: str = "InvalidDomainValidationOptionsException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidParameterException(ServiceException): + code: str = "InvalidParameterException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidStateException(ServiceException): + code: str = "InvalidStateException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTagException(ServiceException): + code: str = "InvalidTagException" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class RequestInProgressException(ServiceException): + code: str = "RequestInProgressException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceInUseException(ServiceException): + code: str = "ResourceInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class TagPolicyException(ServiceException): + code: str = "TagPolicyException" + sender_fault: bool = False + 
status_code: int = 400 + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTagsException(ServiceException): + code: str = "TooManyTagsException" + sender_fault: bool = False + status_code: int = 400 + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = False + status_code: int = 400 + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: Optional[TagValue] + + +TagList = List[Tag] + + +class AddTagsToCertificateRequest(ServiceRequest): + CertificateArn: Arn + Tags: TagList + + +CertificateBodyBlob = bytes +CertificateChainBlob = bytes + + +class CertificateOptions(TypedDict, total=False): + CertificateTransparencyLoggingPreference: Optional[CertificateTransparencyLoggingPreference] + + +class ExtendedKeyUsage(TypedDict, total=False): + Name: Optional[ExtendedKeyUsageName] + OID: Optional[String] + + +ExtendedKeyUsageList = List[ExtendedKeyUsage] + + +class KeyUsage(TypedDict, total=False): + Name: Optional[KeyUsageName] + + +KeyUsageList = List[KeyUsage] +TStamp = datetime + + +class HttpRedirect(TypedDict, total=False): + RedirectFrom: Optional[String] + RedirectTo: Optional[String] + + +class ResourceRecord(TypedDict, total=False): + Name: String + Type: RecordType + Value: String + + +ValidationEmailList = List[String] + + +class DomainValidation(TypedDict, total=False): + DomainName: DomainNameString + ValidationEmails: Optional[ValidationEmailList] + ValidationDomain: Optional[DomainNameString] + ValidationStatus: Optional[DomainStatus] + ResourceRecord: Optional[ResourceRecord] + HttpRedirect: Optional[HttpRedirect] + ValidationMethod: Optional[ValidationMethod] + + +DomainValidationList = List[DomainValidation] + + +class RenewalSummary(TypedDict, total=False): + RenewalStatus: RenewalStatus + DomainValidationOptions: DomainValidationList + RenewalStatusReason: Optional[FailureReason] + UpdatedAt: TStamp + + +InUseList = List[String] +DomainList = List[DomainNameString] + + +class CertificateDetail(TypedDict, total=False): + CertificateArn: Optional[Arn] + DomainName: Optional[DomainNameString] + SubjectAlternativeNames: Optional[DomainList] + ManagedBy: Optional[CertificateManagedBy] + DomainValidationOptions: Optional[DomainValidationList] + Serial: Optional[String] + Subject: Optional[String] + Issuer: Optional[String] + CreatedAt: Optional[TStamp] + IssuedAt: Optional[TStamp] + ImportedAt: Optional[TStamp] + Status: Optional[CertificateStatus] + RevokedAt: Optional[TStamp] + RevocationReason: Optional[RevocationReason] + NotBefore: Optional[TStamp] + NotAfter: Optional[TStamp] + KeyAlgorithm: Optional[KeyAlgorithm] + SignatureAlgorithm: Optional[String] + InUseBy: Optional[InUseList] + FailureReason: Optional[FailureReason] + Type: Optional[CertificateType] + RenewalSummary: Optional[RenewalSummary] + KeyUsages: Optional[KeyUsageList] + ExtendedKeyUsages: Optional[ExtendedKeyUsageList] + CertificateAuthorityArn: Optional[Arn] + RenewalEligibility: Optional[RenewalEligibility] + Options: Optional[CertificateOptions] + + +CertificateStatuses = List[CertificateStatus] +ExtendedKeyUsageNames = List[ExtendedKeyUsageName] +KeyUsageNames = List[KeyUsageName] + + +class CertificateSummary(TypedDict, total=False): + CertificateArn: Optional[Arn] + DomainName: Optional[DomainNameString] + SubjectAlternativeNameSummaries: Optional[DomainList] + HasAdditionalSubjectAlternativeNames: Optional[NullableBoolean] + Status: 
Optional[CertificateStatus] + Type: Optional[CertificateType] + KeyAlgorithm: Optional[KeyAlgorithm] + KeyUsages: Optional[KeyUsageNames] + ExtendedKeyUsages: Optional[ExtendedKeyUsageNames] + InUse: Optional[NullableBoolean] + Exported: Optional[NullableBoolean] + RenewalEligibility: Optional[RenewalEligibility] + NotBefore: Optional[TStamp] + NotAfter: Optional[TStamp] + CreatedAt: Optional[TStamp] + IssuedAt: Optional[TStamp] + ImportedAt: Optional[TStamp] + RevokedAt: Optional[TStamp] + ManagedBy: Optional[CertificateManagedBy] + + +CertificateSummaryList = List[CertificateSummary] + + +class DeleteCertificateRequest(ServiceRequest): + CertificateArn: Arn + + +class DescribeCertificateRequest(ServiceRequest): + CertificateArn: Arn + + +class DescribeCertificateResponse(TypedDict, total=False): + Certificate: Optional[CertificateDetail] + + +class DomainValidationOption(TypedDict, total=False): + DomainName: DomainNameString + ValidationDomain: DomainNameString + + +DomainValidationOptionList = List[DomainValidationOption] + + +class ExpiryEventsConfiguration(TypedDict, total=False): + DaysBeforeExpiry: Optional[PositiveInteger] + + +PassphraseBlob = bytes + + +class ExportCertificateRequest(ServiceRequest): + CertificateArn: Arn + Passphrase: PassphraseBlob + + +class ExportCertificateResponse(TypedDict, total=False): + Certificate: Optional[CertificateBody] + CertificateChain: Optional[CertificateChain] + PrivateKey: Optional[PrivateKey] + + +ExtendedKeyUsageFilterList = List[ExtendedKeyUsageName] +KeyAlgorithmList = List[KeyAlgorithm] +KeyUsageFilterList = List[KeyUsageName] + + +class Filters(TypedDict, total=False): + extendedKeyUsage: Optional[ExtendedKeyUsageFilterList] + keyUsage: Optional[KeyUsageFilterList] + keyTypes: Optional[KeyAlgorithmList] + managedBy: Optional[CertificateManagedBy] + + +class GetAccountConfigurationResponse(TypedDict, total=False): + ExpiryEvents: Optional[ExpiryEventsConfiguration] + + +class GetCertificateRequest(ServiceRequest): + CertificateArn: Arn + + +class GetCertificateResponse(TypedDict, total=False): + Certificate: Optional[CertificateBody] + CertificateChain: Optional[CertificateChain] + + +PrivateKeyBlob = bytes + + +class ImportCertificateRequest(ServiceRequest): + CertificateArn: Optional[Arn] + Certificate: CertificateBodyBlob + PrivateKey: PrivateKeyBlob + CertificateChain: Optional[CertificateChainBlob] + Tags: Optional[TagList] + + +class ImportCertificateResponse(TypedDict, total=False): + CertificateArn: Optional[Arn] + + +class ListCertificatesRequest(ServiceRequest): + CertificateStatuses: Optional[CertificateStatuses] + Includes: Optional[Filters] + NextToken: Optional[NextToken] + MaxItems: Optional[MaxItems] + SortBy: Optional[SortBy] + SortOrder: Optional[SortOrder] + + +class ListCertificatesResponse(TypedDict, total=False): + NextToken: Optional[NextToken] + CertificateSummaryList: Optional[CertificateSummaryList] + + +class ListTagsForCertificateRequest(ServiceRequest): + CertificateArn: Arn + + +class ListTagsForCertificateResponse(TypedDict, total=False): + Tags: Optional[TagList] + + +class PutAccountConfigurationRequest(ServiceRequest): + ExpiryEvents: Optional[ExpiryEventsConfiguration] + IdempotencyToken: IdempotencyToken + + +class RemoveTagsFromCertificateRequest(ServiceRequest): + CertificateArn: Arn + Tags: TagList + + +class RenewCertificateRequest(ServiceRequest): + CertificateArn: Arn + + +class RequestCertificateRequest(ServiceRequest): + DomainName: DomainNameString + ValidationMethod: 
Optional[ValidationMethod] + SubjectAlternativeNames: Optional[DomainList] + IdempotencyToken: Optional[IdempotencyToken] + DomainValidationOptions: Optional[DomainValidationOptionList] + Options: Optional[CertificateOptions] + CertificateAuthorityArn: Optional[PcaArn] + Tags: Optional[TagList] + KeyAlgorithm: Optional[KeyAlgorithm] + ManagedBy: Optional[CertificateManagedBy] + + +class RequestCertificateResponse(TypedDict, total=False): + CertificateArn: Optional[Arn] + + +class ResendValidationEmailRequest(ServiceRequest): + CertificateArn: Arn + Domain: DomainNameString + ValidationDomain: DomainNameString + + +class UpdateCertificateOptionsRequest(ServiceRequest): + CertificateArn: Arn + Options: CertificateOptions + + +class AcmApi: + service = "acm" + version = "2015-12-08" + + @handler("AddTagsToCertificate") + def add_tags_to_certificate( + self, context: RequestContext, certificate_arn: Arn, tags: TagList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteCertificate") + def delete_certificate(self, context: RequestContext, certificate_arn: Arn, **kwargs) -> None: + raise NotImplementedError + + @handler("DescribeCertificate") + def describe_certificate( + self, context: RequestContext, certificate_arn: Arn, **kwargs + ) -> DescribeCertificateResponse: + raise NotImplementedError + + @handler("ExportCertificate") + def export_certificate( + self, context: RequestContext, certificate_arn: Arn, passphrase: PassphraseBlob, **kwargs + ) -> ExportCertificateResponse: + raise NotImplementedError + + @handler("GetAccountConfiguration") + def get_account_configuration( + self, context: RequestContext, **kwargs + ) -> GetAccountConfigurationResponse: + raise NotImplementedError + + @handler("GetCertificate") + def get_certificate( + self, context: RequestContext, certificate_arn: Arn, **kwargs + ) -> GetCertificateResponse: + raise NotImplementedError + + @handler("ImportCertificate") + def import_certificate( + self, + context: RequestContext, + certificate: CertificateBodyBlob, + private_key: PrivateKeyBlob, + certificate_arn: Arn | None = None, + certificate_chain: CertificateChainBlob | None = None, + tags: TagList | None = None, + **kwargs, + ) -> ImportCertificateResponse: + raise NotImplementedError + + @handler("ListCertificates") + def list_certificates( + self, + context: RequestContext, + certificate_statuses: CertificateStatuses | None = None, + includes: Filters | None = None, + next_token: NextToken | None = None, + max_items: MaxItems | None = None, + sort_by: SortBy | None = None, + sort_order: SortOrder | None = None, + **kwargs, + ) -> ListCertificatesResponse: + raise NotImplementedError + + @handler("ListTagsForCertificate") + def list_tags_for_certificate( + self, context: RequestContext, certificate_arn: Arn, **kwargs + ) -> ListTagsForCertificateResponse: + raise NotImplementedError + + @handler("PutAccountConfiguration") + def put_account_configuration( + self, + context: RequestContext, + idempotency_token: IdempotencyToken, + expiry_events: ExpiryEventsConfiguration | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RemoveTagsFromCertificate") + def remove_tags_from_certificate( + self, context: RequestContext, certificate_arn: Arn, tags: TagList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("RenewCertificate") + def renew_certificate(self, context: RequestContext, certificate_arn: Arn, **kwargs) -> None: + raise NotImplementedError + + @handler("RequestCertificate") + def request_certificate( + 
self, + context: RequestContext, + domain_name: DomainNameString, + validation_method: ValidationMethod | None = None, + subject_alternative_names: DomainList | None = None, + idempotency_token: IdempotencyToken | None = None, + domain_validation_options: DomainValidationOptionList | None = None, + options: CertificateOptions | None = None, + certificate_authority_arn: PcaArn | None = None, + tags: TagList | None = None, + key_algorithm: KeyAlgorithm | None = None, + managed_by: CertificateManagedBy | None = None, + **kwargs, + ) -> RequestCertificateResponse: + raise NotImplementedError + + @handler("ResendValidationEmail") + def resend_validation_email( + self, + context: RequestContext, + certificate_arn: Arn, + domain: DomainNameString, + validation_domain: DomainNameString, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("UpdateCertificateOptions") + def update_certificate_options( + self, context: RequestContext, certificate_arn: Arn, options: CertificateOptions, **kwargs + ) -> None: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/apigateway/__init__.py b/localstack-core/localstack/aws/api/apigateway/__init__.py new file mode 100644 index 0000000000000..b23bd9969aa31 --- /dev/null +++ b/localstack-core/localstack/aws/api/apigateway/__init__.py @@ -0,0 +1,2929 @@ +from datetime import datetime +from enum import StrEnum +from typing import IO, Dict, Iterable, List, Optional, TypedDict, Union + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +Boolean = bool +DocumentationPartLocationStatusCode = str +Double = float +Integer = int +NullableBoolean = bool +NullableInteger = int +ProviderARN = str +StatusCode = str +String = str + + +class AccessAssociationSourceType(StrEnum): + VPCE = "VPCE" + + +class ApiKeySourceType(StrEnum): + HEADER = "HEADER" + AUTHORIZER = "AUTHORIZER" + + +class ApiKeysFormat(StrEnum): + csv = "csv" + + +class AuthorizerType(StrEnum): + TOKEN = "TOKEN" + REQUEST = "REQUEST" + COGNITO_USER_POOLS = "COGNITO_USER_POOLS" + + +class CacheClusterSize(StrEnum): + i_0_5 = "0.5" + i_1_6 = "1.6" + i_6_1 = "6.1" + i_13_5 = "13.5" + i_28_4 = "28.4" + i_58_2 = "58.2" + i_118 = "118" + i_237 = "237" + + +class CacheClusterStatus(StrEnum): + CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS" + AVAILABLE = "AVAILABLE" + DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS" + NOT_AVAILABLE = "NOT_AVAILABLE" + FLUSH_IN_PROGRESS = "FLUSH_IN_PROGRESS" + + +class ConnectionType(StrEnum): + INTERNET = "INTERNET" + VPC_LINK = "VPC_LINK" + + +class ContentHandlingStrategy(StrEnum): + CONVERT_TO_BINARY = "CONVERT_TO_BINARY" + CONVERT_TO_TEXT = "CONVERT_TO_TEXT" + + +class DocumentationPartType(StrEnum): + API = "API" + AUTHORIZER = "AUTHORIZER" + MODEL = "MODEL" + RESOURCE = "RESOURCE" + METHOD = "METHOD" + PATH_PARAMETER = "PATH_PARAMETER" + QUERY_PARAMETER = "QUERY_PARAMETER" + REQUEST_HEADER = "REQUEST_HEADER" + REQUEST_BODY = "REQUEST_BODY" + RESPONSE = "RESPONSE" + RESPONSE_HEADER = "RESPONSE_HEADER" + RESPONSE_BODY = "RESPONSE_BODY" + + +class DomainNameStatus(StrEnum): + AVAILABLE = "AVAILABLE" + UPDATING = "UPDATING" + PENDING = "PENDING" + PENDING_CERTIFICATE_REIMPORT = "PENDING_CERTIFICATE_REIMPORT" + PENDING_OWNERSHIP_VERIFICATION = "PENDING_OWNERSHIP_VERIFICATION" + + +class EndpointType(StrEnum): + REGIONAL = "REGIONAL" + EDGE = "EDGE" + PRIVATE = "PRIVATE" + + +class GatewayResponseType(StrEnum): + DEFAULT_4XX = "DEFAULT_4XX" + DEFAULT_5XX = "DEFAULT_5XX" + RESOURCE_NOT_FOUND = "RESOURCE_NOT_FOUND" + 
UNAUTHORIZED = "UNAUTHORIZED" + INVALID_API_KEY = "INVALID_API_KEY" + ACCESS_DENIED = "ACCESS_DENIED" + AUTHORIZER_FAILURE = "AUTHORIZER_FAILURE" + AUTHORIZER_CONFIGURATION_ERROR = "AUTHORIZER_CONFIGURATION_ERROR" + INVALID_SIGNATURE = "INVALID_SIGNATURE" + EXPIRED_TOKEN = "EXPIRED_TOKEN" + MISSING_AUTHENTICATION_TOKEN = "MISSING_AUTHENTICATION_TOKEN" + INTEGRATION_FAILURE = "INTEGRATION_FAILURE" + INTEGRATION_TIMEOUT = "INTEGRATION_TIMEOUT" + API_CONFIGURATION_ERROR = "API_CONFIGURATION_ERROR" + UNSUPPORTED_MEDIA_TYPE = "UNSUPPORTED_MEDIA_TYPE" + BAD_REQUEST_PARAMETERS = "BAD_REQUEST_PARAMETERS" + BAD_REQUEST_BODY = "BAD_REQUEST_BODY" + REQUEST_TOO_LARGE = "REQUEST_TOO_LARGE" + THROTTLED = "THROTTLED" + QUOTA_EXCEEDED = "QUOTA_EXCEEDED" + WAF_FILTERED = "WAF_FILTERED" + + +class IntegrationType(StrEnum): + HTTP = "HTTP" + AWS = "AWS" + MOCK = "MOCK" + HTTP_PROXY = "HTTP_PROXY" + AWS_PROXY = "AWS_PROXY" + + +class IpAddressType(StrEnum): + ipv4 = "ipv4" + dualstack = "dualstack" + + +class LocationStatusType(StrEnum): + DOCUMENTED = "DOCUMENTED" + UNDOCUMENTED = "UNDOCUMENTED" + + +class Op(StrEnum): + add = "add" + remove = "remove" + replace = "replace" + move = "move" + copy = "copy" + test = "test" + + +class PutMode(StrEnum): + merge = "merge" + overwrite = "overwrite" + + +class QuotaPeriodType(StrEnum): + DAY = "DAY" + WEEK = "WEEK" + MONTH = "MONTH" + + +class ResourceOwner(StrEnum): + SELF = "SELF" + OTHER_ACCOUNTS = "OTHER_ACCOUNTS" + + +class SecurityPolicy(StrEnum): + TLS_1_0 = "TLS_1_0" + TLS_1_2 = "TLS_1_2" + + +class UnauthorizedCacheControlHeaderStrategy(StrEnum): + FAIL_WITH_403 = "FAIL_WITH_403" + SUCCEED_WITH_RESPONSE_HEADER = "SUCCEED_WITH_RESPONSE_HEADER" + SUCCEED_WITHOUT_RESPONSE_HEADER = "SUCCEED_WITHOUT_RESPONSE_HEADER" + + +class VpcLinkStatus(StrEnum): + AVAILABLE = "AVAILABLE" + PENDING = "PENDING" + DELETING = "DELETING" + FAILED = "FAILED" + + +class BadRequestException(ServiceException): + code: str = "BadRequestException" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = False + status_code: int = 409 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 429 + retryAfterSeconds: Optional[String] + + +class NotFoundException(ServiceException): + code: str = "NotFoundException" + sender_fault: bool = False + status_code: int = 404 + + +class ServiceUnavailableException(ServiceException): + code: str = "ServiceUnavailableException" + sender_fault: bool = False + status_code: int = 503 + retryAfterSeconds: Optional[String] + + +class TooManyRequestsException(ServiceException): + code: str = "TooManyRequestsException" + sender_fault: bool = False + status_code: int = 429 + retryAfterSeconds: Optional[String] + + +class UnauthorizedException(ServiceException): + code: str = "UnauthorizedException" + sender_fault: bool = False + status_code: int = 401 + + +class AccessLogSettings(TypedDict, total=False): + format: Optional[String] + destinationArn: Optional[String] + + +ListOfString = List[String] + + +class ThrottleSettings(TypedDict, total=False): + burstLimit: Optional[Integer] + rateLimit: Optional[Double] + + +class Account(TypedDict, total=False): + cloudwatchRoleArn: Optional[String] + throttleSettings: Optional[ThrottleSettings] + features: Optional[ListOfString] + apiKeyVersion: Optional[String] + + +MapOfStringToString = Dict[String, String] +Timestamp = 
datetime + + +class ApiKey(TypedDict, total=False): + id: Optional[String] + value: Optional[String] + name: Optional[String] + customerId: Optional[String] + description: Optional[String] + enabled: Optional[Boolean] + createdDate: Optional[Timestamp] + lastUpdatedDate: Optional[Timestamp] + stageKeys: Optional[ListOfString] + tags: Optional[MapOfStringToString] + + +class ApiKeyIds(TypedDict, total=False): + ids: Optional[ListOfString] + warnings: Optional[ListOfString] + + +ListOfApiKey = List[ApiKey] + + +class ApiKeys(TypedDict, total=False): + warnings: Optional[ListOfString] + position: Optional[String] + items: Optional[ListOfApiKey] + + +MapOfApiStageThrottleSettings = Dict[String, ThrottleSettings] + + +class ApiStage(TypedDict, total=False): + apiId: Optional[String] + stage: Optional[String] + throttle: Optional[MapOfApiStageThrottleSettings] + + +ListOfARNs = List[ProviderARN] +Authorizer = TypedDict( + "Authorizer", + { + "id": Optional[String], + "name": Optional[String], + "type": Optional[AuthorizerType], + "providerARNs": Optional[ListOfARNs], + "authType": Optional[String], + "authorizerUri": Optional[String], + "authorizerCredentials": Optional[String], + "identitySource": Optional[String], + "identityValidationExpression": Optional[String], + "authorizerResultTtlInSeconds": Optional[NullableInteger], + }, + total=False, +) +ListOfAuthorizer = List[Authorizer] + + +class Authorizers(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfAuthorizer] + + +class BasePathMapping(TypedDict, total=False): + basePath: Optional[String] + restApiId: Optional[String] + stage: Optional[String] + + +ListOfBasePathMapping = List[BasePathMapping] + + +class BasePathMappings(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfBasePathMapping] + + +Blob = bytes + + +class CanarySettings(TypedDict, total=False): + percentTraffic: Optional[Double] + deploymentId: Optional[String] + stageVariableOverrides: Optional[MapOfStringToString] + useStageCache: Optional[Boolean] + + +class ClientCertificate(TypedDict, total=False): + clientCertificateId: Optional[String] + description: Optional[String] + pemEncodedCertificate: Optional[String] + createdDate: Optional[Timestamp] + expirationDate: Optional[Timestamp] + tags: Optional[MapOfStringToString] + + +ListOfClientCertificate = List[ClientCertificate] + + +class ClientCertificates(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfClientCertificate] + + +class StageKey(TypedDict, total=False): + restApiId: Optional[String] + stageName: Optional[String] + + +ListOfStageKeys = List[StageKey] + + +class CreateApiKeyRequest(ServiceRequest): + name: Optional[String] + description: Optional[String] + enabled: Optional[Boolean] + generateDistinctId: Optional[Boolean] + value: Optional[String] + stageKeys: Optional[ListOfStageKeys] + customerId: Optional[String] + tags: Optional[MapOfStringToString] + + +CreateAuthorizerRequest = TypedDict( + "CreateAuthorizerRequest", + { + "restApiId": String, + "name": String, + "type": AuthorizerType, + "providerARNs": Optional[ListOfARNs], + "authType": Optional[String], + "authorizerUri": Optional[String], + "authorizerCredentials": Optional[String], + "identitySource": Optional[String], + "identityValidationExpression": Optional[String], + "authorizerResultTtlInSeconds": Optional[NullableInteger], + }, + total=False, +) + + +class CreateBasePathMappingRequest(ServiceRequest): + domainName: String + domainNameId: Optional[String] + 
basePath: Optional[String] + restApiId: String + stage: Optional[String] + + +class DeploymentCanarySettings(TypedDict, total=False): + percentTraffic: Optional[Double] + stageVariableOverrides: Optional[MapOfStringToString] + useStageCache: Optional[Boolean] + + +class CreateDeploymentRequest(ServiceRequest): + restApiId: String + stageName: Optional[String] + stageDescription: Optional[String] + description: Optional[String] + cacheClusterEnabled: Optional[NullableBoolean] + cacheClusterSize: Optional[CacheClusterSize] + variables: Optional[MapOfStringToString] + canarySettings: Optional[DeploymentCanarySettings] + tracingEnabled: Optional[NullableBoolean] + + +DocumentationPartLocation = TypedDict( + "DocumentationPartLocation", + { + "type": DocumentationPartType, + "path": Optional[String], + "method": Optional[String], + "statusCode": Optional[DocumentationPartLocationStatusCode], + "name": Optional[String], + }, + total=False, +) + + +class CreateDocumentationPartRequest(ServiceRequest): + restApiId: String + location: DocumentationPartLocation + properties: String + + +class CreateDocumentationVersionRequest(ServiceRequest): + restApiId: String + documentationVersion: String + stageName: Optional[String] + description: Optional[String] + + +class CreateDomainNameAccessAssociationRequest(ServiceRequest): + domainNameArn: String + accessAssociationSourceType: AccessAssociationSourceType + accessAssociationSource: String + tags: Optional[MapOfStringToString] + + +class MutualTlsAuthenticationInput(TypedDict, total=False): + truststoreUri: Optional[String] + truststoreVersion: Optional[String] + + +ListOfEndpointType = List[EndpointType] + + +class EndpointConfiguration(TypedDict, total=False): + types: Optional[ListOfEndpointType] + ipAddressType: Optional[IpAddressType] + vpcEndpointIds: Optional[ListOfString] + + +class CreateDomainNameRequest(ServiceRequest): + domainName: String + certificateName: Optional[String] + certificateBody: Optional[String] + certificatePrivateKey: Optional[String] + certificateChain: Optional[String] + certificateArn: Optional[String] + regionalCertificateName: Optional[String] + regionalCertificateArn: Optional[String] + endpointConfiguration: Optional[EndpointConfiguration] + tags: Optional[MapOfStringToString] + securityPolicy: Optional[SecurityPolicy] + mutualTlsAuthentication: Optional[MutualTlsAuthenticationInput] + ownershipVerificationCertificateArn: Optional[String] + policy: Optional[String] + + +class CreateModelRequest(ServiceRequest): + restApiId: String + name: String + description: Optional[String] + schema: Optional[String] + contentType: String + + +class CreateRequestValidatorRequest(ServiceRequest): + restApiId: String + name: Optional[String] + validateRequestBody: Optional[Boolean] + validateRequestParameters: Optional[Boolean] + + +class CreateResourceRequest(ServiceRequest): + restApiId: String + parentId: String + pathPart: String + + +class CreateRestApiRequest(ServiceRequest): + name: String + description: Optional[String] + version: Optional[String] + cloneFrom: Optional[String] + binaryMediaTypes: Optional[ListOfString] + minimumCompressionSize: Optional[NullableInteger] + apiKeySource: Optional[ApiKeySourceType] + endpointConfiguration: Optional[EndpointConfiguration] + policy: Optional[String] + tags: Optional[MapOfStringToString] + disableExecuteApiEndpoint: Optional[Boolean] + + +class CreateStageRequest(ServiceRequest): + restApiId: String + stageName: String + deploymentId: String + description: Optional[String] + 
cacheClusterEnabled: Optional[Boolean] + cacheClusterSize: Optional[CacheClusterSize] + variables: Optional[MapOfStringToString] + documentationVersion: Optional[String] + canarySettings: Optional[CanarySettings] + tracingEnabled: Optional[Boolean] + tags: Optional[MapOfStringToString] + + +class CreateUsagePlanKeyRequest(ServiceRequest): + usagePlanId: String + keyId: String + keyType: String + + +class QuotaSettings(TypedDict, total=False): + limit: Optional[Integer] + offset: Optional[Integer] + period: Optional[QuotaPeriodType] + + +ListOfApiStage = List[ApiStage] + + +class CreateUsagePlanRequest(ServiceRequest): + name: String + description: Optional[String] + apiStages: Optional[ListOfApiStage] + throttle: Optional[ThrottleSettings] + quota: Optional[QuotaSettings] + tags: Optional[MapOfStringToString] + + +class CreateVpcLinkRequest(ServiceRequest): + name: String + description: Optional[String] + targetArns: ListOfString + tags: Optional[MapOfStringToString] + + +class DeleteApiKeyRequest(ServiceRequest): + apiKey: String + + +class DeleteAuthorizerRequest(ServiceRequest): + restApiId: String + authorizerId: String + + +class DeleteBasePathMappingRequest(ServiceRequest): + domainName: String + domainNameId: Optional[String] + basePath: String + + +class DeleteClientCertificateRequest(ServiceRequest): + clientCertificateId: String + + +class DeleteDeploymentRequest(ServiceRequest): + restApiId: String + deploymentId: String + + +class DeleteDocumentationPartRequest(ServiceRequest): + restApiId: String + documentationPartId: String + + +class DeleteDocumentationVersionRequest(ServiceRequest): + restApiId: String + documentationVersion: String + + +class DeleteDomainNameAccessAssociationRequest(ServiceRequest): + domainNameAccessAssociationArn: String + + +class DeleteDomainNameRequest(ServiceRequest): + domainName: String + domainNameId: Optional[String] + + +class DeleteGatewayResponseRequest(ServiceRequest): + restApiId: String + responseType: GatewayResponseType + + +class DeleteIntegrationRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + + +class DeleteIntegrationResponseRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + statusCode: StatusCode + + +class DeleteMethodRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + + +class DeleteMethodResponseRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + statusCode: StatusCode + + +class DeleteModelRequest(ServiceRequest): + restApiId: String + modelName: String + + +class DeleteRequestValidatorRequest(ServiceRequest): + restApiId: String + requestValidatorId: String + + +class DeleteResourceRequest(ServiceRequest): + restApiId: String + resourceId: String + + +class DeleteRestApiRequest(ServiceRequest): + restApiId: String + + +class DeleteStageRequest(ServiceRequest): + restApiId: String + stageName: String + + +class DeleteUsagePlanKeyRequest(ServiceRequest): + usagePlanId: String + keyId: String + + +class DeleteUsagePlanRequest(ServiceRequest): + usagePlanId: String + + +class DeleteVpcLinkRequest(ServiceRequest): + vpcLinkId: String + + +class MethodSnapshot(TypedDict, total=False): + authorizationType: Optional[String] + apiKeyRequired: Optional[Boolean] + + +MapOfMethodSnapshot = Dict[String, MethodSnapshot] +PathToMapOfMethodSnapshot = Dict[String, MapOfMethodSnapshot] + + +class Deployment(TypedDict, total=False): + id: Optional[String] + description: Optional[String] + 
createdDate: Optional[Timestamp] + apiSummary: Optional[PathToMapOfMethodSnapshot] + + +ListOfDeployment = List[Deployment] + + +class Deployments(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfDeployment] + + +class DocumentationPart(TypedDict, total=False): + id: Optional[String] + location: Optional[DocumentationPartLocation] + properties: Optional[String] + + +class DocumentationPartIds(TypedDict, total=False): + ids: Optional[ListOfString] + warnings: Optional[ListOfString] + + +ListOfDocumentationPart = List[DocumentationPart] + + +class DocumentationParts(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfDocumentationPart] + + +class DocumentationVersion(TypedDict, total=False): + version: Optional[String] + createdDate: Optional[Timestamp] + description: Optional[String] + + +ListOfDocumentationVersion = List[DocumentationVersion] + + +class DocumentationVersions(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfDocumentationVersion] + + +class MutualTlsAuthentication(TypedDict, total=False): + truststoreUri: Optional[String] + truststoreVersion: Optional[String] + truststoreWarnings: Optional[ListOfString] + + +class DomainName(TypedDict, total=False): + domainName: Optional[String] + domainNameId: Optional[String] + domainNameArn: Optional[String] + certificateName: Optional[String] + certificateArn: Optional[String] + certificateUploadDate: Optional[Timestamp] + regionalDomainName: Optional[String] + regionalHostedZoneId: Optional[String] + regionalCertificateName: Optional[String] + regionalCertificateArn: Optional[String] + distributionDomainName: Optional[String] + distributionHostedZoneId: Optional[String] + endpointConfiguration: Optional[EndpointConfiguration] + domainNameStatus: Optional[DomainNameStatus] + domainNameStatusMessage: Optional[String] + securityPolicy: Optional[SecurityPolicy] + tags: Optional[MapOfStringToString] + mutualTlsAuthentication: Optional[MutualTlsAuthentication] + ownershipVerificationCertificateArn: Optional[String] + managementPolicy: Optional[String] + policy: Optional[String] + + +class DomainNameAccessAssociation(TypedDict, total=False): + domainNameAccessAssociationArn: Optional[String] + domainNameArn: Optional[String] + accessAssociationSourceType: Optional[AccessAssociationSourceType] + accessAssociationSource: Optional[String] + tags: Optional[MapOfStringToString] + + +ListOfDomainNameAccessAssociation = List[DomainNameAccessAssociation] + + +class DomainNameAccessAssociations(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfDomainNameAccessAssociation] + + +ListOfDomainName = List[DomainName] + + +class DomainNames(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfDomainName] + + +class ExportResponse(TypedDict, total=False): + body: Optional[Union[Blob, IO[Blob], Iterable[Blob]]] + contentType: Optional[String] + contentDisposition: Optional[String] + + +class FlushStageAuthorizersCacheRequest(ServiceRequest): + restApiId: String + stageName: String + + +class FlushStageCacheRequest(ServiceRequest): + restApiId: String + stageName: String + + +class GatewayResponse(TypedDict, total=False): + responseType: Optional[GatewayResponseType] + statusCode: Optional[StatusCode] + responseParameters: Optional[MapOfStringToString] + responseTemplates: Optional[MapOfStringToString] + defaultResponse: Optional[Boolean] + + +ListOfGatewayResponse = List[GatewayResponse] + + +class 
GatewayResponses(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfGatewayResponse] + + +class GenerateClientCertificateRequest(ServiceRequest): + description: Optional[String] + tags: Optional[MapOfStringToString] + + +class GetAccountRequest(ServiceRequest): + pass + + +class GetApiKeyRequest(ServiceRequest): + apiKey: String + includeValue: Optional[NullableBoolean] + + +class GetApiKeysRequest(ServiceRequest): + position: Optional[String] + limit: Optional[NullableInteger] + nameQuery: Optional[String] + customerId: Optional[String] + includeValues: Optional[NullableBoolean] + + +class GetAuthorizerRequest(ServiceRequest): + restApiId: String + authorizerId: String + + +class GetAuthorizersRequest(ServiceRequest): + restApiId: String + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetBasePathMappingRequest(ServiceRequest): + domainName: String + domainNameId: Optional[String] + basePath: String + + +class GetBasePathMappingsRequest(ServiceRequest): + domainName: String + domainNameId: Optional[String] + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetClientCertificateRequest(ServiceRequest): + clientCertificateId: String + + +class GetClientCertificatesRequest(ServiceRequest): + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetDeploymentRequest(ServiceRequest): + restApiId: String + deploymentId: String + embed: Optional[ListOfString] + + +class GetDeploymentsRequest(ServiceRequest): + restApiId: String + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetDocumentationPartRequest(ServiceRequest): + restApiId: String + documentationPartId: String + + +GetDocumentationPartsRequest = TypedDict( + "GetDocumentationPartsRequest", + { + "restApiId": String, + "type": Optional[DocumentationPartType], + "nameQuery": Optional[String], + "path": Optional[String], + "position": Optional[String], + "limit": Optional[NullableInteger], + "locationStatus": Optional[LocationStatusType], + }, + total=False, +) + + +class GetDocumentationVersionRequest(ServiceRequest): + restApiId: String + documentationVersion: String + + +class GetDocumentationVersionsRequest(ServiceRequest): + restApiId: String + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetDomainNameAccessAssociationsRequest(ServiceRequest): + position: Optional[String] + limit: Optional[NullableInteger] + resourceOwner: Optional[ResourceOwner] + + +class GetDomainNameRequest(ServiceRequest): + domainName: String + domainNameId: Optional[String] + + +class GetDomainNamesRequest(ServiceRequest): + position: Optional[String] + limit: Optional[NullableInteger] + resourceOwner: Optional[ResourceOwner] + + +class GetExportRequest(ServiceRequest): + restApiId: String + stageName: String + exportType: String + parameters: Optional[MapOfStringToString] + accepts: Optional[String] + + +class GetGatewayResponseRequest(ServiceRequest): + restApiId: String + responseType: GatewayResponseType + + +class GetGatewayResponsesRequest(ServiceRequest): + restApiId: String + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetIntegrationRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + + +class GetIntegrationResponseRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + statusCode: StatusCode + + +class GetMethodRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + + +class 
GetMethodResponseRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + statusCode: StatusCode + + +class GetModelRequest(ServiceRequest): + restApiId: String + modelName: String + flatten: Optional[Boolean] + + +class GetModelTemplateRequest(ServiceRequest): + restApiId: String + modelName: String + + +class GetModelsRequest(ServiceRequest): + restApiId: String + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetRequestValidatorRequest(ServiceRequest): + restApiId: String + requestValidatorId: String + + +class GetRequestValidatorsRequest(ServiceRequest): + restApiId: String + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetResourceRequest(ServiceRequest): + restApiId: String + resourceId: String + embed: Optional[ListOfString] + + +class GetResourcesRequest(ServiceRequest): + restApiId: String + position: Optional[String] + limit: Optional[NullableInteger] + embed: Optional[ListOfString] + + +class GetRestApiRequest(ServiceRequest): + restApiId: String + + +class GetRestApisRequest(ServiceRequest): + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetSdkRequest(ServiceRequest): + restApiId: String + stageName: String + sdkType: String + parameters: Optional[MapOfStringToString] + + +class GetSdkTypeRequest(ServiceRequest): + id: String + + +class GetSdkTypesRequest(ServiceRequest): + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetStageRequest(ServiceRequest): + restApiId: String + stageName: String + + +class GetStagesRequest(ServiceRequest): + restApiId: String + deploymentId: Optional[String] + + +class GetTagsRequest(ServiceRequest): + resourceArn: String + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetUsagePlanKeyRequest(ServiceRequest): + usagePlanId: String + keyId: String + + +class GetUsagePlanKeysRequest(ServiceRequest): + usagePlanId: String + position: Optional[String] + limit: Optional[NullableInteger] + nameQuery: Optional[String] + + +class GetUsagePlanRequest(ServiceRequest): + usagePlanId: String + + +class GetUsagePlansRequest(ServiceRequest): + position: Optional[String] + keyId: Optional[String] + limit: Optional[NullableInteger] + + +class GetUsageRequest(ServiceRequest): + usagePlanId: String + keyId: Optional[String] + startDate: String + endDate: String + position: Optional[String] + limit: Optional[NullableInteger] + + +class GetVpcLinkRequest(ServiceRequest): + vpcLinkId: String + + +class GetVpcLinksRequest(ServiceRequest): + position: Optional[String] + limit: Optional[NullableInteger] + + +class ImportApiKeysRequest(ServiceRequest): + body: IO[Blob] + format: ApiKeysFormat + failOnWarnings: Optional[Boolean] + + +class ImportDocumentationPartsRequest(ServiceRequest): + body: IO[Blob] + restApiId: String + mode: Optional[PutMode] + failOnWarnings: Optional[Boolean] + + +class ImportRestApiRequest(ServiceRequest): + body: IO[Blob] + failOnWarnings: Optional[Boolean] + parameters: Optional[MapOfStringToString] + + +class TlsConfig(TypedDict, total=False): + insecureSkipVerification: Optional[Boolean] + + +class IntegrationResponse(TypedDict, total=False): + statusCode: Optional[StatusCode] + selectionPattern: Optional[String] + responseParameters: Optional[MapOfStringToString] + responseTemplates: Optional[MapOfStringToString] + contentHandling: Optional[ContentHandlingStrategy] + + +MapOfIntegrationResponse = Dict[String, IntegrationResponse] +Integration = TypedDict( + "Integration", + { + 
"type": Optional[IntegrationType], + "httpMethod": Optional[String], + "uri": Optional[String], + "connectionType": Optional[ConnectionType], + "connectionId": Optional[String], + "credentials": Optional[String], + "requestParameters": Optional[MapOfStringToString], + "requestTemplates": Optional[MapOfStringToString], + "passthroughBehavior": Optional[String], + "contentHandling": Optional[ContentHandlingStrategy], + "timeoutInMillis": Optional[Integer], + "cacheNamespace": Optional[String], + "cacheKeyParameters": Optional[ListOfString], + "integrationResponses": Optional[MapOfIntegrationResponse], + "tlsConfig": Optional[TlsConfig], + }, + total=False, +) +Long = int +ListOfLong = List[Long] + + +class Model(TypedDict, total=False): + id: Optional[String] + name: Optional[String] + description: Optional[String] + schema: Optional[String] + contentType: Optional[String] + + +ListOfModel = List[Model] +PatchOperation = TypedDict( + "PatchOperation", + { + "op": Optional[Op], + "path": Optional[String], + "value": Optional[String], + "from": Optional[String], + }, + total=False, +) +ListOfPatchOperation = List[PatchOperation] + + +class RequestValidator(TypedDict, total=False): + id: Optional[String] + name: Optional[String] + validateRequestBody: Optional[Boolean] + validateRequestParameters: Optional[Boolean] + + +ListOfRequestValidator = List[RequestValidator] +MapOfStringToBoolean = Dict[String, NullableBoolean] + + +class MethodResponse(TypedDict, total=False): + statusCode: Optional[StatusCode] + responseParameters: Optional[MapOfStringToBoolean] + responseModels: Optional[MapOfStringToString] + + +MapOfMethodResponse = Dict[String, MethodResponse] + + +class Method(TypedDict, total=False): + httpMethod: Optional[String] + authorizationType: Optional[String] + authorizerId: Optional[String] + apiKeyRequired: Optional[NullableBoolean] + requestValidatorId: Optional[String] + operationName: Optional[String] + requestParameters: Optional[MapOfStringToBoolean] + requestModels: Optional[MapOfStringToString] + methodResponses: Optional[MapOfMethodResponse] + methodIntegration: Optional[Integration] + authorizationScopes: Optional[ListOfString] + + +MapOfMethod = Dict[String, Method] + + +class Resource(TypedDict, total=False): + id: Optional[String] + parentId: Optional[String] + pathPart: Optional[String] + path: Optional[String] + resourceMethods: Optional[MapOfMethod] + + +ListOfResource = List[Resource] + + +class RestApi(TypedDict, total=False): + id: Optional[String] + name: Optional[String] + description: Optional[String] + createdDate: Optional[Timestamp] + version: Optional[String] + warnings: Optional[ListOfString] + binaryMediaTypes: Optional[ListOfString] + minimumCompressionSize: Optional[NullableInteger] + apiKeySource: Optional[ApiKeySourceType] + endpointConfiguration: Optional[EndpointConfiguration] + policy: Optional[String] + tags: Optional[MapOfStringToString] + disableExecuteApiEndpoint: Optional[Boolean] + rootResourceId: Optional[String] + + +ListOfRestApi = List[RestApi] + + +class SdkConfigurationProperty(TypedDict, total=False): + name: Optional[String] + friendlyName: Optional[String] + description: Optional[String] + required: Optional[Boolean] + defaultValue: Optional[String] + + +ListOfSdkConfigurationProperty = List[SdkConfigurationProperty] + + +class SdkType(TypedDict, total=False): + id: Optional[String] + friendlyName: Optional[String] + description: Optional[String] + configurationProperties: Optional[ListOfSdkConfigurationProperty] + + +ListOfSdkType = 
List[SdkType] + + +class MethodSetting(TypedDict, total=False): + metricsEnabled: Optional[Boolean] + loggingLevel: Optional[String] + dataTraceEnabled: Optional[Boolean] + throttlingBurstLimit: Optional[Integer] + throttlingRateLimit: Optional[Double] + cachingEnabled: Optional[Boolean] + cacheTtlInSeconds: Optional[Integer] + cacheDataEncrypted: Optional[Boolean] + requireAuthorizationForCacheControl: Optional[Boolean] + unauthorizedCacheControlHeaderStrategy: Optional[UnauthorizedCacheControlHeaderStrategy] + + +MapOfMethodSettings = Dict[String, MethodSetting] + + +class Stage(TypedDict, total=False): + deploymentId: Optional[String] + clientCertificateId: Optional[String] + stageName: Optional[String] + description: Optional[String] + cacheClusterEnabled: Optional[Boolean] + cacheClusterSize: Optional[CacheClusterSize] + cacheClusterStatus: Optional[CacheClusterStatus] + methodSettings: Optional[MapOfMethodSettings] + variables: Optional[MapOfStringToString] + documentationVersion: Optional[String] + accessLogSettings: Optional[AccessLogSettings] + canarySettings: Optional[CanarySettings] + tracingEnabled: Optional[Boolean] + webAclArn: Optional[String] + tags: Optional[MapOfStringToString] + createdDate: Optional[Timestamp] + lastUpdatedDate: Optional[Timestamp] + + +ListOfStage = List[Stage] +ListOfUsage = List[ListOfLong] + + +class UsagePlan(TypedDict, total=False): + id: Optional[String] + name: Optional[String] + description: Optional[String] + apiStages: Optional[ListOfApiStage] + throttle: Optional[ThrottleSettings] + quota: Optional[QuotaSettings] + productCode: Optional[String] + tags: Optional[MapOfStringToString] + + +ListOfUsagePlan = List[UsagePlan] +UsagePlanKey = TypedDict( + "UsagePlanKey", + { + "id": Optional[String], + "type": Optional[String], + "value": Optional[String], + "name": Optional[String], + }, + total=False, +) +ListOfUsagePlanKey = List[UsagePlanKey] + + +class VpcLink(TypedDict, total=False): + id: Optional[String] + name: Optional[String] + description: Optional[String] + targetArns: Optional[ListOfString] + status: Optional[VpcLinkStatus] + statusMessage: Optional[String] + tags: Optional[MapOfStringToString] + + +ListOfVpcLink = List[VpcLink] +MapOfKeyUsages = Dict[String, ListOfUsage] +MapOfStringToList = Dict[String, ListOfString] + + +class Models(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfModel] + + +class PutGatewayResponseRequest(ServiceRequest): + restApiId: String + responseType: GatewayResponseType + statusCode: Optional[StatusCode] + responseParameters: Optional[MapOfStringToString] + responseTemplates: Optional[MapOfStringToString] + + +PutIntegrationRequest = TypedDict( + "PutIntegrationRequest", + { + "restApiId": String, + "resourceId": String, + "httpMethod": String, + "type": IntegrationType, + "integrationHttpMethod": Optional[String], + "uri": Optional[String], + "connectionType": Optional[ConnectionType], + "connectionId": Optional[String], + "credentials": Optional[String], + "requestParameters": Optional[MapOfStringToString], + "requestTemplates": Optional[MapOfStringToString], + "passthroughBehavior": Optional[String], + "cacheNamespace": Optional[String], + "cacheKeyParameters": Optional[ListOfString], + "contentHandling": Optional[ContentHandlingStrategy], + "timeoutInMillis": Optional[NullableInteger], + "tlsConfig": Optional[TlsConfig], + }, + total=False, +) + + +class PutIntegrationResponseRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + 
statusCode: StatusCode + selectionPattern: Optional[String] + responseParameters: Optional[MapOfStringToString] + responseTemplates: Optional[MapOfStringToString] + contentHandling: Optional[ContentHandlingStrategy] + + +class PutMethodRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + authorizationType: String + authorizerId: Optional[String] + apiKeyRequired: Optional[Boolean] + operationName: Optional[String] + requestParameters: Optional[MapOfStringToBoolean] + requestModels: Optional[MapOfStringToString] + requestValidatorId: Optional[String] + authorizationScopes: Optional[ListOfString] + + +class PutMethodResponseRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + statusCode: StatusCode + responseParameters: Optional[MapOfStringToBoolean] + responseModels: Optional[MapOfStringToString] + + +class PutRestApiRequest(ServiceRequest): + body: IO[Blob] + restApiId: String + mode: Optional[PutMode] + failOnWarnings: Optional[Boolean] + parameters: Optional[MapOfStringToString] + + +class RejectDomainNameAccessAssociationRequest(ServiceRequest): + domainNameAccessAssociationArn: String + domainNameArn: String + + +class RequestValidators(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfRequestValidator] + + +class Resources(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfResource] + + +class RestApis(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfRestApi] + + +class SdkResponse(TypedDict, total=False): + body: Optional[Union[Blob, IO[Blob], Iterable[Blob]]] + contentType: Optional[String] + contentDisposition: Optional[String] + + +class SdkTypes(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfSdkType] + + +class Stages(TypedDict, total=False): + item: Optional[ListOfStage] + + +class TagResourceRequest(ServiceRequest): + resourceArn: String + tags: MapOfStringToString + + +class Tags(TypedDict, total=False): + tags: Optional[MapOfStringToString] + + +class Template(TypedDict, total=False): + value: Optional[String] + + +class TestInvokeAuthorizerRequest(ServiceRequest): + restApiId: String + authorizerId: String + headers: Optional[MapOfStringToString] + multiValueHeaders: Optional[MapOfStringToList] + pathWithQueryString: Optional[String] + body: Optional[String] + stageVariables: Optional[MapOfStringToString] + additionalContext: Optional[MapOfStringToString] + + +class TestInvokeAuthorizerResponse(TypedDict, total=False): + clientStatus: Optional[Integer] + log: Optional[String] + latency: Optional[Long] + principalId: Optional[String] + policy: Optional[String] + authorization: Optional[MapOfStringToList] + claims: Optional[MapOfStringToString] + + +class TestInvokeMethodRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + pathWithQueryString: Optional[String] + body: Optional[String] + headers: Optional[MapOfStringToString] + multiValueHeaders: Optional[MapOfStringToList] + clientCertificateId: Optional[String] + stageVariables: Optional[MapOfStringToString] + + +class TestInvokeMethodResponse(TypedDict, total=False): + status: Optional[Integer] + body: Optional[String] + headers: Optional[MapOfStringToString] + multiValueHeaders: Optional[MapOfStringToList] + log: Optional[String] + latency: Optional[Long] + + +class UntagResourceRequest(ServiceRequest): + resourceArn: String + tagKeys: ListOfString + + +class UpdateAccountRequest(ServiceRequest): + 
patchOperations: Optional[ListOfPatchOperation] + + +class UpdateApiKeyRequest(ServiceRequest): + apiKey: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateAuthorizerRequest(ServiceRequest): + restApiId: String + authorizerId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateBasePathMappingRequest(ServiceRequest): + domainName: String + domainNameId: Optional[String] + basePath: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateClientCertificateRequest(ServiceRequest): + clientCertificateId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateDeploymentRequest(ServiceRequest): + restApiId: String + deploymentId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateDocumentationPartRequest(ServiceRequest): + restApiId: String + documentationPartId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateDocumentationVersionRequest(ServiceRequest): + restApiId: String + documentationVersion: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateDomainNameRequest(ServiceRequest): + domainName: String + domainNameId: Optional[String] + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateGatewayResponseRequest(ServiceRequest): + restApiId: String + responseType: GatewayResponseType + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateIntegrationRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateIntegrationResponseRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + statusCode: StatusCode + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateMethodRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateMethodResponseRequest(ServiceRequest): + restApiId: String + resourceId: String + httpMethod: String + statusCode: StatusCode + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateModelRequest(ServiceRequest): + restApiId: String + modelName: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateRequestValidatorRequest(ServiceRequest): + restApiId: String + requestValidatorId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateResourceRequest(ServiceRequest): + restApiId: String + resourceId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateRestApiRequest(ServiceRequest): + restApiId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateStageRequest(ServiceRequest): + restApiId: String + stageName: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateUsagePlanRequest(ServiceRequest): + usagePlanId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateUsageRequest(ServiceRequest): + usagePlanId: String + keyId: String + patchOperations: Optional[ListOfPatchOperation] + + +class UpdateVpcLinkRequest(ServiceRequest): + vpcLinkId: String + patchOperations: Optional[ListOfPatchOperation] + + +class Usage(TypedDict, total=False): + usagePlanId: Optional[String] + startDate: Optional[String] + endDate: Optional[String] + position: Optional[String] + items: Optional[MapOfKeyUsages] + + +class UsagePlanKeys(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfUsagePlanKey] + + +class UsagePlans(TypedDict, 
total=False): + position: Optional[String] + items: Optional[ListOfUsagePlan] + + +class VpcLinks(TypedDict, total=False): + position: Optional[String] + items: Optional[ListOfVpcLink] + + +class ApigatewayApi: + service = "apigateway" + version = "2015-07-09" + + @handler("CreateApiKey") + def create_api_key( + self, + context: RequestContext, + name: String | None = None, + description: String | None = None, + enabled: Boolean | None = None, + generate_distinct_id: Boolean | None = None, + value: String | None = None, + stage_keys: ListOfStageKeys | None = None, + customer_id: String | None = None, + tags: MapOfStringToString | None = None, + **kwargs, + ) -> ApiKey: + raise NotImplementedError + + @handler("CreateAuthorizer", expand=False) + def create_authorizer( + self, context: RequestContext, request: CreateAuthorizerRequest, **kwargs + ) -> Authorizer: + raise NotImplementedError + + @handler("CreateBasePathMapping") + def create_base_path_mapping( + self, + context: RequestContext, + domain_name: String, + rest_api_id: String, + domain_name_id: String | None = None, + base_path: String | None = None, + stage: String | None = None, + **kwargs, + ) -> BasePathMapping: + raise NotImplementedError + + @handler("CreateDeployment") + def create_deployment( + self, + context: RequestContext, + rest_api_id: String, + stage_name: String | None = None, + stage_description: String | None = None, + description: String | None = None, + cache_cluster_enabled: NullableBoolean | None = None, + cache_cluster_size: CacheClusterSize | None = None, + variables: MapOfStringToString | None = None, + canary_settings: DeploymentCanarySettings | None = None, + tracing_enabled: NullableBoolean | None = None, + **kwargs, + ) -> Deployment: + raise NotImplementedError + + @handler("CreateDocumentationPart") + def create_documentation_part( + self, + context: RequestContext, + rest_api_id: String, + location: DocumentationPartLocation, + properties: String, + **kwargs, + ) -> DocumentationPart: + raise NotImplementedError + + @handler("CreateDocumentationVersion") + def create_documentation_version( + self, + context: RequestContext, + rest_api_id: String, + documentation_version: String, + stage_name: String | None = None, + description: String | None = None, + **kwargs, + ) -> DocumentationVersion: + raise NotImplementedError + + @handler("CreateDomainName") + def create_domain_name( + self, + context: RequestContext, + domain_name: String, + certificate_name: String | None = None, + certificate_body: String | None = None, + certificate_private_key: String | None = None, + certificate_chain: String | None = None, + certificate_arn: String | None = None, + regional_certificate_name: String | None = None, + regional_certificate_arn: String | None = None, + endpoint_configuration: EndpointConfiguration | None = None, + tags: MapOfStringToString | None = None, + security_policy: SecurityPolicy | None = None, + mutual_tls_authentication: MutualTlsAuthenticationInput | None = None, + ownership_verification_certificate_arn: String | None = None, + policy: String | None = None, + **kwargs, + ) -> DomainName: + raise NotImplementedError + + @handler("CreateDomainNameAccessAssociation") + def create_domain_name_access_association( + self, + context: RequestContext, + domain_name_arn: String, + access_association_source_type: AccessAssociationSourceType, + access_association_source: String, + tags: MapOfStringToString | None = None, + **kwargs, + ) -> DomainNameAccessAssociation: + raise NotImplementedError + + 
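The `ApigatewayApi` skeleton here follows the same pattern as the `AcmApi` skeleton earlier in this diff: every operation is declared with an `@handler` decorator and simply raises `NotImplementedError`, and a concrete provider subclasses the skeleton and overrides the operations it supports. Below is a minimal, illustrative sketch of that pattern; it is not part of the diff. The provider class name and its in-memory `_rest_apis` store are invented for the example, and a real provider would add persistence, validation, and ARN handling.

from datetime import datetime, timezone

from localstack.aws.api import RequestContext
from localstack.aws.api.apigateway import (
    ApigatewayApi,
    NotFoundException,
    RestApi,
    String,
)


class InMemoryApigatewayProvider(ApigatewayApi):
    """Hypothetical provider that keeps REST APIs in a plain dict."""

    def __init__(self) -> None:
        self._rest_apis: dict[str, RestApi] = {}

    def create_rest_api(
        self,
        context: RequestContext,
        name: String,
        description: String | None = None,
        **kwargs,
    ) -> RestApi:
        # RestApi is declared with total=False, so a partial literal is valid.
        rest_api = RestApi(
            id=str(len(self._rest_apis) + 1),
            name=name,
            description=description,
            createdDate=datetime.now(timezone.utc),
        )
        self._rest_apis[rest_api["id"]] = rest_api
        return rest_api

    def get_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> RestApi:
        rest_api = self._rest_apis.get(rest_api_id)
        if rest_api is None:
            # NotFoundException is one of the ServiceException subclasses defined above.
            raise NotFoundException(f"Invalid API identifier specified: {rest_api_id}")
        return rest_api

Note also the `expand=False` variants (`CreateAuthorizer`, `GetDocumentationParts`): shapes whose members collide with Python keywords, such as `type` or `from`, are declared with the functional `TypedDict` syntax, and their handlers receive the whole request object as a single `request` parameter instead of expanded keyword arguments, presumably because those member names cannot appear as Python parameters.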
@handler("CreateModel") + def create_model( + self, + context: RequestContext, + rest_api_id: String, + name: String, + content_type: String, + description: String | None = None, + schema: String | None = None, + **kwargs, + ) -> Model: + raise NotImplementedError + + @handler("CreateRequestValidator") + def create_request_validator( + self, + context: RequestContext, + rest_api_id: String, + name: String | None = None, + validate_request_body: Boolean | None = None, + validate_request_parameters: Boolean | None = None, + **kwargs, + ) -> RequestValidator: + raise NotImplementedError + + @handler("CreateResource") + def create_resource( + self, + context: RequestContext, + rest_api_id: String, + parent_id: String, + path_part: String, + **kwargs, + ) -> Resource: + raise NotImplementedError + + @handler("CreateRestApi") + def create_rest_api( + self, + context: RequestContext, + name: String, + description: String | None = None, + version: String | None = None, + clone_from: String | None = None, + binary_media_types: ListOfString | None = None, + minimum_compression_size: NullableInteger | None = None, + api_key_source: ApiKeySourceType | None = None, + endpoint_configuration: EndpointConfiguration | None = None, + policy: String | None = None, + tags: MapOfStringToString | None = None, + disable_execute_api_endpoint: Boolean | None = None, + **kwargs, + ) -> RestApi: + raise NotImplementedError + + @handler("CreateStage") + def create_stage( + self, + context: RequestContext, + rest_api_id: String, + stage_name: String, + deployment_id: String, + description: String | None = None, + cache_cluster_enabled: Boolean | None = None, + cache_cluster_size: CacheClusterSize | None = None, + variables: MapOfStringToString | None = None, + documentation_version: String | None = None, + canary_settings: CanarySettings | None = None, + tracing_enabled: Boolean | None = None, + tags: MapOfStringToString | None = None, + **kwargs, + ) -> Stage: + raise NotImplementedError + + @handler("CreateUsagePlan") + def create_usage_plan( + self, + context: RequestContext, + name: String, + description: String | None = None, + api_stages: ListOfApiStage | None = None, + throttle: ThrottleSettings | None = None, + quota: QuotaSettings | None = None, + tags: MapOfStringToString | None = None, + **kwargs, + ) -> UsagePlan: + raise NotImplementedError + + @handler("CreateUsagePlanKey") + def create_usage_plan_key( + self, + context: RequestContext, + usage_plan_id: String, + key_id: String, + key_type: String, + **kwargs, + ) -> UsagePlanKey: + raise NotImplementedError + + @handler("CreateVpcLink") + def create_vpc_link( + self, + context: RequestContext, + name: String, + target_arns: ListOfString, + description: String | None = None, + tags: MapOfStringToString | None = None, + **kwargs, + ) -> VpcLink: + raise NotImplementedError + + @handler("DeleteApiKey") + def delete_api_key(self, context: RequestContext, api_key: String, **kwargs) -> None: + raise NotImplementedError + + @handler("DeleteAuthorizer") + def delete_authorizer( + self, context: RequestContext, rest_api_id: String, authorizer_id: String, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteBasePathMapping") + def delete_base_path_mapping( + self, + context: RequestContext, + domain_name: String, + base_path: String, + domain_name_id: String | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteClientCertificate") + def delete_client_certificate( + self, context: RequestContext, 
client_certificate_id: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDeployment")
+    def delete_deployment(
+        self, context: RequestContext, rest_api_id: String, deployment_id: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDocumentationPart")
+    def delete_documentation_part(
+        self, context: RequestContext, rest_api_id: String, documentation_part_id: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDocumentationVersion")
+    def delete_documentation_version(
+        self, context: RequestContext, rest_api_id: String, documentation_version: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDomainName")
+    def delete_domain_name(
+        self,
+        context: RequestContext,
+        domain_name: String,
+        domain_name_id: String | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDomainNameAccessAssociation")
+    def delete_domain_name_access_association(
+        self, context: RequestContext, domain_name_access_association_arn: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteGatewayResponse")
+    def delete_gateway_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        response_type: GatewayResponseType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteIntegration")
+    def delete_integration(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteIntegrationResponse")
+    def delete_integration_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteMethod")
+    def delete_method(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteMethodResponse")
+    def delete_method_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteModel")
+    def delete_model(
+        self, context: RequestContext, rest_api_id: String, model_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRequestValidator")
+    def delete_request_validator(
+        self, context: RequestContext, rest_api_id: String, request_validator_id: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteResource")
+    def delete_resource(
+        self, context: RequestContext, rest_api_id: String, resource_id: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRestApi")
+    def delete_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteStage")
+    def delete_stage(
+        self, context: RequestContext, rest_api_id: String, stage_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteUsagePlan")
+    def delete_usage_plan(self, context: RequestContext, usage_plan_id: String, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteUsagePlanKey")
+    def delete_usage_plan_key(
+        self, context: RequestContext, usage_plan_id: String, key_id: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteVpcLink")
+    def delete_vpc_link(self, context: RequestContext, vpc_link_id: String, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("FlushStageAuthorizersCache")
+    def flush_stage_authorizers_cache(
+        self, context: RequestContext, rest_api_id: String, stage_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("FlushStageCache")
+    def flush_stage_cache(
+        self, context: RequestContext, rest_api_id: String, stage_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("GenerateClientCertificate")
+    def generate_client_certificate(
+        self,
+        context: RequestContext,
+        description: String | None = None,
+        tags: MapOfStringToString | None = None,
+        **kwargs,
+    ) -> ClientCertificate:
+        raise NotImplementedError
+
+    @handler("GetAccount")
+    def get_account(self, context: RequestContext, **kwargs) -> Account:
+        raise NotImplementedError
+
+    @handler("GetApiKey")
+    def get_api_key(
+        self,
+        context: RequestContext,
+        api_key: String,
+        include_value: NullableBoolean | None = None,
+        **kwargs,
+    ) -> ApiKey:
+        raise NotImplementedError
+
+    @handler("GetApiKeys")
+    def get_api_keys(
+        self,
+        context: RequestContext,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        name_query: String | None = None,
+        customer_id: String | None = None,
+        include_values: NullableBoolean | None = None,
+        **kwargs,
+    ) -> ApiKeys:
+        raise NotImplementedError
+
+    @handler("GetAuthorizer")
+    def get_authorizer(
+        self, context: RequestContext, rest_api_id: String, authorizer_id: String, **kwargs
+    ) -> Authorizer:
+        raise NotImplementedError
+
+    @handler("GetAuthorizers")
+    def get_authorizers(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> Authorizers:
+        raise NotImplementedError
+
+    @handler("GetBasePathMapping")
+    def get_base_path_mapping(
+        self,
+        context: RequestContext,
+        domain_name: String,
+        base_path: String,
+        domain_name_id: String | None = None,
+        **kwargs,
+    ) -> BasePathMapping:
+        raise NotImplementedError
+
+    @handler("GetBasePathMappings")
+    def get_base_path_mappings(
+        self,
+        context: RequestContext,
+        domain_name: String,
+        domain_name_id: String | None = None,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> BasePathMappings:
+        raise NotImplementedError
+
+    @handler("GetClientCertificate")
+    def get_client_certificate(
+        self, context: RequestContext, client_certificate_id: String, **kwargs
+    ) -> ClientCertificate:
+        raise NotImplementedError
+
+    @handler("GetClientCertificates")
+    def get_client_certificates(
+        self,
+        context: RequestContext,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> ClientCertificates:
+        raise NotImplementedError
+
+    @handler("GetDeployment")
+    def get_deployment(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        deployment_id: String,
+        embed: ListOfString | None = None,
+        **kwargs,
+    ) -> Deployment:
+        raise NotImplementedError
+
+    @handler("GetDeployments")
+    def get_deployments(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> Deployments:
+        raise NotImplementedError
+
+    @handler("GetDocumentationPart")
+    def get_documentation_part(
+        self, context: RequestContext, rest_api_id: String, documentation_part_id: String, **kwargs
+    ) -> DocumentationPart:
+        raise NotImplementedError
+
+    @handler("GetDocumentationParts", expand=False)
+    def get_documentation_parts(
+        self, context: RequestContext, request: GetDocumentationPartsRequest, **kwargs
+    ) -> DocumentationParts:
+        raise NotImplementedError
+
+    @handler("GetDocumentationVersion")
+    def get_documentation_version(
+        self, context: RequestContext, rest_api_id: String, documentation_version: String, **kwargs
+    ) -> DocumentationVersion:
+        raise NotImplementedError
+
+    @handler("GetDocumentationVersions")
+    def get_documentation_versions(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> DocumentationVersions:
+        raise NotImplementedError
+
+    @handler("GetDomainName")
+    def get_domain_name(
+        self,
+        context: RequestContext,
+        domain_name: String,
+        domain_name_id: String | None = None,
+        **kwargs,
+    ) -> DomainName:
+        raise NotImplementedError
+
+    @handler("GetDomainNameAccessAssociations")
+    def get_domain_name_access_associations(
+        self,
+        context: RequestContext,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        resource_owner: ResourceOwner | None = None,
+        **kwargs,
+    ) -> DomainNameAccessAssociations:
+        raise NotImplementedError
+
+    @handler("GetDomainNames")
+    def get_domain_names(
+        self,
+        context: RequestContext,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        resource_owner: ResourceOwner | None = None,
+        **kwargs,
+    ) -> DomainNames:
+        raise NotImplementedError
+
+    @handler("GetExport")
+    def get_export(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        stage_name: String,
+        export_type: String,
+        parameters: MapOfStringToString | None = None,
+        accepts: String | None = None,
+        **kwargs,
+    ) -> ExportResponse:
+        raise NotImplementedError
+
+    @handler("GetGatewayResponse")
+    def get_gateway_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        response_type: GatewayResponseType,
+        **kwargs,
+    ) -> GatewayResponse:
+        raise NotImplementedError
+
+    @handler("GetGatewayResponses")
+    def get_gateway_responses(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> GatewayResponses:
+        raise NotImplementedError
+
+    @handler("GetIntegration")
+    def get_integration(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        **kwargs,
+    ) -> Integration:
+        raise NotImplementedError
+
+    @handler("GetIntegrationResponse")
+    def get_integration_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        **kwargs,
+    ) -> IntegrationResponse:
+        raise NotImplementedError
+
+    @handler("GetMethod")
+    def get_method(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        **kwargs,
+    ) -> Method:
+        raise NotImplementedError
+
+    @handler("GetMethodResponse")
+    def get_method_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        **kwargs,
+    ) -> MethodResponse:
+        raise NotImplementedError
+
+    @handler("GetModel")
+    def get_model(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        model_name: String,
+        flatten: Boolean | None = None,
+        **kwargs,
+    ) -> Model:
+        raise NotImplementedError
+
+    @handler("GetModelTemplate")
+    def get_model_template(
+        self, context: RequestContext, rest_api_id: String, model_name: String, **kwargs
+    ) -> Template:
+        raise NotImplementedError
+
+    @handler("GetModels")
+    def get_models(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> Models:
+        raise NotImplementedError
+
+    @handler("GetRequestValidator")
+    def get_request_validator(
+        self, context: RequestContext, rest_api_id: String, request_validator_id: String, **kwargs
+    ) -> RequestValidator:
+        raise NotImplementedError
+
+    @handler("GetRequestValidators")
+    def get_request_validators(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> RequestValidators:
+        raise NotImplementedError
+
+    @handler("GetResource")
+    def get_resource(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        embed: ListOfString | None = None,
+        **kwargs,
+    ) -> Resource:
+        raise NotImplementedError
+
+    @handler("GetResources")
+    def get_resources(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        embed: ListOfString | None = None,
+        **kwargs,
+    ) -> Resources:
+        raise NotImplementedError
+
+    @handler("GetRestApi")
+    def get_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> RestApi:
+        raise NotImplementedError
+
+    @handler("GetRestApis")
+    def get_rest_apis(
+        self,
+        context: RequestContext,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> RestApis:
+        raise NotImplementedError
+
+    @handler("GetSdk")
+    def get_sdk(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        stage_name: String,
+        sdk_type: String,
+        parameters: MapOfStringToString | None = None,
+        **kwargs,
+    ) -> SdkResponse:
+        raise NotImplementedError
+
+    @handler("GetSdkType")
+    def get_sdk_type(self, context: RequestContext, id: String, **kwargs) -> SdkType:
+        raise NotImplementedError
+
+    @handler("GetSdkTypes")
+    def get_sdk_types(
+        self,
+        context: RequestContext,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> SdkTypes:
+        raise NotImplementedError
+
+    @handler("GetStage")
+    def get_stage(
+        self, context: RequestContext, rest_api_id: String, stage_name: String, **kwargs
+    ) -> Stage:
+        raise NotImplementedError
+
+    @handler("GetStages")
+    def get_stages(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        deployment_id: String | None = None,
+        **kwargs,
+    ) -> Stages:
+        raise NotImplementedError
+
+    @handler("GetTags")
+    def get_tags(
+        self,
+        context: RequestContext,
+        resource_arn: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> Tags:
+        raise NotImplementedError
+
+    @handler("GetUsage")
+    def get_usage(
+        self,
+        context: RequestContext,
+        usage_plan_id: String,
+        start_date: String,
+        end_date: String,
+        key_id: String | None = None,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> Usage:
+        raise NotImplementedError
+
+    @handler("GetUsagePlan")
+    def get_usage_plan(self, context: RequestContext, usage_plan_id: String, **kwargs) -> UsagePlan:
+        raise NotImplementedError
+
+    @handler("GetUsagePlanKey")
+    def get_usage_plan_key(
+        self, context: RequestContext, usage_plan_id: String, key_id: String, **kwargs
+    ) -> UsagePlanKey:
+        raise NotImplementedError
+
+    @handler("GetUsagePlanKeys")
+    def get_usage_plan_keys(
+        self,
+        context: RequestContext,
+        usage_plan_id: String,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        name_query: String | None = None,
+        **kwargs,
+    ) -> UsagePlanKeys:
+        raise NotImplementedError
+
+    @handler("GetUsagePlans")
+    def get_usage_plans(
+        self,
+        context: RequestContext,
+        position: String | None = None,
+        key_id: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> UsagePlans:
+        raise NotImplementedError
+
+    @handler("GetVpcLink")
+    def get_vpc_link(self, context: RequestContext, vpc_link_id: String, **kwargs) -> VpcLink:
+        raise NotImplementedError
+
+    @handler("GetVpcLinks")
+    def get_vpc_links(
+        self,
+        context: RequestContext,
+        position: String | None = None,
+        limit: NullableInteger | None = None,
+        **kwargs,
+    ) -> VpcLinks:
+        raise NotImplementedError
+
+    @handler("ImportApiKeys")
+    def import_api_keys(
+        self,
+        context: RequestContext,
+        body: IO[Blob],
+        format: ApiKeysFormat,
+        fail_on_warnings: Boolean | None = None,
+        **kwargs,
+    ) -> ApiKeyIds:
+        raise NotImplementedError
+
+    @handler("ImportDocumentationParts")
+    def import_documentation_parts(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        body: IO[Blob],
+        mode: PutMode | None = None,
+        fail_on_warnings: Boolean | None = None,
+        **kwargs,
+    ) -> DocumentationPartIds:
+        raise NotImplementedError
+
+    @handler("ImportRestApi")
+    def import_rest_api(
+        self,
+        context: RequestContext,
+        body: IO[Blob],
+        fail_on_warnings: Boolean | None = None,
+        parameters: MapOfStringToString | None = None,
+        **kwargs,
+    ) -> RestApi:
+        raise NotImplementedError
+
+    @handler("PutGatewayResponse")
+    def put_gateway_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        response_type: GatewayResponseType,
+        status_code: StatusCode | None = None,
+        response_parameters: MapOfStringToString | None = None,
+        response_templates: MapOfStringToString | None = None,
+        **kwargs,
+    ) -> GatewayResponse:
+        raise NotImplementedError
+
+    @handler("PutIntegration", expand=False)
+    def put_integration(
+        self, context: RequestContext, request: PutIntegrationRequest, **kwargs
+    ) -> Integration:
+        raise NotImplementedError
+
+    @handler("PutIntegrationResponse")
+    def put_integration_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        selection_pattern: String | None = None,
+        response_parameters: MapOfStringToString | None = None,
+        response_templates: MapOfStringToString | None = None,
+        content_handling: ContentHandlingStrategy | None = None,
+        **kwargs,
+    ) -> IntegrationResponse:
+        raise NotImplementedError
+
+    @handler("PutMethod")
+    def put_method(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        authorization_type: String,
+        authorizer_id: String | None = None,
+        api_key_required: Boolean | None = None,
+        operation_name: String | None = None,
+        request_parameters: MapOfStringToBoolean | None = None,
+        request_models: MapOfStringToString | None = None,
+        request_validator_id: String | None = None,
+        authorization_scopes: ListOfString | None = None,
+        **kwargs,
+    ) -> Method:
+        raise NotImplementedError
+
+    @handler("PutMethodResponse")
+    def put_method_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        response_parameters: MapOfStringToBoolean | None = None,
+        response_models: MapOfStringToString | None = None,
+        **kwargs,
+    ) -> MethodResponse:
+        raise NotImplementedError
+
+    @handler("PutRestApi")
+    def put_rest_api(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        body: IO[Blob],
+        mode: PutMode | None = None,
+        fail_on_warnings: Boolean | None = None,
+        parameters: MapOfStringToString | None = None,
+        **kwargs,
+    ) -> RestApi:
+        raise NotImplementedError
+
+    @handler("RejectDomainNameAccessAssociation")
+    def reject_domain_name_access_association(
+        self,
+        context: RequestContext,
+        domain_name_access_association_arn: String,
+        domain_name_arn: String,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagResource")
+    def tag_resource(
+        self, context: RequestContext, resource_arn: String, tags: MapOfStringToString, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TestInvokeAuthorizer")
+    def test_invoke_authorizer(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        authorizer_id: String,
+        headers: MapOfStringToString | None = None,
+        multi_value_headers: MapOfStringToList | None = None,
+        path_with_query_string: String | None = None,
+        body: String | None = None,
+        stage_variables: MapOfStringToString | None = None,
+        additional_context: MapOfStringToString | None = None,
+        **kwargs,
+    ) -> TestInvokeAuthorizerResponse:
+        raise NotImplementedError
+
+    @handler("TestInvokeMethod")
+    def test_invoke_method(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        path_with_query_string: String | None = None,
+        body: String | None = None,
+        headers: MapOfStringToString | None = None,
+        multi_value_headers: MapOfStringToList | None = None,
+        client_certificate_id: String | None = None,
+        stage_variables: MapOfStringToString | None = None,
+        **kwargs,
+    ) -> TestInvokeMethodResponse:
+        raise NotImplementedError
+
+    @handler("UntagResource")
+    def untag_resource(
+        self, context: RequestContext, resource_arn: String, tag_keys: ListOfString, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateAccount")
+    def update_account(
+        self,
+        context: RequestContext,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Account:
+        raise NotImplementedError
+
+    @handler("UpdateApiKey")
+    def update_api_key(
+        self,
+        context: RequestContext,
+        api_key: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> ApiKey:
+        raise NotImplementedError
+
+    @handler("UpdateAuthorizer")
+    def update_authorizer(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        authorizer_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Authorizer:
+        raise NotImplementedError
+
+    @handler("UpdateBasePathMapping")
+    def update_base_path_mapping(
+        self,
+        context: RequestContext,
+        domain_name: String,
+        base_path: String,
+        domain_name_id: String | None = None,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> BasePathMapping:
+        raise NotImplementedError
+
+    @handler("UpdateClientCertificate")
+    def update_client_certificate(
+        self,
+        context: RequestContext,
+        client_certificate_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> ClientCertificate:
+        raise NotImplementedError
+
+    @handler("UpdateDeployment")
+    def update_deployment(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        deployment_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Deployment:
+        raise NotImplementedError
+
+    @handler("UpdateDocumentationPart")
+    def update_documentation_part(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        documentation_part_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> DocumentationPart:
+        raise NotImplementedError
+
+    @handler("UpdateDocumentationVersion")
+    def update_documentation_version(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        documentation_version: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> DocumentationVersion:
+        raise NotImplementedError
+
+    @handler("UpdateDomainName")
+    def update_domain_name(
+        self,
+        context: RequestContext,
+        domain_name: String,
+        domain_name_id: String | None = None,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> DomainName:
+        raise NotImplementedError
+
+    @handler("UpdateGatewayResponse")
+    def update_gateway_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        response_type: GatewayResponseType,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> GatewayResponse:
+        raise NotImplementedError
+
+    @handler("UpdateIntegration")
+    def update_integration(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Integration:
+        raise NotImplementedError
+
+    @handler("UpdateIntegrationResponse")
+    def update_integration_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> IntegrationResponse:
+        raise NotImplementedError
+
+    @handler("UpdateMethod")
+    def update_method(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Method:
+        raise NotImplementedError
+
+    @handler("UpdateMethodResponse")
+    def update_method_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> MethodResponse:
+        raise NotImplementedError
+
+    @handler("UpdateModel")
+    def update_model(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        model_name: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Model:
+        raise NotImplementedError
+
+    @handler("UpdateRequestValidator")
+    def update_request_validator(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        request_validator_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> RequestValidator:
+        raise NotImplementedError
+
+    @handler("UpdateResource")
+    def update_resource(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Resource:
+        raise NotImplementedError
+
+    @handler("UpdateRestApi")
+    def update_rest_api(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> RestApi:
+        raise NotImplementedError
+
+    @handler("UpdateStage")
+    def update_stage(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        stage_name: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Stage:
+        raise NotImplementedError
+
+    @handler("UpdateUsage")
+    def update_usage(
+        self,
+        context: RequestContext,
+        usage_plan_id: String,
+        key_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> Usage:
+        raise NotImplementedError
+
+    @handler("UpdateUsagePlan")
+    def update_usage_plan(
+        self,
+        context: RequestContext,
+        usage_plan_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> UsagePlan:
+        raise NotImplementedError
+
+    @handler("UpdateVpcLink")
+    def update_vpc_link(
+        self,
+        context: RequestContext,
+        vpc_link_id: String,
+        patch_operations: ListOfPatchOperation | None = None,
+        **kwargs,
+    ) -> VpcLink:
+        raise NotImplementedError
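# Editor's note: the stubs above are ASF-generated skeletons; every operation
# raises NotImplementedError until a provider overrides it. Below is a minimal
# sketch (not part of this diff) of how a provider might override one stub. It
# assumes the generated base class for this file follows the same naming
# pattern as CloudcontrolApi below (i.e. ApigatewayApi) and that String is a
# module-level alias; the in-memory store is hypothetical.
from localstack.aws.api import RequestContext
from localstack.aws.api.apigateway import ApigatewayApi, String


class ApigatewayProvider(ApigatewayApi):
    def __init__(self):
        # hypothetical in-memory store: rest_api_id -> API definition
        self.rest_apis: dict[str, dict] = {}

    def delete_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> None:
        # request routing comes from the @handler("DeleteRestApi") metadata on
        # the generated stub; the override only supplies the behavior
        self.rest_apis.pop(rest_api_id, None)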
diff --git a/localstack-core/localstack/aws/api/cloudcontrol/__init__.py b/localstack-core/localstack/aws/api/cloudcontrol/__init__.py
new file mode 100644
index 0000000000000..7420a35c50e8c
--- /dev/null
+++ b/localstack-core/localstack/aws/api/cloudcontrol/__init__.py
@@ -0,0 +1,420 @@
+from datetime import datetime
+from enum import StrEnum
+from typing import List, Optional, TypedDict
+
+from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
+
+ClientToken = str
+ErrorMessage = str
+HandlerNextToken = str
+HookFailureMode = str
+HookInvocationPoint = str
+HookStatus = str
+HookTypeArn = str
+Identifier = str
+MaxResults = int
+NextToken = str
+PatchDocument = str
+Properties = str
+RequestToken = str
+RoleArn = str
+StatusMessage = str
+TypeName = str
+TypeVersionId = str
+
+
+class HandlerErrorCode(StrEnum):
+    NotUpdatable = "NotUpdatable"
+    InvalidRequest = "InvalidRequest"
+    AccessDenied = "AccessDenied"
+    UnauthorizedTaggingOperation = "UnauthorizedTaggingOperation"
+    InvalidCredentials = "InvalidCredentials"
+    AlreadyExists = "AlreadyExists"
+    NotFound = "NotFound"
+    ResourceConflict = "ResourceConflict"
+    Throttling = "Throttling"
+    ServiceLimitExceeded = "ServiceLimitExceeded"
+    NotStabilized = "NotStabilized"
+    GeneralServiceException = "GeneralServiceException"
+    ServiceInternalError = "ServiceInternalError"
+    ServiceTimeout = "ServiceTimeout"
+    NetworkFailure = "NetworkFailure"
+    InternalFailure = "InternalFailure"
+
+
+class Operation(StrEnum):
+    CREATE = "CREATE"
+    DELETE = "DELETE"
+    UPDATE = "UPDATE"
+
+
+class OperationStatus(StrEnum):
+    PENDING = "PENDING"
+    IN_PROGRESS = "IN_PROGRESS"
+    SUCCESS = "SUCCESS"
+    FAILED = "FAILED"
+    CANCEL_IN_PROGRESS = "CANCEL_IN_PROGRESS"
+    CANCEL_COMPLETE = "CANCEL_COMPLETE"
+
+
+class AlreadyExistsException(ServiceException):
+    code: str = "AlreadyExistsException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ClientTokenConflictException(ServiceException):
+    code: str = "ClientTokenConflictException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ConcurrentModificationException(ServiceException):
+    code: str = "ConcurrentModificationException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ConcurrentOperationException(ServiceException):
+    code: str = "ConcurrentOperationException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class GeneralServiceException(ServiceException):
+    code: str = "GeneralServiceException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class HandlerFailureException(ServiceException):
+    code: str = "HandlerFailureException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class HandlerInternalFailureException(ServiceException):
+    code: str = "HandlerInternalFailureException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidCredentialsException(ServiceException):
+    code: str = "InvalidCredentialsException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidRequestException(ServiceException):
+    code: str = "InvalidRequestException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class NetworkFailureException(ServiceException):
+    code: str = "NetworkFailureException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class NotStabilizedException(ServiceException):
+    code: str = "NotStabilizedException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class NotUpdatableException(ServiceException):
+    code: str = "NotUpdatableException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class PrivateTypeException(ServiceException):
+    code: str = "PrivateTypeException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class RequestTokenNotFoundException(ServiceException):
+    code: str = "RequestTokenNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ResourceConflictException(ServiceException):
+    code: str = "ResourceConflictException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ResourceNotFoundException(ServiceException):
+    code: str = "ResourceNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ServiceInternalErrorException(ServiceException):
+    code: str = "ServiceInternalErrorException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ServiceLimitExceededException(ServiceException):
+    code: str = "ServiceLimitExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ThrottlingException(ServiceException):
+    code: str = "ThrottlingException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class TypeNotFoundException(ServiceException):
+    code: str = "TypeNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class UnsupportedActionException(ServiceException):
+    code: str = "UnsupportedActionException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class CancelResourceRequestInput(ServiceRequest):
+    RequestToken: RequestToken
+
+
+Timestamp = datetime
+
+
+class ProgressEvent(TypedDict, total=False):
+    TypeName: Optional[TypeName]
+    Identifier: Optional[Identifier]
+    RequestToken: Optional[RequestToken]
+    HooksRequestToken: Optional[RequestToken]
+    Operation: Optional[Operation]
+    OperationStatus: Optional[OperationStatus]
+    EventTime: Optional[Timestamp]
+    ResourceModel: Optional[Properties]
+    StatusMessage: Optional[StatusMessage]
+    ErrorCode: Optional[HandlerErrorCode]
+    RetryAfter: Optional[Timestamp]
+
+
+class CancelResourceRequestOutput(TypedDict, total=False):
+    ProgressEvent: Optional[ProgressEvent]
+
+
+class CreateResourceInput(ServiceRequest):
+    TypeName: TypeName
+    TypeVersionId: Optional[TypeVersionId]
+    RoleArn: Optional[RoleArn]
+    ClientToken: Optional[ClientToken]
+    DesiredState: Properties
+
+
+class CreateResourceOutput(TypedDict, total=False):
+    ProgressEvent: Optional[ProgressEvent]
+
+
+class DeleteResourceInput(ServiceRequest):
+    TypeName: TypeName
+    TypeVersionId: Optional[TypeVersionId]
+    RoleArn: Optional[RoleArn]
+    ClientToken: Optional[ClientToken]
+    Identifier: Identifier
+
+
+class DeleteResourceOutput(TypedDict, total=False):
+    ProgressEvent: Optional[ProgressEvent]
+
+
+class GetResourceInput(ServiceRequest):
+    TypeName: TypeName
+    TypeVersionId: Optional[TypeVersionId]
+    RoleArn: Optional[RoleArn]
+    Identifier: Identifier
+
+
+class ResourceDescription(TypedDict, total=False):
+    Identifier: Optional[Identifier]
+    Properties: Optional[Properties]
+
+
+class GetResourceOutput(TypedDict, total=False):
+    TypeName: Optional[TypeName]
+    ResourceDescription: Optional[ResourceDescription]
+
+
+class GetResourceRequestStatusInput(ServiceRequest):
+    RequestToken: RequestToken
+
+
+class HookProgressEvent(TypedDict, total=False):
+    HookTypeName: Optional[TypeName]
+    HookTypeVersionId: Optional[TypeVersionId]
+    HookTypeArn: Optional[HookTypeArn]
+    InvocationPoint: Optional[HookInvocationPoint]
+    HookStatus: Optional[HookStatus]
+    HookEventTime: Optional[Timestamp]
+    HookStatusMessage: Optional[StatusMessage]
+    FailureMode: Optional[HookFailureMode]
+
+
+HooksProgressEvent = List[HookProgressEvent]
+
+
+class GetResourceRequestStatusOutput(TypedDict, total=False):
+    ProgressEvent: Optional[ProgressEvent]
+    HooksProgressEvent: Optional[HooksProgressEvent]
+
+
+OperationStatuses = List[OperationStatus]
+Operations = List[Operation]
+
+
+class ResourceRequestStatusFilter(TypedDict, total=False):
+    Operations: Optional[Operations]
+    OperationStatuses: Optional[OperationStatuses]
+
+
+class ListResourceRequestsInput(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+    ResourceRequestStatusFilter: Optional[ResourceRequestStatusFilter]
+
+
+ResourceRequestStatusSummaries = List[ProgressEvent]
+
+
+class ListResourceRequestsOutput(TypedDict, total=False):
+    ResourceRequestStatusSummaries: Optional[ResourceRequestStatusSummaries]
+    NextToken: Optional[NextToken]
+
+
+class ListResourcesInput(ServiceRequest):
+    TypeName: TypeName
+    TypeVersionId: Optional[TypeVersionId]
+    RoleArn: Optional[RoleArn]
+    NextToken: Optional[HandlerNextToken]
+    MaxResults: Optional[MaxResults]
+    ResourceModel: Optional[Properties]
+
+
+ResourceDescriptions = List[ResourceDescription]
+
+
+class ListResourcesOutput(TypedDict, total=False):
+    TypeName: Optional[TypeName]
+    ResourceDescriptions: Optional[ResourceDescriptions]
+    NextToken: Optional[HandlerNextToken]
+
+
+class UpdateResourceInput(ServiceRequest):
+    TypeName: TypeName
+    TypeVersionId: Optional[TypeVersionId]
+    RoleArn: Optional[RoleArn]
+    ClientToken: Optional[ClientToken]
+    Identifier: Identifier
+    PatchDocument: PatchDocument
+
+
+class UpdateResourceOutput(TypedDict, total=False):
+    ProgressEvent: Optional[ProgressEvent]
+
+
+class CloudcontrolApi:
+    service = "cloudcontrol"
+    version = "2021-09-30"
+
+    @handler("CancelResourceRequest")
+    def cancel_resource_request(
+        self, context: RequestContext, request_token: RequestToken, **kwargs
+    ) -> CancelResourceRequestOutput:
+        raise NotImplementedError
+
+    @handler("CreateResource")
+    def create_resource(
+        self,
+        context: RequestContext,
+        type_name: TypeName,
+        desired_state: Properties,
+        type_version_id: TypeVersionId | None = None,
+        role_arn: RoleArn | None = None,
+        client_token: ClientToken | None = None,
+        **kwargs,
+    ) -> CreateResourceOutput:
+        raise NotImplementedError
+
+    @handler("DeleteResource")
+    def delete_resource(
+        self,
+        context: RequestContext,
+        type_name: TypeName,
+        identifier: Identifier,
+        type_version_id: TypeVersionId | None = None,
+        role_arn: RoleArn | None = None,
+        client_token: ClientToken | None = None,
+        **kwargs,
+    ) -> DeleteResourceOutput:
+        raise NotImplementedError
+
+    @handler("GetResource")
+    def get_resource(
+        self,
+        context: RequestContext,
+        type_name: TypeName,
+        identifier: Identifier,
+        type_version_id: TypeVersionId | None = None,
+        role_arn: RoleArn | None = None,
+        **kwargs,
+    ) -> GetResourceOutput:
+        raise NotImplementedError
+
+    @handler("GetResourceRequestStatus")
+    def get_resource_request_status(
+        self, context: RequestContext, request_token: RequestToken, **kwargs
+    ) -> GetResourceRequestStatusOutput:
+        raise NotImplementedError
+
+    @handler("ListResourceRequests")
+    def list_resource_requests(
+        self,
+        context: RequestContext,
+        max_results: MaxResults | None = None,
+        next_token: NextToken | None = None,
+        resource_request_status_filter: ResourceRequestStatusFilter | None = None,
+        **kwargs,
+    ) -> ListResourceRequestsOutput:
+        raise NotImplementedError
+
+    @handler("ListResources")
+    def list_resources(
+        self,
+        context: RequestContext,
+        type_name: TypeName,
+        type_version_id: TypeVersionId | None = None,
+        role_arn: RoleArn | None = None,
+        next_token: HandlerNextToken | None = None,
+        max_results: MaxResults | None = None,
+        resource_model: Properties | None = None,
+        **kwargs,
+    ) -> ListResourcesOutput:
+        raise NotImplementedError
+
+    @handler("UpdateResource")
+    def update_resource(
+        self,
+        context: RequestContext,
+        type_name: TypeName,
+        identifier: Identifier,
+        patch_document: PatchDocument,
+        type_version_id: TypeVersionId | None = None,
+        role_arn: RoleArn | None = None,
+        client_token: ClientToken | None = None,
+        **kwargs,
+    ) -> UpdateResourceOutput:
+        raise NotImplementedError
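# Editor's note: CloudcontrolApi above mirrors the AWS Cloud Control API shape,
# so a standard boto3 client can exercise these operations once a provider
# backs them. A minimal sketch, assuming a LocalStack instance on the default
# edge port 4566 and dummy test credentials; the bucket name is illustrative.
import json

import boto3

cc = boto3.client(
    "cloudcontrol",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# CreateResource is asynchronous: it returns a ProgressEvent whose RequestToken
# can be polled via GetResourceRequestStatus (see the stubs above)
create = cc.create_resource(
    TypeName="AWS::S3::Bucket",
    DesiredState=json.dumps({"BucketName": "example-bucket"}),
)
token = create["ProgressEvent"]["RequestToken"]
status = cc.get_resource_request_status(RequestToken=token)
print(status["ProgressEvent"]["OperationStatus"])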
diff --git a/localstack-core/localstack/aws/api/cloudformation/__init__.py b/localstack-core/localstack/aws/api/cloudformation/__init__.py
new file mode 100644
index 0000000000000..c0621eca7d581
--- /dev/null
+++ b/localstack-core/localstack/aws/api/cloudformation/__init__.py
@@ -0,0 +1,3863 @@
+from datetime import datetime
+from enum import StrEnum
+from typing import Dict, List, Optional, TypedDict
+
+from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
+
+AcceptTermsAndConditions = bool
+Account = str
+AccountGateStatusReason = str
+AccountsUrl = str
+AfterContext = str
+AfterValue = str
+AllowedValue = str
+Arn = str
+AutoDeploymentNullable = bool
+AutoUpdate = bool
+BeforeContext = str
+BeforeValue = str
+BoxedInteger = int
+BoxedMaxResults = int
+CapabilitiesReason = str
+CausingEntity = str
+ChangeSetId = str
+ChangeSetName = str
+ChangeSetNameOrId = str
+ChangeSetStatusReason = str
+ClientRequestToken = str
+ClientToken = str
+ConfigurationSchema = str
+ConnectionArn = str
+Description = str
+DetectionReason = str
+DisableRollback = bool
+DriftedStackInstancesCount = int
+EnableStackCreation = bool
+EnableTerminationProtection = bool
+ErrorCode = str
+ErrorMessage = str
+EventId = str
+ExecutionRoleName = str
+ExecutionStatusReason = str
+ExportName = str
+ExportValue = str
+FailedStackInstancesCount = int
+FailureToleranceCount = int
+FailureTolerancePercentage = int
+GeneratedTemplateId = str
+GeneratedTemplateName = str
+HookInvocationCount = int
+HookResultId = str
+HookStatusReason = str
+HookTargetTypeName = str
+HookType = str
+HookTypeConfigurationVersionId = str
+HookTypeName = str
+HookTypeVersionId = str
+ImportExistingResources = bool
+InProgressStackInstancesCount = int
+InSyncStackInstancesCount = int
+IncludeNestedStacks = bool
+IncludePropertyValues = bool
+IsActivated = bool
+IsDefaultConfiguration = bool
+IsDefaultVersion = bool
+JazzResourceIdentifierPropertyKey = str
+JazzResourceIdentifierPropertyValue = str
+Key = str
+LimitName = str
+LimitValue = int
+LogGroupName = str
+LogicalIdHierarchy = str
+LogicalResourceId = str
+ManagedByStack = bool
+ManagedExecutionNullable = bool
+MaxConcurrentCount = int
+MaxConcurrentPercentage = int
+MaxResults = int
+Metadata = str
+MonitoringTimeInMinutes = int
+NextToken = str
+NoEcho = bool
+NotificationARN = str
+NumberOfResources = int
+OperationResultFilterValues = str
+OptionalSecureUrl = str
+OrganizationalUnitId = str
+OutputKey = str
+OutputValue = str
+ParameterKey = str
+ParameterType = str
+ParameterValue = str
+PercentageCompleted = float
+PhysicalResourceId = str
+PrivateTypeArn = str
+Properties = str
+PropertyDescription = str
+PropertyName = str
+PropertyPath = str
+PropertyValue = str
+PublicVersionNumber = str
+PublisherId = str
+PublisherName = str
+PublisherProfile = str
+Reason = str
+RefreshAllResources = bool
+Region = str
+RegistrationToken = str
+RequestToken = str
+RequiredProperty = bool
+ResourceIdentifier = str
+ResourceIdentifierPropertyKey = str
+ResourceIdentifierPropertyValue = str
+ResourceModel = str
+ResourceProperties = str
+ResourcePropertyPath = str
+ResourceScanId = str
+ResourceScanStatusReason = str
+ResourceScannerMaxResults = int
+ResourceSignalUniqueId = str
+ResourceStatusReason = str
+ResourceToSkip = str
+ResourceType = str
+ResourceTypeFilter = str
+ResourceTypePrefix = str
+ResourcesFailed = int
+ResourcesPending = int
+ResourcesProcessing = int
+ResourcesRead = int
+ResourcesScanned = int
+ResourcesSucceeded = int
+RetainExceptOnCreate = bool
+RetainStacks = bool
+RetainStacksNullable = bool
+RetainStacksOnAccountRemovalNullable = bool
+RoleARN = str
+RoleArn = str
+S3Bucket = str
+S3Url = str
+StackDriftDetectionId = str
+StackDriftDetectionStatusReason = str
+StackId = str
+StackIdsUrl = str
+StackInstanceFilterValues = str
+StackName = str
+StackNameOrId = str
+StackPolicyBody = str
+StackPolicyDuringUpdateBody = str
+StackPolicyDuringUpdateURL = str
+StackPolicyURL = str
+StackRefactorId = str
+StackRefactorResourceIdentifier = str
+StackRefactorStatusReason = str
+StackSetARN = str
+StackSetId = str
+StackSetName = str
+StackSetNameOrId = str
+StackSetOperationStatusReason = str
+StackStatusReason = str
+StatusMessage = str
+SupportedMajorVersion = int
+TagKey = str
+TagValue = str
+TemplateBody = str
+TemplateDescription = str
+TemplateStatusReason = str
+TemplateURL = str
+ThirdPartyTypeArn = str
+TimeoutMinutes = int
+TotalStackInstancesCount = int
+TotalWarnings = int
+TransformName = str
+TreatUnrecognizedResourceTypesAsWarnings = bool
+Type = str
+TypeArn = str
+TypeConfiguration = str
+TypeConfigurationAlias = str
+TypeConfigurationArn = str
+TypeHierarchy = str
+TypeName = str
+TypeNamePrefix = str
+TypeSchema = str
+TypeTestsStatusDescription = str
+TypeVersionId = str
+Url = str
+UsePreviousTemplate = bool
+UsePreviousValue = bool
+Value = str
+Version = str
+
+
+class AccountFilterType(StrEnum):
+    NONE = "NONE"
+    INTERSECTION = "INTERSECTION"
+    DIFFERENCE = "DIFFERENCE"
+    UNION = "UNION"
+
+
+class AccountGateStatus(StrEnum):
+    SUCCEEDED = "SUCCEEDED"
+    FAILED = "FAILED"
+    SKIPPED = "SKIPPED"
+
+
+class AttributeChangeType(StrEnum):
+    Add = "Add"
+    Remove = "Remove"
+    Modify = "Modify"
+
+
+class CallAs(StrEnum):
+    SELF = "SELF"
+    DELEGATED_ADMIN = "DELEGATED_ADMIN"
+
+
+class Capability(StrEnum):
+    CAPABILITY_IAM = "CAPABILITY_IAM"
+    CAPABILITY_NAMED_IAM = "CAPABILITY_NAMED_IAM"
+    CAPABILITY_AUTO_EXPAND = "CAPABILITY_AUTO_EXPAND"
+
+
+class Category(StrEnum):
+    REGISTERED = "REGISTERED"
+    ACTIVATED = "ACTIVATED"
+    THIRD_PARTY = "THIRD_PARTY"
+    AWS_TYPES = "AWS_TYPES"
+
+
+class ChangeAction(StrEnum):
+    Add = "Add"
+    Modify = "Modify"
+    Remove = "Remove"
+    Import = "Import"
+    Dynamic = "Dynamic"
+
+
+class ChangeSetHooksStatus(StrEnum):
+    PLANNING = "PLANNING"
+    PLANNED = "PLANNED"
+    UNAVAILABLE = "UNAVAILABLE"
+
+
+class ChangeSetStatus(StrEnum):
+    CREATE_PENDING = "CREATE_PENDING"
+    CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS"
+    CREATE_COMPLETE = "CREATE_COMPLETE"
+    DELETE_PENDING = "DELETE_PENDING"
+    DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
+    DELETE_COMPLETE = "DELETE_COMPLETE"
+    DELETE_FAILED = "DELETE_FAILED"
+    FAILED = "FAILED"
+
+
+class ChangeSetType(StrEnum):
+    CREATE = "CREATE"
+    UPDATE = "UPDATE"
+    IMPORT = "IMPORT"
+
+
+class ChangeSource(StrEnum):
+    ResourceReference = "ResourceReference"
+    ParameterReference = "ParameterReference"
+    ResourceAttribute = "ResourceAttribute"
+    DirectModification = "DirectModification"
+    Automatic = "Automatic"
+
+
+class ChangeType(StrEnum):
+    Resource = "Resource"
+
+
+class ConcurrencyMode(StrEnum):
+    STRICT_FAILURE_TOLERANCE = "STRICT_FAILURE_TOLERANCE"
+    SOFT_FAILURE_TOLERANCE = "SOFT_FAILURE_TOLERANCE"
+
+
+class DeletionMode(StrEnum):
+    STANDARD = "STANDARD"
+    FORCE_DELETE_STACK = "FORCE_DELETE_STACK"
+
+
+class DeprecatedStatus(StrEnum):
+    LIVE = "LIVE"
+    DEPRECATED = "DEPRECATED"
+
+
+class DetailedStatus(StrEnum):
+    CONFIGURATION_COMPLETE = "CONFIGURATION_COMPLETE"
+    VALIDATION_FAILED = "VALIDATION_FAILED"
+
+
+class DifferenceType(StrEnum):
+    ADD = "ADD"
+    REMOVE = "REMOVE"
+    NOT_EQUAL = "NOT_EQUAL"
+
+
+class EvaluationType(StrEnum):
+    Static = "Static"
+    Dynamic = "Dynamic"
+
+
+class ExecutionStatus(StrEnum):
+    UNAVAILABLE = "UNAVAILABLE"
+    AVAILABLE = "AVAILABLE"
+    EXECUTE_IN_PROGRESS = "EXECUTE_IN_PROGRESS"
+    EXECUTE_COMPLETE = "EXECUTE_COMPLETE"
+    EXECUTE_FAILED = "EXECUTE_FAILED"
+    OBSOLETE = "OBSOLETE"
+
+
+class GeneratedTemplateDeletionPolicy(StrEnum):
+    DELETE = "DELETE"
+    RETAIN = "RETAIN"
+
+
+class GeneratedTemplateResourceStatus(StrEnum):
+    PENDING = "PENDING"
+    IN_PROGRESS = "IN_PROGRESS"
+    FAILED = "FAILED"
+    COMPLETE = "COMPLETE"
+
+
+class GeneratedTemplateStatus(StrEnum):
+    CREATE_PENDING = "CREATE_PENDING"
+    UPDATE_PENDING = "UPDATE_PENDING"
+    DELETE_PENDING = "DELETE_PENDING"
+    CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS"
+    UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS"
+    DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
+    FAILED = "FAILED"
+    COMPLETE = "COMPLETE"
+
+
+class GeneratedTemplateUpdateReplacePolicy(StrEnum):
+    DELETE = "DELETE"
+    RETAIN = "RETAIN"
+
+
+class HandlerErrorCode(StrEnum):
+    NotUpdatable = "NotUpdatable"
+    InvalidRequest = "InvalidRequest"
+    AccessDenied = "AccessDenied"
+    InvalidCredentials = "InvalidCredentials"
+    AlreadyExists = "AlreadyExists"
+    NotFound = "NotFound"
+    ResourceConflict = "ResourceConflict"
+    Throttling = "Throttling"
+    ServiceLimitExceeded = "ServiceLimitExceeded"
+    NotStabilized = "NotStabilized"
+    GeneralServiceException = "GeneralServiceException"
+    ServiceInternalError = "ServiceInternalError"
+    NetworkFailure = "NetworkFailure"
+    InternalFailure = "InternalFailure"
+    InvalidTypeConfiguration = "InvalidTypeConfiguration"
+    HandlerInternalFailure = "HandlerInternalFailure"
+    NonCompliant = "NonCompliant"
+    Unknown = "Unknown"
+    UnsupportedTarget = "UnsupportedTarget"
+
+
+class HookFailureMode(StrEnum):
+    FAIL = "FAIL"
+    WARN = "WARN"
+
+
+class HookInvocationPoint(StrEnum):
+    PRE_PROVISION = "PRE_PROVISION"
+
+
+class HookStatus(StrEnum):
+    HOOK_IN_PROGRESS = "HOOK_IN_PROGRESS"
+    HOOK_COMPLETE_SUCCEEDED = "HOOK_COMPLETE_SUCCEEDED"
+    HOOK_COMPLETE_FAILED = "HOOK_COMPLETE_FAILED"
+    HOOK_FAILED = "HOOK_FAILED"
+
+
+class HookTargetType(StrEnum):
+    RESOURCE = "RESOURCE"
+
+
+class IdentityProvider(StrEnum):
+    AWS_Marketplace = "AWS_Marketplace"
+    GitHub = "GitHub"
+    Bitbucket = "Bitbucket"
+
+
+class ListHookResultsTargetType(StrEnum):
+    CHANGE_SET = "CHANGE_SET"
+    STACK = "STACK"
+    RESOURCE = "RESOURCE"
+    CLOUD_CONTROL = "CLOUD_CONTROL"
+
+
+class OnFailure(StrEnum):
+    DO_NOTHING = "DO_NOTHING"
+    ROLLBACK = "ROLLBACK"
+    DELETE = "DELETE"
+
+
+class OnStackFailure(StrEnum):
+    DO_NOTHING = "DO_NOTHING"
+    ROLLBACK = "ROLLBACK"
+    DELETE = "DELETE"
+
+
+class OperationResultFilterName(StrEnum):
+    OPERATION_RESULT_STATUS = "OPERATION_RESULT_STATUS"
+
+
+class OperationStatus(StrEnum):
+    PENDING = "PENDING"
+    IN_PROGRESS = "IN_PROGRESS"
+    SUCCESS = "SUCCESS"
+    FAILED = "FAILED"
+
+
+class OrganizationStatus(StrEnum):
+    ENABLED = "ENABLED"
+    DISABLED = "DISABLED"
+    DISABLED_PERMANENTLY = "DISABLED_PERMANENTLY"
+
+
+class PermissionModels(StrEnum):
+    SERVICE_MANAGED = "SERVICE_MANAGED"
+    SELF_MANAGED = "SELF_MANAGED"
+
+
+class PolicyAction(StrEnum):
+    Delete = "Delete"
+    Retain = "Retain"
+    Snapshot = "Snapshot"
+    ReplaceAndDelete = "ReplaceAndDelete"
+    ReplaceAndRetain = "ReplaceAndRetain"
+    ReplaceAndSnapshot = "ReplaceAndSnapshot"
+
+
+class ProvisioningType(StrEnum):
+    NON_PROVISIONABLE = "NON_PROVISIONABLE"
+    IMMUTABLE = "IMMUTABLE"
+    FULLY_MUTABLE = "FULLY_MUTABLE"
+
+
+class PublisherStatus(StrEnum):
+    VERIFIED = "VERIFIED"
+    UNVERIFIED = "UNVERIFIED"
+
+
+class RegionConcurrencyType(StrEnum):
+    SEQUENTIAL = "SEQUENTIAL"
+    PARALLEL = "PARALLEL"
+
+
+class RegistrationStatus(StrEnum):
+    COMPLETE = "COMPLETE"
+    IN_PROGRESS = "IN_PROGRESS"
+    FAILED = "FAILED"
+
+
+class RegistryType(StrEnum):
+    RESOURCE = "RESOURCE"
+    MODULE = "MODULE"
+    HOOK = "HOOK"
+
+
+class Replacement(StrEnum):
+    True_ = "True"
+    False_ = "False"
+    Conditional = "Conditional"
+
+
+class RequiresRecreation(StrEnum):
+    Never = "Never"
+    Conditionally = "Conditionally"
+    Always = "Always"
+
+
+class ResourceAttribute(StrEnum):
+    Properties = "Properties"
+    Metadata = "Metadata"
+    CreationPolicy = "CreationPolicy"
+    UpdatePolicy = "UpdatePolicy"
+    DeletionPolicy = "DeletionPolicy"
+    UpdateReplacePolicy = "UpdateReplacePolicy"
+    Tags = "Tags"
+
+
+class ResourceScanStatus(StrEnum):
+    IN_PROGRESS = "IN_PROGRESS"
+    FAILED = "FAILED"
+    COMPLETE = "COMPLETE"
+    EXPIRED = "EXPIRED"
+
+
+class ResourceSignalStatus(StrEnum):
+    SUCCESS = "SUCCESS"
+    FAILURE = "FAILURE"
+
+
+class ResourceStatus(StrEnum):
+    CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS"
+    CREATE_FAILED = "CREATE_FAILED"
+    CREATE_COMPLETE = "CREATE_COMPLETE"
+    DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
+    DELETE_FAILED = "DELETE_FAILED"
+    DELETE_COMPLETE = "DELETE_COMPLETE"
+    DELETE_SKIPPED = "DELETE_SKIPPED"
+    UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS"
+    UPDATE_FAILED = "UPDATE_FAILED"
+    UPDATE_COMPLETE = "UPDATE_COMPLETE"
+    IMPORT_FAILED = "IMPORT_FAILED"
+    IMPORT_COMPLETE = "IMPORT_COMPLETE"
+    IMPORT_IN_PROGRESS = "IMPORT_IN_PROGRESS"
+    IMPORT_ROLLBACK_IN_PROGRESS = "IMPORT_ROLLBACK_IN_PROGRESS"
+    IMPORT_ROLLBACK_FAILED = "IMPORT_ROLLBACK_FAILED"
+    IMPORT_ROLLBACK_COMPLETE = "IMPORT_ROLLBACK_COMPLETE"
+    EXPORT_FAILED = "EXPORT_FAILED"
+    EXPORT_COMPLETE = "EXPORT_COMPLETE"
+    EXPORT_IN_PROGRESS = "EXPORT_IN_PROGRESS"
+    EXPORT_ROLLBACK_IN_PROGRESS = "EXPORT_ROLLBACK_IN_PROGRESS"
+    EXPORT_ROLLBACK_FAILED = "EXPORT_ROLLBACK_FAILED"
+    EXPORT_ROLLBACK_COMPLETE = "EXPORT_ROLLBACK_COMPLETE"
+    UPDATE_ROLLBACK_IN_PROGRESS = "UPDATE_ROLLBACK_IN_PROGRESS"
+    UPDATE_ROLLBACK_COMPLETE = "UPDATE_ROLLBACK_COMPLETE"
+    UPDATE_ROLLBACK_FAILED = "UPDATE_ROLLBACK_FAILED"
+    ROLLBACK_IN_PROGRESS = "ROLLBACK_IN_PROGRESS"
+    ROLLBACK_COMPLETE = "ROLLBACK_COMPLETE"
+    ROLLBACK_FAILED = "ROLLBACK_FAILED"
+
+
+class ScanType(StrEnum):
+    FULL = "FULL"
+    PARTIAL = "PARTIAL"
+
+
+class StackDriftDetectionStatus(StrEnum):
+    DETECTION_IN_PROGRESS = "DETECTION_IN_PROGRESS"
+    DETECTION_FAILED = "DETECTION_FAILED"
+    DETECTION_COMPLETE = "DETECTION_COMPLETE"
+
+
+class StackDriftStatus(StrEnum):
+    DRIFTED = "DRIFTED"
+    IN_SYNC = "IN_SYNC"
+    UNKNOWN = "UNKNOWN"
+    NOT_CHECKED = "NOT_CHECKED"
+
+
+class StackInstanceDetailedStatus(StrEnum):
+    PENDING = "PENDING"
+    RUNNING = "RUNNING"
+    SUCCEEDED = "SUCCEEDED"
+    FAILED = "FAILED"
+    CANCELLED = "CANCELLED"
+    INOPERABLE = "INOPERABLE"
+    SKIPPED_SUSPENDED_ACCOUNT = "SKIPPED_SUSPENDED_ACCOUNT"
+    FAILED_IMPORT = "FAILED_IMPORT"
+
+
+class StackInstanceFilterName(StrEnum):
+    DETAILED_STATUS = "DETAILED_STATUS"
+    LAST_OPERATION_ID = "LAST_OPERATION_ID"
+    DRIFT_STATUS = "DRIFT_STATUS"
+
+
+class StackInstanceStatus(StrEnum):
+    CURRENT = "CURRENT"
+    OUTDATED = "OUTDATED"
+    INOPERABLE = "INOPERABLE"
+
+
+class StackRefactorActionEntity(StrEnum):
+    RESOURCE = "RESOURCE"
+    STACK = "STACK"
+
+
+class StackRefactorActionType(StrEnum):
+    MOVE = "MOVE"
+    CREATE = "CREATE"
+
+
+class StackRefactorDetection(StrEnum):
+    AUTO = "AUTO"
+    MANUAL = "MANUAL"
+
+
+class StackRefactorExecutionStatus(StrEnum):
+    UNAVAILABLE = "UNAVAILABLE"
+    AVAILABLE = "AVAILABLE"
+    OBSOLETE = "OBSOLETE"
+    EXECUTE_IN_PROGRESS = "EXECUTE_IN_PROGRESS"
+    EXECUTE_COMPLETE = "EXECUTE_COMPLETE"
+    EXECUTE_FAILED = "EXECUTE_FAILED"
+    ROLLBACK_IN_PROGRESS = "ROLLBACK_IN_PROGRESS"
+    ROLLBACK_COMPLETE = "ROLLBACK_COMPLETE"
+    ROLLBACK_FAILED = "ROLLBACK_FAILED"
+
+
+class StackRefactorStatus(StrEnum):
+    CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS"
+    CREATE_COMPLETE = "CREATE_COMPLETE"
+    CREATE_FAILED = "CREATE_FAILED"
+    DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
+    DELETE_COMPLETE = "DELETE_COMPLETE"
+    DELETE_FAILED = "DELETE_FAILED"
+
+
+class StackResourceDriftStatus(StrEnum):
+    IN_SYNC = "IN_SYNC"
+    MODIFIED = "MODIFIED"
+    DELETED = "DELETED"
+    NOT_CHECKED = "NOT_CHECKED"
+
+
+class StackSetDriftDetectionStatus(StrEnum):
+    COMPLETED = "COMPLETED"
+    FAILED = "FAILED"
+    PARTIAL_SUCCESS = "PARTIAL_SUCCESS"
+    IN_PROGRESS = "IN_PROGRESS"
+    STOPPED = "STOPPED"
+
+
+class StackSetDriftStatus(StrEnum):
+    DRIFTED = "DRIFTED"
+    IN_SYNC = "IN_SYNC"
+    NOT_CHECKED = "NOT_CHECKED"
+
+
+class StackSetOperationAction(StrEnum):
+    CREATE = "CREATE"
+    UPDATE = "UPDATE"
+    DELETE = "DELETE"
+    DETECT_DRIFT = "DETECT_DRIFT"
+
+
+class StackSetOperationResultStatus(StrEnum):
+    PENDING = "PENDING"
+    RUNNING = "RUNNING"
+    SUCCEEDED = "SUCCEEDED"
+    FAILED = "FAILED"
+    CANCELLED = "CANCELLED"
+
+
+class StackSetOperationStatus(StrEnum):
+    RUNNING = "RUNNING"
+    SUCCEEDED = "SUCCEEDED"
+    FAILED = "FAILED"
+    STOPPING = "STOPPING"
+    STOPPED = "STOPPED"
+    QUEUED = "QUEUED"
+
+
+class StackSetStatus(StrEnum):
+    ACTIVE = "ACTIVE"
+    DELETED = "DELETED"
+
+
+class StackStatus(StrEnum):
+    CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS"
+    CREATE_FAILED = "CREATE_FAILED"
+    CREATE_COMPLETE = "CREATE_COMPLETE"
+    ROLLBACK_IN_PROGRESS = "ROLLBACK_IN_PROGRESS"
+    ROLLBACK_FAILED = "ROLLBACK_FAILED"
+    ROLLBACK_COMPLETE = "ROLLBACK_COMPLETE"
+    DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
+    DELETE_FAILED = "DELETE_FAILED"
+    DELETE_COMPLETE = "DELETE_COMPLETE"
+    UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS"
+    UPDATE_COMPLETE_CLEANUP_IN_PROGRESS = "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"
+    UPDATE_COMPLETE = "UPDATE_COMPLETE"
+    UPDATE_FAILED = "UPDATE_FAILED"
+    UPDATE_ROLLBACK_IN_PROGRESS = "UPDATE_ROLLBACK_IN_PROGRESS"
+    UPDATE_ROLLBACK_FAILED = "UPDATE_ROLLBACK_FAILED"
+    UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS = "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS"
+    UPDATE_ROLLBACK_COMPLETE = "UPDATE_ROLLBACK_COMPLETE"
+    REVIEW_IN_PROGRESS = "REVIEW_IN_PROGRESS"
+    IMPORT_IN_PROGRESS = "IMPORT_IN_PROGRESS"
+    IMPORT_COMPLETE = "IMPORT_COMPLETE"
+    IMPORT_ROLLBACK_IN_PROGRESS = "IMPORT_ROLLBACK_IN_PROGRESS"
+    IMPORT_ROLLBACK_FAILED = "IMPORT_ROLLBACK_FAILED"
+    IMPORT_ROLLBACK_COMPLETE = "IMPORT_ROLLBACK_COMPLETE"
+
+
+class TemplateFormat(StrEnum):
+    JSON = "JSON"
+    YAML = "YAML"
+
+
+class TemplateStage(StrEnum):
+    Original = "Original"
+    Processed = "Processed"
+
+
+class ThirdPartyType(StrEnum):
+    RESOURCE = "RESOURCE"
+    MODULE = "MODULE"
+    HOOK = "HOOK"
+
+
+class TypeTestsStatus(StrEnum):
+    PASSED = "PASSED"
+    FAILED = "FAILED"
+    IN_PROGRESS = "IN_PROGRESS"
+    NOT_TESTED = "NOT_TESTED"
+
+
+class VersionBump(StrEnum):
+    MAJOR = "MAJOR"
+    MINOR = "MINOR"
+
+
+class Visibility(StrEnum):
+    PUBLIC = "PUBLIC"
+    PRIVATE = "PRIVATE"
+
+
+class WarningType(StrEnum):
+    MUTUALLY_EXCLUSIVE_PROPERTIES = "MUTUALLY_EXCLUSIVE_PROPERTIES"
+    UNSUPPORTED_PROPERTIES = "UNSUPPORTED_PROPERTIES"
+    MUTUALLY_EXCLUSIVE_TYPES = "MUTUALLY_EXCLUSIVE_TYPES"
+
+
+class AlreadyExistsException(ServiceException):
+    code: str = "AlreadyExistsException"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class CFNRegistryException(ServiceException):
+    code: str = "CFNRegistryException"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class ChangeSetNotFoundException(ServiceException):
+    code: str = "ChangeSetNotFound"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class ConcurrentResourcesLimitExceededException(ServiceException):
+    code: str = "ConcurrentResourcesLimitExceeded"
+    sender_fault: bool = True
+    status_code: int = 429
+
+
+class CreatedButModifiedException(ServiceException):
+    code: str = "CreatedButModifiedException"
+    sender_fault: bool = True
+    status_code: int = 409
+
+
+class GeneratedTemplateNotFoundException(ServiceException):
+    code: str = "GeneratedTemplateNotFound"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class HookResultNotFoundException(ServiceException):
+    code: str = "HookResultNotFound"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class InsufficientCapabilitiesException(ServiceException):
+    code: str = "InsufficientCapabilitiesException"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class InvalidChangeSetStatusException(ServiceException):
+    code: str = "InvalidChangeSetStatus"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class InvalidOperationException(ServiceException):
+    code: str = "InvalidOperationException"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class InvalidStateTransitionException(ServiceException):
+    code: str = "InvalidStateTransition"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class LimitExceededException(ServiceException):
+    code: str = "LimitExceededException"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class NameAlreadyExistsException(ServiceException):
+    code: str = "NameAlreadyExistsException"
+    sender_fault: bool = True
+    status_code: int = 409
+
+
+class OperationIdAlreadyExistsException(ServiceException):
+    code: str = "OperationIdAlreadyExistsException"
+    sender_fault: bool = True
+    status_code: int = 409
+
+
+class OperationInProgressException(ServiceException):
+    code: str = "OperationInProgressException"
+    sender_fault: bool = True
+    status_code: int = 409
+
+
+class OperationNotFoundException(ServiceException):
+    code: str = "OperationNotFoundException"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class OperationStatusCheckFailedException(ServiceException):
+    code: str = "ConditionalCheckFailed"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class ResourceScanInProgressException(ServiceException):
+    code: str = "ResourceScanInProgress"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class ResourceScanLimitExceededException(ServiceException):
+    code: str = "ResourceScanLimitExceeded"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class ResourceScanNotFoundException(ServiceException):
+    code: str = "ResourceScanNotFound"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class StackInstanceNotFoundException(ServiceException):
+    code: str = "StackInstanceNotFoundException"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class StackNotFoundException(ServiceException):
+    code: str = "StackNotFoundException"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class StackRefactorNotFoundException(ServiceException):
+    code: str = "StackRefactorNotFoundException"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class StackSetNotEmptyException(ServiceException):
+    code: str = "StackSetNotEmptyException"
+    sender_fault: bool = True
+    status_code: int = 409
+
+
+class StackSetNotFoundException(ServiceException):
+    code: str = "StackSetNotFoundException"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class StaleRequestException(ServiceException):
+    code: str = "StaleRequestException"
+    sender_fault: bool = True
+    status_code: int = 409
+
+
+class TokenAlreadyExistsException(ServiceException):
+    code: str = "TokenAlreadyExistsException"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class TypeConfigurationNotFoundException(ServiceException):
+    code: str = "TypeConfigurationNotFoundException"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class TypeNotFoundException(ServiceException):
+    code: str = "TypeNotFoundException"
+    sender_fault: bool = True
+    status_code: int = 404
+
+
+class AccountGateResult(TypedDict, total=False):
+    Status: Optional[AccountGateStatus]
+    StatusReason: Optional[AccountGateStatusReason]
+
+
+class AccountLimit(TypedDict, total=False):
+    Name: Optional[LimitName]
+    Value: Optional[LimitValue]
+
+
+AccountLimitList = List[AccountLimit]
+AccountList = List[Account]
+
+
+class ActivateOrganizationsAccessInput(ServiceRequest):
+    pass
+
+
+class ActivateOrganizationsAccessOutput(TypedDict, total=False):
+    pass
+
+
+MajorVersion = int
+
+
+class LoggingConfig(TypedDict, total=False):
+    LogRoleArn: RoleArn
+    LogGroupName: LogGroupName
+
+
+class ActivateTypeInput(ServiceRequest):
+    Type: Optional[ThirdPartyType]
+    PublicTypeArn: Optional[ThirdPartyTypeArn]
+    PublisherId: Optional[PublisherId]
+    TypeName: Optional[TypeName]
+    TypeNameAlias: Optional[TypeName]
+    AutoUpdate: Optional[AutoUpdate]
+    LoggingConfig: Optional[LoggingConfig]
+    ExecutionRoleArn: Optional[RoleArn]
+    VersionBump: Optional[VersionBump]
+    MajorVersion: Optional[MajorVersion]
+
+
+class ActivateTypeOutput(TypedDict, total=False):
+    Arn: Optional[PrivateTypeArn]
+
+
+AllowedValues = List[AllowedValue]
+
+
+class AutoDeployment(TypedDict, total=False):
+    Enabled: Optional[AutoDeploymentNullable]
+    RetainStacksOnAccountRemoval: Optional[RetainStacksOnAccountRemovalNullable]
+
+
+class TypeConfigurationIdentifier(TypedDict, total=False):
+    TypeArn: Optional[TypeArn]
+    TypeConfigurationAlias: Optional[TypeConfigurationAlias]
+    TypeConfigurationArn: Optional[TypeConfigurationArn]
+    Type: Optional[ThirdPartyType]
+    TypeName: Optional[TypeName]
+
+
+class BatchDescribeTypeConfigurationsError(TypedDict, total=False):
+    ErrorCode: Optional[ErrorCode]
+    ErrorMessage: Optional[ErrorMessage]
+    TypeConfigurationIdentifier: Optional[TypeConfigurationIdentifier]
+
+
+BatchDescribeTypeConfigurationsErrors = List[BatchDescribeTypeConfigurationsError]
+TypeConfigurationIdentifiers = List[TypeConfigurationIdentifier]
+
+
+class BatchDescribeTypeConfigurationsInput(ServiceRequest):
+    TypeConfigurationIdentifiers: TypeConfigurationIdentifiers
+
+
+Timestamp = datetime
+
+
+class TypeConfigurationDetails(TypedDict, total=False):
+    Arn: Optional[TypeConfigurationArn]
+    Alias: Optional[TypeConfigurationAlias]
+    Configuration: Optional[TypeConfiguration]
+    LastUpdated: Optional[Timestamp]
+    TypeArn: Optional[TypeArn]
+    TypeName: Optional[TypeName]
+    IsDefaultConfiguration: Optional[IsDefaultConfiguration]
+
+
+TypeConfigurationDetailsList = List[TypeConfigurationDetails]
+UnprocessedTypeConfigurations = List[TypeConfigurationIdentifier]
+
+
+class BatchDescribeTypeConfigurationsOutput(TypedDict, total=False):
+    Errors: Optional[BatchDescribeTypeConfigurationsErrors]
+    UnprocessedTypeConfigurations: Optional[UnprocessedTypeConfigurations]
+    TypeConfigurations: Optional[TypeConfigurationDetailsList]
+
+
+class CancelUpdateStackInput(ServiceRequest):
+    StackName: StackName
+    ClientRequestToken: Optional[ClientRequestToken]
+
+
+Capabilities = List[Capability]
+
+
+class ModuleInfo(TypedDict, total=False):
+    TypeHierarchy: Optional[TypeHierarchy]
+    LogicalIdHierarchy: Optional[LogicalIdHierarchy]
+
+
+class ResourceTargetDefinition(TypedDict, total=False):
+    Attribute: Optional[ResourceAttribute]
+    Name: Optional[PropertyName]
+    RequiresRecreation: Optional[RequiresRecreation]
+    Path: Optional[ResourcePropertyPath]
+    BeforeValue: Optional[BeforeValue]
+    AfterValue: Optional[AfterValue]
+    AttributeChangeType: Optional[AttributeChangeType]
+
+
+class ResourceChangeDetail(TypedDict, total=False):
+    Target: Optional[ResourceTargetDefinition]
+    Evaluation: Optional[EvaluationType]
+    ChangeSource: Optional[ChangeSource]
+    CausingEntity: Optional[CausingEntity]
+
+
+ResourceChangeDetails = List[ResourceChangeDetail]
+Scope = List[ResourceAttribute]
+
+
+class ResourceChange(TypedDict, total=False):
+    PolicyAction: Optional[PolicyAction]
+    Action: Optional[ChangeAction]
+    LogicalResourceId: Optional[LogicalResourceId]
+    PhysicalResourceId: Optional[PhysicalResourceId]
+    ResourceType: Optional[ResourceType]
+    Replacement: Optional[Replacement]
+    Scope: Optional[Scope]
+    Details: Optional[ResourceChangeDetails]
+    ChangeSetId: Optional[ChangeSetId]
+    ModuleInfo: Optional[ModuleInfo]
+    BeforeContext: Optional[BeforeContext]
+    AfterContext: Optional[AfterContext]
+
+
+class Change(TypedDict, total=False):
+    Type: Optional[ChangeType]
+    HookInvocationCount: Optional[HookInvocationCount]
+    ResourceChange: Optional[ResourceChange]
+
+
+class ChangeSetHookResourceTargetDetails(TypedDict, total=False):
+    LogicalResourceId: Optional[LogicalResourceId]
+    ResourceType: Optional[HookTargetTypeName]
+    ResourceAction: Optional[ChangeAction]
+
+
+class ChangeSetHookTargetDetails(TypedDict, total=False):
+    TargetType: Optional[HookTargetType]
+    ResourceTargetDetails: Optional[ChangeSetHookResourceTargetDetails]
+
+
+class ChangeSetHook(TypedDict, total=False):
+    InvocationPoint: Optional[HookInvocationPoint]
+    FailureMode: Optional[HookFailureMode]
+    TypeName: Optional[HookTypeName]
+    TypeVersionId: Optional[HookTypeVersionId]
+    TypeConfigurationVersionId: Optional[HookTypeConfigurationVersionId]
+    TargetDetails: Optional[ChangeSetHookTargetDetails]
+
+
+ChangeSetHooks = List[ChangeSetHook]
+CreationTime = datetime
+
+
+class ChangeSetSummary(TypedDict, total=False):
+    StackId: Optional[StackId]
+    StackName: Optional[StackName]
+    ChangeSetId: Optional[ChangeSetId]
+    ChangeSetName: Optional[ChangeSetName]
+    ExecutionStatus: Optional[ExecutionStatus]
+    Status: Optional[ChangeSetStatus]
+    StatusReason: Optional[ChangeSetStatusReason]
+    CreationTime: Optional[CreationTime]
+    Description: Optional[Description]
+    IncludeNestedStacks: Optional[IncludeNestedStacks]
+    ParentChangeSetId: Optional[ChangeSetId]
+    RootChangeSetId: Optional[ChangeSetId]
+    ImportExistingResources: Optional[ImportExistingResources]
+
+
+ChangeSetSummaries = List[ChangeSetSummary]
+Changes = List[Change]
+ResourcesToSkip = List[ResourceToSkip]
+
+
+class ContinueUpdateRollbackInput(ServiceRequest):
+    StackName: StackNameOrId
+    RoleARN: Optional[RoleARN]
+    ResourcesToSkip: Optional[ResourcesToSkip]
+    ClientRequestToken: Optional[ClientRequestToken]
+
+
+class ContinueUpdateRollbackOutput(TypedDict, total=False):
+    pass
+
+
+ResourceIdentifierProperties = Dict[ResourceIdentifierPropertyKey, ResourceIdentifierPropertyValue]
+
+
+class ResourceToImport(TypedDict, total=False):
+    ResourceType: ResourceType
+    LogicalResourceId: LogicalResourceId
+    ResourceIdentifier: ResourceIdentifierProperties
+
+
+ResourcesToImport = List[ResourceToImport]
+
+
+class Tag(TypedDict, total=False):
+    Key: TagKey
+    Value: TagValue
+
+
+Tags = List[Tag]
+NotificationARNs = List[NotificationARN]
+
+
+class RollbackTrigger(TypedDict, total=False):
+    Arn: Arn
+    Type: Type
+
+
+RollbackTriggers = List[RollbackTrigger]
+
+
+class RollbackConfiguration(TypedDict, total=False):
+    RollbackTriggers: Optional[RollbackTriggers]
+    MonitoringTimeInMinutes: Optional[MonitoringTimeInMinutes]
+
+
+ResourceTypes = List[ResourceType]
+
+
+class Parameter(TypedDict, total=False):
+    ParameterKey: Optional[ParameterKey]
+    ParameterValue: Optional[ParameterValue]
+    UsePreviousValue: Optional[UsePreviousValue]
+    ResolvedValue: Optional[ParameterValue]
+
+
+Parameters = List[Parameter]
+
+
+class CreateChangeSetInput(ServiceRequest):
+    StackName: StackNameOrId
+    TemplateBody: Optional[TemplateBody]
+    TemplateURL: Optional[TemplateURL]
+    UsePreviousTemplate: Optional[UsePreviousTemplate]
+    Parameters: Optional[Parameters]
+    Capabilities: Optional[Capabilities]
+    ResourceTypes: Optional[ResourceTypes]
+    RoleARN: Optional[RoleARN]
+    RollbackConfiguration: Optional[RollbackConfiguration]
+    NotificationARNs: Optional[NotificationARNs]
+    Tags: Optional[Tags]
+    ChangeSetName: ChangeSetName
+    ClientToken: Optional[ClientToken]
+    Description: Optional[Description]
+    ChangeSetType: Optional[ChangeSetType]
+    ResourcesToImport: Optional[ResourcesToImport]
+    IncludeNestedStacks: Optional[IncludeNestedStacks]
+    OnStackFailure: Optional[OnStackFailure]
+    ImportExistingResources: Optional[ImportExistingResources]
+
+
+class CreateChangeSetOutput(TypedDict, total=False):
+    Id: Optional[ChangeSetId]
+    StackId: Optional[StackId]
+
+
+class TemplateConfiguration(TypedDict, total=False):
+    DeletionPolicy: Optional[GeneratedTemplateDeletionPolicy]
+    UpdateReplacePolicy: Optional[GeneratedTemplateUpdateReplacePolicy]
+
+
+class ResourceDefinition(TypedDict, total=False):
+    ResourceType: ResourceType
+    LogicalResourceId: Optional[LogicalResourceId]
+    ResourceIdentifier: ResourceIdentifierProperties
+
+
+ResourceDefinitions = List[ResourceDefinition]
+
+
+class CreateGeneratedTemplateInput(ServiceRequest):
+    Resources: Optional[ResourceDefinitions]
+    GeneratedTemplateName: GeneratedTemplateName
+    StackName: Optional[StackName]
+    TemplateConfiguration: Optional[TemplateConfiguration]
+
+
+class CreateGeneratedTemplateOutput(TypedDict, total=False):
+    GeneratedTemplateId: Optional[GeneratedTemplateId]
+
+
+class CreateStackInput(ServiceRequest):
+    StackName: StackName
+    TemplateBody: Optional[TemplateBody]
+    TemplateURL: Optional[TemplateURL]
+    Parameters: Optional[Parameters]
+    DisableRollback: Optional[DisableRollback]
+    RollbackConfiguration: Optional[RollbackConfiguration]
+    TimeoutInMinutes: Optional[TimeoutMinutes]
+    NotificationARNs: Optional[NotificationARNs]
+    Capabilities: Optional[Capabilities]
+    ResourceTypes: Optional[ResourceTypes]
+    RoleARN: Optional[RoleARN]
+    OnFailure: Optional[OnFailure]
+    StackPolicyBody: Optional[StackPolicyBody]
+    StackPolicyURL: Optional[StackPolicyURL]
+    Tags: Optional[Tags]
+    ClientRequestToken: Optional[ClientRequestToken]
+    EnableTerminationProtection: Optional[EnableTerminationProtection]
+    RetainExceptOnCreate: Optional[RetainExceptOnCreate]
+
+
+RegionList = List[Region]
+
+
+class StackSetOperationPreferences(TypedDict, total=False):
+    RegionConcurrencyType: Optional[RegionConcurrencyType]
+    RegionOrder: Optional[RegionList]
+    FailureToleranceCount: Optional[FailureToleranceCount]
+    FailureTolerancePercentage: Optional[FailureTolerancePercentage]
+    MaxConcurrentCount: Optional[MaxConcurrentCount]
+    MaxConcurrentPercentage: Optional[MaxConcurrentPercentage]
+    ConcurrencyMode: Optional[ConcurrencyMode]
+
+
+OrganizationalUnitIdList = List[OrganizationalUnitId]
+
+
+class DeploymentTargets(TypedDict, total=False):
+    Accounts: Optional[AccountList]
+    AccountsUrl: Optional[AccountsUrl]
+    OrganizationalUnitIds: Optional[OrganizationalUnitIdList]
+    AccountFilterType: Optional[AccountFilterType]
+
+
+class CreateStackInstancesInput(ServiceRequest):
+    StackSetName: StackSetName
+    Accounts: Optional[AccountList]
+    DeploymentTargets: Optional[DeploymentTargets]
+    Regions: RegionList
+    ParameterOverrides: Optional[Parameters]
+    OperationPreferences: Optional[StackSetOperationPreferences]
+    OperationId: Optional[ClientRequestToken]
+    CallAs: Optional[CallAs]
+
+
+class CreateStackInstancesOutput(TypedDict, total=False):
+    OperationId: Optional[ClientRequestToken]
+
+
+class CreateStackOutput(TypedDict, total=False):
+    StackId: Optional[StackId]
+
+
+class StackDefinition(TypedDict, total=False):
+    StackName: Optional[StackName]
+    TemplateBody: Optional[TemplateBody]
+    TemplateURL: Optional[TemplateURL]
+
+
+StackDefinitions = List[StackDefinition]
+
+
+class ResourceLocation(TypedDict, total=False):
+    StackName: StackName
+    LogicalResourceId: LogicalResourceId
+
+
+class ResourceMapping(TypedDict, total=False):
+    Source: ResourceLocation
+    Destination: ResourceLocation
+
+
+ResourceMappings = List[ResourceMapping]
+
+
+class CreateStackRefactorInput(ServiceRequest):
+    Description: Optional[Description]
+    EnableStackCreation: Optional[EnableStackCreation]
+    ResourceMappings: Optional[ResourceMappings]
+    StackDefinitions: StackDefinitions
+
+
+class CreateStackRefactorOutput(TypedDict, total=False):
+    StackRefactorId: StackRefactorId
+
+
+class ManagedExecution(TypedDict, total=False):
+    Active: Optional[ManagedExecutionNullable]
+
+
+class CreateStackSetInput(ServiceRequest):
+    StackSetName: StackSetName
+    Description: Optional[Description]
+    TemplateBody: Optional[TemplateBody]
+    TemplateURL: Optional[TemplateURL]
+    StackId: Optional[StackId]
+    Parameters: Optional[Parameters]
+    Capabilities: Optional[Capabilities]
+    Tags: Optional[Tags]
+    AdministrationRoleARN: Optional[RoleARN]
+    ExecutionRoleName: Optional[ExecutionRoleName]
+    PermissionModel: Optional[PermissionModels]
+    AutoDeployment: Optional[AutoDeployment]
+    CallAs: Optional[CallAs]
+    ClientRequestToken: Optional[ClientRequestToken]
+    ManagedExecution: Optional[ManagedExecution]
+
+
+class CreateStackSetOutput(TypedDict, total=False):
+    StackSetId: Optional[StackSetId]
+
+
+class DeactivateOrganizationsAccessInput(ServiceRequest):
+    pass
+
+
+class DeactivateOrganizationsAccessOutput(TypedDict, total=False):
+    pass
+
+
+class DeactivateTypeInput(ServiceRequest):
+    TypeName: Optional[TypeName]
+    Type: Optional[ThirdPartyType]
+    Arn: Optional[PrivateTypeArn]
+
+
+class DeactivateTypeOutput(TypedDict, total=False):
+    pass
+
+
+class DeleteChangeSetInput(ServiceRequest):
+    ChangeSetName: ChangeSetNameOrId
+    StackName: Optional[StackNameOrId]
+
+
+class DeleteChangeSetOutput(TypedDict, total=False):
+    pass
+
+
+class DeleteGeneratedTemplateInput(ServiceRequest):
+    GeneratedTemplateName: GeneratedTemplateName
+
+
+RetainResources = List[LogicalResourceId]
+
+
+class DeleteStackInput(ServiceRequest):
+    StackName: StackName
+    RetainResources: Optional[RetainResources]
+    RoleARN: Optional[RoleARN]
+    ClientRequestToken: Optional[ClientRequestToken]
+    DeletionMode: Optional[DeletionMode]
+
+
+class DeleteStackInstancesInput(ServiceRequest):
+    StackSetName: StackSetName
+    Accounts: Optional[AccountList]
+    DeploymentTargets: Optional[DeploymentTargets]
+    Regions: RegionList
+    OperationPreferences: Optional[StackSetOperationPreferences]
+    RetainStacks: RetainStacks
+    OperationId: Optional[ClientRequestToken]
+    CallAs: Optional[CallAs]
+
+
+class DeleteStackInstancesOutput(TypedDict, total=False):
+    OperationId: Optional[ClientRequestToken]
+
+
+class DeleteStackSetInput(ServiceRequest):
+    StackSetName: StackSetName
+    CallAs: Optional[CallAs]
+
+
+class DeleteStackSetOutput(TypedDict, total=False):
+    pass
+
+
+DeletionTime = datetime
+
+
+class DeregisterTypeInput(ServiceRequest):
+    Arn: Optional[PrivateTypeArn]
+    Type: Optional[RegistryType]
+    TypeName: Optional[TypeName]
+    VersionId: Optional[TypeVersionId]
+
+
+class DeregisterTypeOutput(TypedDict, total=False):
+    pass
+
+
+class DescribeAccountLimitsInput(ServiceRequest):
+    NextToken: Optional[NextToken]
+
+
+class DescribeAccountLimitsOutput(TypedDict, total=False):
+    AccountLimits: Optional[AccountLimitList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeChangeSetHooksInput(ServiceRequest):
+    ChangeSetName: ChangeSetNameOrId
+    StackName: Optional[StackNameOrId]
+    NextToken: Optional[NextToken]
+    LogicalResourceId: Optional[LogicalResourceId]
+
+
+class DescribeChangeSetHooksOutput(TypedDict, total=False):
+    ChangeSetId: Optional[ChangeSetId]
+    ChangeSetName: Optional[ChangeSetName]
+    Hooks: Optional[ChangeSetHooks]
+    Status: Optional[ChangeSetHooksStatus]
+    NextToken: Optional[NextToken]
+    StackId: Optional[StackId]
+    StackName: Optional[StackName]
+
+
+class DescribeChangeSetInput(ServiceRequest):
+    ChangeSetName:
ChangeSetNameOrId + StackName: Optional[StackNameOrId] + NextToken: Optional[NextToken] + IncludePropertyValues: Optional[IncludePropertyValues] + + +class DescribeChangeSetOutput(TypedDict, total=False): + ChangeSetName: Optional[ChangeSetName] + ChangeSetId: Optional[ChangeSetId] + StackId: Optional[StackId] + StackName: Optional[StackName] + Description: Optional[Description] + Parameters: Optional[Parameters] + CreationTime: Optional[CreationTime] + ExecutionStatus: Optional[ExecutionStatus] + Status: Optional[ChangeSetStatus] + StatusReason: Optional[ChangeSetStatusReason] + NotificationARNs: Optional[NotificationARNs] + RollbackConfiguration: Optional[RollbackConfiguration] + Capabilities: Optional[Capabilities] + Tags: Optional[Tags] + Changes: Optional[Changes] + NextToken: Optional[NextToken] + IncludeNestedStacks: Optional[IncludeNestedStacks] + ParentChangeSetId: Optional[ChangeSetId] + RootChangeSetId: Optional[ChangeSetId] + OnStackFailure: Optional[OnStackFailure] + ImportExistingResources: Optional[ImportExistingResources] + + +class DescribeGeneratedTemplateInput(ServiceRequest): + GeneratedTemplateName: GeneratedTemplateName + + +class TemplateProgress(TypedDict, total=False): + ResourcesSucceeded: Optional[ResourcesSucceeded] + ResourcesFailed: Optional[ResourcesFailed] + ResourcesProcessing: Optional[ResourcesProcessing] + ResourcesPending: Optional[ResourcesPending] + + +LastUpdatedTime = datetime + + +class WarningProperty(TypedDict, total=False): + PropertyPath: Optional[PropertyPath] + Required: Optional[RequiredProperty] + Description: Optional[PropertyDescription] + + +WarningProperties = List[WarningProperty] + + +class WarningDetail(TypedDict, total=False): + Type: Optional[WarningType] + Properties: Optional[WarningProperties] + + +WarningDetails = List[WarningDetail] + + +class ResourceDetail(TypedDict, total=False): + ResourceType: Optional[ResourceType] + LogicalResourceId: Optional[LogicalResourceId] + ResourceIdentifier: Optional[ResourceIdentifierProperties] + ResourceStatus: Optional[GeneratedTemplateResourceStatus] + ResourceStatusReason: Optional[ResourceStatusReason] + Warnings: Optional[WarningDetails] + + +ResourceDetails = List[ResourceDetail] + + +class DescribeGeneratedTemplateOutput(TypedDict, total=False): + GeneratedTemplateId: Optional[GeneratedTemplateId] + GeneratedTemplateName: Optional[GeneratedTemplateName] + Resources: Optional[ResourceDetails] + Status: Optional[GeneratedTemplateStatus] + StatusReason: Optional[TemplateStatusReason] + CreationTime: Optional[CreationTime] + LastUpdatedTime: Optional[LastUpdatedTime] + Progress: Optional[TemplateProgress] + StackId: Optional[StackId] + TemplateConfiguration: Optional[TemplateConfiguration] + TotalWarnings: Optional[TotalWarnings] + + +class DescribeOrganizationsAccessInput(ServiceRequest): + CallAs: Optional[CallAs] + + +class DescribeOrganizationsAccessOutput(TypedDict, total=False): + Status: Optional[OrganizationStatus] + + +class DescribePublisherInput(ServiceRequest): + PublisherId: Optional[PublisherId] + + +class DescribePublisherOutput(TypedDict, total=False): + PublisherId: Optional[PublisherId] + PublisherStatus: Optional[PublisherStatus] + IdentityProvider: Optional[IdentityProvider] + PublisherProfile: Optional[PublisherProfile] + + +class DescribeResourceScanInput(ServiceRequest): + ResourceScanId: ResourceScanId + + +ResourceTypeFilters = List[ResourceTypeFilter] + + +class ScanFilter(TypedDict, total=False): + Types: Optional[ResourceTypeFilters] + + +ScanFilters = 
List[ScanFilter] + + +class DescribeResourceScanOutput(TypedDict, total=False): + ResourceScanId: Optional[ResourceScanId] + Status: Optional[ResourceScanStatus] + StatusReason: Optional[ResourceScanStatusReason] + StartTime: Optional[Timestamp] + EndTime: Optional[Timestamp] + PercentageCompleted: Optional[PercentageCompleted] + ResourceTypes: Optional[ResourceTypes] + ResourcesScanned: Optional[ResourcesScanned] + ResourcesRead: Optional[ResourcesRead] + ScanFilters: Optional[ScanFilters] + + +class DescribeStackDriftDetectionStatusInput(ServiceRequest): + StackDriftDetectionId: StackDriftDetectionId + + +class DescribeStackDriftDetectionStatusOutput(TypedDict, total=False): + StackId: StackId + StackDriftDetectionId: StackDriftDetectionId + StackDriftStatus: Optional[StackDriftStatus] + DetectionStatus: StackDriftDetectionStatus + DetectionStatusReason: Optional[StackDriftDetectionStatusReason] + DriftedStackResourceCount: Optional[BoxedInteger] + Timestamp: Timestamp + + +class DescribeStackEventsInput(ServiceRequest): + StackName: Optional[StackName] + NextToken: Optional[NextToken] + + +class StackEvent(TypedDict, total=False): + StackId: StackId + EventId: EventId + StackName: StackName + LogicalResourceId: Optional[LogicalResourceId] + PhysicalResourceId: Optional[PhysicalResourceId] + ResourceType: Optional[ResourceType] + Timestamp: Timestamp + ResourceStatus: Optional[ResourceStatus] + ResourceStatusReason: Optional[ResourceStatusReason] + ResourceProperties: Optional[ResourceProperties] + ClientRequestToken: Optional[ClientRequestToken] + HookType: Optional[HookType] + HookStatus: Optional[HookStatus] + HookStatusReason: Optional[HookStatusReason] + HookInvocationPoint: Optional[HookInvocationPoint] + HookFailureMode: Optional[HookFailureMode] + DetailedStatus: Optional[DetailedStatus] + + +StackEvents = List[StackEvent] + + +class DescribeStackEventsOutput(TypedDict, total=False): + StackEvents: Optional[StackEvents] + NextToken: Optional[NextToken] + + +class DescribeStackInstanceInput(ServiceRequest): + StackSetName: StackSetName + StackInstanceAccount: Account + StackInstanceRegion: Region + CallAs: Optional[CallAs] + + +class StackInstanceComprehensiveStatus(TypedDict, total=False): + DetailedStatus: Optional[StackInstanceDetailedStatus] + + +class StackInstance(TypedDict, total=False): + StackSetId: Optional[StackSetId] + Region: Optional[Region] + Account: Optional[Account] + StackId: Optional[StackId] + ParameterOverrides: Optional[Parameters] + Status: Optional[StackInstanceStatus] + StackInstanceStatus: Optional[StackInstanceComprehensiveStatus] + StatusReason: Optional[Reason] + OrganizationalUnitId: Optional[OrganizationalUnitId] + DriftStatus: Optional[StackDriftStatus] + LastDriftCheckTimestamp: Optional[Timestamp] + LastOperationId: Optional[ClientRequestToken] + + +class DescribeStackInstanceOutput(TypedDict, total=False): + StackInstance: Optional[StackInstance] + + +class DescribeStackRefactorInput(ServiceRequest): + StackRefactorId: StackRefactorId + + +StackIds = List[StackId] + + +class DescribeStackRefactorOutput(TypedDict, total=False): + Description: Optional[Description] + StackRefactorId: Optional[StackRefactorId] + StackIds: Optional[StackIds] + ExecutionStatus: Optional[StackRefactorExecutionStatus] + ExecutionStatusReason: Optional[ExecutionStatusReason] + Status: Optional[StackRefactorStatus] + StatusReason: Optional[StackRefactorStatusReason] + + +StackResourceDriftStatusFilters = List[StackResourceDriftStatus] + + +class 
DescribeStackResourceDriftsInput(ServiceRequest): + StackName: StackNameOrId + StackResourceDriftStatusFilters: Optional[StackResourceDriftStatusFilters] + NextToken: Optional[NextToken] + MaxResults: Optional[BoxedMaxResults] + + +class PropertyDifference(TypedDict, total=False): + PropertyPath: PropertyPath + ExpectedValue: PropertyValue + ActualValue: PropertyValue + DifferenceType: DifferenceType + + +PropertyDifferences = List[PropertyDifference] + + +class PhysicalResourceIdContextKeyValuePair(TypedDict, total=False): + Key: Key + Value: Value + + +PhysicalResourceIdContext = List[PhysicalResourceIdContextKeyValuePair] + + +class StackResourceDrift(TypedDict, total=False): + StackId: StackId + LogicalResourceId: LogicalResourceId + PhysicalResourceId: Optional[PhysicalResourceId] + PhysicalResourceIdContext: Optional[PhysicalResourceIdContext] + ResourceType: ResourceType + ExpectedProperties: Optional[Properties] + ActualProperties: Optional[Properties] + PropertyDifferences: Optional[PropertyDifferences] + StackResourceDriftStatus: StackResourceDriftStatus + Timestamp: Timestamp + ModuleInfo: Optional[ModuleInfo] + + +StackResourceDrifts = List[StackResourceDrift] + + +class DescribeStackResourceDriftsOutput(TypedDict, total=False): + StackResourceDrifts: StackResourceDrifts + NextToken: Optional[NextToken] + + +class DescribeStackResourceInput(ServiceRequest): + StackName: StackName + LogicalResourceId: LogicalResourceId + + +class StackResourceDriftInformation(TypedDict, total=False): + StackResourceDriftStatus: StackResourceDriftStatus + LastCheckTimestamp: Optional[Timestamp] + + +class StackResourceDetail(TypedDict, total=False): + StackName: Optional[StackName] + StackId: Optional[StackId] + LogicalResourceId: LogicalResourceId + PhysicalResourceId: Optional[PhysicalResourceId] + ResourceType: ResourceType + LastUpdatedTimestamp: Timestamp + ResourceStatus: ResourceStatus + ResourceStatusReason: Optional[ResourceStatusReason] + Description: Optional[Description] + Metadata: Optional[Metadata] + DriftInformation: Optional[StackResourceDriftInformation] + ModuleInfo: Optional[ModuleInfo] + + +class DescribeStackResourceOutput(TypedDict, total=False): + StackResourceDetail: Optional[StackResourceDetail] + + +class DescribeStackResourcesInput(ServiceRequest): + StackName: Optional[StackName] + LogicalResourceId: Optional[LogicalResourceId] + PhysicalResourceId: Optional[PhysicalResourceId] + + +class StackResource(TypedDict, total=False): + StackName: Optional[StackName] + StackId: Optional[StackId] + LogicalResourceId: LogicalResourceId + PhysicalResourceId: Optional[PhysicalResourceId] + ResourceType: ResourceType + Timestamp: Timestamp + ResourceStatus: ResourceStatus + ResourceStatusReason: Optional[ResourceStatusReason] + Description: Optional[Description] + DriftInformation: Optional[StackResourceDriftInformation] + ModuleInfo: Optional[ModuleInfo] + + +StackResources = List[StackResource] + + +class DescribeStackResourcesOutput(TypedDict, total=False): + StackResources: Optional[StackResources] + + +class DescribeStackSetInput(ServiceRequest): + StackSetName: StackSetName + CallAs: Optional[CallAs] + + +class DescribeStackSetOperationInput(ServiceRequest): + StackSetName: StackSetName + OperationId: ClientRequestToken + CallAs: Optional[CallAs] + + +class StackSetOperationStatusDetails(TypedDict, total=False): + FailedStackInstancesCount: Optional[FailedStackInstancesCount] + + +class StackSetDriftDetectionDetails(TypedDict, total=False): + DriftStatus: 
Optional[StackSetDriftStatus] + DriftDetectionStatus: Optional[StackSetDriftDetectionStatus] + LastDriftCheckTimestamp: Optional[Timestamp] + TotalStackInstancesCount: Optional[TotalStackInstancesCount] + DriftedStackInstancesCount: Optional[DriftedStackInstancesCount] + InSyncStackInstancesCount: Optional[InSyncStackInstancesCount] + InProgressStackInstancesCount: Optional[InProgressStackInstancesCount] + FailedStackInstancesCount: Optional[FailedStackInstancesCount] + + +class StackSetOperation(TypedDict, total=False): + OperationId: Optional[ClientRequestToken] + StackSetId: Optional[StackSetId] + Action: Optional[StackSetOperationAction] + Status: Optional[StackSetOperationStatus] + OperationPreferences: Optional[StackSetOperationPreferences] + RetainStacks: Optional[RetainStacksNullable] + AdministrationRoleARN: Optional[RoleARN] + ExecutionRoleName: Optional[ExecutionRoleName] + CreationTimestamp: Optional[Timestamp] + EndTimestamp: Optional[Timestamp] + DeploymentTargets: Optional[DeploymentTargets] + StackSetDriftDetectionDetails: Optional[StackSetDriftDetectionDetails] + StatusReason: Optional[StackSetOperationStatusReason] + StatusDetails: Optional[StackSetOperationStatusDetails] + + +class DescribeStackSetOperationOutput(TypedDict, total=False): + StackSetOperation: Optional[StackSetOperation] + + +class StackSet(TypedDict, total=False): + StackSetName: Optional[StackSetName] + StackSetId: Optional[StackSetId] + Description: Optional[Description] + Status: Optional[StackSetStatus] + TemplateBody: Optional[TemplateBody] + Parameters: Optional[Parameters] + Capabilities: Optional[Capabilities] + Tags: Optional[Tags] + StackSetARN: Optional[StackSetARN] + AdministrationRoleARN: Optional[RoleARN] + ExecutionRoleName: Optional[ExecutionRoleName] + StackSetDriftDetectionDetails: Optional[StackSetDriftDetectionDetails] + AutoDeployment: Optional[AutoDeployment] + PermissionModel: Optional[PermissionModels] + OrganizationalUnitIds: Optional[OrganizationalUnitIdList] + ManagedExecution: Optional[ManagedExecution] + Regions: Optional[RegionList] + + +class DescribeStackSetOutput(TypedDict, total=False): + StackSet: Optional[StackSet] + + +class DescribeStacksInput(ServiceRequest): + StackName: Optional[StackName] + NextToken: Optional[NextToken] + + +class StackDriftInformation(TypedDict, total=False): + StackDriftStatus: StackDriftStatus + LastCheckTimestamp: Optional[Timestamp] + + +class Output(TypedDict, total=False): + OutputKey: Optional[OutputKey] + OutputValue: Optional[OutputValue] + Description: Optional[Description] + ExportName: Optional[ExportName] + + +Outputs = List[Output] + + +class Stack(TypedDict, total=False): + StackId: Optional[StackId] + StackName: StackName + ChangeSetId: Optional[ChangeSetId] + Description: Optional[Description] + Parameters: Optional[Parameters] + CreationTime: CreationTime + DeletionTime: Optional[DeletionTime] + LastUpdatedTime: Optional[LastUpdatedTime] + RollbackConfiguration: Optional[RollbackConfiguration] + StackStatus: StackStatus + StackStatusReason: Optional[StackStatusReason] + DisableRollback: Optional[DisableRollback] + NotificationARNs: Optional[NotificationARNs] + TimeoutInMinutes: Optional[TimeoutMinutes] + Capabilities: Optional[Capabilities] + Outputs: Optional[Outputs] + RoleARN: Optional[RoleARN] + Tags: Optional[Tags] + EnableTerminationProtection: Optional[EnableTerminationProtection] + ParentId: Optional[StackId] + RootId: Optional[StackId] + DriftInformation: Optional[StackDriftInformation] + RetainExceptOnCreate: 
Optional[RetainExceptOnCreate] + DeletionMode: Optional[DeletionMode] + DetailedStatus: Optional[DetailedStatus] + + +Stacks = List[Stack] + + +class DescribeStacksOutput(TypedDict, total=False): + Stacks: Optional[Stacks] + NextToken: Optional[NextToken] + + +class DescribeTypeInput(ServiceRequest): + Type: Optional[RegistryType] + TypeName: Optional[TypeName] + Arn: Optional[TypeArn] + VersionId: Optional[TypeVersionId] + PublisherId: Optional[PublisherId] + PublicVersionNumber: Optional[PublicVersionNumber] + + +SupportedMajorVersions = List[SupportedMajorVersion] + + +class RequiredActivatedType(TypedDict, total=False): + TypeNameAlias: Optional[TypeName] + OriginalTypeName: Optional[TypeName] + PublisherId: Optional[PublisherId] + SupportedMajorVersions: Optional[SupportedMajorVersions] + + +RequiredActivatedTypes = List[RequiredActivatedType] + + +class DescribeTypeOutput(TypedDict, total=False): + Arn: Optional[TypeArn] + Type: Optional[RegistryType] + TypeName: Optional[TypeName] + DefaultVersionId: Optional[TypeVersionId] + IsDefaultVersion: Optional[IsDefaultVersion] + TypeTestsStatus: Optional[TypeTestsStatus] + TypeTestsStatusDescription: Optional[TypeTestsStatusDescription] + Description: Optional[Description] + Schema: Optional[TypeSchema] + ProvisioningType: Optional[ProvisioningType] + DeprecatedStatus: Optional[DeprecatedStatus] + LoggingConfig: Optional[LoggingConfig] + RequiredActivatedTypes: Optional[RequiredActivatedTypes] + ExecutionRoleArn: Optional[RoleArn] + Visibility: Optional[Visibility] + SourceUrl: Optional[OptionalSecureUrl] + DocumentationUrl: Optional[OptionalSecureUrl] + LastUpdated: Optional[Timestamp] + TimeCreated: Optional[Timestamp] + ConfigurationSchema: Optional[ConfigurationSchema] + PublisherId: Optional[PublisherId] + OriginalTypeName: Optional[TypeName] + OriginalTypeArn: Optional[TypeArn] + PublicVersionNumber: Optional[PublicVersionNumber] + LatestPublicVersion: Optional[PublicVersionNumber] + IsActivated: Optional[IsActivated] + AutoUpdate: Optional[AutoUpdate] + + +class DescribeTypeRegistrationInput(ServiceRequest): + RegistrationToken: RegistrationToken + + +class DescribeTypeRegistrationOutput(TypedDict, total=False): + ProgressStatus: Optional[RegistrationStatus] + Description: Optional[Description] + TypeArn: Optional[TypeArn] + TypeVersionArn: Optional[TypeArn] + + +LogicalResourceIds = List[LogicalResourceId] + + +class DetectStackDriftInput(ServiceRequest): + StackName: StackNameOrId + LogicalResourceIds: Optional[LogicalResourceIds] + + +class DetectStackDriftOutput(TypedDict, total=False): + StackDriftDetectionId: StackDriftDetectionId + + +class DetectStackResourceDriftInput(ServiceRequest): + StackName: StackNameOrId + LogicalResourceId: LogicalResourceId + + +class DetectStackResourceDriftOutput(TypedDict, total=False): + StackResourceDrift: StackResourceDrift + + +class DetectStackSetDriftInput(ServiceRequest): + StackSetName: StackSetNameOrId + OperationPreferences: Optional[StackSetOperationPreferences] + OperationId: Optional[ClientRequestToken] + CallAs: Optional[CallAs] + + +class DetectStackSetDriftOutput(TypedDict, total=False): + OperationId: Optional[ClientRequestToken] + + +class EstimateTemplateCostInput(ServiceRequest): + TemplateBody: Optional[TemplateBody] + TemplateURL: Optional[TemplateURL] + Parameters: Optional[Parameters] + + +class EstimateTemplateCostOutput(TypedDict, total=False): + Url: Optional[Url] + + +class ExecuteChangeSetInput(ServiceRequest): + ChangeSetName: ChangeSetNameOrId + StackName: 
Optional[StackNameOrId] + ClientRequestToken: Optional[ClientRequestToken] + DisableRollback: Optional[DisableRollback] + RetainExceptOnCreate: Optional[RetainExceptOnCreate] + + +class ExecuteChangeSetOutput(TypedDict, total=False): + pass + + +class ExecuteStackRefactorInput(ServiceRequest): + StackRefactorId: StackRefactorId + + +class Export(TypedDict, total=False): + ExportingStackId: Optional[StackId] + Name: Optional[ExportName] + Value: Optional[ExportValue] + + +Exports = List[Export] + + +class GetGeneratedTemplateInput(ServiceRequest): + Format: Optional[TemplateFormat] + GeneratedTemplateName: GeneratedTemplateName + + +class GetGeneratedTemplateOutput(TypedDict, total=False): + Status: Optional[GeneratedTemplateStatus] + TemplateBody: Optional[TemplateBody] + + +class GetStackPolicyInput(ServiceRequest): + StackName: StackName + + +class GetStackPolicyOutput(TypedDict, total=False): + StackPolicyBody: Optional[StackPolicyBody] + + +class GetTemplateInput(ServiceRequest): + StackName: Optional[StackName] + ChangeSetName: Optional[ChangeSetNameOrId] + TemplateStage: Optional[TemplateStage] + + +StageList = List[TemplateStage] + + +class GetTemplateOutput(TypedDict, total=False): + TemplateBody: Optional[TemplateBody] + StagesAvailable: Optional[StageList] + + +class TemplateSummaryConfig(TypedDict, total=False): + TreatUnrecognizedResourceTypesAsWarnings: Optional[TreatUnrecognizedResourceTypesAsWarnings] + + +class GetTemplateSummaryInput(ServiceRequest): + TemplateBody: Optional[TemplateBody] + TemplateURL: Optional[TemplateURL] + StackName: Optional[StackNameOrId] + StackSetName: Optional[StackSetNameOrId] + CallAs: Optional[CallAs] + TemplateSummaryConfig: Optional[TemplateSummaryConfig] + + +class Warnings(TypedDict, total=False): + UnrecognizedResourceTypes: Optional[ResourceTypes] + + +ResourceIdentifiers = List[ResourceIdentifierPropertyKey] + + +class ResourceIdentifierSummary(TypedDict, total=False): + ResourceType: Optional[ResourceType] + LogicalResourceIds: Optional[LogicalResourceIds] + ResourceIdentifiers: Optional[ResourceIdentifiers] + + +ResourceIdentifierSummaries = List[ResourceIdentifierSummary] +TransformsList = List[TransformName] + + +class ParameterConstraints(TypedDict, total=False): + AllowedValues: Optional[AllowedValues] + + +class ParameterDeclaration(TypedDict, total=False): + ParameterKey: Optional[ParameterKey] + DefaultValue: Optional[ParameterValue] + ParameterType: Optional[ParameterType] + NoEcho: Optional[NoEcho] + Description: Optional[Description] + ParameterConstraints: Optional[ParameterConstraints] + + +ParameterDeclarations = List[ParameterDeclaration] + + +class GetTemplateSummaryOutput(TypedDict, total=False): + Parameters: Optional[ParameterDeclarations] + Description: Optional[Description] + Capabilities: Optional[Capabilities] + CapabilitiesReason: Optional[CapabilitiesReason] + ResourceTypes: Optional[ResourceTypes] + Version: Optional[Version] + Metadata: Optional[Metadata] + DeclaredTransforms: Optional[TransformsList] + ResourceIdentifierSummaries: Optional[ResourceIdentifierSummaries] + Warnings: Optional[Warnings] + + +class HookResultSummary(TypedDict, total=False): + InvocationPoint: Optional[HookInvocationPoint] + FailureMode: Optional[HookFailureMode] + TypeName: Optional[HookTypeName] + TypeVersionId: Optional[HookTypeVersionId] + TypeConfigurationVersionId: Optional[HookTypeConfigurationVersionId] + Status: Optional[HookStatus] + HookStatusReason: Optional[HookStatusReason] + + +HookResultSummaries = 
List[HookResultSummary] +StackIdList = List[StackId] + + +class ImportStacksToStackSetInput(ServiceRequest): + StackSetName: StackSetNameOrId + StackIds: Optional[StackIdList] + StackIdsUrl: Optional[StackIdsUrl] + OrganizationalUnitIds: Optional[OrganizationalUnitIdList] + OperationPreferences: Optional[StackSetOperationPreferences] + OperationId: Optional[ClientRequestToken] + CallAs: Optional[CallAs] + + +class ImportStacksToStackSetOutput(TypedDict, total=False): + OperationId: Optional[ClientRequestToken] + + +Imports = List[StackName] +JazzLogicalResourceIds = List[LogicalResourceId] +JazzResourceIdentifierProperties = Dict[ + JazzResourceIdentifierPropertyKey, JazzResourceIdentifierPropertyValue +] + + +class ListChangeSetsInput(ServiceRequest): + StackName: StackNameOrId + NextToken: Optional[NextToken] + + +class ListChangeSetsOutput(TypedDict, total=False): + Summaries: Optional[ChangeSetSummaries] + NextToken: Optional[NextToken] + + +class ListExportsInput(ServiceRequest): + NextToken: Optional[NextToken] + + +class ListExportsOutput(TypedDict, total=False): + Exports: Optional[Exports] + NextToken: Optional[NextToken] + + +class ListGeneratedTemplatesInput(ServiceRequest): + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class TemplateSummary(TypedDict, total=False): + GeneratedTemplateId: Optional[GeneratedTemplateId] + GeneratedTemplateName: Optional[GeneratedTemplateName] + Status: Optional[GeneratedTemplateStatus] + StatusReason: Optional[TemplateStatusReason] + CreationTime: Optional[CreationTime] + LastUpdatedTime: Optional[LastUpdatedTime] + NumberOfResources: Optional[NumberOfResources] + + +TemplateSummaries = List[TemplateSummary] + + +class ListGeneratedTemplatesOutput(TypedDict, total=False): + Summaries: Optional[TemplateSummaries] + NextToken: Optional[NextToken] + + +class ListHookResultsInput(ServiceRequest): + TargetType: ListHookResultsTargetType + TargetId: HookResultId + NextToken: Optional[NextToken] + + +class ListHookResultsOutput(TypedDict, total=False): + TargetType: Optional[ListHookResultsTargetType] + TargetId: Optional[HookResultId] + HookResults: Optional[HookResultSummaries] + NextToken: Optional[NextToken] + + +class ListImportsInput(ServiceRequest): + ExportName: ExportName + NextToken: Optional[NextToken] + + +class ListImportsOutput(TypedDict, total=False): + Imports: Optional[Imports] + NextToken: Optional[NextToken] + + +class ScannedResourceIdentifier(TypedDict, total=False): + ResourceType: ResourceType + ResourceIdentifier: JazzResourceIdentifierProperties + + +ScannedResourceIdentifiers = List[ScannedResourceIdentifier] + + +class ListResourceScanRelatedResourcesInput(ServiceRequest): + ResourceScanId: ResourceScanId + Resources: ScannedResourceIdentifiers + NextToken: Optional[NextToken] + MaxResults: Optional[BoxedMaxResults] + + +class ScannedResource(TypedDict, total=False): + ResourceType: Optional[ResourceType] + ResourceIdentifier: Optional[JazzResourceIdentifierProperties] + ManagedByStack: Optional[ManagedByStack] + + +RelatedResources = List[ScannedResource] + + +class ListResourceScanRelatedResourcesOutput(TypedDict, total=False): + RelatedResources: Optional[RelatedResources] + NextToken: Optional[NextToken] + + +class ListResourceScanResourcesInput(ServiceRequest): + ResourceScanId: ResourceScanId + ResourceIdentifier: Optional[ResourceIdentifier] + ResourceTypePrefix: Optional[ResourceTypePrefix] + TagKey: Optional[TagKey] + TagValue: Optional[TagValue] + NextToken: Optional[NextToken] + MaxResults: 
Optional[ResourceScannerMaxResults] + + +ScannedResources = List[ScannedResource] + + +class ListResourceScanResourcesOutput(TypedDict, total=False): + Resources: Optional[ScannedResources] + NextToken: Optional[NextToken] + + +class ListResourceScansInput(ServiceRequest): + NextToken: Optional[NextToken] + MaxResults: Optional[ResourceScannerMaxResults] + ScanTypeFilter: Optional[ScanType] + + +class ResourceScanSummary(TypedDict, total=False): + ResourceScanId: Optional[ResourceScanId] + Status: Optional[ResourceScanStatus] + StatusReason: Optional[ResourceScanStatusReason] + StartTime: Optional[Timestamp] + EndTime: Optional[Timestamp] + PercentageCompleted: Optional[PercentageCompleted] + ScanType: Optional[ScanType] + + +ResourceScanSummaries = List[ResourceScanSummary] + + +class ListResourceScansOutput(TypedDict, total=False): + ResourceScanSummaries: Optional[ResourceScanSummaries] + NextToken: Optional[NextToken] + + +class ListStackInstanceResourceDriftsInput(ServiceRequest): + StackSetName: StackSetNameOrId + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + StackInstanceResourceDriftStatuses: Optional[StackResourceDriftStatusFilters] + StackInstanceAccount: Account + StackInstanceRegion: Region + OperationId: ClientRequestToken + CallAs: Optional[CallAs] + + +class StackInstanceResourceDriftsSummary(TypedDict, total=False): + StackId: StackId + LogicalResourceId: LogicalResourceId + PhysicalResourceId: Optional[PhysicalResourceId] + PhysicalResourceIdContext: Optional[PhysicalResourceIdContext] + ResourceType: ResourceType + PropertyDifferences: Optional[PropertyDifferences] + StackResourceDriftStatus: StackResourceDriftStatus + Timestamp: Timestamp + + +StackInstanceResourceDriftsSummaries = List[StackInstanceResourceDriftsSummary] + + +class ListStackInstanceResourceDriftsOutput(TypedDict, total=False): + Summaries: Optional[StackInstanceResourceDriftsSummaries] + NextToken: Optional[NextToken] + + +class StackInstanceFilter(TypedDict, total=False): + Name: Optional[StackInstanceFilterName] + Values: Optional[StackInstanceFilterValues] + + +StackInstanceFilters = List[StackInstanceFilter] + + +class ListStackInstancesInput(ServiceRequest): + StackSetName: StackSetName + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + Filters: Optional[StackInstanceFilters] + StackInstanceAccount: Optional[Account] + StackInstanceRegion: Optional[Region] + CallAs: Optional[CallAs] + + +class StackInstanceSummary(TypedDict, total=False): + StackSetId: Optional[StackSetId] + Region: Optional[Region] + Account: Optional[Account] + StackId: Optional[StackId] + Status: Optional[StackInstanceStatus] + StatusReason: Optional[Reason] + StackInstanceStatus: Optional[StackInstanceComprehensiveStatus] + OrganizationalUnitId: Optional[OrganizationalUnitId] + DriftStatus: Optional[StackDriftStatus] + LastDriftCheckTimestamp: Optional[Timestamp] + LastOperationId: Optional[ClientRequestToken] + + +StackInstanceSummaries = List[StackInstanceSummary] + + +class ListStackInstancesOutput(TypedDict, total=False): + Summaries: Optional[StackInstanceSummaries] + NextToken: Optional[NextToken] + + +class ListStackRefactorActionsInput(ServiceRequest): + StackRefactorId: StackRefactorId + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +StackRefactorUntagResources = List[TagKey] +StackRefactorTagResources = List[Tag] + + +class StackRefactorAction(TypedDict, total=False): + Action: Optional[StackRefactorActionType] + Entity: 
Optional[StackRefactorActionEntity] + PhysicalResourceId: Optional[PhysicalResourceId] + ResourceIdentifier: Optional[StackRefactorResourceIdentifier] + Description: Optional[Description] + Detection: Optional[StackRefactorDetection] + DetectionReason: Optional[DetectionReason] + TagResources: Optional[StackRefactorTagResources] + UntagResources: Optional[StackRefactorUntagResources] + ResourceMapping: Optional[ResourceMapping] + + +StackRefactorActions = List[StackRefactorAction] + + +class ListStackRefactorActionsOutput(TypedDict, total=False): + StackRefactorActions: StackRefactorActions + NextToken: Optional[NextToken] + + +StackRefactorExecutionStatusFilter = List[StackRefactorExecutionStatus] + + +class ListStackRefactorsInput(ServiceRequest): + ExecutionStatusFilter: Optional[StackRefactorExecutionStatusFilter] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class StackRefactorSummary(TypedDict, total=False): + StackRefactorId: Optional[StackRefactorId] + Description: Optional[Description] + ExecutionStatus: Optional[StackRefactorExecutionStatus] + ExecutionStatusReason: Optional[ExecutionStatusReason] + Status: Optional[StackRefactorStatus] + StatusReason: Optional[StackRefactorStatusReason] + + +StackRefactorSummaries = List[StackRefactorSummary] + + +class ListStackRefactorsOutput(TypedDict, total=False): + StackRefactorSummaries: StackRefactorSummaries + NextToken: Optional[NextToken] + + +class ListStackResourcesInput(ServiceRequest): + StackName: StackName + NextToken: Optional[NextToken] + + +class StackResourceDriftInformationSummary(TypedDict, total=False): + StackResourceDriftStatus: StackResourceDriftStatus + LastCheckTimestamp: Optional[Timestamp] + + +class StackResourceSummary(TypedDict, total=False): + LogicalResourceId: LogicalResourceId + PhysicalResourceId: Optional[PhysicalResourceId] + ResourceType: ResourceType + LastUpdatedTimestamp: Timestamp + ResourceStatus: ResourceStatus + ResourceStatusReason: Optional[ResourceStatusReason] + DriftInformation: Optional[StackResourceDriftInformationSummary] + ModuleInfo: Optional[ModuleInfo] + + +StackResourceSummaries = List[StackResourceSummary] + + +class ListStackResourcesOutput(TypedDict, total=False): + StackResourceSummaries: Optional[StackResourceSummaries] + NextToken: Optional[NextToken] + + +class ListStackSetAutoDeploymentTargetsInput(ServiceRequest): + StackSetName: StackSetNameOrId + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + CallAs: Optional[CallAs] + + +class StackSetAutoDeploymentTargetSummary(TypedDict, total=False): + OrganizationalUnitId: Optional[OrganizationalUnitId] + Regions: Optional[RegionList] + + +StackSetAutoDeploymentTargetSummaries = List[StackSetAutoDeploymentTargetSummary] + + +class ListStackSetAutoDeploymentTargetsOutput(TypedDict, total=False): + Summaries: Optional[StackSetAutoDeploymentTargetSummaries] + NextToken: Optional[NextToken] + + +class OperationResultFilter(TypedDict, total=False): + Name: Optional[OperationResultFilterName] + Values: Optional[OperationResultFilterValues] + + +OperationResultFilters = List[OperationResultFilter] + + +class ListStackSetOperationResultsInput(ServiceRequest): + StackSetName: StackSetName + OperationId: ClientRequestToken + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + CallAs: Optional[CallAs] + Filters: Optional[OperationResultFilters] + + +class StackSetOperationResultSummary(TypedDict, total=False): + Account: Optional[Account] + Region: Optional[Region] + Status: 
Optional[StackSetOperationResultStatus] + StatusReason: Optional[Reason] + AccountGateResult: Optional[AccountGateResult] + OrganizationalUnitId: Optional[OrganizationalUnitId] + + +StackSetOperationResultSummaries = List[StackSetOperationResultSummary] + + +class ListStackSetOperationResultsOutput(TypedDict, total=False): + Summaries: Optional[StackSetOperationResultSummaries] + NextToken: Optional[NextToken] + + +class ListStackSetOperationsInput(ServiceRequest): + StackSetName: StackSetName + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + CallAs: Optional[CallAs] + + +class StackSetOperationSummary(TypedDict, total=False): + OperationId: Optional[ClientRequestToken] + Action: Optional[StackSetOperationAction] + Status: Optional[StackSetOperationStatus] + CreationTimestamp: Optional[Timestamp] + EndTimestamp: Optional[Timestamp] + StatusReason: Optional[StackSetOperationStatusReason] + StatusDetails: Optional[StackSetOperationStatusDetails] + OperationPreferences: Optional[StackSetOperationPreferences] + + +StackSetOperationSummaries = List[StackSetOperationSummary] + + +class ListStackSetOperationsOutput(TypedDict, total=False): + Summaries: Optional[StackSetOperationSummaries] + NextToken: Optional[NextToken] + + +class ListStackSetsInput(ServiceRequest): + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + Status: Optional[StackSetStatus] + CallAs: Optional[CallAs] + + +class StackSetSummary(TypedDict, total=False): + StackSetName: Optional[StackSetName] + StackSetId: Optional[StackSetId] + Description: Optional[Description] + Status: Optional[StackSetStatus] + AutoDeployment: Optional[AutoDeployment] + PermissionModel: Optional[PermissionModels] + DriftStatus: Optional[StackDriftStatus] + LastDriftCheckTimestamp: Optional[Timestamp] + ManagedExecution: Optional[ManagedExecution] + + +StackSetSummaries = List[StackSetSummary] + + +class ListStackSetsOutput(TypedDict, total=False): + Summaries: Optional[StackSetSummaries] + NextToken: Optional[NextToken] + + +StackStatusFilter = List[StackStatus] + + +class ListStacksInput(ServiceRequest): + NextToken: Optional[NextToken] + StackStatusFilter: Optional[StackStatusFilter] + + +class StackDriftInformationSummary(TypedDict, total=False): + StackDriftStatus: StackDriftStatus + LastCheckTimestamp: Optional[Timestamp] + + +class StackSummary(TypedDict, total=False): + StackId: Optional[StackId] + StackName: StackName + TemplateDescription: Optional[TemplateDescription] + CreationTime: CreationTime + LastUpdatedTime: Optional[LastUpdatedTime] + DeletionTime: Optional[DeletionTime] + StackStatus: StackStatus + StackStatusReason: Optional[StackStatusReason] + ParentId: Optional[StackId] + RootId: Optional[StackId] + DriftInformation: Optional[StackDriftInformationSummary] + + +StackSummaries = List[StackSummary] + + +class ListStacksOutput(TypedDict, total=False): + StackSummaries: Optional[StackSummaries] + NextToken: Optional[NextToken] + + +class ListTypeRegistrationsInput(ServiceRequest): + Type: Optional[RegistryType] + TypeName: Optional[TypeName] + TypeArn: Optional[TypeArn] + RegistrationStatusFilter: Optional[RegistrationStatus] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +RegistrationTokenList = List[RegistrationToken] + + +class ListTypeRegistrationsOutput(TypedDict, total=False): + RegistrationTokenList: Optional[RegistrationTokenList] + NextToken: Optional[NextToken] + + +class ListTypeVersionsInput(ServiceRequest): + Type: Optional[RegistryType] + TypeName: 
Optional[TypeName] + Arn: Optional[TypeArn] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + DeprecatedStatus: Optional[DeprecatedStatus] + PublisherId: Optional[PublisherId] + + +class TypeVersionSummary(TypedDict, total=False): + Type: Optional[RegistryType] + TypeName: Optional[TypeName] + VersionId: Optional[TypeVersionId] + IsDefaultVersion: Optional[IsDefaultVersion] + Arn: Optional[TypeArn] + TimeCreated: Optional[Timestamp] + Description: Optional[Description] + PublicVersionNumber: Optional[PublicVersionNumber] + + +TypeVersionSummaries = List[TypeVersionSummary] + + +class ListTypeVersionsOutput(TypedDict, total=False): + TypeVersionSummaries: Optional[TypeVersionSummaries] + NextToken: Optional[NextToken] + + +class TypeFilters(TypedDict, total=False): + Category: Optional[Category] + PublisherId: Optional[PublisherId] + TypeNamePrefix: Optional[TypeNamePrefix] + + +class ListTypesInput(ServiceRequest): + Visibility: Optional[Visibility] + ProvisioningType: Optional[ProvisioningType] + DeprecatedStatus: Optional[DeprecatedStatus] + Type: Optional[RegistryType] + Filters: Optional[TypeFilters] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class TypeSummary(TypedDict, total=False): + Type: Optional[RegistryType] + TypeName: Optional[TypeName] + DefaultVersionId: Optional[TypeVersionId] + TypeArn: Optional[TypeArn] + LastUpdated: Optional[Timestamp] + Description: Optional[Description] + PublisherId: Optional[PublisherId] + OriginalTypeName: Optional[TypeName] + PublicVersionNumber: Optional[PublicVersionNumber] + LatestPublicVersion: Optional[PublicVersionNumber] + PublisherIdentity: Optional[IdentityProvider] + PublisherName: Optional[PublisherName] + IsActivated: Optional[IsActivated] + + +TypeSummaries = List[TypeSummary] + + +class ListTypesOutput(TypedDict, total=False): + TypeSummaries: Optional[TypeSummaries] + NextToken: Optional[NextToken] + + +class PublishTypeInput(ServiceRequest): + Type: Optional[ThirdPartyType] + Arn: Optional[PrivateTypeArn] + TypeName: Optional[TypeName] + PublicVersionNumber: Optional[PublicVersionNumber] + + +class PublishTypeOutput(TypedDict, total=False): + PublicTypeArn: Optional[TypeArn] + + +class RecordHandlerProgressInput(ServiceRequest): + BearerToken: ClientToken + OperationStatus: OperationStatus + CurrentOperationStatus: Optional[OperationStatus] + StatusMessage: Optional[StatusMessage] + ErrorCode: Optional[HandlerErrorCode] + ResourceModel: Optional[ResourceModel] + ClientRequestToken: Optional[ClientRequestToken] + + +class RecordHandlerProgressOutput(TypedDict, total=False): + pass + + +class RegisterPublisherInput(ServiceRequest): + AcceptTermsAndConditions: Optional[AcceptTermsAndConditions] + ConnectionArn: Optional[ConnectionArn] + + +class RegisterPublisherOutput(TypedDict, total=False): + PublisherId: Optional[PublisherId] + + +class RegisterTypeInput(ServiceRequest): + Type: Optional[RegistryType] + TypeName: TypeName + SchemaHandlerPackage: S3Url + LoggingConfig: Optional[LoggingConfig] + ExecutionRoleArn: Optional[RoleArn] + ClientRequestToken: Optional[RequestToken] + + +class RegisterTypeOutput(TypedDict, total=False): + RegistrationToken: Optional[RegistrationToken] + + +class RollbackStackInput(ServiceRequest): + StackName: StackNameOrId + RoleARN: Optional[RoleARN] + ClientRequestToken: Optional[ClientRequestToken] + RetainExceptOnCreate: Optional[RetainExceptOnCreate] + + +class RollbackStackOutput(TypedDict, total=False): + StackId: Optional[StackId] + + +class 
SetStackPolicyInput(ServiceRequest): + StackName: StackName + StackPolicyBody: Optional[StackPolicyBody] + StackPolicyURL: Optional[StackPolicyURL] + + +class SetTypeConfigurationInput(ServiceRequest): + TypeArn: Optional[TypeArn] + Configuration: TypeConfiguration + ConfigurationAlias: Optional[TypeConfigurationAlias] + TypeName: Optional[TypeName] + Type: Optional[ThirdPartyType] + + +class SetTypeConfigurationOutput(TypedDict, total=False): + ConfigurationArn: Optional[TypeConfigurationArn] + + +class SetTypeDefaultVersionInput(ServiceRequest): + Arn: Optional[PrivateTypeArn] + Type: Optional[RegistryType] + TypeName: Optional[TypeName] + VersionId: Optional[TypeVersionId] + + +class SetTypeDefaultVersionOutput(TypedDict, total=False): + pass + + +class SignalResourceInput(ServiceRequest): + StackName: StackNameOrId + LogicalResourceId: LogicalResourceId + UniqueId: ResourceSignalUniqueId + Status: ResourceSignalStatus + + +class StartResourceScanInput(ServiceRequest): + ClientRequestToken: Optional[ClientRequestToken] + ScanFilters: Optional[ScanFilters] + + +class StartResourceScanOutput(TypedDict, total=False): + ResourceScanId: Optional[ResourceScanId] + + +class StopStackSetOperationInput(ServiceRequest): + StackSetName: StackSetName + OperationId: ClientRequestToken + CallAs: Optional[CallAs] + + +class StopStackSetOperationOutput(TypedDict, total=False): + pass + + +class TemplateParameter(TypedDict, total=False): + ParameterKey: Optional[ParameterKey] + DefaultValue: Optional[ParameterValue] + NoEcho: Optional[NoEcho] + Description: Optional[Description] + + +TemplateParameters = List[TemplateParameter] + + +class TestTypeInput(ServiceRequest): + Arn: Optional[TypeArn] + Type: Optional[ThirdPartyType] + TypeName: Optional[TypeName] + VersionId: Optional[TypeVersionId] + LogDeliveryBucket: Optional[S3Bucket] + + +class TestTypeOutput(TypedDict, total=False): + TypeVersionArn: Optional[TypeArn] + + +class UpdateGeneratedTemplateInput(ServiceRequest): + GeneratedTemplateName: GeneratedTemplateName + NewGeneratedTemplateName: Optional[GeneratedTemplateName] + AddResources: Optional[ResourceDefinitions] + RemoveResources: Optional[JazzLogicalResourceIds] + RefreshAllResources: Optional[RefreshAllResources] + TemplateConfiguration: Optional[TemplateConfiguration] + + +class UpdateGeneratedTemplateOutput(TypedDict, total=False): + GeneratedTemplateId: Optional[GeneratedTemplateId] + + +class UpdateStackInput(ServiceRequest): + StackName: StackName + TemplateBody: Optional[TemplateBody] + TemplateURL: Optional[TemplateURL] + UsePreviousTemplate: Optional[UsePreviousTemplate] + StackPolicyDuringUpdateBody: Optional[StackPolicyDuringUpdateBody] + StackPolicyDuringUpdateURL: Optional[StackPolicyDuringUpdateURL] + Parameters: Optional[Parameters] + Capabilities: Optional[Capabilities] + ResourceTypes: Optional[ResourceTypes] + RoleARN: Optional[RoleARN] + RollbackConfiguration: Optional[RollbackConfiguration] + StackPolicyBody: Optional[StackPolicyBody] + StackPolicyURL: Optional[StackPolicyURL] + NotificationARNs: Optional[NotificationARNs] + Tags: Optional[Tags] + DisableRollback: Optional[DisableRollback] + ClientRequestToken: Optional[ClientRequestToken] + RetainExceptOnCreate: Optional[RetainExceptOnCreate] + + +class UpdateStackInstancesInput(ServiceRequest): + StackSetName: StackSetNameOrId + Accounts: Optional[AccountList] + DeploymentTargets: Optional[DeploymentTargets] + Regions: RegionList + ParameterOverrides: Optional[Parameters] + OperationPreferences: 
Optional[StackSetOperationPreferences] + OperationId: Optional[ClientRequestToken] + CallAs: Optional[CallAs] + + +class UpdateStackInstancesOutput(TypedDict, total=False): + OperationId: Optional[ClientRequestToken] + + +class UpdateStackOutput(TypedDict, total=False): + StackId: Optional[StackId] + + +class UpdateStackSetInput(ServiceRequest): + StackSetName: StackSetName + Description: Optional[Description] + TemplateBody: Optional[TemplateBody] + TemplateURL: Optional[TemplateURL] + UsePreviousTemplate: Optional[UsePreviousTemplate] + Parameters: Optional[Parameters] + Capabilities: Optional[Capabilities] + Tags: Optional[Tags] + OperationPreferences: Optional[StackSetOperationPreferences] + AdministrationRoleARN: Optional[RoleARN] + ExecutionRoleName: Optional[ExecutionRoleName] + DeploymentTargets: Optional[DeploymentTargets] + PermissionModel: Optional[PermissionModels] + AutoDeployment: Optional[AutoDeployment] + OperationId: Optional[ClientRequestToken] + Accounts: Optional[AccountList] + Regions: Optional[RegionList] + CallAs: Optional[CallAs] + ManagedExecution: Optional[ManagedExecution] + + +class UpdateStackSetOutput(TypedDict, total=False): + OperationId: Optional[ClientRequestToken] + + +class UpdateTerminationProtectionInput(ServiceRequest): + EnableTerminationProtection: EnableTerminationProtection + StackName: StackNameOrId + + +class UpdateTerminationProtectionOutput(TypedDict, total=False): + StackId: Optional[StackId] + + +class ValidateTemplateInput(ServiceRequest): + TemplateBody: Optional[TemplateBody] + TemplateURL: Optional[TemplateURL] + + +class ValidateTemplateOutput(TypedDict, total=False): + Parameters: Optional[TemplateParameters] + Description: Optional[Description] + Capabilities: Optional[Capabilities] + CapabilitiesReason: Optional[CapabilitiesReason] + DeclaredTransforms: Optional[TransformsList] + + +class CloudformationApi: + service = "cloudformation" + version = "2010-05-15" + + @handler("ActivateOrganizationsAccess") + def activate_organizations_access( + self, context: RequestContext, **kwargs + ) -> ActivateOrganizationsAccessOutput: + raise NotImplementedError + + @handler("ActivateType", expand=False) + def activate_type( + self, context: RequestContext, request: ActivateTypeInput, **kwargs + ) -> ActivateTypeOutput: + raise NotImplementedError + + @handler("BatchDescribeTypeConfigurations") + def batch_describe_type_configurations( + self, + context: RequestContext, + type_configuration_identifiers: TypeConfigurationIdentifiers, + **kwargs, + ) -> BatchDescribeTypeConfigurationsOutput: + raise NotImplementedError + + @handler("CancelUpdateStack") + def cancel_update_stack( + self, + context: RequestContext, + stack_name: StackName, + client_request_token: ClientRequestToken | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ContinueUpdateRollback") + def continue_update_rollback( + self, + context: RequestContext, + stack_name: StackNameOrId, + role_arn: RoleARN | None = None, + resources_to_skip: ResourcesToSkip | None = None, + client_request_token: ClientRequestToken | None = None, + **kwargs, + ) -> ContinueUpdateRollbackOutput: + raise NotImplementedError + + @handler("CreateChangeSet") + def create_change_set( + self, + context: RequestContext, + stack_name: StackNameOrId, + change_set_name: ChangeSetName, + template_body: TemplateBody | None = None, + template_url: TemplateURL | None = None, + use_previous_template: UsePreviousTemplate | None = None, + parameters: Parameters | None = None, + capabilities: 
Capabilities | None = None,
+        resource_types: ResourceTypes | None = None,
+        role_arn: RoleARN | None = None,
+        rollback_configuration: RollbackConfiguration | None = None,
+        notification_arns: NotificationARNs | None = None,
+        tags: Tags | None = None,
+        client_token: ClientToken | None = None,
+        description: Description | None = None,
+        change_set_type: ChangeSetType | None = None,
+        resources_to_import: ResourcesToImport | None = None,
+        include_nested_stacks: IncludeNestedStacks | None = None,
+        on_stack_failure: OnStackFailure | None = None,
+        import_existing_resources: ImportExistingResources | None = None,
+        **kwargs,
+    ) -> CreateChangeSetOutput:
+        raise NotImplementedError
+
+    @handler("CreateGeneratedTemplate")
+    def create_generated_template(
+        self,
+        context: RequestContext,
+        generated_template_name: GeneratedTemplateName,
+        resources: ResourceDefinitions | None = None,
+        stack_name: StackName | None = None,
+        template_configuration: TemplateConfiguration | None = None,
+        **kwargs,
+    ) -> CreateGeneratedTemplateOutput:
+        raise NotImplementedError
+
+    @handler("CreateStack")
+    def create_stack(
+        self,
+        context: RequestContext,
+        stack_name: StackName,
+        template_body: TemplateBody | None = None,
+        template_url: TemplateURL | None = None,
+        parameters: Parameters | None = None,
+        disable_rollback: DisableRollback | None = None,
+        rollback_configuration: RollbackConfiguration | None = None,
+        timeout_in_minutes: TimeoutMinutes | None = None,
+        notification_arns: NotificationARNs | None = None,
+        capabilities: Capabilities | None = None,
+        resource_types: ResourceTypes | None = None,
+        role_arn: RoleARN | None = None,
+        on_failure: OnFailure | None = None,
+        stack_policy_body: StackPolicyBody | None = None,
+        stack_policy_url: StackPolicyURL | None = None,
+        tags: Tags | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        enable_termination_protection: EnableTerminationProtection | None = None,
+        retain_except_on_create: RetainExceptOnCreate | None = None,
+        **kwargs,
+    ) -> CreateStackOutput:
+        raise NotImplementedError
+
+    @handler("CreateStackInstances")
+    def create_stack_instances(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        regions: RegionList,
+        accounts: AccountList | None = None,
+        deployment_targets: DeploymentTargets | None = None,
+        parameter_overrides: Parameters | None = None,
+        operation_preferences: StackSetOperationPreferences | None = None,
+        operation_id: ClientRequestToken | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> CreateStackInstancesOutput:
+        raise NotImplementedError
+
+    @handler("CreateStackRefactor")
+    def create_stack_refactor(
+        self,
+        context: RequestContext,
+        stack_definitions: StackDefinitions,
+        description: Description | None = None,
+        enable_stack_creation: EnableStackCreation | None = None,
+        resource_mappings: ResourceMappings | None = None,
+        **kwargs,
+    ) -> CreateStackRefactorOutput:
+        raise NotImplementedError
+
+    @handler("CreateStackSet")
+    def create_stack_set(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        description: Description | None = None,
+        template_body: TemplateBody | None = None,
+        template_url: TemplateURL | None = None,
+        stack_id: StackId | None = None,
+        parameters: Parameters | None = None,
+        capabilities: Capabilities | None = None,
+        tags: Tags | None = None,
+        administration_role_arn: RoleARN | None = None,
+        execution_role_name: ExecutionRoleName | None = None,
+        permission_model: PermissionModels | None = None,
+        auto_deployment: AutoDeployment | None = None,
+        call_as: CallAs | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        managed_execution: ManagedExecution | None = None,
+        **kwargs,
+    ) -> CreateStackSetOutput:
+        raise NotImplementedError
+
+    @handler("DeactivateOrganizationsAccess")
+    def deactivate_organizations_access(
+        self, context: RequestContext, **kwargs
+    ) -> DeactivateOrganizationsAccessOutput:
+        raise NotImplementedError
+
+    @handler("DeactivateType", expand=False)
+    def deactivate_type(
+        self, context: RequestContext, request: DeactivateTypeInput, **kwargs
+    ) -> DeactivateTypeOutput:
+        raise NotImplementedError
+
+    @handler("DeleteChangeSet")
+    def delete_change_set(
+        self,
+        context: RequestContext,
+        change_set_name: ChangeSetNameOrId,
+        stack_name: StackNameOrId | None = None,
+        **kwargs,
+    ) -> DeleteChangeSetOutput:
+        raise NotImplementedError
+
+    @handler("DeleteGeneratedTemplate")
+    def delete_generated_template(
+        self, context: RequestContext, generated_template_name: GeneratedTemplateName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteStack")
+    def delete_stack(
+        self,
+        context: RequestContext,
+        stack_name: StackName,
+        retain_resources: RetainResources | None = None,
+        role_arn: RoleARN | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        deletion_mode: DeletionMode | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteStackInstances")
+    def delete_stack_instances(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        regions: RegionList,
+        retain_stacks: RetainStacks,
+        accounts: AccountList | None = None,
+        deployment_targets: DeploymentTargets | None = None,
+        operation_preferences: StackSetOperationPreferences | None = None,
+        operation_id: ClientRequestToken | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> DeleteStackInstancesOutput:
+        raise NotImplementedError
+
+    @handler("DeleteStackSet")
+    def delete_stack_set(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> DeleteStackSetOutput:
+        raise NotImplementedError
+
+    @handler("DeregisterType", expand=False)
+    def deregister_type(
+        self, context: RequestContext, request: DeregisterTypeInput, **kwargs
+    ) -> DeregisterTypeOutput:
+        raise NotImplementedError
+
+    @handler("DescribeAccountLimits")
+    def describe_account_limits(
+        self, context: RequestContext, next_token: NextToken | None = None, **kwargs
+    ) -> DescribeAccountLimitsOutput:
+        raise NotImplementedError
+
+    @handler("DescribeChangeSet")
+    def describe_change_set(
+        self,
+        context: RequestContext,
+        change_set_name: ChangeSetNameOrId,
+        stack_name: StackNameOrId | None = None,
+        next_token: NextToken | None = None,
+        include_property_values: IncludePropertyValues | None = None,
+        **kwargs,
+    ) -> DescribeChangeSetOutput:
+        raise NotImplementedError
+
+    @handler("DescribeChangeSetHooks")
+    def describe_change_set_hooks(
+        self,
+        context: RequestContext,
+        change_set_name: ChangeSetNameOrId,
+        stack_name: StackNameOrId | None = None,
+        next_token: NextToken | None = None,
+        logical_resource_id: LogicalResourceId | None = None,
+        **kwargs,
+    ) -> DescribeChangeSetHooksOutput:
+        raise NotImplementedError
+
+    @handler("DescribeGeneratedTemplate")
+    def describe_generated_template(
+        self, context: RequestContext, generated_template_name: GeneratedTemplateName, **kwargs
+    ) -> DescribeGeneratedTemplateOutput:
+        raise NotImplementedError
+
+    @handler("DescribeOrganizationsAccess")
+    def describe_organizations_access(
+        self, context: RequestContext, call_as: CallAs | None = None, **kwargs
+    ) -> DescribeOrganizationsAccessOutput:
+        raise NotImplementedError
+
+    @handler("DescribePublisher")
+    def describe_publisher(
+        self, context: RequestContext, publisher_id: PublisherId | None = None, **kwargs
+    ) -> DescribePublisherOutput:
+        raise NotImplementedError
+
+    @handler("DescribeResourceScan")
+    def describe_resource_scan(
+        self, context: RequestContext, resource_scan_id: ResourceScanId, **kwargs
+    ) -> DescribeResourceScanOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackDriftDetectionStatus")
+    def describe_stack_drift_detection_status(
+        self, context: RequestContext, stack_drift_detection_id: StackDriftDetectionId, **kwargs
+    ) -> DescribeStackDriftDetectionStatusOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackEvents")
+    def describe_stack_events(
+        self,
+        context: RequestContext,
+        stack_name: StackName | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeStackEventsOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackInstance")
+    def describe_stack_instance(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        stack_instance_account: Account,
+        stack_instance_region: Region,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> DescribeStackInstanceOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackRefactor")
+    def describe_stack_refactor(
+        self, context: RequestContext, stack_refactor_id: StackRefactorId, **kwargs
+    ) -> DescribeStackRefactorOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackResource")
+    def describe_stack_resource(
+        self,
+        context: RequestContext,
+        stack_name: StackName,
+        logical_resource_id: LogicalResourceId,
+        **kwargs,
+    ) -> DescribeStackResourceOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackResourceDrifts")
+    def describe_stack_resource_drifts(
+        self,
+        context: RequestContext,
+        stack_name: StackNameOrId,
+        stack_resource_drift_status_filters: StackResourceDriftStatusFilters | None = None,
+        next_token: NextToken | None = None,
+        max_results: BoxedMaxResults | None = None,
+        **kwargs,
+    ) -> DescribeStackResourceDriftsOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackResources")
+    def describe_stack_resources(
+        self,
+        context: RequestContext,
+        stack_name: StackName | None = None,
+        logical_resource_id: LogicalResourceId | None = None,
+        physical_resource_id: PhysicalResourceId | None = None,
+        **kwargs,
+    ) -> DescribeStackResourcesOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackSet")
+    def describe_stack_set(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> DescribeStackSetOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStackSetOperation")
+    def describe_stack_set_operation(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        operation_id: ClientRequestToken,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> DescribeStackSetOperationOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStacks")
+    def describe_stacks(
+        self,
+        context: RequestContext,
+        stack_name: StackName | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeStacksOutput:
+        raise NotImplementedError
+
+    @handler("DescribeType", expand=False)
+    def describe_type(
+        self, context: RequestContext, request: DescribeTypeInput, **kwargs
+    ) -> DescribeTypeOutput:
+        raise NotImplementedError
+
+    @handler("DescribeTypeRegistration")
+    def describe_type_registration(
+        self, context: RequestContext, registration_token: RegistrationToken, **kwargs
+    ) -> DescribeTypeRegistrationOutput:
+        raise NotImplementedError
+
+    @handler("DetectStackDrift")
+    def detect_stack_drift(
+        self,
+        context: RequestContext,
+        stack_name: StackNameOrId,
+        logical_resource_ids: LogicalResourceIds | None = None,
+        **kwargs,
+    ) -> DetectStackDriftOutput:
+        raise NotImplementedError
+
+    @handler("DetectStackResourceDrift")
+    def detect_stack_resource_drift(
+        self,
+        context: RequestContext,
+        stack_name: StackNameOrId,
+        logical_resource_id: LogicalResourceId,
+        **kwargs,
+    ) -> DetectStackResourceDriftOutput:
+        raise NotImplementedError
+
+    @handler("DetectStackSetDrift")
+    def detect_stack_set_drift(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetNameOrId,
+        operation_preferences: StackSetOperationPreferences | None = None,
+        operation_id: ClientRequestToken | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> DetectStackSetDriftOutput:
+        raise NotImplementedError
+
+    @handler("EstimateTemplateCost")
+    def estimate_template_cost(
+        self,
+        context: RequestContext,
+        template_body: TemplateBody | None = None,
+        template_url: TemplateURL | None = None,
+        parameters: Parameters | None = None,
+        **kwargs,
+    ) -> EstimateTemplateCostOutput:
+        raise NotImplementedError
+
+    @handler("ExecuteChangeSet")
+    def execute_change_set(
+        self,
+        context: RequestContext,
+        change_set_name: ChangeSetNameOrId,
+        stack_name: StackNameOrId | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        disable_rollback: DisableRollback | None = None,
+        retain_except_on_create: RetainExceptOnCreate | None = None,
+        **kwargs,
+    ) -> ExecuteChangeSetOutput:
+        raise NotImplementedError
+
+    @handler("ExecuteStackRefactor")
+    def execute_stack_refactor(
+        self, context: RequestContext, stack_refactor_id: StackRefactorId, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("GetGeneratedTemplate")
+    def get_generated_template(
+        self,
+        context: RequestContext,
+        generated_template_name: GeneratedTemplateName,
+        format: TemplateFormat | None = None,
+        **kwargs,
+    ) -> GetGeneratedTemplateOutput:
+        raise NotImplementedError
+
+    @handler("GetStackPolicy")
+    def get_stack_policy(
+        self, context: RequestContext, stack_name: StackName, **kwargs
+    ) -> GetStackPolicyOutput:
+        raise NotImplementedError
+
+    @handler("GetTemplate")
+    def get_template(
+        self,
+        context: RequestContext,
+        stack_name: StackName | None = None,
+        change_set_name: ChangeSetNameOrId | None = None,
+        template_stage: TemplateStage | None = None,
+        **kwargs,
+    ) -> GetTemplateOutput:
+        raise NotImplementedError
+
+    @handler("GetTemplateSummary")
+    def get_template_summary(
+        self,
+        context: RequestContext,
+        template_body: TemplateBody | None = None,
+        template_url: TemplateURL | None = None,
+        stack_name: StackNameOrId | None = None,
+        stack_set_name: StackSetNameOrId | None = None,
+        call_as: CallAs | None = None,
+        template_summary_config: TemplateSummaryConfig | None = None,
+        **kwargs,
+    ) -> GetTemplateSummaryOutput:
+        raise NotImplementedError
+
+    @handler("ImportStacksToStackSet")
+    def import_stacks_to_stack_set(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetNameOrId,
+        stack_ids: StackIdList | None = None,
+        stack_ids_url: StackIdsUrl | None = None,
+        organizational_unit_ids: OrganizationalUnitIdList | None = None,
+        operation_preferences: StackSetOperationPreferences | None = None,
+        operation_id: ClientRequestToken | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> ImportStacksToStackSetOutput:
+        raise NotImplementedError
+
+    @handler("ListChangeSets")
+    def list_change_sets(
+        self,
+        context: RequestContext,
+        stack_name: StackNameOrId,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> ListChangeSetsOutput:
+        raise NotImplementedError
+
+    @handler("ListExports")
+    def list_exports(
+        self, context: RequestContext, next_token: NextToken | None = None, **kwargs
+    ) -> ListExportsOutput:
+        raise NotImplementedError
+
+    @handler("ListGeneratedTemplates")
+    def list_generated_templates(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        **kwargs,
+    ) -> ListGeneratedTemplatesOutput:
+        raise NotImplementedError
+
+    @handler("ListHookResults")
+    def list_hook_results(
+        self,
+        context: RequestContext,
+        target_type: ListHookResultsTargetType,
+        target_id: HookResultId,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> ListHookResultsOutput:
+        raise NotImplementedError
+
+    @handler("ListImports")
+    def list_imports(
+        self,
+        context: RequestContext,
+        export_name: ExportName,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> ListImportsOutput:
+        raise NotImplementedError
+
+    @handler("ListResourceScanRelatedResources")
+    def list_resource_scan_related_resources(
+        self,
+        context: RequestContext,
+        resource_scan_id: ResourceScanId,
+        resources: ScannedResourceIdentifiers,
+        next_token: NextToken | None = None,
+        max_results: BoxedMaxResults | None = None,
+        **kwargs,
+    ) -> ListResourceScanRelatedResourcesOutput:
+        raise NotImplementedError
+
+    @handler("ListResourceScanResources")
+    def list_resource_scan_resources(
+        self,
+        context: RequestContext,
+        resource_scan_id: ResourceScanId,
+        resource_identifier: ResourceIdentifier | None = None,
+        resource_type_prefix: ResourceTypePrefix | None = None,
+        tag_key: TagKey | None = None,
+        tag_value: TagValue | None = None,
+        next_token: NextToken | None = None,
+        max_results: ResourceScannerMaxResults | None = None,
+        **kwargs,
+    ) -> ListResourceScanResourcesOutput:
+        raise NotImplementedError
+
+    @handler("ListResourceScans")
+    def list_resource_scans(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        max_results: ResourceScannerMaxResults | None = None,
+        scan_type_filter: ScanType | None = None,
+        **kwargs,
+    ) -> ListResourceScansOutput:
+        raise NotImplementedError
+
+    @handler("ListStackInstanceResourceDrifts")
+    def list_stack_instance_resource_drifts(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetNameOrId,
+        stack_instance_account: Account,
+        stack_instance_region: Region,
+        operation_id: ClientRequestToken,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        stack_instance_resource_drift_statuses: StackResourceDriftStatusFilters | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> ListStackInstanceResourceDriftsOutput:
+        raise NotImplementedError
+
+    @handler("ListStackInstances")
+    def list_stack_instances(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        filters: StackInstanceFilters | None = None,
+        stack_instance_account: Account | None = None,
+        stack_instance_region: Region | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> ListStackInstancesOutput:
+        raise NotImplementedError
+
+    @handler("ListStackRefactorActions")
+    def list_stack_refactor_actions(
+        self,
+        context: RequestContext,
+        stack_refactor_id: StackRefactorId,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        **kwargs,
+    ) -> ListStackRefactorActionsOutput:
+        raise NotImplementedError
+
+    @handler("ListStackRefactors")
+    def list_stack_refactors(
+        self,
+        context: RequestContext,
+        execution_status_filter: StackRefactorExecutionStatusFilter | None = None,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        **kwargs,
+    ) -> ListStackRefactorsOutput:
+        raise NotImplementedError
+
+    @handler("ListStackResources")
+    def list_stack_resources(
+        self,
+        context: RequestContext,
+        stack_name: StackName,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> ListStackResourcesOutput:
+        raise NotImplementedError
+
+    @handler("ListStackSetAutoDeploymentTargets")
+    def list_stack_set_auto_deployment_targets(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetNameOrId,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> ListStackSetAutoDeploymentTargetsOutput:
+        raise NotImplementedError
+
+    @handler("ListStackSetOperationResults")
+    def list_stack_set_operation_results(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        operation_id: ClientRequestToken,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        call_as: CallAs | None = None,
+        filters: OperationResultFilters | None = None,
+        **kwargs,
+    ) -> ListStackSetOperationResultsOutput:
+        raise NotImplementedError
+
+    @handler("ListStackSetOperations")
+    def list_stack_set_operations(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> ListStackSetOperationsOutput:
+        raise NotImplementedError
+
+    @handler("ListStackSets")
+    def list_stack_sets(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        max_results: MaxResults | None = None,
+        status: StackSetStatus | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> ListStackSetsOutput:
+        raise NotImplementedError
+
+    @handler("ListStacks")
+    def list_stacks(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        stack_status_filter: StackStatusFilter | None = None,
+        **kwargs,
+    ) -> ListStacksOutput:
+        raise NotImplementedError
+
+    @handler("ListTypeRegistrations", expand=False)
+    def list_type_registrations(
+        self, context: RequestContext, request: ListTypeRegistrationsInput, **kwargs
+    ) -> ListTypeRegistrationsOutput:
+        raise NotImplementedError
+
+    @handler("ListTypeVersions", expand=False)
+    def list_type_versions(
+        self, context: RequestContext, request: ListTypeVersionsInput, **kwargs
+    ) -> ListTypeVersionsOutput:
+        raise NotImplementedError
+
+    @handler("ListTypes", expand=False)
+    def list_types(
+        self, context: RequestContext, request: ListTypesInput, **kwargs
+    ) -> ListTypesOutput:
+        raise NotImplementedError
+
+    @handler("PublishType", expand=False)
+    def publish_type(
+        self, context: RequestContext, request: PublishTypeInput, **kwargs
+    ) -> PublishTypeOutput:
+        raise NotImplementedError
+
+    @handler("RecordHandlerProgress")
+    def record_handler_progress(
+        self,
+        context: RequestContext,
+        bearer_token: ClientToken,
+        operation_status: OperationStatus,
+        current_operation_status: OperationStatus | None = None,
+        status_message: StatusMessage | None = None,
+        error_code: HandlerErrorCode | None = None,
+        resource_model: ResourceModel | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        **kwargs,
+    ) -> RecordHandlerProgressOutput:
+        raise NotImplementedError
+
+    @handler("RegisterPublisher")
+    def register_publisher(
+        self,
+        context: RequestContext,
+        accept_terms_and_conditions: AcceptTermsAndConditions | None = None,
+        connection_arn: ConnectionArn | None = None,
+        **kwargs,
+    ) -> RegisterPublisherOutput:
+        raise NotImplementedError
+
+    @handler("RegisterType", expand=False)
+    def register_type(
+        self, context: RequestContext, request: RegisterTypeInput, **kwargs
+    ) -> RegisterTypeOutput:
+        raise NotImplementedError
+
+    @handler("RollbackStack")
+    def rollback_stack(
+        self,
+        context: RequestContext,
+        stack_name: StackNameOrId,
+        role_arn: RoleARN | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        retain_except_on_create: RetainExceptOnCreate | None = None,
+        **kwargs,
+    ) -> RollbackStackOutput:
+        raise NotImplementedError
+
+    @handler("SetStackPolicy")
+    def set_stack_policy(
+        self,
+        context: RequestContext,
+        stack_name: StackName,
+        stack_policy_body: StackPolicyBody | None = None,
+        stack_policy_url: StackPolicyURL | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("SetTypeConfiguration", expand=False)
+    def set_type_configuration(
+        self, context: RequestContext, request: SetTypeConfigurationInput, **kwargs
+    ) -> SetTypeConfigurationOutput:
+        raise NotImplementedError
+
+    @handler("SetTypeDefaultVersion", expand=False)
+    def set_type_default_version(
+        self, context: RequestContext, request: SetTypeDefaultVersionInput, **kwargs
+    ) -> SetTypeDefaultVersionOutput:
+        raise NotImplementedError
+
+    @handler("SignalResource")
+    def signal_resource(
+        self,
+        context: RequestContext,
+        stack_name: StackNameOrId,
+        logical_resource_id: LogicalResourceId,
+        unique_id: ResourceSignalUniqueId,
+        status: ResourceSignalStatus,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("StartResourceScan")
+    def start_resource_scan(
+        self,
+        context: RequestContext,
+        client_request_token: ClientRequestToken | None = None,
+        scan_filters: ScanFilters | None = None,
+        **kwargs,
+    ) -> StartResourceScanOutput:
+        raise NotImplementedError
+
+    @handler("StopStackSetOperation")
+    def stop_stack_set_operation(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        operation_id: ClientRequestToken,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> StopStackSetOperationOutput:
+        raise NotImplementedError
+
+    @handler("TestType", expand=False)
+    def test_type(
+        self, context: RequestContext, request: TestTypeInput, **kwargs
+    ) -> TestTypeOutput:
+        raise NotImplementedError
+
+    @handler("UpdateGeneratedTemplate")
+    def update_generated_template(
+        self,
+        context: RequestContext,
+        generated_template_name: GeneratedTemplateName,
+        new_generated_template_name: GeneratedTemplateName | None = None,
+        add_resources: ResourceDefinitions | None = None,
+        remove_resources: JazzLogicalResourceIds | None = None,
+        refresh_all_resources: RefreshAllResources | None = None,
+        template_configuration: TemplateConfiguration | None = None,
+        **kwargs,
+    ) -> UpdateGeneratedTemplateOutput:
+        raise NotImplementedError
+
+    @handler("UpdateStack")
+    def update_stack(
+        self,
+        context: RequestContext,
+        stack_name: StackName,
+        template_body: TemplateBody | None = None,
+        template_url: TemplateURL | None = None,
+        use_previous_template: UsePreviousTemplate | None = None,
+        stack_policy_during_update_body: StackPolicyDuringUpdateBody | None = None,
+        stack_policy_during_update_url: StackPolicyDuringUpdateURL | None = None,
+        parameters: Parameters | None = None,
+        capabilities: Capabilities | None = None,
+        resource_types: ResourceTypes | None = None,
+        role_arn: RoleARN | None = None,
+        rollback_configuration: RollbackConfiguration | None = None,
+        stack_policy_body: StackPolicyBody | None = None,
+        stack_policy_url: StackPolicyURL | None = None,
+        notification_arns: NotificationARNs | None = None,
+        tags: Tags | None = None,
+        disable_rollback: DisableRollback | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        retain_except_on_create: RetainExceptOnCreate | None = None,
+        **kwargs,
+    ) -> UpdateStackOutput:
+        raise NotImplementedError
+
+    @handler("UpdateStackInstances")
+    def update_stack_instances(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetNameOrId,
+        regions: RegionList,
+        accounts: AccountList | None = None,
+        deployment_targets: DeploymentTargets | None = None,
+        parameter_overrides: Parameters | None = None,
+        operation_preferences: StackSetOperationPreferences | None = None,
+        operation_id: ClientRequestToken | None = None,
+        call_as: CallAs | None = None,
+        **kwargs,
+    ) -> UpdateStackInstancesOutput:
+        raise NotImplementedError
+
+    @handler("UpdateStackSet")
+    def update_stack_set(
+        self,
+        context: RequestContext,
+        stack_set_name: StackSetName,
+        description: Description | None = None,
+        template_body: TemplateBody | None = None,
+        template_url: TemplateURL | None = None,
+        use_previous_template: UsePreviousTemplate | None = None,
+        parameters: Parameters | None = None,
+        capabilities: Capabilities | None = None,
+        tags: Tags | None = None,
+        operation_preferences: StackSetOperationPreferences | None = None,
+        administration_role_arn: RoleARN | None = None,
+        execution_role_name: ExecutionRoleName | None = None,
+        deployment_targets: DeploymentTargets | None = None,
+        permission_model: PermissionModels | None = None,
+        auto_deployment: AutoDeployment | None = None,
+        operation_id: ClientRequestToken | None = None,
+        accounts: AccountList | None = None,
+        regions: RegionList | None = None,
+        call_as: CallAs | None = None,
+        managed_execution: ManagedExecution | None = None,
+        **kwargs,
+    ) -> UpdateStackSetOutput:
+        raise NotImplementedError
+
+    @handler("UpdateTerminationProtection")
+    def update_termination_protection(
+        self,
+        context: RequestContext,
+        enable_termination_protection: EnableTerminationProtection,
+        stack_name: StackNameOrId,
+        **kwargs,
+    ) -> UpdateTerminationProtectionOutput:
+        raise NotImplementedError
+
+    @handler("ValidateTemplate")
+    def validate_template(
+        self,
+        context: RequestContext,
+        template_body: TemplateBody | None = None,
+        template_url: TemplateURL | None = None,
+        **kwargs,
+    ) -> ValidateTemplateOutput:
+        raise NotImplementedError
diff --git a/localstack-core/localstack/aws/api/cloudwatch/__init__.py b/localstack-core/localstack/aws/api/cloudwatch/__init__.py
new file mode 100644
index 0000000000000..e05e85a069dee
--- /dev/null
+++ b/localstack-core/localstack/aws/api/cloudwatch/__init__.py
@@ -0,0 +1,1560 @@
+from datetime import datetime
+from enum import StrEnum
+from typing import Dict, List, Optional, TypedDict
+
+from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
+
+AccountId = str
+ActionPrefix = str
+ActionsEnabled = bool
+ActionsSuppressedReason = str
+AlarmArn = str
+AlarmDescription = str
+AlarmName = str
+AlarmNamePrefix = str
+AlarmRule = str
+AmazonResourceName = str
+AnomalyDetectorMetricStat = str
+AnomalyDetectorMetricTimezone = str
+AwsQueryErrorMessage = str
+DashboardArn = str
+DashboardBody = str
+DashboardErrorMessage = str
+DashboardName = str
+DashboardNamePrefix = str
+DataPath = str
+DatapointValue = float
+DatapointsToAlarm = int
+DimensionName = str
+DimensionValue = str
+EntityAttributesMapKeyString = str
+EntityAttributesMapValueString = str
+EntityKeyAttributesMapKeyString = str
+EntityKeyAttributesMapValueString = str
+ErrorMessage = str
+EvaluateLowSampleCountPercentile = str
+EvaluationPeriods = int
+ExceptionType = str
+ExtendedStatistic = str
+FailureCode = str
+FailureDescription = str
+FailureResource = str
+FaultDescription = str
+GetMetricDataLabelTimezone = str
+GetMetricDataMaxDatapoints = int
+HistoryData = str
+HistorySummary = str
+IncludeLinkedAccounts = bool
+IncludeLinkedAccountsMetrics = bool
+InsightRuleAggregationStatistic = str
+InsightRuleContributorKey = str
+InsightRuleContributorKeyLabel = str
+InsightRuleDefinition = str
+InsightRuleIsManaged = bool
+InsightRuleMaxResults = int
+InsightRuleMetricName = str
+InsightRuleName = str
+InsightRuleOnTransformedLogs = bool
+InsightRuleOrderBy = str
+InsightRuleSchema = str
+InsightRuleState = str
+InsightRuleUnboundDouble = float
+InsightRuleUnboundInteger = int
+ListMetricStreamsMaxResults = int
+MaxRecords = int
+MaxReturnedResultsCount = int
+Message = str
+MessageDataCode = str
+MessageDataValue = str
+MetricExpression = str
+MetricId = str
+MetricLabel = str
+MetricName = str
+MetricStreamName = str
+MetricStreamState = str
+MetricStreamStatistic = str
+MetricWidget = str
+Namespace = str
+NextToken = str
+OutputFormat = str
+Period = int
+PeriodicSpikes = bool
+ResourceId = str
+ResourceName = str
+ResourceType = str
+ReturnData = bool
+Stat = str
+StateReason = str
+StateReasonData = str
+StorageResolution = int
+StrictEntityValidation = bool
+SuppressorPeriod = int
+TagKey = str
+TagValue = str
+TemplateName = str
+Threshold = float
+TreatMissingData = str
+
+
+class ActionsSuppressedBy(StrEnum):
+    WaitPeriod = "WaitPeriod"
+    ExtensionPeriod = "ExtensionPeriod"
+    Alarm = "Alarm"
+
+
+class AlarmType(StrEnum):
+    CompositeAlarm = "CompositeAlarm"
+    MetricAlarm = "MetricAlarm"
+
+
+class AnomalyDetectorStateValue(StrEnum):
+    PENDING_TRAINING = "PENDING_TRAINING"
+    TRAINED_INSUFFICIENT_DATA = "TRAINED_INSUFFICIENT_DATA"
+    TRAINED = "TRAINED"
+
+
+class AnomalyDetectorType(StrEnum):
+    SINGLE_METRIC = "SINGLE_METRIC"
+    METRIC_MATH = "METRIC_MATH"
+
+
+class ComparisonOperator(StrEnum):
+    GreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold"
+    GreaterThanThreshold = "GreaterThanThreshold"
+    LessThanThreshold = "LessThanThreshold"
+    LessThanOrEqualToThreshold = "LessThanOrEqualToThreshold"
+    LessThanLowerOrGreaterThanUpperThreshold = "LessThanLowerOrGreaterThanUpperThreshold"
+    LessThanLowerThreshold = "LessThanLowerThreshold"
+    GreaterThanUpperThreshold = "GreaterThanUpperThreshold"
+
+
+class EvaluationState(StrEnum):
+    PARTIAL_DATA = "PARTIAL_DATA"
+
+
+class HistoryItemType(StrEnum):
+    ConfigurationUpdate = "ConfigurationUpdate"
+    StateUpdate = "StateUpdate"
+    Action = "Action"
+
+
+class MetricStreamOutputFormat(StrEnum):
+    json = "json"
+    opentelemetry0_7 = "opentelemetry0.7"
+    opentelemetry1_0 = "opentelemetry1.0"
+
+
+class RecentlyActive(StrEnum):
+    PT3H = "PT3H"
"PT3H" + + +class ScanBy(StrEnum): + TimestampDescending = "TimestampDescending" + TimestampAscending = "TimestampAscending" + + +class StandardUnit(StrEnum): + Seconds = "Seconds" + Microseconds = "Microseconds" + Milliseconds = "Milliseconds" + Bytes = "Bytes" + Kilobytes = "Kilobytes" + Megabytes = "Megabytes" + Gigabytes = "Gigabytes" + Terabytes = "Terabytes" + Bits = "Bits" + Kilobits = "Kilobits" + Megabits = "Megabits" + Gigabits = "Gigabits" + Terabits = "Terabits" + Percent = "Percent" + Count = "Count" + Bytes_Second = "Bytes/Second" + Kilobytes_Second = "Kilobytes/Second" + Megabytes_Second = "Megabytes/Second" + Gigabytes_Second = "Gigabytes/Second" + Terabytes_Second = "Terabytes/Second" + Bits_Second = "Bits/Second" + Kilobits_Second = "Kilobits/Second" + Megabits_Second = "Megabits/Second" + Gigabits_Second = "Gigabits/Second" + Terabits_Second = "Terabits/Second" + Count_Second = "Count/Second" + None_ = "None" + + +class StateValue(StrEnum): + OK = "OK" + ALARM = "ALARM" + INSUFFICIENT_DATA = "INSUFFICIENT_DATA" + + +class Statistic(StrEnum): + SampleCount = "SampleCount" + Average = "Average" + Sum = "Sum" + Minimum = "Minimum" + Maximum = "Maximum" + + +class StatusCode(StrEnum): + Complete = "Complete" + InternalError = "InternalError" + PartialData = "PartialData" + Forbidden = "Forbidden" + + +class ConcurrentModificationException(ServiceException): + code: str = "ConcurrentModificationException" + sender_fault: bool = True + status_code: int = 429 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class DashboardValidationMessage(TypedDict, total=False): + DataPath: Optional[DataPath] + Message: Optional[Message] + + +DashboardValidationMessages = List[DashboardValidationMessage] + + +class DashboardInvalidInputError(ServiceException): + code: str = "InvalidParameterInput" + sender_fault: bool = True + status_code: int = 400 + dashboardValidationMessages: Optional[DashboardValidationMessages] + + +class DashboardNotFoundError(ServiceException): + code: str = "ResourceNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class InternalServiceFault(ServiceException): + code: str = "InternalServiceError" + sender_fault: bool = False + status_code: int = 500 + + +class InvalidFormatFault(ServiceException): + code: str = "InvalidFormat" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidNextToken(ServiceException): + code: str = "InvalidNextToken" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidParameterCombinationException(ServiceException): + code: str = "InvalidParameterCombination" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidParameterValueException(ServiceException): + code: str = "InvalidParameterValue" + sender_fault: bool = True + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = True + status_code: int = 400 + + +class LimitExceededFault(ServiceException): + code: str = "LimitExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class MissingRequiredParameterException(ServiceException): + code: str = "MissingParameter" + sender_fault: bool = True + status_code: int = 400 + + +class ResourceNotFound(ServiceException): + code: str = "ResourceNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ResourceNotFoundException(ServiceException): + code: str = 
"ResourceNotFoundException" + sender_fault: bool = True + status_code: int = 404 + ResourceType: Optional[ResourceType] + ResourceId: Optional[ResourceId] + + +Timestamp = datetime + + +class AlarmHistoryItem(TypedDict, total=False): + AlarmName: Optional[AlarmName] + AlarmType: Optional[AlarmType] + Timestamp: Optional[Timestamp] + HistoryItemType: Optional[HistoryItemType] + HistorySummary: Optional[HistorySummary] + HistoryData: Optional[HistoryData] + + +AlarmHistoryItems = List[AlarmHistoryItem] +AlarmNames = List[AlarmName] +AlarmTypes = List[AlarmType] + + +class Dimension(TypedDict, total=False): + Name: DimensionName + Value: DimensionValue + + +Dimensions = List[Dimension] + + +class Metric(TypedDict, total=False): + Namespace: Optional[Namespace] + MetricName: Optional[MetricName] + Dimensions: Optional[Dimensions] + + +class MetricStat(TypedDict, total=False): + Metric: Metric + Period: Period + Stat: Stat + Unit: Optional[StandardUnit] + + +class MetricDataQuery(TypedDict, total=False): + Id: MetricId + MetricStat: Optional[MetricStat] + Expression: Optional[MetricExpression] + Label: Optional[MetricLabel] + ReturnData: Optional[ReturnData] + Period: Optional[Period] + AccountId: Optional[AccountId] + + +MetricDataQueries = List[MetricDataQuery] + + +class MetricMathAnomalyDetector(TypedDict, total=False): + MetricDataQueries: Optional[MetricDataQueries] + + +class SingleMetricAnomalyDetector(TypedDict, total=False): + AccountId: Optional[AccountId] + Namespace: Optional[Namespace] + MetricName: Optional[MetricName] + Dimensions: Optional[Dimensions] + Stat: Optional[AnomalyDetectorMetricStat] + + +class MetricCharacteristics(TypedDict, total=False): + PeriodicSpikes: Optional[PeriodicSpikes] + + +class Range(TypedDict, total=False): + StartTime: Timestamp + EndTime: Timestamp + + +AnomalyDetectorExcludedTimeRanges = List[Range] + + +class AnomalyDetectorConfiguration(TypedDict, total=False): + ExcludedTimeRanges: Optional[AnomalyDetectorExcludedTimeRanges] + MetricTimezone: Optional[AnomalyDetectorMetricTimezone] + + +class AnomalyDetector(TypedDict, total=False): + Namespace: Optional[Namespace] + MetricName: Optional[MetricName] + Dimensions: Optional[Dimensions] + Stat: Optional[AnomalyDetectorMetricStat] + Configuration: Optional[AnomalyDetectorConfiguration] + StateValue: Optional[AnomalyDetectorStateValue] + MetricCharacteristics: Optional[MetricCharacteristics] + SingleMetricAnomalyDetector: Optional[SingleMetricAnomalyDetector] + MetricMathAnomalyDetector: Optional[MetricMathAnomalyDetector] + + +AnomalyDetectorTypes = List[AnomalyDetectorType] +AnomalyDetectors = List[AnomalyDetector] + + +class PartialFailure(TypedDict, total=False): + FailureResource: Optional[FailureResource] + ExceptionType: Optional[ExceptionType] + FailureCode: Optional[FailureCode] + FailureDescription: Optional[FailureDescription] + + +BatchFailures = List[PartialFailure] +ResourceList = List[ResourceName] + + +class CompositeAlarm(TypedDict, total=False): + ActionsEnabled: Optional[ActionsEnabled] + AlarmActions: Optional[ResourceList] + AlarmArn: Optional[AlarmArn] + AlarmConfigurationUpdatedTimestamp: Optional[Timestamp] + AlarmDescription: Optional[AlarmDescription] + AlarmName: Optional[AlarmName] + AlarmRule: Optional[AlarmRule] + InsufficientDataActions: Optional[ResourceList] + OKActions: Optional[ResourceList] + StateReason: Optional[StateReason] + StateReasonData: Optional[StateReasonData] + StateUpdatedTimestamp: Optional[Timestamp] + StateValue: Optional[StateValue] + 
StateTransitionedTimestamp: Optional[Timestamp] + ActionsSuppressedBy: Optional[ActionsSuppressedBy] + ActionsSuppressedReason: Optional[ActionsSuppressedReason] + ActionsSuppressor: Optional[AlarmArn] + ActionsSuppressorWaitPeriod: Optional[SuppressorPeriod] + ActionsSuppressorExtensionPeriod: Optional[SuppressorPeriod] + + +CompositeAlarms = List[CompositeAlarm] +Counts = List[DatapointValue] +Size = int +LastModified = datetime + + +class DashboardEntry(TypedDict, total=False): + DashboardName: Optional[DashboardName] + DashboardArn: Optional[DashboardArn] + LastModified: Optional[LastModified] + Size: Optional[Size] + + +DashboardEntries = List[DashboardEntry] +DashboardNames = List[DashboardName] +DatapointValueMap = Dict[ExtendedStatistic, DatapointValue] + + +class Datapoint(TypedDict, total=False): + Timestamp: Optional[Timestamp] + SampleCount: Optional[DatapointValue] + Average: Optional[DatapointValue] + Sum: Optional[DatapointValue] + Minimum: Optional[DatapointValue] + Maximum: Optional[DatapointValue] + Unit: Optional[StandardUnit] + ExtendedStatistics: Optional[DatapointValueMap] + + +DatapointValues = List[DatapointValue] +Datapoints = List[Datapoint] + + +class DeleteAlarmsInput(ServiceRequest): + AlarmNames: AlarmNames + + +class DeleteAnomalyDetectorInput(ServiceRequest): + Namespace: Optional[Namespace] + MetricName: Optional[MetricName] + Dimensions: Optional[Dimensions] + Stat: Optional[AnomalyDetectorMetricStat] + SingleMetricAnomalyDetector: Optional[SingleMetricAnomalyDetector] + MetricMathAnomalyDetector: Optional[MetricMathAnomalyDetector] + + +class DeleteAnomalyDetectorOutput(TypedDict, total=False): + pass + + +class DeleteDashboardsInput(ServiceRequest): + DashboardNames: DashboardNames + + +class DeleteDashboardsOutput(TypedDict, total=False): + pass + + +InsightRuleNames = List[InsightRuleName] + + +class DeleteInsightRulesInput(ServiceRequest): + RuleNames: InsightRuleNames + + +class DeleteInsightRulesOutput(TypedDict, total=False): + Failures: Optional[BatchFailures] + + +class DeleteMetricStreamInput(ServiceRequest): + Name: MetricStreamName + + +class DeleteMetricStreamOutput(TypedDict, total=False): + pass + + +class DescribeAlarmHistoryInput(ServiceRequest): + AlarmName: Optional[AlarmName] + AlarmTypes: Optional[AlarmTypes] + HistoryItemType: Optional[HistoryItemType] + StartDate: Optional[Timestamp] + EndDate: Optional[Timestamp] + MaxRecords: Optional[MaxRecords] + NextToken: Optional[NextToken] + ScanBy: Optional[ScanBy] + + +class DescribeAlarmHistoryOutput(TypedDict, total=False): + AlarmHistoryItems: Optional[AlarmHistoryItems] + NextToken: Optional[NextToken] + + +class DescribeAlarmsForMetricInput(ServiceRequest): + MetricName: MetricName + Namespace: Namespace + Statistic: Optional[Statistic] + ExtendedStatistic: Optional[ExtendedStatistic] + Dimensions: Optional[Dimensions] + Period: Optional[Period] + Unit: Optional[StandardUnit] + + +class MetricAlarm(TypedDict, total=False): + AlarmName: Optional[AlarmName] + AlarmArn: Optional[AlarmArn] + AlarmDescription: Optional[AlarmDescription] + AlarmConfigurationUpdatedTimestamp: Optional[Timestamp] + ActionsEnabled: Optional[ActionsEnabled] + OKActions: Optional[ResourceList] + AlarmActions: Optional[ResourceList] + InsufficientDataActions: Optional[ResourceList] + StateValue: Optional[StateValue] + StateReason: Optional[StateReason] + StateReasonData: Optional[StateReasonData] + StateUpdatedTimestamp: Optional[Timestamp] + MetricName: Optional[MetricName] + Namespace: Optional[Namespace] + 
+    Statistic: Optional[Statistic]
+    ExtendedStatistic: Optional[ExtendedStatistic]
+    Dimensions: Optional[Dimensions]
+    Period: Optional[Period]
+    Unit: Optional[StandardUnit]
+    EvaluationPeriods: Optional[EvaluationPeriods]
+    DatapointsToAlarm: Optional[DatapointsToAlarm]
+    Threshold: Optional[Threshold]
+    ComparisonOperator: Optional[ComparisonOperator]
+    TreatMissingData: Optional[TreatMissingData]
+    EvaluateLowSampleCountPercentile: Optional[EvaluateLowSampleCountPercentile]
+    Metrics: Optional[MetricDataQueries]
+    ThresholdMetricId: Optional[MetricId]
+    EvaluationState: Optional[EvaluationState]
+    StateTransitionedTimestamp: Optional[Timestamp]
+
+
+MetricAlarms = List[MetricAlarm]
+
+
+class DescribeAlarmsForMetricOutput(TypedDict, total=False):
+    MetricAlarms: Optional[MetricAlarms]
+
+
+class DescribeAlarmsInput(ServiceRequest):
+    AlarmNames: Optional[AlarmNames]
+    AlarmNamePrefix: Optional[AlarmNamePrefix]
+    AlarmTypes: Optional[AlarmTypes]
+    ChildrenOfAlarmName: Optional[AlarmName]
+    ParentsOfAlarmName: Optional[AlarmName]
+    StateValue: Optional[StateValue]
+    ActionPrefix: Optional[ActionPrefix]
+    MaxRecords: Optional[MaxRecords]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAlarmsOutput(TypedDict, total=False):
+    CompositeAlarms: Optional[CompositeAlarms]
+    MetricAlarms: Optional[MetricAlarms]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAnomalyDetectorsInput(ServiceRequest):
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[MaxReturnedResultsCount]
+    Namespace: Optional[Namespace]
+    MetricName: Optional[MetricName]
+    Dimensions: Optional[Dimensions]
+    AnomalyDetectorTypes: Optional[AnomalyDetectorTypes]
+
+
+class DescribeAnomalyDetectorsOutput(TypedDict, total=False):
+    AnomalyDetectors: Optional[AnomalyDetectors]
+    NextToken: Optional[NextToken]
+
+
+class DescribeInsightRulesInput(ServiceRequest):
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[InsightRuleMaxResults]
+
+
+class InsightRule(TypedDict, total=False):
+    Name: InsightRuleName
+    State: InsightRuleState
+    Schema: InsightRuleSchema
+    Definition: InsightRuleDefinition
+    ManagedRule: Optional[InsightRuleIsManaged]
+    ApplyOnTransformedLogs: Optional[InsightRuleOnTransformedLogs]
+
+
+InsightRules = List[InsightRule]
+
+
+class DescribeInsightRulesOutput(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    InsightRules: Optional[InsightRules]
+
+
+class DimensionFilter(TypedDict, total=False):
+    Name: DimensionName
+    Value: Optional[DimensionValue]
+
+
+DimensionFilters = List[DimensionFilter]
+
+
+class DisableAlarmActionsInput(ServiceRequest):
+    AlarmNames: AlarmNames
+
+
+class DisableInsightRulesInput(ServiceRequest):
+    RuleNames: InsightRuleNames
+
+
+class DisableInsightRulesOutput(TypedDict, total=False):
+    Failures: Optional[BatchFailures]
+
+
+class EnableAlarmActionsInput(ServiceRequest):
+    AlarmNames: AlarmNames
+
+
+class EnableInsightRulesInput(ServiceRequest):
+    RuleNames: InsightRuleNames
+
+
+class EnableInsightRulesOutput(TypedDict, total=False):
+    Failures: Optional[BatchFailures]
+
+
+EntityAttributesMap = Dict[EntityAttributesMapKeyString, EntityAttributesMapValueString]
+EntityKeyAttributesMap = Dict[EntityKeyAttributesMapKeyString, EntityKeyAttributesMapValueString]
+
+
+class Entity(TypedDict, total=False):
+    KeyAttributes: Optional[EntityKeyAttributesMap]
+    Attributes: Optional[EntityAttributesMap]
+
+
+Values = List[DatapointValue]
+
+
+class StatisticSet(TypedDict, total=False):
+    SampleCount: DatapointValue
+    Sum: DatapointValue
+    Minimum: DatapointValue
+    Maximum: DatapointValue
+
+
+class MetricDatum(TypedDict, total=False):
+    MetricName: MetricName
+    Dimensions: Optional[Dimensions]
+    Timestamp: Optional[Timestamp]
+    Value: Optional[DatapointValue]
+    StatisticValues: Optional[StatisticSet]
+    Values: Optional[Values]
+    Counts: Optional[Counts]
+    Unit: Optional[StandardUnit]
+    StorageResolution: Optional[StorageResolution]
+
+
+MetricData = List[MetricDatum]
+
+
+class EntityMetricData(TypedDict, total=False):
+    Entity: Optional[Entity]
+    MetricData: Optional[MetricData]
+
+
+EntityMetricDataList = List[EntityMetricData]
+ExtendedStatistics = List[ExtendedStatistic]
+
+
+class GetDashboardInput(ServiceRequest):
+    DashboardName: DashboardName
+
+
+class GetDashboardOutput(TypedDict, total=False):
+    DashboardArn: Optional[DashboardArn]
+    DashboardBody: Optional[DashboardBody]
+    DashboardName: Optional[DashboardName]
+
+
+InsightRuleMetricList = List[InsightRuleMetricName]
+
+
+class GetInsightRuleReportInput(ServiceRequest):
+    RuleName: InsightRuleName
+    StartTime: Timestamp
+    EndTime: Timestamp
+    Period: Period
+    MaxContributorCount: Optional[InsightRuleUnboundInteger]
+    Metrics: Optional[InsightRuleMetricList]
+    OrderBy: Optional[InsightRuleOrderBy]
+
+
+class InsightRuleMetricDatapoint(TypedDict, total=False):
+    Timestamp: Timestamp
+    UniqueContributors: Optional[InsightRuleUnboundDouble]
+    MaxContributorValue: Optional[InsightRuleUnboundDouble]
+    SampleCount: Optional[InsightRuleUnboundDouble]
+    Average: Optional[InsightRuleUnboundDouble]
+    Sum: Optional[InsightRuleUnboundDouble]
+    Minimum: Optional[InsightRuleUnboundDouble]
+    Maximum: Optional[InsightRuleUnboundDouble]
+
+
+InsightRuleMetricDatapoints = List[InsightRuleMetricDatapoint]
+
+
+class InsightRuleContributorDatapoint(TypedDict, total=False):
+    Timestamp: Timestamp
+    ApproximateValue: InsightRuleUnboundDouble
+
+
+InsightRuleContributorDatapoints = List[InsightRuleContributorDatapoint]
+InsightRuleContributorKeys = List[InsightRuleContributorKey]
+
+
+class InsightRuleContributor(TypedDict, total=False):
+    Keys: InsightRuleContributorKeys
+    ApproximateAggregateValue: InsightRuleUnboundDouble
+    Datapoints: InsightRuleContributorDatapoints
+
+
+InsightRuleContributors = List[InsightRuleContributor]
+InsightRuleUnboundLong = int
+InsightRuleContributorKeyLabels = List[InsightRuleContributorKeyLabel]
+
+
+class GetInsightRuleReportOutput(TypedDict, total=False):
+    KeyLabels: Optional[InsightRuleContributorKeyLabels]
+    AggregationStatistic: Optional[InsightRuleAggregationStatistic]
+    AggregateValue: Optional[InsightRuleUnboundDouble]
+    ApproximateUniqueCount: Optional[InsightRuleUnboundLong]
+    Contributors: Optional[InsightRuleContributors]
+    MetricDatapoints: Optional[InsightRuleMetricDatapoints]
+
+
+class LabelOptions(TypedDict, total=False):
+    Timezone: Optional[GetMetricDataLabelTimezone]
+
+
+class GetMetricDataInput(ServiceRequest):
+    MetricDataQueries: MetricDataQueries
+    StartTime: Timestamp
+    EndTime: Timestamp
+    NextToken: Optional[NextToken]
+    ScanBy: Optional[ScanBy]
+    MaxDatapoints: Optional[GetMetricDataMaxDatapoints]
+    LabelOptions: Optional[LabelOptions]
+
+
+class MessageData(TypedDict, total=False):
+    Code: Optional[MessageDataCode]
+    Value: Optional[MessageDataValue]
+
+
+MetricDataResultMessages = List[MessageData]
+Timestamps = List[Timestamp]
+
+
+class MetricDataResult(TypedDict, total=False):
+    Id: Optional[MetricId]
+    Label: Optional[MetricLabel]
+    Timestamps: Optional[Timestamps]
+    Values: Optional[DatapointValues]
+    StatusCode: Optional[StatusCode]
+    Messages: Optional[MetricDataResultMessages]
+
+
+MetricDataResults = List[MetricDataResult]
+
+
+class GetMetricDataOutput(TypedDict, total=False):
+    MetricDataResults: Optional[MetricDataResults]
+    NextToken: Optional[NextToken]
+    Messages: Optional[MetricDataResultMessages]
+
+
+Statistics = List[Statistic]
+
+
+class GetMetricStatisticsInput(ServiceRequest):
+    Namespace: Namespace
+    MetricName: MetricName
+    Dimensions: Optional[Dimensions]
+    StartTime: Timestamp
+    EndTime: Timestamp
+    Period: Period
+    Statistics: Optional[Statistics]
+    ExtendedStatistics: Optional[ExtendedStatistics]
+    Unit: Optional[StandardUnit]
+
+
+class GetMetricStatisticsOutput(TypedDict, total=False):
+    Label: Optional[MetricLabel]
+    Datapoints: Optional[Datapoints]
+
+
+class GetMetricStreamInput(ServiceRequest):
+    Name: MetricStreamName
+
+
+MetricStreamStatisticsAdditionalStatistics = List[MetricStreamStatistic]
+
+
+class MetricStreamStatisticsMetric(TypedDict, total=False):
+    Namespace: Namespace
+    MetricName: MetricName
+
+
+MetricStreamStatisticsIncludeMetrics = List[MetricStreamStatisticsMetric]
+
+
+class MetricStreamStatisticsConfiguration(TypedDict, total=False):
+    IncludeMetrics: MetricStreamStatisticsIncludeMetrics
+    AdditionalStatistics: MetricStreamStatisticsAdditionalStatistics
+
+
+MetricStreamStatisticsConfigurations = List[MetricStreamStatisticsConfiguration]
+MetricStreamFilterMetricNames = List[MetricName]
+
+
+class MetricStreamFilter(TypedDict, total=False):
+    Namespace: Optional[Namespace]
+    MetricNames: Optional[MetricStreamFilterMetricNames]
+
+
+MetricStreamFilters = List[MetricStreamFilter]
+
+
+class GetMetricStreamOutput(TypedDict, total=False):
+    Arn: Optional[AmazonResourceName]
+    Name: Optional[MetricStreamName]
+    IncludeFilters: Optional[MetricStreamFilters]
+    ExcludeFilters: Optional[MetricStreamFilters]
+    FirehoseArn: Optional[AmazonResourceName]
+    RoleArn: Optional[AmazonResourceName]
+    State: Optional[MetricStreamState]
+    CreationDate: Optional[Timestamp]
+    LastUpdateDate: Optional[Timestamp]
+    OutputFormat: Optional[MetricStreamOutputFormat]
+    StatisticsConfigurations: Optional[MetricStreamStatisticsConfigurations]
+    IncludeLinkedAccountsMetrics: Optional[IncludeLinkedAccountsMetrics]
+
+
+class GetMetricWidgetImageInput(ServiceRequest):
+    MetricWidget: MetricWidget
+    OutputFormat: Optional[OutputFormat]
+
+
+MetricWidgetImage = bytes
+
+
+class GetMetricWidgetImageOutput(TypedDict, total=False):
+    MetricWidgetImage: Optional[MetricWidgetImage]
+
+
+class ListDashboardsInput(ServiceRequest):
+    DashboardNamePrefix: Optional[DashboardNamePrefix]
+    NextToken: Optional[NextToken]
+
+
+class ListDashboardsOutput(TypedDict, total=False):
+    DashboardEntries: Optional[DashboardEntries]
+    NextToken: Optional[NextToken]
+
+
+class ListManagedInsightRulesInput(ServiceRequest):
+    ResourceARN: AmazonResourceName
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[InsightRuleMaxResults]
+
+
+class ManagedRuleState(TypedDict, total=False):
+    RuleName: InsightRuleName
+    State: InsightRuleState
+
+
+class ManagedRuleDescription(TypedDict, total=False):
+    TemplateName: Optional[TemplateName]
+    ResourceARN: Optional[AmazonResourceName]
+    RuleState: Optional[ManagedRuleState]
+
+
+ManagedRuleDescriptions = List[ManagedRuleDescription]
+
+
+class ListManagedInsightRulesOutput(TypedDict, total=False):
+    ManagedRules: Optional[ManagedRuleDescriptions]
+    NextToken: Optional[NextToken]
+
+
+class ListMetricStreamsInput(ServiceRequest):
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[ListMetricStreamsMaxResults]
+
+
+class MetricStreamEntry(TypedDict, total=False):
+    Arn: Optional[AmazonResourceName]
+    CreationDate: Optional[Timestamp]
+    LastUpdateDate: Optional[Timestamp]
+    Name: Optional[MetricStreamName]
+    FirehoseArn: Optional[AmazonResourceName]
+    State: Optional[MetricStreamState]
+    OutputFormat: Optional[MetricStreamOutputFormat]
+
+
+MetricStreamEntries = List[MetricStreamEntry]
+
+
+class ListMetricStreamsOutput(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    Entries: Optional[MetricStreamEntries]
+
+
+class ListMetricsInput(ServiceRequest):
+    Namespace: Optional[Namespace]
+    MetricName: Optional[MetricName]
+    Dimensions: Optional[DimensionFilters]
+    NextToken: Optional[NextToken]
+    RecentlyActive: Optional[RecentlyActive]
+    IncludeLinkedAccounts: Optional[IncludeLinkedAccounts]
+    OwningAccount: Optional[AccountId]
+
+
+OwningAccounts = List[AccountId]
+Metrics = List[Metric]
+
+
+class ListMetricsOutput(TypedDict, total=False):
+    Metrics: Optional[Metrics]
+    NextToken: Optional[NextToken]
+    OwningAccounts: Optional[OwningAccounts]
+
+
+class ListTagsForResourceInput(ServiceRequest):
+    ResourceARN: AmazonResourceName
+
+
+class Tag(TypedDict, total=False):
+    Key: TagKey
+    Value: TagValue
+
+
+TagList = List[Tag]
+
+
+class ListTagsForResourceOutput(TypedDict, total=False):
+    Tags: Optional[TagList]
+
+
+class ManagedRule(TypedDict, total=False):
+    TemplateName: TemplateName
+    ResourceARN: AmazonResourceName
+    Tags: Optional[TagList]
+
+
+ManagedRules = List[ManagedRule]
+MetricStreamNames = List[MetricStreamName]
+
+
+class PutAnomalyDetectorInput(ServiceRequest):
+    Namespace: Optional[Namespace]
+    MetricName: Optional[MetricName]
+    Dimensions: Optional[Dimensions]
+    Stat: Optional[AnomalyDetectorMetricStat]
+    Configuration: Optional[AnomalyDetectorConfiguration]
+    MetricCharacteristics: Optional[MetricCharacteristics]
+    SingleMetricAnomalyDetector: Optional[SingleMetricAnomalyDetector]
+    MetricMathAnomalyDetector: Optional[MetricMathAnomalyDetector]
+
+
+class PutAnomalyDetectorOutput(TypedDict, total=False):
+    pass
+
+
+class PutCompositeAlarmInput(ServiceRequest):
+    ActionsEnabled: Optional[ActionsEnabled]
+    AlarmActions: Optional[ResourceList]
+    AlarmDescription: Optional[AlarmDescription]
+    AlarmName: AlarmName
+    AlarmRule: AlarmRule
+    InsufficientDataActions: Optional[ResourceList]
+    OKActions: Optional[ResourceList]
+    Tags: Optional[TagList]
+    ActionsSuppressor: Optional[AlarmArn]
+    ActionsSuppressorWaitPeriod: Optional[SuppressorPeriod]
+    ActionsSuppressorExtensionPeriod: Optional[SuppressorPeriod]
+
+
+class PutDashboardInput(ServiceRequest):
+    DashboardName: DashboardName
+    DashboardBody: DashboardBody
+
+
+class PutDashboardOutput(TypedDict, total=False):
+    DashboardValidationMessages: Optional[DashboardValidationMessages]
+
+
+class PutInsightRuleInput(ServiceRequest):
+    RuleName: InsightRuleName
+    RuleState: Optional[InsightRuleState]
+    RuleDefinition: InsightRuleDefinition
+    Tags: Optional[TagList]
+    ApplyOnTransformedLogs: Optional[InsightRuleOnTransformedLogs]
+
+
+class PutInsightRuleOutput(TypedDict, total=False):
+    pass
+
+
+class PutManagedInsightRulesInput(ServiceRequest):
+    ManagedRules: ManagedRules
+
+
+class PutManagedInsightRulesOutput(TypedDict, total=False):
+    Failures: Optional[BatchFailures]
+
+
+class PutMetricAlarmInput(ServiceRequest):
+    AlarmName: AlarmName
+    AlarmDescription: Optional[AlarmDescription]
+    ActionsEnabled: Optional[ActionsEnabled]
+    OKActions: Optional[ResourceList]
+    AlarmActions: Optional[ResourceList]
+    InsufficientDataActions: Optional[ResourceList]
+    MetricName: Optional[MetricName]
+    Namespace: Optional[Namespace]
+    Statistic: Optional[Statistic]
+    ExtendedStatistic: Optional[ExtendedStatistic]
+    Dimensions: Optional[Dimensions]
+    Period: Optional[Period]
+    Unit: Optional[StandardUnit]
+    EvaluationPeriods: EvaluationPeriods
+    DatapointsToAlarm: Optional[DatapointsToAlarm]
+    Threshold: Optional[Threshold]
+    ComparisonOperator: ComparisonOperator
+    TreatMissingData: Optional[TreatMissingData]
+    EvaluateLowSampleCountPercentile: Optional[EvaluateLowSampleCountPercentile]
+    Metrics: Optional[MetricDataQueries]
+    Tags: Optional[TagList]
+    ThresholdMetricId: Optional[MetricId]
+
+
+class PutMetricDataInput(ServiceRequest):
+    Namespace: Namespace
+    MetricData: Optional[MetricData]
+    EntityMetricData: Optional[EntityMetricDataList]
+    StrictEntityValidation: Optional[StrictEntityValidation]
+
+
+class PutMetricStreamInput(ServiceRequest):
+    Name: MetricStreamName
+    IncludeFilters: Optional[MetricStreamFilters]
+    ExcludeFilters: Optional[MetricStreamFilters]
+    FirehoseArn: AmazonResourceName
+    RoleArn: AmazonResourceName
+    OutputFormat: MetricStreamOutputFormat
+    Tags: Optional[TagList]
+    StatisticsConfigurations: Optional[MetricStreamStatisticsConfigurations]
+    IncludeLinkedAccountsMetrics: Optional[IncludeLinkedAccountsMetrics]
+
+
+class PutMetricStreamOutput(TypedDict, total=False):
+    Arn: Optional[AmazonResourceName]
+
+
+class SetAlarmStateInput(ServiceRequest):
+    AlarmName: AlarmName
+    StateValue: StateValue
+    StateReason: StateReason
+    StateReasonData: Optional[StateReasonData]
+
+
+class StartMetricStreamsInput(ServiceRequest):
+    Names: MetricStreamNames
+
+
+class StartMetricStreamsOutput(TypedDict, total=False):
+    pass
+
+
+class StopMetricStreamsInput(ServiceRequest):
+    Names: MetricStreamNames
+
+
+class StopMetricStreamsOutput(TypedDict, total=False):
+    pass
+
+
+TagKeyList = List[TagKey]
+
+
+class TagResourceInput(ServiceRequest):
+    ResourceARN: AmazonResourceName
+    Tags: TagList
+
+
+class TagResourceOutput(TypedDict, total=False):
+    pass
+
+
+class UntagResourceInput(ServiceRequest):
+    ResourceARN: AmazonResourceName
+    TagKeys: TagKeyList
+
+
+class UntagResourceOutput(TypedDict, total=False):
+    pass
+
+
+class CloudwatchApi:
+    service = "cloudwatch"
+    version = "2010-08-01"
+
+    @handler("DeleteAlarms")
+    def delete_alarms(self, context: RequestContext, alarm_names: AlarmNames, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteAnomalyDetector")
+    def delete_anomaly_detector(
+        self,
+        context: RequestContext,
+        namespace: Namespace | None = None,
+        metric_name: MetricName | None = None,
+        dimensions: Dimensions | None = None,
+        stat: AnomalyDetectorMetricStat | None = None,
+        single_metric_anomaly_detector: SingleMetricAnomalyDetector | None = None,
+        metric_math_anomaly_detector: MetricMathAnomalyDetector | None = None,
+        **kwargs,
+    ) -> DeleteAnomalyDetectorOutput:
+        raise NotImplementedError
+
+    @handler("DeleteDashboards")
+    def delete_dashboards(
+        self, context: RequestContext, dashboard_names: DashboardNames, **kwargs
+    ) -> DeleteDashboardsOutput:
+        raise NotImplementedError
+
+    @handler("DeleteInsightRules")
+    def delete_insight_rules(
+        self, context: RequestContext, rule_names: InsightRuleNames, **kwargs
+    ) -> DeleteInsightRulesOutput:
+        raise NotImplementedError
+
+    @handler("DeleteMetricStream")
+    def delete_metric_stream(
+        self, context: RequestContext, name: MetricStreamName, **kwargs
+    ) -> DeleteMetricStreamOutput:
+        raise NotImplementedError
+
+    @handler("DescribeAlarmHistory")
+    def describe_alarm_history(
+        self,
+        context: RequestContext,
+        alarm_name: AlarmName | None = None,
+        alarm_types: AlarmTypes | None = None,
+        history_item_type: HistoryItemType | None = None,
+        start_date: Timestamp | None = None,
+        end_date: Timestamp | None = None,
+        max_records: MaxRecords | None = None,
+        next_token: NextToken | None = None,
+        scan_by: ScanBy | None = None,
+        **kwargs,
+    ) -> DescribeAlarmHistoryOutput:
+        raise NotImplementedError
+
+    @handler("DescribeAlarms")
+    def describe_alarms(
+        self,
+        context: RequestContext,
+        alarm_names: AlarmNames | None = None,
+        alarm_name_prefix: AlarmNamePrefix | None = None,
+        alarm_types: AlarmTypes | None = None,
+        children_of_alarm_name: AlarmName | None = None,
+        parents_of_alarm_name: AlarmName | None = None,
+        state_value: StateValue | None = None,
+        action_prefix: ActionPrefix | None = None,
+        max_records: MaxRecords | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeAlarmsOutput:
+        raise NotImplementedError
+
+    @handler("DescribeAlarmsForMetric")
+    def describe_alarms_for_metric(
+        self,
+        context: RequestContext,
+        metric_name: MetricName,
+        namespace: Namespace,
+        statistic: Statistic | None = None,
+        extended_statistic: ExtendedStatistic | None = None,
+        dimensions: Dimensions | None = None,
+        period: Period | None = None,
+        unit: StandardUnit | None = None,
+        **kwargs,
+    ) -> DescribeAlarmsForMetricOutput:
+        raise NotImplementedError
+
+    @handler("DescribeAnomalyDetectors")
+    def describe_anomaly_detectors(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        max_results: MaxReturnedResultsCount | None = None,
+        namespace: Namespace | None = None,
+        metric_name: MetricName | None = None,
+        dimensions: Dimensions | None = None,
+        anomaly_detector_types: AnomalyDetectorTypes | None = None,
+        **kwargs,
+    ) -> DescribeAnomalyDetectorsOutput:
+        raise NotImplementedError
+
+    @handler("DescribeInsightRules")
+    def describe_insight_rules(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        max_results: InsightRuleMaxResults | None = None,
+        **kwargs,
+    ) -> DescribeInsightRulesOutput:
+        raise NotImplementedError
+
+    @handler("DisableAlarmActions")
+    def disable_alarm_actions(
+        self, context: RequestContext, alarm_names: AlarmNames, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DisableInsightRules")
+    def disable_insight_rules(
+        self, context: RequestContext, rule_names: InsightRuleNames, **kwargs
+    ) -> DisableInsightRulesOutput:
+        raise NotImplementedError
+
+    @handler("EnableAlarmActions")
+    def enable_alarm_actions(
+        self, context: RequestContext, alarm_names: AlarmNames, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("EnableInsightRules")
+    def enable_insight_rules(
+        self, context: RequestContext, rule_names: InsightRuleNames, **kwargs
+    ) -> EnableInsightRulesOutput:
+        raise NotImplementedError
+
+    @handler("GetDashboard")
+    def get_dashboard(
+        self, context: RequestContext, dashboard_name: DashboardName, **kwargs
+    ) -> GetDashboardOutput:
+        raise NotImplementedError
+
+    @handler("GetInsightRuleReport")
+    def get_insight_rule_report(
+        self,
+        context: RequestContext,
+        rule_name: InsightRuleName,
+        start_time: Timestamp,
+        end_time: Timestamp,
+        period: Period,
+        max_contributor_count: InsightRuleUnboundInteger | None = None,
+        metrics: InsightRuleMetricList | None = None,
+        order_by: InsightRuleOrderBy | None = None,
+        **kwargs,
+    ) -> GetInsightRuleReportOutput:
GetInsightRuleReportOutput: + raise NotImplementedError + + @handler("GetMetricData") + def get_metric_data( + self, + context: RequestContext, + metric_data_queries: MetricDataQueries, + start_time: Timestamp, + end_time: Timestamp, + next_token: NextToken | None = None, + scan_by: ScanBy | None = None, + max_datapoints: GetMetricDataMaxDatapoints | None = None, + label_options: LabelOptions | None = None, + **kwargs, + ) -> GetMetricDataOutput: + raise NotImplementedError + + @handler("GetMetricStatistics") + def get_metric_statistics( + self, + context: RequestContext, + namespace: Namespace, + metric_name: MetricName, + start_time: Timestamp, + end_time: Timestamp, + period: Period, + dimensions: Dimensions | None = None, + statistics: Statistics | None = None, + extended_statistics: ExtendedStatistics | None = None, + unit: StandardUnit | None = None, + **kwargs, + ) -> GetMetricStatisticsOutput: + raise NotImplementedError + + @handler("GetMetricStream") + def get_metric_stream( + self, context: RequestContext, name: MetricStreamName, **kwargs + ) -> GetMetricStreamOutput: + raise NotImplementedError + + @handler("GetMetricWidgetImage") + def get_metric_widget_image( + self, + context: RequestContext, + metric_widget: MetricWidget, + output_format: OutputFormat | None = None, + **kwargs, + ) -> GetMetricWidgetImageOutput: + raise NotImplementedError + + @handler("ListDashboards") + def list_dashboards( + self, + context: RequestContext, + dashboard_name_prefix: DashboardNamePrefix | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListDashboardsOutput: + raise NotImplementedError + + @handler("ListManagedInsightRules") + def list_managed_insight_rules( + self, + context: RequestContext, + resource_arn: AmazonResourceName, + next_token: NextToken | None = None, + max_results: InsightRuleMaxResults | None = None, + **kwargs, + ) -> ListManagedInsightRulesOutput: + raise NotImplementedError + + @handler("ListMetricStreams") + def list_metric_streams( + self, + context: RequestContext, + next_token: NextToken | None = None, + max_results: ListMetricStreamsMaxResults | None = None, + **kwargs, + ) -> ListMetricStreamsOutput: + raise NotImplementedError + + @handler("ListMetrics") + def list_metrics( + self, + context: RequestContext, + namespace: Namespace | None = None, + metric_name: MetricName | None = None, + dimensions: DimensionFilters | None = None, + next_token: NextToken | None = None, + recently_active: RecentlyActive | None = None, + include_linked_accounts: IncludeLinkedAccounts | None = None, + owning_account: AccountId | None = None, + **kwargs, + ) -> ListMetricsOutput: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, **kwargs + ) -> ListTagsForResourceOutput: + raise NotImplementedError + + @handler("PutAnomalyDetector") + def put_anomaly_detector( + self, + context: RequestContext, + namespace: Namespace | None = None, + metric_name: MetricName | None = None, + dimensions: Dimensions | None = None, + stat: AnomalyDetectorMetricStat | None = None, + configuration: AnomalyDetectorConfiguration | None = None, + metric_characteristics: MetricCharacteristics | None = None, + single_metric_anomaly_detector: SingleMetricAnomalyDetector | None = None, + metric_math_anomaly_detector: MetricMathAnomalyDetector | None = None, + **kwargs, + ) -> PutAnomalyDetectorOutput: + raise NotImplementedError + + @handler("PutCompositeAlarm") + def 
put_composite_alarm( + self, + context: RequestContext, + alarm_name: AlarmName, + alarm_rule: AlarmRule, + actions_enabled: ActionsEnabled | None = None, + alarm_actions: ResourceList | None = None, + alarm_description: AlarmDescription | None = None, + insufficient_data_actions: ResourceList | None = None, + ok_actions: ResourceList | None = None, + tags: TagList | None = None, + actions_suppressor: AlarmArn | None = None, + actions_suppressor_wait_period: SuppressorPeriod | None = None, + actions_suppressor_extension_period: SuppressorPeriod | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutDashboard") + def put_dashboard( + self, + context: RequestContext, + dashboard_name: DashboardName, + dashboard_body: DashboardBody, + **kwargs, + ) -> PutDashboardOutput: + raise NotImplementedError + + @handler("PutInsightRule") + def put_insight_rule( + self, + context: RequestContext, + rule_name: InsightRuleName, + rule_definition: InsightRuleDefinition, + rule_state: InsightRuleState | None = None, + tags: TagList | None = None, + apply_on_transformed_logs: InsightRuleOnTransformedLogs | None = None, + **kwargs, + ) -> PutInsightRuleOutput: + raise NotImplementedError + + @handler("PutManagedInsightRules") + def put_managed_insight_rules( + self, context: RequestContext, managed_rules: ManagedRules, **kwargs + ) -> PutManagedInsightRulesOutput: + raise NotImplementedError + + @handler("PutMetricAlarm") + def put_metric_alarm( + self, + context: RequestContext, + alarm_name: AlarmName, + evaluation_periods: EvaluationPeriods, + comparison_operator: ComparisonOperator, + alarm_description: AlarmDescription | None = None, + actions_enabled: ActionsEnabled | None = None, + ok_actions: ResourceList | None = None, + alarm_actions: ResourceList | None = None, + insufficient_data_actions: ResourceList | None = None, + metric_name: MetricName | None = None, + namespace: Namespace | None = None, + statistic: Statistic | None = None, + extended_statistic: ExtendedStatistic | None = None, + dimensions: Dimensions | None = None, + period: Period | None = None, + unit: StandardUnit | None = None, + datapoints_to_alarm: DatapointsToAlarm | None = None, + threshold: Threshold | None = None, + treat_missing_data: TreatMissingData | None = None, + evaluate_low_sample_count_percentile: EvaluateLowSampleCountPercentile | None = None, + metrics: MetricDataQueries | None = None, + tags: TagList | None = None, + threshold_metric_id: MetricId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutMetricData") + def put_metric_data( + self, + context: RequestContext, + namespace: Namespace, + metric_data: MetricData | None = None, + entity_metric_data: EntityMetricDataList | None = None, + strict_entity_validation: StrictEntityValidation | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutMetricStream") + def put_metric_stream( + self, + context: RequestContext, + name: MetricStreamName, + firehose_arn: AmazonResourceName, + role_arn: AmazonResourceName, + output_format: MetricStreamOutputFormat, + include_filters: MetricStreamFilters | None = None, + exclude_filters: MetricStreamFilters | None = None, + tags: TagList | None = None, + statistics_configurations: MetricStreamStatisticsConfigurations | None = None, + include_linked_accounts_metrics: IncludeLinkedAccountsMetrics | None = None, + **kwargs, + ) -> PutMetricStreamOutput: + raise NotImplementedError + + @handler("SetAlarmState") + def set_alarm_state( + self, + 
context: RequestContext, + alarm_name: AlarmName, + state_value: StateValue, + state_reason: StateReason, + state_reason_data: StateReasonData | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("StartMetricStreams") + def start_metric_streams( + self, context: RequestContext, names: MetricStreamNames, **kwargs + ) -> StartMetricStreamsOutput: + raise NotImplementedError + + @handler("StopMetricStreams") + def stop_metric_streams( + self, context: RequestContext, names: MetricStreamNames, **kwargs + ) -> StopMetricStreamsOutput: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, tags: TagList, **kwargs + ) -> TagResourceOutput: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, + context: RequestContext, + resource_arn: AmazonResourceName, + tag_keys: TagKeyList, + **kwargs, + ) -> UntagResourceOutput: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/config/__init__.py b/localstack-core/localstack/aws/api/config/__init__.py new file mode 100644 index 0000000000000..80d86b2edb05d --- /dev/null +++ b/localstack-core/localstack/aws/api/config/__init__.py @@ -0,0 +1,4078 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +ARN = str +AccountId = str +AllSupported = bool +AmazonResourceName = str +Annotation = str +AutoRemediationAttempts = int +AvailabilityZone = str +AwsRegion = str +BaseResourceId = str +Boolean = bool +ChannelName = str +ClientToken = str +ComplianceScore = str +ConfigRuleName = str +Configuration = str +ConfigurationAggregatorArn = str +ConfigurationAggregatorName = str +ConfigurationItemMD5Hash = str +ConfigurationRecorderFilterValue = str +ConfigurationStateId = str +ConformancePackArn = str +ConformancePackId = str +ConformancePackName = str +ConformancePackStatusReason = str +CosmosPageLimit = int +DeliveryS3Bucket = str +DeliveryS3KeyPrefix = str +DescribeConformancePackComplianceLimit = int +DescribePendingAggregationRequestsLimit = int +Description = str +EmptiableStringWithCharLimit256 = str +ErrorMessage = str +EvaluationContextIdentifier = str +EvaluationTimeout = int +Expression = str +FieldName = str +GetConformancePackComplianceDetailsLimit = int +GroupByAPILimit = int +IncludeGlobalResourceTypes = bool +Integer = int +Limit = int +ListResourceEvaluationsPageItemLimit = int +MaxResults = int +Name = str +NextToken = str +OrganizationConfigRuleName = str +OrganizationConformancePackName = str +PageSizeLimit = int +ParameterName = str +ParameterValue = str +Percentage = int +PolicyRuntime = str +PolicyText = str +QueryArn = str +QueryDescription = str +QueryExpression = str +QueryId = str +QueryName = str +RecorderName = str +RelatedEvent = str +RelationshipName = str +ResourceConfiguration = str +ResourceEvaluationId = str +ResourceId = str +ResourceName = str +ResourceTypeString = str +ResourceTypeValue = str +RetentionConfigurationName = str +RetentionPeriodInDays = int +RuleLimit = int +SSMDocumentName = str +SSMDocumentVersion = str +SchemaVersionId = str +ServicePrincipal = str +ServicePrincipalValue = str +StackArn = str +String = str +StringWithCharLimit1024 = str +StringWithCharLimit128 = str +StringWithCharLimit2048 = str +StringWithCharLimit256 = str +StringWithCharLimit256Min0 = str +StringWithCharLimit64 = str 
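Both generated modules follow the same skeleton: every operation is declared under an `@handler("OperationName")` decorator and simply raises `NotImplementedError`, and a concrete provider subclasses the generated API class to fill in only the operations it supports. Below is a minimal sketch of that pattern against the CloudWatch stubs above; the `CloudwatchApi` import path, the provider class name, and the in-memory dict are assumptions for illustration, not part of this diff.

```python
# Hedged sketch: overriding generated stubs such as put_dashboard /
# get_dashboard above. The base class and module path are assumed; the
# plain dict stands in for a real state backend.
from localstack.aws.api import RequestContext, handler
from localstack.aws.api.cloudwatch import CloudwatchApi  # assumed path


class InMemoryCloudwatchProvider(CloudwatchApi):
    def __init__(self):
        # dashboard name -> dashboard body (JSON string)
        self.dashboards: dict[str, str] = {}

    @handler("PutDashboard")
    def put_dashboard(self, context: RequestContext, dashboard_name, dashboard_body, **kwargs):
        self.dashboards[dashboard_name] = dashboard_body
        # Output shapes are TypedDicts, so plain dicts are valid returns.
        return {"DashboardValidationMessages": []}

    @handler("GetDashboard")
    def get_dashboard(self, context: RequestContext, dashboard_name, **kwargs):
        return {
            "DashboardName": dashboard_name,
            "DashboardBody": self.dashboards[dashboard_name],
        }
```

Because the output shapes are `TypedDict`s declared with `total=False`, handlers can return ordinary dicts and omit optional members entirely. (The generated type aliases continue below.)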
+StringWithCharLimit768 = str +SupplementaryConfigurationName = str +SupplementaryConfigurationValue = str +TagKey = str +TagValue = str +TemplateBody = str +TemplateS3Uri = str +Value = str +Version = str + + +class AggregateConformancePackComplianceSummaryGroupKey(StrEnum): + ACCOUNT_ID = "ACCOUNT_ID" + AWS_REGION = "AWS_REGION" + + +class AggregatedSourceStatusType(StrEnum): + FAILED = "FAILED" + SUCCEEDED = "SUCCEEDED" + OUTDATED = "OUTDATED" + + +class AggregatedSourceType(StrEnum): + ACCOUNT = "ACCOUNT" + ORGANIZATION = "ORGANIZATION" + + +class AggregatorFilterType(StrEnum): + INCLUDE = "INCLUDE" + + +class ChronologicalOrder(StrEnum): + Reverse = "Reverse" + Forward = "Forward" + + +class ComplianceType(StrEnum): + COMPLIANT = "COMPLIANT" + NON_COMPLIANT = "NON_COMPLIANT" + NOT_APPLICABLE = "NOT_APPLICABLE" + INSUFFICIENT_DATA = "INSUFFICIENT_DATA" + + +class ConfigRuleComplianceSummaryGroupKey(StrEnum): + ACCOUNT_ID = "ACCOUNT_ID" + AWS_REGION = "AWS_REGION" + + +class ConfigRuleState(StrEnum): + ACTIVE = "ACTIVE" + DELETING = "DELETING" + DELETING_RESULTS = "DELETING_RESULTS" + EVALUATING = "EVALUATING" + + +class ConfigurationItemStatus(StrEnum): + OK = "OK" + ResourceDiscovered = "ResourceDiscovered" + ResourceNotRecorded = "ResourceNotRecorded" + ResourceDeleted = "ResourceDeleted" + ResourceDeletedNotRecorded = "ResourceDeletedNotRecorded" + + +class ConfigurationRecorderFilterName(StrEnum): + recordingScope = "recordingScope" + + +class ConformancePackComplianceType(StrEnum): + COMPLIANT = "COMPLIANT" + NON_COMPLIANT = "NON_COMPLIANT" + INSUFFICIENT_DATA = "INSUFFICIENT_DATA" + + +class ConformancePackState(StrEnum): + CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS" + CREATE_COMPLETE = "CREATE_COMPLETE" + CREATE_FAILED = "CREATE_FAILED" + DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS" + DELETE_FAILED = "DELETE_FAILED" + + +class DeliveryStatus(StrEnum): + Success = "Success" + Failure = "Failure" + Not_Applicable = "Not_Applicable" + + +class EvaluationMode(StrEnum): + DETECTIVE = "DETECTIVE" + PROACTIVE = "PROACTIVE" + + +class EventSource(StrEnum): + aws_config = "aws.config" + + +class MaximumExecutionFrequency(StrEnum): + One_Hour = "One_Hour" + Three_Hours = "Three_Hours" + Six_Hours = "Six_Hours" + Twelve_Hours = "Twelve_Hours" + TwentyFour_Hours = "TwentyFour_Hours" + + +class MemberAccountRuleStatus(StrEnum): + CREATE_SUCCESSFUL = "CREATE_SUCCESSFUL" + CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS" + CREATE_FAILED = "CREATE_FAILED" + DELETE_SUCCESSFUL = "DELETE_SUCCESSFUL" + DELETE_FAILED = "DELETE_FAILED" + DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS" + UPDATE_SUCCESSFUL = "UPDATE_SUCCESSFUL" + UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS" + UPDATE_FAILED = "UPDATE_FAILED" + + +class MessageType(StrEnum): + ConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" + ConfigurationSnapshotDeliveryCompleted = "ConfigurationSnapshotDeliveryCompleted" + ScheduledNotification = "ScheduledNotification" + OversizedConfigurationItemChangeNotification = "OversizedConfigurationItemChangeNotification" + + +class OrganizationConfigRuleTriggerType(StrEnum): + ConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" + OversizedConfigurationItemChangeNotification = "OversizedConfigurationItemChangeNotification" + ScheduledNotification = "ScheduledNotification" + + +class OrganizationConfigRuleTriggerTypeNoSN(StrEnum): + ConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" + OversizedConfigurationItemChangeNotification = 
"OversizedConfigurationItemChangeNotification" + + +class OrganizationResourceDetailedStatus(StrEnum): + CREATE_SUCCESSFUL = "CREATE_SUCCESSFUL" + CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS" + CREATE_FAILED = "CREATE_FAILED" + DELETE_SUCCESSFUL = "DELETE_SUCCESSFUL" + DELETE_FAILED = "DELETE_FAILED" + DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS" + UPDATE_SUCCESSFUL = "UPDATE_SUCCESSFUL" + UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS" + UPDATE_FAILED = "UPDATE_FAILED" + + +class OrganizationResourceStatus(StrEnum): + CREATE_SUCCESSFUL = "CREATE_SUCCESSFUL" + CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS" + CREATE_FAILED = "CREATE_FAILED" + DELETE_SUCCESSFUL = "DELETE_SUCCESSFUL" + DELETE_FAILED = "DELETE_FAILED" + DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS" + UPDATE_SUCCESSFUL = "UPDATE_SUCCESSFUL" + UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS" + UPDATE_FAILED = "UPDATE_FAILED" + + +class OrganizationRuleStatus(StrEnum): + CREATE_SUCCESSFUL = "CREATE_SUCCESSFUL" + CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS" + CREATE_FAILED = "CREATE_FAILED" + DELETE_SUCCESSFUL = "DELETE_SUCCESSFUL" + DELETE_FAILED = "DELETE_FAILED" + DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS" + UPDATE_SUCCESSFUL = "UPDATE_SUCCESSFUL" + UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS" + UPDATE_FAILED = "UPDATE_FAILED" + + +class Owner(StrEnum): + CUSTOM_LAMBDA = "CUSTOM_LAMBDA" + AWS = "AWS" + CUSTOM_POLICY = "CUSTOM_POLICY" + + +class RecorderStatus(StrEnum): + Pending = "Pending" + Success = "Success" + Failure = "Failure" + NotApplicable = "NotApplicable" + + +class RecordingFrequency(StrEnum): + CONTINUOUS = "CONTINUOUS" + DAILY = "DAILY" + + +class RecordingScope(StrEnum): + INTERNAL = "INTERNAL" + PAID = "PAID" + + +class RecordingStrategyType(StrEnum): + ALL_SUPPORTED_RESOURCE_TYPES = "ALL_SUPPORTED_RESOURCE_TYPES" + INCLUSION_BY_RESOURCE_TYPES = "INCLUSION_BY_RESOURCE_TYPES" + EXCLUSION_BY_RESOURCE_TYPES = "EXCLUSION_BY_RESOURCE_TYPES" + + +class RemediationExecutionState(StrEnum): + QUEUED = "QUEUED" + IN_PROGRESS = "IN_PROGRESS" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + + +class RemediationExecutionStepState(StrEnum): + SUCCEEDED = "SUCCEEDED" + PENDING = "PENDING" + FAILED = "FAILED" + + +class RemediationTargetType(StrEnum): + SSM_DOCUMENT = "SSM_DOCUMENT" + + +class ResourceConfigurationSchemaType(StrEnum): + CFN_RESOURCE_SCHEMA = "CFN_RESOURCE_SCHEMA" + + +class ResourceCountGroupKey(StrEnum): + RESOURCE_TYPE = "RESOURCE_TYPE" + ACCOUNT_ID = "ACCOUNT_ID" + AWS_REGION = "AWS_REGION" + + +class ResourceEvaluationStatus(StrEnum): + IN_PROGRESS = "IN_PROGRESS" + FAILED = "FAILED" + SUCCEEDED = "SUCCEEDED" + + +class ResourceType(StrEnum): + AWS_EC2_CustomerGateway = "AWS::EC2::CustomerGateway" + AWS_EC2_EIP = "AWS::EC2::EIP" + AWS_EC2_Host = "AWS::EC2::Host" + AWS_EC2_Instance = "AWS::EC2::Instance" + AWS_EC2_InternetGateway = "AWS::EC2::InternetGateway" + AWS_EC2_NetworkAcl = "AWS::EC2::NetworkAcl" + AWS_EC2_NetworkInterface = "AWS::EC2::NetworkInterface" + AWS_EC2_RouteTable = "AWS::EC2::RouteTable" + AWS_EC2_SecurityGroup = "AWS::EC2::SecurityGroup" + AWS_EC2_Subnet = "AWS::EC2::Subnet" + AWS_CloudTrail_Trail = "AWS::CloudTrail::Trail" + AWS_EC2_Volume = "AWS::EC2::Volume" + AWS_EC2_VPC = "AWS::EC2::VPC" + AWS_EC2_VPNConnection = "AWS::EC2::VPNConnection" + AWS_EC2_VPNGateway = "AWS::EC2::VPNGateway" + AWS_EC2_RegisteredHAInstance = "AWS::EC2::RegisteredHAInstance" + AWS_EC2_NatGateway = "AWS::EC2::NatGateway" + AWS_EC2_EgressOnlyInternetGateway = "AWS::EC2::EgressOnlyInternetGateway" + AWS_EC2_VPCEndpoint = "AWS::EC2::VPCEndpoint" + 
AWS_EC2_VPCEndpointService = "AWS::EC2::VPCEndpointService" + AWS_EC2_FlowLog = "AWS::EC2::FlowLog" + AWS_EC2_VPCPeeringConnection = "AWS::EC2::VPCPeeringConnection" + AWS_Elasticsearch_Domain = "AWS::Elasticsearch::Domain" + AWS_IAM_Group = "AWS::IAM::Group" + AWS_IAM_Policy = "AWS::IAM::Policy" + AWS_IAM_Role = "AWS::IAM::Role" + AWS_IAM_User = "AWS::IAM::User" + AWS_ElasticLoadBalancingV2_LoadBalancer = "AWS::ElasticLoadBalancingV2::LoadBalancer" + AWS_ACM_Certificate = "AWS::ACM::Certificate" + AWS_RDS_DBInstance = "AWS::RDS::DBInstance" + AWS_RDS_DBSubnetGroup = "AWS::RDS::DBSubnetGroup" + AWS_RDS_DBSecurityGroup = "AWS::RDS::DBSecurityGroup" + AWS_RDS_DBSnapshot = "AWS::RDS::DBSnapshot" + AWS_RDS_DBCluster = "AWS::RDS::DBCluster" + AWS_RDS_DBClusterSnapshot = "AWS::RDS::DBClusterSnapshot" + AWS_RDS_EventSubscription = "AWS::RDS::EventSubscription" + AWS_S3_Bucket = "AWS::S3::Bucket" + AWS_S3_AccountPublicAccessBlock = "AWS::S3::AccountPublicAccessBlock" + AWS_Redshift_Cluster = "AWS::Redshift::Cluster" + AWS_Redshift_ClusterSnapshot = "AWS::Redshift::ClusterSnapshot" + AWS_Redshift_ClusterParameterGroup = "AWS::Redshift::ClusterParameterGroup" + AWS_Redshift_ClusterSecurityGroup = "AWS::Redshift::ClusterSecurityGroup" + AWS_Redshift_ClusterSubnetGroup = "AWS::Redshift::ClusterSubnetGroup" + AWS_Redshift_EventSubscription = "AWS::Redshift::EventSubscription" + AWS_SSM_ManagedInstanceInventory = "AWS::SSM::ManagedInstanceInventory" + AWS_CloudWatch_Alarm = "AWS::CloudWatch::Alarm" + AWS_CloudFormation_Stack = "AWS::CloudFormation::Stack" + AWS_ElasticLoadBalancing_LoadBalancer = "AWS::ElasticLoadBalancing::LoadBalancer" + AWS_AutoScaling_AutoScalingGroup = "AWS::AutoScaling::AutoScalingGroup" + AWS_AutoScaling_LaunchConfiguration = "AWS::AutoScaling::LaunchConfiguration" + AWS_AutoScaling_ScalingPolicy = "AWS::AutoScaling::ScalingPolicy" + AWS_AutoScaling_ScheduledAction = "AWS::AutoScaling::ScheduledAction" + AWS_DynamoDB_Table = "AWS::DynamoDB::Table" + AWS_CodeBuild_Project = "AWS::CodeBuild::Project" + AWS_WAF_RateBasedRule = "AWS::WAF::RateBasedRule" + AWS_WAF_Rule = "AWS::WAF::Rule" + AWS_WAF_RuleGroup = "AWS::WAF::RuleGroup" + AWS_WAF_WebACL = "AWS::WAF::WebACL" + AWS_WAFRegional_RateBasedRule = "AWS::WAFRegional::RateBasedRule" + AWS_WAFRegional_Rule = "AWS::WAFRegional::Rule" + AWS_WAFRegional_RuleGroup = "AWS::WAFRegional::RuleGroup" + AWS_WAFRegional_WebACL = "AWS::WAFRegional::WebACL" + AWS_CloudFront_Distribution = "AWS::CloudFront::Distribution" + AWS_CloudFront_StreamingDistribution = "AWS::CloudFront::StreamingDistribution" + AWS_Lambda_Function = "AWS::Lambda::Function" + AWS_NetworkFirewall_Firewall = "AWS::NetworkFirewall::Firewall" + AWS_NetworkFirewall_FirewallPolicy = "AWS::NetworkFirewall::FirewallPolicy" + AWS_NetworkFirewall_RuleGroup = "AWS::NetworkFirewall::RuleGroup" + AWS_ElasticBeanstalk_Application = "AWS::ElasticBeanstalk::Application" + AWS_ElasticBeanstalk_ApplicationVersion = "AWS::ElasticBeanstalk::ApplicationVersion" + AWS_ElasticBeanstalk_Environment = "AWS::ElasticBeanstalk::Environment" + AWS_WAFv2_WebACL = "AWS::WAFv2::WebACL" + AWS_WAFv2_RuleGroup = "AWS::WAFv2::RuleGroup" + AWS_WAFv2_IPSet = "AWS::WAFv2::IPSet" + AWS_WAFv2_RegexPatternSet = "AWS::WAFv2::RegexPatternSet" + AWS_WAFv2_ManagedRuleSet = "AWS::WAFv2::ManagedRuleSet" + AWS_XRay_EncryptionConfig = "AWS::XRay::EncryptionConfig" + AWS_SSM_AssociationCompliance = "AWS::SSM::AssociationCompliance" + AWS_SSM_PatchCompliance = "AWS::SSM::PatchCompliance" + AWS_Shield_Protection = 
"AWS::Shield::Protection" + AWS_ShieldRegional_Protection = "AWS::ShieldRegional::Protection" + AWS_Config_ConformancePackCompliance = "AWS::Config::ConformancePackCompliance" + AWS_Config_ResourceCompliance = "AWS::Config::ResourceCompliance" + AWS_ApiGateway_Stage = "AWS::ApiGateway::Stage" + AWS_ApiGateway_RestApi = "AWS::ApiGateway::RestApi" + AWS_ApiGatewayV2_Stage = "AWS::ApiGatewayV2::Stage" + AWS_ApiGatewayV2_Api = "AWS::ApiGatewayV2::Api" + AWS_CodePipeline_Pipeline = "AWS::CodePipeline::Pipeline" + AWS_ServiceCatalog_CloudFormationProvisionedProduct = ( + "AWS::ServiceCatalog::CloudFormationProvisionedProduct" + ) + AWS_ServiceCatalog_CloudFormationProduct = "AWS::ServiceCatalog::CloudFormationProduct" + AWS_ServiceCatalog_Portfolio = "AWS::ServiceCatalog::Portfolio" + AWS_SQS_Queue = "AWS::SQS::Queue" + AWS_KMS_Key = "AWS::KMS::Key" + AWS_QLDB_Ledger = "AWS::QLDB::Ledger" + AWS_SecretsManager_Secret = "AWS::SecretsManager::Secret" + AWS_SNS_Topic = "AWS::SNS::Topic" + AWS_SSM_FileData = "AWS::SSM::FileData" + AWS_Backup_BackupPlan = "AWS::Backup::BackupPlan" + AWS_Backup_BackupSelection = "AWS::Backup::BackupSelection" + AWS_Backup_BackupVault = "AWS::Backup::BackupVault" + AWS_Backup_RecoveryPoint = "AWS::Backup::RecoveryPoint" + AWS_ECR_Repository = "AWS::ECR::Repository" + AWS_ECS_Cluster = "AWS::ECS::Cluster" + AWS_ECS_Service = "AWS::ECS::Service" + AWS_ECS_TaskDefinition = "AWS::ECS::TaskDefinition" + AWS_EFS_AccessPoint = "AWS::EFS::AccessPoint" + AWS_EFS_FileSystem = "AWS::EFS::FileSystem" + AWS_EKS_Cluster = "AWS::EKS::Cluster" + AWS_OpenSearch_Domain = "AWS::OpenSearch::Domain" + AWS_EC2_TransitGateway = "AWS::EC2::TransitGateway" + AWS_Kinesis_Stream = "AWS::Kinesis::Stream" + AWS_Kinesis_StreamConsumer = "AWS::Kinesis::StreamConsumer" + AWS_CodeDeploy_Application = "AWS::CodeDeploy::Application" + AWS_CodeDeploy_DeploymentConfig = "AWS::CodeDeploy::DeploymentConfig" + AWS_CodeDeploy_DeploymentGroup = "AWS::CodeDeploy::DeploymentGroup" + AWS_EC2_LaunchTemplate = "AWS::EC2::LaunchTemplate" + AWS_ECR_PublicRepository = "AWS::ECR::PublicRepository" + AWS_GuardDuty_Detector = "AWS::GuardDuty::Detector" + AWS_EMR_SecurityConfiguration = "AWS::EMR::SecurityConfiguration" + AWS_SageMaker_CodeRepository = "AWS::SageMaker::CodeRepository" + AWS_Route53Resolver_ResolverEndpoint = "AWS::Route53Resolver::ResolverEndpoint" + AWS_Route53Resolver_ResolverRule = "AWS::Route53Resolver::ResolverRule" + AWS_Route53Resolver_ResolverRuleAssociation = "AWS::Route53Resolver::ResolverRuleAssociation" + AWS_DMS_ReplicationSubnetGroup = "AWS::DMS::ReplicationSubnetGroup" + AWS_DMS_EventSubscription = "AWS::DMS::EventSubscription" + AWS_MSK_Cluster = "AWS::MSK::Cluster" + AWS_StepFunctions_Activity = "AWS::StepFunctions::Activity" + AWS_WorkSpaces_Workspace = "AWS::WorkSpaces::Workspace" + AWS_WorkSpaces_ConnectionAlias = "AWS::WorkSpaces::ConnectionAlias" + AWS_SageMaker_Model = "AWS::SageMaker::Model" + AWS_ElasticLoadBalancingV2_Listener = "AWS::ElasticLoadBalancingV2::Listener" + AWS_StepFunctions_StateMachine = "AWS::StepFunctions::StateMachine" + AWS_Batch_JobQueue = "AWS::Batch::JobQueue" + AWS_Batch_ComputeEnvironment = "AWS::Batch::ComputeEnvironment" + AWS_AccessAnalyzer_Analyzer = "AWS::AccessAnalyzer::Analyzer" + AWS_Athena_WorkGroup = "AWS::Athena::WorkGroup" + AWS_Athena_DataCatalog = "AWS::Athena::DataCatalog" + AWS_Detective_Graph = "AWS::Detective::Graph" + AWS_GlobalAccelerator_Accelerator = "AWS::GlobalAccelerator::Accelerator" + AWS_GlobalAccelerator_EndpointGroup = 
"AWS::GlobalAccelerator::EndpointGroup" + AWS_GlobalAccelerator_Listener = "AWS::GlobalAccelerator::Listener" + AWS_EC2_TransitGatewayAttachment = "AWS::EC2::TransitGatewayAttachment" + AWS_EC2_TransitGatewayRouteTable = "AWS::EC2::TransitGatewayRouteTable" + AWS_DMS_Certificate = "AWS::DMS::Certificate" + AWS_AppConfig_Application = "AWS::AppConfig::Application" + AWS_AppSync_GraphQLApi = "AWS::AppSync::GraphQLApi" + AWS_DataSync_LocationSMB = "AWS::DataSync::LocationSMB" + AWS_DataSync_LocationFSxLustre = "AWS::DataSync::LocationFSxLustre" + AWS_DataSync_LocationS3 = "AWS::DataSync::LocationS3" + AWS_DataSync_LocationEFS = "AWS::DataSync::LocationEFS" + AWS_DataSync_Task = "AWS::DataSync::Task" + AWS_DataSync_LocationNFS = "AWS::DataSync::LocationNFS" + AWS_EC2_NetworkInsightsAccessScopeAnalysis = "AWS::EC2::NetworkInsightsAccessScopeAnalysis" + AWS_EKS_FargateProfile = "AWS::EKS::FargateProfile" + AWS_Glue_Job = "AWS::Glue::Job" + AWS_GuardDuty_ThreatIntelSet = "AWS::GuardDuty::ThreatIntelSet" + AWS_GuardDuty_IPSet = "AWS::GuardDuty::IPSet" + AWS_SageMaker_Workteam = "AWS::SageMaker::Workteam" + AWS_SageMaker_NotebookInstanceLifecycleConfig = ( + "AWS::SageMaker::NotebookInstanceLifecycleConfig" + ) + AWS_ServiceDiscovery_Service = "AWS::ServiceDiscovery::Service" + AWS_ServiceDiscovery_PublicDnsNamespace = "AWS::ServiceDiscovery::PublicDnsNamespace" + AWS_SES_ContactList = "AWS::SES::ContactList" + AWS_SES_ConfigurationSet = "AWS::SES::ConfigurationSet" + AWS_Route53_HostedZone = "AWS::Route53::HostedZone" + AWS_IoTEvents_Input = "AWS::IoTEvents::Input" + AWS_IoTEvents_DetectorModel = "AWS::IoTEvents::DetectorModel" + AWS_IoTEvents_AlarmModel = "AWS::IoTEvents::AlarmModel" + AWS_ServiceDiscovery_HttpNamespace = "AWS::ServiceDiscovery::HttpNamespace" + AWS_Events_EventBus = "AWS::Events::EventBus" + AWS_ImageBuilder_ContainerRecipe = "AWS::ImageBuilder::ContainerRecipe" + AWS_ImageBuilder_DistributionConfiguration = "AWS::ImageBuilder::DistributionConfiguration" + AWS_ImageBuilder_InfrastructureConfiguration = "AWS::ImageBuilder::InfrastructureConfiguration" + AWS_DataSync_LocationObjectStorage = "AWS::DataSync::LocationObjectStorage" + AWS_DataSync_LocationHDFS = "AWS::DataSync::LocationHDFS" + AWS_Glue_Classifier = "AWS::Glue::Classifier" + AWS_Route53RecoveryReadiness_Cell = "AWS::Route53RecoveryReadiness::Cell" + AWS_Route53RecoveryReadiness_ReadinessCheck = "AWS::Route53RecoveryReadiness::ReadinessCheck" + AWS_ECR_RegistryPolicy = "AWS::ECR::RegistryPolicy" + AWS_Backup_ReportPlan = "AWS::Backup::ReportPlan" + AWS_Lightsail_Certificate = "AWS::Lightsail::Certificate" + AWS_RUM_AppMonitor = "AWS::RUM::AppMonitor" + AWS_Events_Endpoint = "AWS::Events::Endpoint" + AWS_SES_ReceiptRuleSet = "AWS::SES::ReceiptRuleSet" + AWS_Events_Archive = "AWS::Events::Archive" + AWS_Events_ApiDestination = "AWS::Events::ApiDestination" + AWS_Lightsail_Disk = "AWS::Lightsail::Disk" + AWS_FIS_ExperimentTemplate = "AWS::FIS::ExperimentTemplate" + AWS_DataSync_LocationFSxWindows = "AWS::DataSync::LocationFSxWindows" + AWS_SES_ReceiptFilter = "AWS::SES::ReceiptFilter" + AWS_GuardDuty_Filter = "AWS::GuardDuty::Filter" + AWS_SES_Template = "AWS::SES::Template" + AWS_AmazonMQ_Broker = "AWS::AmazonMQ::Broker" + AWS_AppConfig_Environment = "AWS::AppConfig::Environment" + AWS_AppConfig_ConfigurationProfile = "AWS::AppConfig::ConfigurationProfile" + AWS_Cloud9_EnvironmentEC2 = "AWS::Cloud9::EnvironmentEC2" + AWS_EventSchemas_Registry = "AWS::EventSchemas::Registry" + AWS_EventSchemas_RegistryPolicy = 
"AWS::EventSchemas::RegistryPolicy" + AWS_EventSchemas_Discoverer = "AWS::EventSchemas::Discoverer" + AWS_FraudDetector_Label = "AWS::FraudDetector::Label" + AWS_FraudDetector_EntityType = "AWS::FraudDetector::EntityType" + AWS_FraudDetector_Variable = "AWS::FraudDetector::Variable" + AWS_FraudDetector_Outcome = "AWS::FraudDetector::Outcome" + AWS_IoT_Authorizer = "AWS::IoT::Authorizer" + AWS_IoT_SecurityProfile = "AWS::IoT::SecurityProfile" + AWS_IoT_RoleAlias = "AWS::IoT::RoleAlias" + AWS_IoT_Dimension = "AWS::IoT::Dimension" + AWS_IoTAnalytics_Datastore = "AWS::IoTAnalytics::Datastore" + AWS_Lightsail_Bucket = "AWS::Lightsail::Bucket" + AWS_Lightsail_StaticIp = "AWS::Lightsail::StaticIp" + AWS_MediaPackage_PackagingGroup = "AWS::MediaPackage::PackagingGroup" + AWS_Route53RecoveryReadiness_RecoveryGroup = "AWS::Route53RecoveryReadiness::RecoveryGroup" + AWS_ResilienceHub_ResiliencyPolicy = "AWS::ResilienceHub::ResiliencyPolicy" + AWS_Transfer_Workflow = "AWS::Transfer::Workflow" + AWS_EKS_IdentityProviderConfig = "AWS::EKS::IdentityProviderConfig" + AWS_EKS_Addon = "AWS::EKS::Addon" + AWS_Glue_MLTransform = "AWS::Glue::MLTransform" + AWS_IoT_Policy = "AWS::IoT::Policy" + AWS_IoT_MitigationAction = "AWS::IoT::MitigationAction" + AWS_IoTTwinMaker_Workspace = "AWS::IoTTwinMaker::Workspace" + AWS_IoTTwinMaker_Entity = "AWS::IoTTwinMaker::Entity" + AWS_IoTAnalytics_Dataset = "AWS::IoTAnalytics::Dataset" + AWS_IoTAnalytics_Pipeline = "AWS::IoTAnalytics::Pipeline" + AWS_IoTAnalytics_Channel = "AWS::IoTAnalytics::Channel" + AWS_IoTSiteWise_Dashboard = "AWS::IoTSiteWise::Dashboard" + AWS_IoTSiteWise_Project = "AWS::IoTSiteWise::Project" + AWS_IoTSiteWise_Portal = "AWS::IoTSiteWise::Portal" + AWS_IoTSiteWise_AssetModel = "AWS::IoTSiteWise::AssetModel" + AWS_IVS_Channel = "AWS::IVS::Channel" + AWS_IVS_RecordingConfiguration = "AWS::IVS::RecordingConfiguration" + AWS_IVS_PlaybackKeyPair = "AWS::IVS::PlaybackKeyPair" + AWS_KinesisAnalyticsV2_Application = "AWS::KinesisAnalyticsV2::Application" + AWS_RDS_GlobalCluster = "AWS::RDS::GlobalCluster" + AWS_S3_MultiRegionAccessPoint = "AWS::S3::MultiRegionAccessPoint" + AWS_DeviceFarm_TestGridProject = "AWS::DeviceFarm::TestGridProject" + AWS_Budgets_BudgetsAction = "AWS::Budgets::BudgetsAction" + AWS_Lex_Bot = "AWS::Lex::Bot" + AWS_CodeGuruReviewer_RepositoryAssociation = "AWS::CodeGuruReviewer::RepositoryAssociation" + AWS_IoT_CustomMetric = "AWS::IoT::CustomMetric" + AWS_Route53Resolver_FirewallDomainList = "AWS::Route53Resolver::FirewallDomainList" + AWS_RoboMaker_RobotApplicationVersion = "AWS::RoboMaker::RobotApplicationVersion" + AWS_EC2_TrafficMirrorSession = "AWS::EC2::TrafficMirrorSession" + AWS_IoTSiteWise_Gateway = "AWS::IoTSiteWise::Gateway" + AWS_Lex_BotAlias = "AWS::Lex::BotAlias" + AWS_LookoutMetrics_Alert = "AWS::LookoutMetrics::Alert" + AWS_IoT_AccountAuditConfiguration = "AWS::IoT::AccountAuditConfiguration" + AWS_EC2_TrafficMirrorTarget = "AWS::EC2::TrafficMirrorTarget" + AWS_S3_StorageLens = "AWS::S3::StorageLens" + AWS_IoT_ScheduledAudit = "AWS::IoT::ScheduledAudit" + AWS_Events_Connection = "AWS::Events::Connection" + AWS_EventSchemas_Schema = "AWS::EventSchemas::Schema" + AWS_MediaPackage_PackagingConfiguration = "AWS::MediaPackage::PackagingConfiguration" + AWS_KinesisVideo_SignalingChannel = "AWS::KinesisVideo::SignalingChannel" + AWS_AppStream_DirectoryConfig = "AWS::AppStream::DirectoryConfig" + AWS_LookoutVision_Project = "AWS::LookoutVision::Project" + AWS_Route53RecoveryControl_Cluster = "AWS::Route53RecoveryControl::Cluster" 
+ AWS_Route53RecoveryControl_SafetyRule = "AWS::Route53RecoveryControl::SafetyRule" + AWS_Route53RecoveryControl_ControlPanel = "AWS::Route53RecoveryControl::ControlPanel" + AWS_Route53RecoveryControl_RoutingControl = "AWS::Route53RecoveryControl::RoutingControl" + AWS_Route53RecoveryReadiness_ResourceSet = "AWS::Route53RecoveryReadiness::ResourceSet" + AWS_RoboMaker_SimulationApplication = "AWS::RoboMaker::SimulationApplication" + AWS_RoboMaker_RobotApplication = "AWS::RoboMaker::RobotApplication" + AWS_HealthLake_FHIRDatastore = "AWS::HealthLake::FHIRDatastore" + AWS_Pinpoint_Segment = "AWS::Pinpoint::Segment" + AWS_Pinpoint_ApplicationSettings = "AWS::Pinpoint::ApplicationSettings" + AWS_Events_Rule = "AWS::Events::Rule" + AWS_EC2_DHCPOptions = "AWS::EC2::DHCPOptions" + AWS_EC2_NetworkInsightsPath = "AWS::EC2::NetworkInsightsPath" + AWS_EC2_TrafficMirrorFilter = "AWS::EC2::TrafficMirrorFilter" + AWS_EC2_IPAM = "AWS::EC2::IPAM" + AWS_IoTTwinMaker_Scene = "AWS::IoTTwinMaker::Scene" + AWS_NetworkManager_TransitGatewayRegistration = ( + "AWS::NetworkManager::TransitGatewayRegistration" + ) + AWS_CustomerProfiles_Domain = "AWS::CustomerProfiles::Domain" + AWS_AutoScaling_WarmPool = "AWS::AutoScaling::WarmPool" + AWS_Connect_PhoneNumber = "AWS::Connect::PhoneNumber" + AWS_AppConfig_DeploymentStrategy = "AWS::AppConfig::DeploymentStrategy" + AWS_AppFlow_Flow = "AWS::AppFlow::Flow" + AWS_AuditManager_Assessment = "AWS::AuditManager::Assessment" + AWS_CloudWatch_MetricStream = "AWS::CloudWatch::MetricStream" + AWS_DeviceFarm_InstanceProfile = "AWS::DeviceFarm::InstanceProfile" + AWS_DeviceFarm_Project = "AWS::DeviceFarm::Project" + AWS_EC2_EC2Fleet = "AWS::EC2::EC2Fleet" + AWS_EC2_SubnetRouteTableAssociation = "AWS::EC2::SubnetRouteTableAssociation" + AWS_ECR_PullThroughCacheRule = "AWS::ECR::PullThroughCacheRule" + AWS_GroundStation_Config = "AWS::GroundStation::Config" + AWS_ImageBuilder_ImagePipeline = "AWS::ImageBuilder::ImagePipeline" + AWS_IoT_FleetMetric = "AWS::IoT::FleetMetric" + AWS_IoTWireless_ServiceProfile = "AWS::IoTWireless::ServiceProfile" + AWS_NetworkManager_Device = "AWS::NetworkManager::Device" + AWS_NetworkManager_GlobalNetwork = "AWS::NetworkManager::GlobalNetwork" + AWS_NetworkManager_Link = "AWS::NetworkManager::Link" + AWS_NetworkManager_Site = "AWS::NetworkManager::Site" + AWS_Panorama_Package = "AWS::Panorama::Package" + AWS_Pinpoint_App = "AWS::Pinpoint::App" + AWS_Redshift_ScheduledAction = "AWS::Redshift::ScheduledAction" + AWS_Route53Resolver_FirewallRuleGroupAssociation = ( + "AWS::Route53Resolver::FirewallRuleGroupAssociation" + ) + AWS_SageMaker_AppImageConfig = "AWS::SageMaker::AppImageConfig" + AWS_SageMaker_Image = "AWS::SageMaker::Image" + AWS_ECS_TaskSet = "AWS::ECS::TaskSet" + AWS_Cassandra_Keyspace = "AWS::Cassandra::Keyspace" + AWS_Signer_SigningProfile = "AWS::Signer::SigningProfile" + AWS_Amplify_App = "AWS::Amplify::App" + AWS_AppMesh_VirtualNode = "AWS::AppMesh::VirtualNode" + AWS_AppMesh_VirtualService = "AWS::AppMesh::VirtualService" + AWS_AppRunner_VpcConnector = "AWS::AppRunner::VpcConnector" + AWS_AppStream_Application = "AWS::AppStream::Application" + AWS_CodeArtifact_Repository = "AWS::CodeArtifact::Repository" + AWS_EC2_PrefixList = "AWS::EC2::PrefixList" + AWS_EC2_SpotFleet = "AWS::EC2::SpotFleet" + AWS_Evidently_Project = "AWS::Evidently::Project" + AWS_Forecast_Dataset = "AWS::Forecast::Dataset" + AWS_IAM_SAMLProvider = "AWS::IAM::SAMLProvider" + AWS_IAM_ServerCertificate = "AWS::IAM::ServerCertificate" + AWS_Pinpoint_Campaign = 
"AWS::Pinpoint::Campaign" + AWS_Pinpoint_InAppTemplate = "AWS::Pinpoint::InAppTemplate" + AWS_SageMaker_Domain = "AWS::SageMaker::Domain" + AWS_Transfer_Agreement = "AWS::Transfer::Agreement" + AWS_Transfer_Connector = "AWS::Transfer::Connector" + AWS_KinesisFirehose_DeliveryStream = "AWS::KinesisFirehose::DeliveryStream" + AWS_Amplify_Branch = "AWS::Amplify::Branch" + AWS_AppIntegrations_EventIntegration = "AWS::AppIntegrations::EventIntegration" + AWS_AppMesh_Route = "AWS::AppMesh::Route" + AWS_Athena_PreparedStatement = "AWS::Athena::PreparedStatement" + AWS_EC2_IPAMScope = "AWS::EC2::IPAMScope" + AWS_Evidently_Launch = "AWS::Evidently::Launch" + AWS_Forecast_DatasetGroup = "AWS::Forecast::DatasetGroup" + AWS_GreengrassV2_ComponentVersion = "AWS::GreengrassV2::ComponentVersion" + AWS_GroundStation_MissionProfile = "AWS::GroundStation::MissionProfile" + AWS_MediaConnect_FlowEntitlement = "AWS::MediaConnect::FlowEntitlement" + AWS_MediaConnect_FlowVpcInterface = "AWS::MediaConnect::FlowVpcInterface" + AWS_MediaTailor_PlaybackConfiguration = "AWS::MediaTailor::PlaybackConfiguration" + AWS_MSK_Configuration = "AWS::MSK::Configuration" + AWS_Personalize_Dataset = "AWS::Personalize::Dataset" + AWS_Personalize_Schema = "AWS::Personalize::Schema" + AWS_Personalize_Solution = "AWS::Personalize::Solution" + AWS_Pinpoint_EmailTemplate = "AWS::Pinpoint::EmailTemplate" + AWS_Pinpoint_EventStream = "AWS::Pinpoint::EventStream" + AWS_ResilienceHub_App = "AWS::ResilienceHub::App" + AWS_ACMPCA_CertificateAuthority = "AWS::ACMPCA::CertificateAuthority" + AWS_AppConfig_HostedConfigurationVersion = "AWS::AppConfig::HostedConfigurationVersion" + AWS_AppMesh_VirtualGateway = "AWS::AppMesh::VirtualGateway" + AWS_AppMesh_VirtualRouter = "AWS::AppMesh::VirtualRouter" + AWS_AppRunner_Service = "AWS::AppRunner::Service" + AWS_CustomerProfiles_ObjectType = "AWS::CustomerProfiles::ObjectType" + AWS_DMS_Endpoint = "AWS::DMS::Endpoint" + AWS_EC2_CapacityReservation = "AWS::EC2::CapacityReservation" + AWS_EC2_ClientVpnEndpoint = "AWS::EC2::ClientVpnEndpoint" + AWS_Kendra_Index = "AWS::Kendra::Index" + AWS_KinesisVideo_Stream = "AWS::KinesisVideo::Stream" + AWS_Logs_Destination = "AWS::Logs::Destination" + AWS_Pinpoint_EmailChannel = "AWS::Pinpoint::EmailChannel" + AWS_S3_AccessPoint = "AWS::S3::AccessPoint" + AWS_NetworkManager_CustomerGatewayAssociation = ( + "AWS::NetworkManager::CustomerGatewayAssociation" + ) + AWS_NetworkManager_LinkAssociation = "AWS::NetworkManager::LinkAssociation" + AWS_IoTWireless_MulticastGroup = "AWS::IoTWireless::MulticastGroup" + AWS_Personalize_DatasetGroup = "AWS::Personalize::DatasetGroup" + AWS_IoTTwinMaker_ComponentType = "AWS::IoTTwinMaker::ComponentType" + AWS_CodeBuild_ReportGroup = "AWS::CodeBuild::ReportGroup" + AWS_SageMaker_FeatureGroup = "AWS::SageMaker::FeatureGroup" + AWS_MSK_BatchScramSecret = "AWS::MSK::BatchScramSecret" + AWS_AppStream_Stack = "AWS::AppStream::Stack" + AWS_IoT_JobTemplate = "AWS::IoT::JobTemplate" + AWS_IoTWireless_FuotaTask = "AWS::IoTWireless::FuotaTask" + AWS_IoT_ProvisioningTemplate = "AWS::IoT::ProvisioningTemplate" + AWS_InspectorV2_Filter = "AWS::InspectorV2::Filter" + AWS_Route53Resolver_ResolverQueryLoggingConfigAssociation = ( + "AWS::Route53Resolver::ResolverQueryLoggingConfigAssociation" + ) + AWS_ServiceDiscovery_Instance = "AWS::ServiceDiscovery::Instance" + AWS_Transfer_Certificate = "AWS::Transfer::Certificate" + AWS_MediaConnect_FlowSource = "AWS::MediaConnect::FlowSource" + AWS_APS_RuleGroupsNamespace = "AWS::APS::RuleGroupsNamespace" 
+    AWS_CodeGuruProfiler_ProfilingGroup = "AWS::CodeGuruProfiler::ProfilingGroup"
+    AWS_Route53Resolver_ResolverQueryLoggingConfig = (
+        "AWS::Route53Resolver::ResolverQueryLoggingConfig"
+    )
+    AWS_Batch_SchedulingPolicy = "AWS::Batch::SchedulingPolicy"
+    AWS_ACMPCA_CertificateAuthorityActivation = "AWS::ACMPCA::CertificateAuthorityActivation"
+    AWS_AppMesh_GatewayRoute = "AWS::AppMesh::GatewayRoute"
+    AWS_AppMesh_Mesh = "AWS::AppMesh::Mesh"
+    AWS_Connect_Instance = "AWS::Connect::Instance"
+    AWS_Connect_QuickConnect = "AWS::Connect::QuickConnect"
+    AWS_EC2_CarrierGateway = "AWS::EC2::CarrierGateway"
+    AWS_EC2_IPAMPool = "AWS::EC2::IPAMPool"
+    AWS_EC2_TransitGatewayConnect = "AWS::EC2::TransitGatewayConnect"
+    AWS_EC2_TransitGatewayMulticastDomain = "AWS::EC2::TransitGatewayMulticastDomain"
+    AWS_ECS_CapacityProvider = "AWS::ECS::CapacityProvider"
+    AWS_IAM_InstanceProfile = "AWS::IAM::InstanceProfile"
+    AWS_IoT_CACertificate = "AWS::IoT::CACertificate"
+    AWS_IoTTwinMaker_SyncJob = "AWS::IoTTwinMaker::SyncJob"
+    AWS_KafkaConnect_Connector = "AWS::KafkaConnect::Connector"
+    AWS_Lambda_CodeSigningConfig = "AWS::Lambda::CodeSigningConfig"
+    AWS_NetworkManager_ConnectPeer = "AWS::NetworkManager::ConnectPeer"
+    AWS_ResourceExplorer2_Index = "AWS::ResourceExplorer2::Index"
+    AWS_AppStream_Fleet = "AWS::AppStream::Fleet"
+    AWS_Cognito_UserPool = "AWS::Cognito::UserPool"
+    AWS_Cognito_UserPoolClient = "AWS::Cognito::UserPoolClient"
+    AWS_Cognito_UserPoolGroup = "AWS::Cognito::UserPoolGroup"
+    AWS_EC2_NetworkInsightsAccessScope = "AWS::EC2::NetworkInsightsAccessScope"
+    AWS_EC2_NetworkInsightsAnalysis = "AWS::EC2::NetworkInsightsAnalysis"
+    AWS_Grafana_Workspace = "AWS::Grafana::Workspace"
+    AWS_GroundStation_DataflowEndpointGroup = "AWS::GroundStation::DataflowEndpointGroup"
+    AWS_ImageBuilder_ImageRecipe = "AWS::ImageBuilder::ImageRecipe"
+    AWS_KMS_Alias = "AWS::KMS::Alias"
+    AWS_M2_Environment = "AWS::M2::Environment"
+    AWS_QuickSight_DataSource = "AWS::QuickSight::DataSource"
+    AWS_QuickSight_Template = "AWS::QuickSight::Template"
+    AWS_QuickSight_Theme = "AWS::QuickSight::Theme"
+    AWS_RDS_OptionGroup = "AWS::RDS::OptionGroup"
+    AWS_Redshift_EndpointAccess = "AWS::Redshift::EndpointAccess"
+    AWS_Route53Resolver_FirewallRuleGroup = "AWS::Route53Resolver::FirewallRuleGroup"
+    AWS_SSM_Document = "AWS::SSM::Document"
+
+
+class ResourceValueType(StrEnum):
+    RESOURCE_ID = "RESOURCE_ID"
+
+
+class SortBy(StrEnum):
+    SCORE = "SCORE"
+
+
+class SortOrder(StrEnum):
+    ASCENDING = "ASCENDING"
+    DESCENDING = "DESCENDING"
+
+
+class ConflictException(ServiceException):
+    code: str = "ConflictException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ConformancePackTemplateValidationException(ServiceException):
+    code: str = "ConformancePackTemplateValidationException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class IdempotentParameterMismatch(ServiceException):
+    code: str = "IdempotentParameterMismatch"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InsufficientDeliveryPolicyException(ServiceException):
+    code: str = "InsufficientDeliveryPolicyException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InsufficientPermissionsException(ServiceException):
+    code: str = "InsufficientPermissionsException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidConfigurationRecorderNameException(ServiceException):
+    code: str = "InvalidConfigurationRecorderNameException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
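Each generated exception pins down only the serialized error `code`, the fault attribution, and the HTTP status; the human-readable message is supplied at raise time. A hedged sketch of how a provider handler might surface one of the classes defined just above (the handler, its arguments, and the `recorders` dict are illustrative, not part of this diff):

```python
# Illustrative only: raising a generated ServiceException subclass from a
# provider handler. The framework serializes it as an HTTP 400 error
# response with code "InvalidConfigurationRecorderNameException".
def put_configuration_recorder(self, context, configuration_recorder, **kwargs):
    name = configuration_recorder.get("name") or ""
    if not name.strip():
        raise InvalidConfigurationRecorderNameException(
            f"The configuration recorder name '{name}' is not valid."
        )
    self.recorders[name] = configuration_recorder  # hypothetical store
```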
+class InvalidDeliveryChannelNameException(ServiceException): + code: str = "InvalidDeliveryChannelNameException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidExpressionException(ServiceException): + code: str = "InvalidExpressionException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidLimitException(ServiceException): + code: str = "InvalidLimitException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidNextTokenException(ServiceException): + code: str = "InvalidNextTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidParameterValueException(ServiceException): + code: str = "InvalidParameterValueException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidRecordingGroupException(ServiceException): + code: str = "InvalidRecordingGroupException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidResultTokenException(ServiceException): + code: str = "InvalidResultTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidRoleException(ServiceException): + code: str = "InvalidRoleException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidS3KeyPrefixException(ServiceException): + code: str = "InvalidS3KeyPrefixException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidS3KmsKeyArnException(ServiceException): + code: str = "InvalidS3KmsKeyArnException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidSNSTopicARNException(ServiceException): + code: str = "InvalidSNSTopicARNException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTimeRangeException(ServiceException): + code: str = "InvalidTimeRangeException" + sender_fault: bool = False + status_code: int = 400 + + +class LastDeliveryChannelDeleteFailedException(ServiceException): + code: str = "LastDeliveryChannelDeleteFailedException" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MaxActiveResourcesExceededException(ServiceException): + code: str = "MaxActiveResourcesExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MaxNumberOfConfigRulesExceededException(ServiceException): + code: str = "MaxNumberOfConfigRulesExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MaxNumberOfConfigurationRecordersExceededException(ServiceException): + code: str = "MaxNumberOfConfigurationRecordersExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MaxNumberOfConformancePacksExceededException(ServiceException): + code: str = "MaxNumberOfConformancePacksExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MaxNumberOfDeliveryChannelsExceededException(ServiceException): + code: str = "MaxNumberOfDeliveryChannelsExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MaxNumberOfOrganizationConfigRulesExceededException(ServiceException): + code: str = "MaxNumberOfOrganizationConfigRulesExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MaxNumberOfOrganizationConformancePacksExceededException(ServiceException): + code: str = "MaxNumberOfOrganizationConformancePacksExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class 
MaxNumberOfRetentionConfigurationsExceededException(ServiceException): + code: str = "MaxNumberOfRetentionConfigurationsExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class NoAvailableConfigurationRecorderException(ServiceException): + code: str = "NoAvailableConfigurationRecorderException" + sender_fault: bool = False + status_code: int = 400 + + +class NoAvailableDeliveryChannelException(ServiceException): + code: str = "NoAvailableDeliveryChannelException" + sender_fault: bool = False + status_code: int = 400 + + +class NoAvailableOrganizationException(ServiceException): + code: str = "NoAvailableOrganizationException" + sender_fault: bool = False + status_code: int = 400 + + +class NoRunningConfigurationRecorderException(ServiceException): + code: str = "NoRunningConfigurationRecorderException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchBucketException(ServiceException): + code: str = "NoSuchBucketException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchConfigRuleException(ServiceException): + code: str = "NoSuchConfigRuleException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchConfigRuleInConformancePackException(ServiceException): + code: str = "NoSuchConfigRuleInConformancePackException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchConfigurationAggregatorException(ServiceException): + code: str = "NoSuchConfigurationAggregatorException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchConfigurationRecorderException(ServiceException): + code: str = "NoSuchConfigurationRecorderException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchConformancePackException(ServiceException): + code: str = "NoSuchConformancePackException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchDeliveryChannelException(ServiceException): + code: str = "NoSuchDeliveryChannelException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchOrganizationConfigRuleException(ServiceException): + code: str = "NoSuchOrganizationConfigRuleException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchOrganizationConformancePackException(ServiceException): + code: str = "NoSuchOrganizationConformancePackException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchRemediationConfigurationException(ServiceException): + code: str = "NoSuchRemediationConfigurationException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchRemediationExceptionException(ServiceException): + code: str = "NoSuchRemediationExceptionException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchRetentionConfigurationException(ServiceException): + code: str = "NoSuchRetentionConfigurationException" + sender_fault: bool = False + status_code: int = 400 + + +class OrganizationAccessDeniedException(ServiceException): + code: str = "OrganizationAccessDeniedException" + sender_fault: bool = False + status_code: int = 400 + + +class OrganizationAllFeaturesNotEnabledException(ServiceException): + code: str = "OrganizationAllFeaturesNotEnabledException" + sender_fault: bool = False + status_code: int = 400 + + +class OrganizationConformancePackTemplateValidationException(ServiceException): + code: str = "OrganizationConformancePackTemplateValidationException" + sender_fault: bool = False + status_code: int = 400 + + +class 
OversizedConfigurationItemException(ServiceException): + code: str = "OversizedConfigurationItemException" + sender_fault: bool = False + status_code: int = 400 + + +class RemediationInProgressException(ServiceException): + code: str = "RemediationInProgressException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceConcurrentModificationException(ServiceException): + code: str = "ResourceConcurrentModificationException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceInUseException(ServiceException): + code: str = "ResourceInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotDiscoveredException(ServiceException): + code: str = "ResourceNotDiscoveredException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTagsException(ServiceException): + code: str = "TooManyTagsException" + sender_fault: bool = False + status_code: int = 400 + + +class UnmodifiableEntityException(ServiceException): + code: str = "UnmodifiableEntityException" + sender_fault: bool = False + status_code: int = 400 + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = False + status_code: int = 400 + + +AggregatorRegionList = List[String] +AccountAggregationSourceAccountList = List[AccountId] + + +class AccountAggregationSource(TypedDict, total=False): + AccountIds: AccountAggregationSourceAccountList + AllAwsRegions: Optional[Boolean] + AwsRegions: Optional[AggregatorRegionList] + + +AccountAggregationSourceList = List[AccountAggregationSource] + + +class ComplianceContributorCount(TypedDict, total=False): + CappedCount: Optional[Integer] + CapExceeded: Optional[Boolean] + + +class Compliance(TypedDict, total=False): + ComplianceType: Optional[ComplianceType] + ComplianceContributorCount: Optional[ComplianceContributorCount] + + +class AggregateComplianceByConfigRule(TypedDict, total=False): + ConfigRuleName: Optional[ConfigRuleName] + Compliance: Optional[Compliance] + AccountId: Optional[AccountId] + AwsRegion: Optional[AwsRegion] + + +AggregateComplianceByConfigRuleList = List[AggregateComplianceByConfigRule] + + +class AggregateConformancePackCompliance(TypedDict, total=False): + ComplianceType: Optional[ConformancePackComplianceType] + CompliantRuleCount: Optional[Integer] + NonCompliantRuleCount: Optional[Integer] + TotalRuleCount: Optional[Integer] + + +class AggregateComplianceByConformancePack(TypedDict, total=False): + ConformancePackName: Optional[ConformancePackName] + Compliance: Optional[AggregateConformancePackCompliance] + AccountId: Optional[AccountId] + AwsRegion: Optional[AwsRegion] + + +AggregateComplianceByConformancePackList = List[AggregateComplianceByConformancePack] +Date = datetime + + +class ComplianceSummary(TypedDict, total=False): + CompliantResourceCount: Optional[ComplianceContributorCount] + NonCompliantResourceCount: Optional[ComplianceContributorCount] + ComplianceSummaryTimestamp: Optional[Date] + + +class AggregateComplianceCount(TypedDict, total=False): + GroupName: Optional[StringWithCharLimit256] + ComplianceSummary: Optional[ComplianceSummary] + + +AggregateComplianceCountList = List[AggregateComplianceCount] + + +class AggregateConformancePackComplianceCount(TypedDict, total=False): + CompliantConformancePackCount: Optional[Integer] + NonCompliantConformancePackCount: 
Optional[Integer] + + +class AggregateConformancePackComplianceFilters(TypedDict, total=False): + ConformancePackName: Optional[ConformancePackName] + ComplianceType: Optional[ConformancePackComplianceType] + AccountId: Optional[AccountId] + AwsRegion: Optional[AwsRegion] + + +class AggregateConformancePackComplianceSummary(TypedDict, total=False): + ComplianceSummary: Optional[AggregateConformancePackComplianceCount] + GroupName: Optional[StringWithCharLimit256] + + +class AggregateConformancePackComplianceSummaryFilters(TypedDict, total=False): + AccountId: Optional[AccountId] + AwsRegion: Optional[AwsRegion] + + +AggregateConformancePackComplianceSummaryList = List[AggregateConformancePackComplianceSummary] + + +class EvaluationResultQualifier(TypedDict, total=False): + ConfigRuleName: Optional[ConfigRuleName] + ResourceType: Optional[StringWithCharLimit256] + ResourceId: Optional[BaseResourceId] + EvaluationMode: Optional[EvaluationMode] + + +class EvaluationResultIdentifier(TypedDict, total=False): + EvaluationResultQualifier: Optional[EvaluationResultQualifier] + OrderingTimestamp: Optional[Date] + ResourceEvaluationId: Optional[ResourceEvaluationId] + + +class AggregateEvaluationResult(TypedDict, total=False): + EvaluationResultIdentifier: Optional[EvaluationResultIdentifier] + ComplianceType: Optional[ComplianceType] + ResultRecordedTime: Optional[Date] + ConfigRuleInvokedTime: Optional[Date] + Annotation: Optional[StringWithCharLimit256] + AccountId: Optional[AccountId] + AwsRegion: Optional[AwsRegion] + + +AggregateEvaluationResultList = List[AggregateEvaluationResult] + + +class AggregateResourceIdentifier(TypedDict, total=False): + SourceAccountId: AccountId + SourceRegion: AwsRegion + ResourceId: ResourceId + ResourceType: ResourceType + ResourceName: Optional[ResourceName] + + +class AggregatedSourceStatus(TypedDict, total=False): + SourceId: Optional[String] + SourceType: Optional[AggregatedSourceType] + AwsRegion: Optional[AwsRegion] + LastUpdateStatus: Optional[AggregatedSourceStatusType] + LastUpdateTime: Optional[Date] + LastErrorCode: Optional[String] + LastErrorMessage: Optional[String] + + +AggregatedSourceStatusList = List[AggregatedSourceStatus] +AggregatedSourceStatusTypeList = List[AggregatedSourceStatusType] + + +class AggregationAuthorization(TypedDict, total=False): + AggregationAuthorizationArn: Optional[String] + AuthorizedAccountId: Optional[AccountId] + AuthorizedAwsRegion: Optional[AwsRegion] + CreationTime: Optional[Date] + + +AggregationAuthorizationList = List[AggregationAuthorization] +ResourceTypeValueList = List[ResourceTypeValue] + + +class AggregatorFilterResourceType(TypedDict, total=False): + Type: Optional[AggregatorFilterType] + Value: Optional[ResourceTypeValueList] + + +ServicePrincipalValueList = List[ServicePrincipalValue] + + +class AggregatorFilterServicePrincipal(TypedDict, total=False): + Type: Optional[AggregatorFilterType] + Value: Optional[ServicePrincipalValueList] + + +class AggregatorFilters(TypedDict, total=False): + ResourceType: Optional[AggregatorFilterResourceType] + ServicePrincipal: Optional[AggregatorFilterServicePrincipal] + + +ResourceTypeList = List[ResourceType] + + +class AssociateResourceTypesRequest(ServiceRequest): + ConfigurationRecorderArn: AmazonResourceName + ResourceTypes: ResourceTypeList + + +RecordingModeResourceTypesList = List[ResourceType] + + +class RecordingModeOverride(TypedDict, total=False): + description: Optional[Description] + resourceTypes: RecordingModeResourceTypesList + recordingFrequency: 
RecordingFrequency
+
+
+RecordingModeOverrides = List[RecordingModeOverride]
+
+
+class RecordingMode(TypedDict, total=False):
+    recordingFrequency: RecordingFrequency
+    recordingModeOverrides: Optional[RecordingModeOverrides]
+
+
+class RecordingStrategy(TypedDict, total=False):
+    useOnly: Optional[RecordingStrategyType]
+
+
+class ExclusionByResourceTypes(TypedDict, total=False):
+    resourceTypes: Optional[ResourceTypeList]
+
+
+class RecordingGroup(TypedDict, total=False):
+    allSupported: Optional[AllSupported]
+    includeGlobalResourceTypes: Optional[IncludeGlobalResourceTypes]
+    resourceTypes: Optional[ResourceTypeList]
+    exclusionByResourceTypes: Optional[ExclusionByResourceTypes]
+    recordingStrategy: Optional[RecordingStrategy]
+
+
+class ConfigurationRecorder(TypedDict, total=False):
+    arn: Optional[AmazonResourceName]
+    name: Optional[RecorderName]
+    roleARN: Optional[String]
+    recordingGroup: Optional[RecordingGroup]
+    recordingMode: Optional[RecordingMode]
+    recordingScope: Optional[RecordingScope]
+    servicePrincipal: Optional[ServicePrincipal]
+
+
+class AssociateResourceTypesResponse(TypedDict, total=False):
+    ConfigurationRecorder: ConfigurationRecorder
+
+
+AutoRemediationAttemptSeconds = int
+ConfigurationItemDeliveryTime = datetime
+SupplementaryConfiguration = Dict[SupplementaryConfigurationName, SupplementaryConfigurationValue]
+ResourceCreationTime = datetime
+ConfigurationItemCaptureTime = datetime
+
+
+class BaseConfigurationItem(TypedDict, total=False):
+    version: Optional[Version]
+    accountId: Optional[AccountId]
+    configurationItemCaptureTime: Optional[ConfigurationItemCaptureTime]
+    configurationItemStatus: Optional[ConfigurationItemStatus]
+    configurationStateId: Optional[ConfigurationStateId]
+    arn: Optional[ARN]
+    resourceType: Optional[ResourceType]
+    resourceId: Optional[ResourceId]
+    resourceName: Optional[ResourceName]
+    awsRegion: Optional[AwsRegion]
+    availabilityZone: Optional[AvailabilityZone]
+    resourceCreationTime: Optional[ResourceCreationTime]
+    configuration: Optional[Configuration]
+    supplementaryConfiguration: Optional[SupplementaryConfiguration]
+    recordingFrequency: Optional[RecordingFrequency]
+    configurationItemDeliveryTime: Optional[ConfigurationItemDeliveryTime]
+
+
+BaseConfigurationItems = List[BaseConfigurationItem]
+ResourceIdentifiersList = List[AggregateResourceIdentifier]
+
+
+class BatchGetAggregateResourceConfigRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    ResourceIdentifiers: ResourceIdentifiersList
+
+
+UnprocessedResourceIdentifierList = List[AggregateResourceIdentifier]
+
+
+class BatchGetAggregateResourceConfigResponse(TypedDict, total=False):
+    BaseConfigurationItems: Optional[BaseConfigurationItems]
+    UnprocessedResourceIdentifiers: Optional[UnprocessedResourceIdentifierList]
+
+
+class ResourceKey(TypedDict, total=False):
+    resourceType: ResourceType
+    resourceId: ResourceId
+
+
+ResourceKeys = List[ResourceKey]
+
+
+class BatchGetResourceConfigRequest(ServiceRequest):
+    resourceKeys: ResourceKeys
+
+
+class BatchGetResourceConfigResponse(TypedDict, total=False):
+    baseConfigurationItems: Optional[BaseConfigurationItems]
+    unprocessedResourceKeys: Optional[ResourceKeys]
+
+
+class ComplianceByConfigRule(TypedDict, total=False):
+    ConfigRuleName: Optional[StringWithCharLimit64]
+    Compliance: Optional[Compliance]
+
+
+ComplianceByConfigRules = List[ComplianceByConfigRule]
+
+
+class ComplianceByResource(TypedDict, total=False):
+    ResourceType: Optional[StringWithCharLimit256]
+    ResourceId: Optional[BaseResourceId]
+    Compliance: Optional[Compliance]
+
+
+ComplianceByResources = List[ComplianceByResource]
+ComplianceResourceTypes = List[StringWithCharLimit256]
+
+
+class ComplianceSummaryByResourceType(TypedDict, total=False):
+    ResourceType: Optional[StringWithCharLimit256]
+    ComplianceSummary: Optional[ComplianceSummary]
+
+
+ComplianceSummariesByResourceType = List[ComplianceSummaryByResourceType]
+ComplianceTypes = List[ComplianceType]
+
+
+class ConfigExportDeliveryInfo(TypedDict, total=False):
+    lastStatus: Optional[DeliveryStatus]
+    lastErrorCode: Optional[String]
+    lastErrorMessage: Optional[String]
+    lastAttemptTime: Optional[Date]
+    lastSuccessfulTime: Optional[Date]
+    nextDeliveryTime: Optional[Date]
+
+
+class EvaluationModeConfiguration(TypedDict, total=False):
+    Mode: Optional[EvaluationMode]
+
+
+EvaluationModes = List[EvaluationModeConfiguration]
+
+
+class CustomPolicyDetails(TypedDict, total=False):
+    PolicyRuntime: PolicyRuntime
+    PolicyText: PolicyText
+    EnableDebugLogDelivery: Optional[Boolean]
+
+
+class SourceDetail(TypedDict, total=False):
+    EventSource: Optional[EventSource]
+    MessageType: Optional[MessageType]
+    MaximumExecutionFrequency: Optional[MaximumExecutionFrequency]
+
+
+SourceDetails = List[SourceDetail]
+
+
+class Source(TypedDict, total=False):
+    Owner: Owner
+    SourceIdentifier: Optional[StringWithCharLimit256]
+    SourceDetails: Optional[SourceDetails]
+    CustomPolicyDetails: Optional[CustomPolicyDetails]
+
+
+class Scope(TypedDict, total=False):
+    ComplianceResourceTypes: Optional[ComplianceResourceTypes]
+    TagKey: Optional[StringWithCharLimit128]
+    TagValue: Optional[StringWithCharLimit256]
+    ComplianceResourceId: Optional[BaseResourceId]
+
+
+class ConfigRule(TypedDict, total=False):
+    ConfigRuleName: Optional[ConfigRuleName]
+    ConfigRuleArn: Optional[StringWithCharLimit256]
+    ConfigRuleId: Optional[StringWithCharLimit64]
+    Description: Optional[EmptiableStringWithCharLimit256]
+    Scope: Optional[Scope]
+    Source: Source
+    InputParameters: Optional[StringWithCharLimit1024]
+    MaximumExecutionFrequency: Optional[MaximumExecutionFrequency]
+    ConfigRuleState: Optional[ConfigRuleState]
+    CreatedBy: Optional[StringWithCharLimit256]
+    EvaluationModes: Optional[EvaluationModes]
+
+
+class ConfigRuleComplianceFilters(TypedDict, total=False):
+    ConfigRuleName: Optional[ConfigRuleName]
+    ComplianceType: Optional[ComplianceType]
+    AccountId: Optional[AccountId]
+    AwsRegion: Optional[AwsRegion]
+
+
+class ConfigRuleComplianceSummaryFilters(TypedDict, total=False):
+    AccountId: Optional[AccountId]
+    AwsRegion: Optional[AwsRegion]
+
+
+class ConfigRuleEvaluationStatus(TypedDict, total=False):
+    ConfigRuleName: Optional[ConfigRuleName]
+    ConfigRuleArn: Optional[String]
+    ConfigRuleId: Optional[String]
+    LastSuccessfulInvocationTime: Optional[Date]
+    LastFailedInvocationTime: Optional[Date]
+    LastSuccessfulEvaluationTime: Optional[Date]
+    LastFailedEvaluationTime: Optional[Date]
+    FirstActivatedTime: Optional[Date]
+    LastDeactivatedTime: Optional[Date]
+    LastErrorCode: Optional[String]
+    LastErrorMessage: Optional[String]
+    FirstEvaluationStarted: Optional[Boolean]
+    LastDebugLogDeliveryStatus: Optional[String]
+    LastDebugLogDeliveryStatusReason: Optional[String]
+    LastDebugLogDeliveryTime: Optional[Date]
+
+
+ConfigRuleEvaluationStatusList = List[ConfigRuleEvaluationStatus]
+ConfigRuleNames = List[ConfigRuleName]
+ConfigRules = List[ConfigRule]
+
+
+class ConfigSnapshotDeliveryProperties(TypedDict, total=False):
+    deliveryFrequency: Optional[MaximumExecutionFrequency]
+
+
+class ConfigStreamDeliveryInfo(TypedDict, total=False):
+    lastStatus: Optional[DeliveryStatus]
+    lastErrorCode: Optional[String]
+    lastErrorMessage: Optional[String]
+    lastStatusChangeTime: Optional[Date]
+
+
+class OrganizationAggregationSource(TypedDict, total=False):
+    RoleArn: String
+    AwsRegions: Optional[AggregatorRegionList]
+    AllAwsRegions: Optional[Boolean]
+
+
+class ConfigurationAggregator(TypedDict, total=False):
+    ConfigurationAggregatorName: Optional[ConfigurationAggregatorName]
+    ConfigurationAggregatorArn: Optional[ConfigurationAggregatorArn]
+    AccountAggregationSources: Optional[AccountAggregationSourceList]
+    OrganizationAggregationSource: Optional[OrganizationAggregationSource]
+    CreationTime: Optional[Date]
+    LastUpdatedTime: Optional[Date]
+    CreatedBy: Optional[StringWithCharLimit256]
+    AggregatorFilters: Optional[AggregatorFilters]
+
+
+ConfigurationAggregatorList = List[ConfigurationAggregator]
+ConfigurationAggregatorNameList = List[ConfigurationAggregatorName]
+
+
+class Relationship(TypedDict, total=False):
+    resourceType: Optional[ResourceType]
+    resourceId: Optional[ResourceId]
+    resourceName: Optional[ResourceName]
+    relationshipName: Optional[RelationshipName]
+
+
+RelationshipList = List[Relationship]
+RelatedEventList = List[RelatedEvent]
+Tags = Dict[Name, Value]
+
+
+class ConfigurationItem(TypedDict, total=False):
+    version: Optional[Version]
+    accountId: Optional[AccountId]
+    configurationItemCaptureTime: Optional[ConfigurationItemCaptureTime]
+    configurationItemStatus: Optional[ConfigurationItemStatus]
+    configurationStateId: Optional[ConfigurationStateId]
+    configurationItemMD5Hash: Optional[ConfigurationItemMD5Hash]
+    arn: Optional[ARN]
+    resourceType: Optional[ResourceType]
+    resourceId: Optional[ResourceId]
+    resourceName: Optional[ResourceName]
+    awsRegion: Optional[AwsRegion]
+    availabilityZone: Optional[AvailabilityZone]
+    resourceCreationTime: Optional[ResourceCreationTime]
+    tags: Optional[Tags]
+    relatedEvents: Optional[RelatedEventList]
+    relationships: Optional[RelationshipList]
+    configuration: Optional[Configuration]
+    supplementaryConfiguration: Optional[SupplementaryConfiguration]
+    recordingFrequency: Optional[RecordingFrequency]
+    configurationItemDeliveryTime: Optional[ConfigurationItemDeliveryTime]
+
+
+ConfigurationItemList = List[ConfigurationItem]
+ConfigurationRecorderFilterValues = List[ConfigurationRecorderFilterValue]
+
+
+class ConfigurationRecorderFilter(TypedDict, total=False):
+    filterName: Optional[ConfigurationRecorderFilterName]
+    filterValue: Optional[ConfigurationRecorderFilterValues]
+
+
+ConfigurationRecorderFilterList = List[ConfigurationRecorderFilter]
+ConfigurationRecorderList = List[ConfigurationRecorder]
+ConfigurationRecorderNameList = List[RecorderName]
+
+
+class ConfigurationRecorderStatus(TypedDict, total=False):
+    arn: Optional[AmazonResourceName]
+    name: Optional[String]
+    lastStartTime: Optional[Date]
+    lastStopTime: Optional[Date]
+    recording: Optional[Boolean]
+    lastStatus: Optional[RecorderStatus]
+    lastErrorCode: Optional[String]
+    lastErrorMessage: Optional[String]
+    lastStatusChangeTime: Optional[Date]
+    servicePrincipal: Optional[ServicePrincipal]
+
+
+ConfigurationRecorderStatusList = List[ConfigurationRecorderStatus]
+
+
+class ConfigurationRecorderSummary(TypedDict, total=False):
+    arn: AmazonResourceName
+    name: RecorderName
+    servicePrincipal: Optional[ServicePrincipal]
+    recordingScope: RecordingScope
+
+
+ConfigurationRecorderSummaries = List[ConfigurationRecorderSummary]
+ConformancePackConfigRuleNames = List[StringWithCharLimit64]
+
+
+class ConformancePackComplianceFilters(TypedDict, total=False):
+    ConfigRuleNames: Optional[ConformancePackConfigRuleNames]
+    ComplianceType: Optional[ConformancePackComplianceType]
+
+
+ConformancePackComplianceResourceIds = List[StringWithCharLimit256]
+LastUpdatedTime = datetime
+
+
+class ConformancePackComplianceScore(TypedDict, total=False):
+    Score: Optional[ComplianceScore]
+    ConformancePackName: Optional[ConformancePackName]
+    LastUpdatedTime: Optional[LastUpdatedTime]
+
+
+ConformancePackComplianceScores = List[ConformancePackComplianceScore]
+ConformancePackNameFilter = List[ConformancePackName]
+
+
+class ConformancePackComplianceScoresFilters(TypedDict, total=False):
+    ConformancePackNames: ConformancePackNameFilter
+
+
+class ConformancePackComplianceSummary(TypedDict, total=False):
+    ConformancePackName: ConformancePackName
+    ConformancePackComplianceStatus: ConformancePackComplianceType
+
+
+ConformancePackComplianceSummaryList = List[ConformancePackComplianceSummary]
+
+
+class TemplateSSMDocumentDetails(TypedDict, total=False):
+    DocumentName: SSMDocumentName
+    DocumentVersion: Optional[SSMDocumentVersion]
+
+
+class ConformancePackInputParameter(TypedDict, total=False):
+    ParameterName: ParameterName
+    ParameterValue: ParameterValue
+
+
+ConformancePackInputParameters = List[ConformancePackInputParameter]
+
+
+class ConformancePackDetail(TypedDict, total=False):
+    ConformancePackName: ConformancePackName
+    ConformancePackArn: ConformancePackArn
+    ConformancePackId: ConformancePackId
+    DeliveryS3Bucket: Optional[DeliveryS3Bucket]
+    DeliveryS3KeyPrefix: Optional[DeliveryS3KeyPrefix]
+    ConformancePackInputParameters: Optional[ConformancePackInputParameters]
+    LastUpdateRequestedTime: Optional[Date]
+    CreatedBy: Optional[StringWithCharLimit256]
+    TemplateSSMDocumentDetails: Optional[TemplateSSMDocumentDetails]
+
+
+ConformancePackDetailList = List[ConformancePackDetail]
+
+
+class ConformancePackEvaluationFilters(TypedDict, total=False):
+    ConfigRuleNames: Optional[ConformancePackConfigRuleNames]
+    ComplianceType: Optional[ConformancePackComplianceType]
+    ResourceType: Optional[StringWithCharLimit256]
+    ResourceIds: Optional[ConformancePackComplianceResourceIds]
+
+
+class ConformancePackEvaluationResult(TypedDict, total=False):
+    ComplianceType: ConformancePackComplianceType
+    EvaluationResultIdentifier: EvaluationResultIdentifier
+    ConfigRuleInvokedTime: Date
+    ResultRecordedTime: Date
+    Annotation: Optional[Annotation]
+
+
+ConformancePackNamesList = List[ConformancePackName]
+ConformancePackNamesToSummarizeList = List[ConformancePackName]
+ControlsList = List[StringWithCharLimit128]
+
+
+class ConformancePackRuleCompliance(TypedDict, total=False):
+    ConfigRuleName: Optional[ConfigRuleName]
+    ComplianceType: Optional[ConformancePackComplianceType]
+    Controls: Optional[ControlsList]
+
+
+ConformancePackRuleComplianceList = List[ConformancePackRuleCompliance]
+ConformancePackRuleEvaluationResultsList = List[ConformancePackEvaluationResult]
+
+
+class ConformancePackStatusDetail(TypedDict, total=False):
+    ConformancePackName: ConformancePackName
+    ConformancePackId: ConformancePackId
+    ConformancePackArn: ConformancePackArn
+    ConformancePackState: ConformancePackState
+    StackArn: StackArn
+    ConformancePackStatusReason: Optional[ConformancePackStatusReason]
+    LastUpdateRequestedTime: Date
+    LastUpdateCompletedTime: Optional[Date]
+
+
+ConformancePackStatusDetailsList = List[ConformancePackStatusDetail]
+DebugLogDeliveryAccounts = List[AccountId]
+
+
+class DeleteAggregationAuthorizationRequest(ServiceRequest):
+    AuthorizedAccountId: AccountId
+    AuthorizedAwsRegion: AwsRegion
+
+
+class DeleteConfigRuleRequest(ServiceRequest):
+    ConfigRuleName: ConfigRuleName
+
+
+class DeleteConfigurationAggregatorRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+
+
+class DeleteConfigurationRecorderRequest(ServiceRequest):
+    ConfigurationRecorderName: RecorderName
+
+
+class DeleteConformancePackRequest(ServiceRequest):
+    ConformancePackName: ConformancePackName
+
+
+class DeleteDeliveryChannelRequest(ServiceRequest):
+    DeliveryChannelName: ChannelName
+
+
+class DeleteEvaluationResultsRequest(ServiceRequest):
+    ConfigRuleName: StringWithCharLimit64
+
+
+class DeleteEvaluationResultsResponse(TypedDict, total=False):
+    pass
+
+
+class DeleteOrganizationConfigRuleRequest(ServiceRequest):
+    OrganizationConfigRuleName: OrganizationConfigRuleName
+
+
+class DeleteOrganizationConformancePackRequest(ServiceRequest):
+    OrganizationConformancePackName: OrganizationConformancePackName
+
+
+class DeletePendingAggregationRequestRequest(ServiceRequest):
+    RequesterAccountId: AccountId
+    RequesterAwsRegion: AwsRegion
+
+
+class DeleteRemediationConfigurationRequest(ServiceRequest):
+    ConfigRuleName: ConfigRuleName
+    ResourceType: Optional[String]
+
+
+class DeleteRemediationConfigurationResponse(TypedDict, total=False):
+    pass
+
+
+class RemediationExceptionResourceKey(TypedDict, total=False):
+    ResourceType: Optional[StringWithCharLimit256]
+    ResourceId: Optional[StringWithCharLimit1024]
+
+
+RemediationExceptionResourceKeys = List[RemediationExceptionResourceKey]
+
+
+class DeleteRemediationExceptionsRequest(ServiceRequest):
+    ConfigRuleName: ConfigRuleName
+    ResourceKeys: RemediationExceptionResourceKeys
+
+
+class FailedDeleteRemediationExceptionsBatch(TypedDict, total=False):
+    FailureMessage: Optional[String]
+    FailedItems: Optional[RemediationExceptionResourceKeys]
+
+
+FailedDeleteRemediationExceptionsBatches = List[FailedDeleteRemediationExceptionsBatch]
+
+
+class DeleteRemediationExceptionsResponse(TypedDict, total=False):
+    FailedBatches: Optional[FailedDeleteRemediationExceptionsBatches]
+
+
+class DeleteResourceConfigRequest(ServiceRequest):
+    ResourceType: ResourceTypeString
+    ResourceId: ResourceId
+
+
+class DeleteRetentionConfigurationRequest(ServiceRequest):
+    RetentionConfigurationName: RetentionConfigurationName
+
+
+class DeleteServiceLinkedConfigurationRecorderRequest(ServiceRequest):
+    ServicePrincipal: ServicePrincipal
+
+
+class DeleteServiceLinkedConfigurationRecorderResponse(TypedDict, total=False):
+    Arn: AmazonResourceName
+    Name: RecorderName
+
+
+class DeleteStoredQueryRequest(ServiceRequest):
+    QueryName: QueryName
+
+
+class DeleteStoredQueryResponse(TypedDict, total=False):
+    pass
+
+
+class DeliverConfigSnapshotRequest(ServiceRequest):
+    deliveryChannelName: ChannelName
+
+
+class DeliverConfigSnapshotResponse(TypedDict, total=False):
+    configSnapshotId: Optional[String]
+
+
+class DeliveryChannel(TypedDict, total=False):
+    name: Optional[ChannelName]
+    s3BucketName: Optional[String]
+    s3KeyPrefix: Optional[String]
+    s3KmsKeyArn: Optional[String]
+    snsTopicARN: Optional[String]
+    configSnapshotDeliveryProperties: Optional[ConfigSnapshotDeliveryProperties]
+
+
+DeliveryChannelList = List[DeliveryChannel]
+DeliveryChannelNameList = List[ChannelName]
+
+
+class DeliveryChannelStatus(TypedDict, total=False):
+    name: Optional[String]
+    configSnapshotDeliveryInfo: Optional[ConfigExportDeliveryInfo]
+    configHistoryDeliveryInfo: Optional[ConfigExportDeliveryInfo]
+    configStreamDeliveryInfo: Optional[ConfigStreamDeliveryInfo]
+
+
+DeliveryChannelStatusList = List[DeliveryChannelStatus]
+
+
+class DescribeAggregateComplianceByConfigRulesRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    Filters: Optional[ConfigRuleComplianceFilters]
+    Limit: Optional[GroupByAPILimit]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAggregateComplianceByConfigRulesResponse(TypedDict, total=False):
+    AggregateComplianceByConfigRules: Optional[AggregateComplianceByConfigRuleList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAggregateComplianceByConformancePacksRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    Filters: Optional[AggregateConformancePackComplianceFilters]
+    Limit: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAggregateComplianceByConformancePacksResponse(TypedDict, total=False):
+    AggregateComplianceByConformancePacks: Optional[AggregateComplianceByConformancePackList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAggregationAuthorizationsRequest(ServiceRequest):
+    Limit: Optional[Limit]
+    NextToken: Optional[String]
+
+
+class DescribeAggregationAuthorizationsResponse(TypedDict, total=False):
+    AggregationAuthorizations: Optional[AggregationAuthorizationList]
+    NextToken: Optional[String]
+
+
+class DescribeComplianceByConfigRuleRequest(ServiceRequest):
+    ConfigRuleNames: Optional[ConfigRuleNames]
+    ComplianceTypes: Optional[ComplianceTypes]
+    NextToken: Optional[String]
+
+
+class DescribeComplianceByConfigRuleResponse(TypedDict, total=False):
+    ComplianceByConfigRules: Optional[ComplianceByConfigRules]
+    NextToken: Optional[String]
+
+
+class DescribeComplianceByResourceRequest(ServiceRequest):
+    ResourceType: Optional[StringWithCharLimit256]
+    ResourceId: Optional[BaseResourceId]
+    ComplianceTypes: Optional[ComplianceTypes]
+    Limit: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class DescribeComplianceByResourceResponse(TypedDict, total=False):
+    ComplianceByResources: Optional[ComplianceByResources]
+    NextToken: Optional[NextToken]
+
+
+class DescribeConfigRuleEvaluationStatusRequest(ServiceRequest):
+    ConfigRuleNames: Optional[ConfigRuleNames]
+    NextToken: Optional[String]
+    Limit: Optional[RuleLimit]
+
+
+class DescribeConfigRuleEvaluationStatusResponse(TypedDict, total=False):
+    ConfigRulesEvaluationStatus: Optional[ConfigRuleEvaluationStatusList]
+    NextToken: Optional[String]
+
+
+class DescribeConfigRulesFilters(TypedDict, total=False):
+    EvaluationMode: Optional[EvaluationMode]
+
+
+class DescribeConfigRulesRequest(ServiceRequest):
+    ConfigRuleNames: Optional[ConfigRuleNames]
+    NextToken: Optional[String]
+    Filters: Optional[DescribeConfigRulesFilters]
+
+
+class DescribeConfigRulesResponse(TypedDict, total=False):
+    ConfigRules: Optional[ConfigRules]
+    NextToken: Optional[String]
+
+
+class DescribeConfigurationAggregatorSourcesStatusRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    UpdateStatus: Optional[AggregatedSourceStatusTypeList]
+    NextToken: Optional[String]
+    Limit: Optional[Limit]
+
+
+class DescribeConfigurationAggregatorSourcesStatusResponse(TypedDict, total=False):
+    AggregatedSourceStatusList: Optional[AggregatedSourceStatusList]
+    NextToken: Optional[String]
+
+
+class DescribeConfigurationAggregatorsRequest(ServiceRequest):
+    ConfigurationAggregatorNames: Optional[ConfigurationAggregatorNameList]
+    NextToken: Optional[String]
+    Limit: Optional[Limit]
+
+
+class DescribeConfigurationAggregatorsResponse(TypedDict, total=False):
+    ConfigurationAggregators: Optional[ConfigurationAggregatorList]
+    NextToken: Optional[String]
+
+
+class DescribeConfigurationRecorderStatusRequest(ServiceRequest):
+    ConfigurationRecorderNames: Optional[ConfigurationRecorderNameList]
+    ServicePrincipal: Optional[ServicePrincipal]
+    Arn: Optional[AmazonResourceName]
+
+
+class DescribeConfigurationRecorderStatusResponse(TypedDict, total=False):
+    ConfigurationRecordersStatus: Optional[ConfigurationRecorderStatusList]
+
+
+class DescribeConfigurationRecordersRequest(ServiceRequest):
+    ConfigurationRecorderNames: Optional[ConfigurationRecorderNameList]
+    ServicePrincipal: Optional[ServicePrincipal]
+    Arn: Optional[AmazonResourceName]
+
+
+class DescribeConfigurationRecordersResponse(TypedDict, total=False):
+    ConfigurationRecorders: Optional[ConfigurationRecorderList]
+
+
+class DescribeConformancePackComplianceRequest(ServiceRequest):
+    ConformancePackName: ConformancePackName
+    Filters: Optional[ConformancePackComplianceFilters]
+    Limit: Optional[DescribeConformancePackComplianceLimit]
+    NextToken: Optional[NextToken]
+
+
+class DescribeConformancePackComplianceResponse(TypedDict, total=False):
+    ConformancePackName: ConformancePackName
+    ConformancePackRuleComplianceList: ConformancePackRuleComplianceList
+    NextToken: Optional[NextToken]
+
+
+class DescribeConformancePackStatusRequest(ServiceRequest):
+    ConformancePackNames: Optional[ConformancePackNamesList]
+    Limit: Optional[PageSizeLimit]
+    NextToken: Optional[NextToken]
+
+
+class DescribeConformancePackStatusResponse(TypedDict, total=False):
+    ConformancePackStatusDetails: Optional[ConformancePackStatusDetailsList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeConformancePacksRequest(ServiceRequest):
+    ConformancePackNames: Optional[ConformancePackNamesList]
+    Limit: Optional[PageSizeLimit]
+    NextToken: Optional[NextToken]
+
+
+class DescribeConformancePacksResponse(TypedDict, total=False):
+    ConformancePackDetails: Optional[ConformancePackDetailList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeDeliveryChannelStatusRequest(ServiceRequest):
+    DeliveryChannelNames: Optional[DeliveryChannelNameList]
+
+
+class DescribeDeliveryChannelStatusResponse(TypedDict, total=False):
+    DeliveryChannelsStatus: Optional[DeliveryChannelStatusList]
+
+
+class DescribeDeliveryChannelsRequest(ServiceRequest):
+    DeliveryChannelNames: Optional[DeliveryChannelNameList]
+
+
+class DescribeDeliveryChannelsResponse(TypedDict, total=False):
+    DeliveryChannels: Optional[DeliveryChannelList]
+
+
+OrganizationConfigRuleNames = List[StringWithCharLimit64]
+
+
+class DescribeOrganizationConfigRuleStatusesRequest(ServiceRequest):
+    OrganizationConfigRuleNames: Optional[OrganizationConfigRuleNames]
+    Limit: Optional[CosmosPageLimit]
+    NextToken: Optional[String]
+
+
+class OrganizationConfigRuleStatus(TypedDict, total=False):
+    OrganizationConfigRuleName: OrganizationConfigRuleName
+    OrganizationRuleStatus: OrganizationRuleStatus
+    ErrorCode: Optional[String]
+    ErrorMessage: Optional[String]
+    LastUpdateTime: Optional[Date]
+
+
+OrganizationConfigRuleStatuses = List[OrganizationConfigRuleStatus]
+
+
+class DescribeOrganizationConfigRuleStatusesResponse(TypedDict, total=False):
+    OrganizationConfigRuleStatuses: Optional[OrganizationConfigRuleStatuses]
+    NextToken: Optional[String]
+
+
+class DescribeOrganizationConfigRulesRequest(ServiceRequest):
+    OrganizationConfigRuleNames: Optional[OrganizationConfigRuleNames]
+    Limit: Optional[CosmosPageLimit]
+    NextToken: Optional[String]
+
+
+ResourceTypesScope = List[StringWithCharLimit256]
+OrganizationConfigRuleTriggerTypeNoSNs = List[OrganizationConfigRuleTriggerTypeNoSN]
+
+
+class OrganizationCustomPolicyRuleMetadataNoPolicy(TypedDict, total=False):
+    Description: Optional[StringWithCharLimit256Min0]
+    OrganizationConfigRuleTriggerTypes: Optional[OrganizationConfigRuleTriggerTypeNoSNs]
+    InputParameters: Optional[StringWithCharLimit2048]
+    MaximumExecutionFrequency: Optional[MaximumExecutionFrequency]
+    ResourceTypesScope: Optional[ResourceTypesScope]
+    ResourceIdScope: Optional[StringWithCharLimit768]
+    TagKeyScope: Optional[StringWithCharLimit128]
+    TagValueScope: Optional[StringWithCharLimit256]
+    PolicyRuntime: Optional[PolicyRuntime]
+    DebugLogDeliveryAccounts: Optional[DebugLogDeliveryAccounts]
+
+
+ExcludedAccounts = List[AccountId]
+OrganizationConfigRuleTriggerTypes = List[OrganizationConfigRuleTriggerType]
+
+
+class OrganizationCustomRuleMetadata(TypedDict, total=False):
+    Description: Optional[StringWithCharLimit256Min0]
+    LambdaFunctionArn: StringWithCharLimit256
+    OrganizationConfigRuleTriggerTypes: OrganizationConfigRuleTriggerTypes
+    InputParameters: Optional[StringWithCharLimit2048]
+    MaximumExecutionFrequency: Optional[MaximumExecutionFrequency]
+    ResourceTypesScope: Optional[ResourceTypesScope]
+    ResourceIdScope: Optional[StringWithCharLimit768]
+    TagKeyScope: Optional[StringWithCharLimit128]
+    TagValueScope: Optional[StringWithCharLimit256]
+
+
+class OrganizationManagedRuleMetadata(TypedDict, total=False):
+    Description: Optional[StringWithCharLimit256Min0]
+    RuleIdentifier: StringWithCharLimit256
+    InputParameters: Optional[StringWithCharLimit2048]
+    MaximumExecutionFrequency: Optional[MaximumExecutionFrequency]
+    ResourceTypesScope: Optional[ResourceTypesScope]
+    ResourceIdScope: Optional[StringWithCharLimit768]
+    TagKeyScope: Optional[StringWithCharLimit128]
+    TagValueScope: Optional[StringWithCharLimit256]
+
+
+class OrganizationConfigRule(TypedDict, total=False):
+    OrganizationConfigRuleName: OrganizationConfigRuleName
+    OrganizationConfigRuleArn: StringWithCharLimit256
+    OrganizationManagedRuleMetadata: Optional[OrganizationManagedRuleMetadata]
+    OrganizationCustomRuleMetadata: Optional[OrganizationCustomRuleMetadata]
+    ExcludedAccounts: Optional[ExcludedAccounts]
+    LastUpdateTime: Optional[Date]
+    OrganizationCustomPolicyRuleMetadata: Optional[OrganizationCustomPolicyRuleMetadataNoPolicy]
+
+
+OrganizationConfigRules = List[OrganizationConfigRule]
+
+
+class DescribeOrganizationConfigRulesResponse(TypedDict, total=False):
+    OrganizationConfigRules: Optional[OrganizationConfigRules]
+    NextToken: Optional[String]
+
+
+OrganizationConformancePackNames = List[OrganizationConformancePackName]
+
+
+class DescribeOrganizationConformancePackStatusesRequest(ServiceRequest):
+    OrganizationConformancePackNames: Optional[OrganizationConformancePackNames]
+    Limit: Optional[CosmosPageLimit]
+    NextToken: Optional[String]
+
+
+class OrganizationConformancePackStatus(TypedDict, total=False):
+    OrganizationConformancePackName: OrganizationConformancePackName
+    Status: OrganizationResourceStatus
+    ErrorCode: Optional[String]
+    ErrorMessage: Optional[String]
+    LastUpdateTime: Optional[Date]
+
+
+OrganizationConformancePackStatuses = List[OrganizationConformancePackStatus]
+
+
+class DescribeOrganizationConformancePackStatusesResponse(TypedDict, total=False):
+    OrganizationConformancePackStatuses: Optional[OrganizationConformancePackStatuses]
+    NextToken: Optional[String]
+
+
+class DescribeOrganizationConformancePacksRequest(ServiceRequest):
+    OrganizationConformancePackNames: Optional[OrganizationConformancePackNames]
+    Limit: Optional[CosmosPageLimit]
+    NextToken: Optional[String]
+
+
+class OrganizationConformancePack(TypedDict, total=False):
+    OrganizationConformancePackName: OrganizationConformancePackName
+    OrganizationConformancePackArn: StringWithCharLimit256
+    DeliveryS3Bucket: Optional[DeliveryS3Bucket]
+    DeliveryS3KeyPrefix: Optional[DeliveryS3KeyPrefix]
+    ConformancePackInputParameters: Optional[ConformancePackInputParameters]
+    ExcludedAccounts: Optional[ExcludedAccounts]
+    LastUpdateTime: Date
+
+
+OrganizationConformancePacks = List[OrganizationConformancePack]
+
+
+class DescribeOrganizationConformancePacksResponse(TypedDict, total=False):
+    OrganizationConformancePacks: Optional[OrganizationConformancePacks]
+    NextToken: Optional[String]
+
+
+class DescribePendingAggregationRequestsRequest(ServiceRequest):
+    Limit: Optional[DescribePendingAggregationRequestsLimit]
+    NextToken: Optional[String]
+
+
+class PendingAggregationRequest(TypedDict, total=False):
+    RequesterAccountId: Optional[AccountId]
+    RequesterAwsRegion: Optional[AwsRegion]
+
+
+PendingAggregationRequestList = List[PendingAggregationRequest]
+
+
+class DescribePendingAggregationRequestsResponse(TypedDict, total=False):
+    PendingAggregationRequests: Optional[PendingAggregationRequestList]
+    NextToken: Optional[String]
+
+
+class DescribeRemediationConfigurationsRequest(ServiceRequest):
+    ConfigRuleNames: ConfigRuleNames
+
+
+class SsmControls(TypedDict, total=False):
+    ConcurrentExecutionRatePercentage: Optional[Percentage]
+    ErrorPercentage: Optional[Percentage]
+
+
+class ExecutionControls(TypedDict, total=False):
+    SsmControls: Optional[SsmControls]
+
+
+StaticParameterValues = List[StringWithCharLimit256]
+
+
+class StaticValue(TypedDict, total=False):
+    Values: StaticParameterValues
+
+
+class ResourceValue(TypedDict, total=False):
+    Value: ResourceValueType
+
+
+class RemediationParameterValue(TypedDict, total=False):
+    ResourceValue: Optional[ResourceValue]
+    StaticValue: Optional[StaticValue]
+
+
+RemediationParameters = Dict[StringWithCharLimit256, RemediationParameterValue]
+
+
+class RemediationConfiguration(TypedDict, total=False):
+    ConfigRuleName: ConfigRuleName
+    TargetType: RemediationTargetType
+    TargetId: StringWithCharLimit256
+    TargetVersion: Optional[String]
+    Parameters: Optional[RemediationParameters]
+    ResourceType: Optional[String]
+    Automatic: Optional[Boolean]
+    ExecutionControls: Optional[ExecutionControls]
+    MaximumAutomaticAttempts: Optional[AutoRemediationAttempts]
+    RetryAttemptSeconds: Optional[AutoRemediationAttemptSeconds]
+    Arn: Optional[StringWithCharLimit1024]
+    CreatedByService: Optional[StringWithCharLimit1024]
+
+
+RemediationConfigurations = List[RemediationConfiguration]
+
+
+class DescribeRemediationConfigurationsResponse(TypedDict, total=False):
+    RemediationConfigurations: Optional[RemediationConfigurations]
+
+
+class DescribeRemediationExceptionsRequest(ServiceRequest):
+    ConfigRuleName: ConfigRuleName
+    ResourceKeys: Optional[RemediationExceptionResourceKeys]
+    Limit: Optional[Limit]
+    NextToken: Optional[String]
+
+
+class RemediationException(TypedDict, total=False):
+    ConfigRuleName: ConfigRuleName
+    ResourceType: StringWithCharLimit256
+    ResourceId: StringWithCharLimit1024
+    Message: Optional[StringWithCharLimit1024]
+    ExpirationTime: Optional[Date]
+
+
+RemediationExceptions = List[RemediationException]
+
+
+class DescribeRemediationExceptionsResponse(TypedDict, total=False):
+    RemediationExceptions: Optional[RemediationExceptions]
+    NextToken: Optional[String]
+
+
+class DescribeRemediationExecutionStatusRequest(ServiceRequest):
+    ConfigRuleName: ConfigRuleName
+    ResourceKeys: Optional[ResourceKeys]
+    Limit: Optional[Limit]
+    NextToken: Optional[String]
+
+
+class RemediationExecutionStep(TypedDict, total=False):
+    Name: Optional[String]
+    State: Optional[RemediationExecutionStepState]
+    ErrorMessage: Optional[String]
+    StartTime: Optional[Date]
+    StopTime: Optional[Date]
+
+
+RemediationExecutionSteps = List[RemediationExecutionStep]
+
+
+class RemediationExecutionStatus(TypedDict, total=False):
+    ResourceKey: Optional[ResourceKey]
+    State: Optional[RemediationExecutionState]
+    StepDetails: Optional[RemediationExecutionSteps]
+    InvocationTime: Optional[Date]
+    LastUpdatedTime: Optional[Date]
+
+
+RemediationExecutionStatuses = List[RemediationExecutionStatus]
+
+
+class DescribeRemediationExecutionStatusResponse(TypedDict, total=False):
+    RemediationExecutionStatuses: Optional[RemediationExecutionStatuses]
+    NextToken: Optional[String]
+
+
+RetentionConfigurationNameList = List[RetentionConfigurationName]
+
+
+class DescribeRetentionConfigurationsRequest(ServiceRequest):
+    RetentionConfigurationNames: Optional[RetentionConfigurationNameList]
+    NextToken: Optional[NextToken]
+
+
+class RetentionConfiguration(TypedDict, total=False):
+    Name: RetentionConfigurationName
+    RetentionPeriodInDays: RetentionPeriodInDays
+
+
+RetentionConfigurationList = List[RetentionConfiguration]
+
+
+class DescribeRetentionConfigurationsResponse(TypedDict, total=False):
+    RetentionConfigurations: Optional[RetentionConfigurationList]
+    NextToken: Optional[NextToken]
+
+
+class DisassociateResourceTypesRequest(ServiceRequest):
+    ConfigurationRecorderArn: AmazonResourceName
+    ResourceTypes: ResourceTypeList
+
+
+class DisassociateResourceTypesResponse(TypedDict, total=False):
+    ConfigurationRecorder: ConfigurationRecorder
+
+
+DiscoveredResourceIdentifierList = List[AggregateResourceIdentifier]
+EarlierTime = datetime
+OrderingTimestamp = datetime
+
+
+class Evaluation(TypedDict, total=False):
+    ComplianceResourceType: StringWithCharLimit256
+    ComplianceResourceId: BaseResourceId
+    ComplianceType: ComplianceType
+    Annotation: Optional[StringWithCharLimit256]
+    OrderingTimestamp: OrderingTimestamp
+
+
+class EvaluationContext(TypedDict, total=False):
+    EvaluationContextIdentifier: Optional[EvaluationContextIdentifier]
+
+
+class EvaluationResult(TypedDict, total=False):
+    EvaluationResultIdentifier: Optional[EvaluationResultIdentifier]
+    ComplianceType: Optional[ComplianceType]
+    ResultRecordedTime: Optional[Date]
+    ConfigRuleInvokedTime: Optional[Date]
+    Annotation: Optional[StringWithCharLimit256]
+    ResultToken: Optional[String]
+
+
+EvaluationResults = List[EvaluationResult]
+
+
+class EvaluationStatus(TypedDict, total=False):
+    Status: ResourceEvaluationStatus
+    FailureReason: Optional[StringWithCharLimit1024]
+
+
+Evaluations = List[Evaluation]
+
+
+class ExternalEvaluation(TypedDict, total=False):
+    ComplianceResourceType: StringWithCharLimit256
+    ComplianceResourceId: BaseResourceId
+    ComplianceType: ComplianceType
+    Annotation: Optional[StringWithCharLimit256]
+    OrderingTimestamp: OrderingTimestamp
+
+
+class FailedRemediationBatch(TypedDict, total=False):
+    FailureMessage: Optional[String]
+    FailedItems: Optional[RemediationConfigurations]
+
+
+FailedRemediationBatches = List[FailedRemediationBatch]
+
+
+class FailedRemediationExceptionBatch(TypedDict, total=False):
+    FailureMessage: Optional[String]
+    FailedItems: Optional[RemediationExceptions]
+
+
+FailedRemediationExceptionBatches = List[FailedRemediationExceptionBatch]
+
+
+class FieldInfo(TypedDict, total=False):
+    Name: Optional[FieldName]
+
+
+FieldInfoList = List[FieldInfo]
+
+
+class GetAggregateComplianceDetailsByConfigRuleRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    ConfigRuleName: ConfigRuleName
+    AccountId: AccountId
+    AwsRegion: AwsRegion
+    ComplianceType: Optional[ComplianceType]
+    Limit: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class GetAggregateComplianceDetailsByConfigRuleResponse(TypedDict, total=False):
+    AggregateEvaluationResults: Optional[AggregateEvaluationResultList]
+    NextToken: Optional[NextToken]
+
+
+class GetAggregateConfigRuleComplianceSummaryRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    Filters: Optional[ConfigRuleComplianceSummaryFilters]
+    GroupByKey: Optional[ConfigRuleComplianceSummaryGroupKey]
+    Limit: Optional[GroupByAPILimit]
+    NextToken: Optional[NextToken]
+
+
+class GetAggregateConfigRuleComplianceSummaryResponse(TypedDict, total=False):
+    GroupByKey: Optional[StringWithCharLimit256]
+    AggregateComplianceCounts: Optional[AggregateComplianceCountList]
+    NextToken: Optional[NextToken]
+
+
+class GetAggregateConformancePackComplianceSummaryRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    Filters: Optional[AggregateConformancePackComplianceSummaryFilters]
+    GroupByKey: Optional[AggregateConformancePackComplianceSummaryGroupKey]
+    Limit: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class GetAggregateConformancePackComplianceSummaryResponse(TypedDict, total=False):
+    AggregateConformancePackComplianceSummaries: Optional[
+        AggregateConformancePackComplianceSummaryList
+    ]
+    GroupByKey: Optional[StringWithCharLimit256]
+    NextToken: Optional[NextToken]
+
+
+class ResourceCountFilters(TypedDict, total=False):
+    ResourceType: Optional[ResourceType]
+    AccountId: Optional[AccountId]
+    Region: Optional[AwsRegion]
+
+
+class GetAggregateDiscoveredResourceCountsRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    Filters: Optional[ResourceCountFilters]
+    GroupByKey: Optional[ResourceCountGroupKey]
+    Limit: Optional[GroupByAPILimit]
+    NextToken: Optional[NextToken]
+
+
+Long = int
+
+
+class GroupedResourceCount(TypedDict, total=False):
+    GroupName: StringWithCharLimit256
+    ResourceCount: Long
+
+
+GroupedResourceCountList = List[GroupedResourceCount]
+
+
+class GetAggregateDiscoveredResourceCountsResponse(TypedDict, total=False):
+    TotalDiscoveredResources: Long
+    GroupByKey: Optional[StringWithCharLimit256]
+    GroupedResourceCounts: Optional[GroupedResourceCountList]
+    NextToken: Optional[NextToken]
+
+
+class GetAggregateResourceConfigRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    ResourceIdentifier: AggregateResourceIdentifier
+
+
+class GetAggregateResourceConfigResponse(TypedDict, total=False):
+    ConfigurationItem: Optional[ConfigurationItem]
+
+
+class GetComplianceDetailsByConfigRuleRequest(ServiceRequest):
+    ConfigRuleName: StringWithCharLimit64
+    ComplianceTypes: Optional[ComplianceTypes]
+    Limit: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class GetComplianceDetailsByConfigRuleResponse(TypedDict, total=False):
+    EvaluationResults: Optional[EvaluationResults]
+    NextToken: Optional[NextToken]
+
+
+class GetComplianceDetailsByResourceRequest(ServiceRequest):
+    ResourceType: Optional[StringWithCharLimit256]
+    ResourceId: Optional[BaseResourceId]
+    ComplianceTypes: Optional[ComplianceTypes]
+    NextToken: Optional[String]
+    ResourceEvaluationId: Optional[ResourceEvaluationId]
+
+
+class GetComplianceDetailsByResourceResponse(TypedDict, total=False):
+    EvaluationResults: Optional[EvaluationResults]
+    NextToken: Optional[String]
+
+
+class GetComplianceSummaryByConfigRuleResponse(TypedDict, total=False):
+    ComplianceSummary: Optional[ComplianceSummary]
+
+
+ResourceTypes = List[StringWithCharLimit256]
+
+
+class GetComplianceSummaryByResourceTypeRequest(ServiceRequest):
+    ResourceTypes: Optional[ResourceTypes]
+
+
+class GetComplianceSummaryByResourceTypeResponse(TypedDict, total=False):
+    ComplianceSummariesByResourceType: Optional[ComplianceSummariesByResourceType]
+
+
+class GetConformancePackComplianceDetailsRequest(ServiceRequest):
+    ConformancePackName: ConformancePackName
+    Filters: Optional[ConformancePackEvaluationFilters]
+    Limit: Optional[GetConformancePackComplianceDetailsLimit]
+    NextToken: Optional[NextToken]
+
+
+class GetConformancePackComplianceDetailsResponse(TypedDict, total=False):
+    ConformancePackName: ConformancePackName
+    ConformancePackRuleEvaluationResults: Optional[ConformancePackRuleEvaluationResultsList]
+    NextToken: Optional[NextToken]
+
+
+class GetConformancePackComplianceSummaryRequest(ServiceRequest):
+    ConformancePackNames: ConformancePackNamesToSummarizeList
+    Limit: Optional[PageSizeLimit]
+    NextToken: Optional[NextToken]
+
+
+class GetConformancePackComplianceSummaryResponse(TypedDict, total=False):
+    ConformancePackComplianceSummaryList: Optional[ConformancePackComplianceSummaryList]
+    NextToken: Optional[NextToken]
+
+
+class GetCustomRulePolicyRequest(ServiceRequest):
+    ConfigRuleName: Optional[ConfigRuleName]
+
+
+class GetCustomRulePolicyResponse(TypedDict, total=False):
+    PolicyText: Optional[PolicyText]
+
+
+class GetDiscoveredResourceCountsRequest(ServiceRequest):
+    resourceTypes: Optional[ResourceTypes]
+    limit: Optional[Limit]
+    nextToken: Optional[NextToken]
+
+
+class ResourceCount(TypedDict, total=False):
+    resourceType: Optional[ResourceType]
+    count: Optional[Long]
+
+
+ResourceCounts = List[ResourceCount]
+
+
+class GetDiscoveredResourceCountsResponse(TypedDict, total=False):
+    totalDiscoveredResources: Optional[Long]
+    resourceCounts: Optional[ResourceCounts]
+    nextToken: Optional[NextToken]
+
+
+class StatusDetailFilters(TypedDict, total=False):
+    AccountId: Optional[AccountId]
+    MemberAccountRuleStatus: Optional[MemberAccountRuleStatus]
+
+
+class GetOrganizationConfigRuleDetailedStatusRequest(ServiceRequest):
+    OrganizationConfigRuleName: OrganizationConfigRuleName
+    Filters: Optional[StatusDetailFilters]
+    Limit: Optional[CosmosPageLimit]
+    NextToken: Optional[String]
+
+
+class MemberAccountStatus(TypedDict, total=False):
+    AccountId: AccountId
+    ConfigRuleName: StringWithCharLimit64
+    MemberAccountRuleStatus: MemberAccountRuleStatus
+    ErrorCode: Optional[String]
+    ErrorMessage: Optional[String]
+    LastUpdateTime: Optional[Date]
+
+
+OrganizationConfigRuleDetailedStatus = List[MemberAccountStatus]
+
+
+class GetOrganizationConfigRuleDetailedStatusResponse(TypedDict, total=False):
+    OrganizationConfigRuleDetailedStatus: Optional[OrganizationConfigRuleDetailedStatus]
+    NextToken: Optional[String]
+
+
+class OrganizationResourceDetailedStatusFilters(TypedDict, total=False):
+    AccountId: Optional[AccountId]
+    Status: Optional[OrganizationResourceDetailedStatus]
+
+
+class GetOrganizationConformancePackDetailedStatusRequest(ServiceRequest):
+    OrganizationConformancePackName: OrganizationConformancePackName
+    Filters: Optional[OrganizationResourceDetailedStatusFilters]
+    Limit: Optional[CosmosPageLimit]
+    NextToken: Optional[String]
+
+
+class OrganizationConformancePackDetailedStatus(TypedDict, total=False):
+    AccountId: AccountId
+    ConformancePackName: StringWithCharLimit256
+    Status: OrganizationResourceDetailedStatus
+    ErrorCode: Optional[String]
+    ErrorMessage: Optional[String]
+    LastUpdateTime: Optional[Date]
+
+
+OrganizationConformancePackDetailedStatuses = List[OrganizationConformancePackDetailedStatus]
+
+
+class GetOrganizationConformancePackDetailedStatusResponse(TypedDict, total=False):
+    OrganizationConformancePackDetailedStatuses: Optional[
+        OrganizationConformancePackDetailedStatuses
+    ]
+    NextToken: Optional[String]
+
+
+class GetOrganizationCustomRulePolicyRequest(ServiceRequest):
+    OrganizationConfigRuleName: OrganizationConfigRuleName
+
+
+class GetOrganizationCustomRulePolicyResponse(TypedDict, total=False):
+    PolicyText: Optional[PolicyText]
+
+
+LaterTime = datetime
+
+
+class GetResourceConfigHistoryRequest(ServiceRequest):
+    resourceType: ResourceType
+    resourceId: ResourceId
+    laterTime: Optional[LaterTime]
+    earlierTime: Optional[EarlierTime]
+    chronologicalOrder: Optional[ChronologicalOrder]
+    limit: Optional[Limit]
+    nextToken: Optional[NextToken]
+
+
+class GetResourceConfigHistoryResponse(TypedDict, total=False):
+    configurationItems: Optional[ConfigurationItemList]
+    nextToken: Optional[NextToken]
+
+
+class GetResourceEvaluationSummaryRequest(ServiceRequest):
+    ResourceEvaluationId: ResourceEvaluationId
+
+
+class ResourceDetails(TypedDict, total=False):
+    ResourceId: BaseResourceId
+    ResourceType: StringWithCharLimit256
+    ResourceConfiguration: ResourceConfiguration
+    ResourceConfigurationSchemaType: Optional[ResourceConfigurationSchemaType]
+
+
+class GetResourceEvaluationSummaryResponse(TypedDict, total=False):
+    ResourceEvaluationId: Optional[ResourceEvaluationId]
+    EvaluationMode: Optional[EvaluationMode]
+    EvaluationStatus: Optional[EvaluationStatus]
+    EvaluationStartTimestamp: Optional[Date]
+    Compliance: Optional[ComplianceType]
+    EvaluationContext: Optional[EvaluationContext]
+    ResourceDetails: Optional[ResourceDetails]
+
+
+class GetStoredQueryRequest(ServiceRequest):
+    QueryName: QueryName
+
+
+class StoredQuery(TypedDict, total=False):
+    QueryId: Optional[QueryId]
+    QueryArn: Optional[QueryArn]
+    QueryName: QueryName
+    Description: Optional[QueryDescription]
+    Expression: Optional[QueryExpression]
+
+
+class GetStoredQueryResponse(TypedDict, total=False):
+    StoredQuery: Optional[StoredQuery]
+
+
+class ResourceFilters(TypedDict, total=False):
+    AccountId: Optional[AccountId]
+    ResourceId: Optional[ResourceId]
+    ResourceName: Optional[ResourceName]
+    Region: Optional[AwsRegion]
+
+
+class ListAggregateDiscoveredResourcesRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    ResourceType: ResourceType
+    Filters: Optional[ResourceFilters]
+    Limit: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class ListAggregateDiscoveredResourcesResponse(TypedDict, total=False):
+    ResourceIdentifiers: Optional[DiscoveredResourceIdentifierList]
+    NextToken: Optional[NextToken]
+
+
+class ListConfigurationRecordersRequest(ServiceRequest):
+    Filters: Optional[ConfigurationRecorderFilterList]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ListConfigurationRecordersResponse(TypedDict, total=False):
+    ConfigurationRecorderSummaries: ConfigurationRecorderSummaries
+    NextToken: Optional[NextToken]
+
+
+class ListConformancePackComplianceScoresRequest(ServiceRequest):
+    Filters: Optional[ConformancePackComplianceScoresFilters]
+    SortOrder: Optional[SortOrder]
+    SortBy: Optional[SortBy]
+    Limit: Optional[PageSizeLimit]
+    NextToken: Optional[NextToken]
+
+
+class ListConformancePackComplianceScoresResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    ConformancePackComplianceScores: ConformancePackComplianceScores
+
+
+ResourceIdList = List[ResourceId]
+
+
+class ListDiscoveredResourcesRequest(ServiceRequest):
+    resourceType: ResourceType
+    resourceIds: Optional[ResourceIdList]
+    resourceName: Optional[ResourceName]
+    limit: Optional[Limit]
+    includeDeletedResources: Optional[Boolean]
+    nextToken: Optional[NextToken]
+
+
+ResourceDeletionTime = datetime
+
+
+class ResourceIdentifier(TypedDict, total=False):
+    resourceType: Optional[ResourceType]
+    resourceId: Optional[ResourceId]
+    resourceName: Optional[ResourceName]
+    resourceDeletionTime: Optional[ResourceDeletionTime]
+
+
+ResourceIdentifierList = List[ResourceIdentifier]
+
+
+class ListDiscoveredResourcesResponse(TypedDict, total=False):
+    resourceIdentifiers: Optional[ResourceIdentifierList]
+    nextToken: Optional[NextToken]
+
+
+class TimeWindow(TypedDict, total=False):
+    StartTime: Optional[Date]
+    EndTime: Optional[Date]
+
+
+class ResourceEvaluationFilters(TypedDict, total=False):
+    EvaluationMode: Optional[EvaluationMode]
+    TimeWindow: Optional[TimeWindow]
+    EvaluationContextIdentifier: Optional[EvaluationContextIdentifier]
+
+
+class ListResourceEvaluationsRequest(ServiceRequest):
+    Filters: Optional[ResourceEvaluationFilters]
+    Limit: Optional[ListResourceEvaluationsPageItemLimit]
+    NextToken: Optional[String]
+
+
+class ResourceEvaluation(TypedDict, total=False):
+    ResourceEvaluationId: Optional[ResourceEvaluationId]
+    EvaluationMode: Optional[EvaluationMode]
+    EvaluationStartTimestamp: Optional[Date]
+
+
+ResourceEvaluations = List[ResourceEvaluation]
+
+
+class ListResourceEvaluationsResponse(TypedDict, total=False):
+    ResourceEvaluations: Optional[ResourceEvaluations]
+    NextToken: Optional[String]
+
+
+class ListStoredQueriesRequest(ServiceRequest):
+    NextToken: Optional[String]
+    MaxResults: Optional[Limit]
+
+
+class StoredQueryMetadata(TypedDict, total=False):
+    QueryId: QueryId
+    QueryArn: QueryArn
+    QueryName: QueryName
+    Description: Optional[QueryDescription]
+
+
+StoredQueryMetadataList = List[StoredQueryMetadata]
+
+
+class ListStoredQueriesResponse(TypedDict, total=False):
+    StoredQueryMetadata: Optional[StoredQueryMetadataList]
+    NextToken: Optional[String]
+
+
+class ListTagsForResourceRequest(ServiceRequest):
+    ResourceArn: AmazonResourceName
+    Limit: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class Tag(TypedDict, total=False):
+    Key: Optional[TagKey]
+    Value: Optional[TagValue]
+
+
+TagList = List[Tag]
+
+
+class ListTagsForResourceResponse(TypedDict, total=False):
+    Tags: Optional[TagList]
+    NextToken: Optional[NextToken]
+
+
+class OrganizationCustomPolicyRuleMetadata(TypedDict, total=False):
+    Description: Optional[StringWithCharLimit256Min0]
+    OrganizationConfigRuleTriggerTypes: Optional[OrganizationConfigRuleTriggerTypeNoSNs]
+    InputParameters: Optional[StringWithCharLimit2048]
+    MaximumExecutionFrequency: Optional[MaximumExecutionFrequency]
+    ResourceTypesScope: Optional[ResourceTypesScope]
+    ResourceIdScope: Optional[StringWithCharLimit768]
+    TagKeyScope: Optional[StringWithCharLimit128]
+    TagValueScope: Optional[StringWithCharLimit256]
+    PolicyRuntime: PolicyRuntime
+    PolicyText: PolicyText
+    DebugLogDeliveryAccounts: Optional[DebugLogDeliveryAccounts]
+
+
+TagsList = List[Tag]
+
+
+class PutAggregationAuthorizationRequest(ServiceRequest):
+    AuthorizedAccountId: AccountId
+    AuthorizedAwsRegion: AwsRegion
+    Tags: Optional[TagsList]
+
+
+class PutAggregationAuthorizationResponse(TypedDict, total=False):
+    AggregationAuthorization: Optional[AggregationAuthorization]
+
+
+class PutConfigRuleRequest(ServiceRequest):
+    ConfigRule: ConfigRule
+    Tags: Optional[TagsList]
+
+
+class PutConfigurationAggregatorRequest(ServiceRequest):
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    AccountAggregationSources: Optional[AccountAggregationSourceList]
+    OrganizationAggregationSource: Optional[OrganizationAggregationSource]
+    Tags: Optional[TagsList]
+    AggregatorFilters: Optional[AggregatorFilters]
+
+
+class PutConfigurationAggregatorResponse(TypedDict, total=False):
+    ConfigurationAggregator: Optional[ConfigurationAggregator]
+
+
+class PutConfigurationRecorderRequest(ServiceRequest):
+    ConfigurationRecorder: ConfigurationRecorder
+    Tags: Optional[TagsList]
+
+
+class PutConformancePackRequest(ServiceRequest):
+    ConformancePackName: ConformancePackName
+    TemplateS3Uri: Optional[TemplateS3Uri]
+    TemplateBody: Optional[TemplateBody]
+    DeliveryS3Bucket: Optional[DeliveryS3Bucket]
+    DeliveryS3KeyPrefix: Optional[DeliveryS3KeyPrefix]
+    ConformancePackInputParameters: Optional[ConformancePackInputParameters]
+    TemplateSSMDocumentDetails: Optional[TemplateSSMDocumentDetails]
+
+
+class PutConformancePackResponse(TypedDict, total=False):
+    ConformancePackArn: Optional[ConformancePackArn]
+
+
+class PutDeliveryChannelRequest(ServiceRequest):
+    DeliveryChannel: DeliveryChannel
+
+
+class PutEvaluationsRequest(ServiceRequest):
+    Evaluations: Optional[Evaluations]
+    ResultToken: String
+    TestMode: Optional[Boolean]
+
+
+class PutEvaluationsResponse(TypedDict, total=False):
+    FailedEvaluations: Optional[Evaluations]
+
+
+class PutExternalEvaluationRequest(ServiceRequest):
+    ConfigRuleName: ConfigRuleName
+    ExternalEvaluation: ExternalEvaluation
+
+
+class PutExternalEvaluationResponse(TypedDict, total=False):
+    pass
+
+
+class PutOrganizationConfigRuleRequest(ServiceRequest):
+    OrganizationConfigRuleName: OrganizationConfigRuleName
+    OrganizationManagedRuleMetadata: Optional[OrganizationManagedRuleMetadata]
+    OrganizationCustomRuleMetadata: Optional[OrganizationCustomRuleMetadata]
+    ExcludedAccounts: Optional[ExcludedAccounts]
+    OrganizationCustomPolicyRuleMetadata: Optional[OrganizationCustomPolicyRuleMetadata]
+
+
+class PutOrganizationConfigRuleResponse(TypedDict, total=False):
+    OrganizationConfigRuleArn: Optional[StringWithCharLimit256]
+
+
+class PutOrganizationConformancePackRequest(ServiceRequest):
+    OrganizationConformancePackName: OrganizationConformancePackName
+    TemplateS3Uri: Optional[TemplateS3Uri]
+    TemplateBody: Optional[TemplateBody]
+    DeliveryS3Bucket: Optional[DeliveryS3Bucket]
+    DeliveryS3KeyPrefix: Optional[DeliveryS3KeyPrefix]
+    ConformancePackInputParameters: Optional[ConformancePackInputParameters]
+    ExcludedAccounts: Optional[ExcludedAccounts]
+
+
+class PutOrganizationConformancePackResponse(TypedDict, total=False):
+    OrganizationConformancePackArn: Optional[StringWithCharLimit256]
+
+
+class PutRemediationConfigurationsRequest(ServiceRequest):
+    RemediationConfigurations: RemediationConfigurations
+
+
+class PutRemediationConfigurationsResponse(TypedDict, total=False):
+    FailedBatches: Optional[FailedRemediationBatches]
+
+
+class PutRemediationExceptionsRequest(ServiceRequest):
+    ConfigRuleName: ConfigRuleName
+    ResourceKeys: RemediationExceptionResourceKeys
+    Message: Optional[StringWithCharLimit1024]
+    ExpirationTime: Optional[Date]
+
+
+class PutRemediationExceptionsResponse(TypedDict, total=False):
+    FailedBatches: Optional[FailedRemediationExceptionBatches]
+
+
+class PutResourceConfigRequest(ServiceRequest):
+    ResourceType: ResourceTypeString
+    SchemaVersionId: SchemaVersionId
+    ResourceId: ResourceId
+    ResourceName: Optional[ResourceName]
+    Configuration: Configuration
+    Tags: Optional[Tags]
+
+
+class PutRetentionConfigurationRequest(ServiceRequest):
+    RetentionPeriodInDays: RetentionPeriodInDays
+
+
+class PutRetentionConfigurationResponse(TypedDict, total=False):
+    RetentionConfiguration: Optional[RetentionConfiguration]
+
+
+class PutServiceLinkedConfigurationRecorderRequest(ServiceRequest):
+    ServicePrincipal: ServicePrincipal
+    Tags: Optional[TagsList]
+
+
+class PutServiceLinkedConfigurationRecorderResponse(TypedDict, total=False):
+    Arn: Optional[AmazonResourceName]
+    Name: Optional[RecorderName]
+
+
+class PutStoredQueryRequest(ServiceRequest):
+    StoredQuery: StoredQuery
+    Tags: Optional[TagsList]
+
+
+class PutStoredQueryResponse(TypedDict, total=False):
+    QueryArn: Optional[QueryArn]
+
+
+class QueryInfo(TypedDict, total=False):
+    SelectFields: Optional[FieldInfoList]
+
+
+ReevaluateConfigRuleNames = List[ConfigRuleName]
+Results = List[String]
+
+
+class SelectAggregateResourceConfigRequest(ServiceRequest):
+    Expression: Expression
+    ConfigurationAggregatorName: ConfigurationAggregatorName
+    Limit: Optional[Limit]
+    MaxResults: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class SelectAggregateResourceConfigResponse(TypedDict, total=False):
+    Results: Optional[Results]
+    QueryInfo: Optional[QueryInfo]
+    NextToken: Optional[NextToken]
+
+
+class SelectResourceConfigRequest(ServiceRequest):
+    Expression: Expression
+    Limit: Optional[Limit]
+    NextToken: Optional[NextToken]
+
+
+class SelectResourceConfigResponse(TypedDict, total=False):
+    Results: Optional[Results]
+    QueryInfo: Optional[QueryInfo]
+    NextToken: Optional[NextToken]
+
+
+class StartConfigRulesEvaluationRequest(ServiceRequest):
+    ConfigRuleNames: Optional[ReevaluateConfigRuleNames]
+
+
+class StartConfigRulesEvaluationResponse(TypedDict, total=False):
+    pass
+
+
+class StartConfigurationRecorderRequest(ServiceRequest):
+    ConfigurationRecorderName: RecorderName
+
+
+class StartRemediationExecutionRequest(ServiceRequest):
+    ConfigRuleName: ConfigRuleName
+    ResourceKeys: ResourceKeys
+
+
+class StartRemediationExecutionResponse(TypedDict, total=False):
+    FailureMessage: Optional[String]
+    FailedItems: Optional[ResourceKeys]
+
+
+class StartResourceEvaluationRequest(ServiceRequest):
+    ResourceDetails: ResourceDetails
+    EvaluationContext: Optional[EvaluationContext]
+    EvaluationMode: EvaluationMode
+    EvaluationTimeout: Optional[EvaluationTimeout]
+    ClientToken: Optional[ClientToken]
+
+
+class StartResourceEvaluationResponse(TypedDict, total=False):
+    ResourceEvaluationId: Optional[ResourceEvaluationId]
+
+
+class StopConfigurationRecorderRequest(ServiceRequest):
+    ConfigurationRecorderName: RecorderName
+
+
+TagKeyList = List[TagKey]
+
+
+class TagResourceRequest(ServiceRequest):
+    ResourceArn: AmazonResourceName
+    Tags: TagList
+
+
+class UntagResourceRequest(ServiceRequest):
+    ResourceArn: AmazonResourceName
+    TagKeys: TagKeyList
+
+
+class ConfigApi:
+    service = "config"
+    version = "2014-11-12"
+
+    @handler("AssociateResourceTypes")
+    def associate_resource_types(
+        self,
+        context: RequestContext,
+        configuration_recorder_arn: AmazonResourceName,
+        resource_types: ResourceTypeList,
+        **kwargs,
+    ) -> AssociateResourceTypesResponse:
+        raise NotImplementedError
+
+    @handler("BatchGetAggregateResourceConfig")
+    def batch_get_aggregate_resource_config(
+        self,
+        context: RequestContext,
+        configuration_aggregator_name: ConfigurationAggregatorName,
+        resource_identifiers: ResourceIdentifiersList,
+        **kwargs,
+    ) -> BatchGetAggregateResourceConfigResponse:
+        raise NotImplementedError
+
+    @handler("BatchGetResourceConfig")
+    def batch_get_resource_config(
+        self, context: RequestContext, resource_keys: ResourceKeys, **kwargs
+    ) -> BatchGetResourceConfigResponse:
+        raise NotImplementedError
+
+    @handler("DeleteAggregationAuthorization")
+    def delete_aggregation_authorization(
+        self,
+        context: RequestContext,
+        authorized_account_id: AccountId,
+        authorized_aws_region: AwsRegion,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteConfigRule")
+    def delete_config_rule(
+        self, context: RequestContext, config_rule_name: ConfigRuleName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteConfigurationAggregator")
+    def delete_configuration_aggregator(
+        self,
+        context: RequestContext,
+        configuration_aggregator_name: ConfigurationAggregatorName,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteConfigurationRecorder")
+    def delete_configuration_recorder(
+        self, context: RequestContext, configuration_recorder_name: RecorderName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteConformancePack")
+    def delete_conformance_pack(
+        self, context: RequestContext, conformance_pack_name: ConformancePackName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDeliveryChannel")
+    def delete_delivery_channel(
+        self, context: RequestContext, delivery_channel_name: ChannelName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteEvaluationResults")
+    def delete_evaluation_results(
+        self, context: RequestContext, config_rule_name: StringWithCharLimit64, **kwargs
+    ) -> DeleteEvaluationResultsResponse:
+        raise NotImplementedError
+
+    @handler("DeleteOrganizationConfigRule")
+    def delete_organization_config_rule(
+        self,
+        context: RequestContext,
+        organization_config_rule_name: OrganizationConfigRuleName,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteOrganizationConformancePack")
+    def delete_organization_conformance_pack(
+        self,
+        context: RequestContext,
+        organization_conformance_pack_name: OrganizationConformancePackName,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeletePendingAggregationRequest")
+    def delete_pending_aggregation_request(
+        self,
+        context: RequestContext,
+        requester_account_id: AccountId,
+        requester_aws_region: AwsRegion,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRemediationConfiguration")
+    def delete_remediation_configuration(
+        self,
+        context: RequestContext,
+        config_rule_name: ConfigRuleName,
+        resource_type: String | None = None,
+        **kwargs,
+    ) -> DeleteRemediationConfigurationResponse:
+        raise NotImplementedError
+
+    @handler("DeleteRemediationExceptions")
+    def delete_remediation_exceptions(
+        self,
+        context: RequestContext,
+        config_rule_name: ConfigRuleName,
+        resource_keys: RemediationExceptionResourceKeys,
+        **kwargs,
+    ) -> DeleteRemediationExceptionsResponse:
+        raise NotImplementedError
+
+    @handler("DeleteResourceConfig")
+    def delete_resource_config(
+        self,
+        context: RequestContext,
+        resource_type: ResourceTypeString,
+        resource_id: ResourceId,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRetentionConfiguration")
+    def delete_retention_configuration(
+        self,
+        context: RequestContext,
+        retention_configuration_name: RetentionConfigurationName,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteServiceLinkedConfigurationRecorder")
+    def delete_service_linked_configuration_recorder(
+        self, context: RequestContext, service_principal: ServicePrincipal, **kwargs
+    ) -> DeleteServiceLinkedConfigurationRecorderResponse:
+        raise NotImplementedError
+
+    @handler("DeleteStoredQuery")
+    def delete_stored_query(
+        self, context: RequestContext, query_name: QueryName, **kwargs
+    ) -> DeleteStoredQueryResponse:
+        raise NotImplementedError
+
+    @handler("DeliverConfigSnapshot")
+    def deliver_config_snapshot(
+        self, context: RequestContext, delivery_channel_name: ChannelName, **kwargs
+    ) -> DeliverConfigSnapshotResponse:
+        raise NotImplementedError
+
+    @handler("DescribeAggregateComplianceByConfigRules")
+    def describe_aggregate_compliance_by_config_rules(
+        self,
+        context: RequestContext,
+        configuration_aggregator_name: ConfigurationAggregatorName,
+        filters: ConfigRuleComplianceFilters | None = None,
+        limit: GroupByAPILimit | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeAggregateComplianceByConfigRulesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeAggregateComplianceByConformancePacks")
+    def describe_aggregate_compliance_by_conformance_packs(
+        self,
+        context: RequestContext,
+        configuration_aggregator_name: ConfigurationAggregatorName,
+        filters: AggregateConformancePackComplianceFilters | None = None,
+        limit: Limit | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeAggregateComplianceByConformancePacksResponse:
+        raise NotImplementedError
+
+    @handler("DescribeAggregationAuthorizations")
+    def describe_aggregation_authorizations(
+        self,
+        context: RequestContext,
+        limit: Limit | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribeAggregationAuthorizationsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeComplianceByConfigRule")
+    def describe_compliance_by_config_rule(
+        self,
+        context: RequestContext,
+        config_rule_names: ConfigRuleNames | None = None,
+        compliance_types: ComplianceTypes | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribeComplianceByConfigRuleResponse:
+        raise NotImplementedError
+
+    @handler("DescribeComplianceByResource")
+    def describe_compliance_by_resource(
+        self,
+        context: RequestContext,
+        resource_type: StringWithCharLimit256 | None = None,
+        resource_id: BaseResourceId | None = None,
+        compliance_types: ComplianceTypes | None = None,
+        limit: Limit | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeComplianceByResourceResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConfigRuleEvaluationStatus")
+    def describe_config_rule_evaluation_status(
+        self,
+        context: RequestContext,
+        config_rule_names: ConfigRuleNames | None = None,
+        next_token: String | None = None,
+        limit: RuleLimit | None = None,
+        **kwargs,
+    ) -> DescribeConfigRuleEvaluationStatusResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConfigRules")
+    def describe_config_rules(
+        self,
+        context: RequestContext,
+        config_rule_names: ConfigRuleNames | None = None,
+        next_token: String | None = None,
+        filters: DescribeConfigRulesFilters | None = None,
+        **kwargs,
+    ) -> DescribeConfigRulesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConfigurationAggregatorSourcesStatus")
+    def describe_configuration_aggregator_sources_status(
+        self,
+        context: RequestContext,
+        configuration_aggregator_name: ConfigurationAggregatorName,
+        update_status: AggregatedSourceStatusTypeList | None = None,
+        next_token: String | None = None,
+        limit: Limit | None = None,
+        **kwargs,
+    ) -> DescribeConfigurationAggregatorSourcesStatusResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConfigurationAggregators")
+    def describe_configuration_aggregators(
+        self,
+        context: RequestContext,
+        configuration_aggregator_names: ConfigurationAggregatorNameList | None = None,
+        next_token: String | None = None,
+        limit: Limit | None = None,
+        **kwargs,
+    ) -> DescribeConfigurationAggregatorsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConfigurationRecorderStatus")
+    def describe_configuration_recorder_status(
+        self,
+        context: RequestContext,
+        configuration_recorder_names: ConfigurationRecorderNameList | None = None,
+        service_principal: ServicePrincipal | None = None,
+        arn: AmazonResourceName | None = None,
+        **kwargs,
+    ) -> DescribeConfigurationRecorderStatusResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConfigurationRecorders")
+    def describe_configuration_recorders(
+        self,
+        context: RequestContext,
+        configuration_recorder_names: ConfigurationRecorderNameList | None = None,
+        service_principal: ServicePrincipal | None = None,
+        arn: AmazonResourceName | None = None,
+        **kwargs,
+    ) -> DescribeConfigurationRecordersResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConformancePackCompliance")
+    def describe_conformance_pack_compliance(
+        self,
+        context: RequestContext,
+        conformance_pack_name: ConformancePackName,
+        filters: ConformancePackComplianceFilters | None = None,
+        limit: DescribeConformancePackComplianceLimit | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeConformancePackComplianceResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConformancePackStatus")
+    def describe_conformance_pack_status(
+        self,
+        context: RequestContext,
+        conformance_pack_names: ConformancePackNamesList | None = None,
+        limit: PageSizeLimit | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeConformancePackStatusResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConformancePacks")
+    def describe_conformance_packs(
+        self,
+        context: RequestContext,
+        conformance_pack_names: ConformancePackNamesList | None = None,
+        limit: PageSizeLimit | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeConformancePacksResponse:
+        raise NotImplementedError
+
+    @handler("DescribeDeliveryChannelStatus")
+    def describe_delivery_channel_status(
+        self,
+        context: RequestContext,
+        delivery_channel_names: DeliveryChannelNameList | None = None,
+        **kwargs,
+    ) -> DescribeDeliveryChannelStatusResponse:
+        raise NotImplementedError
+
+    @handler("DescribeDeliveryChannels")
+    def describe_delivery_channels(
+        self,
+        context: RequestContext,
+        delivery_channel_names: DeliveryChannelNameList | None = None,
+        **kwargs,
+    ) -> DescribeDeliveryChannelsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeOrganizationConfigRuleStatuses")
+    def describe_organization_config_rule_statuses(
+        self,
+        context: RequestContext,
+        organization_config_rule_names: OrganizationConfigRuleNames | None = None,
+        limit: CosmosPageLimit | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribeOrganizationConfigRuleStatusesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeOrganizationConfigRules")
+    def describe_organization_config_rules(
+        self,
+        context: RequestContext,
+        organization_config_rule_names: OrganizationConfigRuleNames | None = None,
+        limit: CosmosPageLimit | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribeOrganizationConfigRulesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeOrganizationConformancePackStatuses")
+    def describe_organization_conformance_pack_statuses(
+        self,
+        context: RequestContext,
+        organization_conformance_pack_names: OrganizationConformancePackNames | None = None,
+        limit: CosmosPageLimit | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribeOrganizationConformancePackStatusesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeOrganizationConformancePacks")
+    def describe_organization_conformance_packs(
+        self,
+        context: RequestContext,
+        organization_conformance_pack_names: OrganizationConformancePackNames | None = None,
+        limit: CosmosPageLimit | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribeOrganizationConformancePacksResponse:
+        raise NotImplementedError
+
+    @handler("DescribePendingAggregationRequests")
+    def describe_pending_aggregation_requests(
+        self,
+        context: RequestContext,
+        limit: DescribePendingAggregationRequestsLimit | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribePendingAggregationRequestsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeRemediationConfigurations")
+    def describe_remediation_configurations(
+        self, context: RequestContext, config_rule_names: ConfigRuleNames, **kwargs
+    ) -> DescribeRemediationConfigurationsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeRemediationExceptions")
+    def describe_remediation_exceptions(
+        self,
+        context: RequestContext,
+        config_rule_name: ConfigRuleName,
+        resource_keys: RemediationExceptionResourceKeys | None = None,
+        limit: Limit | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribeRemediationExceptionsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeRemediationExecutionStatus")
+    def describe_remediation_execution_status(
+        self,
+        context: RequestContext,
+        config_rule_name: ConfigRuleName,
+        resource_keys: ResourceKeys | None = None,
+        limit: Limit | None = None,
+        next_token: String | None = None,
+        **kwargs,
+    ) -> DescribeRemediationExecutionStatusResponse:
+        raise NotImplementedError
+
+    @handler("DescribeRetentionConfigurations")
+    def describe_retention_configurations(
+        self,
+        context: RequestContext,
+        retention_configuration_names: RetentionConfigurationNameList | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeRetentionConfigurationsResponse:
+        raise NotImplementedError
+
+    @handler("DisassociateResourceTypes")
+    def disassociate_resource_types(
+ self, + context: RequestContext, + configuration_recorder_arn: AmazonResourceName, + resource_types: ResourceTypeList, + **kwargs, + ) -> DisassociateResourceTypesResponse: + raise NotImplementedError + + @handler("GetAggregateComplianceDetailsByConfigRule") + def get_aggregate_compliance_details_by_config_rule( + self, + context: RequestContext, + configuration_aggregator_name: ConfigurationAggregatorName, + config_rule_name: ConfigRuleName, + account_id: AccountId, + aws_region: AwsRegion, + compliance_type: ComplianceType | None = None, + limit: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetAggregateComplianceDetailsByConfigRuleResponse: + raise NotImplementedError + + @handler("GetAggregateConfigRuleComplianceSummary") + def get_aggregate_config_rule_compliance_summary( + self, + context: RequestContext, + configuration_aggregator_name: ConfigurationAggregatorName, + filters: ConfigRuleComplianceSummaryFilters | None = None, + group_by_key: ConfigRuleComplianceSummaryGroupKey | None = None, + limit: GroupByAPILimit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetAggregateConfigRuleComplianceSummaryResponse: + raise NotImplementedError + + @handler("GetAggregateConformancePackComplianceSummary") + def get_aggregate_conformance_pack_compliance_summary( + self, + context: RequestContext, + configuration_aggregator_name: ConfigurationAggregatorName, + filters: AggregateConformancePackComplianceSummaryFilters | None = None, + group_by_key: AggregateConformancePackComplianceSummaryGroupKey | None = None, + limit: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetAggregateConformancePackComplianceSummaryResponse: + raise NotImplementedError + + @handler("GetAggregateDiscoveredResourceCounts") + def get_aggregate_discovered_resource_counts( + self, + context: RequestContext, + configuration_aggregator_name: ConfigurationAggregatorName, + filters: ResourceCountFilters | None = None, + group_by_key: ResourceCountGroupKey | None = None, + limit: GroupByAPILimit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetAggregateDiscoveredResourceCountsResponse: + raise NotImplementedError + + @handler("GetAggregateResourceConfig") + def get_aggregate_resource_config( + self, + context: RequestContext, + configuration_aggregator_name: ConfigurationAggregatorName, + resource_identifier: AggregateResourceIdentifier, + **kwargs, + ) -> GetAggregateResourceConfigResponse: + raise NotImplementedError + + @handler("GetComplianceDetailsByConfigRule") + def get_compliance_details_by_config_rule( + self, + context: RequestContext, + config_rule_name: StringWithCharLimit64, + compliance_types: ComplianceTypes | None = None, + limit: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetComplianceDetailsByConfigRuleResponse: + raise NotImplementedError + + @handler("GetComplianceDetailsByResource") + def get_compliance_details_by_resource( + self, + context: RequestContext, + resource_type: StringWithCharLimit256 | None = None, + resource_id: BaseResourceId | None = None, + compliance_types: ComplianceTypes | None = None, + next_token: String | None = None, + resource_evaluation_id: ResourceEvaluationId | None = None, + **kwargs, + ) -> GetComplianceDetailsByResourceResponse: + raise NotImplementedError + + @handler("GetComplianceSummaryByConfigRule") + def get_compliance_summary_by_config_rule( + self, context: RequestContext, **kwargs + ) -> 
GetComplianceSummaryByConfigRuleResponse: + raise NotImplementedError + + @handler("GetComplianceSummaryByResourceType") + def get_compliance_summary_by_resource_type( + self, context: RequestContext, resource_types: ResourceTypes | None = None, **kwargs + ) -> GetComplianceSummaryByResourceTypeResponse: + raise NotImplementedError + + @handler("GetConformancePackComplianceDetails") + def get_conformance_pack_compliance_details( + self, + context: RequestContext, + conformance_pack_name: ConformancePackName, + filters: ConformancePackEvaluationFilters | None = None, + limit: GetConformancePackComplianceDetailsLimit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetConformancePackComplianceDetailsResponse: + raise NotImplementedError + + @handler("GetConformancePackComplianceSummary") + def get_conformance_pack_compliance_summary( + self, + context: RequestContext, + conformance_pack_names: ConformancePackNamesToSummarizeList, + limit: PageSizeLimit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetConformancePackComplianceSummaryResponse: + raise NotImplementedError + + @handler("GetCustomRulePolicy") + def get_custom_rule_policy( + self, context: RequestContext, config_rule_name: ConfigRuleName | None = None, **kwargs + ) -> GetCustomRulePolicyResponse: + raise NotImplementedError + + @handler("GetDiscoveredResourceCounts") + def get_discovered_resource_counts( + self, + context: RequestContext, + resource_types: ResourceTypes | None = None, + limit: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetDiscoveredResourceCountsResponse: + raise NotImplementedError + + @handler("GetOrganizationConfigRuleDetailedStatus") + def get_organization_config_rule_detailed_status( + self, + context: RequestContext, + organization_config_rule_name: OrganizationConfigRuleName, + filters: StatusDetailFilters | None = None, + limit: CosmosPageLimit | None = None, + next_token: String | None = None, + **kwargs, + ) -> GetOrganizationConfigRuleDetailedStatusResponse: + raise NotImplementedError + + @handler("GetOrganizationConformancePackDetailedStatus") + def get_organization_conformance_pack_detailed_status( + self, + context: RequestContext, + organization_conformance_pack_name: OrganizationConformancePackName, + filters: OrganizationResourceDetailedStatusFilters | None = None, + limit: CosmosPageLimit | None = None, + next_token: String | None = None, + **kwargs, + ) -> GetOrganizationConformancePackDetailedStatusResponse: + raise NotImplementedError + + @handler("GetOrganizationCustomRulePolicy") + def get_organization_custom_rule_policy( + self, + context: RequestContext, + organization_config_rule_name: OrganizationConfigRuleName, + **kwargs, + ) -> GetOrganizationCustomRulePolicyResponse: + raise NotImplementedError + + @handler("GetResourceConfigHistory") + def get_resource_config_history( + self, + context: RequestContext, + resource_type: ResourceType, + resource_id: ResourceId, + later_time: LaterTime | None = None, + earlier_time: EarlierTime | None = None, + chronological_order: ChronologicalOrder | None = None, + limit: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetResourceConfigHistoryResponse: + raise NotImplementedError + + @handler("GetResourceEvaluationSummary") + def get_resource_evaluation_summary( + self, context: RequestContext, resource_evaluation_id: ResourceEvaluationId, **kwargs + ) -> GetResourceEvaluationSummaryResponse: + raise NotImplementedError + + 
@handler("GetStoredQuery") + def get_stored_query( + self, context: RequestContext, query_name: QueryName, **kwargs + ) -> GetStoredQueryResponse: + raise NotImplementedError + + @handler("ListAggregateDiscoveredResources") + def list_aggregate_discovered_resources( + self, + context: RequestContext, + configuration_aggregator_name: ConfigurationAggregatorName, + resource_type: ResourceType, + filters: ResourceFilters | None = None, + limit: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListAggregateDiscoveredResourcesResponse: + raise NotImplementedError + + @handler("ListConfigurationRecorders") + def list_configuration_recorders( + self, + context: RequestContext, + filters: ConfigurationRecorderFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListConfigurationRecordersResponse: + raise NotImplementedError + + @handler("ListConformancePackComplianceScores") + def list_conformance_pack_compliance_scores( + self, + context: RequestContext, + filters: ConformancePackComplianceScoresFilters | None = None, + sort_order: SortOrder | None = None, + sort_by: SortBy | None = None, + limit: PageSizeLimit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListConformancePackComplianceScoresResponse: + raise NotImplementedError + + @handler("ListDiscoveredResources") + def list_discovered_resources( + self, + context: RequestContext, + resource_type: ResourceType, + resource_ids: ResourceIdList | None = None, + resource_name: ResourceName | None = None, + limit: Limit | None = None, + include_deleted_resources: Boolean | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListDiscoveredResourcesResponse: + raise NotImplementedError + + @handler("ListResourceEvaluations") + def list_resource_evaluations( + self, + context: RequestContext, + filters: ResourceEvaluationFilters | None = None, + limit: ListResourceEvaluationsPageItemLimit | None = None, + next_token: String | None = None, + **kwargs, + ) -> ListResourceEvaluationsResponse: + raise NotImplementedError + + @handler("ListStoredQueries") + def list_stored_queries( + self, + context: RequestContext, + next_token: String | None = None, + max_results: Limit | None = None, + **kwargs, + ) -> ListStoredQueriesResponse: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, + context: RequestContext, + resource_arn: AmazonResourceName, + limit: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListTagsForResourceResponse: + raise NotImplementedError + + @handler("PutAggregationAuthorization") + def put_aggregation_authorization( + self, + context: RequestContext, + authorized_account_id: AccountId, + authorized_aws_region: AwsRegion, + tags: TagsList | None = None, + **kwargs, + ) -> PutAggregationAuthorizationResponse: + raise NotImplementedError + + @handler("PutConfigRule") + def put_config_rule( + self, + context: RequestContext, + config_rule: ConfigRule, + tags: TagsList | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutConfigurationAggregator") + def put_configuration_aggregator( + self, + context: RequestContext, + configuration_aggregator_name: ConfigurationAggregatorName, + account_aggregation_sources: AccountAggregationSourceList | None = None, + organization_aggregation_source: OrganizationAggregationSource | None = None, + tags: TagsList | None = None, + aggregator_filters: 
AggregatorFilters | None = None, + **kwargs, + ) -> PutConfigurationAggregatorResponse: + raise NotImplementedError + + @handler("PutConfigurationRecorder") + def put_configuration_recorder( + self, + context: RequestContext, + configuration_recorder: ConfigurationRecorder, + tags: TagsList | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutConformancePack") + def put_conformance_pack( + self, + context: RequestContext, + conformance_pack_name: ConformancePackName, + template_s3_uri: TemplateS3Uri | None = None, + template_body: TemplateBody | None = None, + delivery_s3_bucket: DeliveryS3Bucket | None = None, + delivery_s3_key_prefix: DeliveryS3KeyPrefix | None = None, + conformance_pack_input_parameters: ConformancePackInputParameters | None = None, + template_ssm_document_details: TemplateSSMDocumentDetails | None = None, + **kwargs, + ) -> PutConformancePackResponse: + raise NotImplementedError + + @handler("PutDeliveryChannel") + def put_delivery_channel( + self, context: RequestContext, delivery_channel: DeliveryChannel, **kwargs + ) -> None: + raise NotImplementedError + + @handler("PutEvaluations") + def put_evaluations( + self, + context: RequestContext, + result_token: String, + evaluations: Evaluations | None = None, + test_mode: Boolean | None = None, + **kwargs, + ) -> PutEvaluationsResponse: + raise NotImplementedError + + @handler("PutExternalEvaluation") + def put_external_evaluation( + self, + context: RequestContext, + config_rule_name: ConfigRuleName, + external_evaluation: ExternalEvaluation, + **kwargs, + ) -> PutExternalEvaluationResponse: + raise NotImplementedError + + @handler("PutOrganizationConfigRule") + def put_organization_config_rule( + self, + context: RequestContext, + organization_config_rule_name: OrganizationConfigRuleName, + organization_managed_rule_metadata: OrganizationManagedRuleMetadata | None = None, + organization_custom_rule_metadata: OrganizationCustomRuleMetadata | None = None, + excluded_accounts: ExcludedAccounts | None = None, + organization_custom_policy_rule_metadata: OrganizationCustomPolicyRuleMetadata + | None = None, + **kwargs, + ) -> PutOrganizationConfigRuleResponse: + raise NotImplementedError + + @handler("PutOrganizationConformancePack") + def put_organization_conformance_pack( + self, + context: RequestContext, + organization_conformance_pack_name: OrganizationConformancePackName, + template_s3_uri: TemplateS3Uri | None = None, + template_body: TemplateBody | None = None, + delivery_s3_bucket: DeliveryS3Bucket | None = None, + delivery_s3_key_prefix: DeliveryS3KeyPrefix | None = None, + conformance_pack_input_parameters: ConformancePackInputParameters | None = None, + excluded_accounts: ExcludedAccounts | None = None, + **kwargs, + ) -> PutOrganizationConformancePackResponse: + raise NotImplementedError + + @handler("PutRemediationConfigurations") + def put_remediation_configurations( + self, + context: RequestContext, + remediation_configurations: RemediationConfigurations, + **kwargs, + ) -> PutRemediationConfigurationsResponse: + raise NotImplementedError + + @handler("PutRemediationExceptions") + def put_remediation_exceptions( + self, + context: RequestContext, + config_rule_name: ConfigRuleName, + resource_keys: RemediationExceptionResourceKeys, + message: StringWithCharLimit1024 | None = None, + expiration_time: Date | None = None, + **kwargs, + ) -> PutRemediationExceptionsResponse: + raise NotImplementedError + + @handler("PutResourceConfig") + def put_resource_config( + self, + 
context: RequestContext, + resource_type: ResourceTypeString, + schema_version_id: SchemaVersionId, + resource_id: ResourceId, + configuration: Configuration, + resource_name: ResourceName | None = None, + tags: Tags | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutRetentionConfiguration") + def put_retention_configuration( + self, context: RequestContext, retention_period_in_days: RetentionPeriodInDays, **kwargs + ) -> PutRetentionConfigurationResponse: + raise NotImplementedError + + @handler("PutServiceLinkedConfigurationRecorder") + def put_service_linked_configuration_recorder( + self, + context: RequestContext, + service_principal: ServicePrincipal, + tags: TagsList | None = None, + **kwargs, + ) -> PutServiceLinkedConfigurationRecorderResponse: + raise NotImplementedError + + @handler("PutStoredQuery") + def put_stored_query( + self, + context: RequestContext, + stored_query: StoredQuery, + tags: TagsList | None = None, + **kwargs, + ) -> PutStoredQueryResponse: + raise NotImplementedError + + @handler("SelectAggregateResourceConfig") + def select_aggregate_resource_config( + self, + context: RequestContext, + expression: Expression, + configuration_aggregator_name: ConfigurationAggregatorName, + limit: Limit | None = None, + max_results: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> SelectAggregateResourceConfigResponse: + raise NotImplementedError + + @handler("SelectResourceConfig") + def select_resource_config( + self, + context: RequestContext, + expression: Expression, + limit: Limit | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> SelectResourceConfigResponse: + raise NotImplementedError + + @handler("StartConfigRulesEvaluation") + def start_config_rules_evaluation( + self, + context: RequestContext, + config_rule_names: ReevaluateConfigRuleNames | None = None, + **kwargs, + ) -> StartConfigRulesEvaluationResponse: + raise NotImplementedError + + @handler("StartConfigurationRecorder") + def start_configuration_recorder( + self, context: RequestContext, configuration_recorder_name: RecorderName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("StartRemediationExecution") + def start_remediation_execution( + self, + context: RequestContext, + config_rule_name: ConfigRuleName, + resource_keys: ResourceKeys, + **kwargs, + ) -> StartRemediationExecutionResponse: + raise NotImplementedError + + @handler("StartResourceEvaluation") + def start_resource_evaluation( + self, + context: RequestContext, + resource_details: ResourceDetails, + evaluation_mode: EvaluationMode, + evaluation_context: EvaluationContext | None = None, + evaluation_timeout: EvaluationTimeout | None = None, + client_token: ClientToken | None = None, + **kwargs, + ) -> StartResourceEvaluationResponse: + raise NotImplementedError + + @handler("StopConfigurationRecorder") + def stop_configuration_recorder( + self, context: RequestContext, configuration_recorder_name: RecorderName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, tags: TagList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, + context: RequestContext, + resource_arn: AmazonResourceName, + tag_keys: TagKeyList, + **kwargs, + ) -> None: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/core.py b/localstack-core/localstack/aws/api/core.py new file 
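
The stubs above raise NotImplementedError until a concrete provider overrides them. As an illustrative sketch only (the ConfigProvider name and in-memory TAGS store are hypothetical, and the generated base class is assumed to be named ConfigApi, following the naming used elsewhere in LocalStack):

# Hypothetical provider sketch, not part of this changeset.
TAGS: dict[str, dict[str, str]] = {}


class ConfigProvider(ConfigApi):  # ConfigApi: assumed name of the generated stub class above
    @handler("TagResource")
    def tag_resource(
        self, context: RequestContext, resource_arn: AmazonResourceName, tags: TagList, **kwargs
    ) -> None:
        # merge the given tags into a per-ARN map (assuming Tag is a {"Key", "Value"} TypedDict)
        TAGS.setdefault(resource_arn, {}).update({t["Key"]: t["Value"] for t in tags})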
diff --git a/localstack-core/localstack/aws/api/core.py b/localstack-core/localstack/aws/api/core.py
new file mode 100644
index 0000000000000..dbe32d7973284
--- /dev/null
+++ b/localstack-core/localstack/aws/api/core.py
@@ -0,0 +1,186 @@
+import functools
+from typing import (
+    Any,
+    Callable,
+    NamedTuple,
+    ParamSpec,
+    Protocol,
+    Type,
+    TypedDict,
+    TypeVar,
+)
+
+from botocore.model import OperationModel, ServiceModel
+from rolo.gateway import RequestContext as RoloRequestContext
+
+from localstack.aws.connect import InternalRequestParameters
+from localstack.http import Request, Response
+from localstack.utils.strings import long_uid
+
+
+class ServiceRequest(TypedDict):
+    pass
+
+
+P = ParamSpec("P")
+T = TypeVar("T")
+
+
+ServiceResponse = Any
+
+
+class ServiceException(Exception):
+    """
+    An exception that indicates that a service error occurred.
+    These exceptions, when raised during the execution of a service function, will be serialized and sent to the client.
+    Do not use this exception directly (use the generated subclasses or CommonServiceException instead).
+    """
+
+    code: str
+    status_code: int
+    sender_fault: bool
+    message: str
+
+    def __init__(self, *args: Any, **kwargs: Any):
+        super(ServiceException, self).__init__(*args)
+
+        if len(args) >= 1:
+            self.message = args[0]
+        else:
+            self.message = ""
+        for key, value in kwargs.items():
+            setattr(self, key, value)
+
+
+class CommonServiceException(ServiceException):
+    """
+    An exception which can be raised within a service during its execution, even if it is not specified (i.e. it's not
+    generated based on the service specification).
+    In the AWS API references, this kind of error is usually referred to as a "Common Error", e.g.:
+    https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/CommonErrors.html
+    """
+
+    def __init__(self, code: str, message: str, status_code: int = 400, sender_fault: bool = False):
+        super(CommonServiceException, self).__init__(message)
+        self.code = code
+        self.status_code = status_code
+        self.sender_fault = sender_fault
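
For orientation, a minimal usage sketch (assumed, not part of this diff): a provider can surface an error that is not declared in its service spec by raising CommonServiceException directly, and the handler chain serializes it into an AWS-style error response:

# Sketch: raise a "common error" outside the generated, spec-derived subclasses.
raise CommonServiceException(
    code="ValidationException",
    message="1 validation error detected",
    status_code=400,
    sender_fault=True,
)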
+ """ + + request: Request + """The underlying incoming HTTP request.""" + service: ServiceModel | None + """The botocore ServiceModel of the service the request is made to.""" + operation: OperationModel | None + """The botocore OperationModel of the AWS operation being invoked.""" + region: str + """The region the request is made to.""" + partition: str + """The partition the request is made to.""" + account_id: str + """The account the request is made from.""" + request_id: str | None + """The autogenerated AWS request ID identifying the original request""" + service_request: ServiceRequest | None + """The AWS operation parameters.""" + service_response: ServiceResponse | None + """The response from the AWS emulator backend.""" + service_exception: ServiceException | None + """The exception the AWS emulator backend may have raised.""" + internal_request_params: InternalRequestParameters | None + """Data sent by client-side LocalStack during internal calls.""" + trace_context: dict[str, Any] + """Tracing metadata such as X-Ray trace headers""" + + def __init__(self, request: Request): + super().__init__(request) + self.service = None + self.operation = None + self.region = None # type: ignore[assignment] # type=str, because we know it will always be set downstream + self.partition = "aws" # Sensible default - will be overwritten by region-handler + self.account_id = None # type: ignore[assignment] # type=str, because we know it will always be set downstream + self.request_id = long_uid() + self.service_request = None + self.service_response = None + self.service_exception = None + self.trace_context = {} + self.internal_request_params = None + + @property + def is_internal_call(self) -> bool: + """ + Whether this request is an internal cross-service call. + """ + return self.internal_request_params is not None + + @property + def service_operation(self) -> ServiceOperation | None: + """ + If both the service model and the operation model are set, this returns a tuple of the service name and + operation name. + + :return: a tuple like ("s3", "PutObject") or ("lambda", "CreateFunction") + """ + if not self.service or not self.operation: + return None + return ServiceOperation(self.service.service_name, self.operation.name) + + def __repr__(self) -> str: + return f"<RequestContext {self.service=}, {self.operation=}, {self.region=}, {self.account_id=}, {self.request=}>" + + +class ServiceRequestHandler(Protocol): + """ + A protocol to describe a Request--Response handler that processes an AWS API call with the already parsed request. + """ + + def __call__( + self, context: RequestContext, request: ServiceRequest + ) -> ServiceResponse | Response | None: + """ + Handle the given request. + + :param context: the request context + :param request: the request parameters, e.g., ``{"Bucket": "my-bucket-name"}`` for an s3 create bucket operation + :return: either an already serialized HTTP Response object, or a service response dictionary. 
+ """ + raise NotImplementedError + + +def handler( + operation: str | None = None, context: bool = True, expand: bool = True +) -> Callable[[Callable[P, T]], Callable[P, T]]: + """ + Decorator that indicates that the given function is a handler + """ + + def wrapper(fn: Callable[P, T]) -> Callable[P, T]: + @functools.wraps(fn) + def operation_marker(*args: P.args, **kwargs: P.kwargs) -> T: + return fn(*args, **kwargs) + + operation_marker.operation = operation # type: ignore[attr-defined] + operation_marker.expand_parameters = expand # type: ignore[attr-defined] + operation_marker.pass_context = context # type: ignore[attr-defined] + + return operation_marker + + return wrapper diff --git a/localstack-core/localstack/aws/api/dynamodb/__init__.py b/localstack-core/localstack/aws/api/dynamodb/__init__.py new file mode 100644 index 0000000000000..5f43f351e8ba4 --- /dev/null +++ b/localstack-core/localstack/aws/api/dynamodb/__init__.py @@ -0,0 +1,2930 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +ArchivalReason = str +AttributeName = str +AutoScalingPolicyName = str +AutoScalingRoleArn = str +Backfilling = bool +BackupArn = str +BackupName = str +BackupsInputLimit = int +BooleanAttributeValue = bool +BooleanObject = bool +ClientRequestToken = str +ClientToken = str +CloudWatchLogGroupArn = str +Code = str +ConditionExpression = str +ConfirmRemoveSelfResourceAccess = bool +ConsistentRead = bool +ConsumedCapacityUnits = float +ContributorInsightsRule = str +CsvDelimiter = str +CsvHeader = str +DeletionProtectionEnabled = bool +DoubleObject = float +ErrorMessage = str +ExceptionDescription = str +ExceptionName = str +ExportArn = str +ExportManifest = str +ExportNextToken = str +ExpressionAttributeNameVariable = str +ExpressionAttributeValueVariable = str +FailureCode = str +FailureMessage = str +GlobalTableArnString = str +ImportArn = str +ImportNextToken = str +IndexName = str +Integer = int +IntegerObject = int +ItemCollectionSizeEstimateBound = float +KMSMasterKeyArn = str +KMSMasterKeyId = str +KeyExpression = str +KeySchemaAttributeName = str +ListContributorInsightsLimit = int +ListExportsMaxLimit = int +ListImportsMaxLimit = int +ListTablesInputLimit = int +NextTokenString = str +NonKeyAttributeName = str +NullAttributeValue = bool +NumberAttributeValue = str +PartiQLNextToken = str +PartiQLStatement = str +PolicyRevisionId = str +PositiveIntegerObject = int +ProjectionExpression = str +RecoveryPeriodInDays = int +RegionName = str +ReplicaStatusDescription = str +ReplicaStatusPercentProgress = str +ResourceArnString = str +ResourcePolicy = str +RestoreInProgress = bool +S3Bucket = str +S3BucketOwner = str +S3Prefix = str +S3SseKmsKeyId = str +SSEEnabled = bool +ScanSegment = int +ScanTotalSegments = int +StreamArn = str +StreamEnabled = bool +String = str +StringAttributeValue = str +TableArn = str +TableId = str +TableName = str +TagKeyString = str +TagValueString = str +TimeToLiveAttributeName = str +TimeToLiveEnabled = bool +UpdateExpression = str + + +class ApproximateCreationDateTimePrecision(StrEnum): + MILLISECOND = "MILLISECOND" + MICROSECOND = "MICROSECOND" + + +class AttributeAction(StrEnum): + ADD = "ADD" + PUT = "PUT" + DELETE = "DELETE" + + +class BackupStatus(StrEnum): + CREATING = "CREATING" + DELETED = "DELETED" + AVAILABLE = "AVAILABLE" + + +class BackupType(StrEnum): + USER = "USER" + SYSTEM = "SYSTEM" + 
AWS_BACKUP = "AWS_BACKUP" + + +class BackupTypeFilter(StrEnum): + USER = "USER" + SYSTEM = "SYSTEM" + AWS_BACKUP = "AWS_BACKUP" + ALL = "ALL" + + +class BatchStatementErrorCodeEnum(StrEnum): + ConditionalCheckFailed = "ConditionalCheckFailed" + ItemCollectionSizeLimitExceeded = "ItemCollectionSizeLimitExceeded" + RequestLimitExceeded = "RequestLimitExceeded" + ValidationError = "ValidationError" + ProvisionedThroughputExceeded = "ProvisionedThroughputExceeded" + TransactionConflict = "TransactionConflict" + ThrottlingError = "ThrottlingError" + InternalServerError = "InternalServerError" + ResourceNotFound = "ResourceNotFound" + AccessDenied = "AccessDenied" + DuplicateItem = "DuplicateItem" + + +class BillingMode(StrEnum): + PROVISIONED = "PROVISIONED" + PAY_PER_REQUEST = "PAY_PER_REQUEST" + + +class ComparisonOperator(StrEnum): + EQ = "EQ" + NE = "NE" + IN = "IN" + LE = "LE" + LT = "LT" + GE = "GE" + GT = "GT" + BETWEEN = "BETWEEN" + NOT_NULL = "NOT_NULL" + NULL = "NULL" + CONTAINS = "CONTAINS" + NOT_CONTAINS = "NOT_CONTAINS" + BEGINS_WITH = "BEGINS_WITH" + + +class ConditionalOperator(StrEnum): + AND = "AND" + OR = "OR" + + +class ContinuousBackupsStatus(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class ContributorInsightsAction(StrEnum): + ENABLE = "ENABLE" + DISABLE = "DISABLE" + + +class ContributorInsightsStatus(StrEnum): + ENABLING = "ENABLING" + ENABLED = "ENABLED" + DISABLING = "DISABLING" + DISABLED = "DISABLED" + FAILED = "FAILED" + + +class DestinationStatus(StrEnum): + ENABLING = "ENABLING" + ACTIVE = "ACTIVE" + DISABLING = "DISABLING" + DISABLED = "DISABLED" + ENABLE_FAILED = "ENABLE_FAILED" + UPDATING = "UPDATING" + + +class ExportFormat(StrEnum): + DYNAMODB_JSON = "DYNAMODB_JSON" + ION = "ION" + + +class ExportStatus(StrEnum): + IN_PROGRESS = "IN_PROGRESS" + COMPLETED = "COMPLETED" + FAILED = "FAILED" + + +class ExportType(StrEnum): + FULL_EXPORT = "FULL_EXPORT" + INCREMENTAL_EXPORT = "INCREMENTAL_EXPORT" + + +class ExportViewType(StrEnum): + NEW_IMAGE = "NEW_IMAGE" + NEW_AND_OLD_IMAGES = "NEW_AND_OLD_IMAGES" + + +class GlobalTableStatus(StrEnum): + CREATING = "CREATING" + ACTIVE = "ACTIVE" + DELETING = "DELETING" + UPDATING = "UPDATING" + + +class ImportStatus(StrEnum): + IN_PROGRESS = "IN_PROGRESS" + COMPLETED = "COMPLETED" + CANCELLING = "CANCELLING" + CANCELLED = "CANCELLED" + FAILED = "FAILED" + + +class IndexStatus(StrEnum): + CREATING = "CREATING" + UPDATING = "UPDATING" + DELETING = "DELETING" + ACTIVE = "ACTIVE" + + +class InputCompressionType(StrEnum): + GZIP = "GZIP" + ZSTD = "ZSTD" + NONE = "NONE" + + +class InputFormat(StrEnum): + DYNAMODB_JSON = "DYNAMODB_JSON" + ION = "ION" + CSV = "CSV" + + +class KeyType(StrEnum): + HASH = "HASH" + RANGE = "RANGE" + + +class MultiRegionConsistency(StrEnum): + EVENTUAL = "EVENTUAL" + STRONG = "STRONG" + + +class PointInTimeRecoveryStatus(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class ProjectionType(StrEnum): + ALL = "ALL" + KEYS_ONLY = "KEYS_ONLY" + INCLUDE = "INCLUDE" + + +class ReplicaStatus(StrEnum): + CREATING = "CREATING" + CREATION_FAILED = "CREATION_FAILED" + UPDATING = "UPDATING" + DELETING = "DELETING" + ACTIVE = "ACTIVE" + REGION_DISABLED = "REGION_DISABLED" + INACCESSIBLE_ENCRYPTION_CREDENTIALS = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + + +class ReturnConsumedCapacity(StrEnum): + INDEXES = "INDEXES" + TOTAL = "TOTAL" + NONE = "NONE" + + +class ReturnItemCollectionMetrics(StrEnum): + SIZE = "SIZE" + NONE = "NONE" + + +class ReturnValue(StrEnum): + NONE = "NONE" + ALL_OLD = 
"ALL_OLD" + UPDATED_OLD = "UPDATED_OLD" + ALL_NEW = "ALL_NEW" + UPDATED_NEW = "UPDATED_NEW" + + +class ReturnValuesOnConditionCheckFailure(StrEnum): + ALL_OLD = "ALL_OLD" + NONE = "NONE" + + +class S3SseAlgorithm(StrEnum): + AES256 = "AES256" + KMS = "KMS" + + +class SSEStatus(StrEnum): + ENABLING = "ENABLING" + ENABLED = "ENABLED" + DISABLING = "DISABLING" + DISABLED = "DISABLED" + UPDATING = "UPDATING" + + +class SSEType(StrEnum): + AES256 = "AES256" + KMS = "KMS" + + +class ScalarAttributeType(StrEnum): + S = "S" + N = "N" + B = "B" + + +class Select(StrEnum): + ALL_ATTRIBUTES = "ALL_ATTRIBUTES" + ALL_PROJECTED_ATTRIBUTES = "ALL_PROJECTED_ATTRIBUTES" + SPECIFIC_ATTRIBUTES = "SPECIFIC_ATTRIBUTES" + COUNT = "COUNT" + + +class StreamViewType(StrEnum): + NEW_IMAGE = "NEW_IMAGE" + OLD_IMAGE = "OLD_IMAGE" + NEW_AND_OLD_IMAGES = "NEW_AND_OLD_IMAGES" + KEYS_ONLY = "KEYS_ONLY" + + +class TableClass(StrEnum): + STANDARD = "STANDARD" + STANDARD_INFREQUENT_ACCESS = "STANDARD_INFREQUENT_ACCESS" + + +class TableStatus(StrEnum): + CREATING = "CREATING" + UPDATING = "UPDATING" + DELETING = "DELETING" + ACTIVE = "ACTIVE" + INACCESSIBLE_ENCRYPTION_CREDENTIALS = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + ARCHIVING = "ARCHIVING" + ARCHIVED = "ARCHIVED" + + +class TimeToLiveStatus(StrEnum): + ENABLING = "ENABLING" + DISABLING = "DISABLING" + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class BackupInUseException(ServiceException): + code: str = "BackupInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class BackupNotFoundException(ServiceException): + code: str = "BackupNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class AttributeValue(TypedDict, total=False): + S: Optional["StringAttributeValue"] + N: Optional["NumberAttributeValue"] + B: Optional["BinaryAttributeValue"] + SS: Optional["StringSetAttributeValue"] + NS: Optional["NumberSetAttributeValue"] + BS: Optional["BinarySetAttributeValue"] + M: Optional["MapAttributeValue"] + L: Optional["ListAttributeValue"] + NULL: Optional["NullAttributeValue"] + BOOL: Optional["BooleanAttributeValue"] + + +ListAttributeValue = List[AttributeValue] +MapAttributeValue = Dict[AttributeName, AttributeValue] +BinaryAttributeValue = bytes +BinarySetAttributeValue = List[BinaryAttributeValue] +NumberSetAttributeValue = List[NumberAttributeValue] +StringSetAttributeValue = List[StringAttributeValue] +AttributeMap = Dict[AttributeName, AttributeValue] + + +class ConditionalCheckFailedException(ServiceException): + code: str = "ConditionalCheckFailedException" + sender_fault: bool = False + status_code: int = 400 + Item: Optional[AttributeMap] + + +class ContinuousBackupsUnavailableException(ServiceException): + code: str = "ContinuousBackupsUnavailableException" + sender_fault: bool = False + status_code: int = 400 + + +class DuplicateItemException(ServiceException): + code: str = "DuplicateItemException" + sender_fault: bool = False + status_code: int = 400 + + +class ExportConflictException(ServiceException): + code: str = "ExportConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class ExportNotFoundException(ServiceException): + code: str = "ExportNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class GlobalTableAlreadyExistsException(ServiceException): + code: str = "GlobalTableAlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + + +class GlobalTableNotFoundException(ServiceException): + code: str = "GlobalTableNotFoundException" + 
+
+
+class ConditionalCheckFailedException(ServiceException):
+    code: str = "ConditionalCheckFailedException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Item: Optional[AttributeMap]
+
+
+class ContinuousBackupsUnavailableException(ServiceException):
+    code: str = "ContinuousBackupsUnavailableException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class DuplicateItemException(ServiceException):
+    code: str = "DuplicateItemException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ExportConflictException(ServiceException):
+    code: str = "ExportConflictException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ExportNotFoundException(ServiceException):
+    code: str = "ExportNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class GlobalTableAlreadyExistsException(ServiceException):
+    code: str = "GlobalTableAlreadyExistsException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class GlobalTableNotFoundException(ServiceException):
+    code: str = "GlobalTableNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class IdempotentParameterMismatchException(ServiceException):
+    code: str = "IdempotentParameterMismatchException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ImportConflictException(ServiceException):
+    code: str = "ImportConflictException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ImportNotFoundException(ServiceException):
+    code: str = "ImportNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class IndexNotFoundException(ServiceException):
+    code: str = "IndexNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InternalServerError(ServiceException):
+    code: str = "InternalServerError"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidExportTimeException(ServiceException):
+    code: str = "InvalidExportTimeException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidRestoreTimeException(ServiceException):
+    code: str = "InvalidRestoreTimeException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ItemCollectionSizeLimitExceededException(ServiceException):
+    code: str = "ItemCollectionSizeLimitExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class LimitExceededException(ServiceException):
+    code: str = "LimitExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class PointInTimeRecoveryUnavailableException(ServiceException):
+    code: str = "PointInTimeRecoveryUnavailableException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class PolicyNotFoundException(ServiceException):
+    code: str = "PolicyNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ProvisionedThroughputExceededException(ServiceException):
+    code: str = "ProvisionedThroughputExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ReplicaAlreadyExistsException(ServiceException):
+    code: str = "ReplicaAlreadyExistsException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ReplicaNotFoundException(ServiceException):
+    code: str = "ReplicaNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ReplicatedWriteConflictException(ServiceException):
+    code: str = "ReplicatedWriteConflictException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class RequestLimitExceeded(ServiceException):
+    code: str = "RequestLimitExceeded"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ResourceInUseException(ServiceException):
+    code: str = "ResourceInUseException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ResourceNotFoundException(ServiceException):
+    code: str = "ResourceNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class TableAlreadyExistsException(ServiceException):
+    code: str = "TableAlreadyExistsException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class TableInUseException(ServiceException):
+    code: str = "TableInUseException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class TableNotFoundException(ServiceException):
+    code: str = "TableNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
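
Because ServiceException.__init__ copies keyword arguments onto the instance, the generated subclasses can be raised with their extra shape members directly; a sketch with illustrative item contents:

# Sketch: the Item kwarg becomes an instance attribute and is serialized
# into the error shape alongside code/status_code/sender_fault.
raise ConditionalCheckFailedException(
    "The conditional request failed",
    Item={"PK": AttributeValue(S="user#1")},
)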
+
+
+class CancellationReason(TypedDict, total=False):
+    Item: Optional[AttributeMap]
+    Code: Optional[Code]
+    Message: Optional[ErrorMessage]
+
+
+CancellationReasonList = List[CancellationReason]
+
+
+class TransactionCanceledException(ServiceException):
+    code: str = "TransactionCanceledException"
+    sender_fault: bool = False
+    status_code: int = 400
+    CancellationReasons: Optional[CancellationReasonList]
+
+
+class TransactionConflictException(ServiceException):
+    code: str = "TransactionConflictException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class TransactionInProgressException(ServiceException):
+    code: str = "TransactionInProgressException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+Date = datetime
+
+
+class ArchivalSummary(TypedDict, total=False):
+    ArchivalDateTime: Optional[Date]
+    ArchivalReason: Optional[ArchivalReason]
+    ArchivalBackupArn: Optional[BackupArn]
+
+
+class AttributeDefinition(TypedDict, total=False):
+    AttributeName: KeySchemaAttributeName
+    AttributeType: ScalarAttributeType
+
+
+AttributeDefinitions = List[AttributeDefinition]
+AttributeNameList = List[AttributeName]
+
+
+class AttributeValueUpdate(TypedDict, total=False):
+    Value: Optional[AttributeValue]
+    Action: Optional[AttributeAction]
+
+
+AttributeUpdates = Dict[AttributeName, AttributeValueUpdate]
+AttributeValueList = List[AttributeValue]
+
+
+class AutoScalingTargetTrackingScalingPolicyConfigurationDescription(TypedDict, total=False):
+    DisableScaleIn: Optional[BooleanObject]
+    ScaleInCooldown: Optional[IntegerObject]
+    ScaleOutCooldown: Optional[IntegerObject]
+    TargetValue: DoubleObject
+
+
+class AutoScalingPolicyDescription(TypedDict, total=False):
+    PolicyName: Optional[AutoScalingPolicyName]
+    TargetTrackingScalingPolicyConfiguration: Optional[
+        AutoScalingTargetTrackingScalingPolicyConfigurationDescription
+    ]
+
+
+AutoScalingPolicyDescriptionList = List[AutoScalingPolicyDescription]
+
+
+class AutoScalingTargetTrackingScalingPolicyConfigurationUpdate(TypedDict, total=False):
+    DisableScaleIn: Optional[BooleanObject]
+    ScaleInCooldown: Optional[IntegerObject]
+    ScaleOutCooldown: Optional[IntegerObject]
+    TargetValue: DoubleObject
+
+
+class AutoScalingPolicyUpdate(TypedDict, total=False):
+    PolicyName: Optional[AutoScalingPolicyName]
+    TargetTrackingScalingPolicyConfiguration: (
+        AutoScalingTargetTrackingScalingPolicyConfigurationUpdate
+    )
+
+
+PositiveLongObject = int
+
+
+class AutoScalingSettingsDescription(TypedDict, total=False):
+    MinimumUnits: Optional[PositiveLongObject]
+    MaximumUnits: Optional[PositiveLongObject]
+    AutoScalingDisabled: Optional[BooleanObject]
+    AutoScalingRoleArn: Optional[String]
+    ScalingPolicies: Optional[AutoScalingPolicyDescriptionList]
+
+
+class AutoScalingSettingsUpdate(TypedDict, total=False):
+    MinimumUnits: Optional[PositiveLongObject]
+    MaximumUnits: Optional[PositiveLongObject]
+    AutoScalingDisabled: Optional[BooleanObject]
+    AutoScalingRoleArn: Optional[AutoScalingRoleArn]
+    ScalingPolicyUpdate: Optional[AutoScalingPolicyUpdate]
+
+
+BackupCreationDateTime = datetime
+
+
+class SSEDescription(TypedDict, total=False):
+    Status: Optional[SSEStatus]
+    SSEType: Optional[SSEType]
+    KMSMasterKeyArn: Optional[KMSMasterKeyArn]
+    InaccessibleEncryptionDateTime: Optional[Date]
+
+
+class TimeToLiveDescription(TypedDict, total=False):
+    TimeToLiveStatus: Optional[TimeToLiveStatus]
+    AttributeName: Optional[TimeToLiveAttributeName]
+
+
+class StreamSpecification(TypedDict, total=False):
+    StreamEnabled: StreamEnabled
+    StreamViewType: Optional[StreamViewType]
+
+
+LongObject = int
+
+
+class OnDemandThroughput(TypedDict, total=False):
+    MaxReadRequestUnits: Optional[LongObject]
+    MaxWriteRequestUnits: Optional[LongObject]
+
+
+class ProvisionedThroughput(TypedDict, total=False):
+    ReadCapacityUnits: PositiveLongObject
+    WriteCapacityUnits: PositiveLongObject
+
+
+NonKeyAttributeNameList = List[NonKeyAttributeName]
+
+
+class Projection(TypedDict, total=False):
+    ProjectionType: Optional[ProjectionType]
+    NonKeyAttributes: Optional[NonKeyAttributeNameList]
+
+
+class KeySchemaElement(TypedDict, total=False):
+    AttributeName: KeySchemaAttributeName
+    KeyType: KeyType
+
+
+KeySchema = List[KeySchemaElement]
+
+
+class GlobalSecondaryIndexInfo(TypedDict, total=False):
+    IndexName: Optional[IndexName]
+    KeySchema: Optional[KeySchema]
+    Projection: Optional[Projection]
+    ProvisionedThroughput: Optional[ProvisionedThroughput]
+    OnDemandThroughput: Optional[OnDemandThroughput]
+
+
+GlobalSecondaryIndexes = List[GlobalSecondaryIndexInfo]
+
+
+class LocalSecondaryIndexInfo(TypedDict, total=False):
+    IndexName: Optional[IndexName]
+    KeySchema: Optional[KeySchema]
+    Projection: Optional[Projection]
+
+
+LocalSecondaryIndexes = List[LocalSecondaryIndexInfo]
+
+
+class SourceTableFeatureDetails(TypedDict, total=False):
+    LocalSecondaryIndexes: Optional[LocalSecondaryIndexes]
+    GlobalSecondaryIndexes: Optional[GlobalSecondaryIndexes]
+    StreamDescription: Optional[StreamSpecification]
+    TimeToLiveDescription: Optional[TimeToLiveDescription]
+    SSEDescription: Optional[SSEDescription]
+
+
+ItemCount = int
+TableCreationDateTime = datetime
+
+
+class SourceTableDetails(TypedDict, total=False):
+    TableName: TableName
+    TableId: TableId
+    TableArn: Optional[TableArn]
+    TableSizeBytes: Optional[LongObject]
+    KeySchema: KeySchema
+    TableCreationDateTime: TableCreationDateTime
+    ProvisionedThroughput: ProvisionedThroughput
+    OnDemandThroughput: Optional[OnDemandThroughput]
+    ItemCount: Optional[ItemCount]
+    BillingMode: Optional[BillingMode]
+
+
+BackupSizeBytes = int
+
+
+class BackupDetails(TypedDict, total=False):
+    BackupArn: BackupArn
+    BackupName: BackupName
+    BackupSizeBytes: Optional[BackupSizeBytes]
+    BackupStatus: BackupStatus
+    BackupType: BackupType
+    BackupCreationDateTime: BackupCreationDateTime
+    BackupExpiryDateTime: Optional[Date]
+
+
+class BackupDescription(TypedDict, total=False):
+    BackupDetails: Optional[BackupDetails]
+    SourceTableDetails: Optional[SourceTableDetails]
+    SourceTableFeatureDetails: Optional[SourceTableFeatureDetails]
+
+
+class BackupSummary(TypedDict, total=False):
+    TableName: Optional[TableName]
+    TableId: Optional[TableId]
+    TableArn: Optional[TableArn]
+    BackupArn: Optional[BackupArn]
+    BackupName: Optional[BackupName]
+    BackupCreationDateTime: Optional[BackupCreationDateTime]
+    BackupExpiryDateTime: Optional[Date]
+    BackupStatus: Optional[BackupStatus]
+    BackupType: Optional[BackupType]
+    BackupSizeBytes: Optional[BackupSizeBytes]
+
+
+BackupSummaries = List[BackupSummary]
+PreparedStatementParameters = List[AttributeValue]
+
+
+class BatchStatementRequest(TypedDict, total=False):
+    Statement: PartiQLStatement
+    Parameters: Optional[PreparedStatementParameters]
+    ConsistentRead: Optional[ConsistentRead]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+PartiQLBatchRequest = List[BatchStatementRequest]
+
+
+class BatchExecuteStatementInput(ServiceRequest):
+    Statements: PartiQLBatchRequest
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
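
A request matching the BatchExecuteStatementInput shape might look like this sketch (table name and values are illustrative):

request: BatchExecuteStatementInput = {
    "Statements": [
        {
            "Statement": "SELECT * FROM Users WHERE PK = ?",  # PartiQL with positional parameters
            "Parameters": [{"S": "user#1"}],
            "ConsistentRead": True,
        }
    ],
    "ReturnConsumedCapacity": ReturnConsumedCapacity.TOTAL,
}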
+
+
+class Capacity(TypedDict, total=False):
+    ReadCapacityUnits: Optional[ConsumedCapacityUnits]
+    WriteCapacityUnits: Optional[ConsumedCapacityUnits]
+    CapacityUnits: Optional[ConsumedCapacityUnits]
+
+
+SecondaryIndexesCapacityMap = Dict[IndexName, Capacity]
+
+
+class ConsumedCapacity(TypedDict, total=False):
+    TableName: Optional[TableArn]
+    CapacityUnits: Optional[ConsumedCapacityUnits]
+    ReadCapacityUnits: Optional[ConsumedCapacityUnits]
+    WriteCapacityUnits: Optional[ConsumedCapacityUnits]
+    Table: Optional[Capacity]
+    LocalSecondaryIndexes: Optional[SecondaryIndexesCapacityMap]
+    GlobalSecondaryIndexes: Optional[SecondaryIndexesCapacityMap]
+
+
+ConsumedCapacityMultiple = List[ConsumedCapacity]
+
+
+class BatchStatementError(TypedDict, total=False):
+    Code: Optional[BatchStatementErrorCodeEnum]
+    Message: Optional[String]
+    Item: Optional[AttributeMap]
+
+
+class BatchStatementResponse(TypedDict, total=False):
+    Error: Optional[BatchStatementError]
+    TableName: Optional[TableName]
+    Item: Optional[AttributeMap]
+
+
+PartiQLBatchResponse = List[BatchStatementResponse]
+
+
+class BatchExecuteStatementOutput(TypedDict, total=False):
+    Responses: Optional[PartiQLBatchResponse]
+    ConsumedCapacity: Optional[ConsumedCapacityMultiple]
+
+
+ExpressionAttributeNameMap = Dict[ExpressionAttributeNameVariable, AttributeName]
+Key = Dict[AttributeName, AttributeValue]
+KeyList = List[Key]
+
+
+class KeysAndAttributes(TypedDict, total=False):
+    Keys: KeyList
+    AttributesToGet: Optional[AttributeNameList]
+    ConsistentRead: Optional[ConsistentRead]
+    ProjectionExpression: Optional[ProjectionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+
+
+BatchGetRequestMap = Dict[TableArn, KeysAndAttributes]
+
+
+class BatchGetItemInput(ServiceRequest):
+    RequestItems: BatchGetRequestMap
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+
+
+ItemList = List[AttributeMap]
+BatchGetResponseMap = Dict[TableArn, ItemList]
+
+
+class BatchGetItemOutput(TypedDict, total=False):
+    Responses: Optional[BatchGetResponseMap]
+    UnprocessedKeys: Optional[BatchGetRequestMap]
+    ConsumedCapacity: Optional[ConsumedCapacityMultiple]
+
+
+class DeleteRequest(TypedDict, total=False):
+    Key: Key
+
+
+PutItemInputAttributeMap = Dict[AttributeName, AttributeValue]
+
+
+class PutRequest(TypedDict, total=False):
+    Item: PutItemInputAttributeMap
+
+
+class WriteRequest(TypedDict, total=False):
+    PutRequest: Optional[PutRequest]
+    DeleteRequest: Optional[DeleteRequest]
+
+
+WriteRequests = List[WriteRequest]
+BatchWriteItemRequestMap = Dict[TableArn, WriteRequests]
+
+
+class BatchWriteItemInput(ServiceRequest):
+    RequestItems: BatchWriteItemRequestMap
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    ReturnItemCollectionMetrics: Optional[ReturnItemCollectionMetrics]
+
+
+ItemCollectionSizeEstimateRange = List[ItemCollectionSizeEstimateBound]
+ItemCollectionKeyAttributeMap = Dict[AttributeName, AttributeValue]
+
+
+class ItemCollectionMetrics(TypedDict, total=False):
+    ItemCollectionKey: Optional[ItemCollectionKeyAttributeMap]
+    SizeEstimateRangeGB: Optional[ItemCollectionSizeEstimateRange]
+
+
+ItemCollectionMetricsMultiple = List[ItemCollectionMetrics]
+ItemCollectionMetricsPerTable = Dict[TableArn, ItemCollectionMetricsMultiple]
+
+
+class BatchWriteItemOutput(TypedDict, total=False):
+    UnprocessedItems: Optional[BatchWriteItemRequestMap]
+    ItemCollectionMetrics: Optional[ItemCollectionMetricsPerTable]
+    ConsumedCapacity: Optional[ConsumedCapacityMultiple]
+
+
+BilledSizeBytes = int
+
+
+class BillingModeSummary(TypedDict, total=False):
+    BillingMode: Optional[BillingMode]
+    LastUpdateToPayPerRequestDateTime: Optional[Date]
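
A BatchWriteItemInput payload pairs a table name (or ARN) with put/delete requests; an illustrative sketch:

request: BatchWriteItemInput = {
    "RequestItems": {
        "Users": [  # keyed by table name or table ARN
            {"PutRequest": {"Item": {"PK": {"S": "user#1"}}}},
            {"DeleteRequest": {"Key": {"PK": {"S": "user#2"}}}},
        ]
    },
    "ReturnConsumedCapacity": ReturnConsumedCapacity.NONE,
}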
+
+
+class Condition(TypedDict, total=False):
+    AttributeValueList: Optional[AttributeValueList]
+    ComparisonOperator: ComparisonOperator
+
+
+ExpressionAttributeValueMap = Dict[ExpressionAttributeValueVariable, AttributeValue]
+
+
+class ConditionCheck(TypedDict, total=False):
+    Key: Key
+    TableName: TableArn
+    ConditionExpression: ConditionExpression
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
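
ConditionCheck is the condition-only member used in transactional writes; a sketch with illustrative names:

check: ConditionCheck = {
    "TableName": "Users",
    "Key": {"PK": {"S": "user#1"}},
    "ConditionExpression": "attribute_exists(PK) AND #age >= :min",
    "ExpressionAttributeNames": {"#age": "Age"},
    "ExpressionAttributeValues": {":min": {"N": "18"}},
}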
+
+
+class PointInTimeRecoveryDescription(TypedDict, total=False):
+    PointInTimeRecoveryStatus: Optional[PointInTimeRecoveryStatus]
+    RecoveryPeriodInDays: Optional[RecoveryPeriodInDays]
+    EarliestRestorableDateTime: Optional[Date]
+    LatestRestorableDateTime: Optional[Date]
+
+
+class ContinuousBackupsDescription(TypedDict, total=False):
+    ContinuousBackupsStatus: ContinuousBackupsStatus
+    PointInTimeRecoveryDescription: Optional[PointInTimeRecoveryDescription]
+
+
+ContributorInsightsRuleList = List[ContributorInsightsRule]
+
+
+class ContributorInsightsSummary(TypedDict, total=False):
+    TableName: Optional[TableName]
+    IndexName: Optional[IndexName]
+    ContributorInsightsStatus: Optional[ContributorInsightsStatus]
+
+
+ContributorInsightsSummaries = List[ContributorInsightsSummary]
+
+
+class CreateBackupInput(ServiceRequest):
+    TableName: TableArn
+    BackupName: BackupName
+
+
+class CreateBackupOutput(TypedDict, total=False):
+    BackupDetails: Optional[BackupDetails]
+
+
+class WarmThroughput(TypedDict, total=False):
+    ReadUnitsPerSecond: Optional[LongObject]
+    WriteUnitsPerSecond: Optional[LongObject]
+
+
+class CreateGlobalSecondaryIndexAction(TypedDict, total=False):
+    IndexName: IndexName
+    KeySchema: KeySchema
+    Projection: Projection
+    ProvisionedThroughput: Optional[ProvisionedThroughput]
+    OnDemandThroughput: Optional[OnDemandThroughput]
+    WarmThroughput: Optional[WarmThroughput]
+
+
+class Replica(TypedDict, total=False):
+    RegionName: Optional[RegionName]
+
+
+ReplicaList = List[Replica]
+
+
+class CreateGlobalTableInput(ServiceRequest):
+    GlobalTableName: TableName
+    ReplicationGroup: ReplicaList
+
+
+class TableClassSummary(TypedDict, total=False):
+    TableClass: Optional[TableClass]
+    LastUpdateDateTime: Optional[Date]
+
+
+class GlobalSecondaryIndexWarmThroughputDescription(TypedDict, total=False):
+    ReadUnitsPerSecond: Optional[PositiveLongObject]
+    WriteUnitsPerSecond: Optional[PositiveLongObject]
+    Status: Optional[IndexStatus]
+
+
+class OnDemandThroughputOverride(TypedDict, total=False):
+    MaxReadRequestUnits: Optional[LongObject]
+
+
+class ProvisionedThroughputOverride(TypedDict, total=False):
+    ReadCapacityUnits: Optional[PositiveLongObject]
+
+
+class ReplicaGlobalSecondaryIndexDescription(TypedDict, total=False):
+    IndexName: Optional[IndexName]
+    ProvisionedThroughputOverride: Optional[ProvisionedThroughputOverride]
+    OnDemandThroughputOverride: Optional[OnDemandThroughputOverride]
+    WarmThroughput: Optional[GlobalSecondaryIndexWarmThroughputDescription]
+
+
+ReplicaGlobalSecondaryIndexDescriptionList = List[ReplicaGlobalSecondaryIndexDescription]
+
+
+class TableWarmThroughputDescription(TypedDict, total=False):
+    ReadUnitsPerSecond: Optional[PositiveLongObject]
+    WriteUnitsPerSecond: Optional[PositiveLongObject]
+    Status: Optional[TableStatus]
+
+
+class ReplicaDescription(TypedDict, total=False):
+    RegionName: Optional[RegionName]
+    ReplicaStatus: Optional[ReplicaStatus]
+    ReplicaStatusDescription: Optional[ReplicaStatusDescription]
+    ReplicaStatusPercentProgress: Optional[ReplicaStatusPercentProgress]
+    KMSMasterKeyId: Optional[KMSMasterKeyId]
+    ProvisionedThroughputOverride: Optional[ProvisionedThroughputOverride]
+    OnDemandThroughputOverride: Optional[OnDemandThroughputOverride]
+    WarmThroughput: Optional[TableWarmThroughputDescription]
+    GlobalSecondaryIndexes: Optional[ReplicaGlobalSecondaryIndexDescriptionList]
+    ReplicaInaccessibleDateTime: Optional[Date]
+    ReplicaTableClassSummary: Optional[TableClassSummary]
+
+
+ReplicaDescriptionList = List[ReplicaDescription]
+
+
+class GlobalTableDescription(TypedDict, total=False):
+    ReplicationGroup: Optional[ReplicaDescriptionList]
+    GlobalTableArn: Optional[GlobalTableArnString]
+    CreationDateTime: Optional[Date]
+    GlobalTableStatus: Optional[GlobalTableStatus]
+    GlobalTableName: Optional[TableName]
+
+
+class CreateGlobalTableOutput(TypedDict, total=False):
+    GlobalTableDescription: Optional[GlobalTableDescription]
+
+
+class CreateReplicaAction(TypedDict, total=False):
+    RegionName: RegionName
+
+
+class ReplicaGlobalSecondaryIndex(TypedDict, total=False):
+    IndexName: IndexName
+    ProvisionedThroughputOverride: Optional[ProvisionedThroughputOverride]
+    OnDemandThroughputOverride: Optional[OnDemandThroughputOverride]
+
+
+ReplicaGlobalSecondaryIndexList = List[ReplicaGlobalSecondaryIndex]
+
+
+class CreateReplicationGroupMemberAction(TypedDict, total=False):
+    RegionName: RegionName
+    KMSMasterKeyId: Optional[KMSMasterKeyId]
+    ProvisionedThroughputOverride: Optional[ProvisionedThroughputOverride]
+    OnDemandThroughputOverride: Optional[OnDemandThroughputOverride]
+    GlobalSecondaryIndexes: Optional[ReplicaGlobalSecondaryIndexList]
+    TableClassOverride: Optional[TableClass]
+
+
+class Tag(TypedDict, total=False):
+    Key: TagKeyString
+    Value: TagValueString
+
+
+TagList = List[Tag]
+
+
+class SSESpecification(TypedDict, total=False):
+    Enabled: Optional[SSEEnabled]
+    SSEType: Optional[SSEType]
+    KMSMasterKeyId: Optional[KMSMasterKeyId]
+
+
+class GlobalSecondaryIndex(TypedDict, total=False):
+    IndexName: IndexName
+    KeySchema: KeySchema
+    Projection: Projection
+    ProvisionedThroughput: Optional[ProvisionedThroughput]
+    OnDemandThroughput: Optional[OnDemandThroughput]
+    WarmThroughput: Optional[WarmThroughput]
+
+
+GlobalSecondaryIndexList = List[GlobalSecondaryIndex]
+
+
+class LocalSecondaryIndex(TypedDict, total=False):
+    IndexName: IndexName
+    KeySchema: KeySchema
+    Projection: Projection
+
+
+LocalSecondaryIndexList = List[LocalSecondaryIndex]
+
+
+class CreateTableInput(ServiceRequest):
+    AttributeDefinitions: AttributeDefinitions
+    TableName: TableArn
+    KeySchema: KeySchema
+    LocalSecondaryIndexes: Optional[LocalSecondaryIndexList]
+    GlobalSecondaryIndexes: Optional[GlobalSecondaryIndexList]
+    BillingMode: Optional[BillingMode]
+    ProvisionedThroughput: Optional[ProvisionedThroughput]
+    StreamSpecification: Optional[StreamSpecification]
+    SSESpecification: Optional[SSESpecification]
+    Tags: Optional[TagList]
+    TableClass: Optional[TableClass]
+    DeletionProtectionEnabled: Optional[DeletionProtectionEnabled]
+    WarmThroughput: Optional[WarmThroughput]
+    ResourcePolicy: Optional[ResourcePolicy]
+    OnDemandThroughput: Optional[OnDemandThroughput]
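
A minimal CreateTableInput for an on-demand table with a single hash key, as an illustrative sketch:

request: CreateTableInput = {
    "TableName": "Users",
    "AttributeDefinitions": [{"AttributeName": "PK", "AttributeType": ScalarAttributeType.S}],
    "KeySchema": [{"AttributeName": "PK", "KeyType": KeyType.HASH}],
    "BillingMode": BillingMode.PAY_PER_REQUEST,
}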
+    LastDecreaseDateTime: Optional[Date]
+    NumberOfDecreasesToday: Optional[PositiveLongObject]
+    ReadCapacityUnits: Optional[NonNegativeLongObject]
+    WriteCapacityUnits: Optional[NonNegativeLongObject]
+
+
+class GlobalSecondaryIndexDescription(TypedDict, total=False):
+    IndexName: Optional[IndexName]
+    KeySchema: Optional[KeySchema]
+    Projection: Optional[Projection]
+    IndexStatus: Optional[IndexStatus]
+    Backfilling: Optional[Backfilling]
+    ProvisionedThroughput: Optional[ProvisionedThroughputDescription]
+    IndexSizeBytes: Optional[LongObject]
+    ItemCount: Optional[LongObject]
+    IndexArn: Optional[String]
+    OnDemandThroughput: Optional[OnDemandThroughput]
+    WarmThroughput: Optional[GlobalSecondaryIndexWarmThroughputDescription]
+
+
+GlobalSecondaryIndexDescriptionList = List[GlobalSecondaryIndexDescription]
+
+
+class LocalSecondaryIndexDescription(TypedDict, total=False):
+    IndexName: Optional[IndexName]
+    KeySchema: Optional[KeySchema]
+    Projection: Optional[Projection]
+    IndexSizeBytes: Optional[LongObject]
+    ItemCount: Optional[LongObject]
+    IndexArn: Optional[String]
+
+
+LocalSecondaryIndexDescriptionList = List[LocalSecondaryIndexDescription]
+
+
+class TableDescription(TypedDict, total=False):
+    AttributeDefinitions: Optional[AttributeDefinitions]
+    TableName: Optional[TableName]
+    KeySchema: Optional[KeySchema]
+    TableStatus: Optional[TableStatus]
+    CreationDateTime: Optional[Date]
+    ProvisionedThroughput: Optional[ProvisionedThroughputDescription]
+    TableSizeBytes: Optional[LongObject]
+    ItemCount: Optional[LongObject]
+    TableArn: Optional[String]
+    TableId: Optional[TableId]
+    BillingModeSummary: Optional[BillingModeSummary]
+    LocalSecondaryIndexes: Optional[LocalSecondaryIndexDescriptionList]
+    GlobalSecondaryIndexes: Optional[GlobalSecondaryIndexDescriptionList]
+    StreamSpecification: Optional[StreamSpecification]
+    LatestStreamLabel: Optional[String]
+    LatestStreamArn: Optional[StreamArn]
+    GlobalTableVersion: Optional[String]
+    Replicas: Optional[ReplicaDescriptionList]
+    RestoreSummary: Optional[RestoreSummary]
+    SSEDescription: Optional[SSEDescription]
+    ArchivalSummary: Optional[ArchivalSummary]
+    TableClassSummary: Optional[TableClassSummary]
+    DeletionProtectionEnabled: Optional[DeletionProtectionEnabled]
+    OnDemandThroughput: Optional[OnDemandThroughput]
+    WarmThroughput: Optional[TableWarmThroughputDescription]
+    MultiRegionConsistency: Optional[MultiRegionConsistency]
+
+
+class CreateTableOutput(TypedDict, total=False):
+    TableDescription: Optional[TableDescription]
+
+
+CsvHeaderList = List[CsvHeader]
+
+
+class CsvOptions(TypedDict, total=False):
+    Delimiter: Optional[CsvDelimiter]
+    HeaderList: Optional[CsvHeaderList]
+
+
+class Delete(TypedDict, total=False):
+    Key: Key
+    TableName: TableArn
+    ConditionExpression: Optional[ConditionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+class DeleteBackupInput(ServiceRequest):
+    BackupArn: BackupArn
+
+
+class DeleteBackupOutput(TypedDict, total=False):
+    BackupDescription: Optional[BackupDescription]
+
+
+class DeleteGlobalSecondaryIndexAction(TypedDict, total=False):
+    IndexName: IndexName
+
+
+class ExpectedAttributeValue(TypedDict, total=False):
+    Value: Optional[AttributeValue]
+    Exists: Optional[BooleanObject]
+    ComparisonOperator: Optional[ComparisonOperator]
+    AttributeValueList: Optional[AttributeValueList]
+
+
+ExpectedAttributeMap = Dict[AttributeName, ExpectedAttributeValue]
+
+
+class DeleteItemInput(ServiceRequest):
+    TableName: TableArn
+    Key: Key
+    Expected: Optional[ExpectedAttributeMap]
+    ConditionalOperator: Optional[ConditionalOperator]
+    ReturnValues: Optional[ReturnValue]
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    ReturnItemCollectionMetrics: Optional[ReturnItemCollectionMetrics]
+    ConditionExpression: Optional[ConditionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+class DeleteItemOutput(TypedDict, total=False):
+    Attributes: Optional[AttributeMap]
+    ConsumedCapacity: Optional[ConsumedCapacity]
+    ItemCollectionMetrics: Optional[ItemCollectionMetrics]
+
+
+class DeleteReplicaAction(TypedDict, total=False):
+    RegionName: RegionName
+
+
+class DeleteReplicationGroupMemberAction(TypedDict, total=False):
+    RegionName: RegionName
+
+
+class DeleteResourcePolicyInput(ServiceRequest):
+    ResourceArn: ResourceArnString
+    ExpectedRevisionId: Optional[PolicyRevisionId]
+
+
+class DeleteResourcePolicyOutput(TypedDict, total=False):
+    RevisionId: Optional[PolicyRevisionId]
+
+
+class DeleteTableInput(ServiceRequest):
+    TableName: TableArn
+
+
+class DeleteTableOutput(TypedDict, total=False):
+    TableDescription: Optional[TableDescription]
+
+
+class DescribeBackupInput(ServiceRequest):
+    BackupArn: BackupArn
+
+
+class DescribeBackupOutput(TypedDict, total=False):
+    BackupDescription: Optional[BackupDescription]
+
+
+class DescribeContinuousBackupsInput(ServiceRequest):
+    TableName: TableArn
+
+
+class DescribeContinuousBackupsOutput(TypedDict, total=False):
+    ContinuousBackupsDescription: Optional[ContinuousBackupsDescription]
+
+
+class DescribeContributorInsightsInput(ServiceRequest):
+    TableName: TableArn
+    IndexName: Optional[IndexName]
+
+
+class FailureException(TypedDict, total=False):
+    ExceptionName: Optional[ExceptionName]
+    ExceptionDescription: Optional[ExceptionDescription]
+
+
+LastUpdateDateTime = datetime
+
+
+class DescribeContributorInsightsOutput(TypedDict, total=False):
+    TableName: Optional[TableName]
+    IndexName: Optional[IndexName]
+    ContributorInsightsRuleList: Optional[ContributorInsightsRuleList]
+    ContributorInsightsStatus: Optional[ContributorInsightsStatus]
+    LastUpdateDateTime: Optional[LastUpdateDateTime]
+    FailureException: Optional[FailureException]
+
+
+class DescribeEndpointsRequest(ServiceRequest):
+    pass
+
+
+Long = int
+
+
+class Endpoint(TypedDict, total=False):
+    Address: String
+    CachePeriodInMinutes: Long
+
+
+Endpoints = List[Endpoint]
+
+
+class DescribeEndpointsResponse(TypedDict, total=False):
+    Endpoints: Endpoints
+
+
+class DescribeExportInput(ServiceRequest):
+    ExportArn: ExportArn
+
+
+ExportToTime = datetime
+ExportFromTime = datetime
+
+
+class IncrementalExportSpecification(TypedDict, total=False):
+    ExportFromTime: Optional[ExportFromTime]
+    ExportToTime: Optional[ExportToTime]
+    ExportViewType: Optional[ExportViewType]
+
+
+ExportTime = datetime
+ExportEndTime = datetime
+ExportStartTime = datetime
+
+
+class ExportDescription(TypedDict, total=False):
+    ExportArn: Optional[ExportArn]
+    ExportStatus: Optional[ExportStatus]
+    StartTime: Optional[ExportStartTime]
+    EndTime: Optional[ExportEndTime]
+    ExportManifest: Optional[ExportManifest]
+    TableArn: Optional[TableArn]
+    TableId: Optional[TableId]
+    ExportTime: Optional[ExportTime]
+    ClientToken: Optional[ClientToken]
+    S3Bucket: Optional[S3Bucket]
+    S3BucketOwner: Optional[S3BucketOwner]
+    S3Prefix: Optional[S3Prefix]
+    S3SseAlgorithm: Optional[S3SseAlgorithm]
+    S3SseKmsKeyId: Optional[S3SseKmsKeyId]
+    FailureCode: Optional[FailureCode]
+    FailureMessage: Optional[FailureMessage]
+    ExportFormat: Optional[ExportFormat]
+    BilledSizeBytes: Optional[BilledSizeBytes]
+    ItemCount: Optional[ItemCount]
+    ExportType: Optional[ExportType]
+    IncrementalExportSpecification: Optional[IncrementalExportSpecification]
+
+
+class DescribeExportOutput(TypedDict, total=False):
+    ExportDescription: Optional[ExportDescription]
+
+
+class DescribeGlobalTableInput(ServiceRequest):
+    GlobalTableName: TableName
+
+
+class DescribeGlobalTableOutput(TypedDict, total=False):
+    GlobalTableDescription: Optional[GlobalTableDescription]
+
+
+class DescribeGlobalTableSettingsInput(ServiceRequest):
+    GlobalTableName: TableName
+
+
+class ReplicaGlobalSecondaryIndexSettingsDescription(TypedDict, total=False):
+    IndexName: IndexName
+    IndexStatus: Optional[IndexStatus]
+    ProvisionedReadCapacityUnits: Optional[PositiveLongObject]
+    ProvisionedReadCapacityAutoScalingSettings: Optional[AutoScalingSettingsDescription]
+    ProvisionedWriteCapacityUnits: Optional[PositiveLongObject]
+    ProvisionedWriteCapacityAutoScalingSettings: Optional[AutoScalingSettingsDescription]
+
+
+ReplicaGlobalSecondaryIndexSettingsDescriptionList = List[
+    ReplicaGlobalSecondaryIndexSettingsDescription
+]
+
+
+class ReplicaSettingsDescription(TypedDict, total=False):
+    RegionName: RegionName
+    ReplicaStatus: Optional[ReplicaStatus]
+    ReplicaBillingModeSummary: Optional[BillingModeSummary]
+    ReplicaProvisionedReadCapacityUnits: Optional[NonNegativeLongObject]
+    ReplicaProvisionedReadCapacityAutoScalingSettings: Optional[AutoScalingSettingsDescription]
+    ReplicaProvisionedWriteCapacityUnits: Optional[NonNegativeLongObject]
+    ReplicaProvisionedWriteCapacityAutoScalingSettings: Optional[AutoScalingSettingsDescription]
+    ReplicaGlobalSecondaryIndexSettings: Optional[
+        ReplicaGlobalSecondaryIndexSettingsDescriptionList
+    ]
+    ReplicaTableClassSummary: Optional[TableClassSummary]
+
+
+ReplicaSettingsDescriptionList = List[ReplicaSettingsDescription]
+
+
+class DescribeGlobalTableSettingsOutput(TypedDict, total=False):
+    GlobalTableName: Optional[TableName]
+    ReplicaSettings: Optional[ReplicaSettingsDescriptionList]
+
+
+class DescribeImportInput(ServiceRequest):
+    ImportArn: ImportArn
+
+
+ImportedItemCount = int
+ProcessedItemCount = int
+ImportEndTime = datetime
+ImportStartTime = datetime
+
+
+class TableCreationParameters(TypedDict, total=False):
+    TableName: TableName
+    AttributeDefinitions: AttributeDefinitions
+    KeySchema: KeySchema
+    BillingMode: Optional[BillingMode]
+    ProvisionedThroughput: Optional[ProvisionedThroughput]
+    OnDemandThroughput: Optional[OnDemandThroughput]
+    SSESpecification: Optional[SSESpecification]
+    GlobalSecondaryIndexes: Optional[GlobalSecondaryIndexList]
+
+
+class InputFormatOptions(TypedDict, total=False):
+    Csv: Optional[CsvOptions]
+
+
+ErrorCount = int
+
+
+class S3BucketSource(TypedDict, total=False):
+    S3BucketOwner: Optional[S3BucketOwner]
+    S3Bucket: S3Bucket
+    S3KeyPrefix: Optional[S3Prefix]
+
+
+class ImportTableDescription(TypedDict, total=False):
+    ImportArn: Optional[ImportArn]
+    ImportStatus: Optional[ImportStatus]
+    TableArn: Optional[TableArn]
+    TableId: Optional[TableId]
+    ClientToken: Optional[ClientToken]
+    S3BucketSource: Optional[S3BucketSource]
+    ErrorCount: Optional[ErrorCount]
+    CloudWatchLogGroupArn: Optional[CloudWatchLogGroupArn]
+    InputFormat: Optional[InputFormat]
+    InputFormatOptions: Optional[InputFormatOptions]
+    InputCompressionType: Optional[InputCompressionType]
+    TableCreationParameters: Optional[TableCreationParameters]
+    StartTime: Optional[ImportStartTime]
+    EndTime: Optional[ImportEndTime]
+    ProcessedSizeBytes: Optional[LongObject]
+    ProcessedItemCount: Optional[ProcessedItemCount]
+    ImportedItemCount: Optional[ImportedItemCount]
+    FailureCode: Optional[FailureCode]
+    FailureMessage: Optional[FailureMessage]
+
+
+class DescribeImportOutput(TypedDict, total=False):
+    ImportTableDescription: ImportTableDescription
+
+
+class DescribeKinesisStreamingDestinationInput(ServiceRequest):
+    TableName: TableArn
+
+
+class KinesisDataStreamDestination(TypedDict, total=False):
+    StreamArn: Optional[StreamArn]
+    DestinationStatus: Optional[DestinationStatus]
+    DestinationStatusDescription: Optional[String]
+    ApproximateCreationDateTimePrecision: Optional[ApproximateCreationDateTimePrecision]
+
+
+KinesisDataStreamDestinations = List[KinesisDataStreamDestination]
+
+
+class DescribeKinesisStreamingDestinationOutput(TypedDict, total=False):
+    TableName: Optional[TableName]
+    KinesisDataStreamDestinations: Optional[KinesisDataStreamDestinations]
+
+
+class DescribeLimitsInput(ServiceRequest):
+    pass
+
+
+class DescribeLimitsOutput(TypedDict, total=False):
+    AccountMaxReadCapacityUnits: Optional[PositiveLongObject]
+    AccountMaxWriteCapacityUnits: Optional[PositiveLongObject]
+    TableMaxReadCapacityUnits: Optional[PositiveLongObject]
+    TableMaxWriteCapacityUnits: Optional[PositiveLongObject]
+
+
+class DescribeTableInput(ServiceRequest):
+    TableName: TableArn
+
+
+class DescribeTableOutput(TypedDict, total=False):
+    Table: Optional[TableDescription]
+
+
+class DescribeTableReplicaAutoScalingInput(ServiceRequest):
+    TableName: TableArn
+
+
+class ReplicaGlobalSecondaryIndexAutoScalingDescription(TypedDict, total=False):
+    IndexName: Optional[IndexName]
+    IndexStatus: Optional[IndexStatus]
+    ProvisionedReadCapacityAutoScalingSettings: Optional[AutoScalingSettingsDescription]
+    ProvisionedWriteCapacityAutoScalingSettings: Optional[AutoScalingSettingsDescription]
+
+
+ReplicaGlobalSecondaryIndexAutoScalingDescriptionList = List[
+    ReplicaGlobalSecondaryIndexAutoScalingDescription
+]
+
+
+class ReplicaAutoScalingDescription(TypedDict, total=False):
+    RegionName: Optional[RegionName]
+    GlobalSecondaryIndexes: Optional[ReplicaGlobalSecondaryIndexAutoScalingDescriptionList]
+    ReplicaProvisionedReadCapacityAutoScalingSettings: Optional[AutoScalingSettingsDescription]
+    ReplicaProvisionedWriteCapacityAutoScalingSettings: Optional[AutoScalingSettingsDescription]
+    ReplicaStatus: Optional[ReplicaStatus]
+
+
+ReplicaAutoScalingDescriptionList = List[ReplicaAutoScalingDescription]
+
+
+class TableAutoScalingDescription(TypedDict, total=False):
+    TableName: Optional[TableName]
+    TableStatus: Optional[TableStatus]
+    Replicas: Optional[ReplicaAutoScalingDescriptionList]
+
+
+class DescribeTableReplicaAutoScalingOutput(TypedDict, total=False):
+    TableAutoScalingDescription: Optional[TableAutoScalingDescription]
+
+
+class DescribeTimeToLiveInput(ServiceRequest):
+    TableName: TableArn
+
+
+class DescribeTimeToLiveOutput(TypedDict, total=False):
+    TimeToLiveDescription: Optional[TimeToLiveDescription]
+
+
+class EnableKinesisStreamingConfiguration(TypedDict, total=False):
+    ApproximateCreationDateTimePrecision: Optional[ApproximateCreationDateTimePrecision]
+
+
+class ExecuteStatementInput(ServiceRequest):
+    Statement: PartiQLStatement
+    Parameters: Optional[PreparedStatementParameters]
+    ConsistentRead: Optional[ConsistentRead]
+    NextToken: Optional[PartiQLNextToken]
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    Limit: Optional[PositiveIntegerObject]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+class ExecuteStatementOutput(TypedDict, total=False):
+    Items: Optional[ItemList]
+    NextToken: Optional[PartiQLNextToken]
+    ConsumedCapacity: Optional[ConsumedCapacity]
+    LastEvaluatedKey: Optional[Key]
+
+
+class ParameterizedStatement(TypedDict, total=False):
+    Statement: PartiQLStatement
+    Parameters: Optional[PreparedStatementParameters]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+ParameterizedStatements = List[ParameterizedStatement]
+
+
+class ExecuteTransactionInput(ServiceRequest):
+    TransactStatements: ParameterizedStatements
+    ClientRequestToken: Optional[ClientRequestToken]
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+
+
+class ItemResponse(TypedDict, total=False):
+    Item: Optional[AttributeMap]
+
+
+ItemResponseList = List[ItemResponse]
+
+
+class ExecuteTransactionOutput(TypedDict, total=False):
+    Responses: Optional[ItemResponseList]
+    ConsumedCapacity: Optional[ConsumedCapacityMultiple]
+
+
+class ExportSummary(TypedDict, total=False):
+    ExportArn: Optional[ExportArn]
+    ExportStatus: Optional[ExportStatus]
+    ExportType: Optional[ExportType]
+
+
+ExportSummaries = List[ExportSummary]
+
+
+class ExportTableToPointInTimeInput(ServiceRequest):
+    TableArn: TableArn
+    ExportTime: Optional[ExportTime]
+    ClientToken: Optional[ClientToken]
+    S3Bucket: S3Bucket
+    S3BucketOwner: Optional[S3BucketOwner]
+    S3Prefix: Optional[S3Prefix]
+    S3SseAlgorithm: Optional[S3SseAlgorithm]
+    S3SseKmsKeyId: Optional[S3SseKmsKeyId]
+    ExportFormat: Optional[ExportFormat]
+    ExportType: Optional[ExportType]
+    IncrementalExportSpecification: Optional[IncrementalExportSpecification]
+
+
+class ExportTableToPointInTimeOutput(TypedDict, total=False):
+    ExportDescription: Optional[ExportDescription]
+
+
+FilterConditionMap = Dict[AttributeName, Condition]
+
+
+class Get(TypedDict, total=False):
+    Key: Key
+    TableName: TableArn
+    ProjectionExpression: Optional[ProjectionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+
+
+class GetItemInput(ServiceRequest):
+    TableName: TableArn
+    Key: Key
+    AttributesToGet: Optional[AttributeNameList]
+    ConsistentRead: Optional[ConsistentRead]
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    ProjectionExpression: Optional[ProjectionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+
+
+class GetItemOutput(TypedDict, total=False):
+    Item: Optional[AttributeMap]
+    ConsumedCapacity: Optional[ConsumedCapacity]
+
+
+class GetResourcePolicyInput(ServiceRequest):
+    ResourceArn: ResourceArnString
+
+
+class GetResourcePolicyOutput(TypedDict, total=False):
+    Policy: Optional[ResourcePolicy]
+    RevisionId: Optional[PolicyRevisionId]
+
+
+class GlobalSecondaryIndexAutoScalingUpdate(TypedDict, total=False):
+    IndexName: Optional[IndexName]
+    ProvisionedWriteCapacityAutoScalingUpdate: Optional[AutoScalingSettingsUpdate]
+
+
+GlobalSecondaryIndexAutoScalingUpdateList = List[GlobalSecondaryIndexAutoScalingUpdate]
+
+
+class UpdateGlobalSecondaryIndexAction(TypedDict, total=False):
+    IndexName: IndexName
+    ProvisionedThroughput: Optional[ProvisionedThroughput]
+    OnDemandThroughput: Optional[OnDemandThroughput]
+    WarmThroughput: Optional[WarmThroughput]
+
+
+class GlobalSecondaryIndexUpdate(TypedDict, total=False):
+    Update: Optional[UpdateGlobalSecondaryIndexAction]
+    Create: Optional[CreateGlobalSecondaryIndexAction]
+    Delete: Optional[DeleteGlobalSecondaryIndexAction]
+
+
+GlobalSecondaryIndexUpdateList = List[GlobalSecondaryIndexUpdate]
+
+
+class GlobalTable(TypedDict, total=False):
+    GlobalTableName: Optional[TableName]
+    ReplicationGroup: Optional[ReplicaList]
+
+
+class GlobalTableGlobalSecondaryIndexSettingsUpdate(TypedDict, total=False):
+    IndexName: IndexName
+    ProvisionedWriteCapacityUnits: Optional[PositiveLongObject]
+    ProvisionedWriteCapacityAutoScalingSettingsUpdate: Optional[AutoScalingSettingsUpdate]
+
+
+GlobalTableGlobalSecondaryIndexSettingsUpdateList = List[
+    GlobalTableGlobalSecondaryIndexSettingsUpdate
+]
+GlobalTableList = List[GlobalTable]
+
+
+class ImportSummary(TypedDict, total=False):
+    ImportArn: Optional[ImportArn]
+    ImportStatus: Optional[ImportStatus]
+    TableArn: Optional[TableArn]
+    S3BucketSource: Optional[S3BucketSource]
+    CloudWatchLogGroupArn: Optional[CloudWatchLogGroupArn]
+    InputFormat: Optional[InputFormat]
+    StartTime: Optional[ImportStartTime]
+    EndTime: Optional[ImportEndTime]
+
+
+ImportSummaryList = List[ImportSummary]
+
+
+class ImportTableInput(ServiceRequest):
+    ClientToken: Optional[ClientToken]
+    S3BucketSource: S3BucketSource
+    InputFormat: InputFormat
+    InputFormatOptions: Optional[InputFormatOptions]
+    InputCompressionType: Optional[InputCompressionType]
+    TableCreationParameters: TableCreationParameters
+
+
+class ImportTableOutput(TypedDict, total=False):
+    ImportTableDescription: ImportTableDescription
+
+
+KeyConditions = Dict[AttributeName, Condition]
+
+
+class KinesisStreamingDestinationInput(ServiceRequest):
+    TableName: TableArn
+    StreamArn: StreamArn
+    EnableKinesisStreamingConfiguration: Optional[EnableKinesisStreamingConfiguration]
+
+
+class KinesisStreamingDestinationOutput(TypedDict, total=False):
+    TableName: Optional[TableName]
+    StreamArn: Optional[StreamArn]
+    DestinationStatus: Optional[DestinationStatus]
+    EnableKinesisStreamingConfiguration: Optional[EnableKinesisStreamingConfiguration]
+
+
+TimeRangeUpperBound = datetime
+TimeRangeLowerBound = datetime
+
+
+class ListBackupsInput(ServiceRequest):
+    TableName: Optional[TableArn]
+    Limit: Optional[BackupsInputLimit]
+    TimeRangeLowerBound: Optional[TimeRangeLowerBound]
+    TimeRangeUpperBound: Optional[TimeRangeUpperBound]
+    ExclusiveStartBackupArn: Optional[BackupArn]
+    BackupType: Optional[BackupTypeFilter]
+
+
+class ListBackupsOutput(TypedDict, total=False):
+    BackupSummaries: Optional[BackupSummaries]
+    LastEvaluatedBackupArn: Optional[BackupArn]
+
+
+class ListContributorInsightsInput(ServiceRequest):
+    TableName: Optional[TableArn]
+    NextToken: Optional[NextTokenString]
+    MaxResults: Optional[ListContributorInsightsLimit]
+
+
+class ListContributorInsightsOutput(TypedDict, total=False):
+    ContributorInsightsSummaries: Optional[ContributorInsightsSummaries]
+    NextToken: Optional[NextTokenString]
+
+
+class ListExportsInput(ServiceRequest):
+    TableArn: Optional[TableArn]
+    MaxResults: Optional[ListExportsMaxLimit]
+    NextToken: Optional[ExportNextToken]
+
+
+class ListExportsOutput(TypedDict, total=False):
+    ExportSummaries: Optional[ExportSummaries]
+    NextToken: Optional[ExportNextToken]
+
+
+class ListGlobalTablesInput(ServiceRequest):
+    ExclusiveStartGlobalTableName: Optional[TableName]
+    Limit: Optional[PositiveIntegerObject]
+    RegionName: Optional[RegionName]
+
+
+class ListGlobalTablesOutput(TypedDict, total=False):
+    GlobalTables: Optional[GlobalTableList]
+    LastEvaluatedGlobalTableName: Optional[TableName]
+
+
+class ListImportsInput(ServiceRequest):
+    TableArn: Optional[TableArn]
+    PageSize: Optional[ListImportsMaxLimit]
+    NextToken: Optional[ImportNextToken]
+
+
+class ListImportsOutput(TypedDict, total=False):
+    ImportSummaryList: Optional[ImportSummaryList]
+    NextToken: Optional[ImportNextToken]
+
+
+class ListTablesInput(ServiceRequest):
+    ExclusiveStartTableName: Optional[TableName]
+    Limit: Optional[ListTablesInputLimit]
+
+
+TableNameList = List[TableName]
+
+
+class ListTablesOutput(TypedDict, total=False):
+    TableNames: Optional[TableNameList]
+    LastEvaluatedTableName: Optional[TableName]
+
+
+class ListTagsOfResourceInput(ServiceRequest):
+    ResourceArn: ResourceArnString
+    NextToken: Optional[NextTokenString]
+
+
+class ListTagsOfResourceOutput(TypedDict, total=False):
+    Tags: Optional[TagList]
+    NextToken: Optional[NextTokenString]
+
+
+class PointInTimeRecoverySpecification(TypedDict, total=False):
+    PointInTimeRecoveryEnabled: BooleanObject
+    RecoveryPeriodInDays: Optional[RecoveryPeriodInDays]
+
+
+class Put(TypedDict, total=False):
+    Item: PutItemInputAttributeMap
+    TableName: TableArn
+    ConditionExpression: Optional[ConditionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+class PutItemInput(ServiceRequest):
+    TableName: TableArn
+    Item: PutItemInputAttributeMap
+    Expected: Optional[ExpectedAttributeMap]
+    ReturnValues: Optional[ReturnValue]
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    ReturnItemCollectionMetrics: Optional[ReturnItemCollectionMetrics]
+    ConditionalOperator: Optional[ConditionalOperator]
+    ConditionExpression: Optional[ConditionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+class PutItemOutput(TypedDict, total=False):
+    Attributes: Optional[AttributeMap]
+    ConsumedCapacity: Optional[ConsumedCapacity]
+    ItemCollectionMetrics: Optional[ItemCollectionMetrics]
+
+
+class PutResourcePolicyInput(ServiceRequest):
+    ResourceArn: ResourceArnString
+    Policy: ResourcePolicy
+    ExpectedRevisionId: Optional[PolicyRevisionId]
+    ConfirmRemoveSelfResourceAccess: Optional[ConfirmRemoveSelfResourceAccess]
+
+
+class PutResourcePolicyOutput(TypedDict, total=False):
+    RevisionId: Optional[PolicyRevisionId]
+
+
+class QueryInput(ServiceRequest):
+    TableName: TableArn
+    IndexName: Optional[IndexName]
+    Select: Optional[Select]
+    AttributesToGet: Optional[AttributeNameList]
+    Limit: Optional[PositiveIntegerObject]
+    ConsistentRead: Optional[ConsistentRead]
+    KeyConditions: Optional[KeyConditions]
+    QueryFilter: Optional[FilterConditionMap]
+    ConditionalOperator: Optional[ConditionalOperator]
+    ScanIndexForward: Optional[BooleanObject]
+    ExclusiveStartKey: Optional[Key]
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    ProjectionExpression: Optional[ProjectionExpression]
+    FilterExpression: Optional[ConditionExpression]
+    KeyConditionExpression: Optional[KeyExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+
+
+class QueryOutput(TypedDict, total=False):
+    Items: Optional[ItemList]
+    Count: Optional[Integer]
+    ScannedCount: Optional[Integer]
+    LastEvaluatedKey: Optional[Key]
+    ConsumedCapacity: Optional[ConsumedCapacity]
+
+
+class ReplicaGlobalSecondaryIndexAutoScalingUpdate(TypedDict, total=False):
+    IndexName: Optional[IndexName]
+    ProvisionedReadCapacityAutoScalingUpdate: Optional[AutoScalingSettingsUpdate]
+
+
+ReplicaGlobalSecondaryIndexAutoScalingUpdateList = List[
+    ReplicaGlobalSecondaryIndexAutoScalingUpdate
+]
+
+
+class ReplicaAutoScalingUpdate(TypedDict, total=False):
+    RegionName: RegionName
+    ReplicaGlobalSecondaryIndexUpdates: Optional[ReplicaGlobalSecondaryIndexAutoScalingUpdateList]
+    ReplicaProvisionedReadCapacityAutoScalingUpdate: Optional[AutoScalingSettingsUpdate]
+
+
+ReplicaAutoScalingUpdateList = List[ReplicaAutoScalingUpdate]
+
+
+class ReplicaGlobalSecondaryIndexSettingsUpdate(TypedDict, total=False):
+    IndexName: IndexName
+    ProvisionedReadCapacityUnits: Optional[PositiveLongObject]
+    ProvisionedReadCapacityAutoScalingSettingsUpdate: Optional[AutoScalingSettingsUpdate]
+
+
+ReplicaGlobalSecondaryIndexSettingsUpdateList = List[ReplicaGlobalSecondaryIndexSettingsUpdate]
+
+
+class ReplicaSettingsUpdate(TypedDict, total=False):
+    RegionName: RegionName
+    ReplicaProvisionedReadCapacityUnits: Optional[PositiveLongObject]
+    ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate: Optional[AutoScalingSettingsUpdate]
+    ReplicaGlobalSecondaryIndexSettingsUpdate: Optional[
+        ReplicaGlobalSecondaryIndexSettingsUpdateList
+    ]
+    ReplicaTableClass: Optional[TableClass]
+
+
+ReplicaSettingsUpdateList = List[ReplicaSettingsUpdate]
+
+
+class ReplicaUpdate(TypedDict, total=False):
+    Create: Optional[CreateReplicaAction]
+    Delete: Optional[DeleteReplicaAction]
+
+
+ReplicaUpdateList = List[ReplicaUpdate]
+
+
+class UpdateReplicationGroupMemberAction(TypedDict, total=False):
+    RegionName: RegionName
+    KMSMasterKeyId: Optional[KMSMasterKeyId]
+    ProvisionedThroughputOverride: Optional[ProvisionedThroughputOverride]
+    OnDemandThroughputOverride: Optional[OnDemandThroughputOverride]
+    GlobalSecondaryIndexes: Optional[ReplicaGlobalSecondaryIndexList]
+    TableClassOverride: Optional[TableClass]
+
+
+class ReplicationGroupUpdate(TypedDict, total=False):
+    Create: Optional[CreateReplicationGroupMemberAction]
+    Update: Optional[UpdateReplicationGroupMemberAction]
+    Delete: Optional[DeleteReplicationGroupMemberAction]
+
+
+ReplicationGroupUpdateList = List[ReplicationGroupUpdate]
+
+
+class RestoreTableFromBackupInput(ServiceRequest):
+    TargetTableName: TableName
+    BackupArn: BackupArn
+    BillingModeOverride: Optional[BillingMode]
+    GlobalSecondaryIndexOverride: Optional[GlobalSecondaryIndexList]
+    LocalSecondaryIndexOverride: Optional[LocalSecondaryIndexList]
+    ProvisionedThroughputOverride: Optional[ProvisionedThroughput]
+    OnDemandThroughputOverride: Optional[OnDemandThroughput]
+    SSESpecificationOverride: Optional[SSESpecification]
+
+
+class RestoreTableFromBackupOutput(TypedDict, total=False):
+    TableDescription: Optional[TableDescription]
+
+
+class RestoreTableToPointInTimeInput(ServiceRequest):
+    SourceTableArn: Optional[TableArn]
+    SourceTableName: Optional[TableName]
+    TargetTableName: TableName
+    UseLatestRestorableTime: Optional[BooleanObject]
+    RestoreDateTime: Optional[Date]
+    BillingModeOverride: Optional[BillingMode]
+    GlobalSecondaryIndexOverride: Optional[GlobalSecondaryIndexList]
+    LocalSecondaryIndexOverride: Optional[LocalSecondaryIndexList]
+    ProvisionedThroughputOverride: Optional[ProvisionedThroughput]
+    OnDemandThroughputOverride: Optional[OnDemandThroughput]
+    SSESpecificationOverride: Optional[SSESpecification]
+
+
+class RestoreTableToPointInTimeOutput(TypedDict, total=False):
+    TableDescription: Optional[TableDescription]
+
+
+class ScanInput(ServiceRequest):
+    TableName: TableArn
+    IndexName: Optional[IndexName]
+    AttributesToGet: Optional[AttributeNameList]
+    Limit: Optional[PositiveIntegerObject]
+    Select: Optional[Select]
+    ScanFilter: Optional[FilterConditionMap]
+    ConditionalOperator: Optional[ConditionalOperator]
+    ExclusiveStartKey: Optional[Key]
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    TotalSegments: Optional[ScanTotalSegments]
+    Segment: Optional[ScanSegment]
+    ProjectionExpression: Optional[ProjectionExpression]
+    FilterExpression: Optional[ConditionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+    ConsistentRead: Optional[ConsistentRead]
+
+
+class ScanOutput(TypedDict, total=False):
+    Items: Optional[ItemList]
+    Count: Optional[Integer]
+    ScannedCount: Optional[Integer]
+    LastEvaluatedKey: Optional[Key]
+    ConsumedCapacity: Optional[ConsumedCapacity]
+
+
+TagKeyList = List[TagKeyString]
+
+
+class TagResourceInput(ServiceRequest):
+    ResourceArn: ResourceArnString
+    Tags: TagList
+
+
+class TimeToLiveSpecification(TypedDict, total=False):
+    Enabled: TimeToLiveEnabled
+    AttributeName: TimeToLiveAttributeName
+
+
+class TransactGetItem(TypedDict, total=False):
+    Get: Get
+
+
+TransactGetItemList = List[TransactGetItem]
+
+
+class TransactGetItemsInput(ServiceRequest):
+    TransactItems: TransactGetItemList
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+
+
+class TransactGetItemsOutput(TypedDict, total=False):
+    ConsumedCapacity: Optional[ConsumedCapacityMultiple]
+    Responses: Optional[ItemResponseList]
+
+
+class Update(TypedDict, total=False):
+    Key: Key
+    UpdateExpression: UpdateExpression
+    TableName: TableArn
+    ConditionExpression: Optional[ConditionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+class TransactWriteItem(TypedDict, total=False):
+    ConditionCheck: Optional[ConditionCheck]
+    Put: Optional[Put]
+    Delete: Optional[Delete]
+    Update: Optional[Update]
+
+
+TransactWriteItemList = List[TransactWriteItem]
+
+
+class TransactWriteItemsInput(ServiceRequest):
+    TransactItems: TransactWriteItemList
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    ReturnItemCollectionMetrics: Optional[ReturnItemCollectionMetrics]
+    ClientRequestToken: Optional[ClientRequestToken]
+
+
+class TransactWriteItemsOutput(TypedDict, total=False):
+    ConsumedCapacity: Optional[ConsumedCapacityMultiple]
+    ItemCollectionMetrics: Optional[ItemCollectionMetricsPerTable]
+
+
+class UntagResourceInput(ServiceRequest):
+    ResourceArn: ResourceArnString
+    TagKeys: TagKeyList
+
+
+class UpdateContinuousBackupsInput(ServiceRequest):
+    TableName: TableArn
+    PointInTimeRecoverySpecification: PointInTimeRecoverySpecification
+
+
+class UpdateContinuousBackupsOutput(TypedDict, total=False):
+    ContinuousBackupsDescription: Optional[ContinuousBackupsDescription]
+
+
+class UpdateContributorInsightsInput(ServiceRequest):
+    TableName: TableArn
+    IndexName: Optional[IndexName]
+    ContributorInsightsAction: ContributorInsightsAction
+
+
+class UpdateContributorInsightsOutput(TypedDict, total=False):
+    TableName: Optional[TableName]
+    IndexName: Optional[IndexName]
+    ContributorInsightsStatus: Optional[ContributorInsightsStatus]
+
+
+class UpdateGlobalTableInput(ServiceRequest):
+    GlobalTableName: TableName
+    ReplicaUpdates: ReplicaUpdateList
+
+
+class UpdateGlobalTableOutput(TypedDict, total=False):
+    GlobalTableDescription: Optional[GlobalTableDescription]
+
+
+class UpdateGlobalTableSettingsInput(ServiceRequest):
+    GlobalTableName: TableName
+    GlobalTableBillingMode: Optional[BillingMode]
+    GlobalTableProvisionedWriteCapacityUnits: Optional[PositiveLongObject]
+    GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate: Optional[
+        AutoScalingSettingsUpdate
+    ]
+    GlobalTableGlobalSecondaryIndexSettingsUpdate: Optional[
+        GlobalTableGlobalSecondaryIndexSettingsUpdateList
+    ]
+    ReplicaSettingsUpdate: Optional[ReplicaSettingsUpdateList]
+
+
+class UpdateGlobalTableSettingsOutput(TypedDict, total=False):
+    GlobalTableName: Optional[TableName]
+    ReplicaSettings: Optional[ReplicaSettingsDescriptionList]
+
+
+class UpdateItemInput(ServiceRequest):
+    TableName: TableArn
+    Key: Key
+    AttributeUpdates: Optional[AttributeUpdates]
+    Expected: Optional[ExpectedAttributeMap]
+    ConditionalOperator: Optional[ConditionalOperator]
+    ReturnValues: Optional[ReturnValue]
+    ReturnConsumedCapacity: Optional[ReturnConsumedCapacity]
+    ReturnItemCollectionMetrics: Optional[ReturnItemCollectionMetrics]
+    UpdateExpression: Optional[UpdateExpression]
+    ConditionExpression: Optional[ConditionExpression]
+    ExpressionAttributeNames: Optional[ExpressionAttributeNameMap]
+    ExpressionAttributeValues: Optional[ExpressionAttributeValueMap]
+    ReturnValuesOnConditionCheckFailure: Optional[ReturnValuesOnConditionCheckFailure]
+
+
+class UpdateItemOutput(TypedDict, total=False):
+    Attributes: Optional[AttributeMap]
+    ConsumedCapacity: Optional[ConsumedCapacity]
+    ItemCollectionMetrics: Optional[ItemCollectionMetrics]
+
+
+class UpdateKinesisStreamingConfiguration(TypedDict, total=False):
+    ApproximateCreationDateTimePrecision: Optional[ApproximateCreationDateTimePrecision]
+
+
+class UpdateKinesisStreamingDestinationInput(ServiceRequest):
+    TableName: TableArn
+    StreamArn: StreamArn
+    UpdateKinesisStreamingConfiguration: Optional[UpdateKinesisStreamingConfiguration]
+
+
+class UpdateKinesisStreamingDestinationOutput(TypedDict, total=False):
+    TableName: Optional[TableName]
+    StreamArn: Optional[StreamArn]
+    DestinationStatus: Optional[DestinationStatus]
+    UpdateKinesisStreamingConfiguration: Optional[UpdateKinesisStreamingConfiguration]
+
+
+class UpdateTableInput(ServiceRequest):
+    AttributeDefinitions: Optional[AttributeDefinitions]
+    TableName: TableArn
+    BillingMode: Optional[BillingMode]
+    ProvisionedThroughput: Optional[ProvisionedThroughput]
+    GlobalSecondaryIndexUpdates: Optional[GlobalSecondaryIndexUpdateList]
+    StreamSpecification: Optional[StreamSpecification]
+    SSESpecification: Optional[SSESpecification]
+    ReplicaUpdates: Optional[ReplicationGroupUpdateList]
+    TableClass: Optional[TableClass]
+    DeletionProtectionEnabled: Optional[DeletionProtectionEnabled]
+    MultiRegionConsistency: Optional[MultiRegionConsistency]
+    OnDemandThroughput: Optional[OnDemandThroughput]
+    WarmThroughput: Optional[WarmThroughput]
+
+
+class UpdateTableOutput(TypedDict, total=False):
+    TableDescription: Optional[TableDescription]
+
+
+class UpdateTableReplicaAutoScalingInput(ServiceRequest):
+    GlobalSecondaryIndexUpdates: Optional[GlobalSecondaryIndexAutoScalingUpdateList]
+    TableName: TableArn
+    ProvisionedWriteCapacityAutoScalingUpdate: Optional[AutoScalingSettingsUpdate]
+    ReplicaUpdates: Optional[ReplicaAutoScalingUpdateList]
+
+
+class UpdateTableReplicaAutoScalingOutput(TypedDict, total=False):
+    TableAutoScalingDescription: Optional[TableAutoScalingDescription]
+
+
+class UpdateTimeToLiveInput(ServiceRequest):
+    TableName: TableArn
+    TimeToLiveSpecification: TimeToLiveSpecification
+
+
+class UpdateTimeToLiveOutput(TypedDict, total=False):
+    TimeToLiveSpecification: Optional[TimeToLiveSpecification]
+
+
+class DynamodbApi:
+    service = "dynamodb"
+    version = "2012-08-10"
+
+    @handler("BatchExecuteStatement")
+    def batch_execute_statement(
+        self,
+        context: RequestContext,
+        statements: PartiQLBatchRequest,
+        return_consumed_capacity: ReturnConsumedCapacity | None = None,
+        **kwargs,
+    ) -> BatchExecuteStatementOutput:
+        raise NotImplementedError
+
+    @handler("BatchGetItem")
+    def batch_get_item(
+        self,
+        context: RequestContext,
+        request_items: BatchGetRequestMap,
+        return_consumed_capacity: ReturnConsumedCapacity | None = None,
+        **kwargs,
+    ) -> BatchGetItemOutput:
+        raise NotImplementedError
+
+    @handler("BatchWriteItem")
+    def batch_write_item(
+        self,
+        context: RequestContext,
+        request_items: BatchWriteItemRequestMap,
+        return_consumed_capacity: ReturnConsumedCapacity | None = None,
+        return_item_collection_metrics: ReturnItemCollectionMetrics | None = None,
+        **kwargs,
+    ) -> BatchWriteItemOutput:
+        raise NotImplementedError
+
+    @handler("CreateBackup")
+    def create_backup(
+        self, context: RequestContext, table_name: TableArn, backup_name: BackupName, **kwargs
+    ) -> CreateBackupOutput:
+        raise NotImplementedError
+
+    @handler("CreateGlobalTable")
+    def create_global_table(
+        self,
+        context: RequestContext,
+        global_table_name: TableName,
+        replication_group: ReplicaList,
+        **kwargs,
+    ) -> CreateGlobalTableOutput:
+        raise NotImplementedError
+
+    @handler("CreateTable")
+    def create_table(
+        self,
+        context: RequestContext,
+        attribute_definitions: AttributeDefinitions,
+        table_name: TableArn,
+        key_schema: KeySchema,
+        local_secondary_indexes: LocalSecondaryIndexList | None = None,
+        global_secondary_indexes: GlobalSecondaryIndexList | None = None,
+        billing_mode: BillingMode | None = None,
+        provisioned_throughput: ProvisionedThroughput | None = None,
+        stream_specification: StreamSpecification | None = None,
+        sse_specification: SSESpecification | None = None,
+        tags: TagList | None = None,
+        table_class: TableClass | None = None,
+        deletion_protection_enabled: DeletionProtectionEnabled | None = None,
+        warm_throughput: WarmThroughput | None = None,
+        resource_policy: ResourcePolicy | None = None,
+        on_demand_throughput: OnDemandThroughput | None = None,
+        **kwargs,
+    ) -> CreateTableOutput:
+        raise NotImplementedError
+
+    @handler("DeleteBackup")
+    def delete_backup(
+        self, context: RequestContext, backup_arn: BackupArn, **kwargs
+    ) -> DeleteBackupOutput:
+        raise NotImplementedError
+
+    @handler("DeleteItem")
+    def delete_item(
+        self,
+        context: RequestContext,
+        table_name: TableArn,
+        key: Key,
+        expected: ExpectedAttributeMap | None = None,
+        conditional_operator: ConditionalOperator | None = None,
+        return_values: ReturnValue | None = None,
+        return_consumed_capacity: ReturnConsumedCapacity | None = None,
+        return_item_collection_metrics: ReturnItemCollectionMetrics | None = None,
+        condition_expression: ConditionExpression | None = None,
expression_attribute_names: ExpressionAttributeNameMap | None = None, + expression_attribute_values: ExpressionAttributeValueMap | None = None, + return_values_on_condition_check_failure: ReturnValuesOnConditionCheckFailure | None = None, + **kwargs, + ) -> DeleteItemOutput: + raise NotImplementedError + + @handler("DeleteResourcePolicy") + def delete_resource_policy( + self, + context: RequestContext, + resource_arn: ResourceArnString, + expected_revision_id: PolicyRevisionId | None = None, + **kwargs, + ) -> DeleteResourcePolicyOutput: + raise NotImplementedError + + @handler("DeleteTable") + def delete_table( + self, context: RequestContext, table_name: TableArn, **kwargs + ) -> DeleteTableOutput: + raise NotImplementedError + + @handler("DescribeBackup") + def describe_backup( + self, context: RequestContext, backup_arn: BackupArn, **kwargs + ) -> DescribeBackupOutput: + raise NotImplementedError + + @handler("DescribeContinuousBackups") + def describe_continuous_backups( + self, context: RequestContext, table_name: TableArn, **kwargs + ) -> DescribeContinuousBackupsOutput: + raise NotImplementedError + + @handler("DescribeContributorInsights") + def describe_contributor_insights( + self, + context: RequestContext, + table_name: TableArn, + index_name: IndexName | None = None, + **kwargs, + ) -> DescribeContributorInsightsOutput: + raise NotImplementedError + + @handler("DescribeEndpoints") + def describe_endpoints(self, context: RequestContext, **kwargs) -> DescribeEndpointsResponse: + raise NotImplementedError + + @handler("DescribeExport") + def describe_export( + self, context: RequestContext, export_arn: ExportArn, **kwargs + ) -> DescribeExportOutput: + raise NotImplementedError + + @handler("DescribeGlobalTable") + def describe_global_table( + self, context: RequestContext, global_table_name: TableName, **kwargs + ) -> DescribeGlobalTableOutput: + raise NotImplementedError + + @handler("DescribeGlobalTableSettings") + def describe_global_table_settings( + self, context: RequestContext, global_table_name: TableName, **kwargs + ) -> DescribeGlobalTableSettingsOutput: + raise NotImplementedError + + @handler("DescribeImport") + def describe_import( + self, context: RequestContext, import_arn: ImportArn, **kwargs + ) -> DescribeImportOutput: + raise NotImplementedError + + @handler("DescribeKinesisStreamingDestination") + def describe_kinesis_streaming_destination( + self, context: RequestContext, table_name: TableArn, **kwargs + ) -> DescribeKinesisStreamingDestinationOutput: + raise NotImplementedError + + @handler("DescribeLimits") + def describe_limits(self, context: RequestContext, **kwargs) -> DescribeLimitsOutput: + raise NotImplementedError + + @handler("DescribeTable") + def describe_table( + self, context: RequestContext, table_name: TableArn, **kwargs + ) -> DescribeTableOutput: + raise NotImplementedError + + @handler("DescribeTableReplicaAutoScaling") + def describe_table_replica_auto_scaling( + self, context: RequestContext, table_name: TableArn, **kwargs + ) -> DescribeTableReplicaAutoScalingOutput: + raise NotImplementedError + + @handler("DescribeTimeToLive") + def describe_time_to_live( + self, context: RequestContext, table_name: TableArn, **kwargs + ) -> DescribeTimeToLiveOutput: + raise NotImplementedError + + @handler("DisableKinesisStreamingDestination") + def disable_kinesis_streaming_destination( + self, + context: RequestContext, + table_name: TableArn, + stream_arn: StreamArn, + enable_kinesis_streaming_configuration: EnableKinesisStreamingConfiguration | 
None = None, + **kwargs, + ) -> KinesisStreamingDestinationOutput: + raise NotImplementedError + + @handler("EnableKinesisStreamingDestination") + def enable_kinesis_streaming_destination( + self, + context: RequestContext, + table_name: TableArn, + stream_arn: StreamArn, + enable_kinesis_streaming_configuration: EnableKinesisStreamingConfiguration | None = None, + **kwargs, + ) -> KinesisStreamingDestinationOutput: + raise NotImplementedError + + @handler("ExecuteStatement") + def execute_statement( + self, + context: RequestContext, + statement: PartiQLStatement, + parameters: PreparedStatementParameters | None = None, + consistent_read: ConsistentRead | None = None, + next_token: PartiQLNextToken | None = None, + return_consumed_capacity: ReturnConsumedCapacity | None = None, + limit: PositiveIntegerObject | None = None, + return_values_on_condition_check_failure: ReturnValuesOnConditionCheckFailure | None = None, + **kwargs, + ) -> ExecuteStatementOutput: + raise NotImplementedError + + @handler("ExecuteTransaction") + def execute_transaction( + self, + context: RequestContext, + transact_statements: ParameterizedStatements, + client_request_token: ClientRequestToken | None = None, + return_consumed_capacity: ReturnConsumedCapacity | None = None, + **kwargs, + ) -> ExecuteTransactionOutput: + raise NotImplementedError + + @handler("ExportTableToPointInTime") + def export_table_to_point_in_time( + self, + context: RequestContext, + table_arn: TableArn, + s3_bucket: S3Bucket, + export_time: ExportTime | None = None, + client_token: ClientToken | None = None, + s3_bucket_owner: S3BucketOwner | None = None, + s3_prefix: S3Prefix | None = None, + s3_sse_algorithm: S3SseAlgorithm | None = None, + s3_sse_kms_key_id: S3SseKmsKeyId | None = None, + export_format: ExportFormat | None = None, + export_type: ExportType | None = None, + incremental_export_specification: IncrementalExportSpecification | None = None, + **kwargs, + ) -> ExportTableToPointInTimeOutput: + raise NotImplementedError + + @handler("GetItem") + def get_item( + self, + context: RequestContext, + table_name: TableArn, + key: Key, + attributes_to_get: AttributeNameList | None = None, + consistent_read: ConsistentRead | None = None, + return_consumed_capacity: ReturnConsumedCapacity | None = None, + projection_expression: ProjectionExpression | None = None, + expression_attribute_names: ExpressionAttributeNameMap | None = None, + **kwargs, + ) -> GetItemOutput: + raise NotImplementedError + + @handler("GetResourcePolicy") + def get_resource_policy( + self, context: RequestContext, resource_arn: ResourceArnString, **kwargs + ) -> GetResourcePolicyOutput: + raise NotImplementedError + + @handler("ImportTable") + def import_table( + self, + context: RequestContext, + s3_bucket_source: S3BucketSource, + input_format: InputFormat, + table_creation_parameters: TableCreationParameters, + client_token: ClientToken | None = None, + input_format_options: InputFormatOptions | None = None, + input_compression_type: InputCompressionType | None = None, + **kwargs, + ) -> ImportTableOutput: + raise NotImplementedError + + @handler("ListBackups") + def list_backups( + self, + context: RequestContext, + table_name: TableArn | None = None, + limit: BackupsInputLimit | None = None, + time_range_lower_bound: TimeRangeLowerBound | None = None, + time_range_upper_bound: TimeRangeUpperBound | None = None, + exclusive_start_backup_arn: BackupArn | None = None, + backup_type: BackupTypeFilter | None = None, + **kwargs, + ) -> ListBackupsOutput: + raise 
NotImplementedError + + @handler("ListContributorInsights") + def list_contributor_insights( + self, + context: RequestContext, + table_name: TableArn | None = None, + next_token: NextTokenString | None = None, + max_results: ListContributorInsightsLimit | None = None, + **kwargs, + ) -> ListContributorInsightsOutput: + raise NotImplementedError + + @handler("ListExports") + def list_exports( + self, + context: RequestContext, + table_arn: TableArn | None = None, + max_results: ListExportsMaxLimit | None = None, + next_token: ExportNextToken | None = None, + **kwargs, + ) -> ListExportsOutput: + raise NotImplementedError + + @handler("ListGlobalTables") + def list_global_tables( + self, + context: RequestContext, + exclusive_start_global_table_name: TableName | None = None, + limit: PositiveIntegerObject | None = None, + region_name: RegionName | None = None, + **kwargs, + ) -> ListGlobalTablesOutput: + raise NotImplementedError + + @handler("ListImports") + def list_imports( + self, + context: RequestContext, + table_arn: TableArn | None = None, + page_size: ListImportsMaxLimit | None = None, + next_token: ImportNextToken | None = None, + **kwargs, + ) -> ListImportsOutput: + raise NotImplementedError + + @handler("ListTables") + def list_tables( + self, + context: RequestContext, + exclusive_start_table_name: TableName | None = None, + limit: ListTablesInputLimit | None = None, + **kwargs, + ) -> ListTablesOutput: + raise NotImplementedError + + @handler("ListTagsOfResource") + def list_tags_of_resource( + self, + context: RequestContext, + resource_arn: ResourceArnString, + next_token: NextTokenString | None = None, + **kwargs, + ) -> ListTagsOfResourceOutput: + raise NotImplementedError + + @handler("PutItem") + def put_item( + self, + context: RequestContext, + table_name: TableArn, + item: PutItemInputAttributeMap, + expected: ExpectedAttributeMap | None = None, + return_values: ReturnValue | None = None, + return_consumed_capacity: ReturnConsumedCapacity | None = None, + return_item_collection_metrics: ReturnItemCollectionMetrics | None = None, + conditional_operator: ConditionalOperator | None = None, + condition_expression: ConditionExpression | None = None, + expression_attribute_names: ExpressionAttributeNameMap | None = None, + expression_attribute_values: ExpressionAttributeValueMap | None = None, + return_values_on_condition_check_failure: ReturnValuesOnConditionCheckFailure | None = None, + **kwargs, + ) -> PutItemOutput: + raise NotImplementedError + + @handler("PutResourcePolicy") + def put_resource_policy( + self, + context: RequestContext, + resource_arn: ResourceArnString, + policy: ResourcePolicy, + expected_revision_id: PolicyRevisionId | None = None, + confirm_remove_self_resource_access: ConfirmRemoveSelfResourceAccess | None = None, + **kwargs, + ) -> PutResourcePolicyOutput: + raise NotImplementedError + + @handler("Query") + def query( + self, + context: RequestContext, + table_name: TableArn, + index_name: IndexName | None = None, + select: Select | None = None, + attributes_to_get: AttributeNameList | None = None, + limit: PositiveIntegerObject | None = None, + consistent_read: ConsistentRead | None = None, + key_conditions: KeyConditions | None = None, + query_filter: FilterConditionMap | None = None, + conditional_operator: ConditionalOperator | None = None, + scan_index_forward: BooleanObject | None = None, + exclusive_start_key: Key | None = None, + return_consumed_capacity: ReturnConsumedCapacity | None = None, + projection_expression: ProjectionExpression 
| None = None, + filter_expression: ConditionExpression | None = None, + key_condition_expression: KeyExpression | None = None, + expression_attribute_names: ExpressionAttributeNameMap | None = None, + expression_attribute_values: ExpressionAttributeValueMap | None = None, + **kwargs, + ) -> QueryOutput: + raise NotImplementedError + + @handler("RestoreTableFromBackup") + def restore_table_from_backup( + self, + context: RequestContext, + target_table_name: TableName, + backup_arn: BackupArn, + billing_mode_override: BillingMode | None = None, + global_secondary_index_override: GlobalSecondaryIndexList | None = None, + local_secondary_index_override: LocalSecondaryIndexList | None = None, + provisioned_throughput_override: ProvisionedThroughput | None = None, + on_demand_throughput_override: OnDemandThroughput | None = None, + sse_specification_override: SSESpecification | None = None, + **kwargs, + ) -> RestoreTableFromBackupOutput: + raise NotImplementedError + + @handler("RestoreTableToPointInTime") + def restore_table_to_point_in_time( + self, + context: RequestContext, + target_table_name: TableName, + source_table_arn: TableArn | None = None, + source_table_name: TableName | None = None, + use_latest_restorable_time: BooleanObject | None = None, + restore_date_time: Date | None = None, + billing_mode_override: BillingMode | None = None, + global_secondary_index_override: GlobalSecondaryIndexList | None = None, + local_secondary_index_override: LocalSecondaryIndexList | None = None, + provisioned_throughput_override: ProvisionedThroughput | None = None, + on_demand_throughput_override: OnDemandThroughput | None = None, + sse_specification_override: SSESpecification | None = None, + **kwargs, + ) -> RestoreTableToPointInTimeOutput: + raise NotImplementedError + + @handler("Scan") + def scan( + self, + context: RequestContext, + table_name: TableArn, + index_name: IndexName | None = None, + attributes_to_get: AttributeNameList | None = None, + limit: PositiveIntegerObject | None = None, + select: Select | None = None, + scan_filter: FilterConditionMap | None = None, + conditional_operator: ConditionalOperator | None = None, + exclusive_start_key: Key | None = None, + return_consumed_capacity: ReturnConsumedCapacity | None = None, + total_segments: ScanTotalSegments | None = None, + segment: ScanSegment | None = None, + projection_expression: ProjectionExpression | None = None, + filter_expression: ConditionExpression | None = None, + expression_attribute_names: ExpressionAttributeNameMap | None = None, + expression_attribute_values: ExpressionAttributeValueMap | None = None, + consistent_read: ConsistentRead | None = None, + **kwargs, + ) -> ScanOutput: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: ResourceArnString, tags: TagList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("TransactGetItems") + def transact_get_items( + self, + context: RequestContext, + transact_items: TransactGetItemList, + return_consumed_capacity: ReturnConsumedCapacity | None = None, + **kwargs, + ) -> TransactGetItemsOutput: + raise NotImplementedError + + @handler("TransactWriteItems") + def transact_write_items( + self, + context: RequestContext, + transact_items: TransactWriteItemList, + return_consumed_capacity: ReturnConsumedCapacity | None = None, + return_item_collection_metrics: ReturnItemCollectionMetrics | None = None, + client_request_token: ClientRequestToken | None = None, + **kwargs, + ) -> 
TransactWriteItemsOutput:
+        raise NotImplementedError
+
+    @handler("UntagResource")
+    def untag_resource(
+        self,
+        context: RequestContext,
+        resource_arn: ResourceArnString,
+        tag_keys: TagKeyList,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateContinuousBackups")
+    def update_continuous_backups(
+        self,
+        context: RequestContext,
+        table_name: TableArn,
+        point_in_time_recovery_specification: PointInTimeRecoverySpecification,
+        **kwargs,
+    ) -> UpdateContinuousBackupsOutput:
+        raise NotImplementedError
+
+    @handler("UpdateContributorInsights")
+    def update_contributor_insights(
+        self,
+        context: RequestContext,
+        table_name: TableArn,
+        contributor_insights_action: ContributorInsightsAction,
+        index_name: IndexName | None = None,
+        **kwargs,
+    ) -> UpdateContributorInsightsOutput:
+        raise NotImplementedError
+
+    @handler("UpdateGlobalTable")
+    def update_global_table(
+        self,
+        context: RequestContext,
+        global_table_name: TableName,
+        replica_updates: ReplicaUpdateList,
+        **kwargs,
+    ) -> UpdateGlobalTableOutput:
+        raise NotImplementedError
+
+    @handler("UpdateGlobalTableSettings")
+    def update_global_table_settings(
+        self,
+        context: RequestContext,
+        global_table_name: TableName,
+        global_table_billing_mode: BillingMode | None = None,
+        global_table_provisioned_write_capacity_units: PositiveLongObject | None = None,
+        global_table_provisioned_write_capacity_auto_scaling_settings_update: AutoScalingSettingsUpdate
+        | None = None,
+        global_table_global_secondary_index_settings_update: GlobalTableGlobalSecondaryIndexSettingsUpdateList
+        | None = None,
+        replica_settings_update: ReplicaSettingsUpdateList | None = None,
+        **kwargs,
+    ) -> UpdateGlobalTableSettingsOutput:
+        raise NotImplementedError
+
+    @handler("UpdateItem")
+    def update_item(
+        self,
+        context: RequestContext,
+        table_name: TableArn,
+        key: Key,
+        attribute_updates: AttributeUpdates | None = None,
+        expected: ExpectedAttributeMap | None = None,
+        conditional_operator: ConditionalOperator | None = None,
+        return_values: ReturnValue | None = None,
+        return_consumed_capacity: ReturnConsumedCapacity | None = None,
+        return_item_collection_metrics: ReturnItemCollectionMetrics | None = None,
+        update_expression: UpdateExpression | None = None,
+        condition_expression: ConditionExpression | None = None,
+        expression_attribute_names: ExpressionAttributeNameMap | None = None,
+        expression_attribute_values: ExpressionAttributeValueMap | None = None,
+        return_values_on_condition_check_failure: ReturnValuesOnConditionCheckFailure | None = None,
+        **kwargs,
+    ) -> UpdateItemOutput:
+        raise NotImplementedError
+
+    @handler("UpdateKinesisStreamingDestination")
+    def update_kinesis_streaming_destination(
+        self,
+        context: RequestContext,
+        table_name: TableArn,
+        stream_arn: StreamArn,
+        update_kinesis_streaming_configuration: UpdateKinesisStreamingConfiguration | None = None,
+        **kwargs,
+    ) -> UpdateKinesisStreamingDestinationOutput:
+        raise NotImplementedError
+
+    @handler("UpdateTable")
+    def update_table(
+        self,
+        context: RequestContext,
+        table_name: TableArn,
+        attribute_definitions: AttributeDefinitions | None = None,
+        billing_mode: BillingMode | None = None,
+        provisioned_throughput: ProvisionedThroughput | None = None,
+        global_secondary_index_updates: GlobalSecondaryIndexUpdateList | None = None,
+        stream_specification: StreamSpecification | None = None,
+        sse_specification: SSESpecification | None = None,
+        replica_updates: ReplicationGroupUpdateList | None = None,
+        table_class: TableClass | None = None,
+        deletion_protection_enabled: DeletionProtectionEnabled | None = None,
+        multi_region_consistency: MultiRegionConsistency | None = None,
+        on_demand_throughput: OnDemandThroughput | None = None,
+        warm_throughput: WarmThroughput | None = None,
+        **kwargs,
+    ) -> UpdateTableOutput:
+        raise NotImplementedError
+
+    @handler("UpdateTableReplicaAutoScaling")
+    def update_table_replica_auto_scaling(
+        self,
+        context: RequestContext,
+        table_name: TableArn,
+        global_secondary_index_updates: GlobalSecondaryIndexAutoScalingUpdateList | None = None,
+        provisioned_write_capacity_auto_scaling_update: AutoScalingSettingsUpdate | None = None,
+        replica_updates: ReplicaAutoScalingUpdateList | None = None,
+        **kwargs,
+    ) -> UpdateTableReplicaAutoScalingOutput:
+        raise NotImplementedError
+
+    @handler("UpdateTimeToLive")
+    def update_time_to_live(
+        self,
+        context: RequestContext,
+        table_name: TableArn,
+        time_to_live_specification: TimeToLiveSpecification,
+        **kwargs,
+    ) -> UpdateTimeToLiveOutput:
+        raise NotImplementedError
diff --git a/localstack-core/localstack/aws/api/dynamodbstreams/__init__.py b/localstack-core/localstack/aws/api/dynamodbstreams/__init__.py
new file mode 100644
index 0000000000000..a9ecabeff5864
--- /dev/null
+++ b/localstack-core/localstack/aws/api/dynamodbstreams/__init__.py
@@ -0,0 +1,270 @@
+from datetime import datetime
+from enum import StrEnum
+from typing import Dict, List, Optional, TypedDict
+
+from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
+
+AttributeName = str
+BooleanAttributeValue = bool
+ErrorMessage = str
+KeySchemaAttributeName = str
+NullAttributeValue = bool
+NumberAttributeValue = str
+PositiveIntegerObject = int
+SequenceNumber = str
+ShardId = str
+ShardIterator = str
+StreamArn = str
+String = str
+StringAttributeValue = str
+TableName = str
+
+
+class KeyType(StrEnum):
+    HASH = "HASH"
+    RANGE = "RANGE"
+
+
+class OperationType(StrEnum):
+    INSERT = "INSERT"
+    MODIFY = "MODIFY"
+    REMOVE = "REMOVE"
+
+
+class ShardIteratorType(StrEnum):
+    TRIM_HORIZON = "TRIM_HORIZON"
+    LATEST = "LATEST"
+    AT_SEQUENCE_NUMBER = "AT_SEQUENCE_NUMBER"
+    AFTER_SEQUENCE_NUMBER = "AFTER_SEQUENCE_NUMBER"
+
+
+class StreamStatus(StrEnum):
+    ENABLING = "ENABLING"
+    ENABLED = "ENABLED"
+    DISABLING = "DISABLING"
+    DISABLED = "DISABLED"
+
+
+class StreamViewType(StrEnum):
+    NEW_IMAGE = "NEW_IMAGE"
+    OLD_IMAGE = "OLD_IMAGE"
+    NEW_AND_OLD_IMAGES = "NEW_AND_OLD_IMAGES"
+    KEYS_ONLY = "KEYS_ONLY"
+
+
+class ExpiredIteratorException(ServiceException):
+    code: str = "ExpiredIteratorException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InternalServerError(ServiceException):
+    code: str = "InternalServerError"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class LimitExceededException(ServiceException):
+    code: str = "LimitExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ResourceNotFoundException(ServiceException):
+    code: str = "ResourceNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class TrimmedDataAccessException(ServiceException):
+    code: str = "TrimmedDataAccessException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class AttributeValue(TypedDict, total=False):
+    S: Optional["StringAttributeValue"]
+    N: Optional["NumberAttributeValue"]
+    B: Optional["BinaryAttributeValue"]
+    SS: Optional["StringSetAttributeValue"]
+    NS: Optional["NumberSetAttributeValue"]
+    BS: Optional["BinarySetAttributeValue"]
+    M: Optional["MapAttributeValue"]
+    L: Optional["ListAttributeValue"]
+    NULL: Optional["NullAttributeValue"]
+    BOOL: Optional["BooleanAttributeValue"]
+
+
+ListAttributeValue = List[AttributeValue]
+MapAttributeValue = Dict[AttributeName, AttributeValue]
+BinaryAttributeValue = bytes
+BinarySetAttributeValue = List[BinaryAttributeValue]
+NumberSetAttributeValue = List[NumberAttributeValue]
+StringSetAttributeValue = List[StringAttributeValue]
+AttributeMap = Dict[AttributeName, AttributeValue]
+Date = datetime
+
+
+class DescribeStreamInput(ServiceRequest):
+    StreamArn: StreamArn
+    Limit: Optional[PositiveIntegerObject]
+    ExclusiveStartShardId: Optional[ShardId]
+
+
+class SequenceNumberRange(TypedDict, total=False):
+    StartingSequenceNumber: Optional[SequenceNumber]
+    EndingSequenceNumber: Optional[SequenceNumber]
+
+
+class Shard(TypedDict, total=False):
+    ShardId: Optional[ShardId]
+    SequenceNumberRange: Optional[SequenceNumberRange]
+    ParentShardId: Optional[ShardId]
+
+
+ShardDescriptionList = List[Shard]
+
+
+class KeySchemaElement(TypedDict, total=False):
+    AttributeName: KeySchemaAttributeName
+    KeyType: KeyType
+
+
+KeySchema = List[KeySchemaElement]
+
+
+class StreamDescription(TypedDict, total=False):
+    StreamArn: Optional[StreamArn]
+    StreamLabel: Optional[String]
+    StreamStatus: Optional[StreamStatus]
+    StreamViewType: Optional[StreamViewType]
+    CreationRequestDateTime: Optional[Date]
+    TableName: Optional[TableName]
+    KeySchema: Optional[KeySchema]
+    Shards: Optional[ShardDescriptionList]
+    LastEvaluatedShardId: Optional[ShardId]
+
+
+class DescribeStreamOutput(TypedDict, total=False):
+    StreamDescription: Optional[StreamDescription]
+
+
+class GetRecordsInput(ServiceRequest):
+    ShardIterator: ShardIterator
+    Limit: Optional[PositiveIntegerObject]
+
+
+class Identity(TypedDict, total=False):
+    PrincipalId: Optional[String]
+    Type: Optional[String]
+
+
+PositiveLongObject = int
+
+
+class StreamRecord(TypedDict, total=False):
+    ApproximateCreationDateTime: Optional[Date]
+    Keys: Optional[AttributeMap]
+    NewImage: Optional[AttributeMap]
+    OldImage: Optional[AttributeMap]
+    SequenceNumber: Optional[SequenceNumber]
+    SizeBytes: Optional[PositiveLongObject]
+    StreamViewType: Optional[StreamViewType]
+
+
+class Record(TypedDict, total=False):
+    eventID: Optional[String]
+    eventName: Optional[OperationType]
+    eventVersion: Optional[String]
+    eventSource: Optional[String]
+    awsRegion: Optional[String]
+    dynamodb: Optional[StreamRecord]
+    userIdentity: Optional[Identity]
+
+
+RecordList = List[Record]
+
+
+class GetRecordsOutput(TypedDict, total=False):
+    Records: Optional[RecordList]
+    NextShardIterator: Optional[ShardIterator]
+
+
+class GetShardIteratorInput(ServiceRequest):
+    StreamArn: StreamArn
+    ShardId: ShardId
+    ShardIteratorType: ShardIteratorType
+    SequenceNumber: Optional[SequenceNumber]
+
+
+class GetShardIteratorOutput(TypedDict, total=False):
+    ShardIterator: Optional[ShardIterator]
+
+
+class ListStreamsInput(ServiceRequest):
+    TableName: Optional[TableName]
+    Limit: Optional[PositiveIntegerObject]
+    ExclusiveStartStreamArn: Optional[StreamArn]
+
+
+class Stream(TypedDict, total=False):
+    StreamArn: Optional[StreamArn]
+    TableName: Optional[TableName]
+    StreamLabel: Optional[String]
+
+
+StreamList = List[Stream]
+
+
+class ListStreamsOutput(TypedDict, total=False):
+    Streams: Optional[StreamList]
+    LastEvaluatedStreamArn: Optional[StreamArn]
+
+
+class DynamodbstreamsApi:
+    service = "dynamodbstreams"
+    version = "2012-08-10"
+
+    @handler("DescribeStream")
+    def describe_stream(
+        self,
+        context: RequestContext,
+        stream_arn: StreamArn,
+        limit: PositiveIntegerObject | None = None,
+        exclusive_start_shard_id: ShardId | None = None,
+        **kwargs,
+    ) -> DescribeStreamOutput:
+        raise NotImplementedError
+
+    @handler("GetRecords")
+    def get_records(
+        self,
+        context: RequestContext,
+        shard_iterator: ShardIterator,
+        limit: PositiveIntegerObject | None = None,
+        **kwargs,
+    ) -> GetRecordsOutput:
+        raise NotImplementedError
+
+    @handler("GetShardIterator")
+    def get_shard_iterator(
+        self,
+        context: RequestContext,
+        stream_arn: StreamArn,
+        shard_id: ShardId,
+        shard_iterator_type: ShardIteratorType,
+        sequence_number: SequenceNumber | None = None,
+        **kwargs,
+    ) -> GetShardIteratorOutput:
+        raise NotImplementedError
+
+    @handler("ListStreams")
+    def list_streams(
+        self,
+        context: RequestContext,
+        table_name: TableName | None = None,
+        limit: PositiveIntegerObject | None = None,
+        exclusive_start_stream_arn: StreamArn | None = None,
+        **kwargs,
+    ) -> ListStreamsOutput:
+        raise NotImplementedError
diff --git a/localstack-core/localstack/aws/api/ec2/__init__.py b/localstack-core/localstack/aws/api/ec2/__init__.py
new file mode 100644
index 0000000000000..2c54e41e41615
--- /dev/null
+++ b/localstack-core/localstack/aws/api/ec2/__init__.py
@@ -0,0 +1,29034 @@
+from datetime import datetime
+from enum import StrEnum
+from typing import List, Optional, TypedDict
+
+from localstack.aws.api import (
+    RequestContext,
+    ServiceRequest,
+    handler,
+)
+from localstack.aws.api import (
+    ServiceException as ServiceException,
+)
+
+AccountID = str
+AddressMaxResults = int
+AllocationId = str
+AllowedInstanceType = str
+AssetId = str
+AutoRecoveryFlag = bool
+AvailabilityZoneId = str
+AvailabilityZoneName = str
+BareMetalFlag = bool
+BaselineBandwidthInGbps = float
+BaselineBandwidthInMbps = int
+BaselineIops = int
+BaselineThroughputInMBps = float
+Boolean = bool
+BoxedDouble = float
+BoxedInteger = int
+BundleId = str
+BurstablePerformanceFlag = bool
+CancelCapacityReservationFleetErrorCode = str
+CancelCapacityReservationFleetErrorMessage = str
+CapacityReservationFleetId = str
+CapacityReservationId = str
+CarrierGatewayId = str
+CarrierGatewayMaxResults = int
+CertificateArn = str
+CertificateId = str
+ClientSecretType = str
+ClientVpnEndpointId = str
+CloudWatchLogGroupArn = str
+CoipPoolId = str
+CoipPoolMaxResults = int
+ComponentAccount = str
+ComponentRegion = str
+ConnectionNotificationId = str
+ConversionTaskId = str
+CoolOffPeriodRequestHours = int
+CoolOffPeriodResponseHours = int
+CopySnapshotRequestPSU = str
+CoreCount = int
+CoreNetworkArn = str
+CpuManufacturerName = str
+CurrentGenerationFlag = bool
+CustomerGatewayId = str
+DITMaxResults = int
+DITOMaxResults = int
+DeclarativePoliciesMaxResults = int
+DeclarativePoliciesReportId = str
+DedicatedHostFlag = bool
+DedicatedHostId = str
+DefaultEnaQueueCountPerInterface = int
+DefaultNetworkCardIndex = int
+DefaultingDhcpOptionsId = str
+DescribeAddressTransfersMaxResults = int
+DescribeByoipCidrsMaxResults = int
+DescribeCapacityBlockExtensionOfferingsMaxResults = int
+DescribeCapacityBlockOfferingsMaxResults = int
+DescribeCapacityReservationBillingRequestsRequestMaxResults = int
+DescribeCapacityReservationFleetsMaxResults = int
+DescribeCapacityReservationsMaxResults = int
+DescribeClassicLinkInstancesMaxResults = int
+DescribeClientVpnAuthorizationRulesMaxResults = int
+DescribeClientVpnConnectionsMaxResults = int
+DescribeClientVpnEndpointMaxResults = int
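Editor's note: the generated scaffolding above is only the service contract; every operation raises NotImplementedError until a provider subclass overrides it. Below is a minimal sketch of how such a stub is typically filled in. The provider class name and its in-memory store are illustrative assumptions, not part of this patch, and pagination via exclusive_start_stream_arn is omitted for brevity.

from localstack.aws.api import RequestContext
from localstack.aws.api.dynamodbstreams import (
    DynamodbstreamsApi,
    ListStreamsOutput,
    PositiveIntegerObject,
    StreamArn,
    StreamStatus,
    TableName,
)

# StrEnum members compare equal to their wire strings, so the generated enums
# can be matched directly against parsed request/response values:
assert StreamStatus.ENABLED == "ENABLED"


class DynamoDBStreamsProvider(DynamodbstreamsApi):
    """Illustrative provider sketch; not the actual LocalStack implementation."""

    def __init__(self):
        # hypothetical in-memory store mapping a stream ARN to its Stream summary
        self.streams: dict[str, dict] = {}

    def list_streams(
        self,
        context: RequestContext,
        table_name: TableName | None = None,
        limit: PositiveIntegerObject | None = None,
        exclusive_start_stream_arn: StreamArn | None = None,
        **kwargs,
    ) -> ListStreamsOutput:
        # Overriding the generated stub replaces its NotImplementedError; the
        # @handler("ListStreams") routing is inherited from DynamodbstreamsApi.
        streams = [
            s
            for s in self.streams.values()
            if table_name is None or s.get("TableName") == table_name
        ]
        return ListStreamsOutput(Streams=streams[: limit or 100])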
+DescribeClientVpnRoutesMaxResults = int +DescribeClientVpnTargetNetworksMaxResults = int +DescribeDhcpOptionsMaxResults = int +DescribeEgressOnlyInternetGatewaysMaxResults = int +DescribeElasticGpusMaxResults = int +DescribeExportImageTasksMaxResults = int +DescribeFastLaunchImagesRequestMaxResults = int +DescribeFastSnapshotRestoresMaxResults = int +DescribeFpgaImagesMaxResults = int +DescribeFutureCapacityMaxResults = int +DescribeHostReservationsMaxResults = int +DescribeIamInstanceProfileAssociationsMaxResults = int +DescribeInstanceCreditSpecificationsMaxResults = int +DescribeInstanceImageMetadataMaxResults = int +DescribeInstanceTopologyMaxResults = int +DescribeInternetGatewaysMaxResults = int +DescribeIpamByoasnMaxResults = int +DescribeLaunchTemplatesMaxResults = int +DescribeLockedSnapshotsMaxResults = int +DescribeMacHostsRequestMaxResults = int +DescribeMacModificationTasksMaxResults = int +DescribeMovingAddressesMaxResults = int +DescribeNatGatewaysMaxResults = int +DescribeNetworkAclsMaxResults = int +DescribeNetworkInterfacePermissionsMaxResults = int +DescribeNetworkInterfacesMaxResults = int +DescribePrincipalIdFormatMaxResults = int +DescribeReplaceRootVolumeTasksMaxResults = int +DescribeRouteTablesMaxResults = int +DescribeScheduledInstanceAvailabilityMaxResults = int +DescribeSecurityGroupRulesMaxResults = int +DescribeSecurityGroupVpcAssociationsMaxResults = int +DescribeSecurityGroupsMaxResults = int +DescribeSnapshotTierStatusMaxResults = int +DescribeSpotFleetInstancesMaxResults = int +DescribeSpotFleetRequestHistoryMaxResults = int +DescribeStaleSecurityGroupsMaxResults = int +DescribeStaleSecurityGroupsNextToken = str +DescribeStoreImageTasksRequestMaxResults = int +DescribeSubnetsMaxResults = int +DescribeTrunkInterfaceAssociationsMaxResults = int +DescribeVerifiedAccessEndpointsMaxResults = int +DescribeVerifiedAccessGroupMaxResults = int +DescribeVerifiedAccessInstanceLoggingConfigurationsMaxResults = int +DescribeVerifiedAccessInstancesMaxResults = int +DescribeVerifiedAccessTrustProvidersMaxResults = int +DescribeVpcBlockPublicAccessExclusionsMaxResults = int +DescribeVpcClassicLinkDnsSupportMaxResults = int +DescribeVpcClassicLinkDnsSupportNextToken = str +DescribeVpcPeeringConnectionsMaxResults = int +DescribeVpcsMaxResults = int +DhcpOptionsId = str +DisassociateSecurityGroupVpcSecurityGroupId = str +DiskCount = int +Double = float +DoubleWithConstraints = float +DrainSeconds = int +EfaSupportedFlag = bool +EgressOnlyInternetGatewayId = str +EipAllocationPublicIp = str +EkPubKeyValue = str +ElasticGpuId = str +ElasticInferenceAcceleratorCount = int +ElasticIpAssociationId = str +EnaSrdSupported = bool +EncryptionInTransitSupported = bool +ExcludedInstanceType = str +ExportImageTaskId = str +ExportTaskId = str +ExportVmTaskId = str +FleetId = str +Float = float +FlowLogResourceId = str +FpgaDeviceCount = int +FpgaDeviceManufacturerName = str +FpgaDeviceMemorySize = int +FpgaDeviceName = str +FpgaImageId = str +FreeTierEligibleFlag = bool +GVCDMaxResults = int +GetCapacityReservationUsageRequestMaxResults = int +GetGroupsForCapacityReservationRequestMaxResults = int +GetIpamPoolAllocationsMaxResults = int +GetManagedPrefixListAssociationsMaxResults = int +GetNetworkInsightsAccessScopeAnalysisFindingsMaxResults = int +GetSecurityGroupsForVpcRequestMaxResults = int +GetSubnetCidrReservationsMaxResults = int +GetVerifiedAccessEndpointTargetsMaxResults = int +GpuDeviceCount = int +GpuDeviceManufacturerName = str +GpuDeviceMemorySize = int +GpuDeviceName = str 
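Editor's note: the thousands of module-level names in this generated EC2 file are plain runtime aliases. They make the generated signatures read like the AWS service model, but enforce nothing by themselves. A quick sketch of that property, using two aliases defined above:

from localstack.aws.api.ec2 import CoreCount, FleetId

# At runtime the aliases are simply the builtin types; any semantic meaning
# (e.g. that a FleetId looks like "fleet-...") exists only for readers and
# static type checkers.
assert FleetId is str
assert CoreCount is int


def describe_fleet(fleet_id: FleetId, cores: CoreCount) -> None:
    print(f"fleet {fleet_id} has {cores} cores")


describe_fleet("fleet-0123456789abcdef0", 4)  # hypothetical values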
+HibernationFlag = bool +HostReservationId = str +Hour = int +IamInstanceProfileAssociationId = str +ImageId = str +ImageProvider = str +ImageProviderRequest = str +ImportImageTaskId = str +ImportManifestUrl = str +ImportSnapshotTaskId = str +ImportTaskId = str +InferenceDeviceCount = int +InferenceDeviceManufacturerName = str +InferenceDeviceMemorySize = int +InferenceDeviceName = str +InstanceConnectEndpointId = str +InstanceConnectEndpointMaxResults = int +InstanceEventId = str +InstanceEventWindowCronExpression = str +InstanceEventWindowId = str +InstanceId = str +InstanceIdForResolver = str +InstanceIdWithVolumeResolver = str +InstanceStorageFlag = bool +Integer = int +IntegerWithConstraints = int +InternetGatewayId = str +IpAddress = str +IpamAddressHistoryMaxResults = int +IpamExternalResourceVerificationTokenId = str +IpamId = str +IpamMaxResults = int +IpamNetmaskLength = int +IpamPoolAllocationId = str +IpamPoolCidrId = str +IpamPoolId = str +IpamResourceDiscoveryAssociationId = str +IpamResourceDiscoveryId = str +IpamScopeId = str +Ipv4PoolCoipId = str +Ipv4PoolEc2Id = str +Ipv6Address = str +Ipv6Flag = bool +Ipv6PoolEc2Id = str +Ipv6PoolMaxResults = int +KernelId = str +KeyPairId = str +KeyPairName = str +KeyPairNameWithResolver = str +KmsKeyArn = str +KmsKeyId = str +LaunchTemplateElasticInferenceAcceleratorCount = int +LaunchTemplateId = str +LaunchTemplateName = str +ListImagesInRecycleBinMaxResults = int +ListSnapshotsInRecycleBinMaxResults = int +LoadBalancerArn = str +LocalGatewayId = str +LocalGatewayMaxResults = int +LocalGatewayRouteTableVirtualInterfaceGroupAssociationId = str +LocalGatewayRouteTableVpcAssociationId = str +LocalGatewayRoutetableId = str +LocalGatewayVirtualInterfaceGroupId = str +LocalGatewayVirtualInterfaceId = str +Location = str +MacModificationTaskId = str +MaxIpv4AddrPerInterface = int +MaxIpv6AddrPerInterface = int +MaxNetworkInterfaces = int +MaxResults = int +MaxResultsParam = int +MaximumBandwidthInMbps = int +MaximumEfaInterfaces = int +MaximumEnaQueueCount = int +MaximumEnaQueueCountPerInterface = int +MaximumIops = int +MaximumNetworkCards = int +MaximumThroughputInMBps = float +MediaDeviceCount = int +MediaDeviceManufacturerName = str +MediaDeviceMemorySize = int +MediaDeviceName = str +NatGatewayId = str +NetmaskLength = int +NetworkAclAssociationId = str +NetworkAclId = str +NetworkCardIndex = int +NetworkInsightsAccessScopeAnalysisId = str +NetworkInsightsAccessScopeId = str +NetworkInsightsAnalysisId = str +NetworkInsightsMaxResults = int +NetworkInsightsPathId = str +NetworkInsightsResourceId = str +NetworkInterfaceAttachmentId = str +NetworkInterfaceId = str +NetworkInterfacePermissionId = str +NetworkPerformance = str +NeuronDeviceCoreCount = int +NeuronDeviceCoreVersion = int +NeuronDeviceCount = int +NeuronDeviceMemorySize = int +NeuronDeviceName = str +NextToken = str +NitroTpmSupportedVersionType = str +OfferingId = str +OutpostArn = str +OutpostLagId = str +OutpostLagMaxResults = int +PasswordData = str +PeakBandwidthInGbps = float +PlacementGroupArn = str +PlacementGroupId = str +PlacementGroupName = str +PoolMaxResults = int +Port = int +PrefixListMaxResults = int +PrefixListResourceId = str +Priority = int +PrivateIpAddressCount = int +ProcessorSustainedClockSpeed = float +ProtocolInt = int +PublicIpAddress = str +RamdiskId = str +RdsDbClusterArn = str +RdsDbInstanceArn = str +RdsDbProxyArn = str +ReplaceRootVolumeTaskId = str +ReportInstanceStatusRequestDescription = str +ReservationId = str +ReservedInstancesListingId = 
str +ReservedInstancesModificationId = str +ReservedInstancesOfferingId = str +ResourceArn = str +ResourceConfigurationArn = str +RestoreSnapshotTierRequestTemporaryRestoreDays = int +ResultRange = int +RetentionPeriodRequestDays = int +RetentionPeriodResponseDays = int +RoleId = str +RouteGatewayId = str +RouteServerEndpointId = str +RouteServerId = str +RouteServerMaxResults = int +RouteServerPeerId = str +RouteTableAssociationId = str +RouteTableId = str +RunInstancesUserData = str +S3StorageUploadPolicy = str +S3StorageUploadPolicySignature = str +ScheduledInstanceId = str +SecurityGroupId = str +SecurityGroupName = str +SecurityGroupRuleId = str +SensitiveMacCredentials = str +SensitiveUrl = str +SensitiveUserData = str +ServiceLinkMaxResults = int +ServiceLinkVirtualInterfaceId = str +ServiceNetworkArn = str +SnapshotCompletionDurationMinutesRequest = int +SnapshotCompletionDurationMinutesResponse = int +SnapshotId = str +SpotFleetRequestId = str +SpotInstanceRequestId = str +SpotPlacementScoresMaxResults = int +SpotPlacementScoresTargetCapacity = int +String = str +StringType = str +SubnetCidrAssociationId = str +SubnetCidrReservationId = str +SubnetId = str +TaggableResourceId = str +ThreadsPerCore = int +TotalMediaMemory = int +TotalNeuronMemory = int +TrafficMirrorFilterId = str +TrafficMirrorFilterRuleIdWithResolver = str +TrafficMirrorSessionId = str +TrafficMirrorTargetId = str +TrafficMirroringMaxResults = int +TransitAssociationGatewayId = str +TransitGatewayAttachmentId = str +TransitGatewayConnectPeerId = str +TransitGatewayId = str +TransitGatewayMaxResults = int +TransitGatewayMulticastDomainId = str +TransitGatewayPolicyTableId = str +TransitGatewayRouteTableAnnouncementId = str +TransitGatewayRouteTableId = str +TrunkInterfaceAssociationId = str +VCpuCount = int +VerifiedAccessEndpointId = str +VerifiedAccessEndpointPortNumber = int +VerifiedAccessGroupId = str +VerifiedAccessInstanceId = str +VerifiedAccessTrustProviderId = str +VersionDescription = str +VolumeId = str +VolumeIdWithResolver = str +VpcBlockPublicAccessExclusionId = str +VpcCidrAssociationId = str +VpcEncryptionControlId = str +VpcEndpointId = str +VpcEndpointServiceId = str +VpcFlowLogId = str +VpcId = str +VpcPeeringConnectionId = str +VpcPeeringConnectionIdWithResolver = str +VpnConnectionDeviceSampleConfiguration = str +VpnConnectionDeviceTypeId = str +VpnConnectionId = str +VpnGatewayId = str +customerGatewayConfiguration = str +maxResults = int +preSharedKey = str +totalFpgaMemory = int +totalGpuMemory = int +totalInferenceMemory = int + + +class AcceleratorManufacturer(StrEnum): + amazon_web_services = "amazon-web-services" + amd = "amd" + nvidia = "nvidia" + xilinx = "xilinx" + habana = "habana" + + +class AcceleratorName(StrEnum): + a100 = "a100" + inferentia = "inferentia" + k520 = "k520" + k80 = "k80" + m60 = "m60" + radeon_pro_v520 = "radeon-pro-v520" + t4 = "t4" + vu9p = "vu9p" + v100 = "v100" + a10g = "a10g" + h100 = "h100" + t4g = "t4g" + + +class AcceleratorType(StrEnum): + gpu = "gpu" + fpga = "fpga" + inference = "inference" + + +class AccountAttributeName(StrEnum): + supported_platforms = "supported-platforms" + default_vpc = "default-vpc" + + +class ActivityStatus(StrEnum): + error = "error" + pending_fulfillment = "pending_fulfillment" + pending_termination = "pending_termination" + fulfilled = "fulfilled" + + +class AddressAttributeName(StrEnum): + domain_name = "domain-name" + + +class AddressFamily(StrEnum): + ipv4 = "ipv4" + ipv6 = "ipv6" + + +class 
AddressTransferStatus(StrEnum): + pending = "pending" + disabled = "disabled" + accepted = "accepted" + + +class Affinity(StrEnum): + default = "default" + host = "host" + + +class AllocationState(StrEnum): + available = "available" + under_assessment = "under-assessment" + permanent_failure = "permanent-failure" + released = "released" + released_permanent_failure = "released-permanent-failure" + pending = "pending" + + +class AllocationStrategy(StrEnum): + lowestPrice = "lowestPrice" + diversified = "diversified" + capacityOptimized = "capacityOptimized" + capacityOptimizedPrioritized = "capacityOptimizedPrioritized" + priceCapacityOptimized = "priceCapacityOptimized" + + +class AllocationType(StrEnum): + used = "used" + future = "future" + + +class AllowedImagesSettingsDisabledState(StrEnum): + disabled = "disabled" + + +class AllowedImagesSettingsEnabledState(StrEnum): + enabled = "enabled" + audit_mode = "audit-mode" + + +class AllowsMultipleInstanceTypes(StrEnum): + on = "on" + off = "off" + + +class AmdSevSnpSpecification(StrEnum): + enabled = "enabled" + disabled = "disabled" + + +class AnalysisStatus(StrEnum): + running = "running" + succeeded = "succeeded" + failed = "failed" + + +class ApplianceModeSupportValue(StrEnum): + enable = "enable" + disable = "disable" + + +class ArchitectureType(StrEnum): + i386 = "i386" + x86_64 = "x86_64" + arm64 = "arm64" + x86_64_mac = "x86_64_mac" + arm64_mac = "arm64_mac" + + +class ArchitectureValues(StrEnum): + i386 = "i386" + x86_64 = "x86_64" + arm64 = "arm64" + x86_64_mac = "x86_64_mac" + arm64_mac = "arm64_mac" + + +class AsnAssociationState(StrEnum): + disassociated = "disassociated" + failed_disassociation = "failed-disassociation" + failed_association = "failed-association" + pending_disassociation = "pending-disassociation" + pending_association = "pending-association" + associated = "associated" + + +class AsnState(StrEnum): + deprovisioned = "deprovisioned" + failed_deprovision = "failed-deprovision" + failed_provision = "failed-provision" + pending_deprovision = "pending-deprovision" + pending_provision = "pending-provision" + provisioned = "provisioned" + + +class AssociatedNetworkType(StrEnum): + vpc = "vpc" + + +class AssociationStatusCode(StrEnum): + associating = "associating" + associated = "associated" + association_failed = "association-failed" + disassociating = "disassociating" + disassociated = "disassociated" + + +class AttachmentStatus(StrEnum): + attaching = "attaching" + attached = "attached" + detaching = "detaching" + detached = "detached" + + +class AutoAcceptSharedAssociationsValue(StrEnum): + enable = "enable" + disable = "disable" + + +class AutoAcceptSharedAttachmentsValue(StrEnum): + enable = "enable" + disable = "disable" + + +class AutoPlacement(StrEnum): + on = "on" + off = "off" + + +class AvailabilityZoneOptInStatus(StrEnum): + opt_in_not_required = "opt-in-not-required" + opted_in = "opted-in" + not_opted_in = "not-opted-in" + + +class AvailabilityZoneState(StrEnum): + available = "available" + information = "information" + impaired = "impaired" + unavailable = "unavailable" + constrained = "constrained" + + +class BandwidthWeightingType(StrEnum): + default = "default" + vpc_1 = "vpc-1" + ebs_1 = "ebs-1" + + +class BareMetal(StrEnum): + included = "included" + required = "required" + excluded = "excluded" + + +class BatchState(StrEnum): + submitted = "submitted" + active = "active" + cancelled = "cancelled" + failed = "failed" + cancelled_running = "cancelled_running" + cancelled_terminating = 
"cancelled_terminating" + modifying = "modifying" + + +class BgpStatus(StrEnum): + up = "up" + down = "down" + + +class BlockPublicAccessMode(StrEnum): + off = "off" + block_bidirectional = "block-bidirectional" + block_ingress = "block-ingress" + + +class BootModeType(StrEnum): + legacy_bios = "legacy-bios" + uefi = "uefi" + + +class BootModeValues(StrEnum): + legacy_bios = "legacy-bios" + uefi = "uefi" + uefi_preferred = "uefi-preferred" + + +class BundleTaskState(StrEnum): + pending = "pending" + waiting_for_shutdown = "waiting-for-shutdown" + bundling = "bundling" + storing = "storing" + cancelling = "cancelling" + complete = "complete" + failed = "failed" + + +class BurstablePerformance(StrEnum): + included = "included" + required = "required" + excluded = "excluded" + + +class ByoipCidrState(StrEnum): + advertised = "advertised" + deprovisioned = "deprovisioned" + failed_deprovision = "failed-deprovision" + failed_provision = "failed-provision" + pending_deprovision = "pending-deprovision" + pending_provision = "pending-provision" + provisioned = "provisioned" + provisioned_not_publicly_advertisable = "provisioned-not-publicly-advertisable" + + +class CallerRole(StrEnum): + odcr_owner = "odcr-owner" + unused_reservation_billing_owner = "unused-reservation-billing-owner" + + +class CancelBatchErrorCode(StrEnum): + fleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" + fleetRequestIdMalformed = "fleetRequestIdMalformed" + fleetRequestNotInCancellableState = "fleetRequestNotInCancellableState" + unexpectedError = "unexpectedError" + + +class CancelSpotInstanceRequestState(StrEnum): + active = "active" + open = "open" + closed = "closed" + cancelled = "cancelled" + completed = "completed" + + +class CapacityBlockExtensionStatus(StrEnum): + payment_pending = "payment-pending" + payment_failed = "payment-failed" + payment_succeeded = "payment-succeeded" + + +class CapacityReservationBillingRequestStatus(StrEnum): + pending = "pending" + accepted = "accepted" + rejected = "rejected" + cancelled = "cancelled" + revoked = "revoked" + expired = "expired" + + +class CapacityReservationDeliveryPreference(StrEnum): + fixed = "fixed" + incremental = "incremental" + + +class CapacityReservationFleetState(StrEnum): + submitted = "submitted" + modifying = "modifying" + active = "active" + partially_fulfilled = "partially_fulfilled" + expiring = "expiring" + expired = "expired" + cancelling = "cancelling" + cancelled = "cancelled" + failed = "failed" + + +class CapacityReservationInstancePlatform(StrEnum): + Linux_UNIX = "Linux/UNIX" + Red_Hat_Enterprise_Linux = "Red Hat Enterprise Linux" + SUSE_Linux = "SUSE Linux" + Windows = "Windows" + Windows_with_SQL_Server = "Windows with SQL Server" + Windows_with_SQL_Server_Enterprise = "Windows with SQL Server Enterprise" + Windows_with_SQL_Server_Standard = "Windows with SQL Server Standard" + Windows_with_SQL_Server_Web = "Windows with SQL Server Web" + Linux_with_SQL_Server_Standard = "Linux with SQL Server Standard" + Linux_with_SQL_Server_Web = "Linux with SQL Server Web" + Linux_with_SQL_Server_Enterprise = "Linux with SQL Server Enterprise" + RHEL_with_SQL_Server_Standard = "RHEL with SQL Server Standard" + RHEL_with_SQL_Server_Enterprise = "RHEL with SQL Server Enterprise" + RHEL_with_SQL_Server_Web = "RHEL with SQL Server Web" + RHEL_with_HA = "RHEL with HA" + RHEL_with_HA_and_SQL_Server_Standard = "RHEL with HA and SQL Server Standard" + RHEL_with_HA_and_SQL_Server_Enterprise = "RHEL with HA and SQL Server Enterprise" + Ubuntu_Pro = "Ubuntu 
Pro" + + +class CapacityReservationPreference(StrEnum): + capacity_reservations_only = "capacity-reservations-only" + open = "open" + none = "none" + + +class CapacityReservationState(StrEnum): + active = "active" + expired = "expired" + cancelled = "cancelled" + pending = "pending" + failed = "failed" + scheduled = "scheduled" + payment_pending = "payment-pending" + payment_failed = "payment-failed" + assessing = "assessing" + delayed = "delayed" + unsupported = "unsupported" + + +class CapacityReservationTenancy(StrEnum): + default = "default" + dedicated = "dedicated" + + +class CapacityReservationType(StrEnum): + default = "default" + capacity_block = "capacity-block" + + +class CarrierGatewayState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class ClientCertificateRevocationListStatusCode(StrEnum): + pending = "pending" + active = "active" + + +class ClientVpnAuthenticationType(StrEnum): + certificate_authentication = "certificate-authentication" + directory_service_authentication = "directory-service-authentication" + federated_authentication = "federated-authentication" + + +class ClientVpnAuthorizationRuleStatusCode(StrEnum): + authorizing = "authorizing" + active = "active" + failed = "failed" + revoking = "revoking" + + +class ClientVpnConnectionStatusCode(StrEnum): + active = "active" + failed_to_terminate = "failed-to-terminate" + terminating = "terminating" + terminated = "terminated" + + +class ClientVpnEndpointAttributeStatusCode(StrEnum): + applying = "applying" + applied = "applied" + + +class ClientVpnEndpointStatusCode(StrEnum): + pending_associate = "pending-associate" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class ClientVpnRouteStatusCode(StrEnum): + creating = "creating" + active = "active" + failed = "failed" + deleting = "deleting" + + +class ConnectionNotificationState(StrEnum): + Enabled = "Enabled" + Disabled = "Disabled" + + +class ConnectionNotificationType(StrEnum): + Topic = "Topic" + + +class ConnectivityType(StrEnum): + private = "private" + public = "public" + + +class ContainerFormat(StrEnum): + ova = "ova" + + +class ConversionTaskState(StrEnum): + active = "active" + cancelling = "cancelling" + cancelled = "cancelled" + completed = "completed" + + +class CopyTagsFromSource(StrEnum): + volume = "volume" + + +class CpuManufacturer(StrEnum): + intel = "intel" + amd = "amd" + amazon_web_services = "amazon-web-services" + apple = "apple" + + +class CurrencyCodeValues(StrEnum): + USD = "USD" + + +class DatafeedSubscriptionState(StrEnum): + Active = "Active" + Inactive = "Inactive" + + +class DefaultInstanceMetadataEndpointState(StrEnum): + disabled = "disabled" + enabled = "enabled" + no_preference = "no-preference" + + +class DefaultInstanceMetadataTagsState(StrEnum): + disabled = "disabled" + enabled = "enabled" + no_preference = "no-preference" + + +class DefaultRouteTableAssociationValue(StrEnum): + enable = "enable" + disable = "disable" + + +class DefaultRouteTablePropagationValue(StrEnum): + enable = "enable" + disable = "disable" + + +class DefaultTargetCapacityType(StrEnum): + spot = "spot" + on_demand = "on-demand" + capacity_block = "capacity-block" + + +class DeleteFleetErrorCode(StrEnum): + fleetIdDoesNotExist = "fleetIdDoesNotExist" + fleetIdMalformed = "fleetIdMalformed" + fleetNotInDeletableState = "fleetNotInDeletableState" + unexpectedError = "unexpectedError" + + +class DeleteQueuedReservedInstancesErrorCode(StrEnum): + 
reserved_instances_id_invalid = "reserved-instances-id-invalid" + reserved_instances_not_in_queued_state = "reserved-instances-not-in-queued-state" + unexpected_error = "unexpected-error" + + +class DestinationFileFormat(StrEnum): + plain_text = "plain-text" + parquet = "parquet" + + +class DeviceTrustProviderType(StrEnum): + jamf = "jamf" + crowdstrike = "crowdstrike" + jumpcloud = "jumpcloud" + + +class DeviceType(StrEnum): + ebs = "ebs" + instance_store = "instance-store" + + +class DiskImageFormat(StrEnum): + VMDK = "VMDK" + RAW = "RAW" + VHD = "VHD" + + +class DiskType(StrEnum): + hdd = "hdd" + ssd = "ssd" + + +class DnsNameState(StrEnum): + pendingVerification = "pendingVerification" + verified = "verified" + failed = "failed" + + +class DnsRecordIpType(StrEnum): + ipv4 = "ipv4" + dualstack = "dualstack" + ipv6 = "ipv6" + service_defined = "service-defined" + + +class DnsSupportValue(StrEnum): + enable = "enable" + disable = "disable" + + +class DomainType(StrEnum): + vpc = "vpc" + standard = "standard" + + +class DynamicRoutingValue(StrEnum): + enable = "enable" + disable = "disable" + + +class EbsEncryptionSupport(StrEnum): + unsupported = "unsupported" + supported = "supported" + + +class EbsNvmeSupport(StrEnum): + unsupported = "unsupported" + supported = "supported" + required = "required" + + +class EbsOptimizedSupport(StrEnum): + unsupported = "unsupported" + supported = "supported" + default = "default" + + +class Ec2InstanceConnectEndpointState(StrEnum): + create_in_progress = "create-in-progress" + create_complete = "create-complete" + create_failed = "create-failed" + delete_in_progress = "delete-in-progress" + delete_complete = "delete-complete" + delete_failed = "delete-failed" + + +class EkPubKeyFormat(StrEnum): + der = "der" + tpmt = "tpmt" + + +class EkPubKeyType(StrEnum): + rsa_2048 = "rsa-2048" + ecc_sec_p384 = "ecc-sec-p384" + + +class ElasticGpuState(StrEnum): + ATTACHED = "ATTACHED" + + +class ElasticGpuStatus(StrEnum): + OK = "OK" + IMPAIRED = "IMPAIRED" + + +class EnaSupport(StrEnum): + unsupported = "unsupported" + supported = "supported" + required = "required" + + +class EndDateType(StrEnum): + unlimited = "unlimited" + limited = "limited" + + +class EphemeralNvmeSupport(StrEnum): + unsupported = "unsupported" + supported = "supported" + required = "required" + + +class EventCode(StrEnum): + instance_reboot = "instance-reboot" + system_reboot = "system-reboot" + system_maintenance = "system-maintenance" + instance_retirement = "instance-retirement" + instance_stop = "instance-stop" + + +class EventType(StrEnum): + instanceChange = "instanceChange" + fleetRequestChange = "fleetRequestChange" + error = "error" + information = "information" + + +class ExcessCapacityTerminationPolicy(StrEnum): + noTermination = "noTermination" + default = "default" + + +class ExportEnvironment(StrEnum): + citrix = "citrix" + vmware = "vmware" + microsoft = "microsoft" + + +class ExportTaskState(StrEnum): + active = "active" + cancelling = "cancelling" + cancelled = "cancelled" + completed = "completed" + + +class FastLaunchResourceType(StrEnum): + snapshot = "snapshot" + + +class FastLaunchStateCode(StrEnum): + enabling = "enabling" + enabling_failed = "enabling-failed" + enabled = "enabled" + enabled_failed = "enabled-failed" + disabling = "disabling" + disabling_failed = "disabling-failed" + + +class FastSnapshotRestoreStateCode(StrEnum): + enabling = "enabling" + optimizing = "optimizing" + enabled = "enabled" + disabling = "disabling" + disabled = "disabled" + + +class 
FindingsFound(StrEnum): + true = "true" + false = "false" + unknown = "unknown" + + +class FleetActivityStatus(StrEnum): + error = "error" + pending_fulfillment = "pending_fulfillment" + pending_termination = "pending_termination" + fulfilled = "fulfilled" + + +class FleetCapacityReservationTenancy(StrEnum): + default = "default" + + +class FleetCapacityReservationUsageStrategy(StrEnum): + use_capacity_reservations_first = "use-capacity-reservations-first" + + +class FleetEventType(StrEnum): + instance_change = "instance-change" + fleet_change = "fleet-change" + service_error = "service-error" + + +class FleetExcessCapacityTerminationPolicy(StrEnum): + no_termination = "no-termination" + termination = "termination" + + +class FleetInstanceMatchCriteria(StrEnum): + open = "open" + + +class FleetOnDemandAllocationStrategy(StrEnum): + lowest_price = "lowest-price" + prioritized = "prioritized" + + +class FleetReplacementStrategy(StrEnum): + launch = "launch" + launch_before_terminate = "launch-before-terminate" + + +class FleetStateCode(StrEnum): + submitted = "submitted" + active = "active" + deleted = "deleted" + failed = "failed" + deleted_running = "deleted_running" + deleted_terminating = "deleted_terminating" + modifying = "modifying" + + +class FleetType(StrEnum): + request = "request" + maintain = "maintain" + instant = "instant" + + +class FlexibleEnaQueuesSupport(StrEnum): + unsupported = "unsupported" + supported = "supported" + + +class FlowLogsResourceType(StrEnum): + VPC = "VPC" + Subnet = "Subnet" + NetworkInterface = "NetworkInterface" + TransitGateway = "TransitGateway" + TransitGatewayAttachment = "TransitGatewayAttachment" + + +class FpgaImageAttributeName(StrEnum): + description = "description" + name = "name" + loadPermission = "loadPermission" + productCodes = "productCodes" + + +class FpgaImageStateCode(StrEnum): + pending = "pending" + failed = "failed" + available = "available" + unavailable = "unavailable" + + +class GatewayAssociationState(StrEnum): + associated = "associated" + not_associated = "not-associated" + associating = "associating" + disassociating = "disassociating" + + +class GatewayType(StrEnum): + ipsec_1 = "ipsec.1" + + +class HostMaintenance(StrEnum): + on = "on" + off = "off" + + +class HostRecovery(StrEnum): + on = "on" + off = "off" + + +class HostTenancy(StrEnum): + default = "default" + dedicated = "dedicated" + host = "host" + + +class HostnameType(StrEnum): + ip_name = "ip-name" + resource_name = "resource-name" + + +class HttpTokensState(StrEnum): + optional = "optional" + required = "required" + + +class HypervisorType(StrEnum): + ovm = "ovm" + xen = "xen" + + +class IamInstanceProfileAssociationState(StrEnum): + associating = "associating" + associated = "associated" + disassociating = "disassociating" + disassociated = "disassociated" + + +class Igmpv2SupportValue(StrEnum): + enable = "enable" + disable = "disable" + + +class ImageAttributeName(StrEnum): + description = "description" + kernel = "kernel" + ramdisk = "ramdisk" + launchPermission = "launchPermission" + productCodes = "productCodes" + blockDeviceMapping = "blockDeviceMapping" + sriovNetSupport = "sriovNetSupport" + bootMode = "bootMode" + tpmSupport = "tpmSupport" + uefiData = "uefiData" + lastLaunchedTime = "lastLaunchedTime" + imdsSupport = "imdsSupport" + deregistrationProtection = "deregistrationProtection" + + +class ImageBlockPublicAccessDisabledState(StrEnum): + unblocked = "unblocked" + + +class ImageBlockPublicAccessEnabledState(StrEnum): + block_new_sharing = 
"block-new-sharing" + + +class ImageState(StrEnum): + pending = "pending" + available = "available" + invalid = "invalid" + deregistered = "deregistered" + transient = "transient" + failed = "failed" + error = "error" + disabled = "disabled" + + +class ImageTypeValues(StrEnum): + machine = "machine" + kernel = "kernel" + ramdisk = "ramdisk" + + +class ImdsSupportValues(StrEnum): + v2_0 = "v2.0" + + +class InstanceAttributeName(StrEnum): + instanceType = "instanceType" + kernel = "kernel" + ramdisk = "ramdisk" + userData = "userData" + disableApiTermination = "disableApiTermination" + instanceInitiatedShutdownBehavior = "instanceInitiatedShutdownBehavior" + rootDeviceName = "rootDeviceName" + blockDeviceMapping = "blockDeviceMapping" + productCodes = "productCodes" + sourceDestCheck = "sourceDestCheck" + groupSet = "groupSet" + ebsOptimized = "ebsOptimized" + sriovNetSupport = "sriovNetSupport" + enaSupport = "enaSupport" + enclaveOptions = "enclaveOptions" + disableApiStop = "disableApiStop" + + +class InstanceAutoRecoveryState(StrEnum): + disabled = "disabled" + default = "default" + + +class InstanceBandwidthWeighting(StrEnum): + default = "default" + vpc_1 = "vpc-1" + ebs_1 = "ebs-1" + + +class InstanceBootModeValues(StrEnum): + legacy_bios = "legacy-bios" + uefi = "uefi" + + +class InstanceEventWindowState(StrEnum): + creating = "creating" + deleting = "deleting" + active = "active" + deleted = "deleted" + + +class InstanceGeneration(StrEnum): + current = "current" + previous = "previous" + + +class InstanceHealthStatus(StrEnum): + healthy = "healthy" + unhealthy = "unhealthy" + + +class InstanceInterruptionBehavior(StrEnum): + hibernate = "hibernate" + stop = "stop" + terminate = "terminate" + + +class InstanceLifecycle(StrEnum): + spot = "spot" + on_demand = "on-demand" + + +class InstanceLifecycleType(StrEnum): + spot = "spot" + scheduled = "scheduled" + capacity_block = "capacity-block" + + +class InstanceMatchCriteria(StrEnum): + open = "open" + targeted = "targeted" + + +class InstanceMetadataEndpointState(StrEnum): + disabled = "disabled" + enabled = "enabled" + + +class InstanceMetadataOptionsState(StrEnum): + pending = "pending" + applied = "applied" + + +class InstanceMetadataProtocolState(StrEnum): + disabled = "disabled" + enabled = "enabled" + + +class InstanceMetadataTagsState(StrEnum): + disabled = "disabled" + enabled = "enabled" + + +class InstanceRebootMigrationState(StrEnum): + disabled = "disabled" + default = "default" + + +class InstanceStateName(StrEnum): + pending = "pending" + running = "running" + shutting_down = "shutting-down" + terminated = "terminated" + stopping = "stopping" + stopped = "stopped" + + +class InstanceStorageEncryptionSupport(StrEnum): + unsupported = "unsupported" + required = "required" + + +class InstanceType(StrEnum): + a1_medium = "a1.medium" + a1_large = "a1.large" + a1_xlarge = "a1.xlarge" + a1_2xlarge = "a1.2xlarge" + a1_4xlarge = "a1.4xlarge" + a1_metal = "a1.metal" + c1_medium = "c1.medium" + c1_xlarge = "c1.xlarge" + c3_large = "c3.large" + c3_xlarge = "c3.xlarge" + c3_2xlarge = "c3.2xlarge" + c3_4xlarge = "c3.4xlarge" + c3_8xlarge = "c3.8xlarge" + c4_large = "c4.large" + c4_xlarge = "c4.xlarge" + c4_2xlarge = "c4.2xlarge" + c4_4xlarge = "c4.4xlarge" + c4_8xlarge = "c4.8xlarge" + c5_large = "c5.large" + c5_xlarge = "c5.xlarge" + c5_2xlarge = "c5.2xlarge" + c5_4xlarge = "c5.4xlarge" + c5_9xlarge = "c5.9xlarge" + c5_12xlarge = "c5.12xlarge" + c5_18xlarge = "c5.18xlarge" + c5_24xlarge = "c5.24xlarge" + c5_metal = "c5.metal" + 
c5a_large = "c5a.large" + c5a_xlarge = "c5a.xlarge" + c5a_2xlarge = "c5a.2xlarge" + c5a_4xlarge = "c5a.4xlarge" + c5a_8xlarge = "c5a.8xlarge" + c5a_12xlarge = "c5a.12xlarge" + c5a_16xlarge = "c5a.16xlarge" + c5a_24xlarge = "c5a.24xlarge" + c5ad_large = "c5ad.large" + c5ad_xlarge = "c5ad.xlarge" + c5ad_2xlarge = "c5ad.2xlarge" + c5ad_4xlarge = "c5ad.4xlarge" + c5ad_8xlarge = "c5ad.8xlarge" + c5ad_12xlarge = "c5ad.12xlarge" + c5ad_16xlarge = "c5ad.16xlarge" + c5ad_24xlarge = "c5ad.24xlarge" + c5d_large = "c5d.large" + c5d_xlarge = "c5d.xlarge" + c5d_2xlarge = "c5d.2xlarge" + c5d_4xlarge = "c5d.4xlarge" + c5d_9xlarge = "c5d.9xlarge" + c5d_12xlarge = "c5d.12xlarge" + c5d_18xlarge = "c5d.18xlarge" + c5d_24xlarge = "c5d.24xlarge" + c5d_metal = "c5d.metal" + c5n_large = "c5n.large" + c5n_xlarge = "c5n.xlarge" + c5n_2xlarge = "c5n.2xlarge" + c5n_4xlarge = "c5n.4xlarge" + c5n_9xlarge = "c5n.9xlarge" + c5n_18xlarge = "c5n.18xlarge" + c5n_metal = "c5n.metal" + c6g_medium = "c6g.medium" + c6g_large = "c6g.large" + c6g_xlarge = "c6g.xlarge" + c6g_2xlarge = "c6g.2xlarge" + c6g_4xlarge = "c6g.4xlarge" + c6g_8xlarge = "c6g.8xlarge" + c6g_12xlarge = "c6g.12xlarge" + c6g_16xlarge = "c6g.16xlarge" + c6g_metal = "c6g.metal" + c6gd_medium = "c6gd.medium" + c6gd_large = "c6gd.large" + c6gd_xlarge = "c6gd.xlarge" + c6gd_2xlarge = "c6gd.2xlarge" + c6gd_4xlarge = "c6gd.4xlarge" + c6gd_8xlarge = "c6gd.8xlarge" + c6gd_12xlarge = "c6gd.12xlarge" + c6gd_16xlarge = "c6gd.16xlarge" + c6gd_metal = "c6gd.metal" + c6gn_medium = "c6gn.medium" + c6gn_large = "c6gn.large" + c6gn_xlarge = "c6gn.xlarge" + c6gn_2xlarge = "c6gn.2xlarge" + c6gn_4xlarge = "c6gn.4xlarge" + c6gn_8xlarge = "c6gn.8xlarge" + c6gn_12xlarge = "c6gn.12xlarge" + c6gn_16xlarge = "c6gn.16xlarge" + c6i_large = "c6i.large" + c6i_xlarge = "c6i.xlarge" + c6i_2xlarge = "c6i.2xlarge" + c6i_4xlarge = "c6i.4xlarge" + c6i_8xlarge = "c6i.8xlarge" + c6i_12xlarge = "c6i.12xlarge" + c6i_16xlarge = "c6i.16xlarge" + c6i_24xlarge = "c6i.24xlarge" + c6i_32xlarge = "c6i.32xlarge" + c6i_metal = "c6i.metal" + cc1_4xlarge = "cc1.4xlarge" + cc2_8xlarge = "cc2.8xlarge" + cg1_4xlarge = "cg1.4xlarge" + cr1_8xlarge = "cr1.8xlarge" + d2_xlarge = "d2.xlarge" + d2_2xlarge = "d2.2xlarge" + d2_4xlarge = "d2.4xlarge" + d2_8xlarge = "d2.8xlarge" + d3_xlarge = "d3.xlarge" + d3_2xlarge = "d3.2xlarge" + d3_4xlarge = "d3.4xlarge" + d3_8xlarge = "d3.8xlarge" + d3en_xlarge = "d3en.xlarge" + d3en_2xlarge = "d3en.2xlarge" + d3en_4xlarge = "d3en.4xlarge" + d3en_6xlarge = "d3en.6xlarge" + d3en_8xlarge = "d3en.8xlarge" + d3en_12xlarge = "d3en.12xlarge" + dl1_24xlarge = "dl1.24xlarge" + f1_2xlarge = "f1.2xlarge" + f1_4xlarge = "f1.4xlarge" + f1_16xlarge = "f1.16xlarge" + g2_2xlarge = "g2.2xlarge" + g2_8xlarge = "g2.8xlarge" + g3_4xlarge = "g3.4xlarge" + g3_8xlarge = "g3.8xlarge" + g3_16xlarge = "g3.16xlarge" + g3s_xlarge = "g3s.xlarge" + g4ad_xlarge = "g4ad.xlarge" + g4ad_2xlarge = "g4ad.2xlarge" + g4ad_4xlarge = "g4ad.4xlarge" + g4ad_8xlarge = "g4ad.8xlarge" + g4ad_16xlarge = "g4ad.16xlarge" + g4dn_xlarge = "g4dn.xlarge" + g4dn_2xlarge = "g4dn.2xlarge" + g4dn_4xlarge = "g4dn.4xlarge" + g4dn_8xlarge = "g4dn.8xlarge" + g4dn_12xlarge = "g4dn.12xlarge" + g4dn_16xlarge = "g4dn.16xlarge" + g4dn_metal = "g4dn.metal" + g5_xlarge = "g5.xlarge" + g5_2xlarge = "g5.2xlarge" + g5_4xlarge = "g5.4xlarge" + g5_8xlarge = "g5.8xlarge" + g5_12xlarge = "g5.12xlarge" + g5_16xlarge = "g5.16xlarge" + g5_24xlarge = "g5.24xlarge" + g5_48xlarge = "g5.48xlarge" + g5g_xlarge = "g5g.xlarge" + g5g_2xlarge = "g5g.2xlarge" + 
g5g_4xlarge = "g5g.4xlarge" + g5g_8xlarge = "g5g.8xlarge" + g5g_16xlarge = "g5g.16xlarge" + g5g_metal = "g5g.metal" + hi1_4xlarge = "hi1.4xlarge" + hpc6a_48xlarge = "hpc6a.48xlarge" + hs1_8xlarge = "hs1.8xlarge" + h1_2xlarge = "h1.2xlarge" + h1_4xlarge = "h1.4xlarge" + h1_8xlarge = "h1.8xlarge" + h1_16xlarge = "h1.16xlarge" + i2_xlarge = "i2.xlarge" + i2_2xlarge = "i2.2xlarge" + i2_4xlarge = "i2.4xlarge" + i2_8xlarge = "i2.8xlarge" + i3_large = "i3.large" + i3_xlarge = "i3.xlarge" + i3_2xlarge = "i3.2xlarge" + i3_4xlarge = "i3.4xlarge" + i3_8xlarge = "i3.8xlarge" + i3_16xlarge = "i3.16xlarge" + i3_metal = "i3.metal" + i3en_large = "i3en.large" + i3en_xlarge = "i3en.xlarge" + i3en_2xlarge = "i3en.2xlarge" + i3en_3xlarge = "i3en.3xlarge" + i3en_6xlarge = "i3en.6xlarge" + i3en_12xlarge = "i3en.12xlarge" + i3en_24xlarge = "i3en.24xlarge" + i3en_metal = "i3en.metal" + im4gn_large = "im4gn.large" + im4gn_xlarge = "im4gn.xlarge" + im4gn_2xlarge = "im4gn.2xlarge" + im4gn_4xlarge = "im4gn.4xlarge" + im4gn_8xlarge = "im4gn.8xlarge" + im4gn_16xlarge = "im4gn.16xlarge" + inf1_xlarge = "inf1.xlarge" + inf1_2xlarge = "inf1.2xlarge" + inf1_6xlarge = "inf1.6xlarge" + inf1_24xlarge = "inf1.24xlarge" + is4gen_medium = "is4gen.medium" + is4gen_large = "is4gen.large" + is4gen_xlarge = "is4gen.xlarge" + is4gen_2xlarge = "is4gen.2xlarge" + is4gen_4xlarge = "is4gen.4xlarge" + is4gen_8xlarge = "is4gen.8xlarge" + m1_small = "m1.small" + m1_medium = "m1.medium" + m1_large = "m1.large" + m1_xlarge = "m1.xlarge" + m2_xlarge = "m2.xlarge" + m2_2xlarge = "m2.2xlarge" + m2_4xlarge = "m2.4xlarge" + m3_medium = "m3.medium" + m3_large = "m3.large" + m3_xlarge = "m3.xlarge" + m3_2xlarge = "m3.2xlarge" + m4_large = "m4.large" + m4_xlarge = "m4.xlarge" + m4_2xlarge = "m4.2xlarge" + m4_4xlarge = "m4.4xlarge" + m4_10xlarge = "m4.10xlarge" + m4_16xlarge = "m4.16xlarge" + m5_large = "m5.large" + m5_xlarge = "m5.xlarge" + m5_2xlarge = "m5.2xlarge" + m5_4xlarge = "m5.4xlarge" + m5_8xlarge = "m5.8xlarge" + m5_12xlarge = "m5.12xlarge" + m5_16xlarge = "m5.16xlarge" + m5_24xlarge = "m5.24xlarge" + m5_metal = "m5.metal" + m5a_large = "m5a.large" + m5a_xlarge = "m5a.xlarge" + m5a_2xlarge = "m5a.2xlarge" + m5a_4xlarge = "m5a.4xlarge" + m5a_8xlarge = "m5a.8xlarge" + m5a_12xlarge = "m5a.12xlarge" + m5a_16xlarge = "m5a.16xlarge" + m5a_24xlarge = "m5a.24xlarge" + m5ad_large = "m5ad.large" + m5ad_xlarge = "m5ad.xlarge" + m5ad_2xlarge = "m5ad.2xlarge" + m5ad_4xlarge = "m5ad.4xlarge" + m5ad_8xlarge = "m5ad.8xlarge" + m5ad_12xlarge = "m5ad.12xlarge" + m5ad_16xlarge = "m5ad.16xlarge" + m5ad_24xlarge = "m5ad.24xlarge" + m5d_large = "m5d.large" + m5d_xlarge = "m5d.xlarge" + m5d_2xlarge = "m5d.2xlarge" + m5d_4xlarge = "m5d.4xlarge" + m5d_8xlarge = "m5d.8xlarge" + m5d_12xlarge = "m5d.12xlarge" + m5d_16xlarge = "m5d.16xlarge" + m5d_24xlarge = "m5d.24xlarge" + m5d_metal = "m5d.metal" + m5dn_large = "m5dn.large" + m5dn_xlarge = "m5dn.xlarge" + m5dn_2xlarge = "m5dn.2xlarge" + m5dn_4xlarge = "m5dn.4xlarge" + m5dn_8xlarge = "m5dn.8xlarge" + m5dn_12xlarge = "m5dn.12xlarge" + m5dn_16xlarge = "m5dn.16xlarge" + m5dn_24xlarge = "m5dn.24xlarge" + m5dn_metal = "m5dn.metal" + m5n_large = "m5n.large" + m5n_xlarge = "m5n.xlarge" + m5n_2xlarge = "m5n.2xlarge" + m5n_4xlarge = "m5n.4xlarge" + m5n_8xlarge = "m5n.8xlarge" + m5n_12xlarge = "m5n.12xlarge" + m5n_16xlarge = "m5n.16xlarge" + m5n_24xlarge = "m5n.24xlarge" + m5n_metal = "m5n.metal" + m5zn_large = "m5zn.large" + m5zn_xlarge = "m5zn.xlarge" + m5zn_2xlarge = "m5zn.2xlarge" + m5zn_3xlarge = "m5zn.3xlarge" + 
m5zn_6xlarge = "m5zn.6xlarge" + m5zn_12xlarge = "m5zn.12xlarge" + m5zn_metal = "m5zn.metal" + m6a_large = "m6a.large" + m6a_xlarge = "m6a.xlarge" + m6a_2xlarge = "m6a.2xlarge" + m6a_4xlarge = "m6a.4xlarge" + m6a_8xlarge = "m6a.8xlarge" + m6a_12xlarge = "m6a.12xlarge" + m6a_16xlarge = "m6a.16xlarge" + m6a_24xlarge = "m6a.24xlarge" + m6a_32xlarge = "m6a.32xlarge" + m6a_48xlarge = "m6a.48xlarge" + m6g_metal = "m6g.metal" + m6g_medium = "m6g.medium" + m6g_large = "m6g.large" + m6g_xlarge = "m6g.xlarge" + m6g_2xlarge = "m6g.2xlarge" + m6g_4xlarge = "m6g.4xlarge" + m6g_8xlarge = "m6g.8xlarge" + m6g_12xlarge = "m6g.12xlarge" + m6g_16xlarge = "m6g.16xlarge" + m6gd_metal = "m6gd.metal" + m6gd_medium = "m6gd.medium" + m6gd_large = "m6gd.large" + m6gd_xlarge = "m6gd.xlarge" + m6gd_2xlarge = "m6gd.2xlarge" + m6gd_4xlarge = "m6gd.4xlarge" + m6gd_8xlarge = "m6gd.8xlarge" + m6gd_12xlarge = "m6gd.12xlarge" + m6gd_16xlarge = "m6gd.16xlarge" + m6i_large = "m6i.large" + m6i_xlarge = "m6i.xlarge" + m6i_2xlarge = "m6i.2xlarge" + m6i_4xlarge = "m6i.4xlarge" + m6i_8xlarge = "m6i.8xlarge" + m6i_12xlarge = "m6i.12xlarge" + m6i_16xlarge = "m6i.16xlarge" + m6i_24xlarge = "m6i.24xlarge" + m6i_32xlarge = "m6i.32xlarge" + m6i_metal = "m6i.metal" + mac1_metal = "mac1.metal" + p2_xlarge = "p2.xlarge" + p2_8xlarge = "p2.8xlarge" + p2_16xlarge = "p2.16xlarge" + p3_2xlarge = "p3.2xlarge" + p3_8xlarge = "p3.8xlarge" + p3_16xlarge = "p3.16xlarge" + p3dn_24xlarge = "p3dn.24xlarge" + p4d_24xlarge = "p4d.24xlarge" + r3_large = "r3.large" + r3_xlarge = "r3.xlarge" + r3_2xlarge = "r3.2xlarge" + r3_4xlarge = "r3.4xlarge" + r3_8xlarge = "r3.8xlarge" + r4_large = "r4.large" + r4_xlarge = "r4.xlarge" + r4_2xlarge = "r4.2xlarge" + r4_4xlarge = "r4.4xlarge" + r4_8xlarge = "r4.8xlarge" + r4_16xlarge = "r4.16xlarge" + r5_large = "r5.large" + r5_xlarge = "r5.xlarge" + r5_2xlarge = "r5.2xlarge" + r5_4xlarge = "r5.4xlarge" + r5_8xlarge = "r5.8xlarge" + r5_12xlarge = "r5.12xlarge" + r5_16xlarge = "r5.16xlarge" + r5_24xlarge = "r5.24xlarge" + r5_metal = "r5.metal" + r5a_large = "r5a.large" + r5a_xlarge = "r5a.xlarge" + r5a_2xlarge = "r5a.2xlarge" + r5a_4xlarge = "r5a.4xlarge" + r5a_8xlarge = "r5a.8xlarge" + r5a_12xlarge = "r5a.12xlarge" + r5a_16xlarge = "r5a.16xlarge" + r5a_24xlarge = "r5a.24xlarge" + r5ad_large = "r5ad.large" + r5ad_xlarge = "r5ad.xlarge" + r5ad_2xlarge = "r5ad.2xlarge" + r5ad_4xlarge = "r5ad.4xlarge" + r5ad_8xlarge = "r5ad.8xlarge" + r5ad_12xlarge = "r5ad.12xlarge" + r5ad_16xlarge = "r5ad.16xlarge" + r5ad_24xlarge = "r5ad.24xlarge" + r5b_large = "r5b.large" + r5b_xlarge = "r5b.xlarge" + r5b_2xlarge = "r5b.2xlarge" + r5b_4xlarge = "r5b.4xlarge" + r5b_8xlarge = "r5b.8xlarge" + r5b_12xlarge = "r5b.12xlarge" + r5b_16xlarge = "r5b.16xlarge" + r5b_24xlarge = "r5b.24xlarge" + r5b_metal = "r5b.metal" + r5d_large = "r5d.large" + r5d_xlarge = "r5d.xlarge" + r5d_2xlarge = "r5d.2xlarge" + r5d_4xlarge = "r5d.4xlarge" + r5d_8xlarge = "r5d.8xlarge" + r5d_12xlarge = "r5d.12xlarge" + r5d_16xlarge = "r5d.16xlarge" + r5d_24xlarge = "r5d.24xlarge" + r5d_metal = "r5d.metal" + r5dn_large = "r5dn.large" + r5dn_xlarge = "r5dn.xlarge" + r5dn_2xlarge = "r5dn.2xlarge" + r5dn_4xlarge = "r5dn.4xlarge" + r5dn_8xlarge = "r5dn.8xlarge" + r5dn_12xlarge = "r5dn.12xlarge" + r5dn_16xlarge = "r5dn.16xlarge" + r5dn_24xlarge = "r5dn.24xlarge" + r5dn_metal = "r5dn.metal" + r5n_large = "r5n.large" + r5n_xlarge = "r5n.xlarge" + r5n_2xlarge = "r5n.2xlarge" + r5n_4xlarge = "r5n.4xlarge" + r5n_8xlarge = "r5n.8xlarge" + r5n_12xlarge = "r5n.12xlarge" + r5n_16xlarge = 
"r5n.16xlarge" + r5n_24xlarge = "r5n.24xlarge" + r5n_metal = "r5n.metal" + r6g_medium = "r6g.medium" + r6g_large = "r6g.large" + r6g_xlarge = "r6g.xlarge" + r6g_2xlarge = "r6g.2xlarge" + r6g_4xlarge = "r6g.4xlarge" + r6g_8xlarge = "r6g.8xlarge" + r6g_12xlarge = "r6g.12xlarge" + r6g_16xlarge = "r6g.16xlarge" + r6g_metal = "r6g.metal" + r6gd_medium = "r6gd.medium" + r6gd_large = "r6gd.large" + r6gd_xlarge = "r6gd.xlarge" + r6gd_2xlarge = "r6gd.2xlarge" + r6gd_4xlarge = "r6gd.4xlarge" + r6gd_8xlarge = "r6gd.8xlarge" + r6gd_12xlarge = "r6gd.12xlarge" + r6gd_16xlarge = "r6gd.16xlarge" + r6gd_metal = "r6gd.metal" + r6i_large = "r6i.large" + r6i_xlarge = "r6i.xlarge" + r6i_2xlarge = "r6i.2xlarge" + r6i_4xlarge = "r6i.4xlarge" + r6i_8xlarge = "r6i.8xlarge" + r6i_12xlarge = "r6i.12xlarge" + r6i_16xlarge = "r6i.16xlarge" + r6i_24xlarge = "r6i.24xlarge" + r6i_32xlarge = "r6i.32xlarge" + r6i_metal = "r6i.metal" + t1_micro = "t1.micro" + t2_nano = "t2.nano" + t2_micro = "t2.micro" + t2_small = "t2.small" + t2_medium = "t2.medium" + t2_large = "t2.large" + t2_xlarge = "t2.xlarge" + t2_2xlarge = "t2.2xlarge" + t3_nano = "t3.nano" + t3_micro = "t3.micro" + t3_small = "t3.small" + t3_medium = "t3.medium" + t3_large = "t3.large" + t3_xlarge = "t3.xlarge" + t3_2xlarge = "t3.2xlarge" + t3a_nano = "t3a.nano" + t3a_micro = "t3a.micro" + t3a_small = "t3a.small" + t3a_medium = "t3a.medium" + t3a_large = "t3a.large" + t3a_xlarge = "t3a.xlarge" + t3a_2xlarge = "t3a.2xlarge" + t4g_nano = "t4g.nano" + t4g_micro = "t4g.micro" + t4g_small = "t4g.small" + t4g_medium = "t4g.medium" + t4g_large = "t4g.large" + t4g_xlarge = "t4g.xlarge" + t4g_2xlarge = "t4g.2xlarge" + u_6tb1_56xlarge = "u-6tb1.56xlarge" + u_6tb1_112xlarge = "u-6tb1.112xlarge" + u_9tb1_112xlarge = "u-9tb1.112xlarge" + u_12tb1_112xlarge = "u-12tb1.112xlarge" + u_6tb1_metal = "u-6tb1.metal" + u_9tb1_metal = "u-9tb1.metal" + u_12tb1_metal = "u-12tb1.metal" + u_18tb1_metal = "u-18tb1.metal" + u_24tb1_metal = "u-24tb1.metal" + vt1_3xlarge = "vt1.3xlarge" + vt1_6xlarge = "vt1.6xlarge" + vt1_24xlarge = "vt1.24xlarge" + x1_16xlarge = "x1.16xlarge" + x1_32xlarge = "x1.32xlarge" + x1e_xlarge = "x1e.xlarge" + x1e_2xlarge = "x1e.2xlarge" + x1e_4xlarge = "x1e.4xlarge" + x1e_8xlarge = "x1e.8xlarge" + x1e_16xlarge = "x1e.16xlarge" + x1e_32xlarge = "x1e.32xlarge" + x2iezn_2xlarge = "x2iezn.2xlarge" + x2iezn_4xlarge = "x2iezn.4xlarge" + x2iezn_6xlarge = "x2iezn.6xlarge" + x2iezn_8xlarge = "x2iezn.8xlarge" + x2iezn_12xlarge = "x2iezn.12xlarge" + x2iezn_metal = "x2iezn.metal" + x2gd_medium = "x2gd.medium" + x2gd_large = "x2gd.large" + x2gd_xlarge = "x2gd.xlarge" + x2gd_2xlarge = "x2gd.2xlarge" + x2gd_4xlarge = "x2gd.4xlarge" + x2gd_8xlarge = "x2gd.8xlarge" + x2gd_12xlarge = "x2gd.12xlarge" + x2gd_16xlarge = "x2gd.16xlarge" + x2gd_metal = "x2gd.metal" + z1d_large = "z1d.large" + z1d_xlarge = "z1d.xlarge" + z1d_2xlarge = "z1d.2xlarge" + z1d_3xlarge = "z1d.3xlarge" + z1d_6xlarge = "z1d.6xlarge" + z1d_12xlarge = "z1d.12xlarge" + z1d_metal = "z1d.metal" + x2idn_16xlarge = "x2idn.16xlarge" + x2idn_24xlarge = "x2idn.24xlarge" + x2idn_32xlarge = "x2idn.32xlarge" + x2iedn_xlarge = "x2iedn.xlarge" + x2iedn_2xlarge = "x2iedn.2xlarge" + x2iedn_4xlarge = "x2iedn.4xlarge" + x2iedn_8xlarge = "x2iedn.8xlarge" + x2iedn_16xlarge = "x2iedn.16xlarge" + x2iedn_24xlarge = "x2iedn.24xlarge" + x2iedn_32xlarge = "x2iedn.32xlarge" + c6a_large = "c6a.large" + c6a_xlarge = "c6a.xlarge" + c6a_2xlarge = "c6a.2xlarge" + c6a_4xlarge = "c6a.4xlarge" + c6a_8xlarge = "c6a.8xlarge" + c6a_12xlarge = 
"c6a.12xlarge" + c6a_16xlarge = "c6a.16xlarge" + c6a_24xlarge = "c6a.24xlarge" + c6a_32xlarge = "c6a.32xlarge" + c6a_48xlarge = "c6a.48xlarge" + c6a_metal = "c6a.metal" + m6a_metal = "m6a.metal" + i4i_large = "i4i.large" + i4i_xlarge = "i4i.xlarge" + i4i_2xlarge = "i4i.2xlarge" + i4i_4xlarge = "i4i.4xlarge" + i4i_8xlarge = "i4i.8xlarge" + i4i_16xlarge = "i4i.16xlarge" + i4i_32xlarge = "i4i.32xlarge" + i4i_metal = "i4i.metal" + x2idn_metal = "x2idn.metal" + x2iedn_metal = "x2iedn.metal" + c7g_medium = "c7g.medium" + c7g_large = "c7g.large" + c7g_xlarge = "c7g.xlarge" + c7g_2xlarge = "c7g.2xlarge" + c7g_4xlarge = "c7g.4xlarge" + c7g_8xlarge = "c7g.8xlarge" + c7g_12xlarge = "c7g.12xlarge" + c7g_16xlarge = "c7g.16xlarge" + mac2_metal = "mac2.metal" + c6id_large = "c6id.large" + c6id_xlarge = "c6id.xlarge" + c6id_2xlarge = "c6id.2xlarge" + c6id_4xlarge = "c6id.4xlarge" + c6id_8xlarge = "c6id.8xlarge" + c6id_12xlarge = "c6id.12xlarge" + c6id_16xlarge = "c6id.16xlarge" + c6id_24xlarge = "c6id.24xlarge" + c6id_32xlarge = "c6id.32xlarge" + c6id_metal = "c6id.metal" + m6id_large = "m6id.large" + m6id_xlarge = "m6id.xlarge" + m6id_2xlarge = "m6id.2xlarge" + m6id_4xlarge = "m6id.4xlarge" + m6id_8xlarge = "m6id.8xlarge" + m6id_12xlarge = "m6id.12xlarge" + m6id_16xlarge = "m6id.16xlarge" + m6id_24xlarge = "m6id.24xlarge" + m6id_32xlarge = "m6id.32xlarge" + m6id_metal = "m6id.metal" + r6id_large = "r6id.large" + r6id_xlarge = "r6id.xlarge" + r6id_2xlarge = "r6id.2xlarge" + r6id_4xlarge = "r6id.4xlarge" + r6id_8xlarge = "r6id.8xlarge" + r6id_12xlarge = "r6id.12xlarge" + r6id_16xlarge = "r6id.16xlarge" + r6id_24xlarge = "r6id.24xlarge" + r6id_32xlarge = "r6id.32xlarge" + r6id_metal = "r6id.metal" + r6a_large = "r6a.large" + r6a_xlarge = "r6a.xlarge" + r6a_2xlarge = "r6a.2xlarge" + r6a_4xlarge = "r6a.4xlarge" + r6a_8xlarge = "r6a.8xlarge" + r6a_12xlarge = "r6a.12xlarge" + r6a_16xlarge = "r6a.16xlarge" + r6a_24xlarge = "r6a.24xlarge" + r6a_32xlarge = "r6a.32xlarge" + r6a_48xlarge = "r6a.48xlarge" + r6a_metal = "r6a.metal" + p4de_24xlarge = "p4de.24xlarge" + u_3tb1_56xlarge = "u-3tb1.56xlarge" + u_18tb1_112xlarge = "u-18tb1.112xlarge" + u_24tb1_112xlarge = "u-24tb1.112xlarge" + trn1_2xlarge = "trn1.2xlarge" + trn1_32xlarge = "trn1.32xlarge" + hpc6id_32xlarge = "hpc6id.32xlarge" + c6in_large = "c6in.large" + c6in_xlarge = "c6in.xlarge" + c6in_2xlarge = "c6in.2xlarge" + c6in_4xlarge = "c6in.4xlarge" + c6in_8xlarge = "c6in.8xlarge" + c6in_12xlarge = "c6in.12xlarge" + c6in_16xlarge = "c6in.16xlarge" + c6in_24xlarge = "c6in.24xlarge" + c6in_32xlarge = "c6in.32xlarge" + m6in_large = "m6in.large" + m6in_xlarge = "m6in.xlarge" + m6in_2xlarge = "m6in.2xlarge" + m6in_4xlarge = "m6in.4xlarge" + m6in_8xlarge = "m6in.8xlarge" + m6in_12xlarge = "m6in.12xlarge" + m6in_16xlarge = "m6in.16xlarge" + m6in_24xlarge = "m6in.24xlarge" + m6in_32xlarge = "m6in.32xlarge" + m6idn_large = "m6idn.large" + m6idn_xlarge = "m6idn.xlarge" + m6idn_2xlarge = "m6idn.2xlarge" + m6idn_4xlarge = "m6idn.4xlarge" + m6idn_8xlarge = "m6idn.8xlarge" + m6idn_12xlarge = "m6idn.12xlarge" + m6idn_16xlarge = "m6idn.16xlarge" + m6idn_24xlarge = "m6idn.24xlarge" + m6idn_32xlarge = "m6idn.32xlarge" + r6in_large = "r6in.large" + r6in_xlarge = "r6in.xlarge" + r6in_2xlarge = "r6in.2xlarge" + r6in_4xlarge = "r6in.4xlarge" + r6in_8xlarge = "r6in.8xlarge" + r6in_12xlarge = "r6in.12xlarge" + r6in_16xlarge = "r6in.16xlarge" + r6in_24xlarge = "r6in.24xlarge" + r6in_32xlarge = "r6in.32xlarge" + r6idn_large = "r6idn.large" + r6idn_xlarge = "r6idn.xlarge" + r6idn_2xlarge 
= "r6idn.2xlarge" + r6idn_4xlarge = "r6idn.4xlarge" + r6idn_8xlarge = "r6idn.8xlarge" + r6idn_12xlarge = "r6idn.12xlarge" + r6idn_16xlarge = "r6idn.16xlarge" + r6idn_24xlarge = "r6idn.24xlarge" + r6idn_32xlarge = "r6idn.32xlarge" + c7g_metal = "c7g.metal" + m7g_medium = "m7g.medium" + m7g_large = "m7g.large" + m7g_xlarge = "m7g.xlarge" + m7g_2xlarge = "m7g.2xlarge" + m7g_4xlarge = "m7g.4xlarge" + m7g_8xlarge = "m7g.8xlarge" + m7g_12xlarge = "m7g.12xlarge" + m7g_16xlarge = "m7g.16xlarge" + m7g_metal = "m7g.metal" + r7g_medium = "r7g.medium" + r7g_large = "r7g.large" + r7g_xlarge = "r7g.xlarge" + r7g_2xlarge = "r7g.2xlarge" + r7g_4xlarge = "r7g.4xlarge" + r7g_8xlarge = "r7g.8xlarge" + r7g_12xlarge = "r7g.12xlarge" + r7g_16xlarge = "r7g.16xlarge" + r7g_metal = "r7g.metal" + c6in_metal = "c6in.metal" + m6in_metal = "m6in.metal" + m6idn_metal = "m6idn.metal" + r6in_metal = "r6in.metal" + r6idn_metal = "r6idn.metal" + inf2_xlarge = "inf2.xlarge" + inf2_8xlarge = "inf2.8xlarge" + inf2_24xlarge = "inf2.24xlarge" + inf2_48xlarge = "inf2.48xlarge" + trn1n_32xlarge = "trn1n.32xlarge" + i4g_large = "i4g.large" + i4g_xlarge = "i4g.xlarge" + i4g_2xlarge = "i4g.2xlarge" + i4g_4xlarge = "i4g.4xlarge" + i4g_8xlarge = "i4g.8xlarge" + i4g_16xlarge = "i4g.16xlarge" + hpc7g_4xlarge = "hpc7g.4xlarge" + hpc7g_8xlarge = "hpc7g.8xlarge" + hpc7g_16xlarge = "hpc7g.16xlarge" + c7gn_medium = "c7gn.medium" + c7gn_large = "c7gn.large" + c7gn_xlarge = "c7gn.xlarge" + c7gn_2xlarge = "c7gn.2xlarge" + c7gn_4xlarge = "c7gn.4xlarge" + c7gn_8xlarge = "c7gn.8xlarge" + c7gn_12xlarge = "c7gn.12xlarge" + c7gn_16xlarge = "c7gn.16xlarge" + p5_48xlarge = "p5.48xlarge" + m7i_large = "m7i.large" + m7i_xlarge = "m7i.xlarge" + m7i_2xlarge = "m7i.2xlarge" + m7i_4xlarge = "m7i.4xlarge" + m7i_8xlarge = "m7i.8xlarge" + m7i_12xlarge = "m7i.12xlarge" + m7i_16xlarge = "m7i.16xlarge" + m7i_24xlarge = "m7i.24xlarge" + m7i_48xlarge = "m7i.48xlarge" + m7i_flex_large = "m7i-flex.large" + m7i_flex_xlarge = "m7i-flex.xlarge" + m7i_flex_2xlarge = "m7i-flex.2xlarge" + m7i_flex_4xlarge = "m7i-flex.4xlarge" + m7i_flex_8xlarge = "m7i-flex.8xlarge" + m7a_medium = "m7a.medium" + m7a_large = "m7a.large" + m7a_xlarge = "m7a.xlarge" + m7a_2xlarge = "m7a.2xlarge" + m7a_4xlarge = "m7a.4xlarge" + m7a_8xlarge = "m7a.8xlarge" + m7a_12xlarge = "m7a.12xlarge" + m7a_16xlarge = "m7a.16xlarge" + m7a_24xlarge = "m7a.24xlarge" + m7a_32xlarge = "m7a.32xlarge" + m7a_48xlarge = "m7a.48xlarge" + m7a_metal_48xl = "m7a.metal-48xl" + hpc7a_12xlarge = "hpc7a.12xlarge" + hpc7a_24xlarge = "hpc7a.24xlarge" + hpc7a_48xlarge = "hpc7a.48xlarge" + hpc7a_96xlarge = "hpc7a.96xlarge" + c7gd_medium = "c7gd.medium" + c7gd_large = "c7gd.large" + c7gd_xlarge = "c7gd.xlarge" + c7gd_2xlarge = "c7gd.2xlarge" + c7gd_4xlarge = "c7gd.4xlarge" + c7gd_8xlarge = "c7gd.8xlarge" + c7gd_12xlarge = "c7gd.12xlarge" + c7gd_16xlarge = "c7gd.16xlarge" + m7gd_medium = "m7gd.medium" + m7gd_large = "m7gd.large" + m7gd_xlarge = "m7gd.xlarge" + m7gd_2xlarge = "m7gd.2xlarge" + m7gd_4xlarge = "m7gd.4xlarge" + m7gd_8xlarge = "m7gd.8xlarge" + m7gd_12xlarge = "m7gd.12xlarge" + m7gd_16xlarge = "m7gd.16xlarge" + r7gd_medium = "r7gd.medium" + r7gd_large = "r7gd.large" + r7gd_xlarge = "r7gd.xlarge" + r7gd_2xlarge = "r7gd.2xlarge" + r7gd_4xlarge = "r7gd.4xlarge" + r7gd_8xlarge = "r7gd.8xlarge" + r7gd_12xlarge = "r7gd.12xlarge" + r7gd_16xlarge = "r7gd.16xlarge" + r7a_medium = "r7a.medium" + r7a_large = "r7a.large" + r7a_xlarge = "r7a.xlarge" + r7a_2xlarge = "r7a.2xlarge" + r7a_4xlarge = "r7a.4xlarge" + r7a_8xlarge = 
"r7a.8xlarge" + r7a_12xlarge = "r7a.12xlarge" + r7a_16xlarge = "r7a.16xlarge" + r7a_24xlarge = "r7a.24xlarge" + r7a_32xlarge = "r7a.32xlarge" + r7a_48xlarge = "r7a.48xlarge" + c7i_large = "c7i.large" + c7i_xlarge = "c7i.xlarge" + c7i_2xlarge = "c7i.2xlarge" + c7i_4xlarge = "c7i.4xlarge" + c7i_8xlarge = "c7i.8xlarge" + c7i_12xlarge = "c7i.12xlarge" + c7i_16xlarge = "c7i.16xlarge" + c7i_24xlarge = "c7i.24xlarge" + c7i_48xlarge = "c7i.48xlarge" + mac2_m2pro_metal = "mac2-m2pro.metal" + r7iz_large = "r7iz.large" + r7iz_xlarge = "r7iz.xlarge" + r7iz_2xlarge = "r7iz.2xlarge" + r7iz_4xlarge = "r7iz.4xlarge" + r7iz_8xlarge = "r7iz.8xlarge" + r7iz_12xlarge = "r7iz.12xlarge" + r7iz_16xlarge = "r7iz.16xlarge" + r7iz_32xlarge = "r7iz.32xlarge" + c7a_medium = "c7a.medium" + c7a_large = "c7a.large" + c7a_xlarge = "c7a.xlarge" + c7a_2xlarge = "c7a.2xlarge" + c7a_4xlarge = "c7a.4xlarge" + c7a_8xlarge = "c7a.8xlarge" + c7a_12xlarge = "c7a.12xlarge" + c7a_16xlarge = "c7a.16xlarge" + c7a_24xlarge = "c7a.24xlarge" + c7a_32xlarge = "c7a.32xlarge" + c7a_48xlarge = "c7a.48xlarge" + c7a_metal_48xl = "c7a.metal-48xl" + r7a_metal_48xl = "r7a.metal-48xl" + r7i_large = "r7i.large" + r7i_xlarge = "r7i.xlarge" + r7i_2xlarge = "r7i.2xlarge" + r7i_4xlarge = "r7i.4xlarge" + r7i_8xlarge = "r7i.8xlarge" + r7i_12xlarge = "r7i.12xlarge" + r7i_16xlarge = "r7i.16xlarge" + r7i_24xlarge = "r7i.24xlarge" + r7i_48xlarge = "r7i.48xlarge" + dl2q_24xlarge = "dl2q.24xlarge" + mac2_m2_metal = "mac2-m2.metal" + i4i_12xlarge = "i4i.12xlarge" + i4i_24xlarge = "i4i.24xlarge" + c7i_metal_24xl = "c7i.metal-24xl" + c7i_metal_48xl = "c7i.metal-48xl" + m7i_metal_24xl = "m7i.metal-24xl" + m7i_metal_48xl = "m7i.metal-48xl" + r7i_metal_24xl = "r7i.metal-24xl" + r7i_metal_48xl = "r7i.metal-48xl" + r7iz_metal_16xl = "r7iz.metal-16xl" + r7iz_metal_32xl = "r7iz.metal-32xl" + c7gd_metal = "c7gd.metal" + m7gd_metal = "m7gd.metal" + r7gd_metal = "r7gd.metal" + g6_xlarge = "g6.xlarge" + g6_2xlarge = "g6.2xlarge" + g6_4xlarge = "g6.4xlarge" + g6_8xlarge = "g6.8xlarge" + g6_12xlarge = "g6.12xlarge" + g6_16xlarge = "g6.16xlarge" + g6_24xlarge = "g6.24xlarge" + g6_48xlarge = "g6.48xlarge" + gr6_4xlarge = "gr6.4xlarge" + gr6_8xlarge = "gr6.8xlarge" + c7i_flex_large = "c7i-flex.large" + c7i_flex_xlarge = "c7i-flex.xlarge" + c7i_flex_2xlarge = "c7i-flex.2xlarge" + c7i_flex_4xlarge = "c7i-flex.4xlarge" + c7i_flex_8xlarge = "c7i-flex.8xlarge" + u7i_12tb_224xlarge = "u7i-12tb.224xlarge" + u7in_16tb_224xlarge = "u7in-16tb.224xlarge" + u7in_24tb_224xlarge = "u7in-24tb.224xlarge" + u7in_32tb_224xlarge = "u7in-32tb.224xlarge" + u7ib_12tb_224xlarge = "u7ib-12tb.224xlarge" + c7gn_metal = "c7gn.metal" + r8g_medium = "r8g.medium" + r8g_large = "r8g.large" + r8g_xlarge = "r8g.xlarge" + r8g_2xlarge = "r8g.2xlarge" + r8g_4xlarge = "r8g.4xlarge" + r8g_8xlarge = "r8g.8xlarge" + r8g_12xlarge = "r8g.12xlarge" + r8g_16xlarge = "r8g.16xlarge" + r8g_24xlarge = "r8g.24xlarge" + r8g_48xlarge = "r8g.48xlarge" + r8g_metal_24xl = "r8g.metal-24xl" + r8g_metal_48xl = "r8g.metal-48xl" + mac2_m1ultra_metal = "mac2-m1ultra.metal" + g6e_xlarge = "g6e.xlarge" + g6e_2xlarge = "g6e.2xlarge" + g6e_4xlarge = "g6e.4xlarge" + g6e_8xlarge = "g6e.8xlarge" + g6e_12xlarge = "g6e.12xlarge" + g6e_16xlarge = "g6e.16xlarge" + g6e_24xlarge = "g6e.24xlarge" + g6e_48xlarge = "g6e.48xlarge" + c8g_medium = "c8g.medium" + c8g_large = "c8g.large" + c8g_xlarge = "c8g.xlarge" + c8g_2xlarge = "c8g.2xlarge" + c8g_4xlarge = "c8g.4xlarge" + c8g_8xlarge = "c8g.8xlarge" + c8g_12xlarge = "c8g.12xlarge" + c8g_16xlarge = 
"c8g.16xlarge" + c8g_24xlarge = "c8g.24xlarge" + c8g_48xlarge = "c8g.48xlarge" + c8g_metal_24xl = "c8g.metal-24xl" + c8g_metal_48xl = "c8g.metal-48xl" + m8g_medium = "m8g.medium" + m8g_large = "m8g.large" + m8g_xlarge = "m8g.xlarge" + m8g_2xlarge = "m8g.2xlarge" + m8g_4xlarge = "m8g.4xlarge" + m8g_8xlarge = "m8g.8xlarge" + m8g_12xlarge = "m8g.12xlarge" + m8g_16xlarge = "m8g.16xlarge" + m8g_24xlarge = "m8g.24xlarge" + m8g_48xlarge = "m8g.48xlarge" + m8g_metal_24xl = "m8g.metal-24xl" + m8g_metal_48xl = "m8g.metal-48xl" + x8g_medium = "x8g.medium" + x8g_large = "x8g.large" + x8g_xlarge = "x8g.xlarge" + x8g_2xlarge = "x8g.2xlarge" + x8g_4xlarge = "x8g.4xlarge" + x8g_8xlarge = "x8g.8xlarge" + x8g_12xlarge = "x8g.12xlarge" + x8g_16xlarge = "x8g.16xlarge" + x8g_24xlarge = "x8g.24xlarge" + x8g_48xlarge = "x8g.48xlarge" + x8g_metal_24xl = "x8g.metal-24xl" + x8g_metal_48xl = "x8g.metal-48xl" + i7ie_large = "i7ie.large" + i7ie_xlarge = "i7ie.xlarge" + i7ie_2xlarge = "i7ie.2xlarge" + i7ie_3xlarge = "i7ie.3xlarge" + i7ie_6xlarge = "i7ie.6xlarge" + i7ie_12xlarge = "i7ie.12xlarge" + i7ie_18xlarge = "i7ie.18xlarge" + i7ie_24xlarge = "i7ie.24xlarge" + i7ie_48xlarge = "i7ie.48xlarge" + i8g_large = "i8g.large" + i8g_xlarge = "i8g.xlarge" + i8g_2xlarge = "i8g.2xlarge" + i8g_4xlarge = "i8g.4xlarge" + i8g_8xlarge = "i8g.8xlarge" + i8g_12xlarge = "i8g.12xlarge" + i8g_16xlarge = "i8g.16xlarge" + i8g_24xlarge = "i8g.24xlarge" + i8g_metal_24xl = "i8g.metal-24xl" + u7i_6tb_112xlarge = "u7i-6tb.112xlarge" + u7i_8tb_112xlarge = "u7i-8tb.112xlarge" + u7inh_32tb_480xlarge = "u7inh-32tb.480xlarge" + p5e_48xlarge = "p5e.48xlarge" + p5en_48xlarge = "p5en.48xlarge" + f2_12xlarge = "f2.12xlarge" + f2_48xlarge = "f2.48xlarge" + trn2_48xlarge = "trn2.48xlarge" + c7i_flex_12xlarge = "c7i-flex.12xlarge" + c7i_flex_16xlarge = "c7i-flex.16xlarge" + m7i_flex_12xlarge = "m7i-flex.12xlarge" + m7i_flex_16xlarge = "m7i-flex.16xlarge" + i7ie_metal_24xl = "i7ie.metal-24xl" + i7ie_metal_48xl = "i7ie.metal-48xl" + i8g_48xlarge = "i8g.48xlarge" + c8gd_medium = "c8gd.medium" + c8gd_large = "c8gd.large" + c8gd_xlarge = "c8gd.xlarge" + c8gd_2xlarge = "c8gd.2xlarge" + c8gd_4xlarge = "c8gd.4xlarge" + c8gd_8xlarge = "c8gd.8xlarge" + c8gd_12xlarge = "c8gd.12xlarge" + c8gd_16xlarge = "c8gd.16xlarge" + c8gd_24xlarge = "c8gd.24xlarge" + c8gd_48xlarge = "c8gd.48xlarge" + c8gd_metal_24xl = "c8gd.metal-24xl" + c8gd_metal_48xl = "c8gd.metal-48xl" + i7i_large = "i7i.large" + i7i_xlarge = "i7i.xlarge" + i7i_2xlarge = "i7i.2xlarge" + i7i_4xlarge = "i7i.4xlarge" + i7i_8xlarge = "i7i.8xlarge" + i7i_12xlarge = "i7i.12xlarge" + i7i_16xlarge = "i7i.16xlarge" + i7i_24xlarge = "i7i.24xlarge" + i7i_48xlarge = "i7i.48xlarge" + i7i_metal_24xl = "i7i.metal-24xl" + i7i_metal_48xl = "i7i.metal-48xl" + p6_b200_48xlarge = "p6-b200.48xlarge" + m8gd_medium = "m8gd.medium" + m8gd_large = "m8gd.large" + m8gd_xlarge = "m8gd.xlarge" + m8gd_2xlarge = "m8gd.2xlarge" + m8gd_4xlarge = "m8gd.4xlarge" + m8gd_8xlarge = "m8gd.8xlarge" + m8gd_12xlarge = "m8gd.12xlarge" + m8gd_16xlarge = "m8gd.16xlarge" + m8gd_24xlarge = "m8gd.24xlarge" + m8gd_48xlarge = "m8gd.48xlarge" + m8gd_metal_24xl = "m8gd.metal-24xl" + m8gd_metal_48xl = "m8gd.metal-48xl" + r8gd_medium = "r8gd.medium" + r8gd_large = "r8gd.large" + r8gd_xlarge = "r8gd.xlarge" + r8gd_2xlarge = "r8gd.2xlarge" + r8gd_4xlarge = "r8gd.4xlarge" + r8gd_8xlarge = "r8gd.8xlarge" + r8gd_12xlarge = "r8gd.12xlarge" + r8gd_16xlarge = "r8gd.16xlarge" + r8gd_24xlarge = "r8gd.24xlarge" + r8gd_48xlarge = "r8gd.48xlarge" + r8gd_metal_24xl = 
"r8gd.metal-24xl" + r8gd_metal_48xl = "r8gd.metal-48xl" + + +class InstanceTypeHypervisor(StrEnum): + nitro = "nitro" + xen = "xen" + + +class InterfacePermissionType(StrEnum): + INSTANCE_ATTACH = "INSTANCE-ATTACH" + EIP_ASSOCIATE = "EIP-ASSOCIATE" + + +class InterfaceProtocolType(StrEnum): + VLAN = "VLAN" + GRE = "GRE" + + +class InternetGatewayBlockMode(StrEnum): + off = "off" + block_bidirectional = "block-bidirectional" + block_ingress = "block-ingress" + + +class InternetGatewayExclusionMode(StrEnum): + allow_bidirectional = "allow-bidirectional" + allow_egress = "allow-egress" + + +class IpAddressType(StrEnum): + ipv4 = "ipv4" + dualstack = "dualstack" + ipv6 = "ipv6" + + +class IpSource(StrEnum): + amazon = "amazon" + byoip = "byoip" + none = "none" + + +class IpamAddressHistoryResourceType(StrEnum): + eip = "eip" + vpc = "vpc" + subnet = "subnet" + network_interface = "network-interface" + instance = "instance" + + +class IpamAssociatedResourceDiscoveryStatus(StrEnum): + active = "active" + not_found = "not-found" + + +class IpamComplianceStatus(StrEnum): + compliant = "compliant" + noncompliant = "noncompliant" + unmanaged = "unmanaged" + ignored = "ignored" + + +class IpamDiscoveryFailureCode(StrEnum): + assume_role_failure = "assume-role-failure" + throttling_failure = "throttling-failure" + unauthorized_failure = "unauthorized-failure" + + +class IpamExternalResourceVerificationTokenState(StrEnum): + create_in_progress = "create-in-progress" + create_complete = "create-complete" + create_failed = "create-failed" + delete_in_progress = "delete-in-progress" + delete_complete = "delete-complete" + delete_failed = "delete-failed" + + +class IpamManagementState(StrEnum): + managed = "managed" + unmanaged = "unmanaged" + ignored = "ignored" + + +class IpamMeteredAccount(StrEnum): + ipam_owner = "ipam-owner" + resource_owner = "resource-owner" + + +class IpamNetworkInterfaceAttachmentStatus(StrEnum): + available = "available" + in_use = "in-use" + + +class IpamOverlapStatus(StrEnum): + overlapping = "overlapping" + nonoverlapping = "nonoverlapping" + ignored = "ignored" + + +class IpamPoolAllocationResourceType(StrEnum): + ipam_pool = "ipam-pool" + vpc = "vpc" + ec2_public_ipv4_pool = "ec2-public-ipv4-pool" + custom = "custom" + subnet = "subnet" + eip = "eip" + + +class IpamPoolAwsService(StrEnum): + ec2 = "ec2" + + +class IpamPoolCidrFailureCode(StrEnum): + cidr_not_available = "cidr-not-available" + limit_exceeded = "limit-exceeded" + + +class IpamPoolCidrState(StrEnum): + pending_provision = "pending-provision" + provisioned = "provisioned" + failed_provision = "failed-provision" + pending_deprovision = "pending-deprovision" + deprovisioned = "deprovisioned" + failed_deprovision = "failed-deprovision" + pending_import = "pending-import" + failed_import = "failed-import" + + +class IpamPoolPublicIpSource(StrEnum): + amazon = "amazon" + byoip = "byoip" + + +class IpamPoolSourceResourceType(StrEnum): + vpc = "vpc" + + +class IpamPoolState(StrEnum): + create_in_progress = "create-in-progress" + create_complete = "create-complete" + create_failed = "create-failed" + modify_in_progress = "modify-in-progress" + modify_complete = "modify-complete" + modify_failed = "modify-failed" + delete_in_progress = "delete-in-progress" + delete_complete = "delete-complete" + delete_failed = "delete-failed" + isolate_in_progress = "isolate-in-progress" + isolate_complete = "isolate-complete" + restore_in_progress = "restore-in-progress" + + +class IpamPublicAddressAssociationStatus(StrEnum): + 
associated = "associated" + disassociated = "disassociated" + + +class IpamPublicAddressAwsService(StrEnum): + nat_gateway = "nat-gateway" + database_migration_service = "database-migration-service" + redshift = "redshift" + elastic_container_service = "elastic-container-service" + relational_database_service = "relational-database-service" + site_to_site_vpn = "site-to-site-vpn" + load_balancer = "load-balancer" + global_accelerator = "global-accelerator" + other = "other" + + +class IpamPublicAddressType(StrEnum): + service_managed_ip = "service-managed-ip" + service_managed_byoip = "service-managed-byoip" + amazon_owned_eip = "amazon-owned-eip" + amazon_owned_contig = "amazon-owned-contig" + byoip = "byoip" + ec2_public_ip = "ec2-public-ip" + + +class IpamResourceCidrIpSource(StrEnum): + amazon = "amazon" + byoip = "byoip" + none = "none" + + +class IpamResourceDiscoveryAssociationState(StrEnum): + associate_in_progress = "associate-in-progress" + associate_complete = "associate-complete" + associate_failed = "associate-failed" + disassociate_in_progress = "disassociate-in-progress" + disassociate_complete = "disassociate-complete" + disassociate_failed = "disassociate-failed" + isolate_in_progress = "isolate-in-progress" + isolate_complete = "isolate-complete" + restore_in_progress = "restore-in-progress" + + +class IpamResourceDiscoveryState(StrEnum): + create_in_progress = "create-in-progress" + create_complete = "create-complete" + create_failed = "create-failed" + modify_in_progress = "modify-in-progress" + modify_complete = "modify-complete" + modify_failed = "modify-failed" + delete_in_progress = "delete-in-progress" + delete_complete = "delete-complete" + delete_failed = "delete-failed" + isolate_in_progress = "isolate-in-progress" + isolate_complete = "isolate-complete" + restore_in_progress = "restore-in-progress" + + +class IpamResourceType(StrEnum): + vpc = "vpc" + subnet = "subnet" + eip = "eip" + public_ipv4_pool = "public-ipv4-pool" + ipv6_pool = "ipv6-pool" + eni = "eni" + + +class IpamScopeState(StrEnum): + create_in_progress = "create-in-progress" + create_complete = "create-complete" + create_failed = "create-failed" + modify_in_progress = "modify-in-progress" + modify_complete = "modify-complete" + modify_failed = "modify-failed" + delete_in_progress = "delete-in-progress" + delete_complete = "delete-complete" + delete_failed = "delete-failed" + isolate_in_progress = "isolate-in-progress" + isolate_complete = "isolate-complete" + restore_in_progress = "restore-in-progress" + + +class IpamScopeType(StrEnum): + public = "public" + private = "private" + + +class IpamState(StrEnum): + create_in_progress = "create-in-progress" + create_complete = "create-complete" + create_failed = "create-failed" + modify_in_progress = "modify-in-progress" + modify_complete = "modify-complete" + modify_failed = "modify-failed" + delete_in_progress = "delete-in-progress" + delete_complete = "delete-complete" + delete_failed = "delete-failed" + isolate_in_progress = "isolate-in-progress" + isolate_complete = "isolate-complete" + restore_in_progress = "restore-in-progress" + + +class IpamTier(StrEnum): + free = "free" + advanced = "advanced" + + +class Ipv6AddressAttribute(StrEnum): + public = "public" + private = "private" + + +class Ipv6SupportValue(StrEnum): + enable = "enable" + disable = "disable" + + +class KeyFormat(StrEnum): + pem = "pem" + ppk = "ppk" + + +class KeyType(StrEnum): + rsa = "rsa" + ed25519 = "ed25519" + + +class LaunchTemplateAutoRecoveryState(StrEnum): + default = 
"default" + disabled = "disabled" + + +class LaunchTemplateErrorCode(StrEnum): + launchTemplateIdDoesNotExist = "launchTemplateIdDoesNotExist" + launchTemplateIdMalformed = "launchTemplateIdMalformed" + launchTemplateNameDoesNotExist = "launchTemplateNameDoesNotExist" + launchTemplateNameMalformed = "launchTemplateNameMalformed" + launchTemplateVersionDoesNotExist = "launchTemplateVersionDoesNotExist" + unexpectedError = "unexpectedError" + + +class LaunchTemplateHttpTokensState(StrEnum): + optional = "optional" + required = "required" + + +class LaunchTemplateInstanceMetadataEndpointState(StrEnum): + disabled = "disabled" + enabled = "enabled" + + +class LaunchTemplateInstanceMetadataOptionsState(StrEnum): + pending = "pending" + applied = "applied" + + +class LaunchTemplateInstanceMetadataProtocolIpv6(StrEnum): + disabled = "disabled" + enabled = "enabled" + + +class LaunchTemplateInstanceMetadataTagsState(StrEnum): + disabled = "disabled" + enabled = "enabled" + + +class ListingState(StrEnum): + available = "available" + sold = "sold" + cancelled = "cancelled" + pending = "pending" + + +class ListingStatus(StrEnum): + active = "active" + pending = "pending" + cancelled = "cancelled" + closed = "closed" + + +class LocalGatewayRouteState(StrEnum): + pending = "pending" + active = "active" + blackhole = "blackhole" + deleting = "deleting" + deleted = "deleted" + + +class LocalGatewayRouteTableMode(StrEnum): + direct_vpc_routing = "direct-vpc-routing" + coip = "coip" + + +class LocalGatewayRouteType(StrEnum): + static = "static" + propagated = "propagated" + + +class LocalGatewayVirtualInterfaceConfigurationState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class LocalGatewayVirtualInterfaceGroupConfigurationState(StrEnum): + pending = "pending" + incomplete = "incomplete" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class LocalStorage(StrEnum): + included = "included" + required = "required" + excluded = "excluded" + + +class LocalStorageType(StrEnum): + hdd = "hdd" + ssd = "ssd" + + +class LocationType(StrEnum): + region = "region" + availability_zone = "availability-zone" + availability_zone_id = "availability-zone-id" + outpost = "outpost" + + +class LockMode(StrEnum): + compliance = "compliance" + governance = "governance" + + +class LockState(StrEnum): + compliance = "compliance" + governance = "governance" + compliance_cooloff = "compliance-cooloff" + expired = "expired" + + +class LogDestinationType(StrEnum): + cloud_watch_logs = "cloud-watch-logs" + s3 = "s3" + kinesis_data_firehose = "kinesis-data-firehose" + + +class MacModificationTaskState(StrEnum): + successful = "successful" + failed = "failed" + in_progress = "in-progress" + pending = "pending" + + +class MacModificationTaskType(StrEnum): + sip_modification = "sip-modification" + volume_ownership_delegation = "volume-ownership-delegation" + + +class MacSystemIntegrityProtectionSettingStatus(StrEnum): + enabled = "enabled" + disabled = "disabled" + + +class ManagedBy(StrEnum): + account = "account" + declarative_policy = "declarative-policy" + + +class MarketType(StrEnum): + spot = "spot" + capacity_block = "capacity-block" + + +class MembershipType(StrEnum): + static = "static" + igmp = "igmp" + + +class MetadataDefaultHttpTokensState(StrEnum): + optional = "optional" + required = "required" + no_preference = "no-preference" + + +class MetricType(StrEnum): + aggregate_latency = "aggregate-latency" + + +class 
ModifyAvailabilityZoneOptInStatus(StrEnum):
+    opted_in = "opted-in"
+    not_opted_in = "not-opted-in"
+
+
+class MonitoringState(StrEnum):
+    disabled = "disabled"
+    disabling = "disabling"
+    enabled = "enabled"
+    pending = "pending"
+
+
+class MoveStatus(StrEnum):
+    movingToVpc = "movingToVpc"
+    restoringToClassic = "restoringToClassic"
+
+
+class MulticastSupportValue(StrEnum):
+    enable = "enable"
+    disable = "disable"
+
+
+class NatGatewayAddressStatus(StrEnum):
+    assigning = "assigning"
+    unassigning = "unassigning"
+    associating = "associating"
+    disassociating = "disassociating"
+    succeeded = "succeeded"
+    failed = "failed"
+
+
+class NatGatewayState(StrEnum):
+    pending = "pending"
+    failed = "failed"
+    available = "available"
+    deleting = "deleting"
+    deleted = "deleted"
+
+
+class NetworkInterfaceAttribute(StrEnum):
+    description = "description"
+    groupSet = "groupSet"
+    sourceDestCheck = "sourceDestCheck"
+    attachment = "attachment"
+    associatePublicIpAddress = "associatePublicIpAddress"
+
+
+class NetworkInterfaceCreationType(StrEnum):
+    efa = "efa"
+    efa_only = "efa-only"
+    branch = "branch"
+    trunk = "trunk"
+
+
+class NetworkInterfacePermissionStateCode(StrEnum):
+    pending = "pending"
+    granted = "granted"
+    revoking = "revoking"
+    revoked = "revoked"
+
+
+class NetworkInterfaceStatus(StrEnum):
+    available = "available"
+    associated = "associated"
+    attaching = "attaching"
+    in_use = "in-use"
+    detaching = "detaching"
+
+
+class NetworkInterfaceType(StrEnum):
+    interface = "interface"
+    natGateway = "natGateway"
+    efa = "efa"
+    efa_only = "efa-only"
+    trunk = "trunk"
+    load_balancer = "load_balancer"
+    network_load_balancer = "network_load_balancer"
+    vpc_endpoint = "vpc_endpoint"
+    branch = "branch"
+    transit_gateway = "transit_gateway"
+    lambda_ = "lambda"
+    quicksight = "quicksight"
+    global_accelerator_managed = "global_accelerator_managed"
+    api_gateway_managed = "api_gateway_managed"
+    gateway_load_balancer = "gateway_load_balancer"
+    gateway_load_balancer_endpoint = "gateway_load_balancer_endpoint"
+    iot_rules_managed = "iot_rules_managed"
+    aws_codestar_connections_managed = "aws_codestar_connections_managed"
+
+
+class NitroEnclavesSupport(StrEnum):
+    unsupported = "unsupported"
+    supported = "supported"
+
+
+class NitroTpmSupport(StrEnum):
+    unsupported = "unsupported"
+    supported = "supported"
+
+
+class OfferingClassType(StrEnum):
+    standard = "standard"
+    convertible = "convertible"
+
+
+class OfferingTypeValues(StrEnum):
+    Heavy_Utilization = "Heavy Utilization"
+    Medium_Utilization = "Medium Utilization"
+    Light_Utilization = "Light Utilization"
+    No_Upfront = "No Upfront"
+    Partial_Upfront = "Partial Upfront"
+    All_Upfront = "All Upfront"
+
+
+class OnDemandAllocationStrategy(StrEnum):
+    lowestPrice = "lowestPrice"
+    prioritized = "prioritized"
+
+
+class OperationType(StrEnum):
+    add = "add"
+    remove = "remove"
+
+
+class PartitionLoadFrequency(StrEnum):
+    none = "none"
+    daily = "daily"
+    weekly = "weekly"
+    monthly = "monthly"
+
+
+class PayerResponsibility(StrEnum):
+    ServiceOwner = "ServiceOwner"
+
+
+class PaymentOption(StrEnum):
+    AllUpfront = "AllUpfront"
+    PartialUpfront = "PartialUpfront"
+    NoUpfront = "NoUpfront"
+
+
+class PeriodType(StrEnum):
+    five_minutes = "five-minutes"
+    fifteen_minutes = "fifteen-minutes"
+    one_hour = "one-hour"
+    three_hours = "three-hours"
+    one_day = "one-day"
+    one_week = "one-week"
+
+
+class PermissionGroup(StrEnum):
+    all = "all"
+
+
+class PhcSupport(StrEnum):
+    unsupported = "unsupported"
+    supported = "supported"
+
+
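Reviewer note on the pattern above: every generated enum derives from `StrEnum`, so members are real strings whose values are the exact wire strings the EC2 API exchanges. A minimal, self-contained sketch of that behavior (the `str`/`Enum` mixin below is an assumption standing in for the module's own `StrEnum` import; `enum.StrEnum` on Python 3.11+ behaves the same way):

```python
from enum import Enum


class StrEnum(str, Enum):  # assumed stand-in for the module's StrEnum
    """Enum whose members are also str instances."""


class NatGatewayState(StrEnum):  # copied from the definitions above
    pending = "pending"
    failed = "failed"
    available = "available"
    deleting = "deleting"
    deleted = "deleted"


# Members compare equal to their wire values, so they can be dropped
# straight into (or parsed straight out of) serialized API payloads:
assert NatGatewayState.available == "available"
assert NatGatewayState("deleting") is NatGatewayState.deleting
```

This is what lets the marshalling layer treat enum-typed fields as plain strings, with no translation table between members and protocol values.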
+class PlacementGroupState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class PlacementGroupStrategy(StrEnum): + cluster = "cluster" + partition = "partition" + spread = "spread" + + +class PlacementStrategy(StrEnum): + cluster = "cluster" + spread = "spread" + partition = "partition" + + +class PlatformValues(StrEnum): + Windows = "Windows" + + +class PrefixListState(StrEnum): + create_in_progress = "create-in-progress" + create_complete = "create-complete" + create_failed = "create-failed" + modify_in_progress = "modify-in-progress" + modify_complete = "modify-complete" + modify_failed = "modify-failed" + restore_in_progress = "restore-in-progress" + restore_complete = "restore-complete" + restore_failed = "restore-failed" + delete_in_progress = "delete-in-progress" + delete_complete = "delete-complete" + delete_failed = "delete-failed" + + +class PrincipalType(StrEnum): + All = "All" + Service = "Service" + OrganizationUnit = "OrganizationUnit" + Account = "Account" + User = "User" + Role = "Role" + + +class ProductCodeValues(StrEnum): + devpay = "devpay" + marketplace = "marketplace" + + +class Protocol(StrEnum): + tcp = "tcp" + udp = "udp" + + +class ProtocolValue(StrEnum): + gre = "gre" + + +class PublicIpDnsOption(StrEnum): + public_dual_stack_dns_name = "public-dual-stack-dns-name" + public_ipv4_dns_name = "public-ipv4-dns-name" + public_ipv6_dns_name = "public-ipv6-dns-name" + + +class RIProductDescription(StrEnum): + Linux_UNIX = "Linux/UNIX" + Linux_UNIX_Amazon_VPC_ = "Linux/UNIX (Amazon VPC)" + Windows = "Windows" + Windows_Amazon_VPC_ = "Windows (Amazon VPC)" + + +class RebootMigrationSupport(StrEnum): + unsupported = "unsupported" + supported = "supported" + + +class RecurringChargeFrequency(StrEnum): + Hourly = "Hourly" + + +class ReplaceRootVolumeTaskState(StrEnum): + pending = "pending" + in_progress = "in-progress" + failing = "failing" + succeeded = "succeeded" + failed = "failed" + failed_detached = "failed-detached" + + +class ReplacementStrategy(StrEnum): + launch = "launch" + launch_before_terminate = "launch-before-terminate" + + +class ReportInstanceReasonCodes(StrEnum): + instance_stuck_in_state = "instance-stuck-in-state" + unresponsive = "unresponsive" + not_accepting_credentials = "not-accepting-credentials" + password_not_available = "password-not-available" + performance_network = "performance-network" + performance_instance_store = "performance-instance-store" + performance_ebs_volume = "performance-ebs-volume" + performance_other = "performance-other" + other = "other" + + +class ReportState(StrEnum): + running = "running" + cancelled = "cancelled" + complete = "complete" + error = "error" + + +class ReportStatusType(StrEnum): + ok = "ok" + impaired = "impaired" + + +class ReservationState(StrEnum): + payment_pending = "payment-pending" + payment_failed = "payment-failed" + active = "active" + retired = "retired" + + +class ReservedInstanceState(StrEnum): + payment_pending = "payment-pending" + active = "active" + payment_failed = "payment-failed" + retired = "retired" + queued = "queued" + queued_deleted = "queued-deleted" + + +class ResetFpgaImageAttributeName(StrEnum): + loadPermission = "loadPermission" + + +class ResetImageAttributeName(StrEnum): + launchPermission = "launchPermission" + + +class ResourceType(StrEnum): + capacity_reservation = "capacity-reservation" + client_vpn_endpoint = "client-vpn-endpoint" + customer_gateway = "customer-gateway" + carrier_gateway = "carrier-gateway" + 
coip_pool = "coip-pool" + declarative_policies_report = "declarative-policies-report" + dedicated_host = "dedicated-host" + dhcp_options = "dhcp-options" + egress_only_internet_gateway = "egress-only-internet-gateway" + elastic_ip = "elastic-ip" + elastic_gpu = "elastic-gpu" + export_image_task = "export-image-task" + export_instance_task = "export-instance-task" + fleet = "fleet" + fpga_image = "fpga-image" + host_reservation = "host-reservation" + image = "image" + import_image_task = "import-image-task" + import_snapshot_task = "import-snapshot-task" + instance = "instance" + instance_event_window = "instance-event-window" + internet_gateway = "internet-gateway" + ipam = "ipam" + ipam_pool = "ipam-pool" + ipam_scope = "ipam-scope" + ipv4pool_ec2 = "ipv4pool-ec2" + ipv6pool_ec2 = "ipv6pool-ec2" + key_pair = "key-pair" + launch_template = "launch-template" + local_gateway = "local-gateway" + local_gateway_route_table = "local-gateway-route-table" + local_gateway_virtual_interface = "local-gateway-virtual-interface" + local_gateway_virtual_interface_group = "local-gateway-virtual-interface-group" + local_gateway_route_table_vpc_association = "local-gateway-route-table-vpc-association" + local_gateway_route_table_virtual_interface_group_association = ( + "local-gateway-route-table-virtual-interface-group-association" + ) + natgateway = "natgateway" + network_acl = "network-acl" + network_interface = "network-interface" + network_insights_analysis = "network-insights-analysis" + network_insights_path = "network-insights-path" + network_insights_access_scope = "network-insights-access-scope" + network_insights_access_scope_analysis = "network-insights-access-scope-analysis" + outpost_lag = "outpost-lag" + placement_group = "placement-group" + prefix_list = "prefix-list" + replace_root_volume_task = "replace-root-volume-task" + reserved_instances = "reserved-instances" + route_table = "route-table" + security_group = "security-group" + security_group_rule = "security-group-rule" + service_link_virtual_interface = "service-link-virtual-interface" + snapshot = "snapshot" + spot_fleet_request = "spot-fleet-request" + spot_instances_request = "spot-instances-request" + subnet = "subnet" + subnet_cidr_reservation = "subnet-cidr-reservation" + traffic_mirror_filter = "traffic-mirror-filter" + traffic_mirror_session = "traffic-mirror-session" + traffic_mirror_target = "traffic-mirror-target" + transit_gateway = "transit-gateway" + transit_gateway_attachment = "transit-gateway-attachment" + transit_gateway_connect_peer = "transit-gateway-connect-peer" + transit_gateway_multicast_domain = "transit-gateway-multicast-domain" + transit_gateway_policy_table = "transit-gateway-policy-table" + transit_gateway_route_table = "transit-gateway-route-table" + transit_gateway_route_table_announcement = "transit-gateway-route-table-announcement" + volume = "volume" + vpc = "vpc" + vpc_endpoint = "vpc-endpoint" + vpc_endpoint_connection = "vpc-endpoint-connection" + vpc_endpoint_service = "vpc-endpoint-service" + vpc_endpoint_service_permission = "vpc-endpoint-service-permission" + vpc_peering_connection = "vpc-peering-connection" + vpn_connection = "vpn-connection" + vpn_gateway = "vpn-gateway" + vpc_flow_log = "vpc-flow-log" + capacity_reservation_fleet = "capacity-reservation-fleet" + traffic_mirror_filter_rule = "traffic-mirror-filter-rule" + vpc_endpoint_connection_device_type = "vpc-endpoint-connection-device-type" + verified_access_instance = "verified-access-instance" + verified_access_group = 
"verified-access-group" + verified_access_endpoint = "verified-access-endpoint" + verified_access_policy = "verified-access-policy" + verified_access_trust_provider = "verified-access-trust-provider" + vpn_connection_device_type = "vpn-connection-device-type" + vpc_block_public_access_exclusion = "vpc-block-public-access-exclusion" + route_server = "route-server" + route_server_endpoint = "route-server-endpoint" + route_server_peer = "route-server-peer" + ipam_resource_discovery = "ipam-resource-discovery" + ipam_resource_discovery_association = "ipam-resource-discovery-association" + instance_connect_endpoint = "instance-connect-endpoint" + verified_access_endpoint_target = "verified-access-endpoint-target" + ipam_external_resource_verification_token = "ipam-external-resource-verification-token" + mac_modification_task = "mac-modification-task" + + +class RootDeviceType(StrEnum): + ebs = "ebs" + instance_store = "instance-store" + + +class RouteOrigin(StrEnum): + CreateRouteTable = "CreateRouteTable" + CreateRoute = "CreateRoute" + EnableVgwRoutePropagation = "EnableVgwRoutePropagation" + + +class RouteServerAssociationState(StrEnum): + associating = "associating" + associated = "associated" + disassociating = "disassociating" + + +class RouteServerBfdState(StrEnum): + up = "up" + down = "down" + + +class RouteServerBgpState(StrEnum): + up = "up" + down = "down" + + +class RouteServerEndpointState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + failing = "failing" + failed = "failed" + delete_failed = "delete-failed" + + +class RouteServerPeerLivenessMode(StrEnum): + bfd = "bfd" + bgp_keepalive = "bgp-keepalive" + + +class RouteServerPeerState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + failing = "failing" + failed = "failed" + + +class RouteServerPersistRoutesAction(StrEnum): + enable = "enable" + disable = "disable" + reset = "reset" + + +class RouteServerPersistRoutesState(StrEnum): + enabling = "enabling" + enabled = "enabled" + resetting = "resetting" + disabling = "disabling" + disabled = "disabled" + modifying = "modifying" + + +class RouteServerPropagationState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + + +class RouteServerRouteInstallationStatus(StrEnum): + installed = "installed" + rejected = "rejected" + + +class RouteServerRouteStatus(StrEnum): + in_rib = "in-rib" + in_fib = "in-fib" + + +class RouteServerState(StrEnum): + pending = "pending" + available = "available" + modifying = "modifying" + deleting = "deleting" + deleted = "deleted" + + +class RouteState(StrEnum): + active = "active" + blackhole = "blackhole" + + +class RouteTableAssociationStateCode(StrEnum): + associating = "associating" + associated = "associated" + disassociating = "disassociating" + disassociated = "disassociated" + failed = "failed" + + +class RuleAction(StrEnum): + allow = "allow" + deny = "deny" + + +class SSEType(StrEnum): + sse_ebs = "sse-ebs" + sse_kms = "sse-kms" + none = "none" + + +class SecurityGroupReferencingSupportValue(StrEnum): + enable = "enable" + disable = "disable" + + +class SecurityGroupVpcAssociationState(StrEnum): + associating = "associating" + associated = "associated" + association_failed = "association-failed" + disassociating = "disassociating" + disassociated = "disassociated" + disassociation_failed = "disassociation-failed" + + +class SelfServicePortal(StrEnum): + enabled = "enabled" + disabled = "disabled" + + 
+class ServiceConnectivityType(StrEnum): + ipv4 = "ipv4" + ipv6 = "ipv6" + + +class ServiceLinkVirtualInterfaceConfigurationState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class ServiceManaged(StrEnum): + alb = "alb" + nlb = "nlb" + rnat = "rnat" + + +class ServiceState(StrEnum): + Pending = "Pending" + Available = "Available" + Deleting = "Deleting" + Deleted = "Deleted" + Failed = "Failed" + + +class ServiceType(StrEnum): + Interface = "Interface" + Gateway = "Gateway" + GatewayLoadBalancer = "GatewayLoadBalancer" + + +class ShutdownBehavior(StrEnum): + stop = "stop" + terminate = "terminate" + + +class SnapshotAttributeName(StrEnum): + productCodes = "productCodes" + createVolumePermission = "createVolumePermission" + + +class SnapshotBlockPublicAccessState(StrEnum): + block_all_sharing = "block-all-sharing" + block_new_sharing = "block-new-sharing" + unblocked = "unblocked" + + +class SnapshotLocationEnum(StrEnum): + regional = "regional" + local = "local" + + +class SnapshotReturnCodes(StrEnum): + success = "success" + skipped = "skipped" + missing_permissions = "missing-permissions" + internal_error = "internal-error" + client_error = "client-error" + + +class SnapshotState(StrEnum): + pending = "pending" + completed = "completed" + error = "error" + recoverable = "recoverable" + recovering = "recovering" + + +class SpotAllocationStrategy(StrEnum): + lowest_price = "lowest-price" + diversified = "diversified" + capacity_optimized = "capacity-optimized" + capacity_optimized_prioritized = "capacity-optimized-prioritized" + price_capacity_optimized = "price-capacity-optimized" + + +class SpotInstanceInterruptionBehavior(StrEnum): + hibernate = "hibernate" + stop = "stop" + terminate = "terminate" + + +class SpotInstanceState(StrEnum): + open = "open" + active = "active" + closed = "closed" + cancelled = "cancelled" + failed = "failed" + disabled = "disabled" + + +class SpotInstanceType(StrEnum): + one_time = "one-time" + persistent = "persistent" + + +class SpreadLevel(StrEnum): + host = "host" + rack = "rack" + + +class State(StrEnum): + PendingAcceptance = "PendingAcceptance" + Pending = "Pending" + Available = "Available" + Deleting = "Deleting" + Deleted = "Deleted" + Rejected = "Rejected" + Failed = "Failed" + Expired = "Expired" + Partial = "Partial" + + +class StaticSourcesSupportValue(StrEnum): + enable = "enable" + disable = "disable" + + +class StatisticType(StrEnum): + p50 = "p50" + + +class Status(StrEnum): + MoveInProgress = "MoveInProgress" + InVpc = "InVpc" + InClassic = "InClassic" + + +class StatusName(StrEnum): + reachability = "reachability" + + +class StatusType(StrEnum): + passed = "passed" + failed = "failed" + insufficient_data = "insufficient-data" + initializing = "initializing" + + +class StorageTier(StrEnum): + archive = "archive" + standard = "standard" + + +class SubnetCidrBlockStateCode(StrEnum): + associating = "associating" + associated = "associated" + disassociating = "disassociating" + disassociated = "disassociated" + failing = "failing" + failed = "failed" + + +class SubnetCidrReservationType(StrEnum): + prefix = "prefix" + explicit = "explicit" + + +class SubnetState(StrEnum): + pending = "pending" + available = "available" + unavailable = "unavailable" + + +class SummaryStatus(StrEnum): + ok = "ok" + impaired = "impaired" + insufficient_data = "insufficient-data" + not_applicable = "not-applicable" + initializing = "initializing" + + +class 
SupportedAdditionalProcessorFeature(StrEnum): + amd_sev_snp = "amd-sev-snp" + + +class TargetCapacityUnitType(StrEnum): + vcpu = "vcpu" + memory_mib = "memory-mib" + units = "units" + + +class TargetStorageTier(StrEnum): + archive = "archive" + + +class TelemetryStatus(StrEnum): + UP = "UP" + DOWN = "DOWN" + + +class Tenancy(StrEnum): + default = "default" + dedicated = "dedicated" + host = "host" + + +class TieringOperationStatus(StrEnum): + archival_in_progress = "archival-in-progress" + archival_completed = "archival-completed" + archival_failed = "archival-failed" + temporary_restore_in_progress = "temporary-restore-in-progress" + temporary_restore_completed = "temporary-restore-completed" + temporary_restore_failed = "temporary-restore-failed" + permanent_restore_in_progress = "permanent-restore-in-progress" + permanent_restore_completed = "permanent-restore-completed" + permanent_restore_failed = "permanent-restore-failed" + + +class TokenState(StrEnum): + valid = "valid" + expired = "expired" + + +class TpmSupportValues(StrEnum): + v2_0 = "v2.0" + + +class TrafficDirection(StrEnum): + ingress = "ingress" + egress = "egress" + + +class TrafficMirrorFilterRuleField(StrEnum): + destination_port_range = "destination-port-range" + source_port_range = "source-port-range" + protocol = "protocol" + description = "description" + + +class TrafficMirrorNetworkService(StrEnum): + amazon_dns = "amazon-dns" + + +class TrafficMirrorRuleAction(StrEnum): + accept = "accept" + reject = "reject" + + +class TrafficMirrorSessionField(StrEnum): + packet_length = "packet-length" + description = "description" + virtual_network_id = "virtual-network-id" + + +class TrafficMirrorTargetType(StrEnum): + network_interface = "network-interface" + network_load_balancer = "network-load-balancer" + gateway_load_balancer_endpoint = "gateway-load-balancer-endpoint" + + +class TrafficType(StrEnum): + ACCEPT = "ACCEPT" + REJECT = "REJECT" + ALL = "ALL" + + +class TransferType(StrEnum): + time_based = "time-based" + standard = "standard" + + +class TransitGatewayAssociationState(StrEnum): + associating = "associating" + associated = "associated" + disassociating = "disassociating" + disassociated = "disassociated" + + +class TransitGatewayAttachmentResourceType(StrEnum): + vpc = "vpc" + vpn = "vpn" + direct_connect_gateway = "direct-connect-gateway" + connect = "connect" + peering = "peering" + tgw_peering = "tgw-peering" + + +class TransitGatewayAttachmentState(StrEnum): + initiating = "initiating" + initiatingRequest = "initiatingRequest" + pendingAcceptance = "pendingAcceptance" + rollingBack = "rollingBack" + pending = "pending" + available = "available" + modifying = "modifying" + deleting = "deleting" + deleted = "deleted" + failed = "failed" + rejected = "rejected" + rejecting = "rejecting" + failing = "failing" + + +class TransitGatewayConnectPeerState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class TransitGatewayMulitcastDomainAssociationState(StrEnum): + pendingAcceptance = "pendingAcceptance" + associating = "associating" + associated = "associated" + disassociating = "disassociating" + disassociated = "disassociated" + rejected = "rejected" + failed = "failed" + + +class TransitGatewayMulticastDomainState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class TransitGatewayPolicyTableState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = 
"deleted" + + +class TransitGatewayPrefixListReferenceState(StrEnum): + pending = "pending" + available = "available" + modifying = "modifying" + deleting = "deleting" + + +class TransitGatewayPropagationState(StrEnum): + enabling = "enabling" + enabled = "enabled" + disabling = "disabling" + disabled = "disabled" + + +class TransitGatewayRouteState(StrEnum): + pending = "pending" + active = "active" + blackhole = "blackhole" + deleting = "deleting" + deleted = "deleted" + + +class TransitGatewayRouteTableAnnouncementDirection(StrEnum): + outgoing = "outgoing" + incoming = "incoming" + + +class TransitGatewayRouteTableAnnouncementState(StrEnum): + available = "available" + pending = "pending" + failing = "failing" + failed = "failed" + deleting = "deleting" + deleted = "deleted" + + +class TransitGatewayRouteTableState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class TransitGatewayRouteType(StrEnum): + static = "static" + propagated = "propagated" + + +class TransitGatewayState(StrEnum): + pending = "pending" + available = "available" + modifying = "modifying" + deleting = "deleting" + deleted = "deleted" + + +class TransportProtocol(StrEnum): + tcp = "tcp" + udp = "udp" + + +class TrustProviderType(StrEnum): + user = "user" + device = "device" + + +class TunnelInsideIpVersion(StrEnum): + ipv4 = "ipv4" + ipv6 = "ipv6" + + +class UnlimitedSupportedInstanceFamily(StrEnum): + t2 = "t2" + t3 = "t3" + t3a = "t3a" + t4g = "t4g" + + +class UnsuccessfulInstanceCreditSpecificationErrorCode(StrEnum): + InvalidInstanceID_Malformed = "InvalidInstanceID.Malformed" + InvalidInstanceID_NotFound = "InvalidInstanceID.NotFound" + IncorrectInstanceState = "IncorrectInstanceState" + InstanceCreditSpecification_NotSupported = "InstanceCreditSpecification.NotSupported" + + +class UsageClassType(StrEnum): + spot = "spot" + on_demand = "on-demand" + capacity_block = "capacity-block" + + +class UserTrustProviderType(StrEnum): + iam_identity_center = "iam-identity-center" + oidc = "oidc" + + +class VerificationMethod(StrEnum): + remarks_x509 = "remarks-x509" + dns_token = "dns-token" + + +class VerifiedAccessEndpointAttachmentType(StrEnum): + vpc = "vpc" + + +class VerifiedAccessEndpointProtocol(StrEnum): + http = "http" + https = "https" + tcp = "tcp" + + +class VerifiedAccessEndpointStatusCode(StrEnum): + pending = "pending" + active = "active" + updating = "updating" + deleting = "deleting" + deleted = "deleted" + + +class VerifiedAccessEndpointType(StrEnum): + load_balancer = "load-balancer" + network_interface = "network-interface" + rds = "rds" + cidr = "cidr" + + +class VerifiedAccessLogDeliveryStatusCode(StrEnum): + success = "success" + failed = "failed" + + +class VirtualizationType(StrEnum): + hvm = "hvm" + paravirtual = "paravirtual" + + +class VolumeAttachmentState(StrEnum): + attaching = "attaching" + attached = "attached" + detaching = "detaching" + detached = "detached" + busy = "busy" + + +class VolumeAttributeName(StrEnum): + autoEnableIO = "autoEnableIO" + productCodes = "productCodes" + + +class VolumeModificationState(StrEnum): + modifying = "modifying" + optimizing = "optimizing" + completed = "completed" + failed = "failed" + + +class VolumeState(StrEnum): + creating = "creating" + available = "available" + in_use = "in-use" + deleting = "deleting" + deleted = "deleted" + error = "error" + + +class VolumeStatusInfoStatus(StrEnum): + ok = "ok" + impaired = "impaired" + insufficient_data = "insufficient-data" + + +class 
VolumeStatusName(StrEnum): + io_enabled = "io-enabled" + io_performance = "io-performance" + + +class VolumeType(StrEnum): + standard = "standard" + io1 = "io1" + io2 = "io2" + gp2 = "gp2" + sc1 = "sc1" + st1 = "st1" + gp3 = "gp3" + + +class VpcAttributeName(StrEnum): + enableDnsSupport = "enableDnsSupport" + enableDnsHostnames = "enableDnsHostnames" + enableNetworkAddressUsageMetrics = "enableNetworkAddressUsageMetrics" + + +class VpcBlockPublicAccessExclusionState(StrEnum): + create_in_progress = "create-in-progress" + create_complete = "create-complete" + create_failed = "create-failed" + update_in_progress = "update-in-progress" + update_complete = "update-complete" + update_failed = "update-failed" + delete_in_progress = "delete-in-progress" + delete_complete = "delete-complete" + disable_in_progress = "disable-in-progress" + disable_complete = "disable-complete" + + +class VpcBlockPublicAccessExclusionsAllowed(StrEnum): + allowed = "allowed" + not_allowed = "not-allowed" + + +class VpcBlockPublicAccessState(StrEnum): + default_state = "default-state" + update_in_progress = "update-in-progress" + update_complete = "update-complete" + + +class VpcCidrBlockStateCode(StrEnum): + associating = "associating" + associated = "associated" + disassociating = "disassociating" + disassociated = "disassociated" + failing = "failing" + failed = "failed" + + +class VpcEncryptionControlExclusionState(StrEnum): + enabling = "enabling" + enabled = "enabled" + disabling = "disabling" + disabled = "disabled" + + +class VpcEncryptionControlMode(StrEnum): + monitor = "monitor" + enforce = "enforce" + + +class VpcEncryptionControlState(StrEnum): + enforce_in_progress = "enforce-in-progress" + monitor_in_progress = "monitor-in-progress" + enforce_failed = "enforce-failed" + monitor_failed = "monitor-failed" + deleting = "deleting" + deleted = "deleted" + available = "available" + creating = "creating" + delete_failed = "delete-failed" + + +class VpcEndpointType(StrEnum): + Interface = "Interface" + Gateway = "Gateway" + GatewayLoadBalancer = "GatewayLoadBalancer" + Resource = "Resource" + ServiceNetwork = "ServiceNetwork" + + +class VpcPeeringConnectionStateReasonCode(StrEnum): + initiating_request = "initiating-request" + pending_acceptance = "pending-acceptance" + active = "active" + deleted = "deleted" + rejected = "rejected" + failed = "failed" + expired = "expired" + provisioning = "provisioning" + deleting = "deleting" + + +class VpcState(StrEnum): + pending = "pending" + available = "available" + + +class VpcTenancy(StrEnum): + default = "default" + + +class VpnEcmpSupportValue(StrEnum): + enable = "enable" + disable = "disable" + + +class VpnProtocol(StrEnum): + openvpn = "openvpn" + + +class VpnState(StrEnum): + pending = "pending" + available = "available" + deleting = "deleting" + deleted = "deleted" + + +class VpnStaticRouteSource(StrEnum): + Static = "Static" + + +class VpnTunnelProvisioningStatus(StrEnum): + available = "available" + pending = "pending" + failed = "failed" + + +class WeekDay(StrEnum): + sunday = "sunday" + monday = "monday" + tuesday = "tuesday" + wednesday = "wednesday" + thursday = "thursday" + friday = "friday" + saturday = "saturday" + + +class scope(StrEnum): + Availability_Zone = "Availability Zone" + Region = "Region" + + +class AcceleratorCount(TypedDict, total=False): + Min: Optional[Integer] + Max: Optional[Integer] + + +class AcceleratorCountRequest(TypedDict, total=False): + Min: Optional[Integer] + Max: Optional[Integer] + + +AcceleratorManufacturerSet = 
List[AcceleratorManufacturer] +AcceleratorNameSet = List[AcceleratorName] + + +class AcceleratorTotalMemoryMiB(TypedDict, total=False): + Min: Optional[Integer] + Max: Optional[Integer] + + +class AcceleratorTotalMemoryMiBRequest(TypedDict, total=False): + Min: Optional[Integer] + Max: Optional[Integer] + + +AcceleratorTypeSet = List[AcceleratorType] + + +class Tag(TypedDict, total=False): + Key: Optional[String] + Value: Optional[String] + + +TagList = List[Tag] + + +class TagSpecification(TypedDict, total=False): + ResourceType: Optional[ResourceType] + Tags: Optional[TagList] + + +TagSpecificationList = List[TagSpecification] + + +class AcceptAddressTransferRequest(ServiceRequest): + Address: String + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +MillisecondDateTime = datetime + + +class AddressTransfer(TypedDict, total=False): + PublicIp: Optional[String] + AllocationId: Optional[String] + TransferAccountId: Optional[String] + TransferOfferExpirationTimestamp: Optional[MillisecondDateTime] + TransferOfferAcceptedTimestamp: Optional[MillisecondDateTime] + AddressTransferStatus: Optional[AddressTransferStatus] + + +class AcceptAddressTransferResult(TypedDict, total=False): + AddressTransfer: Optional[AddressTransfer] + + +class AcceptCapacityReservationBillingOwnershipRequest(ServiceRequest): + DryRun: Optional[Boolean] + CapacityReservationId: CapacityReservationId + + +class AcceptCapacityReservationBillingOwnershipResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class TargetConfigurationRequest(TypedDict, total=False): + InstanceCount: Optional[Integer] + OfferingId: ReservedInstancesOfferingId + + +TargetConfigurationRequestSet = List[TargetConfigurationRequest] +ReservedInstanceIdSet = List[ReservationId] + + +class AcceptReservedInstancesExchangeQuoteRequest(ServiceRequest): + DryRun: Optional[Boolean] + ReservedInstanceIds: ReservedInstanceIdSet + TargetConfigurations: Optional[TargetConfigurationRequestSet] + + +class AcceptReservedInstancesExchangeQuoteResult(TypedDict, total=False): + ExchangeId: Optional[String] + + +ValueStringList = List[String] + + +class AcceptTransitGatewayMulticastDomainAssociationsRequest(ServiceRequest): + TransitGatewayMulticastDomainId: Optional[TransitGatewayMulticastDomainId] + TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + SubnetIds: Optional[ValueStringList] + DryRun: Optional[Boolean] + + +class SubnetAssociation(TypedDict, total=False): + SubnetId: Optional[String] + State: Optional[TransitGatewayMulitcastDomainAssociationState] + + +SubnetAssociationList = List[SubnetAssociation] + + +class TransitGatewayMulticastDomainAssociations(TypedDict, total=False): + TransitGatewayMulticastDomainId: Optional[String] + TransitGatewayAttachmentId: Optional[String] + ResourceId: Optional[String] + ResourceType: Optional[TransitGatewayAttachmentResourceType] + ResourceOwnerId: Optional[String] + Subnets: Optional[SubnetAssociationList] + + +class AcceptTransitGatewayMulticastDomainAssociationsResult(TypedDict, total=False): + Associations: Optional[TransitGatewayMulticastDomainAssociations] + + +class AcceptTransitGatewayPeeringAttachmentRequest(ServiceRequest): + TransitGatewayAttachmentId: TransitGatewayAttachmentId + DryRun: Optional[Boolean] + + +DateTime = datetime + + +class PeeringAttachmentStatus(TypedDict, total=False): + Code: Optional[String] + Message: Optional[String] + + +class TransitGatewayPeeringAttachmentOptions(TypedDict, total=False): + DynamicRouting: 
Optional[DynamicRoutingValue] + + +class PeeringTgwInfo(TypedDict, total=False): + TransitGatewayId: Optional[String] + CoreNetworkId: Optional[String] + OwnerId: Optional[String] + Region: Optional[String] + + +class TransitGatewayPeeringAttachment(TypedDict, total=False): + TransitGatewayAttachmentId: Optional[String] + AccepterTransitGatewayAttachmentId: Optional[String] + RequesterTgwInfo: Optional[PeeringTgwInfo] + AccepterTgwInfo: Optional[PeeringTgwInfo] + Options: Optional[TransitGatewayPeeringAttachmentOptions] + Status: Optional[PeeringAttachmentStatus] + State: Optional[TransitGatewayAttachmentState] + CreationTime: Optional[DateTime] + Tags: Optional[TagList] + + +class AcceptTransitGatewayPeeringAttachmentResult(TypedDict, total=False): + TransitGatewayPeeringAttachment: Optional[TransitGatewayPeeringAttachment] + + +class AcceptTransitGatewayVpcAttachmentRequest(ServiceRequest): + TransitGatewayAttachmentId: TransitGatewayAttachmentId + DryRun: Optional[Boolean] + + +class TransitGatewayVpcAttachmentOptions(TypedDict, total=False): + DnsSupport: Optional[DnsSupportValue] + SecurityGroupReferencingSupport: Optional[SecurityGroupReferencingSupportValue] + Ipv6Support: Optional[Ipv6SupportValue] + ApplianceModeSupport: Optional[ApplianceModeSupportValue] + + +class TransitGatewayVpcAttachment(TypedDict, total=False): + TransitGatewayAttachmentId: Optional[String] + TransitGatewayId: Optional[String] + VpcId: Optional[String] + VpcOwnerId: Optional[String] + State: Optional[TransitGatewayAttachmentState] + SubnetIds: Optional[ValueStringList] + CreationTime: Optional[DateTime] + Options: Optional[TransitGatewayVpcAttachmentOptions] + Tags: Optional[TagList] + + +class AcceptTransitGatewayVpcAttachmentResult(TypedDict, total=False): + TransitGatewayVpcAttachment: Optional[TransitGatewayVpcAttachment] + + +VpcEndpointIdList = List[VpcEndpointId] + + +class AcceptVpcEndpointConnectionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + ServiceId: VpcEndpointServiceId + VpcEndpointIds: VpcEndpointIdList + + +class UnsuccessfulItemError(TypedDict, total=False): + Code: Optional[String] + Message: Optional[String] + + +class UnsuccessfulItem(TypedDict, total=False): + Error: Optional[UnsuccessfulItemError] + ResourceId: Optional[String] + + +UnsuccessfulItemSet = List[UnsuccessfulItem] + + +class AcceptVpcEndpointConnectionsResult(TypedDict, total=False): + Unsuccessful: Optional[UnsuccessfulItemSet] + + +class AcceptVpcPeeringConnectionRequest(ServiceRequest): + DryRun: Optional[Boolean] + VpcPeeringConnectionId: VpcPeeringConnectionIdWithResolver + + +class VpcPeeringConnectionStateReason(TypedDict, total=False): + Code: Optional[VpcPeeringConnectionStateReasonCode] + Message: Optional[String] + + +class VpcPeeringConnectionOptionsDescription(TypedDict, total=False): + AllowDnsResolutionFromRemoteVpc: Optional[Boolean] + AllowEgressFromLocalClassicLinkToRemoteVpc: Optional[Boolean] + AllowEgressFromLocalVpcToRemoteClassicLink: Optional[Boolean] + + +class CidrBlock(TypedDict, total=False): + CidrBlock: Optional[String] + + +CidrBlockSet = List[CidrBlock] + + +class Ipv6CidrBlock(TypedDict, total=False): + Ipv6CidrBlock: Optional[String] + + +Ipv6CidrBlockSet = List[Ipv6CidrBlock] + + +class VpcPeeringConnectionVpcInfo(TypedDict, total=False): + CidrBlock: Optional[String] + Ipv6CidrBlockSet: Optional[Ipv6CidrBlockSet] + CidrBlockSet: Optional[CidrBlockSet] + OwnerId: Optional[String] + PeeringOptions: Optional[VpcPeeringConnectionOptionsDescription] + VpcId: Optional[String] + 
Region: Optional[String] + + +class VpcPeeringConnection(TypedDict, total=False): + AccepterVpcInfo: Optional[VpcPeeringConnectionVpcInfo] + ExpirationTime: Optional[DateTime] + RequesterVpcInfo: Optional[VpcPeeringConnectionVpcInfo] + Status: Optional[VpcPeeringConnectionStateReason] + Tags: Optional[TagList] + VpcPeeringConnectionId: Optional[String] + + +class AcceptVpcPeeringConnectionResult(TypedDict, total=False): + VpcPeeringConnection: Optional[VpcPeeringConnection] + + +class PortRange(TypedDict, total=False): + From: Optional[Integer] + To: Optional[Integer] + + +PortRangeList = List[PortRange] + + +class FirewallStatefulRule(TypedDict, total=False): + RuleGroupArn: Optional[ResourceArn] + Sources: Optional[ValueStringList] + Destinations: Optional[ValueStringList] + SourcePorts: Optional[PortRangeList] + DestinationPorts: Optional[PortRangeList] + Protocol: Optional[String] + RuleAction: Optional[String] + Direction: Optional[String] + + +ProtocolIntList = List[ProtocolInt] + + +class FirewallStatelessRule(TypedDict, total=False): + RuleGroupArn: Optional[ResourceArn] + Sources: Optional[ValueStringList] + Destinations: Optional[ValueStringList] + SourcePorts: Optional[PortRangeList] + DestinationPorts: Optional[PortRangeList] + Protocols: Optional[ProtocolIntList] + RuleAction: Optional[String] + Priority: Optional[Priority] + + +class AnalysisComponent(TypedDict, total=False): + Id: Optional[String] + Arn: Optional[String] + Name: Optional[String] + + +class TransitGatewayRouteTableRoute(TypedDict, total=False): + DestinationCidr: Optional[String] + State: Optional[String] + RouteOrigin: Optional[String] + PrefixListId: Optional[String] + AttachmentId: Optional[String] + ResourceId: Optional[String] + ResourceType: Optional[String] + + +AnalysisComponentList = List[AnalysisComponent] + + +class AnalysisSecurityGroupRule(TypedDict, total=False): + Cidr: Optional[String] + Direction: Optional[String] + SecurityGroupId: Optional[String] + PortRange: Optional[PortRange] + PrefixListId: Optional[String] + Protocol: Optional[String] + + +class AnalysisRouteTableRoute(TypedDict, total=False): + DestinationCidr: Optional[String] + DestinationPrefixListId: Optional[String] + EgressOnlyInternetGatewayId: Optional[String] + GatewayId: Optional[String] + InstanceId: Optional[String] + NatGatewayId: Optional[String] + NetworkInterfaceId: Optional[String] + Origin: Optional[String] + TransitGatewayId: Optional[String] + VpcPeeringConnectionId: Optional[String] + State: Optional[String] + CarrierGatewayId: Optional[String] + CoreNetworkArn: Optional[ResourceArn] + LocalGatewayId: Optional[String] + + +StringList = List[String] + + +class AnalysisLoadBalancerTarget(TypedDict, total=False): + Address: Optional[IpAddress] + AvailabilityZone: Optional[String] + AvailabilityZoneId: Optional[String] + Instance: Optional[AnalysisComponent] + Port: Optional[Port] + + +class AnalysisLoadBalancerListener(TypedDict, total=False): + LoadBalancerPort: Optional[Port] + InstancePort: Optional[Port] + + +IpAddressList = List[IpAddress] + + +class AnalysisAclRule(TypedDict, total=False): + Cidr: Optional[String] + Egress: Optional[Boolean] + PortRange: Optional[PortRange] + Protocol: Optional[String] + RuleAction: Optional[String] + RuleNumber: Optional[Integer] + + +class Explanation(TypedDict, total=False): + Acl: Optional[AnalysisComponent] + AclRule: Optional[AnalysisAclRule] + Address: Optional[IpAddress] + Addresses: Optional[IpAddressList] + AttachedTo: Optional[AnalysisComponent] + AvailabilityZones: 
Optional[ValueStringList]
+    AvailabilityZoneIds: Optional[ValueStringList]
+    Cidrs: Optional[ValueStringList]
+    Component: Optional[AnalysisComponent]
+    CustomerGateway: Optional[AnalysisComponent]
+    Destination: Optional[AnalysisComponent]
+    DestinationVpc: Optional[AnalysisComponent]
+    Direction: Optional[String]
+    ExplanationCode: Optional[String]
+    IngressRouteTable: Optional[AnalysisComponent]
+    InternetGateway: Optional[AnalysisComponent]
+    LoadBalancerArn: Optional[ResourceArn]
+    ClassicLoadBalancerListener: Optional[AnalysisLoadBalancerListener]
+    LoadBalancerListenerPort: Optional[Port]
+    LoadBalancerTarget: Optional[AnalysisLoadBalancerTarget]
+    LoadBalancerTargetGroup: Optional[AnalysisComponent]
+    LoadBalancerTargetGroups: Optional[AnalysisComponentList]
+    LoadBalancerTargetPort: Optional[Port]
+    ElasticLoadBalancerListener: Optional[AnalysisComponent]
+    MissingComponent: Optional[String]
+    NatGateway: Optional[AnalysisComponent]
+    NetworkInterface: Optional[AnalysisComponent]
+    PacketField: Optional[String]
+    VpcPeeringConnection: Optional[AnalysisComponent]
+    Port: Optional[Port]
+    PortRanges: Optional[PortRangeList]
+    PrefixList: Optional[AnalysisComponent]
+    Protocols: Optional[StringList]
+    RouteTableRoute: Optional[AnalysisRouteTableRoute]
+    RouteTable: Optional[AnalysisComponent]
+    SecurityGroup: Optional[AnalysisComponent]
+    SecurityGroupRule: Optional[AnalysisSecurityGroupRule]
+    SecurityGroups: Optional[AnalysisComponentList]
+    SourceVpc: Optional[AnalysisComponent]
+    State: Optional[String]
+    Subnet: Optional[AnalysisComponent]
+    SubnetRouteTable: Optional[AnalysisComponent]
+    Vpc: Optional[AnalysisComponent]
+    VpcEndpoint: Optional[AnalysisComponent]
+    VpnConnection: Optional[AnalysisComponent]
+    VpnGateway: Optional[AnalysisComponent]
+    TransitGateway: Optional[AnalysisComponent]
+    TransitGatewayRouteTable: Optional[AnalysisComponent]
+    TransitGatewayRouteTableRoute: Optional[TransitGatewayRouteTableRoute]
+    TransitGatewayAttachment: Optional[AnalysisComponent]
+    ComponentAccount: Optional[ComponentAccount]
+    ComponentRegion: Optional[ComponentRegion]
+    FirewallStatelessRule: Optional[FirewallStatelessRule]
+    FirewallStatefulRule: Optional[FirewallStatefulRule]
+
+
+ExplanationList = List[Explanation]
+
+
+class RuleOption(TypedDict, total=False):
+    Keyword: Optional[String]
+    Settings: Optional[StringList]
+
+
+RuleOptionList = List[RuleOption]
+
+
+class RuleGroupRuleOptionsPair(TypedDict, total=False):
+    RuleGroupArn: Optional[ResourceArn]
+    RuleOptions: Optional[RuleOptionList]
+
+
+RuleGroupRuleOptionsPairList = List[RuleGroupRuleOptionsPair]
+
+
+class RuleGroupTypePair(TypedDict, total=False):
+    RuleGroupArn: Optional[ResourceArn]
+    RuleGroupType: Optional[String]
+
+
+RuleGroupTypePairList = List[RuleGroupTypePair]
+
+
+class AdditionalDetail(TypedDict, total=False):
+    AdditionalDetailType: Optional[String]
+    Component: Optional[AnalysisComponent]
+    VpcEndpointService: Optional[AnalysisComponent]
+    RuleOptions: Optional[RuleOptionList]
+    RuleGroupTypePairs: Optional[RuleGroupTypePairList]
+    RuleGroupRuleOptionsPairs: Optional[RuleGroupRuleOptionsPairList]
+    ServiceName: Optional[String]
+    LoadBalancers: Optional[AnalysisComponentList]
+
+
+AdditionalDetailList = List[AdditionalDetail]
+
+
+class AnalysisPacketHeader(TypedDict, total=False):
+    DestinationAddresses: Optional[IpAddressList]
+    DestinationPortRanges: Optional[PortRangeList]
+    Protocol: Optional[String]
+    SourceAddresses: Optional[IpAddressList]
+    SourcePortRanges: Optional[PortRangeList]
+
+
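The request/response shapes in this file are all `TypedDict`s with `total=False`: at runtime they are ordinary dicts, and every key is optional to the type checker, mirroring how sparsely the wire protocol populates them. A self-contained sketch, with the module's `String`/`Integer` aliases replaced by the builtins they alias and the shapes trimmed to a couple of fields:

```python
from typing import List, Optional, TypedDict


class PortRange(TypedDict, total=False):  # mirrors the definition above
    From: Optional[int]
    To: Optional[int]


class AnalysisPacketHeader(TypedDict, total=False):  # trimmed for brevity
    Protocol: Optional[str]
    DestinationPortRanges: Optional[List[PortRange]]


# total=False means this passes type checking even though most keys are
# omitted; at runtime it is just a dict, and no validation is performed.
header: AnalysisPacketHeader = {
    "Protocol": "tcp",
    "DestinationPortRanges": [{"From": 443, "To": 443}],
}
```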
+class PathComponent(TypedDict, total=False):
+    SequenceNumber: Optional[Integer]
+    AclRule: Optional[AnalysisAclRule]
+    AttachedTo: Optional[AnalysisComponent]
+    Component: Optional[AnalysisComponent]
+    DestinationVpc: Optional[AnalysisComponent]
+    OutboundHeader: Optional[AnalysisPacketHeader]
+    InboundHeader: Optional[AnalysisPacketHeader]
+    RouteTableRoute: Optional[AnalysisRouteTableRoute]
+    SecurityGroupRule: Optional[AnalysisSecurityGroupRule]
+    SourceVpc: Optional[AnalysisComponent]
+    Subnet: Optional[AnalysisComponent]
+    Vpc: Optional[AnalysisComponent]
+    AdditionalDetails: Optional[AdditionalDetailList]
+    TransitGateway: Optional[AnalysisComponent]
+    TransitGatewayRouteTableRoute: Optional[TransitGatewayRouteTableRoute]
+    Explanations: Optional[ExplanationList]
+    ElasticLoadBalancerListener: Optional[AnalysisComponent]
+    FirewallStatelessRule: Optional[FirewallStatelessRule]
+    FirewallStatefulRule: Optional[FirewallStatefulRule]
+    ServiceName: Optional[String]
+
+
+PathComponentList = List[PathComponent]
+
+
+class AccessScopeAnalysisFinding(TypedDict, total=False):
+    NetworkInsightsAccessScopeAnalysisId: Optional[NetworkInsightsAccessScopeAnalysisId]
+    NetworkInsightsAccessScopeId: Optional[NetworkInsightsAccessScopeId]
+    FindingId: Optional[String]
+    FindingComponents: Optional[PathComponentList]
+
+
+AccessScopeAnalysisFindingList = List[AccessScopeAnalysisFinding]
+
+
+class ResourceStatement(TypedDict, total=False):
+    Resources: Optional[ValueStringList]
+    ResourceTypes: Optional[ValueStringList]
+
+
+class ThroughResourcesStatement(TypedDict, total=False):
+    ResourceStatement: Optional[ResourceStatement]
+
+
+ThroughResourcesStatementList = List[ThroughResourcesStatement]
+ProtocolList = List[Protocol]
+
+
+class PacketHeaderStatement(TypedDict, total=False):
+    SourceAddresses: Optional[ValueStringList]
+    DestinationAddresses: Optional[ValueStringList]
+    SourcePorts: Optional[ValueStringList]
+    DestinationPorts: Optional[ValueStringList]
+    SourcePrefixLists: Optional[ValueStringList]
+    DestinationPrefixLists: Optional[ValueStringList]
+    Protocols: Optional[ProtocolList]
+
+
+class PathStatement(TypedDict, total=False):
+    PacketHeaderStatement: Optional[PacketHeaderStatement]
+    ResourceStatement: Optional[ResourceStatement]
+
+
+class AccessScopePath(TypedDict, total=False):
+    Source: Optional[PathStatement]
+    Destination: Optional[PathStatement]
+    ThroughResources: Optional[ThroughResourcesStatementList]
+
+
+AccessScopePathList = List[AccessScopePath]
+
+
+class ResourceStatementRequest(TypedDict, total=False):
+    Resources: Optional[ValueStringList]
+    ResourceTypes: Optional[ValueStringList]
+
+
+class ThroughResourcesStatementRequest(TypedDict, total=False):
+    ResourceStatement: Optional[ResourceStatementRequest]
+
+
+ThroughResourcesStatementRequestList = List[ThroughResourcesStatementRequest]
+
+
+class PacketHeaderStatementRequest(TypedDict, total=False):
+    SourceAddresses: Optional[ValueStringList]
+    DestinationAddresses: Optional[ValueStringList]
+    SourcePorts: Optional[ValueStringList]
+    DestinationPorts: Optional[ValueStringList]
+    SourcePrefixLists: Optional[ValueStringList]
+    DestinationPrefixLists: Optional[ValueStringList]
+    Protocols: Optional[ProtocolList]
+
+
+class PathStatementRequest(TypedDict, total=False):
+    PacketHeaderStatement: Optional[PacketHeaderStatementRequest]
+    ResourceStatement: Optional[ResourceStatementRequest]
+
+
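The *Request shapes mirror the response shapes field-for-field and nest the same way. A minimal sketch (hypothetical values) of a path statement built from the request shapes above:

from localstack.aws.api.ec2 import PacketHeaderStatementRequest, PathStatementRequest

# match HTTPS traffic originating from one CIDR
source = PathStatementRequest(
    PacketHeaderStatement=PacketHeaderStatementRequest(
        SourceAddresses=["10.0.0.0/16"],  # hypothetical CIDR
        DestinationPorts=["443"],
        Protocols=["tcp"],
    )
)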
+class AccessScopePathRequest(TypedDict, total=False):
+    Source: Optional[PathStatementRequest]
+    Destination: Optional[PathStatementRequest]
+    ThroughResources: Optional[ThroughResourcesStatementRequestList]
+
+
+AccessScopePathListRequest = List[AccessScopePathRequest]
+
+
+class AccountAttributeValue(TypedDict, total=False):
+    AttributeValue: Optional[String]
+
+
+AccountAttributeValueList = List[AccountAttributeValue]
+
+
+class AccountAttribute(TypedDict, total=False):
+    AttributeName: Optional[String]
+    AttributeValues: Optional[AccountAttributeValueList]
+
+
+AccountAttributeList = List[AccountAttribute]
+AccountAttributeNameStringList = List[AccountAttributeName]
+
+
+class ActiveInstance(TypedDict, total=False):
+    InstanceId: Optional[String]
+    InstanceType: Optional[String]
+    SpotInstanceRequestId: Optional[String]
+    InstanceHealth: Optional[InstanceHealthStatus]
+
+
+ActiveInstanceSet = List[ActiveInstance]
+
+
+class ActiveVpnTunnelStatus(TypedDict, total=False):
+    Phase1EncryptionAlgorithm: Optional[String]
+    Phase2EncryptionAlgorithm: Optional[String]
+    Phase1IntegrityAlgorithm: Optional[String]
+    Phase2IntegrityAlgorithm: Optional[String]
+    Phase1DHGroup: Optional[Integer]
+    Phase2DHGroup: Optional[Integer]
+    IkeVersion: Optional[String]
+    ProvisioningStatus: Optional[VpnTunnelProvisioningStatus]
+    ProvisioningStatusReason: Optional[String]
+
+
+class AddIpamOperatingRegion(TypedDict, total=False):
+    RegionName: Optional[String]
+
+
+AddIpamOperatingRegionSet = List[AddIpamOperatingRegion]
+
+
+class AddIpamOrganizationalUnitExclusion(TypedDict, total=False):
+    OrganizationsEntityPath: Optional[String]
+
+
+AddIpamOrganizationalUnitExclusionSet = List[AddIpamOrganizationalUnitExclusion]
+
+
+class AddPrefixListEntry(TypedDict, total=False):
+    Cidr: String
+    Description: Optional[String]
+
+
+AddPrefixListEntries = List[AddPrefixListEntry]
+
+
+class AddedPrincipal(TypedDict, total=False):
+    PrincipalType: Optional[PrincipalType]
+    Principal: Optional[String]
+    ServicePermissionId: Optional[String]
+    ServiceId: Optional[String]
+
+
+AddedPrincipalSet = List[AddedPrincipal]
+
+
+class Address(TypedDict, total=False):
+    AllocationId: Optional[String]
+    AssociationId: Optional[String]
+    Domain: Optional[DomainType]
+    NetworkInterfaceId: Optional[String]
+    NetworkInterfaceOwnerId: Optional[String]
+    PrivateIpAddress: Optional[String]
+    Tags: Optional[TagList]
+    PublicIpv4Pool: Optional[String]
+    NetworkBorderGroup: Optional[String]
+    CustomerOwnedIp: Optional[String]
+    CustomerOwnedIpv4Pool: Optional[String]
+    CarrierIp: Optional[String]
+    ServiceManaged: Optional[ServiceManaged]
+    InstanceId: Optional[String]
+    PublicIp: Optional[String]
+
+
+class PtrUpdateStatus(TypedDict, total=False):
+    Value: Optional[String]
+    Status: Optional[String]
+    Reason: Optional[String]
+
+
+class AddressAttribute(TypedDict, total=False):
+    PublicIp: Optional[PublicIpAddress]
+    AllocationId: Optional[AllocationId]
+    PtrRecord: Optional[String]
+    PtrRecordUpdate: Optional[PtrUpdateStatus]
+
+
+AddressList = List[Address]
+AddressSet = List[AddressAttribute]
+AddressTransferList = List[AddressTransfer]
+
+
+class AdvertiseByoipCidrRequest(ServiceRequest):
+    Cidr: String
+    Asn: Optional[String]
+    DryRun: Optional[Boolean]
+    NetworkBorderGroup: Optional[String]
+
+
+class AsnAssociation(TypedDict, total=False):
+    Asn: Optional[String]
+    Cidr: Optional[String]
+    StatusMessage: Optional[String]
+    State: Optional[AsnAssociationState]
+
+
+AsnAssociationSet = List[AsnAssociation]
+
+
+class ByoipCidr(TypedDict, total=False):
+    Cidr: Optional[String]
+    Description: Optional[String]
+    AsnAssociations: Optional[AsnAssociationSet]
+    StatusMessage: Optional[String]
+    State: Optional[ByoipCidrState]
+    NetworkBorderGroup: Optional[String]
+
+
+class AdvertiseByoipCidrResult(TypedDict, total=False):
+    ByoipCidr: Optional[ByoipCidr]
+
+
+class AllocateAddressRequest(ServiceRequest):
+    Domain: Optional[DomainType]
+    Address: Optional[PublicIpAddress]
+    PublicIpv4Pool: Optional[Ipv4PoolEc2Id]
+    NetworkBorderGroup: Optional[String]
+    CustomerOwnedIpv4Pool: Optional[String]
+    TagSpecifications: Optional[TagSpecificationList]
+    IpamPoolId: Optional[IpamPoolId]
+    DryRun: Optional[Boolean]
+
+
+class AllocateAddressResult(TypedDict, total=False):
+    AllocationId: Optional[String]
+    PublicIpv4Pool: Optional[String]
+    NetworkBorderGroup: Optional[String]
+    Domain: Optional[DomainType]
+    CustomerOwnedIp: Optional[String]
+    CustomerOwnedIpv4Pool: Optional[String]
+    CarrierIp: Optional[String]
+    PublicIp: Optional[String]
+
+
+AssetIdList = List[AssetId]
+
+
+class AllocateHostsRequest(ServiceRequest):
+    InstanceFamily: Optional[String]
+    TagSpecifications: Optional[TagSpecificationList]
+    HostRecovery: Optional[HostRecovery]
+    OutpostArn: Optional[String]
+    HostMaintenance: Optional[HostMaintenance]
+    AssetIds: Optional[AssetIdList]
+    AvailabilityZoneId: Optional[AvailabilityZoneId]
+    AutoPlacement: Optional[AutoPlacement]
+    ClientToken: Optional[String]
+    InstanceType: Optional[String]
+    Quantity: Optional[Integer]
+    AvailabilityZone: Optional[String]
+
+
+ResponseHostIdList = List[String]
+
+
+class AllocateHostsResult(TypedDict, total=False):
+    HostIds: Optional[ResponseHostIdList]
+
+
+IpamPoolAllocationDisallowedCidrs = List[String]
+IpamPoolAllocationAllowedCidrs = List[String]
+
+
+class AllocateIpamPoolCidrRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamPoolId: IpamPoolId
+    Cidr: Optional[String]
+    NetmaskLength: Optional[Integer]
+    ClientToken: Optional[String]
+    Description: Optional[String]
+    PreviewNextCidr: Optional[Boolean]
+    AllowedCidrs: Optional[IpamPoolAllocationAllowedCidrs]
+    DisallowedCidrs: Optional[IpamPoolAllocationDisallowedCidrs]
+
+
+class IpamPoolAllocation(TypedDict, total=False):
+    Cidr: Optional[String]
+    IpamPoolAllocationId: Optional[IpamPoolAllocationId]
+    Description: Optional[String]
+    ResourceId: Optional[String]
+    ResourceType: Optional[IpamPoolAllocationResourceType]
+    ResourceRegion: Optional[String]
+    ResourceOwner: Optional[String]
+
+
+class AllocateIpamPoolCidrResult(TypedDict, total=False):
+    IpamPoolAllocation: Optional[IpamPoolAllocation]
+
+
+AllocationIdList = List[AllocationId]
+AllocationIds = List[AllocationId]
+AllowedInstanceTypeSet = List[AllowedInstanceType]
+
+
+class AllowedPrincipal(TypedDict, total=False):
+    PrincipalType: Optional[PrincipalType]
+    Principal: Optional[String]
+    ServicePermissionId: Optional[String]
+    Tags: Optional[TagList]
+    ServiceId: Optional[String]
+
+
+AllowedPrincipalSet = List[AllowedPrincipal]
+
+
+class AlternatePathHint(TypedDict, total=False):
+    ComponentId: Optional[String]
+    ComponentArn: Optional[String]
+
+
+AlternatePathHintList = List[AlternatePathHint]
+ClientVpnSecurityGroupIdSet = List[SecurityGroupId]
+
+
+class ApplySecurityGroupsToClientVpnTargetNetworkRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    VpcId: VpcId
+    SecurityGroupIds: ClientVpnSecurityGroupIdSet
+    DryRun: Optional[Boolean]
+
+
+class ApplySecurityGroupsToClientVpnTargetNetworkResult(TypedDict, total=False):
+    SecurityGroupIds: Optional[ClientVpnSecurityGroupIdSet]
+
+
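AllocateAddressRequest and AllocateAddressResult back the Elastic IP allocation operation; a minimal client-side sketch (assuming a LocalStack endpoint at localhost:4566):

import boto3

ec2 = boto3.client("ec2", endpoint_url="http://localhost:4566", region_name="us-east-1")
# the response keys line up with AllocateAddressResult (AllocationId, PublicIp, Domain, ...)
address = ec2.allocate_address(Domain="vpc")
print(address["AllocationId"], address["PublicIp"])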
+ArchitectureTypeList = List[ArchitectureType]
+ArchitectureTypeSet = List[ArchitectureType]
+ArnList = List[ResourceArn]
+AsPath = List[String]
+
+
+class AsnAuthorizationContext(TypedDict, total=False):
+    Message: String
+    Signature: String
+
+
+Ipv6AddressList = List[String]
+IpPrefixList = List[String]
+
+
+class AssignIpv6AddressesRequest(ServiceRequest):
+    Ipv6PrefixCount: Optional[Integer]
+    Ipv6Prefixes: Optional[IpPrefixList]
+    NetworkInterfaceId: NetworkInterfaceId
+    Ipv6Addresses: Optional[Ipv6AddressList]
+    Ipv6AddressCount: Optional[Integer]
+
+
+class AssignIpv6AddressesResult(TypedDict, total=False):
+    AssignedIpv6Addresses: Optional[Ipv6AddressList]
+    AssignedIpv6Prefixes: Optional[IpPrefixList]
+    NetworkInterfaceId: Optional[String]
+
+
+PrivateIpAddressStringList = List[String]
+
+
+class AssignPrivateIpAddressesRequest(ServiceRequest):
+    Ipv4Prefixes: Optional[IpPrefixList]
+    Ipv4PrefixCount: Optional[Integer]
+    NetworkInterfaceId: NetworkInterfaceId
+    PrivateIpAddresses: Optional[PrivateIpAddressStringList]
+    SecondaryPrivateIpAddressCount: Optional[Integer]
+    AllowReassignment: Optional[Boolean]
+
+
+class Ipv4PrefixSpecification(TypedDict, total=False):
+    Ipv4Prefix: Optional[String]
+
+
+Ipv4PrefixesList = List[Ipv4PrefixSpecification]
+
+
+class AssignedPrivateIpAddress(TypedDict, total=False):
+    PrivateIpAddress: Optional[String]
+
+
+AssignedPrivateIpAddressList = List[AssignedPrivateIpAddress]
+
+
+class AssignPrivateIpAddressesResult(TypedDict, total=False):
+    NetworkInterfaceId: Optional[String]
+    AssignedPrivateIpAddresses: Optional[AssignedPrivateIpAddressList]
+    AssignedIpv4Prefixes: Optional[Ipv4PrefixesList]
+
+
+IpList = List[String]
+
+
+class AssignPrivateNatGatewayAddressRequest(ServiceRequest):
+    NatGatewayId: NatGatewayId
+    PrivateIpAddresses: Optional[IpList]
+    PrivateIpAddressCount: Optional[PrivateIpAddressCount]
+    DryRun: Optional[Boolean]
+
+
+class NatGatewayAddress(TypedDict, total=False):
+    AllocationId: Optional[String]
+    NetworkInterfaceId: Optional[String]
+    PrivateIp: Optional[String]
+    PublicIp: Optional[String]
+    AssociationId: Optional[String]
+    IsPrimary: Optional[Boolean]
+    FailureMessage: Optional[String]
+    Status: Optional[NatGatewayAddressStatus]
+
+
+NatGatewayAddressList = List[NatGatewayAddress]
+
+
+class AssignPrivateNatGatewayAddressResult(TypedDict, total=False):
+    NatGatewayId: Optional[NatGatewayId]
+    NatGatewayAddresses: Optional[NatGatewayAddressList]
+
+
+class AssociateAddressRequest(ServiceRequest):
+    AllocationId: Optional[AllocationId]
+    InstanceId: Optional[InstanceId]
+    PublicIp: Optional[EipAllocationPublicIp]
+    DryRun: Optional[Boolean]
+    NetworkInterfaceId: Optional[NetworkInterfaceId]
+    PrivateIpAddress: Optional[String]
+    AllowReassociation: Optional[Boolean]
+
+
+class AssociateAddressResult(TypedDict, total=False):
+    AssociationId: Optional[String]
+
+
+class AssociateCapacityReservationBillingOwnerRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    CapacityReservationId: CapacityReservationId
+    UnusedReservationBillingOwnerId: AccountID
+
+
+class AssociateCapacityReservationBillingOwnerResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class AssociateClientVpnTargetNetworkRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    SubnetId: SubnetId
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class AssociationStatus(TypedDict, total=False):
+    Code: Optional[AssociationStatusCode]
+    Message: Optional[String]
+
+
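AssociateAddressRequest accepts either an AllocationId (VPC) or a bare PublicIp; a minimal sketch with hypothetical IDs (again assuming a LocalStack endpoint):

import boto3

ec2 = boto3.client("ec2", endpoint_url="http://localhost:4566", region_name="us-east-1")
# returns AssociationId per AssociateAddressResult
assoc = ec2.associate_address(
    AllocationId="eipalloc-0123456789abcdef0",  # hypothetical allocation ID
    InstanceId="i-0123456789abcdef0",           # hypothetical instance ID
)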
+class AssociateClientVpnTargetNetworkResult(TypedDict, total=False):
+    AssociationId: Optional[String]
+    Status: Optional[AssociationStatus]
+
+
+class AssociateDhcpOptionsRequest(ServiceRequest):
+    DhcpOptionsId: DefaultingDhcpOptionsId
+    VpcId: VpcId
+    DryRun: Optional[Boolean]
+
+
+class AssociateEnclaveCertificateIamRoleRequest(ServiceRequest):
+    CertificateArn: CertificateId
+    RoleArn: RoleId
+    DryRun: Optional[Boolean]
+
+
+class AssociateEnclaveCertificateIamRoleResult(TypedDict, total=False):
+    CertificateS3BucketName: Optional[String]
+    CertificateS3ObjectKey: Optional[String]
+    EncryptionKmsKeyId: Optional[String]
+
+
+class IamInstanceProfileSpecification(TypedDict, total=False):
+    Arn: Optional[String]
+    Name: Optional[String]
+
+
+class AssociateIamInstanceProfileRequest(ServiceRequest):
+    IamInstanceProfile: IamInstanceProfileSpecification
+    InstanceId: InstanceId
+
+
+class IamInstanceProfile(TypedDict, total=False):
+    Arn: Optional[String]
+    Id: Optional[String]
+
+
+class IamInstanceProfileAssociation(TypedDict, total=False):
+    AssociationId: Optional[String]
+    InstanceId: Optional[String]
+    IamInstanceProfile: Optional[IamInstanceProfile]
+    State: Optional[IamInstanceProfileAssociationState]
+    Timestamp: Optional[DateTime]
+
+
+class AssociateIamInstanceProfileResult(TypedDict, total=False):
+    IamInstanceProfileAssociation: Optional[IamInstanceProfileAssociation]
+
+
+DedicatedHostIdList = List[DedicatedHostId]
+InstanceIdList = List[InstanceId]
+
+
+class InstanceEventWindowAssociationRequest(TypedDict, total=False):
+    InstanceIds: Optional[InstanceIdList]
+    InstanceTags: Optional[TagList]
+    DedicatedHostIds: Optional[DedicatedHostIdList]
+
+
+class AssociateInstanceEventWindowRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceEventWindowId: InstanceEventWindowId
+    AssociationTarget: InstanceEventWindowAssociationRequest
+
+
+class InstanceEventWindowAssociationTarget(TypedDict, total=False):
+    InstanceIds: Optional[InstanceIdList]
+    Tags: Optional[TagList]
+    DedicatedHostIds: Optional[DedicatedHostIdList]
+
+
+class InstanceEventWindowTimeRange(TypedDict, total=False):
+    StartWeekDay: Optional[WeekDay]
+    StartHour: Optional[Hour]
+    EndWeekDay: Optional[WeekDay]
+    EndHour: Optional[Hour]
+
+
+InstanceEventWindowTimeRangeList = List[InstanceEventWindowTimeRange]
+
+
+class InstanceEventWindow(TypedDict, total=False):
+    InstanceEventWindowId: Optional[InstanceEventWindowId]
+    TimeRanges: Optional[InstanceEventWindowTimeRangeList]
+    Name: Optional[String]
+    CronExpression: Optional[InstanceEventWindowCronExpression]
+    AssociationTarget: Optional[InstanceEventWindowAssociationTarget]
+    State: Optional[InstanceEventWindowState]
+    Tags: Optional[TagList]
+
+
+class AssociateInstanceEventWindowResult(TypedDict, total=False):
+    InstanceEventWindow: Optional[InstanceEventWindow]
+
+
+class AssociateIpamByoasnRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Asn: String
+    Cidr: String
+
+
+class AssociateIpamByoasnResult(TypedDict, total=False):
+    AsnAssociation: Optional[AsnAssociation]
+
+
+class AssociateIpamResourceDiscoveryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamId: IpamId
+    IpamResourceDiscoveryId: IpamResourceDiscoveryId
+    TagSpecifications: Optional[TagSpecificationList]
+    ClientToken: Optional[String]
+
+
+class IpamResourceDiscoveryAssociation(TypedDict, total=False):
+    OwnerId: Optional[String]
+    IpamResourceDiscoveryAssociationId: Optional[IpamResourceDiscoveryAssociationId]
+    IpamResourceDiscoveryAssociationArn: Optional[String]
+    IpamResourceDiscoveryId: Optional[IpamResourceDiscoveryId]
+    IpamId: Optional[IpamId]
+    IpamArn: Optional[ResourceArn]
+    IpamRegion: Optional[String]
+    IsDefault: Optional[Boolean]
+    ResourceDiscoveryStatus: Optional[IpamAssociatedResourceDiscoveryStatus]
+    State: Optional[IpamResourceDiscoveryAssociationState]
+    Tags: Optional[TagList]
+
+
+class AssociateIpamResourceDiscoveryResult(TypedDict, total=False):
+    IpamResourceDiscoveryAssociation: Optional[IpamResourceDiscoveryAssociation]
+
+
+class AssociateNatGatewayAddressRequest(ServiceRequest):
+    NatGatewayId: NatGatewayId
+    AllocationIds: AllocationIdList
+    PrivateIpAddresses: Optional[IpList]
+    DryRun: Optional[Boolean]
+
+
+class AssociateNatGatewayAddressResult(TypedDict, total=False):
+    NatGatewayId: Optional[NatGatewayId]
+    NatGatewayAddresses: Optional[NatGatewayAddressList]
+
+
+class AssociateRouteServerRequest(ServiceRequest):
+    RouteServerId: RouteServerId
+    VpcId: VpcId
+    DryRun: Optional[Boolean]
+
+
+class RouteServerAssociation(TypedDict, total=False):
+    RouteServerId: Optional[RouteServerId]
+    VpcId: Optional[VpcId]
+    State: Optional[RouteServerAssociationState]
+
+
+class AssociateRouteServerResult(TypedDict, total=False):
+    RouteServerAssociation: Optional[RouteServerAssociation]
+
+
+class AssociateRouteTableRequest(ServiceRequest):
+    GatewayId: Optional[RouteGatewayId]
+    DryRun: Optional[Boolean]
+    SubnetId: Optional[SubnetId]
+    RouteTableId: RouteTableId
+
+
+class RouteTableAssociationState(TypedDict, total=False):
+    State: Optional[RouteTableAssociationStateCode]
+    StatusMessage: Optional[String]
+
+
+class AssociateRouteTableResult(TypedDict, total=False):
+    AssociationId: Optional[String]
+    AssociationState: Optional[RouteTableAssociationState]
+
+
+class AssociateSecurityGroupVpcRequest(ServiceRequest):
+    GroupId: SecurityGroupId
+    VpcId: VpcId
+    DryRun: Optional[Boolean]
+
+
+class AssociateSecurityGroupVpcResult(TypedDict, total=False):
+    State: Optional[SecurityGroupVpcAssociationState]
+
+
+class AssociateSubnetCidrBlockRequest(ServiceRequest):
+    Ipv6IpamPoolId: Optional[IpamPoolId]
+    Ipv6NetmaskLength: Optional[NetmaskLength]
+    SubnetId: SubnetId
+    Ipv6CidrBlock: Optional[String]
+
+
+class SubnetCidrBlockState(TypedDict, total=False):
+    State: Optional[SubnetCidrBlockStateCode]
+    StatusMessage: Optional[String]
+
+
+class SubnetIpv6CidrBlockAssociation(TypedDict, total=False):
+    AssociationId: Optional[SubnetCidrAssociationId]
+    Ipv6CidrBlock: Optional[String]
+    Ipv6CidrBlockState: Optional[SubnetCidrBlockState]
+    Ipv6AddressAttribute: Optional[Ipv6AddressAttribute]
+    IpSource: Optional[IpSource]
+
+
+class AssociateSubnetCidrBlockResult(TypedDict, total=False):
+    Ipv6CidrBlockAssociation: Optional[SubnetIpv6CidrBlockAssociation]
+    SubnetId: Optional[String]
+
+
+TransitGatewaySubnetIdList = List[SubnetId]
+
+
+class AssociateTransitGatewayMulticastDomainRequest(ServiceRequest):
+    TransitGatewayMulticastDomainId: TransitGatewayMulticastDomainId
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    SubnetIds: TransitGatewaySubnetIdList
+    DryRun: Optional[Boolean]
+
+
+class AssociateTransitGatewayMulticastDomainResult(TypedDict, total=False):
+    Associations: Optional[TransitGatewayMulticastDomainAssociations]
+
+
+class AssociateTransitGatewayPolicyTableRequest(ServiceRequest):
+    TransitGatewayPolicyTableId: TransitGatewayPolicyTableId
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayPolicyTableAssociation(TypedDict, total=False):
+    TransitGatewayPolicyTableId: Optional[TransitGatewayPolicyTableId]
+    TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId]
+    ResourceId: Optional[String]
+    ResourceType: Optional[TransitGatewayAttachmentResourceType]
+    State: Optional[TransitGatewayAssociationState]
+
+
+class AssociateTransitGatewayPolicyTableResult(TypedDict, total=False):
+    Association: Optional[TransitGatewayPolicyTableAssociation]
+
+
+class AssociateTransitGatewayRouteTableRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayAssociation(TypedDict, total=False):
+    TransitGatewayRouteTableId: Optional[TransitGatewayRouteTableId]
+    TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId]
+    ResourceId: Optional[String]
+    ResourceType: Optional[TransitGatewayAttachmentResourceType]
+    State: Optional[TransitGatewayAssociationState]
+
+
+class AssociateTransitGatewayRouteTableResult(TypedDict, total=False):
+    Association: Optional[TransitGatewayAssociation]
+
+
+class AssociateTrunkInterfaceRequest(ServiceRequest):
+    BranchInterfaceId: NetworkInterfaceId
+    TrunkInterfaceId: NetworkInterfaceId
+    VlanId: Optional[Integer]
+    GreKey: Optional[Integer]
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TrunkInterfaceAssociation(TypedDict, total=False):
+    AssociationId: Optional[TrunkInterfaceAssociationId]
+    BranchInterfaceId: Optional[String]
+    TrunkInterfaceId: Optional[String]
+    InterfaceProtocol: Optional[InterfaceProtocolType]
+    VlanId: Optional[Integer]
+    GreKey: Optional[Integer]
+    Tags: Optional[TagList]
+
+
+class AssociateTrunkInterfaceResult(TypedDict, total=False):
+    InterfaceAssociation: Optional[TrunkInterfaceAssociation]
+    ClientToken: Optional[String]
+
+
+class AssociateVpcCidrBlockRequest(ServiceRequest):
+    CidrBlock: Optional[String]
+    Ipv6CidrBlockNetworkBorderGroup: Optional[String]
+    Ipv6Pool: Optional[Ipv6PoolEc2Id]
+    Ipv6CidrBlock: Optional[String]
+    Ipv4IpamPoolId: Optional[IpamPoolId]
+    Ipv4NetmaskLength: Optional[NetmaskLength]
+    Ipv6IpamPoolId: Optional[IpamPoolId]
+    Ipv6NetmaskLength: Optional[NetmaskLength]
+    VpcId: VpcId
+    AmazonProvidedIpv6CidrBlock: Optional[Boolean]
+
+
+class VpcCidrBlockState(TypedDict, total=False):
+    State: Optional[VpcCidrBlockStateCode]
+    StatusMessage: Optional[String]
+
+
+class VpcCidrBlockAssociation(TypedDict, total=False):
+    AssociationId: Optional[String]
+    CidrBlock: Optional[String]
+    CidrBlockState: Optional[VpcCidrBlockState]
+
+
+class VpcIpv6CidrBlockAssociation(TypedDict, total=False):
+    AssociationId: Optional[String]
+    Ipv6CidrBlock: Optional[String]
+    Ipv6CidrBlockState: Optional[VpcCidrBlockState]
+    NetworkBorderGroup: Optional[String]
+    Ipv6Pool: Optional[String]
+    Ipv6AddressAttribute: Optional[Ipv6AddressAttribute]
+    IpSource: Optional[IpSource]
+
+
+class AssociateVpcCidrBlockResult(TypedDict, total=False):
+    Ipv6CidrBlockAssociation: Optional[VpcIpv6CidrBlockAssociation]
+    CidrBlockAssociation: Optional[VpcCidrBlockAssociation]
+    VpcId: Optional[String]
+
+
+class AssociatedRole(TypedDict, total=False):
+    AssociatedRoleArn: Optional[ResourceArn]
+    CertificateS3BucketName: Optional[String]
+    CertificateS3ObjectKey: Optional[String]
+    EncryptionKmsKeyId: Optional[String]
+
+
+AssociatedRolesList = List[AssociatedRole]
+
+
+class AssociatedTargetNetwork(TypedDict, total=False):
+    NetworkId: Optional[String]
+    NetworkType: Optional[AssociatedNetworkType]
+
+
+AssociatedTargetNetworkSet = List[AssociatedTargetNetwork]
+AssociationIdList = List[IamInstanceProfileAssociationId]
+
+
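AssociateVpcCidrBlockRequest takes either a secondary IPv4 CidrBlock, an IPAM-pool-backed block, or an Amazon-provided IPv6 block; a minimal sketch (hypothetical VPC ID, LocalStack endpoint assumed):

import boto3

ec2 = boto3.client("ec2", endpoint_url="http://localhost:4566", region_name="us-east-1")
# the result's CidrBlockAssociation carries an AssociationId plus a CidrBlockState
ec2.associate_vpc_cidr_block(
    VpcId="vpc-0123456789abcdef0",  # hypothetical VPC ID
    CidrBlock="10.1.0.0/16",
)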
+class AthenaIntegration(TypedDict, total=False):
+    IntegrationResultS3DestinationArn: String
+    PartitionLoadFrequency: PartitionLoadFrequency
+    PartitionStartDate: Optional[MillisecondDateTime]
+    PartitionEndDate: Optional[MillisecondDateTime]
+
+
+AthenaIntegrationsSet = List[AthenaIntegration]
+GroupIdStringList = List[SecurityGroupId]
+
+
+class AttachClassicLinkVpcRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceId: InstanceId
+    VpcId: VpcId
+    Groups: GroupIdStringList
+
+
+class AttachClassicLinkVpcResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class AttachInternetGatewayRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InternetGatewayId: InternetGatewayId
+    VpcId: VpcId
+
+
+class EnaSrdUdpSpecification(TypedDict, total=False):
+    EnaSrdUdpEnabled: Optional[Boolean]
+
+
+class EnaSrdSpecification(TypedDict, total=False):
+    EnaSrdEnabled: Optional[Boolean]
+    EnaSrdUdpSpecification: Optional[EnaSrdUdpSpecification]
+
+
+class AttachNetworkInterfaceRequest(ServiceRequest):
+    NetworkCardIndex: Optional[Integer]
+    EnaSrdSpecification: Optional[EnaSrdSpecification]
+    EnaQueueCount: Optional[Integer]
+    DryRun: Optional[Boolean]
+    NetworkInterfaceId: NetworkInterfaceId
+    InstanceId: InstanceId
+    DeviceIndex: Integer
+
+
+class AttachNetworkInterfaceResult(TypedDict, total=False):
+    AttachmentId: Optional[String]
+    NetworkCardIndex: Optional[Integer]
+
+
+class AttachVerifiedAccessTrustProviderRequest(ServiceRequest):
+    VerifiedAccessInstanceId: VerifiedAccessInstanceId
+    VerifiedAccessTrustProviderId: VerifiedAccessTrustProviderId
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class VerifiedAccessInstanceCustomSubDomain(TypedDict, total=False):
+    SubDomain: Optional[String]
+    Nameservers: Optional[ValueStringList]
+
+
+class VerifiedAccessTrustProviderCondensed(TypedDict, total=False):
+    VerifiedAccessTrustProviderId: Optional[String]
+    Description: Optional[String]
+    TrustProviderType: Optional[TrustProviderType]
+    UserTrustProviderType: Optional[UserTrustProviderType]
+    DeviceTrustProviderType: Optional[DeviceTrustProviderType]
+
+
+VerifiedAccessTrustProviderCondensedList = List[VerifiedAccessTrustProviderCondensed]
+
+
+class VerifiedAccessInstance(TypedDict, total=False):
+    VerifiedAccessInstanceId: Optional[String]
+    Description: Optional[String]
+    VerifiedAccessTrustProviders: Optional[VerifiedAccessTrustProviderCondensedList]
+    CreationTime: Optional[String]
+    LastUpdatedTime: Optional[String]
+    Tags: Optional[TagList]
+    FipsEnabled: Optional[Boolean]
+    CidrEndpointsCustomSubDomain: Optional[VerifiedAccessInstanceCustomSubDomain]
+
+
+class NativeApplicationOidcOptions(TypedDict, total=False):
+    PublicSigningKeyEndpoint: Optional[String]
+    Issuer: Optional[String]
+    AuthorizationEndpoint: Optional[String]
+    TokenEndpoint: Optional[String]
+    UserInfoEndpoint: Optional[String]
+    ClientId: Optional[String]
+    Scope: Optional[String]
+
+
+class VerifiedAccessSseSpecificationResponse(TypedDict, total=False):
+    CustomerManagedKeyEnabled: Optional[Boolean]
+    KmsKeyArn: Optional[KmsKeyArn]
+
+
+class DeviceOptions(TypedDict, total=False):
+    TenantId: Optional[String]
+    PublicSigningKeyUrl: Optional[String]
+
+
+class OidcOptions(TypedDict, total=False):
+    Issuer: Optional[String]
+    AuthorizationEndpoint: Optional[String]
+    TokenEndpoint: Optional[String]
+    UserInfoEndpoint: Optional[String]
+    ClientId: Optional[String]
+    ClientSecret: Optional[ClientSecretType]
+    Scope: Optional[String]
+
+
+class VerifiedAccessTrustProvider(TypedDict, total=False):
+    VerifiedAccessTrustProviderId: Optional[String]
+    Description: Optional[String]
+    TrustProviderType: Optional[TrustProviderType]
+    UserTrustProviderType: Optional[UserTrustProviderType]
+    DeviceTrustProviderType: Optional[DeviceTrustProviderType]
+    OidcOptions: Optional[OidcOptions]
+    DeviceOptions: Optional[DeviceOptions]
+    PolicyReferenceName: Optional[String]
+    CreationTime: Optional[String]
+    LastUpdatedTime: Optional[String]
+    Tags: Optional[TagList]
+    SseSpecification: Optional[VerifiedAccessSseSpecificationResponse]
+    NativeApplicationOidcOptions: Optional[NativeApplicationOidcOptions]
+
+
+class AttachVerifiedAccessTrustProviderResult(TypedDict, total=False):
+    VerifiedAccessTrustProvider: Optional[VerifiedAccessTrustProvider]
+    VerifiedAccessInstance: Optional[VerifiedAccessInstance]
+
+
+class AttachVolumeRequest(ServiceRequest):
+    Device: String
+    InstanceId: InstanceId
+    VolumeId: VolumeId
+    DryRun: Optional[Boolean]
+
+
+class AttachVpnGatewayRequest(ServiceRequest):
+    VpcId: VpcId
+    VpnGatewayId: VpnGatewayId
+    DryRun: Optional[Boolean]
+
+
+class VpcAttachment(TypedDict, total=False):
+    VpcId: Optional[String]
+    State: Optional[AttachmentStatus]
+
+
+class AttachVpnGatewayResult(TypedDict, total=False):
+    VpcAttachment: Optional[VpcAttachment]
+
+
+class AttachmentEnaSrdUdpSpecification(TypedDict, total=False):
+    EnaSrdUdpEnabled: Optional[Boolean]
+
+
+class AttachmentEnaSrdSpecification(TypedDict, total=False):
+    EnaSrdEnabled: Optional[Boolean]
+    EnaSrdUdpSpecification: Optional[AttachmentEnaSrdUdpSpecification]
+
+
+class AttributeBooleanValue(TypedDict, total=False):
+    Value: Optional[Boolean]
+
+
+class RegionalSummary(TypedDict, total=False):
+    RegionName: Optional[String]
+    NumberOfMatchedAccounts: Optional[Integer]
+    NumberOfUnmatchedAccounts: Optional[Integer]
+
+
+RegionalSummaryList = List[RegionalSummary]
+
+
+class AttributeSummary(TypedDict, total=False):
+    AttributeName: Optional[String]
+    MostFrequentValue: Optional[String]
+    NumberOfMatchedAccounts: Optional[Integer]
+    NumberOfUnmatchedAccounts: Optional[Integer]
+    RegionalSummaries: Optional[RegionalSummaryList]
+
+
+AttributeSummaryList = List[AttributeSummary]
+
+
+class AttributeValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+class ClientVpnAuthorizationRuleStatus(TypedDict, total=False):
+    Code: Optional[ClientVpnAuthorizationRuleStatusCode]
+    Message: Optional[String]
+
+
+class AuthorizationRule(TypedDict, total=False):
+    ClientVpnEndpointId: Optional[String]
+    Description: Optional[String]
+    GroupId: Optional[String]
+    AccessAll: Optional[Boolean]
+    DestinationCidr: Optional[String]
+    Status: Optional[ClientVpnAuthorizationRuleStatus]
+
+
+AuthorizationRuleSet = List[AuthorizationRule]
+
+
+class AuthorizeClientVpnIngressRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    TargetNetworkCidr: String
+    AccessGroupId: Optional[String]
+    AuthorizeAllGroups: Optional[Boolean]
+    Description: Optional[String]
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class AuthorizeClientVpnIngressResult(TypedDict, total=False):
+    Status: Optional[ClientVpnAuthorizationRuleStatus]
+
+
+class PrefixListId(TypedDict, total=False):
+    Description: Optional[String]
+    PrefixListId: Optional[String]
+
+
+PrefixListIdList = List[PrefixListId]
+
+
+class Ipv6Range(TypedDict, total=False):
+    Description: Optional[String]
+    CidrIpv6: Optional[String]
+
+
+Ipv6RangeList = List[Ipv6Range]
+
+
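AuthorizeClientVpnIngressRequest adds an authorization rule to a Client VPN endpoint; a minimal sketch (hypothetical endpoint ID, LocalStack endpoint assumed):

import boto3

ec2 = boto3.client("ec2", endpoint_url="http://localhost:4566", region_name="us-east-1")
# Status in the response follows ClientVpnAuthorizationRuleStatus
ec2.authorize_client_vpn_ingress(
    ClientVpnEndpointId="cvpn-endpoint-0123456789abcdef0",  # hypothetical
    TargetNetworkCidr="10.0.0.0/16",
    AuthorizeAllGroups=True,
)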
+class IpRange(TypedDict, total=False):
+    Description: Optional[String]
+    CidrIp: Optional[String]
+
+
+IpRangeList = List[IpRange]
+
+
+class UserIdGroupPair(TypedDict, total=False):
+    Description: Optional[String]
+    UserId: Optional[String]
+    GroupName: Optional[String]
+    GroupId: Optional[String]
+    VpcId: Optional[String]
+    VpcPeeringConnectionId: Optional[String]
+    PeeringStatus: Optional[String]
+
+
+UserIdGroupPairList = List[UserIdGroupPair]
+
+
+class IpPermission(TypedDict, total=False):
+    IpProtocol: Optional[String]
+    FromPort: Optional[Integer]
+    ToPort: Optional[Integer]
+    UserIdGroupPairs: Optional[UserIdGroupPairList]
+    IpRanges: Optional[IpRangeList]
+    Ipv6Ranges: Optional[Ipv6RangeList]
+    PrefixListIds: Optional[PrefixListIdList]
+
+
+IpPermissionList = List[IpPermission]
+
+
+class AuthorizeSecurityGroupEgressRequest(ServiceRequest):
+    TagSpecifications: Optional[TagSpecificationList]
+    DryRun: Optional[Boolean]
+    GroupId: SecurityGroupId
+    SourceSecurityGroupName: Optional[String]
+    SourceSecurityGroupOwnerId: Optional[String]
+    IpProtocol: Optional[String]
+    FromPort: Optional[Integer]
+    ToPort: Optional[Integer]
+    CidrIp: Optional[String]
+    IpPermissions: Optional[IpPermissionList]
+
+
+class ReferencedSecurityGroup(TypedDict, total=False):
+    GroupId: Optional[String]
+    PeeringStatus: Optional[String]
+    UserId: Optional[String]
+    VpcId: Optional[String]
+    VpcPeeringConnectionId: Optional[String]
+
+
+class SecurityGroupRule(TypedDict, total=False):
+    SecurityGroupRuleId: Optional[SecurityGroupRuleId]
+    GroupId: Optional[SecurityGroupId]
+    GroupOwnerId: Optional[String]
+    IsEgress: Optional[Boolean]
+    IpProtocol: Optional[String]
+    FromPort: Optional[Integer]
+    ToPort: Optional[Integer]
+    CidrIpv4: Optional[String]
+    CidrIpv6: Optional[String]
+    PrefixListId: Optional[PrefixListResourceId]
+    ReferencedGroupInfo: Optional[ReferencedSecurityGroup]
+    Description: Optional[String]
+    Tags: Optional[TagList]
+    SecurityGroupRuleArn: Optional[String]
+
+
+SecurityGroupRuleList = List[SecurityGroupRule]
+
+
+class AuthorizeSecurityGroupEgressResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+    SecurityGroupRules: Optional[SecurityGroupRuleList]
+
+
+class AuthorizeSecurityGroupIngressRequest(ServiceRequest):
+    CidrIp: Optional[String]
+    FromPort: Optional[Integer]
+    GroupId: Optional[SecurityGroupId]
+    GroupName: Optional[SecurityGroupName]
+    IpPermissions: Optional[IpPermissionList]
+    IpProtocol: Optional[String]
+    SourceSecurityGroupName: Optional[String]
+    SourceSecurityGroupOwnerId: Optional[String]
+    ToPort: Optional[Integer]
+    TagSpecifications: Optional[TagSpecificationList]
+    DryRun: Optional[Boolean]
+
+
+class AuthorizeSecurityGroupIngressResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+    SecurityGroupRules: Optional[SecurityGroupRuleList]
+
+
+class AvailabilityZoneMessage(TypedDict, total=False):
+    Message: Optional[String]
+
+
+AvailabilityZoneMessageList = List[AvailabilityZoneMessage]
+
+
+class AvailabilityZone(TypedDict, total=False):
+    OptInStatus: Optional[AvailabilityZoneOptInStatus]
+    Messages: Optional[AvailabilityZoneMessageList]
+    RegionName: Optional[String]
+    ZoneName: Optional[String]
+    ZoneId: Optional[String]
+    GroupName: Optional[String]
+    NetworkBorderGroup: Optional[String]
+    ZoneType: Optional[String]
+    ParentZoneName: Optional[String]
+    ParentZoneId: Optional[String]
+    GroupLongName: Optional[String]
+    State: Optional[AvailabilityZoneState]
+
+
+AvailabilityZoneList = List[AvailabilityZone]
+AvailabilityZoneStringList = List[String]
+
+
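An IpPermission couples a protocol/port range with any mix of IPv4 ranges, IPv6 ranges, prefix lists, and referenced security groups; a minimal ingress sketch (hypothetical group ID, LocalStack endpoint assumed):

import boto3

ec2 = boto3.client("ec2", endpoint_url="http://localhost:4566", region_name="us-east-1")
ec2.authorize_security_group_ingress(
    GroupId="sg-0123456789abcdef0",  # hypothetical security group ID
    IpPermissions=[
        {
            "IpProtocol": "tcp",
            "FromPort": 443,
            "ToPort": 443,
            "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": "public HTTPS"}],
        }
    ],
)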
+class InstanceCapacity(TypedDict, total=False):
+    AvailableCapacity: Optional[Integer]
+    InstanceType: Optional[String]
+    TotalCapacity: Optional[Integer]
+
+
+AvailableInstanceCapacityList = List[InstanceCapacity]
+
+
+class AvailableCapacity(TypedDict, total=False):
+    AvailableInstanceCapacity: Optional[AvailableInstanceCapacityList]
+    AvailableVCpus: Optional[Integer]
+
+
+BandwidthWeightingTypeList = List[BandwidthWeightingType]
+
+
+class BaselineEbsBandwidthMbps(TypedDict, total=False):
+    Min: Optional[Integer]
+    Max: Optional[Integer]
+
+
+class BaselineEbsBandwidthMbpsRequest(TypedDict, total=False):
+    Min: Optional[Integer]
+    Max: Optional[Integer]
+
+
+class PerformanceFactorReference(TypedDict, total=False):
+    InstanceFamily: Optional[String]
+
+
+PerformanceFactorReferenceSet = List[PerformanceFactorReference]
+
+
+class CpuPerformanceFactor(TypedDict, total=False):
+    References: Optional[PerformanceFactorReferenceSet]
+
+
+class BaselinePerformanceFactors(TypedDict, total=False):
+    Cpu: Optional[CpuPerformanceFactor]
+
+
+class PerformanceFactorReferenceRequest(TypedDict, total=False):
+    InstanceFamily: Optional[String]
+
+
+PerformanceFactorReferenceSetRequest = List[PerformanceFactorReferenceRequest]
+
+
+class CpuPerformanceFactorRequest(TypedDict, total=False):
+    References: Optional[PerformanceFactorReferenceSetRequest]
+
+
+class BaselinePerformanceFactorsRequest(TypedDict, total=False):
+    Cpu: Optional[CpuPerformanceFactorRequest]
+
+
+BillingProductList = List[String]
+Blob = bytes
+
+
+class BlobAttributeValue(TypedDict, total=False):
+    Value: Optional[Blob]
+
+
+class EbsBlockDevice(TypedDict, total=False):
+    DeleteOnTermination: Optional[Boolean]
+    Iops: Optional[Integer]
+    SnapshotId: Optional[SnapshotId]
+    VolumeSize: Optional[Integer]
+    VolumeType: Optional[VolumeType]
+    KmsKeyId: Optional[String]
+    Throughput: Optional[Integer]
+    OutpostArn: Optional[String]
+    Encrypted: Optional[Boolean]
+    VolumeInitializationRate: Optional[Integer]
+
+
+class BlockDeviceMapping(TypedDict, total=False):
+    Ebs: Optional[EbsBlockDevice]
+    NoDevice: Optional[String]
+    DeviceName: Optional[String]
+    VirtualName: Optional[String]
+
+
+BlockDeviceMappingList = List[BlockDeviceMapping]
+BlockDeviceMappingRequestList = List[BlockDeviceMapping]
+
+
+class EbsBlockDeviceResponse(TypedDict, total=False):
+    Encrypted: Optional[Boolean]
+    DeleteOnTermination: Optional[Boolean]
+    Iops: Optional[Integer]
+    Throughput: Optional[Integer]
+    KmsKeyId: Optional[KmsKeyId]
+    SnapshotId: Optional[SnapshotId]
+    VolumeSize: Optional[Integer]
+    VolumeType: Optional[VolumeType]
+
+
+class BlockDeviceMappingResponse(TypedDict, total=False):
+    DeviceName: Optional[String]
+    VirtualName: Optional[String]
+    Ebs: Optional[EbsBlockDeviceResponse]
+    NoDevice: Optional[String]
+
+
+BlockDeviceMappingResponseList = List[BlockDeviceMappingResponse]
+
+
+class BlockPublicAccessStates(TypedDict, total=False):
+    InternetGatewayBlockMode: Optional[BlockPublicAccessMode]
+
+
+BootModeTypeList = List[BootModeType]
+BoxedLong = int
+BundleIdStringList = List[BundleId]
+
+
+class S3Storage(TypedDict, total=False):
+    AWSAccessKeyId: Optional[String]
+    Bucket: Optional[String]
+    Prefix: Optional[String]
+    UploadPolicy: Optional[Blob]
+    UploadPolicySignature: Optional[S3StorageUploadPolicySignature]
+
+
+class Storage(TypedDict, total=False):
+    S3: Optional[S3Storage]
+
+
+class BundleInstanceRequest(ServiceRequest):
+    InstanceId: InstanceId
+    Storage: Storage
+    DryRun: Optional[Boolean]
+
+
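BlockDeviceMapping and EbsBlockDevice are the shapes behind the BlockDeviceMappings parameter of instance and image operations; a minimal sketch (assuming the generated localstack.aws.api.ec2 module):

from localstack.aws.api.ec2 import BlockDeviceMapping, EbsBlockDevice

# a 20 GiB gp3 root volume that is deleted when the instance terminates
mapping: BlockDeviceMapping = {
    "DeviceName": "/dev/sda1",
    "Ebs": EbsBlockDevice(VolumeSize=20, VolumeType="gp3", DeleteOnTermination=True),
}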
+class BundleTaskError(TypedDict, total=False):
+    Code: Optional[String]
+    Message: Optional[String]
+
+
+class BundleTask(TypedDict, total=False):
+    InstanceId: Optional[String]
+    BundleId: Optional[String]
+    State: Optional[BundleTaskState]
+    StartTime: Optional[DateTime]
+    UpdateTime: Optional[DateTime]
+    Storage: Optional[Storage]
+    Progress: Optional[String]
+    BundleTaskError: Optional[BundleTaskError]
+
+
+class BundleInstanceResult(TypedDict, total=False):
+    BundleTask: Optional[BundleTask]
+
+
+BundleTaskList = List[BundleTask]
+
+
+class Byoasn(TypedDict, total=False):
+    Asn: Optional[String]
+    IpamId: Optional[IpamId]
+    StatusMessage: Optional[String]
+    State: Optional[AsnState]
+
+
+ByoasnSet = List[Byoasn]
+ByoipCidrSet = List[ByoipCidr]
+
+
+class CancelBundleTaskRequest(ServiceRequest):
+    BundleId: BundleId
+    DryRun: Optional[Boolean]
+
+
+class CancelBundleTaskResult(TypedDict, total=False):
+    BundleTask: Optional[BundleTask]
+
+
+class CancelCapacityReservationFleetError(TypedDict, total=False):
+    Code: Optional[CancelCapacityReservationFleetErrorCode]
+    Message: Optional[CancelCapacityReservationFleetErrorMessage]
+
+
+CapacityReservationFleetIdSet = List[CapacityReservationFleetId]
+
+
+class CancelCapacityReservationFleetsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    CapacityReservationFleetIds: CapacityReservationFleetIdSet
+
+
+class FailedCapacityReservationFleetCancellationResult(TypedDict, total=False):
+    CapacityReservationFleetId: Optional[CapacityReservationFleetId]
+    CancelCapacityReservationFleetError: Optional[CancelCapacityReservationFleetError]
+
+
+FailedCapacityReservationFleetCancellationResultSet = List[
+    FailedCapacityReservationFleetCancellationResult
+]
+
+
+class CapacityReservationFleetCancellationState(TypedDict, total=False):
+    CurrentFleetState: Optional[CapacityReservationFleetState]
+    PreviousFleetState: Optional[CapacityReservationFleetState]
+    CapacityReservationFleetId: Optional[CapacityReservationFleetId]
+
+
+CapacityReservationFleetCancellationStateSet = List[CapacityReservationFleetCancellationState]
+
+
+class CancelCapacityReservationFleetsResult(TypedDict, total=False):
+    SuccessfulFleetCancellations: Optional[CapacityReservationFleetCancellationStateSet]
+    FailedFleetCancellations: Optional[FailedCapacityReservationFleetCancellationResultSet]
+
+
+class CancelCapacityReservationRequest(ServiceRequest):
+    CapacityReservationId: CapacityReservationId
+    DryRun: Optional[Boolean]
+
+
+class CancelCapacityReservationResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class CancelConversionRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ConversionTaskId: ConversionTaskId
+    ReasonMessage: Optional[String]
+
+
+class CancelDeclarativePoliciesReportRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ReportId: DeclarativePoliciesReportId
+
+
+class CancelDeclarativePoliciesReportResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class CancelExportTaskRequest(ServiceRequest):
+    ExportTaskId: ExportVmTaskId
+
+
+class CancelImageLaunchPermissionRequest(ServiceRequest):
+    ImageId: ImageId
+    DryRun: Optional[Boolean]
+
+
+class CancelImageLaunchPermissionResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class CancelImportTaskRequest(ServiceRequest):
+    CancelReason: Optional[String]
+    DryRun: Optional[Boolean]
+    ImportTaskId: Optional[ImportTaskId]
+
+
+class CancelImportTaskResult(TypedDict, total=False):
+    ImportTaskId: Optional[String]
+    PreviousState: Optional[String]
+    State: Optional[String]
+
+
+class CancelReservedInstancesListingRequest(ServiceRequest):
+    ReservedInstancesListingId: ReservedInstancesListingId
+
+
+Long = int
+
+
+class PriceSchedule(TypedDict, total=False):
+    Active: Optional[Boolean]
+    CurrencyCode: Optional[CurrencyCodeValues]
+    Price: Optional[Double]
+    Term: Optional[Long]
+
+
+PriceScheduleList = List[PriceSchedule]
+
+
+class InstanceCount(TypedDict, total=False):
+    InstanceCount: Optional[Integer]
+    State: Optional[ListingState]
+
+
+InstanceCountList = List[InstanceCount]
+
+
+class ReservedInstancesListing(TypedDict, total=False):
+    ClientToken: Optional[String]
+    CreateDate: Optional[DateTime]
+    InstanceCounts: Optional[InstanceCountList]
+    PriceSchedules: Optional[PriceScheduleList]
+    ReservedInstancesId: Optional[String]
+    ReservedInstancesListingId: Optional[String]
+    Status: Optional[ListingStatus]
+    StatusMessage: Optional[String]
+    Tags: Optional[TagList]
+    UpdateDate: Optional[DateTime]
+
+
+ReservedInstancesListingList = List[ReservedInstancesListing]
+
+
+class CancelReservedInstancesListingResult(TypedDict, total=False):
+    ReservedInstancesListings: Optional[ReservedInstancesListingList]
+
+
+class CancelSpotFleetRequestsError(TypedDict, total=False):
+    Code: Optional[CancelBatchErrorCode]
+    Message: Optional[String]
+
+
+class CancelSpotFleetRequestsErrorItem(TypedDict, total=False):
+    Error: Optional[CancelSpotFleetRequestsError]
+    SpotFleetRequestId: Optional[String]
+
+
+CancelSpotFleetRequestsErrorSet = List[CancelSpotFleetRequestsErrorItem]
+SpotFleetRequestIdList = List[SpotFleetRequestId]
+
+
+class CancelSpotFleetRequestsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    SpotFleetRequestIds: SpotFleetRequestIdList
+    TerminateInstances: Boolean
+
+
+class CancelSpotFleetRequestsSuccessItem(TypedDict, total=False):
+    CurrentSpotFleetRequestState: Optional[BatchState]
+    PreviousSpotFleetRequestState: Optional[BatchState]
+    SpotFleetRequestId: Optional[String]
+
+
+CancelSpotFleetRequestsSuccessSet = List[CancelSpotFleetRequestsSuccessItem]
+
+
+class CancelSpotFleetRequestsResponse(TypedDict, total=False):
+    SuccessfulFleetRequests: Optional[CancelSpotFleetRequestsSuccessSet]
+    UnsuccessfulFleetRequests: Optional[CancelSpotFleetRequestsErrorSet]
+
+
+SpotInstanceRequestIdList = List[SpotInstanceRequestId]
+
+
+class CancelSpotInstanceRequestsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    SpotInstanceRequestIds: SpotInstanceRequestIdList
+
+
+class CancelledSpotInstanceRequest(TypedDict, total=False):
+    SpotInstanceRequestId: Optional[String]
+    State: Optional[CancelSpotInstanceRequestState]
+
+
+CancelledSpotInstanceRequestList = List[CancelledSpotInstanceRequest]
+
+
+class CancelSpotInstanceRequestsResult(TypedDict, total=False):
+    CancelledSpotInstanceRequests: Optional[CancelledSpotInstanceRequestList]
+
+
+class CapacityAllocation(TypedDict, total=False):
+    AllocationType: Optional[AllocationType]
+    Count: Optional[Integer]
+
+
+CapacityAllocations = List[CapacityAllocation]
+
+
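CancelSpotInstanceRequestsRequest/Result wrap spot-request cancellation; a minimal sketch (hypothetical request ID, LocalStack endpoint assumed):

import boto3

ec2 = boto3.client("ec2", endpoint_url="http://localhost:4566", region_name="us-east-1")
# each CancelledSpotInstanceRequests entry pairs the request ID with its new state
ec2.cancel_spot_instance_requests(
    SpotInstanceRequestIds=["sir-0123456789abcdef0"]  # hypothetical spot request ID
)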
+class CapacityBlockExtension(TypedDict, total=False):
+    CapacityReservationId: Optional[CapacityReservationId]
+    InstanceType: Optional[String]
+    InstanceCount: Optional[Integer]
+    AvailabilityZone: Optional[AvailabilityZoneName]
+    AvailabilityZoneId: Optional[AvailabilityZoneId]
+    CapacityBlockExtensionOfferingId: Optional[OfferingId]
+    CapacityBlockExtensionDurationHours: Optional[Integer]
+    CapacityBlockExtensionStatus: Optional[CapacityBlockExtensionStatus]
+    CapacityBlockExtensionPurchaseDate: Optional[MillisecondDateTime]
+    CapacityBlockExtensionStartDate: Optional[MillisecondDateTime]
+    CapacityBlockExtensionEndDate: Optional[MillisecondDateTime]
+    UpfrontFee: Optional[String]
+    CurrencyCode: Optional[String]
+
+
+class CapacityBlockExtensionOffering(TypedDict, total=False):
+    CapacityBlockExtensionOfferingId: Optional[OfferingId]
+    InstanceType: Optional[String]
+    InstanceCount: Optional[Integer]
+    AvailabilityZone: Optional[AvailabilityZoneName]
+    AvailabilityZoneId: Optional[AvailabilityZoneId]
+    StartDate: Optional[MillisecondDateTime]
+    CapacityBlockExtensionStartDate: Optional[MillisecondDateTime]
+    CapacityBlockExtensionEndDate: Optional[MillisecondDateTime]
+    CapacityBlockExtensionDurationHours: Optional[Integer]
+    UpfrontFee: Optional[String]
+    CurrencyCode: Optional[String]
+    Tenancy: Optional[CapacityReservationTenancy]
+
+
+CapacityBlockExtensionOfferingSet = List[CapacityBlockExtensionOffering]
+CapacityBlockExtensionSet = List[CapacityBlockExtension]
+
+
+class CapacityBlockOffering(TypedDict, total=False):
+    CapacityBlockOfferingId: Optional[OfferingId]
+    InstanceType: Optional[String]
+    AvailabilityZone: Optional[String]
+    InstanceCount: Optional[Integer]
+    StartDate: Optional[MillisecondDateTime]
+    EndDate: Optional[MillisecondDateTime]
+    CapacityBlockDurationHours: Optional[Integer]
+    UpfrontFee: Optional[String]
+    CurrencyCode: Optional[String]
+    Tenancy: Optional[CapacityReservationTenancy]
+    CapacityBlockDurationMinutes: Optional[Integer]
+
+
+CapacityBlockOfferingSet = List[CapacityBlockOffering]
+
+
+class CapacityReservationCommitmentInfo(TypedDict, total=False):
+    CommittedInstanceCount: Optional[Integer]
+    CommitmentEndDate: Optional[MillisecondDateTime]
+
+
+class CapacityReservation(TypedDict, total=False):
+    CapacityReservationId: Optional[String]
+    OwnerId: Optional[String]
+    CapacityReservationArn: Optional[String]
+    AvailabilityZoneId: Optional[String]
+    InstanceType: Optional[String]
+    InstancePlatform: Optional[CapacityReservationInstancePlatform]
+    AvailabilityZone: Optional[String]
+    Tenancy: Optional[CapacityReservationTenancy]
+    TotalInstanceCount: Optional[Integer]
+    AvailableInstanceCount: Optional[Integer]
+    EbsOptimized: Optional[Boolean]
+    EphemeralStorage: Optional[Boolean]
+    State: Optional[CapacityReservationState]
+    StartDate: Optional[MillisecondDateTime]
+    EndDate: Optional[DateTime]
+    EndDateType: Optional[EndDateType]
+    InstanceMatchCriteria: Optional[InstanceMatchCriteria]
+    CreateDate: Optional[DateTime]
+    Tags: Optional[TagList]
+    OutpostArn: Optional[OutpostArn]
+    CapacityReservationFleetId: Optional[String]
+    PlacementGroupArn: Optional[PlacementGroupArn]
+    CapacityAllocations: Optional[CapacityAllocations]
+    ReservationType: Optional[CapacityReservationType]
+    UnusedReservationBillingOwnerId: Optional[AccountID]
+    CommitmentInfo: Optional[CapacityReservationCommitmentInfo]
+    DeliveryPreference: Optional[CapacityReservationDeliveryPreference]
+
+
+class CapacityReservationInfo(TypedDict, total=False):
+    InstanceType: Optional[String]
+    AvailabilityZone: Optional[AvailabilityZoneName]
+    Tenancy: Optional[CapacityReservationTenancy]
+    AvailabilityZoneId: Optional[AvailabilityZoneId]
+
+
+class CapacityReservationBillingRequest(TypedDict, total=False):
+    CapacityReservationId: Optional[String]
+    RequestedBy: Optional[String]
+    UnusedReservationBillingOwnerId: Optional[AccountID]
+    LastUpdateTime: Optional[MillisecondDateTime]
+    Status: Optional[CapacityReservationBillingRequestStatus]
+    StatusMessage: Optional[String]
+    CapacityReservationInfo: Optional[CapacityReservationInfo]
+
+
+CapacityReservationBillingRequestSet = List[CapacityReservationBillingRequest]
+CapacityReservationCommitmentDuration = int
+
+
+class FleetCapacityReservation(TypedDict, total=False):
+    CapacityReservationId: Optional[CapacityReservationId]
+    AvailabilityZoneId: Optional[String]
+    InstanceType: Optional[InstanceType]
+    InstancePlatform: Optional[CapacityReservationInstancePlatform]
+    AvailabilityZone: Optional[String]
+    TotalInstanceCount: Optional[Integer]
+    FulfilledCapacity: Optional[Double]
+    EbsOptimized: Optional[Boolean]
+    CreateDate: Optional[MillisecondDateTime]
+    Weight: Optional[DoubleWithConstraints]
+    Priority: Optional[IntegerWithConstraints]
+
+
+FleetCapacityReservationSet = List[FleetCapacityReservation]
+
+
+class CapacityReservationFleet(TypedDict, total=False):
+    CapacityReservationFleetId: Optional[CapacityReservationFleetId]
+    CapacityReservationFleetArn: Optional[String]
+    State: Optional[CapacityReservationFleetState]
+    TotalTargetCapacity: Optional[Integer]
+    TotalFulfilledCapacity: Optional[Double]
+    Tenancy: Optional[FleetCapacityReservationTenancy]
+    EndDate: Optional[MillisecondDateTime]
+    CreateTime: Optional[MillisecondDateTime]
+    InstanceMatchCriteria: Optional[FleetInstanceMatchCriteria]
+    AllocationStrategy: Optional[String]
+    InstanceTypeSpecifications: Optional[FleetCapacityReservationSet]
+    Tags: Optional[TagList]
+
+
+CapacityReservationFleetSet = List[CapacityReservationFleet]
+
+
+class CapacityReservationGroup(TypedDict, total=False):
+    GroupArn: Optional[String]
+    OwnerId: Optional[String]
+
+
+CapacityReservationGroupSet = List[CapacityReservationGroup]
+CapacityReservationIdSet = List[CapacityReservationId]
+
+
+class CapacityReservationOptions(TypedDict, total=False):
+    UsageStrategy: Optional[FleetCapacityReservationUsageStrategy]
+
+
+class CapacityReservationOptionsRequest(TypedDict, total=False):
+    UsageStrategy: Optional[FleetCapacityReservationUsageStrategy]
+
+
+CapacityReservationSet = List[CapacityReservation]
+
+
+class CapacityReservationTarget(TypedDict, total=False):
+    CapacityReservationId: Optional[CapacityReservationId]
+    CapacityReservationResourceGroupArn: Optional[String]
+
+
+class CapacityReservationSpecification(TypedDict, total=False):
+    CapacityReservationPreference: Optional[CapacityReservationPreference]
+    CapacityReservationTarget: Optional[CapacityReservationTarget]
+
+
+class CapacityReservationTargetResponse(TypedDict, total=False):
+    CapacityReservationId: Optional[String]
+    CapacityReservationResourceGroupArn: Optional[String]
+
+
+class CapacityReservationSpecificationResponse(TypedDict, total=False):
+    CapacityReservationPreference: Optional[CapacityReservationPreference]
+    CapacityReservationTarget: Optional[CapacityReservationTargetResponse]
+
+
+class CarrierGateway(TypedDict, total=False):
+    CarrierGatewayId: Optional[CarrierGatewayId]
+    VpcId: Optional[VpcId]
+    State: Optional[CarrierGatewayState]
+    OwnerId: Optional[String]
+    Tags: Optional[TagList]
+
+
+CarrierGatewayIdSet = List[CarrierGatewayId]
+CarrierGatewaySet = List[CarrierGateway]
+
+
+class CertificateAuthentication(TypedDict, total=False):
+    ClientRootCertificateChain: Optional[String]
+
+
+class CertificateAuthenticationRequest(TypedDict, total=False):
+    ClientRootCertificateChainArn: Optional[String]
+
+
+class CidrAuthorizationContext(TypedDict, total=False):
+    Message: String
+    Signature: String
+
+
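CapacityReservationSpecification either states a preference or pins an explicit target, not both; a minimal targeted sketch (hypothetical reservation ID, assuming the generated localstack.aws.api.ec2 module):

from localstack.aws.api.ec2 import CapacityReservationSpecification, CapacityReservationTarget

spec: CapacityReservationSpecification = {
    "CapacityReservationTarget": CapacityReservationTarget(
        CapacityReservationId="cr-0123456789abcdef0"  # hypothetical reservation ID
    ),
}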
+class ClassicLinkDnsSupport(TypedDict, total=False):
+    ClassicLinkDnsSupported: Optional[Boolean]
+    VpcId: Optional[String]
+
+
+ClassicLinkDnsSupportList = List[ClassicLinkDnsSupport]
+
+
+class GroupIdentifier(TypedDict, total=False):
+    GroupId: Optional[String]
+    GroupName: Optional[String]
+
+
+GroupIdentifierList = List[GroupIdentifier]
+
+
+class ClassicLinkInstance(TypedDict, total=False):
+    Groups: Optional[GroupIdentifierList]
+    InstanceId: Optional[String]
+    Tags: Optional[TagList]
+    VpcId: Optional[String]
+
+
+ClassicLinkInstanceList = List[ClassicLinkInstance]
+
+
+class ClassicLoadBalancer(TypedDict, total=False):
+    Name: Optional[String]
+
+
+ClassicLoadBalancers = List[ClassicLoadBalancer]
+
+
+class ClassicLoadBalancersConfig(TypedDict, total=False):
+    ClassicLoadBalancers: Optional[ClassicLoadBalancers]
+
+
+class ClientCertificateRevocationListStatus(TypedDict, total=False):
+    Code: Optional[ClientCertificateRevocationListStatusCode]
+    Message: Optional[String]
+
+
+class ClientConnectOptions(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    LambdaFunctionArn: Optional[String]
+
+
+class ClientVpnEndpointAttributeStatus(TypedDict, total=False):
+    Code: Optional[ClientVpnEndpointAttributeStatusCode]
+    Message: Optional[String]
+
+
+class ClientConnectResponseOptions(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    LambdaFunctionArn: Optional[String]
+    Status: Optional[ClientVpnEndpointAttributeStatus]
+
+
+class ClientData(TypedDict, total=False):
+    Comment: Optional[String]
+    UploadEnd: Optional[DateTime]
+    UploadSize: Optional[Double]
+    UploadStart: Optional[DateTime]
+
+
+class ClientLoginBannerOptions(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    BannerText: Optional[String]
+
+
+class ClientLoginBannerResponseOptions(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    BannerText: Optional[String]
+
+
+class ClientRouteEnforcementOptions(TypedDict, total=False):
+    Enforced: Optional[Boolean]
+
+
+class ClientRouteEnforcementResponseOptions(TypedDict, total=False):
+    Enforced: Optional[Boolean]
+
+
+class FederatedAuthentication(TypedDict, total=False):
+    SamlProviderArn: Optional[String]
+    SelfServiceSamlProviderArn: Optional[String]
+
+
+class DirectoryServiceAuthentication(TypedDict, total=False):
+    DirectoryId: Optional[String]
+
+
+class ClientVpnAuthentication(TypedDict, total=False):
+    Type: Optional[ClientVpnAuthenticationType]
+    ActiveDirectory: Optional[DirectoryServiceAuthentication]
+    MutualAuthentication: Optional[CertificateAuthentication]
+    FederatedAuthentication: Optional[FederatedAuthentication]
+
+
+ClientVpnAuthenticationList = List[ClientVpnAuthentication]
+
+
+class FederatedAuthenticationRequest(TypedDict, total=False):
+    SAMLProviderArn: Optional[String]
+    SelfServiceSAMLProviderArn: Optional[String]
+
+
+class DirectoryServiceAuthenticationRequest(TypedDict, total=False):
+    DirectoryId: Optional[String]
+
+
+class ClientVpnAuthenticationRequest(TypedDict, total=False):
+    Type: Optional[ClientVpnAuthenticationType]
+    ActiveDirectory: Optional[DirectoryServiceAuthenticationRequest]
+    MutualAuthentication: Optional[CertificateAuthenticationRequest]
+    FederatedAuthentication: Optional[FederatedAuthenticationRequest]
+
+
+ClientVpnAuthenticationRequestList = List[ClientVpnAuthenticationRequest]
+
+
+class ClientVpnConnectionStatus(TypedDict, total=False):
+    Code: Optional[ClientVpnConnectionStatusCode]
+    Message: Optional[String]
+
+
+class ClientVpnConnection(TypedDict, total=False):
+    ClientVpnEndpointId: Optional[String]
+    Timestamp: Optional[String]
+    ConnectionId: Optional[String]
+    Username: Optional[String]
+    ConnectionEstablishedTime: Optional[String]
+    IngressBytes: Optional[String]
+    EgressBytes: Optional[String]
+    IngressPackets: Optional[String]
+    EgressPackets: Optional[String]
+    ClientIp: Optional[String]
+    CommonName: Optional[String]
+    Status: Optional[ClientVpnConnectionStatus]
+    ConnectionEndTime: Optional[String]
+    PostureComplianceStatuses: Optional[ValueStringList]
+
+
+ClientVpnConnectionSet = List[ClientVpnConnection]
+
+
+class ConnectionLogResponseOptions(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    CloudwatchLogGroup: Optional[String]
+    CloudwatchLogStream: Optional[String]
+
+
+class ClientVpnEndpointStatus(TypedDict, total=False):
+    Code: Optional[ClientVpnEndpointStatusCode]
+    Message: Optional[String]
+
+
+class ClientVpnEndpoint(TypedDict, total=False):
+    ClientVpnEndpointId: Optional[String]
+    Description: Optional[String]
+    Status: Optional[ClientVpnEndpointStatus]
+    CreationTime: Optional[String]
+    DeletionTime: Optional[String]
+    DnsName: Optional[String]
+    ClientCidrBlock: Optional[String]
+    DnsServers: Optional[ValueStringList]
+    SplitTunnel: Optional[Boolean]
+    VpnProtocol: Optional[VpnProtocol]
+    TransportProtocol: Optional[TransportProtocol]
+    VpnPort: Optional[Integer]
+    AssociatedTargetNetworks: Optional[AssociatedTargetNetworkSet]
+    ServerCertificateArn: Optional[String]
+    AuthenticationOptions: Optional[ClientVpnAuthenticationList]
+    ConnectionLogOptions: Optional[ConnectionLogResponseOptions]
+    Tags: Optional[TagList]
+    SecurityGroupIds: Optional[ClientVpnSecurityGroupIdSet]
+    VpcId: Optional[VpcId]
+    SelfServicePortalUrl: Optional[String]
+    ClientConnectOptions: Optional[ClientConnectResponseOptions]
+    SessionTimeoutHours: Optional[Integer]
+    ClientLoginBannerOptions: Optional[ClientLoginBannerResponseOptions]
+    ClientRouteEnforcementOptions: Optional[ClientRouteEnforcementResponseOptions]
+    DisconnectOnSessionTimeout: Optional[Boolean]
+
+
+ClientVpnEndpointIdList = List[ClientVpnEndpointId]
+
+
+class ClientVpnRouteStatus(TypedDict, total=False):
+    Code: Optional[ClientVpnRouteStatusCode]
+    Message: Optional[String]
+
+
+class ClientVpnRoute(TypedDict, total=False):
+    ClientVpnEndpointId: Optional[String]
+    DestinationCidr: Optional[String]
+    TargetSubnet: Optional[String]
+    Type: Optional[String]
+    Origin: Optional[String]
+    Status: Optional[ClientVpnRouteStatus]
+    Description: Optional[String]
+
+
+ClientVpnRouteSet = List[ClientVpnRoute]
+
+
+class CloudWatchLogOptions(TypedDict, total=False):
+    LogEnabled: Optional[Boolean]
+    LogGroupArn: Optional[String]
+    LogOutputFormat: Optional[String]
+
+
+class CloudWatchLogOptionsSpecification(TypedDict, total=False):
+    LogEnabled: Optional[Boolean]
+    LogGroupArn: Optional[CloudWatchLogGroupArn]
+    LogOutputFormat: Optional[String]
+
+
+class CoipAddressUsage(TypedDict, total=False):
+    AllocationId: Optional[String]
+    AwsAccountId: Optional[String]
+    AwsService: Optional[String]
+    CoIp: Optional[String]
+
+
+CoipAddressUsageSet = List[CoipAddressUsage]
+
+
+class CoipCidr(TypedDict, total=False):
+    Cidr: Optional[String]
+    CoipPoolId: Optional[Ipv4PoolCoipId]
+    LocalGatewayRouteTableId: Optional[String]
+
+
+class CoipPool(TypedDict, total=False):
+    PoolId: Optional[Ipv4PoolCoipId]
+    PoolCidrs: Optional[ValueStringList]
+    LocalGatewayRouteTableId: Optional[LocalGatewayRoutetableId]
+    Tags: Optional[TagList]
+    PoolArn: Optional[ResourceArn]
+
+
+CoipPoolIdSet = List[Ipv4PoolCoipId]
+CoipPoolSet = List[CoipPool]
+
+
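CloudWatchLogOptionsSpecification is the writable counterpart of CloudWatchLogOptions (note its typed CloudWatchLogGroupArn); a minimal sketch (hypothetical ARN, assuming the generated localstack.aws.api.ec2 module):

from localstack.aws.api.ec2 import CloudWatchLogOptionsSpecification

log_opts = CloudWatchLogOptionsSpecification(
    LogEnabled=True,
    LogGroupArn="arn:aws:logs:us-east-1:000000000000:log-group:vpn-logs",  # hypothetical
    LogOutputFormat="json",
)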
ProductCode: String + DryRun: Optional[Boolean] + + +class ConfirmProductInstanceResult(TypedDict, total=False): + Return: Optional[Boolean] + OwnerId: Optional[String] + + +class ConnectionLogOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + CloudwatchLogGroup: Optional[String] + CloudwatchLogStream: Optional[String] + + +class ConnectionNotification(TypedDict, total=False): + ConnectionNotificationId: Optional[String] + ServiceId: Optional[String] + VpcEndpointId: Optional[String] + ConnectionNotificationType: Optional[ConnectionNotificationType] + ConnectionNotificationArn: Optional[String] + ConnectionEvents: Optional[ValueStringList] + ConnectionNotificationState: Optional[ConnectionNotificationState] + ServiceRegion: Optional[String] + + +ConnectionNotificationIdsList = List[ConnectionNotificationId] +ConnectionNotificationSet = List[ConnectionNotification] + + +class ConnectionTrackingConfiguration(TypedDict, total=False): + TcpEstablishedTimeout: Optional[Integer] + UdpStreamTimeout: Optional[Integer] + UdpTimeout: Optional[Integer] + + +class ConnectionTrackingSpecification(TypedDict, total=False): + TcpEstablishedTimeout: Optional[Integer] + UdpTimeout: Optional[Integer] + UdpStreamTimeout: Optional[Integer] + + +class ConnectionTrackingSpecificationRequest(TypedDict, total=False): + TcpEstablishedTimeout: Optional[Integer] + UdpStreamTimeout: Optional[Integer] + UdpTimeout: Optional[Integer] + + +class ConnectionTrackingSpecificationResponse(TypedDict, total=False): + TcpEstablishedTimeout: Optional[Integer] + UdpStreamTimeout: Optional[Integer] + UdpTimeout: Optional[Integer] + + +ConversionIdStringList = List[ConversionTaskId] + + +class DiskImageVolumeDescription(TypedDict, total=False): + Id: Optional[String] + Size: Optional[Long] + + +class DiskImageDescription(TypedDict, total=False): + Checksum: Optional[String] + Format: Optional[DiskImageFormat] + ImportManifestUrl: Optional[ImportManifestUrl] + Size: Optional[Long] + + +class ImportVolumeTaskDetails(TypedDict, total=False): + AvailabilityZone: Optional[String] + BytesConverted: Optional[Long] + Description: Optional[String] + Image: Optional[DiskImageDescription] + Volume: Optional[DiskImageVolumeDescription] + + +class ImportInstanceVolumeDetailItem(TypedDict, total=False): + AvailabilityZone: Optional[String] + BytesConverted: Optional[Long] + Description: Optional[String] + Image: Optional[DiskImageDescription] + Status: Optional[String] + StatusMessage: Optional[String] + Volume: Optional[DiskImageVolumeDescription] + + +ImportInstanceVolumeDetailSet = List[ImportInstanceVolumeDetailItem] + + +class ImportInstanceTaskDetails(TypedDict, total=False): + Description: Optional[String] + InstanceId: Optional[String] + Platform: Optional[PlatformValues] + Volumes: Optional[ImportInstanceVolumeDetailSet] + + +class ConversionTask(TypedDict, total=False): + ConversionTaskId: Optional[String] + ExpirationTime: Optional[String] + ImportInstance: Optional[ImportInstanceTaskDetails] + ImportVolume: Optional[ImportVolumeTaskDetails] + State: Optional[ConversionTaskState] + StatusMessage: Optional[String] + Tags: Optional[TagList] + + +class CopyFpgaImageRequest(ServiceRequest): + DryRun: Optional[Boolean] + SourceFpgaImageId: String + Description: Optional[String] + Name: Optional[String] + SourceRegion: String + ClientToken: Optional[String] + + +class CopyFpgaImageResult(TypedDict, total=False): + FpgaImageId: Optional[String] + + +class CopyImageRequest(ServiceRequest): + ClientToken: Optional[String] + 
Description: Optional[String] + Encrypted: Optional[Boolean] + KmsKeyId: Optional[KmsKeyId] + Name: String + SourceImageId: String + SourceRegion: String + DestinationOutpostArn: Optional[String] + CopyImageTags: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + SnapshotCopyCompletionDurationMinutes: Optional[Long] + DryRun: Optional[Boolean] + + +class CopyImageResult(TypedDict, total=False): + ImageId: Optional[String] + + +class CopySnapshotRequest(ServiceRequest): + Description: Optional[String] + DestinationOutpostArn: Optional[String] + DestinationRegion: Optional[String] + Encrypted: Optional[Boolean] + KmsKeyId: Optional[KmsKeyId] + PresignedUrl: Optional[CopySnapshotRequestPSU] + SourceRegion: String + SourceSnapshotId: String + TagSpecifications: Optional[TagSpecificationList] + CompletionDurationMinutes: Optional[SnapshotCompletionDurationMinutesRequest] + DryRun: Optional[Boolean] + + +class CopySnapshotResult(TypedDict, total=False): + Tags: Optional[TagList] + SnapshotId: Optional[String] + + +CoreCountList = List[CoreCount] +CpuManufacturerSet = List[CpuManufacturer] + + +class CpuOptions(TypedDict, total=False): + CoreCount: Optional[Integer] + ThreadsPerCore: Optional[Integer] + AmdSevSnp: Optional[AmdSevSnpSpecification] + + +class CpuOptionsRequest(TypedDict, total=False): + CoreCount: Optional[Integer] + ThreadsPerCore: Optional[Integer] + AmdSevSnp: Optional[AmdSevSnpSpecification] + + +class CreateCapacityReservationBySplittingRequest(ServiceRequest): + DryRun: Optional[Boolean] + ClientToken: Optional[String] + SourceCapacityReservationId: CapacityReservationId + InstanceCount: Integer + TagSpecifications: Optional[TagSpecificationList] + + +class CreateCapacityReservationBySplittingResult(TypedDict, total=False): + SourceCapacityReservation: Optional[CapacityReservation] + DestinationCapacityReservation: Optional[CapacityReservation] + InstanceCount: Optional[Integer] + + +class ReservationFleetInstanceSpecification(TypedDict, total=False): + InstanceType: Optional[InstanceType] + InstancePlatform: Optional[CapacityReservationInstancePlatform] + Weight: Optional[DoubleWithConstraints] + AvailabilityZone: Optional[String] + AvailabilityZoneId: Optional[String] + EbsOptimized: Optional[Boolean] + Priority: Optional[IntegerWithConstraints] + + +ReservationFleetInstanceSpecificationList = List[ReservationFleetInstanceSpecification] + + +class CreateCapacityReservationFleetRequest(ServiceRequest): + AllocationStrategy: Optional[String] + ClientToken: Optional[String] + InstanceTypeSpecifications: ReservationFleetInstanceSpecificationList + Tenancy: Optional[FleetCapacityReservationTenancy] + TotalTargetCapacity: Integer + EndDate: Optional[MillisecondDateTime] + InstanceMatchCriteria: Optional[FleetInstanceMatchCriteria] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class CreateCapacityReservationFleetResult(TypedDict, total=False): + CapacityReservationFleetId: Optional[CapacityReservationFleetId] + State: Optional[CapacityReservationFleetState] + TotalTargetCapacity: Optional[Integer] + TotalFulfilledCapacity: Optional[Double] + InstanceMatchCriteria: Optional[FleetInstanceMatchCriteria] + AllocationStrategy: Optional[String] + CreateTime: Optional[MillisecondDateTime] + EndDate: Optional[MillisecondDateTime] + Tenancy: Optional[FleetCapacityReservationTenancy] + FleetCapacityReservations: Optional[FleetCapacityReservationSet] + Tags: Optional[TagList] + + +class 
CreateCapacityReservationRequest(ServiceRequest): + ClientToken: Optional[String] + InstanceType: String + InstancePlatform: CapacityReservationInstancePlatform + AvailabilityZone: Optional[AvailabilityZoneName] + AvailabilityZoneId: Optional[AvailabilityZoneId] + Tenancy: Optional[CapacityReservationTenancy] + InstanceCount: Integer + EbsOptimized: Optional[Boolean] + EphemeralStorage: Optional[Boolean] + EndDate: Optional[DateTime] + EndDateType: Optional[EndDateType] + InstanceMatchCriteria: Optional[InstanceMatchCriteria] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + OutpostArn: Optional[OutpostArn] + PlacementGroupArn: Optional[PlacementGroupArn] + StartDate: Optional[MillisecondDateTime] + CommitmentDuration: Optional[CapacityReservationCommitmentDuration] + DeliveryPreference: Optional[CapacityReservationDeliveryPreference] + + +class CreateCapacityReservationResult(TypedDict, total=False): + CapacityReservation: Optional[CapacityReservation] + + +class CreateCarrierGatewayRequest(ServiceRequest): + VpcId: VpcId + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + ClientToken: Optional[String] + + +class CreateCarrierGatewayResult(TypedDict, total=False): + CarrierGateway: Optional[CarrierGateway] + + +class CreateClientVpnEndpointRequest(ServiceRequest): + ClientCidrBlock: String + ServerCertificateArn: String + AuthenticationOptions: ClientVpnAuthenticationRequestList + ConnectionLogOptions: ConnectionLogOptions + DnsServers: Optional[ValueStringList] + TransportProtocol: Optional[TransportProtocol] + VpnPort: Optional[Integer] + Description: Optional[String] + SplitTunnel: Optional[Boolean] + DryRun: Optional[Boolean] + ClientToken: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + SecurityGroupIds: Optional[ClientVpnSecurityGroupIdSet] + VpcId: Optional[VpcId] + SelfServicePortal: Optional[SelfServicePortal] + ClientConnectOptions: Optional[ClientConnectOptions] + SessionTimeoutHours: Optional[Integer] + ClientLoginBannerOptions: Optional[ClientLoginBannerOptions] + ClientRouteEnforcementOptions: Optional[ClientRouteEnforcementOptions] + DisconnectOnSessionTimeout: Optional[Boolean] + + +class CreateClientVpnEndpointResult(TypedDict, total=False): + ClientVpnEndpointId: Optional[String] + Status: Optional[ClientVpnEndpointStatus] + DnsName: Optional[String] + + +class CreateClientVpnRouteRequest(ServiceRequest): + ClientVpnEndpointId: ClientVpnEndpointId + DestinationCidrBlock: String + TargetVpcSubnetId: SubnetId + Description: Optional[String] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + + +class CreateClientVpnRouteResult(TypedDict, total=False): + Status: Optional[ClientVpnRouteStatus] + + +class CreateCoipCidrRequest(ServiceRequest): + Cidr: String + CoipPoolId: Ipv4PoolCoipId + DryRun: Optional[Boolean] + + +class CreateCoipCidrResult(TypedDict, total=False): + CoipCidr: Optional[CoipCidr] + + +class CreateCoipPoolRequest(ServiceRequest): + LocalGatewayRouteTableId: LocalGatewayRoutetableId + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class CreateCoipPoolResult(TypedDict, total=False): + CoipPool: Optional[CoipPool] + + +class CreateCustomerGatewayRequest(ServiceRequest): + BgpAsn: Optional[Integer] + PublicIp: Optional[String] + CertificateArn: Optional[String] + Type: GatewayType + TagSpecifications: Optional[TagSpecificationList] + DeviceName: Optional[String] + IpAddress: Optional[String] + BgpAsnExtended: Optional[Long] + 
DryRun: Optional[Boolean] + + +class CustomerGateway(TypedDict, total=False): + CertificateArn: Optional[String] + DeviceName: Optional[String] + Tags: Optional[TagList] + BgpAsnExtended: Optional[String] + CustomerGatewayId: Optional[String] + State: Optional[String] + Type: Optional[String] + IpAddress: Optional[String] + BgpAsn: Optional[String] + + +class CreateCustomerGatewayResult(TypedDict, total=False): + CustomerGateway: Optional[CustomerGateway] + + +class CreateDefaultSubnetRequest(ServiceRequest): + AvailabilityZone: AvailabilityZoneName + DryRun: Optional[Boolean] + Ipv6Native: Optional[Boolean] + + +class PrivateDnsNameOptionsOnLaunch(TypedDict, total=False): + HostnameType: Optional[HostnameType] + EnableResourceNameDnsARecord: Optional[Boolean] + EnableResourceNameDnsAAAARecord: Optional[Boolean] + + +SubnetIpv6CidrBlockAssociationSet = List[SubnetIpv6CidrBlockAssociation] + + +class Subnet(TypedDict, total=False): + AvailabilityZoneId: Optional[String] + EnableLniAtDeviceIndex: Optional[Integer] + MapCustomerOwnedIpOnLaunch: Optional[Boolean] + CustomerOwnedIpv4Pool: Optional[CoipPoolId] + OwnerId: Optional[String] + AssignIpv6AddressOnCreation: Optional[Boolean] + Ipv6CidrBlockAssociationSet: Optional[SubnetIpv6CidrBlockAssociationSet] + Tags: Optional[TagList] + SubnetArn: Optional[String] + OutpostArn: Optional[String] + EnableDns64: Optional[Boolean] + Ipv6Native: Optional[Boolean] + PrivateDnsNameOptionsOnLaunch: Optional[PrivateDnsNameOptionsOnLaunch] + BlockPublicAccessStates: Optional[BlockPublicAccessStates] + SubnetId: Optional[String] + State: Optional[SubnetState] + VpcId: Optional[String] + CidrBlock: Optional[String] + AvailableIpAddressCount: Optional[Integer] + AvailabilityZone: Optional[String] + DefaultForAz: Optional[Boolean] + MapPublicIpOnLaunch: Optional[Boolean] + + +class CreateDefaultSubnetResult(TypedDict, total=False): + Subnet: Optional[Subnet] + + +class CreateDefaultVpcRequest(ServiceRequest): + DryRun: Optional[Boolean] + + +class VpcEncryptionControlExclusion(TypedDict, total=False): + State: Optional[VpcEncryptionControlExclusionState] + StateMessage: Optional[String] + + +class VpcEncryptionControlExclusions(TypedDict, total=False): + InternetGateway: Optional[VpcEncryptionControlExclusion] + EgressOnlyInternetGateway: Optional[VpcEncryptionControlExclusion] + NatGateway: Optional[VpcEncryptionControlExclusion] + VirtualPrivateGateway: Optional[VpcEncryptionControlExclusion] + VpcPeering: Optional[VpcEncryptionControlExclusion] + + +class VpcEncryptionControl(TypedDict, total=False): + VpcId: Optional[VpcId] + VpcEncryptionControlId: Optional[VpcEncryptionControlId] + Mode: Optional[VpcEncryptionControlMode] + State: Optional[VpcEncryptionControlState] + StateMessage: Optional[String] + ResourceExclusions: Optional[VpcEncryptionControlExclusions] + Tags: Optional[TagList] + + +VpcCidrBlockAssociationSet = List[VpcCidrBlockAssociation] +VpcIpv6CidrBlockAssociationSet = List[VpcIpv6CidrBlockAssociation] + + +class Vpc(TypedDict, total=False): + OwnerId: Optional[String] + InstanceTenancy: Optional[Tenancy] + Ipv6CidrBlockAssociationSet: Optional[VpcIpv6CidrBlockAssociationSet] + CidrBlockAssociationSet: Optional[VpcCidrBlockAssociationSet] + IsDefault: Optional[Boolean] + EncryptionControl: Optional[VpcEncryptionControl] + Tags: Optional[TagList] + BlockPublicAccessStates: Optional[BlockPublicAccessStates] + VpcId: Optional[String] + State: Optional[VpcState] + CidrBlock: Optional[String] + DhcpOptionsId: Optional[String] + + +class 
CreateDefaultVpcResult(TypedDict, total=False): + Vpc: Optional[Vpc] + + +class CreateDelegateMacVolumeOwnershipTaskRequest(ServiceRequest): + ClientToken: Optional[String] + DryRun: Optional[Boolean] + InstanceId: InstanceId + MacCredentials: SensitiveMacCredentials + TagSpecifications: Optional[TagSpecificationList] + + +class MacSystemIntegrityProtectionConfiguration(TypedDict, total=False): + AppleInternal: Optional[MacSystemIntegrityProtectionSettingStatus] + BaseSystem: Optional[MacSystemIntegrityProtectionSettingStatus] + DebuggingRestrictions: Optional[MacSystemIntegrityProtectionSettingStatus] + DTraceRestrictions: Optional[MacSystemIntegrityProtectionSettingStatus] + FilesystemProtections: Optional[MacSystemIntegrityProtectionSettingStatus] + KextSigning: Optional[MacSystemIntegrityProtectionSettingStatus] + NvramProtections: Optional[MacSystemIntegrityProtectionSettingStatus] + Status: Optional[MacSystemIntegrityProtectionSettingStatus] + + +class MacModificationTask(TypedDict, total=False): + InstanceId: Optional[InstanceId] + MacModificationTaskId: Optional[MacModificationTaskId] + MacSystemIntegrityProtectionConfig: Optional[MacSystemIntegrityProtectionConfiguration] + StartTime: Optional[MillisecondDateTime] + Tags: Optional[TagList] + TaskState: Optional[MacModificationTaskState] + TaskType: Optional[MacModificationTaskType] + + +class CreateDelegateMacVolumeOwnershipTaskResult(TypedDict, total=False): + MacModificationTask: Optional[MacModificationTask] + + +class NewDhcpConfiguration(TypedDict, total=False): + Key: Optional[String] + Values: Optional[ValueStringList] + + +NewDhcpConfigurationList = List[NewDhcpConfiguration] + + +class CreateDhcpOptionsRequest(ServiceRequest): + DhcpConfigurations: NewDhcpConfigurationList + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +DhcpConfigurationValueList = List[AttributeValue] + + +class DhcpConfiguration(TypedDict, total=False): + Key: Optional[String] + Values: Optional[DhcpConfigurationValueList] + + +DhcpConfigurationList = List[DhcpConfiguration] + + +class DhcpOptions(TypedDict, total=False): + OwnerId: Optional[String] + Tags: Optional[TagList] + DhcpOptionsId: Optional[String] + DhcpConfigurations: Optional[DhcpConfigurationList] + + +class CreateDhcpOptionsResult(TypedDict, total=False): + DhcpOptions: Optional[DhcpOptions] + + +class CreateEgressOnlyInternetGatewayRequest(ServiceRequest): + ClientToken: Optional[String] + DryRun: Optional[Boolean] + VpcId: VpcId + TagSpecifications: Optional[TagSpecificationList] + + +class InternetGatewayAttachment(TypedDict, total=False): + State: Optional[AttachmentStatus] + VpcId: Optional[String] + + +InternetGatewayAttachmentList = List[InternetGatewayAttachment] + + +class EgressOnlyInternetGateway(TypedDict, total=False): + Attachments: Optional[InternetGatewayAttachmentList] + EgressOnlyInternetGatewayId: Optional[EgressOnlyInternetGatewayId] + Tags: Optional[TagList] + + +class CreateEgressOnlyInternetGatewayResult(TypedDict, total=False): + ClientToken: Optional[String] + EgressOnlyInternetGateway: Optional[EgressOnlyInternetGateway] + + +class NetworkBandwidthGbps(TypedDict, total=False): + Min: Optional[Double] + Max: Optional[Double] + + +class TotalLocalStorageGB(TypedDict, total=False): + Min: Optional[Double] + Max: Optional[Double] + + +LocalStorageTypeSet = List[LocalStorageType] + + +class NetworkInterfaceCount(TypedDict, total=False): + Min: Optional[Integer] + Max: Optional[Integer] + + +InstanceGenerationSet = 
List[InstanceGeneration] +ExcludedInstanceTypeSet = List[ExcludedInstanceType] + + +class MemoryGiBPerVCpu(TypedDict, total=False): + Min: Optional[Double] + Max: Optional[Double] + + +class MemoryMiB(TypedDict, total=False): + Min: Optional[Integer] + Max: Optional[Integer] + + +class VCpuCountRange(TypedDict, total=False): + Min: Optional[Integer] + Max: Optional[Integer] + + +class InstanceRequirements(TypedDict, total=False): + VCpuCount: Optional[VCpuCountRange] + MemoryMiB: Optional[MemoryMiB] + CpuManufacturers: Optional[CpuManufacturerSet] + MemoryGiBPerVCpu: Optional[MemoryGiBPerVCpu] + ExcludedInstanceTypes: Optional[ExcludedInstanceTypeSet] + InstanceGenerations: Optional[InstanceGenerationSet] + SpotMaxPricePercentageOverLowestPrice: Optional[Integer] + OnDemandMaxPricePercentageOverLowestPrice: Optional[Integer] + BareMetal: Optional[BareMetal] + BurstablePerformance: Optional[BurstablePerformance] + RequireHibernateSupport: Optional[Boolean] + NetworkInterfaceCount: Optional[NetworkInterfaceCount] + LocalStorage: Optional[LocalStorage] + LocalStorageTypes: Optional[LocalStorageTypeSet] + TotalLocalStorageGB: Optional[TotalLocalStorageGB] + BaselineEbsBandwidthMbps: Optional[BaselineEbsBandwidthMbps] + AcceleratorTypes: Optional[AcceleratorTypeSet] + AcceleratorCount: Optional[AcceleratorCount] + AcceleratorManufacturers: Optional[AcceleratorManufacturerSet] + AcceleratorNames: Optional[AcceleratorNameSet] + AcceleratorTotalMemoryMiB: Optional[AcceleratorTotalMemoryMiB] + NetworkBandwidthGbps: Optional[NetworkBandwidthGbps] + AllowedInstanceTypes: Optional[AllowedInstanceTypeSet] + MaxSpotPriceAsPercentageOfOptimalOnDemandPrice: Optional[Integer] + BaselinePerformanceFactors: Optional[BaselinePerformanceFactors] + + +class PlacementResponse(TypedDict, total=False): + GroupName: Optional[PlacementGroupName] + + +class FleetLaunchTemplateOverrides(TypedDict, total=False): + InstanceType: Optional[InstanceType] + MaxPrice: Optional[String] + SubnetId: Optional[String] + AvailabilityZone: Optional[String] + WeightedCapacity: Optional[Double] + Priority: Optional[Double] + Placement: Optional[PlacementResponse] + InstanceRequirements: Optional[InstanceRequirements] + ImageId: Optional[ImageId] + BlockDeviceMappings: Optional[BlockDeviceMappingResponseList] + + +class FleetLaunchTemplateSpecification(TypedDict, total=False): + LaunchTemplateId: Optional[String] + LaunchTemplateName: Optional[LaunchTemplateName] + Version: Optional[String] + + +class LaunchTemplateAndOverridesResponse(TypedDict, total=False): + LaunchTemplateSpecification: Optional[FleetLaunchTemplateSpecification] + Overrides: Optional[FleetLaunchTemplateOverrides] + + +class CreateFleetError(TypedDict, total=False): + LaunchTemplateAndOverrides: Optional[LaunchTemplateAndOverridesResponse] + Lifecycle: Optional[InstanceLifecycle] + ErrorCode: Optional[String] + ErrorMessage: Optional[String] + + +CreateFleetErrorsSet = List[CreateFleetError] +InstanceIdsSet = List[InstanceId] + + +class CreateFleetInstance(TypedDict, total=False): + LaunchTemplateAndOverrides: Optional[LaunchTemplateAndOverridesResponse] + Lifecycle: Optional[InstanceLifecycle] + InstanceIds: Optional[InstanceIdsSet] + InstanceType: Optional[InstanceType] + Platform: Optional[PlatformValues] + + +CreateFleetInstancesSet = List[CreateFleetInstance] + + +class TargetCapacitySpecificationRequest(TypedDict, total=False): + TotalTargetCapacity: Integer + OnDemandTargetCapacity: Optional[Integer] + SpotTargetCapacity: Optional[Integer] + 
DefaultTargetCapacityType: Optional[DefaultTargetCapacityType] + TargetCapacityUnitType: Optional[TargetCapacityUnitType] + + +class NetworkBandwidthGbpsRequest(TypedDict, total=False): + Min: Optional[Double] + Max: Optional[Double] + + +class TotalLocalStorageGBRequest(TypedDict, total=False): + Min: Optional[Double] + Max: Optional[Double] + + +class NetworkInterfaceCountRequest(TypedDict, total=False): + Min: Optional[Integer] + Max: Optional[Integer] + + +class MemoryGiBPerVCpuRequest(TypedDict, total=False): + Min: Optional[Double] + Max: Optional[Double] + + +class MemoryMiBRequest(TypedDict, total=False): + Min: Integer + Max: Optional[Integer] + + +class VCpuCountRangeRequest(TypedDict, total=False): + Min: Integer + Max: Optional[Integer] + + +class InstanceRequirementsRequest(TypedDict, total=False): + VCpuCount: VCpuCountRangeRequest + MemoryMiB: MemoryMiBRequest + CpuManufacturers: Optional[CpuManufacturerSet] + MemoryGiBPerVCpu: Optional[MemoryGiBPerVCpuRequest] + ExcludedInstanceTypes: Optional[ExcludedInstanceTypeSet] + InstanceGenerations: Optional[InstanceGenerationSet] + SpotMaxPricePercentageOverLowestPrice: Optional[Integer] + OnDemandMaxPricePercentageOverLowestPrice: Optional[Integer] + BareMetal: Optional[BareMetal] + BurstablePerformance: Optional[BurstablePerformance] + RequireHibernateSupport: Optional[Boolean] + NetworkInterfaceCount: Optional[NetworkInterfaceCountRequest] + LocalStorage: Optional[LocalStorage] + LocalStorageTypes: Optional[LocalStorageTypeSet] + TotalLocalStorageGB: Optional[TotalLocalStorageGBRequest] + BaselineEbsBandwidthMbps: Optional[BaselineEbsBandwidthMbpsRequest] + AcceleratorTypes: Optional[AcceleratorTypeSet] + AcceleratorCount: Optional[AcceleratorCountRequest] + AcceleratorManufacturers: Optional[AcceleratorManufacturerSet] + AcceleratorNames: Optional[AcceleratorNameSet] + AcceleratorTotalMemoryMiB: Optional[AcceleratorTotalMemoryMiBRequest] + NetworkBandwidthGbps: Optional[NetworkBandwidthGbpsRequest] + AllowedInstanceTypes: Optional[AllowedInstanceTypeSet] + MaxSpotPriceAsPercentageOfOptimalOnDemandPrice: Optional[Integer] + BaselinePerformanceFactors: Optional[BaselinePerformanceFactorsRequest] + + +class FleetEbsBlockDeviceRequest(TypedDict, total=False): + Encrypted: Optional[Boolean] + DeleteOnTermination: Optional[Boolean] + Iops: Optional[Integer] + Throughput: Optional[Integer] + KmsKeyId: Optional[KmsKeyId] + SnapshotId: Optional[SnapshotId] + VolumeSize: Optional[Integer] + VolumeType: Optional[VolumeType] + + +class FleetBlockDeviceMappingRequest(TypedDict, total=False): + DeviceName: Optional[String] + VirtualName: Optional[String] + Ebs: Optional[FleetEbsBlockDeviceRequest] + NoDevice: Optional[String] + + +FleetBlockDeviceMappingRequestList = List[FleetBlockDeviceMappingRequest] + + +class Placement(TypedDict, total=False): + Affinity: Optional[String] + GroupName: Optional[PlacementGroupName] + PartitionNumber: Optional[Integer] + HostId: Optional[String] + Tenancy: Optional[Tenancy] + SpreadDomain: Optional[String] + HostResourceGroupArn: Optional[String] + GroupId: Optional[PlacementGroupId] + AvailabilityZone: Optional[String] + + +class FleetLaunchTemplateOverridesRequest(TypedDict, total=False): + InstanceType: Optional[InstanceType] + MaxPrice: Optional[String] + SubnetId: Optional[SubnetId] + AvailabilityZone: Optional[String] + WeightedCapacity: Optional[Double] + Priority: Optional[Double] + Placement: Optional[Placement] + BlockDeviceMappings: Optional[FleetBlockDeviceMappingRequestList] + 
InstanceRequirements: Optional[InstanceRequirementsRequest] + ImageId: Optional[String] + + +FleetLaunchTemplateOverridesListRequest = List[FleetLaunchTemplateOverridesRequest] + + +class FleetLaunchTemplateSpecificationRequest(TypedDict, total=False): + LaunchTemplateId: Optional[LaunchTemplateId] + LaunchTemplateName: Optional[LaunchTemplateName] + Version: Optional[String] + + +class FleetLaunchTemplateConfigRequest(TypedDict, total=False): + LaunchTemplateSpecification: Optional[FleetLaunchTemplateSpecificationRequest] + Overrides: Optional[FleetLaunchTemplateOverridesListRequest] + + +FleetLaunchTemplateConfigListRequest = List[FleetLaunchTemplateConfigRequest] + + +class OnDemandOptionsRequest(TypedDict, total=False): + AllocationStrategy: Optional[FleetOnDemandAllocationStrategy] + CapacityReservationOptions: Optional[CapacityReservationOptionsRequest] + SingleInstanceType: Optional[Boolean] + SingleAvailabilityZone: Optional[Boolean] + MinTargetCapacity: Optional[Integer] + MaxTotalPrice: Optional[String] + + +class FleetSpotCapacityRebalanceRequest(TypedDict, total=False): + ReplacementStrategy: Optional[FleetReplacementStrategy] + TerminationDelay: Optional[Integer] + + +class FleetSpotMaintenanceStrategiesRequest(TypedDict, total=False): + CapacityRebalance: Optional[FleetSpotCapacityRebalanceRequest] + + +class SpotOptionsRequest(TypedDict, total=False): + AllocationStrategy: Optional[SpotAllocationStrategy] + MaintenanceStrategies: Optional[FleetSpotMaintenanceStrategiesRequest] + InstanceInterruptionBehavior: Optional[SpotInstanceInterruptionBehavior] + InstancePoolsToUseCount: Optional[Integer] + SingleInstanceType: Optional[Boolean] + SingleAvailabilityZone: Optional[Boolean] + MinTargetCapacity: Optional[Integer] + MaxTotalPrice: Optional[String] + + +class CreateFleetRequest(ServiceRequest): + DryRun: Optional[Boolean] + ClientToken: Optional[String] + SpotOptions: Optional[SpotOptionsRequest] + OnDemandOptions: Optional[OnDemandOptionsRequest] + ExcessCapacityTerminationPolicy: Optional[FleetExcessCapacityTerminationPolicy] + LaunchTemplateConfigs: FleetLaunchTemplateConfigListRequest + TargetCapacitySpecification: TargetCapacitySpecificationRequest + TerminateInstancesWithExpiration: Optional[Boolean] + Type: Optional[FleetType] + ValidFrom: Optional[DateTime] + ValidUntil: Optional[DateTime] + ReplaceUnhealthyInstances: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + Context: Optional[String] + + +class CreateFleetResult(TypedDict, total=False): + FleetId: Optional[FleetId] + Errors: Optional[CreateFleetErrorsSet] + Instances: Optional[CreateFleetInstancesSet] + + +class DestinationOptionsRequest(TypedDict, total=False): + FileFormat: Optional[DestinationFileFormat] + HiveCompatiblePartitions: Optional[Boolean] + PerHourPartition: Optional[Boolean] + + +FlowLogResourceIds = List[FlowLogResourceId] + + +class CreateFlowLogsRequest(ServiceRequest): + DryRun: Optional[Boolean] + ClientToken: Optional[String] + DeliverLogsPermissionArn: Optional[String] + DeliverCrossAccountRole: Optional[String] + LogGroupName: Optional[String] + ResourceIds: FlowLogResourceIds + ResourceType: FlowLogsResourceType + TrafficType: Optional[TrafficType] + LogDestinationType: Optional[LogDestinationType] + LogDestination: Optional[String] + LogFormat: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + MaxAggregationInterval: Optional[Integer] + DestinationOptions: Optional[DestinationOptionsRequest] + + +class CreateFlowLogsResult(TypedDict, 
total=False): + ClientToken: Optional[String] + FlowLogIds: Optional[ValueStringList] + Unsuccessful: Optional[UnsuccessfulItemSet] + + +class StorageLocation(TypedDict, total=False): + Bucket: Optional[String] + Key: Optional[String] + + +class CreateFpgaImageRequest(ServiceRequest): + DryRun: Optional[Boolean] + InputStorageLocation: StorageLocation + LogsStorageLocation: Optional[StorageLocation] + Description: Optional[String] + Name: Optional[String] + ClientToken: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + + +class CreateFpgaImageResult(TypedDict, total=False): + FpgaImageId: Optional[String] + FpgaImageGlobalId: Optional[String] + + +class CreateImageRequest(ServiceRequest): + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + InstanceId: InstanceId + Name: String + Description: Optional[String] + NoReboot: Optional[Boolean] + BlockDeviceMappings: Optional[BlockDeviceMappingRequestList] + + +class CreateImageResult(TypedDict, total=False): + ImageId: Optional[String] + + +SecurityGroupIdStringListRequest = List[SecurityGroupId] + + +class CreateInstanceConnectEndpointRequest(ServiceRequest): + DryRun: Optional[Boolean] + SubnetId: SubnetId + SecurityGroupIds: Optional[SecurityGroupIdStringListRequest] + PreserveClientIp: Optional[Boolean] + ClientToken: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + + +SecurityGroupIdSet = List[SecurityGroupId] +NetworkInterfaceIdSet = List[String] + + +class Ec2InstanceConnectEndpoint(TypedDict, total=False): + OwnerId: Optional[String] + InstanceConnectEndpointId: Optional[InstanceConnectEndpointId] + InstanceConnectEndpointArn: Optional[ResourceArn] + State: Optional[Ec2InstanceConnectEndpointState] + StateMessage: Optional[String] + DnsName: Optional[String] + FipsDnsName: Optional[String] + NetworkInterfaceIds: Optional[NetworkInterfaceIdSet] + VpcId: Optional[VpcId] + AvailabilityZone: Optional[String] + CreatedAt: Optional[MillisecondDateTime] + SubnetId: Optional[SubnetId] + PreserveClientIp: Optional[Boolean] + SecurityGroupIds: Optional[SecurityGroupIdSet] + Tags: Optional[TagList] + + +class CreateInstanceConnectEndpointResult(TypedDict, total=False): + InstanceConnectEndpoint: Optional[Ec2InstanceConnectEndpoint] + ClientToken: Optional[String] + + +class InstanceEventWindowTimeRangeRequest(TypedDict, total=False): + StartWeekDay: Optional[WeekDay] + StartHour: Optional[Hour] + EndWeekDay: Optional[WeekDay] + EndHour: Optional[Hour] + + +InstanceEventWindowTimeRangeRequestSet = List[InstanceEventWindowTimeRangeRequest] + + +class CreateInstanceEventWindowRequest(ServiceRequest): + DryRun: Optional[Boolean] + Name: Optional[String] + TimeRanges: Optional[InstanceEventWindowTimeRangeRequestSet] + CronExpression: Optional[InstanceEventWindowCronExpression] + TagSpecifications: Optional[TagSpecificationList] + + +class CreateInstanceEventWindowResult(TypedDict, total=False): + InstanceEventWindow: Optional[InstanceEventWindow] + + +class ExportToS3TaskSpecification(TypedDict, total=False): + DiskImageFormat: Optional[DiskImageFormat] + ContainerFormat: Optional[ContainerFormat] + S3Bucket: Optional[String] + S3Prefix: Optional[String] + + +class CreateInstanceExportTaskRequest(ServiceRequest): + TagSpecifications: Optional[TagSpecificationList] + Description: Optional[String] + InstanceId: InstanceId + TargetEnvironment: ExportEnvironment + ExportToS3Task: ExportToS3TaskSpecification + + +class InstanceExportDetails(TypedDict, total=False): + InstanceId: 
Optional[String] + TargetEnvironment: Optional[ExportEnvironment] + + +class ExportToS3Task(TypedDict, total=False): + ContainerFormat: Optional[ContainerFormat] + DiskImageFormat: Optional[DiskImageFormat] + S3Bucket: Optional[String] + S3Key: Optional[String] + + +class ExportTask(TypedDict, total=False): + Description: Optional[String] + ExportTaskId: Optional[String] + ExportToS3Task: Optional[ExportToS3Task] + InstanceExportDetails: Optional[InstanceExportDetails] + State: Optional[ExportTaskState] + StatusMessage: Optional[String] + Tags: Optional[TagList] + + +class CreateInstanceExportTaskResult(TypedDict, total=False): + ExportTask: Optional[ExportTask] + + +class CreateInternetGatewayRequest(ServiceRequest): + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class InternetGateway(TypedDict, total=False): + Attachments: Optional[InternetGatewayAttachmentList] + InternetGatewayId: Optional[String] + OwnerId: Optional[String] + Tags: Optional[TagList] + + +class CreateInternetGatewayResult(TypedDict, total=False): + InternetGateway: Optional[InternetGateway] + + +class CreateIpamExternalResourceVerificationTokenRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamId: IpamId + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + + +class IpamExternalResourceVerificationToken(TypedDict, total=False): + IpamExternalResourceVerificationTokenId: Optional[IpamExternalResourceVerificationTokenId] + IpamExternalResourceVerificationTokenArn: Optional[ResourceArn] + IpamId: Optional[IpamId] + IpamArn: Optional[ResourceArn] + IpamRegion: Optional[String] + TokenValue: Optional[String] + TokenName: Optional[String] + NotAfter: Optional[MillisecondDateTime] + Status: Optional[TokenState] + Tags: Optional[TagList] + State: Optional[IpamExternalResourceVerificationTokenState] + + +class CreateIpamExternalResourceVerificationTokenResult(TypedDict, total=False): + IpamExternalResourceVerificationToken: Optional[IpamExternalResourceVerificationToken] + + +class IpamPoolSourceResourceRequest(TypedDict, total=False): + ResourceId: Optional[String] + ResourceType: Optional[IpamPoolSourceResourceType] + ResourceRegion: Optional[String] + ResourceOwner: Optional[String] + + +class RequestIpamResourceTag(TypedDict, total=False): + Key: Optional[String] + Value: Optional[String] + + +RequestIpamResourceTagList = List[RequestIpamResourceTag] + + +class CreateIpamPoolRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamScopeId: IpamScopeId + Locale: Optional[String] + SourceIpamPoolId: Optional[IpamPoolId] + Description: Optional[String] + AddressFamily: AddressFamily + AutoImport: Optional[Boolean] + PubliclyAdvertisable: Optional[Boolean] + AllocationMinNetmaskLength: Optional[IpamNetmaskLength] + AllocationMaxNetmaskLength: Optional[IpamNetmaskLength] + AllocationDefaultNetmaskLength: Optional[IpamNetmaskLength] + AllocationResourceTags: Optional[RequestIpamResourceTagList] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + AwsService: Optional[IpamPoolAwsService] + PublicIpSource: Optional[IpamPoolPublicIpSource] + SourceResource: Optional[IpamPoolSourceResourceRequest] + + +class IpamPoolSourceResource(TypedDict, total=False): + ResourceId: Optional[String] + ResourceType: Optional[IpamPoolSourceResourceType] + ResourceRegion: Optional[String] + ResourceOwner: Optional[String] + + +class IpamResourceTag(TypedDict, total=False): + Key: Optional[String] + Value: Optional[String] + + 
+IpamResourceTagList = List[IpamResourceTag] + + +class IpamPool(TypedDict, total=False): + OwnerId: Optional[String] + IpamPoolId: Optional[IpamPoolId] + SourceIpamPoolId: Optional[IpamPoolId] + IpamPoolArn: Optional[ResourceArn] + IpamScopeArn: Optional[ResourceArn] + IpamScopeType: Optional[IpamScopeType] + IpamArn: Optional[ResourceArn] + IpamRegion: Optional[String] + Locale: Optional[String] + PoolDepth: Optional[Integer] + State: Optional[IpamPoolState] + StateMessage: Optional[String] + Description: Optional[String] + AutoImport: Optional[Boolean] + PubliclyAdvertisable: Optional[Boolean] + AddressFamily: Optional[AddressFamily] + AllocationMinNetmaskLength: Optional[IpamNetmaskLength] + AllocationMaxNetmaskLength: Optional[IpamNetmaskLength] + AllocationDefaultNetmaskLength: Optional[IpamNetmaskLength] + AllocationResourceTags: Optional[IpamResourceTagList] + Tags: Optional[TagList] + AwsService: Optional[IpamPoolAwsService] + PublicIpSource: Optional[IpamPoolPublicIpSource] + SourceResource: Optional[IpamPoolSourceResource] + + +class CreateIpamPoolResult(TypedDict, total=False): + IpamPool: Optional[IpamPool] + + +class CreateIpamRequest(ServiceRequest): + DryRun: Optional[Boolean] + Description: Optional[String] + OperatingRegions: Optional[AddIpamOperatingRegionSet] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + Tier: Optional[IpamTier] + EnablePrivateGua: Optional[Boolean] + MeteredAccount: Optional[IpamMeteredAccount] + + +class CreateIpamResourceDiscoveryRequest(ServiceRequest): + DryRun: Optional[Boolean] + Description: Optional[String] + OperatingRegions: Optional[AddIpamOperatingRegionSet] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + + +class IpamOrganizationalUnitExclusion(TypedDict, total=False): + OrganizationsEntityPath: Optional[String] + + +IpamOrganizationalUnitExclusionSet = List[IpamOrganizationalUnitExclusion] + + +class IpamOperatingRegion(TypedDict, total=False): + RegionName: Optional[String] + + +IpamOperatingRegionSet = List[IpamOperatingRegion] + + +class IpamResourceDiscovery(TypedDict, total=False): + OwnerId: Optional[String] + IpamResourceDiscoveryId: Optional[IpamResourceDiscoveryId] + IpamResourceDiscoveryArn: Optional[String] + IpamResourceDiscoveryRegion: Optional[String] + Description: Optional[String] + OperatingRegions: Optional[IpamOperatingRegionSet] + IsDefault: Optional[Boolean] + State: Optional[IpamResourceDiscoveryState] + Tags: Optional[TagList] + OrganizationalUnitExclusions: Optional[IpamOrganizationalUnitExclusionSet] + + +class CreateIpamResourceDiscoveryResult(TypedDict, total=False): + IpamResourceDiscovery: Optional[IpamResourceDiscovery] + + +class Ipam(TypedDict, total=False): + OwnerId: Optional[String] + IpamId: Optional[IpamId] + IpamArn: Optional[ResourceArn] + IpamRegion: Optional[String] + PublicDefaultScopeId: Optional[IpamScopeId] + PrivateDefaultScopeId: Optional[IpamScopeId] + ScopeCount: Optional[Integer] + Description: Optional[String] + OperatingRegions: Optional[IpamOperatingRegionSet] + State: Optional[IpamState] + Tags: Optional[TagList] + DefaultResourceDiscoveryId: Optional[IpamResourceDiscoveryId] + DefaultResourceDiscoveryAssociationId: Optional[IpamResourceDiscoveryAssociationId] + ResourceDiscoveryAssociationCount: Optional[Integer] + StateMessage: Optional[String] + Tier: Optional[IpamTier] + EnablePrivateGua: Optional[Boolean] + MeteredAccount: Optional[IpamMeteredAccount] + + +class CreateIpamResult(TypedDict, total=False): 
+ Ipam: Optional[Ipam] + + +class CreateIpamScopeRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamId: IpamId + Description: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + + +class IpamScope(TypedDict, total=False): + OwnerId: Optional[String] + IpamScopeId: Optional[IpamScopeId] + IpamScopeArn: Optional[ResourceArn] + IpamArn: Optional[ResourceArn] + IpamRegion: Optional[String] + IpamScopeType: Optional[IpamScopeType] + IsDefault: Optional[Boolean] + Description: Optional[String] + PoolCount: Optional[Integer] + State: Optional[IpamScopeState] + Tags: Optional[TagList] + + +class CreateIpamScopeResult(TypedDict, total=False): + IpamScope: Optional[IpamScope] + + +class CreateKeyPairRequest(ServiceRequest): + KeyName: String + KeyType: Optional[KeyType] + TagSpecifications: Optional[TagSpecificationList] + KeyFormat: Optional[KeyFormat] + DryRun: Optional[Boolean] + + +class OperatorRequest(TypedDict, total=False): + Principal: Optional[String] + + +class LaunchTemplateNetworkPerformanceOptionsRequest(TypedDict, total=False): + BandwidthWeighting: Optional[InstanceBandwidthWeighting] + + +class LaunchTemplateInstanceMaintenanceOptionsRequest(TypedDict, total=False): + AutoRecovery: Optional[LaunchTemplateAutoRecoveryState] + + +class LaunchTemplatePrivateDnsNameOptionsRequest(TypedDict, total=False): + HostnameType: Optional[HostnameType] + EnableResourceNameDnsARecord: Optional[Boolean] + EnableResourceNameDnsAAAARecord: Optional[Boolean] + + +class LaunchTemplateEnclaveOptionsRequest(TypedDict, total=False): + Enabled: Optional[Boolean] + + +class LaunchTemplateInstanceMetadataOptionsRequest(TypedDict, total=False): + HttpTokens: Optional[LaunchTemplateHttpTokensState] + HttpPutResponseHopLimit: Optional[Integer] + HttpEndpoint: Optional[LaunchTemplateInstanceMetadataEndpointState] + HttpProtocolIpv6: Optional[LaunchTemplateInstanceMetadataProtocolIpv6] + InstanceMetadataTags: Optional[LaunchTemplateInstanceMetadataTagsState] + + +class LaunchTemplateHibernationOptionsRequest(TypedDict, total=False): + Configured: Optional[Boolean] + + +class LaunchTemplateLicenseConfigurationRequest(TypedDict, total=False): + LicenseConfigurationArn: Optional[String] + + +LaunchTemplateLicenseSpecificationListRequest = List[LaunchTemplateLicenseConfigurationRequest] + + +class LaunchTemplateCapacityReservationSpecificationRequest(TypedDict, total=False): + CapacityReservationPreference: Optional[CapacityReservationPreference] + CapacityReservationTarget: Optional[CapacityReservationTarget] + + +class LaunchTemplateCpuOptionsRequest(TypedDict, total=False): + CoreCount: Optional[Integer] + ThreadsPerCore: Optional[Integer] + AmdSevSnp: Optional[AmdSevSnpSpecification] + + +class CreditSpecificationRequest(TypedDict, total=False): + CpuCredits: String + + +class LaunchTemplateSpotMarketOptionsRequest(TypedDict, total=False): + MaxPrice: Optional[String] + SpotInstanceType: Optional[SpotInstanceType] + BlockDurationMinutes: Optional[Integer] + ValidUntil: Optional[DateTime] + InstanceInterruptionBehavior: Optional[InstanceInterruptionBehavior] + + +class LaunchTemplateInstanceMarketOptionsRequest(TypedDict, total=False): + MarketType: Optional[MarketType] + SpotOptions: Optional[LaunchTemplateSpotMarketOptionsRequest] + + +SecurityGroupStringList = List[SecurityGroupName] +SecurityGroupIdStringList = List[SecurityGroupId] + + +class LaunchTemplateElasticInferenceAccelerator(TypedDict, total=False): + Type: String + Count: 
Optional[LaunchTemplateElasticInferenceAcceleratorCount] + + +LaunchTemplateElasticInferenceAcceleratorList = List[LaunchTemplateElasticInferenceAccelerator] + + +class ElasticGpuSpecification(TypedDict, total=False): + Type: String + + +ElasticGpuSpecificationList = List[ElasticGpuSpecification] + + +class LaunchTemplateTagSpecificationRequest(TypedDict, total=False): + ResourceType: Optional[ResourceType] + Tags: Optional[TagList] + + +LaunchTemplateTagSpecificationRequestList = List[LaunchTemplateTagSpecificationRequest] + + +class LaunchTemplatePlacementRequest(TypedDict, total=False): + AvailabilityZone: Optional[String] + Affinity: Optional[String] + GroupName: Optional[PlacementGroupName] + HostId: Optional[DedicatedHostId] + Tenancy: Optional[Tenancy] + SpreadDomain: Optional[String] + HostResourceGroupArn: Optional[String] + PartitionNumber: Optional[Integer] + GroupId: Optional[PlacementGroupId] + + +class LaunchTemplatesMonitoringRequest(TypedDict, total=False): + Enabled: Optional[Boolean] + + +class EnaSrdUdpSpecificationRequest(TypedDict, total=False): + EnaSrdUdpEnabled: Optional[Boolean] + + +class EnaSrdSpecificationRequest(TypedDict, total=False): + EnaSrdEnabled: Optional[Boolean] + EnaSrdUdpSpecification: Optional[EnaSrdUdpSpecificationRequest] + + +class Ipv6PrefixSpecificationRequest(TypedDict, total=False): + Ipv6Prefix: Optional[String] + + +Ipv6PrefixList = List[Ipv6PrefixSpecificationRequest] + + +class Ipv4PrefixSpecificationRequest(TypedDict, total=False): + Ipv4Prefix: Optional[String] + + +Ipv4PrefixList = List[Ipv4PrefixSpecificationRequest] + + +class PrivateIpAddressSpecification(TypedDict, total=False): + Primary: Optional[Boolean] + PrivateIpAddress: Optional[String] + + +PrivateIpAddressSpecificationList = List[PrivateIpAddressSpecification] + + +class InstanceIpv6AddressRequest(TypedDict, total=False): + Ipv6Address: Optional[String] + + +InstanceIpv6AddressListRequest = List[InstanceIpv6AddressRequest] + + +class LaunchTemplateInstanceNetworkInterfaceSpecificationRequest(TypedDict, total=False): + AssociateCarrierIpAddress: Optional[Boolean] + AssociatePublicIpAddress: Optional[Boolean] + DeleteOnTermination: Optional[Boolean] + Description: Optional[String] + DeviceIndex: Optional[Integer] + Groups: Optional[SecurityGroupIdStringList] + InterfaceType: Optional[String] + Ipv6AddressCount: Optional[Integer] + Ipv6Addresses: Optional[InstanceIpv6AddressListRequest] + NetworkInterfaceId: Optional[NetworkInterfaceId] + PrivateIpAddress: Optional[String] + PrivateIpAddresses: Optional[PrivateIpAddressSpecificationList] + SecondaryPrivateIpAddressCount: Optional[Integer] + SubnetId: Optional[SubnetId] + NetworkCardIndex: Optional[Integer] + Ipv4Prefixes: Optional[Ipv4PrefixList] + Ipv4PrefixCount: Optional[Integer] + Ipv6Prefixes: Optional[Ipv6PrefixList] + Ipv6PrefixCount: Optional[Integer] + PrimaryIpv6: Optional[Boolean] + EnaSrdSpecification: Optional[EnaSrdSpecificationRequest] + ConnectionTrackingSpecification: Optional[ConnectionTrackingSpecificationRequest] + EnaQueueCount: Optional[Integer] + + +LaunchTemplateInstanceNetworkInterfaceSpecificationRequestList = List[ + LaunchTemplateInstanceNetworkInterfaceSpecificationRequest +] + + +class LaunchTemplateEbsBlockDeviceRequest(TypedDict, total=False): + Encrypted: Optional[Boolean] + DeleteOnTermination: Optional[Boolean] + Iops: Optional[Integer] + KmsKeyId: Optional[KmsKeyId] + SnapshotId: Optional[SnapshotId] + VolumeSize: Optional[Integer] + VolumeType: Optional[VolumeType] + Throughput: 
Optional[Integer] + VolumeInitializationRate: Optional[Integer] + + +class LaunchTemplateBlockDeviceMappingRequest(TypedDict, total=False): + DeviceName: Optional[String] + VirtualName: Optional[String] + Ebs: Optional[LaunchTemplateEbsBlockDeviceRequest] + NoDevice: Optional[String] + + +LaunchTemplateBlockDeviceMappingRequestList = List[LaunchTemplateBlockDeviceMappingRequest] + + +class LaunchTemplateIamInstanceProfileSpecificationRequest(TypedDict, total=False): + Arn: Optional[String] + Name: Optional[String] + + +class RequestLaunchTemplateData(TypedDict, total=False): + KernelId: Optional[KernelId] + EbsOptimized: Optional[Boolean] + IamInstanceProfile: Optional[LaunchTemplateIamInstanceProfileSpecificationRequest] + BlockDeviceMappings: Optional[LaunchTemplateBlockDeviceMappingRequestList] + NetworkInterfaces: Optional[LaunchTemplateInstanceNetworkInterfaceSpecificationRequestList] + ImageId: Optional[ImageId] + InstanceType: Optional[InstanceType] + KeyName: Optional[KeyPairName] + Monitoring: Optional[LaunchTemplatesMonitoringRequest] + Placement: Optional[LaunchTemplatePlacementRequest] + RamDiskId: Optional[RamdiskId] + DisableApiTermination: Optional[Boolean] + InstanceInitiatedShutdownBehavior: Optional[ShutdownBehavior] + UserData: Optional[SensitiveUserData] + TagSpecifications: Optional[LaunchTemplateTagSpecificationRequestList] + ElasticGpuSpecifications: Optional[ElasticGpuSpecificationList] + ElasticInferenceAccelerators: Optional[LaunchTemplateElasticInferenceAcceleratorList] + SecurityGroupIds: Optional[SecurityGroupIdStringList] + SecurityGroups: Optional[SecurityGroupStringList] + InstanceMarketOptions: Optional[LaunchTemplateInstanceMarketOptionsRequest] + CreditSpecification: Optional[CreditSpecificationRequest] + CpuOptions: Optional[LaunchTemplateCpuOptionsRequest] + CapacityReservationSpecification: Optional[ + LaunchTemplateCapacityReservationSpecificationRequest + ] + LicenseSpecifications: Optional[LaunchTemplateLicenseSpecificationListRequest] + HibernationOptions: Optional[LaunchTemplateHibernationOptionsRequest] + MetadataOptions: Optional[LaunchTemplateInstanceMetadataOptionsRequest] + EnclaveOptions: Optional[LaunchTemplateEnclaveOptionsRequest] + InstanceRequirements: Optional[InstanceRequirementsRequest] + PrivateDnsNameOptions: Optional[LaunchTemplatePrivateDnsNameOptionsRequest] + MaintenanceOptions: Optional[LaunchTemplateInstanceMaintenanceOptionsRequest] + DisableApiStop: Optional[Boolean] + Operator: Optional[OperatorRequest] + NetworkPerformanceOptions: Optional[LaunchTemplateNetworkPerformanceOptionsRequest] + + +class CreateLaunchTemplateRequest(ServiceRequest): + DryRun: Optional[Boolean] + ClientToken: Optional[String] + LaunchTemplateName: LaunchTemplateName + VersionDescription: Optional[VersionDescription] + LaunchTemplateData: RequestLaunchTemplateData + Operator: Optional[OperatorRequest] + TagSpecifications: Optional[TagSpecificationList] + + +class ValidationError(TypedDict, total=False): + Code: Optional[String] + Message: Optional[String] + + +ErrorSet = List[ValidationError] + + +class ValidationWarning(TypedDict, total=False): + Errors: Optional[ErrorSet] + + +class OperatorResponse(TypedDict, total=False): + Managed: Optional[Boolean] + Principal: Optional[String] + + +class LaunchTemplate(TypedDict, total=False): + LaunchTemplateId: Optional[String] + LaunchTemplateName: Optional[LaunchTemplateName] + CreateTime: Optional[DateTime] + CreatedBy: Optional[String] + DefaultVersionNumber: Optional[Long] + LatestVersionNumber: 
Optional[Long] + Tags: Optional[TagList] + Operator: Optional[OperatorResponse] + + +class CreateLaunchTemplateResult(TypedDict, total=False): + LaunchTemplate: Optional[LaunchTemplate] + Warning: Optional[ValidationWarning] + + +class CreateLaunchTemplateVersionRequest(ServiceRequest): + DryRun: Optional[Boolean] + ClientToken: Optional[String] + LaunchTemplateId: Optional[LaunchTemplateId] + LaunchTemplateName: Optional[LaunchTemplateName] + SourceVersion: Optional[String] + VersionDescription: Optional[VersionDescription] + LaunchTemplateData: RequestLaunchTemplateData + ResolveAlias: Optional[Boolean] + + +class LaunchTemplateNetworkPerformanceOptions(TypedDict, total=False): + BandwidthWeighting: Optional[InstanceBandwidthWeighting] + + +class LaunchTemplateInstanceMaintenanceOptions(TypedDict, total=False): + AutoRecovery: Optional[LaunchTemplateAutoRecoveryState] + + +class LaunchTemplatePrivateDnsNameOptions(TypedDict, total=False): + HostnameType: Optional[HostnameType] + EnableResourceNameDnsARecord: Optional[Boolean] + EnableResourceNameDnsAAAARecord: Optional[Boolean] + + +class LaunchTemplateEnclaveOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + + +class LaunchTemplateInstanceMetadataOptions(TypedDict, total=False): + State: Optional[LaunchTemplateInstanceMetadataOptionsState] + HttpTokens: Optional[LaunchTemplateHttpTokensState] + HttpPutResponseHopLimit: Optional[Integer] + HttpEndpoint: Optional[LaunchTemplateInstanceMetadataEndpointState] + HttpProtocolIpv6: Optional[LaunchTemplateInstanceMetadataProtocolIpv6] + InstanceMetadataTags: Optional[LaunchTemplateInstanceMetadataTagsState] + + +class LaunchTemplateHibernationOptions(TypedDict, total=False): + Configured: Optional[Boolean] + + +class LaunchTemplateLicenseConfiguration(TypedDict, total=False): + LicenseConfigurationArn: Optional[String] + + +LaunchTemplateLicenseList = List[LaunchTemplateLicenseConfiguration] + + +class LaunchTemplateCapacityReservationSpecificationResponse(TypedDict, total=False): + CapacityReservationPreference: Optional[CapacityReservationPreference] + CapacityReservationTarget: Optional[CapacityReservationTargetResponse] + + +class LaunchTemplateCpuOptions(TypedDict, total=False): + CoreCount: Optional[Integer] + ThreadsPerCore: Optional[Integer] + AmdSevSnp: Optional[AmdSevSnpSpecification] + + +class CreditSpecification(TypedDict, total=False): + CpuCredits: Optional[String] + + +class LaunchTemplateSpotMarketOptions(TypedDict, total=False): + MaxPrice: Optional[String] + SpotInstanceType: Optional[SpotInstanceType] + BlockDurationMinutes: Optional[Integer] + ValidUntil: Optional[DateTime] + InstanceInterruptionBehavior: Optional[InstanceInterruptionBehavior] + + +class LaunchTemplateInstanceMarketOptions(TypedDict, total=False): + MarketType: Optional[MarketType] + SpotOptions: Optional[LaunchTemplateSpotMarketOptions] + + +class LaunchTemplateElasticInferenceAcceleratorResponse(TypedDict, total=False): + Type: Optional[String] + Count: Optional[Integer] + + +LaunchTemplateElasticInferenceAcceleratorResponseList = List[ + LaunchTemplateElasticInferenceAcceleratorResponse +] + + +class ElasticGpuSpecificationResponse(TypedDict, total=False): + Type: Optional[String] + + +ElasticGpuSpecificationResponseList = List[ElasticGpuSpecificationResponse] + + +class LaunchTemplateTagSpecification(TypedDict, total=False): + ResourceType: Optional[ResourceType] + Tags: Optional[TagList] + + +LaunchTemplateTagSpecificationList = List[LaunchTemplateTagSpecification] + + +class 
LaunchTemplatePlacement(TypedDict, total=False): + AvailabilityZone: Optional[String] + Affinity: Optional[String] + GroupName: Optional[String] + HostId: Optional[String] + Tenancy: Optional[Tenancy] + SpreadDomain: Optional[String] + HostResourceGroupArn: Optional[String] + PartitionNumber: Optional[Integer] + GroupId: Optional[PlacementGroupId] + + +class LaunchTemplatesMonitoring(TypedDict, total=False): + Enabled: Optional[Boolean] + + +class LaunchTemplateEnaSrdUdpSpecification(TypedDict, total=False): + EnaSrdUdpEnabled: Optional[Boolean] + + +class LaunchTemplateEnaSrdSpecification(TypedDict, total=False): + EnaSrdEnabled: Optional[Boolean] + EnaSrdUdpSpecification: Optional[LaunchTemplateEnaSrdUdpSpecification] + + +class Ipv6PrefixSpecificationResponse(TypedDict, total=False): + Ipv6Prefix: Optional[String] + + +Ipv6PrefixListResponse = List[Ipv6PrefixSpecificationResponse] + + +class Ipv4PrefixSpecificationResponse(TypedDict, total=False): + Ipv4Prefix: Optional[String] + + +Ipv4PrefixListResponse = List[Ipv4PrefixSpecificationResponse] + + +class InstanceIpv6Address(TypedDict, total=False): + Ipv6Address: Optional[String] + IsPrimaryIpv6: Optional[Boolean] + + +InstanceIpv6AddressList = List[InstanceIpv6Address] + + +class LaunchTemplateInstanceNetworkInterfaceSpecification(TypedDict, total=False): + AssociateCarrierIpAddress: Optional[Boolean] + AssociatePublicIpAddress: Optional[Boolean] + DeleteOnTermination: Optional[Boolean] + Description: Optional[String] + DeviceIndex: Optional[Integer] + Groups: Optional[GroupIdStringList] + InterfaceType: Optional[String] + Ipv6AddressCount: Optional[Integer] + Ipv6Addresses: Optional[InstanceIpv6AddressList] + NetworkInterfaceId: Optional[NetworkInterfaceId] + PrivateIpAddress: Optional[String] + PrivateIpAddresses: Optional[PrivateIpAddressSpecificationList] + SecondaryPrivateIpAddressCount: Optional[Integer] + SubnetId: Optional[SubnetId] + NetworkCardIndex: Optional[Integer] + Ipv4Prefixes: Optional[Ipv4PrefixListResponse] + Ipv4PrefixCount: Optional[Integer] + Ipv6Prefixes: Optional[Ipv6PrefixListResponse] + Ipv6PrefixCount: Optional[Integer] + PrimaryIpv6: Optional[Boolean] + EnaSrdSpecification: Optional[LaunchTemplateEnaSrdSpecification] + ConnectionTrackingSpecification: Optional[ConnectionTrackingSpecification] + EnaQueueCount: Optional[Integer] + + +LaunchTemplateInstanceNetworkInterfaceSpecificationList = List[ + LaunchTemplateInstanceNetworkInterfaceSpecification +] + + +class LaunchTemplateEbsBlockDevice(TypedDict, total=False): + Encrypted: Optional[Boolean] + DeleteOnTermination: Optional[Boolean] + Iops: Optional[Integer] + KmsKeyId: Optional[KmsKeyId] + SnapshotId: Optional[SnapshotId] + VolumeSize: Optional[Integer] + VolumeType: Optional[VolumeType] + Throughput: Optional[Integer] + VolumeInitializationRate: Optional[Integer] + + +class LaunchTemplateBlockDeviceMapping(TypedDict, total=False): + DeviceName: Optional[String] + VirtualName: Optional[String] + Ebs: Optional[LaunchTemplateEbsBlockDevice] + NoDevice: Optional[String] + + +LaunchTemplateBlockDeviceMappingList = List[LaunchTemplateBlockDeviceMapping] + + +class LaunchTemplateIamInstanceProfileSpecification(TypedDict, total=False): + Arn: Optional[String] + Name: Optional[String] + + +class ResponseLaunchTemplateData(TypedDict, total=False): + KernelId: Optional[String] + EbsOptimized: Optional[Boolean] + IamInstanceProfile: Optional[LaunchTemplateIamInstanceProfileSpecification] + BlockDeviceMappings: Optional[LaunchTemplateBlockDeviceMappingList] + 
NetworkInterfaces: Optional[LaunchTemplateInstanceNetworkInterfaceSpecificationList] + ImageId: Optional[String] + InstanceType: Optional[InstanceType] + KeyName: Optional[String] + Monitoring: Optional[LaunchTemplatesMonitoring] + Placement: Optional[LaunchTemplatePlacement] + RamDiskId: Optional[String] + DisableApiTermination: Optional[Boolean] + InstanceInitiatedShutdownBehavior: Optional[ShutdownBehavior] + UserData: Optional[SensitiveUserData] + TagSpecifications: Optional[LaunchTemplateTagSpecificationList] + ElasticGpuSpecifications: Optional[ElasticGpuSpecificationResponseList] + ElasticInferenceAccelerators: Optional[LaunchTemplateElasticInferenceAcceleratorResponseList] + SecurityGroupIds: Optional[ValueStringList] + SecurityGroups: Optional[ValueStringList] + InstanceMarketOptions: Optional[LaunchTemplateInstanceMarketOptions] + CreditSpecification: Optional[CreditSpecification] + CpuOptions: Optional[LaunchTemplateCpuOptions] + CapacityReservationSpecification: Optional[ + LaunchTemplateCapacityReservationSpecificationResponse + ] + LicenseSpecifications: Optional[LaunchTemplateLicenseList] + HibernationOptions: Optional[LaunchTemplateHibernationOptions] + MetadataOptions: Optional[LaunchTemplateInstanceMetadataOptions] + EnclaveOptions: Optional[LaunchTemplateEnclaveOptions] + InstanceRequirements: Optional[InstanceRequirements] + PrivateDnsNameOptions: Optional[LaunchTemplatePrivateDnsNameOptions] + MaintenanceOptions: Optional[LaunchTemplateInstanceMaintenanceOptions] + DisableApiStop: Optional[Boolean] + Operator: Optional[OperatorResponse] + NetworkPerformanceOptions: Optional[LaunchTemplateNetworkPerformanceOptions] + + +class LaunchTemplateVersion(TypedDict, total=False): + LaunchTemplateId: Optional[String] + LaunchTemplateName: Optional[LaunchTemplateName] + VersionNumber: Optional[Long] + VersionDescription: Optional[VersionDescription] + CreateTime: Optional[DateTime] + CreatedBy: Optional[String] + DefaultVersion: Optional[Boolean] + LaunchTemplateData: Optional[ResponseLaunchTemplateData] + Operator: Optional[OperatorResponse] + + +class CreateLaunchTemplateVersionResult(TypedDict, total=False): + LaunchTemplateVersion: Optional[LaunchTemplateVersion] + Warning: Optional[ValidationWarning] + + +class CreateLocalGatewayRouteRequest(ServiceRequest): + DestinationCidrBlock: Optional[String] + LocalGatewayRouteTableId: LocalGatewayRoutetableId + LocalGatewayVirtualInterfaceGroupId: Optional[LocalGatewayVirtualInterfaceGroupId] + DryRun: Optional[Boolean] + NetworkInterfaceId: Optional[NetworkInterfaceId] + DestinationPrefixListId: Optional[PrefixListResourceId] + + +class LocalGatewayRoute(TypedDict, total=False): + DestinationCidrBlock: Optional[String] + LocalGatewayVirtualInterfaceGroupId: Optional[LocalGatewayVirtualInterfaceGroupId] + Type: Optional[LocalGatewayRouteType] + State: Optional[LocalGatewayRouteState] + LocalGatewayRouteTableId: Optional[LocalGatewayRoutetableId] + LocalGatewayRouteTableArn: Optional[ResourceArn] + OwnerId: Optional[String] + SubnetId: Optional[SubnetId] + CoipPoolId: Optional[CoipPoolId] + NetworkInterfaceId: Optional[NetworkInterfaceId] + DestinationPrefixListId: Optional[PrefixListResourceId] + + +class CreateLocalGatewayRouteResult(TypedDict, total=False): + Route: Optional[LocalGatewayRoute] + + +class CreateLocalGatewayRouteTableRequest(ServiceRequest): + LocalGatewayId: LocalGatewayId + Mode: Optional[LocalGatewayRouteTableMode] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class 
StateReason(TypedDict, total=False): + Code: Optional[String] + Message: Optional[String] + + +class LocalGatewayRouteTable(TypedDict, total=False): + LocalGatewayRouteTableId: Optional[String] + LocalGatewayRouteTableArn: Optional[ResourceArn] + LocalGatewayId: Optional[LocalGatewayId] + OutpostArn: Optional[String] + OwnerId: Optional[String] + State: Optional[String] + Tags: Optional[TagList] + Mode: Optional[LocalGatewayRouteTableMode] + StateReason: Optional[StateReason] + + +class CreateLocalGatewayRouteTableResult(TypedDict, total=False): + LocalGatewayRouteTable: Optional[LocalGatewayRouteTable] + + +class CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationRequest(ServiceRequest): + LocalGatewayRouteTableId: LocalGatewayRoutetableId + LocalGatewayVirtualInterfaceGroupId: LocalGatewayVirtualInterfaceGroupId + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class LocalGatewayRouteTableVirtualInterfaceGroupAssociation(TypedDict, total=False): + LocalGatewayRouteTableVirtualInterfaceGroupAssociationId: Optional[ + LocalGatewayRouteTableVirtualInterfaceGroupAssociationId + ] + LocalGatewayVirtualInterfaceGroupId: Optional[LocalGatewayVirtualInterfaceGroupId] + LocalGatewayId: Optional[String] + LocalGatewayRouteTableId: Optional[LocalGatewayId] + LocalGatewayRouteTableArn: Optional[ResourceArn] + OwnerId: Optional[String] + State: Optional[String] + Tags: Optional[TagList] + + +class CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationResult(TypedDict, total=False): + LocalGatewayRouteTableVirtualInterfaceGroupAssociation: Optional[ + LocalGatewayRouteTableVirtualInterfaceGroupAssociation + ] + + +class CreateLocalGatewayRouteTableVpcAssociationRequest(ServiceRequest): + LocalGatewayRouteTableId: LocalGatewayRoutetableId + VpcId: VpcId + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class LocalGatewayRouteTableVpcAssociation(TypedDict, total=False): + LocalGatewayRouteTableVpcAssociationId: Optional[LocalGatewayRouteTableVpcAssociationId] + LocalGatewayRouteTableId: Optional[String] + LocalGatewayRouteTableArn: Optional[ResourceArn] + LocalGatewayId: Optional[String] + VpcId: Optional[String] + OwnerId: Optional[String] + State: Optional[String] + Tags: Optional[TagList] + + +class CreateLocalGatewayRouteTableVpcAssociationResult(TypedDict, total=False): + LocalGatewayRouteTableVpcAssociation: Optional[LocalGatewayRouteTableVpcAssociation] + + +class CreateLocalGatewayVirtualInterfaceGroupRequest(ServiceRequest): + LocalGatewayId: LocalGatewayId + LocalBgpAsn: Optional[Integer] + LocalBgpAsnExtended: Optional[Long] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +LocalGatewayVirtualInterfaceIdSet = List[LocalGatewayVirtualInterfaceId] + + +class LocalGatewayVirtualInterfaceGroup(TypedDict, total=False): + LocalGatewayVirtualInterfaceGroupId: Optional[LocalGatewayVirtualInterfaceGroupId] + LocalGatewayVirtualInterfaceIds: Optional[LocalGatewayVirtualInterfaceIdSet] + LocalGatewayId: Optional[String] + OwnerId: Optional[String] + LocalBgpAsn: Optional[Integer] + LocalBgpAsnExtended: Optional[Long] + LocalGatewayVirtualInterfaceGroupArn: Optional[ResourceArn] + Tags: Optional[TagList] + ConfigurationState: Optional[LocalGatewayVirtualInterfaceGroupConfigurationState] + + +class CreateLocalGatewayVirtualInterfaceGroupResult(TypedDict, total=False): + LocalGatewayVirtualInterfaceGroup: Optional[LocalGatewayVirtualInterfaceGroup] + + +class 
CreateLocalGatewayVirtualInterfaceRequest(ServiceRequest): + LocalGatewayVirtualInterfaceGroupId: LocalGatewayVirtualInterfaceGroupId + OutpostLagId: OutpostLagId + Vlan: Integer + LocalAddress: String + PeerAddress: String + PeerBgpAsn: Optional[Integer] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + PeerBgpAsnExtended: Optional[Long] + + +class LocalGatewayVirtualInterface(TypedDict, total=False): + LocalGatewayVirtualInterfaceId: Optional[LocalGatewayVirtualInterfaceId] + LocalGatewayId: Optional[String] + LocalGatewayVirtualInterfaceGroupId: Optional[LocalGatewayVirtualInterfaceGroupId] + LocalGatewayVirtualInterfaceArn: Optional[ResourceArn] + OutpostLagId: Optional[String] + Vlan: Optional[Integer] + LocalAddress: Optional[String] + PeerAddress: Optional[String] + LocalBgpAsn: Optional[Integer] + PeerBgpAsn: Optional[Integer] + PeerBgpAsnExtended: Optional[Long] + OwnerId: Optional[String] + Tags: Optional[TagList] + ConfigurationState: Optional[LocalGatewayVirtualInterfaceConfigurationState] + + +class CreateLocalGatewayVirtualInterfaceResult(TypedDict, total=False): + LocalGatewayVirtualInterface: Optional[LocalGatewayVirtualInterface] + + +class MacSystemIntegrityProtectionConfigurationRequest(TypedDict, total=False): + AppleInternal: Optional[MacSystemIntegrityProtectionSettingStatus] + BaseSystem: Optional[MacSystemIntegrityProtectionSettingStatus] + DebuggingRestrictions: Optional[MacSystemIntegrityProtectionSettingStatus] + DTraceRestrictions: Optional[MacSystemIntegrityProtectionSettingStatus] + FilesystemProtections: Optional[MacSystemIntegrityProtectionSettingStatus] + KextSigning: Optional[MacSystemIntegrityProtectionSettingStatus] + NvramProtections: Optional[MacSystemIntegrityProtectionSettingStatus] + + +class CreateMacSystemIntegrityProtectionModificationTaskRequest(ServiceRequest): + ClientToken: Optional[String] + DryRun: Optional[Boolean] + InstanceId: InstanceId + MacCredentials: Optional[SensitiveMacCredentials] + MacSystemIntegrityProtectionConfiguration: Optional[ + MacSystemIntegrityProtectionConfigurationRequest + ] + MacSystemIntegrityProtectionStatus: MacSystemIntegrityProtectionSettingStatus + TagSpecifications: Optional[TagSpecificationList] + + +class CreateMacSystemIntegrityProtectionModificationTaskResult(TypedDict, total=False): + MacModificationTask: Optional[MacModificationTask] + + +class CreateManagedPrefixListRequest(ServiceRequest): + DryRun: Optional[Boolean] + PrefixListName: String + Entries: Optional[AddPrefixListEntries] + MaxEntries: Integer + TagSpecifications: Optional[TagSpecificationList] + AddressFamily: String + ClientToken: Optional[String] + + +class ManagedPrefixList(TypedDict, total=False): + PrefixListId: Optional[PrefixListResourceId] + AddressFamily: Optional[String] + State: Optional[PrefixListState] + StateMessage: Optional[String] + PrefixListArn: Optional[ResourceArn] + PrefixListName: Optional[String] + MaxEntries: Optional[Integer] + Version: Optional[Long] + Tags: Optional[TagList] + OwnerId: Optional[String] + + +class CreateManagedPrefixListResult(TypedDict, total=False): + PrefixList: Optional[ManagedPrefixList] + + +class CreateNatGatewayRequest(ServiceRequest): + AllocationId: Optional[AllocationId] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + SubnetId: SubnetId + TagSpecifications: Optional[TagSpecificationList] + ConnectivityType: Optional[ConnectivityType] + PrivateIpAddress: Optional[String] + SecondaryAllocationIds: Optional[AllocationIdList] + 
SecondaryPrivateIpAddresses: Optional[IpList] + SecondaryPrivateIpAddressCount: Optional[PrivateIpAddressCount] + + +class ProvisionedBandwidth(TypedDict, total=False): + ProvisionTime: Optional[DateTime] + Provisioned: Optional[String] + RequestTime: Optional[DateTime] + Requested: Optional[String] + Status: Optional[String] + + +class NatGateway(TypedDict, total=False): + CreateTime: Optional[DateTime] + DeleteTime: Optional[DateTime] + FailureCode: Optional[String] + FailureMessage: Optional[String] + NatGatewayAddresses: Optional[NatGatewayAddressList] + NatGatewayId: Optional[String] + ProvisionedBandwidth: Optional[ProvisionedBandwidth] + State: Optional[NatGatewayState] + SubnetId: Optional[String] + VpcId: Optional[String] + Tags: Optional[TagList] + ConnectivityType: Optional[ConnectivityType] + + +class CreateNatGatewayResult(TypedDict, total=False): + ClientToken: Optional[String] + NatGateway: Optional[NatGateway] + + +class IcmpTypeCode(TypedDict, total=False): + Code: Optional[Integer] + Type: Optional[Integer] + + +class CreateNetworkAclEntryRequest(ServiceRequest): + DryRun: Optional[Boolean] + NetworkAclId: NetworkAclId + RuleNumber: Integer + Protocol: String + RuleAction: RuleAction + Egress: Boolean + CidrBlock: Optional[String] + Ipv6CidrBlock: Optional[String] + IcmpTypeCode: Optional[IcmpTypeCode] + PortRange: Optional[PortRange] + + +class CreateNetworkAclRequest(ServiceRequest): + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + VpcId: VpcId + + +class NetworkAclEntry(TypedDict, total=False): + CidrBlock: Optional[String] + Egress: Optional[Boolean] + IcmpTypeCode: Optional[IcmpTypeCode] + Ipv6CidrBlock: Optional[String] + PortRange: Optional[PortRange] + Protocol: Optional[String] + RuleAction: Optional[RuleAction] + RuleNumber: Optional[Integer] + + +NetworkAclEntryList = List[NetworkAclEntry] + + +class NetworkAclAssociation(TypedDict, total=False): + NetworkAclAssociationId: Optional[String] + NetworkAclId: Optional[String] + SubnetId: Optional[String] + + +NetworkAclAssociationList = List[NetworkAclAssociation] + + +class NetworkAcl(TypedDict, total=False): + Associations: Optional[NetworkAclAssociationList] + Entries: Optional[NetworkAclEntryList] + IsDefault: Optional[Boolean] + NetworkAclId: Optional[String] + Tags: Optional[TagList] + VpcId: Optional[String] + OwnerId: Optional[String] + + +class CreateNetworkAclResult(TypedDict, total=False): + NetworkAcl: Optional[NetworkAcl] + ClientToken: Optional[String] + + +class CreateNetworkInsightsAccessScopeRequest(ServiceRequest): + MatchPaths: Optional[AccessScopePathListRequest] + ExcludePaths: Optional[AccessScopePathListRequest] + ClientToken: String + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class NetworkInsightsAccessScopeContent(TypedDict, total=False): + NetworkInsightsAccessScopeId: Optional[NetworkInsightsAccessScopeId] + MatchPaths: Optional[AccessScopePathList] + ExcludePaths: Optional[AccessScopePathList] + + +class NetworkInsightsAccessScope(TypedDict, total=False): + NetworkInsightsAccessScopeId: Optional[NetworkInsightsAccessScopeId] + NetworkInsightsAccessScopeArn: Optional[ResourceArn] + CreatedDate: Optional[MillisecondDateTime] + UpdatedDate: Optional[MillisecondDateTime] + Tags: Optional[TagList] + + +class CreateNetworkInsightsAccessScopeResult(TypedDict, total=False): + NetworkInsightsAccessScope: Optional[NetworkInsightsAccessScope] + NetworkInsightsAccessScopeContent: 
Optional[NetworkInsightsAccessScopeContent] + + +class RequestFilterPortRange(TypedDict, total=False): + FromPort: Optional[Port] + ToPort: Optional[Port] + + +class PathRequestFilter(TypedDict, total=False): + SourceAddress: Optional[IpAddress] + SourcePortRange: Optional[RequestFilterPortRange] + DestinationAddress: Optional[IpAddress] + DestinationPortRange: Optional[RequestFilterPortRange] + + +class CreateNetworkInsightsPathRequest(ServiceRequest): + SourceIp: Optional[IpAddress] + DestinationIp: Optional[IpAddress] + Source: NetworkInsightsResourceId + Destination: Optional[NetworkInsightsResourceId] + Protocol: Protocol + DestinationPort: Optional[Port] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + ClientToken: String + FilterAtSource: Optional[PathRequestFilter] + FilterAtDestination: Optional[PathRequestFilter] + + +class FilterPortRange(TypedDict, total=False): + FromPort: Optional[Port] + ToPort: Optional[Port] + + +class PathFilter(TypedDict, total=False): + SourceAddress: Optional[IpAddress] + SourcePortRange: Optional[FilterPortRange] + DestinationAddress: Optional[IpAddress] + DestinationPortRange: Optional[FilterPortRange] + + +class NetworkInsightsPath(TypedDict, total=False): + NetworkInsightsPathId: Optional[NetworkInsightsPathId] + NetworkInsightsPathArn: Optional[ResourceArn] + CreatedDate: Optional[MillisecondDateTime] + Source: Optional[String] + Destination: Optional[String] + SourceArn: Optional[ResourceArn] + DestinationArn: Optional[ResourceArn] + SourceIp: Optional[IpAddress] + DestinationIp: Optional[IpAddress] + Protocol: Optional[Protocol] + DestinationPort: Optional[Integer] + Tags: Optional[TagList] + FilterAtSource: Optional[PathFilter] + FilterAtDestination: Optional[PathFilter] + + +class CreateNetworkInsightsPathResult(TypedDict, total=False): + NetworkInsightsPath: Optional[NetworkInsightsPath] + + +class CreateNetworkInterfacePermissionRequest(ServiceRequest): + NetworkInterfaceId: NetworkInterfaceId + AwsAccountId: Optional[String] + AwsService: Optional[String] + Permission: InterfacePermissionType + DryRun: Optional[Boolean] + + +class NetworkInterfacePermissionState(TypedDict, total=False): + State: Optional[NetworkInterfacePermissionStateCode] + StatusMessage: Optional[String] + + +class NetworkInterfacePermission(TypedDict, total=False): + NetworkInterfacePermissionId: Optional[String] + NetworkInterfaceId: Optional[String] + AwsAccountId: Optional[String] + AwsService: Optional[String] + Permission: Optional[InterfacePermissionType] + PermissionState: Optional[NetworkInterfacePermissionState] + + +class CreateNetworkInterfacePermissionResult(TypedDict, total=False): + InterfacePermission: Optional[NetworkInterfacePermission] + + +class CreateNetworkInterfaceRequest(ServiceRequest): + Ipv4Prefixes: Optional[Ipv4PrefixList] + Ipv4PrefixCount: Optional[Integer] + Ipv6Prefixes: Optional[Ipv6PrefixList] + Ipv6PrefixCount: Optional[Integer] + InterfaceType: Optional[NetworkInterfaceCreationType] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + EnablePrimaryIpv6: Optional[Boolean] + ConnectionTrackingSpecification: Optional[ConnectionTrackingSpecificationRequest] + Operator: Optional[OperatorRequest] + SubnetId: SubnetId + Description: Optional[String] + PrivateIpAddress: Optional[String] + Groups: Optional[SecurityGroupIdStringList] + PrivateIpAddresses: Optional[PrivateIpAddressSpecificationList] + SecondaryPrivateIpAddressCount: Optional[Integer] + Ipv6Addresses: 
Optional[InstanceIpv6AddressList] + Ipv6AddressCount: Optional[Integer] + DryRun: Optional[Boolean] + + +class Ipv6PrefixSpecification(TypedDict, total=False): + Ipv6Prefix: Optional[String] + + +Ipv6PrefixesList = List[Ipv6PrefixSpecification] + + +class NetworkInterfaceAssociation(TypedDict, total=False): + AllocationId: Optional[String] + AssociationId: Optional[String] + IpOwnerId: Optional[String] + PublicDnsName: Optional[String] + PublicIp: Optional[String] + CustomerOwnedIp: Optional[String] + CarrierIp: Optional[String] + + +class NetworkInterfacePrivateIpAddress(TypedDict, total=False): + Association: Optional[NetworkInterfaceAssociation] + Primary: Optional[Boolean] + PrivateDnsName: Optional[String] + PrivateIpAddress: Optional[String] + + +NetworkInterfacePrivateIpAddressList = List[NetworkInterfacePrivateIpAddress] + + +class PublicIpDnsNameOptions(TypedDict, total=False): + DnsHostnameType: Optional[String] + PublicIpv4DnsName: Optional[String] + PublicIpv6DnsName: Optional[String] + PublicDualStackDnsName: Optional[String] + + +class NetworkInterfaceIpv6Address(TypedDict, total=False): + Ipv6Address: Optional[String] + PublicIpv6DnsName: Optional[String] + IsPrimaryIpv6: Optional[Boolean] + + +NetworkInterfaceIpv6AddressesList = List[NetworkInterfaceIpv6Address] + + +class NetworkInterfaceAttachment(TypedDict, total=False): + AttachTime: Optional[DateTime] + AttachmentId: Optional[String] + DeleteOnTermination: Optional[Boolean] + DeviceIndex: Optional[Integer] + NetworkCardIndex: Optional[Integer] + InstanceId: Optional[String] + InstanceOwnerId: Optional[String] + Status: Optional[AttachmentStatus] + EnaSrdSpecification: Optional[AttachmentEnaSrdSpecification] + EnaQueueCount: Optional[Integer] + + +class NetworkInterface(TypedDict, total=False): + Association: Optional[NetworkInterfaceAssociation] + Attachment: Optional[NetworkInterfaceAttachment] + AvailabilityZone: Optional[String] + ConnectionTrackingConfiguration: Optional[ConnectionTrackingConfiguration] + Description: Optional[String] + Groups: Optional[GroupIdentifierList] + InterfaceType: Optional[NetworkInterfaceType] + Ipv6Addresses: Optional[NetworkInterfaceIpv6AddressesList] + MacAddress: Optional[String] + NetworkInterfaceId: Optional[String] + OutpostArn: Optional[String] + OwnerId: Optional[String] + PrivateDnsName: Optional[String] + PublicDnsName: Optional[String] + PublicIpDnsNameOptions: Optional[PublicIpDnsNameOptions] + PrivateIpAddress: Optional[String] + PrivateIpAddresses: Optional[NetworkInterfacePrivateIpAddressList] + Ipv4Prefixes: Optional[Ipv4PrefixesList] + Ipv6Prefixes: Optional[Ipv6PrefixesList] + RequesterId: Optional[String] + RequesterManaged: Optional[Boolean] + SourceDestCheck: Optional[Boolean] + Status: Optional[NetworkInterfaceStatus] + SubnetId: Optional[String] + TagSet: Optional[TagList] + VpcId: Optional[String] + DenyAllIgwTraffic: Optional[Boolean] + Ipv6Native: Optional[Boolean] + Ipv6Address: Optional[String] + Operator: Optional[OperatorResponse] + + +class CreateNetworkInterfaceResult(TypedDict, total=False): + NetworkInterface: Optional[NetworkInterface] + ClientToken: Optional[String] + + +class CreatePlacementGroupRequest(ServiceRequest): + PartitionCount: Optional[Integer] + TagSpecifications: Optional[TagSpecificationList] + SpreadLevel: Optional[SpreadLevel] + DryRun: Optional[Boolean] + GroupName: Optional[String] + Strategy: Optional[PlacementStrategy] + + +class PlacementGroup(TypedDict, total=False): + GroupName: Optional[String] + State: 
Optional[PlacementGroupState] + Strategy: Optional[PlacementStrategy] + PartitionCount: Optional[Integer] + GroupId: Optional[String] + Tags: Optional[TagList] + GroupArn: Optional[String] + SpreadLevel: Optional[SpreadLevel] + + +class CreatePlacementGroupResult(TypedDict, total=False): + PlacementGroup: Optional[PlacementGroup] + + +class CreatePublicIpv4PoolRequest(ServiceRequest): + DryRun: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + NetworkBorderGroup: Optional[String] + + +class CreatePublicIpv4PoolResult(TypedDict, total=False): + PoolId: Optional[Ipv4PoolEc2Id] + + +class CreateReplaceRootVolumeTaskRequest(ServiceRequest): + InstanceId: InstanceId + SnapshotId: Optional[SnapshotId] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + ImageId: Optional[ImageId] + DeleteReplacedRootVolume: Optional[Boolean] + VolumeInitializationRate: Optional[Long] + + +class ReplaceRootVolumeTask(TypedDict, total=False): + ReplaceRootVolumeTaskId: Optional[ReplaceRootVolumeTaskId] + InstanceId: Optional[String] + TaskState: Optional[ReplaceRootVolumeTaskState] + StartTime: Optional[String] + CompleteTime: Optional[String] + Tags: Optional[TagList] + ImageId: Optional[ImageId] + SnapshotId: Optional[SnapshotId] + DeleteReplacedRootVolume: Optional[Boolean] + + +class CreateReplaceRootVolumeTaskResult(TypedDict, total=False): + ReplaceRootVolumeTask: Optional[ReplaceRootVolumeTask] + + +class PriceScheduleSpecification(TypedDict, total=False): + Term: Optional[Long] + Price: Optional[Double] + CurrencyCode: Optional[CurrencyCodeValues] + + +PriceScheduleSpecificationList = List[PriceScheduleSpecification] + + +class CreateReservedInstancesListingRequest(ServiceRequest): + ReservedInstancesId: ReservationId + InstanceCount: Integer + PriceSchedules: PriceScheduleSpecificationList + ClientToken: String + + +class CreateReservedInstancesListingResult(TypedDict, total=False): + ReservedInstancesListings: Optional[ReservedInstancesListingList] + + +class CreateRestoreImageTaskRequest(ServiceRequest): + Bucket: String + ObjectKey: String + Name: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class CreateRestoreImageTaskResult(TypedDict, total=False): + ImageId: Optional[String] + + +class CreateRouteRequest(ServiceRequest): + DestinationPrefixListId: Optional[PrefixListResourceId] + VpcEndpointId: Optional[VpcEndpointId] + TransitGatewayId: Optional[TransitGatewayId] + LocalGatewayId: Optional[LocalGatewayId] + CarrierGatewayId: Optional[CarrierGatewayId] + CoreNetworkArn: Optional[CoreNetworkArn] + DryRun: Optional[Boolean] + RouteTableId: RouteTableId + DestinationCidrBlock: Optional[String] + GatewayId: Optional[RouteGatewayId] + DestinationIpv6CidrBlock: Optional[String] + EgressOnlyInternetGatewayId: Optional[EgressOnlyInternetGatewayId] + InstanceId: Optional[InstanceId] + NetworkInterfaceId: Optional[NetworkInterfaceId] + VpcPeeringConnectionId: Optional[VpcPeeringConnectionId] + NatGatewayId: Optional[NatGatewayId] + + +class CreateRouteResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class CreateRouteServerEndpointRequest(ServiceRequest): + RouteServerId: RouteServerId + SubnetId: SubnetId + ClientToken: Optional[String] + DryRun: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + + +class RouteServerEndpoint(TypedDict, total=False): + RouteServerId: Optional[RouteServerId] + RouteServerEndpointId: 
Optional[RouteServerEndpointId] + VpcId: Optional[VpcId] + SubnetId: Optional[SubnetId] + EniId: Optional[NetworkInterfaceId] + EniAddress: Optional[String] + State: Optional[RouteServerEndpointState] + FailureReason: Optional[String] + Tags: Optional[TagList] + + +class CreateRouteServerEndpointResult(TypedDict, total=False): + RouteServerEndpoint: Optional[RouteServerEndpoint] + + +class RouteServerBgpOptionsRequest(TypedDict, total=False): + PeerAsn: Long + PeerLivenessDetection: Optional[RouteServerPeerLivenessMode] + + +class CreateRouteServerPeerRequest(ServiceRequest): + RouteServerEndpointId: RouteServerEndpointId + PeerAddress: String + BgpOptions: RouteServerBgpOptionsRequest + DryRun: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + + +class RouteServerBfdStatus(TypedDict, total=False): + Status: Optional[RouteServerBfdState] + + +class RouteServerBgpStatus(TypedDict, total=False): + Status: Optional[RouteServerBgpState] + + +class RouteServerBgpOptions(TypedDict, total=False): + PeerAsn: Optional[Long] + PeerLivenessDetection: Optional[RouteServerPeerLivenessMode] + + +class RouteServerPeer(TypedDict, total=False): + RouteServerPeerId: Optional[RouteServerPeerId] + RouteServerEndpointId: Optional[RouteServerEndpointId] + RouteServerId: Optional[RouteServerId] + VpcId: Optional[VpcId] + SubnetId: Optional[SubnetId] + State: Optional[RouteServerPeerState] + FailureReason: Optional[String] + EndpointEniId: Optional[NetworkInterfaceId] + EndpointEniAddress: Optional[String] + PeerAddress: Optional[String] + BgpOptions: Optional[RouteServerBgpOptions] + BgpStatus: Optional[RouteServerBgpStatus] + BfdStatus: Optional[RouteServerBfdStatus] + Tags: Optional[TagList] + + +class CreateRouteServerPeerResult(TypedDict, total=False): + RouteServerPeer: Optional[RouteServerPeer] + + +class CreateRouteServerRequest(ServiceRequest): + AmazonSideAsn: Long + ClientToken: Optional[String] + DryRun: Optional[Boolean] + PersistRoutes: Optional[RouteServerPersistRoutesAction] + PersistRoutesDuration: Optional[BoxedLong] + SnsNotificationsEnabled: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + + +class RouteServer(TypedDict, total=False): + RouteServerId: Optional[RouteServerId] + AmazonSideAsn: Optional[Long] + State: Optional[RouteServerState] + Tags: Optional[TagList] + PersistRoutesState: Optional[RouteServerPersistRoutesState] + PersistRoutesDuration: Optional[BoxedLong] + SnsNotificationsEnabled: Optional[Boolean] + SnsTopicArn: Optional[String] + + +class CreateRouteServerResult(TypedDict, total=False): + RouteServer: Optional[RouteServer] + + +class CreateRouteTableRequest(ServiceRequest): + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + VpcId: VpcId + + +class Route(TypedDict, total=False): + DestinationCidrBlock: Optional[String] + DestinationIpv6CidrBlock: Optional[String] + DestinationPrefixListId: Optional[String] + EgressOnlyInternetGatewayId: Optional[String] + GatewayId: Optional[String] + InstanceId: Optional[String] + InstanceOwnerId: Optional[String] + NatGatewayId: Optional[String] + TransitGatewayId: Optional[String] + LocalGatewayId: Optional[String] + CarrierGatewayId: Optional[CarrierGatewayId] + NetworkInterfaceId: Optional[String] + Origin: Optional[RouteOrigin] + State: Optional[RouteState] + VpcPeeringConnectionId: Optional[String] + CoreNetworkArn: Optional[CoreNetworkArn] + + +RouteList = List[Route] + + +class PropagatingVgw(TypedDict, total=False): + 
GatewayId: Optional[String] + + +PropagatingVgwList = List[PropagatingVgw] + + +class RouteTableAssociation(TypedDict, total=False): + Main: Optional[Boolean] + RouteTableAssociationId: Optional[String] + RouteTableId: Optional[String] + SubnetId: Optional[String] + GatewayId: Optional[String] + AssociationState: Optional[RouteTableAssociationState] + + +RouteTableAssociationList = List[RouteTableAssociation] + + +class RouteTable(TypedDict, total=False): + Associations: Optional[RouteTableAssociationList] + PropagatingVgws: Optional[PropagatingVgwList] + RouteTableId: Optional[String] + Routes: Optional[RouteList] + Tags: Optional[TagList] + VpcId: Optional[String] + OwnerId: Optional[String] + + +class CreateRouteTableResult(TypedDict, total=False): + RouteTable: Optional[RouteTable] + ClientToken: Optional[String] + + +class CreateSecurityGroupRequest(ServiceRequest): + Description: String + GroupName: String + VpcId: Optional[VpcId] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class CreateSecurityGroupResult(TypedDict, total=False): + GroupId: Optional[String] + Tags: Optional[TagList] + SecurityGroupArn: Optional[String] + + +class CreateSnapshotRequest(ServiceRequest): + Description: Optional[String] + OutpostArn: Optional[String] + VolumeId: VolumeId + TagSpecifications: Optional[TagSpecificationList] + Location: Optional[SnapshotLocationEnum] + DryRun: Optional[Boolean] + + +VolumeIdStringList = List[VolumeId] + + +class InstanceSpecification(TypedDict, total=False): + InstanceId: InstanceIdWithVolumeResolver + ExcludeBootVolume: Optional[Boolean] + ExcludeDataVolumeIds: Optional[VolumeIdStringList] + + +class CreateSnapshotsRequest(ServiceRequest): + Description: Optional[String] + InstanceSpecification: InstanceSpecification + OutpostArn: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + CopyTagsFromSource: Optional[CopyTagsFromSource] + Location: Optional[SnapshotLocationEnum] + + +class SnapshotInfo(TypedDict, total=False): + Description: Optional[String] + Tags: Optional[TagList] + Encrypted: Optional[Boolean] + VolumeId: Optional[String] + State: Optional[SnapshotState] + VolumeSize: Optional[Integer] + StartTime: Optional[MillisecondDateTime] + Progress: Optional[String] + OwnerId: Optional[String] + SnapshotId: Optional[String] + OutpostArn: Optional[String] + SseType: Optional[SSEType] + AvailabilityZone: Optional[String] + + +SnapshotSet = List[SnapshotInfo] + + +class CreateSnapshotsResult(TypedDict, total=False): + Snapshots: Optional[SnapshotSet] + + +class CreateSpotDatafeedSubscriptionRequest(ServiceRequest): + DryRun: Optional[Boolean] + Bucket: String + Prefix: Optional[String] + + +class SpotInstanceStateFault(TypedDict, total=False): + Code: Optional[String] + Message: Optional[String] + + +class SpotDatafeedSubscription(TypedDict, total=False): + Bucket: Optional[String] + Fault: Optional[SpotInstanceStateFault] + OwnerId: Optional[String] + Prefix: Optional[String] + State: Optional[DatafeedSubscriptionState] + + +class CreateSpotDatafeedSubscriptionResult(TypedDict, total=False): + SpotDatafeedSubscription: Optional[SpotDatafeedSubscription] + + +class S3ObjectTag(TypedDict, total=False): + Key: Optional[String] + Value: Optional[String] + + +S3ObjectTagList = List[S3ObjectTag] + + +class CreateStoreImageTaskRequest(ServiceRequest): + ImageId: ImageId + Bucket: String + S3ObjectTags: Optional[S3ObjectTagList] + DryRun: Optional[Boolean] + + +class 
CreateStoreImageTaskResult(TypedDict, total=False): + ObjectKey: Optional[String] + + +class CreateSubnetCidrReservationRequest(ServiceRequest): + SubnetId: SubnetId + Cidr: String + ReservationType: SubnetCidrReservationType + Description: Optional[String] + DryRun: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + + +class SubnetCidrReservation(TypedDict, total=False): + SubnetCidrReservationId: Optional[SubnetCidrReservationId] + SubnetId: Optional[SubnetId] + Cidr: Optional[String] + ReservationType: Optional[SubnetCidrReservationType] + OwnerId: Optional[String] + Description: Optional[String] + Tags: Optional[TagList] + + +class CreateSubnetCidrReservationResult(TypedDict, total=False): + SubnetCidrReservation: Optional[SubnetCidrReservation] + + +class CreateSubnetRequest(ServiceRequest): + TagSpecifications: Optional[TagSpecificationList] + AvailabilityZone: Optional[String] + AvailabilityZoneId: Optional[String] + CidrBlock: Optional[String] + Ipv6CidrBlock: Optional[String] + OutpostArn: Optional[String] + VpcId: VpcId + Ipv6Native: Optional[Boolean] + Ipv4IpamPoolId: Optional[IpamPoolId] + Ipv4NetmaskLength: Optional[NetmaskLength] + Ipv6IpamPoolId: Optional[IpamPoolId] + Ipv6NetmaskLength: Optional[NetmaskLength] + DryRun: Optional[Boolean] + + +class CreateSubnetResult(TypedDict, total=False): + Subnet: Optional[Subnet] + + +ResourceIdList = List[TaggableResourceId] + + +class CreateTagsRequest(ServiceRequest): + DryRun: Optional[Boolean] + Resources: ResourceIdList + Tags: TagList + + +class CreateTrafficMirrorFilterRequest(ServiceRequest): + Description: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + ClientToken: Optional[String] + + +TrafficMirrorNetworkServiceList = List[TrafficMirrorNetworkService] + + +class TrafficMirrorPortRange(TypedDict, total=False): + FromPort: Optional[Integer] + ToPort: Optional[Integer] + + +class TrafficMirrorFilterRule(TypedDict, total=False): + TrafficMirrorFilterRuleId: Optional[String] + TrafficMirrorFilterId: Optional[String] + TrafficDirection: Optional[TrafficDirection] + RuleNumber: Optional[Integer] + RuleAction: Optional[TrafficMirrorRuleAction] + Protocol: Optional[Integer] + DestinationPortRange: Optional[TrafficMirrorPortRange] + SourcePortRange: Optional[TrafficMirrorPortRange] + DestinationCidrBlock: Optional[String] + SourceCidrBlock: Optional[String] + Description: Optional[String] + Tags: Optional[TagList] + + +TrafficMirrorFilterRuleList = List[TrafficMirrorFilterRule] + + +class TrafficMirrorFilter(TypedDict, total=False): + TrafficMirrorFilterId: Optional[String] + IngressFilterRules: Optional[TrafficMirrorFilterRuleList] + EgressFilterRules: Optional[TrafficMirrorFilterRuleList] + NetworkServices: Optional[TrafficMirrorNetworkServiceList] + Description: Optional[String] + Tags: Optional[TagList] + + +class CreateTrafficMirrorFilterResult(TypedDict, total=False): + TrafficMirrorFilter: Optional[TrafficMirrorFilter] + ClientToken: Optional[String] + + +class TrafficMirrorPortRangeRequest(TypedDict, total=False): + FromPort: Optional[Integer] + ToPort: Optional[Integer] + + +class CreateTrafficMirrorFilterRuleRequest(ServiceRequest): + TrafficMirrorFilterId: TrafficMirrorFilterId + TrafficDirection: TrafficDirection + RuleNumber: Integer + RuleAction: TrafficMirrorRuleAction + DestinationPortRange: Optional[TrafficMirrorPortRangeRequest] + SourcePortRange: Optional[TrafficMirrorPortRangeRequest] + Protocol: Optional[Integer] + DestinationCidrBlock: 
String + SourceCidrBlock: String + Description: Optional[String] + DryRun: Optional[Boolean] + ClientToken: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + + +class CreateTrafficMirrorFilterRuleResult(TypedDict, total=False): + TrafficMirrorFilterRule: Optional[TrafficMirrorFilterRule] + ClientToken: Optional[String] + + +class CreateTrafficMirrorSessionRequest(ServiceRequest): + NetworkInterfaceId: NetworkInterfaceId + TrafficMirrorTargetId: TrafficMirrorTargetId + TrafficMirrorFilterId: TrafficMirrorFilterId + PacketLength: Optional[Integer] + SessionNumber: Integer + VirtualNetworkId: Optional[Integer] + Description: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + ClientToken: Optional[String] + + +class TrafficMirrorSession(TypedDict, total=False): + TrafficMirrorSessionId: Optional[String] + TrafficMirrorTargetId: Optional[String] + TrafficMirrorFilterId: Optional[String] + NetworkInterfaceId: Optional[String] + OwnerId: Optional[String] + PacketLength: Optional[Integer] + SessionNumber: Optional[Integer] + VirtualNetworkId: Optional[Integer] + Description: Optional[String] + Tags: Optional[TagList] + + +class CreateTrafficMirrorSessionResult(TypedDict, total=False): + TrafficMirrorSession: Optional[TrafficMirrorSession] + ClientToken: Optional[String] + + +class CreateTrafficMirrorTargetRequest(ServiceRequest): + NetworkInterfaceId: Optional[NetworkInterfaceId] + NetworkLoadBalancerArn: Optional[String] + Description: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + ClientToken: Optional[String] + GatewayLoadBalancerEndpointId: Optional[VpcEndpointId] + + +class TrafficMirrorTarget(TypedDict, total=False): + TrafficMirrorTargetId: Optional[String] + NetworkInterfaceId: Optional[String] + NetworkLoadBalancerArn: Optional[String] + Type: Optional[TrafficMirrorTargetType] + Description: Optional[String] + OwnerId: Optional[String] + Tags: Optional[TagList] + GatewayLoadBalancerEndpointId: Optional[String] + + +class CreateTrafficMirrorTargetResult(TypedDict, total=False): + TrafficMirrorTarget: Optional[TrafficMirrorTarget] + ClientToken: Optional[String] + + +InsideCidrBlocksStringList = List[String] + + +class TransitGatewayConnectRequestBgpOptions(TypedDict, total=False): + PeerAsn: Optional[Long] + + +class CreateTransitGatewayConnectPeerRequest(ServiceRequest): + TransitGatewayAttachmentId: TransitGatewayAttachmentId + TransitGatewayAddress: Optional[String] + PeerAddress: String + BgpOptions: Optional[TransitGatewayConnectRequestBgpOptions] + InsideCidrBlocks: InsideCidrBlocksStringList + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class TransitGatewayAttachmentBgpConfiguration(TypedDict, total=False): + TransitGatewayAsn: Optional[Long] + PeerAsn: Optional[Long] + TransitGatewayAddress: Optional[String] + PeerAddress: Optional[String] + BgpStatus: Optional[BgpStatus] + + +TransitGatewayAttachmentBgpConfigurationList = List[TransitGatewayAttachmentBgpConfiguration] + + +class TransitGatewayConnectPeerConfiguration(TypedDict, total=False): + TransitGatewayAddress: Optional[String] + PeerAddress: Optional[String] + InsideCidrBlocks: Optional[InsideCidrBlocksStringList] + Protocol: Optional[ProtocolValue] + BgpConfigurations: Optional[TransitGatewayAttachmentBgpConfigurationList] + + +class TransitGatewayConnectPeer(TypedDict, total=False): + TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + 
TransitGatewayConnectPeerId: Optional[TransitGatewayConnectPeerId] + State: Optional[TransitGatewayConnectPeerState] + CreationTime: Optional[DateTime] + ConnectPeerConfiguration: Optional[TransitGatewayConnectPeerConfiguration] + Tags: Optional[TagList] + + +class CreateTransitGatewayConnectPeerResult(TypedDict, total=False): + TransitGatewayConnectPeer: Optional[TransitGatewayConnectPeer] + + +class CreateTransitGatewayConnectRequestOptions(TypedDict, total=False): + Protocol: ProtocolValue + + +class CreateTransitGatewayConnectRequest(ServiceRequest): + TransportTransitGatewayAttachmentId: TransitGatewayAttachmentId + Options: CreateTransitGatewayConnectRequestOptions + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class TransitGatewayConnectOptions(TypedDict, total=False): + Protocol: Optional[ProtocolValue] + + +class TransitGatewayConnect(TypedDict, total=False): + TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + TransportTransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + TransitGatewayId: Optional[TransitGatewayId] + State: Optional[TransitGatewayAttachmentState] + CreationTime: Optional[DateTime] + Options: Optional[TransitGatewayConnectOptions] + Tags: Optional[TagList] + + +class CreateTransitGatewayConnectResult(TypedDict, total=False): + TransitGatewayConnect: Optional[TransitGatewayConnect] + + +class CreateTransitGatewayMulticastDomainRequestOptions(TypedDict, total=False): + Igmpv2Support: Optional[Igmpv2SupportValue] + StaticSourcesSupport: Optional[StaticSourcesSupportValue] + AutoAcceptSharedAssociations: Optional[AutoAcceptSharedAssociationsValue] + + +class CreateTransitGatewayMulticastDomainRequest(ServiceRequest): + TransitGatewayId: TransitGatewayId + Options: Optional[CreateTransitGatewayMulticastDomainRequestOptions] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class TransitGatewayMulticastDomainOptions(TypedDict, total=False): + Igmpv2Support: Optional[Igmpv2SupportValue] + StaticSourcesSupport: Optional[StaticSourcesSupportValue] + AutoAcceptSharedAssociations: Optional[AutoAcceptSharedAssociationsValue] + + +class TransitGatewayMulticastDomain(TypedDict, total=False): + TransitGatewayMulticastDomainId: Optional[String] + TransitGatewayId: Optional[String] + TransitGatewayMulticastDomainArn: Optional[String] + OwnerId: Optional[String] + Options: Optional[TransitGatewayMulticastDomainOptions] + State: Optional[TransitGatewayMulticastDomainState] + CreationTime: Optional[DateTime] + Tags: Optional[TagList] + + +class CreateTransitGatewayMulticastDomainResult(TypedDict, total=False): + TransitGatewayMulticastDomain: Optional[TransitGatewayMulticastDomain] + + +class CreateTransitGatewayPeeringAttachmentRequestOptions(TypedDict, total=False): + DynamicRouting: Optional[DynamicRoutingValue] + + +class CreateTransitGatewayPeeringAttachmentRequest(ServiceRequest): + TransitGatewayId: TransitGatewayId + PeerTransitGatewayId: TransitAssociationGatewayId + PeerAccountId: String + PeerRegion: String + Options: Optional[CreateTransitGatewayPeeringAttachmentRequestOptions] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class CreateTransitGatewayPeeringAttachmentResult(TypedDict, total=False): + TransitGatewayPeeringAttachment: Optional[TransitGatewayPeeringAttachment] + + +class CreateTransitGatewayPolicyTableRequest(ServiceRequest): + TransitGatewayId: TransitGatewayId + TagSpecifications: Optional[TagSpecificationList] + 
DryRun: Optional[Boolean] + + +class TransitGatewayPolicyTable(TypedDict, total=False): + TransitGatewayPolicyTableId: Optional[TransitGatewayPolicyTableId] + TransitGatewayId: Optional[TransitGatewayId] + State: Optional[TransitGatewayPolicyTableState] + CreationTime: Optional[DateTime] + Tags: Optional[TagList] + + +class CreateTransitGatewayPolicyTableResult(TypedDict, total=False): + TransitGatewayPolicyTable: Optional[TransitGatewayPolicyTable] + + +class CreateTransitGatewayPrefixListReferenceRequest(ServiceRequest): + TransitGatewayRouteTableId: TransitGatewayRouteTableId + PrefixListId: PrefixListResourceId + TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + Blackhole: Optional[Boolean] + DryRun: Optional[Boolean] + + +class TransitGatewayPrefixListAttachment(TypedDict, total=False): + TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + ResourceType: Optional[TransitGatewayAttachmentResourceType] + ResourceId: Optional[String] + + +class TransitGatewayPrefixListReference(TypedDict, total=False): + TransitGatewayRouteTableId: Optional[TransitGatewayRouteTableId] + PrefixListId: Optional[PrefixListResourceId] + PrefixListOwnerId: Optional[String] + State: Optional[TransitGatewayPrefixListReferenceState] + Blackhole: Optional[Boolean] + TransitGatewayAttachment: Optional[TransitGatewayPrefixListAttachment] + + +class CreateTransitGatewayPrefixListReferenceResult(TypedDict, total=False): + TransitGatewayPrefixListReference: Optional[TransitGatewayPrefixListReference] + + +TransitGatewayCidrBlockStringList = List[String] + + +class TransitGatewayRequestOptions(TypedDict, total=False): + AmazonSideAsn: Optional[Long] + AutoAcceptSharedAttachments: Optional[AutoAcceptSharedAttachmentsValue] + DefaultRouteTableAssociation: Optional[DefaultRouteTableAssociationValue] + DefaultRouteTablePropagation: Optional[DefaultRouteTablePropagationValue] + VpnEcmpSupport: Optional[VpnEcmpSupportValue] + DnsSupport: Optional[DnsSupportValue] + SecurityGroupReferencingSupport: Optional[SecurityGroupReferencingSupportValue] + MulticastSupport: Optional[MulticastSupportValue] + TransitGatewayCidrBlocks: Optional[TransitGatewayCidrBlockStringList] + + +class CreateTransitGatewayRequest(ServiceRequest): + Description: Optional[String] + Options: Optional[TransitGatewayRequestOptions] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class TransitGatewayOptions(TypedDict, total=False): + AmazonSideAsn: Optional[Long] + TransitGatewayCidrBlocks: Optional[ValueStringList] + AutoAcceptSharedAttachments: Optional[AutoAcceptSharedAttachmentsValue] + DefaultRouteTableAssociation: Optional[DefaultRouteTableAssociationValue] + AssociationDefaultRouteTableId: Optional[String] + DefaultRouteTablePropagation: Optional[DefaultRouteTablePropagationValue] + PropagationDefaultRouteTableId: Optional[String] + VpnEcmpSupport: Optional[VpnEcmpSupportValue] + DnsSupport: Optional[DnsSupportValue] + SecurityGroupReferencingSupport: Optional[SecurityGroupReferencingSupportValue] + MulticastSupport: Optional[MulticastSupportValue] + + +class TransitGateway(TypedDict, total=False): + TransitGatewayId: Optional[String] + TransitGatewayArn: Optional[String] + State: Optional[TransitGatewayState] + OwnerId: Optional[String] + Description: Optional[String] + CreationTime: Optional[DateTime] + Options: Optional[TransitGatewayOptions] + Tags: Optional[TagList] + + +class CreateTransitGatewayResult(TypedDict, total=False): + TransitGateway: Optional[TransitGateway] + + 
+class CreateTransitGatewayRouteRequest(ServiceRequest): + DestinationCidrBlock: String + TransitGatewayRouteTableId: TransitGatewayRouteTableId + TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + Blackhole: Optional[Boolean] + DryRun: Optional[Boolean] + + +class TransitGatewayRouteAttachment(TypedDict, total=False): + ResourceId: Optional[String] + TransitGatewayAttachmentId: Optional[String] + ResourceType: Optional[TransitGatewayAttachmentResourceType] + + +TransitGatewayRouteAttachmentList = List[TransitGatewayRouteAttachment] + + +class TransitGatewayRoute(TypedDict, total=False): + DestinationCidrBlock: Optional[String] + PrefixListId: Optional[PrefixListResourceId] + TransitGatewayRouteTableAnnouncementId: Optional[TransitGatewayRouteTableAnnouncementId] + TransitGatewayAttachments: Optional[TransitGatewayRouteAttachmentList] + Type: Optional[TransitGatewayRouteType] + State: Optional[TransitGatewayRouteState] + + +class CreateTransitGatewayRouteResult(TypedDict, total=False): + Route: Optional[TransitGatewayRoute] + + +class CreateTransitGatewayRouteTableAnnouncementRequest(ServiceRequest): + TransitGatewayRouteTableId: TransitGatewayRouteTableId + PeeringAttachmentId: TransitGatewayAttachmentId + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class TransitGatewayRouteTableAnnouncement(TypedDict, total=False): + TransitGatewayRouteTableAnnouncementId: Optional[TransitGatewayRouteTableAnnouncementId] + TransitGatewayId: Optional[TransitGatewayId] + CoreNetworkId: Optional[String] + PeerTransitGatewayId: Optional[TransitGatewayId] + PeerCoreNetworkId: Optional[String] + PeeringAttachmentId: Optional[TransitGatewayAttachmentId] + AnnouncementDirection: Optional[TransitGatewayRouteTableAnnouncementDirection] + TransitGatewayRouteTableId: Optional[TransitGatewayRouteTableId] + State: Optional[TransitGatewayRouteTableAnnouncementState] + CreationTime: Optional[DateTime] + Tags: Optional[TagList] + + +class CreateTransitGatewayRouteTableAnnouncementResult(TypedDict, total=False): + TransitGatewayRouteTableAnnouncement: Optional[TransitGatewayRouteTableAnnouncement] + + +class CreateTransitGatewayRouteTableRequest(ServiceRequest): + TransitGatewayId: TransitGatewayId + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class TransitGatewayRouteTable(TypedDict, total=False): + TransitGatewayRouteTableId: Optional[String] + TransitGatewayId: Optional[String] + State: Optional[TransitGatewayRouteTableState] + DefaultAssociationRouteTable: Optional[Boolean] + DefaultPropagationRouteTable: Optional[Boolean] + CreationTime: Optional[DateTime] + Tags: Optional[TagList] + + +class CreateTransitGatewayRouteTableResult(TypedDict, total=False): + TransitGatewayRouteTable: Optional[TransitGatewayRouteTable] + + +class CreateTransitGatewayVpcAttachmentRequestOptions(TypedDict, total=False): + DnsSupport: Optional[DnsSupportValue] + SecurityGroupReferencingSupport: Optional[SecurityGroupReferencingSupportValue] + Ipv6Support: Optional[Ipv6SupportValue] + ApplianceModeSupport: Optional[ApplianceModeSupportValue] + + +class CreateTransitGatewayVpcAttachmentRequest(ServiceRequest): + TransitGatewayId: TransitGatewayId + VpcId: VpcId + SubnetIds: TransitGatewaySubnetIdList + Options: Optional[CreateTransitGatewayVpcAttachmentRequestOptions] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + + +class CreateTransitGatewayVpcAttachmentResult(TypedDict, total=False): + 
TransitGatewayVpcAttachment: Optional[TransitGatewayVpcAttachment] + + +class CreateVerifiedAccessEndpointPortRange(TypedDict, total=False): + FromPort: Optional[VerifiedAccessEndpointPortNumber] + ToPort: Optional[VerifiedAccessEndpointPortNumber] + + +CreateVerifiedAccessEndpointPortRangeList = List[CreateVerifiedAccessEndpointPortRange] +CreateVerifiedAccessEndpointSubnetIdList = List[SubnetId] + + +class CreateVerifiedAccessEndpointCidrOptions(TypedDict, total=False): + Protocol: Optional[VerifiedAccessEndpointProtocol] + SubnetIds: Optional[CreateVerifiedAccessEndpointSubnetIdList] + Cidr: Optional[String] + PortRanges: Optional[CreateVerifiedAccessEndpointPortRangeList] + + +class CreateVerifiedAccessEndpointEniOptions(TypedDict, total=False): + NetworkInterfaceId: Optional[NetworkInterfaceId] + Protocol: Optional[VerifiedAccessEndpointProtocol] + Port: Optional[VerifiedAccessEndpointPortNumber] + PortRanges: Optional[CreateVerifiedAccessEndpointPortRangeList] + + +class CreateVerifiedAccessEndpointLoadBalancerOptions(TypedDict, total=False): + Protocol: Optional[VerifiedAccessEndpointProtocol] + Port: Optional[VerifiedAccessEndpointPortNumber] + LoadBalancerArn: Optional[LoadBalancerArn] + SubnetIds: Optional[CreateVerifiedAccessEndpointSubnetIdList] + PortRanges: Optional[CreateVerifiedAccessEndpointPortRangeList] + + +class CreateVerifiedAccessEndpointRdsOptions(TypedDict, total=False): + Protocol: Optional[VerifiedAccessEndpointProtocol] + Port: Optional[VerifiedAccessEndpointPortNumber] + RdsDbInstanceArn: Optional[RdsDbInstanceArn] + RdsDbClusterArn: Optional[RdsDbClusterArn] + RdsDbProxyArn: Optional[RdsDbProxyArn] + RdsEndpoint: Optional[String] + SubnetIds: Optional[CreateVerifiedAccessEndpointSubnetIdList] + + +class VerifiedAccessSseSpecificationRequest(TypedDict, total=False): + CustomerManagedKeyEnabled: Optional[Boolean] + KmsKeyArn: Optional[KmsKeyArn] + + +SecurityGroupIdList = List[SecurityGroupId] + + +class CreateVerifiedAccessEndpointRequest(ServiceRequest): + VerifiedAccessGroupId: VerifiedAccessGroupId + EndpointType: VerifiedAccessEndpointType + AttachmentType: VerifiedAccessEndpointAttachmentType + DomainCertificateArn: Optional[CertificateArn] + ApplicationDomain: Optional[String] + EndpointDomainPrefix: Optional[String] + SecurityGroupIds: Optional[SecurityGroupIdList] + LoadBalancerOptions: Optional[CreateVerifiedAccessEndpointLoadBalancerOptions] + NetworkInterfaceOptions: Optional[CreateVerifiedAccessEndpointEniOptions] + Description: Optional[String] + PolicyDocument: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + SseSpecification: Optional[VerifiedAccessSseSpecificationRequest] + RdsOptions: Optional[CreateVerifiedAccessEndpointRdsOptions] + CidrOptions: Optional[CreateVerifiedAccessEndpointCidrOptions] + + +VerifiedAccessEndpointSubnetIdList = List[SubnetId] + + +class VerifiedAccessEndpointPortRange(TypedDict, total=False): + FromPort: Optional[VerifiedAccessEndpointPortNumber] + ToPort: Optional[VerifiedAccessEndpointPortNumber] + + +VerifiedAccessEndpointPortRangeList = List[VerifiedAccessEndpointPortRange] + + +class VerifiedAccessEndpointCidrOptions(TypedDict, total=False): + Cidr: Optional[String] + PortRanges: Optional[VerifiedAccessEndpointPortRangeList] + Protocol: Optional[VerifiedAccessEndpointProtocol] + SubnetIds: Optional[VerifiedAccessEndpointSubnetIdList] + + +class VerifiedAccessEndpointRdsOptions(TypedDict, total=False): + Protocol: 
Optional[VerifiedAccessEndpointProtocol] + Port: Optional[VerifiedAccessEndpointPortNumber] + RdsDbInstanceArn: Optional[String] + RdsDbClusterArn: Optional[String] + RdsDbProxyArn: Optional[String] + RdsEndpoint: Optional[String] + SubnetIds: Optional[VerifiedAccessEndpointSubnetIdList] + + +class VerifiedAccessEndpointStatus(TypedDict, total=False): + Code: Optional[VerifiedAccessEndpointStatusCode] + Message: Optional[String] + + +class VerifiedAccessEndpointEniOptions(TypedDict, total=False): + NetworkInterfaceId: Optional[NetworkInterfaceId] + Protocol: Optional[VerifiedAccessEndpointProtocol] + Port: Optional[VerifiedAccessEndpointPortNumber] + PortRanges: Optional[VerifiedAccessEndpointPortRangeList] + + +class VerifiedAccessEndpointLoadBalancerOptions(TypedDict, total=False): + Protocol: Optional[VerifiedAccessEndpointProtocol] + Port: Optional[VerifiedAccessEndpointPortNumber] + LoadBalancerArn: Optional[String] + SubnetIds: Optional[VerifiedAccessEndpointSubnetIdList] + PortRanges: Optional[VerifiedAccessEndpointPortRangeList] + + +class VerifiedAccessEndpoint(TypedDict, total=False): + VerifiedAccessInstanceId: Optional[String] + VerifiedAccessGroupId: Optional[String] + VerifiedAccessEndpointId: Optional[String] + ApplicationDomain: Optional[String] + EndpointType: Optional[VerifiedAccessEndpointType] + AttachmentType: Optional[VerifiedAccessEndpointAttachmentType] + DomainCertificateArn: Optional[String] + EndpointDomain: Optional[String] + DeviceValidationDomain: Optional[String] + SecurityGroupIds: Optional[SecurityGroupIdList] + LoadBalancerOptions: Optional[VerifiedAccessEndpointLoadBalancerOptions] + NetworkInterfaceOptions: Optional[VerifiedAccessEndpointEniOptions] + Status: Optional[VerifiedAccessEndpointStatus] + Description: Optional[String] + CreationTime: Optional[String] + LastUpdatedTime: Optional[String] + DeletionTime: Optional[String] + Tags: Optional[TagList] + SseSpecification: Optional[VerifiedAccessSseSpecificationResponse] + RdsOptions: Optional[VerifiedAccessEndpointRdsOptions] + CidrOptions: Optional[VerifiedAccessEndpointCidrOptions] + + +class CreateVerifiedAccessEndpointResult(TypedDict, total=False): + VerifiedAccessEndpoint: Optional[VerifiedAccessEndpoint] + + +class CreateVerifiedAccessGroupRequest(ServiceRequest): + VerifiedAccessInstanceId: VerifiedAccessInstanceId + Description: Optional[String] + PolicyDocument: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + SseSpecification: Optional[VerifiedAccessSseSpecificationRequest] + + +class VerifiedAccessGroup(TypedDict, total=False): + VerifiedAccessGroupId: Optional[String] + VerifiedAccessInstanceId: Optional[String] + Description: Optional[String] + Owner: Optional[String] + VerifiedAccessGroupArn: Optional[String] + CreationTime: Optional[String] + LastUpdatedTime: Optional[String] + DeletionTime: Optional[String] + Tags: Optional[TagList] + SseSpecification: Optional[VerifiedAccessSseSpecificationResponse] + + +class CreateVerifiedAccessGroupResult(TypedDict, total=False): + VerifiedAccessGroup: Optional[VerifiedAccessGroup] + + +class CreateVerifiedAccessInstanceRequest(ServiceRequest): + Description: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + FIPSEnabled: Optional[Boolean] + CidrEndpointsCustomSubDomain: Optional[String] + + +class CreateVerifiedAccessInstanceResult(TypedDict, total=False): + VerifiedAccessInstance: 
Optional[VerifiedAccessInstance] + + +class CreateVerifiedAccessNativeApplicationOidcOptions(TypedDict, total=False): + PublicSigningKeyEndpoint: Optional[String] + Issuer: Optional[String] + AuthorizationEndpoint: Optional[String] + TokenEndpoint: Optional[String] + UserInfoEndpoint: Optional[String] + ClientId: Optional[String] + ClientSecret: Optional[ClientSecretType] + Scope: Optional[String] + + +class CreateVerifiedAccessTrustProviderDeviceOptions(TypedDict, total=False): + TenantId: Optional[String] + PublicSigningKeyUrl: Optional[String] + + +class CreateVerifiedAccessTrustProviderOidcOptions(TypedDict, total=False): + Issuer: Optional[String] + AuthorizationEndpoint: Optional[String] + TokenEndpoint: Optional[String] + UserInfoEndpoint: Optional[String] + ClientId: Optional[String] + ClientSecret: Optional[ClientSecretType] + Scope: Optional[String] + + +class CreateVerifiedAccessTrustProviderRequest(ServiceRequest): + TrustProviderType: TrustProviderType + UserTrustProviderType: Optional[UserTrustProviderType] + DeviceTrustProviderType: Optional[DeviceTrustProviderType] + OidcOptions: Optional[CreateVerifiedAccessTrustProviderOidcOptions] + DeviceOptions: Optional[CreateVerifiedAccessTrustProviderDeviceOptions] + PolicyReferenceName: String + Description: Optional[String] + TagSpecifications: Optional[TagSpecificationList] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + SseSpecification: Optional[VerifiedAccessSseSpecificationRequest] + NativeApplicationOidcOptions: Optional[CreateVerifiedAccessNativeApplicationOidcOptions] + + +class CreateVerifiedAccessTrustProviderResult(TypedDict, total=False): + VerifiedAccessTrustProvider: Optional[VerifiedAccessTrustProvider] + + +class CreateVolumePermission(TypedDict, total=False): + UserId: Optional[String] + Group: Optional[PermissionGroup] + + +CreateVolumePermissionList = List[CreateVolumePermission] + + +class CreateVolumePermissionModifications(TypedDict, total=False): + Add: Optional[CreateVolumePermissionList] + Remove: Optional[CreateVolumePermissionList] + + +class CreateVolumeRequest(ServiceRequest): + AvailabilityZone: AvailabilityZoneName + Encrypted: Optional[Boolean] + Iops: Optional[Integer] + KmsKeyId: Optional[KmsKeyId] + OutpostArn: Optional[String] + Size: Optional[Integer] + SnapshotId: Optional[SnapshotId] + VolumeType: Optional[VolumeType] + TagSpecifications: Optional[TagSpecificationList] + MultiAttachEnabled: Optional[Boolean] + Throughput: Optional[Integer] + ClientToken: Optional[String] + VolumeInitializationRate: Optional[Integer] + Operator: Optional[OperatorRequest] + DryRun: Optional[Boolean] + + +class CreateVpcBlockPublicAccessExclusionRequest(ServiceRequest): + DryRun: Optional[Boolean] + SubnetId: Optional[SubnetId] + VpcId: Optional[VpcId] + InternetGatewayExclusionMode: InternetGatewayExclusionMode + TagSpecifications: Optional[TagSpecificationList] + + +class VpcBlockPublicAccessExclusion(TypedDict, total=False): + ExclusionId: Optional[VpcBlockPublicAccessExclusionId] + InternetGatewayExclusionMode: Optional[InternetGatewayExclusionMode] + ResourceArn: Optional[ResourceArn] + State: Optional[VpcBlockPublicAccessExclusionState] + Reason: Optional[String] + CreationTimestamp: Optional[MillisecondDateTime] + LastUpdateTimestamp: Optional[MillisecondDateTime] + DeletionTimestamp: Optional[MillisecondDateTime] + Tags: Optional[TagList] + + +class CreateVpcBlockPublicAccessExclusionResult(TypedDict, total=False): + VpcBlockPublicAccessExclusion: Optional[VpcBlockPublicAccessExclusion] + + 
+class CreateVpcEndpointConnectionNotificationRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ServiceId: Optional[VpcEndpointServiceId]
+    VpcEndpointId: Optional[VpcEndpointId]
+    ConnectionNotificationArn: String
+    ConnectionEvents: ValueStringList
+    ClientToken: Optional[String]
+
+
+class CreateVpcEndpointConnectionNotificationResult(TypedDict, total=False):
+    ConnectionNotification: Optional[ConnectionNotification]
+    ClientToken: Optional[String]
+
+
+class SubnetConfiguration(TypedDict, total=False):
+    SubnetId: Optional[SubnetId]
+    Ipv4: Optional[String]
+    Ipv6: Optional[String]
+
+
+SubnetConfigurationsList = List[SubnetConfiguration]
+
+
+class DnsOptionsSpecification(TypedDict, total=False):
+    DnsRecordIpType: Optional[DnsRecordIpType]
+    PrivateDnsOnlyForInboundResolverEndpoint: Optional[Boolean]
+
+
+VpcEndpointSecurityGroupIdList = List[SecurityGroupId]
+VpcEndpointSubnetIdList = List[SubnetId]
+VpcEndpointRouteTableIdList = List[RouteTableId]
+
+
+class CreateVpcEndpointRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    VpcEndpointType: Optional[VpcEndpointType]
+    VpcId: VpcId
+    ServiceName: Optional[String]
+    PolicyDocument: Optional[String]
+    RouteTableIds: Optional[VpcEndpointRouteTableIdList]
+    SubnetIds: Optional[VpcEndpointSubnetIdList]
+    SecurityGroupIds: Optional[VpcEndpointSecurityGroupIdList]
+    IpAddressType: Optional[IpAddressType]
+    DnsOptions: Optional[DnsOptionsSpecification]
+    ClientToken: Optional[String]
+    PrivateDnsEnabled: Optional[Boolean]
+    TagSpecifications: Optional[TagSpecificationList]
+    SubnetConfigurations: Optional[SubnetConfigurationsList]
+    ServiceNetworkArn: Optional[ServiceNetworkArn]
+    ResourceConfigurationArn: Optional[ResourceConfigurationArn]
+    ServiceRegion: Optional[String]
+
+
+class SubnetIpPrefixes(TypedDict, total=False):
+    SubnetId: Optional[String]
+    IpPrefixes: Optional[ValueStringList]
+
+
+SubnetIpPrefixesList = List[SubnetIpPrefixes]
+
+
+class LastError(TypedDict, total=False):
+    Message: Optional[String]
+    Code: Optional[String]
+
+
+class DnsEntry(TypedDict, total=False):
+    DnsName: Optional[String]
+    HostedZoneId: Optional[String]
+
+
+DnsEntrySet = List[DnsEntry]
+
+
+class DnsOptions(TypedDict, total=False):
+    DnsRecordIpType: Optional[DnsRecordIpType]
+    PrivateDnsOnlyForInboundResolverEndpoint: Optional[Boolean]
+
+
+class SecurityGroupIdentifier(TypedDict, total=False):
+    GroupId: Optional[String]
+    GroupName: Optional[String]
+
+
+GroupIdentifierSet = List[SecurityGroupIdentifier]
+
+
+class VpcEndpoint(TypedDict, total=False):
+    VpcEndpointId: Optional[String]
+    VpcEndpointType: Optional[VpcEndpointType]
+    VpcId: Optional[String]
+    ServiceName: Optional[String]
+    State: Optional[State]
+    PolicyDocument: Optional[String]
+    RouteTableIds: Optional[ValueStringList]
+    SubnetIds: Optional[ValueStringList]
+    Groups: Optional[GroupIdentifierSet]
+    IpAddressType: Optional[IpAddressType]
+    DnsOptions: Optional[DnsOptions]
+    PrivateDnsEnabled: Optional[Boolean]
+    RequesterManaged: Optional[Boolean]
+    NetworkInterfaceIds: Optional[ValueStringList]
+    DnsEntries: Optional[DnsEntrySet]
+    CreationTimestamp: Optional[MillisecondDateTime]
+    Tags: Optional[TagList]
+    OwnerId: Optional[String]
+    LastError: Optional[LastError]
+    Ipv4Prefixes: Optional[SubnetIpPrefixesList]
+    Ipv6Prefixes: Optional[SubnetIpPrefixesList]
+    FailureReason: Optional[String]
+    ServiceNetworkArn: Optional[ServiceNetworkArn]
+    ResourceConfigurationArn: Optional[ResourceConfigurationArn]
+    ServiceRegion: Optional[String]
+
+
+class CreateVpcEndpointResult(TypedDict, total=False):
+    VpcEndpoint: Optional[VpcEndpoint]
+    ClientToken: Optional[String]
+
+
+class CreateVpcEndpointServiceConfigurationRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    AcceptanceRequired: Optional[Boolean]
+    PrivateDnsName: Optional[String]
+    NetworkLoadBalancerArns: Optional[ValueStringList]
+    GatewayLoadBalancerArns: Optional[ValueStringList]
+    SupportedIpAddressTypes: Optional[ValueStringList]
+    SupportedRegions: Optional[ValueStringList]
+    ClientToken: Optional[String]
+    TagSpecifications: Optional[TagSpecificationList]
+
+
+class SupportedRegionDetail(TypedDict, total=False):
+    Region: Optional[String]
+    ServiceState: Optional[String]
+
+
+SupportedRegionSet = List[SupportedRegionDetail]
+
+
+class PrivateDnsNameConfiguration(TypedDict, total=False):
+    State: Optional[DnsNameState]
+    Type: Optional[String]
+    Value: Optional[String]
+    Name: Optional[String]
+
+
+SupportedIpAddressTypes = List[ServiceConnectivityType]
+
+
+class ServiceTypeDetail(TypedDict, total=False):
+    ServiceType: Optional[ServiceType]
+
+
+ServiceTypeDetailSet = List[ServiceTypeDetail]
+
+
+class ServiceConfiguration(TypedDict, total=False):
+    ServiceType: Optional[ServiceTypeDetailSet]
+    ServiceId: Optional[String]
+    ServiceName: Optional[String]
+    ServiceState: Optional[ServiceState]
+    AvailabilityZones: Optional[ValueStringList]
+    AcceptanceRequired: Optional[Boolean]
+    ManagesVpcEndpoints: Optional[Boolean]
+    NetworkLoadBalancerArns: Optional[ValueStringList]
+    GatewayLoadBalancerArns: Optional[ValueStringList]
+    SupportedIpAddressTypes: Optional[SupportedIpAddressTypes]
+    BaseEndpointDnsNames: Optional[ValueStringList]
+    PrivateDnsName: Optional[String]
+    PrivateDnsNameConfiguration: Optional[PrivateDnsNameConfiguration]
+    PayerResponsibility: Optional[PayerResponsibility]
+    Tags: Optional[TagList]
+    SupportedRegions: Optional[SupportedRegionSet]
+    RemoteAccessEnabled: Optional[Boolean]
+
+
+class CreateVpcEndpointServiceConfigurationResult(TypedDict, total=False):
+    ServiceConfiguration: Optional[ServiceConfiguration]
+    ClientToken: Optional[String]
+
+
+class CreateVpcPeeringConnectionRequest(ServiceRequest):
+    PeerRegion: Optional[String]
+    TagSpecifications: Optional[TagSpecificationList]
+    DryRun: Optional[Boolean]
+    VpcId: VpcId
+    PeerVpcId: Optional[String]
+    PeerOwnerId: Optional[String]
+
+
+class CreateVpcPeeringConnectionResult(TypedDict, total=False):
+    VpcPeeringConnection: Optional[VpcPeeringConnection]
+
+
+class CreateVpcRequest(ServiceRequest):
+    CidrBlock: Optional[String]
+    Ipv6Pool: Optional[Ipv6PoolEc2Id]
+    Ipv6CidrBlock: Optional[String]
+    Ipv4IpamPoolId: Optional[IpamPoolId]
+    Ipv4NetmaskLength: Optional[NetmaskLength]
+    Ipv6IpamPoolId: Optional[IpamPoolId]
+    Ipv6NetmaskLength: Optional[NetmaskLength]
+    Ipv6CidrBlockNetworkBorderGroup: Optional[String]
+    TagSpecifications: Optional[TagSpecificationList]
+    DryRun: Optional[Boolean]
+    InstanceTenancy: Optional[Tenancy]
+    AmazonProvidedIpv6CidrBlock: Optional[Boolean]
+
+
+class CreateVpcResult(TypedDict, total=False):
+    Vpc: Optional[Vpc]
+
+
+class VpnTunnelLogOptionsSpecification(TypedDict, total=False):
+    CloudWatchLogOptions: Optional[CloudWatchLogOptionsSpecification]
+
+
+class IKEVersionsRequestListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+IKEVersionsRequestList = List[IKEVersionsRequestListValue]
+
+
+class Phase2DHGroupNumbersRequestListValue(TypedDict, total=False):
+    Value: Optional[Integer]
+
+
+Phase2DHGroupNumbersRequestList = List[Phase2DHGroupNumbersRequestListValue]
+
+
+class Phase1DHGroupNumbersRequestListValue(TypedDict, total=False):
+    Value: Optional[Integer]
+
+
+Phase1DHGroupNumbersRequestList = List[Phase1DHGroupNumbersRequestListValue]
+
+
+class Phase2IntegrityAlgorithmsRequestListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+Phase2IntegrityAlgorithmsRequestList = List[Phase2IntegrityAlgorithmsRequestListValue]
+
+
+class Phase1IntegrityAlgorithmsRequestListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+Phase1IntegrityAlgorithmsRequestList = List[Phase1IntegrityAlgorithmsRequestListValue]
+
+
+class Phase2EncryptionAlgorithmsRequestListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+Phase2EncryptionAlgorithmsRequestList = List[Phase2EncryptionAlgorithmsRequestListValue]
+
+
+class Phase1EncryptionAlgorithmsRequestListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+Phase1EncryptionAlgorithmsRequestList = List[Phase1EncryptionAlgorithmsRequestListValue]
+
+
+class VpnTunnelOptionsSpecification(TypedDict, total=False):
+    TunnelInsideCidr: Optional[String]
+    TunnelInsideIpv6Cidr: Optional[String]
+    PreSharedKey: Optional[preSharedKey]
+    Phase1LifetimeSeconds: Optional[Integer]
+    Phase2LifetimeSeconds: Optional[Integer]
+    RekeyMarginTimeSeconds: Optional[Integer]
+    RekeyFuzzPercentage: Optional[Integer]
+    ReplayWindowSize: Optional[Integer]
+    DPDTimeoutSeconds: Optional[Integer]
+    DPDTimeoutAction: Optional[String]
+    Phase1EncryptionAlgorithms: Optional[Phase1EncryptionAlgorithmsRequestList]
+    Phase2EncryptionAlgorithms: Optional[Phase2EncryptionAlgorithmsRequestList]
+    Phase1IntegrityAlgorithms: Optional[Phase1IntegrityAlgorithmsRequestList]
+    Phase2IntegrityAlgorithms: Optional[Phase2IntegrityAlgorithmsRequestList]
+    Phase1DHGroupNumbers: Optional[Phase1DHGroupNumbersRequestList]
+    Phase2DHGroupNumbers: Optional[Phase2DHGroupNumbersRequestList]
+    IKEVersions: Optional[IKEVersionsRequestList]
+    StartupAction: Optional[String]
+    LogOptions: Optional[VpnTunnelLogOptionsSpecification]
+    EnableTunnelLifecycleControl: Optional[Boolean]
+
+
+VpnTunnelOptionsSpecificationsList = List[VpnTunnelOptionsSpecification]
+
+
+class VpnConnectionOptionsSpecification(TypedDict, total=False):
+    EnableAcceleration: Optional[Boolean]
+    TunnelInsideIpVersion: Optional[TunnelInsideIpVersion]
+    TunnelOptions: Optional[VpnTunnelOptionsSpecificationsList]
+    LocalIpv4NetworkCidr: Optional[String]
+    RemoteIpv4NetworkCidr: Optional[String]
+    LocalIpv6NetworkCidr: Optional[String]
+    RemoteIpv6NetworkCidr: Optional[String]
+    OutsideIpAddressType: Optional[String]
+    TransportTransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId]
+    StaticRoutesOnly: Optional[Boolean]
+
+
+class CreateVpnConnectionRequest(ServiceRequest):
+    CustomerGatewayId: CustomerGatewayId
+    Type: String
+    VpnGatewayId: Optional[VpnGatewayId]
+    TransitGatewayId: Optional[TransitGatewayId]
+    TagSpecifications: Optional[TagSpecificationList]
+    PreSharedKeyStorage: Optional[String]
+    DryRun: Optional[Boolean]
+    Options: Optional[VpnConnectionOptionsSpecification]
+
+
+class VgwTelemetry(TypedDict, total=False):
+    AcceptedRouteCount: Optional[Integer]
+    LastStatusChange: Optional[DateTime]
+    OutsideIpAddress: Optional[String]
+    Status: Optional[TelemetryStatus]
+    StatusMessage: Optional[String]
+    CertificateArn: Optional[String]
+
+
+VgwTelemetryList = List[VgwTelemetry]
+
+
+class VpnStaticRoute(TypedDict, total=False):
+    DestinationCidrBlock: Optional[String]
+    Source: Optional[VpnStaticRouteSource]
+    State: Optional[VpnState]
+
+
+VpnStaticRouteList = List[VpnStaticRoute]
+
+
+class VpnTunnelLogOptions(TypedDict, total=False):
+    CloudWatchLogOptions: Optional[CloudWatchLogOptions]
+
+
+class IKEVersionsListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+IKEVersionsList = List[IKEVersionsListValue]
+
+
+class Phase2DHGroupNumbersListValue(TypedDict, total=False):
+    Value: Optional[Integer]
+
+
+Phase2DHGroupNumbersList = List[Phase2DHGroupNumbersListValue]
+
+
+class Phase1DHGroupNumbersListValue(TypedDict, total=False):
+    Value: Optional[Integer]
+
+
+Phase1DHGroupNumbersList = List[Phase1DHGroupNumbersListValue]
+
+
+class Phase2IntegrityAlgorithmsListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+Phase2IntegrityAlgorithmsList = List[Phase2IntegrityAlgorithmsListValue]
+
+
+class Phase1IntegrityAlgorithmsListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+Phase1IntegrityAlgorithmsList = List[Phase1IntegrityAlgorithmsListValue]
+
+
+class Phase2EncryptionAlgorithmsListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+Phase2EncryptionAlgorithmsList = List[Phase2EncryptionAlgorithmsListValue]
+
+
+class Phase1EncryptionAlgorithmsListValue(TypedDict, total=False):
+    Value: Optional[String]
+
+
+Phase1EncryptionAlgorithmsList = List[Phase1EncryptionAlgorithmsListValue]
+
+
+class TunnelOption(TypedDict, total=False):
+    OutsideIpAddress: Optional[String]
+    TunnelInsideCidr: Optional[String]
+    TunnelInsideIpv6Cidr: Optional[String]
+    PreSharedKey: Optional[preSharedKey]
+    Phase1LifetimeSeconds: Optional[Integer]
+    Phase2LifetimeSeconds: Optional[Integer]
+    RekeyMarginTimeSeconds: Optional[Integer]
+    RekeyFuzzPercentage: Optional[Integer]
+    ReplayWindowSize: Optional[Integer]
+    DpdTimeoutSeconds: Optional[Integer]
+    DpdTimeoutAction: Optional[String]
+    Phase1EncryptionAlgorithms: Optional[Phase1EncryptionAlgorithmsList]
+    Phase2EncryptionAlgorithms: Optional[Phase2EncryptionAlgorithmsList]
+    Phase1IntegrityAlgorithms: Optional[Phase1IntegrityAlgorithmsList]
+    Phase2IntegrityAlgorithms: Optional[Phase2IntegrityAlgorithmsList]
+    Phase1DHGroupNumbers: Optional[Phase1DHGroupNumbersList]
+    Phase2DHGroupNumbers: Optional[Phase2DHGroupNumbersList]
+    IkeVersions: Optional[IKEVersionsList]
+    StartupAction: Optional[String]
+    LogOptions: Optional[VpnTunnelLogOptions]
+    EnableTunnelLifecycleControl: Optional[Boolean]
+
+
+TunnelOptionsList = List[TunnelOption]
+
+
+class VpnConnectionOptions(TypedDict, total=False):
+    EnableAcceleration: Optional[Boolean]
+    StaticRoutesOnly: Optional[Boolean]
+    LocalIpv4NetworkCidr: Optional[String]
+    RemoteIpv4NetworkCidr: Optional[String]
+    LocalIpv6NetworkCidr: Optional[String]
+    RemoteIpv6NetworkCidr: Optional[String]
+    OutsideIpAddressType: Optional[String]
+    TransportTransitGatewayAttachmentId: Optional[String]
+    TunnelInsideIpVersion: Optional[TunnelInsideIpVersion]
+    TunnelOptions: Optional[TunnelOptionsList]
+
+
+class VpnConnection(TypedDict, total=False):
+    Category: Optional[String]
+    TransitGatewayId: Optional[String]
+    CoreNetworkArn: Optional[String]
+    CoreNetworkAttachmentArn: Optional[String]
+    GatewayAssociationState: Optional[GatewayAssociationState]
+    Options: Optional[VpnConnectionOptions]
+    Routes: Optional[VpnStaticRouteList]
+    Tags: Optional[TagList]
+    VgwTelemetry: Optional[VgwTelemetryList]
+    PreSharedKeyArn: Optional[String]
+    VpnConnectionId: Optional[String]
+    State: Optional[VpnState]
+    CustomerGatewayConfiguration: Optional[customerGatewayConfiguration]
+    Type: Optional[GatewayType]
+    CustomerGatewayId: Optional[String]
+    VpnGatewayId: Optional[String]
+
+
+class CreateVpnConnectionResult(TypedDict, total=False):
+    VpnConnection: Optional[VpnConnection]
+
+
+class CreateVpnConnectionRouteRequest(ServiceRequest):
+    DestinationCidrBlock: String
+    VpnConnectionId: VpnConnectionId
+
+
+class CreateVpnGatewayRequest(ServiceRequest):
+    AvailabilityZone: Optional[String]
+    Type: GatewayType
+    TagSpecifications: Optional[TagSpecificationList]
+    AmazonSideAsn: Optional[Long]
+    DryRun: Optional[Boolean]
+
+
+VpcAttachmentList = List[VpcAttachment]
+
+
+class VpnGateway(TypedDict, total=False):
+    AmazonSideAsn: Optional[Long]
+    Tags: Optional[TagList]
+    VpnGatewayId: Optional[String]
+    State: Optional[VpnState]
+    Type: Optional[GatewayType]
+    AvailabilityZone: Optional[String]
+    VpcAttachments: Optional[VpcAttachmentList]
+
+
+class CreateVpnGatewayResult(TypedDict, total=False):
+    VpnGateway: Optional[VpnGateway]
+
+
+CustomerGatewayIdStringList = List[CustomerGatewayId]
+CustomerGatewayList = List[CustomerGateway]
+
+
+class DataQuery(TypedDict, total=False):
+    Id: Optional[String]
+    Source: Optional[String]
+    Destination: Optional[String]
+    Metric: Optional[MetricType]
+    Statistic: Optional[StatisticType]
+    Period: Optional[PeriodType]
+
+
+DataQueries = List[DataQuery]
+
+
+class MetricPoint(TypedDict, total=False):
+    StartDate: Optional[MillisecondDateTime]
+    EndDate: Optional[MillisecondDateTime]
+    Value: Optional[Float]
+    Status: Optional[String]
+
+
+MetricPoints = List[MetricPoint]
+
+
+class DataResponse(TypedDict, total=False):
+    Id: Optional[String]
+    Source: Optional[String]
+    Destination: Optional[String]
+    Metric: Optional[MetricType]
+    Statistic: Optional[StatisticType]
+    Period: Optional[PeriodType]
+    MetricPoints: Optional[MetricPoints]
+
+
+DataResponses = List[DataResponse]
+
+
+class DeclarativePoliciesReport(TypedDict, total=False):
+    ReportId: Optional[String]
+    S3Bucket: Optional[String]
+    S3Prefix: Optional[String]
+    TargetId: Optional[String]
+    StartTime: Optional[MillisecondDateTime]
+    EndTime: Optional[MillisecondDateTime]
+    Status: Optional[ReportState]
+    Tags: Optional[TagList]
+
+
+DeclarativePoliciesReportList = List[DeclarativePoliciesReport]
+
+
+class DeleteCarrierGatewayRequest(ServiceRequest):
+    CarrierGatewayId: CarrierGatewayId
+    DryRun: Optional[Boolean]
+
+
+class DeleteCarrierGatewayResult(TypedDict, total=False):
+    CarrierGateway: Optional[CarrierGateway]
+
+
+class DeleteClientVpnEndpointRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    DryRun: Optional[Boolean]
+
+
+class DeleteClientVpnEndpointResult(TypedDict, total=False):
+    Status: Optional[ClientVpnEndpointStatus]
+
+
+class DeleteClientVpnRouteRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    TargetVpcSubnetId: Optional[SubnetId]
+    DestinationCidrBlock: String
+    DryRun: Optional[Boolean]
+
+
+class DeleteClientVpnRouteResult(TypedDict, total=False):
+    Status: Optional[ClientVpnRouteStatus]
+
+
+class DeleteCoipCidrRequest(ServiceRequest):
+    Cidr: String
+    CoipPoolId: Ipv4PoolCoipId
+    DryRun: Optional[Boolean]
+
+
+class DeleteCoipCidrResult(TypedDict, total=False):
+    CoipCidr: Optional[CoipCidr]
+
+
+class DeleteCoipPoolRequest(ServiceRequest):
+    CoipPoolId: Ipv4PoolCoipId
+    DryRun: Optional[Boolean]
+
+
+class DeleteCoipPoolResult(TypedDict, total=False):
+    CoipPool: Optional[CoipPool]
+
+
+class DeleteCustomerGatewayRequest(ServiceRequest):
+    CustomerGatewayId: CustomerGatewayId
+    DryRun: Optional[Boolean]
+
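(Aside, not part of the diff: the DataQuery/DataResponse pair above is the shape used for network performance metric lookups. A minimal sketch of building a query; the enum values are illustrative only, since MetricType, StatisticType and PeriodType are string types defined elsewhere in this module.)

    latency_query: DataQuery = {
        "Id": "q-1",  # hypothetical identifier to correlate query and response
        "Source": "us-east-1",
        "Destination": "eu-west-1",
        "Metric": "aggregate-latency",
        "Statistic": "p50",
        "Period": "five-minutes",
    }
    queries: DataQueries = [latency_query]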
+
+class DeleteDhcpOptionsRequest(ServiceRequest):
+    DhcpOptionsId: DhcpOptionsId
+    DryRun: Optional[Boolean]
+
+
+class DeleteEgressOnlyInternetGatewayRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    EgressOnlyInternetGatewayId: EgressOnlyInternetGatewayId
+
+
+class DeleteEgressOnlyInternetGatewayResult(TypedDict, total=False):
+    ReturnCode: Optional[Boolean]
+
+
+class DeleteFleetError(TypedDict, total=False):
+    Code: Optional[DeleteFleetErrorCode]
+    Message: Optional[String]
+
+
+class DeleteFleetErrorItem(TypedDict, total=False):
+    Error: Optional[DeleteFleetError]
+    FleetId: Optional[FleetId]
+
+
+DeleteFleetErrorSet = List[DeleteFleetErrorItem]
+
+
+class DeleteFleetSuccessItem(TypedDict, total=False):
+    CurrentFleetState: Optional[FleetStateCode]
+    PreviousFleetState: Optional[FleetStateCode]
+    FleetId: Optional[FleetId]
+
+
+DeleteFleetSuccessSet = List[DeleteFleetSuccessItem]
+FleetIdSet = List[FleetId]
+
+
+class DeleteFleetsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    FleetIds: FleetIdSet
+    TerminateInstances: Boolean
+
+
+class DeleteFleetsResult(TypedDict, total=False):
+    SuccessfulFleetDeletions: Optional[DeleteFleetSuccessSet]
+    UnsuccessfulFleetDeletions: Optional[DeleteFleetErrorSet]
+
+
+FlowLogIdList = List[VpcFlowLogId]
+
+
+class DeleteFlowLogsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    FlowLogIds: FlowLogIdList
+
+
+class DeleteFlowLogsResult(TypedDict, total=False):
+    Unsuccessful: Optional[UnsuccessfulItemSet]
+
+
+class DeleteFpgaImageRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    FpgaImageId: FpgaImageId
+
+
+class DeleteFpgaImageResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DeleteInstanceConnectEndpointRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceConnectEndpointId: InstanceConnectEndpointId
+
+
+class DeleteInstanceConnectEndpointResult(TypedDict, total=False):
+    InstanceConnectEndpoint: Optional[Ec2InstanceConnectEndpoint]
+
+
+class DeleteInstanceEventWindowRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ForceDelete: Optional[Boolean]
+    InstanceEventWindowId: InstanceEventWindowId
+
+
+class InstanceEventWindowStateChange(TypedDict, total=False):
+    InstanceEventWindowId: Optional[InstanceEventWindowId]
+    State: Optional[InstanceEventWindowState]
+
+
+class DeleteInstanceEventWindowResult(TypedDict, total=False):
+    InstanceEventWindowState: Optional[InstanceEventWindowStateChange]
+
+
+class DeleteInternetGatewayRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InternetGatewayId: InternetGatewayId
+
+
+class DeleteIpamExternalResourceVerificationTokenRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamExternalResourceVerificationTokenId: IpamExternalResourceVerificationTokenId
+
+
+class DeleteIpamExternalResourceVerificationTokenResult(TypedDict, total=False):
+    IpamExternalResourceVerificationToken: Optional[IpamExternalResourceVerificationToken]
+
+
+class DeleteIpamPoolRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamPoolId: IpamPoolId
+    Cascade: Optional[Boolean]
+
+
+class DeleteIpamPoolResult(TypedDict, total=False):
+    IpamPool: Optional[IpamPool]
+
+
+class DeleteIpamRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamId: IpamId
+    Cascade: Optional[Boolean]
+
+
+class DeleteIpamResourceDiscoveryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamResourceDiscoveryId: IpamResourceDiscoveryId
+
+
+class DeleteIpamResourceDiscoveryResult(TypedDict, total=False):
+    IpamResourceDiscovery: Optional[IpamResourceDiscovery]
+
+
+class DeleteIpamResult(TypedDict, total=False):
+    Ipam: Optional[Ipam]
+
+
+class DeleteIpamScopeRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamScopeId: IpamScopeId
+
+
+class DeleteIpamScopeResult(TypedDict, total=False):
+    IpamScope: Optional[IpamScope]
+
+
+class DeleteKeyPairRequest(ServiceRequest):
+    KeyName: Optional[KeyPairNameWithResolver]
+    KeyPairId: Optional[KeyPairId]
+    DryRun: Optional[Boolean]
+
+
+class DeleteKeyPairResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+    KeyPairId: Optional[String]
+
+
+class DeleteLaunchTemplateRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    LaunchTemplateId: Optional[LaunchTemplateId]
+    LaunchTemplateName: Optional[LaunchTemplateName]
+
+
+class DeleteLaunchTemplateResult(TypedDict, total=False):
+    LaunchTemplate: Optional[LaunchTemplate]
+
+
+VersionStringList = List[String]
+
+
+class DeleteLaunchTemplateVersionsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    LaunchTemplateId: Optional[LaunchTemplateId]
+    LaunchTemplateName: Optional[LaunchTemplateName]
+    Versions: VersionStringList
+
+
+class ResponseError(TypedDict, total=False):
+    Code: Optional[LaunchTemplateErrorCode]
+    Message: Optional[String]
+
+
+class DeleteLaunchTemplateVersionsResponseErrorItem(TypedDict, total=False):
+    LaunchTemplateId: Optional[String]
+    LaunchTemplateName: Optional[String]
+    VersionNumber: Optional[Long]
+    ResponseError: Optional[ResponseError]
+
+
+DeleteLaunchTemplateVersionsResponseErrorSet = List[DeleteLaunchTemplateVersionsResponseErrorItem]
+
+
+class DeleteLaunchTemplateVersionsResponseSuccessItem(TypedDict, total=False):
+    LaunchTemplateId: Optional[String]
+    LaunchTemplateName: Optional[String]
+    VersionNumber: Optional[Long]
+
+
+DeleteLaunchTemplateVersionsResponseSuccessSet = List[
+    DeleteLaunchTemplateVersionsResponseSuccessItem
+]
+
+
+class DeleteLaunchTemplateVersionsResult(TypedDict, total=False):
+    SuccessfullyDeletedLaunchTemplateVersions: Optional[
+        DeleteLaunchTemplateVersionsResponseSuccessSet
+    ]
+    UnsuccessfullyDeletedLaunchTemplateVersions: Optional[
+        DeleteLaunchTemplateVersionsResponseErrorSet
+    ]
+
+
+class DeleteLocalGatewayRouteRequest(ServiceRequest):
+    DestinationCidrBlock: Optional[String]
+    LocalGatewayRouteTableId: LocalGatewayRoutetableId
+    DryRun: Optional[Boolean]
+    DestinationPrefixListId: Optional[PrefixListResourceId]
+
+
+class DeleteLocalGatewayRouteResult(TypedDict, total=False):
+    Route: Optional[LocalGatewayRoute]
+
+
+class DeleteLocalGatewayRouteTableRequest(ServiceRequest):
+    LocalGatewayRouteTableId: LocalGatewayRoutetableId
+    DryRun: Optional[Boolean]
+
+
+class DeleteLocalGatewayRouteTableResult(TypedDict, total=False):
+    LocalGatewayRouteTable: Optional[LocalGatewayRouteTable]
+
+
+class DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationRequest(ServiceRequest):
+    LocalGatewayRouteTableVirtualInterfaceGroupAssociationId: (
+        LocalGatewayRouteTableVirtualInterfaceGroupAssociationId
+    )
+    DryRun: Optional[Boolean]
+
+
+class DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationResult(TypedDict, total=False):
+    LocalGatewayRouteTableVirtualInterfaceGroupAssociation: Optional[
+        LocalGatewayRouteTableVirtualInterfaceGroupAssociation
+    ]
+
+
+class DeleteLocalGatewayRouteTableVpcAssociationRequest(ServiceRequest):
+    LocalGatewayRouteTableVpcAssociationId: LocalGatewayRouteTableVpcAssociationId
+    DryRun: Optional[Boolean]
+
+
+class DeleteLocalGatewayRouteTableVpcAssociationResult(TypedDict, total=False):
+    LocalGatewayRouteTableVpcAssociation: Optional[LocalGatewayRouteTableVpcAssociation]
+
+
+class DeleteLocalGatewayVirtualInterfaceGroupRequest(ServiceRequest):
+    LocalGatewayVirtualInterfaceGroupId: LocalGatewayVirtualInterfaceGroupId
+    DryRun: Optional[Boolean]
+
+
+class DeleteLocalGatewayVirtualInterfaceGroupResult(TypedDict, total=False):
+    LocalGatewayVirtualInterfaceGroup: Optional[LocalGatewayVirtualInterfaceGroup]
+
+
+class DeleteLocalGatewayVirtualInterfaceRequest(ServiceRequest):
+    LocalGatewayVirtualInterfaceId: LocalGatewayVirtualInterfaceId
+    DryRun: Optional[Boolean]
+
+
+class DeleteLocalGatewayVirtualInterfaceResult(TypedDict, total=False):
+    LocalGatewayVirtualInterface: Optional[LocalGatewayVirtualInterface]
+
+
+class DeleteManagedPrefixListRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    PrefixListId: PrefixListResourceId
+
+
+class DeleteManagedPrefixListResult(TypedDict, total=False):
+    PrefixList: Optional[ManagedPrefixList]
+
+
+class DeleteNatGatewayRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NatGatewayId: NatGatewayId
+
+
+class DeleteNatGatewayResult(TypedDict, total=False):
+    NatGatewayId: Optional[String]
+
+
+class DeleteNetworkAclEntryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NetworkAclId: NetworkAclId
+    RuleNumber: Integer
+    Egress: Boolean
+
+
+class DeleteNetworkAclRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NetworkAclId: NetworkAclId
+
+
+class DeleteNetworkInsightsAccessScopeAnalysisRequest(ServiceRequest):
+    NetworkInsightsAccessScopeAnalysisId: NetworkInsightsAccessScopeAnalysisId
+    DryRun: Optional[Boolean]
+
+
+class DeleteNetworkInsightsAccessScopeAnalysisResult(TypedDict, total=False):
+    NetworkInsightsAccessScopeAnalysisId: Optional[NetworkInsightsAccessScopeAnalysisId]
+
+
+class DeleteNetworkInsightsAccessScopeRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NetworkInsightsAccessScopeId: NetworkInsightsAccessScopeId
+
+
+class DeleteNetworkInsightsAccessScopeResult(TypedDict, total=False):
+    NetworkInsightsAccessScopeId: Optional[NetworkInsightsAccessScopeId]
+
+
+class DeleteNetworkInsightsAnalysisRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NetworkInsightsAnalysisId: NetworkInsightsAnalysisId
+
+
+class DeleteNetworkInsightsAnalysisResult(TypedDict, total=False):
+    NetworkInsightsAnalysisId: Optional[NetworkInsightsAnalysisId]
+
+
+class DeleteNetworkInsightsPathRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NetworkInsightsPathId: NetworkInsightsPathId
+
+
+class DeleteNetworkInsightsPathResult(TypedDict, total=False):
+    NetworkInsightsPathId: Optional[NetworkInsightsPathId]
+
+
+class DeleteNetworkInterfacePermissionRequest(ServiceRequest):
+    NetworkInterfacePermissionId: NetworkInterfacePermissionId
+    Force: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class DeleteNetworkInterfacePermissionResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DeleteNetworkInterfaceRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NetworkInterfaceId: NetworkInterfaceId
+
+
+class DeletePlacementGroupRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    GroupName: PlacementGroupName
+
+
+class DeletePublicIpv4PoolRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    PoolId: Ipv4PoolEc2Id
+    NetworkBorderGroup: Optional[String]
+
+
+class DeletePublicIpv4PoolResult(TypedDict, total=False):
+    ReturnValue: Optional[Boolean]
+
+
+class DeleteQueuedReservedInstancesError(TypedDict, total=False):
+    Code: Optional[DeleteQueuedReservedInstancesErrorCode]
+    Message: Optional[String]
+
+
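(Aside, not part of the diff: the batched delete operations above report partial failure instead of raising, splitting the outcome into a success set and an error set. A minimal sketch of consuming one of these shapes; "result" would come from whatever provider or client produced the DeleteFleetsResult.)

    def failed_fleet_ids(result: DeleteFleetsResult) -> List[str]:
        # Callers have to inspect the error set explicitly; a missing key means
        # the service reported no failures.
        return [
            item["FleetId"]
            for item in result.get("UnsuccessfulFleetDeletions", [])
            if "FleetId" in item
        ]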
+DeleteQueuedReservedInstancesIdList = List[ReservationId]
+
+
+class DeleteQueuedReservedInstancesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ReservedInstancesIds: DeleteQueuedReservedInstancesIdList
+
+
+class FailedQueuedPurchaseDeletion(TypedDict, total=False):
+    Error: Optional[DeleteQueuedReservedInstancesError]
+    ReservedInstancesId: Optional[String]
+
+
+FailedQueuedPurchaseDeletionSet = List[FailedQueuedPurchaseDeletion]
+
+
+class SuccessfulQueuedPurchaseDeletion(TypedDict, total=False):
+    ReservedInstancesId: Optional[String]
+
+
+SuccessfulQueuedPurchaseDeletionSet = List[SuccessfulQueuedPurchaseDeletion]
+
+
+class DeleteQueuedReservedInstancesResult(TypedDict, total=False):
+    SuccessfulQueuedPurchaseDeletions: Optional[SuccessfulQueuedPurchaseDeletionSet]
+    FailedQueuedPurchaseDeletions: Optional[FailedQueuedPurchaseDeletionSet]
+
+
+class DeleteRouteRequest(ServiceRequest):
+    DestinationPrefixListId: Optional[PrefixListResourceId]
+    DryRun: Optional[Boolean]
+    RouteTableId: RouteTableId
+    DestinationCidrBlock: Optional[String]
+    DestinationIpv6CidrBlock: Optional[String]
+
+
+class DeleteRouteServerEndpointRequest(ServiceRequest):
+    RouteServerEndpointId: RouteServerEndpointId
+    DryRun: Optional[Boolean]
+
+
+class DeleteRouteServerEndpointResult(TypedDict, total=False):
+    RouteServerEndpoint: Optional[RouteServerEndpoint]
+
+
+class DeleteRouteServerPeerRequest(ServiceRequest):
+    RouteServerPeerId: RouteServerPeerId
+    DryRun: Optional[Boolean]
+
+
+class DeleteRouteServerPeerResult(TypedDict, total=False):
+    RouteServerPeer: Optional[RouteServerPeer]
+
+
+class DeleteRouteServerRequest(ServiceRequest):
+    RouteServerId: RouteServerId
+    DryRun: Optional[Boolean]
+
+
+class DeleteRouteServerResult(TypedDict, total=False):
+    RouteServer: Optional[RouteServer]
+
+
+class DeleteRouteTableRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    RouteTableId: RouteTableId
+
+
+class DeleteSecurityGroupRequest(ServiceRequest):
+    GroupId: Optional[SecurityGroupId]
+    GroupName: Optional[SecurityGroupName]
+    DryRun: Optional[Boolean]
+
+
+class DeleteSecurityGroupResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+    GroupId: Optional[SecurityGroupId]
+
+
+class DeleteSnapshotRequest(ServiceRequest):
+    SnapshotId: SnapshotId
+    DryRun: Optional[Boolean]
+
+
+class DeleteSnapshotReturnCode(TypedDict, total=False):
+    SnapshotId: Optional[SnapshotId]
+    ReturnCode: Optional[SnapshotReturnCodes]
+
+
+DeleteSnapshotResultSet = List[DeleteSnapshotReturnCode]
+
+
+class DeleteSpotDatafeedSubscriptionRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class DeleteSubnetCidrReservationRequest(ServiceRequest):
+    SubnetCidrReservationId: SubnetCidrReservationId
+    DryRun: Optional[Boolean]
+
+
+class DeleteSubnetCidrReservationResult(TypedDict, total=False):
+    DeletedSubnetCidrReservation: Optional[SubnetCidrReservation]
+
+
+class DeleteSubnetRequest(ServiceRequest):
+    SubnetId: SubnetId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTagsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Resources: ResourceIdList
+    Tags: Optional[TagList]
+
+
+class DeleteTrafficMirrorFilterRequest(ServiceRequest):
+    TrafficMirrorFilterId: TrafficMirrorFilterId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTrafficMirrorFilterResult(TypedDict, total=False):
+    TrafficMirrorFilterId: Optional[String]
+
+
+class DeleteTrafficMirrorFilterRuleRequest(ServiceRequest):
+    TrafficMirrorFilterRuleId: TrafficMirrorFilterRuleIdWithResolver
+    DryRun: Optional[Boolean]
+
+
+class DeleteTrafficMirrorFilterRuleResult(TypedDict, total=False):
+    TrafficMirrorFilterRuleId: Optional[String]
+
+
+class DeleteTrafficMirrorSessionRequest(ServiceRequest):
+    TrafficMirrorSessionId: TrafficMirrorSessionId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTrafficMirrorSessionResult(TypedDict, total=False):
+    TrafficMirrorSessionId: Optional[String]
+
+
+class DeleteTrafficMirrorTargetRequest(ServiceRequest):
+    TrafficMirrorTargetId: TrafficMirrorTargetId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTrafficMirrorTargetResult(TypedDict, total=False):
+    TrafficMirrorTargetId: Optional[String]
+
+
+class DeleteTransitGatewayConnectPeerRequest(ServiceRequest):
+    TransitGatewayConnectPeerId: TransitGatewayConnectPeerId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayConnectPeerResult(TypedDict, total=False):
+    TransitGatewayConnectPeer: Optional[TransitGatewayConnectPeer]
+
+
+class DeleteTransitGatewayConnectRequest(ServiceRequest):
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayConnectResult(TypedDict, total=False):
+    TransitGatewayConnect: Optional[TransitGatewayConnect]
+
+
+class DeleteTransitGatewayMulticastDomainRequest(ServiceRequest):
+    TransitGatewayMulticastDomainId: TransitGatewayMulticastDomainId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayMulticastDomainResult(TypedDict, total=False):
+    TransitGatewayMulticastDomain: Optional[TransitGatewayMulticastDomain]
+
+
+class DeleteTransitGatewayPeeringAttachmentRequest(ServiceRequest):
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayPeeringAttachmentResult(TypedDict, total=False):
+    TransitGatewayPeeringAttachment: Optional[TransitGatewayPeeringAttachment]
+
+
+class DeleteTransitGatewayPolicyTableRequest(ServiceRequest):
+    TransitGatewayPolicyTableId: TransitGatewayPolicyTableId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayPolicyTableResult(TypedDict, total=False):
+    TransitGatewayPolicyTable: Optional[TransitGatewayPolicyTable]
+
+
+class DeleteTransitGatewayPrefixListReferenceRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    PrefixListId: PrefixListResourceId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayPrefixListReferenceResult(TypedDict, total=False):
+    TransitGatewayPrefixListReference: Optional[TransitGatewayPrefixListReference]
+
+
+class DeleteTransitGatewayRequest(ServiceRequest):
+    TransitGatewayId: TransitGatewayId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayResult(TypedDict, total=False):
+    TransitGateway: Optional[TransitGateway]
+
+
+class DeleteTransitGatewayRouteRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    DestinationCidrBlock: String
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayRouteResult(TypedDict, total=False):
+    Route: Optional[TransitGatewayRoute]
+
+
+class DeleteTransitGatewayRouteTableAnnouncementRequest(ServiceRequest):
+    TransitGatewayRouteTableAnnouncementId: TransitGatewayRouteTableAnnouncementId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayRouteTableAnnouncementResult(TypedDict, total=False):
+    TransitGatewayRouteTableAnnouncement: Optional[TransitGatewayRouteTableAnnouncement]
+
+
+class DeleteTransitGatewayRouteTableRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayRouteTableResult(TypedDict, total=False):
+    TransitGatewayRouteTable: Optional[TransitGatewayRouteTable]
+
+
+class DeleteTransitGatewayVpcAttachmentRequest(ServiceRequest):
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    DryRun: Optional[Boolean]
+
+
+class DeleteTransitGatewayVpcAttachmentResult(TypedDict, total=False):
+    TransitGatewayVpcAttachment: Optional[TransitGatewayVpcAttachment]
+
+
+class DeleteVerifiedAccessEndpointRequest(ServiceRequest):
+    VerifiedAccessEndpointId: VerifiedAccessEndpointId
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class DeleteVerifiedAccessEndpointResult(TypedDict, total=False):
+    VerifiedAccessEndpoint: Optional[VerifiedAccessEndpoint]
+
+
+class DeleteVerifiedAccessGroupRequest(ServiceRequest):
+    VerifiedAccessGroupId: VerifiedAccessGroupId
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class DeleteVerifiedAccessGroupResult(TypedDict, total=False):
+    VerifiedAccessGroup: Optional[VerifiedAccessGroup]
+
+
+class DeleteVerifiedAccessInstanceRequest(ServiceRequest):
+    VerifiedAccessInstanceId: VerifiedAccessInstanceId
+    DryRun: Optional[Boolean]
+    ClientToken: Optional[String]
+
+
+class DeleteVerifiedAccessInstanceResult(TypedDict, total=False):
+    VerifiedAccessInstance: Optional[VerifiedAccessInstance]
+
+
+class DeleteVerifiedAccessTrustProviderRequest(ServiceRequest):
+    VerifiedAccessTrustProviderId: VerifiedAccessTrustProviderId
+    DryRun: Optional[Boolean]
+    ClientToken: Optional[String]
+
+
+class DeleteVerifiedAccessTrustProviderResult(TypedDict, total=False):
+    VerifiedAccessTrustProvider: Optional[VerifiedAccessTrustProvider]
+
+
+class DeleteVolumeRequest(ServiceRequest):
+    VolumeId: VolumeId
+    DryRun: Optional[Boolean]
+
+
+class DeleteVpcBlockPublicAccessExclusionRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ExclusionId: VpcBlockPublicAccessExclusionId
+
+
+class DeleteVpcBlockPublicAccessExclusionResult(TypedDict, total=False):
+    VpcBlockPublicAccessExclusion: Optional[VpcBlockPublicAccessExclusion]
+
+
+class DeleteVpcEndpointConnectionNotificationsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ConnectionNotificationIds: ConnectionNotificationIdsList
+
+
+class DeleteVpcEndpointConnectionNotificationsResult(TypedDict, total=False):
+    Unsuccessful: Optional[UnsuccessfulItemSet]
+
+
+VpcEndpointServiceIdList = List[VpcEndpointServiceId]
+
+
+class DeleteVpcEndpointServiceConfigurationsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ServiceIds: VpcEndpointServiceIdList
+
+
+class DeleteVpcEndpointServiceConfigurationsResult(TypedDict, total=False):
+    Unsuccessful: Optional[UnsuccessfulItemSet]
+
+
+class DeleteVpcEndpointsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    VpcEndpointIds: VpcEndpointIdList
+
+
+class DeleteVpcEndpointsResult(TypedDict, total=False):
+    Unsuccessful: Optional[UnsuccessfulItemSet]
+
+
+class DeleteVpcPeeringConnectionRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    VpcPeeringConnectionId: VpcPeeringConnectionId
+
+
+class DeleteVpcPeeringConnectionResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DeleteVpcRequest(ServiceRequest):
+    VpcId: VpcId
+    DryRun: Optional[Boolean]
+
+
+class DeleteVpnConnectionRequest(ServiceRequest):
+    VpnConnectionId: VpnConnectionId
+    DryRun: Optional[Boolean]
+
+
+class DeleteVpnConnectionRouteRequest(ServiceRequest):
+    DestinationCidrBlock: String
+    VpnConnectionId: VpnConnectionId
+
+
+class DeleteVpnGatewayRequest(ServiceRequest):
+    VpnGatewayId: VpnGatewayId
+    DryRun: Optional[Boolean]
+
+
+class DeprovisionByoipCidrRequest(ServiceRequest):
+    Cidr: String
+    DryRun: Optional[Boolean]
+
+
+class DeprovisionByoipCidrResult(TypedDict, total=False):
+    ByoipCidr: Optional[ByoipCidr]
+
+
+class DeprovisionIpamByoasnRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamId: IpamId
+    Asn: String
+
+
+class DeprovisionIpamByoasnResult(TypedDict, total=False):
+    Byoasn: Optional[Byoasn]
+
+
+class DeprovisionIpamPoolCidrRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamPoolId: IpamPoolId
+    Cidr: Optional[String]
+
+
+class IpamPoolCidrFailureReason(TypedDict, total=False):
+    Code: Optional[IpamPoolCidrFailureCode]
+    Message: Optional[String]
+
+
+class IpamPoolCidr(TypedDict, total=False):
+    Cidr: Optional[String]
+    State: Optional[IpamPoolCidrState]
+    FailureReason: Optional[IpamPoolCidrFailureReason]
+    IpamPoolCidrId: Optional[IpamPoolCidrId]
+    NetmaskLength: Optional[Integer]
+
+
+class DeprovisionIpamPoolCidrResult(TypedDict, total=False):
+    IpamPoolCidr: Optional[IpamPoolCidr]
+
+
+class DeprovisionPublicIpv4PoolCidrRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    PoolId: Ipv4PoolEc2Id
+    Cidr: String
+
+
+DeprovisionedAddressSet = List[String]
+
+
+class DeprovisionPublicIpv4PoolCidrResult(TypedDict, total=False):
+    PoolId: Optional[Ipv4PoolEc2Id]
+    DeprovisionedAddresses: Optional[DeprovisionedAddressSet]
+
+
+class DeregisterImageRequest(ServiceRequest):
+    ImageId: ImageId
+    DeleteAssociatedSnapshots: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class DeregisterImageResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+    DeleteSnapshotResults: Optional[DeleteSnapshotResultSet]
+
+
+InstanceTagKeySet = List[String]
+
+
+class DeregisterInstanceTagAttributeRequest(TypedDict, total=False):
+    IncludeAllTagsOfInstance: Optional[Boolean]
+    InstanceTagKeys: Optional[InstanceTagKeySet]
+
+
+class DeregisterInstanceEventNotificationAttributesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceTagAttribute: DeregisterInstanceTagAttributeRequest
+
+
+class InstanceTagNotificationAttribute(TypedDict, total=False):
+    InstanceTagKeys: Optional[InstanceTagKeySet]
+    IncludeAllTagsOfInstance: Optional[Boolean]
+
+
+class DeregisterInstanceEventNotificationAttributesResult(TypedDict, total=False):
+    InstanceTagAttribute: Optional[InstanceTagNotificationAttribute]
+
+
+TransitGatewayNetworkInterfaceIdList = List[NetworkInterfaceId]
+
+
+class DeregisterTransitGatewayMulticastGroupMembersRequest(ServiceRequest):
+    TransitGatewayMulticastDomainId: Optional[TransitGatewayMulticastDomainId]
+    GroupIpAddress: Optional[String]
+    NetworkInterfaceIds: Optional[TransitGatewayNetworkInterfaceIdList]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayMulticastDeregisteredGroupMembers(TypedDict, total=False):
+    TransitGatewayMulticastDomainId: Optional[String]
+    DeregisteredNetworkInterfaceIds: Optional[ValueStringList]
+    GroupIpAddress: Optional[String]
+
+
+class DeregisterTransitGatewayMulticastGroupMembersResult(TypedDict, total=False):
+    DeregisteredMulticastGroupMembers: Optional[TransitGatewayMulticastDeregisteredGroupMembers]
+
+
+class DeregisterTransitGatewayMulticastGroupSourcesRequest(ServiceRequest):
+    TransitGatewayMulticastDomainId: Optional[TransitGatewayMulticastDomainId]
+    GroupIpAddress: Optional[String]
+    NetworkInterfaceIds: Optional[TransitGatewayNetworkInterfaceIdList]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayMulticastDeregisteredGroupSources(TypedDict, total=False):
+    TransitGatewayMulticastDomainId: Optional[String]
+    DeregisteredNetworkInterfaceIds: Optional[ValueStringList]
+    GroupIpAddress: Optional[String]
+
+
+class DeregisterTransitGatewayMulticastGroupSourcesResult(TypedDict, total=False):
+    DeregisteredMulticastGroupSources: Optional[TransitGatewayMulticastDeregisteredGroupSources]
+
+
+class DescribeAccountAttributesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    AttributeNames: Optional[AccountAttributeNameStringList]
+
+
+class DescribeAccountAttributesResult(TypedDict, total=False):
+    AccountAttributes: Optional[AccountAttributeList]
+
+
+class DescribeAddressTransfersRequest(ServiceRequest):
+    AllocationIds: Optional[AllocationIdList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeAddressTransfersMaxResults]
+    DryRun: Optional[Boolean]
+
+
+class DescribeAddressTransfersResult(TypedDict, total=False):
+    AddressTransfers: Optional[AddressTransferList]
+    NextToken: Optional[String]
+
+
+class DescribeAddressesAttributeRequest(ServiceRequest):
+    AllocationIds: Optional[AllocationIds]
+    Attribute: Optional[AddressAttributeName]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[AddressMaxResults]
+    DryRun: Optional[Boolean]
+
+
+class DescribeAddressesAttributeResult(TypedDict, total=False):
+    Addresses: Optional[AddressSet]
+    NextToken: Optional[NextToken]
+
+
+class Filter(TypedDict, total=False):
+    Name: Optional[String]
+    Values: Optional[ValueStringList]
+
+
+FilterList = List[Filter]
+PublicIpStringList = List[String]
+
+
+class DescribeAddressesRequest(ServiceRequest):
+    PublicIps: Optional[PublicIpStringList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    AllocationIds: Optional[AllocationIdList]
+
+
+class DescribeAddressesResult(TypedDict, total=False):
+    Addresses: Optional[AddressList]
+
+
+class DescribeAggregateIdFormatRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class IdFormat(TypedDict, total=False):
+    Deadline: Optional[DateTime]
+    Resource: Optional[String]
+    UseLongIds: Optional[Boolean]
+
+
+IdFormatList = List[IdFormat]
+
+
+class DescribeAggregateIdFormatResult(TypedDict, total=False):
+    UseLongIdsAggregated: Optional[Boolean]
+    Statuses: Optional[IdFormatList]
+
+
+ZoneIdStringList = List[String]
+ZoneNameStringList = List[String]
+
+
+class DescribeAvailabilityZonesRequest(ServiceRequest):
+    ZoneNames: Optional[ZoneNameStringList]
+    ZoneIds: Optional[ZoneIdStringList]
+    AllAvailabilityZones: Optional[Boolean]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+
+
+class DescribeAvailabilityZonesResult(TypedDict, total=False):
+    AvailabilityZones: Optional[AvailabilityZoneList]
+
+
+class DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(ServiceRequest):
+    MaxResults: Optional[MaxResultsParam]
+    NextToken: Optional[String]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class Subscription(TypedDict, total=False):
+    Source: Optional[String]
+    Destination: Optional[String]
+    Metric: Optional[MetricType]
+    Statistic: Optional[StatisticType]
+    Period: Optional[PeriodType]
+
+
+SubscriptionList = List[Subscription]
+
+
+class DescribeAwsNetworkPerformanceMetricSubscriptionsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    Subscriptions: Optional[SubscriptionList]
+
+
+class DescribeBundleTasksRequest(ServiceRequest):
+    BundleIds: Optional[BundleIdStringList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+
+
+class DescribeBundleTasksResult(TypedDict, total=False):
+    BundleTasks: Optional[BundleTaskList]
+
+
+class DescribeByoipCidrsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    MaxResults: DescribeByoipCidrsMaxResults
+    NextToken: Optional[NextToken]
+
+
+class DescribeByoipCidrsResult(TypedDict, total=False):
+    ByoipCidrs: Optional[ByoipCidrSet]
+    NextToken: Optional[String]
+
+
+class DescribeCapacityBlockExtensionHistoryRequest(ServiceRequest):
+    CapacityReservationIds: Optional[CapacityReservationIdSet]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeFutureCapacityMaxResults]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class DescribeCapacityBlockExtensionHistoryResult(TypedDict, total=False):
+    CapacityBlockExtensions: Optional[CapacityBlockExtensionSet]
+    NextToken: Optional[String]
+
+
+class DescribeCapacityBlockExtensionOfferingsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    CapacityBlockExtensionDurationHours: Integer
+    CapacityReservationId: CapacityReservationId
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeCapacityBlockExtensionOfferingsMaxResults]
+
+
+class DescribeCapacityBlockExtensionOfferingsResult(TypedDict, total=False):
+    CapacityBlockExtensionOfferings: Optional[CapacityBlockExtensionOfferingSet]
+    NextToken: Optional[String]
+
+
+class DescribeCapacityBlockOfferingsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceType: Optional[String]
+    InstanceCount: Optional[Integer]
+    StartDateRange: Optional[MillisecondDateTime]
+    EndDateRange: Optional[MillisecondDateTime]
+    CapacityDurationHours: Integer
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeCapacityBlockOfferingsMaxResults]
+
+
+class DescribeCapacityBlockOfferingsResult(TypedDict, total=False):
+    CapacityBlockOfferings: Optional[CapacityBlockOfferingSet]
+    NextToken: Optional[String]
+
+
+class DescribeCapacityReservationBillingRequestsRequest(ServiceRequest):
+    CapacityReservationIds: Optional[CapacityReservationIdSet]
+    Role: CallerRole
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeCapacityReservationBillingRequestsRequestMaxResults]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class DescribeCapacityReservationBillingRequestsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    CapacityReservationBillingRequests: Optional[CapacityReservationBillingRequestSet]
+
+
+class DescribeCapacityReservationFleetsRequest(ServiceRequest):
+    CapacityReservationFleetIds: Optional[CapacityReservationFleetIdSet]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeCapacityReservationFleetsMaxResults]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class DescribeCapacityReservationFleetsResult(TypedDict, total=False):
+    CapacityReservationFleets: Optional[CapacityReservationFleetSet]
+    NextToken: Optional[String]
+
+
+class DescribeCapacityReservationsRequest(ServiceRequest):
+    CapacityReservationIds: Optional[CapacityReservationIdSet]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeCapacityReservationsMaxResults]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class DescribeCapacityReservationsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    CapacityReservations: Optional[CapacityReservationSet]
+
+
+class DescribeCarrierGatewaysRequest(ServiceRequest):
+    CarrierGatewayIds: Optional[CarrierGatewayIdSet]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[CarrierGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class DescribeCarrierGatewaysResult(TypedDict, total=False):
+    CarrierGateways: Optional[CarrierGatewaySet]
+    NextToken: Optional[String]
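(Aside, not part of the diff: the Describe* shapes above all follow the same calling convention, Filters to narrow the result set and NextToken/MaxResults to paginate. A minimal sketch of draining one such operation; "describe" is a hypothetical callable standing in for whichever handler or client consumes the request shape.)

    def collect_capacity_reservations(describe) -> list:
        request: DescribeCapacityReservationsRequest = {
            "Filters": [{"Name": "state", "Values": ["active"]}],
            "MaxResults": 100,
        }
        reservations = []
        while True:
            result: DescribeCapacityReservationsResult = describe(request)
            reservations.extend(result.get("CapacityReservations", []))
            token = result.get("NextToken")
            if not token:
                # the service omits NextToken on the final page
                return reservations
            request["NextToken"] = token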
+
+
+InstanceIdStringList = List[InstanceId]
+
+
+class DescribeClassicLinkInstancesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceIds: Optional[InstanceIdStringList]
+    Filters: Optional[FilterList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeClassicLinkInstancesMaxResults]
+
+
+class DescribeClassicLinkInstancesResult(TypedDict, total=False):
+    Instances: Optional[ClassicLinkInstanceList]
+    NextToken: Optional[String]
+
+
+class DescribeClientVpnAuthorizationRulesRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    DryRun: Optional[Boolean]
+    NextToken: Optional[NextToken]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[DescribeClientVpnAuthorizationRulesMaxResults]
+
+
+class DescribeClientVpnAuthorizationRulesResult(TypedDict, total=False):
+    AuthorizationRules: Optional[AuthorizationRuleSet]
+    NextToken: Optional[NextToken]
+
+
+class DescribeClientVpnConnectionsRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    Filters: Optional[FilterList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[DescribeClientVpnConnectionsMaxResults]
+    DryRun: Optional[Boolean]
+
+
+class DescribeClientVpnConnectionsResult(TypedDict, total=False):
+    Connections: Optional[ClientVpnConnectionSet]
+    NextToken: Optional[NextToken]
+
+
+class DescribeClientVpnEndpointsRequest(ServiceRequest):
+    ClientVpnEndpointIds: Optional[ClientVpnEndpointIdList]
+    MaxResults: Optional[DescribeClientVpnEndpointMaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+EndpointSet = List[ClientVpnEndpoint]
+
+
+class DescribeClientVpnEndpointsResult(TypedDict, total=False):
+    ClientVpnEndpoints: Optional[EndpointSet]
+    NextToken: Optional[NextToken]
+
+
+class DescribeClientVpnRoutesRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[DescribeClientVpnRoutesMaxResults]
+    NextToken: Optional[NextToken]
+    DryRun: Optional[Boolean]
+
+
+class DescribeClientVpnRoutesResult(TypedDict, total=False):
+    Routes: Optional[ClientVpnRouteSet]
+    NextToken: Optional[NextToken]
+
+
+class DescribeClientVpnTargetNetworksRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    AssociationIds: Optional[ValueStringList]
+    MaxResults: Optional[DescribeClientVpnTargetNetworksMaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class TargetNetwork(TypedDict, total=False):
+    AssociationId: Optional[String]
+    VpcId: Optional[String]
+    TargetNetworkId: Optional[String]
+    ClientVpnEndpointId: Optional[String]
+    Status: Optional[AssociationStatus]
+    SecurityGroups: Optional[ValueStringList]
+
+
+TargetNetworkSet = List[TargetNetwork]
+
+
+class DescribeClientVpnTargetNetworksResult(TypedDict, total=False):
+    ClientVpnTargetNetworks: Optional[TargetNetworkSet]
+    NextToken: Optional[NextToken]
+
+
+class DescribeCoipPoolsRequest(ServiceRequest):
+    PoolIds: Optional[CoipPoolIdSet]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[CoipPoolMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class DescribeCoipPoolsResult(TypedDict, total=False):
+    CoipPools: Optional[CoipPoolSet]
+    NextToken: Optional[String]
+
+
+DescribeConversionTaskList = List[ConversionTask]
+
+
+class DescribeConversionTasksRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ConversionTaskIds: Optional[ConversionIdStringList]
+
+
+class DescribeConversionTasksResult(TypedDict, total=False):
+    ConversionTasks: Optional[DescribeConversionTaskList]
+
+
+class DescribeCustomerGatewaysRequest(ServiceRequest):
+    CustomerGatewayIds: Optional[CustomerGatewayIdStringList]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class DescribeCustomerGatewaysResult(TypedDict, total=False):
+    CustomerGateways: Optional[CustomerGatewayList]
+
+
+class DescribeDeclarativePoliciesReportsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NextToken: Optional[String]
+    MaxResults: Optional[DeclarativePoliciesMaxResults]
+    ReportIds: Optional[ValueStringList]
+
+
+class DescribeDeclarativePoliciesReportsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    Reports: Optional[DeclarativePoliciesReportList]
+
+
+DhcpOptionsIdStringList = List[DhcpOptionsId]
+
+
+class DescribeDhcpOptionsRequest(ServiceRequest):
+    DhcpOptionsIds: Optional[DhcpOptionsIdStringList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeDhcpOptionsMaxResults]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+
+
+DhcpOptionsList = List[DhcpOptions]
+
+
+class DescribeDhcpOptionsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    DhcpOptions: Optional[DhcpOptionsList]
+
+
+EgressOnlyInternetGatewayIdList = List[EgressOnlyInternetGatewayId]
+
+
+class DescribeEgressOnlyInternetGatewaysRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    EgressOnlyInternetGatewayIds: Optional[EgressOnlyInternetGatewayIdList]
+    MaxResults: Optional[DescribeEgressOnlyInternetGatewaysMaxResults]
+    NextToken: Optional[String]
+    Filters: Optional[FilterList]
+
+
+EgressOnlyInternetGatewayList = List[EgressOnlyInternetGateway]
+
+
+class DescribeEgressOnlyInternetGatewaysResult(TypedDict, total=False):
+    EgressOnlyInternetGateways: Optional[EgressOnlyInternetGatewayList]
+    NextToken: Optional[String]
+
+
+ElasticGpuIdSet = List[ElasticGpuId]
+
+
+class DescribeElasticGpusRequest(ServiceRequest):
+    ElasticGpuIds: Optional[ElasticGpuIdSet]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[DescribeElasticGpusMaxResults]
+    NextToken: Optional[String]
+
+
+class ElasticGpuHealth(TypedDict, total=False):
+    Status: Optional[ElasticGpuStatus]
+
+
+class ElasticGpus(TypedDict, total=False):
+    ElasticGpuId: Optional[String]
+    AvailabilityZone: Optional[String]
+    ElasticGpuType: Optional[String]
+    ElasticGpuHealth: Optional[ElasticGpuHealth]
+    ElasticGpuState: Optional[ElasticGpuState]
+    InstanceId: Optional[String]
+    Tags: Optional[TagList]
+
+
+ElasticGpuSet = List[ElasticGpus]
+
+
+class DescribeElasticGpusResult(TypedDict, total=False):
+    ElasticGpuSet: Optional[ElasticGpuSet]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+
+
+ExportImageTaskIdList = List[ExportImageTaskId]
+
+
+class DescribeExportImageTasksRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    ExportImageTaskIds: Optional[ExportImageTaskIdList]
+    MaxResults: Optional[DescribeExportImageTasksMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ExportTaskS3Location(TypedDict, total=False):
+    S3Bucket: Optional[String]
+    S3Prefix: Optional[String]
+
+
+class ExportImageTask(TypedDict, total=False):
+    Description: Optional[String]
+    ExportImageTaskId: Optional[String]
+    ImageId: Optional[String]
+    Progress: Optional[String]
+    S3ExportLocation: Optional[ExportTaskS3Location]
+    Status: Optional[String]
+    StatusMessage: Optional[String]
+    Tags: Optional[TagList]
+
+
+ExportImageTaskList = List[ExportImageTask]
+
+
+class DescribeExportImageTasksResult(TypedDict, total=False):
+    ExportImageTasks: Optional[ExportImageTaskList]
+    NextToken: Optional[NextToken]
+
+
+ExportTaskIdStringList = List[ExportTaskId]
+
+
+class DescribeExportTasksRequest(ServiceRequest):
+    Filters: Optional[FilterList]
+    ExportTaskIds: Optional[ExportTaskIdStringList]
+
+
+ExportTaskList = List[ExportTask]
+
+
+class DescribeExportTasksResult(TypedDict, total=False):
+    ExportTasks: Optional[ExportTaskList]
+
+
+FastLaunchImageIdList = List[ImageId]
+
+
+class DescribeFastLaunchImagesRequest(ServiceRequest):
+    ImageIds: Optional[FastLaunchImageIdList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[DescribeFastLaunchImagesRequestMaxResults]
+    NextToken: Optional[NextToken]
+    DryRun: Optional[Boolean]
+
+
+class FastLaunchLaunchTemplateSpecificationResponse(TypedDict, total=False):
+    LaunchTemplateId: Optional[LaunchTemplateId]
+    LaunchTemplateName: Optional[String]
+    Version: Optional[String]
+
+
+class FastLaunchSnapshotConfigurationResponse(TypedDict, total=False):
+    TargetResourceCount: Optional[Integer]
+
+
+class DescribeFastLaunchImagesSuccessItem(TypedDict, total=False):
+    ImageId: Optional[ImageId]
+    ResourceType: Optional[FastLaunchResourceType]
+    SnapshotConfiguration: Optional[FastLaunchSnapshotConfigurationResponse]
+    LaunchTemplate: Optional[FastLaunchLaunchTemplateSpecificationResponse]
+    MaxParallelLaunches: Optional[Integer]
+    OwnerId: Optional[String]
+    State: Optional[FastLaunchStateCode]
+    StateTransitionReason: Optional[String]
+    StateTransitionTime: Optional[MillisecondDateTime]
+
+
+DescribeFastLaunchImagesSuccessSet = List[DescribeFastLaunchImagesSuccessItem]
+
+
+class DescribeFastLaunchImagesResult(TypedDict, total=False):
+    FastLaunchImages: Optional[DescribeFastLaunchImagesSuccessSet]
+    NextToken: Optional[NextToken]
+
+
+class DescribeFastSnapshotRestoreSuccessItem(TypedDict, total=False):
+    SnapshotId: Optional[String]
+    AvailabilityZone: Optional[String]
+    State: Optional[FastSnapshotRestoreStateCode]
+    StateTransitionReason: Optional[String]
+    OwnerId: Optional[String]
+    OwnerAlias: Optional[String]
+    EnablingTime: Optional[MillisecondDateTime]
+    OptimizingTime: Optional[MillisecondDateTime]
+    EnabledTime: Optional[MillisecondDateTime]
+    DisablingTime: Optional[MillisecondDateTime]
+    DisabledTime: Optional[MillisecondDateTime]
+
+
+DescribeFastSnapshotRestoreSuccessSet = List[DescribeFastSnapshotRestoreSuccessItem]
+
+
+class DescribeFastSnapshotRestoresRequest(ServiceRequest):
+    Filters: Optional[FilterList]
+    MaxResults: Optional[DescribeFastSnapshotRestoresMaxResults]
+    NextToken: Optional[NextToken]
+    DryRun: Optional[Boolean]
+
+
+class DescribeFastSnapshotRestoresResult(TypedDict, total=False):
+    FastSnapshotRestores: Optional[DescribeFastSnapshotRestoreSuccessSet]
+    NextToken: Optional[NextToken]
+
+
+class DescribeFleetError(TypedDict, total=False):
+    LaunchTemplateAndOverrides: Optional[LaunchTemplateAndOverridesResponse]
+    Lifecycle: Optional[InstanceLifecycle]
+    ErrorCode: Optional[String]
+    ErrorMessage: Optional[String]
+
+
+class DescribeFleetHistoryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    EventType: Optional[FleetEventType]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+    FleetId: FleetId
+    StartTime: DateTime
+
+
+class EventInformation(TypedDict, total=False):
+    EventDescription: Optional[String]
+    EventSubType: Optional[String]
+    InstanceId: Optional[String]
+
+
+class HistoryRecordEntry(TypedDict, total=False):
+    EventInformation: Optional[EventInformation]
+    EventType: Optional[FleetEventType]
+    Timestamp: Optional[DateTime]
+
+
+HistoryRecordSet = List[HistoryRecordEntry]
+
+
+class DescribeFleetHistoryResult(TypedDict, total=False):
+    HistoryRecords: Optional[HistoryRecordSet]
+    LastEvaluatedTime: Optional[DateTime]
+    NextToken: Optional[String]
+    FleetId: Optional[FleetId]
+    StartTime: Optional[DateTime]
+
+
+class DescribeFleetInstancesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+    FleetId: FleetId
+    Filters: Optional[FilterList]
+
+
+class DescribeFleetInstancesResult(TypedDict, total=False):
+    ActiveInstances: Optional[ActiveInstanceSet]
+    NextToken: Optional[String]
+    FleetId: Optional[FleetId]
+
+
+DescribeFleetsErrorSet = List[DescribeFleetError]
+
+
+class DescribeFleetsInstances(TypedDict, total=False):
+    LaunchTemplateAndOverrides: Optional[LaunchTemplateAndOverridesResponse]
+    Lifecycle: Optional[InstanceLifecycle]
+    InstanceIds: Optional[InstanceIdsSet]
+    InstanceType: Optional[InstanceType]
+    Platform: Optional[PlatformValues]
+
+
+DescribeFleetsInstancesSet = List[DescribeFleetsInstances]
+
+
+class DescribeFleetsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+    FleetIds: Optional[FleetIdSet]
+    Filters: Optional[FilterList]
+
+
+class OnDemandOptions(TypedDict, total=False):
+    AllocationStrategy: Optional[FleetOnDemandAllocationStrategy]
+    CapacityReservationOptions: Optional[CapacityReservationOptions]
+    SingleInstanceType: Optional[Boolean]
+    SingleAvailabilityZone: Optional[Boolean]
+    MinTargetCapacity: Optional[Integer]
+    MaxTotalPrice: Optional[String]
+
+
+class FleetSpotCapacityRebalance(TypedDict, total=False):
+    ReplacementStrategy: Optional[FleetReplacementStrategy]
+    TerminationDelay: Optional[Integer]
+
+
+class FleetSpotMaintenanceStrategies(TypedDict, total=False):
+    CapacityRebalance: Optional[FleetSpotCapacityRebalance]
+
+
+class SpotOptions(TypedDict, total=False):
+    AllocationStrategy: Optional[SpotAllocationStrategy]
+    MaintenanceStrategies: Optional[FleetSpotMaintenanceStrategies]
+    InstanceInterruptionBehavior: Optional[SpotInstanceInterruptionBehavior]
+    InstancePoolsToUseCount: Optional[Integer]
+    SingleInstanceType: Optional[Boolean]
+    SingleAvailabilityZone: Optional[Boolean]
+    MinTargetCapacity: Optional[Integer]
+    MaxTotalPrice: Optional[String]
+
+
+class TargetCapacitySpecification(TypedDict, total=False):
+    TotalTargetCapacity: Optional[Integer]
+    OnDemandTargetCapacity: Optional[Integer]
+    SpotTargetCapacity: Optional[Integer]
+    DefaultTargetCapacityType: Optional[DefaultTargetCapacityType]
+    TargetCapacityUnitType: Optional[TargetCapacityUnitType]
+
+
+FleetLaunchTemplateOverridesList = List[FleetLaunchTemplateOverrides]
+
+
+class FleetLaunchTemplateConfig(TypedDict, total=False):
+    LaunchTemplateSpecification: Optional[FleetLaunchTemplateSpecification]
+    Overrides: Optional[FleetLaunchTemplateOverridesList]
+
+
+FleetLaunchTemplateConfigList = List[FleetLaunchTemplateConfig]
+
+
+class FleetData(TypedDict, total=False):
+    ActivityStatus: Optional[FleetActivityStatus]
+    CreateTime: Optional[DateTime]
+    FleetId: Optional[FleetId]
+    FleetState: Optional[FleetStateCode]
+    ClientToken: Optional[String]
+    ExcessCapacityTerminationPolicy: Optional[FleetExcessCapacityTerminationPolicy]
+    FulfilledCapacity: Optional[Double]
+    FulfilledOnDemandCapacity: Optional[Double]
+    LaunchTemplateConfigs: Optional[FleetLaunchTemplateConfigList]
+    TargetCapacitySpecification: Optional[TargetCapacitySpecification]
+    TerminateInstancesWithExpiration: Optional[Boolean]
+    Type: Optional[FleetType]
+    ValidFrom: Optional[DateTime]
+    ValidUntil: Optional[DateTime]
+    ReplaceUnhealthyInstances: Optional[Boolean]
+    SpotOptions: Optional[SpotOptions]
+    OnDemandOptions: Optional[OnDemandOptions]
+    Tags: Optional[TagList]
+    Errors: Optional[DescribeFleetsErrorSet]
+    Instances: Optional[DescribeFleetsInstancesSet]
+    Context: Optional[String]
+
+
+FleetSet = List[FleetData]
+
+
+class DescribeFleetsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    Fleets: Optional[FleetSet]
+
+
+class DescribeFlowLogsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Filter: Optional[FilterList]
+    FlowLogIds: Optional[FlowLogIdList]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+
+
+class DestinationOptionsResponse(TypedDict, total=False):
+    FileFormat: Optional[DestinationFileFormat]
+    HiveCompatiblePartitions: Optional[Boolean]
+    PerHourPartition: Optional[Boolean]
+
+
+class FlowLog(TypedDict, total=False):
+    CreationTime: Optional[MillisecondDateTime]
+    DeliverLogsErrorMessage: Optional[String]
+    DeliverLogsPermissionArn: Optional[String]
+    DeliverCrossAccountRole: Optional[String]
+    DeliverLogsStatus: Optional[String]
+    FlowLogId: Optional[String]
+    FlowLogStatus: Optional[String]
+    LogGroupName: Optional[String]
+    ResourceId: Optional[String]
+    TrafficType: Optional[TrafficType]
+    LogDestinationType: Optional[LogDestinationType]
+    LogDestination: Optional[String]
+    LogFormat: Optional[String]
+    Tags: Optional[TagList]
+    MaxAggregationInterval: Optional[Integer]
+    DestinationOptions: Optional[DestinationOptionsResponse]
+
+
+FlowLogSet = List[FlowLog]
+
+
+class DescribeFlowLogsResult(TypedDict, total=False):
+    FlowLogs: Optional[FlowLogSet]
+    NextToken: Optional[String]
+
+
+class DescribeFpgaImageAttributeRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    FpgaImageId: FpgaImageId
+    Attribute: FpgaImageAttributeName
+
+
+class ProductCode(TypedDict, total=False):
+    ProductCodeId: Optional[String]
+    ProductCodeType: Optional[ProductCodeValues]
+
+
+ProductCodeList = List[ProductCode]
+
+
+class LoadPermission(TypedDict, total=False):
+    UserId: Optional[String]
+    Group: Optional[PermissionGroup]
+
+
+LoadPermissionList = List[LoadPermission]
+
+
+class FpgaImageAttribute(TypedDict, total=False):
+    FpgaImageId: Optional[String]
+    Name: Optional[String]
+    Description: Optional[String]
+    LoadPermissions: Optional[LoadPermissionList]
+    ProductCodes: Optional[ProductCodeList]
+
+
+class DescribeFpgaImageAttributeResult(TypedDict, total=False):
+    FpgaImageAttribute: Optional[FpgaImageAttribute]
+
+
+OwnerStringList = List[String]
+FpgaImageIdList = List[FpgaImageId]
+
+
+class DescribeFpgaImagesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    FpgaImageIds: Optional[FpgaImageIdList]
+    Owners: Optional[OwnerStringList]
+    Filters: Optional[FilterList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[DescribeFpgaImagesMaxResults]
+
+
+InstanceTypesList = List[String]
+
+
+class FpgaImageState(TypedDict, total=False):
+    Code: Optional[FpgaImageStateCode]
+    Message: Optional[String]
+
+
+class PciId(TypedDict, total=False):
+    DeviceId: Optional[String]
+    VendorId: Optional[String]
+    SubsystemId: Optional[String]
+    SubsystemVendorId: Optional[String]
+
+
+class FpgaImage(TypedDict, total=False):
+    FpgaImageId: Optional[String]
+    FpgaImageGlobalId: Optional[String]
+    Name: Optional[String]
+    Description: Optional[String]
+
ShellVersion: Optional[String] + PciId: Optional[PciId] + State: Optional[FpgaImageState] + CreateTime: Optional[DateTime] + UpdateTime: Optional[DateTime] + OwnerId: Optional[String] + OwnerAlias: Optional[String] + ProductCodes: Optional[ProductCodeList] + Tags: Optional[TagList] + Public: Optional[Boolean] + DataRetentionSupport: Optional[Boolean] + InstanceTypes: Optional[InstanceTypesList] + + +FpgaImageList = List[FpgaImage] + + +class DescribeFpgaImagesResult(TypedDict, total=False): + FpgaImages: Optional[FpgaImageList] + NextToken: Optional[NextToken] + + +class DescribeHostReservationOfferingsRequest(ServiceRequest): + Filter: Optional[FilterList] + MaxDuration: Optional[Integer] + MaxResults: Optional[DescribeHostReservationsMaxResults] + MinDuration: Optional[Integer] + NextToken: Optional[String] + OfferingId: Optional[OfferingId] + + +class HostOffering(TypedDict, total=False): + CurrencyCode: Optional[CurrencyCodeValues] + Duration: Optional[Integer] + HourlyPrice: Optional[String] + InstanceFamily: Optional[String] + OfferingId: Optional[OfferingId] + PaymentOption: Optional[PaymentOption] + UpfrontPrice: Optional[String] + + +HostOfferingSet = List[HostOffering] + + +class DescribeHostReservationOfferingsResult(TypedDict, total=False): + NextToken: Optional[String] + OfferingSet: Optional[HostOfferingSet] + + +HostReservationIdSet = List[HostReservationId] + + +class DescribeHostReservationsRequest(ServiceRequest): + Filter: Optional[FilterList] + HostReservationIdSet: Optional[HostReservationIdSet] + MaxResults: Optional[Integer] + NextToken: Optional[String] + + +ResponseHostIdSet = List[String] + + +class HostReservation(TypedDict, total=False): + Count: Optional[Integer] + CurrencyCode: Optional[CurrencyCodeValues] + Duration: Optional[Integer] + End: Optional[DateTime] + HostIdSet: Optional[ResponseHostIdSet] + HostReservationId: Optional[HostReservationId] + HourlyPrice: Optional[String] + InstanceFamily: Optional[String] + OfferingId: Optional[OfferingId] + PaymentOption: Optional[PaymentOption] + Start: Optional[DateTime] + State: Optional[ReservationState] + UpfrontPrice: Optional[String] + Tags: Optional[TagList] + + +HostReservationSet = List[HostReservation] + + +class DescribeHostReservationsResult(TypedDict, total=False): + HostReservationSet: Optional[HostReservationSet] + NextToken: Optional[String] + + +RequestHostIdList = List[DedicatedHostId] + + +class DescribeHostsRequest(ServiceRequest): + HostIds: Optional[RequestHostIdList] + NextToken: Optional[String] + MaxResults: Optional[Integer] + Filter: Optional[FilterList] + + +class HostInstance(TypedDict, total=False): + InstanceId: Optional[String] + InstanceType: Optional[String] + OwnerId: Optional[String] + + +HostInstanceList = List[HostInstance] + + +class HostProperties(TypedDict, total=False): + Cores: Optional[Integer] + InstanceType: Optional[String] + InstanceFamily: Optional[String] + Sockets: Optional[Integer] + TotalVCpus: Optional[Integer] + + +class Host(TypedDict, total=False): + AutoPlacement: Optional[AutoPlacement] + AvailabilityZone: Optional[String] + AvailableCapacity: Optional[AvailableCapacity] + ClientToken: Optional[String] + HostId: Optional[String] + HostProperties: Optional[HostProperties] + HostReservationId: Optional[String] + Instances: Optional[HostInstanceList] + State: Optional[AllocationState] + AllocationTime: Optional[DateTime] + ReleaseTime: Optional[DateTime] + Tags: Optional[TagList] + HostRecovery: Optional[HostRecovery] + AllowsMultipleInstanceTypes: 
Optional[AllowsMultipleInstanceTypes] + OwnerId: Optional[String] + AvailabilityZoneId: Optional[String] + MemberOfServiceLinkedResourceGroup: Optional[Boolean] + OutpostArn: Optional[String] + HostMaintenance: Optional[HostMaintenance] + AssetId: Optional[AssetId] + + +HostList = List[Host] + + +class DescribeHostsResult(TypedDict, total=False): + Hosts: Optional[HostList] + NextToken: Optional[String] + + +class DescribeIamInstanceProfileAssociationsRequest(ServiceRequest): + AssociationIds: Optional[AssociationIdList] + Filters: Optional[FilterList] + MaxResults: Optional[DescribeIamInstanceProfileAssociationsMaxResults] + NextToken: Optional[NextToken] + + +IamInstanceProfileAssociationSet = List[IamInstanceProfileAssociation] + + +class DescribeIamInstanceProfileAssociationsResult(TypedDict, total=False): + IamInstanceProfileAssociations: Optional[IamInstanceProfileAssociationSet] + NextToken: Optional[NextToken] + + +class DescribeIdFormatRequest(ServiceRequest): + Resource: Optional[String] + + +class DescribeIdFormatResult(TypedDict, total=False): + Statuses: Optional[IdFormatList] + + +class DescribeIdentityIdFormatRequest(ServiceRequest): + Resource: Optional[String] + PrincipalArn: String + + +class DescribeIdentityIdFormatResult(TypedDict, total=False): + Statuses: Optional[IdFormatList] + + +class DescribeImageAttributeRequest(ServiceRequest): + Attribute: ImageAttributeName + ImageId: ImageId + DryRun: Optional[Boolean] + + +ImageIdStringList = List[ImageId] +ExecutableByStringList = List[String] + + +class DescribeImagesRequest(ServiceRequest): + ExecutableUsers: Optional[ExecutableByStringList] + ImageIds: Optional[ImageIdStringList] + Owners: Optional[OwnerStringList] + IncludeDeprecated: Optional[Boolean] + IncludeDisabled: Optional[Boolean] + MaxResults: Optional[Integer] + NextToken: Optional[String] + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + + +class Image(TypedDict, total=False): + PlatformDetails: Optional[String] + UsageOperation: Optional[String] + BlockDeviceMappings: Optional[BlockDeviceMappingList] + Description: Optional[String] + EnaSupport: Optional[Boolean] + Hypervisor: Optional[HypervisorType] + ImageOwnerAlias: Optional[String] + Name: Optional[String] + RootDeviceName: Optional[String] + RootDeviceType: Optional[DeviceType] + SriovNetSupport: Optional[String] + StateReason: Optional[StateReason] + Tags: Optional[TagList] + VirtualizationType: Optional[VirtualizationType] + BootMode: Optional[BootModeValues] + TpmSupport: Optional[TpmSupportValues] + DeprecationTime: Optional[String] + ImdsSupport: Optional[ImdsSupportValues] + SourceInstanceId: Optional[String] + DeregistrationProtection: Optional[String] + LastLaunchedTime: Optional[String] + ImageAllowed: Optional[Boolean] + SourceImageId: Optional[String] + SourceImageRegion: Optional[String] + ImageId: Optional[String] + ImageLocation: Optional[String] + State: Optional[ImageState] + OwnerId: Optional[String] + CreationDate: Optional[String] + Public: Optional[Boolean] + ProductCodes: Optional[ProductCodeList] + Architecture: Optional[ArchitectureValues] + ImageType: Optional[ImageTypeValues] + KernelId: Optional[String] + RamdiskId: Optional[String] + Platform: Optional[PlatformValues] + + +ImageList = List[Image] + + +class DescribeImagesResult(TypedDict, total=False): + NextToken: Optional[String] + Images: Optional[ImageList] + + +ImportTaskIdList = List[ImportImageTaskId] + + +class DescribeImportImageTasksRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: 
Optional[FilterList] + ImportTaskIds: Optional[ImportTaskIdList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + + +class ImportImageLicenseConfigurationResponse(TypedDict, total=False): + LicenseConfigurationArn: Optional[String] + + +ImportImageLicenseSpecificationListResponse = List[ImportImageLicenseConfigurationResponse] + + +class UserBucketDetails(TypedDict, total=False): + S3Bucket: Optional[String] + S3Key: Optional[String] + + +class SnapshotDetail(TypedDict, total=False): + Description: Optional[String] + DeviceName: Optional[String] + DiskImageSize: Optional[Double] + Format: Optional[String] + Progress: Optional[String] + SnapshotId: Optional[String] + Status: Optional[String] + StatusMessage: Optional[String] + Url: Optional[SensitiveUrl] + UserBucket: Optional[UserBucketDetails] + + +SnapshotDetailList = List[SnapshotDetail] + + +class ImportImageTask(TypedDict, total=False): + Architecture: Optional[String] + Description: Optional[String] + Encrypted: Optional[Boolean] + Hypervisor: Optional[String] + ImageId: Optional[String] + ImportTaskId: Optional[String] + KmsKeyId: Optional[String] + LicenseType: Optional[String] + Platform: Optional[String] + Progress: Optional[String] + SnapshotDetails: Optional[SnapshotDetailList] + Status: Optional[String] + StatusMessage: Optional[String] + Tags: Optional[TagList] + LicenseSpecifications: Optional[ImportImageLicenseSpecificationListResponse] + UsageOperation: Optional[String] + BootMode: Optional[BootModeValues] + + +ImportImageTaskList = List[ImportImageTask] + + +class DescribeImportImageTasksResult(TypedDict, total=False): + ImportImageTasks: Optional[ImportImageTaskList] + NextToken: Optional[String] + + +ImportSnapshotTaskIdList = List[ImportSnapshotTaskId] + + +class DescribeImportSnapshotTasksRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + ImportTaskIds: Optional[ImportSnapshotTaskIdList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + + +class SnapshotTaskDetail(TypedDict, total=False): + Description: Optional[String] + DiskImageSize: Optional[Double] + Encrypted: Optional[Boolean] + Format: Optional[String] + KmsKeyId: Optional[String] + Progress: Optional[String] + SnapshotId: Optional[String] + Status: Optional[String] + StatusMessage: Optional[String] + Url: Optional[SensitiveUrl] + UserBucket: Optional[UserBucketDetails] + + +class ImportSnapshotTask(TypedDict, total=False): + Description: Optional[String] + ImportTaskId: Optional[String] + SnapshotTaskDetail: Optional[SnapshotTaskDetail] + Tags: Optional[TagList] + + +ImportSnapshotTaskList = List[ImportSnapshotTask] + + +class DescribeImportSnapshotTasksResult(TypedDict, total=False): + ImportSnapshotTasks: Optional[ImportSnapshotTaskList] + NextToken: Optional[String] + + +class DescribeInstanceAttributeRequest(ServiceRequest): + DryRun: Optional[Boolean] + InstanceId: InstanceId + Attribute: InstanceAttributeName + + +class DescribeInstanceConnectEndpointsRequest(ServiceRequest): + DryRun: Optional[Boolean] + MaxResults: Optional[InstanceConnectEndpointMaxResults] + NextToken: Optional[NextToken] + Filters: Optional[FilterList] + InstanceConnectEndpointIds: Optional[ValueStringList] + + +InstanceConnectEndpointSet = List[Ec2InstanceConnectEndpoint] + + +class DescribeInstanceConnectEndpointsResult(TypedDict, total=False): + InstanceConnectEndpoints: Optional[InstanceConnectEndpointSet] + NextToken: Optional[NextToken] + + +class DescribeInstanceCreditSpecificationsRequest(ServiceRequest): + 
DryRun: Optional[Boolean] + Filters: Optional[FilterList] + InstanceIds: Optional[InstanceIdStringList] + MaxResults: Optional[DescribeInstanceCreditSpecificationsMaxResults] + NextToken: Optional[String] + + +class InstanceCreditSpecification(TypedDict, total=False): + InstanceId: Optional[String] + CpuCredits: Optional[String] + + +InstanceCreditSpecificationList = List[InstanceCreditSpecification] + + +class DescribeInstanceCreditSpecificationsResult(TypedDict, total=False): + InstanceCreditSpecifications: Optional[InstanceCreditSpecificationList] + NextToken: Optional[String] + + +class DescribeInstanceEventNotificationAttributesRequest(ServiceRequest): + DryRun: Optional[Boolean] + + +class DescribeInstanceEventNotificationAttributesResult(TypedDict, total=False): + InstanceTagAttribute: Optional[InstanceTagNotificationAttribute] + + +InstanceEventWindowIdSet = List[InstanceEventWindowId] + + +class DescribeInstanceEventWindowsRequest(ServiceRequest): + DryRun: Optional[Boolean] + InstanceEventWindowIds: Optional[InstanceEventWindowIdSet] + Filters: Optional[FilterList] + MaxResults: Optional[ResultRange] + NextToken: Optional[String] + + +InstanceEventWindowSet = List[InstanceEventWindow] + + +class DescribeInstanceEventWindowsResult(TypedDict, total=False): + InstanceEventWindows: Optional[InstanceEventWindowSet] + NextToken: Optional[String] + + +class DescribeInstanceImageMetadataRequest(ServiceRequest): + Filters: Optional[FilterList] + InstanceIds: Optional[InstanceIdStringList] + MaxResults: Optional[DescribeInstanceImageMetadataMaxResults] + NextToken: Optional[String] + DryRun: Optional[Boolean] + + +class ImageMetadata(TypedDict, total=False): + ImageId: Optional[ImageId] + Name: Optional[String] + OwnerId: Optional[String] + State: Optional[ImageState] + ImageOwnerAlias: Optional[String] + CreationDate: Optional[String] + DeprecationTime: Optional[String] + ImageAllowed: Optional[Boolean] + IsPublic: Optional[Boolean] + + +class InstanceState(TypedDict, total=False): + Code: Optional[Integer] + Name: Optional[InstanceStateName] + + +class InstanceImageMetadata(TypedDict, total=False): + InstanceId: Optional[InstanceId] + InstanceType: Optional[InstanceType] + LaunchTime: Optional[MillisecondDateTime] + AvailabilityZone: Optional[String] + ZoneId: Optional[String] + State: Optional[InstanceState] + OwnerId: Optional[String] + Tags: Optional[TagList] + ImageMetadata: Optional[ImageMetadata] + Operator: Optional[OperatorResponse] + + +InstanceImageMetadataList = List[InstanceImageMetadata] + + +class DescribeInstanceImageMetadataResult(TypedDict, total=False): + InstanceImageMetadata: Optional[InstanceImageMetadataList] + NextToken: Optional[String] + + +class DescribeInstanceStatusRequest(ServiceRequest): + InstanceIds: Optional[InstanceIdStringList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + IncludeAllInstances: Optional[Boolean] + + +class EbsStatusDetails(TypedDict, total=False): + ImpairedSince: Optional[MillisecondDateTime] + Name: Optional[StatusName] + Status: Optional[StatusType] + + +EbsStatusDetailsList = List[EbsStatusDetails] + + +class EbsStatusSummary(TypedDict, total=False): + Details: Optional[EbsStatusDetailsList] + Status: Optional[SummaryStatus] + + +class InstanceStatusDetails(TypedDict, total=False): + ImpairedSince: Optional[DateTime] + Name: Optional[StatusName] + Status: Optional[StatusType] + + +InstanceStatusDetailsList = List[InstanceStatusDetails] + + +class 
InstanceStatusSummary(TypedDict, total=False): + Details: Optional[InstanceStatusDetailsList] + Status: Optional[SummaryStatus] + + +class InstanceStatusEvent(TypedDict, total=False): + InstanceEventId: Optional[InstanceEventId] + Code: Optional[EventCode] + Description: Optional[String] + NotAfter: Optional[DateTime] + NotBefore: Optional[DateTime] + NotBeforeDeadline: Optional[DateTime] + + +InstanceStatusEventList = List[InstanceStatusEvent] + + +class InstanceStatus(TypedDict, total=False): + AvailabilityZone: Optional[String] + OutpostArn: Optional[String] + Operator: Optional[OperatorResponse] + Events: Optional[InstanceStatusEventList] + InstanceId: Optional[String] + InstanceState: Optional[InstanceState] + InstanceStatus: Optional[InstanceStatusSummary] + SystemStatus: Optional[InstanceStatusSummary] + AttachedEbsStatus: Optional[EbsStatusSummary] + + +InstanceStatusList = List[InstanceStatus] + + +class DescribeInstanceStatusResult(TypedDict, total=False): + InstanceStatuses: Optional[InstanceStatusList] + NextToken: Optional[String] + + +DescribeInstanceTopologyGroupNameSet = List[PlacementGroupName] +DescribeInstanceTopologyInstanceIdSet = List[InstanceId] + + +class DescribeInstanceTopologyRequest(ServiceRequest): + DryRun: Optional[Boolean] + NextToken: Optional[String] + MaxResults: Optional[DescribeInstanceTopologyMaxResults] + InstanceIds: Optional[DescribeInstanceTopologyInstanceIdSet] + GroupNames: Optional[DescribeInstanceTopologyGroupNameSet] + Filters: Optional[FilterList] + + +NetworkNodesList = List[String] + + +class InstanceTopology(TypedDict, total=False): + InstanceId: Optional[String] + InstanceType: Optional[String] + GroupName: Optional[String] + NetworkNodes: Optional[NetworkNodesList] + AvailabilityZone: Optional[String] + ZoneId: Optional[String] + + +InstanceSet = List[InstanceTopology] + + +class DescribeInstanceTopologyResult(TypedDict, total=False): + Instances: Optional[InstanceSet] + NextToken: Optional[String] + + +class DescribeInstanceTypeOfferingsRequest(ServiceRequest): + DryRun: Optional[Boolean] + LocationType: Optional[LocationType] + Filters: Optional[FilterList] + MaxResults: Optional[DITOMaxResults] + NextToken: Optional[NextToken] + + +class InstanceTypeOffering(TypedDict, total=False): + InstanceType: Optional[InstanceType] + LocationType: Optional[LocationType] + Location: Optional[Location] + + +InstanceTypeOfferingsList = List[InstanceTypeOffering] + + +class DescribeInstanceTypeOfferingsResult(TypedDict, total=False): + InstanceTypeOfferings: Optional[InstanceTypeOfferingsList] + NextToken: Optional[NextToken] + + +RequestInstanceTypeList = List[InstanceType] + + +class DescribeInstanceTypesRequest(ServiceRequest): + DryRun: Optional[Boolean] + InstanceTypes: Optional[RequestInstanceTypeList] + Filters: Optional[FilterList] + MaxResults: Optional[DITMaxResults] + NextToken: Optional[NextToken] + + +class NeuronDeviceMemoryInfo(TypedDict, total=False): + SizeInMiB: Optional[NeuronDeviceMemorySize] + + +class NeuronDeviceCoreInfo(TypedDict, total=False): + Count: Optional[NeuronDeviceCoreCount] + Version: Optional[NeuronDeviceCoreVersion] + + +class NeuronDeviceInfo(TypedDict, total=False): + Count: Optional[NeuronDeviceCount] + Name: Optional[NeuronDeviceName] + CoreInfo: Optional[NeuronDeviceCoreInfo] + MemoryInfo: Optional[NeuronDeviceMemoryInfo] + + +NeuronDeviceInfoList = List[NeuronDeviceInfo] + + +class NeuronInfo(TypedDict, total=False): + NeuronDevices: Optional[NeuronDeviceInfoList] + TotalNeuronDeviceMemoryInMiB: 
Optional[TotalNeuronMemory] + + +class MediaDeviceMemoryInfo(TypedDict, total=False): + SizeInMiB: Optional[MediaDeviceMemorySize] + + +class MediaDeviceInfo(TypedDict, total=False): + Count: Optional[MediaDeviceCount] + Name: Optional[MediaDeviceName] + Manufacturer: Optional[MediaDeviceManufacturerName] + MemoryInfo: Optional[MediaDeviceMemoryInfo] + + +MediaDeviceInfoList = List[MediaDeviceInfo] + + +class MediaAcceleratorInfo(TypedDict, total=False): + Accelerators: Optional[MediaDeviceInfoList] + TotalMediaMemoryInMiB: Optional[TotalMediaMemory] + + +NitroTpmSupportedVersionsList = List[NitroTpmSupportedVersionType] + + +class NitroTpmInfo(TypedDict, total=False): + SupportedVersions: Optional[NitroTpmSupportedVersionsList] + + +class InferenceDeviceMemoryInfo(TypedDict, total=False): + SizeInMiB: Optional[InferenceDeviceMemorySize] + + +class InferenceDeviceInfo(TypedDict, total=False): + Count: Optional[InferenceDeviceCount] + Name: Optional[InferenceDeviceName] + Manufacturer: Optional[InferenceDeviceManufacturerName] + MemoryInfo: Optional[InferenceDeviceMemoryInfo] + + +InferenceDeviceInfoList = List[InferenceDeviceInfo] + + +class InferenceAcceleratorInfo(TypedDict, total=False): + Accelerators: Optional[InferenceDeviceInfoList] + TotalInferenceMemoryInMiB: Optional[totalInferenceMemory] + + +PlacementGroupStrategyList = List[PlacementGroupStrategy] + + +class PlacementGroupInfo(TypedDict, total=False): + SupportedStrategies: Optional[PlacementGroupStrategyList] + + +class FpgaDeviceMemoryInfo(TypedDict, total=False): + SizeInMiB: Optional[FpgaDeviceMemorySize] + + +class FpgaDeviceInfo(TypedDict, total=False): + Name: Optional[FpgaDeviceName] + Manufacturer: Optional[FpgaDeviceManufacturerName] + Count: Optional[FpgaDeviceCount] + MemoryInfo: Optional[FpgaDeviceMemoryInfo] + + +FpgaDeviceInfoList = List[FpgaDeviceInfo] + + +class FpgaInfo(TypedDict, total=False): + Fpgas: Optional[FpgaDeviceInfoList] + TotalFpgaMemoryInMiB: Optional[totalFpgaMemory] + + +class GpuDeviceMemoryInfo(TypedDict, total=False): + SizeInMiB: Optional[GpuDeviceMemorySize] + + +class GpuDeviceInfo(TypedDict, total=False): + Name: Optional[GpuDeviceName] + Manufacturer: Optional[GpuDeviceManufacturerName] + Count: Optional[GpuDeviceCount] + MemoryInfo: Optional[GpuDeviceMemoryInfo] + + +GpuDeviceInfoList = List[GpuDeviceInfo] + + +class GpuInfo(TypedDict, total=False): + Gpus: Optional[GpuDeviceInfoList] + TotalGpuMemoryInMiB: Optional[totalGpuMemory] + + +class EfaInfo(TypedDict, total=False): + MaximumEfaInterfaces: Optional[MaximumEfaInterfaces] + + +class NetworkCardInfo(TypedDict, total=False): + NetworkCardIndex: Optional[NetworkCardIndex] + NetworkPerformance: Optional[NetworkPerformance] + MaximumNetworkInterfaces: Optional[MaxNetworkInterfaces] + BaselineBandwidthInGbps: Optional[BaselineBandwidthInGbps] + PeakBandwidthInGbps: Optional[PeakBandwidthInGbps] + DefaultEnaQueueCountPerInterface: Optional[DefaultEnaQueueCountPerInterface] + MaximumEnaQueueCount: Optional[MaximumEnaQueueCount] + MaximumEnaQueueCountPerInterface: Optional[MaximumEnaQueueCountPerInterface] + + +NetworkCardInfoList = List[NetworkCardInfo] + + +class NetworkInfo(TypedDict, total=False): + NetworkPerformance: Optional[NetworkPerformance] + MaximumNetworkInterfaces: Optional[MaxNetworkInterfaces] + MaximumNetworkCards: Optional[MaximumNetworkCards] + DefaultNetworkCardIndex: Optional[DefaultNetworkCardIndex] + NetworkCards: Optional[NetworkCardInfoList] + Ipv4AddressesPerInterface: Optional[MaxIpv4AddrPerInterface] + 
Ipv6AddressesPerInterface: Optional[MaxIpv6AddrPerInterface] + Ipv6Supported: Optional[Ipv6Flag] + EnaSupport: Optional[EnaSupport] + EfaSupported: Optional[EfaSupportedFlag] + EfaInfo: Optional[EfaInfo] + EncryptionInTransitSupported: Optional[EncryptionInTransitSupported] + EnaSrdSupported: Optional[EnaSrdSupported] + BandwidthWeightings: Optional[BandwidthWeightingTypeList] + FlexibleEnaQueuesSupport: Optional[FlexibleEnaQueuesSupport] + + +class EbsOptimizedInfo(TypedDict, total=False): + BaselineBandwidthInMbps: Optional[BaselineBandwidthInMbps] + BaselineThroughputInMBps: Optional[BaselineThroughputInMBps] + BaselineIops: Optional[BaselineIops] + MaximumBandwidthInMbps: Optional[MaximumBandwidthInMbps] + MaximumThroughputInMBps: Optional[MaximumThroughputInMBps] + MaximumIops: Optional[MaximumIops] + + +class EbsInfo(TypedDict, total=False): + EbsOptimizedSupport: Optional[EbsOptimizedSupport] + EncryptionSupport: Optional[EbsEncryptionSupport] + EbsOptimizedInfo: Optional[EbsOptimizedInfo] + NvmeSupport: Optional[EbsNvmeSupport] + + +DiskSize = int + + +class DiskInfo(TypedDict, total=False): + SizeInGB: Optional[DiskSize] + Count: Optional[DiskCount] + Type: Optional[DiskType] + + +DiskInfoList = List[DiskInfo] + + +class InstanceStorageInfo(TypedDict, total=False): + TotalSizeInGB: Optional[DiskSize] + Disks: Optional[DiskInfoList] + NvmeSupport: Optional[EphemeralNvmeSupport] + EncryptionSupport: Optional[InstanceStorageEncryptionSupport] + + +MemorySize = int + + +class MemoryInfo(TypedDict, total=False): + SizeInMiB: Optional[MemorySize] + + +ThreadsPerCoreList = List[ThreadsPerCore] + + +class VCpuInfo(TypedDict, total=False): + DefaultVCpus: Optional[VCpuCount] + DefaultCores: Optional[CoreCount] + DefaultThreadsPerCore: Optional[ThreadsPerCore] + ValidCores: Optional[CoreCountList] + ValidThreadsPerCore: Optional[ThreadsPerCoreList] + + +SupportedAdditionalProcessorFeatureList = List[SupportedAdditionalProcessorFeature] + + +class ProcessorInfo(TypedDict, total=False): + SupportedArchitectures: Optional[ArchitectureTypeList] + SustainedClockSpeedInGhz: Optional[ProcessorSustainedClockSpeed] + SupportedFeatures: Optional[SupportedAdditionalProcessorFeatureList] + Manufacturer: Optional[CpuManufacturerName] + + +VirtualizationTypeList = List[VirtualizationType] +RootDeviceTypeList = List[RootDeviceType] +UsageClassTypeList = List[UsageClassType] + + +class InstanceTypeInfo(TypedDict, total=False): + InstanceType: Optional[InstanceType] + CurrentGeneration: Optional[CurrentGenerationFlag] + FreeTierEligible: Optional[FreeTierEligibleFlag] + SupportedUsageClasses: Optional[UsageClassTypeList] + SupportedRootDeviceTypes: Optional[RootDeviceTypeList] + SupportedVirtualizationTypes: Optional[VirtualizationTypeList] + BareMetal: Optional[BareMetalFlag] + Hypervisor: Optional[InstanceTypeHypervisor] + ProcessorInfo: Optional[ProcessorInfo] + VCpuInfo: Optional[VCpuInfo] + MemoryInfo: Optional[MemoryInfo] + InstanceStorageSupported: Optional[InstanceStorageFlag] + InstanceStorageInfo: Optional[InstanceStorageInfo] + EbsInfo: Optional[EbsInfo] + NetworkInfo: Optional[NetworkInfo] + GpuInfo: Optional[GpuInfo] + FpgaInfo: Optional[FpgaInfo] + PlacementGroupInfo: Optional[PlacementGroupInfo] + InferenceAcceleratorInfo: Optional[InferenceAcceleratorInfo] + HibernationSupported: Optional[HibernationFlag] + BurstablePerformanceSupported: Optional[BurstablePerformanceFlag] + DedicatedHostsSupported: Optional[DedicatedHostFlag] + AutoRecoverySupported: Optional[AutoRecoveryFlag] + 
SupportedBootModes: Optional[BootModeTypeList] + NitroEnclavesSupport: Optional[NitroEnclavesSupport] + NitroTpmSupport: Optional[NitroTpmSupport] + NitroTpmInfo: Optional[NitroTpmInfo] + MediaAcceleratorInfo: Optional[MediaAcceleratorInfo] + NeuronInfo: Optional[NeuronInfo] + PhcSupport: Optional[PhcSupport] + RebootMigrationSupport: Optional[RebootMigrationSupport] + + +InstanceTypeInfoList = List[InstanceTypeInfo] + + +class DescribeInstanceTypesResult(TypedDict, total=False): + InstanceTypes: Optional[InstanceTypeInfoList] + NextToken: Optional[NextToken] + + +class DescribeInstancesRequest(ServiceRequest): + InstanceIds: Optional[InstanceIdStringList] + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + NextToken: Optional[String] + MaxResults: Optional[Integer] + + +class Monitoring(TypedDict, total=False): + State: Optional[MonitoringState] + + +class InstanceNetworkPerformanceOptions(TypedDict, total=False): + BandwidthWeighting: Optional[InstanceBandwidthWeighting] + + +class InstanceMaintenanceOptions(TypedDict, total=False): + AutoRecovery: Optional[InstanceAutoRecoveryState] + RebootMigration: Optional[InstanceRebootMigrationState] + + +class PrivateDnsNameOptionsResponse(TypedDict, total=False): + HostnameType: Optional[HostnameType] + EnableResourceNameDnsARecord: Optional[Boolean] + EnableResourceNameDnsAAAARecord: Optional[Boolean] + + +class EnclaveOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + + +class InstanceMetadataOptionsResponse(TypedDict, total=False): + State: Optional[InstanceMetadataOptionsState] + HttpTokens: Optional[HttpTokensState] + HttpPutResponseHopLimit: Optional[Integer] + HttpEndpoint: Optional[InstanceMetadataEndpointState] + HttpProtocolIpv6: Optional[InstanceMetadataProtocolState] + InstanceMetadataTags: Optional[InstanceMetadataTagsState] + + +class LicenseConfiguration(TypedDict, total=False): + LicenseConfigurationArn: Optional[String] + + +LicenseList = List[LicenseConfiguration] + + +class HibernationOptions(TypedDict, total=False): + Configured: Optional[Boolean] + + +class InstanceIpv6Prefix(TypedDict, total=False): + Ipv6Prefix: Optional[String] + + +InstanceIpv6PrefixList = List[InstanceIpv6Prefix] + + +class InstanceIpv4Prefix(TypedDict, total=False): + Ipv4Prefix: Optional[String] + + +InstanceIpv4PrefixList = List[InstanceIpv4Prefix] + + +class InstanceNetworkInterfaceAssociation(TypedDict, total=False): + CarrierIp: Optional[String] + CustomerOwnedIp: Optional[String] + IpOwnerId: Optional[String] + PublicDnsName: Optional[String] + PublicIp: Optional[String] + + +class InstancePrivateIpAddress(TypedDict, total=False): + Association: Optional[InstanceNetworkInterfaceAssociation] + Primary: Optional[Boolean] + PrivateDnsName: Optional[String] + PrivateIpAddress: Optional[String] + + +InstancePrivateIpAddressList = List[InstancePrivateIpAddress] + + +class InstanceAttachmentEnaSrdUdpSpecification(TypedDict, total=False): + EnaSrdUdpEnabled: Optional[Boolean] + + +class InstanceAttachmentEnaSrdSpecification(TypedDict, total=False): + EnaSrdEnabled: Optional[Boolean] + EnaSrdUdpSpecification: Optional[InstanceAttachmentEnaSrdUdpSpecification] + + +class InstanceNetworkInterfaceAttachment(TypedDict, total=False): + AttachTime: Optional[DateTime] + AttachmentId: Optional[String] + DeleteOnTermination: Optional[Boolean] + DeviceIndex: Optional[Integer] + Status: Optional[AttachmentStatus] + NetworkCardIndex: Optional[Integer] + EnaSrdSpecification: Optional[InstanceAttachmentEnaSrdSpecification] + EnaQueueCount: 
Optional[Integer] + + +class InstanceNetworkInterface(TypedDict, total=False): + Association: Optional[InstanceNetworkInterfaceAssociation] + Attachment: Optional[InstanceNetworkInterfaceAttachment] + Description: Optional[String] + Groups: Optional[GroupIdentifierList] + Ipv6Addresses: Optional[InstanceIpv6AddressList] + MacAddress: Optional[String] + NetworkInterfaceId: Optional[String] + OwnerId: Optional[String] + PrivateDnsName: Optional[String] + PrivateIpAddress: Optional[String] + PrivateIpAddresses: Optional[InstancePrivateIpAddressList] + SourceDestCheck: Optional[Boolean] + Status: Optional[NetworkInterfaceStatus] + SubnetId: Optional[String] + VpcId: Optional[String] + InterfaceType: Optional[String] + Ipv4Prefixes: Optional[InstanceIpv4PrefixList] + Ipv6Prefixes: Optional[InstanceIpv6PrefixList] + ConnectionTrackingConfiguration: Optional[ConnectionTrackingSpecificationResponse] + Operator: Optional[OperatorResponse] + + +InstanceNetworkInterfaceList = List[InstanceNetworkInterface] + + +class ElasticInferenceAcceleratorAssociation(TypedDict, total=False): + ElasticInferenceAcceleratorArn: Optional[String] + ElasticInferenceAcceleratorAssociationId: Optional[String] + ElasticInferenceAcceleratorAssociationState: Optional[String] + ElasticInferenceAcceleratorAssociationTime: Optional[DateTime] + + +ElasticInferenceAcceleratorAssociationList = List[ElasticInferenceAcceleratorAssociation] + + +class ElasticGpuAssociation(TypedDict, total=False): + ElasticGpuId: Optional[ElasticGpuId] + ElasticGpuAssociationId: Optional[String] + ElasticGpuAssociationState: Optional[String] + ElasticGpuAssociationTime: Optional[String] + + +ElasticGpuAssociationList = List[ElasticGpuAssociation] + + +class EbsInstanceBlockDevice(TypedDict, total=False): + AttachTime: Optional[DateTime] + DeleteOnTermination: Optional[Boolean] + Status: Optional[AttachmentStatus] + VolumeId: Optional[String] + AssociatedResource: Optional[String] + VolumeOwnerId: Optional[String] + Operator: Optional[OperatorResponse] + + +class InstanceBlockDeviceMapping(TypedDict, total=False): + DeviceName: Optional[String] + Ebs: Optional[EbsInstanceBlockDevice] + + +InstanceBlockDeviceMappingList = List[InstanceBlockDeviceMapping] + + +class Instance(TypedDict, total=False): + Architecture: Optional[ArchitectureValues] + BlockDeviceMappings: Optional[InstanceBlockDeviceMappingList] + ClientToken: Optional[String] + EbsOptimized: Optional[Boolean] + EnaSupport: Optional[Boolean] + Hypervisor: Optional[HypervisorType] + IamInstanceProfile: Optional[IamInstanceProfile] + InstanceLifecycle: Optional[InstanceLifecycleType] + ElasticGpuAssociations: Optional[ElasticGpuAssociationList] + ElasticInferenceAcceleratorAssociations: Optional[ElasticInferenceAcceleratorAssociationList] + NetworkInterfaces: Optional[InstanceNetworkInterfaceList] + OutpostArn: Optional[String] + RootDeviceName: Optional[String] + RootDeviceType: Optional[DeviceType] + SecurityGroups: Optional[GroupIdentifierList] + SourceDestCheck: Optional[Boolean] + SpotInstanceRequestId: Optional[String] + SriovNetSupport: Optional[String] + StateReason: Optional[StateReason] + Tags: Optional[TagList] + VirtualizationType: Optional[VirtualizationType] + CpuOptions: Optional[CpuOptions] + CapacityReservationId: Optional[String] + CapacityReservationSpecification: Optional[CapacityReservationSpecificationResponse] + HibernationOptions: Optional[HibernationOptions] + Licenses: Optional[LicenseList] + MetadataOptions: Optional[InstanceMetadataOptionsResponse] + 
EnclaveOptions: Optional[EnclaveOptions] + BootMode: Optional[BootModeValues] + PlatformDetails: Optional[String] + UsageOperation: Optional[String] + UsageOperationUpdateTime: Optional[MillisecondDateTime] + PrivateDnsNameOptions: Optional[PrivateDnsNameOptionsResponse] + Ipv6Address: Optional[String] + TpmSupport: Optional[String] + MaintenanceOptions: Optional[InstanceMaintenanceOptions] + CurrentInstanceBootMode: Optional[InstanceBootModeValues] + NetworkPerformanceOptions: Optional[InstanceNetworkPerformanceOptions] + Operator: Optional[OperatorResponse] + InstanceId: Optional[String] + ImageId: Optional[String] + State: Optional[InstanceState] + PrivateDnsName: Optional[String] + PublicDnsName: Optional[String] + StateTransitionReason: Optional[String] + KeyName: Optional[String] + AmiLaunchIndex: Optional[Integer] + ProductCodes: Optional[ProductCodeList] + InstanceType: Optional[InstanceType] + LaunchTime: Optional[DateTime] + Placement: Optional[Placement] + KernelId: Optional[String] + RamdiskId: Optional[String] + Platform: Optional[PlatformValues] + Monitoring: Optional[Monitoring] + SubnetId: Optional[String] + VpcId: Optional[String] + PrivateIpAddress: Optional[String] + PublicIpAddress: Optional[String] + + +InstanceList = List[Instance] + + +class Reservation(TypedDict, total=False): + ReservationId: Optional[String] + OwnerId: Optional[String] + RequesterId: Optional[String] + Groups: Optional[GroupIdentifierList] + Instances: Optional[InstanceList] + + +ReservationList = List[Reservation] + + +class DescribeInstancesResult(TypedDict, total=False): + NextToken: Optional[String] + Reservations: Optional[ReservationList] + + +InternetGatewayIdList = List[InternetGatewayId] + + +class DescribeInternetGatewaysRequest(ServiceRequest): + NextToken: Optional[String] + MaxResults: Optional[DescribeInternetGatewaysMaxResults] + DryRun: Optional[Boolean] + InternetGatewayIds: Optional[InternetGatewayIdList] + Filters: Optional[FilterList] + + +InternetGatewayList = List[InternetGateway] + + +class DescribeInternetGatewaysResult(TypedDict, total=False): + InternetGateways: Optional[InternetGatewayList] + NextToken: Optional[String] + + +class DescribeIpamByoasnRequest(ServiceRequest): + DryRun: Optional[Boolean] + MaxResults: Optional[DescribeIpamByoasnMaxResults] + NextToken: Optional[NextToken] + + +class DescribeIpamByoasnResult(TypedDict, total=False): + Byoasns: Optional[ByoasnSet] + NextToken: Optional[String] + + +class DescribeIpamExternalResourceVerificationTokensRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + NextToken: Optional[NextToken] + MaxResults: Optional[IpamMaxResults] + IpamExternalResourceVerificationTokenIds: Optional[ValueStringList] + + +IpamExternalResourceVerificationTokenSet = List[IpamExternalResourceVerificationToken] + + +class DescribeIpamExternalResourceVerificationTokensResult(TypedDict, total=False): + NextToken: Optional[NextToken] + IpamExternalResourceVerificationTokens: Optional[IpamExternalResourceVerificationTokenSet] + + +class DescribeIpamPoolsRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + MaxResults: Optional[IpamMaxResults] + NextToken: Optional[NextToken] + IpamPoolIds: Optional[ValueStringList] + + +IpamPoolSet = List[IpamPool] + + +class DescribeIpamPoolsResult(TypedDict, total=False): + NextToken: Optional[NextToken] + IpamPools: Optional[IpamPoolSet] + + +class DescribeIpamResourceDiscoveriesRequest(ServiceRequest): + DryRun: Optional[Boolean] + 
IpamResourceDiscoveryIds: Optional[ValueStringList] + NextToken: Optional[NextToken] + MaxResults: Optional[IpamMaxResults] + Filters: Optional[FilterList] + + +IpamResourceDiscoverySet = List[IpamResourceDiscovery] + + +class DescribeIpamResourceDiscoveriesResult(TypedDict, total=False): + IpamResourceDiscoveries: Optional[IpamResourceDiscoverySet] + NextToken: Optional[NextToken] + + +class DescribeIpamResourceDiscoveryAssociationsRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamResourceDiscoveryAssociationIds: Optional[ValueStringList] + NextToken: Optional[NextToken] + MaxResults: Optional[IpamMaxResults] + Filters: Optional[FilterList] + + +IpamResourceDiscoveryAssociationSet = List[IpamResourceDiscoveryAssociation] + + +class DescribeIpamResourceDiscoveryAssociationsResult(TypedDict, total=False): + IpamResourceDiscoveryAssociations: Optional[IpamResourceDiscoveryAssociationSet] + NextToken: Optional[NextToken] + + +class DescribeIpamScopesRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + MaxResults: Optional[IpamMaxResults] + NextToken: Optional[NextToken] + IpamScopeIds: Optional[ValueStringList] + + +IpamScopeSet = List[IpamScope] + + +class DescribeIpamScopesResult(TypedDict, total=False): + NextToken: Optional[NextToken] + IpamScopes: Optional[IpamScopeSet] + + +class DescribeIpamsRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + MaxResults: Optional[IpamMaxResults] + NextToken: Optional[NextToken] + IpamIds: Optional[ValueStringList] + + +IpamSet = List[Ipam] + + +class DescribeIpamsResult(TypedDict, total=False): + NextToken: Optional[NextToken] + Ipams: Optional[IpamSet] + + +Ipv6PoolIdList = List[Ipv6PoolEc2Id] + + +class DescribeIpv6PoolsRequest(ServiceRequest): + PoolIds: Optional[Ipv6PoolIdList] + NextToken: Optional[NextToken] + MaxResults: Optional[Ipv6PoolMaxResults] + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + + +class PoolCidrBlock(TypedDict, total=False): + Cidr: Optional[String] + + +PoolCidrBlocksSet = List[PoolCidrBlock] + + +class Ipv6Pool(TypedDict, total=False): + PoolId: Optional[String] + Description: Optional[String] + PoolCidrBlocks: Optional[PoolCidrBlocksSet] + Tags: Optional[TagList] + + +Ipv6PoolSet = List[Ipv6Pool] + + +class DescribeIpv6PoolsResult(TypedDict, total=False): + Ipv6Pools: Optional[Ipv6PoolSet] + NextToken: Optional[NextToken] + + +KeyPairIdStringList = List[KeyPairId] +KeyNameStringList = List[KeyPairName] + + +class DescribeKeyPairsRequest(ServiceRequest): + KeyNames: Optional[KeyNameStringList] + KeyPairIds: Optional[KeyPairIdStringList] + IncludePublicKey: Optional[Boolean] + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + + +class KeyPairInfo(TypedDict, total=False): + KeyPairId: Optional[String] + KeyType: Optional[KeyType] + Tags: Optional[TagList] + PublicKey: Optional[String] + CreateTime: Optional[MillisecondDateTime] + KeyName: Optional[String] + KeyFingerprint: Optional[String] + + +KeyPairList = List[KeyPairInfo] + + +class DescribeKeyPairsResult(TypedDict, total=False): + KeyPairs: Optional[KeyPairList] + + +class DescribeLaunchTemplateVersionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + LaunchTemplateId: Optional[LaunchTemplateId] + LaunchTemplateName: Optional[LaunchTemplateName] + Versions: Optional[VersionStringList] + MinVersion: Optional[String] + MaxVersion: Optional[String] + NextToken: Optional[String] + MaxResults: Optional[Integer] + Filters: Optional[FilterList] + ResolveAlias: 
Optional[Boolean] + + +LaunchTemplateVersionSet = List[LaunchTemplateVersion] + + +class DescribeLaunchTemplateVersionsResult(TypedDict, total=False): + LaunchTemplateVersions: Optional[LaunchTemplateVersionSet] + NextToken: Optional[String] + + +LaunchTemplateNameStringList = List[LaunchTemplateName] +LaunchTemplateIdStringList = List[LaunchTemplateId] + + +class DescribeLaunchTemplatesRequest(ServiceRequest): + DryRun: Optional[Boolean] + LaunchTemplateIds: Optional[LaunchTemplateIdStringList] + LaunchTemplateNames: Optional[LaunchTemplateNameStringList] + Filters: Optional[FilterList] + NextToken: Optional[String] + MaxResults: Optional[DescribeLaunchTemplatesMaxResults] + + +LaunchTemplateSet = List[LaunchTemplate] + + +class DescribeLaunchTemplatesResult(TypedDict, total=False): + LaunchTemplates: Optional[LaunchTemplateSet] + NextToken: Optional[String] + + +LocalGatewayRouteTableVirtualInterfaceGroupAssociationIdSet = List[ + LocalGatewayRouteTableVirtualInterfaceGroupAssociationId +] + + +class DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(ServiceRequest): + LocalGatewayRouteTableVirtualInterfaceGroupAssociationIds: Optional[ + LocalGatewayRouteTableVirtualInterfaceGroupAssociationIdSet + ] + Filters: Optional[FilterList] + MaxResults: Optional[LocalGatewayMaxResults] + NextToken: Optional[String] + DryRun: Optional[Boolean] + + +LocalGatewayRouteTableVirtualInterfaceGroupAssociationSet = List[ + LocalGatewayRouteTableVirtualInterfaceGroupAssociation +] + + +class DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsResult(TypedDict, total=False): + LocalGatewayRouteTableVirtualInterfaceGroupAssociations: Optional[ + LocalGatewayRouteTableVirtualInterfaceGroupAssociationSet + ] + NextToken: Optional[String] + + +LocalGatewayRouteTableVpcAssociationIdSet = List[LocalGatewayRouteTableVpcAssociationId] + + +class DescribeLocalGatewayRouteTableVpcAssociationsRequest(ServiceRequest): + LocalGatewayRouteTableVpcAssociationIds: Optional[LocalGatewayRouteTableVpcAssociationIdSet] + Filters: Optional[FilterList] + MaxResults: Optional[LocalGatewayMaxResults] + NextToken: Optional[String] + DryRun: Optional[Boolean] + + +LocalGatewayRouteTableVpcAssociationSet = List[LocalGatewayRouteTableVpcAssociation] + + +class DescribeLocalGatewayRouteTableVpcAssociationsResult(TypedDict, total=False): + LocalGatewayRouteTableVpcAssociations: Optional[LocalGatewayRouteTableVpcAssociationSet] + NextToken: Optional[String] + + +LocalGatewayRouteTableIdSet = List[LocalGatewayRoutetableId] + + +class DescribeLocalGatewayRouteTablesRequest(ServiceRequest): + LocalGatewayRouteTableIds: Optional[LocalGatewayRouteTableIdSet] + Filters: Optional[FilterList] + MaxResults: Optional[LocalGatewayMaxResults] + NextToken: Optional[String] + DryRun: Optional[Boolean] + + +LocalGatewayRouteTableSet = List[LocalGatewayRouteTable] + + +class DescribeLocalGatewayRouteTablesResult(TypedDict, total=False): + LocalGatewayRouteTables: Optional[LocalGatewayRouteTableSet] + NextToken: Optional[String] + + +LocalGatewayVirtualInterfaceGroupIdSet = List[LocalGatewayVirtualInterfaceGroupId] + + +class DescribeLocalGatewayVirtualInterfaceGroupsRequest(ServiceRequest): + LocalGatewayVirtualInterfaceGroupIds: Optional[LocalGatewayVirtualInterfaceGroupIdSet] + Filters: Optional[FilterList] + MaxResults: Optional[LocalGatewayMaxResults] + NextToken: Optional[String] + DryRun: Optional[Boolean] + + +LocalGatewayVirtualInterfaceGroupSet = List[LocalGatewayVirtualInterfaceGroup] + + +class 
DescribeLocalGatewayVirtualInterfaceGroupsResult(TypedDict, total=False): + LocalGatewayVirtualInterfaceGroups: Optional[LocalGatewayVirtualInterfaceGroupSet] + NextToken: Optional[String] + + +class DescribeLocalGatewayVirtualInterfacesRequest(ServiceRequest): + LocalGatewayVirtualInterfaceIds: Optional[LocalGatewayVirtualInterfaceIdSet] + Filters: Optional[FilterList] + MaxResults: Optional[LocalGatewayMaxResults] + NextToken: Optional[String] + DryRun: Optional[Boolean] + + +LocalGatewayVirtualInterfaceSet = List[LocalGatewayVirtualInterface] + + +class DescribeLocalGatewayVirtualInterfacesResult(TypedDict, total=False): + LocalGatewayVirtualInterfaces: Optional[LocalGatewayVirtualInterfaceSet] + NextToken: Optional[String] + + +LocalGatewayIdSet = List[LocalGatewayId] + + +class DescribeLocalGatewaysRequest(ServiceRequest): + LocalGatewayIds: Optional[LocalGatewayIdSet] + Filters: Optional[FilterList] + MaxResults: Optional[LocalGatewayMaxResults] + NextToken: Optional[String] + DryRun: Optional[Boolean] + + +class LocalGateway(TypedDict, total=False): + LocalGatewayId: Optional[LocalGatewayId] + OutpostArn: Optional[String] + OwnerId: Optional[String] + State: Optional[String] + Tags: Optional[TagList] + + +LocalGatewaySet = List[LocalGateway] + + +class DescribeLocalGatewaysResult(TypedDict, total=False): + LocalGateways: Optional[LocalGatewaySet] + NextToken: Optional[String] + + +SnapshotIdStringList = List[SnapshotId] + + +class DescribeLockedSnapshotsRequest(ServiceRequest): + Filters: Optional[FilterList] + MaxResults: Optional[DescribeLockedSnapshotsMaxResults] + NextToken: Optional[String] + SnapshotIds: Optional[SnapshotIdStringList] + DryRun: Optional[Boolean] + + +class LockedSnapshotsInfo(TypedDict, total=False): + OwnerId: Optional[String] + SnapshotId: Optional[String] + LockState: Optional[LockState] + LockDuration: Optional[RetentionPeriodResponseDays] + CoolOffPeriod: Optional[CoolOffPeriodResponseHours] + CoolOffPeriodExpiresOn: Optional[MillisecondDateTime] + LockCreatedOn: Optional[MillisecondDateTime] + LockDurationStartTime: Optional[MillisecondDateTime] + LockExpiresOn: Optional[MillisecondDateTime] + + +LockedSnapshotsInfoList = List[LockedSnapshotsInfo] + + +class DescribeLockedSnapshotsResult(TypedDict, total=False): + Snapshots: Optional[LockedSnapshotsInfoList] + NextToken: Optional[String] + + +class DescribeMacHostsRequest(ServiceRequest): + Filters: Optional[FilterList] + HostIds: Optional[RequestHostIdList] + MaxResults: Optional[DescribeMacHostsRequestMaxResults] + NextToken: Optional[String] + + +MacOSVersionStringList = List[String] + + +class MacHost(TypedDict, total=False): + HostId: Optional[DedicatedHostId] + MacOSLatestSupportedVersions: Optional[MacOSVersionStringList] + + +MacHostList = List[MacHost] + + +class DescribeMacHostsResult(TypedDict, total=False): + MacHosts: Optional[MacHostList] + NextToken: Optional[String] + + +MacModificationTaskIdList = List[MacModificationTaskId] + + +class DescribeMacModificationTasksRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + MacModificationTaskIds: Optional[MacModificationTaskIdList] + MaxResults: Optional[DescribeMacModificationTasksMaxResults] + NextToken: Optional[String] + + +MacModificationTaskList = List[MacModificationTask] + + +class DescribeMacModificationTasksResult(TypedDict, total=False): + MacModificationTasks: Optional[MacModificationTaskList] + NextToken: Optional[String] + + +class DescribeManagedPrefixListsRequest(ServiceRequest): + DryRun: 
Optional[Boolean] + Filters: Optional[FilterList] + MaxResults: Optional[PrefixListMaxResults] + NextToken: Optional[NextToken] + PrefixListIds: Optional[ValueStringList] + + +ManagedPrefixListSet = List[ManagedPrefixList] + + +class DescribeManagedPrefixListsResult(TypedDict, total=False): + NextToken: Optional[NextToken] + PrefixLists: Optional[ManagedPrefixListSet] + + +class DescribeMovingAddressesRequest(ServiceRequest): + DryRun: Optional[Boolean] + PublicIps: Optional[ValueStringList] + NextToken: Optional[String] + Filters: Optional[FilterList] + MaxResults: Optional[DescribeMovingAddressesMaxResults] + + +class MovingAddressStatus(TypedDict, total=False): + MoveStatus: Optional[MoveStatus] + PublicIp: Optional[String] + + +MovingAddressStatusSet = List[MovingAddressStatus] + + +class DescribeMovingAddressesResult(TypedDict, total=False): + MovingAddressStatuses: Optional[MovingAddressStatusSet] + NextToken: Optional[String] + + +NatGatewayIdStringList = List[NatGatewayId] + + +class DescribeNatGatewaysRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filter: Optional[FilterList] + MaxResults: Optional[DescribeNatGatewaysMaxResults] + NatGatewayIds: Optional[NatGatewayIdStringList] + NextToken: Optional[String] + + +NatGatewayList = List[NatGateway] + + +class DescribeNatGatewaysResult(TypedDict, total=False): + NatGateways: Optional[NatGatewayList] + NextToken: Optional[String] + + +NetworkAclIdStringList = List[NetworkAclId] + + +class DescribeNetworkAclsRequest(ServiceRequest): + NextToken: Optional[String] + MaxResults: Optional[DescribeNetworkAclsMaxResults] + DryRun: Optional[Boolean] + NetworkAclIds: Optional[NetworkAclIdStringList] + Filters: Optional[FilterList] + + +NetworkAclList = List[NetworkAcl] + + +class DescribeNetworkAclsResult(TypedDict, total=False): + NetworkAcls: Optional[NetworkAclList] + NextToken: Optional[String] + + +NetworkInsightsAccessScopeAnalysisIdList = List[NetworkInsightsAccessScopeAnalysisId] + + +class DescribeNetworkInsightsAccessScopeAnalysesRequest(ServiceRequest): + NetworkInsightsAccessScopeAnalysisIds: Optional[NetworkInsightsAccessScopeAnalysisIdList] + NetworkInsightsAccessScopeId: Optional[NetworkInsightsAccessScopeId] + AnalysisStartTimeBegin: Optional[MillisecondDateTime] + AnalysisStartTimeEnd: Optional[MillisecondDateTime] + Filters: Optional[FilterList] + MaxResults: Optional[NetworkInsightsMaxResults] + DryRun: Optional[Boolean] + NextToken: Optional[NextToken] + + +class NetworkInsightsAccessScopeAnalysis(TypedDict, total=False): + NetworkInsightsAccessScopeAnalysisId: Optional[NetworkInsightsAccessScopeAnalysisId] + NetworkInsightsAccessScopeAnalysisArn: Optional[ResourceArn] + NetworkInsightsAccessScopeId: Optional[NetworkInsightsAccessScopeId] + Status: Optional[AnalysisStatus] + StatusMessage: Optional[String] + WarningMessage: Optional[String] + StartDate: Optional[MillisecondDateTime] + EndDate: Optional[MillisecondDateTime] + FindingsFound: Optional[FindingsFound] + AnalyzedEniCount: Optional[Integer] + Tags: Optional[TagList] + + +NetworkInsightsAccessScopeAnalysisList = List[NetworkInsightsAccessScopeAnalysis] + + +class DescribeNetworkInsightsAccessScopeAnalysesResult(TypedDict, total=False): + NetworkInsightsAccessScopeAnalyses: Optional[NetworkInsightsAccessScopeAnalysisList] + NextToken: Optional[String] + + +NetworkInsightsAccessScopeIdList = List[NetworkInsightsAccessScopeId] + + +class DescribeNetworkInsightsAccessScopesRequest(ServiceRequest): + NetworkInsightsAccessScopeIds: 
Optional[NetworkInsightsAccessScopeIdList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[NetworkInsightsMaxResults]
+    DryRun: Optional[Boolean]
+    NextToken: Optional[NextToken]
+
+
+NetworkInsightsAccessScopeList = List[NetworkInsightsAccessScope]
+
+
+class DescribeNetworkInsightsAccessScopesResult(TypedDict, total=False):
+    NetworkInsightsAccessScopes: Optional[NetworkInsightsAccessScopeList]
+    NextToken: Optional[String]
+
+
+NetworkInsightsAnalysisIdList = List[NetworkInsightsAnalysisId]
+
+
+class DescribeNetworkInsightsAnalysesRequest(ServiceRequest):
+    NetworkInsightsAnalysisIds: Optional[NetworkInsightsAnalysisIdList]
+    NetworkInsightsPathId: Optional[NetworkInsightsPathId]
+    AnalysisStartTime: Optional[MillisecondDateTime]
+    AnalysisEndTime: Optional[MillisecondDateTime]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[NetworkInsightsMaxResults]
+    DryRun: Optional[Boolean]
+    NextToken: Optional[NextToken]
+
+
+class NetworkInsightsAnalysis(TypedDict, total=False):
+    NetworkInsightsAnalysisId: Optional[NetworkInsightsAnalysisId]
+    NetworkInsightsAnalysisArn: Optional[ResourceArn]
+    NetworkInsightsPathId: Optional[NetworkInsightsPathId]
+    AdditionalAccounts: Optional[ValueStringList]
+    FilterInArns: Optional[ArnList]
+    FilterOutArns: Optional[ArnList]
+    StartDate: Optional[MillisecondDateTime]
+    Status: Optional[AnalysisStatus]
+    StatusMessage: Optional[String]
+    WarningMessage: Optional[String]
+    NetworkPathFound: Optional[Boolean]
+    ForwardPathComponents: Optional[PathComponentList]
+    ReturnPathComponents: Optional[PathComponentList]
+    Explanations: Optional[ExplanationList]
+    AlternatePathHints: Optional[AlternatePathHintList]
+    SuggestedAccounts: Optional[ValueStringList]
+    Tags: Optional[TagList]
+
+
+NetworkInsightsAnalysisList = List[NetworkInsightsAnalysis]
+
+
+class DescribeNetworkInsightsAnalysesResult(TypedDict, total=False):
+    NetworkInsightsAnalyses: Optional[NetworkInsightsAnalysisList]
+    NextToken: Optional[String]
+
+
+NetworkInsightsPathIdList = List[NetworkInsightsPathId]
+
+
+class DescribeNetworkInsightsPathsRequest(ServiceRequest):
+    NetworkInsightsPathIds: Optional[NetworkInsightsPathIdList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[NetworkInsightsMaxResults]
+    DryRun: Optional[Boolean]
+    NextToken: Optional[NextToken]
+
+
+NetworkInsightsPathList = List[NetworkInsightsPath]
+
+
+class DescribeNetworkInsightsPathsResult(TypedDict, total=False):
+    NetworkInsightsPaths: Optional[NetworkInsightsPathList]
+    NextToken: Optional[String]
+
+
+class DescribeNetworkInterfaceAttributeRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NetworkInterfaceId: NetworkInterfaceId
+    Attribute: Optional[NetworkInterfaceAttribute]
+
+
+class DescribeNetworkInterfaceAttributeResult(TypedDict, total=False):
+    Attachment: Optional[NetworkInterfaceAttachment]
+    Description: Optional[AttributeValue]
+    Groups: Optional[GroupIdentifierList]
+    NetworkInterfaceId: Optional[String]
+    SourceDestCheck: Optional[AttributeBooleanValue]
+    AssociatePublicIpAddress: Optional[Boolean]
+
+
+NetworkInterfacePermissionIdList = List[NetworkInterfacePermissionId]
+
+
+class DescribeNetworkInterfacePermissionsRequest(ServiceRequest):
+    NetworkInterfacePermissionIds: Optional[NetworkInterfacePermissionIdList]
+    Filters: Optional[FilterList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeNetworkInterfacePermissionsMaxResults]
+
+
+NetworkInterfacePermissionList = List[NetworkInterfacePermission]
+
+
+class DescribeNetworkInterfacePermissionsResult(TypedDict, total=False):
+    NetworkInterfacePermissions: Optional[NetworkInterfacePermissionList]
+    NextToken: Optional[String]
+
+
+NetworkInterfaceIdList = List[NetworkInterfaceId]
+
+
+class DescribeNetworkInterfacesRequest(ServiceRequest):
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeNetworkInterfacesMaxResults]
+    DryRun: Optional[Boolean]
+    NetworkInterfaceIds: Optional[NetworkInterfaceIdList]
+    Filters: Optional[FilterList]
+
+
+NetworkInterfaceList = List[NetworkInterface]
+
+
+class DescribeNetworkInterfacesResult(TypedDict, total=False):
+    NetworkInterfaces: Optional[NetworkInterfaceList]
+    NextToken: Optional[String]
+
+
+OutpostLagIdSet = List[OutpostLagId]
+
+
+class DescribeOutpostLagsRequest(ServiceRequest):
+    OutpostLagIds: Optional[OutpostLagIdSet]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[OutpostLagMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+ServiceLinkVirtualInterfaceIdSet = List[ServiceLinkVirtualInterfaceId]
+
+
+class OutpostLag(TypedDict, total=False):
+    OutpostArn: Optional[String]
+    OwnerId: Optional[String]
+    State: Optional[String]
+    OutpostLagId: Optional[OutpostLagId]
+    LocalGatewayVirtualInterfaceIds: Optional[LocalGatewayVirtualInterfaceIdSet]
+    ServiceLinkVirtualInterfaceIds: Optional[ServiceLinkVirtualInterfaceIdSet]
+    Tags: Optional[TagList]
+
+
+OutpostLagSet = List[OutpostLag]
+
+
+class DescribeOutpostLagsResult(TypedDict, total=False):
+    OutpostLags: Optional[OutpostLagSet]
+    NextToken: Optional[String]
+
+
+PlacementGroupStringList = List[PlacementGroupName]
+PlacementGroupIdStringList = List[PlacementGroupId]
+
+
+class DescribePlacementGroupsRequest(ServiceRequest):
+    GroupIds: Optional[PlacementGroupIdStringList]
+    DryRun: Optional[Boolean]
+    GroupNames: Optional[PlacementGroupStringList]
+    Filters: Optional[FilterList]
+
+
+PlacementGroupList = List[PlacementGroup]
+
+
+class DescribePlacementGroupsResult(TypedDict, total=False):
+    PlacementGroups: Optional[PlacementGroupList]
+
+
+PrefixListResourceIdStringList = List[PrefixListResourceId]
+
+
+class DescribePrefixListsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+    PrefixListIds: Optional[PrefixListResourceIdStringList]
+
+
+class PrefixList(TypedDict, total=False):
+    Cidrs: Optional[ValueStringList]
+    PrefixListId: Optional[String]
+    PrefixListName: Optional[String]
+
+
+PrefixListSet = List[PrefixList]
+
+
+class DescribePrefixListsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    PrefixLists: Optional[PrefixListSet]
+
+
+ResourceList = List[String]
+
+
+class DescribePrincipalIdFormatRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Resources: Optional[ResourceList]
+    MaxResults: Optional[DescribePrincipalIdFormatMaxResults]
+    NextToken: Optional[String]
+
+
+class PrincipalIdFormat(TypedDict, total=False):
+    Arn: Optional[String]
+    Statuses: Optional[IdFormatList]
+
+
+PrincipalIdFormatList = List[PrincipalIdFormat]
+
+
+class DescribePrincipalIdFormatResult(TypedDict, total=False):
+    Principals: Optional[PrincipalIdFormatList]
+    NextToken: Optional[String]
+
+
+PublicIpv4PoolIdStringList = List[Ipv4PoolEc2Id]
+
+
+class DescribePublicIpv4PoolsRequest(ServiceRequest):
+    PoolIds: Optional[PublicIpv4PoolIdStringList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[PoolMaxResults]
+    Filters: Optional[FilterList]
+
+
+class PublicIpv4PoolRange(TypedDict, total=False):
+    FirstAddress: Optional[String]
+    LastAddress: Optional[String]
+    AddressCount: Optional[Integer]
+    AvailableAddressCount: Optional[Integer]
+
+
+PublicIpv4PoolRangeSet = List[PublicIpv4PoolRange]
+
+
+class PublicIpv4Pool(TypedDict, total=False):
+    PoolId: Optional[String]
+    Description: Optional[String]
+    PoolAddressRanges: Optional[PublicIpv4PoolRangeSet]
+    TotalAddressCount: Optional[Integer]
+    TotalAvailableAddressCount: Optional[Integer]
+    NetworkBorderGroup: Optional[String]
+    Tags: Optional[TagList]
+
+
+PublicIpv4PoolSet = List[PublicIpv4Pool]
+
+
+class DescribePublicIpv4PoolsResult(TypedDict, total=False):
+    PublicIpv4Pools: Optional[PublicIpv4PoolSet]
+    NextToken: Optional[String]
+
+
+RegionNameStringList = List[String]
+
+
+class DescribeRegionsRequest(ServiceRequest):
+    RegionNames: Optional[RegionNameStringList]
+    AllRegions: Optional[Boolean]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+
+
+class Region(TypedDict, total=False):
+    OptInStatus: Optional[String]
+    RegionName: Optional[String]
+    Endpoint: Optional[String]
+
+
+RegionList = List[Region]
+
+
+class DescribeRegionsResult(TypedDict, total=False):
+    Regions: Optional[RegionList]
+
+
+ReplaceRootVolumeTaskIds = List[ReplaceRootVolumeTaskId]
+
+
+class DescribeReplaceRootVolumeTasksRequest(ServiceRequest):
+    ReplaceRootVolumeTaskIds: Optional[ReplaceRootVolumeTaskIds]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[DescribeReplaceRootVolumeTasksMaxResults]
+    NextToken: Optional[NextToken]
+    DryRun: Optional[Boolean]
+
+
+ReplaceRootVolumeTasks = List[ReplaceRootVolumeTask]
+
+
+class DescribeReplaceRootVolumeTasksResult(TypedDict, total=False):
+    ReplaceRootVolumeTasks: Optional[ReplaceRootVolumeTasks]
+    NextToken: Optional[String]
+
+
+class DescribeReservedInstancesListingsRequest(ServiceRequest):
+    ReservedInstancesId: Optional[ReservationId]
+    ReservedInstancesListingId: Optional[ReservedInstancesListingId]
+    Filters: Optional[FilterList]
+
+
+class DescribeReservedInstancesListingsResult(TypedDict, total=False):
+    ReservedInstancesListings: Optional[ReservedInstancesListingList]
+
+
+ReservedInstancesModificationIdStringList = List[ReservedInstancesModificationId]
+
+
+class DescribeReservedInstancesModificationsRequest(ServiceRequest):
+    ReservedInstancesModificationIds: Optional[ReservedInstancesModificationIdStringList]
+    NextToken: Optional[String]
+    Filters: Optional[FilterList]
+
+
+class ReservedInstancesId(TypedDict, total=False):
+    ReservedInstancesId: Optional[String]
+
+
+ReservedIntancesIds = List[ReservedInstancesId]
+
+
+class ReservedInstancesConfiguration(TypedDict, total=False):
+    AvailabilityZone: Optional[String]
+    InstanceCount: Optional[Integer]
+    InstanceType: Optional[InstanceType]
+    Platform: Optional[String]
+    Scope: Optional[scope]
+    AvailabilityZoneId: Optional[String]
+
+
+class ReservedInstancesModificationResult(TypedDict, total=False):
+    ReservedInstancesId: Optional[String]
+    TargetConfiguration: Optional[ReservedInstancesConfiguration]
+
+
+ReservedInstancesModificationResultList = List[ReservedInstancesModificationResult]
+
+
+class ReservedInstancesModification(TypedDict, total=False):
+    ClientToken: Optional[String]
+    CreateDate: Optional[DateTime]
+    EffectiveDate: Optional[DateTime]
+    ModificationResults: Optional[ReservedInstancesModificationResultList]
+    ReservedInstancesIds: Optional[ReservedIntancesIds]
+    ReservedInstancesModificationId: Optional[String]
+    Status: Optional[String]
+    StatusMessage: Optional[String]
+    UpdateDate: Optional[DateTime]
+
+
+ReservedInstancesModificationList = List[ReservedInstancesModification]
+
+
+class DescribeReservedInstancesModificationsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    ReservedInstancesModifications: Optional[ReservedInstancesModificationList]
+
+
+ReservedInstancesOfferingIdStringList = List[ReservedInstancesOfferingId]
+
+
+class DescribeReservedInstancesOfferingsRequest(ServiceRequest):
+    AvailabilityZone: Optional[String]
+    IncludeMarketplace: Optional[Boolean]
+    InstanceType: Optional[InstanceType]
+    MaxDuration: Optional[Long]
+    MaxInstanceCount: Optional[Integer]
+    MinDuration: Optional[Long]
+    OfferingClass: Optional[OfferingClassType]
+    ProductDescription: Optional[RIProductDescription]
+    ReservedInstancesOfferingIds: Optional[ReservedInstancesOfferingIdStringList]
+    AvailabilityZoneId: Optional[AvailabilityZoneId]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    InstanceTenancy: Optional[Tenancy]
+    OfferingType: Optional[OfferingTypeValues]
+    NextToken: Optional[String]
+    MaxResults: Optional[Integer]
+
+
+class RecurringCharge(TypedDict, total=False):
+    Amount: Optional[Double]
+    Frequency: Optional[RecurringChargeFrequency]
+
+
+RecurringChargesList = List[RecurringCharge]
+
+
+class PricingDetail(TypedDict, total=False):
+    Count: Optional[Integer]
+    Price: Optional[Double]
+
+
+PricingDetailsList = List[PricingDetail]
+
+
+class ReservedInstancesOffering(TypedDict, total=False):
+    CurrencyCode: Optional[CurrencyCodeValues]
+    InstanceTenancy: Optional[Tenancy]
+    Marketplace: Optional[Boolean]
+    OfferingClass: Optional[OfferingClassType]
+    OfferingType: Optional[OfferingTypeValues]
+    PricingDetails: Optional[PricingDetailsList]
+    RecurringCharges: Optional[RecurringChargesList]
+    Scope: Optional[scope]
+    AvailabilityZoneId: Optional[AvailabilityZoneId]
+    ReservedInstancesOfferingId: Optional[String]
+    InstanceType: Optional[InstanceType]
+    AvailabilityZone: Optional[String]
+    Duration: Optional[Long]
+    UsagePrice: Optional[Float]
+    FixedPrice: Optional[Float]
+    ProductDescription: Optional[RIProductDescription]
+
+
+ReservedInstancesOfferingList = List[ReservedInstancesOffering]
+
+
+class DescribeReservedInstancesOfferingsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    ReservedInstancesOfferings: Optional[ReservedInstancesOfferingList]
+
+
+ReservedInstancesIdStringList = List[ReservationId]
+
+
+class DescribeReservedInstancesRequest(ServiceRequest):
+    OfferingClass: Optional[OfferingClassType]
+    ReservedInstancesIds: Optional[ReservedInstancesIdStringList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    OfferingType: Optional[OfferingTypeValues]
+
+
+class ReservedInstances(TypedDict, total=False):
+    CurrencyCode: Optional[CurrencyCodeValues]
+    InstanceTenancy: Optional[Tenancy]
+    OfferingClass: Optional[OfferingClassType]
+    OfferingType: Optional[OfferingTypeValues]
+    RecurringCharges: Optional[RecurringChargesList]
+    Scope: Optional[scope]
+    Tags: Optional[TagList]
+    AvailabilityZoneId: Optional[String]
+    ReservedInstancesId: Optional[String]
+    InstanceType: Optional[InstanceType]
+    AvailabilityZone: Optional[String]
+    Start: Optional[DateTime]
+    End: Optional[DateTime]
+    Duration: Optional[Long]
+    UsagePrice: Optional[Float]
+    FixedPrice: Optional[Float]
+    InstanceCount: Optional[Integer]
+    ProductDescription: Optional[RIProductDescription]
+    State: Optional[ReservedInstanceState]
+
+
+ReservedInstancesList = List[ReservedInstances]
+
+
+class DescribeReservedInstancesResult(TypedDict, total=False):
+    ReservedInstances: Optional[ReservedInstancesList]
+
+
+RouteServerEndpointIdsList = List[RouteServerEndpointId]
+
+
+class DescribeRouteServerEndpointsRequest(ServiceRequest):
+    RouteServerEndpointIds: Optional[RouteServerEndpointIdsList]
+    NextToken: Optional[String]
+    MaxResults: Optional[RouteServerMaxResults]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+RouteServerEndpointsList = List[RouteServerEndpoint]
+
+
+class DescribeRouteServerEndpointsResult(TypedDict, total=False):
+    RouteServerEndpoints: Optional[RouteServerEndpointsList]
+    NextToken: Optional[String]
+
+
+RouteServerPeerIdsList = List[RouteServerPeerId]
+
+
+class DescribeRouteServerPeersRequest(ServiceRequest):
+    RouteServerPeerIds: Optional[RouteServerPeerIdsList]
+    NextToken: Optional[String]
+    MaxResults: Optional[RouteServerMaxResults]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+RouteServerPeersList = List[RouteServerPeer]
+
+
+class DescribeRouteServerPeersResult(TypedDict, total=False):
+    RouteServerPeers: Optional[RouteServerPeersList]
+    NextToken: Optional[String]
+
+
+RouteServerIdsList = List[RouteServerId]
+
+
+class DescribeRouteServersRequest(ServiceRequest):
+    RouteServerIds: Optional[RouteServerIdsList]
+    NextToken: Optional[String]
+    MaxResults: Optional[RouteServerMaxResults]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+RouteServersList = List[RouteServer]
+
+
+class DescribeRouteServersResult(TypedDict, total=False):
+    RouteServers: Optional[RouteServersList]
+    NextToken: Optional[String]
+
+
+RouteTableIdStringList = List[RouteTableId]
+
+
+class DescribeRouteTablesRequest(ServiceRequest):
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeRouteTablesMaxResults]
+    DryRun: Optional[Boolean]
+    RouteTableIds: Optional[RouteTableIdStringList]
+    Filters: Optional[FilterList]
+
+
+RouteTableList = List[RouteTable]
+
+
+class DescribeRouteTablesResult(TypedDict, total=False):
+    RouteTables: Optional[RouteTableList]
+    NextToken: Optional[String]
+
+
+OccurrenceDayRequestSet = List[Integer]
+
+
+class ScheduledInstanceRecurrenceRequest(TypedDict, total=False):
+    Frequency: Optional[String]
+    Interval: Optional[Integer]
+    OccurrenceDays: Optional[OccurrenceDayRequestSet]
+    OccurrenceRelativeToEnd: Optional[Boolean]
+    OccurrenceUnit: Optional[String]
+
+
+class SlotDateTimeRangeRequest(TypedDict, total=False):
+    EarliestTime: DateTime
+    LatestTime: DateTime
+
+
+class DescribeScheduledInstanceAvailabilityRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    FirstSlotStartTimeRange: SlotDateTimeRangeRequest
+    MaxResults: Optional[DescribeScheduledInstanceAvailabilityMaxResults]
+    MaxSlotDurationInHours: Optional[Integer]
+    MinSlotDurationInHours: Optional[Integer]
+    NextToken: Optional[String]
+    Recurrence: ScheduledInstanceRecurrenceRequest
+
+
+OccurrenceDaySet = List[Integer]
+
+
+class ScheduledInstanceRecurrence(TypedDict, total=False):
+    Frequency: Optional[String]
+    Interval: Optional[Integer]
+    OccurrenceDaySet: Optional[OccurrenceDaySet]
+    OccurrenceRelativeToEnd: Optional[Boolean]
+    OccurrenceUnit: Optional[String]
+
+
+class ScheduledInstanceAvailability(TypedDict, total=False):
+    AvailabilityZone: Optional[String]
+    AvailableInstanceCount: Optional[Integer]
+    FirstSlotStartTime: Optional[DateTime]
+    HourlyPrice: Optional[String]
+    InstanceType: Optional[String]
+    MaxTermDurationInDays: Optional[Integer]
+    MinTermDurationInDays: Optional[Integer]
+    NetworkPlatform: Optional[String]
+    Platform: Optional[String]
+    PurchaseToken: Optional[String]
+    Recurrence: Optional[ScheduledInstanceRecurrence]
+    SlotDurationInHours: Optional[Integer]
+    TotalScheduledInstanceHours: Optional[Integer]
+
+
+ScheduledInstanceAvailabilitySet = List[ScheduledInstanceAvailability]
+
+
+class DescribeScheduledInstanceAvailabilityResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    ScheduledInstanceAvailabilitySet: Optional[ScheduledInstanceAvailabilitySet]
+
+
+class SlotStartTimeRangeRequest(TypedDict, total=False):
+    EarliestTime: Optional[DateTime]
+    LatestTime: Optional[DateTime]
+
+
+ScheduledInstanceIdRequestSet = List[ScheduledInstanceId]
+
+
+class DescribeScheduledInstancesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+    ScheduledInstanceIds: Optional[ScheduledInstanceIdRequestSet]
+    SlotStartTimeRange: Optional[SlotStartTimeRangeRequest]
+
+
+class ScheduledInstance(TypedDict, total=False):
+    AvailabilityZone: Optional[String]
+    CreateDate: Optional[DateTime]
+    HourlyPrice: Optional[String]
+    InstanceCount: Optional[Integer]
+    InstanceType: Optional[String]
+    NetworkPlatform: Optional[String]
+    NextSlotStartTime: Optional[DateTime]
+    Platform: Optional[String]
+    PreviousSlotEndTime: Optional[DateTime]
+    Recurrence: Optional[ScheduledInstanceRecurrence]
+    ScheduledInstanceId: Optional[String]
+    SlotDurationInHours: Optional[Integer]
+    TermEndDate: Optional[DateTime]
+    TermStartDate: Optional[DateTime]
+    TotalScheduledInstanceHours: Optional[Integer]
+
+
+ScheduledInstanceSet = List[ScheduledInstance]
+
+
+class DescribeScheduledInstancesResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    ScheduledInstanceSet: Optional[ScheduledInstanceSet]
+
+
+GroupIds = List[SecurityGroupId]
+
+
+class DescribeSecurityGroupReferencesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    GroupId: GroupIds
+
+
+class SecurityGroupReference(TypedDict, total=False):
+    GroupId: Optional[String]
+    ReferencingVpcId: Optional[String]
+    VpcPeeringConnectionId: Optional[String]
+    TransitGatewayId: Optional[String]
+
+
+SecurityGroupReferences = List[SecurityGroupReference]
+
+
+class DescribeSecurityGroupReferencesResult(TypedDict, total=False):
+    SecurityGroupReferenceSet: Optional[SecurityGroupReferences]
+
+
+SecurityGroupRuleIdList = List[String]
+
+
+class DescribeSecurityGroupRulesRequest(ServiceRequest):
+    Filters: Optional[FilterList]
+    SecurityGroupRuleIds: Optional[SecurityGroupRuleIdList]
+    DryRun: Optional[Boolean]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeSecurityGroupRulesMaxResults]
+
+
+class DescribeSecurityGroupRulesResult(TypedDict, total=False):
+    SecurityGroupRules: Optional[SecurityGroupRuleList]
+    NextToken: Optional[String]
+
+
+class DescribeSecurityGroupVpcAssociationsRequest(ServiceRequest):
+    Filters: Optional[FilterList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeSecurityGroupVpcAssociationsMaxResults]
+    DryRun: Optional[Boolean]
+
+
+class SecurityGroupVpcAssociation(TypedDict, total=False):
+    GroupId: Optional[SecurityGroupId]
+    VpcId: Optional[VpcId]
+    VpcOwnerId: Optional[String]
+    State: Optional[SecurityGroupVpcAssociationState]
+    StateReason: Optional[String]
+
+
+SecurityGroupVpcAssociationList = List[SecurityGroupVpcAssociation]
+
+
+class DescribeSecurityGroupVpcAssociationsResult(TypedDict, total=False):
+    SecurityGroupVpcAssociations: Optional[SecurityGroupVpcAssociationList]
+    NextToken: Optional[String]
+
+
+GroupNameStringList = List[SecurityGroupName]
+
+
+class DescribeSecurityGroupsRequest(ServiceRequest):
+    GroupIds: Optional[GroupIdStringList]
+    GroupNames: Optional[GroupNameStringList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeSecurityGroupsMaxResults]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+
+
+class SecurityGroup(TypedDict, total=False):
+    GroupId: Optional[String]
+    IpPermissionsEgress: Optional[IpPermissionList]
+    Tags: Optional[TagList]
+    VpcId: Optional[String]
+    SecurityGroupArn: Optional[String]
+    OwnerId: Optional[String]
+    GroupName: Optional[String]
+    Description: Optional[String]
+    IpPermissions: Optional[IpPermissionList]
+
+
+SecurityGroupList = List[SecurityGroup]
+
+
+class DescribeSecurityGroupsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    SecurityGroups: Optional[SecurityGroupList]
+
+
+class DescribeServiceLinkVirtualInterfacesRequest(ServiceRequest):
+    ServiceLinkVirtualInterfaceIds: Optional[ServiceLinkVirtualInterfaceIdSet]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[ServiceLinkMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class ServiceLinkVirtualInterface(TypedDict, total=False):
+    ServiceLinkVirtualInterfaceId: Optional[ServiceLinkVirtualInterfaceId]
+    ServiceLinkVirtualInterfaceArn: Optional[ResourceArn]
+    OutpostId: Optional[String]
+    OutpostArn: Optional[String]
+    OwnerId: Optional[String]
+    LocalAddress: Optional[String]
+    PeerAddress: Optional[String]
+    PeerBgpAsn: Optional[Long]
+    Vlan: Optional[Integer]
+    OutpostLagId: Optional[OutpostLagId]
+    Tags: Optional[TagList]
+    ConfigurationState: Optional[ServiceLinkVirtualInterfaceConfigurationState]
+
+
+ServiceLinkVirtualInterfaceSet = List[ServiceLinkVirtualInterface]
+
+
+class DescribeServiceLinkVirtualInterfacesResult(TypedDict, total=False):
+    ServiceLinkVirtualInterfaces: Optional[ServiceLinkVirtualInterfaceSet]
+    NextToken: Optional[String]
+
+
+class DescribeSnapshotAttributeRequest(ServiceRequest):
+    Attribute: SnapshotAttributeName
+    SnapshotId: SnapshotId
+    DryRun: Optional[Boolean]
+
+
+class DescribeSnapshotAttributeResult(TypedDict, total=False):
+    ProductCodes: Optional[ProductCodeList]
+    SnapshotId: Optional[String]
+    CreateVolumePermissions: Optional[CreateVolumePermissionList]
+
+
+class DescribeSnapshotTierStatusRequest(ServiceRequest):
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeSnapshotTierStatusMaxResults]
+
+
+class SnapshotTierStatus(TypedDict, total=False):
+    SnapshotId: Optional[SnapshotId]
+    VolumeId: Optional[VolumeId]
+    Status: Optional[SnapshotState]
+    OwnerId: Optional[String]
+    Tags: Optional[TagList]
+    StorageTier: Optional[StorageTier]
+    LastTieringStartTime: Optional[MillisecondDateTime]
+    LastTieringProgress: Optional[Integer]
+    LastTieringOperationStatus: Optional[TieringOperationStatus]
+    LastTieringOperationStatusDetail: Optional[String]
+    ArchivalCompleteTime: Optional[MillisecondDateTime]
+    RestoreExpiryTime: Optional[MillisecondDateTime]
+
+
+snapshotTierStatusSet = List[SnapshotTierStatus]
+
+
+class DescribeSnapshotTierStatusResult(TypedDict, total=False):
+    SnapshotTierStatuses: Optional[snapshotTierStatusSet]
+    NextToken: Optional[String]
+
+
+RestorableByStringList = List[String]
+
+
+class DescribeSnapshotsRequest(ServiceRequest):
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+    OwnerIds: Optional[OwnerStringList]
+    RestorableByUserIds: Optional[RestorableByStringList]
+    SnapshotIds: Optional[SnapshotIdStringList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+
+
+class Snapshot(TypedDict, total=False):
+    OwnerAlias: Optional[String]
+    OutpostArn: Optional[String]
+    Tags: Optional[TagList]
+    StorageTier: Optional[StorageTier]
+    RestoreExpiryTime: Optional[MillisecondDateTime]
+    SseType: Optional[SSEType]
+    AvailabilityZone: Optional[String]
+    TransferType: Optional[TransferType]
+    CompletionDurationMinutes: Optional[SnapshotCompletionDurationMinutesResponse]
+    CompletionTime: Optional[MillisecondDateTime]
+    FullSnapshotSizeInBytes: Optional[Long]
+    SnapshotId: Optional[String]
+    VolumeId: Optional[String]
+    State: Optional[SnapshotState]
+    StateMessage: Optional[String]
+    StartTime: Optional[DateTime]
+    Progress: Optional[String]
+    OwnerId: Optional[String]
+    Description: Optional[String]
+    VolumeSize: Optional[Integer]
+    Encrypted: Optional[Boolean]
+    KmsKeyId: Optional[String]
+    DataEncryptionKeyId: Optional[String]
+
+
+SnapshotList = List[Snapshot]
+
+
+class DescribeSnapshotsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    Snapshots: Optional[SnapshotList]
+
+
+class DescribeSpotDatafeedSubscriptionRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class DescribeSpotDatafeedSubscriptionResult(TypedDict, total=False):
+    SpotDatafeedSubscription: Optional[SpotDatafeedSubscription]
+
+
+class DescribeSpotFleetInstancesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    SpotFleetRequestId: SpotFleetRequestId
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeSpotFleetInstancesMaxResults]
+
+
+class DescribeSpotFleetInstancesResponse(TypedDict, total=False):
+    ActiveInstances: Optional[ActiveInstanceSet]
+    NextToken: Optional[String]
+    SpotFleetRequestId: Optional[String]
+
+
+class DescribeSpotFleetRequestHistoryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    SpotFleetRequestId: SpotFleetRequestId
+    EventType: Optional[EventType]
+    StartTime: DateTime
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeSpotFleetRequestHistoryMaxResults]
+
+
+class HistoryRecord(TypedDict, total=False):
+    EventInformation: Optional[EventInformation]
+    EventType: Optional[EventType]
+    Timestamp: Optional[DateTime]
+
+
+HistoryRecords = List[HistoryRecord]
+
+
+class DescribeSpotFleetRequestHistoryResponse(TypedDict, total=False):
+    HistoryRecords: Optional[HistoryRecords]
+    LastEvaluatedTime: Optional[DateTime]
+    NextToken: Optional[String]
+    SpotFleetRequestId: Optional[String]
+    StartTime: Optional[DateTime]
+
+
+class DescribeSpotFleetRequestsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    SpotFleetRequestIds: Optional[SpotFleetRequestIdList]
+    NextToken: Optional[String]
+    MaxResults: Optional[Integer]
+
+
+class TargetGroup(TypedDict, total=False):
+    Arn: Optional[String]
+
+
+TargetGroups = List[TargetGroup]
+
+
+class TargetGroupsConfig(TypedDict, total=False):
+    TargetGroups: Optional[TargetGroups]
+
+
+class LoadBalancersConfig(TypedDict, total=False):
+    ClassicLoadBalancersConfig: Optional[ClassicLoadBalancersConfig]
+    TargetGroupsConfig: Optional[TargetGroupsConfig]
+
+
+class LaunchTemplateOverrides(TypedDict, total=False):
+    InstanceType: Optional[InstanceType]
+    SpotPrice: Optional[String]
+    SubnetId: Optional[SubnetId]
+    AvailabilityZone: Optional[String]
+    WeightedCapacity: Optional[Double]
+    Priority: Optional[Double]
+    InstanceRequirements: Optional[InstanceRequirements]
+
+
+LaunchTemplateOverridesList = List[LaunchTemplateOverrides]
+
+
+class LaunchTemplateConfig(TypedDict, total=False):
+    LaunchTemplateSpecification: Optional[FleetLaunchTemplateSpecification]
+    Overrides: Optional[LaunchTemplateOverridesList]
+
+
+LaunchTemplateConfigList = List[LaunchTemplateConfig]
+
+
+class SpotFleetTagSpecification(TypedDict, total=False):
+    ResourceType: Optional[ResourceType]
+    Tags: Optional[TagList]
+
+
+SpotFleetTagSpecificationList = List[SpotFleetTagSpecification]
+
+
+class SpotPlacement(TypedDict, total=False):
+    AvailabilityZone: Optional[String]
+    GroupName: Optional[PlacementGroupName]
+    Tenancy: Optional[Tenancy]
+
+
+class InstanceNetworkInterfaceSpecification(TypedDict, total=False):
+    AssociatePublicIpAddress: Optional[Boolean]
+    DeleteOnTermination: Optional[Boolean]
+    Description: Optional[String]
+    DeviceIndex: Optional[Integer]
+    Groups: Optional[SecurityGroupIdStringList]
+    Ipv6AddressCount: Optional[Integer]
+    Ipv6Addresses: Optional[InstanceIpv6AddressList]
+    NetworkInterfaceId: Optional[NetworkInterfaceId]
+    PrivateIpAddress: Optional[String]
+    PrivateIpAddresses: Optional[PrivateIpAddressSpecificationList]
+    SecondaryPrivateIpAddressCount: Optional[Integer]
+    SubnetId: Optional[String]
+    AssociateCarrierIpAddress: Optional[Boolean]
+    InterfaceType: Optional[String]
+    NetworkCardIndex: Optional[Integer]
+    Ipv4Prefixes: Optional[Ipv4PrefixList]
+    Ipv4PrefixCount: Optional[Integer]
+    Ipv6Prefixes: Optional[Ipv6PrefixList]
+    Ipv6PrefixCount: Optional[Integer]
+    PrimaryIpv6: Optional[Boolean]
+    EnaSrdSpecification: Optional[EnaSrdSpecificationRequest]
+    ConnectionTrackingSpecification: Optional[ConnectionTrackingSpecificationRequest]
+    EnaQueueCount: Optional[Integer]
+
+
+InstanceNetworkInterfaceSpecificationList = List[InstanceNetworkInterfaceSpecification]
+
+
+class SpotFleetMonitoring(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+
+
+class SpotFleetLaunchSpecification(TypedDict, total=False):
+    AddressingType: Optional[String]
+    BlockDeviceMappings: Optional[BlockDeviceMappingList]
+    EbsOptimized: Optional[Boolean]
+    IamInstanceProfile: Optional[IamInstanceProfileSpecification]
+    ImageId: Optional[ImageId]
+    InstanceType: Optional[InstanceType]
+    KernelId: Optional[String]
+    KeyName: Optional[KeyPairName]
+    Monitoring: Optional[SpotFleetMonitoring]
+    NetworkInterfaces: Optional[InstanceNetworkInterfaceSpecificationList]
+    Placement: Optional[SpotPlacement]
+    RamdiskId: Optional[String]
+    SpotPrice: Optional[String]
+    SubnetId: Optional[SubnetId]
+    UserData: Optional[SensitiveUserData]
+    WeightedCapacity: Optional[Double]
+    TagSpecifications: Optional[SpotFleetTagSpecificationList]
+    InstanceRequirements: Optional[InstanceRequirements]
+    SecurityGroups: Optional[GroupIdentifierList]
+
+
+LaunchSpecsList = List[SpotFleetLaunchSpecification]
+
+
+class SpotCapacityRebalance(TypedDict, total=False):
+    ReplacementStrategy: Optional[ReplacementStrategy]
+    TerminationDelay: Optional[Integer]
+
+
+class SpotMaintenanceStrategies(TypedDict, total=False):
+    CapacityRebalance: Optional[SpotCapacityRebalance]
+
+
+class SpotFleetRequestConfigData(TypedDict, total=False):
+    AllocationStrategy: Optional[AllocationStrategy]
+    OnDemandAllocationStrategy: Optional[OnDemandAllocationStrategy]
+    SpotMaintenanceStrategies: Optional[SpotMaintenanceStrategies]
+    ClientToken: Optional[String]
+    ExcessCapacityTerminationPolicy: Optional[ExcessCapacityTerminationPolicy]
+    FulfilledCapacity: Optional[Double]
+    OnDemandFulfilledCapacity: Optional[Double]
+    IamFleetRole: String
+    LaunchSpecifications: Optional[LaunchSpecsList]
+    LaunchTemplateConfigs: Optional[LaunchTemplateConfigList]
+    SpotPrice: Optional[String]
+    TargetCapacity: Integer
+    OnDemandTargetCapacity: Optional[Integer]
+    OnDemandMaxTotalPrice: Optional[String]
+    SpotMaxTotalPrice: Optional[String]
+    TerminateInstancesWithExpiration: Optional[Boolean]
+    Type: Optional[FleetType]
+    ValidFrom: Optional[DateTime]
+    ValidUntil: Optional[DateTime]
+    ReplaceUnhealthyInstances: Optional[Boolean]
+    InstanceInterruptionBehavior: Optional[InstanceInterruptionBehavior]
+    LoadBalancersConfig: Optional[LoadBalancersConfig]
+    InstancePoolsToUseCount: Optional[Integer]
+    Context: Optional[String]
+    TargetCapacityUnitType: Optional[TargetCapacityUnitType]
+    TagSpecifications: Optional[TagSpecificationList]
+
+
+class SpotFleetRequestConfig(TypedDict, total=False):
+    ActivityStatus: Optional[ActivityStatus]
+    CreateTime: Optional[MillisecondDateTime]
+    SpotFleetRequestConfig: Optional[SpotFleetRequestConfigData]
+    SpotFleetRequestId: Optional[String]
+    SpotFleetRequestState: Optional[BatchState]
+    Tags: Optional[TagList]
+
+
+SpotFleetRequestConfigSet = List[SpotFleetRequestConfig]
+
+
+class DescribeSpotFleetRequestsResponse(TypedDict, total=False):
+    NextToken: Optional[String]
+    SpotFleetRequestConfigs: Optional[SpotFleetRequestConfigSet]
+
+
+class DescribeSpotInstanceRequestsRequest(ServiceRequest):
+    NextToken: Optional[String]
+    MaxResults: Optional[Integer]
+    DryRun: Optional[Boolean]
+    SpotInstanceRequestIds: Optional[SpotInstanceRequestIdList]
+    Filters: Optional[FilterList]
+
+
+class SpotInstanceStatus(TypedDict, total=False):
+    Code: Optional[String]
+    Message: Optional[String]
+    UpdateTime: Optional[DateTime]
+
+
+class RunInstancesMonitoringEnabled(TypedDict, total=False):
+    Enabled: Boolean
+
+
+class LaunchSpecification(TypedDict, total=False):
+    UserData: Optional[SensitiveUserData]
+    AddressingType: Optional[String]
+    BlockDeviceMappings: Optional[BlockDeviceMappingList]
+    EbsOptimized: Optional[Boolean]
+    IamInstanceProfile: Optional[IamInstanceProfileSpecification]
+    ImageId: Optional[String]
+    InstanceType: Optional[InstanceType]
+    KernelId: Optional[String]
+    KeyName: Optional[String]
+    NetworkInterfaces: Optional[InstanceNetworkInterfaceSpecificationList]
+    Placement: Optional[SpotPlacement]
+    RamdiskId: Optional[String]
+    SubnetId: Optional[String]
+    SecurityGroups: Optional[GroupIdentifierList]
+    Monitoring: Optional[RunInstancesMonitoringEnabled]
+
+
+class SpotInstanceRequest(TypedDict, total=False):
+    ActualBlockHourlyPrice: Optional[String]
+    AvailabilityZoneGroup: Optional[String]
+    BlockDurationMinutes: Optional[Integer]
+    CreateTime: Optional[DateTime]
+    Fault: Optional[SpotInstanceStateFault]
+    InstanceId: Optional[InstanceId]
+    LaunchGroup: Optional[String]
+    LaunchSpecification: Optional[LaunchSpecification]
+    LaunchedAvailabilityZone: Optional[String]
+    ProductDescription: Optional[RIProductDescription]
+    SpotInstanceRequestId: Optional[String]
+    SpotPrice: Optional[String]
+    State: Optional[SpotInstanceState]
+    Status: Optional[SpotInstanceStatus]
+    Tags: Optional[TagList]
+    Type: Optional[SpotInstanceType]
+    ValidFrom: Optional[DateTime]
+    ValidUntil: Optional[DateTime]
+    InstanceInterruptionBehavior: Optional[InstanceInterruptionBehavior]
+
+
+SpotInstanceRequestList = List[SpotInstanceRequest]
+
+
+class DescribeSpotInstanceRequestsResult(TypedDict, total=False):
+    SpotInstanceRequests: Optional[SpotInstanceRequestList]
+    NextToken: Optional[String]
+
+
+ProductDescriptionList = List[String]
+InstanceTypeList = List[InstanceType]
+
+
+class DescribeSpotPriceHistoryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    StartTime: Optional[DateTime]
+    EndTime: Optional[DateTime]
+    InstanceTypes: Optional[InstanceTypeList]
+    ProductDescriptions: Optional[ProductDescriptionList]
+    Filters: Optional[FilterList]
+    AvailabilityZone: Optional[String]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+
+
+class SpotPrice(TypedDict, total=False):
+    AvailabilityZone: Optional[String]
+    InstanceType: Optional[InstanceType]
+    ProductDescription: Optional[RIProductDescription]
+    SpotPrice: Optional[String]
+    Timestamp: Optional[DateTime]
+
+
+SpotPriceHistoryList = List[SpotPrice]
+
+
+class DescribeSpotPriceHistoryResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    SpotPriceHistory: Optional[SpotPriceHistoryList]
+
+
+class DescribeStaleSecurityGroupsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    MaxResults: Optional[DescribeStaleSecurityGroupsMaxResults]
+    NextToken: Optional[DescribeStaleSecurityGroupsNextToken]
+    VpcId: VpcId
+
+
+UserIdGroupPairSet = List[UserIdGroupPair]
+PrefixListIdSet = List[String]
+IpRanges = List[String]
+
+
+class StaleIpPermission(TypedDict, total=False):
+    FromPort: Optional[Integer]
+    IpProtocol: Optional[String]
+    IpRanges: Optional[IpRanges]
+    PrefixListIds: Optional[PrefixListIdSet]
+    ToPort: Optional[Integer]
+    UserIdGroupPairs: Optional[UserIdGroupPairSet]
+
+
+StaleIpPermissionSet = List[StaleIpPermission]
+
+
+class StaleSecurityGroup(TypedDict, total=False):
+    Description: Optional[String]
+    GroupId: Optional[String]
+    GroupName: Optional[String]
+    StaleIpPermissions: Optional[StaleIpPermissionSet]
+    StaleIpPermissionsEgress: Optional[StaleIpPermissionSet]
+    VpcId: Optional[String]
+
+
+StaleSecurityGroupSet = List[StaleSecurityGroup]
+
+
+class DescribeStaleSecurityGroupsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    StaleSecurityGroupSet: Optional[StaleSecurityGroupSet]
+
+
+ImageIdList = List[ImageId]
+
+
+class DescribeStoreImageTasksRequest(ServiceRequest):
+    ImageIds: Optional[ImageIdList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeStoreImageTasksRequestMaxResults]
+
+
+class StoreImageTaskResult(TypedDict, total=False):
+    AmiId: Optional[String]
+    TaskStartTime: Optional[MillisecondDateTime]
+    Bucket: Optional[String]
+    S3objectKey: Optional[String]
+    ProgressPercentage: Optional[Integer]
+    StoreTaskState: Optional[String]
+    StoreTaskFailureReason: Optional[String]
+
+
+StoreImageTaskResultSet = List[StoreImageTaskResult]
+
+
+class DescribeStoreImageTasksResult(TypedDict, total=False):
+    StoreImageTaskResults: Optional[StoreImageTaskResultSet]
+    NextToken: Optional[String]
+
+
+SubnetIdStringList = List[SubnetId]
+
+
+class DescribeSubnetsRequest(ServiceRequest):
+    Filters: Optional[FilterList]
+    SubnetIds: Optional[SubnetIdStringList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeSubnetsMaxResults]
+    DryRun: Optional[Boolean]
+
+
+SubnetList = List[Subnet]
+
+
+class DescribeSubnetsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    Subnets: Optional[SubnetList]
+
+
+class DescribeTagsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+
+
+class TagDescription(TypedDict, total=False):
+    Key: Optional[String]
+    ResourceId: Optional[String]
+    ResourceType: Optional[ResourceType]
+    Value: Optional[String]
+
+
+TagDescriptionList = List[TagDescription]
+
+
+class DescribeTagsResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    Tags: Optional[TagDescriptionList]
+
+
+TrafficMirrorFilterRuleIdList = List[TrafficMirrorFilterRuleIdWithResolver]
+
+
+class DescribeTrafficMirrorFilterRulesRequest(ServiceRequest):
+    TrafficMirrorFilterRuleIds: Optional[TrafficMirrorFilterRuleIdList]
+    TrafficMirrorFilterId: Optional[TrafficMirrorFilterId]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TrafficMirroringMaxResults]
+    NextToken: Optional[NextToken]
+
+
+TrafficMirrorFilterRuleSet = List[TrafficMirrorFilterRule]
+
+
+class DescribeTrafficMirrorFilterRulesResult(TypedDict, total=False):
+    TrafficMirrorFilterRules: Optional[TrafficMirrorFilterRuleSet]
+    NextToken: Optional[String]
+
+
+TrafficMirrorFilterIdList = List[TrafficMirrorFilterId]
+
+
+class DescribeTrafficMirrorFiltersRequest(ServiceRequest):
+    TrafficMirrorFilterIds: Optional[TrafficMirrorFilterIdList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TrafficMirroringMaxResults]
+    NextToken: Optional[NextToken]
+
+
+TrafficMirrorFilterSet = List[TrafficMirrorFilter]
+
+
+class DescribeTrafficMirrorFiltersResult(TypedDict, total=False):
+    TrafficMirrorFilters: Optional[TrafficMirrorFilterSet]
+    NextToken: Optional[String]
+
+
+TrafficMirrorSessionIdList = List[TrafficMirrorSessionId]
+
+
+class DescribeTrafficMirrorSessionsRequest(ServiceRequest):
+    TrafficMirrorSessionIds: Optional[TrafficMirrorSessionIdList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TrafficMirroringMaxResults]
+    NextToken: Optional[NextToken]
+
+
+TrafficMirrorSessionSet = List[TrafficMirrorSession]
+
+
+class DescribeTrafficMirrorSessionsResult(TypedDict, total=False):
+    TrafficMirrorSessions: Optional[TrafficMirrorSessionSet]
+    NextToken: Optional[String]
+
+
+TrafficMirrorTargetIdList = List[TrafficMirrorTargetId]
+
+
+class DescribeTrafficMirrorTargetsRequest(ServiceRequest):
+    TrafficMirrorTargetIds: Optional[TrafficMirrorTargetIdList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TrafficMirroringMaxResults]
+    NextToken: Optional[NextToken]
+
+
+TrafficMirrorTargetSet = List[TrafficMirrorTarget]
+
+
+class DescribeTrafficMirrorTargetsResult(TypedDict, total=False):
+    TrafficMirrorTargets: Optional[TrafficMirrorTargetSet]
+    NextToken: Optional[String]
+
+
+TransitGatewayAttachmentIdStringList = List[TransitGatewayAttachmentId]
+
+
+class DescribeTransitGatewayAttachmentsRequest(ServiceRequest):
+    TransitGatewayAttachmentIds: Optional[TransitGatewayAttachmentIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayAttachmentAssociation(TypedDict, total=False):
+    TransitGatewayRouteTableId: Optional[String]
+    State: Optional[TransitGatewayAssociationState]
+
+
+class TransitGatewayAttachment(TypedDict, total=False):
+    TransitGatewayAttachmentId: Optional[String]
+    TransitGatewayId: Optional[String]
+    TransitGatewayOwnerId: Optional[String]
+    ResourceOwnerId: Optional[String]
+    ResourceType: Optional[TransitGatewayAttachmentResourceType]
+    ResourceId: Optional[String]
+    State: Optional[TransitGatewayAttachmentState]
+    Association: Optional[TransitGatewayAttachmentAssociation]
+    CreationTime: Optional[DateTime]
+    Tags: Optional[TagList]
+
+
+TransitGatewayAttachmentList = List[TransitGatewayAttachment]
+
+
+class DescribeTransitGatewayAttachmentsResult(TypedDict, total=False):
+    TransitGatewayAttachments: Optional[TransitGatewayAttachmentList]
+    NextToken: Optional[String]
+
+
+TransitGatewayConnectPeerIdStringList = List[TransitGatewayConnectPeerId]
+
+
+class DescribeTransitGatewayConnectPeersRequest(ServiceRequest):
+    TransitGatewayConnectPeerIds: Optional[TransitGatewayConnectPeerIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayConnectPeerList = List[TransitGatewayConnectPeer]
+
+
+class DescribeTransitGatewayConnectPeersResult(TypedDict, total=False):
+    TransitGatewayConnectPeers: Optional[TransitGatewayConnectPeerList]
+    NextToken: Optional[String]
+
+
+class DescribeTransitGatewayConnectsRequest(ServiceRequest):
+    TransitGatewayAttachmentIds: Optional[TransitGatewayAttachmentIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayConnectList = List[TransitGatewayConnect]
+
+
+class DescribeTransitGatewayConnectsResult(TypedDict, total=False):
+    TransitGatewayConnects: Optional[TransitGatewayConnectList]
+    NextToken: Optional[String]
+
+
+TransitGatewayMulticastDomainIdStringList = List[TransitGatewayMulticastDomainId]
+
+
+class DescribeTransitGatewayMulticastDomainsRequest(ServiceRequest):
+    TransitGatewayMulticastDomainIds: Optional[TransitGatewayMulticastDomainIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayMulticastDomainList = List[TransitGatewayMulticastDomain]
+
+
+class DescribeTransitGatewayMulticastDomainsResult(TypedDict, total=False):
+    TransitGatewayMulticastDomains: Optional[TransitGatewayMulticastDomainList]
+    NextToken: Optional[String]
+
+
+class DescribeTransitGatewayPeeringAttachmentsRequest(ServiceRequest):
+    TransitGatewayAttachmentIds: Optional[TransitGatewayAttachmentIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayPeeringAttachmentList = List[TransitGatewayPeeringAttachment]
+
+
+class DescribeTransitGatewayPeeringAttachmentsResult(TypedDict, total=False):
+    TransitGatewayPeeringAttachments: Optional[TransitGatewayPeeringAttachmentList]
+    NextToken: Optional[String]
+
+
+TransitGatewayPolicyTableIdStringList = List[TransitGatewayPolicyTableId]
+
+
+class DescribeTransitGatewayPolicyTablesRequest(ServiceRequest):
+    TransitGatewayPolicyTableIds: Optional[TransitGatewayPolicyTableIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayPolicyTableList = List[TransitGatewayPolicyTable]
+
+
+class DescribeTransitGatewayPolicyTablesResult(TypedDict, total=False):
+    TransitGatewayPolicyTables: Optional[TransitGatewayPolicyTableList]
+    NextToken: Optional[String]
+
+
+TransitGatewayRouteTableAnnouncementIdStringList = List[TransitGatewayRouteTableAnnouncementId]
+
+
+class DescribeTransitGatewayRouteTableAnnouncementsRequest(ServiceRequest):
+    TransitGatewayRouteTableAnnouncementIds: Optional[
+        TransitGatewayRouteTableAnnouncementIdStringList
+    ]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayRouteTableAnnouncementList = List[TransitGatewayRouteTableAnnouncement]
+
+
+class DescribeTransitGatewayRouteTableAnnouncementsResult(TypedDict, total=False):
+    TransitGatewayRouteTableAnnouncements: Optional[TransitGatewayRouteTableAnnouncementList]
+    NextToken: Optional[String]
+
+
+TransitGatewayRouteTableIdStringList = List[TransitGatewayRouteTableId]
+
+
+class DescribeTransitGatewayRouteTablesRequest(ServiceRequest):
+    TransitGatewayRouteTableIds: Optional[TransitGatewayRouteTableIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayRouteTableList = List[TransitGatewayRouteTable]
+
+
+class DescribeTransitGatewayRouteTablesResult(TypedDict, total=False):
+    TransitGatewayRouteTables: Optional[TransitGatewayRouteTableList]
+    NextToken: Optional[String]
+
+
+class DescribeTransitGatewayVpcAttachmentsRequest(ServiceRequest):
+    TransitGatewayAttachmentIds: Optional[TransitGatewayAttachmentIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayVpcAttachmentList = List[TransitGatewayVpcAttachment]
+
+
+class DescribeTransitGatewayVpcAttachmentsResult(TypedDict, total=False):
+    TransitGatewayVpcAttachments: Optional[TransitGatewayVpcAttachmentList]
+    NextToken: Optional[String]
+
+
+TransitGatewayIdStringList = List[TransitGatewayId]
+
+
+class DescribeTransitGatewaysRequest(ServiceRequest):
+    TransitGatewayIds: Optional[TransitGatewayIdStringList]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayList = List[TransitGateway]
+
+
+class DescribeTransitGatewaysResult(TypedDict, total=False):
+    TransitGateways: Optional[TransitGatewayList]
+    NextToken: Optional[String]
+
+
+TrunkInterfaceAssociationIdList = List[TrunkInterfaceAssociationId]
+
+
+class DescribeTrunkInterfaceAssociationsRequest(ServiceRequest):
+    AssociationIds: Optional[TrunkInterfaceAssociationIdList]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    NextToken: Optional[String]
+    MaxResults: Optional[DescribeTrunkInterfaceAssociationsMaxResults]
+
+
+TrunkInterfaceAssociationList = List[TrunkInterfaceAssociation]
+
+
+class DescribeTrunkInterfaceAssociationsResult(TypedDict, total=False):
+    InterfaceAssociations: Optional[TrunkInterfaceAssociationList]
+    NextToken: Optional[String]
+
+
+VerifiedAccessEndpointIdList = List[VerifiedAccessEndpointId]
+
+
+class DescribeVerifiedAccessEndpointsRequest(ServiceRequest):
+    VerifiedAccessEndpointIds: Optional[VerifiedAccessEndpointIdList]
+    VerifiedAccessInstanceId: Optional[VerifiedAccessInstanceId]
+    VerifiedAccessGroupId: Optional[VerifiedAccessGroupId]
+    MaxResults: Optional[DescribeVerifiedAccessEndpointsMaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+VerifiedAccessEndpointList = List[VerifiedAccessEndpoint]
+
+
+class DescribeVerifiedAccessEndpointsResult(TypedDict, total=False):
+    VerifiedAccessEndpoints: Optional[VerifiedAccessEndpointList]
+    NextToken: Optional[NextToken]
+
+
+VerifiedAccessGroupIdList = List[VerifiedAccessGroupId]
+
+
+class DescribeVerifiedAccessGroupsRequest(ServiceRequest):
+    VerifiedAccessGroupIds: Optional[VerifiedAccessGroupIdList]
+    VerifiedAccessInstanceId: Optional[VerifiedAccessInstanceId]
+    MaxResults: Optional[DescribeVerifiedAccessGroupMaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+VerifiedAccessGroupList = List[VerifiedAccessGroup]
+
+
+class DescribeVerifiedAccessGroupsResult(TypedDict, total=False):
+    VerifiedAccessGroups: Optional[VerifiedAccessGroupList]
+    NextToken: Optional[NextToken]
+
+
+VerifiedAccessInstanceIdList = List[VerifiedAccessInstanceId]
+
+
+class DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(ServiceRequest):
+    VerifiedAccessInstanceIds: Optional[VerifiedAccessInstanceIdList]
+    MaxResults: Optional[DescribeVerifiedAccessInstanceLoggingConfigurationsMaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class VerifiedAccessLogDeliveryStatus(TypedDict, total=False):
+    Code: Optional[VerifiedAccessLogDeliveryStatusCode]
+    Message: Optional[String]
+
+
+class VerifiedAccessLogKinesisDataFirehoseDestination(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    DeliveryStatus: Optional[VerifiedAccessLogDeliveryStatus]
+    DeliveryStream: Optional[String]
+
+
+class VerifiedAccessLogCloudWatchLogsDestination(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    DeliveryStatus: Optional[VerifiedAccessLogDeliveryStatus]
+    LogGroup: Optional[String]
+
+
+class VerifiedAccessLogS3Destination(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    DeliveryStatus: Optional[VerifiedAccessLogDeliveryStatus]
+    BucketName: Optional[String]
+    Prefix: Optional[String]
+    BucketOwner: Optional[String]
+
+
+class VerifiedAccessLogs(TypedDict, total=False):
+    S3: Optional[VerifiedAccessLogS3Destination]
+    CloudWatchLogs: Optional[VerifiedAccessLogCloudWatchLogsDestination]
+    KinesisDataFirehose: Optional[VerifiedAccessLogKinesisDataFirehoseDestination]
+    LogVersion: Optional[String]
+    IncludeTrustContext: Optional[Boolean]
+
+
+class VerifiedAccessInstanceLoggingConfiguration(TypedDict, total=False):
+    VerifiedAccessInstanceId: Optional[String]
+    AccessLogs: Optional[VerifiedAccessLogs]
+
+
+VerifiedAccessInstanceLoggingConfigurationList = List[VerifiedAccessInstanceLoggingConfiguration]
+
+
+class DescribeVerifiedAccessInstanceLoggingConfigurationsResult(TypedDict, total=False):
+    LoggingConfigurations: Optional[VerifiedAccessInstanceLoggingConfigurationList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeVerifiedAccessInstancesRequest(ServiceRequest):
+    VerifiedAccessInstanceIds: Optional[VerifiedAccessInstanceIdList]
+    MaxResults: Optional[DescribeVerifiedAccessInstancesMaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+VerifiedAccessInstanceList = List[VerifiedAccessInstance]
+
+
+class DescribeVerifiedAccessInstancesResult(TypedDict, total=False):
+    VerifiedAccessInstances: Optional[VerifiedAccessInstanceList]
+    NextToken: Optional[NextToken]
+
+
+VerifiedAccessTrustProviderIdList = List[VerifiedAccessTrustProviderId]
+
+
+class DescribeVerifiedAccessTrustProvidersRequest(ServiceRequest):
+    VerifiedAccessTrustProviderIds: Optional[VerifiedAccessTrustProviderIdList]
+    MaxResults: Optional[DescribeVerifiedAccessTrustProvidersMaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+VerifiedAccessTrustProviderList = List[VerifiedAccessTrustProvider]
+
+
+class DescribeVerifiedAccessTrustProvidersResult(TypedDict, total=False):
+    VerifiedAccessTrustProviders: Optional[VerifiedAccessTrustProviderList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeVolumeAttributeRequest(ServiceRequest): + Attribute: VolumeAttributeName + VolumeId: VolumeId + DryRun: Optional[Boolean] + + +class DescribeVolumeAttributeResult(TypedDict, total=False): + AutoEnableIO: Optional[AttributeBooleanValue] + ProductCodes: Optional[ProductCodeList] + VolumeId: Optional[String] + + +class DescribeVolumeStatusRequest(ServiceRequest): + MaxResults: Optional[Integer] + NextToken: Optional[String] + VolumeIds: Optional[VolumeIdStringList] + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + + +class VolumeStatusAttachmentStatus(TypedDict, total=False): + IoPerformance: Optional[String] + InstanceId: Optional[String] + + +VolumeStatusAttachmentStatusList = List[VolumeStatusAttachmentStatus] + + +class VolumeStatusDetails(TypedDict, total=False): + Name: Optional[VolumeStatusName] + Status: Optional[String] + + +VolumeStatusDetailsList = List[VolumeStatusDetails] + + +class VolumeStatusInfo(TypedDict, total=False): + Details: Optional[VolumeStatusDetailsList] + Status: Optional[VolumeStatusInfoStatus] + + +class VolumeStatusEvent(TypedDict, total=False): + Description: Optional[String] + EventId: Optional[String] + EventType: Optional[String] + NotAfter: Optional[MillisecondDateTime] + NotBefore: Optional[MillisecondDateTime] + InstanceId: Optional[String] + + +VolumeStatusEventsList = List[VolumeStatusEvent] + + +class VolumeStatusAction(TypedDict, total=False): + Code: Optional[String] + Description: Optional[String] + EventId: Optional[String] + EventType: Optional[String] + + +VolumeStatusActionsList = List[VolumeStatusAction] + + +class VolumeStatusItem(TypedDict, total=False): + Actions: Optional[VolumeStatusActionsList] + AvailabilityZone: Optional[String] + OutpostArn: Optional[String] + Events: Optional[VolumeStatusEventsList] + VolumeId: Optional[String] + VolumeStatus: Optional[VolumeStatusInfo] + AttachmentStatuses: Optional[VolumeStatusAttachmentStatusList] + AvailabilityZoneId: Optional[String] + + +VolumeStatusList = List[VolumeStatusItem] + + +class DescribeVolumeStatusResult(TypedDict, total=False): + NextToken: Optional[String] + VolumeStatuses: Optional[VolumeStatusList] + + +class DescribeVolumesModificationsRequest(ServiceRequest): + DryRun: Optional[Boolean] + VolumeIds: Optional[VolumeIdStringList] + Filters: Optional[FilterList] + NextToken: Optional[String] + MaxResults: Optional[Integer] + + +class VolumeModification(TypedDict, total=False): + VolumeId: Optional[String] + ModificationState: Optional[VolumeModificationState] + StatusMessage: Optional[String] + TargetSize: Optional[Integer] + TargetIops: Optional[Integer] + TargetVolumeType: Optional[VolumeType] + TargetThroughput: Optional[Integer] + TargetMultiAttachEnabled: Optional[Boolean] + OriginalSize: Optional[Integer] + OriginalIops: Optional[Integer] + OriginalVolumeType: Optional[VolumeType] + OriginalThroughput: Optional[Integer] + OriginalMultiAttachEnabled: Optional[Boolean] + Progress: Optional[Long] + StartTime: Optional[DateTime] + EndTime: Optional[DateTime] + + +VolumeModificationList = List[VolumeModification] + + +class DescribeVolumesModificationsResult(TypedDict, total=False): + NextToken: Optional[String] + VolumesModifications: Optional[VolumeModificationList] + + +class DescribeVolumesRequest(ServiceRequest): + VolumeIds: Optional[VolumeIdStringList] + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + NextToken: Optional[String] + MaxResults: Optional[Integer] + + +class VolumeAttachment(TypedDict, total=False): + DeleteOnTermination: 
Optional[Boolean] + AssociatedResource: Optional[String] + InstanceOwningService: Optional[String] + VolumeId: Optional[String] + InstanceId: Optional[String] + Device: Optional[String] + State: Optional[VolumeAttachmentState] + AttachTime: Optional[DateTime] + + +VolumeAttachmentList = List[VolumeAttachment] + + +class Volume(TypedDict, total=False): + OutpostArn: Optional[String] + Iops: Optional[Integer] + Tags: Optional[TagList] + VolumeType: Optional[VolumeType] + FastRestored: Optional[Boolean] + MultiAttachEnabled: Optional[Boolean] + Throughput: Optional[Integer] + SseType: Optional[SSEType] + Operator: Optional[OperatorResponse] + VolumeInitializationRate: Optional[Integer] + VolumeId: Optional[String] + Size: Optional[Integer] + SnapshotId: Optional[String] + AvailabilityZone: Optional[String] + State: Optional[VolumeState] + CreateTime: Optional[DateTime] + Attachments: Optional[VolumeAttachmentList] + Encrypted: Optional[Boolean] + KmsKeyId: Optional[String] + + +VolumeList = List[Volume] + + +class DescribeVolumesResult(TypedDict, total=False): + NextToken: Optional[String] + Volumes: Optional[VolumeList] + + +class DescribeVpcAttributeRequest(ServiceRequest): + Attribute: VpcAttributeName + VpcId: VpcId + DryRun: Optional[Boolean] + + +class DescribeVpcAttributeResult(TypedDict, total=False): + EnableDnsHostnames: Optional[AttributeBooleanValue] + EnableDnsSupport: Optional[AttributeBooleanValue] + EnableNetworkAddressUsageMetrics: Optional[AttributeBooleanValue] + VpcId: Optional[String] + + +VpcBlockPublicAccessExclusionIdList = List[VpcBlockPublicAccessExclusionId] + + +class DescribeVpcBlockPublicAccessExclusionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + ExclusionIds: Optional[VpcBlockPublicAccessExclusionIdList] + NextToken: Optional[String] + MaxResults: Optional[DescribeVpcBlockPublicAccessExclusionsMaxResults] + + +VpcBlockPublicAccessExclusionList = List[VpcBlockPublicAccessExclusion] + + +class DescribeVpcBlockPublicAccessExclusionsResult(TypedDict, total=False): + VpcBlockPublicAccessExclusions: Optional[VpcBlockPublicAccessExclusionList] + NextToken: Optional[String] + + +class DescribeVpcBlockPublicAccessOptionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + + +class VpcBlockPublicAccessOptions(TypedDict, total=False): + AwsAccountId: Optional[String] + AwsRegion: Optional[String] + State: Optional[VpcBlockPublicAccessState] + InternetGatewayBlockMode: Optional[InternetGatewayBlockMode] + Reason: Optional[String] + LastUpdateTimestamp: Optional[MillisecondDateTime] + ManagedBy: Optional[ManagedBy] + ExclusionsAllowed: Optional[VpcBlockPublicAccessExclusionsAllowed] + + +class DescribeVpcBlockPublicAccessOptionsResult(TypedDict, total=False): + VpcBlockPublicAccessOptions: Optional[VpcBlockPublicAccessOptions] + + +VpcClassicLinkIdList = List[VpcId] + + +class DescribeVpcClassicLinkDnsSupportRequest(ServiceRequest): + VpcIds: Optional[VpcClassicLinkIdList] + MaxResults: Optional[DescribeVpcClassicLinkDnsSupportMaxResults] + NextToken: Optional[DescribeVpcClassicLinkDnsSupportNextToken] + + +class DescribeVpcClassicLinkDnsSupportResult(TypedDict, total=False): + NextToken: Optional[DescribeVpcClassicLinkDnsSupportNextToken] + Vpcs: Optional[ClassicLinkDnsSupportList] + + +class DescribeVpcClassicLinkRequest(ServiceRequest): + DryRun: Optional[Boolean] + VpcIds: Optional[VpcClassicLinkIdList] + Filters: Optional[FilterList] + + +class VpcClassicLink(TypedDict, total=False): + ClassicLinkEnabled: Optional[Boolean] + 
Tags: Optional[TagList] + VpcId: Optional[String] + + +VpcClassicLinkList = List[VpcClassicLink] + + +class DescribeVpcClassicLinkResult(TypedDict, total=False): + Vpcs: Optional[VpcClassicLinkList] + + +class DescribeVpcEndpointAssociationsRequest(ServiceRequest): + DryRun: Optional[Boolean] + VpcEndpointIds: Optional[VpcEndpointIdList] + Filters: Optional[FilterList] + MaxResults: Optional[maxResults] + NextToken: Optional[String] + + +class VpcEndpointAssociation(TypedDict, total=False): + Id: Optional[String] + VpcEndpointId: Optional[VpcEndpointId] + ServiceNetworkArn: Optional[ServiceNetworkArn] + ServiceNetworkName: Optional[String] + AssociatedResourceAccessibility: Optional[String] + FailureReason: Optional[String] + FailureCode: Optional[String] + DnsEntry: Optional[DnsEntry] + PrivateDnsEntry: Optional[DnsEntry] + AssociatedResourceArn: Optional[String] + ResourceConfigurationGroupArn: Optional[String] + Tags: Optional[TagList] + + +VpcEndpointAssociationSet = List[VpcEndpointAssociation] + + +class DescribeVpcEndpointAssociationsResult(TypedDict, total=False): + VpcEndpointAssociations: Optional[VpcEndpointAssociationSet] + NextToken: Optional[String] + + +class DescribeVpcEndpointConnectionNotificationsRequest(ServiceRequest): + DryRun: Optional[Boolean] + ConnectionNotificationId: Optional[ConnectionNotificationId] + Filters: Optional[FilterList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + + +class DescribeVpcEndpointConnectionNotificationsResult(TypedDict, total=False): + ConnectionNotificationSet: Optional[ConnectionNotificationSet] + NextToken: Optional[String] + + +class DescribeVpcEndpointConnectionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + Filters: Optional[FilterList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + + +class VpcEndpointConnection(TypedDict, total=False): + ServiceId: Optional[String] + VpcEndpointId: Optional[String] + VpcEndpointOwner: Optional[String] + VpcEndpointState: Optional[State] + CreationTimestamp: Optional[MillisecondDateTime] + DnsEntries: Optional[DnsEntrySet] + NetworkLoadBalancerArns: Optional[ValueStringList] + GatewayLoadBalancerArns: Optional[ValueStringList] + IpAddressType: Optional[IpAddressType] + VpcEndpointConnectionId: Optional[String] + Tags: Optional[TagList] + VpcEndpointRegion: Optional[String] + + +VpcEndpointConnectionSet = List[VpcEndpointConnection] + + +class DescribeVpcEndpointConnectionsResult(TypedDict, total=False): + VpcEndpointConnections: Optional[VpcEndpointConnectionSet] + NextToken: Optional[String] + + +class DescribeVpcEndpointServiceConfigurationsRequest(ServiceRequest): + DryRun: Optional[Boolean] + ServiceIds: Optional[VpcEndpointServiceIdList] + Filters: Optional[FilterList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + + +ServiceConfigurationSet = List[ServiceConfiguration] + + +class DescribeVpcEndpointServiceConfigurationsResult(TypedDict, total=False): + ServiceConfigurations: Optional[ServiceConfigurationSet] + NextToken: Optional[String] + + +class DescribeVpcEndpointServicePermissionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + ServiceId: VpcEndpointServiceId + Filters: Optional[FilterList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + + +class DescribeVpcEndpointServicePermissionsResult(TypedDict, total=False): + AllowedPrincipals: Optional[AllowedPrincipalSet] + NextToken: Optional[String] + + +class DescribeVpcEndpointServicesRequest(ServiceRequest): + DryRun: Optional[Boolean] + ServiceNames: 
Optional[ValueStringList] + Filters: Optional[FilterList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + ServiceRegions: Optional[ValueStringList] + + +class PrivateDnsDetails(TypedDict, total=False): + PrivateDnsName: Optional[String] + + +PrivateDnsDetailsSet = List[PrivateDnsDetails] + + +class ServiceDetail(TypedDict, total=False): + ServiceName: Optional[String] + ServiceId: Optional[String] + ServiceType: Optional[ServiceTypeDetailSet] + ServiceRegion: Optional[String] + AvailabilityZones: Optional[ValueStringList] + Owner: Optional[String] + BaseEndpointDnsNames: Optional[ValueStringList] + PrivateDnsName: Optional[String] + PrivateDnsNames: Optional[PrivateDnsDetailsSet] + VpcEndpointPolicySupported: Optional[Boolean] + AcceptanceRequired: Optional[Boolean] + ManagesVpcEndpoints: Optional[Boolean] + PayerResponsibility: Optional[PayerResponsibility] + Tags: Optional[TagList] + PrivateDnsNameVerificationState: Optional[DnsNameState] + SupportedIpAddressTypes: Optional[SupportedIpAddressTypes] + + +ServiceDetailSet = List[ServiceDetail] + + +class DescribeVpcEndpointServicesResult(TypedDict, total=False): + ServiceNames: Optional[ValueStringList] + ServiceDetails: Optional[ServiceDetailSet] + NextToken: Optional[String] + + +class DescribeVpcEndpointsRequest(ServiceRequest): + DryRun: Optional[Boolean] + VpcEndpointIds: Optional[VpcEndpointIdList] + Filters: Optional[FilterList] + MaxResults: Optional[Integer] + NextToken: Optional[String] + + +VpcEndpointSet = List[VpcEndpoint] + + +class DescribeVpcEndpointsResult(TypedDict, total=False): + VpcEndpoints: Optional[VpcEndpointSet] + NextToken: Optional[String] + + +VpcPeeringConnectionIdList = List[VpcPeeringConnectionId] + + +class DescribeVpcPeeringConnectionsRequest(ServiceRequest): + NextToken: Optional[String] + MaxResults: Optional[DescribeVpcPeeringConnectionsMaxResults] + DryRun: Optional[Boolean] + VpcPeeringConnectionIds: Optional[VpcPeeringConnectionIdList] + Filters: Optional[FilterList] + + +VpcPeeringConnectionList = List[VpcPeeringConnection] + + +class DescribeVpcPeeringConnectionsResult(TypedDict, total=False): + VpcPeeringConnections: Optional[VpcPeeringConnectionList] + NextToken: Optional[String] + + +VpcIdStringList = List[VpcId] + + +class DescribeVpcsRequest(ServiceRequest): + Filters: Optional[FilterList] + VpcIds: Optional[VpcIdStringList] + NextToken: Optional[String] + MaxResults: Optional[DescribeVpcsMaxResults] + DryRun: Optional[Boolean] + + +VpcList = List[Vpc] + + +class DescribeVpcsResult(TypedDict, total=False): + NextToken: Optional[String] + Vpcs: Optional[VpcList] + + +VpnConnectionIdStringList = List[VpnConnectionId] + + +class DescribeVpnConnectionsRequest(ServiceRequest): + Filters: Optional[FilterList] + VpnConnectionIds: Optional[VpnConnectionIdStringList] + DryRun: Optional[Boolean] + + +VpnConnectionList = List[VpnConnection] + + +class DescribeVpnConnectionsResult(TypedDict, total=False): + VpnConnections: Optional[VpnConnectionList] + + +VpnGatewayIdStringList = List[VpnGatewayId] + + +class DescribeVpnGatewaysRequest(ServiceRequest): + Filters: Optional[FilterList] + VpnGatewayIds: Optional[VpnGatewayIdStringList] + DryRun: Optional[Boolean] + + +VpnGatewayList = List[VpnGateway] + + +class DescribeVpnGatewaysResult(TypedDict, total=False): + VpnGateways: Optional[VpnGatewayList] + + +class DetachClassicLinkVpcRequest(ServiceRequest): + DryRun: Optional[Boolean] + InstanceId: InstanceId + VpcId: VpcId + + +class DetachClassicLinkVpcResult(TypedDict, total=False): + 
+    Return: Optional[Boolean]
+
+
+class DetachInternetGatewayRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InternetGatewayId: InternetGatewayId
+    VpcId: VpcId
+
+
+class DetachNetworkInterfaceRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    AttachmentId: NetworkInterfaceAttachmentId
+    Force: Optional[Boolean]
+
+
+class DetachVerifiedAccessTrustProviderRequest(ServiceRequest):
+    VerifiedAccessInstanceId: VerifiedAccessInstanceId
+    VerifiedAccessTrustProviderId: VerifiedAccessTrustProviderId
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class DetachVerifiedAccessTrustProviderResult(TypedDict, total=False):
+    VerifiedAccessTrustProvider: Optional[VerifiedAccessTrustProvider]
+    VerifiedAccessInstance: Optional[VerifiedAccessInstance]
+
+
+class DetachVolumeRequest(ServiceRequest):
+    Device: Optional[String]
+    Force: Optional[Boolean]
+    InstanceId: Optional[InstanceIdForResolver]
+    VolumeId: VolumeIdWithResolver
+    DryRun: Optional[Boolean]
+
+
+class DetachVpnGatewayRequest(ServiceRequest):
+    VpcId: VpcId
+    VpnGatewayId: VpnGatewayId
+    DryRun: Optional[Boolean]
+
+
+DeviceTrustProviderTypeList = List[DeviceTrustProviderType]
+
+
+class DisableAddressTransferRequest(ServiceRequest):
+    AllocationId: AllocationId
+    DryRun: Optional[Boolean]
+
+
+class DisableAddressTransferResult(TypedDict, total=False):
+    AddressTransfer: Optional[AddressTransfer]
+
+
+class DisableAllowedImagesSettingsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class DisableAllowedImagesSettingsResult(TypedDict, total=False):
+    AllowedImagesSettingsState: Optional[AllowedImagesSettingsDisabledState]
+
+
+class DisableAwsNetworkPerformanceMetricSubscriptionRequest(ServiceRequest):
+    Source: Optional[String]
+    Destination: Optional[String]
+    Metric: Optional[MetricType]
+    Statistic: Optional[StatisticType]
+    DryRun: Optional[Boolean]
+
+
+class DisableAwsNetworkPerformanceMetricSubscriptionResult(TypedDict, total=False):
+    Output: Optional[Boolean]
+
+
+class DisableEbsEncryptionByDefaultRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class DisableEbsEncryptionByDefaultResult(TypedDict, total=False):
+    EbsEncryptionByDefault: Optional[Boolean]
+
+
+class DisableFastLaunchRequest(ServiceRequest):
+    ImageId: ImageId
+    Force: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class DisableFastLaunchResult(TypedDict, total=False):
+    ImageId: Optional[ImageId]
+    ResourceType: Optional[FastLaunchResourceType]
+    SnapshotConfiguration: Optional[FastLaunchSnapshotConfigurationResponse]
+    LaunchTemplate: Optional[FastLaunchLaunchTemplateSpecificationResponse]
+    MaxParallelLaunches: Optional[Integer]
+    OwnerId: Optional[String]
+    State: Optional[FastLaunchStateCode]
+    StateTransitionReason: Optional[String]
+    StateTransitionTime: Optional[MillisecondDateTime]
+
+
+class DisableFastSnapshotRestoreStateError(TypedDict, total=False):
+    Code: Optional[String]
+    Message: Optional[String]
+
+
+class DisableFastSnapshotRestoreStateErrorItem(TypedDict, total=False):
+    AvailabilityZone: Optional[String]
+    Error: Optional[DisableFastSnapshotRestoreStateError]
+
+
+DisableFastSnapshotRestoreStateErrorSet = List[DisableFastSnapshotRestoreStateErrorItem]
+
+
+class DisableFastSnapshotRestoreErrorItem(TypedDict, total=False):
+    SnapshotId: Optional[String]
+    FastSnapshotRestoreStateErrors: Optional[DisableFastSnapshotRestoreStateErrorSet]
+
+
+DisableFastSnapshotRestoreErrorSet = List[DisableFastSnapshotRestoreErrorItem]
+
+
+class DisableFastSnapshotRestoreSuccessItem(TypedDict, total=False):
+    SnapshotId: Optional[String]
+    AvailabilityZone: Optional[String]
+    State: Optional[FastSnapshotRestoreStateCode]
+    StateTransitionReason: Optional[String]
+    OwnerId: Optional[String]
+    OwnerAlias: Optional[String]
+    EnablingTime: Optional[MillisecondDateTime]
+    OptimizingTime: Optional[MillisecondDateTime]
+    EnabledTime: Optional[MillisecondDateTime]
+    DisablingTime: Optional[MillisecondDateTime]
+    DisabledTime: Optional[MillisecondDateTime]
+
+
+DisableFastSnapshotRestoreSuccessSet = List[DisableFastSnapshotRestoreSuccessItem]
+
+
+class DisableFastSnapshotRestoresRequest(ServiceRequest):
+    AvailabilityZones: AvailabilityZoneStringList
+    SourceSnapshotIds: SnapshotIdStringList
+    DryRun: Optional[Boolean]
+
+
+class DisableFastSnapshotRestoresResult(TypedDict, total=False):
+    Successful: Optional[DisableFastSnapshotRestoreSuccessSet]
+    Unsuccessful: Optional[DisableFastSnapshotRestoreErrorSet]
+
+
+class DisableImageBlockPublicAccessRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class DisableImageBlockPublicAccessResult(TypedDict, total=False):
+    ImageBlockPublicAccessState: Optional[ImageBlockPublicAccessDisabledState]
+
+
+class DisableImageDeprecationRequest(ServiceRequest):
+    ImageId: ImageId
+    DryRun: Optional[Boolean]
+
+
+class DisableImageDeprecationResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DisableImageDeregistrationProtectionRequest(ServiceRequest):
+    ImageId: ImageId
+    DryRun: Optional[Boolean]
+
+
+class DisableImageDeregistrationProtectionResult(TypedDict, total=False):
+    Return: Optional[String]
+
+
+class DisableImageRequest(ServiceRequest):
+    ImageId: ImageId
+    DryRun: Optional[Boolean]
+
+
+class DisableImageResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DisableIpamOrganizationAdminAccountRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    DelegatedAdminAccountId: String
+
+
+class DisableIpamOrganizationAdminAccountResult(TypedDict, total=False):
+    Success: Optional[Boolean]
+
+
+class DisableRouteServerPropagationRequest(ServiceRequest):
+    RouteServerId: RouteServerId
+    RouteTableId: RouteTableId
+    DryRun: Optional[Boolean]
+
+
+class RouteServerPropagation(TypedDict, total=False):
+    RouteServerId: Optional[RouteServerId]
+    RouteTableId: Optional[RouteTableId]
+    State: Optional[RouteServerPropagationState]
+
+
+class DisableRouteServerPropagationResult(TypedDict, total=False):
+    RouteServerPropagation: Optional[RouteServerPropagation]
+
+
+class DisableSerialConsoleAccessRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class DisableSerialConsoleAccessResult(TypedDict, total=False):
+    SerialConsoleAccessEnabled: Optional[Boolean]
+
+
+class DisableSnapshotBlockPublicAccessRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class DisableSnapshotBlockPublicAccessResult(TypedDict, total=False):
+    State: Optional[SnapshotBlockPublicAccessState]
+
+
+class DisableTransitGatewayRouteTablePropagationRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId]
+    DryRun: Optional[Boolean]
+    TransitGatewayRouteTableAnnouncementId: Optional[TransitGatewayRouteTableAnnouncementId]
+
+
+class TransitGatewayPropagation(TypedDict, total=False):
+    TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId]
+    ResourceId: Optional[String]
+    ResourceType: Optional[TransitGatewayAttachmentResourceType]
+    TransitGatewayRouteTableId: Optional[String]
+    State: Optional[TransitGatewayPropagationState]
+    TransitGatewayRouteTableAnnouncementId: Optional[TransitGatewayRouteTableAnnouncementId]
+
+
+class DisableTransitGatewayRouteTablePropagationResult(TypedDict, total=False):
+    Propagation: Optional[TransitGatewayPropagation]
+
+
+class DisableVgwRoutePropagationRequest(ServiceRequest):
+    GatewayId: VpnGatewayId
+    RouteTableId: RouteTableId
+    DryRun: Optional[Boolean]
+
+
+class DisableVpcClassicLinkDnsSupportRequest(ServiceRequest):
+    VpcId: Optional[VpcId]
+
+
+class DisableVpcClassicLinkDnsSupportResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DisableVpcClassicLinkRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    VpcId: VpcId
+
+
+class DisableVpcClassicLinkResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DisassociateAddressRequest(ServiceRequest):
+    AssociationId: Optional[ElasticIpAssociationId]
+    PublicIp: Optional[EipAllocationPublicIp]
+    DryRun: Optional[Boolean]
+
+
+class DisassociateCapacityReservationBillingOwnerRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    CapacityReservationId: CapacityReservationId
+    UnusedReservationBillingOwnerId: AccountID
+
+
+class DisassociateCapacityReservationBillingOwnerResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DisassociateClientVpnTargetNetworkRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    AssociationId: String
+    DryRun: Optional[Boolean]
+
+
+class DisassociateClientVpnTargetNetworkResult(TypedDict, total=False):
+    AssociationId: Optional[String]
+    Status: Optional[AssociationStatus]
+
+
+class DisassociateEnclaveCertificateIamRoleRequest(ServiceRequest):
+    CertificateArn: CertificateId
+    RoleArn: RoleId
+    DryRun: Optional[Boolean]
+
+
+class DisassociateEnclaveCertificateIamRoleResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class DisassociateIamInstanceProfileRequest(ServiceRequest):
+    AssociationId: IamInstanceProfileAssociationId
+
+
+class DisassociateIamInstanceProfileResult(TypedDict, total=False):
+    IamInstanceProfileAssociation: Optional[IamInstanceProfileAssociation]
+
+
+class InstanceEventWindowDisassociationRequest(TypedDict, total=False):
+    InstanceIds: Optional[InstanceIdList]
+    InstanceTags: Optional[TagList]
+    DedicatedHostIds: Optional[DedicatedHostIdList]
+
+
+class DisassociateInstanceEventWindowRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceEventWindowId: InstanceEventWindowId
+    AssociationTarget: InstanceEventWindowDisassociationRequest
+
+
+class DisassociateInstanceEventWindowResult(TypedDict, total=False):
+    InstanceEventWindow: Optional[InstanceEventWindow]
+
+
+class DisassociateIpamByoasnRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Asn: String
+    Cidr: String
+
+
+class DisassociateIpamByoasnResult(TypedDict, total=False):
+    AsnAssociation: Optional[AsnAssociation]
+
+
+class DisassociateIpamResourceDiscoveryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamResourceDiscoveryAssociationId: IpamResourceDiscoveryAssociationId
+
+
+class DisassociateIpamResourceDiscoveryResult(TypedDict, total=False):
+    IpamResourceDiscoveryAssociation: Optional[IpamResourceDiscoveryAssociation]
+
+
+EipAssociationIdList = List[ElasticIpAssociationId]
+
+
+class DisassociateNatGatewayAddressRequest(ServiceRequest):
+    NatGatewayId: NatGatewayId
+    AssociationIds: EipAssociationIdList
+    MaxDrainDurationSeconds: Optional[DrainSeconds]
+    DryRun: Optional[Boolean]
+
+
+class DisassociateNatGatewayAddressResult(TypedDict, total=False):
+    NatGatewayId: Optional[NatGatewayId]
+    NatGatewayAddresses: Optional[NatGatewayAddressList]
+
+
+class DisassociateRouteServerRequest(ServiceRequest):
+    RouteServerId: RouteServerId
+    VpcId: VpcId
+    DryRun: Optional[Boolean]
+
+
+class DisassociateRouteServerResult(TypedDict, total=False):
+    RouteServerAssociation: Optional[RouteServerAssociation]
+
+
+class DisassociateRouteTableRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    AssociationId: RouteTableAssociationId
+
+
+class DisassociateSecurityGroupVpcRequest(ServiceRequest):
+    GroupId: DisassociateSecurityGroupVpcSecurityGroupId
+    VpcId: String
+    DryRun: Optional[Boolean]
+
+
+class DisassociateSecurityGroupVpcResult(TypedDict, total=False):
+    State: Optional[SecurityGroupVpcAssociationState]
+
+
+class DisassociateSubnetCidrBlockRequest(ServiceRequest):
+    AssociationId: SubnetCidrAssociationId
+
+
+class DisassociateSubnetCidrBlockResult(TypedDict, total=False):
+    Ipv6CidrBlockAssociation: Optional[SubnetIpv6CidrBlockAssociation]
+    SubnetId: Optional[String]
+
+
+class DisassociateTransitGatewayMulticastDomainRequest(ServiceRequest):
+    TransitGatewayMulticastDomainId: TransitGatewayMulticastDomainId
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    SubnetIds: TransitGatewaySubnetIdList
+    DryRun: Optional[Boolean]
+
+
+class DisassociateTransitGatewayMulticastDomainResult(TypedDict, total=False):
+    Associations: Optional[TransitGatewayMulticastDomainAssociations]
+
+
+class DisassociateTransitGatewayPolicyTableRequest(ServiceRequest):
+    TransitGatewayPolicyTableId: TransitGatewayPolicyTableId
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    DryRun: Optional[Boolean]
+
+
+class DisassociateTransitGatewayPolicyTableResult(TypedDict, total=False):
+    Association: Optional[TransitGatewayPolicyTableAssociation]
+
+
+class DisassociateTransitGatewayRouteTableRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    DryRun: Optional[Boolean]
+
+
+class DisassociateTransitGatewayRouteTableResult(TypedDict, total=False):
+    Association: Optional[TransitGatewayAssociation]
+
+
+class DisassociateTrunkInterfaceRequest(ServiceRequest):
+    AssociationId: TrunkInterfaceAssociationId
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class DisassociateTrunkInterfaceResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+    ClientToken: Optional[String]
+
+
+class DisassociateVpcCidrBlockRequest(ServiceRequest):
+    AssociationId: VpcCidrAssociationId
+
+
+class DisassociateVpcCidrBlockResult(TypedDict, total=False):
+    Ipv6CidrBlockAssociation: Optional[VpcIpv6CidrBlockAssociation]
+    CidrBlockAssociation: Optional[VpcCidrBlockAssociation]
+    VpcId: Optional[String]
+
+
+class VolumeDetail(TypedDict, total=False):
+    Size: Long
+
+
+class DiskImageDetail(TypedDict, total=False):
+    Format: DiskImageFormat
+    Bytes: Long
+    ImportManifestUrl: ImportManifestUrl
+
+
+class DiskImage(TypedDict, total=False):
+    Description: Optional[String]
+    Image: Optional[DiskImageDetail]
+    Volume: Optional[VolumeDetail]
+
+
+DiskImageList = List[DiskImage]
+
+
+class DnsServersOptionsModifyStructure(TypedDict, total=False):
+    CustomDnsServers: Optional[ValueStringList]
+    Enabled: Optional[Boolean]
+
+
+class EbsInstanceBlockDeviceSpecification(TypedDict, total=False):
+    VolumeId: Optional[VolumeId]
+    DeleteOnTermination: Optional[Boolean]
+
+
+ElasticGpuSpecifications = List[ElasticGpuSpecification]
+
+
+class ElasticInferenceAccelerator(TypedDict, total=False):
+    Type: String
+    Count: Optional[ElasticInferenceAcceleratorCount]
+
+
+ElasticInferenceAccelerators = List[ElasticInferenceAccelerator]
+
+
+class EnableAddressTransferRequest(ServiceRequest):
+    AllocationId: AllocationId
+    TransferAccountId: String
+    DryRun: Optional[Boolean]
+
+
+class EnableAddressTransferResult(TypedDict, total=False):
+    AddressTransfer: Optional[AddressTransfer]
+
+
+class EnableAllowedImagesSettingsRequest(ServiceRequest):
+    AllowedImagesSettingsState: AllowedImagesSettingsEnabledState
+    DryRun: Optional[Boolean]
+
+
+class EnableAllowedImagesSettingsResult(TypedDict, total=False):
+    AllowedImagesSettingsState: Optional[AllowedImagesSettingsEnabledState]
+
+
+class EnableAwsNetworkPerformanceMetricSubscriptionRequest(ServiceRequest):
+    Source: Optional[String]
+    Destination: Optional[String]
+    Metric: Optional[MetricType]
+    Statistic: Optional[StatisticType]
+    DryRun: Optional[Boolean]
+
+
+class EnableAwsNetworkPerformanceMetricSubscriptionResult(TypedDict, total=False):
+    Output: Optional[Boolean]
+
+
+class EnableEbsEncryptionByDefaultRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class EnableEbsEncryptionByDefaultResult(TypedDict, total=False):
+    EbsEncryptionByDefault: Optional[Boolean]
+
+
+class FastLaunchLaunchTemplateSpecificationRequest(TypedDict, total=False):
+    LaunchTemplateId: Optional[LaunchTemplateId]
+    LaunchTemplateName: Optional[String]
+    Version: String
+
+
+class FastLaunchSnapshotConfigurationRequest(TypedDict, total=False):
+    TargetResourceCount: Optional[Integer]
+
+
+class EnableFastLaunchRequest(ServiceRequest):
+    ImageId: ImageId
+    ResourceType: Optional[String]
+    SnapshotConfiguration: Optional[FastLaunchSnapshotConfigurationRequest]
+    LaunchTemplate: Optional[FastLaunchLaunchTemplateSpecificationRequest]
+    MaxParallelLaunches: Optional[Integer]
+    DryRun: Optional[Boolean]
+
+
+class EnableFastLaunchResult(TypedDict, total=False):
+    ImageId: Optional[ImageId]
+    ResourceType: Optional[FastLaunchResourceType]
+    SnapshotConfiguration: Optional[FastLaunchSnapshotConfigurationResponse]
+    LaunchTemplate: Optional[FastLaunchLaunchTemplateSpecificationResponse]
+    MaxParallelLaunches: Optional[Integer]
+    OwnerId: Optional[String]
+    State: Optional[FastLaunchStateCode]
+    StateTransitionReason: Optional[String]
+    StateTransitionTime: Optional[MillisecondDateTime]
+
+
+class EnableFastSnapshotRestoreStateError(TypedDict, total=False):
+    Code: Optional[String]
+    Message: Optional[String]
+
+
+class EnableFastSnapshotRestoreStateErrorItem(TypedDict, total=False):
+    AvailabilityZone: Optional[String]
+    Error: Optional[EnableFastSnapshotRestoreStateError]
+
+
+EnableFastSnapshotRestoreStateErrorSet = List[EnableFastSnapshotRestoreStateErrorItem]
+
+
+class EnableFastSnapshotRestoreErrorItem(TypedDict, total=False):
+    SnapshotId: Optional[String]
+    FastSnapshotRestoreStateErrors: Optional[EnableFastSnapshotRestoreStateErrorSet]
+
+
+EnableFastSnapshotRestoreErrorSet = List[EnableFastSnapshotRestoreErrorItem]
+
+
+class EnableFastSnapshotRestoreSuccessItem(TypedDict, total=False):
+    SnapshotId: Optional[String]
+    AvailabilityZone: Optional[String]
+    State: Optional[FastSnapshotRestoreStateCode]
+    StateTransitionReason: Optional[String]
+    OwnerId: Optional[String]
+    OwnerAlias: Optional[String]
+    EnablingTime: Optional[MillisecondDateTime]
+    OptimizingTime: Optional[MillisecondDateTime]
+    EnabledTime: Optional[MillisecondDateTime]
+    DisablingTime: Optional[MillisecondDateTime]
+    DisabledTime: Optional[MillisecondDateTime]
+
+
+EnableFastSnapshotRestoreSuccessSet = List[EnableFastSnapshotRestoreSuccessItem]
+
+
+class EnableFastSnapshotRestoresRequest(ServiceRequest):
+    AvailabilityZones: AvailabilityZoneStringList
+    SourceSnapshotIds: SnapshotIdStringList
+    DryRun: Optional[Boolean]
+
+
+class EnableFastSnapshotRestoresResult(TypedDict, total=False):
+    Successful: Optional[EnableFastSnapshotRestoreSuccessSet]
+    Unsuccessful: Optional[EnableFastSnapshotRestoreErrorSet]
+
+
+class EnableImageBlockPublicAccessRequest(ServiceRequest):
+    ImageBlockPublicAccessState: ImageBlockPublicAccessEnabledState
+    DryRun: Optional[Boolean]
+
+
+class EnableImageBlockPublicAccessResult(TypedDict, total=False):
+    ImageBlockPublicAccessState: Optional[ImageBlockPublicAccessEnabledState]
+
+
+class EnableImageDeprecationRequest(ServiceRequest):
+    ImageId: ImageId
+    DeprecateAt: MillisecondDateTime
+    DryRun: Optional[Boolean]
+
+
+class EnableImageDeprecationResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class EnableImageDeregistrationProtectionRequest(ServiceRequest):
+    ImageId: ImageId
+    WithCooldown: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class EnableImageDeregistrationProtectionResult(TypedDict, total=False):
+    Return: Optional[String]
+
+
+class EnableImageRequest(ServiceRequest):
+    ImageId: ImageId
+    DryRun: Optional[Boolean]
+
+
+class EnableImageResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class EnableIpamOrganizationAdminAccountRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    DelegatedAdminAccountId: String
+
+
+class EnableIpamOrganizationAdminAccountResult(TypedDict, total=False):
+    Success: Optional[Boolean]
+
+
+class EnableReachabilityAnalyzerOrganizationSharingRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class EnableReachabilityAnalyzerOrganizationSharingResult(TypedDict, total=False):
+    ReturnValue: Optional[Boolean]
+
+
+class EnableRouteServerPropagationRequest(ServiceRequest):
+    RouteServerId: RouteServerId
+    RouteTableId: RouteTableId
+    DryRun: Optional[Boolean]
+
+
+class EnableRouteServerPropagationResult(TypedDict, total=False):
+    RouteServerPropagation: Optional[RouteServerPropagation]
+
+
+class EnableSerialConsoleAccessRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class EnableSerialConsoleAccessResult(TypedDict, total=False):
+    SerialConsoleAccessEnabled: Optional[Boolean]
+
+
+class EnableSnapshotBlockPublicAccessRequest(ServiceRequest):
+    State: SnapshotBlockPublicAccessState
+    DryRun: Optional[Boolean]
+
+
+class EnableSnapshotBlockPublicAccessResult(TypedDict, total=False):
+    State: Optional[SnapshotBlockPublicAccessState]
+
+
+class EnableTransitGatewayRouteTablePropagationRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId]
+    DryRun: Optional[Boolean]
+    TransitGatewayRouteTableAnnouncementId: Optional[TransitGatewayRouteTableAnnouncementId]
+
+
+class EnableTransitGatewayRouteTablePropagationResult(TypedDict, total=False):
+    Propagation: Optional[TransitGatewayPropagation]
+
+
+class EnableVgwRoutePropagationRequest(ServiceRequest):
+    GatewayId: VpnGatewayId
+    RouteTableId: RouteTableId
+    DryRun: Optional[Boolean]
+
+
+class EnableVolumeIORequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    VolumeId: VolumeId
+
+
+class EnableVpcClassicLinkDnsSupportRequest(ServiceRequest):
+    VpcId: Optional[VpcId]
+
+
+class EnableVpcClassicLinkDnsSupportResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class EnableVpcClassicLinkRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    VpcId: VpcId
+
+
+class EnableVpcClassicLinkResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class EnclaveOptionsRequest(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+
+
+class ExportClientVpnClientCertificateRevocationListRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    DryRun: Optional[Boolean]
+
+
+class ExportClientVpnClientCertificateRevocationListResult(TypedDict, total=False):
+    CertificateRevocationList: Optional[String]
+    Status: Optional[ClientCertificateRevocationListStatus]
+
+
+class ExportClientVpnClientConfigurationRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    DryRun: Optional[Boolean]
+
+
+class ExportClientVpnClientConfigurationResult(TypedDict, total=False):
+    ClientConfiguration: Optional[String]
+
+
+class ExportTaskS3LocationRequest(TypedDict, total=False):
+    S3Bucket: String
+    S3Prefix: Optional[String]
+
+
+class ExportImageRequest(ServiceRequest):
+    ClientToken: Optional[String]
+    Description: Optional[String]
+    DiskImageFormat: DiskImageFormat
+    DryRun: Optional[Boolean]
+    ImageId: ImageId
+    S3ExportLocation: ExportTaskS3LocationRequest
+    RoleName: Optional[String]
+    TagSpecifications: Optional[TagSpecificationList]
+
+
+class ExportImageResult(TypedDict, total=False):
+    Description: Optional[String]
+    DiskImageFormat: Optional[DiskImageFormat]
+    ExportImageTaskId: Optional[String]
+    ImageId: Optional[String]
+    RoleName: Optional[String]
+    Progress: Optional[String]
+    S3ExportLocation: Optional[ExportTaskS3Location]
+    Status: Optional[String]
+    StatusMessage: Optional[String]
+    Tags: Optional[TagList]
+
+
+class ExportTransitGatewayRoutesRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    Filters: Optional[FilterList]
+    S3Bucket: String
+    DryRun: Optional[Boolean]
+
+
+class ExportTransitGatewayRoutesResult(TypedDict, total=False):
+    S3Location: Optional[String]
+
+
+class ExportVerifiedAccessInstanceClientConfigurationRequest(ServiceRequest):
+    VerifiedAccessInstanceId: VerifiedAccessInstanceId
+    DryRun: Optional[Boolean]
+
+
+class VerifiedAccessInstanceOpenVpnClientConfigurationRoute(TypedDict, total=False):
+    Cidr: Optional[String]
+
+
+VerifiedAccessInstanceOpenVpnClientConfigurationRouteList = List[
+    VerifiedAccessInstanceOpenVpnClientConfigurationRoute
+]
+
+
+class VerifiedAccessInstanceOpenVpnClientConfiguration(TypedDict, total=False):
+    Config: Optional[String]
+    Routes: Optional[VerifiedAccessInstanceOpenVpnClientConfigurationRouteList]
+
+
+VerifiedAccessInstanceOpenVpnClientConfigurationList = List[
+    VerifiedAccessInstanceOpenVpnClientConfiguration
+]
+
+
+class VerifiedAccessInstanceUserTrustProviderClientConfiguration(TypedDict, total=False):
+    Type: Optional[UserTrustProviderType]
+    Scopes: Optional[String]
+    Issuer: Optional[String]
+    AuthorizationEndpoint: Optional[String]
+    PublicSigningKeyEndpoint: Optional[String]
+    TokenEndpoint: Optional[String]
+    UserInfoEndpoint: Optional[String]
+    ClientId: Optional[String]
+    ClientSecret: Optional[ClientSecretType]
+    PkceEnabled: Optional[Boolean]
+
+
+class ExportVerifiedAccessInstanceClientConfigurationResult(TypedDict, total=False):
+    Version: Optional[String]
+    VerifiedAccessInstanceId: Optional[String]
+    Region: Optional[String]
+    DeviceTrustProviders: Optional[DeviceTrustProviderTypeList]
+    UserTrustProvider: Optional[VerifiedAccessInstanceUserTrustProviderClientConfiguration]
+    OpenVpnConfigurations: Optional[VerifiedAccessInstanceOpenVpnClientConfigurationList]
+
+
+class GetActiveVpnTunnelStatusRequest(ServiceRequest):
+    VpnConnectionId: VpnConnectionId
+    VpnTunnelOutsideIpAddress: String
+    DryRun: Optional[Boolean]
+
+
+class GetActiveVpnTunnelStatusResult(TypedDict, total=False):
+    ActiveVpnTunnelStatus: Optional[ActiveVpnTunnelStatus]
+
+
+class GetAllowedImagesSettingsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+ImageProviderList = List[ImageProvider]
+
+
+class ImageCriterion(TypedDict, total=False):
+    ImageProviders: Optional[ImageProviderList]
+
+
+ImageCriterionList = List[ImageCriterion]
+
+
+class GetAllowedImagesSettingsResult(TypedDict, total=False):
+    State: Optional[String]
+    ImageCriteria: Optional[ImageCriterionList]
+    ManagedBy: Optional[ManagedBy]
+
+
+class GetAssociatedEnclaveCertificateIamRolesRequest(ServiceRequest):
+    CertificateArn: CertificateId
+    DryRun: Optional[Boolean]
+
+
+class GetAssociatedEnclaveCertificateIamRolesResult(TypedDict, total=False):
+    AssociatedRoles: Optional[AssociatedRolesList]
+
+
+class GetAssociatedIpv6PoolCidrsRequest(ServiceRequest):
+    PoolId: Ipv6PoolEc2Id
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[Ipv6PoolMaxResults]
+    DryRun: Optional[Boolean]
+
+
+class Ipv6CidrAssociation(TypedDict, total=False):
+    Ipv6Cidr: Optional[String]
+    AssociatedResource: Optional[String]
+
+
+Ipv6CidrAssociationSet = List[Ipv6CidrAssociation]
+
+
+class GetAssociatedIpv6PoolCidrsResult(TypedDict, total=False):
+    Ipv6CidrAssociations: Optional[Ipv6CidrAssociationSet]
+    NextToken: Optional[String]
+
+
+class GetAwsNetworkPerformanceDataRequest(ServiceRequest):
+    DataQueries: Optional[DataQueries]
+    StartTime: Optional[MillisecondDateTime]
+    EndTime: Optional[MillisecondDateTime]
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class GetAwsNetworkPerformanceDataResult(TypedDict, total=False):
+    DataResponses: Optional[DataResponses]
+    NextToken: Optional[String]
+
+
+class GetCapacityReservationUsageRequest(ServiceRequest):
+    CapacityReservationId: CapacityReservationId
+    NextToken: Optional[String]
+    MaxResults: Optional[GetCapacityReservationUsageRequestMaxResults]
+    DryRun: Optional[Boolean]
+
+
+class InstanceUsage(TypedDict, total=False):
+    AccountId: Optional[String]
+    UsedInstanceCount: Optional[Integer]
+
+
+InstanceUsageSet = List[InstanceUsage]
+
+
+class GetCapacityReservationUsageResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    CapacityReservationId: Optional[String]
+    InstanceType: Optional[String]
+    TotalInstanceCount: Optional[Integer]
+    AvailableInstanceCount: Optional[Integer]
+    State: Optional[CapacityReservationState]
+    InstanceUsages: Optional[InstanceUsageSet]
+
+
+class GetCoipPoolUsageRequest(ServiceRequest):
+    PoolId: Ipv4PoolCoipId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[CoipPoolMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class GetCoipPoolUsageResult(TypedDict, total=False):
+    CoipPoolId: Optional[String]
+    CoipAddressUsages: Optional[CoipAddressUsageSet]
+    LocalGatewayRouteTableId: Optional[String]
+    NextToken: Optional[String]
+
+
+class GetConsoleOutputRequest(ServiceRequest):
+    InstanceId: InstanceId
+    Latest: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class GetConsoleOutputResult(TypedDict, total=False):
+    InstanceId: Optional[String]
+    Timestamp: Optional[DateTime]
+    Output: Optional[String]
+
+
+class GetConsoleScreenshotRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceId: InstanceId
+    WakeUp: Optional[Boolean]
+
+
+class GetConsoleScreenshotResult(TypedDict, total=False):
+    ImageData: Optional[String]
+    InstanceId: Optional[String]
+
+
+class GetDeclarativePoliciesReportSummaryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ReportId: DeclarativePoliciesReportId
+
+
+class GetDeclarativePoliciesReportSummaryResult(TypedDict, total=False):
+    ReportId: Optional[String]
+    S3Bucket: Optional[String]
+    S3Prefix: Optional[String]
+    TargetId: Optional[String]
+    StartTime: Optional[MillisecondDateTime]
+    EndTime: Optional[MillisecondDateTime]
+    NumberOfAccounts: Optional[Integer]
+    NumberOfFailedAccounts: Optional[Integer]
+    AttributeSummaries: Optional[AttributeSummaryList]
+
+
+class GetDefaultCreditSpecificationRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceFamily: UnlimitedSupportedInstanceFamily
+
+
+class InstanceFamilyCreditSpecification(TypedDict, total=False):
+    InstanceFamily: Optional[UnlimitedSupportedInstanceFamily]
+    CpuCredits: Optional[String]
+
+
+class GetDefaultCreditSpecificationResult(TypedDict, total=False):
+    InstanceFamilyCreditSpecification: Optional[InstanceFamilyCreditSpecification]
+
+
+class GetEbsDefaultKmsKeyIdRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class GetEbsDefaultKmsKeyIdResult(TypedDict, total=False):
+    KmsKeyId: Optional[String]
+
+
+class GetEbsEncryptionByDefaultRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class GetEbsEncryptionByDefaultResult(TypedDict, total=False):
+    EbsEncryptionByDefault: Optional[Boolean]
+    SseType: Optional[SSEType]
+
+
+class IntegrateServices(TypedDict, total=False):
+    AthenaIntegrations: Optional[AthenaIntegrationsSet]
+
+
+class GetFlowLogsIntegrationTemplateRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    FlowLogId: VpcFlowLogId
+    ConfigDeliveryS3DestinationArn: String
+    IntegrateServices: IntegrateServices
+
+
+class GetFlowLogsIntegrationTemplateResult(TypedDict, total=False):
+    Result: Optional[String]
+
+
+class GetGroupsForCapacityReservationRequest(ServiceRequest):
+    CapacityReservationId: CapacityReservationId
+    NextToken: Optional[String]
+    MaxResults: Optional[GetGroupsForCapacityReservationRequestMaxResults]
+    DryRun: Optional[Boolean]
+
+
+class GetGroupsForCapacityReservationResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    CapacityReservationGroups: Optional[CapacityReservationGroupSet]
+
+
+RequestHostIdSet = List[DedicatedHostId]
+
+
+class GetHostReservationPurchasePreviewRequest(ServiceRequest):
+    HostIdSet: RequestHostIdSet
+    OfferingId: OfferingId
+
+
+class Purchase(TypedDict, total=False):
+    CurrencyCode: Optional[CurrencyCodeValues]
+    Duration: Optional[Integer]
+    HostIdSet: Optional[ResponseHostIdSet]
+    HostReservationId: Optional[HostReservationId]
+    HourlyPrice: Optional[String]
+    InstanceFamily: Optional[String]
+    PaymentOption: Optional[PaymentOption]
+    UpfrontPrice: Optional[String]
+
+
+PurchaseSet = List[Purchase]
+
+
+class GetHostReservationPurchasePreviewResult(TypedDict, total=False):
+    CurrencyCode: Optional[CurrencyCodeValues]
+    Purchase: Optional[PurchaseSet]
+    TotalHourlyPrice: Optional[String]
+    TotalUpfrontPrice: Optional[String]
+
+
+class GetImageBlockPublicAccessStateRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class GetImageBlockPublicAccessStateResult(TypedDict, total=False):
+    ImageBlockPublicAccessState: Optional[String]
+    ManagedBy: Optional[ManagedBy]
+
+
+class GetInstanceMetadataDefaultsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class InstanceMetadataDefaultsResponse(TypedDict, total=False):
+    HttpTokens: Optional[HttpTokensState]
+    HttpPutResponseHopLimit: Optional[BoxedInteger]
+    HttpEndpoint: Optional[InstanceMetadataEndpointState]
+    InstanceMetadataTags: Optional[InstanceMetadataTagsState]
+    ManagedBy: Optional[ManagedBy]
+    ManagedExceptionMessage: Optional[String]
+
+
+class GetInstanceMetadataDefaultsResult(TypedDict, total=False):
+    AccountLevel: Optional[InstanceMetadataDefaultsResponse]
+
+
+class GetInstanceTpmEkPubRequest(ServiceRequest):
+    InstanceId: InstanceId
+    KeyType: EkPubKeyType
+    KeyFormat: EkPubKeyFormat
+    DryRun: Optional[Boolean]
+
+
+class GetInstanceTpmEkPubResult(TypedDict, total=False):
+    InstanceId: Optional[InstanceId]
+    KeyType: Optional[EkPubKeyType]
+    KeyFormat: Optional[EkPubKeyFormat]
+    KeyValue: Optional[EkPubKeyValue]
+
+
+VirtualizationTypeSet = List[VirtualizationType]
+
+
+class GetInstanceTypesFromInstanceRequirementsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ArchitectureTypes: ArchitectureTypeSet
+    VirtualizationTypes: VirtualizationTypeSet
+    InstanceRequirements: InstanceRequirementsRequest
+    MaxResults: Optional[Integer]
+    NextToken: Optional[String]
+
+
+class InstanceTypeInfoFromInstanceRequirements(TypedDict, total=False):
+    InstanceType: Optional[String]
+
+
+InstanceTypeInfoFromInstanceRequirementsSet = List[InstanceTypeInfoFromInstanceRequirements]
+
+
+class GetInstanceTypesFromInstanceRequirementsResult(TypedDict, total=False):
+    InstanceTypes: Optional[InstanceTypeInfoFromInstanceRequirementsSet]
+    NextToken: Optional[String]
+
+
+class GetInstanceUefiDataRequest(ServiceRequest):
+    InstanceId: InstanceId
+    DryRun: Optional[Boolean]
+
+
+class GetInstanceUefiDataResult(TypedDict, total=False):
+    InstanceId: Optional[InstanceId]
+    UefiData: Optional[String]
+
+
+class GetIpamAddressHistoryRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Cidr: String
+    IpamScopeId: IpamScopeId
+    VpcId: Optional[String]
+    StartTime: Optional[MillisecondDateTime]
+    EndTime: Optional[MillisecondDateTime]
+    MaxResults: Optional[IpamAddressHistoryMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class IpamAddressHistoryRecord(TypedDict, total=False):
+    ResourceOwnerId: Optional[String]
+    ResourceRegion: Optional[String]
+    ResourceType: Optional[IpamAddressHistoryResourceType]
+    ResourceId: Optional[String]
+    ResourceCidr: Optional[String]
+    ResourceName: Optional[String]
+    ResourceComplianceStatus: Optional[IpamComplianceStatus]
+    ResourceOverlapStatus: Optional[IpamOverlapStatus]
+    VpcId: Optional[String]
+    SampledStartTime: Optional[MillisecondDateTime]
+    SampledEndTime: Optional[MillisecondDateTime]
+
+
+IpamAddressHistoryRecordSet = List[IpamAddressHistoryRecord]
+
+
+class GetIpamAddressHistoryResult(TypedDict, total=False):
+    HistoryRecords: Optional[IpamAddressHistoryRecordSet]
+    NextToken: Optional[NextToken]
+
+
+class GetIpamDiscoveredAccountsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamResourceDiscoveryId: IpamResourceDiscoveryId
+    DiscoveryRegion: String
+    Filters: Optional[FilterList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[IpamMaxResults]
+
+
+class IpamDiscoveryFailureReason(TypedDict, total=False):
+    Code: Optional[IpamDiscoveryFailureCode]
+    Message: Optional[String]
+
+
+class IpamDiscoveredAccount(TypedDict, total=False):
+    AccountId: Optional[String]
+    DiscoveryRegion: Optional[String]
+    FailureReason: Optional[IpamDiscoveryFailureReason]
+    LastAttemptedDiscoveryTime: Optional[MillisecondDateTime]
+    LastSuccessfulDiscoveryTime: Optional[MillisecondDateTime]
+    OrganizationalUnitId: Optional[String]
+
+
+IpamDiscoveredAccountSet = List[IpamDiscoveredAccount]
+
+
+class GetIpamDiscoveredAccountsResult(TypedDict, total=False):
+    IpamDiscoveredAccounts: Optional[IpamDiscoveredAccountSet]
+    NextToken: Optional[NextToken]
+
+
+class GetIpamDiscoveredPublicAddressesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamResourceDiscoveryId: IpamResourceDiscoveryId
+    AddressRegion: String
+    Filters: Optional[FilterList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[IpamMaxResults]
+
+
+class IpamPublicAddressSecurityGroup(TypedDict, total=False):
+    GroupName: Optional[String]
+    GroupId: Optional[String]
+
+
+IpamPublicAddressSecurityGroupList = List[IpamPublicAddressSecurityGroup]
+
+
+class IpamPublicAddressTag(TypedDict, total=False):
+    Key: Optional[String]
+    Value: Optional[String]
+
+
+IpamPublicAddressTagList = List[IpamPublicAddressTag]
+
+
+class IpamPublicAddressTags(TypedDict, total=False):
+    EipTags: Optional[IpamPublicAddressTagList]
+
+
+class IpamDiscoveredPublicAddress(TypedDict, total=False):
+    IpamResourceDiscoveryId: Optional[IpamResourceDiscoveryId]
+    AddressRegion: Optional[String]
+    Address: Optional[String]
+    AddressOwnerId: Optional[String]
+    AddressAllocationId: Optional[String]
+    AssociationStatus: Optional[IpamPublicAddressAssociationStatus]
+    AddressType: Optional[IpamPublicAddressType]
+    Service: Optional[IpamPublicAddressAwsService]
+    ServiceResource: Optional[String]
+    VpcId: Optional[String]
+    SubnetId: Optional[String]
+    PublicIpv4PoolId: Optional[String]
+    NetworkInterfaceId: Optional[String]
+    NetworkInterfaceDescription: Optional[String]
+    InstanceId: Optional[String]
+    Tags: Optional[IpamPublicAddressTags]
+    NetworkBorderGroup: Optional[String]
+    SecurityGroups: Optional[IpamPublicAddressSecurityGroupList]
+    SampleTime: Optional[MillisecondDateTime]
+
+
+IpamDiscoveredPublicAddressSet = List[IpamDiscoveredPublicAddress]
+
+
+class GetIpamDiscoveredPublicAddressesResult(TypedDict, total=False):
+    IpamDiscoveredPublicAddresses: Optional[IpamDiscoveredPublicAddressSet]
+    OldestSampleTime: Optional[MillisecondDateTime]
+    NextToken: Optional[NextToken]
+
+
+class GetIpamDiscoveredResourceCidrsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamResourceDiscoveryId: IpamResourceDiscoveryId
+    ResourceRegion: String
+    Filters: Optional[FilterList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[IpamMaxResults]
+
+
+class IpamDiscoveredResourceCidr(TypedDict, total=False):
+    IpamResourceDiscoveryId: Optional[IpamResourceDiscoveryId]
+    ResourceRegion: Optional[String]
+    ResourceId: Optional[String]
+    ResourceOwnerId: Optional[String]
+    ResourceCidr: Optional[String]
+    IpSource: Optional[IpamResourceCidrIpSource]
+    ResourceType: Optional[IpamResourceType]
+    ResourceTags: Optional[IpamResourceTagList]
+    IpUsage: Optional[BoxedDouble]
+    VpcId: Optional[String]
+    SubnetId: Optional[String]
+    NetworkInterfaceAttachmentStatus: Optional[IpamNetworkInterfaceAttachmentStatus]
+    SampleTime: Optional[MillisecondDateTime]
+    AvailabilityZoneId: Optional[String]
+
+
+IpamDiscoveredResourceCidrSet = List[IpamDiscoveredResourceCidr]
+
+
+class GetIpamDiscoveredResourceCidrsResult(TypedDict, total=False):
+    IpamDiscoveredResourceCidrs: Optional[IpamDiscoveredResourceCidrSet]
+    NextToken: Optional[NextToken]
+
+
+class GetIpamPoolAllocationsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamPoolId: IpamPoolId
+    IpamPoolAllocationId: Optional[IpamPoolAllocationId]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[GetIpamPoolAllocationsMaxResults]
+    NextToken: Optional[NextToken]
+
+
+IpamPoolAllocationSet = List[IpamPoolAllocation]
+
+
+class GetIpamPoolAllocationsResult(TypedDict, total=False):
+    IpamPoolAllocations: Optional[IpamPoolAllocationSet]
+    NextToken: Optional[NextToken]
+
+
+class GetIpamPoolCidrsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    IpamPoolId: IpamPoolId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[IpamMaxResults]
+    NextToken: Optional[NextToken]
+
+
+IpamPoolCidrSet = List[IpamPoolCidr]
+
+
+class GetIpamPoolCidrsResult(TypedDict, total=False):
+    IpamPoolCidrs: Optional[IpamPoolCidrSet]
+    NextToken: Optional[NextToken]
+
+
+class GetIpamResourceCidrsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+    MaxResults: Optional[IpamMaxResults]
+    NextToken: Optional[NextToken]
+    IpamScopeId: IpamScopeId
+    IpamPoolId: Optional[IpamPoolId]
+    ResourceId: Optional[String]
+    ResourceType: Optional[IpamResourceType]
+    ResourceTag: Optional[RequestIpamResourceTag]
+    ResourceOwner: Optional[String]
+
+
+class IpamResourceCidr(TypedDict, total=False):
+    IpamId: Optional[IpamId]
+    IpamScopeId: Optional[IpamScopeId]
+    IpamPoolId: Optional[IpamPoolId]
+    ResourceRegion: Optional[String]
+    ResourceOwnerId: Optional[String]
+    ResourceId: Optional[String]
+    ResourceName: Optional[String]
+    ResourceCidr: Optional[String]
+    ResourceType: Optional[IpamResourceType]
+    ResourceTags: Optional[IpamResourceTagList]
+    IpUsage: Optional[BoxedDouble]
+    ComplianceStatus: Optional[IpamComplianceStatus]
+    ManagementState: Optional[IpamManagementState]
+    OverlapStatus: Optional[IpamOverlapStatus]
+    VpcId: Optional[String]
+    AvailabilityZoneId: Optional[String]
+
+
+IpamResourceCidrSet = List[IpamResourceCidr]
+
+
+class GetIpamResourceCidrsResult(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    IpamResourceCidrs: Optional[IpamResourceCidrSet]
+
+
+class GetLaunchTemplateDataRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceId: InstanceId
+
+
+class GetLaunchTemplateDataResult(TypedDict, total=False):
+    LaunchTemplateData: Optional[ResponseLaunchTemplateData]
+
+
+class GetManagedPrefixListAssociationsRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    PrefixListId: PrefixListResourceId
+    MaxResults: Optional[GetManagedPrefixListAssociationsMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class PrefixListAssociation(TypedDict, total=False):
+    ResourceId: Optional[String]
+    ResourceOwner: Optional[String]
+
+
+PrefixListAssociationSet = List[PrefixListAssociation]
+
+
+class GetManagedPrefixListAssociationsResult(TypedDict, total=False):
+    PrefixListAssociations: Optional[PrefixListAssociationSet]
+    NextToken: Optional[String]
+
+
+class GetManagedPrefixListEntriesRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    PrefixListId: PrefixListResourceId
+    TargetVersion: Optional[Long]
+    MaxResults: Optional[PrefixListMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class PrefixListEntry(TypedDict, total=False):
+    Cidr: Optional[String]
+    Description: Optional[String]
+
+
+PrefixListEntrySet = List[PrefixListEntry]
+
+
+class GetManagedPrefixListEntriesResult(TypedDict, total=False):
+    Entries: Optional[PrefixListEntrySet]
+    NextToken: Optional[NextToken]
+
+
+class GetNetworkInsightsAccessScopeAnalysisFindingsRequest(ServiceRequest):
+    NetworkInsightsAccessScopeAnalysisId: NetworkInsightsAccessScopeAnalysisId
+    MaxResults: Optional[GetNetworkInsightsAccessScopeAnalysisFindingsMaxResults]
+    NextToken: Optional[NextToken]
+    DryRun: Optional[Boolean]
+
+
+class GetNetworkInsightsAccessScopeAnalysisFindingsResult(TypedDict, total=False):
+    NetworkInsightsAccessScopeAnalysisId: Optional[NetworkInsightsAccessScopeAnalysisId]
+    AnalysisStatus: Optional[AnalysisStatus]
+    AnalysisFindings: Optional[AccessScopeAnalysisFindingList]
+    NextToken: Optional[String]
+
+
+class GetNetworkInsightsAccessScopeContentRequest(ServiceRequest):
+    NetworkInsightsAccessScopeId: NetworkInsightsAccessScopeId
+    DryRun: Optional[Boolean]
+
+
+class GetNetworkInsightsAccessScopeContentResult(TypedDict, total=False):
+    NetworkInsightsAccessScopeContent: Optional[NetworkInsightsAccessScopeContent]
+
+
+class GetPasswordDataRequest(ServiceRequest):
+    InstanceId: InstanceId
+    DryRun: Optional[Boolean]
+
+
+class GetPasswordDataResult(TypedDict, total=False):
+    InstanceId: Optional[String]
+    Timestamp: Optional[DateTime]
+    PasswordData: Optional[PasswordData]
+
+
+class GetReservedInstancesExchangeQuoteRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ReservedInstanceIds: ReservedInstanceIdSet
+    TargetConfigurations: Optional[TargetConfigurationRequestSet]
+
+
+class TargetConfiguration(TypedDict, total=False):
+    InstanceCount: Optional[Integer]
+    OfferingId: Optional[String]
+
+
+class ReservationValue(TypedDict, total=False):
+    HourlyPrice: Optional[String]
+    RemainingTotalValue: Optional[String]
+    RemainingUpfrontValue: Optional[String]
+
+
+class TargetReservationValue(TypedDict, total=False):
+    ReservationValue: Optional[ReservationValue]
+    TargetConfiguration: Optional[TargetConfiguration]
+
+
+TargetReservationValueSet = List[TargetReservationValue]
+
+
+class ReservedInstanceReservationValue(TypedDict, total=False):
+    ReservationValue: Optional[ReservationValue]
+    ReservedInstanceId: Optional[String]
+
+
+ReservedInstanceReservationValueSet = List[ReservedInstanceReservationValue]
+
+
+class GetReservedInstancesExchangeQuoteResult(TypedDict, total=False):
+    CurrencyCode: Optional[String]
+    IsValidExchange: Optional[Boolean]
+    OutputReservedInstancesWillExpireAt: Optional[DateTime]
+    PaymentDue: Optional[String]
+    ReservedInstanceValueRollup: Optional[ReservationValue]
+    ReservedInstanceValueSet: Optional[ReservedInstanceReservationValueSet]
+    TargetConfigurationValueRollup: Optional[ReservationValue]
+    TargetConfigurationValueSet: Optional[TargetReservationValueSet]
+    ValidationFailureReason: Optional[String]
+
+
+class GetRouteServerAssociationsRequest(ServiceRequest):
+    RouteServerId: RouteServerId
+    DryRun: Optional[Boolean]
+
+
+RouteServerAssociationsList = List[RouteServerAssociation]
+
+
+class GetRouteServerAssociationsResult(TypedDict, total=False):
+    RouteServerAssociations: Optional[RouteServerAssociationsList]
+
+
+class GetRouteServerPropagationsRequest(ServiceRequest):
+    RouteServerId: RouteServerId
+    RouteTableId: Optional[RouteTableId]
+    DryRun: Optional[Boolean]
+
+
+RouteServerPropagationsList = List[RouteServerPropagation]
+
+
+class GetRouteServerPropagationsResult(TypedDict, total=False):
+    RouteServerPropagations: Optional[RouteServerPropagationsList]
+
+
+class GetRouteServerRoutingDatabaseRequest(ServiceRequest):
+    RouteServerId: RouteServerId
+    NextToken: Optional[String]
+    MaxResults: Optional[RouteServerMaxResults]
+    DryRun: Optional[Boolean]
+    Filters: Optional[FilterList]
+
+
+class RouteServerRouteInstallationDetail(TypedDict, total=False):
+    RouteTableId: Optional[RouteTableId]
+    RouteInstallationStatus: Optional[RouteServerRouteInstallationStatus]
+    RouteInstallationStatusReason: Optional[String]
+
+
+RouteServerRouteInstallationDetails = List[RouteServerRouteInstallationDetail]
+
+
+class RouteServerRoute(TypedDict, total=False):
+    RouteServerEndpointId: Optional[RouteServerEndpointId]
+    RouteServerPeerId: Optional[RouteServerPeerId]
+    RouteInstallationDetails: Optional[RouteServerRouteInstallationDetails]
+    RouteStatus: Optional[RouteServerRouteStatus]
+    Prefix: Optional[String]
+    AsPaths: Optional[AsPath]
+    Med: Optional[Integer]
+    NextHopIp: Optional[String]
+
+
+RouteServerRouteList = List[RouteServerRoute]
+
+
+class GetRouteServerRoutingDatabaseResult(TypedDict, total=False):
+    AreRoutesPersisted: Optional[Boolean]
+    Routes: Optional[RouteServerRouteList]
+    NextToken: Optional[String]
+
+
+class GetSecurityGroupsForVpcRequest(ServiceRequest):
+    VpcId: VpcId
+    NextToken: Optional[String]
+    MaxResults: Optional[GetSecurityGroupsForVpcRequestMaxResults]
+    Filters: Optional[FilterList]
+    DryRun: Optional[Boolean]
+
+
+class SecurityGroupForVpc(TypedDict, total=False):
+    Description: Optional[String]
+    GroupName: Optional[String]
+    OwnerId: Optional[String]
+    GroupId: Optional[String]
+    Tags: Optional[TagList]
+    PrimaryVpcId: Optional[String]
+
+
+SecurityGroupForVpcList = List[SecurityGroupForVpc]
+
+
+class GetSecurityGroupsForVpcResult(TypedDict, total=False):
+    NextToken: Optional[String]
+    SecurityGroupForVpcs: Optional[SecurityGroupForVpcList]
+
+
+class GetSerialConsoleAccessStatusRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class GetSerialConsoleAccessStatusResult(TypedDict, total=False):
+    SerialConsoleAccessEnabled: Optional[Boolean]
+    ManagedBy: Optional[ManagedBy]
+
+
+class GetSnapshotBlockPublicAccessStateRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class GetSnapshotBlockPublicAccessStateResult(TypedDict, total=False):
+    State: Optional[SnapshotBlockPublicAccessState]
+    ManagedBy: Optional[ManagedBy]
+
+
+class InstanceRequirementsWithMetadataRequest(TypedDict, total=False):
+    ArchitectureTypes: Optional[ArchitectureTypeSet]
+    VirtualizationTypes: Optional[VirtualizationTypeSet]
+    InstanceRequirements: Optional[InstanceRequirementsRequest]
+
+
+RegionNames = List[String]
+InstanceTypes = List[String]
+
+
+class GetSpotPlacementScoresRequest(ServiceRequest):
+    InstanceTypes: Optional[InstanceTypes]
+    TargetCapacity: SpotPlacementScoresTargetCapacity
+    TargetCapacityUnitType: Optional[TargetCapacityUnitType]
+    SingleAvailabilityZone: Optional[Boolean]
+    RegionNames: Optional[RegionNames]
+    InstanceRequirementsWithMetadata: Optional[InstanceRequirementsWithMetadataRequest]
+    DryRun: Optional[Boolean]
+    MaxResults: Optional[SpotPlacementScoresMaxResults]
+    NextToken: Optional[String]
+
+
+class SpotPlacementScore(TypedDict, total=False):
+    Region: Optional[String]
+    AvailabilityZoneId: Optional[String]
+    Score: Optional[Integer]
+
+
+SpotPlacementScores = List[SpotPlacementScore]
+
+
+class GetSpotPlacementScoresResult(TypedDict, total=False):
+    SpotPlacementScores: Optional[SpotPlacementScores]
+    NextToken: Optional[String]
+
+
+class GetSubnetCidrReservationsRequest(ServiceRequest):
+    Filters: Optional[FilterList]
+    SubnetId: SubnetId
+    DryRun: Optional[Boolean]
+    NextToken: Optional[String]
+    MaxResults: Optional[GetSubnetCidrReservationsMaxResults]
+
+
+SubnetCidrReservationList = List[SubnetCidrReservation]
+
+
+class GetSubnetCidrReservationsResult(TypedDict, total=False):
+    SubnetIpv4CidrReservations: Optional[SubnetCidrReservationList]
+    SubnetIpv6CidrReservations: Optional[SubnetCidrReservationList]
+    NextToken: Optional[String]
+
+
+class GetTransitGatewayAttachmentPropagationsRequest(ServiceRequest):
+    TransitGatewayAttachmentId: TransitGatewayAttachmentId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayAttachmentPropagation(TypedDict, total=False):
+    TransitGatewayRouteTableId: Optional[String]
+    State: Optional[TransitGatewayPropagationState]
+
+
+TransitGatewayAttachmentPropagationList = List[TransitGatewayAttachmentPropagation]
+
+
+class GetTransitGatewayAttachmentPropagationsResult(TypedDict, total=False):
+    TransitGatewayAttachmentPropagations: Optional[TransitGatewayAttachmentPropagationList]
+    NextToken: Optional[String]
+
+
+class GetTransitGatewayMulticastDomainAssociationsRequest(ServiceRequest):
+    TransitGatewayMulticastDomainId: TransitGatewayMulticastDomainId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayMulticastDomainAssociation(TypedDict, total=False):
+    TransitGatewayAttachmentId: Optional[String]
+    ResourceId: Optional[String]
+    ResourceType: Optional[TransitGatewayAttachmentResourceType]
+    ResourceOwnerId: Optional[String]
+    Subnet: Optional[SubnetAssociation]
+
+
+TransitGatewayMulticastDomainAssociationList = List[TransitGatewayMulticastDomainAssociation]
+
+
+class GetTransitGatewayMulticastDomainAssociationsResult(TypedDict, total=False):
+    MulticastDomainAssociations: Optional[TransitGatewayMulticastDomainAssociationList]
+    NextToken: Optional[String]
+
+
+class GetTransitGatewayPolicyTableAssociationsRequest(ServiceRequest):
+    TransitGatewayPolicyTableId: TransitGatewayPolicyTableId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayPolicyTableAssociationList = List[TransitGatewayPolicyTableAssociation]
+
+
+class GetTransitGatewayPolicyTableAssociationsResult(TypedDict, total=False):
+    Associations: Optional[TransitGatewayPolicyTableAssociationList]
+    NextToken: Optional[String]
+
+
+class GetTransitGatewayPolicyTableEntriesRequest(ServiceRequest):
+    TransitGatewayPolicyTableId: TransitGatewayPolicyTableId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayPolicyRuleMetaData(TypedDict, total=False):
+    MetaDataKey: Optional[String]
+    MetaDataValue: Optional[String]
+
+
+class TransitGatewayPolicyRule(TypedDict, total=False):
+    SourceCidrBlock: Optional[String]
+    SourcePortRange: Optional[String]
+    DestinationCidrBlock: Optional[String]
+    DestinationPortRange: Optional[String]
+    Protocol: Optional[String]
+    MetaData: Optional[TransitGatewayPolicyRuleMetaData]
+
+
+class TransitGatewayPolicyTableEntry(TypedDict, total=False):
+    PolicyRuleNumber: Optional[String]
+    PolicyRule: Optional[TransitGatewayPolicyRule]
+    TargetRouteTableId: Optional[TransitGatewayRouteTableId]
+
+
+TransitGatewayPolicyTableEntryList = List[TransitGatewayPolicyTableEntry]
+
+
+class GetTransitGatewayPolicyTableEntriesResult(TypedDict, total=False):
+    TransitGatewayPolicyTableEntries: Optional[TransitGatewayPolicyTableEntryList]
+
+
+class GetTransitGatewayPrefixListReferencesRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayPrefixListReferenceSet = List[TransitGatewayPrefixListReference]
+
+
+class GetTransitGatewayPrefixListReferencesResult(TypedDict, total=False):
+    TransitGatewayPrefixListReferences: Optional[TransitGatewayPrefixListReferenceSet]
+    NextToken: Optional[String]
+
+
+class GetTransitGatewayRouteTableAssociationsRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayRouteTableAssociation(TypedDict, total=False):
+    TransitGatewayAttachmentId: Optional[String]
+    ResourceId: Optional[String]
+    ResourceType: Optional[TransitGatewayAttachmentResourceType]
+    State: Optional[TransitGatewayAssociationState]
+
+
+TransitGatewayRouteTableAssociationList = List[TransitGatewayRouteTableAssociation]
+
+
+class GetTransitGatewayRouteTableAssociationsResult(TypedDict, total=False):
+    Associations: Optional[TransitGatewayRouteTableAssociationList]
+    NextToken: Optional[String]
+
+
+class GetTransitGatewayRouteTablePropagationsRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayRouteTablePropagation(TypedDict, total=False):
+    TransitGatewayAttachmentId: Optional[String]
+    ResourceId: Optional[String]
+    ResourceType: Optional[TransitGatewayAttachmentResourceType]
+    State: Optional[TransitGatewayPropagationState]
+    TransitGatewayRouteTableAnnouncementId: Optional[TransitGatewayRouteTableAnnouncementId]
+
+
+TransitGatewayRouteTablePropagationList = List[TransitGatewayRouteTablePropagation]
+
+
+class GetTransitGatewayRouteTablePropagationsResult(TypedDict, total=False):
+    TransitGatewayRouteTablePropagations: Optional[TransitGatewayRouteTablePropagationList]
+    NextToken: Optional[String]
+
+
+class GetVerifiedAccessEndpointPolicyRequest(ServiceRequest):
+    VerifiedAccessEndpointId: VerifiedAccessEndpointId
+    DryRun: Optional[Boolean]
+
+
+class GetVerifiedAccessEndpointPolicyResult(TypedDict, total=False):
+    PolicyEnabled: Optional[Boolean]
+    PolicyDocument: Optional[String]
+
+
+class GetVerifiedAccessEndpointTargetsRequest(ServiceRequest):
+    VerifiedAccessEndpointId: VerifiedAccessEndpointId
+    MaxResults: Optional[GetVerifiedAccessEndpointTargetsMaxResults]
+    NextToken: Optional[NextToken]
+    DryRun: Optional[Boolean]
+
+
+class VerifiedAccessEndpointTarget(TypedDict, total=False):
+    VerifiedAccessEndpointId: Optional[VerifiedAccessEndpointId]
+    VerifiedAccessEndpointTargetIpAddress: Optional[String]
+    VerifiedAccessEndpointTargetDns: Optional[String]
+
+
+VerifiedAccessEndpointTargetList = List[VerifiedAccessEndpointTarget]
+
+
+class GetVerifiedAccessEndpointTargetsResult(TypedDict, total=False):
+    VerifiedAccessEndpointTargets: Optional[VerifiedAccessEndpointTargetList]
+    NextToken: Optional[NextToken]
+
+
+class GetVerifiedAccessGroupPolicyRequest(ServiceRequest):
+    VerifiedAccessGroupId: VerifiedAccessGroupId
+    DryRun: Optional[Boolean]
+
+
+class GetVerifiedAccessGroupPolicyResult(TypedDict, total=False):
+    PolicyEnabled: Optional[Boolean]
+    PolicyDocument: Optional[String]
+
+
+class GetVpnConnectionDeviceSampleConfigurationRequest(ServiceRequest):
+    VpnConnectionId: VpnConnectionId
+    VpnConnectionDeviceTypeId: VpnConnectionDeviceTypeId
+    InternetKeyExchangeVersion: Optional[String]
+    SampleType: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class GetVpnConnectionDeviceSampleConfigurationResult(TypedDict, total=False):
+    VpnConnectionDeviceSampleConfiguration: Optional[VpnConnectionDeviceSampleConfiguration]
+
+
+class GetVpnConnectionDeviceTypesRequest(ServiceRequest):
+    MaxResults: Optional[GVCDMaxResults]
+    NextToken: Optional[NextToken]
+    DryRun: Optional[Boolean]
+
+
+class VpnConnectionDeviceType(TypedDict, total=False):
+    VpnConnectionDeviceTypeId: Optional[String]
+    Vendor: Optional[String]
+    Platform: Optional[String]
+    Software: Optional[String]
+
+
+VpnConnectionDeviceTypeList = List[VpnConnectionDeviceType]
+
+
+class GetVpnConnectionDeviceTypesResult(TypedDict, total=False):
+    VpnConnectionDeviceTypes: Optional[VpnConnectionDeviceTypeList]
+    NextToken: Optional[NextToken]
+
+
+class GetVpnTunnelReplacementStatusRequest(ServiceRequest):
+    VpnConnectionId: VpnConnectionId
+    VpnTunnelOutsideIpAddress: String
+    DryRun: Optional[Boolean]
+
+
+class MaintenanceDetails(TypedDict, total=False):
+    PendingMaintenance: Optional[String]
+    MaintenanceAutoAppliedAfter: Optional[MillisecondDateTime]
+    LastMaintenanceApplied: Optional[MillisecondDateTime]
+
+
+class GetVpnTunnelReplacementStatusResult(TypedDict, total=False):
+    VpnConnectionId: Optional[VpnConnectionId]
+    TransitGatewayId: Optional[TransitGatewayId]
+    CustomerGatewayId: Optional[CustomerGatewayId]
+    VpnGatewayId: Optional[VpnGatewayId]
+    VpnTunnelOutsideIpAddress: Optional[String]
+    MaintenanceDetails: Optional[MaintenanceDetails]
+
+
+class HibernationOptionsRequest(TypedDict, total=False):
+    Configured: Optional[Boolean]
+
+
+class LaunchPermission(TypedDict, total=False):
+    OrganizationArn: Optional[String]
+    OrganizationalUnitArn: Optional[String]
+    UserId: Optional[String]
+    Group: Optional[PermissionGroup]
+
+
+LaunchPermissionList = List[LaunchPermission]
+
+
+class ImageAttribute(TypedDict, total=False):
+    Description: Optional[AttributeValue]
+    KernelId: Optional[AttributeValue]
+    RamdiskId: Optional[AttributeValue]
+    SriovNetSupport: Optional[AttributeValue]
+    BootMode: Optional[AttributeValue]
+    TpmSupport: Optional[AttributeValue]
+    UefiData: Optional[AttributeValue]
+    LastLaunchedTime: Optional[AttributeValue]
+    ImdsSupport: Optional[AttributeValue]
+    DeregistrationProtection: Optional[AttributeValue]
+    ImageId: Optional[String]
+    LaunchPermissions: Optional[LaunchPermissionList]
+    ProductCodes: Optional[ProductCodeList]
+    BlockDeviceMappings: Optional[BlockDeviceMappingList]
+
+
+ImageProviderRequestList = List[ImageProviderRequest]
+
+
+class ImageCriterionRequest(TypedDict, total=False):
+    ImageProviders: Optional[ImageProviderRequestList]
+
+
+ImageCriterionRequestList = List[ImageCriterionRequest]
+
+
+class UserBucket(TypedDict, total=False):
+    S3Bucket: Optional[String]
+    S3Key: Optional[String]
+
+
+class ImageDiskContainer(TypedDict, total=False):
+    Description: Optional[String]
+    DeviceName: Optional[String]
+    Format: Optional[String]
+    SnapshotId: Optional[SnapshotId]
+    Url: Optional[SensitiveUrl]
+    UserBucket: Optional[UserBucket]
+
+
+ImageDiskContainerList = List[ImageDiskContainer]
+
+
+class ImageRecycleBinInfo(TypedDict, total=False):
+    ImageId: Optional[String]
+    Name: Optional[String]
+    Description: Optional[String]
+    RecycleBinEnterTime: Optional[MillisecondDateTime]
+    RecycleBinExitTime: Optional[MillisecondDateTime]
+
+
+ImageRecycleBinInfoList = List[ImageRecycleBinInfo]
+
+
+class ImportClientVpnClientCertificateRevocationListRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    CertificateRevocationList: String
+    DryRun: Optional[Boolean]
+
+
+class ImportClientVpnClientCertificateRevocationListResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class ImportImageLicenseConfigurationRequest(TypedDict, total=False):
+    LicenseConfigurationArn: Optional[String]
+
+
+ImportImageLicenseSpecificationListRequest = List[ImportImageLicenseConfigurationRequest]
+
+
+class ImportImageRequest(ServiceRequest):
+    Architecture: Optional[String]
+    ClientData: Optional[ClientData]
+    ClientToken: Optional[String]
+    Description: Optional[String]
+    DiskContainers: Optional[ImageDiskContainerList]
+    DryRun: Optional[Boolean]
+    Encrypted: Optional[Boolean]
+    Hypervisor: Optional[String]
+    KmsKeyId: Optional[KmsKeyId]
+    LicenseType: Optional[String]
+    Platform: Optional[String]
+    RoleName: Optional[String]
+    LicenseSpecifications: Optional[ImportImageLicenseSpecificationListRequest]
+    TagSpecifications: Optional[TagSpecificationList]
+    UsageOperation: Optional[String]
+    BootMode: Optional[BootModeValues]
+
+
+class ImportImageResult(TypedDict, total=False):
+    Architecture: Optional[String]
+    Description: Optional[String]
+    Encrypted: Optional[Boolean]
+    Hypervisor: Optional[String]
+    ImageId: Optional[String]
+    ImportTaskId: Optional[ImportImageTaskId]
+    KmsKeyId: Optional[KmsKeyId]
+    LicenseType: Optional[String]
+    Platform: Optional[String]
+    Progress: Optional[String]
+    SnapshotDetails: Optional[SnapshotDetailList]
+    Status: Optional[String]
+    StatusMessage: Optional[String]
+    LicenseSpecifications: Optional[ImportImageLicenseSpecificationListResponse]
+    Tags: Optional[TagList]
+    UsageOperation: Optional[String]
+
+
+class UserData(TypedDict, total=False):
+    Data: Optional[String]
+
+
+class ImportInstanceLaunchSpecification(TypedDict, total=False):
+    Architecture: Optional[ArchitectureValues]
+    GroupNames: Optional[SecurityGroupStringList]
+    GroupIds: Optional[SecurityGroupIdStringList]
+    AdditionalInfo: Optional[String]
+    UserData: Optional[UserData]
+    InstanceType: Optional[InstanceType]
+    Placement: Optional[Placement]
+    Monitoring: Optional[Boolean]
+    SubnetId: Optional[SubnetId]
+    InstanceInitiatedShutdownBehavior: Optional[ShutdownBehavior]
+    PrivateIpAddress: Optional[String]
+
+
+class ImportInstanceRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Description: Optional[String]
+    LaunchSpecification: Optional[ImportInstanceLaunchSpecification]
+    DiskImages: Optional[DiskImageList]
+    Platform: PlatformValues
+
+
+class ImportInstanceResult(TypedDict, total=False):
+    ConversionTask: Optional[ConversionTask]
+
+
+class ImportKeyPairRequest(ServiceRequest):
+    TagSpecifications: Optional[TagSpecificationList]
+    DryRun: Optional[Boolean]
+    KeyName: String
+    PublicKeyMaterial: Blob
+
+
+class ImportKeyPairResult(TypedDict, total=False):
+    KeyFingerprint: Optional[String]
+    KeyName: Optional[String]
+    KeyPairId: Optional[String]
+    Tags: Optional[TagList]
+
+
+class SnapshotDiskContainer(TypedDict, total=False):
+    Description: Optional[String]
+    Format: Optional[String]
+    Url: Optional[SensitiveUrl]
+    UserBucket: Optional[UserBucket]
+
+
+class ImportSnapshotRequest(ServiceRequest):
+    ClientData: Optional[ClientData]
+    ClientToken: Optional[String]
+    Description: Optional[String]
+    DiskContainer: Optional[SnapshotDiskContainer]
+    DryRun: Optional[Boolean]
+    Encrypted: Optional[Boolean]
+    KmsKeyId: Optional[KmsKeyId]
+    RoleName: Optional[String]
+    TagSpecifications: Optional[TagSpecificationList]
+
+
+class ImportSnapshotResult(TypedDict, total=False):
+    Description: Optional[String]
+    ImportTaskId: Optional[String]
+    SnapshotTaskDetail: Optional[SnapshotTaskDetail]
+    Tags: Optional[TagList]
+
+
+class ImportVolumeRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    AvailabilityZone: String
+    Image: DiskImageDetail
+    Description: Optional[String]
+    Volume: VolumeDetail
+
+
+class ImportVolumeResult(TypedDict, total=False):
+    ConversionTask: Optional[ConversionTask]
+
+
+class InstanceAttribute(TypedDict, total=False):
+    BlockDeviceMappings: Optional[InstanceBlockDeviceMappingList]
+    DisableApiTermination: Optional[AttributeBooleanValue]
+    EnaSupport: Optional[AttributeBooleanValue]
+    EnclaveOptions: Optional[EnclaveOptions]
+    EbsOptimized: Optional[AttributeBooleanValue]
+    InstanceId: Optional[String]
+    InstanceInitiatedShutdownBehavior: Optional[AttributeValue]
+    InstanceType: Optional[AttributeValue]
+    KernelId: Optional[AttributeValue]
+    ProductCodes: Optional[ProductCodeList]
+    RamdiskId: Optional[AttributeValue]
+    RootDeviceName: Optional[AttributeValue]
+    SourceDestCheck: Optional[AttributeBooleanValue]
+    SriovNetSupport: Optional[AttributeValue]
+    UserData: Optional[AttributeValue]
+    DisableApiStop: Optional[AttributeBooleanValue]
+    Groups: Optional[GroupIdentifierList]
+
+
+class InstanceBlockDeviceMappingSpecification(TypedDict, total=False):
+    DeviceName: Optional[String]
+    Ebs: Optional[EbsInstanceBlockDeviceSpecification]
+    VirtualName: Optional[String]
+    NoDevice: Optional[String]
+
+
+InstanceBlockDeviceMappingSpecificationList = List[InstanceBlockDeviceMappingSpecification]
+
+
+class InstanceCreditSpecificationRequest(TypedDict, total=False):
+    InstanceId: InstanceId
+    CpuCredits: Optional[String]
+
+
+InstanceCreditSpecificationListRequest = List[InstanceCreditSpecificationRequest]
+InstanceIdSet = List[InstanceId]
+
+
+class InstanceMaintenanceOptionsRequest(TypedDict, total=False):
+    AutoRecovery: Optional[InstanceAutoRecoveryState]
+
+
+class SpotMarketOptions(TypedDict, total=False):
+    MaxPrice: Optional[String]
+    SpotInstanceType: Optional[SpotInstanceType]
+    BlockDurationMinutes: Optional[Integer]
+    ValidUntil: Optional[DateTime]
+    InstanceInterruptionBehavior: Optional[InstanceInterruptionBehavior]
+
+
+class InstanceMarketOptionsRequest(TypedDict, total=False):
+    MarketType: Optional[MarketType]
+    SpotOptions: Optional[SpotMarketOptions]
+
+
+class InstanceMetadataOptionsRequest(TypedDict, total=False):
+    HttpTokens: Optional[HttpTokensState]
+    HttpPutResponseHopLimit: Optional[Integer]
+    HttpEndpoint: Optional[InstanceMetadataEndpointState]
+    HttpProtocolIpv6: Optional[InstanceMetadataProtocolState]
+    InstanceMetadataTags: Optional[InstanceMetadataTagsState]
+
+
+class InstanceMonitoring(TypedDict, total=False):
+    InstanceId: Optional[String]
+    Monitoring: Optional[Monitoring]
+
+
+InstanceMonitoringList = List[InstanceMonitoring]
+
+
+class InstanceNetworkPerformanceOptionsRequest(TypedDict, total=False):
+    BandwidthWeighting: Optional[InstanceBandwidthWeighting]
+
+
+class InstanceStateChange(TypedDict, total=False):
+    InstanceId: Optional[String]
+    CurrentState: Optional[InstanceState]
+    PreviousState: Optional[InstanceState]
+
+
+InstanceStateChangeList = List[InstanceStateChange]
+
+
+class IpamCidrAuthorizationContext(TypedDict, total=False):
+    Message: Optional[String]
+    Signature: Optional[String]
+
+
+class KeyPair(TypedDict, total=False):
+    KeyPairId: Optional[String]
+    Tags: Optional[TagList]
+    KeyName: Optional[String]
+    KeyFingerprint: Optional[String]
+ KeyMaterial: Optional[SensitiveUserData] + + +class LaunchPermissionModifications(TypedDict, total=False): + Add: Optional[LaunchPermissionList] + Remove: Optional[LaunchPermissionList] + + +class LaunchTemplateSpecification(TypedDict, total=False): + LaunchTemplateId: Optional[LaunchTemplateId] + LaunchTemplateName: Optional[String] + Version: Optional[String] + + +class LicenseConfigurationRequest(TypedDict, total=False): + LicenseConfigurationArn: Optional[String] + + +LicenseSpecificationListRequest = List[LicenseConfigurationRequest] + + +class ListImagesInRecycleBinRequest(ServiceRequest): + ImageIds: Optional[ImageIdStringList] + NextToken: Optional[String] + MaxResults: Optional[ListImagesInRecycleBinMaxResults] + DryRun: Optional[Boolean] + + +class ListImagesInRecycleBinResult(TypedDict, total=False): + Images: Optional[ImageRecycleBinInfoList] + NextToken: Optional[String] + + +class ListSnapshotsInRecycleBinRequest(ServiceRequest): + MaxResults: Optional[ListSnapshotsInRecycleBinMaxResults] + NextToken: Optional[String] + SnapshotIds: Optional[SnapshotIdStringList] + DryRun: Optional[Boolean] + + +class SnapshotRecycleBinInfo(TypedDict, total=False): + SnapshotId: Optional[String] + RecycleBinEnterTime: Optional[MillisecondDateTime] + RecycleBinExitTime: Optional[MillisecondDateTime] + Description: Optional[String] + VolumeId: Optional[String] + + +SnapshotRecycleBinInfoList = List[SnapshotRecycleBinInfo] + + +class ListSnapshotsInRecycleBinResult(TypedDict, total=False): + Snapshots: Optional[SnapshotRecycleBinInfoList] + NextToken: Optional[String] + + +class LoadPermissionRequest(TypedDict, total=False): + Group: Optional[PermissionGroup] + UserId: Optional[String] + + +LoadPermissionListRequest = List[LoadPermissionRequest] + + +class LoadPermissionModifications(TypedDict, total=False): + Add: Optional[LoadPermissionListRequest] + Remove: Optional[LoadPermissionListRequest] + + +LocalGatewayRouteList = List[LocalGatewayRoute] + + +class LockSnapshotRequest(ServiceRequest): + SnapshotId: SnapshotId + DryRun: Optional[Boolean] + LockMode: LockMode + CoolOffPeriod: Optional[CoolOffPeriodRequestHours] + LockDuration: Optional[RetentionPeriodRequestDays] + ExpirationDate: Optional[MillisecondDateTime] + + +class LockSnapshotResult(TypedDict, total=False): + SnapshotId: Optional[String] + LockState: Optional[LockState] + LockDuration: Optional[RetentionPeriodResponseDays] + CoolOffPeriod: Optional[CoolOffPeriodResponseHours] + CoolOffPeriodExpiresOn: Optional[MillisecondDateTime] + LockCreatedOn: Optional[MillisecondDateTime] + LockExpiresOn: Optional[MillisecondDateTime] + LockDurationStartTime: Optional[MillisecondDateTime] + + +class ModifyAddressAttributeRequest(ServiceRequest): + AllocationId: AllocationId + DomainName: Optional[String] + DryRun: Optional[Boolean] + + +class ModifyAddressAttributeResult(TypedDict, total=False): + Address: Optional[AddressAttribute] + + +class ModifyAvailabilityZoneGroupRequest(ServiceRequest): + GroupName: String + OptInStatus: ModifyAvailabilityZoneOptInStatus + DryRun: Optional[Boolean] + + +class ModifyAvailabilityZoneGroupResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifyCapacityReservationFleetRequest(ServiceRequest): + CapacityReservationFleetId: CapacityReservationFleetId + TotalTargetCapacity: Optional[Integer] + EndDate: Optional[MillisecondDateTime] + DryRun: Optional[Boolean] + RemoveEndDate: Optional[Boolean] + + +class ModifyCapacityReservationFleetResult(TypedDict, total=False): + Return: 
Optional[Boolean] + + +class ModifyCapacityReservationRequest(ServiceRequest): + CapacityReservationId: CapacityReservationId + InstanceCount: Optional[Integer] + EndDate: Optional[DateTime] + EndDateType: Optional[EndDateType] + Accept: Optional[Boolean] + DryRun: Optional[Boolean] + AdditionalInfo: Optional[String] + InstanceMatchCriteria: Optional[InstanceMatchCriteria] + + +class ModifyCapacityReservationResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifyClientVpnEndpointRequest(ServiceRequest): + ClientVpnEndpointId: ClientVpnEndpointId + ServerCertificateArn: Optional[String] + ConnectionLogOptions: Optional[ConnectionLogOptions] + DnsServers: Optional[DnsServersOptionsModifyStructure] + VpnPort: Optional[Integer] + Description: Optional[String] + SplitTunnel: Optional[Boolean] + DryRun: Optional[Boolean] + SecurityGroupIds: Optional[ClientVpnSecurityGroupIdSet] + VpcId: Optional[VpcId] + SelfServicePortal: Optional[SelfServicePortal] + ClientConnectOptions: Optional[ClientConnectOptions] + SessionTimeoutHours: Optional[Integer] + ClientLoginBannerOptions: Optional[ClientLoginBannerOptions] + ClientRouteEnforcementOptions: Optional[ClientRouteEnforcementOptions] + DisconnectOnSessionTimeout: Optional[Boolean] + + +class ModifyClientVpnEndpointResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifyDefaultCreditSpecificationRequest(ServiceRequest): + DryRun: Optional[Boolean] + InstanceFamily: UnlimitedSupportedInstanceFamily + CpuCredits: String + + +class ModifyDefaultCreditSpecificationResult(TypedDict, total=False): + InstanceFamilyCreditSpecification: Optional[InstanceFamilyCreditSpecification] + + +class ModifyEbsDefaultKmsKeyIdRequest(ServiceRequest): + KmsKeyId: KmsKeyId + DryRun: Optional[Boolean] + + +class ModifyEbsDefaultKmsKeyIdResult(TypedDict, total=False): + KmsKeyId: Optional[String] + + +class ModifyFleetRequest(ServiceRequest): + DryRun: Optional[Boolean] + ExcessCapacityTerminationPolicy: Optional[FleetExcessCapacityTerminationPolicy] + LaunchTemplateConfigs: Optional[FleetLaunchTemplateConfigListRequest] + FleetId: FleetId + TargetCapacitySpecification: Optional[TargetCapacitySpecificationRequest] + Context: Optional[String] + + +class ModifyFleetResult(TypedDict, total=False): + Return: Optional[Boolean] + + +ProductCodeStringList = List[String] +UserGroupStringList = List[String] +UserIdStringList = List[String] + + +class ModifyFpgaImageAttributeRequest(ServiceRequest): + DryRun: Optional[Boolean] + FpgaImageId: FpgaImageId + Attribute: Optional[FpgaImageAttributeName] + OperationType: Optional[OperationType] + UserIds: Optional[UserIdStringList] + UserGroups: Optional[UserGroupStringList] + ProductCodes: Optional[ProductCodeStringList] + LoadPermission: Optional[LoadPermissionModifications] + Description: Optional[String] + Name: Optional[String] + + +class ModifyFpgaImageAttributeResult(TypedDict, total=False): + FpgaImageAttribute: Optional[FpgaImageAttribute] + + +class ModifyHostsRequest(ServiceRequest): + HostRecovery: Optional[HostRecovery] + InstanceType: Optional[String] + InstanceFamily: Optional[String] + HostMaintenance: Optional[HostMaintenance] + HostIds: RequestHostIdList + AutoPlacement: Optional[AutoPlacement] + + +UnsuccessfulItemList = List[UnsuccessfulItem] + + +class ModifyHostsResult(TypedDict, total=False): + Successful: Optional[ResponseHostIdList] + Unsuccessful: Optional[UnsuccessfulItemList] + + +class ModifyIdFormatRequest(ServiceRequest): + Resource: String + UseLongIds: Boolean + + 
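
Editor's note: the following is an illustrative sketch, not part of the generated patch. It mimics the convention the stubs above follow: omittable members are marked `Optional`, and result shapes use `total=False` so any key may be absent at runtime. All names are local stand-ins (modelled on `ModifyFleetResult` above) so the snippet runs on its own.

```python
# Illustrative sketch -- not part of the generated patch.
# total=False makes every key omittable; Optional[...] additionally
# permits an explicit None value. Callers must treat both the same way.
from typing import Optional, TypedDict

Boolean = bool  # stand-in for the module's Boolean alias


class ModifyFleetResultSketch(TypedDict, total=False):
    Return: Optional[Boolean]


def did_succeed(result: ModifyFleetResultSketch) -> bool:
    # The key may be missing entirely (total=False) or set to None
    # (Optional), so both cases count as "no confirmation".
    return result.get("Return") is True


print(did_succeed({"Return": True}))  # True
print(did_succeed({}))                # False
```
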
+class ModifyIdentityIdFormatRequest(ServiceRequest): + Resource: String + UseLongIds: Boolean + PrincipalArn: String + + +OrganizationalUnitArnStringList = List[String] +OrganizationArnStringList = List[String] + + +class ModifyImageAttributeRequest(ServiceRequest): + Attribute: Optional[String] + Description: Optional[AttributeValue] + ImageId: ImageId + LaunchPermission: Optional[LaunchPermissionModifications] + OperationType: Optional[OperationType] + ProductCodes: Optional[ProductCodeStringList] + UserGroups: Optional[UserGroupStringList] + UserIds: Optional[UserIdStringList] + Value: Optional[String] + OrganizationArns: Optional[OrganizationArnStringList] + OrganizationalUnitArns: Optional[OrganizationalUnitArnStringList] + ImdsSupport: Optional[AttributeValue] + DryRun: Optional[Boolean] + + +class ModifyInstanceAttributeRequest(ServiceRequest): + SourceDestCheck: Optional[AttributeBooleanValue] + DisableApiStop: Optional[AttributeBooleanValue] + DryRun: Optional[Boolean] + InstanceId: InstanceId + Attribute: Optional[InstanceAttributeName] + Value: Optional[String] + BlockDeviceMappings: Optional[InstanceBlockDeviceMappingSpecificationList] + DisableApiTermination: Optional[AttributeBooleanValue] + InstanceType: Optional[AttributeValue] + Kernel: Optional[AttributeValue] + Ramdisk: Optional[AttributeValue] + UserData: Optional[BlobAttributeValue] + InstanceInitiatedShutdownBehavior: Optional[AttributeValue] + Groups: Optional[GroupIdStringList] + EbsOptimized: Optional[AttributeBooleanValue] + SriovNetSupport: Optional[AttributeValue] + EnaSupport: Optional[AttributeBooleanValue] + + +class ModifyInstanceCapacityReservationAttributesRequest(ServiceRequest): + InstanceId: InstanceId + CapacityReservationSpecification: CapacityReservationSpecification + DryRun: Optional[Boolean] + + +class ModifyInstanceCapacityReservationAttributesResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifyInstanceCpuOptionsRequest(ServiceRequest): + InstanceId: InstanceId + CoreCount: Integer + ThreadsPerCore: Integer + DryRun: Optional[Boolean] + + +class ModifyInstanceCpuOptionsResult(TypedDict, total=False): + InstanceId: Optional[InstanceId] + CoreCount: Optional[Integer] + ThreadsPerCore: Optional[Integer] + + +class ModifyInstanceCreditSpecificationRequest(ServiceRequest): + DryRun: Optional[Boolean] + ClientToken: Optional[String] + InstanceCreditSpecifications: InstanceCreditSpecificationListRequest + + +class UnsuccessfulInstanceCreditSpecificationItemError(TypedDict, total=False): + Code: Optional[UnsuccessfulInstanceCreditSpecificationErrorCode] + Message: Optional[String] + + +class UnsuccessfulInstanceCreditSpecificationItem(TypedDict, total=False): + InstanceId: Optional[String] + Error: Optional[UnsuccessfulInstanceCreditSpecificationItemError] + + +UnsuccessfulInstanceCreditSpecificationSet = List[UnsuccessfulInstanceCreditSpecificationItem] + + +class SuccessfulInstanceCreditSpecificationItem(TypedDict, total=False): + InstanceId: Optional[String] + + +SuccessfulInstanceCreditSpecificationSet = List[SuccessfulInstanceCreditSpecificationItem] + + +class ModifyInstanceCreditSpecificationResult(TypedDict, total=False): + SuccessfulInstanceCreditSpecifications: Optional[SuccessfulInstanceCreditSpecificationSet] + UnsuccessfulInstanceCreditSpecifications: Optional[UnsuccessfulInstanceCreditSpecificationSet] + + +class ModifyInstanceEventStartTimeRequest(ServiceRequest): + DryRun: Optional[Boolean] + InstanceId: InstanceId + InstanceEventId: String + NotBefore: DateTime 
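
Editor's note: another hedged sketch, not part of the generated patch. It shows one way to consume the partial-failure pattern used by `ModifyInstanceCreditSpecificationResult` above, which reports per-instance successes and failures side by side; the shapes are re-declared locally in simplified form so the snippet is self-contained.

```python
# Illustrative sketch -- not part of the generated patch.
# Simplified local mirrors of the Unsuccessful* shapes defined above.
from typing import List, Optional, TypedDict


class ItemErrorSketch(TypedDict, total=False):
    Code: Optional[str]
    Message: Optional[str]


class UnsuccessfulItemSketch(TypedDict, total=False):
    InstanceId: Optional[str]
    Error: Optional[ItemErrorSketch]


class CreditSpecResultSketch(TypedDict, total=False):
    UnsuccessfulInstanceCreditSpecifications: Optional[List[UnsuccessfulItemSketch]]


def failed_instance_ids(result: CreditSpecResultSketch) -> List[str]:
    # Default to an empty list: the key may be absent or explicitly None.
    items = result.get("UnsuccessfulInstanceCreditSpecifications") or []
    return [item["InstanceId"] for item in items if item.get("InstanceId")]


res: CreditSpecResultSketch = {
    "UnsuccessfulInstanceCreditSpecifications": [
        {"InstanceId": "i-123", "Error": {"Code": "InvalidInstanceID.NotFound"}}
    ]
}
print(failed_instance_ids(res))  # ['i-123']
```
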
+ + +class ModifyInstanceEventStartTimeResult(TypedDict, total=False): + Event: Optional[InstanceStatusEvent] + + +class ModifyInstanceEventWindowRequest(ServiceRequest): + DryRun: Optional[Boolean] + Name: Optional[String] + InstanceEventWindowId: InstanceEventWindowId + TimeRanges: Optional[InstanceEventWindowTimeRangeRequestSet] + CronExpression: Optional[InstanceEventWindowCronExpression] + + +class ModifyInstanceEventWindowResult(TypedDict, total=False): + InstanceEventWindow: Optional[InstanceEventWindow] + + +class ModifyInstanceMaintenanceOptionsRequest(ServiceRequest): + InstanceId: InstanceId + AutoRecovery: Optional[InstanceAutoRecoveryState] + RebootMigration: Optional[InstanceRebootMigrationState] + DryRun: Optional[Boolean] + + +class ModifyInstanceMaintenanceOptionsResult(TypedDict, total=False): + InstanceId: Optional[String] + AutoRecovery: Optional[InstanceAutoRecoveryState] + RebootMigration: Optional[InstanceRebootMigrationState] + + +class ModifyInstanceMetadataDefaultsRequest(ServiceRequest): + HttpTokens: Optional[MetadataDefaultHttpTokensState] + HttpPutResponseHopLimit: Optional[BoxedInteger] + HttpEndpoint: Optional[DefaultInstanceMetadataEndpointState] + InstanceMetadataTags: Optional[DefaultInstanceMetadataTagsState] + DryRun: Optional[Boolean] + + +class ModifyInstanceMetadataDefaultsResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifyInstanceMetadataOptionsRequest(ServiceRequest): + InstanceId: InstanceId + HttpTokens: Optional[HttpTokensState] + HttpPutResponseHopLimit: Optional[Integer] + HttpEndpoint: Optional[InstanceMetadataEndpointState] + DryRun: Optional[Boolean] + HttpProtocolIpv6: Optional[InstanceMetadataProtocolState] + InstanceMetadataTags: Optional[InstanceMetadataTagsState] + + +class ModifyInstanceMetadataOptionsResult(TypedDict, total=False): + InstanceId: Optional[String] + InstanceMetadataOptions: Optional[InstanceMetadataOptionsResponse] + + +class ModifyInstanceNetworkPerformanceRequest(ServiceRequest): + InstanceId: InstanceId + BandwidthWeighting: InstanceBandwidthWeighting + DryRun: Optional[Boolean] + + +class ModifyInstanceNetworkPerformanceResult(TypedDict, total=False): + InstanceId: Optional[InstanceId] + BandwidthWeighting: Optional[InstanceBandwidthWeighting] + + +class ModifyInstancePlacementRequest(ServiceRequest): + GroupName: Optional[PlacementGroupName] + PartitionNumber: Optional[Integer] + HostResourceGroupArn: Optional[String] + GroupId: Optional[PlacementGroupId] + InstanceId: InstanceId + Tenancy: Optional[HostTenancy] + Affinity: Optional[Affinity] + HostId: Optional[DedicatedHostId] + + +class ModifyInstancePlacementResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifyIpamPoolRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamPoolId: IpamPoolId + Description: Optional[String] + AutoImport: Optional[Boolean] + AllocationMinNetmaskLength: Optional[IpamNetmaskLength] + AllocationMaxNetmaskLength: Optional[IpamNetmaskLength] + AllocationDefaultNetmaskLength: Optional[IpamNetmaskLength] + ClearAllocationDefaultNetmaskLength: Optional[Boolean] + AddAllocationResourceTags: Optional[RequestIpamResourceTagList] + RemoveAllocationResourceTags: Optional[RequestIpamResourceTagList] + + +class ModifyIpamPoolResult(TypedDict, total=False): + IpamPool: Optional[IpamPool] + + +class RemoveIpamOperatingRegion(TypedDict, total=False): + RegionName: Optional[String] + + +RemoveIpamOperatingRegionSet = List[RemoveIpamOperatingRegion] + + +class ModifyIpamRequest(ServiceRequest): + 
DryRun: Optional[Boolean] + IpamId: IpamId + Description: Optional[String] + AddOperatingRegions: Optional[AddIpamOperatingRegionSet] + RemoveOperatingRegions: Optional[RemoveIpamOperatingRegionSet] + Tier: Optional[IpamTier] + EnablePrivateGua: Optional[Boolean] + MeteredAccount: Optional[IpamMeteredAccount] + + +class ModifyIpamResourceCidrRequest(ServiceRequest): + DryRun: Optional[Boolean] + ResourceId: String + ResourceCidr: String + ResourceRegion: String + CurrentIpamScopeId: IpamScopeId + DestinationIpamScopeId: Optional[IpamScopeId] + Monitored: Boolean + + +class ModifyIpamResourceCidrResult(TypedDict, total=False): + IpamResourceCidr: Optional[IpamResourceCidr] + + +class RemoveIpamOrganizationalUnitExclusion(TypedDict, total=False): + OrganizationsEntityPath: Optional[String] + + +RemoveIpamOrganizationalUnitExclusionSet = List[RemoveIpamOrganizationalUnitExclusion] + + +class ModifyIpamResourceDiscoveryRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamResourceDiscoveryId: IpamResourceDiscoveryId + Description: Optional[String] + AddOperatingRegions: Optional[AddIpamOperatingRegionSet] + RemoveOperatingRegions: Optional[RemoveIpamOperatingRegionSet] + AddOrganizationalUnitExclusions: Optional[AddIpamOrganizationalUnitExclusionSet] + RemoveOrganizationalUnitExclusions: Optional[RemoveIpamOrganizationalUnitExclusionSet] + + +class ModifyIpamResourceDiscoveryResult(TypedDict, total=False): + IpamResourceDiscovery: Optional[IpamResourceDiscovery] + + +class ModifyIpamResult(TypedDict, total=False): + Ipam: Optional[Ipam] + + +class ModifyIpamScopeRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamScopeId: IpamScopeId + Description: Optional[String] + + +class ModifyIpamScopeResult(TypedDict, total=False): + IpamScope: Optional[IpamScope] + + +class ModifyLaunchTemplateRequest(ServiceRequest): + DryRun: Optional[Boolean] + ClientToken: Optional[String] + LaunchTemplateId: Optional[LaunchTemplateId] + LaunchTemplateName: Optional[LaunchTemplateName] + DefaultVersion: Optional[String] + + +class ModifyLaunchTemplateResult(TypedDict, total=False): + LaunchTemplate: Optional[LaunchTemplate] + + +class ModifyLocalGatewayRouteRequest(ServiceRequest): + DestinationCidrBlock: Optional[String] + LocalGatewayRouteTableId: LocalGatewayRoutetableId + LocalGatewayVirtualInterfaceGroupId: Optional[LocalGatewayVirtualInterfaceGroupId] + NetworkInterfaceId: Optional[NetworkInterfaceId] + DryRun: Optional[Boolean] + DestinationPrefixListId: Optional[PrefixListResourceId] + + +class ModifyLocalGatewayRouteResult(TypedDict, total=False): + Route: Optional[LocalGatewayRoute] + + +class RemovePrefixListEntry(TypedDict, total=False): + Cidr: String + + +RemovePrefixListEntries = List[RemovePrefixListEntry] + + +class ModifyManagedPrefixListRequest(ServiceRequest): + DryRun: Optional[Boolean] + PrefixListId: PrefixListResourceId + CurrentVersion: Optional[Long] + PrefixListName: Optional[String] + AddEntries: Optional[AddPrefixListEntries] + RemoveEntries: Optional[RemovePrefixListEntries] + MaxEntries: Optional[Integer] + + +class ModifyManagedPrefixListResult(TypedDict, total=False): + PrefixList: Optional[ManagedPrefixList] + + +class NetworkInterfaceAttachmentChanges(TypedDict, total=False): + DefaultEnaQueueCount: Optional[Boolean] + EnaQueueCount: Optional[Integer] + AttachmentId: Optional[NetworkInterfaceAttachmentId] + DeleteOnTermination: Optional[Boolean] + + +class ModifyNetworkInterfaceAttributeRequest(ServiceRequest): + EnaSrdSpecification: Optional[EnaSrdSpecification] + 
EnablePrimaryIpv6: Optional[Boolean] + ConnectionTrackingSpecification: Optional[ConnectionTrackingSpecificationRequest] + AssociatePublicIpAddress: Optional[Boolean] + DryRun: Optional[Boolean] + NetworkInterfaceId: NetworkInterfaceId + Description: Optional[AttributeValue] + SourceDestCheck: Optional[AttributeBooleanValue] + Groups: Optional[SecurityGroupIdStringList] + Attachment: Optional[NetworkInterfaceAttachmentChanges] + + +class ModifyPrivateDnsNameOptionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + InstanceId: InstanceId + PrivateDnsHostnameType: Optional[HostnameType] + EnableResourceNameDnsARecord: Optional[Boolean] + EnableResourceNameDnsAAAARecord: Optional[Boolean] + + +class ModifyPrivateDnsNameOptionsResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifyPublicIpDnsNameOptionsRequest(ServiceRequest): + NetworkInterfaceId: NetworkInterfaceId + HostnameType: PublicIpDnsOption + DryRun: Optional[Boolean] + + +class ModifyPublicIpDnsNameOptionsResult(TypedDict, total=False): + Successful: Optional[Boolean] + + +ReservedInstancesConfigurationList = List[ReservedInstancesConfiguration] + + +class ModifyReservedInstancesRequest(ServiceRequest): + ReservedInstancesIds: ReservedInstancesIdStringList + ClientToken: Optional[String] + TargetConfigurations: ReservedInstancesConfigurationList + + +class ModifyReservedInstancesResult(TypedDict, total=False): + ReservedInstancesModificationId: Optional[String] + + +class ModifyRouteServerRequest(ServiceRequest): + RouteServerId: RouteServerId + PersistRoutes: Optional[RouteServerPersistRoutesAction] + PersistRoutesDuration: Optional[BoxedLong] + SnsNotificationsEnabled: Optional[Boolean] + DryRun: Optional[Boolean] + + +class ModifyRouteServerResult(TypedDict, total=False): + RouteServer: Optional[RouteServer] + + +class SecurityGroupRuleRequest(TypedDict, total=False): + IpProtocol: Optional[String] + FromPort: Optional[Integer] + ToPort: Optional[Integer] + CidrIpv4: Optional[String] + CidrIpv6: Optional[String] + PrefixListId: Optional[PrefixListResourceId] + ReferencedGroupId: Optional[SecurityGroupId] + Description: Optional[String] + + +class SecurityGroupRuleUpdate(TypedDict, total=False): + SecurityGroupRuleId: SecurityGroupRuleId + SecurityGroupRule: Optional[SecurityGroupRuleRequest] + + +SecurityGroupRuleUpdateList = List[SecurityGroupRuleUpdate] + + +class ModifySecurityGroupRulesRequest(ServiceRequest): + GroupId: SecurityGroupId + SecurityGroupRules: SecurityGroupRuleUpdateList + DryRun: Optional[Boolean] + + +class ModifySecurityGroupRulesResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifySnapshotAttributeRequest(ServiceRequest): + Attribute: Optional[SnapshotAttributeName] + CreateVolumePermission: Optional[CreateVolumePermissionModifications] + GroupNames: Optional[GroupNameStringList] + OperationType: Optional[OperationType] + SnapshotId: SnapshotId + UserIds: Optional[UserIdStringList] + DryRun: Optional[Boolean] + + +class ModifySnapshotTierRequest(ServiceRequest): + SnapshotId: SnapshotId + StorageTier: Optional[TargetStorageTier] + DryRun: Optional[Boolean] + + +class ModifySnapshotTierResult(TypedDict, total=False): + SnapshotId: Optional[String] + TieringStartTime: Optional[MillisecondDateTime] + + +class ModifySpotFleetRequestRequest(ServiceRequest): + LaunchTemplateConfigs: Optional[LaunchTemplateConfigList] + OnDemandTargetCapacity: Optional[Integer] + Context: Optional[String] + SpotFleetRequestId: SpotFleetRequestId + TargetCapacity: Optional[Integer] 
+ ExcessCapacityTerminationPolicy: Optional[ExcessCapacityTerminationPolicy] + + +class ModifySpotFleetRequestResponse(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifySubnetAttributeRequest(ServiceRequest): + AssignIpv6AddressOnCreation: Optional[AttributeBooleanValue] + MapPublicIpOnLaunch: Optional[AttributeBooleanValue] + SubnetId: SubnetId + MapCustomerOwnedIpOnLaunch: Optional[AttributeBooleanValue] + CustomerOwnedIpv4Pool: Optional[CoipPoolId] + EnableDns64: Optional[AttributeBooleanValue] + PrivateDnsHostnameTypeOnLaunch: Optional[HostnameType] + EnableResourceNameDnsARecordOnLaunch: Optional[AttributeBooleanValue] + EnableResourceNameDnsAAAARecordOnLaunch: Optional[AttributeBooleanValue] + EnableLniAtDeviceIndex: Optional[Integer] + DisableLniAtDeviceIndex: Optional[AttributeBooleanValue] + + +class ModifyTrafficMirrorFilterNetworkServicesRequest(ServiceRequest): + TrafficMirrorFilterId: TrafficMirrorFilterId + AddNetworkServices: Optional[TrafficMirrorNetworkServiceList] + RemoveNetworkServices: Optional[TrafficMirrorNetworkServiceList] + DryRun: Optional[Boolean] + + +class ModifyTrafficMirrorFilterNetworkServicesResult(TypedDict, total=False): + TrafficMirrorFilter: Optional[TrafficMirrorFilter] + + +TrafficMirrorFilterRuleFieldList = List[TrafficMirrorFilterRuleField] + + +class ModifyTrafficMirrorFilterRuleRequest(ServiceRequest): + TrafficMirrorFilterRuleId: TrafficMirrorFilterRuleIdWithResolver + TrafficDirection: Optional[TrafficDirection] + RuleNumber: Optional[Integer] + RuleAction: Optional[TrafficMirrorRuleAction] + DestinationPortRange: Optional[TrafficMirrorPortRangeRequest] + SourcePortRange: Optional[TrafficMirrorPortRangeRequest] + Protocol: Optional[Integer] + DestinationCidrBlock: Optional[String] + SourceCidrBlock: Optional[String] + Description: Optional[String] + RemoveFields: Optional[TrafficMirrorFilterRuleFieldList] + DryRun: Optional[Boolean] + + +class ModifyTrafficMirrorFilterRuleResult(TypedDict, total=False): + TrafficMirrorFilterRule: Optional[TrafficMirrorFilterRule] + + +TrafficMirrorSessionFieldList = List[TrafficMirrorSessionField] + + +class ModifyTrafficMirrorSessionRequest(ServiceRequest): + TrafficMirrorSessionId: TrafficMirrorSessionId + TrafficMirrorTargetId: Optional[TrafficMirrorTargetId] + TrafficMirrorFilterId: Optional[TrafficMirrorFilterId] + PacketLength: Optional[Integer] + SessionNumber: Optional[Integer] + VirtualNetworkId: Optional[Integer] + Description: Optional[String] + RemoveFields: Optional[TrafficMirrorSessionFieldList] + DryRun: Optional[Boolean] + + +class ModifyTrafficMirrorSessionResult(TypedDict, total=False): + TrafficMirrorSession: Optional[TrafficMirrorSession] + + +class ModifyTransitGatewayOptions(TypedDict, total=False): + AddTransitGatewayCidrBlocks: Optional[TransitGatewayCidrBlockStringList] + RemoveTransitGatewayCidrBlocks: Optional[TransitGatewayCidrBlockStringList] + VpnEcmpSupport: Optional[VpnEcmpSupportValue] + DnsSupport: Optional[DnsSupportValue] + SecurityGroupReferencingSupport: Optional[SecurityGroupReferencingSupportValue] + AutoAcceptSharedAttachments: Optional[AutoAcceptSharedAttachmentsValue] + DefaultRouteTableAssociation: Optional[DefaultRouteTableAssociationValue] + AssociationDefaultRouteTableId: Optional[TransitGatewayRouteTableId] + DefaultRouteTablePropagation: Optional[DefaultRouteTablePropagationValue] + PropagationDefaultRouteTableId: Optional[TransitGatewayRouteTableId] + AmazonSideAsn: Optional[Long] + + +class 
ModifyTransitGatewayPrefixListReferenceRequest(ServiceRequest): + TransitGatewayRouteTableId: TransitGatewayRouteTableId + PrefixListId: PrefixListResourceId + TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + Blackhole: Optional[Boolean] + DryRun: Optional[Boolean] + + +class ModifyTransitGatewayPrefixListReferenceResult(TypedDict, total=False): + TransitGatewayPrefixListReference: Optional[TransitGatewayPrefixListReference] + + +class ModifyTransitGatewayRequest(ServiceRequest): + TransitGatewayId: TransitGatewayId + Description: Optional[String] + Options: Optional[ModifyTransitGatewayOptions] + DryRun: Optional[Boolean] + + +class ModifyTransitGatewayResult(TypedDict, total=False): + TransitGateway: Optional[TransitGateway] + + +class ModifyTransitGatewayVpcAttachmentRequestOptions(TypedDict, total=False): + DnsSupport: Optional[DnsSupportValue] + SecurityGroupReferencingSupport: Optional[SecurityGroupReferencingSupportValue] + Ipv6Support: Optional[Ipv6SupportValue] + ApplianceModeSupport: Optional[ApplianceModeSupportValue] + + +class ModifyTransitGatewayVpcAttachmentRequest(ServiceRequest): + TransitGatewayAttachmentId: TransitGatewayAttachmentId + AddSubnetIds: Optional[TransitGatewaySubnetIdList] + RemoveSubnetIds: Optional[TransitGatewaySubnetIdList] + Options: Optional[ModifyTransitGatewayVpcAttachmentRequestOptions] + DryRun: Optional[Boolean] + + +class ModifyTransitGatewayVpcAttachmentResult(TypedDict, total=False): + TransitGatewayVpcAttachment: Optional[TransitGatewayVpcAttachment] + + +class ModifyVerifiedAccessEndpointPortRange(TypedDict, total=False): + FromPort: Optional[VerifiedAccessEndpointPortNumber] + ToPort: Optional[VerifiedAccessEndpointPortNumber] + + +ModifyVerifiedAccessEndpointPortRangeList = List[ModifyVerifiedAccessEndpointPortRange] + + +class ModifyVerifiedAccessEndpointCidrOptions(TypedDict, total=False): + PortRanges: Optional[ModifyVerifiedAccessEndpointPortRangeList] + + +class ModifyVerifiedAccessEndpointEniOptions(TypedDict, total=False): + Protocol: Optional[VerifiedAccessEndpointProtocol] + Port: Optional[VerifiedAccessEndpointPortNumber] + PortRanges: Optional[ModifyVerifiedAccessEndpointPortRangeList] + + +ModifyVerifiedAccessEndpointSubnetIdList = List[SubnetId] + + +class ModifyVerifiedAccessEndpointLoadBalancerOptions(TypedDict, total=False): + SubnetIds: Optional[ModifyVerifiedAccessEndpointSubnetIdList] + Protocol: Optional[VerifiedAccessEndpointProtocol] + Port: Optional[VerifiedAccessEndpointPortNumber] + PortRanges: Optional[ModifyVerifiedAccessEndpointPortRangeList] + + +class ModifyVerifiedAccessEndpointPolicyRequest(ServiceRequest): + VerifiedAccessEndpointId: VerifiedAccessEndpointId + PolicyEnabled: Optional[Boolean] + PolicyDocument: Optional[String] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + SseSpecification: Optional[VerifiedAccessSseSpecificationRequest] + + +class ModifyVerifiedAccessEndpointPolicyResult(TypedDict, total=False): + PolicyEnabled: Optional[Boolean] + PolicyDocument: Optional[String] + SseSpecification: Optional[VerifiedAccessSseSpecificationResponse] + + +class ModifyVerifiedAccessEndpointRdsOptions(TypedDict, total=False): + SubnetIds: Optional[ModifyVerifiedAccessEndpointSubnetIdList] + Port: Optional[VerifiedAccessEndpointPortNumber] + RdsEndpoint: Optional[String] + + +class ModifyVerifiedAccessEndpointRequest(ServiceRequest): + VerifiedAccessEndpointId: VerifiedAccessEndpointId + VerifiedAccessGroupId: Optional[VerifiedAccessGroupId] + LoadBalancerOptions: 
Optional[ModifyVerifiedAccessEndpointLoadBalancerOptions] + NetworkInterfaceOptions: Optional[ModifyVerifiedAccessEndpointEniOptions] + Description: Optional[String] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + RdsOptions: Optional[ModifyVerifiedAccessEndpointRdsOptions] + CidrOptions: Optional[ModifyVerifiedAccessEndpointCidrOptions] + + +class ModifyVerifiedAccessEndpointResult(TypedDict, total=False): + VerifiedAccessEndpoint: Optional[VerifiedAccessEndpoint] + + +class ModifyVerifiedAccessGroupPolicyRequest(ServiceRequest): + VerifiedAccessGroupId: VerifiedAccessGroupId + PolicyEnabled: Optional[Boolean] + PolicyDocument: Optional[String] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + SseSpecification: Optional[VerifiedAccessSseSpecificationRequest] + + +class ModifyVerifiedAccessGroupPolicyResult(TypedDict, total=False): + PolicyEnabled: Optional[Boolean] + PolicyDocument: Optional[String] + SseSpecification: Optional[VerifiedAccessSseSpecificationResponse] + + +class ModifyVerifiedAccessGroupRequest(ServiceRequest): + VerifiedAccessGroupId: VerifiedAccessGroupId + VerifiedAccessInstanceId: Optional[VerifiedAccessInstanceId] + Description: Optional[String] + ClientToken: Optional[String] + DryRun: Optional[Boolean] + + +class ModifyVerifiedAccessGroupResult(TypedDict, total=False): + VerifiedAccessGroup: Optional[VerifiedAccessGroup] + + +class VerifiedAccessLogKinesisDataFirehoseDestinationOptions(TypedDict, total=False): + Enabled: Boolean + DeliveryStream: Optional[String] + + +class VerifiedAccessLogCloudWatchLogsDestinationOptions(TypedDict, total=False): + Enabled: Boolean + LogGroup: Optional[String] + + +class VerifiedAccessLogS3DestinationOptions(TypedDict, total=False): + Enabled: Boolean + BucketName: Optional[String] + Prefix: Optional[String] + BucketOwner: Optional[String] + + +class VerifiedAccessLogOptions(TypedDict, total=False): + S3: Optional[VerifiedAccessLogS3DestinationOptions] + CloudWatchLogs: Optional[VerifiedAccessLogCloudWatchLogsDestinationOptions] + KinesisDataFirehose: Optional[VerifiedAccessLogKinesisDataFirehoseDestinationOptions] + LogVersion: Optional[String] + IncludeTrustContext: Optional[Boolean] + + +class ModifyVerifiedAccessInstanceLoggingConfigurationRequest(ServiceRequest): + VerifiedAccessInstanceId: VerifiedAccessInstanceId + AccessLogs: VerifiedAccessLogOptions + DryRun: Optional[Boolean] + ClientToken: Optional[String] + + +class ModifyVerifiedAccessInstanceLoggingConfigurationResult(TypedDict, total=False): + LoggingConfiguration: Optional[VerifiedAccessInstanceLoggingConfiguration] + + +class ModifyVerifiedAccessInstanceRequest(ServiceRequest): + VerifiedAccessInstanceId: VerifiedAccessInstanceId + Description: Optional[String] + DryRun: Optional[Boolean] + ClientToken: Optional[String] + CidrEndpointsCustomSubDomain: Optional[String] + + +class ModifyVerifiedAccessInstanceResult(TypedDict, total=False): + VerifiedAccessInstance: Optional[VerifiedAccessInstance] + + +class ModifyVerifiedAccessNativeApplicationOidcOptions(TypedDict, total=False): + PublicSigningKeyEndpoint: Optional[String] + Issuer: Optional[String] + AuthorizationEndpoint: Optional[String] + TokenEndpoint: Optional[String] + UserInfoEndpoint: Optional[String] + ClientId: Optional[String] + ClientSecret: Optional[ClientSecretType] + Scope: Optional[String] + + +class ModifyVerifiedAccessTrustProviderDeviceOptions(TypedDict, total=False): + PublicSigningKeyUrl: Optional[String] + + +class 
ModifyVerifiedAccessTrustProviderOidcOptions(TypedDict, total=False): + Issuer: Optional[String] + AuthorizationEndpoint: Optional[String] + TokenEndpoint: Optional[String] + UserInfoEndpoint: Optional[String] + ClientId: Optional[String] + ClientSecret: Optional[ClientSecretType] + Scope: Optional[String] + + +class ModifyVerifiedAccessTrustProviderRequest(ServiceRequest): + VerifiedAccessTrustProviderId: VerifiedAccessTrustProviderId + OidcOptions: Optional[ModifyVerifiedAccessTrustProviderOidcOptions] + DeviceOptions: Optional[ModifyVerifiedAccessTrustProviderDeviceOptions] + Description: Optional[String] + DryRun: Optional[Boolean] + ClientToken: Optional[String] + SseSpecification: Optional[VerifiedAccessSseSpecificationRequest] + NativeApplicationOidcOptions: Optional[ModifyVerifiedAccessNativeApplicationOidcOptions] + + +class ModifyVerifiedAccessTrustProviderResult(TypedDict, total=False): + VerifiedAccessTrustProvider: Optional[VerifiedAccessTrustProvider] + + +class ModifyVolumeAttributeRequest(ServiceRequest): + AutoEnableIO: Optional[AttributeBooleanValue] + VolumeId: VolumeId + DryRun: Optional[Boolean] + + +class ModifyVolumeRequest(ServiceRequest): + DryRun: Optional[Boolean] + VolumeId: VolumeId + Size: Optional[Integer] + VolumeType: Optional[VolumeType] + Iops: Optional[Integer] + Throughput: Optional[Integer] + MultiAttachEnabled: Optional[Boolean] + + +class ModifyVolumeResult(TypedDict, total=False): + VolumeModification: Optional[VolumeModification] + + +class ModifyVpcAttributeRequest(ServiceRequest): + EnableDnsHostnames: Optional[AttributeBooleanValue] + EnableDnsSupport: Optional[AttributeBooleanValue] + VpcId: VpcId + EnableNetworkAddressUsageMetrics: Optional[AttributeBooleanValue] + + +class ModifyVpcBlockPublicAccessExclusionRequest(ServiceRequest): + DryRun: Optional[Boolean] + ExclusionId: VpcBlockPublicAccessExclusionId + InternetGatewayExclusionMode: InternetGatewayExclusionMode + + +class ModifyVpcBlockPublicAccessExclusionResult(TypedDict, total=False): + VpcBlockPublicAccessExclusion: Optional[VpcBlockPublicAccessExclusion] + + +class ModifyVpcBlockPublicAccessOptionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + InternetGatewayBlockMode: InternetGatewayBlockMode + + +class ModifyVpcBlockPublicAccessOptionsResult(TypedDict, total=False): + VpcBlockPublicAccessOptions: Optional[VpcBlockPublicAccessOptions] + + +class ModifyVpcEndpointConnectionNotificationRequest(ServiceRequest): + DryRun: Optional[Boolean] + ConnectionNotificationId: ConnectionNotificationId + ConnectionNotificationArn: Optional[String] + ConnectionEvents: Optional[ValueStringList] + + +class ModifyVpcEndpointConnectionNotificationResult(TypedDict, total=False): + ReturnValue: Optional[Boolean] + + +class ModifyVpcEndpointRequest(ServiceRequest): + DryRun: Optional[Boolean] + VpcEndpointId: VpcEndpointId + ResetPolicy: Optional[Boolean] + PolicyDocument: Optional[String] + AddRouteTableIds: Optional[VpcEndpointRouteTableIdList] + RemoveRouteTableIds: Optional[VpcEndpointRouteTableIdList] + AddSubnetIds: Optional[VpcEndpointSubnetIdList] + RemoveSubnetIds: Optional[VpcEndpointSubnetIdList] + AddSecurityGroupIds: Optional[VpcEndpointSecurityGroupIdList] + RemoveSecurityGroupIds: Optional[VpcEndpointSecurityGroupIdList] + IpAddressType: Optional[IpAddressType] + DnsOptions: Optional[DnsOptionsSpecification] + PrivateDnsEnabled: Optional[Boolean] + SubnetConfigurations: Optional[SubnetConfigurationsList] + + +class ModifyVpcEndpointResult(TypedDict, total=False): + Return: 
Optional[Boolean] + + +class ModifyVpcEndpointServiceConfigurationRequest(ServiceRequest): + DryRun: Optional[Boolean] + ServiceId: VpcEndpointServiceId + PrivateDnsName: Optional[String] + RemovePrivateDnsName: Optional[Boolean] + AcceptanceRequired: Optional[Boolean] + AddNetworkLoadBalancerArns: Optional[ValueStringList] + RemoveNetworkLoadBalancerArns: Optional[ValueStringList] + AddGatewayLoadBalancerArns: Optional[ValueStringList] + RemoveGatewayLoadBalancerArns: Optional[ValueStringList] + AddSupportedIpAddressTypes: Optional[ValueStringList] + RemoveSupportedIpAddressTypes: Optional[ValueStringList] + AddSupportedRegions: Optional[ValueStringList] + RemoveSupportedRegions: Optional[ValueStringList] + + +class ModifyVpcEndpointServiceConfigurationResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ModifyVpcEndpointServicePayerResponsibilityRequest(ServiceRequest): + DryRun: Optional[Boolean] + ServiceId: VpcEndpointServiceId + PayerResponsibility: PayerResponsibility + + +class ModifyVpcEndpointServicePayerResponsibilityResult(TypedDict, total=False): + ReturnValue: Optional[Boolean] + + +class ModifyVpcEndpointServicePermissionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + ServiceId: VpcEndpointServiceId + AddAllowedPrincipals: Optional[ValueStringList] + RemoveAllowedPrincipals: Optional[ValueStringList] + + +class ModifyVpcEndpointServicePermissionsResult(TypedDict, total=False): + AddedPrincipals: Optional[AddedPrincipalSet] + ReturnValue: Optional[Boolean] + + +class PeeringConnectionOptionsRequest(TypedDict, total=False): + AllowDnsResolutionFromRemoteVpc: Optional[Boolean] + AllowEgressFromLocalClassicLinkToRemoteVpc: Optional[Boolean] + AllowEgressFromLocalVpcToRemoteClassicLink: Optional[Boolean] + + +class ModifyVpcPeeringConnectionOptionsRequest(ServiceRequest): + AccepterPeeringConnectionOptions: Optional[PeeringConnectionOptionsRequest] + DryRun: Optional[Boolean] + RequesterPeeringConnectionOptions: Optional[PeeringConnectionOptionsRequest] + VpcPeeringConnectionId: VpcPeeringConnectionId + + +class PeeringConnectionOptions(TypedDict, total=False): + AllowDnsResolutionFromRemoteVpc: Optional[Boolean] + AllowEgressFromLocalClassicLinkToRemoteVpc: Optional[Boolean] + AllowEgressFromLocalVpcToRemoteClassicLink: Optional[Boolean] + + +class ModifyVpcPeeringConnectionOptionsResult(TypedDict, total=False): + AccepterPeeringConnectionOptions: Optional[PeeringConnectionOptions] + RequesterPeeringConnectionOptions: Optional[PeeringConnectionOptions] + + +class ModifyVpcTenancyRequest(ServiceRequest): + VpcId: VpcId + InstanceTenancy: VpcTenancy + DryRun: Optional[Boolean] + + +class ModifyVpcTenancyResult(TypedDict, total=False): + ReturnValue: Optional[Boolean] + + +class ModifyVpnConnectionOptionsRequest(ServiceRequest): + VpnConnectionId: VpnConnectionId + LocalIpv4NetworkCidr: Optional[String] + RemoteIpv4NetworkCidr: Optional[String] + LocalIpv6NetworkCidr: Optional[String] + RemoteIpv6NetworkCidr: Optional[String] + DryRun: Optional[Boolean] + + +class ModifyVpnConnectionOptionsResult(TypedDict, total=False): + VpnConnection: Optional[VpnConnection] + + +class ModifyVpnConnectionRequest(ServiceRequest): + VpnConnectionId: VpnConnectionId + TransitGatewayId: Optional[TransitGatewayId] + CustomerGatewayId: Optional[CustomerGatewayId] + VpnGatewayId: Optional[VpnGatewayId] + DryRun: Optional[Boolean] + + +class ModifyVpnConnectionResult(TypedDict, total=False): + VpnConnection: Optional[VpnConnection] + + +class 
ModifyVpnTunnelCertificateRequest(ServiceRequest): + VpnConnectionId: VpnConnectionId + VpnTunnelOutsideIpAddress: String + DryRun: Optional[Boolean] + + +class ModifyVpnTunnelCertificateResult(TypedDict, total=False): + VpnConnection: Optional[VpnConnection] + + +class ModifyVpnTunnelOptionsSpecification(TypedDict, total=False): + TunnelInsideCidr: Optional[String] + TunnelInsideIpv6Cidr: Optional[String] + PreSharedKey: Optional[preSharedKey] + Phase1LifetimeSeconds: Optional[Integer] + Phase2LifetimeSeconds: Optional[Integer] + RekeyMarginTimeSeconds: Optional[Integer] + RekeyFuzzPercentage: Optional[Integer] + ReplayWindowSize: Optional[Integer] + DPDTimeoutSeconds: Optional[Integer] + DPDTimeoutAction: Optional[String] + Phase1EncryptionAlgorithms: Optional[Phase1EncryptionAlgorithmsRequestList] + Phase2EncryptionAlgorithms: Optional[Phase2EncryptionAlgorithmsRequestList] + Phase1IntegrityAlgorithms: Optional[Phase1IntegrityAlgorithmsRequestList] + Phase2IntegrityAlgorithms: Optional[Phase2IntegrityAlgorithmsRequestList] + Phase1DHGroupNumbers: Optional[Phase1DHGroupNumbersRequestList] + Phase2DHGroupNumbers: Optional[Phase2DHGroupNumbersRequestList] + IKEVersions: Optional[IKEVersionsRequestList] + StartupAction: Optional[String] + LogOptions: Optional[VpnTunnelLogOptionsSpecification] + EnableTunnelLifecycleControl: Optional[Boolean] + + +class ModifyVpnTunnelOptionsRequest(ServiceRequest): + VpnConnectionId: VpnConnectionId + VpnTunnelOutsideIpAddress: String + TunnelOptions: ModifyVpnTunnelOptionsSpecification + DryRun: Optional[Boolean] + SkipTunnelReplacement: Optional[Boolean] + PreSharedKeyStorage: Optional[String] + + +class ModifyVpnTunnelOptionsResult(TypedDict, total=False): + VpnConnection: Optional[VpnConnection] + + +class MonitorInstancesRequest(ServiceRequest): + InstanceIds: InstanceIdStringList + DryRun: Optional[Boolean] + + +class MonitorInstancesResult(TypedDict, total=False): + InstanceMonitorings: Optional[InstanceMonitoringList] + + +class MoveAddressToVpcRequest(ServiceRequest): + DryRun: Optional[Boolean] + PublicIp: String + + +class MoveAddressToVpcResult(TypedDict, total=False): + AllocationId: Optional[String] + Status: Optional[Status] + + +class MoveByoipCidrToIpamRequest(ServiceRequest): + DryRun: Optional[Boolean] + Cidr: String + IpamPoolId: IpamPoolId + IpamPoolOwner: String + + +class MoveByoipCidrToIpamResult(TypedDict, total=False): + ByoipCidr: Optional[ByoipCidr] + + +class MoveCapacityReservationInstancesRequest(ServiceRequest): + DryRun: Optional[Boolean] + ClientToken: Optional[String] + SourceCapacityReservationId: CapacityReservationId + DestinationCapacityReservationId: CapacityReservationId + InstanceCount: Integer + + +class MoveCapacityReservationInstancesResult(TypedDict, total=False): + SourceCapacityReservation: Optional[CapacityReservation] + DestinationCapacityReservation: Optional[CapacityReservation] + InstanceCount: Optional[Integer] + + +class PrivateDnsNameOptionsRequest(TypedDict, total=False): + HostnameType: Optional[HostnameType] + EnableResourceNameDnsARecord: Optional[Boolean] + EnableResourceNameDnsAAAARecord: Optional[Boolean] + + +class ScheduledInstancesPrivateIpAddressConfig(TypedDict, total=False): + Primary: Optional[Boolean] + PrivateIpAddress: Optional[String] + + +PrivateIpAddressConfigSet = List[ScheduledInstancesPrivateIpAddressConfig] + + +class ProvisionByoipCidrRequest(ServiceRequest): + Cidr: String + CidrAuthorizationContext: Optional[CidrAuthorizationContext] + PubliclyAdvertisable: Optional[Boolean] 
+ Description: Optional[String] + DryRun: Optional[Boolean] + PoolTagSpecifications: Optional[TagSpecificationList] + MultiRegion: Optional[Boolean] + NetworkBorderGroup: Optional[String] + + +class ProvisionByoipCidrResult(TypedDict, total=False): + ByoipCidr: Optional[ByoipCidr] + + +class ProvisionIpamByoasnRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamId: IpamId + Asn: String + AsnAuthorizationContext: AsnAuthorizationContext + + +class ProvisionIpamByoasnResult(TypedDict, total=False): + Byoasn: Optional[Byoasn] + + +class ProvisionIpamPoolCidrRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamPoolId: IpamPoolId + Cidr: Optional[String] + CidrAuthorizationContext: Optional[IpamCidrAuthorizationContext] + NetmaskLength: Optional[Integer] + ClientToken: Optional[String] + VerificationMethod: Optional[VerificationMethod] + IpamExternalResourceVerificationTokenId: Optional[IpamExternalResourceVerificationTokenId] + + +class ProvisionIpamPoolCidrResult(TypedDict, total=False): + IpamPoolCidr: Optional[IpamPoolCidr] + + +class ProvisionPublicIpv4PoolCidrRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamPoolId: IpamPoolId + PoolId: Ipv4PoolEc2Id + NetmaskLength: Integer + NetworkBorderGroup: Optional[String] + + +class ProvisionPublicIpv4PoolCidrResult(TypedDict, total=False): + PoolId: Optional[Ipv4PoolEc2Id] + PoolAddressRange: Optional[PublicIpv4PoolRange] + + +class PurchaseCapacityBlockExtensionRequest(ServiceRequest): + CapacityBlockExtensionOfferingId: OfferingId + CapacityReservationId: CapacityReservationId + DryRun: Optional[Boolean] + + +class PurchaseCapacityBlockExtensionResult(TypedDict, total=False): + CapacityBlockExtensions: Optional[CapacityBlockExtensionSet] + + +class PurchaseCapacityBlockRequest(ServiceRequest): + DryRun: Optional[Boolean] + TagSpecifications: Optional[TagSpecificationList] + CapacityBlockOfferingId: OfferingId + InstancePlatform: CapacityReservationInstancePlatform + + +class PurchaseCapacityBlockResult(TypedDict, total=False): + CapacityReservation: Optional[CapacityReservation] + + +class PurchaseHostReservationRequest(ServiceRequest): + ClientToken: Optional[String] + CurrencyCode: Optional[CurrencyCodeValues] + HostIdSet: RequestHostIdSet + LimitPrice: Optional[String] + OfferingId: OfferingId + TagSpecifications: Optional[TagSpecificationList] + + +class PurchaseHostReservationResult(TypedDict, total=False): + ClientToken: Optional[String] + CurrencyCode: Optional[CurrencyCodeValues] + Purchase: Optional[PurchaseSet] + TotalHourlyPrice: Optional[String] + TotalUpfrontPrice: Optional[String] + + +class PurchaseRequest(TypedDict, total=False): + InstanceCount: Integer + PurchaseToken: String + + +PurchaseRequestSet = List[PurchaseRequest] + + +class ReservedInstanceLimitPrice(TypedDict, total=False): + Amount: Optional[Double] + CurrencyCode: Optional[CurrencyCodeValues] + + +class PurchaseReservedInstancesOfferingRequest(ServiceRequest): + InstanceCount: Integer + ReservedInstancesOfferingId: ReservedInstancesOfferingId + PurchaseTime: Optional[DateTime] + DryRun: Optional[Boolean] + LimitPrice: Optional[ReservedInstanceLimitPrice] + + +class PurchaseReservedInstancesOfferingResult(TypedDict, total=False): + ReservedInstancesId: Optional[String] + + +class PurchaseScheduledInstancesRequest(ServiceRequest): + ClientToken: Optional[String] + DryRun: Optional[Boolean] + PurchaseRequests: PurchaseRequestSet + + +PurchasedScheduledInstanceSet = List[ScheduledInstance] + + +class PurchaseScheduledInstancesResult(TypedDict, 
total=False): + ScheduledInstanceSet: Optional[PurchasedScheduledInstanceSet] + + +ReasonCodesList = List[ReportInstanceReasonCodes] + + +class RebootInstancesRequest(ServiceRequest): + InstanceIds: InstanceIdStringList + DryRun: Optional[Boolean] + + +class RegisterImageRequest(ServiceRequest): + ImageLocation: Optional[String] + BillingProducts: Optional[BillingProductList] + BootMode: Optional[BootModeValues] + TpmSupport: Optional[TpmSupportValues] + UefiData: Optional[StringType] + ImdsSupport: Optional[ImdsSupportValues] + TagSpecifications: Optional[TagSpecificationList] + DryRun: Optional[Boolean] + Name: String + Description: Optional[String] + Architecture: Optional[ArchitectureValues] + KernelId: Optional[KernelId] + RamdiskId: Optional[RamdiskId] + RootDeviceName: Optional[String] + BlockDeviceMappings: Optional[BlockDeviceMappingRequestList] + VirtualizationType: Optional[String] + SriovNetSupport: Optional[String] + EnaSupport: Optional[Boolean] + + +class RegisterImageResult(TypedDict, total=False): + ImageId: Optional[String] + + +class RegisterInstanceTagAttributeRequest(TypedDict, total=False): + IncludeAllTagsOfInstance: Optional[Boolean] + InstanceTagKeys: Optional[InstanceTagKeySet] + + +class RegisterInstanceEventNotificationAttributesRequest(ServiceRequest): + DryRun: Optional[Boolean] + InstanceTagAttribute: RegisterInstanceTagAttributeRequest + + +class RegisterInstanceEventNotificationAttributesResult(TypedDict, total=False): + InstanceTagAttribute: Optional[InstanceTagNotificationAttribute] + + +class RegisterTransitGatewayMulticastGroupMembersRequest(ServiceRequest): + TransitGatewayMulticastDomainId: TransitGatewayMulticastDomainId + GroupIpAddress: Optional[String] + NetworkInterfaceIds: TransitGatewayNetworkInterfaceIdList + DryRun: Optional[Boolean] + + +class TransitGatewayMulticastRegisteredGroupMembers(TypedDict, total=False): + TransitGatewayMulticastDomainId: Optional[String] + RegisteredNetworkInterfaceIds: Optional[ValueStringList] + GroupIpAddress: Optional[String] + + +class RegisterTransitGatewayMulticastGroupMembersResult(TypedDict, total=False): + RegisteredMulticastGroupMembers: Optional[TransitGatewayMulticastRegisteredGroupMembers] + + +class RegisterTransitGatewayMulticastGroupSourcesRequest(ServiceRequest): + TransitGatewayMulticastDomainId: TransitGatewayMulticastDomainId + GroupIpAddress: Optional[String] + NetworkInterfaceIds: TransitGatewayNetworkInterfaceIdList + DryRun: Optional[Boolean] + + +class TransitGatewayMulticastRegisteredGroupSources(TypedDict, total=False): + TransitGatewayMulticastDomainId: Optional[String] + RegisteredNetworkInterfaceIds: Optional[ValueStringList] + GroupIpAddress: Optional[String] + + +class RegisterTransitGatewayMulticastGroupSourcesResult(TypedDict, total=False): + RegisteredMulticastGroupSources: Optional[TransitGatewayMulticastRegisteredGroupSources] + + +class RejectCapacityReservationBillingOwnershipRequest(ServiceRequest): + DryRun: Optional[Boolean] + CapacityReservationId: CapacityReservationId + + +class RejectCapacityReservationBillingOwnershipResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class RejectTransitGatewayMulticastDomainAssociationsRequest(ServiceRequest): + TransitGatewayMulticastDomainId: Optional[TransitGatewayMulticastDomainId] + TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId] + SubnetIds: Optional[ValueStringList] + DryRun: Optional[Boolean] + + +class RejectTransitGatewayMulticastDomainAssociationsResult(TypedDict, total=False): + Associations: 
Optional[TransitGatewayMulticastDomainAssociations] + + +class RejectTransitGatewayPeeringAttachmentRequest(ServiceRequest): + TransitGatewayAttachmentId: TransitGatewayAttachmentId + DryRun: Optional[Boolean] + + +class RejectTransitGatewayPeeringAttachmentResult(TypedDict, total=False): + TransitGatewayPeeringAttachment: Optional[TransitGatewayPeeringAttachment] + + +class RejectTransitGatewayVpcAttachmentRequest(ServiceRequest): + TransitGatewayAttachmentId: TransitGatewayAttachmentId + DryRun: Optional[Boolean] + + +class RejectTransitGatewayVpcAttachmentResult(TypedDict, total=False): + TransitGatewayVpcAttachment: Optional[TransitGatewayVpcAttachment] + + +class RejectVpcEndpointConnectionsRequest(ServiceRequest): + DryRun: Optional[Boolean] + ServiceId: VpcEndpointServiceId + VpcEndpointIds: VpcEndpointIdList + + +class RejectVpcEndpointConnectionsResult(TypedDict, total=False): + Unsuccessful: Optional[UnsuccessfulItemSet] + + +class RejectVpcPeeringConnectionRequest(ServiceRequest): + DryRun: Optional[Boolean] + VpcPeeringConnectionId: VpcPeeringConnectionId + + +class RejectVpcPeeringConnectionResult(TypedDict, total=False): + Return: Optional[Boolean] + + +class ReleaseAddressRequest(ServiceRequest): + AllocationId: Optional[AllocationId] + PublicIp: Optional[String] + NetworkBorderGroup: Optional[String] + DryRun: Optional[Boolean] + + +class ReleaseHostsRequest(ServiceRequest): + HostIds: RequestHostIdList + + +class ReleaseHostsResult(TypedDict, total=False): + Successful: Optional[ResponseHostIdList] + Unsuccessful: Optional[UnsuccessfulItemList] + + +class ReleaseIpamPoolAllocationRequest(ServiceRequest): + DryRun: Optional[Boolean] + IpamPoolId: IpamPoolId + Cidr: String + IpamPoolAllocationId: IpamPoolAllocationId + + +class ReleaseIpamPoolAllocationResult(TypedDict, total=False): + Success: Optional[Boolean] + + +class ReplaceIamInstanceProfileAssociationRequest(ServiceRequest): + IamInstanceProfile: IamInstanceProfileSpecification + AssociationId: IamInstanceProfileAssociationId + + +class ReplaceIamInstanceProfileAssociationResult(TypedDict, total=False): + IamInstanceProfileAssociation: Optional[IamInstanceProfileAssociation] + + +class ReplaceImageCriteriaInAllowedImagesSettingsRequest(ServiceRequest): + ImageCriteria: Optional[ImageCriterionRequestList] + DryRun: Optional[Boolean] + + +class ReplaceImageCriteriaInAllowedImagesSettingsResult(TypedDict, total=False): + ReturnValue: Optional[Boolean] + + +class ReplaceNetworkAclAssociationRequest(ServiceRequest): + DryRun: Optional[Boolean] + AssociationId: NetworkAclAssociationId + NetworkAclId: NetworkAclId + + +class ReplaceNetworkAclAssociationResult(TypedDict, total=False): + NewAssociationId: Optional[String] + + +class ReplaceNetworkAclEntryRequest(ServiceRequest): + DryRun: Optional[Boolean] + NetworkAclId: NetworkAclId + RuleNumber: Integer + Protocol: String + RuleAction: RuleAction + Egress: Boolean + CidrBlock: Optional[String] + Ipv6CidrBlock: Optional[String] + IcmpTypeCode: Optional[IcmpTypeCode] + PortRange: Optional[PortRange] + + +class ReplaceRouteRequest(ServiceRequest): + DestinationPrefixListId: Optional[PrefixListResourceId] + VpcEndpointId: Optional[VpcEndpointId] + LocalTarget: Optional[Boolean] + TransitGatewayId: Optional[TransitGatewayId] + LocalGatewayId: Optional[LocalGatewayId] + CarrierGatewayId: Optional[CarrierGatewayId] + CoreNetworkArn: Optional[CoreNetworkArn] + DryRun: Optional[Boolean] + RouteTableId: RouteTableId + DestinationCidrBlock: Optional[String] + GatewayId: 
+    DestinationIpv6CidrBlock: Optional[String]
+    EgressOnlyInternetGatewayId: Optional[EgressOnlyInternetGatewayId]
+    InstanceId: Optional[InstanceId]
+    NetworkInterfaceId: Optional[NetworkInterfaceId]
+    VpcPeeringConnectionId: Optional[VpcPeeringConnectionId]
+    NatGatewayId: Optional[NatGatewayId]
+
+
+class ReplaceRouteTableAssociationRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    AssociationId: RouteTableAssociationId
+    RouteTableId: RouteTableId
+
+
+class ReplaceRouteTableAssociationResult(TypedDict, total=False):
+    NewAssociationId: Optional[String]
+    AssociationState: Optional[RouteTableAssociationState]
+
+
+class ReplaceTransitGatewayRouteRequest(ServiceRequest):
+    DestinationCidrBlock: String
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    TransitGatewayAttachmentId: Optional[TransitGatewayAttachmentId]
+    Blackhole: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class ReplaceTransitGatewayRouteResult(TypedDict, total=False):
+    Route: Optional[TransitGatewayRoute]
+
+
+class ReplaceVpnTunnelRequest(ServiceRequest):
+    VpnConnectionId: VpnConnectionId
+    VpnTunnelOutsideIpAddress: String
+    ApplyPendingMaintenance: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class ReplaceVpnTunnelResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class ReportInstanceStatusRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    Instances: InstanceIdStringList
+    Status: ReportStatusType
+    StartTime: Optional[DateTime]
+    EndTime: Optional[DateTime]
+    ReasonCodes: ReasonCodesList
+    Description: Optional[ReportInstanceStatusRequestDescription]
+
+
+class RequestSpotFleetRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    SpotFleetRequestConfig: SpotFleetRequestConfigData
+
+
+class RequestSpotFleetResponse(TypedDict, total=False):
+    SpotFleetRequestId: Optional[String]
+
+
+RequestSpotLaunchSpecificationSecurityGroupList = List[String]
+RequestSpotLaunchSpecificationSecurityGroupIdList = List[SecurityGroupId]
+
+
+class RequestSpotLaunchSpecification(TypedDict, total=False):
+    SecurityGroupIds: Optional[RequestSpotLaunchSpecificationSecurityGroupIdList]
+    SecurityGroups: Optional[RequestSpotLaunchSpecificationSecurityGroupList]
+    AddressingType: Optional[String]
+    BlockDeviceMappings: Optional[BlockDeviceMappingList]
+    EbsOptimized: Optional[Boolean]
+    IamInstanceProfile: Optional[IamInstanceProfileSpecification]
+    ImageId: Optional[ImageId]
+    InstanceType: Optional[InstanceType]
+    KernelId: Optional[KernelId]
+    KeyName: Optional[KeyPairNameWithResolver]
+    Monitoring: Optional[RunInstancesMonitoringEnabled]
+    NetworkInterfaces: Optional[InstanceNetworkInterfaceSpecificationList]
+    Placement: Optional[SpotPlacement]
+    RamdiskId: Optional[RamdiskId]
+    SubnetId: Optional[SubnetId]
+    UserData: Optional[SensitiveUserData]
+
+
+class RequestSpotInstancesRequest(ServiceRequest):
+    LaunchSpecification: Optional[RequestSpotLaunchSpecification]
+    TagSpecifications: Optional[TagSpecificationList]
+    InstanceInterruptionBehavior: Optional[InstanceInterruptionBehavior]
+    DryRun: Optional[Boolean]
+    SpotPrice: Optional[String]
+    ClientToken: Optional[String]
+    InstanceCount: Optional[Integer]
+    Type: Optional[SpotInstanceType]
+    ValidFrom: Optional[DateTime]
+    ValidUntil: Optional[DateTime]
+    LaunchGroup: Optional[String]
+    AvailabilityZoneGroup: Optional[String]
+    BlockDurationMinutes: Optional[Integer]
+
+
+class RequestSpotInstancesResult(TypedDict, total=False):
+    SpotInstanceRequests: Optional[SpotInstanceRequestList]
+
+
+class ResetAddressAttributeRequest(ServiceRequest):
+    AllocationId: AllocationId
+    Attribute: AddressAttributeName
+    DryRun: Optional[Boolean]
+
+
+class ResetAddressAttributeResult(TypedDict, total=False):
+    Address: Optional[AddressAttribute]
+
+
+class ResetEbsDefaultKmsKeyIdRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+
+
+class ResetEbsDefaultKmsKeyIdResult(TypedDict, total=False):
+    KmsKeyId: Optional[String]
+
+
+class ResetFpgaImageAttributeRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    FpgaImageId: FpgaImageId
+    Attribute: Optional[ResetFpgaImageAttributeName]
+
+
+class ResetFpgaImageAttributeResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class ResetImageAttributeRequest(ServiceRequest):
+    Attribute: ResetImageAttributeName
+    ImageId: ImageId
+    DryRun: Optional[Boolean]
+
+
+class ResetInstanceAttributeRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    InstanceId: InstanceId
+    Attribute: InstanceAttributeName
+
+
+class ResetNetworkInterfaceAttributeRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    NetworkInterfaceId: NetworkInterfaceId
+    SourceDestCheck: Optional[String]
+
+
+class ResetSnapshotAttributeRequest(ServiceRequest):
+    Attribute: SnapshotAttributeName
+    SnapshotId: SnapshotId
+    DryRun: Optional[Boolean]
+
+
+class RestoreAddressToClassicRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    PublicIp: String
+
+
+class RestoreAddressToClassicResult(TypedDict, total=False):
+    PublicIp: Optional[String]
+    Status: Optional[Status]
+
+
+class RestoreImageFromRecycleBinRequest(ServiceRequest):
+    ImageId: ImageId
+    DryRun: Optional[Boolean]
+
+
+class RestoreImageFromRecycleBinResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class RestoreManagedPrefixListVersionRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    PrefixListId: PrefixListResourceId
+    PreviousVersion: Long
+    CurrentVersion: Long
+
+
+class RestoreManagedPrefixListVersionResult(TypedDict, total=False):
+    PrefixList: Optional[ManagedPrefixList]
+
+
+class RestoreSnapshotFromRecycleBinRequest(ServiceRequest):
+    SnapshotId: SnapshotId
+    DryRun: Optional[Boolean]
+
+
+class RestoreSnapshotFromRecycleBinResult(TypedDict, total=False):
+    SnapshotId: Optional[String]
+    OutpostArn: Optional[String]
+    Description: Optional[String]
+    Encrypted: Optional[Boolean]
+    OwnerId: Optional[String]
+    Progress: Optional[String]
+    StartTime: Optional[MillisecondDateTime]
+    State: Optional[SnapshotState]
+    VolumeId: Optional[String]
+    VolumeSize: Optional[Integer]
+    SseType: Optional[SSEType]
+
+
+class RestoreSnapshotTierRequest(ServiceRequest):
+    SnapshotId: SnapshotId
+    TemporaryRestoreDays: Optional[RestoreSnapshotTierRequestTemporaryRestoreDays]
+    PermanentRestore: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class RestoreSnapshotTierResult(TypedDict, total=False):
+    SnapshotId: Optional[String]
+    RestoreStartTime: Optional[MillisecondDateTime]
+    RestoreDuration: Optional[Integer]
+    IsPermanentRestore: Optional[Boolean]
+
+
+class RevokeClientVpnIngressRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    TargetNetworkCidr: String
+    AccessGroupId: Optional[String]
+    RevokeAllGroups: Optional[Boolean]
+    DryRun: Optional[Boolean]
+
+
+class RevokeClientVpnIngressResult(TypedDict, total=False):
+    Status: Optional[ClientVpnAuthorizationRuleStatus]
+
+
+class RevokeSecurityGroupEgressRequest(ServiceRequest):
+    SecurityGroupRuleIds: Optional[SecurityGroupRuleIdList]
+    DryRun: Optional[Boolean]
+    GroupId: SecurityGroupId
+    SourceSecurityGroupName: Optional[String]
+    SourceSecurityGroupOwnerId: Optional[String]
+    IpProtocol: Optional[String]
+    FromPort: Optional[Integer]
+    ToPort: Optional[Integer]
+    CidrIp: Optional[String]
+    IpPermissions: Optional[IpPermissionList]
+
+
+class RevokedSecurityGroupRule(TypedDict, total=False):
+    SecurityGroupRuleId: Optional[SecurityGroupRuleId]
+    GroupId: Optional[SecurityGroupId]
+    IsEgress: Optional[Boolean]
+    IpProtocol: Optional[String]
+    FromPort: Optional[Integer]
+    ToPort: Optional[Integer]
+    CidrIpv4: Optional[String]
+    CidrIpv6: Optional[String]
+    PrefixListId: Optional[PrefixListResourceId]
+    ReferencedGroupId: Optional[SecurityGroupId]
+    Description: Optional[String]
+
+
+RevokedSecurityGroupRuleList = List[RevokedSecurityGroupRule]
+
+
+class RevokeSecurityGroupEgressResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+    UnknownIpPermissions: Optional[IpPermissionList]
+    RevokedSecurityGroupRules: Optional[RevokedSecurityGroupRuleList]
+
+
+class RevokeSecurityGroupIngressRequest(ServiceRequest):
+    CidrIp: Optional[String]
+    FromPort: Optional[Integer]
+    GroupId: Optional[SecurityGroupId]
+    GroupName: Optional[SecurityGroupName]
+    IpPermissions: Optional[IpPermissionList]
+    IpProtocol: Optional[String]
+    SourceSecurityGroupName: Optional[String]
+    SourceSecurityGroupOwnerId: Optional[String]
+    ToPort: Optional[Integer]
+    SecurityGroupRuleIds: Optional[SecurityGroupRuleIdList]
+    DryRun: Optional[Boolean]
+
+
+class RevokeSecurityGroupIngressResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+    UnknownIpPermissions: Optional[IpPermissionList]
+    RevokedSecurityGroupRules: Optional[RevokedSecurityGroupRuleList]
+
+
+class RunInstancesRequest(ServiceRequest):
+    BlockDeviceMappings: Optional[BlockDeviceMappingRequestList]
+    ImageId: Optional[ImageId]
+    InstanceType: Optional[InstanceType]
+    Ipv6AddressCount: Optional[Integer]
+    Ipv6Addresses: Optional[InstanceIpv6AddressList]
+    KernelId: Optional[KernelId]
+    KeyName: Optional[KeyPairName]
+    MaxCount: Integer
+    MinCount: Integer
+    Monitoring: Optional[RunInstancesMonitoringEnabled]
+    Placement: Optional[Placement]
+    RamdiskId: Optional[RamdiskId]
+    SecurityGroupIds: Optional[SecurityGroupIdStringList]
+    SecurityGroups: Optional[SecurityGroupStringList]
+    SubnetId: Optional[SubnetId]
+    UserData: Optional[RunInstancesUserData]
+    ElasticGpuSpecification: Optional[ElasticGpuSpecifications]
+    ElasticInferenceAccelerators: Optional[ElasticInferenceAccelerators]
+    TagSpecifications: Optional[TagSpecificationList]
+    LaunchTemplate: Optional[LaunchTemplateSpecification]
+    InstanceMarketOptions: Optional[InstanceMarketOptionsRequest]
+    CreditSpecification: Optional[CreditSpecificationRequest]
+    CpuOptions: Optional[CpuOptionsRequest]
+    CapacityReservationSpecification: Optional[CapacityReservationSpecification]
+    HibernationOptions: Optional[HibernationOptionsRequest]
+    LicenseSpecifications: Optional[LicenseSpecificationListRequest]
+    MetadataOptions: Optional[InstanceMetadataOptionsRequest]
+    EnclaveOptions: Optional[EnclaveOptionsRequest]
+    PrivateDnsNameOptions: Optional[PrivateDnsNameOptionsRequest]
+    MaintenanceOptions: Optional[InstanceMaintenanceOptionsRequest]
+    DisableApiStop: Optional[Boolean]
+    EnablePrimaryIpv6: Optional[Boolean]
+    NetworkPerformanceOptions: Optional[InstanceNetworkPerformanceOptionsRequest]
+    Operator: Optional[OperatorRequest]
+    DryRun: Optional[Boolean]
+    DisableApiTermination: Optional[Boolean]
+    InstanceInitiatedShutdownBehavior: Optional[ShutdownBehavior]
+    PrivateIpAddress: Optional[String]
+    ClientToken: Optional[String]
+    AdditionalInfo: Optional[String]
+    NetworkInterfaces: Optional[InstanceNetworkInterfaceSpecificationList]
+    IamInstanceProfile: Optional[IamInstanceProfileSpecification]
+    EbsOptimized: Optional[Boolean]
+
+
+ScheduledInstancesSecurityGroupIdSet = List[SecurityGroupId]
+
+
+class ScheduledInstancesPlacement(TypedDict, total=False):
+    AvailabilityZone: Optional[String]
+    GroupName: Optional[PlacementGroupName]
+
+
+class ScheduledInstancesIpv6Address(TypedDict, total=False):
+    Ipv6Address: Optional[Ipv6Address]
+
+
+ScheduledInstancesIpv6AddressList = List[ScheduledInstancesIpv6Address]
+
+
+class ScheduledInstancesNetworkInterface(TypedDict, total=False):
+    AssociatePublicIpAddress: Optional[Boolean]
+    DeleteOnTermination: Optional[Boolean]
+    Description: Optional[String]
+    DeviceIndex: Optional[Integer]
+    Groups: Optional[ScheduledInstancesSecurityGroupIdSet]
+    Ipv6AddressCount: Optional[Integer]
+    Ipv6Addresses: Optional[ScheduledInstancesIpv6AddressList]
+    NetworkInterfaceId: Optional[NetworkInterfaceId]
+    PrivateIpAddress: Optional[String]
+    PrivateIpAddressConfigs: Optional[PrivateIpAddressConfigSet]
+    SecondaryPrivateIpAddressCount: Optional[Integer]
+    SubnetId: Optional[SubnetId]
+
+
+ScheduledInstancesNetworkInterfaceSet = List[ScheduledInstancesNetworkInterface]
+
+
+class ScheduledInstancesMonitoring(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+
+
+class ScheduledInstancesIamInstanceProfile(TypedDict, total=False):
+    Arn: Optional[String]
+    Name: Optional[String]
+
+
+class ScheduledInstancesEbs(TypedDict, total=False):
+    DeleteOnTermination: Optional[Boolean]
+    Encrypted: Optional[Boolean]
+    Iops: Optional[Integer]
+    SnapshotId: Optional[SnapshotId]
+    VolumeSize: Optional[Integer]
+    VolumeType: Optional[String]
+
+
+class ScheduledInstancesBlockDeviceMapping(TypedDict, total=False):
+    DeviceName: Optional[String]
+    Ebs: Optional[ScheduledInstancesEbs]
+    NoDevice: Optional[String]
+    VirtualName: Optional[String]
+
+
+ScheduledInstancesBlockDeviceMappingSet = List[ScheduledInstancesBlockDeviceMapping]
+
+
+class ScheduledInstancesLaunchSpecification(TypedDict, total=False):
+    BlockDeviceMappings: Optional[ScheduledInstancesBlockDeviceMappingSet]
+    EbsOptimized: Optional[Boolean]
+    IamInstanceProfile: Optional[ScheduledInstancesIamInstanceProfile]
+    ImageId: ImageId
+    InstanceType: Optional[String]
+    KernelId: Optional[KernelId]
+    KeyName: Optional[KeyPairName]
+    Monitoring: Optional[ScheduledInstancesMonitoring]
+    NetworkInterfaces: Optional[ScheduledInstancesNetworkInterfaceSet]
+    Placement: Optional[ScheduledInstancesPlacement]
+    RamdiskId: Optional[RamdiskId]
+    SecurityGroupIds: Optional[ScheduledInstancesSecurityGroupIdSet]
+    SubnetId: Optional[SubnetId]
+    UserData: Optional[String]
+
+
+class RunScheduledInstancesRequest(ServiceRequest):
+    ClientToken: Optional[String]
+    DryRun: Optional[Boolean]
+    InstanceCount: Optional[Integer]
+    LaunchSpecification: ScheduledInstancesLaunchSpecification
+    ScheduledInstanceId: ScheduledInstanceId
+
+
+class RunScheduledInstancesResult(TypedDict, total=False):
+    InstanceIdSet: Optional[InstanceIdSet]
+
+
+class SearchLocalGatewayRoutesRequest(ServiceRequest):
+    LocalGatewayRouteTableId: LocalGatewayRoutetableId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class SearchLocalGatewayRoutesResult(TypedDict, total=False):
+    Routes: Optional[LocalGatewayRouteList]
+    NextToken: Optional[String]
+
+
+class SearchTransitGatewayMulticastGroupsRequest(ServiceRequest):
+    TransitGatewayMulticastDomainId: TransitGatewayMulticastDomainId
+    Filters: Optional[FilterList]
+    MaxResults: Optional[TransitGatewayMaxResults]
+    NextToken: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TransitGatewayMulticastGroup(TypedDict, total=False):
+    GroupIpAddress: Optional[String]
+    TransitGatewayAttachmentId: Optional[String]
+    SubnetId: Optional[String]
+    ResourceId: Optional[String]
+    ResourceType: Optional[TransitGatewayAttachmentResourceType]
+    ResourceOwnerId: Optional[String]
+    NetworkInterfaceId: Optional[String]
+    GroupMember: Optional[Boolean]
+    GroupSource: Optional[Boolean]
+    MemberType: Optional[MembershipType]
+    SourceType: Optional[MembershipType]
+
+
+TransitGatewayMulticastGroupList = List[TransitGatewayMulticastGroup]
+
+
+class SearchTransitGatewayMulticastGroupsResult(TypedDict, total=False):
+    MulticastGroups: Optional[TransitGatewayMulticastGroupList]
+    NextToken: Optional[String]
+
+
+class SearchTransitGatewayRoutesRequest(ServiceRequest):
+    TransitGatewayRouteTableId: TransitGatewayRouteTableId
+    Filters: FilterList
+    MaxResults: Optional[TransitGatewayMaxResults]
+    DryRun: Optional[Boolean]
+
+
+TransitGatewayRouteList = List[TransitGatewayRoute]
+
+
+class SearchTransitGatewayRoutesResult(TypedDict, total=False):
+    Routes: Optional[TransitGatewayRouteList]
+    AdditionalRoutesAvailable: Optional[Boolean]
+
+
+class SecurityGroupRuleDescription(TypedDict, total=False):
+    SecurityGroupRuleId: Optional[String]
+    Description: Optional[String]
+
+
+SecurityGroupRuleDescriptionList = List[SecurityGroupRuleDescription]
+
+
+class SendDiagnosticInterruptRequest(ServiceRequest):
+    InstanceId: InstanceId
+    DryRun: Optional[Boolean]
+
+
+class StartDeclarativePoliciesReportRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    S3Bucket: String
+    S3Prefix: Optional[String]
+    TargetId: String
+    TagSpecifications: Optional[TagSpecificationList]
+
+
+class StartDeclarativePoliciesReportResult(TypedDict, total=False):
+    ReportId: Optional[String]
+
+
+class StartInstancesRequest(ServiceRequest):
+    InstanceIds: InstanceIdStringList
+    AdditionalInfo: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class StartInstancesResult(TypedDict, total=False):
+    StartingInstances: Optional[InstanceStateChangeList]
+
+
+class StartNetworkInsightsAccessScopeAnalysisRequest(ServiceRequest):
+    NetworkInsightsAccessScopeId: NetworkInsightsAccessScopeId
+    DryRun: Optional[Boolean]
+    TagSpecifications: Optional[TagSpecificationList]
+    ClientToken: String
+
+
+class StartNetworkInsightsAccessScopeAnalysisResult(TypedDict, total=False):
+    NetworkInsightsAccessScopeAnalysis: Optional[NetworkInsightsAccessScopeAnalysis]
+
+
+class StartNetworkInsightsAnalysisRequest(ServiceRequest):
+    NetworkInsightsPathId: NetworkInsightsPathId
+    AdditionalAccounts: Optional[ValueStringList]
+    FilterInArns: Optional[ArnList]
+    FilterOutArns: Optional[ArnList]
+    DryRun: Optional[Boolean]
+    TagSpecifications: Optional[TagSpecificationList]
+    ClientToken: String
+
+
+class StartNetworkInsightsAnalysisResult(TypedDict, total=False):
+    NetworkInsightsAnalysis: Optional[NetworkInsightsAnalysis]
+
+
+class StartVpcEndpointServicePrivateDnsVerificationRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    ServiceId: VpcEndpointServiceId
+
+
+class StartVpcEndpointServicePrivateDnsVerificationResult(TypedDict, total=False):
+    ReturnValue: Optional[Boolean]
+
+
+class StopInstancesRequest(ServiceRequest):
+    InstanceIds: InstanceIdStringList
+    Hibernate: Optional[Boolean]
+    DryRun: Optional[Boolean]
+    Force: Optional[Boolean]
+
+
+class StopInstancesResult(TypedDict, total=False):
+    StoppingInstances: Optional[InstanceStateChangeList]
+
+
+class TerminateClientVpnConnectionsRequest(ServiceRequest):
+    ClientVpnEndpointId: ClientVpnEndpointId
+    ConnectionId: Optional[String]
+    Username: Optional[String]
+    DryRun: Optional[Boolean]
+
+
+class TerminateConnectionStatus(TypedDict, total=False):
+    ConnectionId: Optional[String]
+    PreviousStatus: Optional[ClientVpnConnectionStatus]
+    CurrentStatus: Optional[ClientVpnConnectionStatus]
+
+
+TerminateConnectionStatusSet = List[TerminateConnectionStatus]
+
+
+class TerminateClientVpnConnectionsResult(TypedDict, total=False):
+    ClientVpnEndpointId: Optional[String]
+    Username: Optional[String]
+    ConnectionStatuses: Optional[TerminateConnectionStatusSet]
+
+
+class TerminateInstancesRequest(ServiceRequest):
+    InstanceIds: InstanceIdStringList
+    DryRun: Optional[Boolean]
+
+
+class TerminateInstancesResult(TypedDict, total=False):
+    TerminatingInstances: Optional[InstanceStateChangeList]
+
+
+class UnassignIpv6AddressesRequest(ServiceRequest):
+    Ipv6Prefixes: Optional[IpPrefixList]
+    NetworkInterfaceId: NetworkInterfaceId
+    Ipv6Addresses: Optional[Ipv6AddressList]
+
+
+class UnassignIpv6AddressesResult(TypedDict, total=False):
+    NetworkInterfaceId: Optional[String]
+    UnassignedIpv6Addresses: Optional[Ipv6AddressList]
+    UnassignedIpv6Prefixes: Optional[IpPrefixList]
+
+
+class UnassignPrivateIpAddressesRequest(ServiceRequest):
+    Ipv4Prefixes: Optional[IpPrefixList]
+    NetworkInterfaceId: NetworkInterfaceId
+    PrivateIpAddresses: Optional[PrivateIpAddressStringList]
+
+
+class UnassignPrivateNatGatewayAddressRequest(ServiceRequest):
+    NatGatewayId: NatGatewayId
+    PrivateIpAddresses: IpList
+    MaxDrainDurationSeconds: Optional[DrainSeconds]
+    DryRun: Optional[Boolean]
+
+
+class UnassignPrivateNatGatewayAddressResult(TypedDict, total=False):
+    NatGatewayId: Optional[NatGatewayId]
+    NatGatewayAddresses: Optional[NatGatewayAddressList]
+
+
+class UnlockSnapshotRequest(ServiceRequest):
+    SnapshotId: SnapshotId
+    DryRun: Optional[Boolean]
+
+
+class UnlockSnapshotResult(TypedDict, total=False):
+    SnapshotId: Optional[String]
+
+
+class UnmonitorInstancesRequest(ServiceRequest):
+    InstanceIds: InstanceIdStringList
+    DryRun: Optional[Boolean]
+
+
+class UnmonitorInstancesResult(TypedDict, total=False):
+    InstanceMonitorings: Optional[InstanceMonitoringList]
+
+
+class UpdateSecurityGroupRuleDescriptionsEgressRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    GroupId: Optional[SecurityGroupId]
+    GroupName: Optional[SecurityGroupName]
+    IpPermissions: Optional[IpPermissionList]
+    SecurityGroupRuleDescriptions: Optional[SecurityGroupRuleDescriptionList]
+
+
+class UpdateSecurityGroupRuleDescriptionsEgressResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class UpdateSecurityGroupRuleDescriptionsIngressRequest(ServiceRequest):
+    DryRun: Optional[Boolean]
+    GroupId: Optional[SecurityGroupId]
+    GroupName: Optional[SecurityGroupName]
+    IpPermissions: Optional[IpPermissionList]
+    SecurityGroupRuleDescriptions: Optional[SecurityGroupRuleDescriptionList]
+
+
+class UpdateSecurityGroupRuleDescriptionsIngressResult(TypedDict, total=False):
+    Return: Optional[Boolean]
+
+
+class WithdrawByoipCidrRequest(ServiceRequest):
+    Cidr: String
+    DryRun: Optional[Boolean]
+
+
+class WithdrawByoipCidrResult(TypedDict, total=False):
+    ByoipCidr: Optional[ByoipCidr]
+
+
+class Ec2Api:
+    service = "ec2"
+    version = "2016-11-15"
= "2016-11-15" + + @handler("AcceptAddressTransfer") + def accept_address_transfer( + self, + context: RequestContext, + address: String, + tag_specifications: TagSpecificationList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> AcceptAddressTransferResult: + raise NotImplementedError + + @handler("AcceptCapacityReservationBillingOwnership") + def accept_capacity_reservation_billing_ownership( + self, + context: RequestContext, + capacity_reservation_id: CapacityReservationId, + dry_run: Boolean | None = None, + **kwargs, + ) -> AcceptCapacityReservationBillingOwnershipResult: + raise NotImplementedError + + @handler("AcceptReservedInstancesExchangeQuote") + def accept_reserved_instances_exchange_quote( + self, + context: RequestContext, + reserved_instance_ids: ReservedInstanceIdSet, + dry_run: Boolean | None = None, + target_configurations: TargetConfigurationRequestSet | None = None, + **kwargs, + ) -> AcceptReservedInstancesExchangeQuoteResult: + raise NotImplementedError + + @handler("AcceptTransitGatewayMulticastDomainAssociations") + def accept_transit_gateway_multicast_domain_associations( + self, + context: RequestContext, + transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId | None = None, + transit_gateway_attachment_id: TransitGatewayAttachmentId | None = None, + subnet_ids: ValueStringList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> AcceptTransitGatewayMulticastDomainAssociationsResult: + raise NotImplementedError + + @handler("AcceptTransitGatewayPeeringAttachment") + def accept_transit_gateway_peering_attachment( + self, + context: RequestContext, + transit_gateway_attachment_id: TransitGatewayAttachmentId, + dry_run: Boolean | None = None, + **kwargs, + ) -> AcceptTransitGatewayPeeringAttachmentResult: + raise NotImplementedError + + @handler("AcceptTransitGatewayVpcAttachment") + def accept_transit_gateway_vpc_attachment( + self, + context: RequestContext, + transit_gateway_attachment_id: TransitGatewayAttachmentId, + dry_run: Boolean | None = None, + **kwargs, + ) -> AcceptTransitGatewayVpcAttachmentResult: + raise NotImplementedError + + @handler("AcceptVpcEndpointConnections") + def accept_vpc_endpoint_connections( + self, + context: RequestContext, + service_id: VpcEndpointServiceId, + vpc_endpoint_ids: VpcEndpointIdList, + dry_run: Boolean | None = None, + **kwargs, + ) -> AcceptVpcEndpointConnectionsResult: + raise NotImplementedError + + @handler("AcceptVpcPeeringConnection") + def accept_vpc_peering_connection( + self, + context: RequestContext, + vpc_peering_connection_id: VpcPeeringConnectionIdWithResolver, + dry_run: Boolean | None = None, + **kwargs, + ) -> AcceptVpcPeeringConnectionResult: + raise NotImplementedError + + @handler("AdvertiseByoipCidr") + def advertise_byoip_cidr( + self, + context: RequestContext, + cidr: String, + asn: String | None = None, + dry_run: Boolean | None = None, + network_border_group: String | None = None, + **kwargs, + ) -> AdvertiseByoipCidrResult: + raise NotImplementedError + + @handler("AllocateAddress") + def allocate_address( + self, + context: RequestContext, + domain: DomainType | None = None, + address: PublicIpAddress | None = None, + public_ipv4_pool: Ipv4PoolEc2Id | None = None, + network_border_group: String | None = None, + customer_owned_ipv4_pool: String | None = None, + tag_specifications: TagSpecificationList | None = None, + ipam_pool_id: IpamPoolId | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> AllocateAddressResult: + raise 
+
+    @handler("AllocateHosts")
+    def allocate_hosts(
+        self,
+        context: RequestContext,
+        instance_family: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        host_recovery: HostRecovery | None = None,
+        outpost_arn: String | None = None,
+        host_maintenance: HostMaintenance | None = None,
+        asset_ids: AssetIdList | None = None,
+        availability_zone_id: AvailabilityZoneId | None = None,
+        auto_placement: AutoPlacement | None = None,
+        client_token: String | None = None,
+        instance_type: String | None = None,
+        quantity: Integer | None = None,
+        availability_zone: String | None = None,
+        **kwargs,
+    ) -> AllocateHostsResult:
+        raise NotImplementedError
+
+    @handler("AllocateIpamPoolCidr")
+    def allocate_ipam_pool_cidr(
+        self,
+        context: RequestContext,
+        ipam_pool_id: IpamPoolId,
+        dry_run: Boolean | None = None,
+        cidr: String | None = None,
+        netmask_length: Integer | None = None,
+        client_token: String | None = None,
+        description: String | None = None,
+        preview_next_cidr: Boolean | None = None,
+        allowed_cidrs: IpamPoolAllocationAllowedCidrs | None = None,
+        disallowed_cidrs: IpamPoolAllocationDisallowedCidrs | None = None,
+        **kwargs,
+    ) -> AllocateIpamPoolCidrResult:
+        raise NotImplementedError
+
+    @handler("ApplySecurityGroupsToClientVpnTargetNetwork")
+    def apply_security_groups_to_client_vpn_target_network(
+        self,
+        context: RequestContext,
+        client_vpn_endpoint_id: ClientVpnEndpointId,
+        vpc_id: VpcId,
+        security_group_ids: ClientVpnSecurityGroupIdSet,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ApplySecurityGroupsToClientVpnTargetNetworkResult:
+        raise NotImplementedError
+
+    @handler("AssignIpv6Addresses")
+    def assign_ipv6_addresses(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId,
+        ipv6_prefix_count: Integer | None = None,
+        ipv6_prefixes: IpPrefixList | None = None,
+        ipv6_addresses: Ipv6AddressList | None = None,
+        ipv6_address_count: Integer | None = None,
+        **kwargs,
+    ) -> AssignIpv6AddressesResult:
+        raise NotImplementedError
+
+    @handler("AssignPrivateIpAddresses")
+    def assign_private_ip_addresses(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId,
+        ipv4_prefixes: IpPrefixList | None = None,
+        ipv4_prefix_count: Integer | None = None,
+        private_ip_addresses: PrivateIpAddressStringList | None = None,
+        secondary_private_ip_address_count: Integer | None = None,
+        allow_reassignment: Boolean | None = None,
+        **kwargs,
+    ) -> AssignPrivateIpAddressesResult:
+        raise NotImplementedError
+
+    @handler("AssignPrivateNatGatewayAddress")
+    def assign_private_nat_gateway_address(
+        self,
+        context: RequestContext,
+        nat_gateway_id: NatGatewayId,
+        private_ip_addresses: IpList | None = None,
+        private_ip_address_count: PrivateIpAddressCount | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssignPrivateNatGatewayAddressResult:
+        raise NotImplementedError
+
+    @handler("AssociateAddress")
+    def associate_address(
+        self,
+        context: RequestContext,
+        allocation_id: AllocationId | None = None,
+        instance_id: InstanceId | None = None,
+        public_ip: EipAllocationPublicIp | None = None,
+        dry_run: Boolean | None = None,
+        network_interface_id: NetworkInterfaceId | None = None,
+        private_ip_address: String | None = None,
+        allow_reassociation: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateAddressResult:
+        raise NotImplementedError
+
+    @handler("AssociateCapacityReservationBillingOwner")
+    def associate_capacity_reservation_billing_owner(
+        self,
+        context: RequestContext,
+        capacity_reservation_id: CapacityReservationId,
+        unused_reservation_billing_owner_id: AccountID,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateCapacityReservationBillingOwnerResult:
+        raise NotImplementedError
+
+    @handler("AssociateClientVpnTargetNetwork")
+    def associate_client_vpn_target_network(
+        self,
+        context: RequestContext,
+        client_vpn_endpoint_id: ClientVpnEndpointId,
+        subnet_id: SubnetId,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateClientVpnTargetNetworkResult:
+        raise NotImplementedError
+
+    @handler("AssociateDhcpOptions")
+    def associate_dhcp_options(
+        self,
+        context: RequestContext,
+        dhcp_options_id: DefaultingDhcpOptionsId,
+        vpc_id: VpcId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("AssociateEnclaveCertificateIamRole")
+    def associate_enclave_certificate_iam_role(
+        self,
+        context: RequestContext,
+        certificate_arn: CertificateId,
+        role_arn: RoleId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateEnclaveCertificateIamRoleResult:
+        raise NotImplementedError
+
+    @handler("AssociateIamInstanceProfile")
+    def associate_iam_instance_profile(
+        self,
+        context: RequestContext,
+        iam_instance_profile: IamInstanceProfileSpecification,
+        instance_id: InstanceId,
+        **kwargs,
+    ) -> AssociateIamInstanceProfileResult:
+        raise NotImplementedError
+
+    @handler("AssociateInstanceEventWindow")
+    def associate_instance_event_window(
+        self,
+        context: RequestContext,
+        instance_event_window_id: InstanceEventWindowId,
+        association_target: InstanceEventWindowAssociationRequest,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateInstanceEventWindowResult:
+        raise NotImplementedError
+
+    @handler("AssociateIpamByoasn")
+    def associate_ipam_byoasn(
+        self,
+        context: RequestContext,
+        asn: String,
+        cidr: String,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateIpamByoasnResult:
+        raise NotImplementedError
+
+    @handler("AssociateIpamResourceDiscovery")
+    def associate_ipam_resource_discovery(
+        self,
+        context: RequestContext,
+        ipam_id: IpamId,
+        ipam_resource_discovery_id: IpamResourceDiscoveryId,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> AssociateIpamResourceDiscoveryResult:
+        raise NotImplementedError
+
+    @handler("AssociateNatGatewayAddress")
+    def associate_nat_gateway_address(
+        self,
+        context: RequestContext,
+        nat_gateway_id: NatGatewayId,
+        allocation_ids: AllocationIdList,
+        private_ip_addresses: IpList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateNatGatewayAddressResult:
+        raise NotImplementedError
+
+    @handler("AssociateRouteServer")
+    def associate_route_server(
+        self,
+        context: RequestContext,
+        route_server_id: RouteServerId,
+        vpc_id: VpcId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateRouteServerResult:
+        raise NotImplementedError
+
+    @handler("AssociateRouteTable")
+    def associate_route_table(
+        self,
+        context: RequestContext,
+        route_table_id: RouteTableId,
+        gateway_id: RouteGatewayId | None = None,
+        dry_run: Boolean | None = None,
+        subnet_id: SubnetId | None = None,
+        **kwargs,
+    ) -> AssociateRouteTableResult:
+        raise NotImplementedError
+
+    @handler("AssociateSecurityGroupVpc")
+    def associate_security_group_vpc(
+        self,
+        context: RequestContext,
+        group_id: SecurityGroupId,
+        vpc_id: VpcId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateSecurityGroupVpcResult:
+        raise NotImplementedError
+
+    @handler("AssociateSubnetCidrBlock")
+    def associate_subnet_cidr_block(
+        self,
+        context: RequestContext,
+        subnet_id: SubnetId,
+        ipv6_ipam_pool_id: IpamPoolId | None = None,
+        ipv6_netmask_length: NetmaskLength | None = None,
+        ipv6_cidr_block: String | None = None,
+        **kwargs,
+    ) -> AssociateSubnetCidrBlockResult:
+        raise NotImplementedError
+
+    @handler("AssociateTransitGatewayMulticastDomain")
+    def associate_transit_gateway_multicast_domain(
+        self,
+        context: RequestContext,
+        transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        subnet_ids: TransitGatewaySubnetIdList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateTransitGatewayMulticastDomainResult:
+        raise NotImplementedError
+
+    @handler("AssociateTransitGatewayPolicyTable")
+    def associate_transit_gateway_policy_table(
+        self,
+        context: RequestContext,
+        transit_gateway_policy_table_id: TransitGatewayPolicyTableId,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateTransitGatewayPolicyTableResult:
+        raise NotImplementedError
+
+    @handler("AssociateTransitGatewayRouteTable")
+    def associate_transit_gateway_route_table(
+        self,
+        context: RequestContext,
+        transit_gateway_route_table_id: TransitGatewayRouteTableId,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateTransitGatewayRouteTableResult:
+        raise NotImplementedError
+
+    @handler("AssociateTrunkInterface")
+    def associate_trunk_interface(
+        self,
+        context: RequestContext,
+        branch_interface_id: NetworkInterfaceId,
+        trunk_interface_id: NetworkInterfaceId,
+        vlan_id: Integer | None = None,
+        gre_key: Integer | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateTrunkInterfaceResult:
+        raise NotImplementedError
+
+    @handler("AssociateVpcCidrBlock")
+    def associate_vpc_cidr_block(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        cidr_block: String | None = None,
+        ipv6_cidr_block_network_border_group: String | None = None,
+        ipv6_pool: Ipv6PoolEc2Id | None = None,
+        ipv6_cidr_block: String | None = None,
+        ipv4_ipam_pool_id: IpamPoolId | None = None,
+        ipv4_netmask_length: NetmaskLength | None = None,
+        ipv6_ipam_pool_id: IpamPoolId | None = None,
+        ipv6_netmask_length: NetmaskLength | None = None,
+        amazon_provided_ipv6_cidr_block: Boolean | None = None,
+        **kwargs,
+    ) -> AssociateVpcCidrBlockResult:
+        raise NotImplementedError
+
+    @handler("AttachClassicLinkVpc")
+    def attach_classic_link_vpc(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        vpc_id: VpcId,
+        groups: GroupIdStringList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AttachClassicLinkVpcResult:
+        raise NotImplementedError
+
+    @handler("AttachInternetGateway")
+    def attach_internet_gateway(
+        self,
+        context: RequestContext,
+        internet_gateway_id: InternetGatewayId,
+        vpc_id: VpcId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("AttachNetworkInterface")
+    def attach_network_interface(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId,
+        instance_id: InstanceId,
+        device_index: Integer,
+        network_card_index: Integer | None = None,
+        ena_srd_specification: EnaSrdSpecification | None = None,
+        ena_queue_count: Integer | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AttachNetworkInterfaceResult:
+        raise NotImplementedError
+
+    @handler("AttachVerifiedAccessTrustProvider")
+    def attach_verified_access_trust_provider(
+        self,
+        context: RequestContext,
+        verified_access_instance_id: VerifiedAccessInstanceId,
+        verified_access_trust_provider_id: VerifiedAccessTrustProviderId,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AttachVerifiedAccessTrustProviderResult:
+        raise NotImplementedError
+
+    @handler("AttachVolume")
+    def attach_volume(
+        self,
+        context: RequestContext,
+        device: String,
+        instance_id: InstanceId,
+        volume_id: VolumeId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> VolumeAttachment:
+        raise NotImplementedError
+
+    @handler("AttachVpnGateway")
+    def attach_vpn_gateway(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        vpn_gateway_id: VpnGatewayId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AttachVpnGatewayResult:
+        raise NotImplementedError
+
+    @handler("AuthorizeClientVpnIngress")
+    def authorize_client_vpn_ingress(
+        self,
+        context: RequestContext,
+        client_vpn_endpoint_id: ClientVpnEndpointId,
+        target_network_cidr: String,
+        access_group_id: String | None = None,
+        authorize_all_groups: Boolean | None = None,
+        description: String | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AuthorizeClientVpnIngressResult:
+        raise NotImplementedError
+
+    @handler("AuthorizeSecurityGroupEgress")
+    def authorize_security_group_egress(
+        self,
+        context: RequestContext,
+        group_id: SecurityGroupId,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        source_security_group_name: String | None = None,
+        source_security_group_owner_id: String | None = None,
+        ip_protocol: String | None = None,
+        from_port: Integer | None = None,
+        to_port: Integer | None = None,
+        cidr_ip: String | None = None,
+        ip_permissions: IpPermissionList | None = None,
+        **kwargs,
+    ) -> AuthorizeSecurityGroupEgressResult:
+        raise NotImplementedError
+
+    @handler("AuthorizeSecurityGroupIngress")
+    def authorize_security_group_ingress(
+        self,
+        context: RequestContext,
+        cidr_ip: String | None = None,
+        from_port: Integer | None = None,
+        group_id: SecurityGroupId | None = None,
+        group_name: SecurityGroupName | None = None,
+        ip_permissions: IpPermissionList | None = None,
+        ip_protocol: String | None = None,
+        source_security_group_name: String | None = None,
+        source_security_group_owner_id: String | None = None,
+        to_port: Integer | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> AuthorizeSecurityGroupIngressResult:
+        raise NotImplementedError
+
+    @handler("BundleInstance")
+    def bundle_instance(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        storage: Storage,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> BundleInstanceResult:
+        raise NotImplementedError
+
+    @handler("CancelBundleTask")
+    def cancel_bundle_task(
+        self, context: RequestContext, bundle_id: BundleId, dry_run: Boolean | None = None, **kwargs
+    ) -> CancelBundleTaskResult:
+        raise NotImplementedError
+
+    @handler("CancelCapacityReservation")
+    def cancel_capacity_reservation(
+        self,
+        context: RequestContext,
+        capacity_reservation_id: CapacityReservationId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CancelCapacityReservationResult:
+        raise NotImplementedError
+
@handler("CancelCapacityReservationFleets") + def cancel_capacity_reservation_fleets( + self, + context: RequestContext, + capacity_reservation_fleet_ids: CapacityReservationFleetIdSet, + dry_run: Boolean | None = None, + **kwargs, + ) -> CancelCapacityReservationFleetsResult: + raise NotImplementedError + + @handler("CancelConversionTask") + def cancel_conversion_task( + self, + context: RequestContext, + conversion_task_id: ConversionTaskId, + dry_run: Boolean | None = None, + reason_message: String | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("CancelDeclarativePoliciesReport") + def cancel_declarative_policies_report( + self, + context: RequestContext, + report_id: DeclarativePoliciesReportId, + dry_run: Boolean | None = None, + **kwargs, + ) -> CancelDeclarativePoliciesReportResult: + raise NotImplementedError + + @handler("CancelExportTask") + def cancel_export_task( + self, context: RequestContext, export_task_id: ExportVmTaskId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("CancelImageLaunchPermission") + def cancel_image_launch_permission( + self, context: RequestContext, image_id: ImageId, dry_run: Boolean | None = None, **kwargs + ) -> CancelImageLaunchPermissionResult: + raise NotImplementedError + + @handler("CancelImportTask") + def cancel_import_task( + self, + context: RequestContext, + cancel_reason: String | None = None, + dry_run: Boolean | None = None, + import_task_id: ImportTaskId | None = None, + **kwargs, + ) -> CancelImportTaskResult: + raise NotImplementedError + + @handler("CancelReservedInstancesListing") + def cancel_reserved_instances_listing( + self, + context: RequestContext, + reserved_instances_listing_id: ReservedInstancesListingId, + **kwargs, + ) -> CancelReservedInstancesListingResult: + raise NotImplementedError + + @handler("CancelSpotFleetRequests") + def cancel_spot_fleet_requests( + self, + context: RequestContext, + spot_fleet_request_ids: SpotFleetRequestIdList, + terminate_instances: Boolean, + dry_run: Boolean | None = None, + **kwargs, + ) -> CancelSpotFleetRequestsResponse: + raise NotImplementedError + + @handler("CancelSpotInstanceRequests") + def cancel_spot_instance_requests( + self, + context: RequestContext, + spot_instance_request_ids: SpotInstanceRequestIdList, + dry_run: Boolean | None = None, + **kwargs, + ) -> CancelSpotInstanceRequestsResult: + raise NotImplementedError + + @handler("ConfirmProductInstance") + def confirm_product_instance( + self, + context: RequestContext, + instance_id: InstanceId, + product_code: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> ConfirmProductInstanceResult: + raise NotImplementedError + + @handler("CopyFpgaImage") + def copy_fpga_image( + self, + context: RequestContext, + source_fpga_image_id: String, + source_region: String, + dry_run: Boolean | None = None, + description: String | None = None, + name: String | None = None, + client_token: String | None = None, + **kwargs, + ) -> CopyFpgaImageResult: + raise NotImplementedError + + @handler("CopyImage") + def copy_image( + self, + context: RequestContext, + name: String, + source_image_id: String, + source_region: String, + client_token: String | None = None, + description: String | None = None, + encrypted: Boolean | None = None, + kms_key_id: KmsKeyId | None = None, + destination_outpost_arn: String | None = None, + copy_image_tags: Boolean | None = None, + tag_specifications: TagSpecificationList | None = None, + snapshot_copy_completion_duration_minutes: Long | None = None, 
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CopyImageResult:
+        raise NotImplementedError
+
+    @handler("CopySnapshot")
+    def copy_snapshot(
+        self,
+        context: RequestContext,
+        source_region: String,
+        source_snapshot_id: String,
+        description: String | None = None,
+        destination_outpost_arn: String | None = None,
+        destination_region: String | None = None,
+        encrypted: Boolean | None = None,
+        kms_key_id: KmsKeyId | None = None,
+        presigned_url: CopySnapshotRequestPSU | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        completion_duration_minutes: SnapshotCompletionDurationMinutesRequest | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CopySnapshotResult:
+        raise NotImplementedError
+
+    @handler("CreateCapacityReservation")
+    def create_capacity_reservation(
+        self,
+        context: RequestContext,
+        instance_type: String,
+        instance_platform: CapacityReservationInstancePlatform,
+        instance_count: Integer,
+        client_token: String | None = None,
+        availability_zone: AvailabilityZoneName | None = None,
+        availability_zone_id: AvailabilityZoneId | None = None,
+        tenancy: CapacityReservationTenancy | None = None,
+        ebs_optimized: Boolean | None = None,
+        ephemeral_storage: Boolean | None = None,
+        end_date: DateTime | None = None,
+        end_date_type: EndDateType | None = None,
+        instance_match_criteria: InstanceMatchCriteria | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        outpost_arn: OutpostArn | None = None,
+        placement_group_arn: PlacementGroupArn | None = None,
+        start_date: MillisecondDateTime | None = None,
+        commitment_duration: CapacityReservationCommitmentDuration | None = None,
+        delivery_preference: CapacityReservationDeliveryPreference | None = None,
+        **kwargs,
+    ) -> CreateCapacityReservationResult:
+        raise NotImplementedError
+
+    @handler("CreateCapacityReservationBySplitting")
+    def create_capacity_reservation_by_splitting(
+        self,
+        context: RequestContext,
+        source_capacity_reservation_id: CapacityReservationId,
+        instance_count: Integer,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateCapacityReservationBySplittingResult:
+        raise NotImplementedError
+
+    @handler("CreateCapacityReservationFleet")
+    def create_capacity_reservation_fleet(
+        self,
+        context: RequestContext,
+        instance_type_specifications: ReservationFleetInstanceSpecificationList,
+        total_target_capacity: Integer,
+        allocation_strategy: String | None = None,
+        client_token: String | None = None,
+        tenancy: FleetCapacityReservationTenancy | None = None,
+        end_date: MillisecondDateTime | None = None,
+        instance_match_criteria: FleetInstanceMatchCriteria | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateCapacityReservationFleetResult:
+        raise NotImplementedError
+
+    @handler("CreateCarrierGateway")
+    def create_carrier_gateway(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> CreateCarrierGatewayResult:
+        raise NotImplementedError
+
+    @handler("CreateClientVpnEndpoint")
+    def create_client_vpn_endpoint(
+        self,
+        context: RequestContext,
+        client_cidr_block: String,
+        server_certificate_arn: String,
+        authentication_options: ClientVpnAuthenticationRequestList,
+        connection_log_options: ConnectionLogOptions,
+        dns_servers: ValueStringList | None = None,
+        transport_protocol: TransportProtocol | None = None,
+        vpn_port: Integer | None = None,
+        description: String | None = None,
+        split_tunnel: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        security_group_ids: ClientVpnSecurityGroupIdSet | None = None,
+        vpc_id: VpcId | None = None,
+        self_service_portal: SelfServicePortal | None = None,
+        client_connect_options: ClientConnectOptions | None = None,
+        session_timeout_hours: Integer | None = None,
+        client_login_banner_options: ClientLoginBannerOptions | None = None,
+        client_route_enforcement_options: ClientRouteEnforcementOptions | None = None,
+        disconnect_on_session_timeout: Boolean | None = None,
+        **kwargs,
+    ) -> CreateClientVpnEndpointResult:
+        raise NotImplementedError
+
+    @handler("CreateClientVpnRoute")
+    def create_client_vpn_route(
+        self,
+        context: RequestContext,
+        client_vpn_endpoint_id: ClientVpnEndpointId,
+        destination_cidr_block: String,
+        target_vpc_subnet_id: SubnetId,
+        description: String | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateClientVpnRouteResult:
+        raise NotImplementedError
+
+    @handler("CreateCoipCidr")
+    def create_coip_cidr(
+        self,
+        context: RequestContext,
+        cidr: String,
+        coip_pool_id: Ipv4PoolCoipId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateCoipCidrResult:
+        raise NotImplementedError
+
+    @handler("CreateCoipPool")
+    def create_coip_pool(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_id: LocalGatewayRoutetableId,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateCoipPoolResult:
+        raise NotImplementedError
+
+    @handler("CreateCustomerGateway", expand=False)
+    def create_customer_gateway(
+        self, context: RequestContext, request: CreateCustomerGatewayRequest, **kwargs
+    ) -> CreateCustomerGatewayResult:
+        raise NotImplementedError
+
+    @handler("CreateDefaultSubnet")
+    def create_default_subnet(
+        self,
+        context: RequestContext,
+        availability_zone: AvailabilityZoneName,
+        dry_run: Boolean | None = None,
+        ipv6_native: Boolean | None = None,
+        **kwargs,
+    ) -> CreateDefaultSubnetResult:
+        raise NotImplementedError
+
+    @handler("CreateDefaultVpc")
+    def create_default_vpc(
+        self, context: RequestContext, dry_run: Boolean | None = None, **kwargs
+    ) -> CreateDefaultVpcResult:
+        raise NotImplementedError
+
+    @handler("CreateDelegateMacVolumeOwnershipTask")
+    def create_delegate_mac_volume_ownership_task(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        mac_credentials: SensitiveMacCredentials,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateDelegateMacVolumeOwnershipTaskResult:
+        raise NotImplementedError
+
+    @handler("CreateDhcpOptions")
+    def create_dhcp_options(
+        self,
+        context: RequestContext,
+        dhcp_configurations: NewDhcpConfigurationList,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateDhcpOptionsResult:
+        raise NotImplementedError
+
+    @handler("CreateEgressOnlyInternetGateway")
+    def create_egress_only_internet_gateway(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateEgressOnlyInternetGatewayResult:
+        raise NotImplementedError
+
+    @handler("CreateFleet", expand=False)
+    def create_fleet(
+        self, context: RequestContext, request: CreateFleetRequest, **kwargs
+    ) -> CreateFleetResult:
+        raise NotImplementedError
+
+    @handler("CreateFlowLogs")
+    def create_flow_logs(
+        self,
+        context: RequestContext,
+        resource_ids: FlowLogResourceIds,
+        resource_type: FlowLogsResourceType,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        deliver_logs_permission_arn: String | None = None,
+        deliver_cross_account_role: String | None = None,
+        log_group_name: String | None = None,
+        traffic_type: TrafficType | None = None,
+        log_destination_type: LogDestinationType | None = None,
+        log_destination: String | None = None,
+        log_format: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        max_aggregation_interval: Integer | None = None,
+        destination_options: DestinationOptionsRequest | None = None,
+        **kwargs,
+    ) -> CreateFlowLogsResult:
+        raise NotImplementedError
+
+    @handler("CreateFpgaImage")
+    def create_fpga_image(
+        self,
+        context: RequestContext,
+        input_storage_location: StorageLocation,
+        dry_run: Boolean | None = None,
+        logs_storage_location: StorageLocation | None = None,
+        description: String | None = None,
+        name: String | None = None,
+        client_token: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateFpgaImageResult:
+        raise NotImplementedError
+
+    @handler("CreateImage")
+    def create_image(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        name: String,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        no_reboot: Boolean | None = None,
+        block_device_mappings: BlockDeviceMappingRequestList | None = None,
+        **kwargs,
+    ) -> CreateImageResult:
+        raise NotImplementedError
+
+    @handler("CreateInstanceConnectEndpoint")
+    def create_instance_connect_endpoint(
+        self,
+        context: RequestContext,
+        subnet_id: SubnetId,
+        dry_run: Boolean | None = None,
+        security_group_ids: SecurityGroupIdStringListRequest | None = None,
+        preserve_client_ip: Boolean | None = None,
+        client_token: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateInstanceConnectEndpointResult:
+        raise NotImplementedError
+
+    @handler("CreateInstanceEventWindow")
+    def create_instance_event_window(
+        self,
+        context: RequestContext,
+        dry_run: Boolean | None = None,
+        name: String | None = None,
+        time_ranges: InstanceEventWindowTimeRangeRequestSet | None = None,
+        cron_expression: InstanceEventWindowCronExpression | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateInstanceEventWindowResult:
+        raise NotImplementedError
+
+    @handler("CreateInstanceExportTask")
+    def create_instance_export_task(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        target_environment: ExportEnvironment,
+        export_to_s3_task: ExportToS3TaskSpecification,
+        tag_specifications: TagSpecificationList | None = None,
+        description: String | None = None,
+        **kwargs,
+    ) -> CreateInstanceExportTaskResult:
+        raise NotImplementedError
+
+    @handler("CreateInternetGateway")
+    def create_internet_gateway(
+        self,
+        context: RequestContext,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateInternetGatewayResult:
+        raise NotImplementedError
+
+    @handler("CreateIpam")
+    def create_ipam(
+        self,
+        context: RequestContext,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        operating_regions: AddIpamOperatingRegionSet | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        tier: IpamTier | None = None,
+        enable_private_gua: Boolean | None = None,
+        metered_account: IpamMeteredAccount | None = None,
+        **kwargs,
+    ) -> CreateIpamResult:
+        raise NotImplementedError
+
+    @handler("CreateIpamExternalResourceVerificationToken")
+    def create_ipam_external_resource_verification_token(
+        self,
+        context: RequestContext,
+        ipam_id: IpamId,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> CreateIpamExternalResourceVerificationTokenResult:
+        raise NotImplementedError
+
+    @handler("CreateIpamPool")
+    def create_ipam_pool(
+        self,
+        context: RequestContext,
+        ipam_scope_id: IpamScopeId,
+        address_family: AddressFamily,
+        dry_run: Boolean | None = None,
+        locale: String | None = None,
+        source_ipam_pool_id: IpamPoolId | None = None,
+        description: String | None = None,
+        auto_import: Boolean | None = None,
+        publicly_advertisable: Boolean | None = None,
+        allocation_min_netmask_length: IpamNetmaskLength | None = None,
+        allocation_max_netmask_length: IpamNetmaskLength | None = None,
+        allocation_default_netmask_length: IpamNetmaskLength | None = None,
+        allocation_resource_tags: RequestIpamResourceTagList | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        aws_service: IpamPoolAwsService | None = None,
+        public_ip_source: IpamPoolPublicIpSource | None = None,
+        source_resource: IpamPoolSourceResourceRequest | None = None,
+        **kwargs,
+    ) -> CreateIpamPoolResult:
+        raise NotImplementedError
+
+    @handler("CreateIpamResourceDiscovery")
+    def create_ipam_resource_discovery(
+        self,
+        context: RequestContext,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        operating_regions: AddIpamOperatingRegionSet | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> CreateIpamResourceDiscoveryResult:
+        raise NotImplementedError
+
+    @handler("CreateIpamScope")
+    def create_ipam_scope(
+        self,
+        context: RequestContext,
+        ipam_id: IpamId,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> CreateIpamScopeResult:
+        raise NotImplementedError
+
+    @handler("CreateKeyPair")
+    def create_key_pair(
+        self,
+        context: RequestContext,
+        key_name: String,
+        key_type: KeyType | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        key_format: KeyFormat | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> KeyPair:
+        raise NotImplementedError
+
+    @handler("CreateLaunchTemplate")
+    def create_launch_template(
+        self,
+        context: RequestContext,
+        launch_template_name: LaunchTemplateName,
+        launch_template_data: RequestLaunchTemplateData,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        version_description: VersionDescription | None = None,
+        operator: OperatorRequest | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateLaunchTemplateResult:
+        raise NotImplementedError
+
+    @handler("CreateLaunchTemplateVersion")
+    def create_launch_template_version(
+        self,
+        context: RequestContext,
+        launch_template_data: RequestLaunchTemplateData,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        launch_template_id: LaunchTemplateId | None = None,
+        launch_template_name: LaunchTemplateName | None = None,
+        source_version: String | None = None,
+        version_description: VersionDescription | None = None,
+        resolve_alias: Boolean | None = None,
+        **kwargs,
+    ) -> CreateLaunchTemplateVersionResult:
+        raise NotImplementedError
+
+    @handler("CreateLocalGatewayRoute")
+    def create_local_gateway_route(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_id: LocalGatewayRoutetableId,
+        destination_cidr_block: String | None = None,
+        local_gateway_virtual_interface_group_id: LocalGatewayVirtualInterfaceGroupId | None = None,
+        dry_run: Boolean | None = None,
+        network_interface_id: NetworkInterfaceId | None = None,
+        destination_prefix_list_id: PrefixListResourceId | None = None,
+        **kwargs,
+    ) -> CreateLocalGatewayRouteResult:
+        raise NotImplementedError
+
+    @handler("CreateLocalGatewayRouteTable")
+    def create_local_gateway_route_table(
+        self,
+        context: RequestContext,
+        local_gateway_id: LocalGatewayId,
+        mode: LocalGatewayRouteTableMode | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateLocalGatewayRouteTableResult:
+        raise NotImplementedError
+
+    @handler("CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation")
+    def create_local_gateway_route_table_virtual_interface_group_association(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_id: LocalGatewayRoutetableId,
+        local_gateway_virtual_interface_group_id: LocalGatewayVirtualInterfaceGroupId,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationResult:
+        raise NotImplementedError
+
+    @handler("CreateLocalGatewayRouteTableVpcAssociation")
+    def create_local_gateway_route_table_vpc_association(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_id: LocalGatewayRoutetableId,
+        vpc_id: VpcId,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateLocalGatewayRouteTableVpcAssociationResult:
+        raise NotImplementedError
+
+    @handler("CreateLocalGatewayVirtualInterface")
+    def create_local_gateway_virtual_interface(
+        self,
+        context: RequestContext,
+        local_gateway_virtual_interface_group_id: LocalGatewayVirtualInterfaceGroupId,
+        outpost_lag_id: OutpostLagId,
+        vlan: Integer,
+        local_address: String,
+        peer_address: String,
+        peer_bgp_asn: Integer | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        peer_bgp_asn_extended: Long | None = None,
+        **kwargs,
+    ) -> CreateLocalGatewayVirtualInterfaceResult:
+        raise NotImplementedError
+
+    @handler("CreateLocalGatewayVirtualInterfaceGroup")
+    def create_local_gateway_virtual_interface_group(
+        self,
+        context: RequestContext,
+        local_gateway_id: LocalGatewayId,
+        local_bgp_asn: Integer | None = None,
+        local_bgp_asn_extended: Long | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateLocalGatewayVirtualInterfaceGroupResult:
+        raise NotImplementedError
+
@handler("CreateMacSystemIntegrityProtectionModificationTask") + def create_mac_system_integrity_protection_modification_task( + self, + context: RequestContext, + instance_id: InstanceId, + mac_system_integrity_protection_status: MacSystemIntegrityProtectionSettingStatus, + client_token: String | None = None, + dry_run: Boolean | None = None, + mac_credentials: SensitiveMacCredentials | None = None, + mac_system_integrity_protection_configuration: MacSystemIntegrityProtectionConfigurationRequest + | None = None, + tag_specifications: TagSpecificationList | None = None, + **kwargs, + ) -> CreateMacSystemIntegrityProtectionModificationTaskResult: + raise NotImplementedError + + @handler("CreateManagedPrefixList") + def create_managed_prefix_list( + self, + context: RequestContext, + prefix_list_name: String, + max_entries: Integer, + address_family: String, + dry_run: Boolean | None = None, + entries: AddPrefixListEntries | None = None, + tag_specifications: TagSpecificationList | None = None, + client_token: String | None = None, + **kwargs, + ) -> CreateManagedPrefixListResult: + raise NotImplementedError + + @handler("CreateNatGateway") + def create_nat_gateway( + self, + context: RequestContext, + subnet_id: SubnetId, + allocation_id: AllocationId | None = None, + client_token: String | None = None, + dry_run: Boolean | None = None, + tag_specifications: TagSpecificationList | None = None, + connectivity_type: ConnectivityType | None = None, + private_ip_address: String | None = None, + secondary_allocation_ids: AllocationIdList | None = None, + secondary_private_ip_addresses: IpList | None = None, + secondary_private_ip_address_count: PrivateIpAddressCount | None = None, + **kwargs, + ) -> CreateNatGatewayResult: + raise NotImplementedError + + @handler("CreateNetworkAcl") + def create_network_acl( + self, + context: RequestContext, + vpc_id: VpcId, + tag_specifications: TagSpecificationList | None = None, + client_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> CreateNetworkAclResult: + raise NotImplementedError + + @handler("CreateNetworkAclEntry") + def create_network_acl_entry( + self, + context: RequestContext, + network_acl_id: NetworkAclId, + rule_number: Integer, + protocol: String, + rule_action: RuleAction, + egress: Boolean, + dry_run: Boolean | None = None, + cidr_block: String | None = None, + ipv6_cidr_block: String | None = None, + icmp_type_code: IcmpTypeCode | None = None, + port_range: PortRange | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("CreateNetworkInsightsAccessScope") + def create_network_insights_access_scope( + self, + context: RequestContext, + client_token: String, + match_paths: AccessScopePathListRequest | None = None, + exclude_paths: AccessScopePathListRequest | None = None, + tag_specifications: TagSpecificationList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> CreateNetworkInsightsAccessScopeResult: + raise NotImplementedError + + @handler("CreateNetworkInsightsPath") + def create_network_insights_path( + self, + context: RequestContext, + source: NetworkInsightsResourceId, + protocol: Protocol, + client_token: String, + source_ip: IpAddress | None = None, + destination_ip: IpAddress | None = None, + destination: NetworkInsightsResourceId | None = None, + destination_port: Port | None = None, + tag_specifications: TagSpecificationList | None = None, + dry_run: Boolean | None = None, + filter_at_source: PathRequestFilter | None = None, + filter_at_destination: 
+        **kwargs,
+    ) -> CreateNetworkInsightsPathResult:
+        raise NotImplementedError
+
+    @handler("CreateNetworkInterface")
+    def create_network_interface(
+        self,
+        context: RequestContext,
+        subnet_id: SubnetId,
+        ipv4_prefixes: Ipv4PrefixList | None = None,
+        ipv4_prefix_count: Integer | None = None,
+        ipv6_prefixes: Ipv6PrefixList | None = None,
+        ipv6_prefix_count: Integer | None = None,
+        interface_type: NetworkInterfaceCreationType | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        enable_primary_ipv6: Boolean | None = None,
+        connection_tracking_specification: ConnectionTrackingSpecificationRequest | None = None,
+        operator: OperatorRequest | None = None,
+        description: String | None = None,
+        private_ip_address: String | None = None,
+        groups: SecurityGroupIdStringList | None = None,
+        private_ip_addresses: PrivateIpAddressSpecificationList | None = None,
+        secondary_private_ip_address_count: Integer | None = None,
+        ipv6_addresses: InstanceIpv6AddressList | None = None,
+        ipv6_address_count: Integer | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateNetworkInterfaceResult:
+        raise NotImplementedError
+
+    @handler("CreateNetworkInterfacePermission")
+    def create_network_interface_permission(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId,
+        permission: InterfacePermissionType,
+        aws_account_id: String | None = None,
+        aws_service: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateNetworkInterfacePermissionResult:
+        raise NotImplementedError
+
+    @handler("CreatePlacementGroup")
+    def create_placement_group(
+        self,
+        context: RequestContext,
+        partition_count: Integer | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        spread_level: SpreadLevel | None = None,
+        dry_run: Boolean | None = None,
+        group_name: String | None = None,
+        strategy: PlacementStrategy | None = None,
+        **kwargs,
+    ) -> CreatePlacementGroupResult:
+        raise NotImplementedError
+
+    @handler("CreatePublicIpv4Pool")
+    def create_public_ipv4_pool(
+        self,
+        context: RequestContext,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        network_border_group: String | None = None,
+        **kwargs,
+    ) -> CreatePublicIpv4PoolResult:
+        raise NotImplementedError
+
+    @handler("CreateReplaceRootVolumeTask")
+    def create_replace_root_volume_task(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        snapshot_id: SnapshotId | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        image_id: ImageId | None = None,
+        delete_replaced_root_volume: Boolean | None = None,
+        volume_initialization_rate: Long | None = None,
+        **kwargs,
+    ) -> CreateReplaceRootVolumeTaskResult:
+        raise NotImplementedError
+
+    @handler("CreateReservedInstancesListing")
+    def create_reserved_instances_listing(
+        self,
+        context: RequestContext,
+        reserved_instances_id: ReservationId,
+        instance_count: Integer,
+        price_schedules: PriceScheduleSpecificationList,
+        client_token: String,
+        **kwargs,
+    ) -> CreateReservedInstancesListingResult:
+        raise NotImplementedError
+
+    @handler("CreateRestoreImageTask")
+    def create_restore_image_task(
+        self,
+        context: RequestContext,
+        bucket: String,
+        object_key: String,
+        name: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateRestoreImageTaskResult:
+        raise NotImplementedError
+
+    @handler("CreateRoute")
+    def create_route(
+        self,
+        context: RequestContext,
+        route_table_id: RouteTableId,
+        destination_prefix_list_id: PrefixListResourceId | None = None,
+        vpc_endpoint_id: VpcEndpointId | None = None,
+        transit_gateway_id: TransitGatewayId | None = None,
+        local_gateway_id: LocalGatewayId | None = None,
+        carrier_gateway_id: CarrierGatewayId | None = None,
+        core_network_arn: CoreNetworkArn | None = None,
+        dry_run: Boolean | None = None,
+        destination_cidr_block: String | None = None,
+        gateway_id: RouteGatewayId | None = None,
+        destination_ipv6_cidr_block: String | None = None,
+        egress_only_internet_gateway_id: EgressOnlyInternetGatewayId | None = None,
+        instance_id: InstanceId | None = None,
+        network_interface_id: NetworkInterfaceId | None = None,
+        vpc_peering_connection_id: VpcPeeringConnectionId | None = None,
+        nat_gateway_id: NatGatewayId | None = None,
+        **kwargs,
+    ) -> CreateRouteResult:
+        raise NotImplementedError
+
+    @handler("CreateRouteServer")
+    def create_route_server(
+        self,
+        context: RequestContext,
+        amazon_side_asn: Long,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        persist_routes: RouteServerPersistRoutesAction | None = None,
+        persist_routes_duration: BoxedLong | None = None,
+        sns_notifications_enabled: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateRouteServerResult:
+        raise NotImplementedError
+
+    @handler("CreateRouteServerEndpoint")
+    def create_route_server_endpoint(
+        self,
+        context: RequestContext,
+        route_server_id: RouteServerId,
+        subnet_id: SubnetId,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateRouteServerEndpointResult:
+        raise NotImplementedError
+
+    @handler("CreateRouteServerPeer")
+    def create_route_server_peer(
+        self,
+        context: RequestContext,
+        route_server_endpoint_id: RouteServerEndpointId,
+        peer_address: String,
+        bgp_options: RouteServerBgpOptionsRequest,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateRouteServerPeerResult:
+        raise NotImplementedError
+
+    @handler("CreateRouteTable")
+    def create_route_table(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateRouteTableResult:
+        raise NotImplementedError
+
+    @handler("CreateSecurityGroup")
+    def create_security_group(
+        self,
+        context: RequestContext,
+        description: String,
+        group_name: String,
+        vpc_id: VpcId | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateSecurityGroupResult:
+        raise NotImplementedError
+
+    @handler("CreateSnapshot")
+    def create_snapshot(
+        self,
+        context: RequestContext,
+        volume_id: VolumeId,
+        description: String | None = None,
+        outpost_arn: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        location: SnapshotLocationEnum | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> Snapshot:
+        raise NotImplementedError
+
+    @handler("CreateSnapshots")
+    def create_snapshots(
+        self,
+        context: RequestContext,
+        instance_specification: InstanceSpecification,
+        description: String | None = None,
+        outpost_arn: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        copy_tags_from_source: CopyTagsFromSource | None = None,
+        location: SnapshotLocationEnum | None = None,
+        **kwargs,
+    ) -> CreateSnapshotsResult:
+        raise NotImplementedError
+
+    @handler("CreateSpotDatafeedSubscription")
+    def create_spot_datafeed_subscription(
+        self,
+        context: RequestContext,
+        bucket: String,
+        dry_run: Boolean | None = None,
+        prefix: String | None = None,
+        **kwargs,
+    ) -> CreateSpotDatafeedSubscriptionResult:
+        raise NotImplementedError
+
+    @handler("CreateStoreImageTask")
+    def create_store_image_task(
+        self,
+        context: RequestContext,
+        image_id: ImageId,
+        bucket: String,
+        s3_object_tags: S3ObjectTagList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateStoreImageTaskResult:
+        raise NotImplementedError
+
+    @handler("CreateSubnet")
+    def create_subnet(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        tag_specifications: TagSpecificationList | None = None,
+        availability_zone: String | None = None,
+        availability_zone_id: String | None = None,
+        cidr_block: String | None = None,
+        ipv6_cidr_block: String | None = None,
+        outpost_arn: String | None = None,
+        ipv6_native: Boolean | None = None,
+        ipv4_ipam_pool_id: IpamPoolId | None = None,
+        ipv4_netmask_length: NetmaskLength | None = None,
+        ipv6_ipam_pool_id: IpamPoolId | None = None,
+        ipv6_netmask_length: NetmaskLength | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateSubnetResult:
+        raise NotImplementedError
+
+    @handler("CreateSubnetCidrReservation")
+    def create_subnet_cidr_reservation(
+        self,
+        context: RequestContext,
+        subnet_id: SubnetId,
+        cidr: String,
+        reservation_type: SubnetCidrReservationType,
+        description: String | None = None,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateSubnetCidrReservationResult:
+        raise NotImplementedError
+
+    @handler("CreateTags")
+    def create_tags(
+        self,
+        context: RequestContext,
+        resources: ResourceIdList,
+        tags: TagList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("CreateTrafficMirrorFilter")
+    def create_traffic_mirror_filter(
+        self,
+        context: RequestContext,
+        description: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> CreateTrafficMirrorFilterResult:
+        raise NotImplementedError
+
+    @handler("CreateTrafficMirrorFilterRule")
+    def create_traffic_mirror_filter_rule(
+        self,
+        context: RequestContext,
+        traffic_mirror_filter_id: TrafficMirrorFilterId,
+        traffic_direction: TrafficDirection,
+        rule_number: Integer,
+        rule_action: TrafficMirrorRuleAction,
+        destination_cidr_block: String,
+        source_cidr_block: String,
+        destination_port_range: TrafficMirrorPortRangeRequest | None = None,
+        source_port_range: TrafficMirrorPortRangeRequest | None = None,
+        protocol: Integer | None = None,
+        description: String | None = None,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateTrafficMirrorFilterRuleResult:
+        raise NotImplementedError
+
+    @handler("CreateTrafficMirrorSession")
+    def create_traffic_mirror_session(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId,
+        traffic_mirror_target_id: TrafficMirrorTargetId,
+        traffic_mirror_filter_id: TrafficMirrorFilterId,
+        session_number: Integer,
+        packet_length: Integer | None = None,
+        virtual_network_id: Integer | None = None,
+        description: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> CreateTrafficMirrorSessionResult:
+        raise NotImplementedError
+
+    @handler("CreateTrafficMirrorTarget")
+    def create_traffic_mirror_target(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId | None = None,
+        network_load_balancer_arn: String | None = None,
+        description: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        gateway_load_balancer_endpoint_id: VpcEndpointId | None = None,
+        **kwargs,
+    ) -> CreateTrafficMirrorTargetResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGateway")
+    def create_transit_gateway(
+        self,
+        context: RequestContext,
+        description: String | None = None,
+        options: TransitGatewayRequestOptions | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayConnect")
+    def create_transit_gateway_connect(
+        self,
+        context: RequestContext,
+        transport_transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        options: CreateTransitGatewayConnectRequestOptions,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayConnectResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayConnectPeer")
+    def create_transit_gateway_connect_peer(
+        self,
+        context: RequestContext,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        peer_address: String,
+        inside_cidr_blocks: InsideCidrBlocksStringList,
+        transit_gateway_address: String | None = None,
+        bgp_options: TransitGatewayConnectRequestBgpOptions | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayConnectPeerResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayMulticastDomain")
+    def create_transit_gateway_multicast_domain(
+        self,
+        context: RequestContext,
+        transit_gateway_id: TransitGatewayId,
+        options: CreateTransitGatewayMulticastDomainRequestOptions | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayMulticastDomainResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayPeeringAttachment")
+    def create_transit_gateway_peering_attachment(
+        self,
+        context: RequestContext,
+        transit_gateway_id: TransitGatewayId,
+        peer_transit_gateway_id: TransitAssociationGatewayId,
+        peer_account_id: String,
+        peer_region: String,
+        options: CreateTransitGatewayPeeringAttachmentRequestOptions | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayPeeringAttachmentResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayPolicyTable")
+    def create_transit_gateway_policy_table(
+        self,
+        context: RequestContext,
+        transit_gateway_id: TransitGatewayId,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayPolicyTableResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayPrefixListReference")
+    def create_transit_gateway_prefix_list_reference(
+        self,
+        context: RequestContext,
+        transit_gateway_route_table_id: TransitGatewayRouteTableId,
+        prefix_list_id: PrefixListResourceId,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId | None = None,
+        blackhole: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayPrefixListReferenceResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayRoute")
+    def create_transit_gateway_route(
+        self,
+        context: RequestContext,
+        destination_cidr_block: String,
+        transit_gateway_route_table_id: TransitGatewayRouteTableId,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId | None = None,
+        blackhole: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayRouteResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayRouteTable")
+    def create_transit_gateway_route_table(
+        self,
+        context: RequestContext,
+        transit_gateway_id: TransitGatewayId,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayRouteTableResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayRouteTableAnnouncement")
+    def create_transit_gateway_route_table_announcement(
+        self,
+        context: RequestContext,
+        transit_gateway_route_table_id: TransitGatewayRouteTableId,
+        peering_attachment_id: TransitGatewayAttachmentId,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayRouteTableAnnouncementResult:
+        raise NotImplementedError
+
+    @handler("CreateTransitGatewayVpcAttachment")
+    def create_transit_gateway_vpc_attachment(
+        self,
+        context: RequestContext,
+        transit_gateway_id: TransitGatewayId,
+        vpc_id: VpcId,
+        subnet_ids: TransitGatewaySubnetIdList,
+        options: CreateTransitGatewayVpcAttachmentRequestOptions | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> CreateTransitGatewayVpcAttachmentResult:
+        raise NotImplementedError
+
+    @handler("CreateVerifiedAccessEndpoint")
+    def create_verified_access_endpoint(
+        self,
+        context: RequestContext,
+        verified_access_group_id: VerifiedAccessGroupId,
+        endpoint_type: VerifiedAccessEndpointType,
+        attachment_type: VerifiedAccessEndpointAttachmentType,
+        domain_certificate_arn: CertificateArn | None = None,
+        application_domain: String | None = None,
+        endpoint_domain_prefix: String | None = None,
+        security_group_ids: SecurityGroupIdList | None = None,
+        load_balancer_options: CreateVerifiedAccessEndpointLoadBalancerOptions | None = None,
+        network_interface_options: CreateVerifiedAccessEndpointEniOptions | None = None,
+        description: String | None = None,
+        policy_document: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        sse_specification: VerifiedAccessSseSpecificationRequest | None = None,
+        rds_options: CreateVerifiedAccessEndpointRdsOptions | None = None,
+        cidr_options: CreateVerifiedAccessEndpointCidrOptions | None = None,
+        **kwargs,
+    ) -> CreateVerifiedAccessEndpointResult:
+        raise NotImplementedError
+
+    @handler("CreateVerifiedAccessGroup")
+    def create_verified_access_group(
+        self,
+        context: RequestContext,
+        verified_access_instance_id: VerifiedAccessInstanceId,
+        description: String | None = None,
+        policy_document: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        sse_specification: VerifiedAccessSseSpecificationRequest | None = None,
+        **kwargs,
+    ) -> CreateVerifiedAccessGroupResult:
+        raise NotImplementedError
+
+    @handler("CreateVerifiedAccessInstance")
+    def create_verified_access_instance(
+        self,
+        context: RequestContext,
+        description: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        fips_enabled: Boolean | None = None,
+        cidr_endpoints_custom_sub_domain: String | None = None,
+        **kwargs,
+    ) -> CreateVerifiedAccessInstanceResult:
+        raise NotImplementedError
+
+    @handler("CreateVerifiedAccessTrustProvider")
+    def create_verified_access_trust_provider(
+        self,
+        context: RequestContext,
+        trust_provider_type: TrustProviderType,
+        policy_reference_name: String,
+        user_trust_provider_type: UserTrustProviderType | None = None,
+        device_trust_provider_type: DeviceTrustProviderType | None = None,
+        oidc_options: CreateVerifiedAccessTrustProviderOidcOptions | None = None,
+        device_options: CreateVerifiedAccessTrustProviderDeviceOptions | None = None,
+        description: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        sse_specification: VerifiedAccessSseSpecificationRequest | None = None,
+        native_application_oidc_options: CreateVerifiedAccessNativeApplicationOidcOptions
+        | None = None,
+        **kwargs,
+    ) -> CreateVerifiedAccessTrustProviderResult:
+        raise NotImplementedError
+
+    @handler("CreateVolume")
+    def create_volume(
+        self,
+        context: RequestContext,
+        availability_zone: AvailabilityZoneName,
+        encrypted: Boolean | None = None,
+        iops: Integer | None = None,
+        kms_key_id: KmsKeyId | None = None,
+        outpost_arn: String | None = None,
+        size: Integer | None = None,
+        snapshot_id: SnapshotId | None = None,
+        volume_type: VolumeType | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        multi_attach_enabled: Boolean | None = None,
+        throughput: Integer | None = None,
+        client_token: String | None = None,
+        volume_initialization_rate: Integer | None = None,
+        operator: OperatorRequest | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> Volume:
+        raise NotImplementedError
+
+    @handler("CreateVpc")
+    def create_vpc(
+        self,
+        context: RequestContext,
+        cidr_block: String | None = None,
+        ipv6_pool: Ipv6PoolEc2Id | None = None,
+        ipv6_cidr_block: String | None = None,
+        ipv4_ipam_pool_id: IpamPoolId | None = None,
+        ipv4_netmask_length: NetmaskLength | None = None,
+        ipv6_ipam_pool_id: IpamPoolId | None = None,
+        ipv6_netmask_length: NetmaskLength | None = None,
+        ipv6_cidr_block_network_border_group: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        instance_tenancy: Tenancy | None = None,
+        amazon_provided_ipv6_cidr_block: Boolean | None = None,
+        **kwargs,
+    ) -> CreateVpcResult:
+        raise NotImplementedError
+
+    @handler("CreateVpcBlockPublicAccessExclusion")
+    def create_vpc_block_public_access_exclusion(
+        self,
+        context: RequestContext,
+        internet_gateway_exclusion_mode: InternetGatewayExclusionMode,
+        dry_run: Boolean | None = None,
+        subnet_id: SubnetId | None = None,
+        vpc_id: VpcId | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateVpcBlockPublicAccessExclusionResult:
+        raise NotImplementedError
+
+    @handler("CreateVpcEndpoint")
+    def create_vpc_endpoint(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        dry_run: Boolean | None = None,
+        vpc_endpoint_type: VpcEndpointType | None = None,
+        service_name: String | None = None,
+        policy_document: String | None = None,
+        route_table_ids: VpcEndpointRouteTableIdList | None = None,
+        subnet_ids: VpcEndpointSubnetIdList | None = None,
+        security_group_ids: VpcEndpointSecurityGroupIdList | None = None,
+        ip_address_type: IpAddressType | None = None,
+        dns_options: DnsOptionsSpecification | None = None,
+        client_token: String | None = None,
+        private_dns_enabled: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        subnet_configurations: SubnetConfigurationsList | None = None,
+        service_network_arn: ServiceNetworkArn | None = None,
+        resource_configuration_arn: ResourceConfigurationArn | None = None,
+        service_region: String | None = None,
+        **kwargs,
+    ) -> CreateVpcEndpointResult:
+        raise NotImplementedError
+
+    @handler("CreateVpcEndpointConnectionNotification")
+    def create_vpc_endpoint_connection_notification(
+        self,
+        context: RequestContext,
+        connection_notification_arn: String,
+        connection_events: ValueStringList,
+        dry_run: Boolean | None = None,
+        service_id: VpcEndpointServiceId | None = None,
+        vpc_endpoint_id: VpcEndpointId | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> CreateVpcEndpointConnectionNotificationResult:
+        raise NotImplementedError
+
+    @handler("CreateVpcEndpointServiceConfiguration")
+    def create_vpc_endpoint_service_configuration(
+        self,
+        context: RequestContext,
+        dry_run: Boolean | None = None,
+        acceptance_required: Boolean | None = None,
+        private_dns_name: String | None = None,
+        network_load_balancer_arns: ValueStringList | None = None,
+        gateway_load_balancer_arns: ValueStringList | None = None,
+        supported_ip_address_types: ValueStringList | None = None,
+        supported_regions: ValueStringList | None = None,
+        client_token: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> CreateVpcEndpointServiceConfigurationResult:
+        raise NotImplementedError
+
+    @handler("CreateVpcPeeringConnection")
+    def create_vpc_peering_connection(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        peer_region: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        peer_vpc_id: String | None = None,
+        peer_owner_id: String | None = None,
+        **kwargs,
+    ) -> CreateVpcPeeringConnectionResult:
+        raise NotImplementedError
+
+    @handler("CreateVpnConnection", expand=False)
+    def create_vpn_connection(
+        self, context: RequestContext, request: CreateVpnConnectionRequest, **kwargs
+    ) -> CreateVpnConnectionResult:
+        raise NotImplementedError
+
+    @handler("CreateVpnConnectionRoute")
+    def create_vpn_connection_route(
+        self,
+        context: RequestContext,
+        destination_cidr_block: String,
+        vpn_connection_id: VpnConnectionId,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("CreateVpnGateway", expand=False)
+    def create_vpn_gateway(
+        self, context: RequestContext, request: CreateVpnGatewayRequest, **kwargs
+    ) -> CreateVpnGatewayResult:
+        raise NotImplementedError
+
+    @handler("DeleteCarrierGateway")
+    def delete_carrier_gateway(
+        self,
+        context: RequestContext,
+        carrier_gateway_id: CarrierGatewayId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteCarrierGatewayResult:
+        raise NotImplementedError
+
+    @handler("DeleteClientVpnEndpoint")
+    def delete_client_vpn_endpoint(
+        self,
+        context: RequestContext,
+        client_vpn_endpoint_id: ClientVpnEndpointId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteClientVpnEndpointResult:
+        raise NotImplementedError
+
+    @handler("DeleteClientVpnRoute")
+    def delete_client_vpn_route(
+        self,
+        context: RequestContext,
+        client_vpn_endpoint_id: ClientVpnEndpointId,
+        destination_cidr_block: String,
+        target_vpc_subnet_id: SubnetId | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteClientVpnRouteResult:
+        raise NotImplementedError
+
+    @handler("DeleteCoipCidr")
+    def delete_coip_cidr(
+        self,
+        context: RequestContext,
+        cidr: String,
+        coip_pool_id: Ipv4PoolCoipId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteCoipCidrResult:
+        raise NotImplementedError
+
+    @handler("DeleteCoipPool")
+    def delete_coip_pool(
+        self,
+        context: RequestContext,
+        coip_pool_id: Ipv4PoolCoipId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteCoipPoolResult:
+        raise NotImplementedError
+
+    @handler("DeleteCustomerGateway")
+    def delete_customer_gateway(
+        self,
+        context: RequestContext,
+        customer_gateway_id: CustomerGatewayId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDhcpOptions")
+    def delete_dhcp_options(
+        self,
+        context: RequestContext,
+        dhcp_options_id: DhcpOptionsId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteEgressOnlyInternetGateway")
+    def delete_egress_only_internet_gateway(
+        self,
+        context: RequestContext,
+        egress_only_internet_gateway_id: EgressOnlyInternetGatewayId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteEgressOnlyInternetGatewayResult:
+        raise NotImplementedError
+
+    @handler("DeleteFleets")
+    def delete_fleets(
+        self,
+        context: RequestContext,
+        fleet_ids: FleetIdSet,
+        terminate_instances: Boolean,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteFleetsResult:
+        raise NotImplementedError
+
+    @handler("DeleteFlowLogs")
+    def delete_flow_logs(
+        self,
+        context: RequestContext,
+        flow_log_ids: FlowLogIdList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteFlowLogsResult:
+        raise NotImplementedError
+
+    @handler("DeleteFpgaImage")
+    def delete_fpga_image(
+        self,
+        context: RequestContext,
+        fpga_image_id: FpgaImageId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteFpgaImageResult:
+        raise NotImplementedError
+
+    @handler("DeleteInstanceConnectEndpoint")
+    def delete_instance_connect_endpoint(
+        self,
+        context: RequestContext,
+        instance_connect_endpoint_id: InstanceConnectEndpointId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteInstanceConnectEndpointResult:
+        raise NotImplementedError
+
+    @handler("DeleteInstanceEventWindow")
+    def delete_instance_event_window(
+        self,
+        context: RequestContext,
+        instance_event_window_id: InstanceEventWindowId,
+        dry_run: Boolean | None = None,
+        force_delete: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteInstanceEventWindowResult:
+        raise NotImplementedError
+
+    @handler("DeleteInternetGateway")
+    def delete_internet_gateway(
+        self,
+        context: RequestContext,
+        internet_gateway_id: InternetGatewayId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteIpam")
+    def delete_ipam(
+        self,
+        context: RequestContext,
+        ipam_id: IpamId,
+        dry_run: Boolean | None = None,
+        cascade: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteIpamResult:
+        raise NotImplementedError
+
+    @handler("DeleteIpamExternalResourceVerificationToken")
+    def delete_ipam_external_resource_verification_token(
+        self,
+        context: RequestContext,
+        ipam_external_resource_verification_token_id: IpamExternalResourceVerificationTokenId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteIpamExternalResourceVerificationTokenResult:
+        raise NotImplementedError
+
+    @handler("DeleteIpamPool")
+    def delete_ipam_pool(
+        self,
+        context: RequestContext,
+        ipam_pool_id: IpamPoolId,
+        dry_run: Boolean | None = None,
+        cascade: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteIpamPoolResult:
+        raise NotImplementedError
+
+    @handler("DeleteIpamResourceDiscovery")
+    def delete_ipam_resource_discovery(
+        self,
+        context: RequestContext,
+        ipam_resource_discovery_id: IpamResourceDiscoveryId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteIpamResourceDiscoveryResult:
+        raise NotImplementedError
+
+    @handler("DeleteIpamScope")
+    def delete_ipam_scope(
+        self,
+        context: RequestContext,
+        ipam_scope_id: IpamScopeId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteIpamScopeResult:
+        raise NotImplementedError
+
+    @handler("DeleteKeyPair")
+    def delete_key_pair(
+        self,
+        context: RequestContext,
+        key_name: KeyPairNameWithResolver | None = None,
+        key_pair_id: KeyPairId | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteKeyPairResult:
+        raise NotImplementedError
+
+    @handler("DeleteLaunchTemplate")
+    def delete_launch_template(
+        self,
+        context: RequestContext,
+        dry_run: Boolean | None = None,
+        launch_template_id: LaunchTemplateId | None = None,
+        launch_template_name: LaunchTemplateName | None = None,
+        **kwargs,
+    ) -> DeleteLaunchTemplateResult:
+        raise NotImplementedError
+
+    @handler("DeleteLaunchTemplateVersions")
+    def delete_launch_template_versions(
+        self,
+        context: RequestContext,
+        versions: VersionStringList,
+        dry_run: Boolean | None = None,
+        launch_template_id: LaunchTemplateId | None = None,
+        launch_template_name: LaunchTemplateName | None = None,
+        **kwargs,
+    ) -> DeleteLaunchTemplateVersionsResult:
+        raise NotImplementedError
+
+    @handler("DeleteLocalGatewayRoute")
+    def delete_local_gateway_route(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_id: LocalGatewayRoutetableId,
+        destination_cidr_block: String | None = None,
+        dry_run: Boolean | None = None,
+        destination_prefix_list_id: PrefixListResourceId | None = None,
+        **kwargs,
+    ) -> DeleteLocalGatewayRouteResult:
+        raise NotImplementedError
+
+    @handler("DeleteLocalGatewayRouteTable")
+    def delete_local_gateway_route_table(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_id: LocalGatewayRoutetableId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteLocalGatewayRouteTableResult:
+        raise NotImplementedError
+
+    @handler("DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation")
+    def delete_local_gateway_route_table_virtual_interface_group_association(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_virtual_interface_group_association_id: LocalGatewayRouteTableVirtualInterfaceGroupAssociationId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationResult:
+        raise NotImplementedError
+
+    @handler("DeleteLocalGatewayRouteTableVpcAssociation")
+    def delete_local_gateway_route_table_vpc_association(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_vpc_association_id: LocalGatewayRouteTableVpcAssociationId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteLocalGatewayRouteTableVpcAssociationResult:
+        raise NotImplementedError
+
+    @handler("DeleteLocalGatewayVirtualInterface")
+    def delete_local_gateway_virtual_interface(
+        self,
+        context: RequestContext,
+        local_gateway_virtual_interface_id: LocalGatewayVirtualInterfaceId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteLocalGatewayVirtualInterfaceResult:
+        raise NotImplementedError
+
+    @handler("DeleteLocalGatewayVirtualInterfaceGroup")
+    def delete_local_gateway_virtual_interface_group(
+        self,
+        context: RequestContext,
+        local_gateway_virtual_interface_group_id: LocalGatewayVirtualInterfaceGroupId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteLocalGatewayVirtualInterfaceGroupResult:
+        raise NotImplementedError
+
+    @handler("DeleteManagedPrefixList")
+    def delete_managed_prefix_list(
+        self,
+        context: RequestContext,
+        prefix_list_id: PrefixListResourceId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteManagedPrefixListResult:
+        raise NotImplementedError
+
+    @handler("DeleteNatGateway")
+    def delete_nat_gateway(
+        self,
+        context: RequestContext,
+        nat_gateway_id: NatGatewayId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteNatGatewayResult:
+        raise NotImplementedError
+
+    @handler("DeleteNetworkAcl")
+    def delete_network_acl(
+        self,
+        context: RequestContext,
+        network_acl_id: NetworkAclId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteNetworkAclEntry")
+    def delete_network_acl_entry(
+        self,
+        context: RequestContext,
+        network_acl_id: NetworkAclId,
+        rule_number: Integer,
+        egress: Boolean,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteNetworkInsightsAccessScope")
+    def delete_network_insights_access_scope(
+        self,
+        context: RequestContext,
+        network_insights_access_scope_id: NetworkInsightsAccessScopeId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteNetworkInsightsAccessScopeResult:
+        raise NotImplementedError
+
+    @handler("DeleteNetworkInsightsAccessScopeAnalysis")
+    def delete_network_insights_access_scope_analysis(
+        self,
+        context: RequestContext,
+        network_insights_access_scope_analysis_id: NetworkInsightsAccessScopeAnalysisId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteNetworkInsightsAccessScopeAnalysisResult:
+        raise NotImplementedError
+
+    @handler("DeleteNetworkInsightsAnalysis")
+    def delete_network_insights_analysis(
+        self,
+        context: RequestContext,
+        network_insights_analysis_id: NetworkInsightsAnalysisId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteNetworkInsightsAnalysisResult:
+        raise NotImplementedError
+
+    @handler("DeleteNetworkInsightsPath")
+    def delete_network_insights_path(
+        self,
+        context: RequestContext,
+        network_insights_path_id: NetworkInsightsPathId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteNetworkInsightsPathResult:
+        raise NotImplementedError
+
+    @handler("DeleteNetworkInterface")
+    def delete_network_interface(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteNetworkInterfacePermission")
+    def delete_network_interface_permission(
+        self,
+        context: RequestContext,
+        network_interface_permission_id: NetworkInterfacePermissionId,
+        force: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteNetworkInterfacePermissionResult:
+        raise NotImplementedError
+
+    @handler("DeletePlacementGroup")
+    def delete_placement_group(
+        self,
+        context: RequestContext,
+        group_name: PlacementGroupName,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeletePublicIpv4Pool")
+    def delete_public_ipv4_pool(
+        self,
+        context: RequestContext,
+        pool_id: Ipv4PoolEc2Id,
+        dry_run: Boolean | None = None,
+        network_border_group: String | None = None,
+        **kwargs,
+    ) -> DeletePublicIpv4PoolResult:
+        raise NotImplementedError
+
+    @handler("DeleteQueuedReservedInstances")
+    def delete_queued_reserved_instances(
+        self,
+        context: RequestContext,
+        reserved_instances_ids: DeleteQueuedReservedInstancesIdList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteQueuedReservedInstancesResult:
+        raise NotImplementedError
+
+    @handler("DeleteRoute")
+    def delete_route(
+        self,
+        context: RequestContext,
+        route_table_id: RouteTableId,
+        destination_prefix_list_id: PrefixListResourceId | None = None,
+        dry_run: Boolean | None = None,
+        destination_cidr_block: String | None = None,
+        destination_ipv6_cidr_block: String | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRouteServer")
+    def delete_route_server(
+        self,
+        context: RequestContext,
+        route_server_id: RouteServerId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteRouteServerResult:
+        raise NotImplementedError
+
+    @handler("DeleteRouteServerEndpoint")
+    def delete_route_server_endpoint(
+        self,
+        context: RequestContext,
+        route_server_endpoint_id: RouteServerEndpointId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteRouteServerEndpointResult:
+        raise NotImplementedError
+
+    @handler("DeleteRouteServerPeer")
+    def delete_route_server_peer(
+        self,
+        context: RequestContext,
+        route_server_peer_id: RouteServerPeerId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteRouteServerPeerResult:
+        raise NotImplementedError
+
+    @handler("DeleteRouteTable")
+    def delete_route_table(
+        self,
+        context: RequestContext,
+        route_table_id: RouteTableId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSecurityGroup")
+    def delete_security_group(
+        self,
+        context: RequestContext,
+        group_id: SecurityGroupId | None = None,
+        group_name: SecurityGroupName | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteSecurityGroupResult:
+        raise NotImplementedError
+
+    @handler("DeleteSnapshot")
+    def delete_snapshot(
+        self,
+        context: RequestContext,
+        snapshot_id: SnapshotId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSpotDatafeedSubscription")
+    def delete_spot_datafeed_subscription(
+        self, context: RequestContext, dry_run: Boolean | None = None, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSubnet")
+    def delete_subnet(
+        self, context: RequestContext, subnet_id: SubnetId, dry_run: Boolean | None = None, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSubnetCidrReservation")
+    def delete_subnet_cidr_reservation(
+        self,
+        context: RequestContext,
+        subnet_cidr_reservation_id: SubnetCidrReservationId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteSubnetCidrReservationResult:
+        raise NotImplementedError
+
+    @handler("DeleteTags")
+    def delete_tags(
+        self,
+        context: RequestContext,
+        resources: ResourceIdList,
+        dry_run: Boolean | None = None,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteTrafficMirrorFilter")
+    def delete_traffic_mirror_filter(
+        self,
+        context: RequestContext,
+        traffic_mirror_filter_id: TrafficMirrorFilterId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTrafficMirrorFilterResult:
+        raise NotImplementedError
+
+    @handler("DeleteTrafficMirrorFilterRule")
+    def delete_traffic_mirror_filter_rule(
+        self,
+        context: RequestContext,
+        traffic_mirror_filter_rule_id: TrafficMirrorFilterRuleIdWithResolver,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTrafficMirrorFilterRuleResult:
+        raise NotImplementedError
+
+    @handler("DeleteTrafficMirrorSession")
+    def delete_traffic_mirror_session(
+        self,
+        context: RequestContext,
+        traffic_mirror_session_id: TrafficMirrorSessionId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTrafficMirrorSessionResult:
+        raise NotImplementedError
+
+    @handler("DeleteTrafficMirrorTarget")
+    def delete_traffic_mirror_target(
+        self,
+        context: RequestContext,
+        traffic_mirror_target_id: TrafficMirrorTargetId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTrafficMirrorTargetResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGateway")
+    def delete_transit_gateway(
+        self,
+        context: RequestContext,
+        transit_gateway_id: TransitGatewayId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayConnect")
+    def delete_transit_gateway_connect(
+        self,
+        context: RequestContext,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayConnectResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayConnectPeer")
+    def delete_transit_gateway_connect_peer(
+        self,
+        context: RequestContext,
+        transit_gateway_connect_peer_id: TransitGatewayConnectPeerId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayConnectPeerResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayMulticastDomain")
+    def delete_transit_gateway_multicast_domain(
+        self,
+        context: RequestContext,
+        transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayMulticastDomainResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayPeeringAttachment")
+    def delete_transit_gateway_peering_attachment(
+        self,
+        context: RequestContext,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayPeeringAttachmentResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayPolicyTable")
+    def delete_transit_gateway_policy_table(
+        self,
+        context: RequestContext,
+        transit_gateway_policy_table_id: TransitGatewayPolicyTableId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayPolicyTableResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayPrefixListReference")
+    def delete_transit_gateway_prefix_list_reference(
+        self,
+        context: RequestContext,
+        transit_gateway_route_table_id: TransitGatewayRouteTableId,
+        prefix_list_id: PrefixListResourceId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayPrefixListReferenceResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayRoute")
+    def delete_transit_gateway_route(
+        self,
+        context: RequestContext,
+        transit_gateway_route_table_id: TransitGatewayRouteTableId,
+        destination_cidr_block: String,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayRouteResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayRouteTable")
+    def delete_transit_gateway_route_table(
+        self,
+        context: RequestContext,
+        transit_gateway_route_table_id: TransitGatewayRouteTableId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayRouteTableResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayRouteTableAnnouncement")
+    def delete_transit_gateway_route_table_announcement(
+        self,
+        context: RequestContext,
+        transit_gateway_route_table_announcement_id: TransitGatewayRouteTableAnnouncementId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayRouteTableAnnouncementResult:
+        raise NotImplementedError
+
+    @handler("DeleteTransitGatewayVpcAttachment")
+    def delete_transit_gateway_vpc_attachment(
+        self,
+        context: RequestContext,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteTransitGatewayVpcAttachmentResult:
+        raise NotImplementedError
+
+    @handler("DeleteVerifiedAccessEndpoint")
+    def delete_verified_access_endpoint(
+        self,
+        context: RequestContext,
+        verified_access_endpoint_id: VerifiedAccessEndpointId,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteVerifiedAccessEndpointResult:
+        raise NotImplementedError
+
+    @handler("DeleteVerifiedAccessGroup")
+    def delete_verified_access_group(
+        self,
+        context: RequestContext,
+        verified_access_group_id: VerifiedAccessGroupId,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteVerifiedAccessGroupResult:
+        raise NotImplementedError
+
+    @handler("DeleteVerifiedAccessInstance")
+    def delete_verified_access_instance(
+        self,
+        context: RequestContext,
+        verified_access_instance_id: VerifiedAccessInstanceId,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> DeleteVerifiedAccessInstanceResult:
+        raise NotImplementedError
+
+    @handler("DeleteVerifiedAccessTrustProvider")
+    def delete_verified_access_trust_provider(
+        self,
+        context: RequestContext,
+        verified_access_trust_provider_id: VerifiedAccessTrustProviderId,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> DeleteVerifiedAccessTrustProviderResult:
+        raise NotImplementedError
+
+    @handler("DeleteVolume")
+    def delete_volume(
+        self, context: RequestContext, volume_id: VolumeId, dry_run: Boolean | None = None, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteVpc")
+    def delete_vpc(
+        self, context: RequestContext, vpc_id: VpcId, dry_run: Boolean | None = None, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteVpcBlockPublicAccessExclusion")
+    def delete_vpc_block_public_access_exclusion(
+        self,
+        context: RequestContext,
+        exclusion_id: VpcBlockPublicAccessExclusionId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteVpcBlockPublicAccessExclusionResult:
+        raise NotImplementedError
+
+    @handler("DeleteVpcEndpointConnectionNotifications")
+    def delete_vpc_endpoint_connection_notifications(
+        self,
+        context: RequestContext,
+        connection_notification_ids: ConnectionNotificationIdsList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteVpcEndpointConnectionNotificationsResult:
+        raise NotImplementedError
+
+    @handler("DeleteVpcEndpointServiceConfigurations")
+    def delete_vpc_endpoint_service_configurations(
+        self,
+        context: RequestContext,
+        service_ids: VpcEndpointServiceIdList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteVpcEndpointServiceConfigurationsResult:
+        raise NotImplementedError
+
+    @handler("DeleteVpcEndpoints")
+    def delete_vpc_endpoints(
+        self,
+        context: RequestContext,
+        vpc_endpoint_ids: VpcEndpointIdList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteVpcEndpointsResult:
+        raise NotImplementedError
+
+    @handler("DeleteVpcPeeringConnection")
+    def delete_vpc_peering_connection(
+        self,
+        context: RequestContext,
+        vpc_peering_connection_id: VpcPeeringConnectionId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeleteVpcPeeringConnectionResult:
+        raise NotImplementedError
+
+    @handler("DeleteVpnConnection")
+    def delete_vpn_connection(
+        self,
+        context: RequestContext,
+        vpn_connection_id: VpnConnectionId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteVpnConnectionRoute")
+    def delete_vpn_connection_route(
+        self,
+        context: RequestContext,
+        destination_cidr_block: String,
+        vpn_connection_id: VpnConnectionId,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteVpnGateway")
+    def delete_vpn_gateway(
+        self,
+        context: RequestContext,
+        vpn_gateway_id: VpnGatewayId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeprovisionByoipCidr")
+    def deprovision_byoip_cidr(
+        self, context: RequestContext, cidr: String, dry_run: Boolean | None = None, **kwargs
+    ) -> DeprovisionByoipCidrResult:
+        raise NotImplementedError
+
+    @handler("DeprovisionIpamByoasn")
+    def deprovision_ipam_byoasn(
+        self,
+        context: RequestContext,
+        ipam_id: IpamId,
+        asn: String,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeprovisionIpamByoasnResult:
+        raise NotImplementedError
+
+    @handler("DeprovisionIpamPoolCidr")
+    def deprovision_ipam_pool_cidr(
+        self,
+        context: RequestContext,
+        ipam_pool_id: IpamPoolId,
+        dry_run: Boolean | None = None,
+        cidr: String | None = None,
+        **kwargs,
+    ) -> DeprovisionIpamPoolCidrResult:
+        raise NotImplementedError
+
+    @handler("DeprovisionPublicIpv4PoolCidr")
+    def deprovision_public_ipv4_pool_cidr(
+        self,
+        context: RequestContext,
+        pool_id: Ipv4PoolEc2Id,
+        cidr: String,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeprovisionPublicIpv4PoolCidrResult:
+        raise NotImplementedError
+
+    @handler("DeregisterImage")
+    def deregister_image(
+        self,
+        context: RequestContext,
+        image_id: ImageId,
+        delete_associated_snapshots: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeregisterImageResult:
+        raise NotImplementedError
+
+    @handler("DeregisterInstanceEventNotificationAttributes")
+    def deregister_instance_event_notification_attributes(
+        self,
+        context: RequestContext,
+        instance_tag_attribute: DeregisterInstanceTagAttributeRequest,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeregisterInstanceEventNotificationAttributesResult:
+        raise NotImplementedError
+
+    @handler("DeregisterTransitGatewayMulticastGroupMembers")
+    def deregister_transit_gateway_multicast_group_members(
+        self,
+        context: RequestContext,
+        transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId | None = None,
+        group_ip_address: String | None = None,
+        network_interface_ids: TransitGatewayNetworkInterfaceIdList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeregisterTransitGatewayMulticastGroupMembersResult:
+        raise NotImplementedError
+
+    @handler("DeregisterTransitGatewayMulticastGroupSources")
+    def deregister_transit_gateway_multicast_group_sources(
+        self,
+        context: RequestContext,
+        transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId | None = None,
+        group_ip_address: String | None = None,
+        network_interface_ids: TransitGatewayNetworkInterfaceIdList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DeregisterTransitGatewayMulticastGroupSourcesResult:
+        raise NotImplementedError
+
+    @handler("DescribeAccountAttributes")
+    def describe_account_attributes(
+        self,
+        context: RequestContext,
+        dry_run: Boolean | None = None,
+        attribute_names: AccountAttributeNameStringList | None = None,
+        **kwargs,
+    ) -> DescribeAccountAttributesResult:
+        raise NotImplementedError
+
+    @handler("DescribeAddressTransfers")
+    def describe_address_transfers(
+        self,
+        context: RequestContext,
+        allocation_ids: AllocationIdList | None = None,
+        next_token: String | None = None,
+        max_results: DescribeAddressTransfersMaxResults | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DescribeAddressTransfersResult:
+        raise NotImplementedError
+
+    @handler("DescribeAddresses")
+    def describe_addresses(
+        self,
+        context: RequestContext,
+        public_ips: PublicIpStringList | None = None,
+        dry_run: Boolean | None = None,
+        filters: FilterList | None = None,
+        allocation_ids: AllocationIdList | None = None,
+        **kwargs,
+    ) -> DescribeAddressesResult:
+        raise NotImplementedError
+
+    @handler("DescribeAddressesAttribute")
+    def describe_addresses_attribute(
+        self,
+        context: RequestContext,
+        allocation_ids: AllocationIds | None = None,
+        attribute: AddressAttributeName | None = None,
+        next_token: NextToken | None = None,
+        max_results: AddressMaxResults | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DescribeAddressesAttributeResult:
+        raise NotImplementedError
+
+    @handler("DescribeAggregateIdFormat")
+    def describe_aggregate_id_format(
+        self, context: RequestContext, dry_run: Boolean | None = None, **kwargs
+    ) -> DescribeAggregateIdFormatResult:
+        raise NotImplementedError
+
+    @handler("DescribeAvailabilityZones")
+    def describe_availability_zones(
+        self,
+        context: RequestContext,
+        zone_names: ZoneNameStringList | None = None,
+        zone_ids: ZoneIdStringList | None = None,
+        all_availability_zones: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        filters: FilterList | None = None,
+        **kwargs,
+    ) -> DescribeAvailabilityZonesResult:
+        raise NotImplementedError
+
+    @handler("DescribeAwsNetworkPerformanceMetricSubscriptions")
+    def describe_aws_network_performance_metric_subscriptions(
+        self,
+        context: RequestContext,
+        max_results: MaxResultsParam | None = None,
+        next_token: String | None = None,
+        filters: FilterList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> DescribeAwsNetworkPerformanceMetricSubscriptionsResult:
+        raise NotImplementedError
+
+    @handler("DescribeBundleTasks")
+    def describe_bundle_tasks(
+        self,
+        context: RequestContext,
+        bundle_ids: BundleIdStringList | None = None,
+        dry_run: Boolean | None = None,
+        filters: FilterList | None = None,
+        **kwargs,
+    ) -> DescribeBundleTasksResult:
+        raise NotImplementedError
+
@handler("DescribeByoipCidrs") + def describe_byoip_cidrs( + self, + context: RequestContext, + max_results: DescribeByoipCidrsMaxResults, + dry_run: Boolean | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeByoipCidrsResult: + raise NotImplementedError + + @handler("DescribeCapacityBlockExtensionHistory") + def describe_capacity_block_extension_history( + self, + context: RequestContext, + capacity_reservation_ids: CapacityReservationIdSet | None = None, + next_token: String | None = None, + max_results: DescribeFutureCapacityMaxResults | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeCapacityBlockExtensionHistoryResult: + raise NotImplementedError + + @handler("DescribeCapacityBlockExtensionOfferings") + def describe_capacity_block_extension_offerings( + self, + context: RequestContext, + capacity_block_extension_duration_hours: Integer, + capacity_reservation_id: CapacityReservationId, + dry_run: Boolean | None = None, + next_token: String | None = None, + max_results: DescribeCapacityBlockExtensionOfferingsMaxResults | None = None, + **kwargs, + ) -> DescribeCapacityBlockExtensionOfferingsResult: + raise NotImplementedError + + @handler("DescribeCapacityBlockOfferings") + def describe_capacity_block_offerings( + self, + context: RequestContext, + capacity_duration_hours: Integer, + dry_run: Boolean | None = None, + instance_type: String | None = None, + instance_count: Integer | None = None, + start_date_range: MillisecondDateTime | None = None, + end_date_range: MillisecondDateTime | None = None, + next_token: String | None = None, + max_results: DescribeCapacityBlockOfferingsMaxResults | None = None, + **kwargs, + ) -> DescribeCapacityBlockOfferingsResult: + raise NotImplementedError + + @handler("DescribeCapacityReservationBillingRequests") + def describe_capacity_reservation_billing_requests( + self, + context: RequestContext, + role: CallerRole, + capacity_reservation_ids: CapacityReservationIdSet | None = None, + next_token: String | None = None, + max_results: DescribeCapacityReservationBillingRequestsRequestMaxResults | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeCapacityReservationBillingRequestsResult: + raise NotImplementedError + + @handler("DescribeCapacityReservationFleets") + def describe_capacity_reservation_fleets( + self, + context: RequestContext, + capacity_reservation_fleet_ids: CapacityReservationFleetIdSet | None = None, + next_token: String | None = None, + max_results: DescribeCapacityReservationFleetsMaxResults | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeCapacityReservationFleetsResult: + raise NotImplementedError + + @handler("DescribeCapacityReservations") + def describe_capacity_reservations( + self, + context: RequestContext, + capacity_reservation_ids: CapacityReservationIdSet | None = None, + next_token: String | None = None, + max_results: DescribeCapacityReservationsMaxResults | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeCapacityReservationsResult: + raise NotImplementedError + + @handler("DescribeCarrierGateways") + def describe_carrier_gateways( + self, + context: RequestContext, + carrier_gateway_ids: CarrierGatewayIdSet | None = None, + filters: FilterList | None = None, + max_results: CarrierGatewayMaxResults | None = None, + next_token: String | None = 
None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeCarrierGatewaysResult: + raise NotImplementedError + + @handler("DescribeClassicLinkInstances") + def describe_classic_link_instances( + self, + context: RequestContext, + dry_run: Boolean | None = None, + instance_ids: InstanceIdStringList | None = None, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: DescribeClassicLinkInstancesMaxResults | None = None, + **kwargs, + ) -> DescribeClassicLinkInstancesResult: + raise NotImplementedError + + @handler("DescribeClientVpnAuthorizationRules") + def describe_client_vpn_authorization_rules( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + dry_run: Boolean | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + max_results: DescribeClientVpnAuthorizationRulesMaxResults | None = None, + **kwargs, + ) -> DescribeClientVpnAuthorizationRulesResult: + raise NotImplementedError + + @handler("DescribeClientVpnConnections") + def describe_client_vpn_connections( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + filters: FilterList | None = None, + next_token: NextToken | None = None, + max_results: DescribeClientVpnConnectionsMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeClientVpnConnectionsResult: + raise NotImplementedError + + @handler("DescribeClientVpnEndpoints") + def describe_client_vpn_endpoints( + self, + context: RequestContext, + client_vpn_endpoint_ids: ClientVpnEndpointIdList | None = None, + max_results: DescribeClientVpnEndpointMaxResults | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeClientVpnEndpointsResult: + raise NotImplementedError + + @handler("DescribeClientVpnRoutes") + def describe_client_vpn_routes( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + filters: FilterList | None = None, + max_results: DescribeClientVpnRoutesMaxResults | None = None, + next_token: NextToken | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeClientVpnRoutesResult: + raise NotImplementedError + + @handler("DescribeClientVpnTargetNetworks") + def describe_client_vpn_target_networks( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + association_ids: ValueStringList | None = None, + max_results: DescribeClientVpnTargetNetworksMaxResults | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeClientVpnTargetNetworksResult: + raise NotImplementedError + + @handler("DescribeCoipPools") + def describe_coip_pools( + self, + context: RequestContext, + pool_ids: CoipPoolIdSet | None = None, + filters: FilterList | None = None, + max_results: CoipPoolMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeCoipPoolsResult: + raise NotImplementedError + + @handler("DescribeConversionTasks") + def describe_conversion_tasks( + self, + context: RequestContext, + dry_run: Boolean | None = None, + conversion_task_ids: ConversionIdStringList | None = None, + **kwargs, + ) -> DescribeConversionTasksResult: + raise NotImplementedError + + @handler("DescribeCustomerGateways") + def describe_customer_gateways( + self, + context: RequestContext, + customer_gateway_ids: 
CustomerGatewayIdStringList | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeCustomerGatewaysResult: + raise NotImplementedError + + @handler("DescribeDeclarativePoliciesReports") + def describe_declarative_policies_reports( + self, + context: RequestContext, + dry_run: Boolean | None = None, + next_token: String | None = None, + max_results: DeclarativePoliciesMaxResults | None = None, + report_ids: ValueStringList | None = None, + **kwargs, + ) -> DescribeDeclarativePoliciesReportsResult: + raise NotImplementedError + + @handler("DescribeDhcpOptions") + def describe_dhcp_options( + self, + context: RequestContext, + dhcp_options_ids: DhcpOptionsIdStringList | None = None, + next_token: String | None = None, + max_results: DescribeDhcpOptionsMaxResults | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeDhcpOptionsResult: + raise NotImplementedError + + @handler("DescribeEgressOnlyInternetGateways") + def describe_egress_only_internet_gateways( + self, + context: RequestContext, + dry_run: Boolean | None = None, + egress_only_internet_gateway_ids: EgressOnlyInternetGatewayIdList | None = None, + max_results: DescribeEgressOnlyInternetGatewaysMaxResults | None = None, + next_token: String | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeEgressOnlyInternetGatewaysResult: + raise NotImplementedError + + @handler("DescribeElasticGpus") + def describe_elastic_gpus( + self, + context: RequestContext, + elastic_gpu_ids: ElasticGpuIdSet | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: DescribeElasticGpusMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeElasticGpusResult: + raise NotImplementedError + + @handler("DescribeExportImageTasks") + def describe_export_image_tasks( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + export_image_task_ids: ExportImageTaskIdList | None = None, + max_results: DescribeExportImageTasksMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeExportImageTasksResult: + raise NotImplementedError + + @handler("DescribeExportTasks") + def describe_export_tasks( + self, + context: RequestContext, + filters: FilterList | None = None, + export_task_ids: ExportTaskIdStringList | None = None, + **kwargs, + ) -> DescribeExportTasksResult: + raise NotImplementedError + + @handler("DescribeFastLaunchImages") + def describe_fast_launch_images( + self, + context: RequestContext, + image_ids: FastLaunchImageIdList | None = None, + filters: FilterList | None = None, + max_results: DescribeFastLaunchImagesRequestMaxResults | None = None, + next_token: NextToken | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeFastLaunchImagesResult: + raise NotImplementedError + + @handler("DescribeFastSnapshotRestores") + def describe_fast_snapshot_restores( + self, + context: RequestContext, + filters: FilterList | None = None, + max_results: DescribeFastSnapshotRestoresMaxResults | None = None, + next_token: NextToken | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeFastSnapshotRestoresResult: + raise NotImplementedError + + @handler("DescribeFleetHistory") + def describe_fleet_history( + self, + context: RequestContext, + fleet_id: FleetId, + start_time: DateTime, + dry_run: Boolean | None = None, + 
event_type: FleetEventType | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeFleetHistoryResult: + raise NotImplementedError + + @handler("DescribeFleetInstances") + def describe_fleet_instances( + self, + context: RequestContext, + fleet_id: FleetId, + dry_run: Boolean | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeFleetInstancesResult: + raise NotImplementedError + + @handler("DescribeFleets") + def describe_fleets( + self, + context: RequestContext, + dry_run: Boolean | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + fleet_ids: FleetIdSet | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeFleetsResult: + raise NotImplementedError + + @handler("DescribeFlowLogs") + def describe_flow_logs( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filter: FilterList | None = None, + flow_log_ids: FlowLogIdList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeFlowLogsResult: + raise NotImplementedError + + @handler("DescribeFpgaImageAttribute") + def describe_fpga_image_attribute( + self, + context: RequestContext, + fpga_image_id: FpgaImageId, + attribute: FpgaImageAttributeName, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeFpgaImageAttributeResult: + raise NotImplementedError + + @handler("DescribeFpgaImages") + def describe_fpga_images( + self, + context: RequestContext, + dry_run: Boolean | None = None, + fpga_image_ids: FpgaImageIdList | None = None, + owners: OwnerStringList | None = None, + filters: FilterList | None = None, + next_token: NextToken | None = None, + max_results: DescribeFpgaImagesMaxResults | None = None, + **kwargs, + ) -> DescribeFpgaImagesResult: + raise NotImplementedError + + @handler("DescribeHostReservationOfferings") + def describe_host_reservation_offerings( + self, + context: RequestContext, + filter: FilterList | None = None, + max_duration: Integer | None = None, + max_results: DescribeHostReservationsMaxResults | None = None, + min_duration: Integer | None = None, + next_token: String | None = None, + offering_id: OfferingId | None = None, + **kwargs, + ) -> DescribeHostReservationOfferingsResult: + raise NotImplementedError + + @handler("DescribeHostReservations") + def describe_host_reservations( + self, + context: RequestContext, + filter: FilterList | None = None, + host_reservation_id_set: HostReservationIdSet | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeHostReservationsResult: + raise NotImplementedError + + @handler("DescribeHosts") + def describe_hosts( + self, + context: RequestContext, + host_ids: RequestHostIdList | None = None, + next_token: String | None = None, + max_results: Integer | None = None, + filter: FilterList | None = None, + **kwargs, + ) -> DescribeHostsResult: + raise NotImplementedError + + @handler("DescribeIamInstanceProfileAssociations") + def describe_iam_instance_profile_associations( + self, + context: RequestContext, + association_ids: AssociationIdList | None = None, + filters: FilterList | None = None, + max_results: DescribeIamInstanceProfileAssociationsMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeIamInstanceProfileAssociationsResult: + raise NotImplementedError + + 
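Editor's note: every Describe* stub in this generated skeleton repeats the same pagination contract; the caller passes max_results together with the next_token returned by the previous page, and the service hands back a fresh token while more results remain. Below is a minimal, self-contained sketch of that contract, separate from the diff; all names are hypothetical and no LocalStack types are involved.

    def paginate(items: list, next_token: str | None = None, max_results: int | None = None):
        """Return one page of `items` plus the token that fetches the next page."""
        # The token is an opaque cursor; this sketch encodes the start offset.
        start = int(next_token) if next_token else 0
        end = start + max_results if max_results else len(items)
        page = items[start:end]
        # Only hand out a token when another page actually exists.
        token = str(end) if end < len(items) else None
        return page, token

    # Example: three pages of two items each.
    # paginate(list("abcde"), max_results=2)       -> (['a', 'b'], '2')
    # paginate(list("abcde"), "2", max_results=2)  -> (['c', 'd'], '4')
    # paginate(list("abcde"), "4", max_results=2)  -> (['e'], None)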
@handler("DescribeIdFormat") + def describe_id_format( + self, context: RequestContext, resource: String | None = None, **kwargs + ) -> DescribeIdFormatResult: + raise NotImplementedError + + @handler("DescribeIdentityIdFormat") + def describe_identity_id_format( + self, + context: RequestContext, + principal_arn: String, + resource: String | None = None, + **kwargs, + ) -> DescribeIdentityIdFormatResult: + raise NotImplementedError + + @handler("DescribeImageAttribute") + def describe_image_attribute( + self, + context: RequestContext, + attribute: ImageAttributeName, + image_id: ImageId, + dry_run: Boolean | None = None, + **kwargs, + ) -> ImageAttribute: + raise NotImplementedError + + @handler("DescribeImages") + def describe_images( + self, + context: RequestContext, + executable_users: ExecutableByStringList | None = None, + image_ids: ImageIdStringList | None = None, + owners: OwnerStringList | None = None, + include_deprecated: Boolean | None = None, + include_disabled: Boolean | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeImagesResult: + raise NotImplementedError + + @handler("DescribeImportImageTasks") + def describe_import_image_tasks( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + import_task_ids: ImportTaskIdList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeImportImageTasksResult: + raise NotImplementedError + + @handler("DescribeImportSnapshotTasks") + def describe_import_snapshot_tasks( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + import_task_ids: ImportSnapshotTaskIdList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeImportSnapshotTasksResult: + raise NotImplementedError + + @handler("DescribeInstanceAttribute") + def describe_instance_attribute( + self, + context: RequestContext, + instance_id: InstanceId, + attribute: InstanceAttributeName, + dry_run: Boolean | None = None, + **kwargs, + ) -> InstanceAttribute: + raise NotImplementedError + + @handler("DescribeInstanceConnectEndpoints") + def describe_instance_connect_endpoints( + self, + context: RequestContext, + dry_run: Boolean | None = None, + max_results: InstanceConnectEndpointMaxResults | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + instance_connect_endpoint_ids: ValueStringList | None = None, + **kwargs, + ) -> DescribeInstanceConnectEndpointsResult: + raise NotImplementedError + + @handler("DescribeInstanceCreditSpecifications") + def describe_instance_credit_specifications( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + instance_ids: InstanceIdStringList | None = None, + max_results: DescribeInstanceCreditSpecificationsMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeInstanceCreditSpecificationsResult: + raise NotImplementedError + + @handler("DescribeInstanceEventNotificationAttributes") + def describe_instance_event_notification_attributes( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> DescribeInstanceEventNotificationAttributesResult: + raise NotImplementedError + + @handler("DescribeInstanceEventWindows") + def 
describe_instance_event_windows( + self, + context: RequestContext, + dry_run: Boolean | None = None, + instance_event_window_ids: InstanceEventWindowIdSet | None = None, + filters: FilterList | None = None, + max_results: ResultRange | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeInstanceEventWindowsResult: + raise NotImplementedError + + @handler("DescribeInstanceImageMetadata") + def describe_instance_image_metadata( + self, + context: RequestContext, + filters: FilterList | None = None, + instance_ids: InstanceIdStringList | None = None, + max_results: DescribeInstanceImageMetadataMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeInstanceImageMetadataResult: + raise NotImplementedError + + @handler("DescribeInstanceStatus") + def describe_instance_status( + self, + context: RequestContext, + instance_ids: InstanceIdStringList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + include_all_instances: Boolean | None = None, + **kwargs, + ) -> DescribeInstanceStatusResult: + raise NotImplementedError + + @handler("DescribeInstanceTopology") + def describe_instance_topology( + self, + context: RequestContext, + dry_run: Boolean | None = None, + next_token: String | None = None, + max_results: DescribeInstanceTopologyMaxResults | None = None, + instance_ids: DescribeInstanceTopologyInstanceIdSet | None = None, + group_names: DescribeInstanceTopologyGroupNameSet | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeInstanceTopologyResult: + raise NotImplementedError + + @handler("DescribeInstanceTypeOfferings") + def describe_instance_type_offerings( + self, + context: RequestContext, + dry_run: Boolean | None = None, + location_type: LocationType | None = None, + filters: FilterList | None = None, + max_results: DITOMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeInstanceTypeOfferingsResult: + raise NotImplementedError + + @handler("DescribeInstanceTypes") + def describe_instance_types( + self, + context: RequestContext, + dry_run: Boolean | None = None, + instance_types: RequestInstanceTypeList | None = None, + filters: FilterList | None = None, + max_results: DITMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeInstanceTypesResult: + raise NotImplementedError + + @handler("DescribeInstances") + def describe_instances( + self, + context: RequestContext, + instance_ids: InstanceIdStringList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: Integer | None = None, + **kwargs, + ) -> DescribeInstancesResult: + raise NotImplementedError + + @handler("DescribeInternetGateways") + def describe_internet_gateways( + self, + context: RequestContext, + next_token: String | None = None, + max_results: DescribeInternetGatewaysMaxResults | None = None, + dry_run: Boolean | None = None, + internet_gateway_ids: InternetGatewayIdList | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeInternetGatewaysResult: + raise NotImplementedError + + @handler("DescribeIpamByoasn") + def describe_ipam_byoasn( + self, + context: RequestContext, + dry_run: Boolean | None = None, + max_results: DescribeIpamByoasnMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + 
) -> DescribeIpamByoasnResult: + raise NotImplementedError + + @handler("DescribeIpamExternalResourceVerificationTokens") + def describe_ipam_external_resource_verification_tokens( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + next_token: NextToken | None = None, + max_results: IpamMaxResults | None = None, + ipam_external_resource_verification_token_ids: ValueStringList | None = None, + **kwargs, + ) -> DescribeIpamExternalResourceVerificationTokensResult: + raise NotImplementedError + + @handler("DescribeIpamPools") + def describe_ipam_pools( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: IpamMaxResults | None = None, + next_token: NextToken | None = None, + ipam_pool_ids: ValueStringList | None = None, + **kwargs, + ) -> DescribeIpamPoolsResult: + raise NotImplementedError + + @handler("DescribeIpamResourceDiscoveries") + def describe_ipam_resource_discoveries( + self, + context: RequestContext, + dry_run: Boolean | None = None, + ipam_resource_discovery_ids: ValueStringList | None = None, + next_token: NextToken | None = None, + max_results: IpamMaxResults | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeIpamResourceDiscoveriesResult: + raise NotImplementedError + + @handler("DescribeIpamResourceDiscoveryAssociations") + def describe_ipam_resource_discovery_associations( + self, + context: RequestContext, + dry_run: Boolean | None = None, + ipam_resource_discovery_association_ids: ValueStringList | None = None, + next_token: NextToken | None = None, + max_results: IpamMaxResults | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeIpamResourceDiscoveryAssociationsResult: + raise NotImplementedError + + @handler("DescribeIpamScopes") + def describe_ipam_scopes( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: IpamMaxResults | None = None, + next_token: NextToken | None = None, + ipam_scope_ids: ValueStringList | None = None, + **kwargs, + ) -> DescribeIpamScopesResult: + raise NotImplementedError + + @handler("DescribeIpams") + def describe_ipams( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: IpamMaxResults | None = None, + next_token: NextToken | None = None, + ipam_ids: ValueStringList | None = None, + **kwargs, + ) -> DescribeIpamsResult: + raise NotImplementedError + + @handler("DescribeIpv6Pools") + def describe_ipv6_pools( + self, + context: RequestContext, + pool_ids: Ipv6PoolIdList | None = None, + next_token: NextToken | None = None, + max_results: Ipv6PoolMaxResults | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeIpv6PoolsResult: + raise NotImplementedError + + @handler("DescribeKeyPairs") + def describe_key_pairs( + self, + context: RequestContext, + key_names: KeyNameStringList | None = None, + key_pair_ids: KeyPairIdStringList | None = None, + include_public_key: Boolean | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeKeyPairsResult: + raise NotImplementedError + + @handler("DescribeLaunchTemplateVersions") + def describe_launch_template_versions( + self, + context: RequestContext, + dry_run: Boolean | None = None, + launch_template_id: LaunchTemplateId | None = None, + launch_template_name: LaunchTemplateName | 
None = None, + versions: VersionStringList | None = None, + min_version: String | None = None, + max_version: String | None = None, + next_token: String | None = None, + max_results: Integer | None = None, + filters: FilterList | None = None, + resolve_alias: Boolean | None = None, + **kwargs, + ) -> DescribeLaunchTemplateVersionsResult: + raise NotImplementedError + + @handler("DescribeLaunchTemplates") + def describe_launch_templates( + self, + context: RequestContext, + dry_run: Boolean | None = None, + launch_template_ids: LaunchTemplateIdStringList | None = None, + launch_template_names: LaunchTemplateNameStringList | None = None, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: DescribeLaunchTemplatesMaxResults | None = None, + **kwargs, + ) -> DescribeLaunchTemplatesResult: + raise NotImplementedError + + @handler("DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations") + def describe_local_gateway_route_table_virtual_interface_group_associations( + self, + context: RequestContext, + local_gateway_route_table_virtual_interface_group_association_ids: LocalGatewayRouteTableVirtualInterfaceGroupAssociationIdSet + | None = None, + filters: FilterList | None = None, + max_results: LocalGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsResult: + raise NotImplementedError + + @handler("DescribeLocalGatewayRouteTableVpcAssociations") + def describe_local_gateway_route_table_vpc_associations( + self, + context: RequestContext, + local_gateway_route_table_vpc_association_ids: LocalGatewayRouteTableVpcAssociationIdSet + | None = None, + filters: FilterList | None = None, + max_results: LocalGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeLocalGatewayRouteTableVpcAssociationsResult: + raise NotImplementedError + + @handler("DescribeLocalGatewayRouteTables") + def describe_local_gateway_route_tables( + self, + context: RequestContext, + local_gateway_route_table_ids: LocalGatewayRouteTableIdSet | None = None, + filters: FilterList | None = None, + max_results: LocalGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeLocalGatewayRouteTablesResult: + raise NotImplementedError + + @handler("DescribeLocalGatewayVirtualInterfaceGroups") + def describe_local_gateway_virtual_interface_groups( + self, + context: RequestContext, + local_gateway_virtual_interface_group_ids: LocalGatewayVirtualInterfaceGroupIdSet + | None = None, + filters: FilterList | None = None, + max_results: LocalGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeLocalGatewayVirtualInterfaceGroupsResult: + raise NotImplementedError + + @handler("DescribeLocalGatewayVirtualInterfaces") + def describe_local_gateway_virtual_interfaces( + self, + context: RequestContext, + local_gateway_virtual_interface_ids: LocalGatewayVirtualInterfaceIdSet | None = None, + filters: FilterList | None = None, + max_results: LocalGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeLocalGatewayVirtualInterfacesResult: + raise NotImplementedError + + @handler("DescribeLocalGateways") + def describe_local_gateways( + self, + context: RequestContext, + 
local_gateway_ids: LocalGatewayIdSet | None = None, + filters: FilterList | None = None, + max_results: LocalGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeLocalGatewaysResult: + raise NotImplementedError + + @handler("DescribeLockedSnapshots") + def describe_locked_snapshots( + self, + context: RequestContext, + filters: FilterList | None = None, + max_results: DescribeLockedSnapshotsMaxResults | None = None, + next_token: String | None = None, + snapshot_ids: SnapshotIdStringList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeLockedSnapshotsResult: + raise NotImplementedError + + @handler("DescribeMacHosts") + def describe_mac_hosts( + self, + context: RequestContext, + filters: FilterList | None = None, + host_ids: RequestHostIdList | None = None, + max_results: DescribeMacHostsRequestMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeMacHostsResult: + raise NotImplementedError + + @handler("DescribeMacModificationTasks") + def describe_mac_modification_tasks( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + mac_modification_task_ids: MacModificationTaskIdList | None = None, + max_results: DescribeMacModificationTasksMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeMacModificationTasksResult: + raise NotImplementedError + + @handler("DescribeManagedPrefixLists") + def describe_managed_prefix_lists( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: PrefixListMaxResults | None = None, + next_token: NextToken | None = None, + prefix_list_ids: ValueStringList | None = None, + **kwargs, + ) -> DescribeManagedPrefixListsResult: + raise NotImplementedError + + @handler("DescribeMovingAddresses") + def describe_moving_addresses( + self, + context: RequestContext, + dry_run: Boolean | None = None, + public_ips: ValueStringList | None = None, + next_token: String | None = None, + filters: FilterList | None = None, + max_results: DescribeMovingAddressesMaxResults | None = None, + **kwargs, + ) -> DescribeMovingAddressesResult: + raise NotImplementedError + + @handler("DescribeNatGateways") + def describe_nat_gateways( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filter: FilterList | None = None, + max_results: DescribeNatGatewaysMaxResults | None = None, + nat_gateway_ids: NatGatewayIdStringList | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeNatGatewaysResult: + raise NotImplementedError + + @handler("DescribeNetworkAcls") + def describe_network_acls( + self, + context: RequestContext, + next_token: String | None = None, + max_results: DescribeNetworkAclsMaxResults | None = None, + dry_run: Boolean | None = None, + network_acl_ids: NetworkAclIdStringList | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeNetworkAclsResult: + raise NotImplementedError + + @handler("DescribeNetworkInsightsAccessScopeAnalyses") + def describe_network_insights_access_scope_analyses( + self, + context: RequestContext, + network_insights_access_scope_analysis_ids: NetworkInsightsAccessScopeAnalysisIdList + | None = None, + network_insights_access_scope_id: NetworkInsightsAccessScopeId | None = None, + analysis_start_time_begin: MillisecondDateTime | None = None, + analysis_start_time_end: MillisecondDateTime | None = None, 
+ filters: FilterList | None = None, + max_results: NetworkInsightsMaxResults | None = None, + dry_run: Boolean | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeNetworkInsightsAccessScopeAnalysesResult: + raise NotImplementedError + + @handler("DescribeNetworkInsightsAccessScopes") + def describe_network_insights_access_scopes( + self, + context: RequestContext, + network_insights_access_scope_ids: NetworkInsightsAccessScopeIdList | None = None, + filters: FilterList | None = None, + max_results: NetworkInsightsMaxResults | None = None, + dry_run: Boolean | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeNetworkInsightsAccessScopesResult: + raise NotImplementedError + + @handler("DescribeNetworkInsightsAnalyses") + def describe_network_insights_analyses( + self, + context: RequestContext, + network_insights_analysis_ids: NetworkInsightsAnalysisIdList | None = None, + network_insights_path_id: NetworkInsightsPathId | None = None, + analysis_start_time: MillisecondDateTime | None = None, + analysis_end_time: MillisecondDateTime | None = None, + filters: FilterList | None = None, + max_results: NetworkInsightsMaxResults | None = None, + dry_run: Boolean | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeNetworkInsightsAnalysesResult: + raise NotImplementedError + + @handler("DescribeNetworkInsightsPaths") + def describe_network_insights_paths( + self, + context: RequestContext, + network_insights_path_ids: NetworkInsightsPathIdList | None = None, + filters: FilterList | None = None, + max_results: NetworkInsightsMaxResults | None = None, + dry_run: Boolean | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeNetworkInsightsPathsResult: + raise NotImplementedError + + @handler("DescribeNetworkInterfaceAttribute") + def describe_network_interface_attribute( + self, + context: RequestContext, + network_interface_id: NetworkInterfaceId, + dry_run: Boolean | None = None, + attribute: NetworkInterfaceAttribute | None = None, + **kwargs, + ) -> DescribeNetworkInterfaceAttributeResult: + raise NotImplementedError + + @handler("DescribeNetworkInterfacePermissions") + def describe_network_interface_permissions( + self, + context: RequestContext, + network_interface_permission_ids: NetworkInterfacePermissionIdList | None = None, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: DescribeNetworkInterfacePermissionsMaxResults | None = None, + **kwargs, + ) -> DescribeNetworkInterfacePermissionsResult: + raise NotImplementedError + + @handler("DescribeNetworkInterfaces") + def describe_network_interfaces( + self, + context: RequestContext, + next_token: String | None = None, + max_results: DescribeNetworkInterfacesMaxResults | None = None, + dry_run: Boolean | None = None, + network_interface_ids: NetworkInterfaceIdList | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeNetworkInterfacesResult: + raise NotImplementedError + + @handler("DescribeOutpostLags") + def describe_outpost_lags( + self, + context: RequestContext, + outpost_lag_ids: OutpostLagIdSet | None = None, + filters: FilterList | None = None, + max_results: OutpostLagMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeOutpostLagsResult: + raise NotImplementedError + + @handler("DescribePlacementGroups") + def describe_placement_groups( + self, + context: RequestContext, + group_ids: 
PlacementGroupIdStringList | None = None, + dry_run: Boolean | None = None, + group_names: PlacementGroupStringList | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribePlacementGroupsResult: + raise NotImplementedError + + @handler("DescribePrefixLists") + def describe_prefix_lists( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + prefix_list_ids: PrefixListResourceIdStringList | None = None, + **kwargs, + ) -> DescribePrefixListsResult: + raise NotImplementedError + + @handler("DescribePrincipalIdFormat") + def describe_principal_id_format( + self, + context: RequestContext, + dry_run: Boolean | None = None, + resources: ResourceList | None = None, + max_results: DescribePrincipalIdFormatMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribePrincipalIdFormatResult: + raise NotImplementedError + + @handler("DescribePublicIpv4Pools") + def describe_public_ipv4_pools( + self, + context: RequestContext, + pool_ids: PublicIpv4PoolIdStringList | None = None, + next_token: NextToken | None = None, + max_results: PoolMaxResults | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribePublicIpv4PoolsResult: + raise NotImplementedError + + @handler("DescribeRegions") + def describe_regions( + self, + context: RequestContext, + region_names: RegionNameStringList | None = None, + all_regions: Boolean | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeRegionsResult: + raise NotImplementedError + + @handler("DescribeReplaceRootVolumeTasks") + def describe_replace_root_volume_tasks( + self, + context: RequestContext, + replace_root_volume_task_ids: ReplaceRootVolumeTaskIds | None = None, + filters: FilterList | None = None, + max_results: DescribeReplaceRootVolumeTasksMaxResults | None = None, + next_token: NextToken | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeReplaceRootVolumeTasksResult: + raise NotImplementedError + + @handler("DescribeReservedInstances") + def describe_reserved_instances( + self, + context: RequestContext, + offering_class: OfferingClassType | None = None, + reserved_instances_ids: ReservedInstancesIdStringList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + offering_type: OfferingTypeValues | None = None, + **kwargs, + ) -> DescribeReservedInstancesResult: + raise NotImplementedError + + @handler("DescribeReservedInstancesListings") + def describe_reserved_instances_listings( + self, + context: RequestContext, + reserved_instances_id: ReservationId | None = None, + reserved_instances_listing_id: ReservedInstancesListingId | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeReservedInstancesListingsResult: + raise NotImplementedError + + @handler("DescribeReservedInstancesModifications") + def describe_reserved_instances_modifications( + self, + context: RequestContext, + reserved_instances_modification_ids: ReservedInstancesModificationIdStringList + | None = None, + next_token: String | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeReservedInstancesModificationsResult: + raise NotImplementedError + + @handler("DescribeReservedInstancesOfferings") + def describe_reserved_instances_offerings( + self, + context: RequestContext, + availability_zone: String | None = None, + 
include_marketplace: Boolean | None = None, + instance_type: InstanceType | None = None, + max_duration: Long | None = None, + max_instance_count: Integer | None = None, + min_duration: Long | None = None, + offering_class: OfferingClassType | None = None, + product_description: RIProductDescription | None = None, + reserved_instances_offering_ids: ReservedInstancesOfferingIdStringList | None = None, + availability_zone_id: AvailabilityZoneId | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + instance_tenancy: Tenancy | None = None, + offering_type: OfferingTypeValues | None = None, + next_token: String | None = None, + max_results: Integer | None = None, + **kwargs, + ) -> DescribeReservedInstancesOfferingsResult: + raise NotImplementedError + + @handler("DescribeRouteServerEndpoints") + def describe_route_server_endpoints( + self, + context: RequestContext, + route_server_endpoint_ids: RouteServerEndpointIdsList | None = None, + next_token: String | None = None, + max_results: RouteServerMaxResults | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeRouteServerEndpointsResult: + raise NotImplementedError + + @handler("DescribeRouteServerPeers") + def describe_route_server_peers( + self, + context: RequestContext, + route_server_peer_ids: RouteServerPeerIdsList | None = None, + next_token: String | None = None, + max_results: RouteServerMaxResults | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeRouteServerPeersResult: + raise NotImplementedError + + @handler("DescribeRouteServers") + def describe_route_servers( + self, + context: RequestContext, + route_server_ids: RouteServerIdsList | None = None, + next_token: String | None = None, + max_results: RouteServerMaxResults | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeRouteServersResult: + raise NotImplementedError + + @handler("DescribeRouteTables") + def describe_route_tables( + self, + context: RequestContext, + next_token: String | None = None, + max_results: DescribeRouteTablesMaxResults | None = None, + dry_run: Boolean | None = None, + route_table_ids: RouteTableIdStringList | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeRouteTablesResult: + raise NotImplementedError + + @handler("DescribeScheduledInstanceAvailability") + def describe_scheduled_instance_availability( + self, + context: RequestContext, + first_slot_start_time_range: SlotDateTimeRangeRequest, + recurrence: ScheduledInstanceRecurrenceRequest, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: DescribeScheduledInstanceAvailabilityMaxResults | None = None, + max_slot_duration_in_hours: Integer | None = None, + min_slot_duration_in_hours: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeScheduledInstanceAvailabilityResult: + raise NotImplementedError + + @handler("DescribeScheduledInstances") + def describe_scheduled_instances( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + scheduled_instance_ids: ScheduledInstanceIdRequestSet | None = None, + slot_start_time_range: SlotStartTimeRangeRequest | None = None, + **kwargs, + ) -> DescribeScheduledInstancesResult: + raise NotImplementedError + + 
@handler("DescribeSecurityGroupReferences") + def describe_security_group_references( + self, context: RequestContext, group_id: GroupIds, dry_run: Boolean | None = None, **kwargs + ) -> DescribeSecurityGroupReferencesResult: + raise NotImplementedError + + @handler("DescribeSecurityGroupRules") + def describe_security_group_rules( + self, + context: RequestContext, + filters: FilterList | None = None, + security_group_rule_ids: SecurityGroupRuleIdList | None = None, + dry_run: Boolean | None = None, + next_token: String | None = None, + max_results: DescribeSecurityGroupRulesMaxResults | None = None, + **kwargs, + ) -> DescribeSecurityGroupRulesResult: + raise NotImplementedError + + @handler("DescribeSecurityGroupVpcAssociations") + def describe_security_group_vpc_associations( + self, + context: RequestContext, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: DescribeSecurityGroupVpcAssociationsMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeSecurityGroupVpcAssociationsResult: + raise NotImplementedError + + @handler("DescribeSecurityGroups") + def describe_security_groups( + self, + context: RequestContext, + group_ids: GroupIdStringList | None = None, + group_names: GroupNameStringList | None = None, + next_token: String | None = None, + max_results: DescribeSecurityGroupsMaxResults | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeSecurityGroupsResult: + raise NotImplementedError + + @handler("DescribeServiceLinkVirtualInterfaces") + def describe_service_link_virtual_interfaces( + self, + context: RequestContext, + service_link_virtual_interface_ids: ServiceLinkVirtualInterfaceIdSet | None = None, + filters: FilterList | None = None, + max_results: ServiceLinkMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeServiceLinkVirtualInterfacesResult: + raise NotImplementedError + + @handler("DescribeSnapshotAttribute") + def describe_snapshot_attribute( + self, + context: RequestContext, + attribute: SnapshotAttributeName, + snapshot_id: SnapshotId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeSnapshotAttributeResult: + raise NotImplementedError + + @handler("DescribeSnapshotTierStatus") + def describe_snapshot_tier_status( + self, + context: RequestContext, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + next_token: String | None = None, + max_results: DescribeSnapshotTierStatusMaxResults | None = None, + **kwargs, + ) -> DescribeSnapshotTierStatusResult: + raise NotImplementedError + + @handler("DescribeSnapshots") + def describe_snapshots( + self, + context: RequestContext, + max_results: Integer | None = None, + next_token: String | None = None, + owner_ids: OwnerStringList | None = None, + restorable_by_user_ids: RestorableByStringList | None = None, + snapshot_ids: SnapshotIdStringList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeSnapshotsResult: + raise NotImplementedError + + @handler("DescribeSpotDatafeedSubscription") + def describe_spot_datafeed_subscription( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> DescribeSpotDatafeedSubscriptionResult: + raise NotImplementedError + + @handler("DescribeSpotFleetInstances") + def describe_spot_fleet_instances( + self, + context: RequestContext, + spot_fleet_request_id: SpotFleetRequestId, 
+ dry_run: Boolean | None = None, + next_token: String | None = None, + max_results: DescribeSpotFleetInstancesMaxResults | None = None, + **kwargs, + ) -> DescribeSpotFleetInstancesResponse: + raise NotImplementedError + + @handler("DescribeSpotFleetRequestHistory") + def describe_spot_fleet_request_history( + self, + context: RequestContext, + spot_fleet_request_id: SpotFleetRequestId, + start_time: DateTime, + dry_run: Boolean | None = None, + event_type: EventType | None = None, + next_token: String | None = None, + max_results: DescribeSpotFleetRequestHistoryMaxResults | None = None, + **kwargs, + ) -> DescribeSpotFleetRequestHistoryResponse: + raise NotImplementedError + + @handler("DescribeSpotFleetRequests") + def describe_spot_fleet_requests( + self, + context: RequestContext, + dry_run: Boolean | None = None, + spot_fleet_request_ids: SpotFleetRequestIdList | None = None, + next_token: String | None = None, + max_results: Integer | None = None, + **kwargs, + ) -> DescribeSpotFleetRequestsResponse: + raise NotImplementedError + + @handler("DescribeSpotInstanceRequests") + def describe_spot_instance_requests( + self, + context: RequestContext, + next_token: String | None = None, + max_results: Integer | None = None, + dry_run: Boolean | None = None, + spot_instance_request_ids: SpotInstanceRequestIdList | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeSpotInstanceRequestsResult: + raise NotImplementedError + + @handler("DescribeSpotPriceHistory") + def describe_spot_price_history( + self, + context: RequestContext, + dry_run: Boolean | None = None, + start_time: DateTime | None = None, + end_time: DateTime | None = None, + instance_types: InstanceTypeList | None = None, + product_descriptions: ProductDescriptionList | None = None, + filters: FilterList | None = None, + availability_zone: String | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeSpotPriceHistoryResult: + raise NotImplementedError + + @handler("DescribeStaleSecurityGroups") + def describe_stale_security_groups( + self, + context: RequestContext, + vpc_id: VpcId, + dry_run: Boolean | None = None, + max_results: DescribeStaleSecurityGroupsMaxResults | None = None, + next_token: DescribeStaleSecurityGroupsNextToken | None = None, + **kwargs, + ) -> DescribeStaleSecurityGroupsResult: + raise NotImplementedError + + @handler("DescribeStoreImageTasks") + def describe_store_image_tasks( + self, + context: RequestContext, + image_ids: ImageIdList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: DescribeStoreImageTasksRequestMaxResults | None = None, + **kwargs, + ) -> DescribeStoreImageTasksResult: + raise NotImplementedError + + @handler("DescribeSubnets") + def describe_subnets( + self, + context: RequestContext, + filters: FilterList | None = None, + subnet_ids: SubnetIdStringList | None = None, + next_token: String | None = None, + max_results: DescribeSubnetsMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeSubnetsResult: + raise NotImplementedError + + @handler("DescribeTags") + def describe_tags( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeTagsResult: + raise NotImplementedError + + @handler("DescribeTrafficMirrorFilterRules") + def 
describe_traffic_mirror_filter_rules( + self, + context: RequestContext, + traffic_mirror_filter_rule_ids: TrafficMirrorFilterRuleIdList | None = None, + traffic_mirror_filter_id: TrafficMirrorFilterId | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: TrafficMirroringMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeTrafficMirrorFilterRulesResult: + raise NotImplementedError + + @handler("DescribeTrafficMirrorFilters") + def describe_traffic_mirror_filters( + self, + context: RequestContext, + traffic_mirror_filter_ids: TrafficMirrorFilterIdList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: TrafficMirroringMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeTrafficMirrorFiltersResult: + raise NotImplementedError + + @handler("DescribeTrafficMirrorSessions") + def describe_traffic_mirror_sessions( + self, + context: RequestContext, + traffic_mirror_session_ids: TrafficMirrorSessionIdList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: TrafficMirroringMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeTrafficMirrorSessionsResult: + raise NotImplementedError + + @handler("DescribeTrafficMirrorTargets") + def describe_traffic_mirror_targets( + self, + context: RequestContext, + traffic_mirror_target_ids: TrafficMirrorTargetIdList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: TrafficMirroringMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeTrafficMirrorTargetsResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayAttachments") + def describe_transit_gateway_attachments( + self, + context: RequestContext, + transit_gateway_attachment_ids: TransitGatewayAttachmentIdStringList | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayAttachmentsResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayConnectPeers") + def describe_transit_gateway_connect_peers( + self, + context: RequestContext, + transit_gateway_connect_peer_ids: TransitGatewayConnectPeerIdStringList | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayConnectPeersResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayConnects") + def describe_transit_gateway_connects( + self, + context: RequestContext, + transit_gateway_attachment_ids: TransitGatewayAttachmentIdStringList | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayConnectsResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayMulticastDomains") + def describe_transit_gateway_multicast_domains( + self, + context: RequestContext, + transit_gateway_multicast_domain_ids: TransitGatewayMulticastDomainIdStringList + | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + 
dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayMulticastDomainsResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayPeeringAttachments") + def describe_transit_gateway_peering_attachments( + self, + context: RequestContext, + transit_gateway_attachment_ids: TransitGatewayAttachmentIdStringList | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayPeeringAttachmentsResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayPolicyTables") + def describe_transit_gateway_policy_tables( + self, + context: RequestContext, + transit_gateway_policy_table_ids: TransitGatewayPolicyTableIdStringList | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayPolicyTablesResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayRouteTableAnnouncements") + def describe_transit_gateway_route_table_announcements( + self, + context: RequestContext, + transit_gateway_route_table_announcement_ids: TransitGatewayRouteTableAnnouncementIdStringList + | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayRouteTableAnnouncementsResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayRouteTables") + def describe_transit_gateway_route_tables( + self, + context: RequestContext, + transit_gateway_route_table_ids: TransitGatewayRouteTableIdStringList | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayRouteTablesResult: + raise NotImplementedError + + @handler("DescribeTransitGatewayVpcAttachments") + def describe_transit_gateway_vpc_attachments( + self, + context: RequestContext, + transit_gateway_attachment_ids: TransitGatewayAttachmentIdStringList | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewayVpcAttachmentsResult: + raise NotImplementedError + + @handler("DescribeTransitGateways") + def describe_transit_gateways( + self, + context: RequestContext, + transit_gateway_ids: TransitGatewayIdStringList | None = None, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeTransitGatewaysResult: + raise NotImplementedError + + @handler("DescribeTrunkInterfaceAssociations") + def describe_trunk_interface_associations( + self, + context: RequestContext, + association_ids: TrunkInterfaceAssociationIdList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: DescribeTrunkInterfaceAssociationsMaxResults | None = None, + **kwargs, + ) -> DescribeTrunkInterfaceAssociationsResult: + raise NotImplementedError + + @handler("DescribeVerifiedAccessEndpoints") + def describe_verified_access_endpoints( + self, + context: 
RequestContext, + verified_access_endpoint_ids: VerifiedAccessEndpointIdList | None = None, + verified_access_instance_id: VerifiedAccessInstanceId | None = None, + verified_access_group_id: VerifiedAccessGroupId | None = None, + max_results: DescribeVerifiedAccessEndpointsMaxResults | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVerifiedAccessEndpointsResult: + raise NotImplementedError + + @handler("DescribeVerifiedAccessGroups") + def describe_verified_access_groups( + self, + context: RequestContext, + verified_access_group_ids: VerifiedAccessGroupIdList | None = None, + verified_access_instance_id: VerifiedAccessInstanceId | None = None, + max_results: DescribeVerifiedAccessGroupMaxResults | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVerifiedAccessGroupsResult: + raise NotImplementedError + + @handler("DescribeVerifiedAccessInstanceLoggingConfigurations") + def describe_verified_access_instance_logging_configurations( + self, + context: RequestContext, + verified_access_instance_ids: VerifiedAccessInstanceIdList | None = None, + max_results: DescribeVerifiedAccessInstanceLoggingConfigurationsMaxResults | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVerifiedAccessInstanceLoggingConfigurationsResult: + raise NotImplementedError + + @handler("DescribeVerifiedAccessInstances") + def describe_verified_access_instances( + self, + context: RequestContext, + verified_access_instance_ids: VerifiedAccessInstanceIdList | None = None, + max_results: DescribeVerifiedAccessInstancesMaxResults | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVerifiedAccessInstancesResult: + raise NotImplementedError + + @handler("DescribeVerifiedAccessTrustProviders") + def describe_verified_access_trust_providers( + self, + context: RequestContext, + verified_access_trust_provider_ids: VerifiedAccessTrustProviderIdList | None = None, + max_results: DescribeVerifiedAccessTrustProvidersMaxResults | None = None, + next_token: NextToken | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVerifiedAccessTrustProvidersResult: + raise NotImplementedError + + @handler("DescribeVolumeAttribute") + def describe_volume_attribute( + self, + context: RequestContext, + attribute: VolumeAttributeName, + volume_id: VolumeId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVolumeAttributeResult: + raise NotImplementedError + + @handler("DescribeVolumeStatus") + def describe_volume_status( + self, + context: RequestContext, + max_results: Integer | None = None, + next_token: String | None = None, + volume_ids: VolumeIdStringList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeVolumeStatusResult: + raise NotImplementedError + + @handler("DescribeVolumes") + def describe_volumes( + self, + context: RequestContext, + volume_ids: VolumeIdStringList | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: Integer | None = None, + **kwargs, + ) -> DescribeVolumesResult: + raise NotImplementedError + + 
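Editor's note: the stubs above only define the API surface; a concrete provider subclasses the generated class and overrides the operations it supports, while everything left untouched keeps raising NotImplementedError. Below is a toy sketch of that pattern, separate from the diff, with the handler decorator re-implemented in miniature; Ec2Api, Ec2Provider, and the in-memory store are all hypothetical stand-ins, not the real generated machinery.

    def handler(operation: str):
        """Toy decorator that tags a method with the AWS operation name it serves."""
        def wrap(fn):
            fn.operation = operation
            return fn
        return wrap

    class Ec2Api:
        @handler("DescribeVpcs")
        def describe_vpcs(self, context, vpc_ids=None, **kwargs):
            raise NotImplementedError

    class Ec2Provider(Ec2Api):
        """Overrides only the operations it actually implements."""
        def __init__(self):
            self.vpcs = {"vpc-1234": {"VpcId": "vpc-1234", "CidrBlock": "10.0.0.0/16"}}

        @handler("DescribeVpcs")
        def describe_vpcs(self, context, vpc_ids=None, **kwargs):
            # No vpc_ids means "describe everything", matching the optional
            # parameters in the skeleton above.
            ids = vpc_ids or list(self.vpcs)
            return {"Vpcs": [self.vpcs[i] for i in ids if i in self.vpcs]}

    # Ec2Provider().describe_vpcs(None) returns the one stored VPC; any stub
    # that was not overridden still raises NotImplementedError.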
@handler("DescribeVolumesModifications") + def describe_volumes_modifications( + self, + context: RequestContext, + dry_run: Boolean | None = None, + volume_ids: VolumeIdStringList | None = None, + filters: FilterList | None = None, + next_token: String | None = None, + max_results: Integer | None = None, + **kwargs, + ) -> DescribeVolumesModificationsResult: + raise NotImplementedError + + @handler("DescribeVpcAttribute") + def describe_vpc_attribute( + self, + context: RequestContext, + attribute: VpcAttributeName, + vpc_id: VpcId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVpcAttributeResult: + raise NotImplementedError + + @handler("DescribeVpcBlockPublicAccessExclusions") + def describe_vpc_block_public_access_exclusions( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + exclusion_ids: VpcBlockPublicAccessExclusionIdList | None = None, + next_token: String | None = None, + max_results: DescribeVpcBlockPublicAccessExclusionsMaxResults | None = None, + **kwargs, + ) -> DescribeVpcBlockPublicAccessExclusionsResult: + raise NotImplementedError + + @handler("DescribeVpcBlockPublicAccessOptions") + def describe_vpc_block_public_access_options( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> DescribeVpcBlockPublicAccessOptionsResult: + raise NotImplementedError + + @handler("DescribeVpcClassicLink") + def describe_vpc_classic_link( + self, + context: RequestContext, + dry_run: Boolean | None = None, + vpc_ids: VpcClassicLinkIdList | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeVpcClassicLinkResult: + raise NotImplementedError + + @handler("DescribeVpcClassicLinkDnsSupport") + def describe_vpc_classic_link_dns_support( + self, + context: RequestContext, + vpc_ids: VpcClassicLinkIdList | None = None, + max_results: DescribeVpcClassicLinkDnsSupportMaxResults | None = None, + next_token: DescribeVpcClassicLinkDnsSupportNextToken | None = None, + **kwargs, + ) -> DescribeVpcClassicLinkDnsSupportResult: + raise NotImplementedError + + @handler("DescribeVpcEndpointAssociations") + def describe_vpc_endpoint_associations( + self, + context: RequestContext, + dry_run: Boolean | None = None, + vpc_endpoint_ids: VpcEndpointIdList | None = None, + filters: FilterList | None = None, + max_results: maxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeVpcEndpointAssociationsResult: + raise NotImplementedError + + @handler("DescribeVpcEndpointConnectionNotifications") + def describe_vpc_endpoint_connection_notifications( + self, + context: RequestContext, + dry_run: Boolean | None = None, + connection_notification_id: ConnectionNotificationId | None = None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeVpcEndpointConnectionNotificationsResult: + raise NotImplementedError + + @handler("DescribeVpcEndpointConnections") + def describe_vpc_endpoint_connections( + self, + context: RequestContext, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeVpcEndpointConnectionsResult: + raise NotImplementedError + + @handler("DescribeVpcEndpointServiceConfigurations") + def describe_vpc_endpoint_service_configurations( + self, + context: RequestContext, + dry_run: Boolean | None = None, + service_ids: VpcEndpointServiceIdList | None = 
None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeVpcEndpointServiceConfigurationsResult: + raise NotImplementedError + + @handler("DescribeVpcEndpointServicePermissions") + def describe_vpc_endpoint_service_permissions( + self, + context: RequestContext, + service_id: VpcEndpointServiceId, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeVpcEndpointServicePermissionsResult: + raise NotImplementedError + + @handler("DescribeVpcEndpointServices") + def describe_vpc_endpoint_services( + self, + context: RequestContext, + dry_run: Boolean | None = None, + service_names: ValueStringList | None = None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + service_regions: ValueStringList | None = None, + **kwargs, + ) -> DescribeVpcEndpointServicesResult: + raise NotImplementedError + + @handler("DescribeVpcEndpoints") + def describe_vpc_endpoints( + self, + context: RequestContext, + dry_run: Boolean | None = None, + vpc_endpoint_ids: VpcEndpointIdList | None = None, + filters: FilterList | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeVpcEndpointsResult: + raise NotImplementedError + + @handler("DescribeVpcPeeringConnections") + def describe_vpc_peering_connections( + self, + context: RequestContext, + next_token: String | None = None, + max_results: DescribeVpcPeeringConnectionsMaxResults | None = None, + dry_run: Boolean | None = None, + vpc_peering_connection_ids: VpcPeeringConnectionIdList | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> DescribeVpcPeeringConnectionsResult: + raise NotImplementedError + + @handler("DescribeVpcs") + def describe_vpcs( + self, + context: RequestContext, + filters: FilterList | None = None, + vpc_ids: VpcIdStringList | None = None, + next_token: String | None = None, + max_results: DescribeVpcsMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVpcsResult: + raise NotImplementedError + + @handler("DescribeVpnConnections") + def describe_vpn_connections( + self, + context: RequestContext, + filters: FilterList | None = None, + vpn_connection_ids: VpnConnectionIdStringList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVpnConnectionsResult: + raise NotImplementedError + + @handler("DescribeVpnGateways") + def describe_vpn_gateways( + self, + context: RequestContext, + filters: FilterList | None = None, + vpn_gateway_ids: VpnGatewayIdStringList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DescribeVpnGatewaysResult: + raise NotImplementedError + + @handler("DetachClassicLinkVpc") + def detach_classic_link_vpc( + self, + context: RequestContext, + instance_id: InstanceId, + vpc_id: VpcId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DetachClassicLinkVpcResult: + raise NotImplementedError + + @handler("DetachInternetGateway") + def detach_internet_gateway( + self, + context: RequestContext, + internet_gateway_id: InternetGatewayId, + vpc_id: VpcId, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DetachNetworkInterface") + def detach_network_interface( + self, + context: RequestContext, + attachment_id: NetworkInterfaceAttachmentId, + dry_run: Boolean | None = 
None, + force: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DetachVerifiedAccessTrustProvider") + def detach_verified_access_trust_provider( + self, + context: RequestContext, + verified_access_instance_id: VerifiedAccessInstanceId, + verified_access_trust_provider_id: VerifiedAccessTrustProviderId, + client_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DetachVerifiedAccessTrustProviderResult: + raise NotImplementedError + + @handler("DetachVolume") + def detach_volume( + self, + context: RequestContext, + volume_id: VolumeIdWithResolver, + device: String | None = None, + force: Boolean | None = None, + instance_id: InstanceIdForResolver | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> VolumeAttachment: + raise NotImplementedError + + @handler("DetachVpnGateway") + def detach_vpn_gateway( + self, + context: RequestContext, + vpc_id: VpcId, + vpn_gateway_id: VpnGatewayId, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DisableAddressTransfer") + def disable_address_transfer( + self, + context: RequestContext, + allocation_id: AllocationId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisableAddressTransferResult: + raise NotImplementedError + + @handler("DisableAllowedImagesSettings") + def disable_allowed_images_settings( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> DisableAllowedImagesSettingsResult: + raise NotImplementedError + + @handler("DisableAwsNetworkPerformanceMetricSubscription") + def disable_aws_network_performance_metric_subscription( + self, + context: RequestContext, + source: String | None = None, + destination: String | None = None, + metric: MetricType | None = None, + statistic: StatisticType | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisableAwsNetworkPerformanceMetricSubscriptionResult: + raise NotImplementedError + + @handler("DisableEbsEncryptionByDefault") + def disable_ebs_encryption_by_default( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> DisableEbsEncryptionByDefaultResult: + raise NotImplementedError + + @handler("DisableFastLaunch") + def disable_fast_launch( + self, + context: RequestContext, + image_id: ImageId, + force: Boolean | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisableFastLaunchResult: + raise NotImplementedError + + @handler("DisableFastSnapshotRestores") + def disable_fast_snapshot_restores( + self, + context: RequestContext, + availability_zones: AvailabilityZoneStringList, + source_snapshot_ids: SnapshotIdStringList, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisableFastSnapshotRestoresResult: + raise NotImplementedError + + @handler("DisableImage") + def disable_image( + self, context: RequestContext, image_id: ImageId, dry_run: Boolean | None = None, **kwargs + ) -> DisableImageResult: + raise NotImplementedError + + @handler("DisableImageBlockPublicAccess") + def disable_image_block_public_access( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> DisableImageBlockPublicAccessResult: + raise NotImplementedError + + @handler("DisableImageDeprecation") + def disable_image_deprecation( + self, context: RequestContext, image_id: ImageId, dry_run: Boolean | None = None, **kwargs + ) -> DisableImageDeprecationResult: + raise NotImplementedError + + @handler("DisableImageDeregistrationProtection") + def 
disable_image_deregistration_protection( + self, context: RequestContext, image_id: ImageId, dry_run: Boolean | None = None, **kwargs + ) -> DisableImageDeregistrationProtectionResult: + raise NotImplementedError + + @handler("DisableIpamOrganizationAdminAccount") + def disable_ipam_organization_admin_account( + self, + context: RequestContext, + delegated_admin_account_id: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisableIpamOrganizationAdminAccountResult: + raise NotImplementedError + + @handler("DisableRouteServerPropagation") + def disable_route_server_propagation( + self, + context: RequestContext, + route_server_id: RouteServerId, + route_table_id: RouteTableId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisableRouteServerPropagationResult: + raise NotImplementedError + + @handler("DisableSerialConsoleAccess") + def disable_serial_console_access( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> DisableSerialConsoleAccessResult: + raise NotImplementedError + + @handler("DisableSnapshotBlockPublicAccess") + def disable_snapshot_block_public_access( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> DisableSnapshotBlockPublicAccessResult: + raise NotImplementedError + + @handler("DisableTransitGatewayRouteTablePropagation") + def disable_transit_gateway_route_table_propagation( + self, + context: RequestContext, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + transit_gateway_attachment_id: TransitGatewayAttachmentId | None = None, + dry_run: Boolean | None = None, + transit_gateway_route_table_announcement_id: TransitGatewayRouteTableAnnouncementId + | None = None, + **kwargs, + ) -> DisableTransitGatewayRouteTablePropagationResult: + raise NotImplementedError + + @handler("DisableVgwRoutePropagation") + def disable_vgw_route_propagation( + self, + context: RequestContext, + gateway_id: VpnGatewayId, + route_table_id: RouteTableId, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DisableVpcClassicLink") + def disable_vpc_classic_link( + self, context: RequestContext, vpc_id: VpcId, dry_run: Boolean | None = None, **kwargs + ) -> DisableVpcClassicLinkResult: + raise NotImplementedError + + @handler("DisableVpcClassicLinkDnsSupport") + def disable_vpc_classic_link_dns_support( + self, context: RequestContext, vpc_id: VpcId | None = None, **kwargs + ) -> DisableVpcClassicLinkDnsSupportResult: + raise NotImplementedError + + @handler("DisassociateAddress") + def disassociate_address( + self, + context: RequestContext, + association_id: ElasticIpAssociationId | None = None, + public_ip: EipAllocationPublicIp | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DisassociateCapacityReservationBillingOwner") + def disassociate_capacity_reservation_billing_owner( + self, + context: RequestContext, + capacity_reservation_id: CapacityReservationId, + unused_reservation_billing_owner_id: AccountID, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateCapacityReservationBillingOwnerResult: + raise NotImplementedError + + @handler("DisassociateClientVpnTargetNetwork") + def disassociate_client_vpn_target_network( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + association_id: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateClientVpnTargetNetworkResult: + raise NotImplementedError + + 
@handler("DisassociateEnclaveCertificateIamRole") + def disassociate_enclave_certificate_iam_role( + self, + context: RequestContext, + certificate_arn: CertificateId, + role_arn: RoleId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateEnclaveCertificateIamRoleResult: + raise NotImplementedError + + @handler("DisassociateIamInstanceProfile") + def disassociate_iam_instance_profile( + self, context: RequestContext, association_id: IamInstanceProfileAssociationId, **kwargs + ) -> DisassociateIamInstanceProfileResult: + raise NotImplementedError + + @handler("DisassociateInstanceEventWindow") + def disassociate_instance_event_window( + self, + context: RequestContext, + instance_event_window_id: InstanceEventWindowId, + association_target: InstanceEventWindowDisassociationRequest, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateInstanceEventWindowResult: + raise NotImplementedError + + @handler("DisassociateIpamByoasn") + def disassociate_ipam_byoasn( + self, + context: RequestContext, + asn: String, + cidr: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateIpamByoasnResult: + raise NotImplementedError + + @handler("DisassociateIpamResourceDiscovery") + def disassociate_ipam_resource_discovery( + self, + context: RequestContext, + ipam_resource_discovery_association_id: IpamResourceDiscoveryAssociationId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateIpamResourceDiscoveryResult: + raise NotImplementedError + + @handler("DisassociateNatGatewayAddress") + def disassociate_nat_gateway_address( + self, + context: RequestContext, + nat_gateway_id: NatGatewayId, + association_ids: EipAssociationIdList, + max_drain_duration_seconds: DrainSeconds | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateNatGatewayAddressResult: + raise NotImplementedError + + @handler("DisassociateRouteServer") + def disassociate_route_server( + self, + context: RequestContext, + route_server_id: RouteServerId, + vpc_id: VpcId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateRouteServerResult: + raise NotImplementedError + + @handler("DisassociateRouteTable") + def disassociate_route_table( + self, + context: RequestContext, + association_id: RouteTableAssociationId, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DisassociateSecurityGroupVpc") + def disassociate_security_group_vpc( + self, + context: RequestContext, + group_id: DisassociateSecurityGroupVpcSecurityGroupId, + vpc_id: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateSecurityGroupVpcResult: + raise NotImplementedError + + @handler("DisassociateSubnetCidrBlock") + def disassociate_subnet_cidr_block( + self, context: RequestContext, association_id: SubnetCidrAssociationId, **kwargs + ) -> DisassociateSubnetCidrBlockResult: + raise NotImplementedError + + @handler("DisassociateTransitGatewayMulticastDomain") + def disassociate_transit_gateway_multicast_domain( + self, + context: RequestContext, + transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId, + transit_gateway_attachment_id: TransitGatewayAttachmentId, + subnet_ids: TransitGatewaySubnetIdList, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateTransitGatewayMulticastDomainResult: + raise NotImplementedError + + @handler("DisassociateTransitGatewayPolicyTable") + def disassociate_transit_gateway_policy_table( + self, + context: RequestContext, + transit_gateway_policy_table_id: 
TransitGatewayPolicyTableId, + transit_gateway_attachment_id: TransitGatewayAttachmentId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateTransitGatewayPolicyTableResult: + raise NotImplementedError + + @handler("DisassociateTransitGatewayRouteTable") + def disassociate_transit_gateway_route_table( + self, + context: RequestContext, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + transit_gateway_attachment_id: TransitGatewayAttachmentId, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateTransitGatewayRouteTableResult: + raise NotImplementedError + + @handler("DisassociateTrunkInterface") + def disassociate_trunk_interface( + self, + context: RequestContext, + association_id: TrunkInterfaceAssociationId, + client_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> DisassociateTrunkInterfaceResult: + raise NotImplementedError + + @handler("DisassociateVpcCidrBlock") + def disassociate_vpc_cidr_block( + self, context: RequestContext, association_id: VpcCidrAssociationId, **kwargs + ) -> DisassociateVpcCidrBlockResult: + raise NotImplementedError + + @handler("EnableAddressTransfer") + def enable_address_transfer( + self, + context: RequestContext, + allocation_id: AllocationId, + transfer_account_id: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableAddressTransferResult: + raise NotImplementedError + + @handler("EnableAllowedImagesSettings") + def enable_allowed_images_settings( + self, + context: RequestContext, + allowed_images_settings_state: AllowedImagesSettingsEnabledState, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableAllowedImagesSettingsResult: + raise NotImplementedError + + @handler("EnableAwsNetworkPerformanceMetricSubscription") + def enable_aws_network_performance_metric_subscription( + self, + context: RequestContext, + source: String | None = None, + destination: String | None = None, + metric: MetricType | None = None, + statistic: StatisticType | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableAwsNetworkPerformanceMetricSubscriptionResult: + raise NotImplementedError + + @handler("EnableEbsEncryptionByDefault") + def enable_ebs_encryption_by_default( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> EnableEbsEncryptionByDefaultResult: + raise NotImplementedError + + @handler("EnableFastLaunch") + def enable_fast_launch( + self, + context: RequestContext, + image_id: ImageId, + resource_type: String | None = None, + snapshot_configuration: FastLaunchSnapshotConfigurationRequest | None = None, + launch_template: FastLaunchLaunchTemplateSpecificationRequest | None = None, + max_parallel_launches: Integer | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableFastLaunchResult: + raise NotImplementedError + + @handler("EnableFastSnapshotRestores") + def enable_fast_snapshot_restores( + self, + context: RequestContext, + availability_zones: AvailabilityZoneStringList, + source_snapshot_ids: SnapshotIdStringList, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableFastSnapshotRestoresResult: + raise NotImplementedError + + @handler("EnableImage") + def enable_image( + self, context: RequestContext, image_id: ImageId, dry_run: Boolean | None = None, **kwargs + ) -> EnableImageResult: + raise NotImplementedError + + @handler("EnableImageBlockPublicAccess") + def enable_image_block_public_access( + self, + context: RequestContext, + image_block_public_access_state: 
ImageBlockPublicAccessEnabledState, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableImageBlockPublicAccessResult: + raise NotImplementedError + + @handler("EnableImageDeprecation") + def enable_image_deprecation( + self, + context: RequestContext, + image_id: ImageId, + deprecate_at: MillisecondDateTime, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableImageDeprecationResult: + raise NotImplementedError + + @handler("EnableImageDeregistrationProtection") + def enable_image_deregistration_protection( + self, + context: RequestContext, + image_id: ImageId, + with_cooldown: Boolean | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableImageDeregistrationProtectionResult: + raise NotImplementedError + + @handler("EnableIpamOrganizationAdminAccount") + def enable_ipam_organization_admin_account( + self, + context: RequestContext, + delegated_admin_account_id: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableIpamOrganizationAdminAccountResult: + raise NotImplementedError + + @handler("EnableReachabilityAnalyzerOrganizationSharing") + def enable_reachability_analyzer_organization_sharing( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> EnableReachabilityAnalyzerOrganizationSharingResult: + raise NotImplementedError + + @handler("EnableRouteServerPropagation") + def enable_route_server_propagation( + self, + context: RequestContext, + route_server_id: RouteServerId, + route_table_id: RouteTableId, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableRouteServerPropagationResult: + raise NotImplementedError + + @handler("EnableSerialConsoleAccess") + def enable_serial_console_access( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> EnableSerialConsoleAccessResult: + raise NotImplementedError + + @handler("EnableSnapshotBlockPublicAccess") + def enable_snapshot_block_public_access( + self, + context: RequestContext, + state: SnapshotBlockPublicAccessState, + dry_run: Boolean | None = None, + **kwargs, + ) -> EnableSnapshotBlockPublicAccessResult: + raise NotImplementedError + + @handler("EnableTransitGatewayRouteTablePropagation") + def enable_transit_gateway_route_table_propagation( + self, + context: RequestContext, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + transit_gateway_attachment_id: TransitGatewayAttachmentId | None = None, + dry_run: Boolean | None = None, + transit_gateway_route_table_announcement_id: TransitGatewayRouteTableAnnouncementId + | None = None, + **kwargs, + ) -> EnableTransitGatewayRouteTablePropagationResult: + raise NotImplementedError + + @handler("EnableVgwRoutePropagation") + def enable_vgw_route_propagation( + self, + context: RequestContext, + gateway_id: VpnGatewayId, + route_table_id: RouteTableId, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("EnableVolumeIO") + def enable_volume_io( + self, context: RequestContext, volume_id: VolumeId, dry_run: Boolean | None = None, **kwargs + ) -> None: + raise NotImplementedError + + @handler("EnableVpcClassicLink") + def enable_vpc_classic_link( + self, context: RequestContext, vpc_id: VpcId, dry_run: Boolean | None = None, **kwargs + ) -> EnableVpcClassicLinkResult: + raise NotImplementedError + + @handler("EnableVpcClassicLinkDnsSupport") + def enable_vpc_classic_link_dns_support( + self, context: RequestContext, vpc_id: VpcId | None = None, **kwargs + ) -> EnableVpcClassicLinkDnsSupportResult: + raise NotImplementedError + + 
@handler("ExportClientVpnClientCertificateRevocationList") + def export_client_vpn_client_certificate_revocation_list( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + dry_run: Boolean | None = None, + **kwargs, + ) -> ExportClientVpnClientCertificateRevocationListResult: + raise NotImplementedError + + @handler("ExportClientVpnClientConfiguration") + def export_client_vpn_client_configuration( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + dry_run: Boolean | None = None, + **kwargs, + ) -> ExportClientVpnClientConfigurationResult: + raise NotImplementedError + + @handler("ExportImage") + def export_image( + self, + context: RequestContext, + disk_image_format: DiskImageFormat, + image_id: ImageId, + s3_export_location: ExportTaskS3LocationRequest, + client_token: String | None = None, + description: String | None = None, + dry_run: Boolean | None = None, + role_name: String | None = None, + tag_specifications: TagSpecificationList | None = None, + **kwargs, + ) -> ExportImageResult: + raise NotImplementedError + + @handler("ExportTransitGatewayRoutes") + def export_transit_gateway_routes( + self, + context: RequestContext, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + s3_bucket: String, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> ExportTransitGatewayRoutesResult: + raise NotImplementedError + + @handler("ExportVerifiedAccessInstanceClientConfiguration") + def export_verified_access_instance_client_configuration( + self, + context: RequestContext, + verified_access_instance_id: VerifiedAccessInstanceId, + dry_run: Boolean | None = None, + **kwargs, + ) -> ExportVerifiedAccessInstanceClientConfigurationResult: + raise NotImplementedError + + @handler("GetActiveVpnTunnelStatus") + def get_active_vpn_tunnel_status( + self, + context: RequestContext, + vpn_connection_id: VpnConnectionId, + vpn_tunnel_outside_ip_address: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetActiveVpnTunnelStatusResult: + raise NotImplementedError + + @handler("GetAllowedImagesSettings") + def get_allowed_images_settings( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> GetAllowedImagesSettingsResult: + raise NotImplementedError + + @handler("GetAssociatedEnclaveCertificateIamRoles") + def get_associated_enclave_certificate_iam_roles( + self, + context: RequestContext, + certificate_arn: CertificateId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetAssociatedEnclaveCertificateIamRolesResult: + raise NotImplementedError + + @handler("GetAssociatedIpv6PoolCidrs") + def get_associated_ipv6_pool_cidrs( + self, + context: RequestContext, + pool_id: Ipv6PoolEc2Id, + next_token: NextToken | None = None, + max_results: Ipv6PoolMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetAssociatedIpv6PoolCidrsResult: + raise NotImplementedError + + @handler("GetAwsNetworkPerformanceData") + def get_aws_network_performance_data( + self, + context: RequestContext, + data_queries: DataQueries | None = None, + start_time: MillisecondDateTime | None = None, + end_time: MillisecondDateTime | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetAwsNetworkPerformanceDataResult: + raise NotImplementedError + + @handler("GetCapacityReservationUsage") + def get_capacity_reservation_usage( + self, + context: RequestContext, + 
capacity_reservation_id: CapacityReservationId, + next_token: String | None = None, + max_results: GetCapacityReservationUsageRequestMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetCapacityReservationUsageResult: + raise NotImplementedError + + @handler("GetCoipPoolUsage") + def get_coip_pool_usage( + self, + context: RequestContext, + pool_id: Ipv4PoolCoipId, + filters: FilterList | None = None, + max_results: CoipPoolMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetCoipPoolUsageResult: + raise NotImplementedError + + @handler("GetConsoleOutput") + def get_console_output( + self, + context: RequestContext, + instance_id: InstanceId, + latest: Boolean | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetConsoleOutputResult: + raise NotImplementedError + + @handler("GetConsoleScreenshot") + def get_console_screenshot( + self, + context: RequestContext, + instance_id: InstanceId, + dry_run: Boolean | None = None, + wake_up: Boolean | None = None, + **kwargs, + ) -> GetConsoleScreenshotResult: + raise NotImplementedError + + @handler("GetDeclarativePoliciesReportSummary") + def get_declarative_policies_report_summary( + self, + context: RequestContext, + report_id: DeclarativePoliciesReportId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetDeclarativePoliciesReportSummaryResult: + raise NotImplementedError + + @handler("GetDefaultCreditSpecification") + def get_default_credit_specification( + self, + context: RequestContext, + instance_family: UnlimitedSupportedInstanceFamily, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetDefaultCreditSpecificationResult: + raise NotImplementedError + + @handler("GetEbsDefaultKmsKeyId") + def get_ebs_default_kms_key_id( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> GetEbsDefaultKmsKeyIdResult: + raise NotImplementedError + + @handler("GetEbsEncryptionByDefault") + def get_ebs_encryption_by_default( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> GetEbsEncryptionByDefaultResult: + raise NotImplementedError + + @handler("GetFlowLogsIntegrationTemplate") + def get_flow_logs_integration_template( + self, + context: RequestContext, + flow_log_id: VpcFlowLogId, + config_delivery_s3_destination_arn: String, + integrate_services: IntegrateServices, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetFlowLogsIntegrationTemplateResult: + raise NotImplementedError + + @handler("GetGroupsForCapacityReservation") + def get_groups_for_capacity_reservation( + self, + context: RequestContext, + capacity_reservation_id: CapacityReservationId, + next_token: String | None = None, + max_results: GetGroupsForCapacityReservationRequestMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetGroupsForCapacityReservationResult: + raise NotImplementedError + + @handler("GetHostReservationPurchasePreview") + def get_host_reservation_purchase_preview( + self, + context: RequestContext, + host_id_set: RequestHostIdSet, + offering_id: OfferingId, + **kwargs, + ) -> GetHostReservationPurchasePreviewResult: + raise NotImplementedError + + @handler("GetImageBlockPublicAccessState") + def get_image_block_public_access_state( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> GetImageBlockPublicAccessStateResult: + raise NotImplementedError + + @handler("GetInstanceMetadataDefaults") + def get_instance_metadata_defaults( + self, 
context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> GetInstanceMetadataDefaultsResult: + raise NotImplementedError + + @handler("GetInstanceTpmEkPub") + def get_instance_tpm_ek_pub( + self, + context: RequestContext, + instance_id: InstanceId, + key_type: EkPubKeyType, + key_format: EkPubKeyFormat, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetInstanceTpmEkPubResult: + raise NotImplementedError + + @handler("GetInstanceTypesFromInstanceRequirements") + def get_instance_types_from_instance_requirements( + self, + context: RequestContext, + architecture_types: ArchitectureTypeSet, + virtualization_types: VirtualizationTypeSet, + instance_requirements: InstanceRequirementsRequest, + dry_run: Boolean | None = None, + max_results: Integer | None = None, + next_token: String | None = None, + **kwargs, + ) -> GetInstanceTypesFromInstanceRequirementsResult: + raise NotImplementedError + + @handler("GetInstanceUefiData") + def get_instance_uefi_data( + self, + context: RequestContext, + instance_id: InstanceId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetInstanceUefiDataResult: + raise NotImplementedError + + @handler("GetIpamAddressHistory") + def get_ipam_address_history( + self, + context: RequestContext, + cidr: String, + ipam_scope_id: IpamScopeId, + dry_run: Boolean | None = None, + vpc_id: String | None = None, + start_time: MillisecondDateTime | None = None, + end_time: MillisecondDateTime | None = None, + max_results: IpamAddressHistoryMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetIpamAddressHistoryResult: + raise NotImplementedError + + @handler("GetIpamDiscoveredAccounts") + def get_ipam_discovered_accounts( + self, + context: RequestContext, + ipam_resource_discovery_id: IpamResourceDiscoveryId, + discovery_region: String, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + next_token: NextToken | None = None, + max_results: IpamMaxResults | None = None, + **kwargs, + ) -> GetIpamDiscoveredAccountsResult: + raise NotImplementedError + + @handler("GetIpamDiscoveredPublicAddresses") + def get_ipam_discovered_public_addresses( + self, + context: RequestContext, + ipam_resource_discovery_id: IpamResourceDiscoveryId, + address_region: String, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + next_token: NextToken | None = None, + max_results: IpamMaxResults | None = None, + **kwargs, + ) -> GetIpamDiscoveredPublicAddressesResult: + raise NotImplementedError + + @handler("GetIpamDiscoveredResourceCidrs") + def get_ipam_discovered_resource_cidrs( + self, + context: RequestContext, + ipam_resource_discovery_id: IpamResourceDiscoveryId, + resource_region: String, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + next_token: NextToken | None = None, + max_results: IpamMaxResults | None = None, + **kwargs, + ) -> GetIpamDiscoveredResourceCidrsResult: + raise NotImplementedError + + @handler("GetIpamPoolAllocations") + def get_ipam_pool_allocations( + self, + context: RequestContext, + ipam_pool_id: IpamPoolId, + dry_run: Boolean | None = None, + ipam_pool_allocation_id: IpamPoolAllocationId | None = None, + filters: FilterList | None = None, + max_results: GetIpamPoolAllocationsMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetIpamPoolAllocationsResult: + raise NotImplementedError + + @handler("GetIpamPoolCidrs") + def get_ipam_pool_cidrs( + self, + context: RequestContext, + ipam_pool_id: IpamPoolId, + dry_run: 
Boolean | None = None, + filters: FilterList | None = None, + max_results: IpamMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetIpamPoolCidrsResult: + raise NotImplementedError + + @handler("GetIpamResourceCidrs") + def get_ipam_resource_cidrs( + self, + context: RequestContext, + ipam_scope_id: IpamScopeId, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + max_results: IpamMaxResults | None = None, + next_token: NextToken | None = None, + ipam_pool_id: IpamPoolId | None = None, + resource_id: String | None = None, + resource_type: IpamResourceType | None = None, + resource_tag: RequestIpamResourceTag | None = None, + resource_owner: String | None = None, + **kwargs, + ) -> GetIpamResourceCidrsResult: + raise NotImplementedError + + @handler("GetLaunchTemplateData") + def get_launch_template_data( + self, + context: RequestContext, + instance_id: InstanceId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetLaunchTemplateDataResult: + raise NotImplementedError + + @handler("GetManagedPrefixListAssociations") + def get_managed_prefix_list_associations( + self, + context: RequestContext, + prefix_list_id: PrefixListResourceId, + dry_run: Boolean | None = None, + max_results: GetManagedPrefixListAssociationsMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetManagedPrefixListAssociationsResult: + raise NotImplementedError + + @handler("GetManagedPrefixListEntries") + def get_managed_prefix_list_entries( + self, + context: RequestContext, + prefix_list_id: PrefixListResourceId, + dry_run: Boolean | None = None, + target_version: Long | None = None, + max_results: PrefixListMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetManagedPrefixListEntriesResult: + raise NotImplementedError + + @handler("GetNetworkInsightsAccessScopeAnalysisFindings") + def get_network_insights_access_scope_analysis_findings( + self, + context: RequestContext, + network_insights_access_scope_analysis_id: NetworkInsightsAccessScopeAnalysisId, + max_results: GetNetworkInsightsAccessScopeAnalysisFindingsMaxResults | None = None, + next_token: NextToken | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetNetworkInsightsAccessScopeAnalysisFindingsResult: + raise NotImplementedError + + @handler("GetNetworkInsightsAccessScopeContent") + def get_network_insights_access_scope_content( + self, + context: RequestContext, + network_insights_access_scope_id: NetworkInsightsAccessScopeId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetNetworkInsightsAccessScopeContentResult: + raise NotImplementedError + + @handler("GetPasswordData") + def get_password_data( + self, + context: RequestContext, + instance_id: InstanceId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetPasswordDataResult: + raise NotImplementedError + + @handler("GetReservedInstancesExchangeQuote") + def get_reserved_instances_exchange_quote( + self, + context: RequestContext, + reserved_instance_ids: ReservedInstanceIdSet, + dry_run: Boolean | None = None, + target_configurations: TargetConfigurationRequestSet | None = None, + **kwargs, + ) -> GetReservedInstancesExchangeQuoteResult: + raise NotImplementedError + + @handler("GetRouteServerAssociations") + def get_route_server_associations( + self, + context: RequestContext, + route_server_id: RouteServerId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetRouteServerAssociationsResult: + raise NotImplementedError + + 
@handler("GetRouteServerPropagations") + def get_route_server_propagations( + self, + context: RequestContext, + route_server_id: RouteServerId, + route_table_id: RouteTableId | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetRouteServerPropagationsResult: + raise NotImplementedError + + @handler("GetRouteServerRoutingDatabase") + def get_route_server_routing_database( + self, + context: RequestContext, + route_server_id: RouteServerId, + next_token: String | None = None, + max_results: RouteServerMaxResults | None = None, + dry_run: Boolean | None = None, + filters: FilterList | None = None, + **kwargs, + ) -> GetRouteServerRoutingDatabaseResult: + raise NotImplementedError + + @handler("GetSecurityGroupsForVpc") + def get_security_groups_for_vpc( + self, + context: RequestContext, + vpc_id: VpcId, + next_token: String | None = None, + max_results: GetSecurityGroupsForVpcRequestMaxResults | None = None, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetSecurityGroupsForVpcResult: + raise NotImplementedError + + @handler("GetSerialConsoleAccessStatus") + def get_serial_console_access_status( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> GetSerialConsoleAccessStatusResult: + raise NotImplementedError + + @handler("GetSnapshotBlockPublicAccessState") + def get_snapshot_block_public_access_state( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> GetSnapshotBlockPublicAccessStateResult: + raise NotImplementedError + + @handler("GetSpotPlacementScores") + def get_spot_placement_scores( + self, + context: RequestContext, + target_capacity: SpotPlacementScoresTargetCapacity, + instance_types: InstanceTypes | None = None, + target_capacity_unit_type: TargetCapacityUnitType | None = None, + single_availability_zone: Boolean | None = None, + region_names: RegionNames | None = None, + instance_requirements_with_metadata: InstanceRequirementsWithMetadataRequest | None = None, + dry_run: Boolean | None = None, + max_results: SpotPlacementScoresMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> GetSpotPlacementScoresResult: + raise NotImplementedError + + @handler("GetSubnetCidrReservations") + def get_subnet_cidr_reservations( + self, + context: RequestContext, + subnet_id: SubnetId, + filters: FilterList | None = None, + dry_run: Boolean | None = None, + next_token: String | None = None, + max_results: GetSubnetCidrReservationsMaxResults | None = None, + **kwargs, + ) -> GetSubnetCidrReservationsResult: + raise NotImplementedError + + @handler("GetTransitGatewayAttachmentPropagations") + def get_transit_gateway_attachment_propagations( + self, + context: RequestContext, + transit_gateway_attachment_id: TransitGatewayAttachmentId, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetTransitGatewayAttachmentPropagationsResult: + raise NotImplementedError + + @handler("GetTransitGatewayMulticastDomainAssociations") + def get_transit_gateway_multicast_domain_associations( + self, + context: RequestContext, + transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetTransitGatewayMulticastDomainAssociationsResult: + raise 
NotImplementedError + + @handler("GetTransitGatewayPolicyTableAssociations") + def get_transit_gateway_policy_table_associations( + self, + context: RequestContext, + transit_gateway_policy_table_id: TransitGatewayPolicyTableId, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetTransitGatewayPolicyTableAssociationsResult: + raise NotImplementedError + + @handler("GetTransitGatewayPolicyTableEntries") + def get_transit_gateway_policy_table_entries( + self, + context: RequestContext, + transit_gateway_policy_table_id: TransitGatewayPolicyTableId, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetTransitGatewayPolicyTableEntriesResult: + raise NotImplementedError + + @handler("GetTransitGatewayPrefixListReferences") + def get_transit_gateway_prefix_list_references( + self, + context: RequestContext, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetTransitGatewayPrefixListReferencesResult: + raise NotImplementedError + + @handler("GetTransitGatewayRouteTableAssociations") + def get_transit_gateway_route_table_associations( + self, + context: RequestContext, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetTransitGatewayRouteTableAssociationsResult: + raise NotImplementedError + + @handler("GetTransitGatewayRouteTablePropagations") + def get_transit_gateway_route_table_propagations( + self, + context: RequestContext, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetTransitGatewayRouteTablePropagationsResult: + raise NotImplementedError + + @handler("GetVerifiedAccessEndpointPolicy") + def get_verified_access_endpoint_policy( + self, + context: RequestContext, + verified_access_endpoint_id: VerifiedAccessEndpointId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetVerifiedAccessEndpointPolicyResult: + raise NotImplementedError + + @handler("GetVerifiedAccessEndpointTargets") + def get_verified_access_endpoint_targets( + self, + context: RequestContext, + verified_access_endpoint_id: VerifiedAccessEndpointId, + max_results: GetVerifiedAccessEndpointTargetsMaxResults | None = None, + next_token: NextToken | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetVerifiedAccessEndpointTargetsResult: + raise NotImplementedError + + @handler("GetVerifiedAccessGroupPolicy") + def get_verified_access_group_policy( + self, + context: RequestContext, + verified_access_group_id: VerifiedAccessGroupId, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetVerifiedAccessGroupPolicyResult: + raise NotImplementedError + + @handler("GetVpnConnectionDeviceSampleConfiguration") + def get_vpn_connection_device_sample_configuration( + self, + context: RequestContext, + vpn_connection_id: VpnConnectionId, + vpn_connection_device_type_id: 
VpnConnectionDeviceTypeId, + internet_key_exchange_version: String | None = None, + sample_type: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetVpnConnectionDeviceSampleConfigurationResult: + raise NotImplementedError + + @handler("GetVpnConnectionDeviceTypes") + def get_vpn_connection_device_types( + self, + context: RequestContext, + max_results: GVCDMaxResults | None = None, + next_token: NextToken | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetVpnConnectionDeviceTypesResult: + raise NotImplementedError + + @handler("GetVpnTunnelReplacementStatus") + def get_vpn_tunnel_replacement_status( + self, + context: RequestContext, + vpn_connection_id: VpnConnectionId, + vpn_tunnel_outside_ip_address: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> GetVpnTunnelReplacementStatusResult: + raise NotImplementedError + + @handler("ImportClientVpnClientCertificateRevocationList") + def import_client_vpn_client_certificate_revocation_list( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + certificate_revocation_list: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> ImportClientVpnClientCertificateRevocationListResult: + raise NotImplementedError + + @handler("ImportImage") + def import_image( + self, + context: RequestContext, + architecture: String | None = None, + client_data: ClientData | None = None, + client_token: String | None = None, + description: String | None = None, + disk_containers: ImageDiskContainerList | None = None, + dry_run: Boolean | None = None, + encrypted: Boolean | None = None, + hypervisor: String | None = None, + kms_key_id: KmsKeyId | None = None, + license_type: String | None = None, + platform: String | None = None, + role_name: String | None = None, + license_specifications: ImportImageLicenseSpecificationListRequest | None = None, + tag_specifications: TagSpecificationList | None = None, + usage_operation: String | None = None, + boot_mode: BootModeValues | None = None, + **kwargs, + ) -> ImportImageResult: + raise NotImplementedError + + @handler("ImportInstance") + def import_instance( + self, + context: RequestContext, + platform: PlatformValues, + dry_run: Boolean | None = None, + description: String | None = None, + launch_specification: ImportInstanceLaunchSpecification | None = None, + disk_images: DiskImageList | None = None, + **kwargs, + ) -> ImportInstanceResult: + raise NotImplementedError + + @handler("ImportKeyPair") + def import_key_pair( + self, + context: RequestContext, + key_name: String, + public_key_material: Blob, + tag_specifications: TagSpecificationList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> ImportKeyPairResult: + raise NotImplementedError + + @handler("ImportSnapshot") + def import_snapshot( + self, + context: RequestContext, + client_data: ClientData | None = None, + client_token: String | None = None, + description: String | None = None, + disk_container: SnapshotDiskContainer | None = None, + dry_run: Boolean | None = None, + encrypted: Boolean | None = None, + kms_key_id: KmsKeyId | None = None, + role_name: String | None = None, + tag_specifications: TagSpecificationList | None = None, + **kwargs, + ) -> ImportSnapshotResult: + raise NotImplementedError + + @handler("ImportVolume") + def import_volume( + self, + context: RequestContext, + availability_zone: String, + image: DiskImageDetail, + volume: VolumeDetail, + dry_run: Boolean | None = None, + description: String | None = None, + **kwargs, + ) 
-> ImportVolumeResult: + raise NotImplementedError + + @handler("ListImagesInRecycleBin") + def list_images_in_recycle_bin( + self, + context: RequestContext, + image_ids: ImageIdStringList | None = None, + next_token: String | None = None, + max_results: ListImagesInRecycleBinMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> ListImagesInRecycleBinResult: + raise NotImplementedError + + @handler("ListSnapshotsInRecycleBin") + def list_snapshots_in_recycle_bin( + self, + context: RequestContext, + max_results: ListSnapshotsInRecycleBinMaxResults | None = None, + next_token: String | None = None, + snapshot_ids: SnapshotIdStringList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> ListSnapshotsInRecycleBinResult: + raise NotImplementedError + + @handler("LockSnapshot") + def lock_snapshot( + self, + context: RequestContext, + snapshot_id: SnapshotId, + lock_mode: LockMode, + dry_run: Boolean | None = None, + cool_off_period: CoolOffPeriodRequestHours | None = None, + lock_duration: RetentionPeriodRequestDays | None = None, + expiration_date: MillisecondDateTime | None = None, + **kwargs, + ) -> LockSnapshotResult: + raise NotImplementedError + + @handler("ModifyAddressAttribute") + def modify_address_attribute( + self, + context: RequestContext, + allocation_id: AllocationId, + domain_name: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> ModifyAddressAttributeResult: + raise NotImplementedError + + @handler("ModifyAvailabilityZoneGroup") + def modify_availability_zone_group( + self, + context: RequestContext, + group_name: String, + opt_in_status: ModifyAvailabilityZoneOptInStatus, + dry_run: Boolean | None = None, + **kwargs, + ) -> ModifyAvailabilityZoneGroupResult: + raise NotImplementedError + + @handler("ModifyCapacityReservation") + def modify_capacity_reservation( + self, + context: RequestContext, + capacity_reservation_id: CapacityReservationId, + instance_count: Integer | None = None, + end_date: DateTime | None = None, + end_date_type: EndDateType | None = None, + accept: Boolean | None = None, + dry_run: Boolean | None = None, + additional_info: String | None = None, + instance_match_criteria: InstanceMatchCriteria | None = None, + **kwargs, + ) -> ModifyCapacityReservationResult: + raise NotImplementedError + + @handler("ModifyCapacityReservationFleet") + def modify_capacity_reservation_fleet( + self, + context: RequestContext, + capacity_reservation_fleet_id: CapacityReservationFleetId, + total_target_capacity: Integer | None = None, + end_date: MillisecondDateTime | None = None, + dry_run: Boolean | None = None, + remove_end_date: Boolean | None = None, + **kwargs, + ) -> ModifyCapacityReservationFleetResult: + raise NotImplementedError + + @handler("ModifyClientVpnEndpoint") + def modify_client_vpn_endpoint( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + server_certificate_arn: String | None = None, + connection_log_options: ConnectionLogOptions | None = None, + dns_servers: DnsServersOptionsModifyStructure | None = None, + vpn_port: Integer | None = None, + description: String | None = None, + split_tunnel: Boolean | None = None, + dry_run: Boolean | None = None, + security_group_ids: ClientVpnSecurityGroupIdSet | None = None, + vpc_id: VpcId | None = None, + self_service_portal: SelfServicePortal | None = None, + client_connect_options: ClientConnectOptions | None = None, + session_timeout_hours: Integer | None = None, + client_login_banner_options: 
ClientLoginBannerOptions | None = None, + client_route_enforcement_options: ClientRouteEnforcementOptions | None = None, + disconnect_on_session_timeout: Boolean | None = None, + **kwargs, + ) -> ModifyClientVpnEndpointResult: + raise NotImplementedError + + @handler("ModifyDefaultCreditSpecification") + def modify_default_credit_specification( + self, + context: RequestContext, + instance_family: UnlimitedSupportedInstanceFamily, + cpu_credits: String, + dry_run: Boolean | None = None, + **kwargs, + ) -> ModifyDefaultCreditSpecificationResult: + raise NotImplementedError + + @handler("ModifyEbsDefaultKmsKeyId") + def modify_ebs_default_kms_key_id( + self, + context: RequestContext, + kms_key_id: KmsKeyId, + dry_run: Boolean | None = None, + **kwargs, + ) -> ModifyEbsDefaultKmsKeyIdResult: + raise NotImplementedError + + @handler("ModifyFleet", expand=False) + def modify_fleet( + self, context: RequestContext, request: ModifyFleetRequest, **kwargs + ) -> ModifyFleetResult: + raise NotImplementedError + + @handler("ModifyFpgaImageAttribute") + def modify_fpga_image_attribute( + self, + context: RequestContext, + fpga_image_id: FpgaImageId, + dry_run: Boolean | None = None, + attribute: FpgaImageAttributeName | None = None, + operation_type: OperationType | None = None, + user_ids: UserIdStringList | None = None, + user_groups: UserGroupStringList | None = None, + product_codes: ProductCodeStringList | None = None, + load_permission: LoadPermissionModifications | None = None, + description: String | None = None, + name: String | None = None, + **kwargs, + ) -> ModifyFpgaImageAttributeResult: + raise NotImplementedError + + @handler("ModifyHosts") + def modify_hosts( + self, + context: RequestContext, + host_ids: RequestHostIdList, + host_recovery: HostRecovery | None = None, + instance_type: String | None = None, + instance_family: String | None = None, + host_maintenance: HostMaintenance | None = None, + auto_placement: AutoPlacement | None = None, + **kwargs, + ) -> ModifyHostsResult: + raise NotImplementedError + + @handler("ModifyIdFormat") + def modify_id_format( + self, context: RequestContext, resource: String, use_long_ids: Boolean, **kwargs + ) -> None: + raise NotImplementedError + + @handler("ModifyIdentityIdFormat") + def modify_identity_id_format( + self, + context: RequestContext, + resource: String, + use_long_ids: Boolean, + principal_arn: String, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ModifyImageAttribute") + def modify_image_attribute( + self, + context: RequestContext, + image_id: ImageId, + attribute: String | None = None, + description: AttributeValue | None = None, + launch_permission: LaunchPermissionModifications | None = None, + operation_type: OperationType | None = None, + product_codes: ProductCodeStringList | None = None, + user_groups: UserGroupStringList | None = None, + user_ids: UserIdStringList | None = None, + value: String | None = None, + organization_arns: OrganizationArnStringList | None = None, + organizational_unit_arns: OrganizationalUnitArnStringList | None = None, + imds_support: AttributeValue | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ModifyInstanceAttribute") + def modify_instance_attribute( + self, + context: RequestContext, + instance_id: InstanceId, + source_dest_check: AttributeBooleanValue | None = None, + disable_api_stop: AttributeBooleanValue | None = None, + dry_run: Boolean | None = None, + attribute: InstanceAttributeName | None = 
None,
+        value: String | None = None,
+        block_device_mappings: InstanceBlockDeviceMappingSpecificationList | None = None,
+        disable_api_termination: AttributeBooleanValue | None = None,
+        instance_type: AttributeValue | None = None,
+        kernel: AttributeValue | None = None,
+        ramdisk: AttributeValue | None = None,
+        user_data: BlobAttributeValue | None = None,
+        instance_initiated_shutdown_behavior: AttributeValue | None = None,
+        groups: GroupIdStringList | None = None,
+        ebs_optimized: AttributeBooleanValue | None = None,
+        sriov_net_support: AttributeValue | None = None,
+        ena_support: AttributeBooleanValue | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceCapacityReservationAttributes")
+    def modify_instance_capacity_reservation_attributes(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        capacity_reservation_specification: CapacityReservationSpecification,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyInstanceCapacityReservationAttributesResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceCpuOptions")
+    def modify_instance_cpu_options(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        core_count: Integer,
+        threads_per_core: Integer,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyInstanceCpuOptionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceCreditSpecification")
+    def modify_instance_credit_specification(
+        self,
+        context: RequestContext,
+        instance_credit_specifications: InstanceCreditSpecificationListRequest,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> ModifyInstanceCreditSpecificationResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceEventStartTime")
+    def modify_instance_event_start_time(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        instance_event_id: String,
+        not_before: DateTime,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyInstanceEventStartTimeResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceEventWindow")
+    def modify_instance_event_window(
+        self,
+        context: RequestContext,
+        instance_event_window_id: InstanceEventWindowId,
+        dry_run: Boolean | None = None,
+        name: String | None = None,
+        time_ranges: InstanceEventWindowTimeRangeRequestSet | None = None,
+        cron_expression: InstanceEventWindowCronExpression | None = None,
+        **kwargs,
+    ) -> ModifyInstanceEventWindowResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceMaintenanceOptions")
+    def modify_instance_maintenance_options(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        auto_recovery: InstanceAutoRecoveryState | None = None,
+        reboot_migration: InstanceRebootMigrationState | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyInstanceMaintenanceOptionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceMetadataDefaults")
+    def modify_instance_metadata_defaults(
+        self,
+        context: RequestContext,
+        http_tokens: MetadataDefaultHttpTokensState | None = None,
+        http_put_response_hop_limit: BoxedInteger | None = None,
+        http_endpoint: DefaultInstanceMetadataEndpointState | None = None,
+        instance_metadata_tags: DefaultInstanceMetadataTagsState | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyInstanceMetadataDefaultsResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceMetadataOptions")
+    def modify_instance_metadata_options(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        http_tokens: HttpTokensState | None = None,
+        http_put_response_hop_limit: Integer | None = None,
+        http_endpoint: InstanceMetadataEndpointState | None = None,
+        dry_run: Boolean | None = None,
+        http_protocol_ipv6: InstanceMetadataProtocolState | None = None,
+        instance_metadata_tags: InstanceMetadataTagsState | None = None,
+        **kwargs,
+    ) -> ModifyInstanceMetadataOptionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstanceNetworkPerformanceOptions")
+    def modify_instance_network_performance_options(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        bandwidth_weighting: InstanceBandwidthWeighting,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyInstanceNetworkPerformanceResult:
+        raise NotImplementedError
+
+    @handler("ModifyInstancePlacement")
+    def modify_instance_placement(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        group_name: PlacementGroupName | None = None,
+        partition_number: Integer | None = None,
+        host_resource_group_arn: String | None = None,
+        group_id: PlacementGroupId | None = None,
+        tenancy: HostTenancy | None = None,
+        affinity: Affinity | None = None,
+        host_id: DedicatedHostId | None = None,
+        **kwargs,
+    ) -> ModifyInstancePlacementResult:
+        raise NotImplementedError
+
+    @handler("ModifyIpam")
+    def modify_ipam(
+        self,
+        context: RequestContext,
+        ipam_id: IpamId,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        add_operating_regions: AddIpamOperatingRegionSet | None = None,
+        remove_operating_regions: RemoveIpamOperatingRegionSet | None = None,
+        tier: IpamTier | None = None,
+        enable_private_gua: Boolean | None = None,
+        metered_account: IpamMeteredAccount | None = None,
+        **kwargs,
+    ) -> ModifyIpamResult:
+        raise NotImplementedError
+
+    @handler("ModifyIpamPool")
+    def modify_ipam_pool(
+        self,
+        context: RequestContext,
+        ipam_pool_id: IpamPoolId,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        auto_import: Boolean | None = None,
+        allocation_min_netmask_length: IpamNetmaskLength | None = None,
+        allocation_max_netmask_length: IpamNetmaskLength | None = None,
+        allocation_default_netmask_length: IpamNetmaskLength | None = None,
+        clear_allocation_default_netmask_length: Boolean | None = None,
+        add_allocation_resource_tags: RequestIpamResourceTagList | None = None,
+        remove_allocation_resource_tags: RequestIpamResourceTagList | None = None,
+        **kwargs,
+    ) -> ModifyIpamPoolResult:
+        raise NotImplementedError
+
+    @handler("ModifyIpamResourceCidr")
+    def modify_ipam_resource_cidr(
+        self,
+        context: RequestContext,
+        resource_id: String,
+        resource_cidr: String,
+        resource_region: String,
+        current_ipam_scope_id: IpamScopeId,
+        monitored: Boolean,
+        dry_run: Boolean | None = None,
+        destination_ipam_scope_id: IpamScopeId | None = None,
+        **kwargs,
+    ) -> ModifyIpamResourceCidrResult:
+        raise NotImplementedError
+
+    @handler("ModifyIpamResourceDiscovery")
+    def modify_ipam_resource_discovery(
+        self,
+        context: RequestContext,
+        ipam_resource_discovery_id: IpamResourceDiscoveryId,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        add_operating_regions: AddIpamOperatingRegionSet | None = None,
+        remove_operating_regions: RemoveIpamOperatingRegionSet | None = None,
+        add_organizational_unit_exclusions: AddIpamOrganizationalUnitExclusionSet | None = None,
+        remove_organizational_unit_exclusions: RemoveIpamOrganizationalUnitExclusionSet
+        | None = None,
+        **kwargs,
+    ) -> ModifyIpamResourceDiscoveryResult:
+        raise NotImplementedError
+
+    @handler("ModifyIpamScope")
+    def modify_ipam_scope(
+        self,
+        context: RequestContext,
+        ipam_scope_id: IpamScopeId,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        **kwargs,
+    ) -> ModifyIpamScopeResult:
+        raise NotImplementedError
+
+    @handler("ModifyLaunchTemplate")
+    def modify_launch_template(
+        self,
+        context: RequestContext,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        launch_template_id: LaunchTemplateId | None = None,
+        launch_template_name: LaunchTemplateName | None = None,
+        default_version: String | None = None,
+        **kwargs,
+    ) -> ModifyLaunchTemplateResult:
+        raise NotImplementedError
+
+    @handler("ModifyLocalGatewayRoute")
+    def modify_local_gateway_route(
+        self,
+        context: RequestContext,
+        local_gateway_route_table_id: LocalGatewayRoutetableId,
+        destination_cidr_block: String | None = None,
+        local_gateway_virtual_interface_group_id: LocalGatewayVirtualInterfaceGroupId | None = None,
+        network_interface_id: NetworkInterfaceId | None = None,
+        dry_run: Boolean | None = None,
+        destination_prefix_list_id: PrefixListResourceId | None = None,
+        **kwargs,
+    ) -> ModifyLocalGatewayRouteResult:
+        raise NotImplementedError
+
+    @handler("ModifyManagedPrefixList")
+    def modify_managed_prefix_list(
+        self,
+        context: RequestContext,
+        prefix_list_id: PrefixListResourceId,
+        dry_run: Boolean | None = None,
+        current_version: Long | None = None,
+        prefix_list_name: String | None = None,
+        add_entries: AddPrefixListEntries | None = None,
+        remove_entries: RemovePrefixListEntries | None = None,
+        max_entries: Integer | None = None,
+        **kwargs,
+    ) -> ModifyManagedPrefixListResult:
+        raise NotImplementedError
+
+    @handler("ModifyNetworkInterfaceAttribute")
+    def modify_network_interface_attribute(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId,
+        ena_srd_specification: EnaSrdSpecification | None = None,
+        enable_primary_ipv6: Boolean | None = None,
+        connection_tracking_specification: ConnectionTrackingSpecificationRequest | None = None,
+        associate_public_ip_address: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        description: AttributeValue | None = None,
+        source_dest_check: AttributeBooleanValue | None = None,
+        groups: SecurityGroupIdStringList | None = None,
+        attachment: NetworkInterfaceAttachmentChanges | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ModifyPrivateDnsNameOptions")
+    def modify_private_dns_name_options(
+        self,
+        context: RequestContext,
+        instance_id: InstanceId,
+        dry_run: Boolean | None = None,
+        private_dns_hostname_type: HostnameType | None = None,
+        enable_resource_name_dns_a_record: Boolean | None = None,
+        enable_resource_name_dns_aaaa_record: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyPrivateDnsNameOptionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyPublicIpDnsNameOptions")
+    def modify_public_ip_dns_name_options(
+        self,
+        context: RequestContext,
+        network_interface_id: NetworkInterfaceId,
+        hostname_type: PublicIpDnsOption,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyPublicIpDnsNameOptionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyReservedInstances")
+    def modify_reserved_instances(
+        self,
+        context: RequestContext,
+        reserved_instances_ids: ReservedInstancesIdStringList,
+        target_configurations: ReservedInstancesConfigurationList,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> ModifyReservedInstancesResult:
+        raise NotImplementedError
+
+    @handler("ModifyRouteServer")
+    def modify_route_server(
+        self,
+        context: RequestContext,
+        route_server_id: RouteServerId,
+        persist_routes: RouteServerPersistRoutesAction | None = None,
+        persist_routes_duration: BoxedLong | None = None,
+        sns_notifications_enabled: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyRouteServerResult:
+        raise NotImplementedError
+
+    @handler("ModifySecurityGroupRules")
+    def modify_security_group_rules(
+        self,
+        context: RequestContext,
+        group_id: SecurityGroupId,
+        security_group_rules: SecurityGroupRuleUpdateList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifySecurityGroupRulesResult:
+        raise NotImplementedError
+
+    @handler("ModifySnapshotAttribute")
+    def modify_snapshot_attribute(
+        self,
+        context: RequestContext,
+        snapshot_id: SnapshotId,
+        attribute: SnapshotAttributeName | None = None,
+        create_volume_permission: CreateVolumePermissionModifications | None = None,
+        group_names: GroupNameStringList | None = None,
+        operation_type: OperationType | None = None,
+        user_ids: UserIdStringList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ModifySnapshotTier")
+    def modify_snapshot_tier(
+        self,
+        context: RequestContext,
+        snapshot_id: SnapshotId,
+        storage_tier: TargetStorageTier | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifySnapshotTierResult:
+        raise NotImplementedError
+
+    @handler("ModifySpotFleetRequest", expand=False)
+    def modify_spot_fleet_request(
+        self, context: RequestContext, request: ModifySpotFleetRequestRequest, **kwargs
+    ) -> ModifySpotFleetRequestResponse:
+        raise NotImplementedError
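Most stubs in this class use a bare `@handler("OperationName")`, which expands the incoming request into keyword arguments; the `expand=False` variant on ModifySpotFleetRequest above instead hands the operation the whole typed request object. A minimal sketch of what an implementation of the unexpanded form could look like; the base class import and the field accesses are assumptions for illustration, not LocalStack's actual implementation (at runtime a ServiceRequest behaves like a dict, so members are read by their wire names):

from localstack.aws.api import RequestContext, handler
from localstack.aws.api.ec2 import (
    Ec2Api,
    ModifySpotFleetRequestRequest,
    ModifySpotFleetRequestResponse,
)


class SpotFleetSketch(Ec2Api):
    @handler("ModifySpotFleetRequest", expand=False)
    def modify_spot_fleet_request(
        self, context: RequestContext, request: ModifySpotFleetRequestRequest, **kwargs
    ) -> ModifySpotFleetRequestResponse:
        fleet_id = request["SpotFleetRequestId"]        # required member
        target_capacity = request.get("TargetCapacity")  # optional member
        # ...update the stored fleet configuration here...
        return ModifySpotFleetRequestResponse(Return=True)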
+
+    @handler("ModifySubnetAttribute")
+    def modify_subnet_attribute(
+        self,
+        context: RequestContext,
+        subnet_id: SubnetId,
+        assign_ipv6_address_on_creation: AttributeBooleanValue | None = None,
+        map_public_ip_on_launch: AttributeBooleanValue | None = None,
+        map_customer_owned_ip_on_launch: AttributeBooleanValue | None = None,
+        customer_owned_ipv4_pool: CoipPoolId | None = None,
+        enable_dns64: AttributeBooleanValue | None = None,
+        private_dns_hostname_type_on_launch: HostnameType | None = None,
+        enable_resource_name_dns_a_record_on_launch: AttributeBooleanValue | None = None,
+        enable_resource_name_dns_aaaa_record_on_launch: AttributeBooleanValue | None = None,
+        enable_lni_at_device_index: Integer | None = None,
+        disable_lni_at_device_index: AttributeBooleanValue | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ModifyTrafficMirrorFilterNetworkServices")
+    def modify_traffic_mirror_filter_network_services(
+        self,
+        context: RequestContext,
+        traffic_mirror_filter_id: TrafficMirrorFilterId,
+        add_network_services: TrafficMirrorNetworkServiceList | None = None,
+        remove_network_services: TrafficMirrorNetworkServiceList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyTrafficMirrorFilterNetworkServicesResult:
+        raise NotImplementedError
+
+    @handler("ModifyTrafficMirrorFilterRule")
+    def modify_traffic_mirror_filter_rule(
+        self,
+        context: RequestContext,
+        traffic_mirror_filter_rule_id: TrafficMirrorFilterRuleIdWithResolver,
+        traffic_direction: TrafficDirection | None = None,
+        rule_number: Integer | None = None,
+        rule_action: TrafficMirrorRuleAction | None = None,
+        destination_port_range: TrafficMirrorPortRangeRequest | None = None,
+        source_port_range: TrafficMirrorPortRangeRequest | None = None,
+        protocol: Integer | None = None,
+        destination_cidr_block: String | None = None,
+        source_cidr_block: String | None = None,
+        description: String | None = None,
+        remove_fields: TrafficMirrorFilterRuleFieldList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyTrafficMirrorFilterRuleResult:
+        raise NotImplementedError
+
+    @handler("ModifyTrafficMirrorSession")
+    def modify_traffic_mirror_session(
+        self,
+        context: RequestContext,
+        traffic_mirror_session_id: TrafficMirrorSessionId,
+        traffic_mirror_target_id: TrafficMirrorTargetId | None = None,
+        traffic_mirror_filter_id: TrafficMirrorFilterId | None = None,
+        packet_length: Integer | None = None,
+        session_number: Integer | None = None,
+        virtual_network_id: Integer | None = None,
+        description: String | None = None,
+        remove_fields: TrafficMirrorSessionFieldList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyTrafficMirrorSessionResult:
+        raise NotImplementedError
+
+    @handler("ModifyTransitGateway")
+    def modify_transit_gateway(
+        self,
+        context: RequestContext,
+        transit_gateway_id: TransitGatewayId,
+        description: String | None = None,
+        options: ModifyTransitGatewayOptions | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyTransitGatewayResult:
+        raise NotImplementedError
+
+    @handler("ModifyTransitGatewayPrefixListReference")
+    def modify_transit_gateway_prefix_list_reference(
+        self,
+        context: RequestContext,
+        transit_gateway_route_table_id: TransitGatewayRouteTableId,
+        prefix_list_id: PrefixListResourceId,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId | None = None,
+        blackhole: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyTransitGatewayPrefixListReferenceResult:
+        raise NotImplementedError
+
+    @handler("ModifyTransitGatewayVpcAttachment")
+    def modify_transit_gateway_vpc_attachment(
+        self,
+        context: RequestContext,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId,
+        add_subnet_ids: TransitGatewaySubnetIdList | None = None,
+        remove_subnet_ids: TransitGatewaySubnetIdList | None = None,
+        options: ModifyTransitGatewayVpcAttachmentRequestOptions | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyTransitGatewayVpcAttachmentResult:
+        raise NotImplementedError
+
+    @handler("ModifyVerifiedAccessEndpoint")
+    def modify_verified_access_endpoint(
+        self,
+        context: RequestContext,
+        verified_access_endpoint_id: VerifiedAccessEndpointId,
+        verified_access_group_id: VerifiedAccessGroupId | None = None,
+        load_balancer_options: ModifyVerifiedAccessEndpointLoadBalancerOptions | None = None,
+        network_interface_options: ModifyVerifiedAccessEndpointEniOptions | None = None,
+        description: String | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        rds_options: ModifyVerifiedAccessEndpointRdsOptions | None = None,
+        cidr_options: ModifyVerifiedAccessEndpointCidrOptions | None = None,
+        **kwargs,
+    ) -> ModifyVerifiedAccessEndpointResult:
+        raise NotImplementedError
+
+    @handler("ModifyVerifiedAccessEndpointPolicy")
+    def modify_verified_access_endpoint_policy(
+        self,
+        context: RequestContext,
+        verified_access_endpoint_id: VerifiedAccessEndpointId,
+        policy_enabled: Boolean | None = None,
+        policy_document: String | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        sse_specification: VerifiedAccessSseSpecificationRequest | None = None,
+        **kwargs,
+    ) -> ModifyVerifiedAccessEndpointPolicyResult:
+        raise NotImplementedError
+
+    @handler("ModifyVerifiedAccessGroup")
+    def modify_verified_access_group(
+        self,
+        context: RequestContext,
+        verified_access_group_id: VerifiedAccessGroupId,
+        verified_access_instance_id: VerifiedAccessInstanceId | None = None,
+        description: String | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVerifiedAccessGroupResult:
+        raise NotImplementedError
+
+    @handler("ModifyVerifiedAccessGroupPolicy")
+    def modify_verified_access_group_policy(
+        self,
+        context: RequestContext,
+        verified_access_group_id: VerifiedAccessGroupId,
+        policy_enabled: Boolean | None = None,
+        policy_document: String | None = None,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        sse_specification: VerifiedAccessSseSpecificationRequest | None = None,
+        **kwargs,
+    ) -> ModifyVerifiedAccessGroupPolicyResult:
+        raise NotImplementedError
+
+    @handler("ModifyVerifiedAccessInstance")
+    def modify_verified_access_instance(
+        self,
+        context: RequestContext,
+        verified_access_instance_id: VerifiedAccessInstanceId,
+        description: String | None = None,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        cidr_endpoints_custom_sub_domain: String | None = None,
+        **kwargs,
+    ) -> ModifyVerifiedAccessInstanceResult:
+        raise NotImplementedError
+
+    @handler("ModifyVerifiedAccessInstanceLoggingConfiguration")
+    def modify_verified_access_instance_logging_configuration(
+        self,
+        context: RequestContext,
+        verified_access_instance_id: VerifiedAccessInstanceId,
+        access_logs: VerifiedAccessLogOptions,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> ModifyVerifiedAccessInstanceLoggingConfigurationResult:
+        raise NotImplementedError
+
+    @handler("ModifyVerifiedAccessTrustProvider")
+    def modify_verified_access_trust_provider(
+        self,
+        context: RequestContext,
+        verified_access_trust_provider_id: VerifiedAccessTrustProviderId,
+        oidc_options: ModifyVerifiedAccessTrustProviderOidcOptions | None = None,
+        device_options: ModifyVerifiedAccessTrustProviderDeviceOptions | None = None,
+        description: String | None = None,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        sse_specification: VerifiedAccessSseSpecificationRequest | None = None,
+        native_application_oidc_options: ModifyVerifiedAccessNativeApplicationOidcOptions
+        | None = None,
+        **kwargs,
+    ) -> ModifyVerifiedAccessTrustProviderResult:
+        raise NotImplementedError
+
+    @handler("ModifyVolume")
+    def modify_volume(
+        self,
+        context: RequestContext,
+        volume_id: VolumeId,
+        dry_run: Boolean | None = None,
+        size: Integer | None = None,
+        volume_type: VolumeType | None = None,
+        iops: Integer | None = None,
+        throughput: Integer | None = None,
+        multi_attach_enabled: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVolumeResult:
+        raise NotImplementedError
+
+    @handler("ModifyVolumeAttribute")
+    def modify_volume_attribute(
+        self,
+        context: RequestContext,
+        volume_id: VolumeId,
+        auto_enable_io: AttributeBooleanValue | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ModifyVpcAttribute")
+    def modify_vpc_attribute(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        enable_dns_hostnames: AttributeBooleanValue | None = None,
+        enable_dns_support: AttributeBooleanValue | None = None,
+        enable_network_address_usage_metrics: AttributeBooleanValue | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ModifyVpcBlockPublicAccessExclusion")
+    def modify_vpc_block_public_access_exclusion(
+        self,
+        context: RequestContext,
+        exclusion_id: VpcBlockPublicAccessExclusionId,
+        internet_gateway_exclusion_mode: InternetGatewayExclusionMode,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVpcBlockPublicAccessExclusionResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpcBlockPublicAccessOptions")
+    def modify_vpc_block_public_access_options(
+        self,
+        context: RequestContext,
+        internet_gateway_block_mode: InternetGatewayBlockMode,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVpcBlockPublicAccessOptionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpcEndpoint")
+    def modify_vpc_endpoint(
+        self,
+        context: RequestContext,
+        vpc_endpoint_id: VpcEndpointId,
+        dry_run: Boolean | None = None,
+        reset_policy: Boolean | None = None,
+        policy_document: String | None = None,
+        add_route_table_ids: VpcEndpointRouteTableIdList | None = None,
+        remove_route_table_ids: VpcEndpointRouteTableIdList | None = None,
+        add_subnet_ids: VpcEndpointSubnetIdList | None = None,
+        remove_subnet_ids: VpcEndpointSubnetIdList | None = None,
+        add_security_group_ids: VpcEndpointSecurityGroupIdList | None = None,
+        remove_security_group_ids: VpcEndpointSecurityGroupIdList | None = None,
+        ip_address_type: IpAddressType | None = None,
+        dns_options: DnsOptionsSpecification | None = None,
+        private_dns_enabled: Boolean | None = None,
+        subnet_configurations: SubnetConfigurationsList | None = None,
+        **kwargs,
+    ) -> ModifyVpcEndpointResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpcEndpointConnectionNotification")
+    def modify_vpc_endpoint_connection_notification(
+        self,
+        context: RequestContext,
+        connection_notification_id: ConnectionNotificationId,
+        dry_run: Boolean | None = None,
+        connection_notification_arn: String | None = None,
+        connection_events: ValueStringList | None = None,
+        **kwargs,
+    ) -> ModifyVpcEndpointConnectionNotificationResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpcEndpointServiceConfiguration")
+    def modify_vpc_endpoint_service_configuration(
+        self,
+        context: RequestContext,
+        service_id: VpcEndpointServiceId,
+        dry_run: Boolean | None = None,
+        private_dns_name: String | None = None,
+        remove_private_dns_name: Boolean | None = None,
+        acceptance_required: Boolean | None = None,
+        add_network_load_balancer_arns: ValueStringList | None = None,
+        remove_network_load_balancer_arns: ValueStringList | None = None,
+        add_gateway_load_balancer_arns: ValueStringList | None = None,
+        remove_gateway_load_balancer_arns: ValueStringList | None = None,
+        add_supported_ip_address_types: ValueStringList | None = None,
+        remove_supported_ip_address_types: ValueStringList | None = None,
+        add_supported_regions: ValueStringList | None = None,
+        remove_supported_regions: ValueStringList | None = None,
+        **kwargs,
+    ) -> ModifyVpcEndpointServiceConfigurationResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpcEndpointServicePayerResponsibility")
+    def modify_vpc_endpoint_service_payer_responsibility(
+        self,
+        context: RequestContext,
+        service_id: VpcEndpointServiceId,
+        payer_responsibility: PayerResponsibility,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVpcEndpointServicePayerResponsibilityResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpcEndpointServicePermissions")
+    def modify_vpc_endpoint_service_permissions(
+        self,
+        context: RequestContext,
+        service_id: VpcEndpointServiceId,
+        dry_run: Boolean | None = None,
+        add_allowed_principals: ValueStringList | None = None,
+        remove_allowed_principals: ValueStringList | None = None,
+        **kwargs,
+    ) -> ModifyVpcEndpointServicePermissionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpcPeeringConnectionOptions")
+    def modify_vpc_peering_connection_options(
+        self,
+        context: RequestContext,
+        vpc_peering_connection_id: VpcPeeringConnectionId,
+        accepter_peering_connection_options: PeeringConnectionOptionsRequest | None = None,
+        dry_run: Boolean | None = None,
+        requester_peering_connection_options: PeeringConnectionOptionsRequest | None = None,
+        **kwargs,
+    ) -> ModifyVpcPeeringConnectionOptionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpcTenancy")
+    def modify_vpc_tenancy(
+        self,
+        context: RequestContext,
+        vpc_id: VpcId,
+        instance_tenancy: VpcTenancy,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVpcTenancyResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpnConnection")
+    def modify_vpn_connection(
+        self,
+        context: RequestContext,
+        vpn_connection_id: VpnConnectionId,
+        transit_gateway_id: TransitGatewayId | None = None,
+        customer_gateway_id: CustomerGatewayId | None = None,
+        vpn_gateway_id: VpnGatewayId | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVpnConnectionResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpnConnectionOptions")
+    def modify_vpn_connection_options(
+        self,
+        context: RequestContext,
+        vpn_connection_id: VpnConnectionId,
+        local_ipv4_network_cidr: String | None = None,
+        remote_ipv4_network_cidr: String | None = None,
+        local_ipv6_network_cidr: String | None = None,
+        remote_ipv6_network_cidr: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVpnConnectionOptionsResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpnTunnelCertificate")
+    def modify_vpn_tunnel_certificate(
+        self,
+        context: RequestContext,
+        vpn_connection_id: VpnConnectionId,
+        vpn_tunnel_outside_ip_address: String,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyVpnTunnelCertificateResult:
+        raise NotImplementedError
+
+    @handler("ModifyVpnTunnelOptions")
+    def modify_vpn_tunnel_options(
+        self,
+        context: RequestContext,
+        vpn_connection_id: VpnConnectionId,
+        vpn_tunnel_outside_ip_address: String,
+        tunnel_options: ModifyVpnTunnelOptionsSpecification,
+        dry_run: Boolean | None = None,
+        skip_tunnel_replacement: Boolean | None = None,
+        pre_shared_key_storage: String | None = None,
+        **kwargs,
+    ) -> ModifyVpnTunnelOptionsResult:
+        raise NotImplementedError
+
+    @handler("MonitorInstances")
+    def monitor_instances(
+        self,
+        context: RequestContext,
+        instance_ids: InstanceIdStringList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> MonitorInstancesResult:
+        raise NotImplementedError
+
+    @handler("MoveAddressToVpc")
+    def move_address_to_vpc(
+        self, context: RequestContext, public_ip: String, dry_run: Boolean | None = None, **kwargs
+    ) -> MoveAddressToVpcResult:
+        raise NotImplementedError
+
+    @handler("MoveByoipCidrToIpam")
+    def move_byoip_cidr_to_ipam(
+        self,
+        context: RequestContext,
+        cidr: String,
+        ipam_pool_id: IpamPoolId,
+        ipam_pool_owner: String,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> MoveByoipCidrToIpamResult:
+        raise NotImplementedError
+
+    @handler("MoveCapacityReservationInstances")
+    def move_capacity_reservation_instances(
+        self,
+        context: RequestContext,
+        source_capacity_reservation_id: CapacityReservationId,
+        destination_capacity_reservation_id: CapacityReservationId,
+        instance_count: Integer,
+        dry_run: Boolean | None = None,
+        client_token: String | None = None,
+        **kwargs,
+    ) -> MoveCapacityReservationInstancesResult:
+        raise NotImplementedError
+
+    @handler("ProvisionByoipCidr")
+    def provision_byoip_cidr(
+        self,
+        context: RequestContext,
+        cidr: String,
+        cidr_authorization_context: CidrAuthorizationContext | None = None,
+        publicly_advertisable: Boolean | None = None,
+        description: String | None = None,
+        dry_run: Boolean | None = None,
+        pool_tag_specifications: TagSpecificationList | None = None,
+        multi_region: Boolean | None = None,
+        network_border_group: String | None = None,
+        **kwargs,
+    ) -> ProvisionByoipCidrResult:
+        raise NotImplementedError
+
+    @handler("ProvisionIpamByoasn")
+    def provision_ipam_byoasn(
+        self,
+        context: RequestContext,
+        ipam_id: IpamId,
+        asn: String,
+        asn_authorization_context: AsnAuthorizationContext,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> ProvisionIpamByoasnResult:
+        raise NotImplementedError
+
+    @handler("ProvisionIpamPoolCidr")
+    def provision_ipam_pool_cidr(
+        self,
+        context: RequestContext,
+        ipam_pool_id: IpamPoolId,
+        dry_run: Boolean | None = None,
+        cidr: String | None = None,
+        cidr_authorization_context: IpamCidrAuthorizationContext | None = None,
+        netmask_length: Integer | None = None,
+        client_token: String | None = None,
+        verification_method: VerificationMethod | None = None,
+        ipam_external_resource_verification_token_id: IpamExternalResourceVerificationTokenId
+        | None = None,
+        **kwargs,
+    ) -> ProvisionIpamPoolCidrResult:
+        raise NotImplementedError
+
+    @handler("ProvisionPublicIpv4PoolCidr")
+    def provision_public_ipv4_pool_cidr(
+        self,
+        context: RequestContext,
+        ipam_pool_id: IpamPoolId,
+        pool_id: Ipv4PoolEc2Id,
+        netmask_length: Integer,
+        dry_run: Boolean | None = None,
+        network_border_group: String | None = None,
+        **kwargs,
+    ) -> ProvisionPublicIpv4PoolCidrResult:
+        raise NotImplementedError
+
+    @handler("PurchaseCapacityBlock")
+    def purchase_capacity_block(
+        self,
+        context: RequestContext,
+        capacity_block_offering_id: OfferingId,
+        instance_platform: CapacityReservationInstancePlatform,
+        dry_run: Boolean | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> PurchaseCapacityBlockResult:
+        raise NotImplementedError
+
+    @handler("PurchaseCapacityBlockExtension")
+    def purchase_capacity_block_extension(
+        self,
+        context: RequestContext,
+        capacity_block_extension_offering_id: OfferingId,
+        capacity_reservation_id: CapacityReservationId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> PurchaseCapacityBlockExtensionResult:
+        raise NotImplementedError
+
+    @handler("PurchaseHostReservation")
+    def purchase_host_reservation(
+        self,
+        context: RequestContext,
+        host_id_set: RequestHostIdSet,
+        offering_id: OfferingId,
+        client_token: String | None = None,
+        currency_code: CurrencyCodeValues | None = None,
+        limit_price: String | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        **kwargs,
+    ) -> PurchaseHostReservationResult:
+        raise NotImplementedError
+
+    @handler("PurchaseReservedInstancesOffering")
+    def purchase_reserved_instances_offering(
+        self,
+        context: RequestContext,
+        instance_count: Integer,
+        reserved_instances_offering_id: ReservedInstancesOfferingId,
+        purchase_time: DateTime | None = None,
+        dry_run: Boolean | None = None,
+        limit_price: ReservedInstanceLimitPrice | None = None,
+        **kwargs,
+    ) -> PurchaseReservedInstancesOfferingResult:
+        raise NotImplementedError
+
+    @handler("PurchaseScheduledInstances")
+    def purchase_scheduled_instances(
+        self,
+        context: RequestContext,
+        purchase_requests: PurchaseRequestSet,
+        client_token: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> PurchaseScheduledInstancesResult:
+        raise NotImplementedError
+
+    @handler("RebootInstances")
+    def reboot_instances(
+        self,
+        context: RequestContext,
+        instance_ids: InstanceIdStringList,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("RegisterImage")
+    def register_image(
+        self,
+        context: RequestContext,
+        name: String,
+        image_location: String | None = None,
+        billing_products: BillingProductList | None = None,
+        boot_mode: BootModeValues | None = None,
+        tpm_support: TpmSupportValues | None = None,
+        uefi_data: StringType | None = None,
+        imds_support: ImdsSupportValues | None = None,
+        tag_specifications: TagSpecificationList | None = None,
+        dry_run: Boolean | None = None,
+        description: String | None = None,
+        architecture: ArchitectureValues | None = None,
+        kernel_id: KernelId | None = None,
+        ramdisk_id: RamdiskId | None = None,
+        root_device_name: String | None = None,
+        block_device_mappings: BlockDeviceMappingRequestList | None = None,
+        virtualization_type: String | None = None,
+        sriov_net_support: String | None = None,
+        ena_support: Boolean | None = None,
+        **kwargs,
+    ) -> RegisterImageResult:
+        raise NotImplementedError
+
+    @handler("RegisterInstanceEventNotificationAttributes")
+    def register_instance_event_notification_attributes(
+        self,
+        context: RequestContext,
+        instance_tag_attribute: RegisterInstanceTagAttributeRequest,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> RegisterInstanceEventNotificationAttributesResult:
+        raise NotImplementedError
+
+    @handler("RegisterTransitGatewayMulticastGroupMembers")
+    def register_transit_gateway_multicast_group_members(
+        self,
+        context: RequestContext,
+        transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId,
+        network_interface_ids: TransitGatewayNetworkInterfaceIdList,
+        group_ip_address: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> RegisterTransitGatewayMulticastGroupMembersResult:
+        raise NotImplementedError
+
+    @handler("RegisterTransitGatewayMulticastGroupSources")
+    def register_transit_gateway_multicast_group_sources(
+        self,
+        context: RequestContext,
+        transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId,
+        network_interface_ids: TransitGatewayNetworkInterfaceIdList,
+        group_ip_address: String | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> RegisterTransitGatewayMulticastGroupSourcesResult:
+        raise NotImplementedError
+
+    @handler("RejectCapacityReservationBillingOwnership")
+    def reject_capacity_reservation_billing_ownership(
+        self,
+        context: RequestContext,
+        capacity_reservation_id: CapacityReservationId,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> RejectCapacityReservationBillingOwnershipResult:
+        raise NotImplementedError
+
+    @handler("RejectTransitGatewayMulticastDomainAssociations")
+    def reject_transit_gateway_multicast_domain_associations(
+        self,
+        context: RequestContext,
+        transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId | None = None,
+        transit_gateway_attachment_id: TransitGatewayAttachmentId | None = None,
+        subnet_ids: ValueStringList | None = None,
+        dry_run: Boolean | None = None,
+        **kwargs,
+    ) -> RejectTransitGatewayMulticastDomainAssociationsResult:
+        raise NotImplementedError
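Each of these stubs maps one-to-one onto a public EC2 API action, so once a provider implements them they can be exercised with any standard SDK. A hedged client-side smoke test with boto3 against a locally running endpoint; the port and the dummy credentials are assumptions for illustration, not part of this diff:

import boto3

# Assumed local endpoint; 4566 is LocalStack's conventional edge port.
ec2 = boto3.client(
    "ec2",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# RebootInstances is declared above to return None, i.e. an empty response
# body, so only the HTTP status is meaningful here.
response = ec2.reboot_instances(InstanceIds=["i-0123456789abcdef0"])
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200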
@handler("RejectTransitGatewayPeeringAttachment") + def reject_transit_gateway_peering_attachment( + self, + context: RequestContext, + transit_gateway_attachment_id: TransitGatewayAttachmentId, + dry_run: Boolean | None = None, + **kwargs, + ) -> RejectTransitGatewayPeeringAttachmentResult: + raise NotImplementedError + + @handler("RejectTransitGatewayVpcAttachment") + def reject_transit_gateway_vpc_attachment( + self, + context: RequestContext, + transit_gateway_attachment_id: TransitGatewayAttachmentId, + dry_run: Boolean | None = None, + **kwargs, + ) -> RejectTransitGatewayVpcAttachmentResult: + raise NotImplementedError + + @handler("RejectVpcEndpointConnections") + def reject_vpc_endpoint_connections( + self, + context: RequestContext, + service_id: VpcEndpointServiceId, + vpc_endpoint_ids: VpcEndpointIdList, + dry_run: Boolean | None = None, + **kwargs, + ) -> RejectVpcEndpointConnectionsResult: + raise NotImplementedError + + @handler("RejectVpcPeeringConnection") + def reject_vpc_peering_connection( + self, + context: RequestContext, + vpc_peering_connection_id: VpcPeeringConnectionId, + dry_run: Boolean | None = None, + **kwargs, + ) -> RejectVpcPeeringConnectionResult: + raise NotImplementedError + + @handler("ReleaseAddress") + def release_address( + self, + context: RequestContext, + allocation_id: AllocationId | None = None, + public_ip: String | None = None, + network_border_group: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ReleaseHosts") + def release_hosts( + self, context: RequestContext, host_ids: RequestHostIdList, **kwargs + ) -> ReleaseHostsResult: + raise NotImplementedError + + @handler("ReleaseIpamPoolAllocation") + def release_ipam_pool_allocation( + self, + context: RequestContext, + ipam_pool_id: IpamPoolId, + cidr: String, + ipam_pool_allocation_id: IpamPoolAllocationId, + dry_run: Boolean | None = None, + **kwargs, + ) -> ReleaseIpamPoolAllocationResult: + raise NotImplementedError + + @handler("ReplaceIamInstanceProfileAssociation") + def replace_iam_instance_profile_association( + self, + context: RequestContext, + iam_instance_profile: IamInstanceProfileSpecification, + association_id: IamInstanceProfileAssociationId, + **kwargs, + ) -> ReplaceIamInstanceProfileAssociationResult: + raise NotImplementedError + + @handler("ReplaceImageCriteriaInAllowedImagesSettings") + def replace_image_criteria_in_allowed_images_settings( + self, + context: RequestContext, + image_criteria: ImageCriterionRequestList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> ReplaceImageCriteriaInAllowedImagesSettingsResult: + raise NotImplementedError + + @handler("ReplaceNetworkAclAssociation") + def replace_network_acl_association( + self, + context: RequestContext, + association_id: NetworkAclAssociationId, + network_acl_id: NetworkAclId, + dry_run: Boolean | None = None, + **kwargs, + ) -> ReplaceNetworkAclAssociationResult: + raise NotImplementedError + + @handler("ReplaceNetworkAclEntry") + def replace_network_acl_entry( + self, + context: RequestContext, + network_acl_id: NetworkAclId, + rule_number: Integer, + protocol: String, + rule_action: RuleAction, + egress: Boolean, + dry_run: Boolean | None = None, + cidr_block: String | None = None, + ipv6_cidr_block: String | None = None, + icmp_type_code: IcmpTypeCode | None = None, + port_range: PortRange | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ReplaceRoute") + def replace_route( + self, + 
context: RequestContext, + route_table_id: RouteTableId, + destination_prefix_list_id: PrefixListResourceId | None = None, + vpc_endpoint_id: VpcEndpointId | None = None, + local_target: Boolean | None = None, + transit_gateway_id: TransitGatewayId | None = None, + local_gateway_id: LocalGatewayId | None = None, + carrier_gateway_id: CarrierGatewayId | None = None, + core_network_arn: CoreNetworkArn | None = None, + dry_run: Boolean | None = None, + destination_cidr_block: String | None = None, + gateway_id: RouteGatewayId | None = None, + destination_ipv6_cidr_block: String | None = None, + egress_only_internet_gateway_id: EgressOnlyInternetGatewayId | None = None, + instance_id: InstanceId | None = None, + network_interface_id: NetworkInterfaceId | None = None, + vpc_peering_connection_id: VpcPeeringConnectionId | None = None, + nat_gateway_id: NatGatewayId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ReplaceRouteTableAssociation") + def replace_route_table_association( + self, + context: RequestContext, + association_id: RouteTableAssociationId, + route_table_id: RouteTableId, + dry_run: Boolean | None = None, + **kwargs, + ) -> ReplaceRouteTableAssociationResult: + raise NotImplementedError + + @handler("ReplaceTransitGatewayRoute") + def replace_transit_gateway_route( + self, + context: RequestContext, + destination_cidr_block: String, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + transit_gateway_attachment_id: TransitGatewayAttachmentId | None = None, + blackhole: Boolean | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> ReplaceTransitGatewayRouteResult: + raise NotImplementedError + + @handler("ReplaceVpnTunnel") + def replace_vpn_tunnel( + self, + context: RequestContext, + vpn_connection_id: VpnConnectionId, + vpn_tunnel_outside_ip_address: String, + apply_pending_maintenance: Boolean | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> ReplaceVpnTunnelResult: + raise NotImplementedError + + @handler("ReportInstanceStatus") + def report_instance_status( + self, + context: RequestContext, + instances: InstanceIdStringList, + status: ReportStatusType, + reason_codes: ReasonCodesList, + dry_run: Boolean | None = None, + start_time: DateTime | None = None, + end_time: DateTime | None = None, + description: ReportInstanceStatusRequestDescription | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RequestSpotFleet") + def request_spot_fleet( + self, + context: RequestContext, + spot_fleet_request_config: SpotFleetRequestConfigData, + dry_run: Boolean | None = None, + **kwargs, + ) -> RequestSpotFleetResponse: + raise NotImplementedError + + @handler("RequestSpotInstances", expand=False) + def request_spot_instances( + self, context: RequestContext, request: RequestSpotInstancesRequest, **kwargs + ) -> RequestSpotInstancesResult: + raise NotImplementedError + + @handler("ResetAddressAttribute") + def reset_address_attribute( + self, + context: RequestContext, + allocation_id: AllocationId, + attribute: AddressAttributeName, + dry_run: Boolean | None = None, + **kwargs, + ) -> ResetAddressAttributeResult: + raise NotImplementedError + + @handler("ResetEbsDefaultKmsKeyId") + def reset_ebs_default_kms_key_id( + self, context: RequestContext, dry_run: Boolean | None = None, **kwargs + ) -> ResetEbsDefaultKmsKeyIdResult: + raise NotImplementedError + + @handler("ResetFpgaImageAttribute") + def reset_fpga_image_attribute( + self, + context: RequestContext, + fpga_image_id: 
FpgaImageId, + dry_run: Boolean | None = None, + attribute: ResetFpgaImageAttributeName | None = None, + **kwargs, + ) -> ResetFpgaImageAttributeResult: + raise NotImplementedError + + @handler("ResetImageAttribute") + def reset_image_attribute( + self, + context: RequestContext, + attribute: ResetImageAttributeName, + image_id: ImageId, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ResetInstanceAttribute") + def reset_instance_attribute( + self, + context: RequestContext, + instance_id: InstanceId, + attribute: InstanceAttributeName, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ResetNetworkInterfaceAttribute") + def reset_network_interface_attribute( + self, + context: RequestContext, + network_interface_id: NetworkInterfaceId, + dry_run: Boolean | None = None, + source_dest_check: String | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ResetSnapshotAttribute") + def reset_snapshot_attribute( + self, + context: RequestContext, + attribute: SnapshotAttributeName, + snapshot_id: SnapshotId, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RestoreAddressToClassic") + def restore_address_to_classic( + self, context: RequestContext, public_ip: String, dry_run: Boolean | None = None, **kwargs + ) -> RestoreAddressToClassicResult: + raise NotImplementedError + + @handler("RestoreImageFromRecycleBin") + def restore_image_from_recycle_bin( + self, context: RequestContext, image_id: ImageId, dry_run: Boolean | None = None, **kwargs + ) -> RestoreImageFromRecycleBinResult: + raise NotImplementedError + + @handler("RestoreManagedPrefixListVersion") + def restore_managed_prefix_list_version( + self, + context: RequestContext, + prefix_list_id: PrefixListResourceId, + previous_version: Long, + current_version: Long, + dry_run: Boolean | None = None, + **kwargs, + ) -> RestoreManagedPrefixListVersionResult: + raise NotImplementedError + + @handler("RestoreSnapshotFromRecycleBin") + def restore_snapshot_from_recycle_bin( + self, + context: RequestContext, + snapshot_id: SnapshotId, + dry_run: Boolean | None = None, + **kwargs, + ) -> RestoreSnapshotFromRecycleBinResult: + raise NotImplementedError + + @handler("RestoreSnapshotTier") + def restore_snapshot_tier( + self, + context: RequestContext, + snapshot_id: SnapshotId, + temporary_restore_days: RestoreSnapshotTierRequestTemporaryRestoreDays | None = None, + permanent_restore: Boolean | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> RestoreSnapshotTierResult: + raise NotImplementedError + + @handler("RevokeClientVpnIngress") + def revoke_client_vpn_ingress( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + target_network_cidr: String, + access_group_id: String | None = None, + revoke_all_groups: Boolean | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> RevokeClientVpnIngressResult: + raise NotImplementedError + + @handler("RevokeSecurityGroupEgress") + def revoke_security_group_egress( + self, + context: RequestContext, + group_id: SecurityGroupId, + security_group_rule_ids: SecurityGroupRuleIdList | None = None, + dry_run: Boolean | None = None, + source_security_group_name: String | None = None, + source_security_group_owner_id: String | None = None, + ip_protocol: String | None = None, + from_port: Integer | None = None, + to_port: Integer | None = None, + cidr_ip: String | None = None, 
+ ip_permissions: IpPermissionList | None = None, + **kwargs, + ) -> RevokeSecurityGroupEgressResult: + raise NotImplementedError + + @handler("RevokeSecurityGroupIngress") + def revoke_security_group_ingress( + self, + context: RequestContext, + cidr_ip: String | None = None, + from_port: Integer | None = None, + group_id: SecurityGroupId | None = None, + group_name: SecurityGroupName | None = None, + ip_permissions: IpPermissionList | None = None, + ip_protocol: String | None = None, + source_security_group_name: String | None = None, + source_security_group_owner_id: String | None = None, + to_port: Integer | None = None, + security_group_rule_ids: SecurityGroupRuleIdList | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> RevokeSecurityGroupIngressResult: + raise NotImplementedError + + @handler("RunInstances") + def run_instances( + self, + context: RequestContext, + max_count: Integer, + min_count: Integer, + block_device_mappings: BlockDeviceMappingRequestList | None = None, + image_id: ImageId | None = None, + instance_type: InstanceType | None = None, + ipv6_address_count: Integer | None = None, + ipv6_addresses: InstanceIpv6AddressList | None = None, + kernel_id: KernelId | None = None, + key_name: KeyPairName | None = None, + monitoring: RunInstancesMonitoringEnabled | None = None, + placement: Placement | None = None, + ramdisk_id: RamdiskId | None = None, + security_group_ids: SecurityGroupIdStringList | None = None, + security_groups: SecurityGroupStringList | None = None, + subnet_id: SubnetId | None = None, + user_data: RunInstancesUserData | None = None, + elastic_gpu_specification: ElasticGpuSpecifications | None = None, + elastic_inference_accelerators: ElasticInferenceAccelerators | None = None, + tag_specifications: TagSpecificationList | None = None, + launch_template: LaunchTemplateSpecification | None = None, + instance_market_options: InstanceMarketOptionsRequest | None = None, + credit_specification: CreditSpecificationRequest | None = None, + cpu_options: CpuOptionsRequest | None = None, + capacity_reservation_specification: CapacityReservationSpecification | None = None, + hibernation_options: HibernationOptionsRequest | None = None, + license_specifications: LicenseSpecificationListRequest | None = None, + metadata_options: InstanceMetadataOptionsRequest | None = None, + enclave_options: EnclaveOptionsRequest | None = None, + private_dns_name_options: PrivateDnsNameOptionsRequest | None = None, + maintenance_options: InstanceMaintenanceOptionsRequest | None = None, + disable_api_stop: Boolean | None = None, + enable_primary_ipv6: Boolean | None = None, + network_performance_options: InstanceNetworkPerformanceOptionsRequest | None = None, + operator: OperatorRequest | None = None, + dry_run: Boolean | None = None, + disable_api_termination: Boolean | None = None, + instance_initiated_shutdown_behavior: ShutdownBehavior | None = None, + private_ip_address: String | None = None, + client_token: String | None = None, + additional_info: String | None = None, + network_interfaces: InstanceNetworkInterfaceSpecificationList | None = None, + iam_instance_profile: IamInstanceProfileSpecification | None = None, + ebs_optimized: Boolean | None = None, + **kwargs, + ) -> Reservation: + raise NotImplementedError + + @handler("RunScheduledInstances") + def run_scheduled_instances( + self, + context: RequestContext, + launch_specification: ScheduledInstancesLaunchSpecification, + scheduled_instance_id: ScheduledInstanceId, + client_token: String | None = None, 
+ dry_run: Boolean | None = None, + instance_count: Integer | None = None, + **kwargs, + ) -> RunScheduledInstancesResult: + raise NotImplementedError + + @handler("SearchLocalGatewayRoutes") + def search_local_gateway_routes( + self, + context: RequestContext, + local_gateway_route_table_id: LocalGatewayRoutetableId, + filters: FilterList | None = None, + max_results: MaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> SearchLocalGatewayRoutesResult: + raise NotImplementedError + + @handler("SearchTransitGatewayMulticastGroups") + def search_transit_gateway_multicast_groups( + self, + context: RequestContext, + transit_gateway_multicast_domain_id: TransitGatewayMulticastDomainId, + filters: FilterList | None = None, + max_results: TransitGatewayMaxResults | None = None, + next_token: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> SearchTransitGatewayMulticastGroupsResult: + raise NotImplementedError + + @handler("SearchTransitGatewayRoutes") + def search_transit_gateway_routes( + self, + context: RequestContext, + transit_gateway_route_table_id: TransitGatewayRouteTableId, + filters: FilterList, + max_results: TransitGatewayMaxResults | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> SearchTransitGatewayRoutesResult: + raise NotImplementedError + + @handler("SendDiagnosticInterrupt") + def send_diagnostic_interrupt( + self, + context: RequestContext, + instance_id: InstanceId, + dry_run: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("StartDeclarativePoliciesReport") + def start_declarative_policies_report( + self, + context: RequestContext, + s3_bucket: String, + target_id: String, + dry_run: Boolean | None = None, + s3_prefix: String | None = None, + tag_specifications: TagSpecificationList | None = None, + **kwargs, + ) -> StartDeclarativePoliciesReportResult: + raise NotImplementedError + + @handler("StartInstances") + def start_instances( + self, + context: RequestContext, + instance_ids: InstanceIdStringList, + additional_info: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> StartInstancesResult: + raise NotImplementedError + + @handler("StartNetworkInsightsAccessScopeAnalysis") + def start_network_insights_access_scope_analysis( + self, + context: RequestContext, + network_insights_access_scope_id: NetworkInsightsAccessScopeId, + client_token: String, + dry_run: Boolean | None = None, + tag_specifications: TagSpecificationList | None = None, + **kwargs, + ) -> StartNetworkInsightsAccessScopeAnalysisResult: + raise NotImplementedError + + @handler("StartNetworkInsightsAnalysis") + def start_network_insights_analysis( + self, + context: RequestContext, + network_insights_path_id: NetworkInsightsPathId, + client_token: String, + additional_accounts: ValueStringList | None = None, + filter_in_arns: ArnList | None = None, + filter_out_arns: ArnList | None = None, + dry_run: Boolean | None = None, + tag_specifications: TagSpecificationList | None = None, + **kwargs, + ) -> StartNetworkInsightsAnalysisResult: + raise NotImplementedError + + @handler("StartVpcEndpointServicePrivateDnsVerification") + def start_vpc_endpoint_service_private_dns_verification( + self, + context: RequestContext, + service_id: VpcEndpointServiceId, + dry_run: Boolean | None = None, + **kwargs, + ) -> StartVpcEndpointServicePrivateDnsVerificationResult: + raise NotImplementedError + + @handler("StopInstances") + def stop_instances( + 
self, + context: RequestContext, + instance_ids: InstanceIdStringList, + hibernate: Boolean | None = None, + dry_run: Boolean | None = None, + force: Boolean | None = None, + **kwargs, + ) -> StopInstancesResult: + raise NotImplementedError + + @handler("TerminateClientVpnConnections") + def terminate_client_vpn_connections( + self, + context: RequestContext, + client_vpn_endpoint_id: ClientVpnEndpointId, + connection_id: String | None = None, + username: String | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> TerminateClientVpnConnectionsResult: + raise NotImplementedError + + @handler("TerminateInstances") + def terminate_instances( + self, + context: RequestContext, + instance_ids: InstanceIdStringList, + dry_run: Boolean | None = None, + **kwargs, + ) -> TerminateInstancesResult: + raise NotImplementedError + + @handler("UnassignIpv6Addresses") + def unassign_ipv6_addresses( + self, + context: RequestContext, + network_interface_id: NetworkInterfaceId, + ipv6_prefixes: IpPrefixList | None = None, + ipv6_addresses: Ipv6AddressList | None = None, + **kwargs, + ) -> UnassignIpv6AddressesResult: + raise NotImplementedError + + @handler("UnassignPrivateIpAddresses") + def unassign_private_ip_addresses( + self, + context: RequestContext, + network_interface_id: NetworkInterfaceId, + ipv4_prefixes: IpPrefixList | None = None, + private_ip_addresses: PrivateIpAddressStringList | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("UnassignPrivateNatGatewayAddress") + def unassign_private_nat_gateway_address( + self, + context: RequestContext, + nat_gateway_id: NatGatewayId, + private_ip_addresses: IpList, + max_drain_duration_seconds: DrainSeconds | None = None, + dry_run: Boolean | None = None, + **kwargs, + ) -> UnassignPrivateNatGatewayAddressResult: + raise NotImplementedError + + @handler("UnlockSnapshot") + def unlock_snapshot( + self, + context: RequestContext, + snapshot_id: SnapshotId, + dry_run: Boolean | None = None, + **kwargs, + ) -> UnlockSnapshotResult: + raise NotImplementedError + + @handler("UnmonitorInstances") + def unmonitor_instances( + self, + context: RequestContext, + instance_ids: InstanceIdStringList, + dry_run: Boolean | None = None, + **kwargs, + ) -> UnmonitorInstancesResult: + raise NotImplementedError + + @handler("UpdateSecurityGroupRuleDescriptionsEgress") + def update_security_group_rule_descriptions_egress( + self, + context: RequestContext, + dry_run: Boolean | None = None, + group_id: SecurityGroupId | None = None, + group_name: SecurityGroupName | None = None, + ip_permissions: IpPermissionList | None = None, + security_group_rule_descriptions: SecurityGroupRuleDescriptionList | None = None, + **kwargs, + ) -> UpdateSecurityGroupRuleDescriptionsEgressResult: + raise NotImplementedError + + @handler("UpdateSecurityGroupRuleDescriptionsIngress") + def update_security_group_rule_descriptions_ingress( + self, + context: RequestContext, + dry_run: Boolean | None = None, + group_id: SecurityGroupId | None = None, + group_name: SecurityGroupName | None = None, + ip_permissions: IpPermissionList | None = None, + security_group_rule_descriptions: SecurityGroupRuleDescriptionList | None = None, + **kwargs, + ) -> UpdateSecurityGroupRuleDescriptionsIngressResult: + raise NotImplementedError + + @handler("WithdrawByoipCidr") + def withdraw_byoip_cidr( + self, context: RequestContext, cidr: String, dry_run: Boolean | None = None, **kwargs + ) -> WithdrawByoipCidrResult: + raise NotImplementedError diff --git 
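Every method in the generated class above raises NotImplementedError; the file only pins down operation signatures and wire types. A concrete provider subclasses the generated API class and overrides the operations it supports. A minimal sketch, assuming the base class is importable as Ec2Api and using a plain dict as a stand-in for LocalStack's real state handling:

from localstack.aws.api import RequestContext
from localstack.aws.api.ec2 import AttributeBooleanValue, Ec2Api, VpcId


class MinimalEc2Provider(Ec2Api):
    def __init__(self):
        # toy in-memory state; real providers use structured stores
        self.vpc_attributes: dict[VpcId, dict[str, bool]] = {}

    def modify_vpc_attribute(
        self,
        context: RequestContext,
        vpc_id: VpcId,
        enable_dns_hostnames: AttributeBooleanValue | None = None,
        enable_dns_support: AttributeBooleanValue | None = None,
        enable_network_address_usage_metrics: AttributeBooleanValue | None = None,
        **kwargs,
    ) -> None:
        attrs = self.vpc_attributes.setdefault(vpc_id, {})
        # AttributeBooleanValue is a TypedDict, i.e. a plain dict at runtime
        if enable_dns_support is not None:
            attrs["enableDnsSupport"] = bool(enable_dns_support.get("Value"))
        if enable_dns_hostnames is not None:
            attrs["enableDnsHostnames"] = bool(enable_dns_hostnames.get("Value"))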
diff --git a/localstack-core/localstack/aws/api/es/__init__.py b/localstack-core/localstack/aws/api/es/__init__.py
new file mode 100644
index 0000000000000..4c5774cbd36fa
--- /dev/null
+++ b/localstack-core/localstack/aws/api/es/__init__.py
@@ -0,0 +1,2078 @@
+from datetime import datetime
+from enum import StrEnum
+from typing import Dict, List, Optional, TypedDict
+
+from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
+
+ARN = str
+AWSAccount = str
+BackendRole = str
+Boolean = bool
+ChangeProgressStageName = str
+ChangeProgressStageStatus = str
+ClientToken = str
+CloudWatchLogsLogGroupArn = str
+CommitMessage = str
+ConnectionAlias = str
+CrossClusterSearchConnectionId = str
+CrossClusterSearchConnectionStatusMessage = str
+DeploymentType = str
+DescribePackagesFilterValue = str
+Description = str
+DomainArn = str
+DomainId = str
+DomainName = str
+DomainNameFqdn = str
+Double = float
+DryRun = bool
+ElasticsearchVersionString = str
+Endpoint = str
+ErrorMessage = str
+ErrorType = str
+GUID = str
+IdentityPoolId = str
+InstanceCount = int
+InstanceRole = str
+Integer = int
+IntegerClass = int
+Issue = str
+KmsKeyId = str
+LimitName = str
+LimitValue = str
+MaxResults = int
+MaximumInstanceCount = int
+Message = str
+MinimumInstanceCount = int
+NextToken = str
+NonEmptyString = str
+OwnerId = str
+PackageDescription = str
+PackageID = str
+PackageName = str
+PackageVersion = str
+Password = str
+PolicyDocument = str
+ReferencePath = str
+Region = str
+ReservationToken = str
+RoleArn = str
+S3BucketName = str
+S3Key = str
+SAMLEntityId = str
+SAMLMetadata = str
+ScheduledAutoTuneDescription = str
+ServiceUrl = str
+StorageSubTypeName = str
+StorageTypeName = str
+String = str
+TagKey = str
+TagValue = str
+TotalNumberOfStages = int
+UIntValue = int
+UpgradeName = str
+UserPoolId = str
+Username = str
+VpcEndpointId = str
+
+
+class AutoTuneDesiredState(StrEnum):
+    ENABLED = "ENABLED"
+    DISABLED = "DISABLED"
+
+
+class AutoTuneState(StrEnum):
+    ENABLED = "ENABLED"
+    DISABLED = "DISABLED"
+    ENABLE_IN_PROGRESS = "ENABLE_IN_PROGRESS"
+    DISABLE_IN_PROGRESS = "DISABLE_IN_PROGRESS"
+    DISABLED_AND_ROLLBACK_SCHEDULED = "DISABLED_AND_ROLLBACK_SCHEDULED"
+    DISABLED_AND_ROLLBACK_IN_PROGRESS = "DISABLED_AND_ROLLBACK_IN_PROGRESS"
+    DISABLED_AND_ROLLBACK_COMPLETE = "DISABLED_AND_ROLLBACK_COMPLETE"
+    DISABLED_AND_ROLLBACK_ERROR = "DISABLED_AND_ROLLBACK_ERROR"
+    ERROR = "ERROR"
+
+
+class AutoTuneType(StrEnum):
+    SCHEDULED_ACTION = "SCHEDULED_ACTION"
+
+
+class ConfigChangeStatus(StrEnum):
+    Pending = "Pending"
+    Initializing = "Initializing"
+    Validating = "Validating"
+    ValidationFailed = "ValidationFailed"
+    ApplyingChanges = "ApplyingChanges"
+    Completed = "Completed"
+    PendingUserInput = "PendingUserInput"
+    Cancelled = "Cancelled"
+
+
+class DeploymentStatus(StrEnum):
+    PENDING_UPDATE = "PENDING_UPDATE"
+    IN_PROGRESS = "IN_PROGRESS"
+    COMPLETED = "COMPLETED"
+    NOT_ELIGIBLE = "NOT_ELIGIBLE"
+    ELIGIBLE = "ELIGIBLE"
+
+
+class DescribePackagesFilterName(StrEnum):
+    PackageID = "PackageID"
+    PackageName = "PackageName"
+    PackageStatus = "PackageStatus"
+
+
+class DomainPackageStatus(StrEnum):
+    ASSOCIATING = "ASSOCIATING"
+    ASSOCIATION_FAILED = "ASSOCIATION_FAILED"
+    ACTIVE = "ACTIVE"
+    DISSOCIATING = "DISSOCIATING"
+    DISSOCIATION_FAILED = "DISSOCIATION_FAILED"
+
+
+class DomainProcessingStatusType(StrEnum):
+    Creating = "Creating"
+    Active = "Active"
+    Modifying = "Modifying"
+    UpgradingEngineVersion = "UpgradingEngineVersion"
+    UpdatingServiceSoftware = "UpdatingServiceSoftware"
+    Isolated = "Isolated"
+    Deleting = "Deleting"
+
+
+class ESPartitionInstanceType(StrEnum):
+    m3_medium_elasticsearch = "m3.medium.elasticsearch"
+    m3_large_elasticsearch = "m3.large.elasticsearch"
+    m3_xlarge_elasticsearch = "m3.xlarge.elasticsearch"
+    m3_2xlarge_elasticsearch = "m3.2xlarge.elasticsearch"
+    m4_large_elasticsearch = "m4.large.elasticsearch"
+    m4_xlarge_elasticsearch = "m4.xlarge.elasticsearch"
+    m4_2xlarge_elasticsearch = "m4.2xlarge.elasticsearch"
+    m4_4xlarge_elasticsearch = "m4.4xlarge.elasticsearch"
+    m4_10xlarge_elasticsearch = "m4.10xlarge.elasticsearch"
+    m5_large_elasticsearch = "m5.large.elasticsearch"
+    m5_xlarge_elasticsearch = "m5.xlarge.elasticsearch"
+    m5_2xlarge_elasticsearch = "m5.2xlarge.elasticsearch"
+    m5_4xlarge_elasticsearch = "m5.4xlarge.elasticsearch"
+    m5_12xlarge_elasticsearch = "m5.12xlarge.elasticsearch"
+    r5_large_elasticsearch = "r5.large.elasticsearch"
+    r5_xlarge_elasticsearch = "r5.xlarge.elasticsearch"
+    r5_2xlarge_elasticsearch = "r5.2xlarge.elasticsearch"
+    r5_4xlarge_elasticsearch = "r5.4xlarge.elasticsearch"
+    r5_12xlarge_elasticsearch = "r5.12xlarge.elasticsearch"
+    c5_large_elasticsearch = "c5.large.elasticsearch"
+    c5_xlarge_elasticsearch = "c5.xlarge.elasticsearch"
+    c5_2xlarge_elasticsearch = "c5.2xlarge.elasticsearch"
+    c5_4xlarge_elasticsearch = "c5.4xlarge.elasticsearch"
+    c5_9xlarge_elasticsearch = "c5.9xlarge.elasticsearch"
+    c5_18xlarge_elasticsearch = "c5.18xlarge.elasticsearch"
+    ultrawarm1_medium_elasticsearch = "ultrawarm1.medium.elasticsearch"
+    ultrawarm1_large_elasticsearch = "ultrawarm1.large.elasticsearch"
+    t2_micro_elasticsearch = "t2.micro.elasticsearch"
+    t2_small_elasticsearch = "t2.small.elasticsearch"
+    t2_medium_elasticsearch = "t2.medium.elasticsearch"
+    r3_large_elasticsearch = "r3.large.elasticsearch"
+    r3_xlarge_elasticsearch = "r3.xlarge.elasticsearch"
+    r3_2xlarge_elasticsearch = "r3.2xlarge.elasticsearch"
+    r3_4xlarge_elasticsearch = "r3.4xlarge.elasticsearch"
+    r3_8xlarge_elasticsearch = "r3.8xlarge.elasticsearch"
+    i2_xlarge_elasticsearch = "i2.xlarge.elasticsearch"
+    i2_2xlarge_elasticsearch = "i2.2xlarge.elasticsearch"
+    d2_xlarge_elasticsearch = "d2.xlarge.elasticsearch"
+    d2_2xlarge_elasticsearch = "d2.2xlarge.elasticsearch"
+    d2_4xlarge_elasticsearch = "d2.4xlarge.elasticsearch"
+    d2_8xlarge_elasticsearch = "d2.8xlarge.elasticsearch"
+    c4_large_elasticsearch = "c4.large.elasticsearch"
+    c4_xlarge_elasticsearch = "c4.xlarge.elasticsearch"
+    c4_2xlarge_elasticsearch = "c4.2xlarge.elasticsearch"
+    c4_4xlarge_elasticsearch = "c4.4xlarge.elasticsearch"
+    c4_8xlarge_elasticsearch = "c4.8xlarge.elasticsearch"
+    r4_large_elasticsearch = "r4.large.elasticsearch"
+    r4_xlarge_elasticsearch = "r4.xlarge.elasticsearch"
+    r4_2xlarge_elasticsearch = "r4.2xlarge.elasticsearch"
+    r4_4xlarge_elasticsearch = "r4.4xlarge.elasticsearch"
+    r4_8xlarge_elasticsearch = "r4.8xlarge.elasticsearch"
+    r4_16xlarge_elasticsearch = "r4.16xlarge.elasticsearch"
+    i3_large_elasticsearch = "i3.large.elasticsearch"
+    i3_xlarge_elasticsearch = "i3.xlarge.elasticsearch"
+    i3_2xlarge_elasticsearch = "i3.2xlarge.elasticsearch"
+    i3_4xlarge_elasticsearch = "i3.4xlarge.elasticsearch"
+    i3_8xlarge_elasticsearch = "i3.8xlarge.elasticsearch"
+    i3_16xlarge_elasticsearch = "i3.16xlarge.elasticsearch"
+
+
+class ESWarmPartitionInstanceType(StrEnum):
+    ultrawarm1_medium_elasticsearch = "ultrawarm1.medium.elasticsearch"
+    ultrawarm1_large_elasticsearch = "ultrawarm1.large.elasticsearch"
+ OpenSearch = "OpenSearch" + Elasticsearch = "Elasticsearch" + + +class InboundCrossClusterSearchConnectionStatusCode(StrEnum): + PENDING_ACCEPTANCE = "PENDING_ACCEPTANCE" + APPROVED = "APPROVED" + REJECTING = "REJECTING" + REJECTED = "REJECTED" + DELETING = "DELETING" + DELETED = "DELETED" + + +class InitiatedBy(StrEnum): + CUSTOMER = "CUSTOMER" + SERVICE = "SERVICE" + + +class LogType(StrEnum): + INDEX_SLOW_LOGS = "INDEX_SLOW_LOGS" + SEARCH_SLOW_LOGS = "SEARCH_SLOW_LOGS" + ES_APPLICATION_LOGS = "ES_APPLICATION_LOGS" + AUDIT_LOGS = "AUDIT_LOGS" + + +class OptionState(StrEnum): + RequiresIndexDocuments = "RequiresIndexDocuments" + Processing = "Processing" + Active = "Active" + + +class OutboundCrossClusterSearchConnectionStatusCode(StrEnum): + PENDING_ACCEPTANCE = "PENDING_ACCEPTANCE" + VALIDATING = "VALIDATING" + VALIDATION_FAILED = "VALIDATION_FAILED" + PROVISIONING = "PROVISIONING" + ACTIVE = "ACTIVE" + REJECTED = "REJECTED" + DELETING = "DELETING" + DELETED = "DELETED" + + +class OverallChangeStatus(StrEnum): + PENDING = "PENDING" + PROCESSING = "PROCESSING" + COMPLETED = "COMPLETED" + FAILED = "FAILED" + + +class PackageStatus(StrEnum): + COPYING = "COPYING" + COPY_FAILED = "COPY_FAILED" + VALIDATING = "VALIDATING" + VALIDATION_FAILED = "VALIDATION_FAILED" + AVAILABLE = "AVAILABLE" + DELETING = "DELETING" + DELETED = "DELETED" + DELETE_FAILED = "DELETE_FAILED" + + +class PackageType(StrEnum): + TXT_DICTIONARY = "TXT-DICTIONARY" + + +class PrincipalType(StrEnum): + AWS_ACCOUNT = "AWS_ACCOUNT" + AWS_SERVICE = "AWS_SERVICE" + + +class PropertyValueType(StrEnum): + PLAIN_TEXT = "PLAIN_TEXT" + STRINGIFIED_JSON = "STRINGIFIED_JSON" + + +class ReservedElasticsearchInstancePaymentOption(StrEnum): + ALL_UPFRONT = "ALL_UPFRONT" + PARTIAL_UPFRONT = "PARTIAL_UPFRONT" + NO_UPFRONT = "NO_UPFRONT" + + +class RollbackOnDisable(StrEnum): + NO_ROLLBACK = "NO_ROLLBACK" + DEFAULT_ROLLBACK = "DEFAULT_ROLLBACK" + + +class ScheduledAutoTuneActionType(StrEnum): + JVM_HEAP_SIZE_TUNING = "JVM_HEAP_SIZE_TUNING" + JVM_YOUNG_GEN_TUNING = "JVM_YOUNG_GEN_TUNING" + + +class ScheduledAutoTuneSeverityType(StrEnum): + LOW = "LOW" + MEDIUM = "MEDIUM" + HIGH = "HIGH" + + +class TLSSecurityPolicy(StrEnum): + Policy_Min_TLS_1_0_2019_07 = "Policy-Min-TLS-1-0-2019-07" + Policy_Min_TLS_1_2_2019_07 = "Policy-Min-TLS-1-2-2019-07" + Policy_Min_TLS_1_2_PFS_2023_10 = "Policy-Min-TLS-1-2-PFS-2023-10" + + +class TimeUnit(StrEnum): + HOURS = "HOURS" + + +class UpgradeStatus(StrEnum): + IN_PROGRESS = "IN_PROGRESS" + SUCCEEDED = "SUCCEEDED" + SUCCEEDED_WITH_ISSUES = "SUCCEEDED_WITH_ISSUES" + FAILED = "FAILED" + + +class UpgradeStep(StrEnum): + PRE_UPGRADE_CHECK = "PRE_UPGRADE_CHECK" + SNAPSHOT = "SNAPSHOT" + UPGRADE = "UPGRADE" + + +class VolumeType(StrEnum): + standard = "standard" + gp2 = "gp2" + io1 = "io1" + gp3 = "gp3" + + +class VpcEndpointErrorCode(StrEnum): + ENDPOINT_NOT_FOUND = "ENDPOINT_NOT_FOUND" + SERVER_ERROR = "SERVER_ERROR" + + +class VpcEndpointStatus(StrEnum): + CREATING = "CREATING" + CREATE_FAILED = "CREATE_FAILED" + ACTIVE = "ACTIVE" + UPDATING = "UPDATING" + UPDATE_FAILED = "UPDATE_FAILED" + DELETING = "DELETING" + DELETE_FAILED = "DELETE_FAILED" + + +class AccessDeniedException(ServiceException): + code: str = "AccessDeniedException" + sender_fault: bool = False + status_code: int = 403 + + +class BaseException(ServiceException): + code: str = "BaseException" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: 
bool = False + status_code: int = 409 + + +class DisabledOperationException(ServiceException): + code: str = "DisabledOperationException" + sender_fault: bool = False + status_code: int = 409 + + +class InternalException(ServiceException): + code: str = "InternalException" + sender_fault: bool = False + status_code: int = 500 + + +class InvalidPaginationTokenException(ServiceException): + code: str = "InvalidPaginationTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTypeException(ServiceException): + code: str = "InvalidTypeException" + sender_fault: bool = False + status_code: int = 409 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 409 + + +class ResourceAlreadyExistsException(ServiceException): + code: str = "ResourceAlreadyExistsException" + sender_fault: bool = False + status_code: int = 409 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 409 + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = False + status_code: int = 400 + + +class AcceptInboundCrossClusterSearchConnectionRequest(ServiceRequest): + CrossClusterSearchConnectionId: CrossClusterSearchConnectionId + + +class InboundCrossClusterSearchConnectionStatus(TypedDict, total=False): + StatusCode: Optional[InboundCrossClusterSearchConnectionStatusCode] + Message: Optional[CrossClusterSearchConnectionStatusMessage] + + +class DomainInformation(TypedDict, total=False): + OwnerId: Optional[OwnerId] + DomainName: DomainName + Region: Optional[Region] + + +class InboundCrossClusterSearchConnection(TypedDict, total=False): + SourceDomainInfo: Optional[DomainInformation] + DestinationDomainInfo: Optional[DomainInformation] + CrossClusterSearchConnectionId: Optional[CrossClusterSearchConnectionId] + ConnectionStatus: Optional[InboundCrossClusterSearchConnectionStatus] + + +class AcceptInboundCrossClusterSearchConnectionResponse(TypedDict, total=False): + CrossClusterSearchConnection: Optional[InboundCrossClusterSearchConnection] + + +UpdateTimestamp = datetime + + +class OptionStatus(TypedDict, total=False): + CreationDate: UpdateTimestamp + UpdateDate: UpdateTimestamp + UpdateVersion: Optional[UIntValue] + State: OptionState + PendingDeletion: Optional[Boolean] + + +class AccessPoliciesStatus(TypedDict, total=False): + Options: PolicyDocument + Status: OptionStatus + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] + + +class AddTagsRequest(ServiceRequest): + ARN: ARN + TagList: TagList + + +LimitValueList = List[LimitValue] + + +class AdditionalLimit(TypedDict, total=False): + LimitName: Optional[LimitName] + LimitValues: Optional[LimitValueList] + + +AdditionalLimitList = List[AdditionalLimit] +AdvancedOptions = Dict[String, String] + + +class AdvancedOptionsStatus(TypedDict, total=False): + Options: AdvancedOptions + Status: OptionStatus + + +DisableTimestamp = datetime + + +class SAMLIdp(TypedDict, total=False): + MetadataContent: SAMLMetadata + EntityId: SAMLEntityId + + +class SAMLOptionsOutput(TypedDict, total=False): + Enabled: Optional[Boolean] + Idp: Optional[SAMLIdp] + SubjectKey: Optional[String] + RolesKey: Optional[String] + SessionTimeoutMinutes: Optional[IntegerClass] + + +class AdvancedSecurityOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + InternalUserDatabaseEnabled: 
Optional[Boolean] + SAMLOptions: Optional[SAMLOptionsOutput] + AnonymousAuthDisableDate: Optional[DisableTimestamp] + AnonymousAuthEnabled: Optional[Boolean] + + +class SAMLOptionsInput(TypedDict, total=False): + Enabled: Optional[Boolean] + Idp: Optional[SAMLIdp] + MasterUserName: Optional[Username] + MasterBackendRole: Optional[BackendRole] + SubjectKey: Optional[String] + RolesKey: Optional[String] + SessionTimeoutMinutes: Optional[IntegerClass] + + +class MasterUserOptions(TypedDict, total=False): + MasterUserARN: Optional[ARN] + MasterUserName: Optional[Username] + MasterUserPassword: Optional[Password] + + +class AdvancedSecurityOptionsInput(TypedDict, total=False): + Enabled: Optional[Boolean] + InternalUserDatabaseEnabled: Optional[Boolean] + MasterUserOptions: Optional[MasterUserOptions] + SAMLOptions: Optional[SAMLOptionsInput] + AnonymousAuthEnabled: Optional[Boolean] + + +class AdvancedSecurityOptionsStatus(TypedDict, total=False): + Options: AdvancedSecurityOptions + Status: OptionStatus + + +class AssociatePackageRequest(ServiceRequest): + PackageID: PackageID + DomainName: DomainName + + +class ErrorDetails(TypedDict, total=False): + ErrorType: Optional[ErrorType] + ErrorMessage: Optional[ErrorMessage] + + +LastUpdated = datetime + + +class DomainPackageDetails(TypedDict, total=False): + PackageID: Optional[PackageID] + PackageName: Optional[PackageName] + PackageType: Optional[PackageType] + LastUpdated: Optional[LastUpdated] + DomainName: Optional[DomainName] + DomainPackageStatus: Optional[DomainPackageStatus] + PackageVersion: Optional[PackageVersion] + ReferencePath: Optional[ReferencePath] + ErrorDetails: Optional[ErrorDetails] + + +class AssociatePackageResponse(TypedDict, total=False): + DomainPackageDetails: Optional[DomainPackageDetails] + + +class AuthorizeVpcEndpointAccessRequest(ServiceRequest): + DomainName: DomainName + Account: AWSAccount + + +class AuthorizedPrincipal(TypedDict, total=False): + PrincipalType: Optional[PrincipalType] + Principal: Optional[String] + + +class AuthorizeVpcEndpointAccessResponse(TypedDict, total=False): + AuthorizedPrincipal: AuthorizedPrincipal + + +AuthorizedPrincipalList = List[AuthorizedPrincipal] +AutoTuneDate = datetime + + +class ScheduledAutoTuneDetails(TypedDict, total=False): + Date: Optional[AutoTuneDate] + ActionType: Optional[ScheduledAutoTuneActionType] + Action: Optional[ScheduledAutoTuneDescription] + Severity: Optional[ScheduledAutoTuneSeverityType] + + +class AutoTuneDetails(TypedDict, total=False): + ScheduledAutoTuneDetails: Optional[ScheduledAutoTuneDetails] + + +class AutoTune(TypedDict, total=False): + AutoTuneType: Optional[AutoTuneType] + AutoTuneDetails: Optional[AutoTuneDetails] + + +AutoTuneList = List[AutoTune] +DurationValue = int + + +class Duration(TypedDict, total=False): + Value: Optional[DurationValue] + Unit: Optional[TimeUnit] + + +StartAt = datetime + + +class AutoTuneMaintenanceSchedule(TypedDict, total=False): + StartAt: Optional[StartAt] + Duration: Optional[Duration] + CronExpressionForRecurrence: Optional[String] + + +AutoTuneMaintenanceScheduleList = List[AutoTuneMaintenanceSchedule] + + +class AutoTuneOptions(TypedDict, total=False): + DesiredState: Optional[AutoTuneDesiredState] + RollbackOnDisable: Optional[RollbackOnDisable] + MaintenanceSchedules: Optional[AutoTuneMaintenanceScheduleList] + + +class AutoTuneOptionsInput(TypedDict, total=False): + DesiredState: Optional[AutoTuneDesiredState] + MaintenanceSchedules: Optional[AutoTuneMaintenanceScheduleList] + + +class 
AutoTuneOptionsOutput(TypedDict, total=False): + State: Optional[AutoTuneState] + ErrorMessage: Optional[String] + + +class AutoTuneStatus(TypedDict, total=False): + CreationDate: UpdateTimestamp + UpdateDate: UpdateTimestamp + UpdateVersion: Optional[UIntValue] + State: AutoTuneState + ErrorMessage: Optional[String] + PendingDeletion: Optional[Boolean] + + +class AutoTuneOptionsStatus(TypedDict, total=False): + Options: Optional[AutoTuneOptions] + Status: Optional[AutoTuneStatus] + + +class CancelDomainConfigChangeRequest(ServiceRequest): + DomainName: DomainName + DryRun: Optional[DryRun] + + +class CancelledChangeProperty(TypedDict, total=False): + PropertyName: Optional[String] + CancelledValue: Optional[String] + ActiveValue: Optional[String] + + +CancelledChangePropertyList = List[CancelledChangeProperty] +GUIDList = List[GUID] + + +class CancelDomainConfigChangeResponse(TypedDict, total=False): + DryRun: Optional[DryRun] + CancelledChangeIds: Optional[GUIDList] + CancelledChangeProperties: Optional[CancelledChangePropertyList] + + +class CancelElasticsearchServiceSoftwareUpdateRequest(ServiceRequest): + DomainName: DomainName + + +DeploymentCloseDateTimeStamp = datetime + + +class ServiceSoftwareOptions(TypedDict, total=False): + CurrentVersion: Optional[String] + NewVersion: Optional[String] + UpdateAvailable: Optional[Boolean] + Cancellable: Optional[Boolean] + UpdateStatus: Optional[DeploymentStatus] + Description: Optional[String] + AutomatedUpdateDate: Optional[DeploymentCloseDateTimeStamp] + OptionalDeployment: Optional[Boolean] + + +class CancelElasticsearchServiceSoftwareUpdateResponse(TypedDict, total=False): + ServiceSoftwareOptions: Optional[ServiceSoftwareOptions] + + +class ChangeProgressDetails(TypedDict, total=False): + ChangeId: Optional[GUID] + Message: Optional[Message] + ConfigChangeStatus: Optional[ConfigChangeStatus] + StartTime: Optional[UpdateTimestamp] + LastUpdatedTime: Optional[UpdateTimestamp] + InitiatedBy: Optional[InitiatedBy] + + +class ChangeProgressStage(TypedDict, total=False): + Name: Optional[ChangeProgressStageName] + Status: Optional[ChangeProgressStageStatus] + Description: Optional[Description] + LastUpdated: Optional[LastUpdated] + + +ChangeProgressStageList = List[ChangeProgressStage] +StringList = List[String] + + +class ChangeProgressStatusDetails(TypedDict, total=False): + ChangeId: Optional[GUID] + StartTime: Optional[UpdateTimestamp] + Status: Optional[OverallChangeStatus] + PendingProperties: Optional[StringList] + CompletedProperties: Optional[StringList] + TotalNumberOfStages: Optional[TotalNumberOfStages] + ChangeProgressStages: Optional[ChangeProgressStageList] + ConfigChangeStatus: Optional[ConfigChangeStatus] + LastUpdatedTime: Optional[UpdateTimestamp] + InitiatedBy: Optional[InitiatedBy] + + +class CognitoOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + UserPoolId: Optional[UserPoolId] + IdentityPoolId: Optional[IdentityPoolId] + RoleArn: Optional[RoleArn] + + +class CognitoOptionsStatus(TypedDict, total=False): + Options: CognitoOptions + Status: OptionStatus + + +class ColdStorageOptions(TypedDict, total=False): + Enabled: Boolean + + +ElasticsearchVersionList = List[ElasticsearchVersionString] + + +class CompatibleVersionsMap(TypedDict, total=False): + SourceVersion: Optional[ElasticsearchVersionString] + TargetVersions: Optional[ElasticsearchVersionList] + + +CompatibleElasticsearchVersionsList = List[CompatibleVersionsMap] + + +class DomainEndpointOptions(TypedDict, total=False): + EnforceHTTPS: 
Optional[Boolean] + TLSSecurityPolicy: Optional[TLSSecurityPolicy] + CustomEndpointEnabled: Optional[Boolean] + CustomEndpoint: Optional[DomainNameFqdn] + CustomEndpointCertificateArn: Optional[ARN] + + +class LogPublishingOption(TypedDict, total=False): + CloudWatchLogsLogGroupArn: Optional[CloudWatchLogsLogGroupArn] + Enabled: Optional[Boolean] + + +LogPublishingOptions = Dict[LogType, LogPublishingOption] + + +class NodeToNodeEncryptionOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + + +class EncryptionAtRestOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + KmsKeyId: Optional[KmsKeyId] + + +class VPCOptions(TypedDict, total=False): + SubnetIds: Optional[StringList] + SecurityGroupIds: Optional[StringList] + + +class SnapshotOptions(TypedDict, total=False): + AutomatedSnapshotStartHour: Optional[IntegerClass] + + +class EBSOptions(TypedDict, total=False): + EBSEnabled: Optional[Boolean] + VolumeType: Optional[VolumeType] + VolumeSize: Optional[IntegerClass] + Iops: Optional[IntegerClass] + Throughput: Optional[IntegerClass] + + +class ZoneAwarenessConfig(TypedDict, total=False): + AvailabilityZoneCount: Optional[IntegerClass] + + +class ElasticsearchClusterConfig(TypedDict, total=False): + InstanceType: Optional[ESPartitionInstanceType] + InstanceCount: Optional[IntegerClass] + DedicatedMasterEnabled: Optional[Boolean] + ZoneAwarenessEnabled: Optional[Boolean] + ZoneAwarenessConfig: Optional[ZoneAwarenessConfig] + DedicatedMasterType: Optional[ESPartitionInstanceType] + DedicatedMasterCount: Optional[IntegerClass] + WarmEnabled: Optional[Boolean] + WarmType: Optional[ESWarmPartitionInstanceType] + WarmCount: Optional[IntegerClass] + ColdStorageOptions: Optional[ColdStorageOptions] + + +class CreateElasticsearchDomainRequest(ServiceRequest): + DomainName: DomainName + ElasticsearchVersion: Optional[ElasticsearchVersionString] + ElasticsearchClusterConfig: Optional[ElasticsearchClusterConfig] + EBSOptions: Optional[EBSOptions] + AccessPolicies: Optional[PolicyDocument] + SnapshotOptions: Optional[SnapshotOptions] + VPCOptions: Optional[VPCOptions] + CognitoOptions: Optional[CognitoOptions] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptions] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptions] + AdvancedOptions: Optional[AdvancedOptions] + LogPublishingOptions: Optional[LogPublishingOptions] + DomainEndpointOptions: Optional[DomainEndpointOptions] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptionsInput] + AutoTuneOptions: Optional[AutoTuneOptionsInput] + TagList: Optional[TagList] + + +class ModifyingProperties(TypedDict, total=False): + Name: Optional[String] + ActiveValue: Optional[String] + PendingValue: Optional[String] + ValueType: Optional[PropertyValueType] + + +ModifyingPropertiesList = List[ModifyingProperties] + + +class VPCDerivedInfo(TypedDict, total=False): + VPCId: Optional[String] + SubnetIds: Optional[StringList] + AvailabilityZones: Optional[StringList] + SecurityGroupIds: Optional[StringList] + + +EndpointsMap = Dict[String, ServiceUrl] + + +class ElasticsearchDomainStatus(TypedDict, total=False): + DomainId: DomainId + DomainName: DomainName + ARN: ARN + Created: Optional[Boolean] + Deleted: Optional[Boolean] + Endpoint: Optional[ServiceUrl] + Endpoints: Optional[EndpointsMap] + Processing: Optional[Boolean] + UpgradeProcessing: Optional[Boolean] + ElasticsearchVersion: Optional[ElasticsearchVersionString] + ElasticsearchClusterConfig: ElasticsearchClusterConfig + EBSOptions: Optional[EBSOptions] + 
AccessPolicies: Optional[PolicyDocument] + SnapshotOptions: Optional[SnapshotOptions] + VPCOptions: Optional[VPCDerivedInfo] + CognitoOptions: Optional[CognitoOptions] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptions] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptions] + AdvancedOptions: Optional[AdvancedOptions] + LogPublishingOptions: Optional[LogPublishingOptions] + ServiceSoftwareOptions: Optional[ServiceSoftwareOptions] + DomainEndpointOptions: Optional[DomainEndpointOptions] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptions] + AutoTuneOptions: Optional[AutoTuneOptionsOutput] + ChangeProgressDetails: Optional[ChangeProgressDetails] + DomainProcessingStatus: Optional[DomainProcessingStatusType] + ModifyingProperties: Optional[ModifyingPropertiesList] + + +class CreateElasticsearchDomainResponse(TypedDict, total=False): + DomainStatus: Optional[ElasticsearchDomainStatus] + + +class CreateOutboundCrossClusterSearchConnectionRequest(ServiceRequest): + SourceDomainInfo: DomainInformation + DestinationDomainInfo: DomainInformation + ConnectionAlias: ConnectionAlias + + +class OutboundCrossClusterSearchConnectionStatus(TypedDict, total=False): + StatusCode: Optional[OutboundCrossClusterSearchConnectionStatusCode] + Message: Optional[CrossClusterSearchConnectionStatusMessage] + + +class CreateOutboundCrossClusterSearchConnectionResponse(TypedDict, total=False): + SourceDomainInfo: Optional[DomainInformation] + DestinationDomainInfo: Optional[DomainInformation] + ConnectionAlias: Optional[ConnectionAlias] + ConnectionStatus: Optional[OutboundCrossClusterSearchConnectionStatus] + CrossClusterSearchConnectionId: Optional[CrossClusterSearchConnectionId] + + +class PackageSource(TypedDict, total=False): + S3BucketName: Optional[S3BucketName] + S3Key: Optional[S3Key] + + +class CreatePackageRequest(ServiceRequest): + PackageName: PackageName + PackageType: PackageType + PackageDescription: Optional[PackageDescription] + PackageSource: PackageSource + + +CreatedAt = datetime + + +class PackageDetails(TypedDict, total=False): + PackageID: Optional[PackageID] + PackageName: Optional[PackageName] + PackageType: Optional[PackageType] + PackageDescription: Optional[PackageDescription] + PackageStatus: Optional[PackageStatus] + CreatedAt: Optional[CreatedAt] + LastUpdatedAt: Optional[LastUpdated] + AvailablePackageVersion: Optional[PackageVersion] + ErrorDetails: Optional[ErrorDetails] + + +class CreatePackageResponse(TypedDict, total=False): + PackageDetails: Optional[PackageDetails] + + +class CreateVpcEndpointRequest(ServiceRequest): + DomainArn: DomainArn + VpcOptions: VPCOptions + ClientToken: Optional[ClientToken] + + +class VpcEndpoint(TypedDict, total=False): + VpcEndpointId: Optional[VpcEndpointId] + VpcEndpointOwner: Optional[AWSAccount] + DomainArn: Optional[DomainArn] + VpcOptions: Optional[VPCDerivedInfo] + Status: Optional[VpcEndpointStatus] + Endpoint: Optional[Endpoint] + + +class CreateVpcEndpointResponse(TypedDict, total=False): + VpcEndpoint: VpcEndpoint + + +class DeleteElasticsearchDomainRequest(ServiceRequest): + DomainName: DomainName + + +class DeleteElasticsearchDomainResponse(TypedDict, total=False): + DomainStatus: Optional[ElasticsearchDomainStatus] + + +class DeleteInboundCrossClusterSearchConnectionRequest(ServiceRequest): + CrossClusterSearchConnectionId: CrossClusterSearchConnectionId + + +class DeleteInboundCrossClusterSearchConnectionResponse(TypedDict, total=False): + CrossClusterSearchConnection: 
Optional[InboundCrossClusterSearchConnection] + + +class DeleteOutboundCrossClusterSearchConnectionRequest(ServiceRequest): + CrossClusterSearchConnectionId: CrossClusterSearchConnectionId + + +class OutboundCrossClusterSearchConnection(TypedDict, total=False): + SourceDomainInfo: Optional[DomainInformation] + DestinationDomainInfo: Optional[DomainInformation] + CrossClusterSearchConnectionId: Optional[CrossClusterSearchConnectionId] + ConnectionAlias: Optional[ConnectionAlias] + ConnectionStatus: Optional[OutboundCrossClusterSearchConnectionStatus] + + +class DeleteOutboundCrossClusterSearchConnectionResponse(TypedDict, total=False): + CrossClusterSearchConnection: Optional[OutboundCrossClusterSearchConnection] + + +class DeletePackageRequest(ServiceRequest): + PackageID: PackageID + + +class DeletePackageResponse(TypedDict, total=False): + PackageDetails: Optional[PackageDetails] + + +class DeleteVpcEndpointRequest(ServiceRequest): + VpcEndpointId: VpcEndpointId + + +class VpcEndpointSummary(TypedDict, total=False): + VpcEndpointId: Optional[VpcEndpointId] + VpcEndpointOwner: Optional[String] + DomainArn: Optional[DomainArn] + Status: Optional[VpcEndpointStatus] + + +class DeleteVpcEndpointResponse(TypedDict, total=False): + VpcEndpointSummary: VpcEndpointSummary + + +class DescribeDomainAutoTunesRequest(ServiceRequest): + DomainName: DomainName + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class DescribeDomainAutoTunesResponse(TypedDict, total=False): + AutoTunes: Optional[AutoTuneList] + NextToken: Optional[NextToken] + + +class DescribeDomainChangeProgressRequest(ServiceRequest): + DomainName: DomainName + ChangeId: Optional[GUID] + + +class DescribeDomainChangeProgressResponse(TypedDict, total=False): + ChangeProgressStatus: Optional[ChangeProgressStatusDetails] + + +class DescribeElasticsearchDomainConfigRequest(ServiceRequest): + DomainName: DomainName + + +class DomainEndpointOptionsStatus(TypedDict, total=False): + Options: DomainEndpointOptions + Status: OptionStatus + + +class LogPublishingOptionsStatus(TypedDict, total=False): + Options: Optional[LogPublishingOptions] + Status: Optional[OptionStatus] + + +class NodeToNodeEncryptionOptionsStatus(TypedDict, total=False): + Options: NodeToNodeEncryptionOptions + Status: OptionStatus + + +class EncryptionAtRestOptionsStatus(TypedDict, total=False): + Options: EncryptionAtRestOptions + Status: OptionStatus + + +class VPCDerivedInfoStatus(TypedDict, total=False): + Options: VPCDerivedInfo + Status: OptionStatus + + +class SnapshotOptionsStatus(TypedDict, total=False): + Options: SnapshotOptions + Status: OptionStatus + + +class EBSOptionsStatus(TypedDict, total=False): + Options: EBSOptions + Status: OptionStatus + + +class ElasticsearchClusterConfigStatus(TypedDict, total=False): + Options: ElasticsearchClusterConfig + Status: OptionStatus + + +class ElasticsearchVersionStatus(TypedDict, total=False): + Options: ElasticsearchVersionString + Status: OptionStatus + + +class ElasticsearchDomainConfig(TypedDict, total=False): + ElasticsearchVersion: Optional[ElasticsearchVersionStatus] + ElasticsearchClusterConfig: Optional[ElasticsearchClusterConfigStatus] + EBSOptions: Optional[EBSOptionsStatus] + AccessPolicies: Optional[AccessPoliciesStatus] + SnapshotOptions: Optional[SnapshotOptionsStatus] + VPCOptions: Optional[VPCDerivedInfoStatus] + CognitoOptions: Optional[CognitoOptionsStatus] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptionsStatus] + NodeToNodeEncryptionOptions: 
Optional[NodeToNodeEncryptionOptionsStatus] + AdvancedOptions: Optional[AdvancedOptionsStatus] + LogPublishingOptions: Optional[LogPublishingOptionsStatus] + DomainEndpointOptions: Optional[DomainEndpointOptionsStatus] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptionsStatus] + AutoTuneOptions: Optional[AutoTuneOptionsStatus] + ChangeProgressDetails: Optional[ChangeProgressDetails] + ModifyingProperties: Optional[ModifyingPropertiesList] + + +class DescribeElasticsearchDomainConfigResponse(TypedDict, total=False): + DomainConfig: ElasticsearchDomainConfig + + +class DescribeElasticsearchDomainRequest(ServiceRequest): + DomainName: DomainName + + +class DescribeElasticsearchDomainResponse(TypedDict, total=False): + DomainStatus: ElasticsearchDomainStatus + + +DomainNameList = List[DomainName] + + +class DescribeElasticsearchDomainsRequest(ServiceRequest): + DomainNames: DomainNameList + + +ElasticsearchDomainStatusList = List[ElasticsearchDomainStatus] + + +class DescribeElasticsearchDomainsResponse(TypedDict, total=False): + DomainStatusList: ElasticsearchDomainStatusList + + +class DescribeElasticsearchInstanceTypeLimitsRequest(ServiceRequest): + DomainName: Optional[DomainName] + InstanceType: ESPartitionInstanceType + ElasticsearchVersion: ElasticsearchVersionString + + +class InstanceCountLimits(TypedDict, total=False): + MinimumInstanceCount: Optional[MinimumInstanceCount] + MaximumInstanceCount: Optional[MaximumInstanceCount] + + +class InstanceLimits(TypedDict, total=False): + InstanceCountLimits: Optional[InstanceCountLimits] + + +class StorageTypeLimit(TypedDict, total=False): + LimitName: Optional[LimitName] + LimitValues: Optional[LimitValueList] + + +StorageTypeLimitList = List[StorageTypeLimit] + + +class StorageType(TypedDict, total=False): + StorageTypeName: Optional[StorageTypeName] + StorageSubTypeName: Optional[StorageSubTypeName] + StorageTypeLimits: Optional[StorageTypeLimitList] + + +StorageTypeList = List[StorageType] + + +class Limits(TypedDict, total=False): + StorageTypes: Optional[StorageTypeList] + InstanceLimits: Optional[InstanceLimits] + AdditionalLimits: Optional[AdditionalLimitList] + + +LimitsByRole = Dict[InstanceRole, Limits] + + +class DescribeElasticsearchInstanceTypeLimitsResponse(TypedDict, total=False): + LimitsByRole: Optional[LimitsByRole] + + +ValueStringList = List[NonEmptyString] + + +class Filter(TypedDict, total=False): + Name: Optional[NonEmptyString] + Values: Optional[ValueStringList] + + +FilterList = List[Filter] + + +class DescribeInboundCrossClusterSearchConnectionsRequest(ServiceRequest): + Filters: Optional[FilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +InboundCrossClusterSearchConnections = List[InboundCrossClusterSearchConnection] + + +class DescribeInboundCrossClusterSearchConnectionsResponse(TypedDict, total=False): + CrossClusterSearchConnections: Optional[InboundCrossClusterSearchConnections] + NextToken: Optional[NextToken] + + +class DescribeOutboundCrossClusterSearchConnectionsRequest(ServiceRequest): + Filters: Optional[FilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +OutboundCrossClusterSearchConnections = List[OutboundCrossClusterSearchConnection] + + +class DescribeOutboundCrossClusterSearchConnectionsResponse(TypedDict, total=False): + CrossClusterSearchConnections: Optional[OutboundCrossClusterSearchConnections] + NextToken: Optional[NextToken] + + +DescribePackagesFilterValues = List[DescribePackagesFilterValue] + + +class 
DescribePackagesFilter(TypedDict, total=False): + Name: Optional[DescribePackagesFilterName] + Value: Optional[DescribePackagesFilterValues] + + +DescribePackagesFilterList = List[DescribePackagesFilter] + + +class DescribePackagesRequest(ServiceRequest): + Filters: Optional[DescribePackagesFilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +PackageDetailsList = List[PackageDetails] + + +class DescribePackagesResponse(TypedDict, total=False): + PackageDetailsList: Optional[PackageDetailsList] + NextToken: Optional[String] + + +class DescribeReservedElasticsearchInstanceOfferingsRequest(ServiceRequest): + ReservedElasticsearchInstanceOfferingId: Optional[GUID] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class RecurringCharge(TypedDict, total=False): + RecurringChargeAmount: Optional[Double] + RecurringChargeFrequency: Optional[String] + + +RecurringChargeList = List[RecurringCharge] + + +class ReservedElasticsearchInstanceOffering(TypedDict, total=False): + ReservedElasticsearchInstanceOfferingId: Optional[GUID] + ElasticsearchInstanceType: Optional[ESPartitionInstanceType] + Duration: Optional[Integer] + FixedPrice: Optional[Double] + UsagePrice: Optional[Double] + CurrencyCode: Optional[String] + PaymentOption: Optional[ReservedElasticsearchInstancePaymentOption] + RecurringCharges: Optional[RecurringChargeList] + + +ReservedElasticsearchInstanceOfferingList = List[ReservedElasticsearchInstanceOffering] + + +class DescribeReservedElasticsearchInstanceOfferingsResponse(TypedDict, total=False): + NextToken: Optional[NextToken] + ReservedElasticsearchInstanceOfferings: Optional[ReservedElasticsearchInstanceOfferingList] + + +class DescribeReservedElasticsearchInstancesRequest(ServiceRequest): + ReservedElasticsearchInstanceId: Optional[GUID] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ReservedElasticsearchInstance(TypedDict, total=False): + ReservationName: Optional[ReservationToken] + ReservedElasticsearchInstanceId: Optional[GUID] + ReservedElasticsearchInstanceOfferingId: Optional[String] + ElasticsearchInstanceType: Optional[ESPartitionInstanceType] + StartTime: Optional[UpdateTimestamp] + Duration: Optional[Integer] + FixedPrice: Optional[Double] + UsagePrice: Optional[Double] + CurrencyCode: Optional[String] + ElasticsearchInstanceCount: Optional[Integer] + State: Optional[String] + PaymentOption: Optional[ReservedElasticsearchInstancePaymentOption] + RecurringCharges: Optional[RecurringChargeList] + + +ReservedElasticsearchInstanceList = List[ReservedElasticsearchInstance] + + +class DescribeReservedElasticsearchInstancesResponse(TypedDict, total=False): + NextToken: Optional[String] + ReservedElasticsearchInstances: Optional[ReservedElasticsearchInstanceList] + + +VpcEndpointIdList = List[VpcEndpointId] + + +class DescribeVpcEndpointsRequest(ServiceRequest): + VpcEndpointIds: VpcEndpointIdList + + +class VpcEndpointError(TypedDict, total=False): + VpcEndpointId: Optional[VpcEndpointId] + ErrorCode: Optional[VpcEndpointErrorCode] + ErrorMessage: Optional[String] + + +VpcEndpointErrorList = List[VpcEndpointError] +VpcEndpoints = List[VpcEndpoint] + + +class DescribeVpcEndpointsResponse(TypedDict, total=False): + VpcEndpoints: VpcEndpoints + VpcEndpointErrors: VpcEndpointErrorList + + +class DissociatePackageRequest(ServiceRequest): + PackageID: PackageID + DomainName: DomainName + + +class DissociatePackageResponse(TypedDict, total=False): + DomainPackageDetails: 
Optional[DomainPackageDetails] + + +class DomainInfo(TypedDict, total=False): + DomainName: Optional[DomainName] + EngineType: Optional[EngineType] + + +DomainInfoList = List[DomainInfo] +DomainPackageDetailsList = List[DomainPackageDetails] + + +class DryRunResults(TypedDict, total=False): + DeploymentType: Optional[DeploymentType] + Message: Optional[Message] + + +ElasticsearchInstanceTypeList = List[ESPartitionInstanceType] + + +class GetCompatibleElasticsearchVersionsRequest(ServiceRequest): + DomainName: Optional[DomainName] + + +class GetCompatibleElasticsearchVersionsResponse(TypedDict, total=False): + CompatibleElasticsearchVersions: Optional[CompatibleElasticsearchVersionsList] + + +class GetPackageVersionHistoryRequest(ServiceRequest): + PackageID: PackageID + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class PackageVersionHistory(TypedDict, total=False): + PackageVersion: Optional[PackageVersion] + CommitMessage: Optional[CommitMessage] + CreatedAt: Optional[CreatedAt] + + +PackageVersionHistoryList = List[PackageVersionHistory] + + +class GetPackageVersionHistoryResponse(TypedDict, total=False): + PackageID: Optional[PackageID] + PackageVersionHistoryList: Optional[PackageVersionHistoryList] + NextToken: Optional[String] + + +class GetUpgradeHistoryRequest(ServiceRequest): + DomainName: DomainName + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +Issues = List[Issue] + + +class UpgradeStepItem(TypedDict, total=False): + UpgradeStep: Optional[UpgradeStep] + UpgradeStepStatus: Optional[UpgradeStatus] + Issues: Optional[Issues] + ProgressPercent: Optional[Double] + + +UpgradeStepsList = List[UpgradeStepItem] +StartTimestamp = datetime + + +class UpgradeHistory(TypedDict, total=False): + UpgradeName: Optional[UpgradeName] + StartTimestamp: Optional[StartTimestamp] + UpgradeStatus: Optional[UpgradeStatus] + StepsList: Optional[UpgradeStepsList] + + +UpgradeHistoryList = List[UpgradeHistory] + + +class GetUpgradeHistoryResponse(TypedDict, total=False): + UpgradeHistories: Optional[UpgradeHistoryList] + NextToken: Optional[String] + + +class GetUpgradeStatusRequest(ServiceRequest): + DomainName: DomainName + + +class GetUpgradeStatusResponse(TypedDict, total=False): + UpgradeStep: Optional[UpgradeStep] + StepStatus: Optional[UpgradeStatus] + UpgradeName: Optional[UpgradeName] + + +class ListDomainNamesRequest(ServiceRequest): + EngineType: Optional[EngineType] + + +class ListDomainNamesResponse(TypedDict, total=False): + DomainNames: Optional[DomainInfoList] + + +class ListDomainsForPackageRequest(ServiceRequest): + PackageID: PackageID + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListDomainsForPackageResponse(TypedDict, total=False): + DomainPackageDetailsList: Optional[DomainPackageDetailsList] + NextToken: Optional[String] + + +class ListElasticsearchInstanceTypesRequest(ServiceRequest): + ElasticsearchVersion: ElasticsearchVersionString + DomainName: Optional[DomainName] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListElasticsearchInstanceTypesResponse(TypedDict, total=False): + ElasticsearchInstanceTypes: Optional[ElasticsearchInstanceTypeList] + NextToken: Optional[NextToken] + + +class ListElasticsearchVersionsRequest(ServiceRequest): + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListElasticsearchVersionsResponse(TypedDict, total=False): + ElasticsearchVersions: Optional[ElasticsearchVersionList] + NextToken: Optional[NextToken] 
+ + +class ListPackagesForDomainRequest(ServiceRequest): + DomainName: DomainName + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListPackagesForDomainResponse(TypedDict, total=False): + DomainPackageDetailsList: Optional[DomainPackageDetailsList] + NextToken: Optional[String] + + +class ListTagsRequest(ServiceRequest): + ARN: ARN + + +class ListTagsResponse(TypedDict, total=False): + TagList: Optional[TagList] + + +class ListVpcEndpointAccessRequest(ServiceRequest): + DomainName: DomainName + NextToken: Optional[NextToken] + + +class ListVpcEndpointAccessResponse(TypedDict, total=False): + AuthorizedPrincipalList: AuthorizedPrincipalList + NextToken: NextToken + + +class ListVpcEndpointsForDomainRequest(ServiceRequest): + DomainName: DomainName + NextToken: Optional[NextToken] + + +VpcEndpointSummaryList = List[VpcEndpointSummary] + + +class ListVpcEndpointsForDomainResponse(TypedDict, total=False): + VpcEndpointSummaryList: VpcEndpointSummaryList + NextToken: NextToken + + +class ListVpcEndpointsRequest(ServiceRequest): + NextToken: Optional[NextToken] + + +class ListVpcEndpointsResponse(TypedDict, total=False): + VpcEndpointSummaryList: VpcEndpointSummaryList + NextToken: NextToken + + +class PurchaseReservedElasticsearchInstanceOfferingRequest(ServiceRequest): + ReservedElasticsearchInstanceOfferingId: GUID + ReservationName: ReservationToken + InstanceCount: Optional[InstanceCount] + + +class PurchaseReservedElasticsearchInstanceOfferingResponse(TypedDict, total=False): + ReservedElasticsearchInstanceId: Optional[GUID] + ReservationName: Optional[ReservationToken] + + +class RejectInboundCrossClusterSearchConnectionRequest(ServiceRequest): + CrossClusterSearchConnectionId: CrossClusterSearchConnectionId + + +class RejectInboundCrossClusterSearchConnectionResponse(TypedDict, total=False): + CrossClusterSearchConnection: Optional[InboundCrossClusterSearchConnection] + + +class RemoveTagsRequest(ServiceRequest): + ARN: ARN + TagKeys: StringList + + +class RevokeVpcEndpointAccessRequest(ServiceRequest): + DomainName: DomainName + Account: AWSAccount + + +class RevokeVpcEndpointAccessResponse(TypedDict, total=False): + pass + + +class StartElasticsearchServiceSoftwareUpdateRequest(ServiceRequest): + DomainName: DomainName + + +class StartElasticsearchServiceSoftwareUpdateResponse(TypedDict, total=False): + ServiceSoftwareOptions: Optional[ServiceSoftwareOptions] + + +class UpdateElasticsearchDomainConfigRequest(ServiceRequest): + DomainName: DomainName + ElasticsearchClusterConfig: Optional[ElasticsearchClusterConfig] + EBSOptions: Optional[EBSOptions] + SnapshotOptions: Optional[SnapshotOptions] + VPCOptions: Optional[VPCOptions] + CognitoOptions: Optional[CognitoOptions] + AdvancedOptions: Optional[AdvancedOptions] + AccessPolicies: Optional[PolicyDocument] + LogPublishingOptions: Optional[LogPublishingOptions] + DomainEndpointOptions: Optional[DomainEndpointOptions] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptionsInput] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptions] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptions] + AutoTuneOptions: Optional[AutoTuneOptions] + DryRun: Optional[DryRun] + + +class UpdateElasticsearchDomainConfigResponse(TypedDict, total=False): + DomainConfig: ElasticsearchDomainConfig + DryRunResults: Optional[DryRunResults] + + +class UpdatePackageRequest(ServiceRequest): + PackageID: PackageID + PackageSource: PackageSource + PackageDescription: Optional[PackageDescription] + CommitMessage: 
Optional[CommitMessage] + + +class UpdatePackageResponse(TypedDict, total=False): + PackageDetails: Optional[PackageDetails] + + +class UpdateVpcEndpointRequest(ServiceRequest): + VpcEndpointId: VpcEndpointId + VpcOptions: VPCOptions + + +class UpdateVpcEndpointResponse(TypedDict, total=False): + VpcEndpoint: VpcEndpoint + + +class UpgradeElasticsearchDomainRequest(ServiceRequest): + DomainName: DomainName + TargetVersion: ElasticsearchVersionString + PerformCheckOnly: Optional[Boolean] + + +class UpgradeElasticsearchDomainResponse(TypedDict, total=False): + DomainName: Optional[DomainName] + TargetVersion: Optional[ElasticsearchVersionString] + PerformCheckOnly: Optional[Boolean] + ChangeProgressDetails: Optional[ChangeProgressDetails] + + +class EsApi: + service = "es" + version = "2015-01-01" + + @handler("AcceptInboundCrossClusterSearchConnection") + def accept_inbound_cross_cluster_search_connection( + self, + context: RequestContext, + cross_cluster_search_connection_id: CrossClusterSearchConnectionId, + **kwargs, + ) -> AcceptInboundCrossClusterSearchConnectionResponse: + raise NotImplementedError + + @handler("AddTags") + def add_tags(self, context: RequestContext, arn: ARN, tag_list: TagList, **kwargs) -> None: + raise NotImplementedError + + @handler("AssociatePackage") + def associate_package( + self, context: RequestContext, package_id: PackageID, domain_name: DomainName, **kwargs + ) -> AssociatePackageResponse: + raise NotImplementedError + + @handler("AuthorizeVpcEndpointAccess") + def authorize_vpc_endpoint_access( + self, context: RequestContext, domain_name: DomainName, account: AWSAccount, **kwargs + ) -> AuthorizeVpcEndpointAccessResponse: + raise NotImplementedError + + @handler("CancelDomainConfigChange") + def cancel_domain_config_change( + self, + context: RequestContext, + domain_name: DomainName, + dry_run: DryRun | None = None, + **kwargs, + ) -> CancelDomainConfigChangeResponse: + raise NotImplementedError + + @handler("CancelElasticsearchServiceSoftwareUpdate") + def cancel_elasticsearch_service_software_update( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> CancelElasticsearchServiceSoftwareUpdateResponse: + raise NotImplementedError + + @handler("CreateElasticsearchDomain") + def create_elasticsearch_domain( + self, + context: RequestContext, + domain_name: DomainName, + elasticsearch_version: ElasticsearchVersionString | None = None, + elasticsearch_cluster_config: ElasticsearchClusterConfig | None = None, + ebs_options: EBSOptions | None = None, + access_policies: PolicyDocument | None = None, + snapshot_options: SnapshotOptions | None = None, + vpc_options: VPCOptions | None = None, + cognito_options: CognitoOptions | None = None, + encryption_at_rest_options: EncryptionAtRestOptions | None = None, + node_to_node_encryption_options: NodeToNodeEncryptionOptions | None = None, + advanced_options: AdvancedOptions | None = None, + log_publishing_options: LogPublishingOptions | None = None, + domain_endpoint_options: DomainEndpointOptions | None = None, + advanced_security_options: AdvancedSecurityOptionsInput | None = None, + auto_tune_options: AutoTuneOptionsInput | None = None, + tag_list: TagList | None = None, + **kwargs, + ) -> CreateElasticsearchDomainResponse: + raise NotImplementedError + + @handler("CreateOutboundCrossClusterSearchConnection") + def create_outbound_cross_cluster_search_connection( + self, + context: RequestContext, + source_domain_info: DomainInformation, + destination_domain_info: DomainInformation, + 
connection_alias: ConnectionAlias, + **kwargs, + ) -> CreateOutboundCrossClusterSearchConnectionResponse: + raise NotImplementedError + + @handler("CreatePackage") + def create_package( + self, + context: RequestContext, + package_name: PackageName, + package_type: PackageType, + package_source: PackageSource, + package_description: PackageDescription | None = None, + **kwargs, + ) -> CreatePackageResponse: + raise NotImplementedError + + @handler("CreateVpcEndpoint") + def create_vpc_endpoint( + self, + context: RequestContext, + domain_arn: DomainArn, + vpc_options: VPCOptions, + client_token: ClientToken | None = None, + **kwargs, + ) -> CreateVpcEndpointResponse: + raise NotImplementedError + + @handler("DeleteElasticsearchDomain") + def delete_elasticsearch_domain( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DeleteElasticsearchDomainResponse: + raise NotImplementedError + + @handler("DeleteElasticsearchServiceRole") + def delete_elasticsearch_service_role(self, context: RequestContext, **kwargs) -> None: + raise NotImplementedError + + @handler("DeleteInboundCrossClusterSearchConnection") + def delete_inbound_cross_cluster_search_connection( + self, + context: RequestContext, + cross_cluster_search_connection_id: CrossClusterSearchConnectionId, + **kwargs, + ) -> DeleteInboundCrossClusterSearchConnectionResponse: + raise NotImplementedError + + @handler("DeleteOutboundCrossClusterSearchConnection") + def delete_outbound_cross_cluster_search_connection( + self, + context: RequestContext, + cross_cluster_search_connection_id: CrossClusterSearchConnectionId, + **kwargs, + ) -> DeleteOutboundCrossClusterSearchConnectionResponse: + raise NotImplementedError + + @handler("DeletePackage") + def delete_package( + self, context: RequestContext, package_id: PackageID, **kwargs + ) -> DeletePackageResponse: + raise NotImplementedError + + @handler("DeleteVpcEndpoint") + def delete_vpc_endpoint( + self, context: RequestContext, vpc_endpoint_id: VpcEndpointId, **kwargs + ) -> DeleteVpcEndpointResponse: + raise NotImplementedError + + @handler("DescribeDomainAutoTunes") + def describe_domain_auto_tunes( + self, + context: RequestContext, + domain_name: DomainName, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeDomainAutoTunesResponse: + raise NotImplementedError + + @handler("DescribeDomainChangeProgress") + def describe_domain_change_progress( + self, + context: RequestContext, + domain_name: DomainName, + change_id: GUID | None = None, + **kwargs, + ) -> DescribeDomainChangeProgressResponse: + raise NotImplementedError + + @handler("DescribeElasticsearchDomain") + def describe_elasticsearch_domain( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeElasticsearchDomainResponse: + raise NotImplementedError + + @handler("DescribeElasticsearchDomainConfig") + def describe_elasticsearch_domain_config( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeElasticsearchDomainConfigResponse: + raise NotImplementedError + + @handler("DescribeElasticsearchDomains") + def describe_elasticsearch_domains( + self, context: RequestContext, domain_names: DomainNameList, **kwargs + ) -> DescribeElasticsearchDomainsResponse: + raise NotImplementedError + + @handler("DescribeElasticsearchInstanceTypeLimits") + def describe_elasticsearch_instance_type_limits( + self, + context: RequestContext, + instance_type: ESPartitionInstanceType, + elasticsearch_version: 
ElasticsearchVersionString, + domain_name: DomainName | None = None, + **kwargs, + ) -> DescribeElasticsearchInstanceTypeLimitsResponse: + raise NotImplementedError + + @handler("DescribeInboundCrossClusterSearchConnections") + def describe_inbound_cross_cluster_search_connections( + self, + context: RequestContext, + filters: FilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeInboundCrossClusterSearchConnectionsResponse: + raise NotImplementedError + + @handler("DescribeOutboundCrossClusterSearchConnections") + def describe_outbound_cross_cluster_search_connections( + self, + context: RequestContext, + filters: FilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeOutboundCrossClusterSearchConnectionsResponse: + raise NotImplementedError + + @handler("DescribePackages") + def describe_packages( + self, + context: RequestContext, + filters: DescribePackagesFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribePackagesResponse: + raise NotImplementedError + + @handler("DescribeReservedElasticsearchInstanceOfferings") + def describe_reserved_elasticsearch_instance_offerings( + self, + context: RequestContext, + reserved_elasticsearch_instance_offering_id: GUID | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeReservedElasticsearchInstanceOfferingsResponse: + raise NotImplementedError + + @handler("DescribeReservedElasticsearchInstances") + def describe_reserved_elasticsearch_instances( + self, + context: RequestContext, + reserved_elasticsearch_instance_id: GUID | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeReservedElasticsearchInstancesResponse: + raise NotImplementedError + + @handler("DescribeVpcEndpoints") + def describe_vpc_endpoints( + self, context: RequestContext, vpc_endpoint_ids: VpcEndpointIdList, **kwargs + ) -> DescribeVpcEndpointsResponse: + raise NotImplementedError + + @handler("DissociatePackage") + def dissociate_package( + self, context: RequestContext, package_id: PackageID, domain_name: DomainName, **kwargs + ) -> DissociatePackageResponse: + raise NotImplementedError + + @handler("GetCompatibleElasticsearchVersions") + def get_compatible_elasticsearch_versions( + self, context: RequestContext, domain_name: DomainName | None = None, **kwargs + ) -> GetCompatibleElasticsearchVersionsResponse: + raise NotImplementedError + + @handler("GetPackageVersionHistory") + def get_package_version_history( + self, + context: RequestContext, + package_id: PackageID, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetPackageVersionHistoryResponse: + raise NotImplementedError + + @handler("GetUpgradeHistory") + def get_upgrade_history( + self, + context: RequestContext, + domain_name: DomainName, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetUpgradeHistoryResponse: + raise NotImplementedError + + @handler("GetUpgradeStatus") + def get_upgrade_status( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> GetUpgradeStatusResponse: + raise NotImplementedError + + @handler("ListDomainNames") + def list_domain_names( + self, context: RequestContext, engine_type: EngineType | None = 
None, **kwargs + ) -> ListDomainNamesResponse: + raise NotImplementedError + + @handler("ListDomainsForPackage") + def list_domains_for_package( + self, + context: RequestContext, + package_id: PackageID, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListDomainsForPackageResponse: + raise NotImplementedError + + @handler("ListElasticsearchInstanceTypes") + def list_elasticsearch_instance_types( + self, + context: RequestContext, + elasticsearch_version: ElasticsearchVersionString, + domain_name: DomainName | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListElasticsearchInstanceTypesResponse: + raise NotImplementedError + + @handler("ListElasticsearchVersions") + def list_elasticsearch_versions( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListElasticsearchVersionsResponse: + raise NotImplementedError + + @handler("ListPackagesForDomain") + def list_packages_for_domain( + self, + context: RequestContext, + domain_name: DomainName, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListPackagesForDomainResponse: + raise NotImplementedError + + @handler("ListTags") + def list_tags(self, context: RequestContext, arn: ARN, **kwargs) -> ListTagsResponse: + raise NotImplementedError + + @handler("ListVpcEndpointAccess") + def list_vpc_endpoint_access( + self, + context: RequestContext, + domain_name: DomainName, + next_token: NextToken | None = None, + **kwargs, + ) -> ListVpcEndpointAccessResponse: + raise NotImplementedError + + @handler("ListVpcEndpoints") + def list_vpc_endpoints( + self, context: RequestContext, next_token: NextToken | None = None, **kwargs + ) -> ListVpcEndpointsResponse: + raise NotImplementedError + + @handler("ListVpcEndpointsForDomain") + def list_vpc_endpoints_for_domain( + self, + context: RequestContext, + domain_name: DomainName, + next_token: NextToken | None = None, + **kwargs, + ) -> ListVpcEndpointsForDomainResponse: + raise NotImplementedError + + @handler("PurchaseReservedElasticsearchInstanceOffering") + def purchase_reserved_elasticsearch_instance_offering( + self, + context: RequestContext, + reserved_elasticsearch_instance_offering_id: GUID, + reservation_name: ReservationToken, + instance_count: InstanceCount | None = None, + **kwargs, + ) -> PurchaseReservedElasticsearchInstanceOfferingResponse: + raise NotImplementedError + + @handler("RejectInboundCrossClusterSearchConnection") + def reject_inbound_cross_cluster_search_connection( + self, + context: RequestContext, + cross_cluster_search_connection_id: CrossClusterSearchConnectionId, + **kwargs, + ) -> RejectInboundCrossClusterSearchConnectionResponse: + raise NotImplementedError + + @handler("RemoveTags") + def remove_tags( + self, context: RequestContext, arn: ARN, tag_keys: StringList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("RevokeVpcEndpointAccess") + def revoke_vpc_endpoint_access( + self, context: RequestContext, domain_name: DomainName, account: AWSAccount, **kwargs + ) -> RevokeVpcEndpointAccessResponse: + raise NotImplementedError + + @handler("StartElasticsearchServiceSoftwareUpdate") + def start_elasticsearch_service_software_update( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> StartElasticsearchServiceSoftwareUpdateResponse: + raise NotImplementedError + + 
@handler("UpdateElasticsearchDomainConfig") + def update_elasticsearch_domain_config( + self, + context: RequestContext, + domain_name: DomainName, + elasticsearch_cluster_config: ElasticsearchClusterConfig | None = None, + ebs_options: EBSOptions | None = None, + snapshot_options: SnapshotOptions | None = None, + vpc_options: VPCOptions | None = None, + cognito_options: CognitoOptions | None = None, + advanced_options: AdvancedOptions | None = None, + access_policies: PolicyDocument | None = None, + log_publishing_options: LogPublishingOptions | None = None, + domain_endpoint_options: DomainEndpointOptions | None = None, + advanced_security_options: AdvancedSecurityOptionsInput | None = None, + node_to_node_encryption_options: NodeToNodeEncryptionOptions | None = None, + encryption_at_rest_options: EncryptionAtRestOptions | None = None, + auto_tune_options: AutoTuneOptions | None = None, + dry_run: DryRun | None = None, + **kwargs, + ) -> UpdateElasticsearchDomainConfigResponse: + raise NotImplementedError + + @handler("UpdatePackage") + def update_package( + self, + context: RequestContext, + package_id: PackageID, + package_source: PackageSource, + package_description: PackageDescription | None = None, + commit_message: CommitMessage | None = None, + **kwargs, + ) -> UpdatePackageResponse: + raise NotImplementedError + + @handler("UpdateVpcEndpoint") + def update_vpc_endpoint( + self, + context: RequestContext, + vpc_endpoint_id: VpcEndpointId, + vpc_options: VPCOptions, + **kwargs, + ) -> UpdateVpcEndpointResponse: + raise NotImplementedError + + @handler("UpgradeElasticsearchDomain") + def upgrade_elasticsearch_domain( + self, + context: RequestContext, + domain_name: DomainName, + target_version: ElasticsearchVersionString, + perform_check_only: Boolean | None = None, + **kwargs, + ) -> UpgradeElasticsearchDomainResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/events/__init__.py b/localstack-core/localstack/aws/api/events/__init__.py new file mode 100644 index 0000000000000..3ad5d9dcaaaf1 --- /dev/null +++ b/localstack-core/localstack/aws/api/events/__init__.py @@ -0,0 +1,2112 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AccountId = str +Action = str +ApiDestinationArn = str +ApiDestinationDescription = str +ApiDestinationInvocationRateLimitPerSecond = int +ApiDestinationName = str +ArchiveArn = str +ArchiveDescription = str +ArchiveName = str +ArchiveStateReason = str +Arn = str +AuthHeaderParameters = str +AuthHeaderParametersSensitive = str +Boolean = bool +CapacityProvider = str +CapacityProviderStrategyItemBase = int +CapacityProviderStrategyItemWeight = int +ConnectionArn = str +ConnectionDescription = str +ConnectionName = str +ConnectionStateReason = str +CreatedBy = str +Database = str +DbUser = str +EndpointArn = str +EndpointDescription = str +EndpointId = str +EndpointName = str +EndpointStateReason = str +EndpointUrl = str +ErrorCode = str +ErrorMessage = str +EventBusArn = str +EventBusDescription = str +EventBusName = str +EventBusNameOrArn = str +EventId = str +EventPattern = str +EventResource = str +EventSourceName = str +EventSourceNamePrefix = str +GraphQLOperation = str +HeaderKey = str +HeaderValue = str +HeaderValueSensitive = str +HealthCheck = str +HomeRegion = str +HttpsEndpoint = str +IamRoleArn = str +InputTransformerPathKey = str +Integer = int 
+KmsKeyIdentifier = str +LimitMax100 = int +LimitMin1 = int +ManagedBy = str +MaximumEventAgeInSeconds = int +MaximumRetryAttempts = int +MessageGroupId = str +NextToken = str +NonPartnerEventBusArn = str +NonPartnerEventBusName = str +NonPartnerEventBusNameOrArn = str +PartnerEventSourceNamePrefix = str +PathParameter = str +PlacementConstraintExpression = str +PlacementStrategyField = str +Principal = str +QueryStringKey = str +QueryStringValue = str +QueryStringValueSensitive = str +RedshiftSecretManagerArn = str +ReferenceId = str +ReplayArn = str +ReplayDescription = str +ReplayName = str +ReplayStateReason = str +ResourceArn = str +ResourceAssociationArn = str +ResourceConfigurationArn = str +RetentionDays = int +RoleArn = str +Route = str +RuleArn = str +RuleDescription = str +RuleName = str +RunCommandTargetKey = str +RunCommandTargetValue = str +SageMakerPipelineParameterName = str +SageMakerPipelineParameterValue = str +ScheduleExpression = str +SecretsManagerSecretArn = str +SensitiveString = str +Sql = str +StatementId = str +StatementName = str +String = str +TagKey = str +TagValue = str +TargetArn = str +TargetId = str +TargetInput = str +TargetInputPath = str +TargetPartitionKeyPath = str +TraceHeader = str +TransformerInput = str + + +class ApiDestinationHttpMethod(StrEnum): + POST = "POST" + GET = "GET" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PUT = "PUT" + PATCH = "PATCH" + DELETE = "DELETE" + + +class ApiDestinationState(StrEnum): + ACTIVE = "ACTIVE" + INACTIVE = "INACTIVE" + + +class ArchiveState(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + CREATING = "CREATING" + UPDATING = "UPDATING" + CREATE_FAILED = "CREATE_FAILED" + UPDATE_FAILED = "UPDATE_FAILED" + + +class AssignPublicIp(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class ConnectionAuthorizationType(StrEnum): + BASIC = "BASIC" + OAUTH_CLIENT_CREDENTIALS = "OAUTH_CLIENT_CREDENTIALS" + API_KEY = "API_KEY" + + +class ConnectionOAuthHttpMethod(StrEnum): + GET = "GET" + POST = "POST" + PUT = "PUT" + + +class ConnectionState(StrEnum): + CREATING = "CREATING" + UPDATING = "UPDATING" + DELETING = "DELETING" + AUTHORIZED = "AUTHORIZED" + DEAUTHORIZED = "DEAUTHORIZED" + AUTHORIZING = "AUTHORIZING" + DEAUTHORIZING = "DEAUTHORIZING" + ACTIVE = "ACTIVE" + FAILED_CONNECTIVITY = "FAILED_CONNECTIVITY" + + +class EndpointState(StrEnum): + ACTIVE = "ACTIVE" + CREATING = "CREATING" + UPDATING = "UPDATING" + DELETING = "DELETING" + CREATE_FAILED = "CREATE_FAILED" + UPDATE_FAILED = "UPDATE_FAILED" + DELETE_FAILED = "DELETE_FAILED" + + +class EventSourceState(StrEnum): + PENDING = "PENDING" + ACTIVE = "ACTIVE" + DELETED = "DELETED" + + +class LaunchType(StrEnum): + EC2 = "EC2" + FARGATE = "FARGATE" + EXTERNAL = "EXTERNAL" + + +class PlacementConstraintType(StrEnum): + distinctInstance = "distinctInstance" + memberOf = "memberOf" + + +class PlacementStrategyType(StrEnum): + random = "random" + spread = "spread" + binpack = "binpack" + + +class PropagateTags(StrEnum): + TASK_DEFINITION = "TASK_DEFINITION" + + +class ReplayState(StrEnum): + STARTING = "STARTING" + RUNNING = "RUNNING" + CANCELLING = "CANCELLING" + COMPLETED = "COMPLETED" + CANCELLED = "CANCELLED" + FAILED = "FAILED" + + +class ReplicationState(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class RuleState(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS = "ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS" + + +class AccessDeniedException(ServiceException): + code: str = 
"AccessDeniedException" + sender_fault: bool = False + status_code: int = 400 + + +class ConcurrentModificationException(ServiceException): + code: str = "ConcurrentModificationException" + sender_fault: bool = False + status_code: int = 400 + + +class IllegalStatusException(ServiceException): + code: str = "IllegalStatusException" + sender_fault: bool = False + status_code: int = 400 + + +class InternalException(ServiceException): + code: str = "InternalException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidEventPatternException(ServiceException): + code: str = "InvalidEventPatternException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidStateException(ServiceException): + code: str = "InvalidStateException" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class ManagedRuleException(ServiceException): + code: str = "ManagedRuleException" + sender_fault: bool = False + status_code: int = 400 + + +class OperationDisabledException(ServiceException): + code: str = "OperationDisabledException" + sender_fault: bool = False + status_code: int = 400 + + +class PolicyLengthExceededException(ServiceException): + code: str = "PolicyLengthExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceAlreadyExistsException(ServiceException): + code: str = "ResourceAlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = False + status_code: int = 400 + + +class ActivateEventSourceRequest(ServiceRequest): + Name: EventSourceName + + +Timestamp = datetime + + +class ApiDestination(TypedDict, total=False): + ApiDestinationArn: Optional[ApiDestinationArn] + Name: Optional[ApiDestinationName] + ApiDestinationState: Optional[ApiDestinationState] + ConnectionArn: Optional[ConnectionArn] + InvocationEndpoint: Optional[HttpsEndpoint] + HttpMethod: Optional[ApiDestinationHttpMethod] + InvocationRateLimitPerSecond: Optional[ApiDestinationInvocationRateLimitPerSecond] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +ApiDestinationResponseList = List[ApiDestination] + + +class AppSyncParameters(TypedDict, total=False): + GraphQLOperation: Optional[GraphQLOperation] + + +Long = int + + +class Archive(TypedDict, total=False): + ArchiveName: Optional[ArchiveName] + EventSourceArn: Optional[EventBusArn] + State: Optional[ArchiveState] + StateReason: Optional[ArchiveStateReason] + RetentionDays: Optional[RetentionDays] + SizeBytes: Optional[Long] + EventCount: Optional[Long] + CreationTime: Optional[Timestamp] + + +ArchiveResponseList = List[Archive] +StringList = List[String] + + +class AwsVpcConfiguration(TypedDict, total=False): + Subnets: StringList + SecurityGroups: Optional[StringList] + AssignPublicIp: Optional[AssignPublicIp] + + +class BatchArrayProperties(TypedDict, total=False): + Size: Optional[Integer] + + +class BatchRetryStrategy(TypedDict, total=False): + Attempts: Optional[Integer] + + +class BatchParameters(TypedDict, total=False): + JobDefinition: String + JobName: String + ArrayProperties: Optional[BatchArrayProperties] + RetryStrategy: 
Optional[BatchRetryStrategy] + + +class CancelReplayRequest(ServiceRequest): + ReplayName: ReplayName + + +class CancelReplayResponse(TypedDict, total=False): + ReplayArn: Optional[ReplayArn] + State: Optional[ReplayState] + StateReason: Optional[ReplayStateReason] + + +class CapacityProviderStrategyItem(TypedDict, total=False): + capacityProvider: CapacityProvider + weight: Optional[CapacityProviderStrategyItemWeight] + base: Optional[CapacityProviderStrategyItemBase] + + +CapacityProviderStrategy = List[CapacityProviderStrategyItem] + + +class Condition(TypedDict, total=False): + Type: String + Key: String + Value: String + + +class Connection(TypedDict, total=False): + ConnectionArn: Optional[ConnectionArn] + Name: Optional[ConnectionName] + ConnectionState: Optional[ConnectionState] + StateReason: Optional[ConnectionStateReason] + AuthorizationType: Optional[ConnectionAuthorizationType] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + LastAuthorizedTime: Optional[Timestamp] + + +class ConnectionApiKeyAuthResponseParameters(TypedDict, total=False): + ApiKeyName: Optional[AuthHeaderParameters] + + +class DescribeConnectionResourceParameters(TypedDict, total=False): + ResourceConfigurationArn: ResourceConfigurationArn + ResourceAssociationArn: ResourceAssociationArn + + +class DescribeConnectionConnectivityParameters(TypedDict, total=False): + ResourceParameters: DescribeConnectionResourceParameters + + +class ConnectionBodyParameter(TypedDict, total=False): + Key: Optional[String] + Value: Optional[SensitiveString] + IsValueSecret: Optional[Boolean] + + +ConnectionBodyParametersList = List[ConnectionBodyParameter] + + +class ConnectionQueryStringParameter(TypedDict, total=False): + Key: Optional[QueryStringKey] + Value: Optional[QueryStringValueSensitive] + IsValueSecret: Optional[Boolean] + + +ConnectionQueryStringParametersList = List[ConnectionQueryStringParameter] + + +class ConnectionHeaderParameter(TypedDict, total=False): + Key: Optional[HeaderKey] + Value: Optional[HeaderValueSensitive] + IsValueSecret: Optional[Boolean] + + +ConnectionHeaderParametersList = List[ConnectionHeaderParameter] + + +class ConnectionHttpParameters(TypedDict, total=False): + HeaderParameters: Optional[ConnectionHeaderParametersList] + QueryStringParameters: Optional[ConnectionQueryStringParametersList] + BodyParameters: Optional[ConnectionBodyParametersList] + + +class ConnectionOAuthClientResponseParameters(TypedDict, total=False): + ClientID: Optional[AuthHeaderParameters] + + +class ConnectionOAuthResponseParameters(TypedDict, total=False): + ClientParameters: Optional[ConnectionOAuthClientResponseParameters] + AuthorizationEndpoint: Optional[HttpsEndpoint] + HttpMethod: Optional[ConnectionOAuthHttpMethod] + OAuthHttpParameters: Optional[ConnectionHttpParameters] + + +class ConnectionBasicAuthResponseParameters(TypedDict, total=False): + Username: Optional[AuthHeaderParameters] + + +class ConnectionAuthResponseParameters(TypedDict, total=False): + BasicAuthParameters: Optional[ConnectionBasicAuthResponseParameters] + OAuthParameters: Optional[ConnectionOAuthResponseParameters] + ApiKeyAuthParameters: Optional[ConnectionApiKeyAuthResponseParameters] + InvocationHttpParameters: Optional[ConnectionHttpParameters] + ConnectivityParameters: Optional[DescribeConnectionConnectivityParameters] + + +ConnectionResponseList = List[Connection] + + +class ConnectivityResourceConfigurationArn(TypedDict, total=False): + ResourceConfigurationArn: ResourceConfigurationArn + + +class 
ConnectivityResourceParameters(TypedDict, total=False): + ResourceParameters: ConnectivityResourceConfigurationArn + + +class CreateApiDestinationRequest(ServiceRequest): + Name: ApiDestinationName + Description: Optional[ApiDestinationDescription] + ConnectionArn: ConnectionArn + InvocationEndpoint: HttpsEndpoint + HttpMethod: ApiDestinationHttpMethod + InvocationRateLimitPerSecond: Optional[ApiDestinationInvocationRateLimitPerSecond] + + +class CreateApiDestinationResponse(TypedDict, total=False): + ApiDestinationArn: Optional[ApiDestinationArn] + ApiDestinationState: Optional[ApiDestinationState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +class CreateArchiveRequest(ServiceRequest): + ArchiveName: ArchiveName + EventSourceArn: EventBusArn + Description: Optional[ArchiveDescription] + EventPattern: Optional[EventPattern] + RetentionDays: Optional[RetentionDays] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + + +class CreateArchiveResponse(TypedDict, total=False): + ArchiveArn: Optional[ArchiveArn] + State: Optional[ArchiveState] + StateReason: Optional[ArchiveStateReason] + CreationTime: Optional[Timestamp] + + +class CreateConnectionApiKeyAuthRequestParameters(TypedDict, total=False): + ApiKeyName: AuthHeaderParameters + ApiKeyValue: AuthHeaderParametersSensitive + + +class CreateConnectionOAuthClientRequestParameters(TypedDict, total=False): + ClientID: AuthHeaderParameters + ClientSecret: AuthHeaderParametersSensitive + + +class CreateConnectionOAuthRequestParameters(TypedDict, total=False): + ClientParameters: CreateConnectionOAuthClientRequestParameters + AuthorizationEndpoint: HttpsEndpoint + HttpMethod: ConnectionOAuthHttpMethod + OAuthHttpParameters: Optional[ConnectionHttpParameters] + + +class CreateConnectionBasicAuthRequestParameters(TypedDict, total=False): + Username: AuthHeaderParameters + Password: AuthHeaderParametersSensitive + + +class CreateConnectionAuthRequestParameters(TypedDict, total=False): + BasicAuthParameters: Optional[CreateConnectionBasicAuthRequestParameters] + OAuthParameters: Optional[CreateConnectionOAuthRequestParameters] + ApiKeyAuthParameters: Optional[CreateConnectionApiKeyAuthRequestParameters] + InvocationHttpParameters: Optional[ConnectionHttpParameters] + ConnectivityParameters: Optional[ConnectivityResourceParameters] + + +class CreateConnectionRequest(ServiceRequest): + Name: ConnectionName + Description: Optional[ConnectionDescription] + AuthorizationType: ConnectionAuthorizationType + AuthParameters: CreateConnectionAuthRequestParameters + InvocationConnectivityParameters: Optional[ConnectivityResourceParameters] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + + +class CreateConnectionResponse(TypedDict, total=False): + ConnectionArn: Optional[ConnectionArn] + ConnectionState: Optional[ConnectionState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +class EndpointEventBus(TypedDict, total=False): + EventBusArn: NonPartnerEventBusArn + + +EndpointEventBusList = List[EndpointEventBus] + + +class ReplicationConfig(TypedDict, total=False): + State: Optional[ReplicationState] + + +class Secondary(TypedDict, total=False): + Route: Route + + +class Primary(TypedDict, total=False): + HealthCheck: HealthCheck + + +class FailoverConfig(TypedDict, total=False): + Primary: Primary + Secondary: Secondary + + +class RoutingConfig(TypedDict, total=False): + FailoverConfig: FailoverConfig + + +class CreateEndpointRequest(ServiceRequest): + Name: EndpointName + Description: 
Optional[EndpointDescription] + RoutingConfig: RoutingConfig + ReplicationConfig: Optional[ReplicationConfig] + EventBuses: EndpointEventBusList + RoleArn: Optional[IamRoleArn] + + +class CreateEndpointResponse(TypedDict, total=False): + Name: Optional[EndpointName] + Arn: Optional[EndpointArn] + RoutingConfig: Optional[RoutingConfig] + ReplicationConfig: Optional[ReplicationConfig] + EventBuses: Optional[EndpointEventBusList] + RoleArn: Optional[IamRoleArn] + State: Optional[EndpointState] + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] + + +class DeadLetterConfig(TypedDict, total=False): + Arn: Optional[ResourceArn] + + +class CreateEventBusRequest(ServiceRequest): + Name: EventBusName + EventSourceName: Optional[EventSourceName] + Description: Optional[EventBusDescription] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + DeadLetterConfig: Optional[DeadLetterConfig] + Tags: Optional[TagList] + + +class CreateEventBusResponse(TypedDict, total=False): + EventBusArn: Optional[String] + Description: Optional[EventBusDescription] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + DeadLetterConfig: Optional[DeadLetterConfig] + + +class CreatePartnerEventSourceRequest(ServiceRequest): + Name: EventSourceName + Account: AccountId + + +class CreatePartnerEventSourceResponse(TypedDict, total=False): + EventSourceArn: Optional[String] + + +class DeactivateEventSourceRequest(ServiceRequest): + Name: EventSourceName + + +class DeauthorizeConnectionRequest(ServiceRequest): + Name: ConnectionName + + +class DeauthorizeConnectionResponse(TypedDict, total=False): + ConnectionArn: Optional[ConnectionArn] + ConnectionState: Optional[ConnectionState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + LastAuthorizedTime: Optional[Timestamp] + + +class DeleteApiDestinationRequest(ServiceRequest): + Name: ApiDestinationName + + +class DeleteApiDestinationResponse(TypedDict, total=False): + pass + + +class DeleteArchiveRequest(ServiceRequest): + ArchiveName: ArchiveName + + +class DeleteArchiveResponse(TypedDict, total=False): + pass + + +class DeleteConnectionRequest(ServiceRequest): + Name: ConnectionName + + +class DeleteConnectionResponse(TypedDict, total=False): + ConnectionArn: Optional[ConnectionArn] + ConnectionState: Optional[ConnectionState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + LastAuthorizedTime: Optional[Timestamp] + + +class DeleteEndpointRequest(ServiceRequest): + Name: EndpointName + + +class DeleteEndpointResponse(TypedDict, total=False): + pass + + +class DeleteEventBusRequest(ServiceRequest): + Name: EventBusName + + +class DeletePartnerEventSourceRequest(ServiceRequest): + Name: EventSourceName + Account: AccountId + + +class DeleteRuleRequest(ServiceRequest): + Name: RuleName + EventBusName: Optional[EventBusNameOrArn] + Force: Optional[Boolean] + + +class DescribeApiDestinationRequest(ServiceRequest): + Name: ApiDestinationName + + +class DescribeApiDestinationResponse(TypedDict, total=False): + ApiDestinationArn: Optional[ApiDestinationArn] + Name: Optional[ApiDestinationName] + Description: Optional[ApiDestinationDescription] + ApiDestinationState: Optional[ApiDestinationState] + ConnectionArn: Optional[ConnectionArn] + InvocationEndpoint: Optional[HttpsEndpoint] + HttpMethod: Optional[ApiDestinationHttpMethod] + InvocationRateLimitPerSecond: Optional[ApiDestinationInvocationRateLimitPerSecond] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + 
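+# NOTE (editor's illustrative sketch, not part of the generated service spec):
+# every shape in this module is a TypedDict declared with total=False, so a
+# provider implementation simply builds and returns a plain dict in which all
+# fields are optional, and the StrEnum members above are themselves strings.
+# Kept as a comment so the generated module is unchanged at import time; the
+# variable name "response" and the sample values below are hypothetical.
+#
+#   response: DescribeApiDestinationResponse = {
+#       "Name": "orders-webhook",
+#       "ApiDestinationState": ApiDestinationState.ACTIVE,
+#       "HttpMethod": ApiDestinationHttpMethod.POST,
+#       "InvocationEndpoint": "https://example.com/orders",
+#       "CreationTime": datetime.now(),
+#   }
+#   assert response["ApiDestinationState"] == "ACTIVE"  # StrEnum members are str
+
+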
+class DescribeArchiveRequest(ServiceRequest): + ArchiveName: ArchiveName + + +class DescribeArchiveResponse(TypedDict, total=False): + ArchiveArn: Optional[ArchiveArn] + ArchiveName: Optional[ArchiveName] + EventSourceArn: Optional[EventBusArn] + Description: Optional[ArchiveDescription] + EventPattern: Optional[EventPattern] + State: Optional[ArchiveState] + StateReason: Optional[ArchiveStateReason] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + RetentionDays: Optional[RetentionDays] + SizeBytes: Optional[Long] + EventCount: Optional[Long] + CreationTime: Optional[Timestamp] + + +class DescribeConnectionRequest(ServiceRequest): + Name: ConnectionName + + +class DescribeConnectionResponse(TypedDict, total=False): + ConnectionArn: Optional[ConnectionArn] + Name: Optional[ConnectionName] + Description: Optional[ConnectionDescription] + InvocationConnectivityParameters: Optional[DescribeConnectionConnectivityParameters] + ConnectionState: Optional[ConnectionState] + StateReason: Optional[ConnectionStateReason] + AuthorizationType: Optional[ConnectionAuthorizationType] + SecretArn: Optional[SecretsManagerSecretArn] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + AuthParameters: Optional[ConnectionAuthResponseParameters] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + LastAuthorizedTime: Optional[Timestamp] + + +class DescribeEndpointRequest(ServiceRequest): + Name: EndpointName + HomeRegion: Optional[HomeRegion] + + +class DescribeEndpointResponse(TypedDict, total=False): + Name: Optional[EndpointName] + Description: Optional[EndpointDescription] + Arn: Optional[EndpointArn] + RoutingConfig: Optional[RoutingConfig] + ReplicationConfig: Optional[ReplicationConfig] + EventBuses: Optional[EndpointEventBusList] + RoleArn: Optional[IamRoleArn] + EndpointId: Optional[EndpointId] + EndpointUrl: Optional[EndpointUrl] + State: Optional[EndpointState] + StateReason: Optional[EndpointStateReason] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +class DescribeEventBusRequest(ServiceRequest): + Name: Optional[EventBusNameOrArn] + + +class DescribeEventBusResponse(TypedDict, total=False): + Name: Optional[String] + Arn: Optional[String] + Description: Optional[EventBusDescription] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + DeadLetterConfig: Optional[DeadLetterConfig] + Policy: Optional[String] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +class DescribeEventSourceRequest(ServiceRequest): + Name: EventSourceName + + +class DescribeEventSourceResponse(TypedDict, total=False): + Arn: Optional[String] + CreatedBy: Optional[String] + CreationTime: Optional[Timestamp] + ExpirationTime: Optional[Timestamp] + Name: Optional[String] + State: Optional[EventSourceState] + + +class DescribePartnerEventSourceRequest(ServiceRequest): + Name: EventSourceName + + +class DescribePartnerEventSourceResponse(TypedDict, total=False): + Arn: Optional[String] + Name: Optional[String] + + +class DescribeReplayRequest(ServiceRequest): + ReplayName: ReplayName + + +ReplayDestinationFilters = List[Arn] + + +class ReplayDestination(TypedDict, total=False): + Arn: Arn + FilterArns: Optional[ReplayDestinationFilters] + + +class DescribeReplayResponse(TypedDict, total=False): + ReplayName: Optional[ReplayName] + ReplayArn: Optional[ReplayArn] + Description: Optional[ReplayDescription] + State: Optional[ReplayState] + StateReason: Optional[ReplayStateReason] + EventSourceArn: Optional[ArchiveArn] + Destination: 
Optional[ReplayDestination] + EventStartTime: Optional[Timestamp] + EventEndTime: Optional[Timestamp] + EventLastReplayedTime: Optional[Timestamp] + ReplayStartTime: Optional[Timestamp] + ReplayEndTime: Optional[Timestamp] + + +class DescribeRuleRequest(ServiceRequest): + Name: RuleName + EventBusName: Optional[EventBusNameOrArn] + + +class DescribeRuleResponse(TypedDict, total=False): + Name: Optional[RuleName] + Arn: Optional[RuleArn] + EventPattern: Optional[EventPattern] + ScheduleExpression: Optional[ScheduleExpression] + State: Optional[RuleState] + Description: Optional[RuleDescription] + RoleArn: Optional[RoleArn] + ManagedBy: Optional[ManagedBy] + EventBusName: Optional[EventBusName] + CreatedBy: Optional[CreatedBy] + + +class DisableRuleRequest(ServiceRequest): + Name: RuleName + EventBusName: Optional[EventBusNameOrArn] + + +PlacementStrategy = TypedDict( + "PlacementStrategy", + { + "type": Optional[PlacementStrategyType], + "field": Optional[PlacementStrategyField], + }, + total=False, +) +PlacementStrategies = List[PlacementStrategy] +PlacementConstraint = TypedDict( + "PlacementConstraint", + { + "type": Optional[PlacementConstraintType], + "expression": Optional[PlacementConstraintExpression], + }, + total=False, +) +PlacementConstraints = List[PlacementConstraint] + + +class NetworkConfiguration(TypedDict, total=False): + awsvpcConfiguration: Optional[AwsVpcConfiguration] + + +class EcsParameters(TypedDict, total=False): + TaskDefinitionArn: Arn + TaskCount: Optional[LimitMin1] + LaunchType: Optional[LaunchType] + NetworkConfiguration: Optional[NetworkConfiguration] + PlatformVersion: Optional[String] + Group: Optional[String] + CapacityProviderStrategy: Optional[CapacityProviderStrategy] + EnableECSManagedTags: Optional[Boolean] + EnableExecuteCommand: Optional[Boolean] + PlacementConstraints: Optional[PlacementConstraints] + PlacementStrategy: Optional[PlacementStrategies] + PropagateTags: Optional[PropagateTags] + ReferenceId: Optional[ReferenceId] + Tags: Optional[TagList] + + +class EnableRuleRequest(ServiceRequest): + Name: RuleName + EventBusName: Optional[EventBusNameOrArn] + + +class Endpoint(TypedDict, total=False): + Name: Optional[EndpointName] + Description: Optional[EndpointDescription] + Arn: Optional[EndpointArn] + RoutingConfig: Optional[RoutingConfig] + ReplicationConfig: Optional[ReplicationConfig] + EventBuses: Optional[EndpointEventBusList] + RoleArn: Optional[IamRoleArn] + EndpointId: Optional[EndpointId] + EndpointUrl: Optional[EndpointUrl] + State: Optional[EndpointState] + StateReason: Optional[EndpointStateReason] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +EndpointList = List[Endpoint] + + +class EventBus(TypedDict, total=False): + Name: Optional[String] + Arn: Optional[String] + Description: Optional[EventBusDescription] + Policy: Optional[String] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +EventBusList = List[EventBus] +EventResourceList = List[EventResource] + + +class EventSource(TypedDict, total=False): + Arn: Optional[String] + CreatedBy: Optional[String] + CreationTime: Optional[Timestamp] + ExpirationTime: Optional[Timestamp] + Name: Optional[String] + State: Optional[EventSourceState] + + +EventSourceList = List[EventSource] +EventTime = datetime +HeaderParametersMap = Dict[HeaderKey, HeaderValue] +QueryStringParametersMap = Dict[QueryStringKey, QueryStringValue] +PathParameterList = List[PathParameter] + + +class HttpParameters(TypedDict, total=False): + 
PathParameterValues: Optional[PathParameterList] + HeaderParameters: Optional[HeaderParametersMap] + QueryStringParameters: Optional[QueryStringParametersMap] + + +TransformerPaths = Dict[InputTransformerPathKey, TargetInputPath] + + +class InputTransformer(TypedDict, total=False): + InputPathsMap: Optional[TransformerPaths] + InputTemplate: TransformerInput + + +class KinesisParameters(TypedDict, total=False): + PartitionKeyPath: TargetPartitionKeyPath + + +class ListApiDestinationsRequest(ServiceRequest): + NamePrefix: Optional[ApiDestinationName] + ConnectionArn: Optional[ConnectionArn] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class ListApiDestinationsResponse(TypedDict, total=False): + ApiDestinations: Optional[ApiDestinationResponseList] + NextToken: Optional[NextToken] + + +class ListArchivesRequest(ServiceRequest): + NamePrefix: Optional[ArchiveName] + EventSourceArn: Optional[EventBusArn] + State: Optional[ArchiveState] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class ListArchivesResponse(TypedDict, total=False): + Archives: Optional[ArchiveResponseList] + NextToken: Optional[NextToken] + + +class ListConnectionsRequest(ServiceRequest): + NamePrefix: Optional[ConnectionName] + ConnectionState: Optional[ConnectionState] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class ListConnectionsResponse(TypedDict, total=False): + Connections: Optional[ConnectionResponseList] + NextToken: Optional[NextToken] + + +class ListEndpointsRequest(ServiceRequest): + NamePrefix: Optional[EndpointName] + HomeRegion: Optional[HomeRegion] + NextToken: Optional[NextToken] + MaxResults: Optional[LimitMax100] + + +class ListEndpointsResponse(TypedDict, total=False): + Endpoints: Optional[EndpointList] + NextToken: Optional[NextToken] + + +class ListEventBusesRequest(ServiceRequest): + NamePrefix: Optional[EventBusName] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class ListEventBusesResponse(TypedDict, total=False): + EventBuses: Optional[EventBusList] + NextToken: Optional[NextToken] + + +class ListEventSourcesRequest(ServiceRequest): + NamePrefix: Optional[EventSourceNamePrefix] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class ListEventSourcesResponse(TypedDict, total=False): + EventSources: Optional[EventSourceList] + NextToken: Optional[NextToken] + + +class ListPartnerEventSourceAccountsRequest(ServiceRequest): + EventSourceName: EventSourceName + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class PartnerEventSourceAccount(TypedDict, total=False): + Account: Optional[AccountId] + CreationTime: Optional[Timestamp] + ExpirationTime: Optional[Timestamp] + State: Optional[EventSourceState] + + +PartnerEventSourceAccountList = List[PartnerEventSourceAccount] + + +class ListPartnerEventSourceAccountsResponse(TypedDict, total=False): + PartnerEventSourceAccounts: Optional[PartnerEventSourceAccountList] + NextToken: Optional[NextToken] + + +class ListPartnerEventSourcesRequest(ServiceRequest): + NamePrefix: PartnerEventSourceNamePrefix + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class PartnerEventSource(TypedDict, total=False): + Arn: Optional[String] + Name: Optional[String] + + +PartnerEventSourceList = List[PartnerEventSource] + + +class ListPartnerEventSourcesResponse(TypedDict, total=False): + PartnerEventSources: Optional[PartnerEventSourceList] + NextToken: Optional[NextToken] + + +class ListReplaysRequest(ServiceRequest): + 
NamePrefix: Optional[ReplayName] + State: Optional[ReplayState] + EventSourceArn: Optional[ArchiveArn] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class Replay(TypedDict, total=False): + ReplayName: Optional[ReplayName] + EventSourceArn: Optional[ArchiveArn] + State: Optional[ReplayState] + StateReason: Optional[ReplayStateReason] + EventStartTime: Optional[Timestamp] + EventEndTime: Optional[Timestamp] + EventLastReplayedTime: Optional[Timestamp] + ReplayStartTime: Optional[Timestamp] + ReplayEndTime: Optional[Timestamp] + + +ReplayList = List[Replay] + + +class ListReplaysResponse(TypedDict, total=False): + Replays: Optional[ReplayList] + NextToken: Optional[NextToken] + + +class ListRuleNamesByTargetRequest(ServiceRequest): + TargetArn: TargetArn + EventBusName: Optional[EventBusNameOrArn] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +RuleNameList = List[RuleName] + + +class ListRuleNamesByTargetResponse(TypedDict, total=False): + RuleNames: Optional[RuleNameList] + NextToken: Optional[NextToken] + + +class ListRulesRequest(ServiceRequest): + NamePrefix: Optional[RuleName] + EventBusName: Optional[EventBusNameOrArn] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class Rule(TypedDict, total=False): + Name: Optional[RuleName] + Arn: Optional[RuleArn] + EventPattern: Optional[EventPattern] + State: Optional[RuleState] + Description: Optional[RuleDescription] + ScheduleExpression: Optional[ScheduleExpression] + RoleArn: Optional[RoleArn] + ManagedBy: Optional[ManagedBy] + EventBusName: Optional[EventBusName] + + +RuleResponseList = List[Rule] + + +class ListRulesResponse(TypedDict, total=False): + Rules: Optional[RuleResponseList] + NextToken: Optional[NextToken] + + +class ListTagsForResourceRequest(ServiceRequest): + ResourceARN: Arn + + +class ListTagsForResourceResponse(TypedDict, total=False): + Tags: Optional[TagList] + + +class ListTargetsByRuleRequest(ServiceRequest): + Rule: RuleName + EventBusName: Optional[EventBusNameOrArn] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class RetryPolicy(TypedDict, total=False): + MaximumRetryAttempts: Optional[MaximumRetryAttempts] + MaximumEventAgeInSeconds: Optional[MaximumEventAgeInSeconds] + + +class SageMakerPipelineParameter(TypedDict, total=False): + Name: SageMakerPipelineParameterName + Value: SageMakerPipelineParameterValue + + +SageMakerPipelineParameterList = List[SageMakerPipelineParameter] + + +class SageMakerPipelineParameters(TypedDict, total=False): + PipelineParameterList: Optional[SageMakerPipelineParameterList] + + +Sqls = List[Sql] + + +class RedshiftDataParameters(TypedDict, total=False): + SecretManagerArn: Optional[RedshiftSecretManagerArn] + Database: Database + DbUser: Optional[DbUser] + Sql: Optional[Sql] + StatementName: Optional[StatementName] + WithEvent: Optional[Boolean] + Sqls: Optional[Sqls] + + +class SqsParameters(TypedDict, total=False): + MessageGroupId: Optional[MessageGroupId] + + +RunCommandTargetValues = List[RunCommandTargetValue] + + +class RunCommandTarget(TypedDict, total=False): + Key: RunCommandTargetKey + Values: RunCommandTargetValues + + +RunCommandTargets = List[RunCommandTarget] + + +class RunCommandParameters(TypedDict, total=False): + RunCommandTargets: RunCommandTargets + + +class Target(TypedDict, total=False): + Id: TargetId + Arn: TargetArn + RoleArn: Optional[RoleArn] + Input: Optional[TargetInput] + InputPath: Optional[TargetInputPath] + InputTransformer: Optional[InputTransformer] + 
KinesisParameters: Optional[KinesisParameters] + RunCommandParameters: Optional[RunCommandParameters] + EcsParameters: Optional[EcsParameters] + BatchParameters: Optional[BatchParameters] + SqsParameters: Optional[SqsParameters] + HttpParameters: Optional[HttpParameters] + RedshiftDataParameters: Optional[RedshiftDataParameters] + SageMakerPipelineParameters: Optional[SageMakerPipelineParameters] + DeadLetterConfig: Optional[DeadLetterConfig] + RetryPolicy: Optional[RetryPolicy] + AppSyncParameters: Optional[AppSyncParameters] + + +TargetList = List[Target] + + +class ListTargetsByRuleResponse(TypedDict, total=False): + Targets: Optional[TargetList] + NextToken: Optional[NextToken] + + +class PutEventsRequestEntry(TypedDict, total=False): + Time: Optional[EventTime] + Source: Optional[String] + Resources: Optional[EventResourceList] + DetailType: Optional[String] + Detail: Optional[String] + EventBusName: Optional[NonPartnerEventBusNameOrArn] + TraceHeader: Optional[TraceHeader] + + +PutEventsRequestEntryList = List[PutEventsRequestEntry] + + +class PutEventsRequest(ServiceRequest): + Entries: PutEventsRequestEntryList + EndpointId: Optional[EndpointId] + + +class PutEventsResultEntry(TypedDict, total=False): + EventId: Optional[EventId] + ErrorCode: Optional[ErrorCode] + ErrorMessage: Optional[ErrorMessage] + + +PutEventsResultEntryList = List[PutEventsResultEntry] + + +class PutEventsResponse(TypedDict, total=False): + FailedEntryCount: Optional[Integer] + Entries: Optional[PutEventsResultEntryList] + + +class PutPartnerEventsRequestEntry(TypedDict, total=False): + Time: Optional[EventTime] + Source: Optional[EventSourceName] + Resources: Optional[EventResourceList] + DetailType: Optional[String] + Detail: Optional[String] + + +PutPartnerEventsRequestEntryList = List[PutPartnerEventsRequestEntry] + + +class PutPartnerEventsRequest(ServiceRequest): + Entries: PutPartnerEventsRequestEntryList + + +class PutPartnerEventsResultEntry(TypedDict, total=False): + EventId: Optional[EventId] + ErrorCode: Optional[ErrorCode] + ErrorMessage: Optional[ErrorMessage] + + +PutPartnerEventsResultEntryList = List[PutPartnerEventsResultEntry] + + +class PutPartnerEventsResponse(TypedDict, total=False): + FailedEntryCount: Optional[Integer] + Entries: Optional[PutPartnerEventsResultEntryList] + + +class PutPermissionRequest(ServiceRequest): + EventBusName: Optional[NonPartnerEventBusName] + Action: Optional[Action] + Principal: Optional[Principal] + StatementId: Optional[StatementId] + Condition: Optional[Condition] + Policy: Optional[String] + + +class PutRuleRequest(ServiceRequest): + Name: RuleName + ScheduleExpression: Optional[ScheduleExpression] + EventPattern: Optional[EventPattern] + State: Optional[RuleState] + Description: Optional[RuleDescription] + RoleArn: Optional[RoleArn] + Tags: Optional[TagList] + EventBusName: Optional[EventBusNameOrArn] + + +class PutRuleResponse(TypedDict, total=False): + RuleArn: Optional[RuleArn] + + +class PutTargetsRequest(ServiceRequest): + Rule: RuleName + EventBusName: Optional[EventBusNameOrArn] + Targets: TargetList + + +class PutTargetsResultEntry(TypedDict, total=False): + TargetId: Optional[TargetId] + ErrorCode: Optional[ErrorCode] + ErrorMessage: Optional[ErrorMessage] + + +PutTargetsResultEntryList = List[PutTargetsResultEntry] + + +class PutTargetsResponse(TypedDict, total=False): + FailedEntryCount: Optional[Integer] + FailedEntries: Optional[PutTargetsResultEntryList] + + +class RemovePermissionRequest(ServiceRequest): + StatementId: 
Optional[StatementId] + RemoveAllPermissions: Optional[Boolean] + EventBusName: Optional[NonPartnerEventBusName] + + +TargetIdList = List[TargetId] + + +class RemoveTargetsRequest(ServiceRequest): + Rule: RuleName + EventBusName: Optional[EventBusNameOrArn] + Ids: TargetIdList + Force: Optional[Boolean] + + +class RemoveTargetsResultEntry(TypedDict, total=False): + TargetId: Optional[TargetId] + ErrorCode: Optional[ErrorCode] + ErrorMessage: Optional[ErrorMessage] + + +RemoveTargetsResultEntryList = List[RemoveTargetsResultEntry] + + +class RemoveTargetsResponse(TypedDict, total=False): + FailedEntryCount: Optional[Integer] + FailedEntries: Optional[RemoveTargetsResultEntryList] + + +class StartReplayRequest(ServiceRequest): + ReplayName: ReplayName + Description: Optional[ReplayDescription] + EventSourceArn: ArchiveArn + EventStartTime: Timestamp + EventEndTime: Timestamp + Destination: ReplayDestination + + +class StartReplayResponse(TypedDict, total=False): + ReplayArn: Optional[ReplayArn] + State: Optional[ReplayState] + StateReason: Optional[ReplayStateReason] + ReplayStartTime: Optional[Timestamp] + + +TagKeyList = List[TagKey] + + +class TagResourceRequest(ServiceRequest): + ResourceARN: Arn + Tags: TagList + + +class TagResourceResponse(TypedDict, total=False): + pass + + +class TestEventPatternRequest(ServiceRequest): + EventPattern: EventPattern + Event: String + + +class TestEventPatternResponse(TypedDict, total=False): + Result: Optional[Boolean] + + +class UntagResourceRequest(ServiceRequest): + ResourceARN: Arn + TagKeys: TagKeyList + + +class UntagResourceResponse(TypedDict, total=False): + pass + + +class UpdateApiDestinationRequest(ServiceRequest): + Name: ApiDestinationName + Description: Optional[ApiDestinationDescription] + ConnectionArn: Optional[ConnectionArn] + InvocationEndpoint: Optional[HttpsEndpoint] + HttpMethod: Optional[ApiDestinationHttpMethod] + InvocationRateLimitPerSecond: Optional[ApiDestinationInvocationRateLimitPerSecond] + + +class UpdateApiDestinationResponse(TypedDict, total=False): + ApiDestinationArn: Optional[ApiDestinationArn] + ApiDestinationState: Optional[ApiDestinationState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +class UpdateArchiveRequest(ServiceRequest): + ArchiveName: ArchiveName + Description: Optional[ArchiveDescription] + EventPattern: Optional[EventPattern] + RetentionDays: Optional[RetentionDays] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + + +class UpdateArchiveResponse(TypedDict, total=False): + ArchiveArn: Optional[ArchiveArn] + State: Optional[ArchiveState] + StateReason: Optional[ArchiveStateReason] + CreationTime: Optional[Timestamp] + + +class UpdateConnectionApiKeyAuthRequestParameters(TypedDict, total=False): + ApiKeyName: Optional[AuthHeaderParameters] + ApiKeyValue: Optional[AuthHeaderParametersSensitive] + + +class UpdateConnectionOAuthClientRequestParameters(TypedDict, total=False): + ClientID: Optional[AuthHeaderParameters] + ClientSecret: Optional[AuthHeaderParametersSensitive] + + +class UpdateConnectionOAuthRequestParameters(TypedDict, total=False): + ClientParameters: Optional[UpdateConnectionOAuthClientRequestParameters] + AuthorizationEndpoint: Optional[HttpsEndpoint] + HttpMethod: Optional[ConnectionOAuthHttpMethod] + OAuthHttpParameters: Optional[ConnectionHttpParameters] + + +class UpdateConnectionBasicAuthRequestParameters(TypedDict, total=False): + Username: Optional[AuthHeaderParameters] + Password: Optional[AuthHeaderParametersSensitive] + + +class 
UpdateConnectionAuthRequestParameters(TypedDict, total=False): + BasicAuthParameters: Optional[UpdateConnectionBasicAuthRequestParameters] + OAuthParameters: Optional[UpdateConnectionOAuthRequestParameters] + ApiKeyAuthParameters: Optional[UpdateConnectionApiKeyAuthRequestParameters] + InvocationHttpParameters: Optional[ConnectionHttpParameters] + ConnectivityParameters: Optional[ConnectivityResourceParameters] + + +class UpdateConnectionRequest(ServiceRequest): + Name: ConnectionName + Description: Optional[ConnectionDescription] + AuthorizationType: Optional[ConnectionAuthorizationType] + AuthParameters: Optional[UpdateConnectionAuthRequestParameters] + InvocationConnectivityParameters: Optional[ConnectivityResourceParameters] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + + +class UpdateConnectionResponse(TypedDict, total=False): + ConnectionArn: Optional[ConnectionArn] + ConnectionState: Optional[ConnectionState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + LastAuthorizedTime: Optional[Timestamp] + + +class UpdateEndpointRequest(ServiceRequest): + Name: EndpointName + Description: Optional[EndpointDescription] + RoutingConfig: Optional[RoutingConfig] + ReplicationConfig: Optional[ReplicationConfig] + EventBuses: Optional[EndpointEventBusList] + RoleArn: Optional[IamRoleArn] + + +class UpdateEndpointResponse(TypedDict, total=False): + Name: Optional[EndpointName] + Arn: Optional[EndpointArn] + RoutingConfig: Optional[RoutingConfig] + ReplicationConfig: Optional[ReplicationConfig] + EventBuses: Optional[EndpointEventBusList] + RoleArn: Optional[IamRoleArn] + EndpointId: Optional[EndpointId] + EndpointUrl: Optional[EndpointUrl] + State: Optional[EndpointState] + + +class UpdateEventBusRequest(ServiceRequest): + Name: Optional[EventBusName] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + Description: Optional[EventBusDescription] + DeadLetterConfig: Optional[DeadLetterConfig] + + +class UpdateEventBusResponse(TypedDict, total=False): + Arn: Optional[String] + Name: Optional[EventBusName] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + Description: Optional[EventBusDescription] + DeadLetterConfig: Optional[DeadLetterConfig] + + +class EventsApi: + service = "events" + version = "2015-10-07" + + @handler("ActivateEventSource") + def activate_event_source( + self, context: RequestContext, name: EventSourceName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("CancelReplay") + def cancel_replay( + self, context: RequestContext, replay_name: ReplayName, **kwargs + ) -> CancelReplayResponse: + raise NotImplementedError + + @handler("CreateApiDestination") + def create_api_destination( + self, + context: RequestContext, + name: ApiDestinationName, + connection_arn: ConnectionArn, + invocation_endpoint: HttpsEndpoint, + http_method: ApiDestinationHttpMethod, + description: ApiDestinationDescription | None = None, + invocation_rate_limit_per_second: ApiDestinationInvocationRateLimitPerSecond | None = None, + **kwargs, + ) -> CreateApiDestinationResponse: + raise NotImplementedError + + @handler("CreateArchive") + def create_archive( + self, + context: RequestContext, + archive_name: ArchiveName, + event_source_arn: EventBusArn, + description: ArchiveDescription | None = None, + event_pattern: EventPattern | None = None, + retention_days: RetentionDays | None = None, + kms_key_identifier: KmsKeyIdentifier | None = None, + **kwargs, + ) -> CreateArchiveResponse: + raise NotImplementedError + + @handler("CreateConnection") + def create_connection( + 
self, + context: RequestContext, + name: ConnectionName, + authorization_type: ConnectionAuthorizationType, + auth_parameters: CreateConnectionAuthRequestParameters, + description: ConnectionDescription | None = None, + invocation_connectivity_parameters: ConnectivityResourceParameters | None = None, + kms_key_identifier: KmsKeyIdentifier | None = None, + **kwargs, + ) -> CreateConnectionResponse: + raise NotImplementedError + + @handler("CreateEndpoint") + def create_endpoint( + self, + context: RequestContext, + name: EndpointName, + routing_config: RoutingConfig, + event_buses: EndpointEventBusList, + description: EndpointDescription | None = None, + replication_config: ReplicationConfig | None = None, + role_arn: IamRoleArn | None = None, + **kwargs, + ) -> CreateEndpointResponse: + raise NotImplementedError + + @handler("CreateEventBus") + def create_event_bus( + self, + context: RequestContext, + name: EventBusName, + event_source_name: EventSourceName | None = None, + description: EventBusDescription | None = None, + kms_key_identifier: KmsKeyIdentifier | None = None, + dead_letter_config: DeadLetterConfig | None = None, + tags: TagList | None = None, + **kwargs, + ) -> CreateEventBusResponse: + raise NotImplementedError + + @handler("CreatePartnerEventSource") + def create_partner_event_source( + self, context: RequestContext, name: EventSourceName, account: AccountId, **kwargs + ) -> CreatePartnerEventSourceResponse: + raise NotImplementedError + + @handler("DeactivateEventSource") + def deactivate_event_source( + self, context: RequestContext, name: EventSourceName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeauthorizeConnection") + def deauthorize_connection( + self, context: RequestContext, name: ConnectionName, **kwargs + ) -> DeauthorizeConnectionResponse: + raise NotImplementedError + + @handler("DeleteApiDestination") + def delete_api_destination( + self, context: RequestContext, name: ApiDestinationName, **kwargs + ) -> DeleteApiDestinationResponse: + raise NotImplementedError + + @handler("DeleteArchive") + def delete_archive( + self, context: RequestContext, archive_name: ArchiveName, **kwargs + ) -> DeleteArchiveResponse: + raise NotImplementedError + + @handler("DeleteConnection") + def delete_connection( + self, context: RequestContext, name: ConnectionName, **kwargs + ) -> DeleteConnectionResponse: + raise NotImplementedError + + @handler("DeleteEndpoint") + def delete_endpoint( + self, context: RequestContext, name: EndpointName, **kwargs + ) -> DeleteEndpointResponse: + raise NotImplementedError + + @handler("DeleteEventBus") + def delete_event_bus(self, context: RequestContext, name: EventBusName, **kwargs) -> None: + raise NotImplementedError + + @handler("DeletePartnerEventSource") + def delete_partner_event_source( + self, context: RequestContext, name: EventSourceName, account: AccountId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteRule") + def delete_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn | None = None, + force: Boolean | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DescribeApiDestination") + def describe_api_destination( + self, context: RequestContext, name: ApiDestinationName, **kwargs + ) -> DescribeApiDestinationResponse: + raise NotImplementedError + + @handler("DescribeArchive") + def describe_archive( + self, context: RequestContext, archive_name: ArchiveName, **kwargs + ) -> DescribeArchiveResponse: + raise 
NotImplementedError + + @handler("DescribeConnection") + def describe_connection( + self, context: RequestContext, name: ConnectionName, **kwargs + ) -> DescribeConnectionResponse: + raise NotImplementedError + + @handler("DescribeEndpoint") + def describe_endpoint( + self, + context: RequestContext, + name: EndpointName, + home_region: HomeRegion | None = None, + **kwargs, + ) -> DescribeEndpointResponse: + raise NotImplementedError + + @handler("DescribeEventBus") + def describe_event_bus( + self, context: RequestContext, name: EventBusNameOrArn | None = None, **kwargs + ) -> DescribeEventBusResponse: + raise NotImplementedError + + @handler("DescribeEventSource") + def describe_event_source( + self, context: RequestContext, name: EventSourceName, **kwargs + ) -> DescribeEventSourceResponse: + raise NotImplementedError + + @handler("DescribePartnerEventSource") + def describe_partner_event_source( + self, context: RequestContext, name: EventSourceName, **kwargs + ) -> DescribePartnerEventSourceResponse: + raise NotImplementedError + + @handler("DescribeReplay") + def describe_replay( + self, context: RequestContext, replay_name: ReplayName, **kwargs + ) -> DescribeReplayResponse: + raise NotImplementedError + + @handler("DescribeRule") + def describe_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn | None = None, + **kwargs, + ) -> DescribeRuleResponse: + raise NotImplementedError + + @handler("DisableRule") + def disable_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("EnableRule") + def enable_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ListApiDestinations") + def list_api_destinations( + self, + context: RequestContext, + name_prefix: ApiDestinationName | None = None, + connection_arn: ConnectionArn | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListApiDestinationsResponse: + raise NotImplementedError + + @handler("ListArchives") + def list_archives( + self, + context: RequestContext, + name_prefix: ArchiveName | None = None, + event_source_arn: EventBusArn | None = None, + state: ArchiveState | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListArchivesResponse: + raise NotImplementedError + + @handler("ListConnections") + def list_connections( + self, + context: RequestContext, + name_prefix: ConnectionName | None = None, + connection_state: ConnectionState | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListConnectionsResponse: + raise NotImplementedError + + @handler("ListEndpoints") + def list_endpoints( + self, + context: RequestContext, + name_prefix: EndpointName | None = None, + home_region: HomeRegion | None = None, + next_token: NextToken | None = None, + max_results: LimitMax100 | None = None, + **kwargs, + ) -> ListEndpointsResponse: + raise NotImplementedError + + @handler("ListEventBuses") + def list_event_buses( + self, + context: RequestContext, + name_prefix: EventBusName | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListEventBusesResponse: + raise NotImplementedError + + @handler("ListEventSources") + def 
list_event_sources( + self, + context: RequestContext, + name_prefix: EventSourceNamePrefix | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListEventSourcesResponse: + raise NotImplementedError + + @handler("ListPartnerEventSourceAccounts") + def list_partner_event_source_accounts( + self, + context: RequestContext, + event_source_name: EventSourceName, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListPartnerEventSourceAccountsResponse: + raise NotImplementedError + + @handler("ListPartnerEventSources") + def list_partner_event_sources( + self, + context: RequestContext, + name_prefix: PartnerEventSourceNamePrefix, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListPartnerEventSourcesResponse: + raise NotImplementedError + + @handler("ListReplays") + def list_replays( + self, + context: RequestContext, + name_prefix: ReplayName | None = None, + state: ReplayState | None = None, + event_source_arn: ArchiveArn | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListReplaysResponse: + raise NotImplementedError + + @handler("ListRuleNamesByTarget") + def list_rule_names_by_target( + self, + context: RequestContext, + target_arn: TargetArn, + event_bus_name: EventBusNameOrArn | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListRuleNamesByTargetResponse: + raise NotImplementedError + + @handler("ListRules") + def list_rules( + self, + context: RequestContext, + name_prefix: RuleName | None = None, + event_bus_name: EventBusNameOrArn | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListRulesResponse: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: Arn, **kwargs + ) -> ListTagsForResourceResponse: + raise NotImplementedError + + @handler("ListTargetsByRule") + def list_targets_by_rule( + self, + context: RequestContext, + rule: RuleName, + event_bus_name: EventBusNameOrArn | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListTargetsByRuleResponse: + raise NotImplementedError + + @handler("PutEvents") + def put_events( + self, + context: RequestContext, + entries: PutEventsRequestEntryList, + endpoint_id: EndpointId | None = None, + **kwargs, + ) -> PutEventsResponse: + raise NotImplementedError + + @handler("PutPartnerEvents") + def put_partner_events( + self, context: RequestContext, entries: PutPartnerEventsRequestEntryList, **kwargs + ) -> PutPartnerEventsResponse: + raise NotImplementedError + + @handler("PutPermission") + def put_permission( + self, + context: RequestContext, + event_bus_name: NonPartnerEventBusName | None = None, + action: Action | None = None, + principal: Principal | None = None, + statement_id: StatementId | None = None, + condition: Condition | None = None, + policy: String | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutRule") + def put_rule( + self, + context: RequestContext, + name: RuleName, + schedule_expression: ScheduleExpression | None = None, + event_pattern: EventPattern | None = None, + state: RuleState | None = None, + description: RuleDescription | None = None, + role_arn: RoleArn | None = None, + tags: TagList | None = None, + event_bus_name: 
EventBusNameOrArn | None = None, + **kwargs, + ) -> PutRuleResponse: + raise NotImplementedError + + @handler("PutTargets") + def put_targets( + self, + context: RequestContext, + rule: RuleName, + targets: TargetList, + event_bus_name: EventBusNameOrArn | None = None, + **kwargs, + ) -> PutTargetsResponse: + raise NotImplementedError + + @handler("RemovePermission") + def remove_permission( + self, + context: RequestContext, + statement_id: StatementId | None = None, + remove_all_permissions: Boolean | None = None, + event_bus_name: NonPartnerEventBusName | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RemoveTargets") + def remove_targets( + self, + context: RequestContext, + rule: RuleName, + ids: TargetIdList, + event_bus_name: EventBusNameOrArn | None = None, + force: Boolean | None = None, + **kwargs, + ) -> RemoveTargetsResponse: + raise NotImplementedError + + @handler("StartReplay") + def start_replay( + self, + context: RequestContext, + replay_name: ReplayName, + event_source_arn: ArchiveArn, + event_start_time: Timestamp, + event_end_time: Timestamp, + destination: ReplayDestination, + description: ReplayDescription | None = None, + **kwargs, + ) -> StartReplayResponse: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: Arn, tags: TagList, **kwargs + ) -> TagResourceResponse: + raise NotImplementedError + + @handler("TestEventPattern") + def test_event_pattern( + self, context: RequestContext, event_pattern: EventPattern, event: String, **kwargs + ) -> TestEventPatternResponse: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, resource_arn: Arn, tag_keys: TagKeyList, **kwargs + ) -> UntagResourceResponse: + raise NotImplementedError + + @handler("UpdateApiDestination") + def update_api_destination( + self, + context: RequestContext, + name: ApiDestinationName, + description: ApiDestinationDescription | None = None, + connection_arn: ConnectionArn | None = None, + invocation_endpoint: HttpsEndpoint | None = None, + http_method: ApiDestinationHttpMethod | None = None, + invocation_rate_limit_per_second: ApiDestinationInvocationRateLimitPerSecond | None = None, + **kwargs, + ) -> UpdateApiDestinationResponse: + raise NotImplementedError + + @handler("UpdateArchive") + def update_archive( + self, + context: RequestContext, + archive_name: ArchiveName, + description: ArchiveDescription | None = None, + event_pattern: EventPattern | None = None, + retention_days: RetentionDays | None = None, + kms_key_identifier: KmsKeyIdentifier | None = None, + **kwargs, + ) -> UpdateArchiveResponse: + raise NotImplementedError + + @handler("UpdateConnection") + def update_connection( + self, + context: RequestContext, + name: ConnectionName, + description: ConnectionDescription | None = None, + authorization_type: ConnectionAuthorizationType | None = None, + auth_parameters: UpdateConnectionAuthRequestParameters | None = None, + invocation_connectivity_parameters: ConnectivityResourceParameters | None = None, + kms_key_identifier: KmsKeyIdentifier | None = None, + **kwargs, + ) -> UpdateConnectionResponse: + raise NotImplementedError + + @handler("UpdateEndpoint") + def update_endpoint( + self, + context: RequestContext, + name: EndpointName, + description: EndpointDescription | None = None, + routing_config: RoutingConfig | None = None, + replication_config: ReplicationConfig | None = None, + event_buses: EndpointEventBusList | 
None = None, + role_arn: IamRoleArn | None = None, + **kwargs, + ) -> UpdateEndpointResponse: + raise NotImplementedError + + @handler("UpdateEventBus") + def update_event_bus( + self, + context: RequestContext, + name: EventBusName | None = None, + kms_key_identifier: KmsKeyIdentifier | None = None, + description: EventBusDescription | None = None, + dead_letter_config: DeadLetterConfig | None = None, + **kwargs, + ) -> UpdateEventBusResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/firehose/__init__.py b/localstack-core/localstack/aws/api/firehose/__init__.py new file mode 100644 index 0000000000000..f1b3c79ac204d --- /dev/null +++ b/localstack-core/localstack/aws/api/firehose/__init__.py @@ -0,0 +1,1636 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AWSKMSKeyARN = str +AmazonOpenSearchServerlessBufferingIntervalInSeconds = int +AmazonOpenSearchServerlessBufferingSizeInMBs = int +AmazonOpenSearchServerlessCollectionEndpoint = str +AmazonOpenSearchServerlessIndexName = str +AmazonOpenSearchServerlessRetryDurationInSeconds = int +AmazonopensearchserviceBufferingIntervalInSeconds = int +AmazonopensearchserviceBufferingSizeInMBs = int +AmazonopensearchserviceClusterEndpoint = str +AmazonopensearchserviceDomainARN = str +AmazonopensearchserviceIndexName = str +AmazonopensearchserviceRetryDurationInSeconds = int +AmazonopensearchserviceTypeName = str +BlockSizeBytes = int +BooleanObject = bool +BucketARN = str +ClusterJDBCURL = str +CopyOptions = str +CustomTimeZone = str +DataTableColumns = str +DataTableName = str +DatabaseColumnName = str +DatabaseEndpoint = str +DatabaseName = str +DatabasePort = int +DatabaseTableName = str +DeliveryStreamARN = str +DeliveryStreamName = str +DeliveryStreamVersionId = str +DescribeDeliveryStreamInputLimit = int +DestinationId = str +ElasticsearchBufferingIntervalInSeconds = int +ElasticsearchBufferingSizeInMBs = int +ElasticsearchClusterEndpoint = str +ElasticsearchDomainARN = str +ElasticsearchIndexName = str +ElasticsearchRetryDurationInSeconds = int +ElasticsearchTypeName = str +ErrorCode = str +ErrorMessage = str +ErrorOutputPrefix = str +FileExtension = str +GlueDataCatalogARN = str +HECAcknowledgmentTimeoutInSeconds = int +HECEndpoint = str +HECToken = str +HttpEndpointAccessKey = str +HttpEndpointAttributeName = str +HttpEndpointAttributeValue = str +HttpEndpointBufferingIntervalInSeconds = int +HttpEndpointBufferingSizeInMBs = int +HttpEndpointName = str +HttpEndpointRetryDurationInSeconds = int +HttpEndpointUrl = str +IntervalInSeconds = int +KinesisStreamARN = str +ListDeliveryStreamsInputLimit = int +ListTagsForDeliveryStreamInputLimit = int +LogGroupName = str +LogStreamName = str +MSKClusterARN = str +NonEmptyString = str +NonEmptyStringWithoutWhitespace = str +NonNegativeIntegerObject = int +OrcRowIndexStride = int +OrcStripeSizeBytes = int +ParquetPageSizeBytes = int +Password = str +Prefix = str +ProcessorParameterValue = str +Proportion = float +PutResponseRecordId = str +RedshiftRetryDurationInSeconds = int +RetryDurationInSeconds = int +RoleARN = str +SecretARN = str +SizeInMBs = int +SnowflakeAccountUrl = str +SnowflakeBufferingIntervalInSeconds = int +SnowflakeBufferingSizeInMBs = int +SnowflakeContentColumnName = str +SnowflakeDatabase = str +SnowflakeKeyPassphrase = str +SnowflakeMetaDataColumnName = str +SnowflakePrivateKey = str 
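+# NOTE (editor's comment): as in the events module above, these aliases map
+# each scalar shape of the service model onto a plain Python type purely for
+# readable annotations; they add no runtime validation or length checks.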
+SnowflakePrivateLinkVpceId = str +SnowflakeRetryDurationInSeconds = int +SnowflakeRole = str +SnowflakeSchema = str +SnowflakeTable = str +SnowflakeUser = str +SplunkBufferingIntervalInSeconds = int +SplunkBufferingSizeInMBs = int +SplunkRetryDurationInSeconds = int +StringWithLettersDigitsUnderscoresDots = str +TagKey = str +TagValue = str +ThroughputHintInMBs = int +TopicName = str +Username = str +VpcEndpointServiceName = str +WarehouseLocation = str + + +class AmazonOpenSearchServerlessS3BackupMode(StrEnum): + FailedDocumentsOnly = "FailedDocumentsOnly" + AllDocuments = "AllDocuments" + + +class AmazonopensearchserviceIndexRotationPeriod(StrEnum): + NoRotation = "NoRotation" + OneHour = "OneHour" + OneDay = "OneDay" + OneWeek = "OneWeek" + OneMonth = "OneMonth" + + +class AmazonopensearchserviceS3BackupMode(StrEnum): + FailedDocumentsOnly = "FailedDocumentsOnly" + AllDocuments = "AllDocuments" + + +class CompressionFormat(StrEnum): + UNCOMPRESSED = "UNCOMPRESSED" + GZIP = "GZIP" + ZIP = "ZIP" + Snappy = "Snappy" + HADOOP_SNAPPY = "HADOOP_SNAPPY" + + +class Connectivity(StrEnum): + PUBLIC = "PUBLIC" + PRIVATE = "PRIVATE" + + +class ContentEncoding(StrEnum): + NONE = "NONE" + GZIP = "GZIP" + + +class DatabaseType(StrEnum): + MySQL = "MySQL" + PostgreSQL = "PostgreSQL" + + +class DefaultDocumentIdFormat(StrEnum): + FIREHOSE_DEFAULT = "FIREHOSE_DEFAULT" + NO_DOCUMENT_ID = "NO_DOCUMENT_ID" + + +class DeliveryStreamEncryptionStatus(StrEnum): + ENABLED = "ENABLED" + ENABLING = "ENABLING" + ENABLING_FAILED = "ENABLING_FAILED" + DISABLED = "DISABLED" + DISABLING = "DISABLING" + DISABLING_FAILED = "DISABLING_FAILED" + + +class DeliveryStreamFailureType(StrEnum): + VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND = "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND" + VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED = "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED" + RETIRE_KMS_GRANT_FAILED = "RETIRE_KMS_GRANT_FAILED" + CREATE_KMS_GRANT_FAILED = "CREATE_KMS_GRANT_FAILED" + KMS_ACCESS_DENIED = "KMS_ACCESS_DENIED" + DISABLED_KMS_KEY = "DISABLED_KMS_KEY" + INVALID_KMS_KEY = "INVALID_KMS_KEY" + KMS_KEY_NOT_FOUND = "KMS_KEY_NOT_FOUND" + KMS_OPT_IN_REQUIRED = "KMS_OPT_IN_REQUIRED" + CREATE_ENI_FAILED = "CREATE_ENI_FAILED" + DELETE_ENI_FAILED = "DELETE_ENI_FAILED" + SUBNET_NOT_FOUND = "SUBNET_NOT_FOUND" + SECURITY_GROUP_NOT_FOUND = "SECURITY_GROUP_NOT_FOUND" + ENI_ACCESS_DENIED = "ENI_ACCESS_DENIED" + SUBNET_ACCESS_DENIED = "SUBNET_ACCESS_DENIED" + SECURITY_GROUP_ACCESS_DENIED = "SECURITY_GROUP_ACCESS_DENIED" + UNKNOWN_ERROR = "UNKNOWN_ERROR" + + +class DeliveryStreamStatus(StrEnum): + CREATING = "CREATING" + CREATING_FAILED = "CREATING_FAILED" + DELETING = "DELETING" + DELETING_FAILED = "DELETING_FAILED" + ACTIVE = "ACTIVE" + + +class DeliveryStreamType(StrEnum): + DirectPut = "DirectPut" + KinesisStreamAsSource = "KinesisStreamAsSource" + MSKAsSource = "MSKAsSource" + DatabaseAsSource = "DatabaseAsSource" + + +class ElasticsearchIndexRotationPeriod(StrEnum): + NoRotation = "NoRotation" + OneHour = "OneHour" + OneDay = "OneDay" + OneWeek = "OneWeek" + OneMonth = "OneMonth" + + +class ElasticsearchS3BackupMode(StrEnum): + FailedDocumentsOnly = "FailedDocumentsOnly" + AllDocuments = "AllDocuments" + + +class HECEndpointType(StrEnum): + Raw = "Raw" + Event = "Event" + + +class HttpEndpointS3BackupMode(StrEnum): + FailedDataOnly = "FailedDataOnly" + AllData = "AllData" + + +class IcebergS3BackupMode(StrEnum): + FailedDataOnly = "FailedDataOnly" + AllData = "AllData" + + +class KeyType(StrEnum): + AWS_OWNED_CMK = "AWS_OWNED_CMK" + 
CUSTOMER_MANAGED_CMK = "CUSTOMER_MANAGED_CMK" + + +class NoEncryptionConfig(StrEnum): + NoEncryption = "NoEncryption" + + +class OrcCompression(StrEnum): + NONE = "NONE" + ZLIB = "ZLIB" + SNAPPY = "SNAPPY" + + +class OrcFormatVersion(StrEnum): + V0_11 = "V0_11" + V0_12 = "V0_12" + + +class ParquetCompression(StrEnum): + UNCOMPRESSED = "UNCOMPRESSED" + GZIP = "GZIP" + SNAPPY = "SNAPPY" + + +class ParquetWriterVersion(StrEnum): + V1 = "V1" + V2 = "V2" + + +class ProcessorParameterName(StrEnum): + LambdaArn = "LambdaArn" + NumberOfRetries = "NumberOfRetries" + MetadataExtractionQuery = "MetadataExtractionQuery" + JsonParsingEngine = "JsonParsingEngine" + RoleArn = "RoleArn" + BufferSizeInMBs = "BufferSizeInMBs" + BufferIntervalInSeconds = "BufferIntervalInSeconds" + SubRecordType = "SubRecordType" + Delimiter = "Delimiter" + CompressionFormat = "CompressionFormat" + DataMessageExtraction = "DataMessageExtraction" + + +class ProcessorType(StrEnum): + RecordDeAggregation = "RecordDeAggregation" + Decompression = "Decompression" + CloudWatchLogProcessing = "CloudWatchLogProcessing" + Lambda = "Lambda" + MetadataExtraction = "MetadataExtraction" + AppendDelimiterToRecord = "AppendDelimiterToRecord" + + +class RedshiftS3BackupMode(StrEnum): + Disabled = "Disabled" + Enabled = "Enabled" + + +class S3BackupMode(StrEnum): + Disabled = "Disabled" + Enabled = "Enabled" + + +class SSLMode(StrEnum): + Disabled = "Disabled" + Enabled = "Enabled" + + +class SnapshotRequestedBy(StrEnum): + USER = "USER" + FIREHOSE = "FIREHOSE" + + +class SnapshotStatus(StrEnum): + IN_PROGRESS = "IN_PROGRESS" + COMPLETE = "COMPLETE" + SUSPENDED = "SUSPENDED" + + +class SnowflakeDataLoadingOption(StrEnum): + JSON_MAPPING = "JSON_MAPPING" + VARIANT_CONTENT_MAPPING = "VARIANT_CONTENT_MAPPING" + VARIANT_CONTENT_AND_METADATA_MAPPING = "VARIANT_CONTENT_AND_METADATA_MAPPING" + + +class SnowflakeS3BackupMode(StrEnum): + FailedDataOnly = "FailedDataOnly" + AllData = "AllData" + + +class SplunkS3BackupMode(StrEnum): + FailedEventsOnly = "FailedEventsOnly" + AllEvents = "AllEvents" + + +class ConcurrentModificationException(ServiceException): + code: str = "ConcurrentModificationException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidArgumentException(ServiceException): + code: str = "InvalidArgumentException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidKMSResourceException(ServiceException): + code: str = "InvalidKMSResourceException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidSourceException(ServiceException): + code: str = "InvalidSourceException" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceInUseException(ServiceException): + code: str = "ResourceInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class ServiceUnavailableException(ServiceException): + code: str = "ServiceUnavailableException" + sender_fault: bool = False + status_code: int = 400 + + +class AmazonOpenSearchServerlessBufferingHints(TypedDict, total=False): + IntervalInSeconds: Optional[AmazonOpenSearchServerlessBufferingIntervalInSeconds] + SizeInMBs: Optional[AmazonOpenSearchServerlessBufferingSizeInMBs] + + +SecurityGroupIdList = 
List[NonEmptyStringWithoutWhitespace] +SubnetIdList = List[NonEmptyStringWithoutWhitespace] + + +class VpcConfiguration(TypedDict, total=False): + SubnetIds: SubnetIdList + RoleARN: RoleARN + SecurityGroupIds: SecurityGroupIdList + + +class CloudWatchLoggingOptions(TypedDict, total=False): + Enabled: Optional[BooleanObject] + LogGroupName: Optional[LogGroupName] + LogStreamName: Optional[LogStreamName] + + +class ProcessorParameter(TypedDict, total=False): + ParameterName: ProcessorParameterName + ParameterValue: ProcessorParameterValue + + +ProcessorParameterList = List[ProcessorParameter] + + +class Processor(TypedDict, total=False): + Type: ProcessorType + Parameters: Optional[ProcessorParameterList] + + +ProcessorList = List[Processor] + + +class ProcessingConfiguration(TypedDict, total=False): + Enabled: Optional[BooleanObject] + Processors: Optional[ProcessorList] + + +class KMSEncryptionConfig(TypedDict, total=False): + AWSKMSKeyARN: AWSKMSKeyARN + + +class EncryptionConfiguration(TypedDict, total=False): + NoEncryptionConfig: Optional[NoEncryptionConfig] + KMSEncryptionConfig: Optional[KMSEncryptionConfig] + + +class BufferingHints(TypedDict, total=False): + SizeInMBs: Optional[SizeInMBs] + IntervalInSeconds: Optional[IntervalInSeconds] + + +class S3DestinationConfiguration(TypedDict, total=False): + RoleARN: RoleARN + BucketARN: BucketARN + Prefix: Optional[Prefix] + ErrorOutputPrefix: Optional[ErrorOutputPrefix] + BufferingHints: Optional[BufferingHints] + CompressionFormat: Optional[CompressionFormat] + EncryptionConfiguration: Optional[EncryptionConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + + +class AmazonOpenSearchServerlessRetryOptions(TypedDict, total=False): + DurationInSeconds: Optional[AmazonOpenSearchServerlessRetryDurationInSeconds] + + +class AmazonOpenSearchServerlessDestinationConfiguration(TypedDict, total=False): + RoleARN: RoleARN + CollectionEndpoint: Optional[AmazonOpenSearchServerlessCollectionEndpoint] + IndexName: AmazonOpenSearchServerlessIndexName + BufferingHints: Optional[AmazonOpenSearchServerlessBufferingHints] + RetryOptions: Optional[AmazonOpenSearchServerlessRetryOptions] + S3BackupMode: Optional[AmazonOpenSearchServerlessS3BackupMode] + S3Configuration: S3DestinationConfiguration + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + VpcConfiguration: Optional[VpcConfiguration] + + +class VpcConfigurationDescription(TypedDict, total=False): + SubnetIds: SubnetIdList + RoleARN: RoleARN + SecurityGroupIds: SecurityGroupIdList + VpcId: NonEmptyStringWithoutWhitespace + + +class S3DestinationDescription(TypedDict, total=False): + RoleARN: RoleARN + BucketARN: BucketARN + Prefix: Optional[Prefix] + ErrorOutputPrefix: Optional[ErrorOutputPrefix] + BufferingHints: BufferingHints + CompressionFormat: CompressionFormat + EncryptionConfiguration: EncryptionConfiguration + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + + +class AmazonOpenSearchServerlessDestinationDescription(TypedDict, total=False): + RoleARN: Optional[RoleARN] + CollectionEndpoint: Optional[AmazonOpenSearchServerlessCollectionEndpoint] + IndexName: Optional[AmazonOpenSearchServerlessIndexName] + BufferingHints: Optional[AmazonOpenSearchServerlessBufferingHints] + RetryOptions: Optional[AmazonOpenSearchServerlessRetryOptions] + S3BackupMode: Optional[AmazonOpenSearchServerlessS3BackupMode] + S3DestinationDescription: Optional[S3DestinationDescription] + 
ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + VpcConfigurationDescription: Optional[VpcConfigurationDescription] + + +class S3DestinationUpdate(TypedDict, total=False): + RoleARN: Optional[RoleARN] + BucketARN: Optional[BucketARN] + Prefix: Optional[Prefix] + ErrorOutputPrefix: Optional[ErrorOutputPrefix] + BufferingHints: Optional[BufferingHints] + CompressionFormat: Optional[CompressionFormat] + EncryptionConfiguration: Optional[EncryptionConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + + +class AmazonOpenSearchServerlessDestinationUpdate(TypedDict, total=False): + RoleARN: Optional[RoleARN] + CollectionEndpoint: Optional[AmazonOpenSearchServerlessCollectionEndpoint] + IndexName: Optional[AmazonOpenSearchServerlessIndexName] + BufferingHints: Optional[AmazonOpenSearchServerlessBufferingHints] + RetryOptions: Optional[AmazonOpenSearchServerlessRetryOptions] + S3Update: Optional[S3DestinationUpdate] + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + + +class AmazonopensearchserviceBufferingHints(TypedDict, total=False): + IntervalInSeconds: Optional[AmazonopensearchserviceBufferingIntervalInSeconds] + SizeInMBs: Optional[AmazonopensearchserviceBufferingSizeInMBs] + + +class DocumentIdOptions(TypedDict, total=False): + DefaultDocumentIdFormat: DefaultDocumentIdFormat + + +class AmazonopensearchserviceRetryOptions(TypedDict, total=False): + DurationInSeconds: Optional[AmazonopensearchserviceRetryDurationInSeconds] + + +class AmazonopensearchserviceDestinationConfiguration(TypedDict, total=False): + RoleARN: RoleARN + DomainARN: Optional[AmazonopensearchserviceDomainARN] + ClusterEndpoint: Optional[AmazonopensearchserviceClusterEndpoint] + IndexName: AmazonopensearchserviceIndexName + TypeName: Optional[AmazonopensearchserviceTypeName] + IndexRotationPeriod: Optional[AmazonopensearchserviceIndexRotationPeriod] + BufferingHints: Optional[AmazonopensearchserviceBufferingHints] + RetryOptions: Optional[AmazonopensearchserviceRetryOptions] + S3BackupMode: Optional[AmazonopensearchserviceS3BackupMode] + S3Configuration: S3DestinationConfiguration + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + VpcConfiguration: Optional[VpcConfiguration] + DocumentIdOptions: Optional[DocumentIdOptions] + + +class AmazonopensearchserviceDestinationDescription(TypedDict, total=False): + RoleARN: Optional[RoleARN] + DomainARN: Optional[AmazonopensearchserviceDomainARN] + ClusterEndpoint: Optional[AmazonopensearchserviceClusterEndpoint] + IndexName: Optional[AmazonopensearchserviceIndexName] + TypeName: Optional[AmazonopensearchserviceTypeName] + IndexRotationPeriod: Optional[AmazonopensearchserviceIndexRotationPeriod] + BufferingHints: Optional[AmazonopensearchserviceBufferingHints] + RetryOptions: Optional[AmazonopensearchserviceRetryOptions] + S3BackupMode: Optional[AmazonopensearchserviceS3BackupMode] + S3DestinationDescription: Optional[S3DestinationDescription] + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + VpcConfigurationDescription: Optional[VpcConfigurationDescription] + DocumentIdOptions: Optional[DocumentIdOptions] + + +class AmazonopensearchserviceDestinationUpdate(TypedDict, total=False): + RoleARN: Optional[RoleARN] + DomainARN: 
Optional[AmazonopensearchserviceDomainARN] + ClusterEndpoint: Optional[AmazonopensearchserviceClusterEndpoint] + IndexName: Optional[AmazonopensearchserviceIndexName] + TypeName: Optional[AmazonopensearchserviceTypeName] + IndexRotationPeriod: Optional[AmazonopensearchserviceIndexRotationPeriod] + BufferingHints: Optional[AmazonopensearchserviceBufferingHints] + RetryOptions: Optional[AmazonopensearchserviceRetryOptions] + S3Update: Optional[S3DestinationUpdate] + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + DocumentIdOptions: Optional[DocumentIdOptions] + + +class AuthenticationConfiguration(TypedDict, total=False): + RoleARN: RoleARN + Connectivity: Connectivity + + +class CatalogConfiguration(TypedDict, total=False): + CatalogARN: Optional[GlueDataCatalogARN] + WarehouseLocation: Optional[WarehouseLocation] + + +ColumnToJsonKeyMappings = Dict[NonEmptyStringWithoutWhitespace, NonEmptyString] + + +class CopyCommand(TypedDict, total=False): + DataTableName: DataTableName + DataTableColumns: Optional[DataTableColumns] + CopyOptions: Optional[CopyOptions] + + +class DatabaseSourceVPCConfiguration(TypedDict, total=False): + VpcEndpointServiceName: VpcEndpointServiceName + + +class SecretsManagerConfiguration(TypedDict, total=False): + SecretARN: Optional[SecretARN] + RoleARN: Optional[RoleARN] + Enabled: BooleanObject + + +class DatabaseSourceAuthenticationConfiguration(TypedDict, total=False): + SecretsManagerConfiguration: SecretsManagerConfiguration + + +DatabaseSurrogateKeyList = List[NonEmptyStringWithoutWhitespace] +DatabaseColumnIncludeOrExcludeList = List[DatabaseColumnName] + + +class DatabaseColumnList(TypedDict, total=False): + Include: Optional[DatabaseColumnIncludeOrExcludeList] + Exclude: Optional[DatabaseColumnIncludeOrExcludeList] + + +DatabaseTableIncludeOrExcludeList = List[DatabaseTableName] + + +class DatabaseTableList(TypedDict, total=False): + Include: Optional[DatabaseTableIncludeOrExcludeList] + Exclude: Optional[DatabaseTableIncludeOrExcludeList] + + +DatabaseIncludeOrExcludeList = List[DatabaseName] + + +class DatabaseList(TypedDict, total=False): + Include: Optional[DatabaseIncludeOrExcludeList] + Exclude: Optional[DatabaseIncludeOrExcludeList] + + +class DatabaseSourceConfiguration(TypedDict, total=False): + Type: DatabaseType + Endpoint: DatabaseEndpoint + Port: DatabasePort + SSLMode: Optional[SSLMode] + Databases: DatabaseList + Tables: DatabaseTableList + Columns: Optional[DatabaseColumnList] + SurrogateKeys: Optional[DatabaseSurrogateKeyList] + SnapshotWatermarkTable: DatabaseTableName + DatabaseSourceAuthenticationConfiguration: DatabaseSourceAuthenticationConfiguration + DatabaseSourceVPCConfiguration: DatabaseSourceVPCConfiguration + + +class RetryOptions(TypedDict, total=False): + DurationInSeconds: Optional[RetryDurationInSeconds] + + +class TableCreationConfiguration(TypedDict, total=False): + Enabled: BooleanObject + + +class SchemaEvolutionConfiguration(TypedDict, total=False): + Enabled: BooleanObject + + +class PartitionField(TypedDict, total=False): + SourceName: NonEmptyStringWithoutWhitespace + + +PartitionFields = List[PartitionField] + + +class PartitionSpec(TypedDict, total=False): + Identity: Optional[PartitionFields] + + +ListOfNonEmptyStringsWithoutWhitespace = List[NonEmptyStringWithoutWhitespace] + + +class DestinationTableConfiguration(TypedDict, total=False): + DestinationTableName: StringWithLettersDigitsUnderscoresDots + DestinationDatabaseName: 
StringWithLettersDigitsUnderscoresDots + UniqueKeys: Optional[ListOfNonEmptyStringsWithoutWhitespace] + PartitionSpec: Optional[PartitionSpec] + S3ErrorOutputPrefix: Optional[ErrorOutputPrefix] + + +DestinationTableConfigurationList = List[DestinationTableConfiguration] + + +class IcebergDestinationConfiguration(TypedDict, total=False): + DestinationTableConfigurationList: Optional[DestinationTableConfigurationList] + SchemaEvolutionConfiguration: Optional[SchemaEvolutionConfiguration] + TableCreationConfiguration: Optional[TableCreationConfiguration] + BufferingHints: Optional[BufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: Optional[IcebergS3BackupMode] + RetryOptions: Optional[RetryOptions] + RoleARN: RoleARN + AppendOnly: Optional[BooleanObject] + CatalogConfiguration: CatalogConfiguration + S3Configuration: S3DestinationConfiguration + + +class SnowflakeBufferingHints(TypedDict, total=False): + SizeInMBs: Optional[SnowflakeBufferingSizeInMBs] + IntervalInSeconds: Optional[SnowflakeBufferingIntervalInSeconds] + + +class SnowflakeRetryOptions(TypedDict, total=False): + DurationInSeconds: Optional[SnowflakeRetryDurationInSeconds] + + +class SnowflakeVpcConfiguration(TypedDict, total=False): + PrivateLinkVpceId: SnowflakePrivateLinkVpceId + + +class SnowflakeRoleConfiguration(TypedDict, total=False): + Enabled: Optional[BooleanObject] + SnowflakeRole: Optional[SnowflakeRole] + + +class SnowflakeDestinationConfiguration(TypedDict, total=False): + AccountUrl: SnowflakeAccountUrl + PrivateKey: Optional[SnowflakePrivateKey] + KeyPassphrase: Optional[SnowflakeKeyPassphrase] + User: Optional[SnowflakeUser] + Database: SnowflakeDatabase + Schema: SnowflakeSchema + Table: SnowflakeTable + SnowflakeRoleConfiguration: Optional[SnowflakeRoleConfiguration] + DataLoadingOption: Optional[SnowflakeDataLoadingOption] + MetaDataColumnName: Optional[SnowflakeMetaDataColumnName] + ContentColumnName: Optional[SnowflakeContentColumnName] + SnowflakeVpcConfiguration: Optional[SnowflakeVpcConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RoleARN: RoleARN + RetryOptions: Optional[SnowflakeRetryOptions] + S3BackupMode: Optional[SnowflakeS3BackupMode] + S3Configuration: S3DestinationConfiguration + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + BufferingHints: Optional[SnowflakeBufferingHints] + + +ReadFromTimestamp = datetime + + +class MSKSourceConfiguration(TypedDict, total=False): + MSKClusterARN: MSKClusterARN + TopicName: TopicName + AuthenticationConfiguration: AuthenticationConfiguration + ReadFromTimestamp: Optional[ReadFromTimestamp] + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: Optional[TagValue] + + +TagDeliveryStreamInputTagList = List[Tag] + + +class HttpEndpointRetryOptions(TypedDict, total=False): + DurationInSeconds: Optional[HttpEndpointRetryDurationInSeconds] + + +class HttpEndpointCommonAttribute(TypedDict, total=False): + AttributeName: HttpEndpointAttributeName + AttributeValue: HttpEndpointAttributeValue + + +HttpEndpointCommonAttributesList = List[HttpEndpointCommonAttribute] + + +class HttpEndpointRequestConfiguration(TypedDict, total=False): + ContentEncoding: Optional[ContentEncoding] + CommonAttributes: Optional[HttpEndpointCommonAttributesList] + + +class HttpEndpointBufferingHints(TypedDict, total=False): + SizeInMBs: 
Optional[HttpEndpointBufferingSizeInMBs] + IntervalInSeconds: Optional[HttpEndpointBufferingIntervalInSeconds] + + +class HttpEndpointConfiguration(TypedDict, total=False): + Url: HttpEndpointUrl + Name: Optional[HttpEndpointName] + AccessKey: Optional[HttpEndpointAccessKey] + + +class HttpEndpointDestinationConfiguration(TypedDict, total=False): + EndpointConfiguration: HttpEndpointConfiguration + BufferingHints: Optional[HttpEndpointBufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + RequestConfiguration: Optional[HttpEndpointRequestConfiguration] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RoleARN: Optional[RoleARN] + RetryOptions: Optional[HttpEndpointRetryOptions] + S3BackupMode: Optional[HttpEndpointS3BackupMode] + S3Configuration: S3DestinationConfiguration + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class SplunkBufferingHints(TypedDict, total=False): + IntervalInSeconds: Optional[SplunkBufferingIntervalInSeconds] + SizeInMBs: Optional[SplunkBufferingSizeInMBs] + + +class SplunkRetryOptions(TypedDict, total=False): + DurationInSeconds: Optional[SplunkRetryDurationInSeconds] + + +class SplunkDestinationConfiguration(TypedDict, total=False): + HECEndpoint: HECEndpoint + HECEndpointType: HECEndpointType + HECToken: Optional[HECToken] + HECAcknowledgmentTimeoutInSeconds: Optional[HECAcknowledgmentTimeoutInSeconds] + RetryOptions: Optional[SplunkRetryOptions] + S3BackupMode: Optional[SplunkS3BackupMode] + S3Configuration: S3DestinationConfiguration + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + BufferingHints: Optional[SplunkBufferingHints] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class ElasticsearchRetryOptions(TypedDict, total=False): + DurationInSeconds: Optional[ElasticsearchRetryDurationInSeconds] + + +class ElasticsearchBufferingHints(TypedDict, total=False): + IntervalInSeconds: Optional[ElasticsearchBufferingIntervalInSeconds] + SizeInMBs: Optional[ElasticsearchBufferingSizeInMBs] + + +class ElasticsearchDestinationConfiguration(TypedDict, total=False): + RoleARN: RoleARN + DomainARN: Optional[ElasticsearchDomainARN] + ClusterEndpoint: Optional[ElasticsearchClusterEndpoint] + IndexName: ElasticsearchIndexName + TypeName: Optional[ElasticsearchTypeName] + IndexRotationPeriod: Optional[ElasticsearchIndexRotationPeriod] + BufferingHints: Optional[ElasticsearchBufferingHints] + RetryOptions: Optional[ElasticsearchRetryOptions] + S3BackupMode: Optional[ElasticsearchS3BackupMode] + S3Configuration: S3DestinationConfiguration + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + VpcConfiguration: Optional[VpcConfiguration] + DocumentIdOptions: Optional[DocumentIdOptions] + + +class RedshiftRetryOptions(TypedDict, total=False): + DurationInSeconds: Optional[RedshiftRetryDurationInSeconds] + + +class RedshiftDestinationConfiguration(TypedDict, total=False): + RoleARN: RoleARN + ClusterJDBCURL: ClusterJDBCURL + CopyCommand: CopyCommand + Username: Optional[Username] + Password: Optional[Password] + RetryOptions: Optional[RedshiftRetryOptions] + S3Configuration: S3DestinationConfiguration + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: Optional[RedshiftS3BackupMode] + S3BackupConfiguration: Optional[S3DestinationConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + 
SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class DynamicPartitioningConfiguration(TypedDict, total=False): + RetryOptions: Optional[RetryOptions] + Enabled: Optional[BooleanObject] + + +class OrcSerDe(TypedDict, total=False): + StripeSizeBytes: Optional[OrcStripeSizeBytes] + BlockSizeBytes: Optional[BlockSizeBytes] + RowIndexStride: Optional[OrcRowIndexStride] + EnablePadding: Optional[BooleanObject] + PaddingTolerance: Optional[Proportion] + Compression: Optional[OrcCompression] + BloomFilterColumns: Optional[ListOfNonEmptyStringsWithoutWhitespace] + BloomFilterFalsePositiveProbability: Optional[Proportion] + DictionaryKeyThreshold: Optional[Proportion] + FormatVersion: Optional[OrcFormatVersion] + + +class ParquetSerDe(TypedDict, total=False): + BlockSizeBytes: Optional[BlockSizeBytes] + PageSizeBytes: Optional[ParquetPageSizeBytes] + Compression: Optional[ParquetCompression] + EnableDictionaryCompression: Optional[BooleanObject] + MaxPaddingBytes: Optional[NonNegativeIntegerObject] + WriterVersion: Optional[ParquetWriterVersion] + + +class Serializer(TypedDict, total=False): + ParquetSerDe: Optional[ParquetSerDe] + OrcSerDe: Optional[OrcSerDe] + + +class OutputFormatConfiguration(TypedDict, total=False): + Serializer: Optional[Serializer] + + +ListOfNonEmptyStrings = List[NonEmptyString] + + +class HiveJsonSerDe(TypedDict, total=False): + TimestampFormats: Optional[ListOfNonEmptyStrings] + + +class OpenXJsonSerDe(TypedDict, total=False): + ConvertDotsInJsonKeysToUnderscores: Optional[BooleanObject] + CaseInsensitive: Optional[BooleanObject] + ColumnToJsonKeyMappings: Optional[ColumnToJsonKeyMappings] + + +class Deserializer(TypedDict, total=False): + OpenXJsonSerDe: Optional[OpenXJsonSerDe] + HiveJsonSerDe: Optional[HiveJsonSerDe] + + +class InputFormatConfiguration(TypedDict, total=False): + Deserializer: Optional[Deserializer] + + +class SchemaConfiguration(TypedDict, total=False): + RoleARN: Optional[NonEmptyStringWithoutWhitespace] + CatalogId: Optional[NonEmptyStringWithoutWhitespace] + DatabaseName: Optional[NonEmptyStringWithoutWhitespace] + TableName: Optional[NonEmptyStringWithoutWhitespace] + Region: Optional[NonEmptyStringWithoutWhitespace] + VersionId: Optional[NonEmptyStringWithoutWhitespace] + + +class DataFormatConversionConfiguration(TypedDict, total=False): + SchemaConfiguration: Optional[SchemaConfiguration] + InputFormatConfiguration: Optional[InputFormatConfiguration] + OutputFormatConfiguration: Optional[OutputFormatConfiguration] + Enabled: Optional[BooleanObject] + + +class ExtendedS3DestinationConfiguration(TypedDict, total=False): + RoleARN: RoleARN + BucketARN: BucketARN + Prefix: Optional[Prefix] + ErrorOutputPrefix: Optional[ErrorOutputPrefix] + BufferingHints: Optional[BufferingHints] + CompressionFormat: Optional[CompressionFormat] + EncryptionConfiguration: Optional[EncryptionConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: Optional[S3BackupMode] + S3BackupConfiguration: Optional[S3DestinationConfiguration] + DataFormatConversionConfiguration: Optional[DataFormatConversionConfiguration] + DynamicPartitioningConfiguration: Optional[DynamicPartitioningConfiguration] + FileExtension: Optional[FileExtension] + CustomTimeZone: Optional[CustomTimeZone] + + +class DeliveryStreamEncryptionConfigurationInput(TypedDict, total=False): + KeyARN: Optional[AWSKMSKeyARN] + KeyType: KeyType + + +class 
KinesisStreamSourceConfiguration(TypedDict, total=False): + KinesisStreamARN: KinesisStreamARN + RoleARN: RoleARN + + +class DirectPutSourceConfiguration(TypedDict, total=False): + ThroughputHintInMBs: ThroughputHintInMBs + + +class CreateDeliveryStreamInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + DeliveryStreamType: Optional[DeliveryStreamType] + DirectPutSourceConfiguration: Optional[DirectPutSourceConfiguration] + KinesisStreamSourceConfiguration: Optional[KinesisStreamSourceConfiguration] + DeliveryStreamEncryptionConfigurationInput: Optional[DeliveryStreamEncryptionConfigurationInput] + S3DestinationConfiguration: Optional[S3DestinationConfiguration] + ExtendedS3DestinationConfiguration: Optional[ExtendedS3DestinationConfiguration] + RedshiftDestinationConfiguration: Optional[RedshiftDestinationConfiguration] + ElasticsearchDestinationConfiguration: Optional[ElasticsearchDestinationConfiguration] + AmazonopensearchserviceDestinationConfiguration: Optional[ + AmazonopensearchserviceDestinationConfiguration + ] + SplunkDestinationConfiguration: Optional[SplunkDestinationConfiguration] + HttpEndpointDestinationConfiguration: Optional[HttpEndpointDestinationConfiguration] + Tags: Optional[TagDeliveryStreamInputTagList] + AmazonOpenSearchServerlessDestinationConfiguration: Optional[ + AmazonOpenSearchServerlessDestinationConfiguration + ] + MSKSourceConfiguration: Optional[MSKSourceConfiguration] + SnowflakeDestinationConfiguration: Optional[SnowflakeDestinationConfiguration] + IcebergDestinationConfiguration: Optional[IcebergDestinationConfiguration] + DatabaseSourceConfiguration: Optional[DatabaseSourceConfiguration] + + +class CreateDeliveryStreamOutput(TypedDict, total=False): + DeliveryStreamARN: Optional[DeliveryStreamARN] + + +Data = bytes + + +class FailureDescription(TypedDict, total=False): + Type: DeliveryStreamFailureType + Details: NonEmptyString + + +Timestamp = datetime + + +class DatabaseSnapshotInfo(TypedDict, total=False): + Id: NonEmptyStringWithoutWhitespace + Table: DatabaseTableName + RequestTimestamp: Timestamp + RequestedBy: SnapshotRequestedBy + Status: SnapshotStatus + FailureDescription: Optional[FailureDescription] + + +DatabaseSnapshotInfoList = List[DatabaseSnapshotInfo] + + +class DatabaseSourceDescription(TypedDict, total=False): + Type: Optional[DatabaseType] + Endpoint: Optional[DatabaseEndpoint] + Port: Optional[DatabasePort] + SSLMode: Optional[SSLMode] + Databases: Optional[DatabaseList] + Tables: Optional[DatabaseTableList] + Columns: Optional[DatabaseColumnList] + SurrogateKeys: Optional[DatabaseColumnIncludeOrExcludeList] + SnapshotWatermarkTable: Optional[DatabaseTableName] + SnapshotInfo: Optional[DatabaseSnapshotInfoList] + DatabaseSourceAuthenticationConfiguration: Optional[DatabaseSourceAuthenticationConfiguration] + DatabaseSourceVPCConfiguration: Optional[DatabaseSourceVPCConfiguration] + + +class DeleteDeliveryStreamInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + AllowForceDelete: Optional[BooleanObject] + + +class DeleteDeliveryStreamOutput(TypedDict, total=False): + pass + + +DeliveryStartTimestamp = datetime + + +class IcebergDestinationDescription(TypedDict, total=False): + DestinationTableConfigurationList: Optional[DestinationTableConfigurationList] + SchemaEvolutionConfiguration: Optional[SchemaEvolutionConfiguration] + TableCreationConfiguration: Optional[TableCreationConfiguration] + BufferingHints: Optional[BufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + 
ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: Optional[IcebergS3BackupMode] + RetryOptions: Optional[RetryOptions] + RoleARN: Optional[RoleARN] + AppendOnly: Optional[BooleanObject] + CatalogConfiguration: Optional[CatalogConfiguration] + S3DestinationDescription: Optional[S3DestinationDescription] + + +class SnowflakeDestinationDescription(TypedDict, total=False): + AccountUrl: Optional[SnowflakeAccountUrl] + User: Optional[SnowflakeUser] + Database: Optional[SnowflakeDatabase] + Schema: Optional[SnowflakeSchema] + Table: Optional[SnowflakeTable] + SnowflakeRoleConfiguration: Optional[SnowflakeRoleConfiguration] + DataLoadingOption: Optional[SnowflakeDataLoadingOption] + MetaDataColumnName: Optional[SnowflakeMetaDataColumnName] + ContentColumnName: Optional[SnowflakeContentColumnName] + SnowflakeVpcConfiguration: Optional[SnowflakeVpcConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RoleARN: Optional[RoleARN] + RetryOptions: Optional[SnowflakeRetryOptions] + S3BackupMode: Optional[SnowflakeS3BackupMode] + S3DestinationDescription: Optional[S3DestinationDescription] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + BufferingHints: Optional[SnowflakeBufferingHints] + + +class HttpEndpointDescription(TypedDict, total=False): + Url: Optional[HttpEndpointUrl] + Name: Optional[HttpEndpointName] + + +class HttpEndpointDestinationDescription(TypedDict, total=False): + EndpointConfiguration: Optional[HttpEndpointDescription] + BufferingHints: Optional[HttpEndpointBufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + RequestConfiguration: Optional[HttpEndpointRequestConfiguration] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RoleARN: Optional[RoleARN] + RetryOptions: Optional[HttpEndpointRetryOptions] + S3BackupMode: Optional[HttpEndpointS3BackupMode] + S3DestinationDescription: Optional[S3DestinationDescription] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class SplunkDestinationDescription(TypedDict, total=False): + HECEndpoint: Optional[HECEndpoint] + HECEndpointType: Optional[HECEndpointType] + HECToken: Optional[HECToken] + HECAcknowledgmentTimeoutInSeconds: Optional[HECAcknowledgmentTimeoutInSeconds] + RetryOptions: Optional[SplunkRetryOptions] + S3BackupMode: Optional[SplunkS3BackupMode] + S3DestinationDescription: Optional[S3DestinationDescription] + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + BufferingHints: Optional[SplunkBufferingHints] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class ElasticsearchDestinationDescription(TypedDict, total=False): + RoleARN: Optional[RoleARN] + DomainARN: Optional[ElasticsearchDomainARN] + ClusterEndpoint: Optional[ElasticsearchClusterEndpoint] + IndexName: Optional[ElasticsearchIndexName] + TypeName: Optional[ElasticsearchTypeName] + IndexRotationPeriod: Optional[ElasticsearchIndexRotationPeriod] + BufferingHints: Optional[ElasticsearchBufferingHints] + RetryOptions: Optional[ElasticsearchRetryOptions] + S3BackupMode: Optional[ElasticsearchS3BackupMode] + S3DestinationDescription: Optional[S3DestinationDescription] + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + VpcConfigurationDescription: Optional[VpcConfigurationDescription] + DocumentIdOptions: 
Optional[DocumentIdOptions] + + +class RedshiftDestinationDescription(TypedDict, total=False): + RoleARN: RoleARN + ClusterJDBCURL: ClusterJDBCURL + CopyCommand: CopyCommand + Username: Optional[Username] + RetryOptions: Optional[RedshiftRetryOptions] + S3DestinationDescription: S3DestinationDescription + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: Optional[RedshiftS3BackupMode] + S3BackupDescription: Optional[S3DestinationDescription] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class ExtendedS3DestinationDescription(TypedDict, total=False): + RoleARN: RoleARN + BucketARN: BucketARN + Prefix: Optional[Prefix] + ErrorOutputPrefix: Optional[ErrorOutputPrefix] + BufferingHints: BufferingHints + CompressionFormat: CompressionFormat + EncryptionConfiguration: EncryptionConfiguration + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: Optional[S3BackupMode] + S3BackupDescription: Optional[S3DestinationDescription] + DataFormatConversionConfiguration: Optional[DataFormatConversionConfiguration] + DynamicPartitioningConfiguration: Optional[DynamicPartitioningConfiguration] + FileExtension: Optional[FileExtension] + CustomTimeZone: Optional[CustomTimeZone] + + +class DestinationDescription(TypedDict, total=False): + DestinationId: DestinationId + S3DestinationDescription: Optional[S3DestinationDescription] + ExtendedS3DestinationDescription: Optional[ExtendedS3DestinationDescription] + RedshiftDestinationDescription: Optional[RedshiftDestinationDescription] + ElasticsearchDestinationDescription: Optional[ElasticsearchDestinationDescription] + AmazonopensearchserviceDestinationDescription: Optional[ + AmazonopensearchserviceDestinationDescription + ] + SplunkDestinationDescription: Optional[SplunkDestinationDescription] + HttpEndpointDestinationDescription: Optional[HttpEndpointDestinationDescription] + SnowflakeDestinationDescription: Optional[SnowflakeDestinationDescription] + AmazonOpenSearchServerlessDestinationDescription: Optional[ + AmazonOpenSearchServerlessDestinationDescription + ] + IcebergDestinationDescription: Optional[IcebergDestinationDescription] + + +DestinationDescriptionList = List[DestinationDescription] + + +class MSKSourceDescription(TypedDict, total=False): + MSKClusterARN: Optional[MSKClusterARN] + TopicName: Optional[TopicName] + AuthenticationConfiguration: Optional[AuthenticationConfiguration] + DeliveryStartTimestamp: Optional[DeliveryStartTimestamp] + ReadFromTimestamp: Optional[ReadFromTimestamp] + + +class KinesisStreamSourceDescription(TypedDict, total=False): + KinesisStreamARN: Optional[KinesisStreamARN] + RoleARN: Optional[RoleARN] + DeliveryStartTimestamp: Optional[DeliveryStartTimestamp] + + +class DirectPutSourceDescription(TypedDict, total=False): + ThroughputHintInMBs: Optional[ThroughputHintInMBs] + + +class SourceDescription(TypedDict, total=False): + DirectPutSourceDescription: Optional[DirectPutSourceDescription] + KinesisStreamSourceDescription: Optional[KinesisStreamSourceDescription] + MSKSourceDescription: Optional[MSKSourceDescription] + DatabaseSourceDescription: Optional[DatabaseSourceDescription] + + +class DeliveryStreamEncryptionConfiguration(TypedDict, total=False): + KeyARN: Optional[AWSKMSKeyARN] + KeyType: Optional[KeyType] + Status: Optional[DeliveryStreamEncryptionStatus] + FailureDescription: Optional[FailureDescription] + + 
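+# --- Illustrative sketch (editor's note, not part of the generated model) ---
+# The shapes above compose as plain dictionaries at runtime: every class is a
+# TypedDict declared with total=False, so type checkers treat all keys as
+# omittable, while the Optional[...] annotations record which members the AWS
+# service model itself marks optional. DestinationDescription behaves like a
+# tagged union in which exactly one concrete *DestinationDescription member is
+# populated. The ARNs and IDs below are made-up placeholder values.
+#
+#   example_destination: DestinationDescription = {
+#       "DestinationId": "destinationId-000000000001",
+#       "S3DestinationDescription": {
+#           "RoleARN": "arn:aws:iam::000000000000:role/firehose-role",
+#           "BucketARN": "arn:aws:s3:::example-bucket",
+#           "BufferingHints": {"SizeInMBs": 5, "IntervalInSeconds": 300},
+#           "CompressionFormat": CompressionFormat.UNCOMPRESSED,
+#           "EncryptionConfiguration": {"NoEncryptionConfig": NoEncryptionConfig.NoEncryption},
+#       },
+#   }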
+class DeliveryStreamDescription(TypedDict, total=False): + DeliveryStreamName: DeliveryStreamName + DeliveryStreamARN: DeliveryStreamARN + DeliveryStreamStatus: DeliveryStreamStatus + FailureDescription: Optional[FailureDescription] + DeliveryStreamEncryptionConfiguration: Optional[DeliveryStreamEncryptionConfiguration] + DeliveryStreamType: DeliveryStreamType + VersionId: DeliveryStreamVersionId + CreateTimestamp: Optional[Timestamp] + LastUpdateTimestamp: Optional[Timestamp] + Source: Optional[SourceDescription] + Destinations: DestinationDescriptionList + HasMoreDestinations: BooleanObject + + +DeliveryStreamNameList = List[DeliveryStreamName] + + +class DescribeDeliveryStreamInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + Limit: Optional[DescribeDeliveryStreamInputLimit] + ExclusiveStartDestinationId: Optional[DestinationId] + + +class DescribeDeliveryStreamOutput(TypedDict, total=False): + DeliveryStreamDescription: DeliveryStreamDescription + + +class ElasticsearchDestinationUpdate(TypedDict, total=False): + RoleARN: Optional[RoleARN] + DomainARN: Optional[ElasticsearchDomainARN] + ClusterEndpoint: Optional[ElasticsearchClusterEndpoint] + IndexName: Optional[ElasticsearchIndexName] + TypeName: Optional[ElasticsearchTypeName] + IndexRotationPeriod: Optional[ElasticsearchIndexRotationPeriod] + BufferingHints: Optional[ElasticsearchBufferingHints] + RetryOptions: Optional[ElasticsearchRetryOptions] + S3Update: Optional[S3DestinationUpdate] + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + DocumentIdOptions: Optional[DocumentIdOptions] + + +class ExtendedS3DestinationUpdate(TypedDict, total=False): + RoleARN: Optional[RoleARN] + BucketARN: Optional[BucketARN] + Prefix: Optional[Prefix] + ErrorOutputPrefix: Optional[ErrorOutputPrefix] + BufferingHints: Optional[BufferingHints] + CompressionFormat: Optional[CompressionFormat] + EncryptionConfiguration: Optional[EncryptionConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: Optional[S3BackupMode] + S3BackupUpdate: Optional[S3DestinationUpdate] + DataFormatConversionConfiguration: Optional[DataFormatConversionConfiguration] + DynamicPartitioningConfiguration: Optional[DynamicPartitioningConfiguration] + FileExtension: Optional[FileExtension] + CustomTimeZone: Optional[CustomTimeZone] + + +class HttpEndpointDestinationUpdate(TypedDict, total=False): + EndpointConfiguration: Optional[HttpEndpointConfiguration] + BufferingHints: Optional[HttpEndpointBufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + RequestConfiguration: Optional[HttpEndpointRequestConfiguration] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RoleARN: Optional[RoleARN] + RetryOptions: Optional[HttpEndpointRetryOptions] + S3BackupMode: Optional[HttpEndpointS3BackupMode] + S3Update: Optional[S3DestinationUpdate] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class IcebergDestinationUpdate(TypedDict, total=False): + DestinationTableConfigurationList: Optional[DestinationTableConfigurationList] + SchemaEvolutionConfiguration: Optional[SchemaEvolutionConfiguration] + TableCreationConfiguration: Optional[TableCreationConfiguration] + BufferingHints: Optional[BufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: 
Optional[IcebergS3BackupMode] + RetryOptions: Optional[RetryOptions] + RoleARN: Optional[RoleARN] + AppendOnly: Optional[BooleanObject] + CatalogConfiguration: Optional[CatalogConfiguration] + S3Configuration: Optional[S3DestinationConfiguration] + + +class ListDeliveryStreamsInput(ServiceRequest): + Limit: Optional[ListDeliveryStreamsInputLimit] + DeliveryStreamType: Optional[DeliveryStreamType] + ExclusiveStartDeliveryStreamName: Optional[DeliveryStreamName] + + +class ListDeliveryStreamsOutput(TypedDict, total=False): + DeliveryStreamNames: DeliveryStreamNameList + HasMoreDeliveryStreams: BooleanObject + + +class ListTagsForDeliveryStreamInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + ExclusiveStartTagKey: Optional[TagKey] + Limit: Optional[ListTagsForDeliveryStreamInputLimit] + + +ListTagsForDeliveryStreamOutputTagList = List[Tag] + + +class ListTagsForDeliveryStreamOutput(TypedDict, total=False): + Tags: ListTagsForDeliveryStreamOutputTagList + HasMoreTags: BooleanObject + + +class Record(TypedDict, total=False): + Data: Data + + +PutRecordBatchRequestEntryList = List[Record] + + +class PutRecordBatchInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + Records: PutRecordBatchRequestEntryList + + +class PutRecordBatchResponseEntry(TypedDict, total=False): + RecordId: Optional[PutResponseRecordId] + ErrorCode: Optional[ErrorCode] + ErrorMessage: Optional[ErrorMessage] + + +PutRecordBatchResponseEntryList = List[PutRecordBatchResponseEntry] + + +class PutRecordBatchOutput(TypedDict, total=False): + FailedPutCount: NonNegativeIntegerObject + Encrypted: Optional[BooleanObject] + RequestResponses: PutRecordBatchResponseEntryList + + +class PutRecordInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + Record: Record + + +class PutRecordOutput(TypedDict, total=False): + RecordId: PutResponseRecordId + Encrypted: Optional[BooleanObject] + + +class RedshiftDestinationUpdate(TypedDict, total=False): + RoleARN: Optional[RoleARN] + ClusterJDBCURL: Optional[ClusterJDBCURL] + CopyCommand: Optional[CopyCommand] + Username: Optional[Username] + Password: Optional[Password] + RetryOptions: Optional[RedshiftRetryOptions] + S3Update: Optional[S3DestinationUpdate] + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupMode: Optional[RedshiftS3BackupMode] + S3BackupUpdate: Optional[S3DestinationUpdate] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class SnowflakeDestinationUpdate(TypedDict, total=False): + AccountUrl: Optional[SnowflakeAccountUrl] + PrivateKey: Optional[SnowflakePrivateKey] + KeyPassphrase: Optional[SnowflakeKeyPassphrase] + User: Optional[SnowflakeUser] + Database: Optional[SnowflakeDatabase] + Schema: Optional[SnowflakeSchema] + Table: Optional[SnowflakeTable] + SnowflakeRoleConfiguration: Optional[SnowflakeRoleConfiguration] + DataLoadingOption: Optional[SnowflakeDataLoadingOption] + MetaDataColumnName: Optional[SnowflakeMetaDataColumnName] + ContentColumnName: Optional[SnowflakeContentColumnName] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RoleARN: Optional[RoleARN] + RetryOptions: Optional[SnowflakeRetryOptions] + S3BackupMode: Optional[SnowflakeS3BackupMode] + S3Update: Optional[S3DestinationUpdate] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + BufferingHints: Optional[SnowflakeBufferingHints] + + +class 
SplunkDestinationUpdate(TypedDict, total=False): + HECEndpoint: Optional[HECEndpoint] + HECEndpointType: Optional[HECEndpointType] + HECToken: Optional[HECToken] + HECAcknowledgmentTimeoutInSeconds: Optional[HECAcknowledgmentTimeoutInSeconds] + RetryOptions: Optional[SplunkRetryOptions] + S3BackupMode: Optional[SplunkS3BackupMode] + S3Update: Optional[S3DestinationUpdate] + ProcessingConfiguration: Optional[ProcessingConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + BufferingHints: Optional[SplunkBufferingHints] + SecretsManagerConfiguration: Optional[SecretsManagerConfiguration] + + +class StartDeliveryStreamEncryptionInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + DeliveryStreamEncryptionConfigurationInput: Optional[DeliveryStreamEncryptionConfigurationInput] + + +class StartDeliveryStreamEncryptionOutput(TypedDict, total=False): + pass + + +class StopDeliveryStreamEncryptionInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + + +class StopDeliveryStreamEncryptionOutput(TypedDict, total=False): + pass + + +class TagDeliveryStreamInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + Tags: TagDeliveryStreamInputTagList + + +class TagDeliveryStreamOutput(TypedDict, total=False): + pass + + +TagKeyList = List[TagKey] + + +class UntagDeliveryStreamInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + TagKeys: TagKeyList + + +class UntagDeliveryStreamOutput(TypedDict, total=False): + pass + + +class UpdateDestinationInput(ServiceRequest): + DeliveryStreamName: DeliveryStreamName + CurrentDeliveryStreamVersionId: DeliveryStreamVersionId + DestinationId: DestinationId + S3DestinationUpdate: Optional[S3DestinationUpdate] + ExtendedS3DestinationUpdate: Optional[ExtendedS3DestinationUpdate] + RedshiftDestinationUpdate: Optional[RedshiftDestinationUpdate] + ElasticsearchDestinationUpdate: Optional[ElasticsearchDestinationUpdate] + AmazonopensearchserviceDestinationUpdate: Optional[AmazonopensearchserviceDestinationUpdate] + SplunkDestinationUpdate: Optional[SplunkDestinationUpdate] + HttpEndpointDestinationUpdate: Optional[HttpEndpointDestinationUpdate] + AmazonOpenSearchServerlessDestinationUpdate: Optional[ + AmazonOpenSearchServerlessDestinationUpdate + ] + SnowflakeDestinationUpdate: Optional[SnowflakeDestinationUpdate] + IcebergDestinationUpdate: Optional[IcebergDestinationUpdate] + + +class UpdateDestinationOutput(TypedDict, total=False): + pass + + +class FirehoseApi: + service = "firehose" + version = "2015-08-04" + + @handler("CreateDeliveryStream") + def create_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + delivery_stream_type: DeliveryStreamType | None = None, + direct_put_source_configuration: DirectPutSourceConfiguration | None = None, + kinesis_stream_source_configuration: KinesisStreamSourceConfiguration | None = None, + delivery_stream_encryption_configuration_input: DeliveryStreamEncryptionConfigurationInput + | None = None, + s3_destination_configuration: S3DestinationConfiguration | None = None, + extended_s3_destination_configuration: ExtendedS3DestinationConfiguration | None = None, + redshift_destination_configuration: RedshiftDestinationConfiguration | None = None, + elasticsearch_destination_configuration: ElasticsearchDestinationConfiguration + | None = None, + amazonopensearchservice_destination_configuration: AmazonopensearchserviceDestinationConfiguration + | None = None, + splunk_destination_configuration: SplunkDestinationConfiguration 
| None = None, + http_endpoint_destination_configuration: HttpEndpointDestinationConfiguration | None = None, + tags: TagDeliveryStreamInputTagList | None = None, + amazon_open_search_serverless_destination_configuration: AmazonOpenSearchServerlessDestinationConfiguration + | None = None, + msk_source_configuration: MSKSourceConfiguration | None = None, + snowflake_destination_configuration: SnowflakeDestinationConfiguration | None = None, + iceberg_destination_configuration: IcebergDestinationConfiguration | None = None, + database_source_configuration: DatabaseSourceConfiguration | None = None, + **kwargs, + ) -> CreateDeliveryStreamOutput: + raise NotImplementedError + + @handler("DeleteDeliveryStream") + def delete_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + allow_force_delete: BooleanObject | None = None, + **kwargs, + ) -> DeleteDeliveryStreamOutput: + raise NotImplementedError + + @handler("DescribeDeliveryStream") + def describe_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + limit: DescribeDeliveryStreamInputLimit | None = None, + exclusive_start_destination_id: DestinationId | None = None, + **kwargs, + ) -> DescribeDeliveryStreamOutput: + raise NotImplementedError + + @handler("ListDeliveryStreams") + def list_delivery_streams( + self, + context: RequestContext, + limit: ListDeliveryStreamsInputLimit | None = None, + delivery_stream_type: DeliveryStreamType | None = None, + exclusive_start_delivery_stream_name: DeliveryStreamName | None = None, + **kwargs, + ) -> ListDeliveryStreamsOutput: + raise NotImplementedError + + @handler("ListTagsForDeliveryStream") + def list_tags_for_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + exclusive_start_tag_key: TagKey | None = None, + limit: ListTagsForDeliveryStreamInputLimit | None = None, + **kwargs, + ) -> ListTagsForDeliveryStreamOutput: + raise NotImplementedError + + @handler("PutRecord") + def put_record( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + record: Record, + **kwargs, + ) -> PutRecordOutput: + raise NotImplementedError + + @handler("PutRecordBatch") + def put_record_batch( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + records: PutRecordBatchRequestEntryList, + **kwargs, + ) -> PutRecordBatchOutput: + raise NotImplementedError + + @handler("StartDeliveryStreamEncryption") + def start_delivery_stream_encryption( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + delivery_stream_encryption_configuration_input: DeliveryStreamEncryptionConfigurationInput + | None = None, + **kwargs, + ) -> StartDeliveryStreamEncryptionOutput: + raise NotImplementedError + + @handler("StopDeliveryStreamEncryption") + def stop_delivery_stream_encryption( + self, context: RequestContext, delivery_stream_name: DeliveryStreamName, **kwargs + ) -> StopDeliveryStreamEncryptionOutput: + raise NotImplementedError + + @handler("TagDeliveryStream") + def tag_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + tags: TagDeliveryStreamInputTagList, + **kwargs, + ) -> TagDeliveryStreamOutput: + raise NotImplementedError + + @handler("UntagDeliveryStream") + def untag_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + tag_keys: TagKeyList, + **kwargs, + ) -> UntagDeliveryStreamOutput: + raise NotImplementedError + + 
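+    # --- Illustrative sketch (editor's note, not part of the generated model) ---
+    # FirehoseApi is only a skeleton: every operation raises NotImplementedError.
+    # A concrete LocalStack provider is expected to subclass it and override the
+    # stubs, while @handler keeps the wire operation name ("PutRecord", ...)
+    # bound to the Python method. A minimal, hypothetical override:
+    #
+    #   class FirehoseProvider(FirehoseApi):
+    #       def put_record(
+    #           self,
+    #           context: RequestContext,
+    #           delivery_stream_name: DeliveryStreamName,
+    #           record: Record,
+    #           **kwargs,
+    #       ) -> PutRecordOutput:
+    #           # accept the record and answer with a made-up record id
+    #           return PutRecordOutput(RecordId="example-record-id", Encrypted=False)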
@handler("UpdateDestination") + def update_destination( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + current_delivery_stream_version_id: DeliveryStreamVersionId, + destination_id: DestinationId, + s3_destination_update: S3DestinationUpdate | None = None, + extended_s3_destination_update: ExtendedS3DestinationUpdate | None = None, + redshift_destination_update: RedshiftDestinationUpdate | None = None, + elasticsearch_destination_update: ElasticsearchDestinationUpdate | None = None, + amazonopensearchservice_destination_update: AmazonopensearchserviceDestinationUpdate + | None = None, + splunk_destination_update: SplunkDestinationUpdate | None = None, + http_endpoint_destination_update: HttpEndpointDestinationUpdate | None = None, + amazon_open_search_serverless_destination_update: AmazonOpenSearchServerlessDestinationUpdate + | None = None, + snowflake_destination_update: SnowflakeDestinationUpdate | None = None, + iceberg_destination_update: IcebergDestinationUpdate | None = None, + **kwargs, + ) -> UpdateDestinationOutput: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/iam/__init__.py b/localstack-core/localstack/aws/api/iam/__init__.py new file mode 100644 index 0000000000000..9d9c9e6994325 --- /dev/null +++ b/localstack-core/localstack/aws/api/iam/__init__.py @@ -0,0 +1,3929 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +ActionNameType = str +CertificationKeyType = str +CertificationValueType = str +ColumnNumber = int +ConcurrentModificationMessage = str +ContextKeyNameType = str +ContextKeyValueType = str +DeletionTaskIdType = str +EvalDecisionSourceType = str +LineNumber = int +OpenIDConnectProviderUrlType = str +OrganizationIdType = str +PolicyIdentifierType = str +ReasonType = str +RegionNameType = str +ReportStateDescriptionType = str +ResourceHandlingOptionType = str +ResourceNameType = str +SAMLMetadataDocumentType = str +SAMLProviderNameType = str +accessKeyIdType = str +accessKeySecretType = str +accountAliasType = str +arnType = str +attachmentCountType = int +authenticationCodeType = str +booleanObjectType = bool +booleanType = bool +certificateBodyType = str +certificateChainType = str +certificateIdType = str +clientIDType = str +credentialReportExpiredExceptionMessage = str +credentialReportNotPresentExceptionMessage = str +credentialReportNotReadyExceptionMessage = str +customSuffixType = str +deleteConflictMessage = str +duplicateCertificateMessage = str +duplicateSSHPublicKeyMessage = str +entityAlreadyExistsMessage = str +entityNameType = str +entityTemporarilyUnmodifiableMessage = str +existingUserNameType = str +groupNameType = str +idType = str +instanceProfileNameType = str +integerType = int +invalidAuthenticationCodeMessage = str +invalidCertificateMessage = str +invalidInputMessage = str +invalidPublicKeyMessage = str +invalidUserTypeMessage = str +jobIDType = str +keyPairMismatchMessage = str +limitExceededMessage = str +malformedCertificateMessage = str +malformedPolicyDocumentMessage = str +markerType = str +maxItemsType = int +maxPasswordAgeType = int +minimumPasswordLengthType = int +noSuchEntityMessage = str +openIdIdpCommunicationErrorExceptionMessage = str +organizationsEntityPathType = str +organizationsPolicyIdType = str +passwordPolicyViolationMessage = str +passwordReusePreventionType = int +passwordType = str +pathPrefixType = 
str +pathType = str +policyDescriptionType = str +policyDocumentType = str +policyEvaluationErrorMessage = str +policyNameType = str +policyNotAttachableMessage = str +policyPathType = str +policyVersionIdType = str +privateKeyIdType = str +privateKeyType = str +publicKeyFingerprintType = str +publicKeyIdType = str +publicKeyMaterialType = str +reportGenerationLimitExceededMessage = str +responseMarkerType = str +roleDescriptionType = str +roleMaxSessionDurationType = int +roleNameType = str +serialNumberType = str +serverCertificateNameType = str +serviceFailureExceptionMessage = str +serviceName = str +serviceNameType = str +serviceNamespaceType = str +serviceNotSupportedMessage = str +servicePassword = str +serviceSpecificCredentialId = str +serviceUserName = str +stringType = str +summaryValueType = int +tagKeyType = str +tagValueType = str +thumbprintType = str +unmodifiableEntityMessage = str +unrecognizedPublicKeyEncodingMessage = str +userNameType = str +virtualMFADeviceName = str + + +class AccessAdvisorUsageGranularityType(StrEnum): + SERVICE_LEVEL = "SERVICE_LEVEL" + ACTION_LEVEL = "ACTION_LEVEL" + + +class ContextKeyTypeEnum(StrEnum): + string = "string" + stringList = "stringList" + numeric = "numeric" + numericList = "numericList" + boolean = "boolean" + booleanList = "booleanList" + ip = "ip" + ipList = "ipList" + binary = "binary" + binaryList = "binaryList" + date = "date" + dateList = "dateList" + + +class DeletionTaskStatusType(StrEnum): + SUCCEEDED = "SUCCEEDED" + IN_PROGRESS = "IN_PROGRESS" + FAILED = "FAILED" + NOT_STARTED = "NOT_STARTED" + + +class EntityType(StrEnum): + User = "User" + Role = "Role" + Group = "Group" + LocalManagedPolicy = "LocalManagedPolicy" + AWSManagedPolicy = "AWSManagedPolicy" + + +class FeatureType(StrEnum): + RootCredentialsManagement = "RootCredentialsManagement" + RootSessions = "RootSessions" + + +class PermissionsBoundaryAttachmentType(StrEnum): + PermissionsBoundaryPolicy = "PermissionsBoundaryPolicy" + + +class PolicyEvaluationDecisionType(StrEnum): + allowed = "allowed" + explicitDeny = "explicitDeny" + implicitDeny = "implicitDeny" + + +class PolicySourceType(StrEnum): + user = "user" + group = "group" + role = "role" + aws_managed = "aws-managed" + user_managed = "user-managed" + resource = "resource" + none = "none" + + +class PolicyUsageType(StrEnum): + PermissionsPolicy = "PermissionsPolicy" + PermissionsBoundary = "PermissionsBoundary" + + +class ReportFormatType(StrEnum): + text_csv = "text/csv" + + +class ReportStateType(StrEnum): + STARTED = "STARTED" + INPROGRESS = "INPROGRESS" + COMPLETE = "COMPLETE" + + +class assertionEncryptionModeType(StrEnum): + Required = "Required" + Allowed = "Allowed" + + +class assignmentStatusType(StrEnum): + Assigned = "Assigned" + Unassigned = "Unassigned" + Any = "Any" + + +class encodingType(StrEnum): + SSH = "SSH" + PEM = "PEM" + + +class globalEndpointTokenVersion(StrEnum): + v1Token = "v1Token" + v2Token = "v2Token" + + +class jobStatusType(StrEnum): + IN_PROGRESS = "IN_PROGRESS" + COMPLETED = "COMPLETED" + FAILED = "FAILED" + + +class policyOwnerEntityType(StrEnum): + USER = "USER" + ROLE = "ROLE" + GROUP = "GROUP" + + +class policyScopeType(StrEnum): + All = "All" + AWS = "AWS" + Local = "Local" + + +class policyType(StrEnum): + INLINE = "INLINE" + MANAGED = "MANAGED" + + +class sortKeyType(StrEnum): + SERVICE_NAMESPACE_ASCENDING = "SERVICE_NAMESPACE_ASCENDING" + SERVICE_NAMESPACE_DESCENDING = "SERVICE_NAMESPACE_DESCENDING" + LAST_AUTHENTICATED_TIME_ASCENDING = 
"LAST_AUTHENTICATED_TIME_ASCENDING" + LAST_AUTHENTICATED_TIME_DESCENDING = "LAST_AUTHENTICATED_TIME_DESCENDING" + + +class statusType(StrEnum): + Active = "Active" + Inactive = "Inactive" + + +class summaryKeyType(StrEnum): + Users = "Users" + UsersQuota = "UsersQuota" + Groups = "Groups" + GroupsQuota = "GroupsQuota" + ServerCertificates = "ServerCertificates" + ServerCertificatesQuota = "ServerCertificatesQuota" + UserPolicySizeQuota = "UserPolicySizeQuota" + GroupPolicySizeQuota = "GroupPolicySizeQuota" + GroupsPerUserQuota = "GroupsPerUserQuota" + SigningCertificatesPerUserQuota = "SigningCertificatesPerUserQuota" + AccessKeysPerUserQuota = "AccessKeysPerUserQuota" + MFADevices = "MFADevices" + MFADevicesInUse = "MFADevicesInUse" + AccountMFAEnabled = "AccountMFAEnabled" + AccountAccessKeysPresent = "AccountAccessKeysPresent" + AccountPasswordPresent = "AccountPasswordPresent" + AccountSigningCertificatesPresent = "AccountSigningCertificatesPresent" + AttachedPoliciesPerGroupQuota = "AttachedPoliciesPerGroupQuota" + AttachedPoliciesPerRoleQuota = "AttachedPoliciesPerRoleQuota" + AttachedPoliciesPerUserQuota = "AttachedPoliciesPerUserQuota" + Policies = "Policies" + PoliciesQuota = "PoliciesQuota" + PolicySizeQuota = "PolicySizeQuota" + PolicyVersionsInUse = "PolicyVersionsInUse" + PolicyVersionsInUseQuota = "PolicyVersionsInUseQuota" + VersionsPerPolicyQuota = "VersionsPerPolicyQuota" + GlobalEndpointTokenVersion = "GlobalEndpointTokenVersion" + + +class AccountNotManagementOrDelegatedAdministratorException(ServiceException): + code: str = "AccountNotManagementOrDelegatedAdministratorException" + sender_fault: bool = False + status_code: int = 400 + + +class CallerIsNotManagementAccountException(ServiceException): + code: str = "CallerIsNotManagementAccountException" + sender_fault: bool = False + status_code: int = 400 + + +class ConcurrentModificationException(ServiceException): + code: str = "ConcurrentModification" + sender_fault: bool = True + status_code: int = 409 + + +class CredentialReportExpiredException(ServiceException): + code: str = "ReportExpired" + sender_fault: bool = True + status_code: int = 410 + + +class CredentialReportNotPresentException(ServiceException): + code: str = "ReportNotPresent" + sender_fault: bool = True + status_code: int = 410 + + +class CredentialReportNotReadyException(ServiceException): + code: str = "ReportInProgress" + sender_fault: bool = True + status_code: int = 404 + + +class DeleteConflictException(ServiceException): + code: str = "DeleteConflict" + sender_fault: bool = True + status_code: int = 409 + + +class DuplicateCertificateException(ServiceException): + code: str = "DuplicateCertificate" + sender_fault: bool = True + status_code: int = 409 + + +class DuplicateSSHPublicKeyException(ServiceException): + code: str = "DuplicateSSHPublicKey" + sender_fault: bool = True + status_code: int = 400 + + +class EntityAlreadyExistsException(ServiceException): + code: str = "EntityAlreadyExists" + sender_fault: bool = True + status_code: int = 409 + + +class EntityTemporarilyUnmodifiableException(ServiceException): + code: str = "EntityTemporarilyUnmodifiable" + sender_fault: bool = True + status_code: int = 409 + + +class InvalidAuthenticationCodeException(ServiceException): + code: str = "InvalidAuthenticationCode" + sender_fault: bool = True + status_code: int = 403 + + +class InvalidCertificateException(ServiceException): + code: str = "InvalidCertificate" + sender_fault: bool = True + status_code: int = 400 + + +class 
InvalidInputException(ServiceException): + code: str = "InvalidInput" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidPublicKeyException(ServiceException): + code: str = "InvalidPublicKey" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidUserTypeException(ServiceException): + code: str = "InvalidUserType" + sender_fault: bool = True + status_code: int = 400 + + +class KeyPairMismatchException(ServiceException): + code: str = "KeyPairMismatch" + sender_fault: bool = True + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceeded" + sender_fault: bool = True + status_code: int = 409 + + +class MalformedCertificateException(ServiceException): + code: str = "MalformedCertificate" + sender_fault: bool = True + status_code: int = 400 + + +class MalformedPolicyDocumentException(ServiceException): + code: str = "MalformedPolicyDocument" + sender_fault: bool = True + status_code: int = 400 + + +class NoSuchEntityException(ServiceException): + code: str = "NoSuchEntity" + sender_fault: bool = True + status_code: int = 404 + + +class OpenIdIdpCommunicationErrorException(ServiceException): + code: str = "OpenIdIdpCommunicationError" + sender_fault: bool = True + status_code: int = 400 + + +class OrganizationNotFoundException(ServiceException): + code: str = "OrganizationNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class OrganizationNotInAllFeaturesModeException(ServiceException): + code: str = "OrganizationNotInAllFeaturesModeException" + sender_fault: bool = False + status_code: int = 400 + + +class PasswordPolicyViolationException(ServiceException): + code: str = "PasswordPolicyViolation" + sender_fault: bool = True + status_code: int = 400 + + +class PolicyEvaluationException(ServiceException): + code: str = "PolicyEvaluation" + sender_fault: bool = False + status_code: int = 500 + + +class PolicyNotAttachableException(ServiceException): + code: str = "PolicyNotAttachable" + sender_fault: bool = True + status_code: int = 400 + + +class ReportGenerationLimitExceededException(ServiceException): + code: str = "ReportGenerationLimitExceeded" + sender_fault: bool = True + status_code: int = 409 + + +class ServiceAccessNotEnabledException(ServiceException): + code: str = "ServiceAccessNotEnabledException" + sender_fault: bool = False + status_code: int = 400 + + +class ServiceFailureException(ServiceException): + code: str = "ServiceFailure" + sender_fault: bool = False + status_code: int = 500 + + +class ServiceNotSupportedException(ServiceException): + code: str = "NotSupportedService" + sender_fault: bool = True + status_code: int = 404 + + +class UnmodifiableEntityException(ServiceException): + code: str = "UnmodifiableEntity" + sender_fault: bool = True + status_code: int = 400 + + +class UnrecognizedPublicKeyEncodingException(ServiceException): + code: str = "UnrecognizedPublicKeyEncoding" + sender_fault: bool = True + status_code: int = 400 + + +dateType = datetime + + +class AccessDetail(TypedDict, total=False): + ServiceName: serviceNameType + ServiceNamespace: serviceNamespaceType + Region: Optional[stringType] + EntityPath: Optional[organizationsEntityPathType] + LastAuthenticatedTime: Optional[dateType] + TotalAuthenticatedEntities: Optional[integerType] + + +AccessDetails = List[AccessDetail] + + +class AccessKey(TypedDict, total=False): + UserName: userNameType + AccessKeyId: accessKeyIdType + Status: statusType + SecretAccessKey: accessKeySecretType + CreateDate: 
+
+
+dateType = datetime
+
+
+class AccessDetail(TypedDict, total=False):
+    ServiceName: serviceNameType
+    ServiceNamespace: serviceNamespaceType
+    Region: Optional[stringType]
+    EntityPath: Optional[organizationsEntityPathType]
+    LastAuthenticatedTime: Optional[dateType]
+    TotalAuthenticatedEntities: Optional[integerType]
+
+
+AccessDetails = List[AccessDetail]
+
+
+class AccessKey(TypedDict, total=False):
+    UserName: userNameType
+    AccessKeyId: accessKeyIdType
+    Status: statusType
+    SecretAccessKey: accessKeySecretType
+    CreateDate: Optional[dateType]
+
+
+class AccessKeyLastUsed(TypedDict, total=False):
+    LastUsedDate: Optional[dateType]
+    ServiceName: stringType
+    Region: stringType
+
+
+class AccessKeyMetadata(TypedDict, total=False):
+    UserName: Optional[userNameType]
+    AccessKeyId: Optional[accessKeyIdType]
+    Status: Optional[statusType]
+    CreateDate: Optional[dateType]
+
+
+ActionNameListType = List[ActionNameType]
+
+
+class AddClientIDToOpenIDConnectProviderRequest(ServiceRequest):
+    OpenIDConnectProviderArn: arnType
+    ClientID: clientIDType
+
+
+class AddRoleToInstanceProfileRequest(ServiceRequest):
+    InstanceProfileName: instanceProfileNameType
+    RoleName: roleNameType
+
+
+class AddUserToGroupRequest(ServiceRequest):
+    GroupName: groupNameType
+    UserName: existingUserNameType
+
+
+ArnListType = List[arnType]
+
+
+class AttachGroupPolicyRequest(ServiceRequest):
+    GroupName: groupNameType
+    PolicyArn: arnType
+
+
+class AttachRolePolicyRequest(ServiceRequest):
+    RoleName: roleNameType
+    PolicyArn: arnType
+
+
+class AttachUserPolicyRequest(ServiceRequest):
+    UserName: userNameType
+    PolicyArn: arnType
+
+
+class AttachedPermissionsBoundary(TypedDict, total=False):
+    PermissionsBoundaryType: Optional[PermissionsBoundaryAttachmentType]
+    PermissionsBoundaryArn: Optional[arnType]
+
+
+class AttachedPolicy(TypedDict, total=False):
+    PolicyName: Optional[policyNameType]
+    PolicyArn: Optional[arnType]
+
+
+BootstrapDatum = bytes
+CertificationMapType = Dict[CertificationKeyType, CertificationValueType]
+
+
+class ChangePasswordRequest(ServiceRequest):
+    OldPassword: passwordType
+    NewPassword: passwordType
+
+
+ContextKeyValueListType = List[ContextKeyValueType]
+
+
+class ContextEntry(TypedDict, total=False):
+    ContextKeyName: Optional[ContextKeyNameType]
+    ContextKeyValues: Optional[ContextKeyValueListType]
+    ContextKeyType: Optional[ContextKeyTypeEnum]
+
+
+ContextEntryListType = List[ContextEntry]
+ContextKeyNamesResultListType = List[ContextKeyNameType]
+
+
+class CreateAccessKeyRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+
+
+class CreateAccessKeyResponse(TypedDict, total=False):
+    AccessKey: AccessKey
+
+
+class CreateAccountAliasRequest(ServiceRequest):
+    AccountAlias: accountAliasType
+
+
+class CreateGroupRequest(ServiceRequest):
+    Path: Optional[pathType]
+    GroupName: groupNameType
+
+
+class Group(TypedDict, total=False):
+    Path: pathType
+    GroupName: groupNameType
+    GroupId: idType
+    Arn: arnType
+    CreateDate: dateType
+
+
+class CreateGroupResponse(TypedDict, total=False):
+    Group: Group
+
+
+class Tag(TypedDict, total=False):
+    Key: tagKeyType
+    Value: tagValueType
+
+
+tagListType = List[Tag]
+
+
+class CreateInstanceProfileRequest(ServiceRequest):
+    InstanceProfileName: instanceProfileNameType
+    Path: Optional[pathType]
+    Tags: Optional[tagListType]
+
+
+class RoleLastUsed(TypedDict, total=False):
+    LastUsedDate: Optional[dateType]
+    Region: Optional[stringType]
+
+
+class Role(TypedDict, total=False):
+    Path: pathType
+    RoleName: roleNameType
+    RoleId: idType
+    Arn: arnType
+    CreateDate: dateType
+    AssumeRolePolicyDocument: Optional[policyDocumentType]
+    Description: Optional[roleDescriptionType]
+    MaxSessionDuration: Optional[roleMaxSessionDurationType]
+    PermissionsBoundary: Optional[AttachedPermissionsBoundary]
+    Tags: Optional[tagListType]
+    RoleLastUsed: Optional[RoleLastUsed]
+
+
+roleListType = List[Role]
+
+
+class InstanceProfile(TypedDict, total=False):
+    Path: pathType
+    InstanceProfileName: instanceProfileNameType
+    InstanceProfileId: idType
+    Arn: arnType
+    CreateDate: dateType
+    Roles: roleListType
+    Tags: Optional[tagListType]
+
+
+class CreateInstanceProfileResponse(TypedDict, total=False):
+    InstanceProfile: InstanceProfile
+
+
+class CreateLoginProfileRequest(ServiceRequest):
+    UserName: Optional[userNameType]
+    Password: Optional[passwordType]
+    PasswordResetRequired: Optional[booleanType]
+
+
+class LoginProfile(TypedDict, total=False):
+    UserName: userNameType
+    CreateDate: dateType
+    PasswordResetRequired: Optional[booleanType]
+
+
+class CreateLoginProfileResponse(TypedDict, total=False):
+    LoginProfile: LoginProfile
+
+
+thumbprintListType = List[thumbprintType]
+clientIDListType = List[clientIDType]
+
+
+class CreateOpenIDConnectProviderRequest(ServiceRequest):
+    Url: OpenIDConnectProviderUrlType
+    ClientIDList: Optional[clientIDListType]
+    ThumbprintList: Optional[thumbprintListType]
+    Tags: Optional[tagListType]
+
+
+class CreateOpenIDConnectProviderResponse(TypedDict, total=False):
+    OpenIDConnectProviderArn: Optional[arnType]
+    Tags: Optional[tagListType]
+
+
+class CreatePolicyRequest(ServiceRequest):
+    PolicyName: policyNameType
+    Path: Optional[policyPathType]
+    PolicyDocument: policyDocumentType
+    Description: Optional[policyDescriptionType]
+    Tags: Optional[tagListType]
+
+
+class Policy(TypedDict, total=False):
+    PolicyName: Optional[policyNameType]
+    PolicyId: Optional[idType]
+    Arn: Optional[arnType]
+    Path: Optional[policyPathType]
+    DefaultVersionId: Optional[policyVersionIdType]
+    AttachmentCount: Optional[attachmentCountType]
+    PermissionsBoundaryUsageCount: Optional[attachmentCountType]
+    IsAttachable: Optional[booleanType]
+    Description: Optional[policyDescriptionType]
+    CreateDate: Optional[dateType]
+    UpdateDate: Optional[dateType]
+    Tags: Optional[tagListType]
+
+
+class CreatePolicyResponse(TypedDict, total=False):
+    Policy: Optional[Policy]
+
+
+class CreatePolicyVersionRequest(ServiceRequest):
+    PolicyArn: arnType
+    PolicyDocument: policyDocumentType
+    SetAsDefault: Optional[booleanType]
+
+
+class PolicyVersion(TypedDict, total=False):
+    Document: Optional[policyDocumentType]
+    VersionId: Optional[policyVersionIdType]
+    IsDefaultVersion: Optional[booleanType]
+    CreateDate: Optional[dateType]
+
+
+class CreatePolicyVersionResponse(TypedDict, total=False):
+    PolicyVersion: Optional[PolicyVersion]
+
+
+class CreateRoleRequest(ServiceRequest):
+    Path: Optional[pathType]
+    RoleName: roleNameType
+    AssumeRolePolicyDocument: policyDocumentType
+    Description: Optional[roleDescriptionType]
+    MaxSessionDuration: Optional[roleMaxSessionDurationType]
+    PermissionsBoundary: Optional[arnType]
+    Tags: Optional[tagListType]
+
+
+class CreateRoleResponse(TypedDict, total=False):
+    Role: Role
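+
+
+# --- Illustrative sketch, not part of the generated spec: the response  ---
+# --- shapes above are TypedDicts with total=False, so keys typed as     ---
+# --- Optional can simply be omitted. All values below are placeholders. ---
+def _example_create_role_response() -> CreateRoleResponse:
+    role = Role(
+        Path="/",
+        RoleName="my-role",
+        RoleId="AROAEXAMPLEID",
+        Arn="arn:aws:iam::000000000000:role/my-role",
+        CreateDate=datetime.now(),
+        AssumeRolePolicyDocument="{}",
+    )
+    return CreateRoleResponse(Role=role)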
+
+
+class CreateSAMLProviderRequest(ServiceRequest):
+    SAMLMetadataDocument: SAMLMetadataDocumentType
+    Name: SAMLProviderNameType
+    Tags: Optional[tagListType]
+    AssertionEncryptionMode: Optional[assertionEncryptionModeType]
+    AddPrivateKey: Optional[privateKeyType]
+
+
+class CreateSAMLProviderResponse(TypedDict, total=False):
+    SAMLProviderArn: Optional[arnType]
+    Tags: Optional[tagListType]
+
+
+class CreateServiceLinkedRoleRequest(ServiceRequest):
+    AWSServiceName: groupNameType
+    Description: Optional[roleDescriptionType]
+    CustomSuffix: Optional[customSuffixType]
+
+
+class CreateServiceLinkedRoleResponse(TypedDict, total=False):
+    Role: Optional[Role]
+
+
+class CreateServiceSpecificCredentialRequest(ServiceRequest):
+    UserName: userNameType
+    ServiceName: serviceName
+
+
+class ServiceSpecificCredential(TypedDict, total=False):
+    CreateDate: dateType
+    ServiceName: serviceName
+    ServiceUserName: serviceUserName
+    ServicePassword: servicePassword
+    ServiceSpecificCredentialId: serviceSpecificCredentialId
+    UserName: userNameType
+    Status: statusType
+
+
+class CreateServiceSpecificCredentialResponse(TypedDict, total=False):
+    ServiceSpecificCredential: Optional[ServiceSpecificCredential]
+
+
+class CreateUserRequest(ServiceRequest):
+    Path: Optional[pathType]
+    UserName: userNameType
+    PermissionsBoundary: Optional[arnType]
+    Tags: Optional[tagListType]
+
+
+class User(TypedDict, total=False):
+    Path: pathType
+    UserName: userNameType
+    UserId: idType
+    Arn: arnType
+    CreateDate: dateType
+    PasswordLastUsed: Optional[dateType]
+    PermissionsBoundary: Optional[AttachedPermissionsBoundary]
+    Tags: Optional[tagListType]
+
+
+class CreateUserResponse(TypedDict, total=False):
+    User: Optional[User]
+
+
+class CreateVirtualMFADeviceRequest(ServiceRequest):
+    Path: Optional[pathType]
+    VirtualMFADeviceName: virtualMFADeviceName
+    Tags: Optional[tagListType]
+
+
+class VirtualMFADevice(TypedDict, total=False):
+    SerialNumber: serialNumberType
+    Base32StringSeed: Optional[BootstrapDatum]
+    QRCodePNG: Optional[BootstrapDatum]
+    User: Optional[User]
+    EnableDate: Optional[dateType]
+    Tags: Optional[tagListType]
+
+
+class CreateVirtualMFADeviceResponse(TypedDict, total=False):
+    VirtualMFADevice: VirtualMFADevice
+
+
+class DeactivateMFADeviceRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    SerialNumber: serialNumberType
+
+
+class DeleteAccessKeyRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    AccessKeyId: accessKeyIdType
+
+
+class DeleteAccountAliasRequest(ServiceRequest):
+    AccountAlias: accountAliasType
+
+
+class DeleteGroupPolicyRequest(ServiceRequest):
+    GroupName: groupNameType
+    PolicyName: policyNameType
+
+
+class DeleteGroupRequest(ServiceRequest):
+    GroupName: groupNameType
+
+
+class DeleteInstanceProfileRequest(ServiceRequest):
+    InstanceProfileName: instanceProfileNameType
+
+
+class DeleteLoginProfileRequest(ServiceRequest):
+    UserName: Optional[userNameType]
+
+
+class DeleteOpenIDConnectProviderRequest(ServiceRequest):
+    OpenIDConnectProviderArn: arnType
+
+
+class DeletePolicyRequest(ServiceRequest):
+    PolicyArn: arnType
+
+
+class DeletePolicyVersionRequest(ServiceRequest):
+    PolicyArn: arnType
+    VersionId: policyVersionIdType
+
+
+class DeleteRolePermissionsBoundaryRequest(ServiceRequest):
+    RoleName: roleNameType
+
+
+class DeleteRolePolicyRequest(ServiceRequest):
+    RoleName: roleNameType
+    PolicyName: policyNameType
+
+
+class DeleteRoleRequest(ServiceRequest):
+    RoleName: roleNameType
+
+
+class DeleteSAMLProviderRequest(ServiceRequest):
+    SAMLProviderArn: arnType
+
+
+class DeleteSSHPublicKeyRequest(ServiceRequest):
+    UserName: userNameType
+    SSHPublicKeyId: publicKeyIdType
+
+
+class DeleteServerCertificateRequest(ServiceRequest):
+    ServerCertificateName: serverCertificateNameType
+
+
+class DeleteServiceLinkedRoleRequest(ServiceRequest):
+    RoleName: roleNameType
+
+
+class DeleteServiceLinkedRoleResponse(TypedDict, total=False):
+    DeletionTaskId: DeletionTaskIdType
+
+
+class DeleteServiceSpecificCredentialRequest(ServiceRequest):
+    UserName: Optional[userNameType]
+    ServiceSpecificCredentialId: serviceSpecificCredentialId
+
+
+class DeleteSigningCertificateRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    CertificateId: certificateIdType
+
+
+class DeleteUserPermissionsBoundaryRequest(ServiceRequest):
+    UserName: userNameType
+
+
+class DeleteUserPolicyRequest(ServiceRequest):
+    UserName: existingUserNameType
+    PolicyName: policyNameType
+
+
+class DeleteUserRequest(ServiceRequest):
+    UserName: existingUserNameType
+
+
+class DeleteVirtualMFADeviceRequest(ServiceRequest):
+    SerialNumber: serialNumberType
+
+
+class RoleUsageType(TypedDict, total=False):
+    Region: Optional[RegionNameType]
+    Resources: Optional[ArnListType]
+
+
+RoleUsageListType = List[RoleUsageType]
+
+
+class DeletionTaskFailureReasonType(TypedDict, total=False):
+    Reason: Optional[ReasonType]
+    RoleUsageList: Optional[RoleUsageListType]
+
+
+class DetachGroupPolicyRequest(ServiceRequest):
+    GroupName: groupNameType
+    PolicyArn: arnType
+
+
+class DetachRolePolicyRequest(ServiceRequest):
+    RoleName: roleNameType
+    PolicyArn: arnType
+
+
+class DetachUserPolicyRequest(ServiceRequest):
+    UserName: userNameType
+    PolicyArn: arnType
+
+
+class DisableOrganizationsRootCredentialsManagementRequest(ServiceRequest):
+    pass
+
+
+FeaturesListType = List[FeatureType]
+
+
+class DisableOrganizationsRootCredentialsManagementResponse(TypedDict, total=False):
+    OrganizationId: Optional[OrganizationIdType]
+    EnabledFeatures: Optional[FeaturesListType]
+
+
+class DisableOrganizationsRootSessionsRequest(ServiceRequest):
+    pass
+
+
+class DisableOrganizationsRootSessionsResponse(TypedDict, total=False):
+    OrganizationId: Optional[OrganizationIdType]
+    EnabledFeatures: Optional[FeaturesListType]
+
+
+class EnableMFADeviceRequest(ServiceRequest):
+    UserName: existingUserNameType
+    SerialNumber: serialNumberType
+    AuthenticationCode1: authenticationCodeType
+    AuthenticationCode2: authenticationCodeType
+
+
+class EnableOrganizationsRootCredentialsManagementRequest(ServiceRequest):
+    pass
+
+
+class EnableOrganizationsRootCredentialsManagementResponse(TypedDict, total=False):
+    OrganizationId: Optional[OrganizationIdType]
+    EnabledFeatures: Optional[FeaturesListType]
+
+
+class EnableOrganizationsRootSessionsRequest(ServiceRequest):
+    pass
+
+
+class EnableOrganizationsRootSessionsResponse(TypedDict, total=False):
+    OrganizationId: Optional[OrganizationIdType]
+    EnabledFeatures: Optional[FeaturesListType]
+
+
+class EntityInfo(TypedDict, total=False):
+    Arn: arnType
+    Name: userNameType
+    Type: policyOwnerEntityType
+    Id: idType
+    Path: Optional[pathType]
+
+
+class EntityDetails(TypedDict, total=False):
+    EntityInfo: EntityInfo
+    LastAuthenticated: Optional[dateType]
+
+
+class ErrorDetails(TypedDict, total=False):
+    Message: stringType
+    Code: stringType
+
+
+EvalDecisionDetailsType = Dict[EvalDecisionSourceType, PolicyEvaluationDecisionType]
+
+
+class PermissionsBoundaryDecisionDetail(TypedDict, total=False):
+    AllowedByPermissionsBoundary: Optional[booleanType]
+
+
+class Position(TypedDict, total=False):
+    Line: Optional[LineNumber]
+    Column: Optional[ColumnNumber]
+
+
+class Statement(TypedDict, total=False):
+    SourcePolicyId: Optional[PolicyIdentifierType]
+    SourcePolicyType: Optional[PolicySourceType]
+    StartPosition: Optional[Position]
+    EndPosition: Optional[Position]
+
+
+StatementListType = List[Statement]
+
+
+class ResourceSpecificResult(TypedDict, total=False):
+    EvalResourceName: ResourceNameType
+    EvalResourceDecision: PolicyEvaluationDecisionType
+    MatchedStatements: Optional[StatementListType]
+    MissingContextValues: Optional[ContextKeyNamesResultListType]
+    EvalDecisionDetails: Optional[EvalDecisionDetailsType]
+    PermissionsBoundaryDecisionDetail: Optional[PermissionsBoundaryDecisionDetail]
+
+
+ResourceSpecificResultListType = List[ResourceSpecificResult]
+
+
+class OrganizationsDecisionDetail(TypedDict, total=False):
+    AllowedByOrganizations: Optional[booleanType]
+
+
+class EvaluationResult(TypedDict, total=False):
+    EvalActionName: ActionNameType
+    EvalResourceName: Optional[ResourceNameType]
+    EvalDecision: PolicyEvaluationDecisionType
+    MatchedStatements: Optional[StatementListType]
+    MissingContextValues: Optional[ContextKeyNamesResultListType]
+    OrganizationsDecisionDetail: Optional[OrganizationsDecisionDetail]
+    PermissionsBoundaryDecisionDetail: Optional[PermissionsBoundaryDecisionDetail]
+    EvalDecisionDetails: Optional[EvalDecisionDetailsType]
+    ResourceSpecificResults: Optional[ResourceSpecificResultListType]
+
+
+EvaluationResultsListType = List[EvaluationResult]
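+
+
+# --- Illustrative sketch, not part of the generated spec: a minimal  ---
+# --- EvaluationResult as it could appear in a policy simulation; the ---
+# --- decision values on the wire are "allowed", "explicitDeny", and  ---
+# --- "implicitDeny".                                                 ---
+def _example_evaluation_result() -> EvaluationResult:
+    return EvaluationResult(
+        EvalActionName="iam:CreateUser",
+        EvalDecision="implicitDeny",
+        MissingContextValues=[],
+    )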
+
+
+class GenerateCredentialReportResponse(TypedDict, total=False):
+    State: Optional[ReportStateType]
+    Description: Optional[ReportStateDescriptionType]
+
+
+class GenerateOrganizationsAccessReportRequest(ServiceRequest):
+    EntityPath: organizationsEntityPathType
+    OrganizationsPolicyId: Optional[organizationsPolicyIdType]
+
+
+class GenerateOrganizationsAccessReportResponse(TypedDict, total=False):
+    JobId: Optional[jobIDType]
+
+
+class GenerateServiceLastAccessedDetailsRequest(ServiceRequest):
+    Arn: arnType
+    Granularity: Optional[AccessAdvisorUsageGranularityType]
+
+
+class GenerateServiceLastAccessedDetailsResponse(TypedDict, total=False):
+    JobId: Optional[jobIDType]
+
+
+class GetAccessKeyLastUsedRequest(ServiceRequest):
+    AccessKeyId: accessKeyIdType
+
+
+class GetAccessKeyLastUsedResponse(TypedDict, total=False):
+    UserName: Optional[existingUserNameType]
+    AccessKeyLastUsed: Optional[AccessKeyLastUsed]
+
+
+entityListType = List[EntityType]
+
+
+class GetAccountAuthorizationDetailsRequest(ServiceRequest):
+    Filter: Optional[entityListType]
+    MaxItems: Optional[maxItemsType]
+    Marker: Optional[markerType]
+
+
+policyDocumentVersionListType = List[PolicyVersion]
+
+
+class ManagedPolicyDetail(TypedDict, total=False):
+    PolicyName: Optional[policyNameType]
+    PolicyId: Optional[idType]
+    Arn: Optional[arnType]
+    Path: Optional[policyPathType]
+    DefaultVersionId: Optional[policyVersionIdType]
+    AttachmentCount: Optional[attachmentCountType]
+    PermissionsBoundaryUsageCount: Optional[attachmentCountType]
+    IsAttachable: Optional[booleanType]
+    Description: Optional[policyDescriptionType]
+    CreateDate: Optional[dateType]
+    UpdateDate: Optional[dateType]
+    PolicyVersionList: Optional[policyDocumentVersionListType]
+
+
+ManagedPolicyDetailListType = List[ManagedPolicyDetail]
+attachedPoliciesListType = List[AttachedPolicy]
+
+
+class PolicyDetail(TypedDict, total=False):
+    PolicyName: Optional[policyNameType]
+    PolicyDocument: Optional[policyDocumentType]
+
+
+policyDetailListType = List[PolicyDetail]
+instanceProfileListType = List[InstanceProfile]
+
+
+class RoleDetail(TypedDict, total=False):
+    Path: Optional[pathType]
+    RoleName: Optional[roleNameType]
+    RoleId: Optional[idType]
+    Arn: Optional[arnType]
+    CreateDate: Optional[dateType]
+    AssumeRolePolicyDocument: Optional[policyDocumentType]
+    InstanceProfileList: Optional[instanceProfileListType]
+    RolePolicyList: Optional[policyDetailListType]
+    AttachedManagedPolicies: Optional[attachedPoliciesListType]
+    PermissionsBoundary: Optional[AttachedPermissionsBoundary]
+    Tags: Optional[tagListType]
+    RoleLastUsed: Optional[RoleLastUsed]
+
+
+roleDetailListType = List[RoleDetail]
+
+
+class GroupDetail(TypedDict, total=False):
+    Path: Optional[pathType]
+    GroupName: Optional[groupNameType]
+    GroupId: Optional[idType]
+    Arn: Optional[arnType]
+    CreateDate: Optional[dateType]
+    GroupPolicyList: Optional[policyDetailListType]
+    AttachedManagedPolicies: Optional[attachedPoliciesListType]
+
+
+groupDetailListType = List[GroupDetail]
+groupNameListType = List[groupNameType]
+
+
+class UserDetail(TypedDict, total=False):
+    Path: Optional[pathType]
+    UserName: Optional[userNameType]
+    UserId: Optional[idType]
+    Arn: Optional[arnType]
+    CreateDate: Optional[dateType]
+    UserPolicyList: Optional[policyDetailListType]
+    GroupList: Optional[groupNameListType]
+    AttachedManagedPolicies: Optional[attachedPoliciesListType]
+    PermissionsBoundary: Optional[AttachedPermissionsBoundary]
+    Tags: Optional[tagListType]
+
+
+userDetailListType = List[UserDetail]
+
+
+class GetAccountAuthorizationDetailsResponse(TypedDict, total=False):
+    UserDetailList: Optional[userDetailListType]
+    GroupDetailList: Optional[groupDetailListType]
+    RoleDetailList: Optional[roleDetailListType]
+    Policies: Optional[ManagedPolicyDetailListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class PasswordPolicy(TypedDict, total=False):
+    MinimumPasswordLength: Optional[minimumPasswordLengthType]
+    RequireSymbols: Optional[booleanType]
+    RequireNumbers: Optional[booleanType]
+    RequireUppercaseCharacters: Optional[booleanType]
+    RequireLowercaseCharacters: Optional[booleanType]
+    AllowUsersToChangePassword: Optional[booleanType]
+    ExpirePasswords: Optional[booleanType]
+    MaxPasswordAge: Optional[maxPasswordAgeType]
+    PasswordReusePrevention: Optional[passwordReusePreventionType]
+    HardExpiry: Optional[booleanObjectType]
+
+
+class GetAccountPasswordPolicyResponse(TypedDict, total=False):
+    PasswordPolicy: PasswordPolicy
+
+
+summaryMapType = Dict[summaryKeyType, summaryValueType]
+
+
+class GetAccountSummaryResponse(TypedDict, total=False):
+    SummaryMap: Optional[summaryMapType]
+
+
+SimulationPolicyListType = List[policyDocumentType]
+
+
+class GetContextKeysForCustomPolicyRequest(ServiceRequest):
+    PolicyInputList: SimulationPolicyListType
+
+
+class GetContextKeysForPolicyResponse(TypedDict, total=False):
+    ContextKeyNames: Optional[ContextKeyNamesResultListType]
+
+
+class GetContextKeysForPrincipalPolicyRequest(ServiceRequest):
+    PolicySourceArn: arnType
+    PolicyInputList: Optional[SimulationPolicyListType]
+
+
+ReportContentType = bytes
+
+
+class GetCredentialReportResponse(TypedDict, total=False):
+    Content: Optional[ReportContentType]
+    ReportFormat: Optional[ReportFormatType]
+    GeneratedTime: Optional[dateType]
+
+
+class GetGroupPolicyRequest(ServiceRequest):
+    GroupName: groupNameType
+    PolicyName: policyNameType
+
+
+class GetGroupPolicyResponse(TypedDict, total=False):
+    GroupName: groupNameType
+    PolicyName: policyNameType
+    PolicyDocument: policyDocumentType
+
+
+class GetGroupRequest(ServiceRequest):
+    GroupName: groupNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+userListType = List[User]
+
+
+class GetGroupResponse(TypedDict, total=False):
+    Group: Group
+    Users: userListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class GetInstanceProfileRequest(ServiceRequest):
+    InstanceProfileName: instanceProfileNameType
+
+
+class GetInstanceProfileResponse(TypedDict, total=False):
+    InstanceProfile: InstanceProfile
+
+
+class GetLoginProfileRequest(ServiceRequest):
+    UserName: Optional[userNameType]
+
+
+class GetLoginProfileResponse(TypedDict, total=False):
+    LoginProfile: LoginProfile
+
+
+class GetMFADeviceRequest(ServiceRequest):
+    SerialNumber: serialNumberType
+    UserName: Optional[userNameType]
+
+
+class GetMFADeviceResponse(TypedDict, total=False):
+    UserName: Optional[userNameType]
+    SerialNumber: serialNumberType
+    EnableDate: Optional[dateType]
+    Certifications: Optional[CertificationMapType]
+
+
+class GetOpenIDConnectProviderRequest(ServiceRequest):
+    OpenIDConnectProviderArn: arnType
+
+
+class GetOpenIDConnectProviderResponse(TypedDict, total=False):
+    Url: Optional[OpenIDConnectProviderUrlType]
+    ClientIDList: Optional[clientIDListType]
+    ThumbprintList: Optional[thumbprintListType]
+    CreateDate: Optional[dateType]
+    Tags: Optional[tagListType]
+
+
+class GetOrganizationsAccessReportRequest(ServiceRequest):
+    JobId: jobIDType
+    MaxItems: Optional[maxItemsType]
+    Marker: Optional[markerType]
+    SortKey: Optional[sortKeyType]
+
+
+class GetOrganizationsAccessReportResponse(TypedDict, total=False):
+    JobStatus: jobStatusType
+    JobCreationDate: dateType
+    JobCompletionDate: Optional[dateType]
+    NumberOfServicesAccessible: Optional[integerType]
+    NumberOfServicesNotAccessed: Optional[integerType]
+    AccessDetails: Optional[AccessDetails]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[markerType]
+    ErrorDetails: Optional[ErrorDetails]
+
+
+class GetPolicyRequest(ServiceRequest):
+    PolicyArn: arnType
+
+
+class GetPolicyResponse(TypedDict, total=False):
+    Policy: Optional[Policy]
+
+
+class GetPolicyVersionRequest(ServiceRequest):
+    PolicyArn: arnType
+    VersionId: policyVersionIdType
+
+
+class GetPolicyVersionResponse(TypedDict, total=False):
+    PolicyVersion: Optional[PolicyVersion]
+
+
+class GetRolePolicyRequest(ServiceRequest):
+    RoleName: roleNameType
+    PolicyName: policyNameType
+
+
+class GetRolePolicyResponse(TypedDict, total=False):
+    RoleName: roleNameType
+    PolicyName: policyNameType
+    PolicyDocument: policyDocumentType
+
+
+class GetRoleRequest(ServiceRequest):
+    RoleName: roleNameType
+
+
+class GetRoleResponse(TypedDict, total=False):
+    Role: Role
+
+
+class GetSAMLProviderRequest(ServiceRequest):
+    SAMLProviderArn: arnType
+
+
+class SAMLPrivateKey(TypedDict, total=False):
+    KeyId: Optional[privateKeyIdType]
+    Timestamp: Optional[dateType]
+
+
+privateKeyList = List[SAMLPrivateKey]
+
+
+class GetSAMLProviderResponse(TypedDict, total=False):
+    SAMLProviderUUID: Optional[privateKeyIdType]
+    SAMLMetadataDocument: Optional[SAMLMetadataDocumentType]
+    CreateDate: Optional[dateType]
+    ValidUntil: Optional[dateType]
+    Tags: Optional[tagListType]
+    AssertionEncryptionMode: Optional[assertionEncryptionModeType]
+    PrivateKeyList: Optional[privateKeyList]
+
+
+class GetSSHPublicKeyRequest(ServiceRequest):
+    UserName: userNameType
+    SSHPublicKeyId: publicKeyIdType
+    Encoding: encodingType
+
+
+class SSHPublicKey(TypedDict, total=False):
+    UserName: userNameType
+    SSHPublicKeyId: publicKeyIdType
+    Fingerprint: publicKeyFingerprintType
+    SSHPublicKeyBody: publicKeyMaterialType
+    Status: statusType
+    UploadDate: Optional[dateType]
+
+
+class GetSSHPublicKeyResponse(TypedDict, total=False):
+    SSHPublicKey: Optional[SSHPublicKey]
+
+
+class GetServerCertificateRequest(ServiceRequest):
+    ServerCertificateName: serverCertificateNameType
+
+
+class ServerCertificateMetadata(TypedDict, total=False):
+    Path: pathType
+    ServerCertificateName: serverCertificateNameType
+    ServerCertificateId: idType
+    Arn: arnType
+    UploadDate: Optional[dateType]
+    Expiration: Optional[dateType]
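+
+
+# --- Illustrative sketch, not part of the generated spec: the shared   ---
+# --- Marker/IsTruncated pagination contract of the Get*/List* shapes.  ---
+# --- The integer-offset encoding of the opaque marker is an assumption ---
+# --- made for this example only.                                       ---
+def _example_paginate(items: list, marker: str | None, max_items: int | None) -> tuple:
+    start = int(marker) if marker else 0
+    end = start + (max_items or 100)
+    page = items[start:end]
+    is_truncated = end < len(items)
+    next_marker = str(end) if is_truncated else None
+    return page, is_truncated, next_marker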
+
+
+class ServerCertificate(TypedDict, total=False):
+    ServerCertificateMetadata: ServerCertificateMetadata
+    CertificateBody: certificateBodyType
+    CertificateChain: Optional[certificateChainType]
+    Tags: Optional[tagListType]
+
+
+class GetServerCertificateResponse(TypedDict, total=False):
+    ServerCertificate: ServerCertificate
+
+
+class GetServiceLastAccessedDetailsRequest(ServiceRequest):
+    JobId: jobIDType
+    MaxItems: Optional[maxItemsType]
+    Marker: Optional[markerType]
+
+
+class TrackedActionLastAccessed(TypedDict, total=False):
+    ActionName: Optional[stringType]
+    LastAccessedEntity: Optional[arnType]
+    LastAccessedTime: Optional[dateType]
+    LastAccessedRegion: Optional[stringType]
+
+
+TrackedActionsLastAccessed = List[TrackedActionLastAccessed]
+
+
+class ServiceLastAccessed(TypedDict, total=False):
+    ServiceName: serviceNameType
+    LastAuthenticated: Optional[dateType]
+    ServiceNamespace: serviceNamespaceType
+    LastAuthenticatedEntity: Optional[arnType]
+    LastAuthenticatedRegion: Optional[stringType]
+    TotalAuthenticatedEntities: Optional[integerType]
+    TrackedActionsLastAccessed: Optional[TrackedActionsLastAccessed]
+
+
+ServicesLastAccessed = List[ServiceLastAccessed]
+
+
+class GetServiceLastAccessedDetailsResponse(TypedDict, total=False):
+    JobStatus: jobStatusType
+    JobType: Optional[AccessAdvisorUsageGranularityType]
+    JobCreationDate: dateType
+    ServicesLastAccessed: ServicesLastAccessed
+    JobCompletionDate: dateType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+    Error: Optional[ErrorDetails]
+
+
+class GetServiceLastAccessedDetailsWithEntitiesRequest(ServiceRequest):
+    JobId: jobIDType
+    ServiceNamespace: serviceNamespaceType
+    MaxItems: Optional[maxItemsType]
+    Marker: Optional[markerType]
+
+
+entityDetailsListType = List[EntityDetails]
+
+
+class GetServiceLastAccessedDetailsWithEntitiesResponse(TypedDict, total=False):
+    JobStatus: jobStatusType
+    JobCreationDate: dateType
+    JobCompletionDate: dateType
+    EntityDetailsList: entityDetailsListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+    Error: Optional[ErrorDetails]
+
+
+class GetServiceLinkedRoleDeletionStatusRequest(ServiceRequest):
+    DeletionTaskId: DeletionTaskIdType
+
+
+class GetServiceLinkedRoleDeletionStatusResponse(TypedDict, total=False):
+    Status: DeletionTaskStatusType
+    Reason: Optional[DeletionTaskFailureReasonType]
+
+
+class GetUserPolicyRequest(ServiceRequest):
+    UserName: existingUserNameType
+    PolicyName: policyNameType
+
+
+class GetUserPolicyResponse(TypedDict, total=False):
+    UserName: existingUserNameType
+    PolicyName: policyNameType
+    PolicyDocument: policyDocumentType
+
+
+class GetUserRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+
+
+class GetUserResponse(TypedDict, total=False):
+    User: User
+
+
+class ListAccessKeysRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+accessKeyMetadataListType = List[AccessKeyMetadata]
+
+
+class ListAccessKeysResponse(TypedDict, total=False):
+    AccessKeyMetadata: accessKeyMetadataListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListAccountAliasesRequest(ServiceRequest):
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+accountAliasListType = List[accountAliasType]
+
+
+class ListAccountAliasesResponse(TypedDict, total=False):
+    AccountAliases: accountAliasListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListAttachedGroupPoliciesRequest(ServiceRequest):
+    GroupName: groupNameType
+    PathPrefix: Optional[policyPathType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListAttachedGroupPoliciesResponse(TypedDict, total=False):
+    AttachedPolicies: Optional[attachedPoliciesListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListAttachedRolePoliciesRequest(ServiceRequest):
+    RoleName: roleNameType
+    PathPrefix: Optional[policyPathType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListAttachedRolePoliciesResponse(TypedDict, total=False):
+    AttachedPolicies: Optional[attachedPoliciesListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListAttachedUserPoliciesRequest(ServiceRequest):
+    UserName: userNameType
+    PathPrefix: Optional[policyPathType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListAttachedUserPoliciesResponse(TypedDict, total=False):
+    AttachedPolicies: Optional[attachedPoliciesListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListEntitiesForPolicyRequest(ServiceRequest):
+    PolicyArn: arnType
+    EntityFilter: Optional[EntityType]
+    PathPrefix: Optional[pathType]
+    PolicyUsageFilter: Optional[PolicyUsageType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class PolicyRole(TypedDict, total=False):
+    RoleName: Optional[roleNameType]
+    RoleId: Optional[idType]
+
+
+PolicyRoleListType = List[PolicyRole]
+
+
+class PolicyUser(TypedDict, total=False):
+    UserName: Optional[userNameType]
+    UserId: Optional[idType]
+
+
+PolicyUserListType = List[PolicyUser]
+
+
+class PolicyGroup(TypedDict, total=False):
+    GroupName: Optional[groupNameType]
+    GroupId: Optional[idType]
+
+
+PolicyGroupListType = List[PolicyGroup]
+
+
+class ListEntitiesForPolicyResponse(TypedDict, total=False):
+    PolicyGroups: Optional[PolicyGroupListType]
+    PolicyUsers: Optional[PolicyUserListType]
+    PolicyRoles: Optional[PolicyRoleListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListGroupPoliciesRequest(ServiceRequest):
+    GroupName: groupNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+policyNameListType = List[policyNameType]
+
+
+class ListGroupPoliciesResponse(TypedDict, total=False):
+    PolicyNames: policyNameListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListGroupsForUserRequest(ServiceRequest):
+    UserName: existingUserNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+groupListType = List[Group]
+
+
+class ListGroupsForUserResponse(TypedDict, total=False):
+    Groups: groupListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListGroupsRequest(ServiceRequest):
+    PathPrefix: Optional[pathPrefixType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListGroupsResponse(TypedDict, total=False):
+    Groups: groupListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListInstanceProfileTagsRequest(ServiceRequest):
+    InstanceProfileName: instanceProfileNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListInstanceProfileTagsResponse(TypedDict, total=False):
+    Tags: tagListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListInstanceProfilesForRoleRequest(ServiceRequest):
+    RoleName: roleNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListInstanceProfilesForRoleResponse(TypedDict, total=False):
+    InstanceProfiles: instanceProfileListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListInstanceProfilesRequest(ServiceRequest):
+    PathPrefix: Optional[pathPrefixType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListInstanceProfilesResponse(TypedDict, total=False):
+    InstanceProfiles: instanceProfileListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListMFADeviceTagsRequest(ServiceRequest):
+    SerialNumber: serialNumberType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListMFADeviceTagsResponse(TypedDict, total=False):
+    Tags: tagListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListMFADevicesRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class MFADevice(TypedDict, total=False):
+    UserName: userNameType
+    SerialNumber: serialNumberType
+    EnableDate: dateType
+
+
+mfaDeviceListType = List[MFADevice]
+
+
+class ListMFADevicesResponse(TypedDict, total=False):
+    MFADevices: mfaDeviceListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListOpenIDConnectProviderTagsRequest(ServiceRequest):
+    OpenIDConnectProviderArn: arnType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListOpenIDConnectProviderTagsResponse(TypedDict, total=False):
+    Tags: tagListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListOpenIDConnectProvidersRequest(ServiceRequest):
+    pass
+
+
+class OpenIDConnectProviderListEntry(TypedDict, total=False):
+    Arn: Optional[arnType]
+
+
+OpenIDConnectProviderListType = List[OpenIDConnectProviderListEntry]
+
+
+class ListOpenIDConnectProvidersResponse(TypedDict, total=False):
+    OpenIDConnectProviderList: Optional[OpenIDConnectProviderListType]
+
+
+class ListOrganizationsFeaturesRequest(ServiceRequest):
+    pass
+
+
+class ListOrganizationsFeaturesResponse(TypedDict, total=False):
+    OrganizationId: Optional[OrganizationIdType]
+    EnabledFeatures: Optional[FeaturesListType]
+
+
+class PolicyGrantingServiceAccess(TypedDict, total=False):
+    PolicyName: policyNameType
+    PolicyType: policyType
+    PolicyArn: Optional[arnType]
+    EntityType: Optional[policyOwnerEntityType]
+    EntityName: Optional[entityNameType]
+
+
+policyGrantingServiceAccessListType = List[PolicyGrantingServiceAccess]
+
+
+class ListPoliciesGrantingServiceAccessEntry(TypedDict, total=False):
+    ServiceNamespace: Optional[serviceNamespaceType]
+    Policies: Optional[policyGrantingServiceAccessListType]
+
+
+serviceNamespaceListType = List[serviceNamespaceType]
+
+
+class ListPoliciesGrantingServiceAccessRequest(ServiceRequest):
+    Marker: Optional[markerType]
+    Arn: arnType
+    ServiceNamespaces: serviceNamespaceListType
+
+
+listPolicyGrantingServiceAccessResponseListType = List[ListPoliciesGrantingServiceAccessEntry]
+
+
+class ListPoliciesGrantingServiceAccessResponse(TypedDict, total=False):
+    PoliciesGrantingServiceAccess: listPolicyGrantingServiceAccessResponseListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListPoliciesRequest(ServiceRequest):
+    Scope: Optional[policyScopeType]
+    OnlyAttached: Optional[booleanType]
+    PathPrefix: Optional[policyPathType]
+    PolicyUsageFilter: Optional[PolicyUsageType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+policyListType = List[Policy]
+
+
+class ListPoliciesResponse(TypedDict, total=False):
+    Policies: Optional[policyListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListPolicyTagsRequest(ServiceRequest):
+    PolicyArn: arnType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListPolicyTagsResponse(TypedDict, total=False):
+    Tags: tagListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListPolicyVersionsRequest(ServiceRequest):
+    PolicyArn: arnType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListPolicyVersionsResponse(TypedDict, total=False):
+    Versions: Optional[policyDocumentVersionListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListRolePoliciesRequest(ServiceRequest):
+    RoleName: roleNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListRolePoliciesResponse(TypedDict, total=False):
+    PolicyNames: policyNameListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListRoleTagsRequest(ServiceRequest):
+    RoleName: roleNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListRoleTagsResponse(TypedDict, total=False):
+    Tags: tagListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListRolesRequest(ServiceRequest):
+    PathPrefix: Optional[pathPrefixType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListRolesResponse(TypedDict, total=False):
+    Roles: roleListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListSAMLProviderTagsRequest(ServiceRequest):
+    SAMLProviderArn: arnType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListSAMLProviderTagsResponse(TypedDict, total=False):
+    Tags: tagListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListSAMLProvidersRequest(ServiceRequest):
+    pass
+
+
+class SAMLProviderListEntry(TypedDict, total=False):
+    Arn: Optional[arnType]
+    ValidUntil: Optional[dateType]
+    CreateDate: Optional[dateType]
+
+
+SAMLProviderListType = List[SAMLProviderListEntry]
+
+
+class ListSAMLProvidersResponse(TypedDict, total=False):
+    SAMLProviderList: Optional[SAMLProviderListType]
+
+
+class ListSSHPublicKeysRequest(ServiceRequest):
+    UserName: Optional[userNameType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class SSHPublicKeyMetadata(TypedDict, total=False):
+    UserName: userNameType
+    SSHPublicKeyId: publicKeyIdType
+    Status: statusType
+    UploadDate: dateType
+
+
+SSHPublicKeyListType = List[SSHPublicKeyMetadata]
+
+
+class ListSSHPublicKeysResponse(TypedDict, total=False):
+    SSHPublicKeys: Optional[SSHPublicKeyListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListServerCertificateTagsRequest(ServiceRequest):
+    ServerCertificateName: serverCertificateNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListServerCertificateTagsResponse(TypedDict, total=False):
+    Tags: tagListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListServerCertificatesRequest(ServiceRequest):
+    PathPrefix: Optional[pathPrefixType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+serverCertificateMetadataListType = List[ServerCertificateMetadata]
+
+
+class ListServerCertificatesResponse(TypedDict, total=False):
+    ServerCertificateMetadataList: serverCertificateMetadataListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListServiceSpecificCredentialsRequest(ServiceRequest):
+    UserName: Optional[userNameType]
+    ServiceName: Optional[serviceName]
+
+
+class ServiceSpecificCredentialMetadata(TypedDict, total=False):
+    UserName: userNameType
+    Status: statusType
+    ServiceUserName: serviceUserName
+    CreateDate: dateType
+    ServiceSpecificCredentialId: serviceSpecificCredentialId
+    ServiceName: serviceName
+
+
+ServiceSpecificCredentialsListType = List[ServiceSpecificCredentialMetadata]
+
+
+class ListServiceSpecificCredentialsResponse(TypedDict, total=False):
+    ServiceSpecificCredentials: Optional[ServiceSpecificCredentialsListType]
+
+
+class ListSigningCertificatesRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class SigningCertificate(TypedDict, total=False):
+    UserName: userNameType
+    CertificateId: certificateIdType
+    CertificateBody: certificateBodyType
+    Status: statusType
+    UploadDate: Optional[dateType]
+
+
+certificateListType = List[SigningCertificate]
+
+
+class ListSigningCertificatesResponse(TypedDict, total=False):
+    Certificates: certificateListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListUserPoliciesRequest(ServiceRequest):
+    UserName: existingUserNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListUserPoliciesResponse(TypedDict, total=False):
+    PolicyNames: policyNameListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListUserTagsRequest(ServiceRequest):
+    UserName: existingUserNameType
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListUserTagsResponse(TypedDict, total=False):
+    Tags: tagListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListUsersRequest(ServiceRequest):
+    PathPrefix: Optional[pathPrefixType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+class ListUsersResponse(TypedDict, total=False):
+    Users: userListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class ListVirtualMFADevicesRequest(ServiceRequest):
+    AssignmentStatus: Optional[assignmentStatusType]
+    Marker: Optional[markerType]
+    MaxItems: Optional[maxItemsType]
+
+
+virtualMFADeviceListType = List[VirtualMFADevice]
+
+
+class ListVirtualMFADevicesResponse(TypedDict, total=False):
+    VirtualMFADevices: virtualMFADeviceListType
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class PutGroupPolicyRequest(ServiceRequest):
+    GroupName: groupNameType
+    PolicyName: policyNameType
+    PolicyDocument: policyDocumentType
+
+
+class PutRolePermissionsBoundaryRequest(ServiceRequest):
+    RoleName: roleNameType
+    PermissionsBoundary: arnType
+
+
+class PutRolePolicyRequest(ServiceRequest):
+    RoleName: roleNameType
+    PolicyName: policyNameType
+    PolicyDocument: policyDocumentType
+
+
+class PutUserPermissionsBoundaryRequest(ServiceRequest):
+    UserName: userNameType
+    PermissionsBoundary: arnType
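+
+
+# --- Illustrative sketch, not part of the generated spec: combining the ---
+# --- hypothetical `_example_paginate` helper from further above with    ---
+# --- the ListUsers shapes; `Marker` is only set while results remain.   ---
+def _example_list_users_response(all_users: userListType, marker: str | None = None) -> ListUsersResponse:
+    page, truncated, next_marker = _example_paginate(all_users, marker, 100)
+    response = ListUsersResponse(Users=page, IsTruncated=truncated)
+    if next_marker:
+        response["Marker"] = next_marker
+    return response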
+
+
+class PutUserPolicyRequest(ServiceRequest):
+    UserName: existingUserNameType
+    PolicyName: policyNameType
+    PolicyDocument: policyDocumentType
+
+
+class RemoveClientIDFromOpenIDConnectProviderRequest(ServiceRequest):
+    OpenIDConnectProviderArn: arnType
+    ClientID: clientIDType
+
+
+class RemoveRoleFromInstanceProfileRequest(ServiceRequest):
+    InstanceProfileName: instanceProfileNameType
+    RoleName: roleNameType
+
+
+class RemoveUserFromGroupRequest(ServiceRequest):
+    GroupName: groupNameType
+    UserName: existingUserNameType
+
+
+class ResetServiceSpecificCredentialRequest(ServiceRequest):
+    UserName: Optional[userNameType]
+    ServiceSpecificCredentialId: serviceSpecificCredentialId
+
+
+class ResetServiceSpecificCredentialResponse(TypedDict, total=False):
+    ServiceSpecificCredential: Optional[ServiceSpecificCredential]
+
+
+ResourceNameListType = List[ResourceNameType]
+
+
+class ResyncMFADeviceRequest(ServiceRequest):
+    UserName: existingUserNameType
+    SerialNumber: serialNumberType
+    AuthenticationCode1: authenticationCodeType
+    AuthenticationCode2: authenticationCodeType
+
+
+class SetDefaultPolicyVersionRequest(ServiceRequest):
+    PolicyArn: arnType
+    VersionId: policyVersionIdType
+
+
+class SetSecurityTokenServicePreferencesRequest(ServiceRequest):
+    GlobalEndpointTokenVersion: globalEndpointTokenVersion
+
+
+class SimulateCustomPolicyRequest(ServiceRequest):
+    PolicyInputList: SimulationPolicyListType
+    PermissionsBoundaryPolicyInputList: Optional[SimulationPolicyListType]
+    ActionNames: ActionNameListType
+    ResourceArns: Optional[ResourceNameListType]
+    ResourcePolicy: Optional[policyDocumentType]
+    ResourceOwner: Optional[ResourceNameType]
+    CallerArn: Optional[ResourceNameType]
+    ContextEntries: Optional[ContextEntryListType]
+    ResourceHandlingOption: Optional[ResourceHandlingOptionType]
+    MaxItems: Optional[maxItemsType]
+    Marker: Optional[markerType]
+
+
+class SimulatePolicyResponse(TypedDict, total=False):
+    EvaluationResults: Optional[EvaluationResultsListType]
+    IsTruncated: Optional[booleanType]
+    Marker: Optional[responseMarkerType]
+
+
+class SimulatePrincipalPolicyRequest(ServiceRequest):
+    PolicySourceArn: arnType
+    PolicyInputList: Optional[SimulationPolicyListType]
+    PermissionsBoundaryPolicyInputList: Optional[SimulationPolicyListType]
+    ActionNames: ActionNameListType
+    ResourceArns: Optional[ResourceNameListType]
+    ResourcePolicy: Optional[policyDocumentType]
+    ResourceOwner: Optional[ResourceNameType]
+    CallerArn: Optional[ResourceNameType]
+    ContextEntries: Optional[ContextEntryListType]
+    ResourceHandlingOption: Optional[ResourceHandlingOptionType]
+    MaxItems: Optional[maxItemsType]
+    Marker: Optional[markerType]
+
+
+class TagInstanceProfileRequest(ServiceRequest):
+    InstanceProfileName: instanceProfileNameType
+    Tags: tagListType
+
+
+class TagMFADeviceRequest(ServiceRequest):
+    SerialNumber: serialNumberType
+    Tags: tagListType
+
+
+class TagOpenIDConnectProviderRequest(ServiceRequest):
+    OpenIDConnectProviderArn: arnType
+    Tags: tagListType
+
+
+class TagPolicyRequest(ServiceRequest):
+    PolicyArn: arnType
+    Tags: tagListType
+
+
+class TagRoleRequest(ServiceRequest):
+    RoleName: roleNameType
+    Tags: tagListType
+
+
+class TagSAMLProviderRequest(ServiceRequest):
+    SAMLProviderArn: arnType
+    Tags: tagListType
+
+
+class TagServerCertificateRequest(ServiceRequest):
+    ServerCertificateName: serverCertificateNameType
+    Tags: tagListType
+
+
+class TagUserRequest(ServiceRequest):
+    UserName: existingUserNameType
+    Tags: tagListType
+
+
+tagKeyListType = List[tagKeyType]
+
+
+class UntagInstanceProfileRequest(ServiceRequest):
+    InstanceProfileName: instanceProfileNameType
+    TagKeys: tagKeyListType
+
+
+class UntagMFADeviceRequest(ServiceRequest):
+    SerialNumber: serialNumberType
+    TagKeys: tagKeyListType
+
+
+class UntagOpenIDConnectProviderRequest(ServiceRequest):
+    OpenIDConnectProviderArn: arnType
+    TagKeys: tagKeyListType
+
+
+class UntagPolicyRequest(ServiceRequest):
+    PolicyArn: arnType
+    TagKeys: tagKeyListType
+
+
+class UntagRoleRequest(ServiceRequest):
+    RoleName: roleNameType
+    TagKeys: tagKeyListType
+
+
+class UntagSAMLProviderRequest(ServiceRequest):
+    SAMLProviderArn: arnType
+    TagKeys: tagKeyListType
+
+
+class UntagServerCertificateRequest(ServiceRequest):
+    ServerCertificateName: serverCertificateNameType
+    TagKeys: tagKeyListType
+
+
+class UntagUserRequest(ServiceRequest):
+    UserName: existingUserNameType
+    TagKeys: tagKeyListType
+
+
+class UpdateAccessKeyRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    AccessKeyId: accessKeyIdType
+    Status: statusType
+
+
+class UpdateAccountPasswordPolicyRequest(ServiceRequest):
+    MinimumPasswordLength: Optional[minimumPasswordLengthType]
+    RequireSymbols: Optional[booleanType]
+    RequireNumbers: Optional[booleanType]
+    RequireUppercaseCharacters: Optional[booleanType]
+    RequireLowercaseCharacters: Optional[booleanType]
+    AllowUsersToChangePassword: Optional[booleanType]
+    MaxPasswordAge: Optional[maxPasswordAgeType]
+    PasswordReusePrevention: Optional[passwordReusePreventionType]
+    HardExpiry: Optional[booleanObjectType]
+
+
+class UpdateAssumeRolePolicyRequest(ServiceRequest):
+    RoleName: roleNameType
+    PolicyDocument: policyDocumentType
+
+
+class UpdateGroupRequest(ServiceRequest):
+    GroupName: groupNameType
+    NewPath: Optional[pathType]
+    NewGroupName: Optional[groupNameType]
+
+
+class UpdateLoginProfileRequest(ServiceRequest):
+    UserName: userNameType
+    Password: Optional[passwordType]
+    PasswordResetRequired: Optional[booleanObjectType]
+
+
+class UpdateOpenIDConnectProviderThumbprintRequest(ServiceRequest):
+    OpenIDConnectProviderArn: arnType
+    ThumbprintList: thumbprintListType
+
+
+class UpdateRoleDescriptionRequest(ServiceRequest):
+    RoleName: roleNameType
+    Description: roleDescriptionType
+
+
+class UpdateRoleDescriptionResponse(TypedDict, total=False):
+    Role: Optional[Role]
+
+
+class UpdateRoleRequest(ServiceRequest):
+    RoleName: roleNameType
+    Description: Optional[roleDescriptionType]
+    MaxSessionDuration: Optional[roleMaxSessionDurationType]
+
+
+class UpdateRoleResponse(TypedDict, total=False):
+    pass
+
+
+class UpdateSAMLProviderRequest(ServiceRequest):
+    SAMLMetadataDocument: Optional[SAMLMetadataDocumentType]
+    SAMLProviderArn: arnType
+    AssertionEncryptionMode: Optional[assertionEncryptionModeType]
+    AddPrivateKey: Optional[privateKeyType]
+    RemovePrivateKey: Optional[privateKeyIdType]
+
+
+class UpdateSAMLProviderResponse(TypedDict, total=False):
+    SAMLProviderArn: Optional[arnType]
+
+
+class UpdateSSHPublicKeyRequest(ServiceRequest):
+    UserName: userNameType
+    SSHPublicKeyId: publicKeyIdType
+    Status: statusType
+
+
+class UpdateServerCertificateRequest(ServiceRequest):
+    ServerCertificateName: serverCertificateNameType
+    NewPath: Optional[pathType]
+    NewServerCertificateName: Optional[serverCertificateNameType]
+
+
+class UpdateServiceSpecificCredentialRequest(ServiceRequest):
+    UserName: Optional[userNameType]
+    ServiceSpecificCredentialId: serviceSpecificCredentialId
+    Status: statusType
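+
+
+# --- Illustrative sketch, not part of the generated spec: the Tag*/Untag* ---
+# --- requests earlier in this block carry plain Key/Value pairs, while    ---
+# --- the Untag* variants only need the list of keys to remove.            ---
+def _example_tags() -> tagListType:
+    return [Tag(Key="env", Value="test"), Tag(Key="team", Value="platform")]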
+
+
+class UpdateSigningCertificateRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    CertificateId: certificateIdType
+    Status: statusType
+
+
+class UpdateUserRequest(ServiceRequest):
+    UserName: existingUserNameType
+    NewPath: Optional[pathType]
+    NewUserName: Optional[userNameType]
+
+
+class UploadSSHPublicKeyRequest(ServiceRequest):
+    UserName: userNameType
+    SSHPublicKeyBody: publicKeyMaterialType
+
+
+class UploadSSHPublicKeyResponse(TypedDict, total=False):
+    SSHPublicKey: Optional[SSHPublicKey]
+
+
+class UploadServerCertificateRequest(ServiceRequest):
+    Path: Optional[pathType]
+    ServerCertificateName: serverCertificateNameType
+    CertificateBody: certificateBodyType
+    PrivateKey: privateKeyType
+    CertificateChain: Optional[certificateChainType]
+    Tags: Optional[tagListType]
+
+
+class UploadServerCertificateResponse(TypedDict, total=False):
+    ServerCertificateMetadata: Optional[ServerCertificateMetadata]
+    Tags: Optional[tagListType]
+
+
+class UploadSigningCertificateRequest(ServiceRequest):
+    UserName: Optional[existingUserNameType]
+    CertificateBody: certificateBodyType
+
+
+class UploadSigningCertificateResponse(TypedDict, total=False):
+    Certificate: SigningCertificate
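+
+
+# --- Illustrative sketch, not part of the generated spec: a provider    ---
+# --- implements the IamApi skeleton below by subclassing it and filling ---
+# --- in the @handler-decorated stubs; the dispatcher maps wire members  ---
+# --- such as "UserName" onto the snake_case parameters. Hypothetical    ---
+# --- example (the `_users` store is invented for this sketch):          ---
+#
+#     class IamProvider(IamApi):
+#         def create_user(
+#             self, context, user_name, path=None, permissions_boundary=None,
+#             tags=None, **kwargs
+#         ) -> CreateUserResponse:
+#             if user_name in self._users:
+#                 raise EntityAlreadyExistsException(
+#                     f"User with name {user_name} already exists."
+#                 )
+#             ...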
+
+
+class IamApi:
+    service = "iam"
+    version = "2010-05-08"
+
+    @handler("AddClientIDToOpenIDConnectProvider")
+    def add_client_id_to_open_id_connect_provider(
+        self,
+        context: RequestContext,
+        open_id_connect_provider_arn: arnType,
+        client_id: clientIDType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("AddRoleToInstanceProfile")
+    def add_role_to_instance_profile(
+        self,
+        context: RequestContext,
+        instance_profile_name: instanceProfileNameType,
+        role_name: roleNameType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("AddUserToGroup")
+    def add_user_to_group(
+        self,
+        context: RequestContext,
+        group_name: groupNameType,
+        user_name: existingUserNameType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("AttachGroupPolicy")
+    def attach_group_policy(
+        self, context: RequestContext, group_name: groupNameType, policy_arn: arnType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("AttachRolePolicy")
+    def attach_role_policy(
+        self, context: RequestContext, role_name: roleNameType, policy_arn: arnType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("AttachUserPolicy")
+    def attach_user_policy(
+        self, context: RequestContext, user_name: userNameType, policy_arn: arnType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ChangePassword")
+    def change_password(
+        self,
+        context: RequestContext,
+        old_password: passwordType,
+        new_password: passwordType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("CreateAccessKey")
+    def create_access_key(
+        self, context: RequestContext, user_name: existingUserNameType | None = None, **kwargs
+    ) -> CreateAccessKeyResponse:
+        raise NotImplementedError
+
+    @handler("CreateAccountAlias")
+    def create_account_alias(
+        self, context: RequestContext, account_alias: accountAliasType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("CreateGroup")
+    def create_group(
+        self,
+        context: RequestContext,
+        group_name: groupNameType,
+        path: pathType | None = None,
+        **kwargs,
+    ) -> CreateGroupResponse:
+        raise NotImplementedError
+
+    @handler("CreateInstanceProfile")
+    def create_instance_profile(
+        self,
+        context: RequestContext,
+        instance_profile_name: instanceProfileNameType,
+        path: pathType | None = None,
+        tags: tagListType | None = None,
+        **kwargs,
+    ) -> CreateInstanceProfileResponse:
+        raise NotImplementedError
+
+    @handler("CreateLoginProfile")
+    def create_login_profile(
+        self,
+        context: RequestContext,
+        user_name: userNameType | None = None,
+        password: passwordType | None = None,
+        password_reset_required: booleanType | None = None,
+        **kwargs,
+    ) -> CreateLoginProfileResponse:
+        raise NotImplementedError
+
+    @handler("CreateOpenIDConnectProvider")
+    def create_open_id_connect_provider(
+        self,
+        context: RequestContext,
+        url: OpenIDConnectProviderUrlType,
+        client_id_list: clientIDListType | None = None,
+        thumbprint_list: thumbprintListType | None = None,
+        tags: tagListType | None = None,
+        **kwargs,
+    ) -> CreateOpenIDConnectProviderResponse:
+        raise NotImplementedError
+
+    @handler("CreatePolicy")
+    def create_policy(
+        self,
+        context: RequestContext,
+        policy_name: policyNameType,
+        policy_document: policyDocumentType,
+        path: policyPathType | None = None,
+        description: policyDescriptionType | None = None,
+        tags: tagListType | None = None,
+        **kwargs,
+    ) -> CreatePolicyResponse:
+        raise NotImplementedError
+
+    @handler("CreatePolicyVersion")
+    def create_policy_version(
+        self,
+        context: RequestContext,
+        policy_arn: arnType,
+        policy_document: policyDocumentType,
+        set_as_default: booleanType | None = None,
+        **kwargs,
+    ) -> CreatePolicyVersionResponse:
+        raise NotImplementedError
+
+    @handler("CreateRole")
+    def create_role(
+        self,
+        context: RequestContext,
+        role_name: roleNameType,
+        assume_role_policy_document: policyDocumentType,
+        path: pathType | None = None,
+        description: roleDescriptionType | None = None,
+        max_session_duration: roleMaxSessionDurationType | None = None,
+        permissions_boundary: arnType | None = None,
+        tags: tagListType | None = None,
+        **kwargs,
+    ) -> CreateRoleResponse:
+        raise NotImplementedError
+
+    @handler("CreateSAMLProvider")
+    def create_saml_provider(
+        self,
+        context: RequestContext,
+        saml_metadata_document: SAMLMetadataDocumentType,
+        name: SAMLProviderNameType,
+        tags: tagListType | None = None,
+        assertion_encryption_mode: assertionEncryptionModeType | None = None,
+        add_private_key: privateKeyType | None = None,
+        **kwargs,
+    ) -> CreateSAMLProviderResponse:
+        raise NotImplementedError
+
+    @handler("CreateServiceLinkedRole")
+    def create_service_linked_role(
+        self,
+        context: RequestContext,
+        aws_service_name: groupNameType,
+        description: roleDescriptionType | None = None,
+        custom_suffix: customSuffixType | None = None,
+        **kwargs,
+    ) -> CreateServiceLinkedRoleResponse:
+        raise NotImplementedError
+
+    @handler("CreateServiceSpecificCredential")
+    def create_service_specific_credential(
+        self, context: RequestContext, user_name: userNameType, service_name: serviceName, **kwargs
+    ) -> CreateServiceSpecificCredentialResponse:
+        raise NotImplementedError
+
+    @handler("CreateUser")
+    def create_user(
+        self,
+        context: RequestContext,
+        user_name: userNameType,
+        path: pathType | None = None,
+        permissions_boundary: arnType | None = None,
+        tags: tagListType | None = None,
+        **kwargs,
+    ) -> CreateUserResponse:
+        raise NotImplementedError
+
+    @handler("CreateVirtualMFADevice")
+    def create_virtual_mfa_device(
+        self,
+        context: RequestContext,
+        virtual_mfa_device_name: virtualMFADeviceName,
+        path: pathType | None = None,
+        tags: tagListType | None = None,
+        **kwargs,
+    ) -> CreateVirtualMFADeviceResponse:
+        raise NotImplementedError
+
+    @handler("DeactivateMFADevice")
+    def deactivate_mfa_device(
+        self,
+        context: RequestContext,
+        serial_number: serialNumberType,
+        user_name: existingUserNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteAccessKey")
+    def delete_access_key(
+        self,
+        context: RequestContext,
+        access_key_id: accessKeyIdType,
+        user_name: existingUserNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteAccountAlias")
+    def delete_account_alias(
+        self, context: RequestContext, account_alias: accountAliasType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteAccountPasswordPolicy")
+    def delete_account_password_policy(self, context: RequestContext, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteGroup")
+    def delete_group(self, context: RequestContext, group_name: groupNameType, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteGroupPolicy")
+    def delete_group_policy(
+        self,
+        context: RequestContext,
+        group_name: groupNameType,
+        policy_name: policyNameType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteInstanceProfile")
+    def delete_instance_profile(
+        self, context: RequestContext, instance_profile_name: instanceProfileNameType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteLoginProfile")
+    def delete_login_profile(
+        self, context: RequestContext, user_name: userNameType | None = None, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteOpenIDConnectProvider")
+    def delete_open_id_connect_provider(
+        self, context: RequestContext, open_id_connect_provider_arn: arnType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeletePolicy")
+    def delete_policy(self, context: RequestContext, policy_arn: arnType, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeletePolicyVersion")
+    def delete_policy_version(
+        self,
+        context: RequestContext,
+        policy_arn: arnType,
+        version_id: policyVersionIdType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRole")
+    def delete_role(self, context: RequestContext, role_name: roleNameType, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRolePermissionsBoundary")
+    def delete_role_permissions_boundary(
+        self, context: RequestContext, role_name: roleNameType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRolePolicy")
+    def delete_role_policy(
+        self,
+        context: RequestContext,
+        role_name: roleNameType,
+        policy_name: policyNameType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSAMLProvider")
+    def delete_saml_provider(
+        self, context: RequestContext, saml_provider_arn: arnType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSSHPublicKey")
+    def delete_ssh_public_key(
+        self,
+        context: RequestContext,
+        user_name: userNameType,
+        ssh_public_key_id: publicKeyIdType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteServerCertificate")
+    def delete_server_certificate(
+        self, context: RequestContext, server_certificate_name: serverCertificateNameType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteServiceLinkedRole")
+    def delete_service_linked_role(
+        self, context: RequestContext, role_name: roleNameType, **kwargs
+    ) -> DeleteServiceLinkedRoleResponse:
+        raise NotImplementedError
+
+    @handler("DeleteServiceSpecificCredential")
+    def delete_service_specific_credential(
+        self,
+        context: RequestContext,
+        service_specific_credential_id: serviceSpecificCredentialId,
+        user_name: userNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSigningCertificate")
+    def delete_signing_certificate(
+        self,
+        context: RequestContext,
+        certificate_id: certificateIdType,
+        user_name: existingUserNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteUser")
+    def delete_user(
+        self, context: RequestContext, user_name: existingUserNameType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteUserPermissionsBoundary")
+    def delete_user_permissions_boundary(
+        self, context: RequestContext, user_name: userNameType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteUserPolicy")
+    def delete_user_policy(
+        self,
+        context: RequestContext,
+        user_name: existingUserNameType,
+        policy_name: policyNameType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteVirtualMFADevice")
+    def delete_virtual_mfa_device(
+        self, context: RequestContext, serial_number: serialNumberType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DetachGroupPolicy")
+    def detach_group_policy(
+        self, context: RequestContext, group_name: groupNameType, policy_arn: arnType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DetachRolePolicy")
+    def detach_role_policy(
+        self, context: RequestContext, role_name: roleNameType, policy_arn: arnType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DetachUserPolicy")
+    def detach_user_policy(
+        self, context: RequestContext, user_name: userNameType, policy_arn: arnType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DisableOrganizationsRootCredentialsManagement")
+    def disable_organizations_root_credentials_management(
+        self, context: RequestContext, **kwargs
+    ) -> DisableOrganizationsRootCredentialsManagementResponse:
+        raise NotImplementedError
+
+    @handler("DisableOrganizationsRootSessions")
+    def disable_organizations_root_sessions(
+        self, context: RequestContext, **kwargs
+    ) -> DisableOrganizationsRootSessionsResponse:
+        raise NotImplementedError
+
+    @handler("EnableMFADevice")
+    def enable_mfa_device(
+        self,
+        context: RequestContext,
+        user_name: existingUserNameType,
+        serial_number: serialNumberType,
+        authentication_code1: authenticationCodeType,
+        authentication_code2: authenticationCodeType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("EnableOrganizationsRootCredentialsManagement")
+    def enable_organizations_root_credentials_management(
+        self, context: RequestContext, **kwargs
+    ) -> EnableOrganizationsRootCredentialsManagementResponse:
+        raise NotImplementedError
+
+    @handler("EnableOrganizationsRootSessions")
+    def enable_organizations_root_sessions(
+        self, context: RequestContext, **kwargs
+    ) -> EnableOrganizationsRootSessionsResponse:
+        raise NotImplementedError
+
+    @handler("GenerateCredentialReport")
+    def generate_credential_report(
+        self, context: RequestContext, **kwargs
+    ) -> GenerateCredentialReportResponse:
+        raise NotImplementedError
+
+    @handler("GenerateOrganizationsAccessReport")
+    def generate_organizations_access_report(
+        self,
+        context: RequestContext,
+        entity_path: organizationsEntityPathType,
+        organizations_policy_id: organizationsPolicyIdType | None = None,
+        **kwargs,
+    ) -> GenerateOrganizationsAccessReportResponse:
+        raise NotImplementedError
+
+    @handler("GenerateServiceLastAccessedDetails")
+    def generate_service_last_accessed_details(
+        self,
+        context: RequestContext,
+        arn: arnType,
+        granularity: AccessAdvisorUsageGranularityType | None = None,
+        **kwargs,
+    ) -> GenerateServiceLastAccessedDetailsResponse:
+        raise NotImplementedError
+
+    @handler("GetAccessKeyLastUsed")
+    def get_access_key_last_used(
+        self, context: RequestContext, access_key_id: accessKeyIdType, **kwargs
+    ) -> GetAccessKeyLastUsedResponse:
+        raise NotImplementedError
+
+    @handler("GetAccountAuthorizationDetails")
+    def get_account_authorization_details(
+        self,
+        context: RequestContext,
+        filter: entityListType | None = None,
+        max_items: maxItemsType | None = None,
+        marker: markerType | None = None,
+        **kwargs,
+    ) -> GetAccountAuthorizationDetailsResponse:
+        raise NotImplementedError
+
+    @handler("GetAccountPasswordPolicy")
+    def get_account_password_policy(
+        self, context: RequestContext, **kwargs
+    ) -> GetAccountPasswordPolicyResponse:
+        raise NotImplementedError
+
+    @handler("GetAccountSummary")
+    def get_account_summary(self, context: RequestContext, **kwargs) -> GetAccountSummaryResponse:
+        raise NotImplementedError
+
+    @handler("GetContextKeysForCustomPolicy")
+    def get_context_keys_for_custom_policy(
+        self, context: RequestContext, policy_input_list: SimulationPolicyListType, **kwargs
+    ) -> GetContextKeysForPolicyResponse:
+        raise NotImplementedError
+
+    @handler("GetContextKeysForPrincipalPolicy")
+    def get_context_keys_for_principal_policy(
+        self,
+        context: RequestContext,
+        policy_source_arn: arnType,
+        policy_input_list: SimulationPolicyListType | None = None,
+        **kwargs,
+    ) -> GetContextKeysForPolicyResponse:
+        raise NotImplementedError
+
+    @handler("GetCredentialReport")
+    def get_credential_report(
+        self, context: RequestContext, **kwargs
+    ) -> GetCredentialReportResponse:
+        raise NotImplementedError
+
+    @handler("GetGroup")
+    def get_group(
+        self,
+        context: RequestContext,
+        group_name: groupNameType,
+        marker: markerType | None = None,
+        max_items: maxItemsType | None = None,
+        **kwargs,
+    ) -> GetGroupResponse:
+        raise NotImplementedError
+
+    @handler("GetGroupPolicy")
+    def get_group_policy(
+        self,
+        context: RequestContext,
+        group_name: groupNameType,
+        policy_name: policyNameType,
+        **kwargs,
+    ) -> GetGroupPolicyResponse:
+        raise NotImplementedError
+
+    @handler("GetInstanceProfile")
+    def get_instance_profile(
+        self, context: RequestContext, instance_profile_name: instanceProfileNameType, **kwargs
+    ) -> GetInstanceProfileResponse:
+        raise NotImplementedError
+
+    @handler("GetLoginProfile")
+    def get_login_profile(
+        self, context: RequestContext, user_name: userNameType | None = None, **kwargs
+    ) -> GetLoginProfileResponse:
+        raise NotImplementedError
+
+    @handler("GetMFADevice")
+    def get_mfa_device(
+        self,
+        context: RequestContext,
+        serial_number: serialNumberType,
+        user_name: userNameType | None = None,
+        **kwargs,
+    ) -> GetMFADeviceResponse:
+        raise NotImplementedError
+
+    @handler("GetOpenIDConnectProvider")
+    def get_open_id_connect_provider(
+        self, context: RequestContext, open_id_connect_provider_arn: arnType, **kwargs
+    ) -> GetOpenIDConnectProviderResponse:
+        raise NotImplementedError
+
+    @handler("GetOrganizationsAccessReport")
+    def get_organizations_access_report(
+        self,
+        context: RequestContext,
+        job_id: jobIDType,
+        max_items: maxItemsType | None = None,
+        marker: markerType | None = None,
+        sort_key: sortKeyType | None = None,
+        **kwargs,
+    ) -> GetOrganizationsAccessReportResponse:
+        raise NotImplementedError
+
+    @handler("GetPolicy")
+    def get_policy(
+        self, context: RequestContext, policy_arn: arnType, **kwargs
+    ) -> GetPolicyResponse:
raise NotImplementedError + + @handler("GetPolicyVersion") + def get_policy_version( + self, + context: RequestContext, + policy_arn: arnType, + version_id: policyVersionIdType, + **kwargs, + ) -> GetPolicyVersionResponse: + raise NotImplementedError + + @handler("GetRole") + def get_role( + self, context: RequestContext, role_name: roleNameType, **kwargs + ) -> GetRoleResponse: + raise NotImplementedError + + @handler("GetRolePolicy") + def get_role_policy( + self, + context: RequestContext, + role_name: roleNameType, + policy_name: policyNameType, + **kwargs, + ) -> GetRolePolicyResponse: + raise NotImplementedError + + @handler("GetSAMLProvider") + def get_saml_provider( + self, context: RequestContext, saml_provider_arn: arnType, **kwargs + ) -> GetSAMLProviderResponse: + raise NotImplementedError + + @handler("GetSSHPublicKey") + def get_ssh_public_key( + self, + context: RequestContext, + user_name: userNameType, + ssh_public_key_id: publicKeyIdType, + encoding: encodingType, + **kwargs, + ) -> GetSSHPublicKeyResponse: + raise NotImplementedError + + @handler("GetServerCertificate") + def get_server_certificate( + self, context: RequestContext, server_certificate_name: serverCertificateNameType, **kwargs + ) -> GetServerCertificateResponse: + raise NotImplementedError + + @handler("GetServiceLastAccessedDetails") + def get_service_last_accessed_details( + self, + context: RequestContext, + job_id: jobIDType, + max_items: maxItemsType | None = None, + marker: markerType | None = None, + **kwargs, + ) -> GetServiceLastAccessedDetailsResponse: + raise NotImplementedError + + @handler("GetServiceLastAccessedDetailsWithEntities") + def get_service_last_accessed_details_with_entities( + self, + context: RequestContext, + job_id: jobIDType, + service_namespace: serviceNamespaceType, + max_items: maxItemsType | None = None, + marker: markerType | None = None, + **kwargs, + ) -> GetServiceLastAccessedDetailsWithEntitiesResponse: + raise NotImplementedError + + @handler("GetServiceLinkedRoleDeletionStatus") + def get_service_linked_role_deletion_status( + self, context: RequestContext, deletion_task_id: DeletionTaskIdType, **kwargs + ) -> GetServiceLinkedRoleDeletionStatusResponse: + raise NotImplementedError + + @handler("GetUser") + def get_user( + self, context: RequestContext, user_name: existingUserNameType | None = None, **kwargs + ) -> GetUserResponse: + raise NotImplementedError + + @handler("GetUserPolicy") + def get_user_policy( + self, + context: RequestContext, + user_name: existingUserNameType, + policy_name: policyNameType, + **kwargs, + ) -> GetUserPolicyResponse: + raise NotImplementedError + + @handler("ListAccessKeys") + def list_access_keys( + self, + context: RequestContext, + user_name: existingUserNameType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListAccessKeysResponse: + raise NotImplementedError + + @handler("ListAccountAliases") + def list_account_aliases( + self, + context: RequestContext, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListAccountAliasesResponse: + raise NotImplementedError + + @handler("ListAttachedGroupPolicies") + def list_attached_group_policies( + self, + context: RequestContext, + group_name: groupNameType, + path_prefix: policyPathType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListAttachedGroupPoliciesResponse: + raise NotImplementedError + + 
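+    # The marker/max_items parameters on the List* operations below follow IAM's
+    # truncated-list pagination convention: callers pass the Marker from a
+    # truncated response back in to fetch the next page.
+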
@handler("ListAttachedRolePolicies") + def list_attached_role_policies( + self, + context: RequestContext, + role_name: roleNameType, + path_prefix: policyPathType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListAttachedRolePoliciesResponse: + raise NotImplementedError + + @handler("ListAttachedUserPolicies") + def list_attached_user_policies( + self, + context: RequestContext, + user_name: userNameType, + path_prefix: policyPathType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListAttachedUserPoliciesResponse: + raise NotImplementedError + + @handler("ListEntitiesForPolicy") + def list_entities_for_policy( + self, + context: RequestContext, + policy_arn: arnType, + entity_filter: EntityType | None = None, + path_prefix: pathType | None = None, + policy_usage_filter: PolicyUsageType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListEntitiesForPolicyResponse: + raise NotImplementedError + + @handler("ListGroupPolicies") + def list_group_policies( + self, + context: RequestContext, + group_name: groupNameType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListGroupPoliciesResponse: + raise NotImplementedError + + @handler("ListGroups") + def list_groups( + self, + context: RequestContext, + path_prefix: pathPrefixType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListGroupsResponse: + raise NotImplementedError + + @handler("ListGroupsForUser") + def list_groups_for_user( + self, + context: RequestContext, + user_name: existingUserNameType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListGroupsForUserResponse: + raise NotImplementedError + + @handler("ListInstanceProfileTags") + def list_instance_profile_tags( + self, + context: RequestContext, + instance_profile_name: instanceProfileNameType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListInstanceProfileTagsResponse: + raise NotImplementedError + + @handler("ListInstanceProfiles") + def list_instance_profiles( + self, + context: RequestContext, + path_prefix: pathPrefixType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListInstanceProfilesResponse: + raise NotImplementedError + + @handler("ListInstanceProfilesForRole") + def list_instance_profiles_for_role( + self, + context: RequestContext, + role_name: roleNameType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListInstanceProfilesForRoleResponse: + raise NotImplementedError + + @handler("ListMFADeviceTags") + def list_mfa_device_tags( + self, + context: RequestContext, + serial_number: serialNumberType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListMFADeviceTagsResponse: + raise NotImplementedError + + @handler("ListMFADevices") + def list_mfa_devices( + self, + context: RequestContext, + user_name: existingUserNameType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListMFADevicesResponse: + raise NotImplementedError + + @handler("ListOpenIDConnectProviderTags") + def list_open_id_connect_provider_tags( + self, + context: RequestContext, + 
open_id_connect_provider_arn: arnType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListOpenIDConnectProviderTagsResponse: + raise NotImplementedError + + @handler("ListOpenIDConnectProviders") + def list_open_id_connect_providers( + self, context: RequestContext, **kwargs + ) -> ListOpenIDConnectProvidersResponse: + raise NotImplementedError + + @handler("ListOrganizationsFeatures") + def list_organizations_features( + self, context: RequestContext, **kwargs + ) -> ListOrganizationsFeaturesResponse: + raise NotImplementedError + + @handler("ListPolicies") + def list_policies( + self, + context: RequestContext, + scope: policyScopeType | None = None, + only_attached: booleanType | None = None, + path_prefix: policyPathType | None = None, + policy_usage_filter: PolicyUsageType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListPoliciesResponse: + raise NotImplementedError + + @handler("ListPoliciesGrantingServiceAccess") + def list_policies_granting_service_access( + self, + context: RequestContext, + arn: arnType, + service_namespaces: serviceNamespaceListType, + marker: markerType | None = None, + **kwargs, + ) -> ListPoliciesGrantingServiceAccessResponse: + raise NotImplementedError + + @handler("ListPolicyTags") + def list_policy_tags( + self, + context: RequestContext, + policy_arn: arnType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListPolicyTagsResponse: + raise NotImplementedError + + @handler("ListPolicyVersions") + def list_policy_versions( + self, + context: RequestContext, + policy_arn: arnType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListPolicyVersionsResponse: + raise NotImplementedError + + @handler("ListRolePolicies") + def list_role_policies( + self, + context: RequestContext, + role_name: roleNameType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListRolePoliciesResponse: + raise NotImplementedError + + @handler("ListRoleTags") + def list_role_tags( + self, + context: RequestContext, + role_name: roleNameType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListRoleTagsResponse: + raise NotImplementedError + + @handler("ListRoles") + def list_roles( + self, + context: RequestContext, + path_prefix: pathPrefixType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListRolesResponse: + raise NotImplementedError + + @handler("ListSAMLProviderTags") + def list_saml_provider_tags( + self, + context: RequestContext, + saml_provider_arn: arnType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListSAMLProviderTagsResponse: + raise NotImplementedError + + @handler("ListSAMLProviders") + def list_saml_providers(self, context: RequestContext, **kwargs) -> ListSAMLProvidersResponse: + raise NotImplementedError + + @handler("ListSSHPublicKeys") + def list_ssh_public_keys( + self, + context: RequestContext, + user_name: userNameType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListSSHPublicKeysResponse: + raise NotImplementedError + + @handler("ListServerCertificateTags") + def list_server_certificate_tags( + self, + context: RequestContext, + server_certificate_name: serverCertificateNameType, + 
marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListServerCertificateTagsResponse: + raise NotImplementedError + + @handler("ListServerCertificates") + def list_server_certificates( + self, + context: RequestContext, + path_prefix: pathPrefixType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListServerCertificatesResponse: + raise NotImplementedError + + @handler("ListServiceSpecificCredentials") + def list_service_specific_credentials( + self, + context: RequestContext, + user_name: userNameType | None = None, + service_name: serviceName | None = None, + **kwargs, + ) -> ListServiceSpecificCredentialsResponse: + raise NotImplementedError + + @handler("ListSigningCertificates") + def list_signing_certificates( + self, + context: RequestContext, + user_name: existingUserNameType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListSigningCertificatesResponse: + raise NotImplementedError + + @handler("ListUserPolicies") + def list_user_policies( + self, + context: RequestContext, + user_name: existingUserNameType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListUserPoliciesResponse: + raise NotImplementedError + + @handler("ListUserTags") + def list_user_tags( + self, + context: RequestContext, + user_name: existingUserNameType, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListUserTagsResponse: + raise NotImplementedError + + @handler("ListUsers") + def list_users( + self, + context: RequestContext, + path_prefix: pathPrefixType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListUsersResponse: + raise NotImplementedError + + @handler("ListVirtualMFADevices") + def list_virtual_mfa_devices( + self, + context: RequestContext, + assignment_status: assignmentStatusType | None = None, + marker: markerType | None = None, + max_items: maxItemsType | None = None, + **kwargs, + ) -> ListVirtualMFADevicesResponse: + raise NotImplementedError + + @handler("PutGroupPolicy") + def put_group_policy( + self, + context: RequestContext, + group_name: groupNameType, + policy_name: policyNameType, + policy_document: policyDocumentType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutRolePermissionsBoundary") + def put_role_permissions_boundary( + self, + context: RequestContext, + role_name: roleNameType, + permissions_boundary: arnType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutRolePolicy") + def put_role_policy( + self, + context: RequestContext, + role_name: roleNameType, + policy_name: policyNameType, + policy_document: policyDocumentType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutUserPermissionsBoundary") + def put_user_permissions_boundary( + self, + context: RequestContext, + user_name: userNameType, + permissions_boundary: arnType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutUserPolicy") + def put_user_policy( + self, + context: RequestContext, + user_name: existingUserNameType, + policy_name: policyNameType, + policy_document: policyDocumentType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RemoveClientIDFromOpenIDConnectProvider") + def remove_client_id_from_open_id_connect_provider( + self, + context: RequestContext, + 
open_id_connect_provider_arn: arnType, + client_id: clientIDType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RemoveRoleFromInstanceProfile") + def remove_role_from_instance_profile( + self, + context: RequestContext, + instance_profile_name: instanceProfileNameType, + role_name: roleNameType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RemoveUserFromGroup") + def remove_user_from_group( + self, + context: RequestContext, + group_name: groupNameType, + user_name: existingUserNameType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ResetServiceSpecificCredential") + def reset_service_specific_credential( + self, + context: RequestContext, + service_specific_credential_id: serviceSpecificCredentialId, + user_name: userNameType | None = None, + **kwargs, + ) -> ResetServiceSpecificCredentialResponse: + raise NotImplementedError + + @handler("ResyncMFADevice") + def resync_mfa_device( + self, + context: RequestContext, + user_name: existingUserNameType, + serial_number: serialNumberType, + authentication_code1: authenticationCodeType, + authentication_code2: authenticationCodeType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("SetDefaultPolicyVersion") + def set_default_policy_version( + self, + context: RequestContext, + policy_arn: arnType, + version_id: policyVersionIdType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("SetSecurityTokenServicePreferences") + def set_security_token_service_preferences( + self, + context: RequestContext, + global_endpoint_token_version: globalEndpointTokenVersion, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("SimulateCustomPolicy") + def simulate_custom_policy( + self, + context: RequestContext, + policy_input_list: SimulationPolicyListType, + action_names: ActionNameListType, + permissions_boundary_policy_input_list: SimulationPolicyListType | None = None, + resource_arns: ResourceNameListType | None = None, + resource_policy: policyDocumentType | None = None, + resource_owner: ResourceNameType | None = None, + caller_arn: ResourceNameType | None = None, + context_entries: ContextEntryListType | None = None, + resource_handling_option: ResourceHandlingOptionType | None = None, + max_items: maxItemsType | None = None, + marker: markerType | None = None, + **kwargs, + ) -> SimulatePolicyResponse: + raise NotImplementedError + + @handler("SimulatePrincipalPolicy") + def simulate_principal_policy( + self, + context: RequestContext, + policy_source_arn: arnType, + action_names: ActionNameListType, + policy_input_list: SimulationPolicyListType | None = None, + permissions_boundary_policy_input_list: SimulationPolicyListType | None = None, + resource_arns: ResourceNameListType | None = None, + resource_policy: policyDocumentType | None = None, + resource_owner: ResourceNameType | None = None, + caller_arn: ResourceNameType | None = None, + context_entries: ContextEntryListType | None = None, + resource_handling_option: ResourceHandlingOptionType | None = None, + max_items: maxItemsType | None = None, + marker: markerType | None = None, + **kwargs, + ) -> SimulatePolicyResponse: + raise NotImplementedError + + @handler("TagInstanceProfile") + def tag_instance_profile( + self, + context: RequestContext, + instance_profile_name: instanceProfileNameType, + tags: tagListType, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("TagMFADevice") + def tag_mfa_device( + self, context: RequestContext, serial_number: serialNumberType, 
+    @handler("TagMFADevice")
+    def tag_mfa_device(
+        self, context: RequestContext, serial_number: serialNumberType, tags: tagListType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagOpenIDConnectProvider")
+    def tag_open_id_connect_provider(
+        self,
+        context: RequestContext,
+        open_id_connect_provider_arn: arnType,
+        tags: tagListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagPolicy")
+    def tag_policy(
+        self, context: RequestContext, policy_arn: arnType, tags: tagListType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagRole")
+    def tag_role(
+        self, context: RequestContext, role_name: roleNameType, tags: tagListType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagSAMLProvider")
+    def tag_saml_provider(
+        self, context: RequestContext, saml_provider_arn: arnType, tags: tagListType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagServerCertificate")
+    def tag_server_certificate(
+        self,
+        context: RequestContext,
+        server_certificate_name: serverCertificateNameType,
+        tags: tagListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagUser")
+    def tag_user(
+        self, context: RequestContext, user_name: existingUserNameType, tags: tagListType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagInstanceProfile")
+    def untag_instance_profile(
+        self,
+        context: RequestContext,
+        instance_profile_name: instanceProfileNameType,
+        tag_keys: tagKeyListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagMFADevice")
+    def untag_mfa_device(
+        self,
+        context: RequestContext,
+        serial_number: serialNumberType,
+        tag_keys: tagKeyListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagOpenIDConnectProvider")
+    def untag_open_id_connect_provider(
+        self,
+        context: RequestContext,
+        open_id_connect_provider_arn: arnType,
+        tag_keys: tagKeyListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagPolicy")
+    def untag_policy(
+        self, context: RequestContext, policy_arn: arnType, tag_keys: tagKeyListType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagRole")
+    def untag_role(
+        self, context: RequestContext, role_name: roleNameType, tag_keys: tagKeyListType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagSAMLProvider")
+    def untag_saml_provider(
+        self,
+        context: RequestContext,
+        saml_provider_arn: arnType,
+        tag_keys: tagKeyListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagServerCertificate")
+    def untag_server_certificate(
+        self,
+        context: RequestContext,
+        server_certificate_name: serverCertificateNameType,
+        tag_keys: tagKeyListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagUser")
+    def untag_user(
+        self,
+        context: RequestContext,
+        user_name: existingUserNameType,
+        tag_keys: tagKeyListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateAccessKey")
+    def update_access_key(
+        self,
+        context: RequestContext,
+        access_key_id: accessKeyIdType,
+        status: statusType,
+        user_name: existingUserNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateAccountPasswordPolicy")
+    def update_account_password_policy(
+        self,
+        context: RequestContext,
+        minimum_password_length: minimumPasswordLengthType | None = None,
+        require_symbols: booleanType | None = None,
+        require_numbers: booleanType | None = None,
+        require_uppercase_characters: booleanType | None = None,
+        require_lowercase_characters: booleanType | None = None,
+        allow_users_to_change_password: booleanType | None = None,
+        max_password_age: maxPasswordAgeType | None = None,
+        password_reuse_prevention: passwordReusePreventionType | None = None,
+        hard_expiry: booleanObjectType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateAssumeRolePolicy")
+    def update_assume_role_policy(
+        self,
+        context: RequestContext,
+        role_name: roleNameType,
+        policy_document: policyDocumentType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateGroup")
+    def update_group(
+        self,
+        context: RequestContext,
+        group_name: groupNameType,
+        new_path: pathType | None = None,
+        new_group_name: groupNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateLoginProfile")
+    def update_login_profile(
+        self,
+        context: RequestContext,
+        user_name: userNameType,
+        password: passwordType | None = None,
+        password_reset_required: booleanObjectType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateOpenIDConnectProviderThumbprint")
+    def update_open_id_connect_provider_thumbprint(
+        self,
+        context: RequestContext,
+        open_id_connect_provider_arn: arnType,
+        thumbprint_list: thumbprintListType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateRole")
+    def update_role(
+        self,
+        context: RequestContext,
+        role_name: roleNameType,
+        description: roleDescriptionType | None = None,
+        max_session_duration: roleMaxSessionDurationType | None = None,
+        **kwargs,
+    ) -> UpdateRoleResponse:
+        raise NotImplementedError
+
+    @handler("UpdateRoleDescription")
+    def update_role_description(
+        self,
+        context: RequestContext,
+        role_name: roleNameType,
+        description: roleDescriptionType,
+        **kwargs,
+    ) -> UpdateRoleDescriptionResponse:
+        raise NotImplementedError
+
+    @handler("UpdateSAMLProvider")
+    def update_saml_provider(
+        self,
+        context: RequestContext,
+        saml_provider_arn: arnType,
+        saml_metadata_document: SAMLMetadataDocumentType | None = None,
+        assertion_encryption_mode: assertionEncryptionModeType | None = None,
+        add_private_key: privateKeyType | None = None,
+        remove_private_key: privateKeyIdType | None = None,
+        **kwargs,
+    ) -> UpdateSAMLProviderResponse:
+        raise NotImplementedError
+
+    @handler("UpdateSSHPublicKey")
+    def update_ssh_public_key(
+        self,
+        context: RequestContext,
+        user_name: userNameType,
+        ssh_public_key_id: publicKeyIdType,
+        status: statusType,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateServerCertificate")
+    def update_server_certificate(
+        self,
+        context: RequestContext,
+        server_certificate_name: serverCertificateNameType,
+        new_path: pathType | None = None,
+        new_server_certificate_name: serverCertificateNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateServiceSpecificCredential")
+    def update_service_specific_credential(
+        self,
+        context: RequestContext,
+        service_specific_credential_id: serviceSpecificCredentialId,
+        status: statusType,
+        user_name: userNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateSigningCertificate")
+    def update_signing_certificate(
+        self,
+        context: RequestContext,
+        certificate_id: certificateIdType,
+        status: statusType,
+        user_name: existingUserNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateUser")
+    def update_user(
+        self,
+        context: RequestContext,
+        user_name: existingUserNameType,
+        new_path: pathType | None = None,
+        new_user_name: userNameType | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UploadSSHPublicKey")
+    def upload_ssh_public_key(
+        self,
+        context: RequestContext,
+        user_name: userNameType,
+        ssh_public_key_body: publicKeyMaterialType,
+        **kwargs,
+    ) -> UploadSSHPublicKeyResponse:
+        raise NotImplementedError
+
+    @handler("UploadServerCertificate")
+    def upload_server_certificate(
+        self,
+        context: RequestContext,
+        server_certificate_name: serverCertificateNameType,
+        certificate_body: certificateBodyType,
+        private_key: privateKeyType,
+        path: pathType | None = None,
+        certificate_chain: certificateChainType | None = None,
+        tags: tagListType | None = None,
+        **kwargs,
+    ) -> UploadServerCertificateResponse:
+        raise NotImplementedError
+
+    @handler("UploadSigningCertificate")
+    def upload_signing_certificate(
+        self,
+        context: RequestContext,
+        certificate_body: certificateBodyType,
+        user_name: existingUserNameType | None = None,
+        **kwargs,
+    ) -> UploadSigningCertificateResponse:
+        raise NotImplementedError
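+
+
+# A provider is expected to subclass IamApi and override individual operations;
+# a minimal sketch of the pattern (names illustrative, wiring lives outside this
+# generated file):
+#
+#   class IamProvider(IamApi):
+#       def get_user(self, context, user_name=None, **kwargs) -> GetUserResponse:
+#           ...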
diff --git a/localstack-core/localstack/aws/api/kinesis/__init__.py b/localstack-core/localstack/aws/api/kinesis/__init__.py
new file mode 100644
index 0000000000000..61f6f105fac9c
--- /dev/null
+++ b/localstack-core/localstack/aws/api/kinesis/__init__.py
@@ -0,0 +1,1055 @@
+from datetime import datetime
+from enum import StrEnum
+from typing import Dict, Iterator, List, Optional, TypedDict
+
+from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
+
+BooleanObject = bool
+ConsumerARN = str
+ConsumerCountObject = int
+ConsumerName = str
+DescribeStreamInputLimit = int
+ErrorCode = str
+ErrorMessage = str
+GetRecordsInputLimit = int
+HashKey = str
+KeyId = str
+ListShardsInputLimit = int
+ListStreamConsumersInputLimit = int
+ListStreamsInputLimit = int
+ListTagsForStreamInputLimit = int
+NextToken = str
+OnDemandStreamCountLimitObject = int
+OnDemandStreamCountObject = int
+PartitionKey = str
+Policy = str
+PositiveIntegerObject = int
+ResourceARN = str
+RetentionPeriodHours = int
+SequenceNumber = str
+ShardCountObject = int
+ShardId = str
+ShardIterator = str
+StreamARN = str
+StreamName = str
+TagKey = str
+TagValue = str
+
+
+class ConsumerStatus(StrEnum):
+    CREATING = "CREATING"
+    DELETING = "DELETING"
+    ACTIVE = "ACTIVE"
+
+
+class EncryptionType(StrEnum):
+    NONE = "NONE"
+    KMS = "KMS"
+
+
+class MetricsName(StrEnum):
+    IncomingBytes = "IncomingBytes"
+    IncomingRecords = "IncomingRecords"
+    OutgoingBytes = "OutgoingBytes"
+    OutgoingRecords = "OutgoingRecords"
+    WriteProvisionedThroughputExceeded = "WriteProvisionedThroughputExceeded"
+    ReadProvisionedThroughputExceeded = "ReadProvisionedThroughputExceeded"
+    IteratorAgeMilliseconds = "IteratorAgeMilliseconds"
+    ALL = "ALL"
+
+
+class ScalingType(StrEnum):
+    UNIFORM_SCALING = "UNIFORM_SCALING"
+
+
+class ShardFilterType(StrEnum):
+    AFTER_SHARD_ID = "AFTER_SHARD_ID"
+    AT_TRIM_HORIZON = "AT_TRIM_HORIZON"
+    FROM_TRIM_HORIZON = "FROM_TRIM_HORIZON"
+    AT_LATEST = "AT_LATEST"
+    AT_TIMESTAMP = "AT_TIMESTAMP"
+    FROM_TIMESTAMP = "FROM_TIMESTAMP"
+
+
+class ShardIteratorType(StrEnum):
+    AT_SEQUENCE_NUMBER = "AT_SEQUENCE_NUMBER"
+    AFTER_SEQUENCE_NUMBER = "AFTER_SEQUENCE_NUMBER"
+    TRIM_HORIZON = "TRIM_HORIZON"
+    LATEST = "LATEST"
+    AT_TIMESTAMP = "AT_TIMESTAMP"
+
+
+class StreamMode(StrEnum):
+    PROVISIONED = "PROVISIONED"
+    ON_DEMAND = "ON_DEMAND"
+
+
+class StreamStatus(StrEnum):
+    CREATING = "CREATING"
+    DELETING = "DELETING"
+    ACTIVE = "ACTIVE"
+    UPDATING = "UPDATING"
+
+
+class AccessDeniedException(ServiceException):
+    code: str = "AccessDeniedException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ExpiredIteratorException(ServiceException):
+    code: str = "ExpiredIteratorException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ExpiredNextTokenException(ServiceException):
+    code: str = "ExpiredNextTokenException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InternalFailureException(ServiceException):
+    code: str = "InternalFailureException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidArgumentException(ServiceException):
+    code: str = "InvalidArgumentException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class KMSAccessDeniedException(ServiceException):
+    code: str = "KMSAccessDeniedException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class KMSDisabledException(ServiceException):
+    code: str = "KMSDisabledException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class KMSInvalidStateException(ServiceException):
+    code: str = "KMSInvalidStateException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class KMSNotFoundException(ServiceException):
+    code: str = "KMSNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class KMSOptInRequired(ServiceException):
+    code: str = "KMSOptInRequired"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class KMSThrottlingException(ServiceException):
+    code: str = "KMSThrottlingException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class LimitExceededException(ServiceException):
+    code: str = "LimitExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ProvisionedThroughputExceededException(ServiceException):
+    code: str = "ProvisionedThroughputExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ResourceInUseException(ServiceException):
+    code: str = "ResourceInUseException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ResourceNotFoundException(ServiceException):
+    code: str = "ResourceNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ValidationException(ServiceException):
+    code: str = "ValidationException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+TagMap = Dict[TagKey, TagValue]
+
+
+class AddTagsToStreamInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    Tags: TagMap
+    StreamARN: Optional[StreamARN]
+
+
+class HashKeyRange(TypedDict, total=False):
+    StartingHashKey: HashKey
+    EndingHashKey: HashKey
+
+
+ShardIdList = List[ShardId]
+
+
+class ChildShard(TypedDict, total=False):
+    ShardId: ShardId
+    ParentShards: ShardIdList
+    HashKeyRange: HashKeyRange
+
+
+ChildShardList = List[ChildShard]
+Timestamp = datetime
+
+
+class Consumer(TypedDict, total=False):
+    ConsumerName: ConsumerName
+    ConsumerARN: ConsumerARN
+    ConsumerStatus: ConsumerStatus
+    ConsumerCreationTimestamp: Timestamp
+
+
+class ConsumerDescription(TypedDict, total=False):
+    ConsumerName: ConsumerName
+    ConsumerARN: ConsumerARN
+    ConsumerStatus: ConsumerStatus
+    ConsumerCreationTimestamp: Timestamp
+    StreamARN: StreamARN
+
+
+ConsumerList = List[Consumer]
+
+
+class StreamModeDetails(TypedDict, total=False):
+    StreamMode: StreamMode
+
+
+class CreateStreamInput(ServiceRequest):
+    StreamName: StreamName
+    ShardCount: Optional[PositiveIntegerObject]
+    StreamModeDetails: Optional[StreamModeDetails]
+    Tags: Optional[TagMap]
+
+
+Data = bytes
+
+
+class DecreaseStreamRetentionPeriodInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    RetentionPeriodHours: RetentionPeriodHours
+    StreamARN: Optional[StreamARN]
+
+
+class DeleteResourcePolicyInput(ServiceRequest):
+    ResourceARN: ResourceARN
+
+
+class DeleteStreamInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    EnforceConsumerDeletion: Optional[BooleanObject]
+    StreamARN: Optional[StreamARN]
+
+
+class DeregisterStreamConsumerInput(ServiceRequest):
+    StreamARN: Optional[StreamARN]
+    ConsumerName: Optional[ConsumerName]
+    ConsumerARN: Optional[ConsumerARN]
+
+
+class DescribeLimitsInput(ServiceRequest):
+    pass
+
+
+class DescribeLimitsOutput(TypedDict, total=False):
+    ShardLimit: ShardCountObject
+    OpenShardCount: ShardCountObject
+    OnDemandStreamCount: OnDemandStreamCountObject
+    OnDemandStreamCountLimit: OnDemandStreamCountLimitObject
+
+
+class DescribeStreamConsumerInput(ServiceRequest):
+    StreamARN: Optional[StreamARN]
+    ConsumerName: Optional[ConsumerName]
+    ConsumerARN: Optional[ConsumerARN]
+
+
+class DescribeStreamConsumerOutput(TypedDict, total=False):
+    ConsumerDescription: ConsumerDescription
+
+
+class DescribeStreamInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    Limit: Optional[DescribeStreamInputLimit]
+    ExclusiveStartShardId: Optional[ShardId]
+    StreamARN: Optional[StreamARN]
+
+
+MetricsNameList = List[MetricsName]
+
+
+class EnhancedMetrics(TypedDict, total=False):
+    ShardLevelMetrics: Optional[MetricsNameList]
+
+
+EnhancedMonitoringList = List[EnhancedMetrics]
+
+
+class SequenceNumberRange(TypedDict, total=False):
+    StartingSequenceNumber: SequenceNumber
+    EndingSequenceNumber: Optional[SequenceNumber]
+
+
+class Shard(TypedDict, total=False):
+    ShardId: ShardId
+    ParentShardId: Optional[ShardId]
+    AdjacentParentShardId: Optional[ShardId]
+    HashKeyRange: HashKeyRange
+    SequenceNumberRange: SequenceNumberRange
+
+
+ShardList = List[Shard]
+
+
+class StreamDescription(TypedDict, total=False):
+    StreamName: StreamName
+    StreamARN: StreamARN
+    StreamStatus: StreamStatus
+    StreamModeDetails: Optional[StreamModeDetails]
+    Shards: ShardList
+    HasMoreShards: BooleanObject
+    RetentionPeriodHours: RetentionPeriodHours
+    StreamCreationTimestamp: Timestamp
+    EnhancedMonitoring: EnhancedMonitoringList
+    EncryptionType: Optional[EncryptionType]
+    KeyId: Optional[KeyId]
+
+
+class DescribeStreamOutput(TypedDict, total=False):
+    StreamDescription: StreamDescription
+
+
+class DescribeStreamSummaryInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    StreamARN: Optional[StreamARN]
+
+
+class StreamDescriptionSummary(TypedDict, total=False):
+    StreamName: StreamName
+    StreamARN: StreamARN
+    StreamStatus: StreamStatus
+    StreamModeDetails: Optional[StreamModeDetails]
+    RetentionPeriodHours: RetentionPeriodHours
+    StreamCreationTimestamp: Timestamp
+    EnhancedMonitoring: EnhancedMonitoringList
+    EncryptionType: Optional[EncryptionType]
+    KeyId: Optional[KeyId]
+    OpenShardCount: ShardCountObject
+    ConsumerCount: Optional[ConsumerCountObject]
+
+
+class DescribeStreamSummaryOutput(TypedDict, total=False):
+    StreamDescriptionSummary: StreamDescriptionSummary
+
+
+class DisableEnhancedMonitoringInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    ShardLevelMetrics: MetricsNameList
+    StreamARN: Optional[StreamARN]
+
+
+class EnableEnhancedMonitoringInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    ShardLevelMetrics: MetricsNameList
+    StreamARN: Optional[StreamARN]
+
+
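+# Both the Enable- and DisableEnhancedMonitoring operations below return this
+# single shared response shape: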
+class EnhancedMonitoringOutput(TypedDict, total=False):
+    StreamName: Optional[StreamName]
+    CurrentShardLevelMetrics: Optional[MetricsNameList]
+    DesiredShardLevelMetrics: Optional[MetricsNameList]
+    StreamARN: Optional[StreamARN]
+
+
+class GetRecordsInput(ServiceRequest):
+    ShardIterator: ShardIterator
+    Limit: Optional[GetRecordsInputLimit]
+    StreamARN: Optional[StreamARN]
+
+
+MillisBehindLatest = int
+
+
+class Record(TypedDict, total=False):
+    SequenceNumber: SequenceNumber
+    ApproximateArrivalTimestamp: Optional[Timestamp]
+    Data: Data
+    PartitionKey: PartitionKey
+    EncryptionType: Optional[EncryptionType]
+
+
+RecordList = List[Record]
+
+
+class GetRecordsOutput(TypedDict, total=False):
+    Records: RecordList
+    NextShardIterator: Optional[ShardIterator]
+    MillisBehindLatest: Optional[MillisBehindLatest]
+    ChildShards: Optional[ChildShardList]
+
+
+class GetResourcePolicyInput(ServiceRequest):
+    ResourceARN: ResourceARN
+
+
+class GetResourcePolicyOutput(TypedDict, total=False):
+    Policy: Policy
+
+
+class GetShardIteratorInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    ShardId: ShardId
+    ShardIteratorType: ShardIteratorType
+    StartingSequenceNumber: Optional[SequenceNumber]
+    Timestamp: Optional[Timestamp]
+    StreamARN: Optional[StreamARN]
+
+
+class GetShardIteratorOutput(TypedDict, total=False):
+    ShardIterator: Optional[ShardIterator]
+
+
+class IncreaseStreamRetentionPeriodInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    RetentionPeriodHours: RetentionPeriodHours
+    StreamARN: Optional[StreamARN]
+
+
+class ShardFilter(TypedDict, total=False):
+    Type: ShardFilterType
+    ShardId: Optional[ShardId]
+    Timestamp: Optional[Timestamp]
+
+
+class ListShardsInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    NextToken: Optional[NextToken]
+    ExclusiveStartShardId: Optional[ShardId]
+    MaxResults: Optional[ListShardsInputLimit]
+    StreamCreationTimestamp: Optional[Timestamp]
+    ShardFilter: Optional[ShardFilter]
+    StreamARN: Optional[StreamARN]
+
+
+class ListShardsOutput(TypedDict, total=False):
+    Shards: Optional[ShardList]
+    NextToken: Optional[NextToken]
+
+
+class ListStreamConsumersInput(ServiceRequest):
+    StreamARN: StreamARN
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[ListStreamConsumersInputLimit]
+    StreamCreationTimestamp: Optional[Timestamp]
+
+
+class ListStreamConsumersOutput(TypedDict, total=False):
+    Consumers: Optional[ConsumerList]
+    NextToken: Optional[NextToken]
+
+
+class ListStreamsInput(ServiceRequest):
+    Limit: Optional[ListStreamsInputLimit]
+    ExclusiveStartStreamName: Optional[StreamName]
+    NextToken: Optional[NextToken]
+
+
+class StreamSummary(TypedDict, total=False):
+    StreamName: StreamName
+    StreamARN: StreamARN
+    StreamStatus: StreamStatus
+    StreamModeDetails: Optional[StreamModeDetails]
+    StreamCreationTimestamp: Optional[Timestamp]
+
+
+StreamSummaryList = List[StreamSummary]
+StreamNameList = List[StreamName]
+
+
+class ListStreamsOutput(TypedDict, total=False):
+    StreamNames: StreamNameList
+    HasMoreStreams: BooleanObject
+    NextToken: Optional[NextToken]
+    StreamSummaries: Optional[StreamSummaryList]
+
+
+class ListTagsForResourceInput(ServiceRequest):
+    ResourceARN: ResourceARN
+
+
+class Tag(TypedDict, total=False):
+    Key: TagKey
+    Value: Optional[TagValue]
+
+
+TagList = List[Tag]
+
+
+class ListTagsForResourceOutput(TypedDict, total=False):
+    Tags: Optional[TagList]
+
+
+class ListTagsForStreamInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    ExclusiveStartTagKey: Optional[TagKey]
+    Limit: Optional[ListTagsForStreamInputLimit]
+    StreamARN: Optional[StreamARN]
+
+
+class ListTagsForStreamOutput(TypedDict, total=False):
+    Tags: TagList
+    HasMoreTags: BooleanObject
+
+
+class MergeShardsInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    ShardToMerge: ShardId
+    AdjacentShardToMerge: ShardId
+    StreamARN: Optional[StreamARN]
+
+
+class PutRecordInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    Data: Data
+    PartitionKey: PartitionKey
+    ExplicitHashKey: Optional[HashKey]
+    SequenceNumberForOrdering: Optional[SequenceNumber]
+    StreamARN: Optional[StreamARN]
+
+
+class PutRecordOutput(TypedDict, total=False):
+    ShardId: ShardId
+    SequenceNumber: SequenceNumber
+    EncryptionType: Optional[EncryptionType]
+
+
+class PutRecordsRequestEntry(TypedDict, total=False):
+    Data: Data
+    ExplicitHashKey: Optional[HashKey]
+    PartitionKey: PartitionKey
+
+
+PutRecordsRequestEntryList = List[PutRecordsRequestEntry]
+
+
+class PutRecordsInput(ServiceRequest):
+    Records: PutRecordsRequestEntryList
+    StreamName: Optional[StreamName]
+    StreamARN: Optional[StreamARN]
+
+
+class PutRecordsResultEntry(TypedDict, total=False):
+    SequenceNumber: Optional[SequenceNumber]
+    ShardId: Optional[ShardId]
+    ErrorCode: Optional[ErrorCode]
+    ErrorMessage: Optional[ErrorMessage]
+
+
+PutRecordsResultEntryList = List[PutRecordsResultEntry]
+
+
+class PutRecordsOutput(TypedDict, total=False):
+    FailedRecordCount: Optional[PositiveIntegerObject]
+    Records: PutRecordsResultEntryList
+    EncryptionType: Optional[EncryptionType]
+
+
+class PutResourcePolicyInput(ServiceRequest):
+    ResourceARN: ResourceARN
+    Policy: Policy
+
+
+class RegisterStreamConsumerInput(ServiceRequest):
+    StreamARN: StreamARN
+    ConsumerName: ConsumerName
+    Tags: Optional[TagMap]
+
+
+class RegisterStreamConsumerOutput(TypedDict, total=False):
+    Consumer: Consumer
+
+
+TagKeyList = List[TagKey]
+
+
+class RemoveTagsFromStreamInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    TagKeys: TagKeyList
+    StreamARN: Optional[StreamARN]
+
+
+class SplitShardInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    ShardToSplit: ShardId
+    NewStartingHashKey: HashKey
+    StreamARN: Optional[StreamARN]
+
+
+class StartStreamEncryptionInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    EncryptionType: EncryptionType
+    KeyId: KeyId
+    StreamARN: Optional[StreamARN]
+
+
+class StartingPosition(TypedDict, total=False):
+    Type: ShardIteratorType
+    SequenceNumber: Optional[SequenceNumber]
+    Timestamp: Optional[Timestamp]
+
+
+class StopStreamEncryptionInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    EncryptionType: EncryptionType
+    KeyId: KeyId
+    StreamARN: Optional[StreamARN]
+
+
+class SubscribeToShardEvent(TypedDict, total=False):
+    Records: RecordList
+    ContinuationSequenceNumber: SequenceNumber
+    MillisBehindLatest: MillisBehindLatest
+    ChildShards: Optional[ChildShardList]
+
+
+class SubscribeToShardEventStream(TypedDict, total=False):
+    SubscribeToShardEvent: SubscribeToShardEvent
+    ResourceNotFoundException: Optional[ResourceNotFoundException]
+    ResourceInUseException: Optional[ResourceInUseException]
+    KMSDisabledException: Optional[KMSDisabledException]
+    KMSInvalidStateException: Optional[KMSInvalidStateException]
+    KMSAccessDeniedException: Optional[KMSAccessDeniedException]
+    KMSNotFoundException: Optional[KMSNotFoundException]
+    KMSOptInRequired: Optional[KMSOptInRequired]
+    KMSThrottlingException: Optional[KMSThrottlingException]
+    InternalFailureException: Optional[InternalFailureException]
+
+
+class SubscribeToShardInput(ServiceRequest):
+    ConsumerARN: ConsumerARN
+    ShardId: ShardId
+    StartingPosition: StartingPosition
+
+
+class SubscribeToShardOutput(TypedDict, total=False):
+    EventStream: Iterator[SubscribeToShardEventStream]
+
+
+class TagResourceInput(ServiceRequest):
+    Tags: TagMap
+    ResourceARN: ResourceARN
+
+
+class UntagResourceInput(ServiceRequest):
+    TagKeys: TagKeyList
+    ResourceARN: ResourceARN
+
+
+class UpdateShardCountInput(ServiceRequest):
+    StreamName: Optional[StreamName]
+    TargetShardCount: PositiveIntegerObject
+    ScalingType: ScalingType
+    StreamARN: Optional[StreamARN]
+
+
+class UpdateShardCountOutput(TypedDict, total=False):
+    StreamName: Optional[StreamName]
+    CurrentShardCount: Optional[PositiveIntegerObject]
+    TargetShardCount: Optional[PositiveIntegerObject]
+    StreamARN: Optional[StreamARN]
+
+
+class UpdateStreamModeInput(ServiceRequest):
+    StreamARN: StreamARN
+    StreamModeDetails: StreamModeDetails
+
+
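+# Server-side skeleton for the Kinesis API: each method is bound to its wire
+# operation via @handler("OperationName") and raises NotImplementedError until a
+# provider subclass overrides it. A sketch of the pattern (illustrative only,
+# not generated code):
+#
+#   class KinesisProvider(KinesisApi):
+#       def list_streams(self, context, **kwargs) -> ListStreamsOutput:
+#           return ListStreamsOutput(StreamNames=[], HasMoreStreams=False)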
+class KinesisApi:
+    service = "kinesis"
+    version = "2013-12-02"
+
+    @handler("AddTagsToStream")
+    def add_tags_to_stream(
+        self,
+        context: RequestContext,
+        tags: TagMap,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("CreateStream")
+    def create_stream(
+        self,
+        context: RequestContext,
+        stream_name: StreamName,
+        shard_count: PositiveIntegerObject | None = None,
+        stream_mode_details: StreamModeDetails | None = None,
+        tags: TagMap | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DecreaseStreamRetentionPeriod")
+    def decrease_stream_retention_period(
+        self,
+        context: RequestContext,
+        retention_period_hours: RetentionPeriodHours,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteResourcePolicy")
+    def delete_resource_policy(
+        self, context: RequestContext, resource_arn: ResourceARN, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteStream")
+    def delete_stream(
+        self,
+        context: RequestContext,
+        stream_name: StreamName | None = None,
+        enforce_consumer_deletion: BooleanObject | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeregisterStreamConsumer")
+    def deregister_stream_consumer(
+        self,
+        context: RequestContext,
+        stream_arn: StreamARN | None = None,
+        consumer_name: ConsumerName | None = None,
+        consumer_arn: ConsumerARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DescribeLimits")
+    def describe_limits(self, context: RequestContext, **kwargs) -> DescribeLimitsOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStream")
+    def describe_stream(
+        self,
+        context: RequestContext,
+        stream_name: StreamName | None = None,
+        limit: DescribeStreamInputLimit | None = None,
+        exclusive_start_shard_id: ShardId | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> DescribeStreamOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStreamConsumer")
+    def describe_stream_consumer(
+        self,
+        context: RequestContext,
+        stream_arn: StreamARN | None = None,
+        consumer_name: ConsumerName | None = None,
+        consumer_arn: ConsumerARN | None = None,
+        **kwargs,
+    ) -> DescribeStreamConsumerOutput:
+        raise NotImplementedError
+
+    @handler("DescribeStreamSummary")
+    def describe_stream_summary(
+        self,
+        context: RequestContext,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> DescribeStreamSummaryOutput:
+        raise NotImplementedError
+
+    @handler("DisableEnhancedMonitoring")
+    def disable_enhanced_monitoring(
+        self,
+        context: RequestContext,
+        shard_level_metrics: MetricsNameList,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> EnhancedMonitoringOutput:
+        raise NotImplementedError
+
+    @handler("EnableEnhancedMonitoring")
+    def enable_enhanced_monitoring(
+        self,
+        context: RequestContext,
+        shard_level_metrics: MetricsNameList,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> EnhancedMonitoringOutput:
+        raise NotImplementedError
+
+    @handler("GetRecords")
+    def get_records(
+        self,
+        context: RequestContext,
+        shard_iterator: ShardIterator,
+        limit: GetRecordsInputLimit | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> GetRecordsOutput:
+        raise NotImplementedError
+
+    @handler("GetResourcePolicy")
+    def get_resource_policy(
+        self, context: RequestContext, resource_arn: ResourceARN, **kwargs
+    ) -> GetResourcePolicyOutput:
+        raise NotImplementedError
+
+    @handler("GetShardIterator")
+    def get_shard_iterator(
+        self,
+        context: RequestContext,
+        shard_id: ShardId,
+        shard_iterator_type: ShardIteratorType,
+        stream_name: StreamName | None = None,
+        starting_sequence_number: SequenceNumber | None = None,
+        timestamp: Timestamp | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> GetShardIteratorOutput:
+        raise NotImplementedError
+
+    @handler("IncreaseStreamRetentionPeriod")
+    def increase_stream_retention_period(
+        self,
+        context: RequestContext,
+        retention_period_hours: RetentionPeriodHours,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ListShards")
+    def list_shards(
+        self,
+        context: RequestContext,
+        stream_name: StreamName | None = None,
+        next_token: NextToken | None = None,
+        exclusive_start_shard_id: ShardId | None = None,
+        max_results: ListShardsInputLimit | None = None,
+        stream_creation_timestamp: Timestamp | None = None,
+        shard_filter: ShardFilter | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> ListShardsOutput:
+        raise NotImplementedError
+
+    @handler("ListStreamConsumers")
+    def list_stream_consumers(
+        self,
+        context: RequestContext,
+        stream_arn: StreamARN,
+        next_token: NextToken | None = None,
+        max_results: ListStreamConsumersInputLimit | None = None,
+        stream_creation_timestamp: Timestamp | None = None,
+        **kwargs,
+    ) -> ListStreamConsumersOutput:
+        raise NotImplementedError
+
+    @handler("ListStreams")
+    def list_streams(
+        self,
+        context: RequestContext,
+        limit: ListStreamsInputLimit | None = None,
+        exclusive_start_stream_name: StreamName | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> ListStreamsOutput:
+        raise NotImplementedError
+
+    @handler("ListTagsForResource")
+    def list_tags_for_resource(
+        self, context: RequestContext, resource_arn: ResourceARN, **kwargs
+    ) -> ListTagsForResourceOutput:
+        raise NotImplementedError
+
+    @handler("ListTagsForStream")
+    def list_tags_for_stream(
+        self,
+        context: RequestContext,
+        stream_name: StreamName | None = None,
+        exclusive_start_tag_key: TagKey | None = None,
+        limit: ListTagsForStreamInputLimit | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> ListTagsForStreamOutput:
+        raise NotImplementedError
+
+    @handler("MergeShards")
+    def merge_shards(
+        self,
+        context: RequestContext,
+        shard_to_merge: ShardId,
+        adjacent_shard_to_merge: ShardId,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("PutRecord")
+    def put_record(
+        self,
+        context: RequestContext,
+        data: Data,
+        partition_key: PartitionKey,
+        stream_name: StreamName | None = None,
+        explicit_hash_key: HashKey | None = None,
+        sequence_number_for_ordering: SequenceNumber | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> PutRecordOutput:
+        raise NotImplementedError
+
+    @handler("PutRecords")
+    def put_records(
+        self,
+        context: RequestContext,
+        records: PutRecordsRequestEntryList,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> PutRecordsOutput:
+        raise NotImplementedError
+
+    @handler("PutResourcePolicy")
+    def put_resource_policy(
+        self, context: RequestContext, resource_arn: ResourceARN, policy: Policy, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("RegisterStreamConsumer")
+    def register_stream_consumer(
+        self,
+        context: RequestContext,
+        stream_arn: StreamARN,
+        consumer_name: ConsumerName,
+        tags: TagMap | None = None,
+        **kwargs,
+    ) -> RegisterStreamConsumerOutput:
+        raise NotImplementedError
+
+    @handler("RemoveTagsFromStream")
+    def remove_tags_from_stream(
+        self,
+        context: RequestContext,
+        tag_keys: TagKeyList,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("SplitShard")
+    def split_shard(
+        self,
+        context: RequestContext,
+        shard_to_split: ShardId,
+        new_starting_hash_key: HashKey,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("StartStreamEncryption")
+    def start_stream_encryption(
+        self,
+        context: RequestContext,
+        encryption_type: EncryptionType,
+        key_id: KeyId,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("StopStreamEncryption")
+    def stop_stream_encryption(
+        self,
+        context: RequestContext,
+        encryption_type: EncryptionType,
+        key_id: KeyId,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("SubscribeToShard")
+    def subscribe_to_shard(
+        self,
+        context: RequestContext,
+        consumer_arn: ConsumerARN,
+        shard_id: ShardId,
+        starting_position: StartingPosition,
+        **kwargs,
+    ) -> SubscribeToShardOutput:
+        raise NotImplementedError
+
+    @handler("TagResource")
+    def tag_resource(
+        self, context: RequestContext, tags: TagMap, resource_arn: ResourceARN, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagResource")
+    def untag_resource(
+        self, context: RequestContext, tag_keys: TagKeyList, resource_arn: ResourceARN, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateShardCount")
+    def update_shard_count(
+        self,
+        context: RequestContext,
+        target_shard_count: PositiveIntegerObject,
+        scaling_type: ScalingType,
+        stream_name: StreamName | None = None,
+        stream_arn: StreamARN | None = None,
+        **kwargs,
+    ) -> UpdateShardCountOutput:
+        raise NotImplementedError
+
+    @handler("UpdateStreamMode")
+    def update_stream_mode(
+        self,
+        context: RequestContext,
+        stream_arn: StreamARN,
+        stream_mode_details: StreamModeDetails,
StreamModeDetails, + **kwargs, + ) -> None: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/kms/__init__.py b/localstack-core/localstack/aws/api/kms/__init__.py new file mode 100644 index 0000000000000..9acaf5e5a100b --- /dev/null +++ b/localstack-core/localstack/aws/api/kms/__init__.py @@ -0,0 +1,1850 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AWSAccountIdType = str +AliasNameType = str +ArnType = str +BooleanType = bool +CloudHsmClusterIdType = str +CustomKeyStoreIdType = str +CustomKeyStoreNameType = str +DescriptionType = str +EncryptionContextKey = str +EncryptionContextValue = str +ErrorMessageType = str +GrantIdType = str +GrantNameType = str +GrantTokenType = str +KeyIdType = str +KeyStorePasswordType = str +LimitType = int +MarkerType = str +NullableBooleanType = bool +NumberOfBytesType = int +PendingWindowInDaysType = int +PolicyNameType = str +PolicyType = str +PrincipalIdType = str +RegionType = str +RotationPeriodInDaysType = int +TagKeyType = str +TagValueType = str +TrustAnchorCertificateType = str +XksKeyIdType = str +XksProxyAuthenticationAccessKeyIdType = str +XksProxyAuthenticationRawSecretAccessKeyType = str +XksProxyUriEndpointType = str +XksProxyUriPathType = str +XksProxyVpcEndpointServiceNameType = str + + +class AlgorithmSpec(StrEnum): + RSAES_PKCS1_V1_5 = "RSAES_PKCS1_V1_5" + RSAES_OAEP_SHA_1 = "RSAES_OAEP_SHA_1" + RSAES_OAEP_SHA_256 = "RSAES_OAEP_SHA_256" + RSA_AES_KEY_WRAP_SHA_1 = "RSA_AES_KEY_WRAP_SHA_1" + RSA_AES_KEY_WRAP_SHA_256 = "RSA_AES_KEY_WRAP_SHA_256" + SM2PKE = "SM2PKE" + + +class ConnectionErrorCodeType(StrEnum): + INVALID_CREDENTIALS = "INVALID_CREDENTIALS" + CLUSTER_NOT_FOUND = "CLUSTER_NOT_FOUND" + NETWORK_ERRORS = "NETWORK_ERRORS" + INTERNAL_ERROR = "INTERNAL_ERROR" + INSUFFICIENT_CLOUDHSM_HSMS = "INSUFFICIENT_CLOUDHSM_HSMS" + USER_LOCKED_OUT = "USER_LOCKED_OUT" + USER_NOT_FOUND = "USER_NOT_FOUND" + USER_LOGGED_IN = "USER_LOGGED_IN" + SUBNET_NOT_FOUND = "SUBNET_NOT_FOUND" + INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET = "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET" + XKS_PROXY_ACCESS_DENIED = "XKS_PROXY_ACCESS_DENIED" + XKS_PROXY_NOT_REACHABLE = "XKS_PROXY_NOT_REACHABLE" + XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND = "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND" + XKS_PROXY_INVALID_RESPONSE = "XKS_PROXY_INVALID_RESPONSE" + XKS_PROXY_INVALID_CONFIGURATION = "XKS_PROXY_INVALID_CONFIGURATION" + XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION = ( + "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION" + ) + XKS_PROXY_TIMED_OUT = "XKS_PROXY_TIMED_OUT" + XKS_PROXY_INVALID_TLS_CONFIGURATION = "XKS_PROXY_INVALID_TLS_CONFIGURATION" + + +class ConnectionStateType(StrEnum): + CONNECTED = "CONNECTED" + CONNECTING = "CONNECTING" + FAILED = "FAILED" + DISCONNECTED = "DISCONNECTED" + DISCONNECTING = "DISCONNECTING" + + +class CustomKeyStoreType(StrEnum): + AWS_CLOUDHSM = "AWS_CLOUDHSM" + EXTERNAL_KEY_STORE = "EXTERNAL_KEY_STORE" + + +class CustomerMasterKeySpec(StrEnum): + RSA_2048 = "RSA_2048" + RSA_3072 = "RSA_3072" + RSA_4096 = "RSA_4096" + ECC_NIST_P256 = "ECC_NIST_P256" + ECC_NIST_P384 = "ECC_NIST_P384" + ECC_NIST_P521 = "ECC_NIST_P521" + ECC_SECG_P256K1 = "ECC_SECG_P256K1" + SYMMETRIC_DEFAULT = "SYMMETRIC_DEFAULT" + HMAC_224 = "HMAC_224" + HMAC_256 = "HMAC_256" + HMAC_384 = "HMAC_384" + HMAC_512 = "HMAC_512" + SM2 = "SM2" + + +class DataKeyPairSpec(StrEnum): + RSA_2048 = "RSA_2048" + RSA_3072 = 
"RSA_3072" + RSA_4096 = "RSA_4096" + ECC_NIST_P256 = "ECC_NIST_P256" + ECC_NIST_P384 = "ECC_NIST_P384" + ECC_NIST_P521 = "ECC_NIST_P521" + ECC_SECG_P256K1 = "ECC_SECG_P256K1" + SM2 = "SM2" + + +class DataKeySpec(StrEnum): + AES_256 = "AES_256" + AES_128 = "AES_128" + + +class EncryptionAlgorithmSpec(StrEnum): + SYMMETRIC_DEFAULT = "SYMMETRIC_DEFAULT" + RSAES_OAEP_SHA_1 = "RSAES_OAEP_SHA_1" + RSAES_OAEP_SHA_256 = "RSAES_OAEP_SHA_256" + SM2PKE = "SM2PKE" + + +class ExpirationModelType(StrEnum): + KEY_MATERIAL_EXPIRES = "KEY_MATERIAL_EXPIRES" + KEY_MATERIAL_DOES_NOT_EXPIRE = "KEY_MATERIAL_DOES_NOT_EXPIRE" + + +class GrantOperation(StrEnum): + Decrypt = "Decrypt" + Encrypt = "Encrypt" + GenerateDataKey = "GenerateDataKey" + GenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" + ReEncryptFrom = "ReEncryptFrom" + ReEncryptTo = "ReEncryptTo" + Sign = "Sign" + Verify = "Verify" + GetPublicKey = "GetPublicKey" + CreateGrant = "CreateGrant" + RetireGrant = "RetireGrant" + DescribeKey = "DescribeKey" + GenerateDataKeyPair = "GenerateDataKeyPair" + GenerateDataKeyPairWithoutPlaintext = "GenerateDataKeyPairWithoutPlaintext" + GenerateMac = "GenerateMac" + VerifyMac = "VerifyMac" + DeriveSharedSecret = "DeriveSharedSecret" + + +class KeyAgreementAlgorithmSpec(StrEnum): + ECDH = "ECDH" + + +class KeyEncryptionMechanism(StrEnum): + RSAES_OAEP_SHA_256 = "RSAES_OAEP_SHA_256" + + +class KeyManagerType(StrEnum): + AWS = "AWS" + CUSTOMER = "CUSTOMER" + + +class KeySpec(StrEnum): + RSA_2048 = "RSA_2048" + RSA_3072 = "RSA_3072" + RSA_4096 = "RSA_4096" + ECC_NIST_P256 = "ECC_NIST_P256" + ECC_NIST_P384 = "ECC_NIST_P384" + ECC_NIST_P521 = "ECC_NIST_P521" + ECC_SECG_P256K1 = "ECC_SECG_P256K1" + SYMMETRIC_DEFAULT = "SYMMETRIC_DEFAULT" + HMAC_224 = "HMAC_224" + HMAC_256 = "HMAC_256" + HMAC_384 = "HMAC_384" + HMAC_512 = "HMAC_512" + SM2 = "SM2" + + +class KeyState(StrEnum): + Creating = "Creating" + Enabled = "Enabled" + Disabled = "Disabled" + PendingDeletion = "PendingDeletion" + PendingImport = "PendingImport" + PendingReplicaDeletion = "PendingReplicaDeletion" + Unavailable = "Unavailable" + Updating = "Updating" + + +class KeyUsageType(StrEnum): + SIGN_VERIFY = "SIGN_VERIFY" + ENCRYPT_DECRYPT = "ENCRYPT_DECRYPT" + GENERATE_VERIFY_MAC = "GENERATE_VERIFY_MAC" + KEY_AGREEMENT = "KEY_AGREEMENT" + + +class MacAlgorithmSpec(StrEnum): + HMAC_SHA_224 = "HMAC_SHA_224" + HMAC_SHA_256 = "HMAC_SHA_256" + HMAC_SHA_384 = "HMAC_SHA_384" + HMAC_SHA_512 = "HMAC_SHA_512" + + +class MessageType(StrEnum): + RAW = "RAW" + DIGEST = "DIGEST" + + +class MultiRegionKeyType(StrEnum): + PRIMARY = "PRIMARY" + REPLICA = "REPLICA" + + +class OriginType(StrEnum): + AWS_KMS = "AWS_KMS" + EXTERNAL = "EXTERNAL" + AWS_CLOUDHSM = "AWS_CLOUDHSM" + EXTERNAL_KEY_STORE = "EXTERNAL_KEY_STORE" + + +class RotationType(StrEnum): + AUTOMATIC = "AUTOMATIC" + ON_DEMAND = "ON_DEMAND" + + +class SigningAlgorithmSpec(StrEnum): + RSASSA_PSS_SHA_256 = "RSASSA_PSS_SHA_256" + RSASSA_PSS_SHA_384 = "RSASSA_PSS_SHA_384" + RSASSA_PSS_SHA_512 = "RSASSA_PSS_SHA_512" + RSASSA_PKCS1_V1_5_SHA_256 = "RSASSA_PKCS1_V1_5_SHA_256" + RSASSA_PKCS1_V1_5_SHA_384 = "RSASSA_PKCS1_V1_5_SHA_384" + RSASSA_PKCS1_V1_5_SHA_512 = "RSASSA_PKCS1_V1_5_SHA_512" + ECDSA_SHA_256 = "ECDSA_SHA_256" + ECDSA_SHA_384 = "ECDSA_SHA_384" + ECDSA_SHA_512 = "ECDSA_SHA_512" + SM2DSA = "SM2DSA" + + +class WrappingKeySpec(StrEnum): + RSA_2048 = "RSA_2048" + RSA_3072 = "RSA_3072" + RSA_4096 = "RSA_4096" + SM2 = "SM2" + + +class XksProxyConnectivityType(StrEnum): + PUBLIC_ENDPOINT = 
"PUBLIC_ENDPOINT" + VPC_ENDPOINT_SERVICE = "VPC_ENDPOINT_SERVICE" + + +class AlreadyExistsException(ServiceException): + code: str = "AlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + + +class CloudHsmClusterInUseException(ServiceException): + code: str = "CloudHsmClusterInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class CloudHsmClusterInvalidConfigurationException(ServiceException): + code: str = "CloudHsmClusterInvalidConfigurationException" + sender_fault: bool = False + status_code: int = 400 + + +class CloudHsmClusterNotActiveException(ServiceException): + code: str = "CloudHsmClusterNotActiveException" + sender_fault: bool = False + status_code: int = 400 + + +class CloudHsmClusterNotFoundException(ServiceException): + code: str = "CloudHsmClusterNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class CloudHsmClusterNotRelatedException(ServiceException): + code: str = "CloudHsmClusterNotRelatedException" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class CustomKeyStoreHasCMKsException(ServiceException): + code: str = "CustomKeyStoreHasCMKsException" + sender_fault: bool = False + status_code: int = 400 + + +class CustomKeyStoreInvalidStateException(ServiceException): + code: str = "CustomKeyStoreInvalidStateException" + sender_fault: bool = False + status_code: int = 400 + + +class CustomKeyStoreNameInUseException(ServiceException): + code: str = "CustomKeyStoreNameInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class CustomKeyStoreNotFoundException(ServiceException): + code: str = "CustomKeyStoreNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class DependencyTimeoutException(ServiceException): + code: str = "DependencyTimeoutException" + sender_fault: bool = False + status_code: int = 400 + + +class DisabledException(ServiceException): + code: str = "DisabledException" + sender_fault: bool = False + status_code: int = 400 + + +class DryRunOperationException(ServiceException): + code: str = "DryRunOperationException" + sender_fault: bool = False + status_code: int = 400 + + +class ExpiredImportTokenException(ServiceException): + code: str = "ExpiredImportTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class IncorrectKeyException(ServiceException): + code: str = "IncorrectKeyException" + sender_fault: bool = False + status_code: int = 400 + + +class IncorrectKeyMaterialException(ServiceException): + code: str = "IncorrectKeyMaterialException" + sender_fault: bool = False + status_code: int = 400 + + +class IncorrectTrustAnchorException(ServiceException): + code: str = "IncorrectTrustAnchorException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAliasNameException(ServiceException): + code: str = "InvalidAliasNameException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidArnException(ServiceException): + code: str = "InvalidArnException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidCiphertextException(ServiceException): + code: str = "InvalidCiphertextException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidGrantIdException(ServiceException): + code: str = "InvalidGrantIdException" + sender_fault: bool = False + status_code: int = 400 + + +class 
InvalidGrantTokenException(ServiceException): + code: str = "InvalidGrantTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidImportTokenException(ServiceException): + code: str = "InvalidImportTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidKeyUsageException(ServiceException): + code: str = "InvalidKeyUsageException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidMarkerException(ServiceException): + code: str = "InvalidMarkerException" + sender_fault: bool = False + status_code: int = 400 + + +class KMSInternalException(ServiceException): + code: str = "KMSInternalException" + sender_fault: bool = False + status_code: int = 400 + + +class KMSInvalidMacException(ServiceException): + code: str = "KMSInvalidMacException" + sender_fault: bool = False + status_code: int = 400 + + +class KMSInvalidSignatureException(ServiceException): + code: str = "KMSInvalidSignatureException" + sender_fault: bool = False + status_code: int = 400 + + +class KMSInvalidStateException(ServiceException): + code: str = "KMSInvalidStateException" + sender_fault: bool = False + status_code: int = 400 + + +class KeyUnavailableException(ServiceException): + code: str = "KeyUnavailableException" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MalformedPolicyDocumentException(ServiceException): + code: str = "MalformedPolicyDocumentException" + sender_fault: bool = False + status_code: int = 400 + + +class NotFoundException(ServiceException): + code: str = "NotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class TagException(ServiceException): + code: str = "TagException" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedOperationException(ServiceException): + code: str = "UnsupportedOperationException" + sender_fault: bool = False + status_code: int = 400 + + +class XksKeyAlreadyInUseException(ServiceException): + code: str = "XksKeyAlreadyInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class XksKeyInvalidConfigurationException(ServiceException): + code: str = "XksKeyInvalidConfigurationException" + sender_fault: bool = False + status_code: int = 400 + + +class XksKeyNotFoundException(ServiceException): + code: str = "XksKeyNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class XksProxyIncorrectAuthenticationCredentialException(ServiceException): + code: str = "XksProxyIncorrectAuthenticationCredentialException" + sender_fault: bool = False + status_code: int = 400 + + +class XksProxyInvalidConfigurationException(ServiceException): + code: str = "XksProxyInvalidConfigurationException" + sender_fault: bool = False + status_code: int = 400 + + +class XksProxyInvalidResponseException(ServiceException): + code: str = "XksProxyInvalidResponseException" + sender_fault: bool = False + status_code: int = 400 + + +class XksProxyUriEndpointInUseException(ServiceException): + code: str = "XksProxyUriEndpointInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class XksProxyUriInUseException(ServiceException): + code: str = "XksProxyUriInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class XksProxyUriUnreachableException(ServiceException): + code: str = "XksProxyUriUnreachableException" + sender_fault: bool = False + 
status_code: int = 400 + + +class XksProxyVpcEndpointServiceInUseException(ServiceException): + code: str = "XksProxyVpcEndpointServiceInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class XksProxyVpcEndpointServiceInvalidConfigurationException(ServiceException): + code: str = "XksProxyVpcEndpointServiceInvalidConfigurationException" + sender_fault: bool = False + status_code: int = 400 + + +class XksProxyVpcEndpointServiceNotFoundException(ServiceException): + code: str = "XksProxyVpcEndpointServiceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +DateType = datetime + + +class AliasListEntry(TypedDict, total=False): + AliasName: Optional[AliasNameType] + AliasArn: Optional[ArnType] + TargetKeyId: Optional[KeyIdType] + CreationDate: Optional[DateType] + LastUpdatedDate: Optional[DateType] + + +AliasList = List[AliasListEntry] +AttestationDocumentType = bytes + + +class CancelKeyDeletionRequest(ServiceRequest): + KeyId: KeyIdType + + +class CancelKeyDeletionResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + + +CiphertextType = bytes + + +class ConnectCustomKeyStoreRequest(ServiceRequest): + CustomKeyStoreId: CustomKeyStoreIdType + + +class ConnectCustomKeyStoreResponse(TypedDict, total=False): + pass + + +class CreateAliasRequest(ServiceRequest): + AliasName: AliasNameType + TargetKeyId: KeyIdType + + +class XksProxyAuthenticationCredentialType(TypedDict, total=False): + AccessKeyId: XksProxyAuthenticationAccessKeyIdType + RawSecretAccessKey: XksProxyAuthenticationRawSecretAccessKeyType + + +class CreateCustomKeyStoreRequest(ServiceRequest): + CustomKeyStoreName: CustomKeyStoreNameType + CloudHsmClusterId: Optional[CloudHsmClusterIdType] + TrustAnchorCertificate: Optional[TrustAnchorCertificateType] + KeyStorePassword: Optional[KeyStorePasswordType] + CustomKeyStoreType: Optional[CustomKeyStoreType] + XksProxyUriEndpoint: Optional[XksProxyUriEndpointType] + XksProxyUriPath: Optional[XksProxyUriPathType] + XksProxyVpcEndpointServiceName: Optional[XksProxyVpcEndpointServiceNameType] + XksProxyAuthenticationCredential: Optional[XksProxyAuthenticationCredentialType] + XksProxyConnectivity: Optional[XksProxyConnectivityType] + + +class CreateCustomKeyStoreResponse(TypedDict, total=False): + CustomKeyStoreId: Optional[CustomKeyStoreIdType] + + +GrantTokenList = List[GrantTokenType] +EncryptionContextType = Dict[EncryptionContextKey, EncryptionContextValue] + + +class GrantConstraints(TypedDict, total=False): + EncryptionContextSubset: Optional[EncryptionContextType] + EncryptionContextEquals: Optional[EncryptionContextType] + + +GrantOperationList = List[GrantOperation] + + +class CreateGrantRequest(ServiceRequest): + KeyId: KeyIdType + GranteePrincipal: PrincipalIdType + RetiringPrincipal: Optional[PrincipalIdType] + Operations: GrantOperationList + Constraints: Optional[GrantConstraints] + GrantTokens: Optional[GrantTokenList] + Name: Optional[GrantNameType] + DryRun: Optional[NullableBooleanType] + + +class CreateGrantResponse(TypedDict, total=False): + GrantToken: Optional[GrantTokenType] + GrantId: Optional[GrantIdType] + + +class Tag(TypedDict, total=False): + TagKey: TagKeyType + TagValue: TagValueType + + +TagList = List[Tag] + + +class CreateKeyRequest(ServiceRequest): + Policy: Optional[PolicyType] + Description: Optional[DescriptionType] + KeyUsage: Optional[KeyUsageType] + CustomerMasterKeySpec: Optional[CustomerMasterKeySpec] + KeySpec: Optional[KeySpec] + Origin: Optional[OriginType] + CustomKeyStoreId: 
Optional[CustomKeyStoreIdType] + BypassPolicyLockoutSafetyCheck: Optional[BooleanType] + Tags: Optional[TagList] + MultiRegion: Optional[NullableBooleanType] + XksKeyId: Optional[XksKeyIdType] + + +class XksKeyConfigurationType(TypedDict, total=False): + Id: Optional[XksKeyIdType] + + +MacAlgorithmSpecList = List[MacAlgorithmSpec] + + +class MultiRegionKey(TypedDict, total=False): + Arn: Optional[ArnType] + Region: Optional[RegionType] + + +MultiRegionKeyList = List[MultiRegionKey] + + +class MultiRegionConfiguration(TypedDict, total=False): + MultiRegionKeyType: Optional[MultiRegionKeyType] + PrimaryKey: Optional[MultiRegionKey] + ReplicaKeys: Optional[MultiRegionKeyList] + + +KeyAgreementAlgorithmSpecList = List[KeyAgreementAlgorithmSpec] +SigningAlgorithmSpecList = List[SigningAlgorithmSpec] +EncryptionAlgorithmSpecList = List[EncryptionAlgorithmSpec] + + +class KeyMetadata(TypedDict, total=False): + AWSAccountId: Optional[AWSAccountIdType] + KeyId: KeyIdType + Arn: Optional[ArnType] + CreationDate: Optional[DateType] + Enabled: Optional[BooleanType] + Description: Optional[DescriptionType] + KeyUsage: Optional[KeyUsageType] + KeyState: Optional[KeyState] + DeletionDate: Optional[DateType] + ValidTo: Optional[DateType] + Origin: Optional[OriginType] + CustomKeyStoreId: Optional[CustomKeyStoreIdType] + CloudHsmClusterId: Optional[CloudHsmClusterIdType] + ExpirationModel: Optional[ExpirationModelType] + KeyManager: Optional[KeyManagerType] + CustomerMasterKeySpec: Optional[CustomerMasterKeySpec] + KeySpec: Optional[KeySpec] + EncryptionAlgorithms: Optional[EncryptionAlgorithmSpecList] + SigningAlgorithms: Optional[SigningAlgorithmSpecList] + KeyAgreementAlgorithms: Optional[KeyAgreementAlgorithmSpecList] + MultiRegion: Optional[NullableBooleanType] + MultiRegionConfiguration: Optional[MultiRegionConfiguration] + PendingDeletionWindowInDays: Optional[PendingWindowInDaysType] + MacAlgorithms: Optional[MacAlgorithmSpecList] + XksKeyConfiguration: Optional[XksKeyConfigurationType] + + +class CreateKeyResponse(TypedDict, total=False): + KeyMetadata: Optional[KeyMetadata] + + +class XksProxyConfigurationType(TypedDict, total=False): + Connectivity: Optional[XksProxyConnectivityType] + AccessKeyId: Optional[XksProxyAuthenticationAccessKeyIdType] + UriEndpoint: Optional[XksProxyUriEndpointType] + UriPath: Optional[XksProxyUriPathType] + VpcEndpointServiceName: Optional[XksProxyVpcEndpointServiceNameType] + + +class CustomKeyStoresListEntry(TypedDict, total=False): + CustomKeyStoreId: Optional[CustomKeyStoreIdType] + CustomKeyStoreName: Optional[CustomKeyStoreNameType] + CloudHsmClusterId: Optional[CloudHsmClusterIdType] + TrustAnchorCertificate: Optional[TrustAnchorCertificateType] + ConnectionState: Optional[ConnectionStateType] + ConnectionErrorCode: Optional[ConnectionErrorCodeType] + CreationDate: Optional[DateType] + CustomKeyStoreType: Optional[CustomKeyStoreType] + XksProxyConfiguration: Optional[XksProxyConfigurationType] + + +CustomKeyStoresList = List[CustomKeyStoresListEntry] + + +class RecipientInfo(TypedDict, total=False): + KeyEncryptionAlgorithm: Optional[KeyEncryptionMechanism] + AttestationDocument: Optional[AttestationDocumentType] + + +class DecryptRequest(ServiceRequest): + CiphertextBlob: CiphertextType + EncryptionContext: Optional[EncryptionContextType] + GrantTokens: Optional[GrantTokenList] + KeyId: Optional[KeyIdType] + EncryptionAlgorithm: Optional[EncryptionAlgorithmSpec] + Recipient: Optional[RecipientInfo] + DryRun: Optional[NullableBooleanType] + + +PlaintextType = 
bytes + + +class DecryptResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + Plaintext: Optional[PlaintextType] + EncryptionAlgorithm: Optional[EncryptionAlgorithmSpec] + CiphertextForRecipient: Optional[CiphertextType] + + +class DeleteAliasRequest(ServiceRequest): + AliasName: AliasNameType + + +class DeleteCustomKeyStoreRequest(ServiceRequest): + CustomKeyStoreId: CustomKeyStoreIdType + + +class DeleteCustomKeyStoreResponse(TypedDict, total=False): + pass + + +class DeleteImportedKeyMaterialRequest(ServiceRequest): + KeyId: KeyIdType + + +PublicKeyType = bytes + + +class DeriveSharedSecretRequest(ServiceRequest): + KeyId: KeyIdType + KeyAgreementAlgorithm: KeyAgreementAlgorithmSpec + PublicKey: PublicKeyType + GrantTokens: Optional[GrantTokenList] + DryRun: Optional[NullableBooleanType] + Recipient: Optional[RecipientInfo] + + +class DeriveSharedSecretResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + SharedSecret: Optional[PlaintextType] + CiphertextForRecipient: Optional[CiphertextType] + KeyAgreementAlgorithm: Optional[KeyAgreementAlgorithmSpec] + KeyOrigin: Optional[OriginType] + + +class DescribeCustomKeyStoresRequest(ServiceRequest): + CustomKeyStoreId: Optional[CustomKeyStoreIdType] + CustomKeyStoreName: Optional[CustomKeyStoreNameType] + Limit: Optional[LimitType] + Marker: Optional[MarkerType] + + +class DescribeCustomKeyStoresResponse(TypedDict, total=False): + CustomKeyStores: Optional[CustomKeyStoresList] + NextMarker: Optional[MarkerType] + Truncated: Optional[BooleanType] + + +class DescribeKeyRequest(ServiceRequest): + KeyId: KeyIdType + GrantTokens: Optional[GrantTokenList] + + +class DescribeKeyResponse(TypedDict, total=False): + KeyMetadata: Optional[KeyMetadata] + + +class DisableKeyRequest(ServiceRequest): + KeyId: KeyIdType + + +class DisableKeyRotationRequest(ServiceRequest): + KeyId: KeyIdType + + +class DisconnectCustomKeyStoreRequest(ServiceRequest): + CustomKeyStoreId: CustomKeyStoreIdType + + +class DisconnectCustomKeyStoreResponse(TypedDict, total=False): + pass + + +class EnableKeyRequest(ServiceRequest): + KeyId: KeyIdType + + +class EnableKeyRotationRequest(ServiceRequest): + KeyId: KeyIdType + RotationPeriodInDays: Optional[RotationPeriodInDaysType] + + +class EncryptRequest(ServiceRequest): + KeyId: KeyIdType + Plaintext: PlaintextType + EncryptionContext: Optional[EncryptionContextType] + GrantTokens: Optional[GrantTokenList] + EncryptionAlgorithm: Optional[EncryptionAlgorithmSpec] + DryRun: Optional[NullableBooleanType] + + +class EncryptResponse(TypedDict, total=False): + CiphertextBlob: Optional[CiphertextType] + KeyId: Optional[KeyIdType] + EncryptionAlgorithm: Optional[EncryptionAlgorithmSpec] + + +class GenerateDataKeyPairRequest(ServiceRequest): + EncryptionContext: Optional[EncryptionContextType] + KeyId: KeyIdType + KeyPairSpec: DataKeyPairSpec + GrantTokens: Optional[GrantTokenList] + Recipient: Optional[RecipientInfo] + DryRun: Optional[NullableBooleanType] + + +class GenerateDataKeyPairResponse(TypedDict, total=False): + PrivateKeyCiphertextBlob: Optional[CiphertextType] + PrivateKeyPlaintext: Optional[PlaintextType] + PublicKey: Optional[PublicKeyType] + KeyId: Optional[KeyIdType] + KeyPairSpec: Optional[DataKeyPairSpec] + CiphertextForRecipient: Optional[CiphertextType] + + +class GenerateDataKeyPairWithoutPlaintextRequest(ServiceRequest): + EncryptionContext: Optional[EncryptionContextType] + KeyId: KeyIdType + KeyPairSpec: DataKeyPairSpec + GrantTokens: Optional[GrantTokenList] + DryRun: 
Optional[NullableBooleanType] + + +class GenerateDataKeyPairWithoutPlaintextResponse(TypedDict, total=False): + PrivateKeyCiphertextBlob: Optional[CiphertextType] + PublicKey: Optional[PublicKeyType] + KeyId: Optional[KeyIdType] + KeyPairSpec: Optional[DataKeyPairSpec] + + +class GenerateDataKeyRequest(ServiceRequest): + KeyId: KeyIdType + EncryptionContext: Optional[EncryptionContextType] + NumberOfBytes: Optional[NumberOfBytesType] + KeySpec: Optional[DataKeySpec] + GrantTokens: Optional[GrantTokenList] + Recipient: Optional[RecipientInfo] + DryRun: Optional[NullableBooleanType] + + +class GenerateDataKeyResponse(TypedDict, total=False): + CiphertextBlob: Optional[CiphertextType] + Plaintext: Optional[PlaintextType] + KeyId: Optional[KeyIdType] + CiphertextForRecipient: Optional[CiphertextType] + + +class GenerateDataKeyWithoutPlaintextRequest(ServiceRequest): + KeyId: KeyIdType + EncryptionContext: Optional[EncryptionContextType] + KeySpec: Optional[DataKeySpec] + NumberOfBytes: Optional[NumberOfBytesType] + GrantTokens: Optional[GrantTokenList] + DryRun: Optional[NullableBooleanType] + + +class GenerateDataKeyWithoutPlaintextResponse(TypedDict, total=False): + CiphertextBlob: Optional[CiphertextType] + KeyId: Optional[KeyIdType] + + +class GenerateMacRequest(ServiceRequest): + Message: PlaintextType + KeyId: KeyIdType + MacAlgorithm: MacAlgorithmSpec + GrantTokens: Optional[GrantTokenList] + DryRun: Optional[NullableBooleanType] + + +class GenerateMacResponse(TypedDict, total=False): + Mac: Optional[CiphertextType] + MacAlgorithm: Optional[MacAlgorithmSpec] + KeyId: Optional[KeyIdType] + + +class GenerateRandomRequest(ServiceRequest): + NumberOfBytes: Optional[NumberOfBytesType] + CustomKeyStoreId: Optional[CustomKeyStoreIdType] + Recipient: Optional[RecipientInfo] + + +class GenerateRandomResponse(TypedDict, total=False): + Plaintext: Optional[PlaintextType] + CiphertextForRecipient: Optional[CiphertextType] + + +class GetKeyPolicyRequest(ServiceRequest): + KeyId: KeyIdType + PolicyName: Optional[PolicyNameType] + + +class GetKeyPolicyResponse(TypedDict, total=False): + Policy: Optional[PolicyType] + PolicyName: Optional[PolicyNameType] + + +class GetKeyRotationStatusRequest(ServiceRequest): + KeyId: KeyIdType + + +class GetKeyRotationStatusResponse(TypedDict, total=False): + KeyRotationEnabled: Optional[BooleanType] + KeyId: Optional[KeyIdType] + RotationPeriodInDays: Optional[RotationPeriodInDaysType] + NextRotationDate: Optional[DateType] + OnDemandRotationStartDate: Optional[DateType] + + +class GetParametersForImportRequest(ServiceRequest): + KeyId: KeyIdType + WrappingAlgorithm: AlgorithmSpec + WrappingKeySpec: WrappingKeySpec + + +class GetParametersForImportResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + ImportToken: Optional[CiphertextType] + PublicKey: Optional[PlaintextType] + ParametersValidTo: Optional[DateType] + + +class GetPublicKeyRequest(ServiceRequest): + KeyId: KeyIdType + GrantTokens: Optional[GrantTokenList] + + +class GetPublicKeyResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + PublicKey: Optional[PublicKeyType] + CustomerMasterKeySpec: Optional[CustomerMasterKeySpec] + KeySpec: Optional[KeySpec] + KeyUsage: Optional[KeyUsageType] + EncryptionAlgorithms: Optional[EncryptionAlgorithmSpecList] + SigningAlgorithms: Optional[SigningAlgorithmSpecList] + KeyAgreementAlgorithms: Optional[KeyAgreementAlgorithmSpecList] + + +class GrantListEntry(TypedDict, total=False): + KeyId: Optional[KeyIdType] + GrantId: Optional[GrantIdType] + Name: 
Optional[GrantNameType] + CreationDate: Optional[DateType] + GranteePrincipal: Optional[PrincipalIdType] + RetiringPrincipal: Optional[PrincipalIdType] + IssuingAccount: Optional[PrincipalIdType] + Operations: Optional[GrantOperationList] + Constraints: Optional[GrantConstraints] + + +GrantList = List[GrantListEntry] + + +class ImportKeyMaterialRequest(ServiceRequest): + KeyId: KeyIdType + ImportToken: CiphertextType + EncryptedKeyMaterial: CiphertextType + ValidTo: Optional[DateType] + ExpirationModel: Optional[ExpirationModelType] + + +class ImportKeyMaterialResponse(TypedDict, total=False): + pass + + +class KeyListEntry(TypedDict, total=False): + KeyId: Optional[KeyIdType] + KeyArn: Optional[ArnType] + + +KeyList = List[KeyListEntry] + + +class ListAliasesRequest(ServiceRequest): + KeyId: Optional[KeyIdType] + Limit: Optional[LimitType] + Marker: Optional[MarkerType] + + +class ListAliasesResponse(TypedDict, total=False): + Aliases: Optional[AliasList] + NextMarker: Optional[MarkerType] + Truncated: Optional[BooleanType] + + +class ListGrantsRequest(ServiceRequest): + Limit: Optional[LimitType] + Marker: Optional[MarkerType] + KeyId: KeyIdType + GrantId: Optional[GrantIdType] + GranteePrincipal: Optional[PrincipalIdType] + + +class ListGrantsResponse(TypedDict, total=False): + Grants: Optional[GrantList] + NextMarker: Optional[MarkerType] + Truncated: Optional[BooleanType] + + +class ListKeyPoliciesRequest(ServiceRequest): + KeyId: KeyIdType + Limit: Optional[LimitType] + Marker: Optional[MarkerType] + + +PolicyNameList = List[PolicyNameType] + + +class ListKeyPoliciesResponse(TypedDict, total=False): + PolicyNames: Optional[PolicyNameList] + NextMarker: Optional[MarkerType] + Truncated: Optional[BooleanType] + + +class ListKeyRotationsRequest(ServiceRequest): + KeyId: KeyIdType + Limit: Optional[LimitType] + Marker: Optional[MarkerType] + + +class RotationsListEntry(TypedDict, total=False): + KeyId: Optional[KeyIdType] + RotationDate: Optional[DateType] + RotationType: Optional[RotationType] + + +RotationsList = List[RotationsListEntry] + + +class ListKeyRotationsResponse(TypedDict, total=False): + Rotations: Optional[RotationsList] + NextMarker: Optional[MarkerType] + Truncated: Optional[BooleanType] + + +class ListKeysRequest(ServiceRequest): + Limit: Optional[LimitType] + Marker: Optional[MarkerType] + + +class ListKeysResponse(TypedDict, total=False): + Keys: Optional[KeyList] + NextMarker: Optional[MarkerType] + Truncated: Optional[BooleanType] + + +class ListResourceTagsRequest(ServiceRequest): + KeyId: KeyIdType + Limit: Optional[LimitType] + Marker: Optional[MarkerType] + + +class ListResourceTagsResponse(TypedDict, total=False): + Tags: Optional[TagList] + NextMarker: Optional[MarkerType] + Truncated: Optional[BooleanType] + + +class ListRetirableGrantsRequest(ServiceRequest): + Limit: Optional[LimitType] + Marker: Optional[MarkerType] + RetiringPrincipal: PrincipalIdType + + +class PutKeyPolicyRequest(ServiceRequest): + KeyId: KeyIdType + PolicyName: Optional[PolicyNameType] + Policy: PolicyType + BypassPolicyLockoutSafetyCheck: Optional[BooleanType] + + +class ReEncryptRequest(ServiceRequest): + CiphertextBlob: CiphertextType + SourceEncryptionContext: Optional[EncryptionContextType] + SourceKeyId: Optional[KeyIdType] + DestinationKeyId: KeyIdType + DestinationEncryptionContext: Optional[EncryptionContextType] + SourceEncryptionAlgorithm: Optional[EncryptionAlgorithmSpec] + DestinationEncryptionAlgorithm: Optional[EncryptionAlgorithmSpec] + GrantTokens: 
Optional[GrantTokenList] + DryRun: Optional[NullableBooleanType] + + +class ReEncryptResponse(TypedDict, total=False): + CiphertextBlob: Optional[CiphertextType] + SourceKeyId: Optional[KeyIdType] + KeyId: Optional[KeyIdType] + SourceEncryptionAlgorithm: Optional[EncryptionAlgorithmSpec] + DestinationEncryptionAlgorithm: Optional[EncryptionAlgorithmSpec] + + +class ReplicateKeyRequest(ServiceRequest): + KeyId: KeyIdType + ReplicaRegion: RegionType + Policy: Optional[PolicyType] + BypassPolicyLockoutSafetyCheck: Optional[BooleanType] + Description: Optional[DescriptionType] + Tags: Optional[TagList] + + +class ReplicateKeyResponse(TypedDict, total=False): + ReplicaKeyMetadata: Optional[KeyMetadata] + ReplicaPolicy: Optional[PolicyType] + ReplicaTags: Optional[TagList] + + +class RetireGrantRequest(ServiceRequest): + GrantToken: Optional[GrantTokenType] + KeyId: Optional[KeyIdType] + GrantId: Optional[GrantIdType] + DryRun: Optional[NullableBooleanType] + + +class RevokeGrantRequest(ServiceRequest): + KeyId: KeyIdType + GrantId: GrantIdType + DryRun: Optional[NullableBooleanType] + + +class RotateKeyOnDemandRequest(ServiceRequest): + KeyId: KeyIdType + + +class RotateKeyOnDemandResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + + +class ScheduleKeyDeletionRequest(ServiceRequest): + KeyId: KeyIdType + PendingWindowInDays: Optional[PendingWindowInDaysType] + + +class ScheduleKeyDeletionResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + DeletionDate: Optional[DateType] + KeyState: Optional[KeyState] + PendingWindowInDays: Optional[PendingWindowInDaysType] + + +class SignRequest(ServiceRequest): + KeyId: KeyIdType + Message: PlaintextType + MessageType: Optional[MessageType] + GrantTokens: Optional[GrantTokenList] + SigningAlgorithm: SigningAlgorithmSpec + DryRun: Optional[NullableBooleanType] + + +class SignResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + Signature: Optional[CiphertextType] + SigningAlgorithm: Optional[SigningAlgorithmSpec] + + +TagKeyList = List[TagKeyType] + + +class TagResourceRequest(ServiceRequest): + KeyId: KeyIdType + Tags: TagList + + +class UntagResourceRequest(ServiceRequest): + KeyId: KeyIdType + TagKeys: TagKeyList + + +class UpdateAliasRequest(ServiceRequest): + AliasName: AliasNameType + TargetKeyId: KeyIdType + + +class UpdateCustomKeyStoreRequest(ServiceRequest): + CustomKeyStoreId: CustomKeyStoreIdType + NewCustomKeyStoreName: Optional[CustomKeyStoreNameType] + KeyStorePassword: Optional[KeyStorePasswordType] + CloudHsmClusterId: Optional[CloudHsmClusterIdType] + XksProxyUriEndpoint: Optional[XksProxyUriEndpointType] + XksProxyUriPath: Optional[XksProxyUriPathType] + XksProxyVpcEndpointServiceName: Optional[XksProxyVpcEndpointServiceNameType] + XksProxyAuthenticationCredential: Optional[XksProxyAuthenticationCredentialType] + XksProxyConnectivity: Optional[XksProxyConnectivityType] + + +class UpdateCustomKeyStoreResponse(TypedDict, total=False): + pass + + +class UpdateKeyDescriptionRequest(ServiceRequest): + KeyId: KeyIdType + Description: DescriptionType + + +class UpdatePrimaryRegionRequest(ServiceRequest): + KeyId: KeyIdType + PrimaryRegion: RegionType + + +class VerifyMacRequest(ServiceRequest): + Message: PlaintextType + KeyId: KeyIdType + MacAlgorithm: MacAlgorithmSpec + Mac: CiphertextType + GrantTokens: Optional[GrantTokenList] + DryRun: Optional[NullableBooleanType] + + +class VerifyMacResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + MacValid: Optional[BooleanType] + MacAlgorithm: 
Optional[MacAlgorithmSpec] + + +class VerifyRequest(ServiceRequest): + KeyId: KeyIdType + Message: PlaintextType + MessageType: Optional[MessageType] + Signature: CiphertextType + SigningAlgorithm: SigningAlgorithmSpec + GrantTokens: Optional[GrantTokenList] + DryRun: Optional[NullableBooleanType] + + +class VerifyResponse(TypedDict, total=False): + KeyId: Optional[KeyIdType] + SignatureValid: Optional[BooleanType] + SigningAlgorithm: Optional[SigningAlgorithmSpec] + + +class KmsApi: + service = "kms" + version = "2014-11-01" + + @handler("CancelKeyDeletion") + def cancel_key_deletion( + self, context: RequestContext, key_id: KeyIdType, **kwargs + ) -> CancelKeyDeletionResponse: + raise NotImplementedError + + @handler("ConnectCustomKeyStore") + def connect_custom_key_store( + self, context: RequestContext, custom_key_store_id: CustomKeyStoreIdType, **kwargs + ) -> ConnectCustomKeyStoreResponse: + raise NotImplementedError + + @handler("CreateAlias") + def create_alias( + self, context: RequestContext, alias_name: AliasNameType, target_key_id: KeyIdType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("CreateCustomKeyStore") + def create_custom_key_store( + self, + context: RequestContext, + custom_key_store_name: CustomKeyStoreNameType, + cloud_hsm_cluster_id: CloudHsmClusterIdType | None = None, + trust_anchor_certificate: TrustAnchorCertificateType | None = None, + key_store_password: KeyStorePasswordType | None = None, + custom_key_store_type: CustomKeyStoreType | None = None, + xks_proxy_uri_endpoint: XksProxyUriEndpointType | None = None, + xks_proxy_uri_path: XksProxyUriPathType | None = None, + xks_proxy_vpc_endpoint_service_name: XksProxyVpcEndpointServiceNameType | None = None, + xks_proxy_authentication_credential: XksProxyAuthenticationCredentialType | None = None, + xks_proxy_connectivity: XksProxyConnectivityType | None = None, + **kwargs, + ) -> CreateCustomKeyStoreResponse: + raise NotImplementedError + + @handler("CreateGrant") + def create_grant( + self, + context: RequestContext, + key_id: KeyIdType, + grantee_principal: PrincipalIdType, + operations: GrantOperationList, + retiring_principal: PrincipalIdType | None = None, + constraints: GrantConstraints | None = None, + grant_tokens: GrantTokenList | None = None, + name: GrantNameType | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> CreateGrantResponse: + raise NotImplementedError + + @handler("CreateKey") + def create_key( + self, + context: RequestContext, + policy: PolicyType | None = None, + description: DescriptionType | None = None, + key_usage: KeyUsageType | None = None, + customer_master_key_spec: CustomerMasterKeySpec | None = None, + key_spec: KeySpec | None = None, + origin: OriginType | None = None, + custom_key_store_id: CustomKeyStoreIdType | None = None, + bypass_policy_lockout_safety_check: BooleanType | None = None, + tags: TagList | None = None, + multi_region: NullableBooleanType | None = None, + xks_key_id: XksKeyIdType | None = None, + **kwargs, + ) -> CreateKeyResponse: + raise NotImplementedError + + @handler("Decrypt") + def decrypt( + self, + context: RequestContext, + ciphertext_blob: CiphertextType, + encryption_context: EncryptionContextType | None = None, + grant_tokens: GrantTokenList | None = None, + key_id: KeyIdType | None = None, + encryption_algorithm: EncryptionAlgorithmSpec | None = None, + recipient: RecipientInfo | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> DecryptResponse: + raise NotImplementedError 
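+    # Editorial sketch (hypothetical; not part of the generated file): the
+    # dispatch layer is expected to translate the CamelCase members of the
+    # request shapes defined above (e.g. DecryptRequest.CiphertextBlob) into
+    # the snake_case keyword arguments of these methods, so an override deals
+    # only in plain Python values. A toy Decrypt override, with KmsProvider
+    # and the echoed plaintext as illustrative assumptions rather than real
+    # KMS cryptography, could look like:
+    #
+    #     class KmsProvider(KmsApi):
+    #         def decrypt(
+    #             self,
+    #             context: RequestContext,
+    #             ciphertext_blob: CiphertextType,
+    #             **kwargs,
+    #         ) -> DecryptResponse:
+    #             # DecryptResponse is a TypedDict; set only the known fields.
+    #             return DecryptResponse(
+    #                 KeyId="hypothetical-key-id",
+    #                 Plaintext=ciphertext_blob,
+    #             )
+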
+ + @handler("DeleteAlias") + def delete_alias(self, context: RequestContext, alias_name: AliasNameType, **kwargs) -> None: + raise NotImplementedError + + @handler("DeleteCustomKeyStore") + def delete_custom_key_store( + self, context: RequestContext, custom_key_store_id: CustomKeyStoreIdType, **kwargs + ) -> DeleteCustomKeyStoreResponse: + raise NotImplementedError + + @handler("DeleteImportedKeyMaterial") + def delete_imported_key_material( + self, context: RequestContext, key_id: KeyIdType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeriveSharedSecret") + def derive_shared_secret( + self, + context: RequestContext, + key_id: KeyIdType, + key_agreement_algorithm: KeyAgreementAlgorithmSpec, + public_key: PublicKeyType, + grant_tokens: GrantTokenList | None = None, + dry_run: NullableBooleanType | None = None, + recipient: RecipientInfo | None = None, + **kwargs, + ) -> DeriveSharedSecretResponse: + raise NotImplementedError + + @handler("DescribeCustomKeyStores") + def describe_custom_key_stores( + self, + context: RequestContext, + custom_key_store_id: CustomKeyStoreIdType | None = None, + custom_key_store_name: CustomKeyStoreNameType | None = None, + limit: LimitType | None = None, + marker: MarkerType | None = None, + **kwargs, + ) -> DescribeCustomKeyStoresResponse: + raise NotImplementedError + + @handler("DescribeKey") + def describe_key( + self, + context: RequestContext, + key_id: KeyIdType, + grant_tokens: GrantTokenList | None = None, + **kwargs, + ) -> DescribeKeyResponse: + raise NotImplementedError + + @handler("DisableKey") + def disable_key(self, context: RequestContext, key_id: KeyIdType, **kwargs) -> None: + raise NotImplementedError + + @handler("DisableKeyRotation") + def disable_key_rotation(self, context: RequestContext, key_id: KeyIdType, **kwargs) -> None: + raise NotImplementedError + + @handler("DisconnectCustomKeyStore") + def disconnect_custom_key_store( + self, context: RequestContext, custom_key_store_id: CustomKeyStoreIdType, **kwargs + ) -> DisconnectCustomKeyStoreResponse: + raise NotImplementedError + + @handler("EnableKey") + def enable_key(self, context: RequestContext, key_id: KeyIdType, **kwargs) -> None: + raise NotImplementedError + + @handler("EnableKeyRotation") + def enable_key_rotation( + self, + context: RequestContext, + key_id: KeyIdType, + rotation_period_in_days: RotationPeriodInDaysType | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("Encrypt") + def encrypt( + self, + context: RequestContext, + key_id: KeyIdType, + plaintext: PlaintextType, + encryption_context: EncryptionContextType | None = None, + grant_tokens: GrantTokenList | None = None, + encryption_algorithm: EncryptionAlgorithmSpec | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> EncryptResponse: + raise NotImplementedError + + @handler("GenerateDataKey") + def generate_data_key( + self, + context: RequestContext, + key_id: KeyIdType, + encryption_context: EncryptionContextType | None = None, + number_of_bytes: NumberOfBytesType | None = None, + key_spec: DataKeySpec | None = None, + grant_tokens: GrantTokenList | None = None, + recipient: RecipientInfo | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> GenerateDataKeyResponse: + raise NotImplementedError + + @handler("GenerateDataKeyPair") + def generate_data_key_pair( + self, + context: RequestContext, + key_id: KeyIdType, + key_pair_spec: DataKeyPairSpec, + encryption_context: EncryptionContextType | None = None, + 
grant_tokens: GrantTokenList | None = None, + recipient: RecipientInfo | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> GenerateDataKeyPairResponse: + raise NotImplementedError + + @handler("GenerateDataKeyPairWithoutPlaintext") + def generate_data_key_pair_without_plaintext( + self, + context: RequestContext, + key_id: KeyIdType, + key_pair_spec: DataKeyPairSpec, + encryption_context: EncryptionContextType | None = None, + grant_tokens: GrantTokenList | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> GenerateDataKeyPairWithoutPlaintextResponse: + raise NotImplementedError + + @handler("GenerateDataKeyWithoutPlaintext") + def generate_data_key_without_plaintext( + self, + context: RequestContext, + key_id: KeyIdType, + encryption_context: EncryptionContextType | None = None, + key_spec: DataKeySpec | None = None, + number_of_bytes: NumberOfBytesType | None = None, + grant_tokens: GrantTokenList | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> GenerateDataKeyWithoutPlaintextResponse: + raise NotImplementedError + + @handler("GenerateMac") + def generate_mac( + self, + context: RequestContext, + message: PlaintextType, + key_id: KeyIdType, + mac_algorithm: MacAlgorithmSpec, + grant_tokens: GrantTokenList | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> GenerateMacResponse: + raise NotImplementedError + + @handler("GenerateRandom") + def generate_random( + self, + context: RequestContext, + number_of_bytes: NumberOfBytesType | None = None, + custom_key_store_id: CustomKeyStoreIdType | None = None, + recipient: RecipientInfo | None = None, + **kwargs, + ) -> GenerateRandomResponse: + raise NotImplementedError + + @handler("GetKeyPolicy") + def get_key_policy( + self, + context: RequestContext, + key_id: KeyIdType, + policy_name: PolicyNameType | None = None, + **kwargs, + ) -> GetKeyPolicyResponse: + raise NotImplementedError + + @handler("GetKeyRotationStatus") + def get_key_rotation_status( + self, context: RequestContext, key_id: KeyIdType, **kwargs + ) -> GetKeyRotationStatusResponse: + raise NotImplementedError + + @handler("GetParametersForImport") + def get_parameters_for_import( + self, + context: RequestContext, + key_id: KeyIdType, + wrapping_algorithm: AlgorithmSpec, + wrapping_key_spec: WrappingKeySpec, + **kwargs, + ) -> GetParametersForImportResponse: + raise NotImplementedError + + @handler("GetPublicKey") + def get_public_key( + self, + context: RequestContext, + key_id: KeyIdType, + grant_tokens: GrantTokenList | None = None, + **kwargs, + ) -> GetPublicKeyResponse: + raise NotImplementedError + + @handler("ImportKeyMaterial") + def import_key_material( + self, + context: RequestContext, + key_id: KeyIdType, + import_token: CiphertextType, + encrypted_key_material: CiphertextType, + valid_to: DateType | None = None, + expiration_model: ExpirationModelType | None = None, + **kwargs, + ) -> ImportKeyMaterialResponse: + raise NotImplementedError + + @handler("ListAliases") + def list_aliases( + self, + context: RequestContext, + key_id: KeyIdType | None = None, + limit: LimitType | None = None, + marker: MarkerType | None = None, + **kwargs, + ) -> ListAliasesResponse: + raise NotImplementedError + + @handler("ListGrants") + def list_grants( + self, + context: RequestContext, + key_id: KeyIdType, + limit: LimitType | None = None, + marker: MarkerType | None = None, + grant_id: GrantIdType | None = None, + grantee_principal: PrincipalIdType | None = None, + 
**kwargs, + ) -> ListGrantsResponse: + raise NotImplementedError + + @handler("ListKeyPolicies") + def list_key_policies( + self, + context: RequestContext, + key_id: KeyIdType, + limit: LimitType | None = None, + marker: MarkerType | None = None, + **kwargs, + ) -> ListKeyPoliciesResponse: + raise NotImplementedError + + @handler("ListKeyRotations") + def list_key_rotations( + self, + context: RequestContext, + key_id: KeyIdType, + limit: LimitType | None = None, + marker: MarkerType | None = None, + **kwargs, + ) -> ListKeyRotationsResponse: + raise NotImplementedError + + @handler("ListKeys") + def list_keys( + self, + context: RequestContext, + limit: LimitType | None = None, + marker: MarkerType | None = None, + **kwargs, + ) -> ListKeysResponse: + raise NotImplementedError + + @handler("ListResourceTags") + def list_resource_tags( + self, + context: RequestContext, + key_id: KeyIdType, + limit: LimitType | None = None, + marker: MarkerType | None = None, + **kwargs, + ) -> ListResourceTagsResponse: + raise NotImplementedError + + @handler("ListRetirableGrants") + def list_retirable_grants( + self, + context: RequestContext, + retiring_principal: PrincipalIdType, + limit: LimitType | None = None, + marker: MarkerType | None = None, + **kwargs, + ) -> ListGrantsResponse: + raise NotImplementedError + + @handler("PutKeyPolicy") + def put_key_policy( + self, + context: RequestContext, + key_id: KeyIdType, + policy: PolicyType, + policy_name: PolicyNameType | None = None, + bypass_policy_lockout_safety_check: BooleanType | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ReEncrypt") + def re_encrypt( + self, + context: RequestContext, + ciphertext_blob: CiphertextType, + destination_key_id: KeyIdType, + source_encryption_context: EncryptionContextType | None = None, + source_key_id: KeyIdType | None = None, + destination_encryption_context: EncryptionContextType | None = None, + source_encryption_algorithm: EncryptionAlgorithmSpec | None = None, + destination_encryption_algorithm: EncryptionAlgorithmSpec | None = None, + grant_tokens: GrantTokenList | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> ReEncryptResponse: + raise NotImplementedError + + @handler("ReplicateKey") + def replicate_key( + self, + context: RequestContext, + key_id: KeyIdType, + replica_region: RegionType, + policy: PolicyType | None = None, + bypass_policy_lockout_safety_check: BooleanType | None = None, + description: DescriptionType | None = None, + tags: TagList | None = None, + **kwargs, + ) -> ReplicateKeyResponse: + raise NotImplementedError + + @handler("RetireGrant") + def retire_grant( + self, + context: RequestContext, + grant_token: GrantTokenType | None = None, + key_id: KeyIdType | None = None, + grant_id: GrantIdType | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RevokeGrant") + def revoke_grant( + self, + context: RequestContext, + key_id: KeyIdType, + grant_id: GrantIdType, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RotateKeyOnDemand") + def rotate_key_on_demand( + self, context: RequestContext, key_id: KeyIdType, **kwargs + ) -> RotateKeyOnDemandResponse: + raise NotImplementedError + + @handler("ScheduleKeyDeletion") + def schedule_key_deletion( + self, + context: RequestContext, + key_id: KeyIdType, + pending_window_in_days: PendingWindowInDaysType | None = None, + **kwargs, + ) -> 
ScheduleKeyDeletionResponse: + raise NotImplementedError + + @handler("Sign") + def sign( + self, + context: RequestContext, + key_id: KeyIdType, + message: PlaintextType, + signing_algorithm: SigningAlgorithmSpec, + message_type: MessageType | None = None, + grant_tokens: GrantTokenList | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> SignResponse: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, key_id: KeyIdType, tags: TagList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, key_id: KeyIdType, tag_keys: TagKeyList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UpdateAlias") + def update_alias( + self, context: RequestContext, alias_name: AliasNameType, target_key_id: KeyIdType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UpdateCustomKeyStore") + def update_custom_key_store( + self, + context: RequestContext, + custom_key_store_id: CustomKeyStoreIdType, + new_custom_key_store_name: CustomKeyStoreNameType | None = None, + key_store_password: KeyStorePasswordType | None = None, + cloud_hsm_cluster_id: CloudHsmClusterIdType | None = None, + xks_proxy_uri_endpoint: XksProxyUriEndpointType | None = None, + xks_proxy_uri_path: XksProxyUriPathType | None = None, + xks_proxy_vpc_endpoint_service_name: XksProxyVpcEndpointServiceNameType | None = None, + xks_proxy_authentication_credential: XksProxyAuthenticationCredentialType | None = None, + xks_proxy_connectivity: XksProxyConnectivityType | None = None, + **kwargs, + ) -> UpdateCustomKeyStoreResponse: + raise NotImplementedError + + @handler("UpdateKeyDescription") + def update_key_description( + self, context: RequestContext, key_id: KeyIdType, description: DescriptionType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UpdatePrimaryRegion") + def update_primary_region( + self, context: RequestContext, key_id: KeyIdType, primary_region: RegionType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("Verify") + def verify( + self, + context: RequestContext, + key_id: KeyIdType, + message: PlaintextType, + signature: CiphertextType, + signing_algorithm: SigningAlgorithmSpec, + message_type: MessageType | None = None, + grant_tokens: GrantTokenList | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> VerifyResponse: + raise NotImplementedError + + @handler("VerifyMac") + def verify_mac( + self, + context: RequestContext, + message: PlaintextType, + key_id: KeyIdType, + mac_algorithm: MacAlgorithmSpec, + mac: CiphertextType, + grant_tokens: GrantTokenList | None = None, + dry_run: NullableBooleanType | None = None, + **kwargs, + ) -> VerifyMacResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/lambda_/__init__.py b/localstack-core/localstack/aws/api/lambda_/__init__.py new file mode 100644 index 0000000000000..178a1609135a9 --- /dev/null +++ b/localstack-core/localstack/aws/api/lambda_/__init__.py @@ -0,0 +1,2601 @@ +from datetime import datetime +from enum import StrEnum +from typing import IO, Dict, Iterable, Iterator, List, Optional, TypedDict, Union + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +Action = str +AdditionalVersion = str +Alias = str +AllowCredentials = bool +Arn = str +BatchSize = int +BisectBatchOnFunctionError = bool +Boolean = bool +CodeSigningConfigArn = str +CodeSigningConfigId 
= str
+CollectionName = str
+DatabaseName = str
+Description = str
+DestinationArn = str
+Enabled = bool
+Endpoint = str
+EnvironmentVariableName = str
+EnvironmentVariableValue = str
+EphemeralStorageSize = int
+EventSourceMappingArn = str
+EventSourceToken = str
+FileSystemArn = str
+FilterCriteriaErrorCode = str
+FilterCriteriaErrorMessage = str
+FunctionArn = str
+FunctionName = str
+FunctionUrl = str
+FunctionUrlQualifier = str
+Handler = str
+Header = str
+HttpStatus = int
+Integer = int
+KMSKeyArn = str
+LastUpdateStatusReason = str
+LayerArn = str
+LayerName = str
+LayerPermissionAllowedAction = str
+LayerPermissionAllowedPrincipal = str
+LayerVersionArn = str
+LicenseInfo = str
+LocalMountPath = str
+LogGroup = str
+MasterRegion = str
+MaxAge = int
+MaxFunctionEventInvokeConfigListItems = int
+MaxItems = int
+MaxLayerListItems = int
+MaxListItems = int
+MaxProvisionedConcurrencyConfigListItems = int
+MaximumBatchingWindowInSeconds = int
+MaximumConcurrency = int
+MaximumEventAgeInSeconds = int
+MaximumNumberOfPollers = int
+MaximumRecordAgeInSeconds = int
+MaximumRetryAttempts = int
+MaximumRetryAttemptsEventSourceMapping = int
+MemorySize = int
+Method = str
+MinimumNumberOfPollers = int
+NameSpacedFunctionArn = str
+NamespacedFunctionName = str
+NamespacedStatementId = str
+NonNegativeInteger = int
+NullableBoolean = bool
+OrganizationId = str
+Origin = str
+ParallelizationFactor = int
+Pattern = str
+PositiveInteger = int
+Principal = str
+PrincipalOrgID = str
+Qualifier = str
+Queue = str
+ReservedConcurrentExecutions = int
+ResourceArn = str
+RoleArn = str
+RuntimeVersionArn = str
+S3Bucket = str
+S3Key = str
+S3ObjectVersion = str
+SecurityGroupId = str
+SensitiveString = str
+SourceOwner = str
+StateReason = str
+StatementId = str
+String = str
+SubnetId = str
+TagKey = str
+TagValue = str
+TaggableResource = str
+TagsErrorCode = str
+TagsErrorMessage = str
+Timeout = int
+Timestamp = str
+Topic = str
+TumblingWindowInSeconds = int
+URI = str
+UnqualifiedFunctionName = str
+UnreservedConcurrentExecutions = int
+Version = str
+VpcId = str
+Weight = float
+WorkingDirectory = str
+
+
+class ApplicationLogLevel(StrEnum):
+    TRACE = "TRACE"
+    DEBUG = "DEBUG"
+    INFO = "INFO"
+    WARN = "WARN"
+    ERROR = "ERROR"
+    FATAL = "FATAL"
+
+
+class Architecture(StrEnum):
+    x86_64 = "x86_64"
+    arm64 = "arm64"
+
+
+class CodeSigningPolicy(StrEnum):
+    Warn = "Warn"
+    Enforce = "Enforce"
+
+
+class EndPointType(StrEnum):
+    KAFKA_BOOTSTRAP_SERVERS = "KAFKA_BOOTSTRAP_SERVERS"
+
+
+class EventSourceMappingMetric(StrEnum):
+    EventCount = "EventCount"
+
+
+class EventSourcePosition(StrEnum):
+    TRIM_HORIZON = "TRIM_HORIZON"
+    LATEST = "LATEST"
+    AT_TIMESTAMP = "AT_TIMESTAMP"
+
+
+class FullDocument(StrEnum):
+    UpdateLookup = "UpdateLookup"
+    Default = "Default"
+
+
+class FunctionResponseType(StrEnum):
+    ReportBatchItemFailures = "ReportBatchItemFailures"
+
+
+class FunctionUrlAuthType(StrEnum):
+    NONE = "NONE"
+    AWS_IAM = "AWS_IAM"
+
+
+class FunctionVersion(StrEnum):
+    ALL = "ALL"
+
+
+class InvocationType(StrEnum):
+    Event = "Event"
+    RequestResponse = "RequestResponse"
+    DryRun = "DryRun"
+
+
+class InvokeMode(StrEnum):
+    BUFFERED = "BUFFERED"
+    RESPONSE_STREAM = "RESPONSE_STREAM"
+
+
+class LastUpdateStatus(StrEnum):
+    Successful = "Successful"
+    Failed = "Failed"
+    InProgress = "InProgress"
+
+
+class LastUpdateStatusReasonCode(StrEnum):
+    EniLimitExceeded = "EniLimitExceeded"
+    InsufficientRolePermissions = "InsufficientRolePermissions"
+    InvalidConfiguration = "InvalidConfiguration"
+    InternalError = "InternalError"
+    SubnetOutOfIPAddresses = "SubnetOutOfIPAddresses"
+    InvalidSubnet = "InvalidSubnet"
+    InvalidSecurityGroup = "InvalidSecurityGroup"
+    ImageDeleted = "ImageDeleted"
+    ImageAccessDenied = "ImageAccessDenied"
+    InvalidImage = "InvalidImage"
+    KMSKeyAccessDenied = "KMSKeyAccessDenied"
+    KMSKeyNotFound = "KMSKeyNotFound"
+    InvalidStateKMSKey = "InvalidStateKMSKey"
+    DisabledKMSKey = "DisabledKMSKey"
+    EFSIOError = "EFSIOError"
+    EFSMountConnectivityError = "EFSMountConnectivityError"
+    EFSMountFailure = "EFSMountFailure"
+    EFSMountTimeout = "EFSMountTimeout"
+    InvalidRuntime = "InvalidRuntime"
+    InvalidZipFileException = "InvalidZipFileException"
+    FunctionError = "FunctionError"
+
+
+class LogFormat(StrEnum):
+    JSON = "JSON"
+    Text = "Text"
+
+
+class LogType(StrEnum):
+    None_ = "None"
+    Tail = "Tail"
+
+
+class PackageType(StrEnum):
+    Zip = "Zip"
+    Image = "Image"
+
+
+class ProvisionedConcurrencyStatusEnum(StrEnum):
+    IN_PROGRESS = "IN_PROGRESS"
+    READY = "READY"
+    FAILED = "FAILED"
+
+
+class RecursiveLoop(StrEnum):
+    Allow = "Allow"
+    Terminate = "Terminate"
+
+
+class ResponseStreamingInvocationType(StrEnum):
+    RequestResponse = "RequestResponse"
+    DryRun = "DryRun"
+
+
+class Runtime(StrEnum):
+    nodejs = "nodejs"
+    nodejs4_3 = "nodejs4.3"
+    nodejs6_10 = "nodejs6.10"
+    nodejs8_10 = "nodejs8.10"
+    nodejs10_x = "nodejs10.x"
+    nodejs12_x = "nodejs12.x"
+    nodejs14_x = "nodejs14.x"
+    nodejs16_x = "nodejs16.x"
+    java8 = "java8"
+    java8_al2 = "java8.al2"
+    java11 = "java11"
+    python2_7 = "python2.7"
+    python3_6 = "python3.6"
+    python3_7 = "python3.7"
+    python3_8 = "python3.8"
+    python3_9 = "python3.9"
+    dotnetcore1_0 = "dotnetcore1.0"
+    dotnetcore2_0 = "dotnetcore2.0"
+    dotnetcore2_1 = "dotnetcore2.1"
+    dotnetcore3_1 = "dotnetcore3.1"
+    dotnet6 = "dotnet6"
+    dotnet8 = "dotnet8"
+    nodejs4_3_edge = "nodejs4.3-edge"
+    go1_x = "go1.x"
+    ruby2_5 = "ruby2.5"
+    ruby2_7 = "ruby2.7"
+    provided = "provided"
+    provided_al2 = "provided.al2"
+    nodejs18_x = "nodejs18.x"
+    python3_10 = "python3.10"
+    java17 = "java17"
+    ruby3_2 = "ruby3.2"
+    ruby3_3 = "ruby3.3"
+    ruby3_4 = "ruby3.4"
+    python3_11 = "python3.11"
+    nodejs20_x = "nodejs20.x"
+    provided_al2023 = "provided.al2023"
+    python3_12 = "python3.12"
+    java21 = "java21"
+    python3_13 = "python3.13"
+    nodejs22_x = "nodejs22.x"
+
+
+class SnapStartApplyOn(StrEnum):
+    PublishedVersions = "PublishedVersions"
+    None_ = "None"
+
+
+class SnapStartOptimizationStatus(StrEnum):
+    On = "On"
+    Off = "Off"
+
+
+class SourceAccessType(StrEnum):
+    BASIC_AUTH = "BASIC_AUTH"
+    VPC_SUBNET = "VPC_SUBNET"
+    VPC_SECURITY_GROUP = "VPC_SECURITY_GROUP"
+    SASL_SCRAM_512_AUTH = "SASL_SCRAM_512_AUTH"
+    SASL_SCRAM_256_AUTH = "SASL_SCRAM_256_AUTH"
+    VIRTUAL_HOST = "VIRTUAL_HOST"
+    CLIENT_CERTIFICATE_TLS_AUTH = "CLIENT_CERTIFICATE_TLS_AUTH"
+    SERVER_ROOT_CA_CERTIFICATE = "SERVER_ROOT_CA_CERTIFICATE"
+
+
+class State(StrEnum):
+    Pending = "Pending"
+    Active = "Active"
+    Inactive = "Inactive"
+    Failed = "Failed"
+
+
+class StateReasonCode(StrEnum):
+    Idle = "Idle"
+    Creating = "Creating"
+    Restoring = "Restoring"
+    EniLimitExceeded = "EniLimitExceeded"
+    InsufficientRolePermissions = "InsufficientRolePermissions"
+    InvalidConfiguration = "InvalidConfiguration"
+    InternalError = "InternalError"
+    SubnetOutOfIPAddresses = "SubnetOutOfIPAddresses"
+    InvalidSubnet = "InvalidSubnet"
+    InvalidSecurityGroup = "InvalidSecurityGroup"
+    ImageDeleted = "ImageDeleted"
+    ImageAccessDenied = "ImageAccessDenied"
+    InvalidImage = "InvalidImage"
+    KMSKeyAccessDenied = "KMSKeyAccessDenied"
+    KMSKeyNotFound = "KMSKeyNotFound"
+    InvalidStateKMSKey = "InvalidStateKMSKey"
+    DisabledKMSKey = "DisabledKMSKey"
+    EFSIOError = "EFSIOError"
+    EFSMountConnectivityError = "EFSMountConnectivityError"
+    EFSMountFailure = "EFSMountFailure"
+    EFSMountTimeout = "EFSMountTimeout"
+    InvalidRuntime = "InvalidRuntime"
+    InvalidZipFileException = "InvalidZipFileException"
+    FunctionError = "FunctionError"
+
+
+class SystemLogLevel(StrEnum):
+    DEBUG = "DEBUG"
+    INFO = "INFO"
+    WARN = "WARN"
+
+
+class ThrottleReason(StrEnum):
+    ConcurrentInvocationLimitExceeded = "ConcurrentInvocationLimitExceeded"
+    FunctionInvocationRateLimitExceeded = "FunctionInvocationRateLimitExceeded"
+    ReservedFunctionConcurrentInvocationLimitExceeded = (
+        "ReservedFunctionConcurrentInvocationLimitExceeded"
+    )
+    ReservedFunctionInvocationRateLimitExceeded = "ReservedFunctionInvocationRateLimitExceeded"
+    CallerRateLimitExceeded = "CallerRateLimitExceeded"
+    ConcurrentSnapshotCreateLimitExceeded = "ConcurrentSnapshotCreateLimitExceeded"
+
+
+class TracingMode(StrEnum):
+    Active = "Active"
+    PassThrough = "PassThrough"
+
+
+class UpdateRuntimeOn(StrEnum):
+    Auto = "Auto"
+    Manual = "Manual"
+    FunctionUpdate = "FunctionUpdate"
+
+
+class CodeSigningConfigNotFoundException(ServiceException):
+    code: str = "CodeSigningConfigNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 404
+    Type: Optional[String]
+
+
+class CodeStorageExceededException(ServiceException):
+    code: str = "CodeStorageExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class CodeVerificationFailedException(ServiceException):
+    code: str = "CodeVerificationFailedException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class EC2AccessDeniedException(ServiceException):
+    code: str = "EC2AccessDeniedException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class EC2ThrottledException(ServiceException):
+    code: str = "EC2ThrottledException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class EC2UnexpectedException(ServiceException):
+    code: str = "EC2UnexpectedException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+    EC2ErrorCode: Optional[String]
+
+
+class EFSIOException(ServiceException):
+    code: str = "EFSIOException"
+    sender_fault: bool = False
+    status_code: int = 410
+    Type: Optional[String]
+
+
+class EFSMountConnectivityException(ServiceException):
+    code: str = "EFSMountConnectivityException"
+    sender_fault: bool = False
+    status_code: int = 408
+    Type: Optional[String]
+
+
+class EFSMountFailureException(ServiceException):
+    code: str = "EFSMountFailureException"
+    sender_fault: bool = False
+    status_code: int = 403
+    Type: Optional[String]
+
+
+class EFSMountTimeoutException(ServiceException):
+    code: str = "EFSMountTimeoutException"
+    sender_fault: bool = False
+    status_code: int = 408
+    Type: Optional[String]
+
+
+class ENILimitReachedException(ServiceException):
+    code: str = "ENILimitReachedException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class InvalidCodeSignatureException(ServiceException):
+    code: str = "InvalidCodeSignatureException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class InvalidParameterValueException(ServiceException):
+    code: str = "InvalidParameterValueException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class InvalidRequestContentException(ServiceException):
+    code: str = "InvalidRequestContentException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class InvalidRuntimeException(ServiceException):
+    code: str = "InvalidRuntimeException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class InvalidSecurityGroupIDException(ServiceException):
+    code: str = "InvalidSecurityGroupIDException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class InvalidSubnetIDException(ServiceException):
+    code: str = "InvalidSubnetIDException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class InvalidZipFileException(ServiceException):
+    code: str = "InvalidZipFileException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class KMSAccessDeniedException(ServiceException):
+    code: str = "KMSAccessDeniedException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class KMSDisabledException(ServiceException):
+    code: str = "KMSDisabledException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class KMSInvalidStateException(ServiceException):
+    code: str = "KMSInvalidStateException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class KMSNotFoundException(ServiceException):
+    code: str = "KMSNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class PolicyLengthExceededException(ServiceException):
+    code: str = "PolicyLengthExceededException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class PreconditionFailedException(ServiceException):
+    code: str = "PreconditionFailedException"
+    sender_fault: bool = False
+    status_code: int = 412
+    Type: Optional[String]
+
+
+class ProvisionedConcurrencyConfigNotFoundException(ServiceException):
+    code: str = "ProvisionedConcurrencyConfigNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 404
+    Type: Optional[String]
+
+
+class RecursiveInvocationException(ServiceException):
+    code: str = "RecursiveInvocationException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class RequestTooLargeException(ServiceException):
+    code: str = "RequestTooLargeException"
+    sender_fault: bool = False
+    status_code: int = 413
+    Type: Optional[String]
+
+
+class ResourceConflictException(ServiceException):
+    code: str = "ResourceConflictException"
+    sender_fault: bool = False
+    status_code: int = 409
+    Type: Optional[String]
+
+
+class ResourceInUseException(ServiceException):
+    code: str = "ResourceInUseException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class ResourceNotFoundException(ServiceException):
+    code: str = "ResourceNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 404
+    Type: Optional[String]
+
+
+class ResourceNotReadyException(ServiceException):
+    code: str = "ResourceNotReadyException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class ServiceException(ServiceException):
+    code: str = "ServiceException"
+    sender_fault: bool = False
+    status_code: int = 500
+    Type: Optional[String]
+
+
+class SnapStartException(ServiceException):
+    code: str = "SnapStartException"
+    sender_fault: bool = False
+    status_code: int = 400
+    Type: Optional[String]
+
+
+class SnapStartNotReadyException(ServiceException):
+    code: str = "SnapStartNotReadyException"
+    sender_fault: bool = False
+    status_code: int = 409
+    Type: Optional[String]
+
+
+class SnapStartTimeoutException(ServiceException):
+    code: str = "SnapStartTimeoutException"
+    sender_fault: bool = False
+    status_code: int = 408
+    Type: Optional[String]
+
+
+class SubnetIPAddressLimitReachedException(ServiceException):
+    code: str = "SubnetIPAddressLimitReachedException"
+    sender_fault: bool = False
+    status_code: int = 502
+    Type: Optional[String]
+
+
+class TooManyRequestsException(ServiceException):
+    code: str = "TooManyRequestsException"
+    sender_fault: bool = False
+    status_code: int = 429
+    retryAfterSeconds: Optional[String]
+    Type: Optional[String]
+    Reason: Optional[ThrottleReason]
+
+
+class UnsupportedMediaTypeException(ServiceException):
+    code: str = "UnsupportedMediaTypeException"
+    sender_fault: bool = False
+    status_code: int = 415
+    Type: Optional[String]
+
+
+Long = int
+
+
+class AccountLimit(TypedDict, total=False):
+    TotalCodeSize: Optional[Long]
+    CodeSizeUnzipped: Optional[Long]
+    CodeSizeZipped: Optional[Long]
+    ConcurrentExecutions: Optional[Integer]
+    UnreservedConcurrentExecutions: Optional[UnreservedConcurrentExecutions]
+
+
+class AccountUsage(TypedDict, total=False):
+    TotalCodeSize: Optional[Long]
+    FunctionCount: Optional[Long]
+
+
+LayerVersionNumber = int
+
+
+class AddLayerVersionPermissionRequest(ServiceRequest):
+    LayerName: LayerName
+    VersionNumber: LayerVersionNumber
+    StatementId: StatementId
+    Action: LayerPermissionAllowedAction
+    Principal: LayerPermissionAllowedPrincipal
+    OrganizationId: Optional[OrganizationId]
+    RevisionId: Optional[String]
+
+
+class AddLayerVersionPermissionResponse(TypedDict, total=False):
+    Statement: Optional[String]
+    RevisionId: Optional[String]
+
+
+class AddPermissionRequest(ServiceRequest):
+    FunctionName: FunctionName
+    StatementId: StatementId
+    Action: Action
+    Principal: Principal
+    SourceArn: Optional[Arn]
+    SourceAccount: Optional[SourceOwner]
+    EventSourceToken: Optional[EventSourceToken]
+    Qualifier: Optional[Qualifier]
+    RevisionId: Optional[String]
+    PrincipalOrgID: Optional[PrincipalOrgID]
+    FunctionUrlAuthType: Optional[FunctionUrlAuthType]
+
+
+class AddPermissionResponse(TypedDict, total=False):
+    Statement: Optional[String]
+
+
+AdditionalVersionWeights = Dict[AdditionalVersion, Weight]
+
+
+class AliasRoutingConfiguration(TypedDict, total=False):
+    AdditionalVersionWeights: Optional[AdditionalVersionWeights]
+
+
+class AliasConfiguration(TypedDict, total=False):
+    AliasArn: Optional[FunctionArn]
+    Name: Optional[Alias]
+    FunctionVersion: Optional[Version]
+    Description: Optional[Description]
+    RoutingConfig: Optional[AliasRoutingConfiguration]
+    RevisionId: Optional[String]
+
+
+AliasList = List[AliasConfiguration]
+AllowMethodsList = List[Method]
+AllowOriginsList = List[Origin]
+SigningProfileVersionArns = List[Arn]
+
+
+class AllowedPublishers(TypedDict, total=False):
+    SigningProfileVersionArns: SigningProfileVersionArns
+
+
+class AmazonManagedKafkaEventSourceConfig(TypedDict, total=False):
+    ConsumerGroupId: Optional[URI]
+
+
+ArchitecturesList = List[Architecture]
+Blob = bytes
+BlobStream = bytes
+
+
+class CodeSigningPolicies(TypedDict, total=False):
+    UntrustedArtifactOnDeployment: Optional[CodeSigningPolicy]
+
+
+class CodeSigningConfig(TypedDict, total=False):
+    CodeSigningConfigId: CodeSigningConfigId
+    CodeSigningConfigArn: CodeSigningConfigArn
+    Description: Optional[Description]
+    AllowedPublishers: AllowedPublishers
+    CodeSigningPolicies: CodeSigningPolicies
+    LastModified: Timestamp
+
+
+CodeSigningConfigList = List[CodeSigningConfig]
+CompatibleArchitectures = List[Architecture]
+CompatibleRuntimes = List[Runtime]
+
+
+class Concurrency(TypedDict, total=False):
+    ReservedConcurrentExecutions: Optional[ReservedConcurrentExecutions]
+
+
+HeadersList = List[Header]
+
+
+class Cors(TypedDict, total=False):
+    AllowCredentials: Optional[AllowCredentials]
+    AllowHeaders: Optional[HeadersList]
+    AllowMethods: Optional[AllowMethodsList]
+    AllowOrigins: Optional[AllowOriginsList]
+    ExposeHeaders: Optional[HeadersList]
+    MaxAge: Optional[MaxAge]
+
+
+class CreateAliasRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Name: Alias
+    FunctionVersion: Version
+    Description: Optional[Description]
+    RoutingConfig: Optional[AliasRoutingConfiguration]
+
+
+Tags = Dict[TagKey, TagValue]
+
+
+class CreateCodeSigningConfigRequest(ServiceRequest):
+    Description: Optional[Description]
+    AllowedPublishers: AllowedPublishers
+    CodeSigningPolicies: Optional[CodeSigningPolicies]
+    Tags: Optional[Tags]
+
+
+class CreateCodeSigningConfigResponse(TypedDict, total=False):
+    CodeSigningConfig: CodeSigningConfig
+
+
+class ProvisionedPollerConfig(TypedDict, total=False):
+    MinimumPollers: Optional[MinimumNumberOfPollers]
+    MaximumPollers: Optional[MaximumNumberOfPollers]
+
+
+EventSourceMappingMetricList = List[EventSourceMappingMetric]
+
+
+class EventSourceMappingMetricsConfig(TypedDict, total=False):
+    Metrics: Optional[EventSourceMappingMetricList]
+
+
+class DocumentDBEventSourceConfig(TypedDict, total=False):
+    DatabaseName: Optional[DatabaseName]
+    CollectionName: Optional[CollectionName]
+    FullDocument: Optional[FullDocument]
+
+
+class ScalingConfig(TypedDict, total=False):
+    MaximumConcurrency: Optional[MaximumConcurrency]
+
+
+class SelfManagedKafkaEventSourceConfig(TypedDict, total=False):
+    ConsumerGroupId: Optional[URI]
+
+
+FunctionResponseTypeList = List[FunctionResponseType]
+EndpointLists = List[Endpoint]
+Endpoints = Dict[EndPointType, EndpointLists]
+
+
+class SelfManagedEventSource(TypedDict, total=False):
+    Endpoints: Optional[Endpoints]
+
+
+class SourceAccessConfiguration(TypedDict, total=False):
+    Type: Optional[SourceAccessType]
+    URI: Optional[URI]
+
+
+SourceAccessConfigurations = List[SourceAccessConfiguration]
+Queues = List[Queue]
+Topics = List[Topic]
+
+
+class OnFailure(TypedDict, total=False):
+    Destination: Optional[DestinationArn]
+
+
+class OnSuccess(TypedDict, total=False):
+    Destination: Optional[DestinationArn]
+
+
+class DestinationConfig(TypedDict, total=False):
+    OnSuccess: Optional[OnSuccess]
+    OnFailure: Optional[OnFailure]
+
+
+Date = datetime
+
+
+class Filter(TypedDict, total=False):
+    Pattern: Optional[Pattern]
+
+
+FilterList = List[Filter]
+
+
+class FilterCriteria(TypedDict, total=False):
+    Filters: Optional[FilterList]
+
+
+class CreateEventSourceMappingRequest(ServiceRequest):
+    EventSourceArn: Optional[Arn]
+    FunctionName: FunctionName
+    Enabled: Optional[Enabled]
+    BatchSize: Optional[BatchSize]
+    FilterCriteria: Optional[FilterCriteria]
+    MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds]
+    ParallelizationFactor: Optional[ParallelizationFactor]
+    StartingPosition: Optional[EventSourcePosition]
+    StartingPositionTimestamp: Optional[Date]
+    DestinationConfig: Optional[DestinationConfig]
+    MaximumRecordAgeInSeconds: Optional[MaximumRecordAgeInSeconds]
+    BisectBatchOnFunctionError: Optional[BisectBatchOnFunctionError]
+    MaximumRetryAttempts: Optional[MaximumRetryAttemptsEventSourceMapping]
+    Tags: Optional[Tags]
+    TumblingWindowInSeconds: Optional[TumblingWindowInSeconds]
+    Topics: Optional[Topics]
+    Queues: Optional[Queues]
+    SourceAccessConfigurations: Optional[SourceAccessConfigurations]
+    SelfManagedEventSource: Optional[SelfManagedEventSource]
+    FunctionResponseTypes: Optional[FunctionResponseTypeList]
+    AmazonManagedKafkaEventSourceConfig: Optional[AmazonManagedKafkaEventSourceConfig]
+    SelfManagedKafkaEventSourceConfig: Optional[SelfManagedKafkaEventSourceConfig]
+    ScalingConfig: Optional[ScalingConfig]
+    DocumentDBEventSourceConfig: Optional[DocumentDBEventSourceConfig]
+    KMSKeyArn: Optional[KMSKeyArn]
+    MetricsConfig: Optional[EventSourceMappingMetricsConfig]
+    ProvisionedPollerConfig: Optional[ProvisionedPollerConfig]
+
+
+class LoggingConfig(TypedDict, total=False):
+    LogFormat: Optional[LogFormat]
+    ApplicationLogLevel: Optional[ApplicationLogLevel]
+    SystemLogLevel: Optional[SystemLogLevel]
+    LogGroup: Optional[LogGroup]
+
+
+class SnapStart(TypedDict, total=False):
+    ApplyOn: Optional[SnapStartApplyOn]
+
+
+class EphemeralStorage(TypedDict, total=False):
+    Size: EphemeralStorageSize
+
+
+StringList = List[String]
+
+
+class ImageConfig(TypedDict, total=False):
+    EntryPoint: Optional[StringList]
+    Command: Optional[StringList]
+    WorkingDirectory: Optional[WorkingDirectory]
+
+
+class FileSystemConfig(TypedDict, total=False):
+    Arn: FileSystemArn
+    LocalMountPath: LocalMountPath
+
+
+FileSystemConfigList = List[FileSystemConfig]
+LayerList = List[LayerVersionArn]
+
+
+class TracingConfig(TypedDict, total=False):
+    Mode: Optional[TracingMode]
+
+
+EnvironmentVariables = Dict[EnvironmentVariableName, EnvironmentVariableValue]
+
+
+class Environment(TypedDict, total=False):
+    Variables: Optional[EnvironmentVariables]
+
+
+class DeadLetterConfig(TypedDict, total=False):
+    TargetArn: Optional[ResourceArn]
+
+
+SecurityGroupIds = List[SecurityGroupId]
+SubnetIds = List[SubnetId]
+
+
+class VpcConfig(TypedDict, total=False):
+    SubnetIds: Optional[SubnetIds]
+    SecurityGroupIds: Optional[SecurityGroupIds]
+    Ipv6AllowedForDualStack: Optional[NullableBoolean]
+
+
+class FunctionCode(TypedDict, total=False):
+    ZipFile: Optional[Blob]
+    S3Bucket: Optional[S3Bucket]
+    S3Key: Optional[S3Key]
+    S3ObjectVersion: Optional[S3ObjectVersion]
+    ImageUri: Optional[String]
+    SourceKMSKeyArn: Optional[KMSKeyArn]
+
+
+class CreateFunctionRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Runtime: Optional[Runtime]
+    Role: RoleArn
+    Handler: Optional[Handler]
+    Code: FunctionCode
+    Description: Optional[Description]
+    Timeout: Optional[Timeout]
+    MemorySize: Optional[MemorySize]
+    Publish: Optional[Boolean]
+    VpcConfig: Optional[VpcConfig]
+    PackageType: Optional[PackageType]
+    DeadLetterConfig: Optional[DeadLetterConfig]
+    Environment: Optional[Environment]
+    KMSKeyArn: Optional[KMSKeyArn]
+    TracingConfig: Optional[TracingConfig]
+    Tags: Optional[Tags]
+    Layers: Optional[LayerList]
+    FileSystemConfigs: Optional[FileSystemConfigList]
+    ImageConfig: Optional[ImageConfig]
+    CodeSigningConfigArn: Optional[CodeSigningConfigArn]
+    Architectures: Optional[ArchitecturesList]
+    EphemeralStorage: Optional[EphemeralStorage]
+    SnapStart: Optional[SnapStart]
+    LoggingConfig: Optional[LoggingConfig]
+
+
+class CreateFunctionUrlConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[FunctionUrlQualifier]
+    AuthType: FunctionUrlAuthType
+    Cors: Optional[Cors]
+    InvokeMode: Optional[InvokeMode]
+
+
+class CreateFunctionUrlConfigResponse(TypedDict, total=False):
+    FunctionUrl: FunctionUrl
+    FunctionArn: FunctionArn
+    AuthType: FunctionUrlAuthType
+    Cors: Optional[Cors]
+    CreationTime: Timestamp
+    InvokeMode: Optional[InvokeMode]
+
+
+class DeleteAliasRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Name: Alias
+
+
+class DeleteCodeSigningConfigRequest(ServiceRequest):
+    CodeSigningConfigArn: CodeSigningConfigArn
+
+
+class DeleteCodeSigningConfigResponse(TypedDict, total=False):
+    pass
+
+
+class DeleteEventSourceMappingRequest(ServiceRequest):
+    UUID: String
+
+
+class DeleteFunctionCodeSigningConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+
+
+class DeleteFunctionConcurrencyRequest(ServiceRequest):
+    FunctionName: FunctionName
+
+
+class DeleteFunctionEventInvokeConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[Qualifier]
+
+
+class DeleteFunctionRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[Qualifier]
+
+
+class DeleteFunctionUrlConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[FunctionUrlQualifier]
+
+
+class DeleteLayerVersionRequest(ServiceRequest):
+    LayerName: LayerName
+    VersionNumber: LayerVersionNumber
+
+
+class DeleteProvisionedConcurrencyConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Qualifier
+
+
+class EnvironmentError(TypedDict, total=False):
+    ErrorCode: Optional[String]
+    Message: Optional[SensitiveString]
+
+
+class EnvironmentResponse(TypedDict, total=False):
+    Variables: Optional[EnvironmentVariables]
+    Error: Optional[EnvironmentError]
+
+
+class FilterCriteriaError(TypedDict, total=False):
+    ErrorCode: Optional[FilterCriteriaErrorCode]
+    Message: Optional[FilterCriteriaErrorMessage]
+
+
+class EventSourceMappingConfiguration(TypedDict, total=False):
+    UUID: Optional[String]
+    StartingPosition: Optional[EventSourcePosition]
+    StartingPositionTimestamp: Optional[Date]
+    BatchSize: Optional[BatchSize]
+    MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds]
+    ParallelizationFactor: Optional[ParallelizationFactor]
+    EventSourceArn: Optional[Arn]
+    FilterCriteria: Optional[FilterCriteria]
+    FunctionArn: Optional[FunctionArn]
+    LastModified: Optional[Date]
+    LastProcessingResult: Optional[String]
+    State: Optional[String]
+    StateTransitionReason: Optional[String]
+    DestinationConfig: Optional[DestinationConfig]
+    Topics: Optional[Topics]
+    Queues: Optional[Queues]
+    SourceAccessConfigurations: Optional[SourceAccessConfigurations]
+    SelfManagedEventSource: Optional[SelfManagedEventSource]
+    MaximumRecordAgeInSeconds: Optional[MaximumRecordAgeInSeconds]
+    BisectBatchOnFunctionError: Optional[BisectBatchOnFunctionError]
+    MaximumRetryAttempts: Optional[MaximumRetryAttemptsEventSourceMapping]
+    TumblingWindowInSeconds: Optional[TumblingWindowInSeconds]
+    FunctionResponseTypes: Optional[FunctionResponseTypeList]
+    AmazonManagedKafkaEventSourceConfig: Optional[AmazonManagedKafkaEventSourceConfig]
+    SelfManagedKafkaEventSourceConfig: Optional[SelfManagedKafkaEventSourceConfig]
+    ScalingConfig: Optional[ScalingConfig]
+    DocumentDBEventSourceConfig: Optional[DocumentDBEventSourceConfig]
+    KMSKeyArn: Optional[KMSKeyArn]
+    FilterCriteriaError: Optional[FilterCriteriaError]
+    EventSourceMappingArn: Optional[EventSourceMappingArn]
+    MetricsConfig: Optional[EventSourceMappingMetricsConfig]
+    ProvisionedPollerConfig: Optional[ProvisionedPollerConfig]
+
+
+EventSourceMappingsList = List[EventSourceMappingConfiguration]
+FunctionArnList = List[FunctionArn]
+
+
+class FunctionCodeLocation(TypedDict, total=False):
+    RepositoryType: Optional[String]
+    Location: Optional[String]
+    ImageUri: Optional[String]
+    ResolvedImageUri: Optional[String]
+    SourceKMSKeyArn: Optional[String]
+
+
+class RuntimeVersionError(TypedDict, total=False):
+    ErrorCode: Optional[String]
+    Message: Optional[SensitiveString]
+
+
+class RuntimeVersionConfig(TypedDict, total=False):
+    RuntimeVersionArn: Optional[RuntimeVersionArn]
+    Error: Optional[RuntimeVersionError]
+
+
+class SnapStartResponse(TypedDict, total=False):
+    ApplyOn: Optional[SnapStartApplyOn]
+    OptimizationStatus: Optional[SnapStartOptimizationStatus]
+
+
+class ImageConfigError(TypedDict, total=False):
+    ErrorCode: Optional[String]
+    Message: Optional[SensitiveString]
+
+
+class ImageConfigResponse(TypedDict, total=False):
+    ImageConfig: Optional[ImageConfig]
+    Error: Optional[ImageConfigError]
+
+
+class Layer(TypedDict, total=False):
+    Arn: Optional[LayerVersionArn]
+    CodeSize: Optional[Long]
+    SigningProfileVersionArn: Optional[Arn]
+    SigningJobArn: Optional[Arn]
+
+
+LayersReferenceList = List[Layer]
+
+
+class TracingConfigResponse(TypedDict, total=False):
+    Mode: Optional[TracingMode]
+
+
+class VpcConfigResponse(TypedDict, total=False):
+    SubnetIds: Optional[SubnetIds]
+    SecurityGroupIds: Optional[SecurityGroupIds]
+    VpcId: Optional[VpcId]
+    Ipv6AllowedForDualStack: Optional[NullableBoolean]
+
+
+class FunctionConfiguration(TypedDict, total=False):
+    FunctionName: Optional[NamespacedFunctionName]
+    FunctionArn: Optional[NameSpacedFunctionArn]
+    Runtime: Optional[Runtime]
+    Role: Optional[RoleArn]
+    Handler: Optional[Handler]
+    CodeSize: Optional[Long]
+    Description: Optional[Description]
+    Timeout: Optional[Timeout]
+    MemorySize: Optional[MemorySize]
+    LastModified: Optional[Timestamp]
+    CodeSha256: Optional[String]
+    Version: Optional[Version]
+    VpcConfig: Optional[VpcConfigResponse]
+    DeadLetterConfig: Optional[DeadLetterConfig]
+    Environment: Optional[EnvironmentResponse]
+    KMSKeyArn: Optional[KMSKeyArn]
+    TracingConfig: Optional[TracingConfigResponse]
+    MasterArn: Optional[FunctionArn]
+    RevisionId: Optional[String]
+    Layers: Optional[LayersReferenceList]
+    State: Optional[State]
+    StateReason: Optional[StateReason]
+    StateReasonCode: Optional[StateReasonCode]
+    LastUpdateStatus: Optional[LastUpdateStatus]
+    LastUpdateStatusReason: Optional[LastUpdateStatusReason]
+    LastUpdateStatusReasonCode: Optional[LastUpdateStatusReasonCode]
+    FileSystemConfigs: Optional[FileSystemConfigList]
+    PackageType: Optional[PackageType]
+    ImageConfigResponse: Optional[ImageConfigResponse]
+    SigningProfileVersionArn: Optional[Arn]
+    SigningJobArn: Optional[Arn]
+    Architectures: Optional[ArchitecturesList]
+    EphemeralStorage: Optional[EphemeralStorage]
+    SnapStart: Optional[SnapStartResponse]
+    RuntimeVersionConfig: Optional[RuntimeVersionConfig]
+    LoggingConfig: Optional[LoggingConfig]
+
+
+class FunctionEventInvokeConfig(TypedDict, total=False):
+    LastModified: Optional[Date]
+    FunctionArn: Optional[FunctionArn]
+    MaximumRetryAttempts: Optional[MaximumRetryAttempts]
+    MaximumEventAgeInSeconds: Optional[MaximumEventAgeInSeconds]
+    DestinationConfig: Optional[DestinationConfig]
+
+
+FunctionEventInvokeConfigList = List[FunctionEventInvokeConfig]
+FunctionList = List[FunctionConfiguration]
+
+
+class FunctionUrlConfig(TypedDict, total=False):
+    FunctionUrl: FunctionUrl
+    FunctionArn: FunctionArn
+    CreationTime: Timestamp
+    LastModifiedTime: Timestamp
+    Cors: Optional[Cors]
+    AuthType: FunctionUrlAuthType
+    InvokeMode: Optional[InvokeMode]
+
+
+FunctionUrlConfigList = List[FunctionUrlConfig]
+
+
+class GetAccountSettingsRequest(ServiceRequest):
+    pass
+
+
+class GetAccountSettingsResponse(TypedDict, total=False):
+    AccountLimit: Optional[AccountLimit]
+    AccountUsage: Optional[AccountUsage]
+
+
+class GetAliasRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Name: Alias
+
+
+class GetCodeSigningConfigRequest(ServiceRequest):
+    CodeSigningConfigArn: CodeSigningConfigArn
+
+
+class GetCodeSigningConfigResponse(TypedDict, total=False):
+    CodeSigningConfig: CodeSigningConfig
+
+
+class GetEventSourceMappingRequest(ServiceRequest):
+    UUID: String
+
+
+class GetFunctionCodeSigningConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+
+
+class GetFunctionCodeSigningConfigResponse(TypedDict, total=False):
+    CodeSigningConfigArn: CodeSigningConfigArn
+    FunctionName: FunctionName
+
+
+class GetFunctionConcurrencyRequest(ServiceRequest):
+    FunctionName: FunctionName
+
+
+class GetFunctionConcurrencyResponse(TypedDict, total=False):
+    ReservedConcurrentExecutions: Optional[ReservedConcurrentExecutions]
+
+
+class GetFunctionConfigurationRequest(ServiceRequest):
+    FunctionName: NamespacedFunctionName
+    Qualifier: Optional[Qualifier]
+
+
+class GetFunctionEventInvokeConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[Qualifier]
+
+
+class GetFunctionRecursionConfigRequest(ServiceRequest):
+    FunctionName: UnqualifiedFunctionName
+
+
+class GetFunctionRecursionConfigResponse(TypedDict, total=False):
+    RecursiveLoop: Optional[RecursiveLoop]
+
+
+class GetFunctionRequest(ServiceRequest):
+    FunctionName: NamespacedFunctionName
+    Qualifier: Optional[Qualifier]
+
+
+class TagsError(TypedDict, total=False):
+    ErrorCode: TagsErrorCode
+    Message: TagsErrorMessage
+
+
+class GetFunctionResponse(TypedDict, total=False):
+    Configuration: Optional[FunctionConfiguration]
+    Code: Optional[FunctionCodeLocation]
+    Tags: Optional[Tags]
+    TagsError: Optional[TagsError]
+    Concurrency: Optional[Concurrency]
+
+
+class GetFunctionUrlConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[FunctionUrlQualifier]
+
+
+class GetFunctionUrlConfigResponse(TypedDict, total=False):
+    FunctionUrl: FunctionUrl
+    FunctionArn: FunctionArn
+    AuthType: FunctionUrlAuthType
+    Cors: Optional[Cors]
+    CreationTime: Timestamp
+    LastModifiedTime: Timestamp
+    InvokeMode: Optional[InvokeMode]
+
+
+class GetLayerVersionByArnRequest(ServiceRequest):
+    Arn: LayerVersionArn
+
+
+class GetLayerVersionPolicyRequest(ServiceRequest):
+    LayerName: LayerName
+    VersionNumber: LayerVersionNumber
+
+
+class GetLayerVersionPolicyResponse(TypedDict, total=False):
+    Policy: Optional[String]
+    RevisionId: Optional[String]
+
+
+class GetLayerVersionRequest(ServiceRequest):
+    LayerName: LayerName
+    VersionNumber: LayerVersionNumber
+
+
+class LayerVersionContentOutput(TypedDict, total=False):
+    Location: Optional[String]
+    CodeSha256: Optional[String]
+    CodeSize: Optional[Long]
+    SigningProfileVersionArn: Optional[String]
+    SigningJobArn: Optional[String]
+
+
+class GetLayerVersionResponse(TypedDict, total=False):
+    Content: Optional[LayerVersionContentOutput]
+    LayerArn: Optional[LayerArn]
+    LayerVersionArn: Optional[LayerVersionArn]
+    Description: Optional[Description]
+    CreatedDate: Optional[Timestamp]
+    Version: Optional[LayerVersionNumber]
+    CompatibleRuntimes: Optional[CompatibleRuntimes]
+    LicenseInfo: Optional[LicenseInfo]
+    CompatibleArchitectures: Optional[CompatibleArchitectures]
+
+
+class GetPolicyRequest(ServiceRequest):
+    FunctionName: NamespacedFunctionName
+    Qualifier: Optional[Qualifier]
+
+
+class GetPolicyResponse(TypedDict, total=False):
+    Policy: Optional[String]
+    RevisionId: Optional[String]
+
+
+class GetProvisionedConcurrencyConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Qualifier
+
+
+class GetProvisionedConcurrencyConfigResponse(TypedDict, total=False):
+    RequestedProvisionedConcurrentExecutions: Optional[PositiveInteger]
+    AvailableProvisionedConcurrentExecutions: Optional[NonNegativeInteger]
+    AllocatedProvisionedConcurrentExecutions: Optional[NonNegativeInteger]
+    Status: Optional[ProvisionedConcurrencyStatusEnum]
+    StatusReason: Optional[String]
+    LastModified: Optional[Timestamp]
+
+
+class GetRuntimeManagementConfigRequest(ServiceRequest):
+    FunctionName: NamespacedFunctionName
+    Qualifier: Optional[Qualifier]
+
+
+class GetRuntimeManagementConfigResponse(TypedDict, total=False):
+    UpdateRuntimeOn: Optional[UpdateRuntimeOn]
+    RuntimeVersionArn: Optional[RuntimeVersionArn]
+    FunctionArn: Optional[NameSpacedFunctionArn]
+
+
+class InvocationRequest(ServiceRequest):
+    Payload: Optional[IO[Blob]]
+    FunctionName: NamespacedFunctionName
+    InvocationType: Optional[InvocationType]
+    LogType: Optional[LogType]
+    ClientContext: Optional[String]
+    Qualifier: Optional[Qualifier]
+
+
+class InvocationResponse(TypedDict, total=False):
+    Payload: Optional[Union[Blob, IO[Blob], Iterable[Blob]]]
+    StatusCode: Optional[Integer]
+    FunctionError: Optional[String]
+    LogResult: Optional[String]
+    ExecutedVersion: Optional[Version]
+
+
+class InvokeAsyncRequest(ServiceRequest):
+    InvokeArgs: IO[BlobStream]
+    FunctionName: NamespacedFunctionName
+
+
+class InvokeAsyncResponse(TypedDict, total=False):
+    Status: Optional[HttpStatus]
+
+
+class InvokeResponseStreamUpdate(TypedDict, total=False):
+    Payload: Optional[Blob]
+
+
+class InvokeWithResponseStreamCompleteEvent(TypedDict, total=False):
+    ErrorCode: Optional[String]
+    ErrorDetails: Optional[String]
+    LogResult: Optional[String]
+
+
+class InvokeWithResponseStreamRequest(ServiceRequest):
+    Payload: Optional[IO[Blob]]
+    FunctionName: NamespacedFunctionName
+    InvocationType: Optional[ResponseStreamingInvocationType]
+    LogType: Optional[LogType]
+    ClientContext: Optional[String]
+    Qualifier: Optional[Qualifier]
+
+
+class InvokeWithResponseStreamResponseEvent(TypedDict, total=False):
+    PayloadChunk: Optional[InvokeResponseStreamUpdate]
+    InvokeComplete: Optional[InvokeWithResponseStreamCompleteEvent]
+
+
+class InvokeWithResponseStreamResponse(TypedDict, total=False):
+    StatusCode: Optional[Integer]
+    ExecutedVersion: Optional[Version]
+    EventStream: Iterator[InvokeWithResponseStreamResponseEvent]
+    ResponseStreamContentType: Optional[String]
+
+
+class LayerVersionContentInput(TypedDict, total=False):
+    S3Bucket: Optional[S3Bucket]
+    S3Key: Optional[S3Key]
+    S3ObjectVersion: Optional[S3ObjectVersion]
+    ZipFile: Optional[Blob]
+
+
+class LayerVersionsListItem(TypedDict, total=False):
+    LayerVersionArn: Optional[LayerVersionArn]
+    Version: Optional[LayerVersionNumber]
+    Description: Optional[Description]
+    CreatedDate: Optional[Timestamp]
+    CompatibleRuntimes: Optional[CompatibleRuntimes]
+    LicenseInfo: Optional[LicenseInfo]
+    CompatibleArchitectures: Optional[CompatibleArchitectures]
+
+
+LayerVersionsList = List[LayerVersionsListItem]
+
+
+class LayersListItem(TypedDict, total=False):
+    LayerName: Optional[LayerName]
+    LayerArn: Optional[LayerArn]
+    LatestMatchingVersion: Optional[LayerVersionsListItem]
+
+
+LayersList = List[LayersListItem]
+
+
+class ListAliasesRequest(ServiceRequest):
+    FunctionName: FunctionName
+    FunctionVersion: Optional[Version]
+    Marker: Optional[String]
+    MaxItems: Optional[MaxListItems]
+
+
+class ListAliasesResponse(TypedDict, total=False):
+    NextMarker: Optional[String]
+    Aliases: Optional[AliasList]
+
+
+class ListCodeSigningConfigsRequest(ServiceRequest):
+    Marker: Optional[String]
+    MaxItems: Optional[MaxListItems]
+
+
+class ListCodeSigningConfigsResponse(TypedDict, total=False):
+    NextMarker: Optional[String]
+    CodeSigningConfigs: Optional[CodeSigningConfigList]
+
+
+class ListEventSourceMappingsRequest(ServiceRequest):
+    EventSourceArn: Optional[Arn]
+    FunctionName: Optional[FunctionName]
+    Marker: Optional[String]
+    MaxItems: Optional[MaxListItems]
+
+
+class ListEventSourceMappingsResponse(TypedDict, total=False):
+    NextMarker: Optional[String]
+    EventSourceMappings: Optional[EventSourceMappingsList]
+
+
+class ListFunctionEventInvokeConfigsRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Marker: Optional[String]
+    MaxItems: Optional[MaxFunctionEventInvokeConfigListItems]
+
+
+class ListFunctionEventInvokeConfigsResponse(TypedDict, total=False):
+    FunctionEventInvokeConfigs: Optional[FunctionEventInvokeConfigList]
+    NextMarker: Optional[String]
+
+
+class ListFunctionUrlConfigsRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Marker: Optional[String]
+    MaxItems: Optional[MaxItems]
+
+
+class ListFunctionUrlConfigsResponse(TypedDict, total=False):
+    FunctionUrlConfigs: FunctionUrlConfigList
+    NextMarker: Optional[String]
+
+
+class ListFunctionsByCodeSigningConfigRequest(ServiceRequest):
+    CodeSigningConfigArn: CodeSigningConfigArn
+    Marker: Optional[String]
+    MaxItems: Optional[MaxListItems]
+
+
+class ListFunctionsByCodeSigningConfigResponse(TypedDict, total=False):
+    NextMarker: Optional[String]
+    FunctionArns: Optional[FunctionArnList]
+
+
+class ListFunctionsRequest(ServiceRequest):
+    MasterRegion: Optional[MasterRegion]
+    FunctionVersion: Optional[FunctionVersion]
+    Marker: Optional[String]
+    MaxItems: Optional[MaxListItems]
+
+
+class ListFunctionsResponse(TypedDict, total=False):
+    NextMarker: Optional[String]
+    Functions: Optional[FunctionList]
+
+
+class ListLayerVersionsRequest(ServiceRequest):
+    CompatibleRuntime: Optional[Runtime]
+    LayerName: LayerName
+    Marker: Optional[String]
+    MaxItems: Optional[MaxLayerListItems]
+    CompatibleArchitecture: Optional[Architecture]
+
+
+class ListLayerVersionsResponse(TypedDict, total=False):
+    NextMarker: Optional[String]
+    LayerVersions: Optional[LayerVersionsList]
+
+
+class ListLayersRequest(ServiceRequest):
+    CompatibleRuntime: Optional[Runtime]
+    Marker: Optional[String]
+    MaxItems: Optional[MaxLayerListItems]
+    CompatibleArchitecture: Optional[Architecture]
+
+
+class ListLayersResponse(TypedDict, total=False):
+    NextMarker: Optional[String]
+    Layers: Optional[LayersList]
+
+
+class ListProvisionedConcurrencyConfigsRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Marker: Optional[String]
+    MaxItems: Optional[MaxProvisionedConcurrencyConfigListItems]
+
+
+class ProvisionedConcurrencyConfigListItem(TypedDict, total=False):
+    FunctionArn: Optional[FunctionArn]
+    RequestedProvisionedConcurrentExecutions: Optional[PositiveInteger]
+    AvailableProvisionedConcurrentExecutions: Optional[NonNegativeInteger]
+    AllocatedProvisionedConcurrentExecutions: Optional[NonNegativeInteger]
+    Status: Optional[ProvisionedConcurrencyStatusEnum]
+    StatusReason: Optional[String]
+    LastModified: Optional[Timestamp]
+
+
+ProvisionedConcurrencyConfigList = List[ProvisionedConcurrencyConfigListItem]
+
+
+class ListProvisionedConcurrencyConfigsResponse(TypedDict, total=False):
+    ProvisionedConcurrencyConfigs: Optional[ProvisionedConcurrencyConfigList]
+    NextMarker: Optional[String]
+
+
+class ListTagsRequest(ServiceRequest):
+    Resource: TaggableResource
+
+
+class ListTagsResponse(TypedDict, total=False):
+    Tags: Optional[Tags]
+
+
+class ListVersionsByFunctionRequest(ServiceRequest):
+    FunctionName: NamespacedFunctionName
+    Marker: Optional[String]
+    MaxItems: Optional[MaxListItems]
+
+
+class ListVersionsByFunctionResponse(TypedDict, total=False):
+    NextMarker: Optional[String]
+    Versions: Optional[FunctionList]
+
+
+class PublishLayerVersionRequest(ServiceRequest):
+    LayerName: LayerName
+    Description: Optional[Description]
+    Content: LayerVersionContentInput
+    CompatibleRuntimes: Optional[CompatibleRuntimes]
+    LicenseInfo: Optional[LicenseInfo]
+    CompatibleArchitectures: Optional[CompatibleArchitectures]
+
+
+class PublishLayerVersionResponse(TypedDict, total=False):
+    Content: Optional[LayerVersionContentOutput]
+    LayerArn: Optional[LayerArn]
+    LayerVersionArn: Optional[LayerVersionArn]
+    Description: Optional[Description]
+    CreatedDate: Optional[Timestamp]
+    Version: Optional[LayerVersionNumber]
+    CompatibleRuntimes: Optional[CompatibleRuntimes]
+    LicenseInfo: Optional[LicenseInfo]
+    CompatibleArchitectures: Optional[CompatibleArchitectures]
+
+
+class PublishVersionRequest(ServiceRequest):
+    FunctionName: FunctionName
+    CodeSha256: Optional[String]
+    Description: Optional[Description]
+    RevisionId: Optional[String]
+
+
+class PutFunctionCodeSigningConfigRequest(ServiceRequest):
+    CodeSigningConfigArn: CodeSigningConfigArn
+    FunctionName: FunctionName
+
+
+class PutFunctionCodeSigningConfigResponse(TypedDict, total=False):
+    CodeSigningConfigArn: CodeSigningConfigArn
+    FunctionName: FunctionName
+
+
+class PutFunctionConcurrencyRequest(ServiceRequest):
+    FunctionName: FunctionName
+    ReservedConcurrentExecutions: ReservedConcurrentExecutions
+
+
+class PutFunctionEventInvokeConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[Qualifier]
+    MaximumRetryAttempts: Optional[MaximumRetryAttempts]
+    MaximumEventAgeInSeconds: Optional[MaximumEventAgeInSeconds]
+    DestinationConfig: Optional[DestinationConfig]
+
+
+class PutFunctionRecursionConfigRequest(ServiceRequest):
+    FunctionName: UnqualifiedFunctionName
+    RecursiveLoop: RecursiveLoop
+
+
+class PutFunctionRecursionConfigResponse(TypedDict, total=False):
+    RecursiveLoop: Optional[RecursiveLoop]
+
+
+class PutProvisionedConcurrencyConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Qualifier
+    ProvisionedConcurrentExecutions: PositiveInteger
+
+
+class PutProvisionedConcurrencyConfigResponse(TypedDict, total=False):
+    RequestedProvisionedConcurrentExecutions: Optional[PositiveInteger]
+    AvailableProvisionedConcurrentExecutions: Optional[NonNegativeInteger]
+    AllocatedProvisionedConcurrentExecutions: Optional[NonNegativeInteger]
+    Status: Optional[ProvisionedConcurrencyStatusEnum]
+    StatusReason: Optional[String]
+    LastModified: Optional[Timestamp]
+
+
+class PutRuntimeManagementConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[Qualifier]
+    UpdateRuntimeOn: UpdateRuntimeOn
+    RuntimeVersionArn: Optional[RuntimeVersionArn]
+
+
+class PutRuntimeManagementConfigResponse(TypedDict, total=False):
+    UpdateRuntimeOn: UpdateRuntimeOn
+    FunctionArn: FunctionArn
+    RuntimeVersionArn: Optional[RuntimeVersionArn]
+
+
+class RemoveLayerVersionPermissionRequest(ServiceRequest):
+    LayerName: LayerName
+    VersionNumber: LayerVersionNumber
+    StatementId: StatementId
+    RevisionId: Optional[String]
+
+
+class RemovePermissionRequest(ServiceRequest):
+    FunctionName: FunctionName
+    StatementId: NamespacedStatementId
+    Qualifier: Optional[Qualifier]
+    RevisionId: Optional[String]
+
+
+TagKeyList = List[TagKey]
+
+
+class TagResourceRequest(ServiceRequest):
+    Resource: TaggableResource
+    Tags: Tags
+
+
+class UntagResourceRequest(ServiceRequest):
+    Resource: TaggableResource
+    TagKeys: TagKeyList
+
+
+class UpdateAliasRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Name: Alias
+    FunctionVersion: Optional[Version]
+    Description: Optional[Description]
+    RoutingConfig: Optional[AliasRoutingConfiguration]
+    RevisionId: Optional[String]
+
+
+class UpdateCodeSigningConfigRequest(ServiceRequest):
+    CodeSigningConfigArn: CodeSigningConfigArn
+    Description: Optional[Description]
+    AllowedPublishers: Optional[AllowedPublishers]
+    CodeSigningPolicies: Optional[CodeSigningPolicies]
+
+
+class UpdateCodeSigningConfigResponse(TypedDict, total=False):
+    CodeSigningConfig: CodeSigningConfig
+
+
+class UpdateEventSourceMappingRequest(ServiceRequest):
+    UUID: String
+    FunctionName: Optional[FunctionName]
+    Enabled: Optional[Enabled]
+    BatchSize: Optional[BatchSize]
+    FilterCriteria: Optional[FilterCriteria]
+    MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds]
+    DestinationConfig: Optional[DestinationConfig]
+    MaximumRecordAgeInSeconds: Optional[MaximumRecordAgeInSeconds]
+    BisectBatchOnFunctionError: Optional[BisectBatchOnFunctionError]
+    MaximumRetryAttempts: Optional[MaximumRetryAttemptsEventSourceMapping]
+    ParallelizationFactor: Optional[ParallelizationFactor]
+    SourceAccessConfigurations: Optional[SourceAccessConfigurations]
+    TumblingWindowInSeconds: Optional[TumblingWindowInSeconds]
+    FunctionResponseTypes: Optional[FunctionResponseTypeList]
+    ScalingConfig: Optional[ScalingConfig]
+    DocumentDBEventSourceConfig: Optional[DocumentDBEventSourceConfig]
+    KMSKeyArn: Optional[KMSKeyArn]
+    MetricsConfig: Optional[EventSourceMappingMetricsConfig]
+    ProvisionedPollerConfig: Optional[ProvisionedPollerConfig]
+
+
+class UpdateFunctionCodeRequest(ServiceRequest):
+    FunctionName: FunctionName
+    ZipFile: Optional[Blob]
+    S3Bucket: Optional[S3Bucket]
+    S3Key: Optional[S3Key]
+    S3ObjectVersion: Optional[S3ObjectVersion]
+    ImageUri: Optional[String]
+    Publish: Optional[Boolean]
+    DryRun: Optional[Boolean]
+    RevisionId: Optional[String]
+    Architectures: Optional[ArchitecturesList]
+    SourceKMSKeyArn: Optional[KMSKeyArn]
+
+
+class UpdateFunctionConfigurationRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Role: Optional[RoleArn]
+    Handler: Optional[Handler]
+    Description: Optional[Description]
+    Timeout: Optional[Timeout]
+    MemorySize: Optional[MemorySize]
+    VpcConfig: Optional[VpcConfig]
+    Environment: Optional[Environment]
+    Runtime: Optional[Runtime]
+    DeadLetterConfig: Optional[DeadLetterConfig]
+    KMSKeyArn: Optional[KMSKeyArn]
+    TracingConfig: Optional[TracingConfig]
+    RevisionId: Optional[String]
+    Layers: Optional[LayerList]
+    FileSystemConfigs: Optional[FileSystemConfigList]
+    ImageConfig: Optional[ImageConfig]
+    EphemeralStorage: Optional[EphemeralStorage]
+    SnapStart: Optional[SnapStart]
+    LoggingConfig: Optional[LoggingConfig]
+
+
+class UpdateFunctionEventInvokeConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[Qualifier]
+    MaximumRetryAttempts: Optional[MaximumRetryAttempts]
+    MaximumEventAgeInSeconds: Optional[MaximumEventAgeInSeconds]
+    DestinationConfig: Optional[DestinationConfig]
+
+
+class UpdateFunctionUrlConfigRequest(ServiceRequest):
+    FunctionName: FunctionName
+    Qualifier: Optional[FunctionUrlQualifier]
+    AuthType: Optional[FunctionUrlAuthType]
+    Cors: Optional[Cors]
+    InvokeMode: Optional[InvokeMode]
+
+
+class UpdateFunctionUrlConfigResponse(TypedDict, total=False):
+    FunctionUrl: FunctionUrl
+    FunctionArn: FunctionArn
+    AuthType: FunctionUrlAuthType
+    Cors: Optional[Cors]
+    CreationTime: Timestamp
+    LastModifiedTime: Timestamp
+    InvokeMode: Optional[InvokeMode]
+
+
+class LambdaApi:
+    service = "lambda"
+    version = "2015-03-31"
+
+    @handler("AddLayerVersionPermission")
+    def add_layer_version_permission(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        version_number: LayerVersionNumber,
+        statement_id: StatementId,
+        action: LayerPermissionAllowedAction,
+        principal: LayerPermissionAllowedPrincipal,
+        organization_id: OrganizationId | None = None,
+        revision_id: String | None = None,
+        **kwargs,
+    ) -> AddLayerVersionPermissionResponse:
+        raise NotImplementedError
+
+    @handler("AddPermission")
+    def add_permission(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        statement_id: StatementId,
+        action: Action,
+        principal: Principal,
+        source_arn: Arn | None = None,
+        source_account: SourceOwner | None = None,
+        event_source_token: EventSourceToken | None = None,
+        qualifier: Qualifier | None = None,
+        revision_id: String | None = None,
+        principal_org_id: PrincipalOrgID | None = None,
+        function_url_auth_type: FunctionUrlAuthType | None = None,
+        **kwargs,
+    ) -> AddPermissionResponse:
+        raise NotImplementedError
+
+    @handler("CreateAlias")
+    def create_alias(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        name: Alias,
+        function_version: Version,
+        description: Description | None = None,
+        routing_config: AliasRoutingConfiguration | None = None,
+        **kwargs,
+    ) -> AliasConfiguration:
+        raise NotImplementedError
+
+    @handler("CreateCodeSigningConfig")
+    def create_code_signing_config(
+        self,
+        context: RequestContext,
+        allowed_publishers: AllowedPublishers,
+        description: Description | None = None,
+        code_signing_policies: CodeSigningPolicies | None = None,
+        tags: Tags | None = None,
+        **kwargs,
+    ) -> CreateCodeSigningConfigResponse:
+        raise NotImplementedError
+
+    @handler("CreateEventSourceMapping")
+    def create_event_source_mapping(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        event_source_arn: Arn | None = None,
+        enabled: Enabled | None = None,
+        batch_size: BatchSize | None = None,
+        filter_criteria: FilterCriteria | None = None,
+        maximum_batching_window_in_seconds: MaximumBatchingWindowInSeconds | None = None,
+        parallelization_factor: ParallelizationFactor | None = None,
+        starting_position: EventSourcePosition | None = None,
+        starting_position_timestamp: Date | None = None,
+        destination_config: DestinationConfig | None = None,
+        maximum_record_age_in_seconds: MaximumRecordAgeInSeconds | None = None,
+        bisect_batch_on_function_error: BisectBatchOnFunctionError | None = None,
+        maximum_retry_attempts: MaximumRetryAttemptsEventSourceMapping | None = None,
+        tags: Tags | None = None,
+        tumbling_window_in_seconds: TumblingWindowInSeconds | None = None,
+        topics: Topics | None = None,
+        queues: Queues | None = None,
+        source_access_configurations: SourceAccessConfigurations | None = None,
+        self_managed_event_source: SelfManagedEventSource | None = None,
+        function_response_types: FunctionResponseTypeList | None = None,
+        amazon_managed_kafka_event_source_config: AmazonManagedKafkaEventSourceConfig | None = None,
+        self_managed_kafka_event_source_config: SelfManagedKafkaEventSourceConfig | None = None,
+        scaling_config: ScalingConfig | None = None,
+        document_db_event_source_config: DocumentDBEventSourceConfig | None = None,
+        kms_key_arn: KMSKeyArn | None = None,
+        metrics_config: EventSourceMappingMetricsConfig | None = None,
+        provisioned_poller_config: ProvisionedPollerConfig | None = None,
+        **kwargs,
+    ) -> EventSourceMappingConfiguration:
+        raise NotImplementedError
+
+    @handler("CreateFunction")
+    def create_function(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        role: RoleArn,
+        code: FunctionCode,
+        runtime: Runtime | None = None,
+        handler: Handler | None = None,
+        description: Description | None = None,
+        timeout: Timeout | None = None,
+        memory_size: MemorySize | None = None,
+        publish: Boolean | None = None,
+        vpc_config: VpcConfig | None = None,
+        package_type: PackageType | None = None,
+        dead_letter_config: DeadLetterConfig | None = None,
+        environment: Environment | None = None,
+        kms_key_arn: KMSKeyArn | None = None,
+        tracing_config: TracingConfig | None = None,
+        tags: Tags | None = None,
+        layers: LayerList | None = None,
+        file_system_configs: FileSystemConfigList | None = None,
+        image_config: ImageConfig | None = None,
+        code_signing_config_arn: CodeSigningConfigArn | None = None,
+        architectures: ArchitecturesList | None = None,
+        ephemeral_storage: EphemeralStorage | None = None,
+        snap_start: SnapStart | None = None,
+        logging_config: LoggingConfig | None = None,
+        **kwargs,
+    ) -> FunctionConfiguration:
+        raise NotImplementedError
+
+    @handler("CreateFunctionUrlConfig")
+    def create_function_url_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        auth_type: FunctionUrlAuthType,
+        qualifier: FunctionUrlQualifier | None = None,
+        cors: Cors | None = None,
+        invoke_mode: InvokeMode | None = None,
+        **kwargs,
+    ) -> CreateFunctionUrlConfigResponse:
+        raise NotImplementedError
+
+    @handler("DeleteAlias")
+    def delete_alias(
+        self, context: RequestContext, function_name: FunctionName, name: Alias, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteCodeSigningConfig")
+    def delete_code_signing_config(
+        self, context: RequestContext, code_signing_config_arn: CodeSigningConfigArn, **kwargs
+    ) -> DeleteCodeSigningConfigResponse:
+        raise NotImplementedError
+
+    @handler("DeleteEventSourceMapping")
+    def delete_event_source_mapping(
+        self, context: RequestContext, uuid: String, **kwargs
+    ) -> EventSourceMappingConfiguration:
+        raise NotImplementedError
+
+    @handler("DeleteFunction")
+    def delete_function(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: Qualifier | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteFunctionCodeSigningConfig")
+    def delete_function_code_signing_config(
+        self, context: RequestContext, function_name: FunctionName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteFunctionConcurrency")
+    def delete_function_concurrency(
+        self, context: RequestContext, function_name: FunctionName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteFunctionEventInvokeConfig")
+    def delete_function_event_invoke_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: Qualifier | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteFunctionUrlConfig")
+    def delete_function_url_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: FunctionUrlQualifier | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteLayerVersion")
+    def delete_layer_version(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        version_number: LayerVersionNumber,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteProvisionedConcurrencyConfig")
+    def delete_provisioned_concurrency_config(
+        self, context: RequestContext, function_name: FunctionName, qualifier: Qualifier, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("GetAccountSettings")
+    def get_account_settings(self, context: RequestContext, **kwargs) -> GetAccountSettingsResponse:
+        raise NotImplementedError
+
+    @handler("GetAlias")
+    def get_alias(
+        self, context: RequestContext, function_name: FunctionName, name: Alias, **kwargs
+    ) -> AliasConfiguration:
+        raise NotImplementedError
+
+    @handler("GetCodeSigningConfig")
+    def get_code_signing_config(
+        self, context: RequestContext, code_signing_config_arn: CodeSigningConfigArn, **kwargs
+    ) -> GetCodeSigningConfigResponse:
+        raise NotImplementedError
+
+    @handler("GetEventSourceMapping")
+    def get_event_source_mapping(
+        self, context: RequestContext, uuid: String, **kwargs
+    ) -> EventSourceMappingConfiguration:
+        raise NotImplementedError
+
+    @handler("GetFunction")
+    def get_function(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        qualifier: Qualifier | None = None,
+        **kwargs,
+    ) -> GetFunctionResponse:
+        raise NotImplementedError
+
+    @handler("GetFunctionCodeSigningConfig")
+    def get_function_code_signing_config(
+        self, context: RequestContext, function_name: FunctionName, **kwargs
+    ) -> GetFunctionCodeSigningConfigResponse:
+        raise NotImplementedError
+
+    @handler("GetFunctionConcurrency")
+    def get_function_concurrency(
+        self, context: RequestContext, function_name: FunctionName, **kwargs
+    ) -> GetFunctionConcurrencyResponse:
+        raise NotImplementedError
+
+    @handler("GetFunctionConfiguration")
+    def get_function_configuration(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        qualifier: Qualifier | None = None,
+        **kwargs,
+    ) -> FunctionConfiguration:
+        raise NotImplementedError
+
+    @handler("GetFunctionEventInvokeConfig")
+    def get_function_event_invoke_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: Qualifier | None = None,
+        **kwargs,
+    ) -> FunctionEventInvokeConfig:
+        raise NotImplementedError
+
+    @handler("GetFunctionRecursionConfig")
+    def get_function_recursion_config(
+        self, context: RequestContext, function_name: UnqualifiedFunctionName, **kwargs
+    ) -> GetFunctionRecursionConfigResponse:
+        raise NotImplementedError
+
+    @handler("GetFunctionUrlConfig")
+    def get_function_url_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: FunctionUrlQualifier | None = None,
+        **kwargs,
+    ) -> GetFunctionUrlConfigResponse:
+        raise NotImplementedError
+
+    @handler("GetLayerVersion")
+    def get_layer_version(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        version_number: LayerVersionNumber,
+        **kwargs,
+    ) -> GetLayerVersionResponse:
+        raise NotImplementedError
+
+    @handler("GetLayerVersionByArn")
+    def get_layer_version_by_arn(
+        self, context: RequestContext, arn: LayerVersionArn, **kwargs
+    ) -> GetLayerVersionResponse:
+        raise NotImplementedError
+
+    @handler("GetLayerVersionPolicy")
+    def get_layer_version_policy(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        version_number: LayerVersionNumber,
+        **kwargs,
+    ) -> GetLayerVersionPolicyResponse:
+        raise NotImplementedError
+
+    @handler("GetPolicy")
+    def get_policy(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        qualifier: Qualifier | None = None,
+        **kwargs,
+    ) -> GetPolicyResponse:
+        raise NotImplementedError
+
+    @handler("GetProvisionedConcurrencyConfig")
+    def get_provisioned_concurrency_config(
+        self, context: RequestContext, function_name: FunctionName, qualifier: Qualifier, **kwargs
+    ) -> GetProvisionedConcurrencyConfigResponse:
+        raise NotImplementedError
+
+    @handler("GetRuntimeManagementConfig")
+    def get_runtime_management_config(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        qualifier: Qualifier | None = None,
+        **kwargs,
+    ) -> GetRuntimeManagementConfigResponse:
+        raise NotImplementedError
+
+    @handler("Invoke")
+    def invoke(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        invocation_type: InvocationType | None = None,
+        log_type: LogType | None = None,
+        client_context: String | None = None,
+        payload: IO[Blob] | None = None,
+        qualifier: Qualifier | None = None,
+        **kwargs,
+    ) -> InvocationResponse:
+        raise NotImplementedError
+
+    @handler("InvokeAsync")
+    def invoke_async(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        invoke_args: IO[BlobStream],
+        **kwargs,
+    ) -> InvokeAsyncResponse:
+        raise NotImplementedError
+
+    @handler("InvokeWithResponseStream")
+    def invoke_with_response_stream(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        invocation_type: ResponseStreamingInvocationType | None = None,
+        log_type: LogType | None = None,
+        client_context: String | None = None,
+        qualifier: Qualifier | None = None,
+        payload: IO[Blob] | None = None,
+        **kwargs,
+    ) -> InvokeWithResponseStreamResponse:
+        raise NotImplementedError
+
+    @handler("ListAliases")
+    def list_aliases(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        function_version: Version | None = None,
+        marker: String | None = None,
+        max_items: MaxListItems | None = None,
+        **kwargs,
+    ) -> ListAliasesResponse:
+        raise NotImplementedError
+
+    @handler("ListCodeSigningConfigs")
+    def list_code_signing_configs(
+        self,
+        context: RequestContext,
+        marker: String | None = None,
+        max_items: MaxListItems | None = None,
+        **kwargs,
+    ) -> ListCodeSigningConfigsResponse:
+        raise NotImplementedError
+
+    @handler("ListEventSourceMappings")
+    def list_event_source_mappings(
+        self,
+        context: RequestContext,
+        event_source_arn: Arn | None = None,
+        function_name: FunctionName | None = None,
+        marker: String | None = None,
+        max_items: MaxListItems | None = None,
+        **kwargs,
+    ) -> ListEventSourceMappingsResponse:
+        raise NotImplementedError
+
+    @handler("ListFunctionEventInvokeConfigs")
+    def list_function_event_invoke_configs(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        marker: String | None = None,
+        max_items: MaxFunctionEventInvokeConfigListItems | None = None,
+        **kwargs,
+    ) -> ListFunctionEventInvokeConfigsResponse:
+        raise NotImplementedError
+
+    @handler("ListFunctionUrlConfigs")
+    def list_function_url_configs(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        marker: String | None = None,
+        max_items: MaxItems | None = None,
+        **kwargs,
+    ) -> ListFunctionUrlConfigsResponse:
+        raise NotImplementedError
+
+    @handler("ListFunctions")
+    def list_functions(
+        self,
+        context: RequestContext,
+        master_region: MasterRegion | None = None,
+        function_version: FunctionVersion | None = None,
+        marker: String | None = None,
+        max_items: MaxListItems | None = None,
+        **kwargs,
+    ) -> ListFunctionsResponse:
+        raise NotImplementedError
+
+    @handler("ListFunctionsByCodeSigningConfig")
+    def list_functions_by_code_signing_config(
+        self,
+        context: RequestContext,
+        code_signing_config_arn: CodeSigningConfigArn,
+        marker: String | None = None,
+        max_items: MaxListItems | None = None,
+        **kwargs,
+    ) -> ListFunctionsByCodeSigningConfigResponse:
+        raise NotImplementedError
+
+    @handler("ListLayerVersions")
+    def list_layer_versions(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        compatible_runtime: Runtime | None = None,
+        marker: String | None = None,
+        max_items: MaxLayerListItems | None = None,
+        compatible_architecture: Architecture | None = None,
+        **kwargs,
+    ) -> ListLayerVersionsResponse:
+        raise NotImplementedError
+
+    @handler("ListLayers")
+    def list_layers(
+        self,
+        context: RequestContext,
+        compatible_runtime: Runtime | None = None,
+        marker: String | None = None,
+        max_items: MaxLayerListItems | None = None,
+        compatible_architecture: Architecture | None = None,
+        **kwargs,
+    ) -> ListLayersResponse:
+        raise NotImplementedError
+
+    @handler("ListProvisionedConcurrencyConfigs")
+    def list_provisioned_concurrency_configs(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        marker: String | None = None,
+        max_items: MaxProvisionedConcurrencyConfigListItems | None = None,
+        **kwargs,
+    ) -> ListProvisionedConcurrencyConfigsResponse:
+        raise NotImplementedError
+
+    @handler("ListTags")
+    def list_tags(
+        self, context: RequestContext, resource: TaggableResource, **kwargs
+    ) -> ListTagsResponse:
+        raise NotImplementedError
+
+    @handler("ListVersionsByFunction")
+    def list_versions_by_function(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        marker: String | None = None,
+        max_items: MaxListItems | None = None,
+        **kwargs,
+    ) -> ListVersionsByFunctionResponse:
+        raise NotImplementedError
+
+    @handler("PublishLayerVersion")
+    def publish_layer_version(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        content: LayerVersionContentInput,
+        description: Description | None = None,
+        compatible_runtimes: CompatibleRuntimes | None = None,
+        license_info: LicenseInfo | None = None,
+        compatible_architectures: CompatibleArchitectures | None = None,
+        **kwargs,
+    ) -> PublishLayerVersionResponse:
+        raise NotImplementedError
+
+    @handler("PublishVersion")
+    def publish_version(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        code_sha256: String | None = None,
+        description: Description | None = None,
+        revision_id: String | None = None,
+        **kwargs,
+    ) -> FunctionConfiguration:
+        raise NotImplementedError
+
+    @handler("PutFunctionCodeSigningConfig")
+    def put_function_code_signing_config(
+        self,
+        context: RequestContext,
+        code_signing_config_arn: CodeSigningConfigArn,
+        function_name: FunctionName,
+        **kwargs,
+    ) -> PutFunctionCodeSigningConfigResponse:
+        raise NotImplementedError
+
+    @handler("PutFunctionConcurrency")
+    def put_function_concurrency(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        reserved_concurrent_executions: ReservedConcurrentExecutions,
+        **kwargs,
+    ) -> Concurrency:
+        raise NotImplementedError
+
+    @handler("PutFunctionEventInvokeConfig")
+    def put_function_event_invoke_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: Qualifier | None = None,
+        maximum_retry_attempts: MaximumRetryAttempts | None = None,
+        maximum_event_age_in_seconds: MaximumEventAgeInSeconds | None = None,
+        destination_config: DestinationConfig | None = None,
+        **kwargs,
+    ) -> FunctionEventInvokeConfig:
+        raise NotImplementedError
+
+    @handler("PutFunctionRecursionConfig")
+    def put_function_recursion_config(
+        self,
+        context: RequestContext,
+        function_name: UnqualifiedFunctionName,
+        recursive_loop: RecursiveLoop,
+        **kwargs,
+    ) -> PutFunctionRecursionConfigResponse:
+        raise NotImplementedError
+
+    @handler("PutProvisionedConcurrencyConfig")
+    def put_provisioned_concurrency_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: Qualifier,
+        provisioned_concurrent_executions: PositiveInteger,
+        **kwargs,
+    ) -> PutProvisionedConcurrencyConfigResponse:
+        raise NotImplementedError
+
+    @handler("PutRuntimeManagementConfig")
+    def put_runtime_management_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        update_runtime_on: UpdateRuntimeOn,
+        qualifier: Qualifier | None = None,
+        runtime_version_arn: RuntimeVersionArn | None = None,
+        **kwargs,
+    ) -> PutRuntimeManagementConfigResponse:
+        raise NotImplementedError
+
+    @handler("RemoveLayerVersionPermission")
+    def remove_layer_version_permission(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        version_number: LayerVersionNumber,
+        statement_id: StatementId,
+        revision_id: String | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("RemovePermission")
+    def remove_permission(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        statement_id: NamespacedStatementId,
+        qualifier: Qualifier | None = None,
+        revision_id: String | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagResource")
+    def tag_resource(
+        self, context: RequestContext, resource: TaggableResource, tags: Tags, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagResource")
+    def untag_resource(
+        self, context: RequestContext, resource: TaggableResource, tag_keys: TagKeyList, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateAlias")
+    def update_alias(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        name: Alias,
+        function_version: Version | None = None,
+        description: Description | None = None,
+        routing_config: AliasRoutingConfiguration | None = None,
+        revision_id: String | None = None,
+        **kwargs,
+    ) -> AliasConfiguration:
+        raise NotImplementedError
+
+    @handler("UpdateCodeSigningConfig")
+    def update_code_signing_config(
+        self,
+        context: RequestContext,
+        code_signing_config_arn: CodeSigningConfigArn,
+        description: Description | None = None,
+        allowed_publishers: AllowedPublishers | None = None,
+        code_signing_policies: CodeSigningPolicies | None = None,
+        **kwargs,
+    ) -> UpdateCodeSigningConfigResponse:
+        raise NotImplementedError
+
+    @handler("UpdateEventSourceMapping")
+    def update_event_source_mapping(
+        self,
+        context: RequestContext,
+        uuid: String,
+        function_name: FunctionName | None = None,
+        enabled: Enabled | None = None,
+        batch_size: BatchSize | None = None,
+        filter_criteria: FilterCriteria | None = None,
+        maximum_batching_window_in_seconds: MaximumBatchingWindowInSeconds | None = None,
+        destination_config: DestinationConfig | None = None,
+        maximum_record_age_in_seconds: MaximumRecordAgeInSeconds | None = None,
+        bisect_batch_on_function_error: BisectBatchOnFunctionError | None = None,
+        maximum_retry_attempts: MaximumRetryAttemptsEventSourceMapping | None = None,
+        parallelization_factor: ParallelizationFactor | None = None,
+        source_access_configurations: SourceAccessConfigurations | None = None,
+        tumbling_window_in_seconds: TumblingWindowInSeconds | None = None,
+        function_response_types: FunctionResponseTypeList | None = None,
+        scaling_config: ScalingConfig | None = None,
+        document_db_event_source_config: DocumentDBEventSourceConfig | None = None,
+        kms_key_arn: KMSKeyArn | None = None,
+        metrics_config: EventSourceMappingMetricsConfig | None = None,
+        provisioned_poller_config: ProvisionedPollerConfig | None = None,
+        **kwargs,
+    ) -> EventSourceMappingConfiguration:
+        raise NotImplementedError
+
+    @handler("UpdateFunctionCode")
+    def update_function_code(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        zip_file: Blob | None = None,
+        s3_bucket: S3Bucket | None = None,
+        s3_key: S3Key | None = None,
+        s3_object_version: S3ObjectVersion | None = None,
+        image_uri: String | None = None,
+        publish: Boolean | None = None,
+        dry_run: Boolean | None = None,
+        revision_id: String | None = None,
+        architectures: ArchitecturesList | None = None,
+        source_kms_key_arn: KMSKeyArn | None = None,
+        **kwargs,
+    ) -> FunctionConfiguration:
+        raise NotImplementedError
+
+    @handler("UpdateFunctionConfiguration")
+    def update_function_configuration(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        role: RoleArn | None = None,
+        handler: Handler | None = None,
+        description: Description | None = None,
+        timeout: Timeout | None = None,
+        memory_size: MemorySize | None = None,
+        vpc_config: VpcConfig | None = None,
+        environment: Environment | None = None,
+        runtime: Runtime | None = None,
+        dead_letter_config: DeadLetterConfig | None = None,
+        kms_key_arn: KMSKeyArn | None = None,
+        tracing_config: TracingConfig | None = None,
+        revision_id: String | None = None,
+        layers: LayerList | None = None,
+        file_system_configs: FileSystemConfigList | None = None,
+        image_config: ImageConfig | None = None,
+        ephemeral_storage: EphemeralStorage | None = None,
+        snap_start: SnapStart | None = None,
+        logging_config: LoggingConfig | None = None,
+        **kwargs,
+    ) -> FunctionConfiguration:
+        raise NotImplementedError
+
+    @handler("UpdateFunctionEventInvokeConfig")
+    def update_function_event_invoke_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: Qualifier | None = None,
+        maximum_retry_attempts: MaximumRetryAttempts | None = None,
+        maximum_event_age_in_seconds: MaximumEventAgeInSeconds | None = None,
+        destination_config: DestinationConfig | None = None,
+        **kwargs,
+    ) -> FunctionEventInvokeConfig:
+        raise NotImplementedError
+
+    @handler("UpdateFunctionUrlConfig")
+    def update_function_url_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: FunctionUrlQualifier | None = None,
+        auth_type: FunctionUrlAuthType | None = None,
+        cors: Cors | None = None,
+        invoke_mode: InvokeMode | None = None,
+        **kwargs,
+    ) -> UpdateFunctionUrlConfigResponse:
+        raise NotImplementedError
diff --git
a/localstack-core/localstack/aws/api/logs/__init__.py b/localstack-core/localstack/aws/api/logs/__init__.py new file mode 100644 index 0000000000000..66088f97bc672 --- /dev/null +++ b/localstack-core/localstack/aws/api/logs/__init__.py @@ -0,0 +1,3031 @@ +from enum import StrEnum +from typing import Dict, Iterator, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AccessPolicy = str +AccountId = str +AccountPolicyDocument = str +AddKeyValue = str +AllowedActionForAllowVendedLogsDeliveryForResource = str +AmazonResourceName = str +AnomalyDetectorArn = str +AnomalyId = str +ApplyOnTransformedLogs = bool +Arn = str +Baseline = bool +Boolean = bool +ClientToken = str +CollectionRetentionDays = int +Column = str +DataProtectionPolicyDocument = str +Days = int +DefaultValue = float +Delimiter = str +DeliveryDestinationName = str +DeliveryDestinationPolicy = str +DeliveryId = str +DeliverySourceName = str +DeliverySuffixPath = str +Descending = bool +DescribeLimit = int +DescribeQueriesMaxResults = int +Description = str +DestinationArn = str +DestinationField = str +DestinationName = str +DetectorKmsKeyArn = str +DetectorName = str +DimensionsKey = str +DimensionsValue = str +DynamicTokenPosition = int +EncryptionKey = str +EntityAttributesKey = str +EntityAttributesValue = str +EntityKeyAttributesKey = str +EntityKeyAttributesValue = str +EventId = str +EventMessage = str +EventsLimit = int +ExportDestinationBucket = str +ExportDestinationPrefix = str +ExportTaskId = str +ExportTaskName = str +ExportTaskStatusMessage = str +Field = str +FieldDelimiter = str +FieldHeader = str +FieldIndexName = str +FilterCount = int +FilterName = str +FilterPattern = str +Flatten = bool +Force = bool +ForceUpdate = bool +FromKey = str +GrokMatch = str +IncludeLinkedAccounts = bool +InferredTokenName = str +Integer = int +IntegrationName = str +IntegrationNamePrefix = str +IntegrationStatusMessage = str +Interleaved = bool +IsSampled = bool +Key = str +KeyPrefix = str +KeyValueDelimiter = str +KmsKeyId = str +ListAnomaliesLimit = int +ListLimit = int +ListLogAnomalyDetectorsLimit = int +ListLogGroupsForQueryMaxResults = int +Locale = str +LogEventIndex = int +LogGroupArn = str +LogGroupIdentifier = str +LogGroupName = str +LogGroupNamePattern = str +LogGroupNameRegexPattern = str +LogRecordPointer = str +LogStreamName = str +LogStreamSearchedCompletely = bool +LogType = str +MatchPattern = str +Message = str +MetricName = str +MetricNamespace = str +MetricValue = str +NextToken = str +NonMatchValue = str +OpenSearchApplicationEndpoint = str +OpenSearchApplicationId = str +OpenSearchCollectionEndpoint = str +OpenSearchDataSourceName = str +OpenSearchPolicyName = str +OpenSearchWorkspaceId = str +OverwriteIfExists = bool +ParserFieldDelimiter = str +PatternId = str +PatternRegex = str +PatternString = str +Percentage = int +PolicyDocument = str +PolicyName = str +Priority = str +QueryCharOffset = int +QueryDefinitionName = str +QueryDefinitionString = str +QueryId = str +QueryListMaxResults = int +QueryString = str +QuoteCharacter = str +RenameTo = str +RequestId = str +ResourceIdentifier = str +ResourceType = str +RoleArn = str +SelectionCriteria = str +SequenceToken = str +Service = str +SessionId = str +Source = str +SourceTimezone = str +SplitStringDelimiter = str +StartFromHead = bool +StatsValue = float +Success = bool +TagKey = str +TagValue = str +Target = str +TargetArn = str +TargetFormat = str +TargetTimezone = str +Time = str +ToKey 
= str +Token = str +TokenString = str +TransformedEventMessage = str +Unmask = bool +Value = str +ValueKey = str +WithKey = str + + +class AnomalyDetectorStatus(StrEnum): + INITIALIZING = "INITIALIZING" + TRAINING = "TRAINING" + ANALYZING = "ANALYZING" + FAILED = "FAILED" + DELETED = "DELETED" + PAUSED = "PAUSED" + + +class DataProtectionStatus(StrEnum): + ACTIVATED = "ACTIVATED" + DELETED = "DELETED" + ARCHIVED = "ARCHIVED" + DISABLED = "DISABLED" + + +class DeliveryDestinationType(StrEnum): + S3 = "S3" + CWL = "CWL" + FH = "FH" + + +class Distribution(StrEnum): + Random = "Random" + ByLogStream = "ByLogStream" + + +class EntityRejectionErrorType(StrEnum): + InvalidEntity = "InvalidEntity" + InvalidTypeValue = "InvalidTypeValue" + InvalidKeyAttributes = "InvalidKeyAttributes" + InvalidAttributes = "InvalidAttributes" + EntitySizeTooLarge = "EntitySizeTooLarge" + UnsupportedLogGroupType = "UnsupportedLogGroupType" + MissingRequiredFields = "MissingRequiredFields" + + +class EvaluationFrequency(StrEnum): + ONE_MIN = "ONE_MIN" + FIVE_MIN = "FIVE_MIN" + TEN_MIN = "TEN_MIN" + FIFTEEN_MIN = "FIFTEEN_MIN" + THIRTY_MIN = "THIRTY_MIN" + ONE_HOUR = "ONE_HOUR" + + +class ExportTaskStatusCode(StrEnum): + CANCELLED = "CANCELLED" + COMPLETED = "COMPLETED" + FAILED = "FAILED" + PENDING = "PENDING" + PENDING_CANCEL = "PENDING_CANCEL" + RUNNING = "RUNNING" + + +class FlattenedElement(StrEnum): + first = "first" + last = "last" + + +class IndexSource(StrEnum): + ACCOUNT = "ACCOUNT" + LOG_GROUP = "LOG_GROUP" + + +class InheritedProperty(StrEnum): + ACCOUNT_DATA_PROTECTION = "ACCOUNT_DATA_PROTECTION" + + +class IntegrationStatus(StrEnum): + PROVISIONING = "PROVISIONING" + ACTIVE = "ACTIVE" + FAILED = "FAILED" + + +class IntegrationType(StrEnum): + OPENSEARCH = "OPENSEARCH" + + +class LogGroupClass(StrEnum): + STANDARD = "STANDARD" + INFREQUENT_ACCESS = "INFREQUENT_ACCESS" + DELIVERY = "DELIVERY" + + +class OpenSearchResourceStatusType(StrEnum): + ACTIVE = "ACTIVE" + NOT_FOUND = "NOT_FOUND" + ERROR = "ERROR" + + +class OrderBy(StrEnum): + LogStreamName = "LogStreamName" + LastEventTime = "LastEventTime" + + +class OutputFormat(StrEnum): + json = "json" + plain = "plain" + w3c = "w3c" + raw = "raw" + parquet = "parquet" + + +class PolicyType(StrEnum): + DATA_PROTECTION_POLICY = "DATA_PROTECTION_POLICY" + SUBSCRIPTION_FILTER_POLICY = "SUBSCRIPTION_FILTER_POLICY" + FIELD_INDEX_POLICY = "FIELD_INDEX_POLICY" + TRANSFORMER_POLICY = "TRANSFORMER_POLICY" + + +class QueryLanguage(StrEnum): + CWLI = "CWLI" + SQL = "SQL" + PPL = "PPL" + + +class QueryStatus(StrEnum): + Scheduled = "Scheduled" + Running = "Running" + Complete = "Complete" + Failed = "Failed" + Cancelled = "Cancelled" + Timeout = "Timeout" + Unknown = "Unknown" + + +class Scope(StrEnum): + ALL = "ALL" + + +class StandardUnit(StrEnum): + Seconds = "Seconds" + Microseconds = "Microseconds" + Milliseconds = "Milliseconds" + Bytes = "Bytes" + Kilobytes = "Kilobytes" + Megabytes = "Megabytes" + Gigabytes = "Gigabytes" + Terabytes = "Terabytes" + Bits = "Bits" + Kilobits = "Kilobits" + Megabits = "Megabits" + Gigabits = "Gigabits" + Terabits = "Terabits" + Percent = "Percent" + Count = "Count" + Bytes_Second = "Bytes/Second" + Kilobytes_Second = "Kilobytes/Second" + Megabytes_Second = "Megabytes/Second" + Gigabytes_Second = "Gigabytes/Second" + Terabytes_Second = "Terabytes/Second" + Bits_Second = "Bits/Second" + Kilobits_Second = "Kilobits/Second" + Megabits_Second = "Megabits/Second" + Gigabits_Second = "Gigabits/Second" + Terabits_Second = "Terabits/Second" 
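+    # the serialized rate units contain "/" (e.g. "Bytes/Second"), which is not valid in a Python identifier, so the member names substitute "_Second"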
+ Count_Second = "Count/Second" + None_ = "None" + + +class State(StrEnum): + Active = "Active" + Suppressed = "Suppressed" + Baseline = "Baseline" + + +class SuppressionState(StrEnum): + SUPPRESSED = "SUPPRESSED" + UNSUPPRESSED = "UNSUPPRESSED" + + +class SuppressionType(StrEnum): + LIMITED = "LIMITED" + INFINITE = "INFINITE" + + +class SuppressionUnit(StrEnum): + SECONDS = "SECONDS" + MINUTES = "MINUTES" + HOURS = "HOURS" + + +class Type(StrEnum): + boolean = "boolean" + integer = "integer" + double = "double" + string = "string" + + +class AccessDeniedException(ServiceException): + code: str = "AccessDeniedException" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class DataAlreadyAcceptedException(ServiceException): + code: str = "DataAlreadyAcceptedException" + sender_fault: bool = False + status_code: int = 400 + expectedSequenceToken: Optional[SequenceToken] + + +class InvalidOperationException(ServiceException): + code: str = "InvalidOperationException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidParameterException(ServiceException): + code: str = "InvalidParameterException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidSequenceTokenException(ServiceException): + code: str = "InvalidSequenceTokenException" + sender_fault: bool = False + status_code: int = 400 + expectedSequenceToken: Optional[SequenceToken] + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class QueryCompileErrorLocation(TypedDict, total=False): + startCharOffset: Optional[QueryCharOffset] + endCharOffset: Optional[QueryCharOffset] + + +class QueryCompileError(TypedDict, total=False): + location: Optional[QueryCompileErrorLocation] + message: Optional[Message] + + +class MalformedQueryException(ServiceException): + code: str = "MalformedQueryException" + sender_fault: bool = False + status_code: int = 400 + queryCompileError: Optional[QueryCompileError] + + +class OperationAbortedException(ServiceException): + code: str = "OperationAbortedException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceAlreadyExistsException(ServiceException): + code: str = "ResourceAlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class ServiceQuotaExceededException(ServiceException): + code: str = "ServiceQuotaExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class ServiceUnavailableException(ServiceException): + code: str = "ServiceUnavailableException" + sender_fault: bool = False + status_code: int = 400 + + +class SessionStreamingException(ServiceException): + code: str = "SessionStreamingException" + sender_fault: bool = False + status_code: int = 400 + + +class SessionTimeoutException(ServiceException): + code: str = "SessionTimeoutException" + sender_fault: bool = False + status_code: int = 400 + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTagsException(ServiceException): + code: str = "TooManyTagsException" + sender_fault: bool = False + status_code: int = 400 + resourceName: 
Optional[AmazonResourceName] + + +class UnrecognizedClientException(ServiceException): + code: str = "UnrecognizedClientException" + sender_fault: bool = False + status_code: int = 400 + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = False + status_code: int = 400 + + +AccountIds = List[AccountId] +Timestamp = int + + +class AccountPolicy(TypedDict, total=False): + policyName: Optional[PolicyName] + policyDocument: Optional[AccountPolicyDocument] + lastUpdatedTime: Optional[Timestamp] + policyType: Optional[PolicyType] + scope: Optional[Scope] + selectionCriteria: Optional[SelectionCriteria] + accountId: Optional[AccountId] + + +AccountPolicies = List[AccountPolicy] + + +class AddKeyEntry(TypedDict, total=False): + key: Key + value: AddKeyValue + overwriteIfExists: Optional[OverwriteIfExists] + + +AddKeyEntries = List[AddKeyEntry] + + +class AddKeys(TypedDict, total=False): + entries: AddKeyEntries + + +AllowedFieldDelimiters = List[FieldDelimiter] + + +class RecordField(TypedDict, total=False): + name: Optional[FieldHeader] + mandatory: Optional[Boolean] + + +AllowedFields = List[RecordField] +EpochMillis = int +LogGroupArnList = List[LogGroupArn] +TokenValue = int +Enumerations = Dict[TokenString, TokenValue] + + +class PatternToken(TypedDict, total=False): + dynamicTokenPosition: Optional[DynamicTokenPosition] + isDynamic: Optional[Boolean] + tokenString: Optional[TokenString] + enumerations: Optional[Enumerations] + inferredTokenName: Optional[InferredTokenName] + + +PatternTokens = List[PatternToken] + + +class LogEvent(TypedDict, total=False): + timestamp: Optional[Timestamp] + message: Optional[EventMessage] + + +LogSamples = List[LogEvent] +Count = int +Histogram = Dict[Time, Count] + + +class Anomaly(TypedDict, total=False): + anomalyId: AnomalyId + patternId: PatternId + anomalyDetectorArn: AnomalyDetectorArn + patternString: PatternString + patternRegex: Optional[PatternRegex] + priority: Optional[Priority] + firstSeen: EpochMillis + lastSeen: EpochMillis + description: Description + active: Boolean + state: State + histogram: Histogram + logSamples: LogSamples + patternTokens: PatternTokens + logGroupArnList: LogGroupArnList + suppressed: Optional[Boolean] + suppressedDate: Optional[EpochMillis] + suppressedUntil: Optional[EpochMillis] + isPatternLevelSuppression: Optional[Boolean] + + +Anomalies = List[Anomaly] +AnomalyVisibilityTime = int + + +class AnomalyDetector(TypedDict, total=False): + anomalyDetectorArn: Optional[AnomalyDetectorArn] + detectorName: Optional[DetectorName] + logGroupArnList: Optional[LogGroupArnList] + evaluationFrequency: Optional[EvaluationFrequency] + filterPattern: Optional[FilterPattern] + anomalyDetectorStatus: Optional[AnomalyDetectorStatus] + kmsKeyId: Optional[KmsKeyId] + creationTimeStamp: Optional[EpochMillis] + lastModifiedTimeStamp: Optional[EpochMillis] + anomalyVisibilityTime: Optional[AnomalyVisibilityTime] + + +AnomalyDetectors = List[AnomalyDetector] + + +class AssociateKmsKeyRequest(ServiceRequest): + logGroupName: Optional[LogGroupName] + kmsKeyId: KmsKeyId + resourceIdentifier: Optional[ResourceIdentifier] + + +Columns = List[Column] + + +class CSV(TypedDict, total=False): + quoteCharacter: Optional[QuoteCharacter] + delimiter: Optional[Delimiter] + columns: Optional[Columns] + source: Optional[Source] + + +class CancelExportTaskRequest(ServiceRequest): + taskId: ExportTaskId + + +RecordFields = List[FieldHeader] +OutputFormats = List[OutputFormat] + + +class 
S3DeliveryConfiguration(TypedDict, total=False): + suffixPath: Optional[DeliverySuffixPath] + enableHiveCompatiblePath: Optional[Boolean] + + +class ConfigurationTemplateDeliveryConfigValues(TypedDict, total=False): + recordFields: Optional[RecordFields] + fieldDelimiter: Optional[FieldDelimiter] + s3DeliveryConfiguration: Optional[S3DeliveryConfiguration] + + +class ConfigurationTemplate(TypedDict, total=False): + service: Optional[Service] + logType: Optional[LogType] + resourceType: Optional[ResourceType] + deliveryDestinationType: Optional[DeliveryDestinationType] + defaultDeliveryConfigValues: Optional[ConfigurationTemplateDeliveryConfigValues] + allowedFields: Optional[AllowedFields] + allowedOutputFormats: Optional[OutputFormats] + allowedActionForAllowVendedLogsDeliveryForResource: Optional[ + AllowedActionForAllowVendedLogsDeliveryForResource + ] + allowedFieldDelimiters: Optional[AllowedFieldDelimiters] + allowedSuffixPathFields: Optional[RecordFields] + + +ConfigurationTemplates = List[ConfigurationTemplate] + + +class CopyValueEntry(TypedDict, total=False): + source: Source + target: Target + overwriteIfExists: Optional[OverwriteIfExists] + + +CopyValueEntries = List[CopyValueEntry] + + +class CopyValue(TypedDict, total=False): + entries: CopyValueEntries + + +Tags = Dict[TagKey, TagValue] + + +class CreateDeliveryRequest(ServiceRequest): + deliverySourceName: DeliverySourceName + deliveryDestinationArn: Arn + recordFields: Optional[RecordFields] + fieldDelimiter: Optional[FieldDelimiter] + s3DeliveryConfiguration: Optional[S3DeliveryConfiguration] + tags: Optional[Tags] + + +class Delivery(TypedDict, total=False): + id: Optional[DeliveryId] + arn: Optional[Arn] + deliverySourceName: Optional[DeliverySourceName] + deliveryDestinationArn: Optional[Arn] + deliveryDestinationType: Optional[DeliveryDestinationType] + recordFields: Optional[RecordFields] + fieldDelimiter: Optional[FieldDelimiter] + s3DeliveryConfiguration: Optional[S3DeliveryConfiguration] + tags: Optional[Tags] + + +class CreateDeliveryResponse(TypedDict, total=False): + delivery: Optional[Delivery] + + +CreateExportTaskRequest = TypedDict( + "CreateExportTaskRequest", + { + "taskName": Optional[ExportTaskName], + "logGroupName": LogGroupName, + "logStreamNamePrefix": Optional[LogStreamName], + "from": Timestamp, + "to": Timestamp, + "destination": ExportDestinationBucket, + "destinationPrefix": Optional[ExportDestinationPrefix], + }, + total=False, +) + + +class CreateExportTaskResponse(TypedDict, total=False): + taskId: Optional[ExportTaskId] + + +class CreateLogAnomalyDetectorRequest(ServiceRequest): + logGroupArnList: LogGroupArnList + detectorName: Optional[DetectorName] + evaluationFrequency: Optional[EvaluationFrequency] + filterPattern: Optional[FilterPattern] + kmsKeyId: Optional[DetectorKmsKeyArn] + anomalyVisibilityTime: Optional[AnomalyVisibilityTime] + tags: Optional[Tags] + + +class CreateLogAnomalyDetectorResponse(TypedDict, total=False): + anomalyDetectorArn: Optional[AnomalyDetectorArn] + + +class CreateLogGroupRequest(ServiceRequest): + logGroupName: LogGroupName + kmsKeyId: Optional[KmsKeyId] + tags: Optional[Tags] + logGroupClass: Optional[LogGroupClass] + + +class CreateLogStreamRequest(ServiceRequest): + logGroupName: LogGroupName + logStreamName: LogStreamName + + +DashboardViewerPrincipals = List[Arn] +MatchPatterns = List[MatchPattern] + + +class DateTimeConverter(TypedDict, total=False): + source: Source + target: Target + targetFormat: Optional[TargetFormat] + matchPatterns: MatchPatterns 
+ sourceTimezone: Optional[SourceTimezone] + targetTimezone: Optional[TargetTimezone] + locale: Optional[Locale] + + +class DeleteAccountPolicyRequest(ServiceRequest): + policyName: PolicyName + policyType: PolicyType + + +class DeleteDataProtectionPolicyRequest(ServiceRequest): + logGroupIdentifier: LogGroupIdentifier + + +class DeleteDeliveryDestinationPolicyRequest(ServiceRequest): + deliveryDestinationName: DeliveryDestinationName + + +class DeleteDeliveryDestinationRequest(ServiceRequest): + name: DeliveryDestinationName + + +class DeleteDeliveryRequest(ServiceRequest): + id: DeliveryId + + +class DeleteDeliverySourceRequest(ServiceRequest): + name: DeliverySourceName + + +class DeleteDestinationRequest(ServiceRequest): + destinationName: DestinationName + + +class DeleteIndexPolicyRequest(ServiceRequest): + logGroupIdentifier: LogGroupIdentifier + + +class DeleteIndexPolicyResponse(TypedDict, total=False): + pass + + +class DeleteIntegrationRequest(ServiceRequest): + integrationName: IntegrationName + force: Optional[Force] + + +class DeleteIntegrationResponse(TypedDict, total=False): + pass + + +DeleteWithKeys = List[WithKey] + + +class DeleteKeys(TypedDict, total=False): + withKeys: DeleteWithKeys + + +class DeleteLogAnomalyDetectorRequest(ServiceRequest): + anomalyDetectorArn: AnomalyDetectorArn + + +class DeleteLogGroupRequest(ServiceRequest): + logGroupName: LogGroupName + + +class DeleteLogStreamRequest(ServiceRequest): + logGroupName: LogGroupName + logStreamName: LogStreamName + + +class DeleteMetricFilterRequest(ServiceRequest): + logGroupName: LogGroupName + filterName: FilterName + + +class DeleteQueryDefinitionRequest(ServiceRequest): + queryDefinitionId: QueryId + + +class DeleteQueryDefinitionResponse(TypedDict, total=False): + success: Optional[Success] + + +class DeleteResourcePolicyRequest(ServiceRequest): + policyName: Optional[PolicyName] + + +class DeleteRetentionPolicyRequest(ServiceRequest): + logGroupName: LogGroupName + + +class DeleteSubscriptionFilterRequest(ServiceRequest): + logGroupName: LogGroupName + filterName: FilterName + + +class DeleteTransformerRequest(ServiceRequest): + logGroupIdentifier: LogGroupIdentifier + + +Deliveries = List[Delivery] + + +class DeliveryDestinationConfiguration(TypedDict, total=False): + destinationResourceArn: Arn + + +class DeliveryDestination(TypedDict, total=False): + name: Optional[DeliveryDestinationName] + arn: Optional[Arn] + deliveryDestinationType: Optional[DeliveryDestinationType] + outputFormat: Optional[OutputFormat] + deliveryDestinationConfiguration: Optional[DeliveryDestinationConfiguration] + tags: Optional[Tags] + + +DeliveryDestinationTypes = List[DeliveryDestinationType] +DeliveryDestinations = List[DeliveryDestination] +ResourceArns = List[Arn] + + +class DeliverySource(TypedDict, total=False): + name: Optional[DeliverySourceName] + arn: Optional[Arn] + resourceArns: Optional[ResourceArns] + service: Optional[Service] + logType: Optional[LogType] + tags: Optional[Tags] + + +DeliverySources = List[DeliverySource] + + +class DescribeAccountPoliciesRequest(ServiceRequest): + policyType: PolicyType + policyName: Optional[PolicyName] + accountIdentifiers: Optional[AccountIds] + nextToken: Optional[NextToken] + + +class DescribeAccountPoliciesResponse(TypedDict, total=False): + accountPolicies: Optional[AccountPolicies] + nextToken: Optional[NextToken] + + +ResourceTypes = List[ResourceType] +LogTypes = List[LogType] + + +class DescribeConfigurationTemplatesRequest(ServiceRequest): + service: 
Optional[Service] + logTypes: Optional[LogTypes] + resourceTypes: Optional[ResourceTypes] + deliveryDestinationTypes: Optional[DeliveryDestinationTypes] + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class DescribeConfigurationTemplatesResponse(TypedDict, total=False): + configurationTemplates: Optional[ConfigurationTemplates] + nextToken: Optional[NextToken] + + +class DescribeDeliveriesRequest(ServiceRequest): + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class DescribeDeliveriesResponse(TypedDict, total=False): + deliveries: Optional[Deliveries] + nextToken: Optional[NextToken] + + +class DescribeDeliveryDestinationsRequest(ServiceRequest): + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class DescribeDeliveryDestinationsResponse(TypedDict, total=False): + deliveryDestinations: Optional[DeliveryDestinations] + nextToken: Optional[NextToken] + + +class DescribeDeliverySourcesRequest(ServiceRequest): + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class DescribeDeliverySourcesResponse(TypedDict, total=False): + deliverySources: Optional[DeliverySources] + nextToken: Optional[NextToken] + + +class DescribeDestinationsRequest(ServiceRequest): + DestinationNamePrefix: Optional[DestinationName] + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class Destination(TypedDict, total=False): + destinationName: Optional[DestinationName] + targetArn: Optional[TargetArn] + roleArn: Optional[RoleArn] + accessPolicy: Optional[AccessPolicy] + arn: Optional[Arn] + creationTime: Optional[Timestamp] + + +Destinations = List[Destination] + + +class DescribeDestinationsResponse(TypedDict, total=False): + destinations: Optional[Destinations] + nextToken: Optional[NextToken] + + +class DescribeExportTasksRequest(ServiceRequest): + taskId: Optional[ExportTaskId] + statusCode: Optional[ExportTaskStatusCode] + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class ExportTaskExecutionInfo(TypedDict, total=False): + creationTime: Optional[Timestamp] + completionTime: Optional[Timestamp] + + +class ExportTaskStatus(TypedDict, total=False): + code: Optional[ExportTaskStatusCode] + message: Optional[ExportTaskStatusMessage] + + +ExportTask = TypedDict( + "ExportTask", + { + "taskId": Optional[ExportTaskId], + "taskName": Optional[ExportTaskName], + "logGroupName": Optional[LogGroupName], + "from": Optional[Timestamp], + "to": Optional[Timestamp], + "destination": Optional[ExportDestinationBucket], + "destinationPrefix": Optional[ExportDestinationPrefix], + "status": Optional[ExportTaskStatus], + "executionInfo": Optional[ExportTaskExecutionInfo], + }, + total=False, +) +ExportTasks = List[ExportTask] + + +class DescribeExportTasksResponse(TypedDict, total=False): + exportTasks: Optional[ExportTasks] + nextToken: Optional[NextToken] + + +DescribeFieldIndexesLogGroupIdentifiers = List[LogGroupIdentifier] + + +class DescribeFieldIndexesRequest(ServiceRequest): + logGroupIdentifiers: DescribeFieldIndexesLogGroupIdentifiers + nextToken: Optional[NextToken] + + +class FieldIndex(TypedDict, total=False): + logGroupIdentifier: Optional[LogGroupIdentifier] + fieldIndexName: Optional[FieldIndexName] + lastScanTime: Optional[Timestamp] + firstEventTime: Optional[Timestamp] + lastEventTime: Optional[Timestamp] + + +FieldIndexes = List[FieldIndex] + + +class DescribeFieldIndexesResponse(TypedDict, total=False): + fieldIndexes: Optional[FieldIndexes] + nextToken: Optional[NextToken] + + 
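The Describe*/List* pairs in this module share one pagination contract: the request takes an optional nextToken and limit, and the response echoes a nextToken only while more pages remain. A small hypothetical sketch that drains DescribeLogGroups (describe_log_groups and its snake_case next_token parameter are assumptions inferred from the handler naming pattern visible at the end of this file):

    from localstack.aws.api import RequestContext
    from localstack.aws.api.logs import LogGroup, LogsApi


    def iter_log_groups(api: LogsApi, context: RequestContext) -> list[LogGroup]:
        # thread nextToken back into each request until the service omits it
        groups: list[LogGroup] = []
        token = None
        while True:
            resp = api.describe_log_groups(context, next_token=token)
            groups.extend(resp.get("logGroups") or [])
            token = resp.get("nextToken")
            if not token:
                return groups
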
+DescribeIndexPoliciesLogGroupIdentifiers = List[LogGroupIdentifier] + + +class DescribeIndexPoliciesRequest(ServiceRequest): + logGroupIdentifiers: DescribeIndexPoliciesLogGroupIdentifiers + nextToken: Optional[NextToken] + + +class IndexPolicy(TypedDict, total=False): + logGroupIdentifier: Optional[LogGroupIdentifier] + lastUpdateTime: Optional[Timestamp] + policyDocument: Optional[PolicyDocument] + policyName: Optional[PolicyName] + source: Optional[IndexSource] + + +IndexPolicies = List[IndexPolicy] + + +class DescribeIndexPoliciesResponse(TypedDict, total=False): + indexPolicies: Optional[IndexPolicies] + nextToken: Optional[NextToken] + + +DescribeLogGroupsLogGroupIdentifiers = List[LogGroupIdentifier] + + +class DescribeLogGroupsRequest(ServiceRequest): + accountIdentifiers: Optional[AccountIds] + logGroupNamePrefix: Optional[LogGroupName] + logGroupNamePattern: Optional[LogGroupNamePattern] + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + includeLinkedAccounts: Optional[IncludeLinkedAccounts] + logGroupClass: Optional[LogGroupClass] + logGroupIdentifiers: Optional[DescribeLogGroupsLogGroupIdentifiers] + + +InheritedProperties = List[InheritedProperty] +StoredBytes = int + + +class LogGroup(TypedDict, total=False): + logGroupName: Optional[LogGroupName] + creationTime: Optional[Timestamp] + retentionInDays: Optional[Days] + metricFilterCount: Optional[FilterCount] + arn: Optional[Arn] + storedBytes: Optional[StoredBytes] + kmsKeyId: Optional[KmsKeyId] + dataProtectionStatus: Optional[DataProtectionStatus] + inheritedProperties: Optional[InheritedProperties] + logGroupClass: Optional[LogGroupClass] + logGroupArn: Optional[Arn] + + +LogGroups = List[LogGroup] + + +class DescribeLogGroupsResponse(TypedDict, total=False): + logGroups: Optional[LogGroups] + nextToken: Optional[NextToken] + + +class DescribeLogStreamsRequest(ServiceRequest): + logGroupName: Optional[LogGroupName] + logGroupIdentifier: Optional[LogGroupIdentifier] + logStreamNamePrefix: Optional[LogStreamName] + orderBy: Optional[OrderBy] + descending: Optional[Descending] + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class LogStream(TypedDict, total=False): + logStreamName: Optional[LogStreamName] + creationTime: Optional[Timestamp] + firstEventTimestamp: Optional[Timestamp] + lastEventTimestamp: Optional[Timestamp] + lastIngestionTime: Optional[Timestamp] + uploadSequenceToken: Optional[SequenceToken] + arn: Optional[Arn] + storedBytes: Optional[StoredBytes] + + +LogStreams = List[LogStream] + + +class DescribeLogStreamsResponse(TypedDict, total=False): + logStreams: Optional[LogStreams] + nextToken: Optional[NextToken] + + +class DescribeMetricFiltersRequest(ServiceRequest): + logGroupName: Optional[LogGroupName] + filterNamePrefix: Optional[FilterName] + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + metricName: Optional[MetricName] + metricNamespace: Optional[MetricNamespace] + + +Dimensions = Dict[DimensionsKey, DimensionsValue] + + +class MetricTransformation(TypedDict, total=False): + metricName: MetricName + metricNamespace: MetricNamespace + metricValue: MetricValue + defaultValue: Optional[DefaultValue] + dimensions: Optional[Dimensions] + unit: Optional[StandardUnit] + + +MetricTransformations = List[MetricTransformation] + + +class MetricFilter(TypedDict, total=False): + filterName: Optional[FilterName] + filterPattern: Optional[FilterPattern] + metricTransformations: Optional[MetricTransformations] + creationTime: Optional[Timestamp] + 
logGroupName: Optional[LogGroupName] + applyOnTransformedLogs: Optional[ApplyOnTransformedLogs] + + +MetricFilters = List[MetricFilter] + + +class DescribeMetricFiltersResponse(TypedDict, total=False): + metricFilters: Optional[MetricFilters] + nextToken: Optional[NextToken] + + +class DescribeQueriesRequest(ServiceRequest): + logGroupName: Optional[LogGroupName] + status: Optional[QueryStatus] + maxResults: Optional[DescribeQueriesMaxResults] + nextToken: Optional[NextToken] + queryLanguage: Optional[QueryLanguage] + + +class QueryInfo(TypedDict, total=False): + queryLanguage: Optional[QueryLanguage] + queryId: Optional[QueryId] + queryString: Optional[QueryString] + status: Optional[QueryStatus] + createTime: Optional[Timestamp] + logGroupName: Optional[LogGroupName] + + +QueryInfoList = List[QueryInfo] + + +class DescribeQueriesResponse(TypedDict, total=False): + queries: Optional[QueryInfoList] + nextToken: Optional[NextToken] + + +class DescribeQueryDefinitionsRequest(ServiceRequest): + queryLanguage: Optional[QueryLanguage] + queryDefinitionNamePrefix: Optional[QueryDefinitionName] + maxResults: Optional[QueryListMaxResults] + nextToken: Optional[NextToken] + + +LogGroupNames = List[LogGroupName] + + +class QueryDefinition(TypedDict, total=False): + queryLanguage: Optional[QueryLanguage] + queryDefinitionId: Optional[QueryId] + name: Optional[QueryDefinitionName] + queryString: Optional[QueryDefinitionString] + lastModified: Optional[Timestamp] + logGroupNames: Optional[LogGroupNames] + + +QueryDefinitionList = List[QueryDefinition] + + +class DescribeQueryDefinitionsResponse(TypedDict, total=False): + queryDefinitions: Optional[QueryDefinitionList] + nextToken: Optional[NextToken] + + +class DescribeResourcePoliciesRequest(ServiceRequest): + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class ResourcePolicy(TypedDict, total=False): + policyName: Optional[PolicyName] + policyDocument: Optional[PolicyDocument] + lastUpdatedTime: Optional[Timestamp] + + +ResourcePolicies = List[ResourcePolicy] + + +class DescribeResourcePoliciesResponse(TypedDict, total=False): + resourcePolicies: Optional[ResourcePolicies] + nextToken: Optional[NextToken] + + +class DescribeSubscriptionFiltersRequest(ServiceRequest): + logGroupName: LogGroupName + filterNamePrefix: Optional[FilterName] + nextToken: Optional[NextToken] + limit: Optional[DescribeLimit] + + +class SubscriptionFilter(TypedDict, total=False): + filterName: Optional[FilterName] + logGroupName: Optional[LogGroupName] + filterPattern: Optional[FilterPattern] + destinationArn: Optional[DestinationArn] + roleArn: Optional[RoleArn] + distribution: Optional[Distribution] + applyOnTransformedLogs: Optional[ApplyOnTransformedLogs] + creationTime: Optional[Timestamp] + + +SubscriptionFilters = List[SubscriptionFilter] + + +class DescribeSubscriptionFiltersResponse(TypedDict, total=False): + subscriptionFilters: Optional[SubscriptionFilters] + nextToken: Optional[NextToken] + + +class DisassociateKmsKeyRequest(ServiceRequest): + logGroupName: Optional[LogGroupName] + resourceIdentifier: Optional[ResourceIdentifier] + + +EntityAttributes = Dict[EntityAttributesKey, EntityAttributesValue] +EntityKeyAttributes = Dict[EntityKeyAttributesKey, EntityKeyAttributesValue] + + +class Entity(TypedDict, total=False): + keyAttributes: Optional[EntityKeyAttributes] + attributes: Optional[EntityAttributes] + + +EventNumber = int +ExtractedValues = Dict[Token, Value] +InputLogStreamNames = List[LogStreamName] + + +class 
FilterLogEventsRequest(ServiceRequest): + logGroupName: Optional[LogGroupName] + logGroupIdentifier: Optional[LogGroupIdentifier] + logStreamNames: Optional[InputLogStreamNames] + logStreamNamePrefix: Optional[LogStreamName] + startTime: Optional[Timestamp] + endTime: Optional[Timestamp] + filterPattern: Optional[FilterPattern] + nextToken: Optional[NextToken] + limit: Optional[EventsLimit] + interleaved: Optional[Interleaved] + unmask: Optional[Unmask] + + +class SearchedLogStream(TypedDict, total=False): + logStreamName: Optional[LogStreamName] + searchedCompletely: Optional[LogStreamSearchedCompletely] + + +SearchedLogStreams = List[SearchedLogStream] + + +class FilteredLogEvent(TypedDict, total=False): + logStreamName: Optional[LogStreamName] + timestamp: Optional[Timestamp] + message: Optional[EventMessage] + ingestionTime: Optional[Timestamp] + eventId: Optional[EventId] + + +FilteredLogEvents = List[FilteredLogEvent] + + +class FilterLogEventsResponse(TypedDict, total=False): + events: Optional[FilteredLogEvents] + searchedLogStreams: Optional[SearchedLogStreams] + nextToken: Optional[NextToken] + + +class GetDataProtectionPolicyRequest(ServiceRequest): + logGroupIdentifier: LogGroupIdentifier + + +class GetDataProtectionPolicyResponse(TypedDict, total=False): + logGroupIdentifier: Optional[LogGroupIdentifier] + policyDocument: Optional[DataProtectionPolicyDocument] + lastUpdatedTime: Optional[Timestamp] + + +class GetDeliveryDestinationPolicyRequest(ServiceRequest): + deliveryDestinationName: DeliveryDestinationName + + +class Policy(TypedDict, total=False): + deliveryDestinationPolicy: Optional[DeliveryDestinationPolicy] + + +class GetDeliveryDestinationPolicyResponse(TypedDict, total=False): + policy: Optional[Policy] + + +class GetDeliveryDestinationRequest(ServiceRequest): + name: DeliveryDestinationName + + +class GetDeliveryDestinationResponse(TypedDict, total=False): + deliveryDestination: Optional[DeliveryDestination] + + +class GetDeliveryRequest(ServiceRequest): + id: DeliveryId + + +class GetDeliveryResponse(TypedDict, total=False): + delivery: Optional[Delivery] + + +class GetDeliverySourceRequest(ServiceRequest): + name: DeliverySourceName + + +class GetDeliverySourceResponse(TypedDict, total=False): + deliverySource: Optional[DeliverySource] + + +class GetIntegrationRequest(ServiceRequest): + integrationName: IntegrationName + + +class OpenSearchResourceStatus(TypedDict, total=False): + status: Optional[OpenSearchResourceStatusType] + statusMessage: Optional[IntegrationStatusMessage] + + +class OpenSearchLifecyclePolicy(TypedDict, total=False): + policyName: Optional[OpenSearchPolicyName] + status: Optional[OpenSearchResourceStatus] + + +class OpenSearchDataAccessPolicy(TypedDict, total=False): + policyName: Optional[OpenSearchPolicyName] + status: Optional[OpenSearchResourceStatus] + + +class OpenSearchNetworkPolicy(TypedDict, total=False): + policyName: Optional[OpenSearchPolicyName] + status: Optional[OpenSearchResourceStatus] + + +class OpenSearchEncryptionPolicy(TypedDict, total=False): + policyName: Optional[OpenSearchPolicyName] + status: Optional[OpenSearchResourceStatus] + + +class OpenSearchWorkspace(TypedDict, total=False): + workspaceId: Optional[OpenSearchWorkspaceId] + status: Optional[OpenSearchResourceStatus] + + +class OpenSearchCollection(TypedDict, total=False): + collectionEndpoint: Optional[OpenSearchCollectionEndpoint] + collectionArn: Optional[Arn] + status: Optional[OpenSearchResourceStatus] + + +class OpenSearchApplication(TypedDict, 
total=False): + applicationEndpoint: Optional[OpenSearchApplicationEndpoint] + applicationArn: Optional[Arn] + applicationId: Optional[OpenSearchApplicationId] + status: Optional[OpenSearchResourceStatus] + + +class OpenSearchDataSource(TypedDict, total=False): + dataSourceName: Optional[OpenSearchDataSourceName] + status: Optional[OpenSearchResourceStatus] + + +class OpenSearchIntegrationDetails(TypedDict, total=False): + dataSource: Optional[OpenSearchDataSource] + application: Optional[OpenSearchApplication] + collection: Optional[OpenSearchCollection] + workspace: Optional[OpenSearchWorkspace] + encryptionPolicy: Optional[OpenSearchEncryptionPolicy] + networkPolicy: Optional[OpenSearchNetworkPolicy] + accessPolicy: Optional[OpenSearchDataAccessPolicy] + lifecyclePolicy: Optional[OpenSearchLifecyclePolicy] + + +class IntegrationDetails(TypedDict, total=False): + openSearchIntegrationDetails: Optional[OpenSearchIntegrationDetails] + + +class GetIntegrationResponse(TypedDict, total=False): + integrationName: Optional[IntegrationName] + integrationType: Optional[IntegrationType] + integrationStatus: Optional[IntegrationStatus] + integrationDetails: Optional[IntegrationDetails] + + +class GetLogAnomalyDetectorRequest(ServiceRequest): + anomalyDetectorArn: AnomalyDetectorArn + + +class GetLogAnomalyDetectorResponse(TypedDict, total=False): + detectorName: Optional[DetectorName] + logGroupArnList: Optional[LogGroupArnList] + evaluationFrequency: Optional[EvaluationFrequency] + filterPattern: Optional[FilterPattern] + anomalyDetectorStatus: Optional[AnomalyDetectorStatus] + kmsKeyId: Optional[KmsKeyId] + creationTimeStamp: Optional[EpochMillis] + lastModifiedTimeStamp: Optional[EpochMillis] + anomalyVisibilityTime: Optional[AnomalyVisibilityTime] + + +class GetLogEventsRequest(ServiceRequest): + logGroupName: Optional[LogGroupName] + logGroupIdentifier: Optional[LogGroupIdentifier] + logStreamName: LogStreamName + startTime: Optional[Timestamp] + endTime: Optional[Timestamp] + nextToken: Optional[NextToken] + limit: Optional[EventsLimit] + startFromHead: Optional[StartFromHead] + unmask: Optional[Unmask] + + +class OutputLogEvent(TypedDict, total=False): + timestamp: Optional[Timestamp] + message: Optional[EventMessage] + ingestionTime: Optional[Timestamp] + + +OutputLogEvents = List[OutputLogEvent] + + +class GetLogEventsResponse(TypedDict, total=False): + events: Optional[OutputLogEvents] + nextForwardToken: Optional[NextToken] + nextBackwardToken: Optional[NextToken] + + +class GetLogGroupFieldsRequest(ServiceRequest): + logGroupName: Optional[LogGroupName] + time: Optional[Timestamp] + logGroupIdentifier: Optional[LogGroupIdentifier] + + +class LogGroupField(TypedDict, total=False): + name: Optional[Field] + percent: Optional[Percentage] + + +LogGroupFieldList = List[LogGroupField] + + +class GetLogGroupFieldsResponse(TypedDict, total=False): + logGroupFields: Optional[LogGroupFieldList] + + +class GetLogRecordRequest(ServiceRequest): + logRecordPointer: LogRecordPointer + unmask: Optional[Unmask] + + +LogRecord = Dict[Field, Value] + + +class GetLogRecordResponse(TypedDict, total=False): + logRecord: Optional[LogRecord] + + +class GetQueryResultsRequest(ServiceRequest): + queryId: QueryId + + +class QueryStatistics(TypedDict, total=False): + recordsMatched: Optional[StatsValue] + recordsScanned: Optional[StatsValue] + estimatedRecordsSkipped: Optional[StatsValue] + bytesScanned: Optional[StatsValue] + estimatedBytesSkipped: Optional[StatsValue] + logGroupsScanned: Optional[StatsValue] + + 
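Note the shape defined next: GetQueryResults returns rows as lists of field/value pairs (QueryResults = List[ResultRows], each row a list of ResultField entries), not as mappings. A small hypothetical helper that flattens a response into plain dicts (the function name is illustrative):

    from localstack.aws.api.logs import GetQueryResultsResponse


    def rows_as_dicts(response: GetQueryResultsResponse) -> list[dict[str, str]]:
        # each row is a list of {"field": ..., "value": ...} entries; skip incomplete ones
        return [
            {f["field"]: f["value"] for f in row if "field" in f and "value" in f}
            for row in response.get("results") or []
        ]
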
+class ResultField(TypedDict, total=False): + field: Optional[Field] + value: Optional[Value] + + +ResultRows = List[ResultField] +QueryResults = List[ResultRows] + + +class GetQueryResultsResponse(TypedDict, total=False): + queryLanguage: Optional[QueryLanguage] + results: Optional[QueryResults] + statistics: Optional[QueryStatistics] + status: Optional[QueryStatus] + encryptionKey: Optional[EncryptionKey] + + +class GetTransformerRequest(ServiceRequest): + logGroupIdentifier: LogGroupIdentifier + + +UpperCaseStringWithKeys = List[WithKey] + + +class UpperCaseString(TypedDict, total=False): + withKeys: UpperCaseStringWithKeys + + +TypeConverterEntry = TypedDict( + "TypeConverterEntry", + { + "key": Key, + "type": Type, + }, + total=False, +) +TypeConverterEntries = List[TypeConverterEntry] + + +class TypeConverter(TypedDict, total=False): + entries: TypeConverterEntries + + +TrimStringWithKeys = List[WithKey] + + +class TrimString(TypedDict, total=False): + withKeys: TrimStringWithKeys + + +SubstituteStringEntry = TypedDict( + "SubstituteStringEntry", + { + "source": Source, + "from": FromKey, + "to": ToKey, + }, + total=False, +) +SubstituteStringEntries = List[SubstituteStringEntry] + + +class SubstituteString(TypedDict, total=False): + entries: SubstituteStringEntries + + +class SplitStringEntry(TypedDict, total=False): + source: Source + delimiter: SplitStringDelimiter + + +SplitStringEntries = List[SplitStringEntry] + + +class SplitString(TypedDict, total=False): + entries: SplitStringEntries + + +class RenameKeyEntry(TypedDict, total=False): + key: Key + renameTo: RenameTo + overwriteIfExists: Optional[OverwriteIfExists] + + +RenameKeyEntries = List[RenameKeyEntry] + + +class RenameKeys(TypedDict, total=False): + entries: RenameKeyEntries + + +class ParseWAF(TypedDict, total=False): + source: Optional[Source] + + +class ParseVPC(TypedDict, total=False): + source: Optional[Source] + + +class ParsePostgres(TypedDict, total=False): + source: Optional[Source] + + +class ParseRoute53(TypedDict, total=False): + source: Optional[Source] + + +class ParseKeyValue(TypedDict, total=False): + source: Optional[Source] + destination: Optional[DestinationField] + fieldDelimiter: Optional[ParserFieldDelimiter] + keyValueDelimiter: Optional[KeyValueDelimiter] + keyPrefix: Optional[KeyPrefix] + nonMatchValue: Optional[NonMatchValue] + overwriteIfExists: Optional[OverwriteIfExists] + + +class ParseJSON(TypedDict, total=False): + source: Optional[Source] + destination: Optional[DestinationField] + + +class ParseCloudfront(TypedDict, total=False): + source: Optional[Source] + + +class MoveKeyEntry(TypedDict, total=False): + source: Source + target: Target + overwriteIfExists: Optional[OverwriteIfExists] + + +MoveKeyEntries = List[MoveKeyEntry] + + +class MoveKeys(TypedDict, total=False): + entries: MoveKeyEntries + + +LowerCaseStringWithKeys = List[WithKey] + + +class LowerCaseString(TypedDict, total=False): + withKeys: LowerCaseStringWithKeys + + +class ListToMap(TypedDict, total=False): + source: Source + key: Key + valueKey: Optional[ValueKey] + target: Optional[Target] + flatten: Optional[Flatten] + flattenedElement: Optional[FlattenedElement] + + +class Grok(TypedDict, total=False): + source: Optional[Source] + match: GrokMatch + + +class Processor(TypedDict, total=False): + addKeys: Optional[AddKeys] + copyValue: Optional[CopyValue] + csv: Optional[CSV] + dateTimeConverter: Optional[DateTimeConverter] + deleteKeys: Optional[DeleteKeys] + grok: Optional[Grok] + listToMap: Optional[ListToMap] + 
lowerCaseString: Optional[LowerCaseString] + moveKeys: Optional[MoveKeys] + parseCloudfront: Optional[ParseCloudfront] + parseJSON: Optional[ParseJSON] + parseKeyValue: Optional[ParseKeyValue] + parseRoute53: Optional[ParseRoute53] + parsePostgres: Optional[ParsePostgres] + parseVPC: Optional[ParseVPC] + parseWAF: Optional[ParseWAF] + renameKeys: Optional[RenameKeys] + splitString: Optional[SplitString] + substituteString: Optional[SubstituteString] + trimString: Optional[TrimString] + typeConverter: Optional[TypeConverter] + upperCaseString: Optional[UpperCaseString] + + +Processors = List[Processor] + + +class GetTransformerResponse(TypedDict, total=False): + logGroupIdentifier: Optional[LogGroupIdentifier] + creationTime: Optional[Timestamp] + lastModifiedTime: Optional[Timestamp] + transformerConfig: Optional[Processors] + + +class InputLogEvent(TypedDict, total=False): + timestamp: Timestamp + message: EventMessage + + +InputLogEvents = List[InputLogEvent] + + +class IntegrationSummary(TypedDict, total=False): + integrationName: Optional[IntegrationName] + integrationType: Optional[IntegrationType] + integrationStatus: Optional[IntegrationStatus] + + +IntegrationSummaries = List[IntegrationSummary] + + +class ListAnomaliesRequest(ServiceRequest): + anomalyDetectorArn: Optional[AnomalyDetectorArn] + suppressionState: Optional[SuppressionState] + limit: Optional[ListAnomaliesLimit] + nextToken: Optional[NextToken] + + +class ListAnomaliesResponse(TypedDict, total=False): + anomalies: Optional[Anomalies] + nextToken: Optional[NextToken] + + +class ListIntegrationsRequest(ServiceRequest): + integrationNamePrefix: Optional[IntegrationNamePrefix] + integrationType: Optional[IntegrationType] + integrationStatus: Optional[IntegrationStatus] + + +class ListIntegrationsResponse(TypedDict, total=False): + integrationSummaries: Optional[IntegrationSummaries] + + +class ListLogAnomalyDetectorsRequest(ServiceRequest): + filterLogGroupArn: Optional[LogGroupArn] + limit: Optional[ListLogAnomalyDetectorsLimit] + nextToken: Optional[NextToken] + + +class ListLogAnomalyDetectorsResponse(TypedDict, total=False): + anomalyDetectors: Optional[AnomalyDetectors] + nextToken: Optional[NextToken] + + +class ListLogGroupsForQueryRequest(ServiceRequest): + queryId: QueryId + nextToken: Optional[NextToken] + maxResults: Optional[ListLogGroupsForQueryMaxResults] + + +LogGroupIdentifiers = List[LogGroupIdentifier] + + +class ListLogGroupsForQueryResponse(TypedDict, total=False): + logGroupIdentifiers: Optional[LogGroupIdentifiers] + nextToken: Optional[NextToken] + + +class ListLogGroupsRequest(ServiceRequest): + logGroupNamePattern: Optional[LogGroupNameRegexPattern] + logGroupClass: Optional[LogGroupClass] + includeLinkedAccounts: Optional[IncludeLinkedAccounts] + accountIdentifiers: Optional[AccountIds] + nextToken: Optional[NextToken] + limit: Optional[ListLimit] + + +class LogGroupSummary(TypedDict, total=False): + logGroupName: Optional[LogGroupName] + logGroupArn: Optional[Arn] + logGroupClass: Optional[LogGroupClass] + + +LogGroupSummaries = List[LogGroupSummary] + + +class ListLogGroupsResponse(TypedDict, total=False): + logGroups: Optional[LogGroupSummaries] + nextToken: Optional[NextToken] + + +class ListTagsForResourceRequest(ServiceRequest): + resourceArn: AmazonResourceName + + +class ListTagsForResourceResponse(TypedDict, total=False): + tags: Optional[Tags] + + +class ListTagsLogGroupRequest(ServiceRequest): + logGroupName: LogGroupName + + +class ListTagsLogGroupResponse(TypedDict, total=False): + 
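+    # note: ListTagsLogGroup is the older per-log-group variant; AWS documents it as deprecated in favor of ListTagsForResource above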
tags: Optional[Tags] + + +class LiveTailSessionLogEvent(TypedDict, total=False): + logStreamName: Optional[LogStreamName] + logGroupIdentifier: Optional[LogGroupIdentifier] + message: Optional[EventMessage] + timestamp: Optional[Timestamp] + ingestionTime: Optional[Timestamp] + + +class LiveTailSessionMetadata(TypedDict, total=False): + sampled: Optional[IsSampled] + + +LiveTailSessionResults = List[LiveTailSessionLogEvent] +StartLiveTailLogGroupIdentifiers = List[LogGroupIdentifier] + + +class LiveTailSessionStart(TypedDict, total=False): + requestId: Optional[RequestId] + sessionId: Optional[SessionId] + logGroupIdentifiers: Optional[StartLiveTailLogGroupIdentifiers] + logStreamNames: Optional[InputLogStreamNames] + logStreamNamePrefixes: Optional[InputLogStreamNames] + logEventFilterPattern: Optional[FilterPattern] + + +class LiveTailSessionUpdate(TypedDict, total=False): + sessionMetadata: Optional[LiveTailSessionMetadata] + sessionResults: Optional[LiveTailSessionResults] + + +class MetricFilterMatchRecord(TypedDict, total=False): + eventNumber: Optional[EventNumber] + eventMessage: Optional[EventMessage] + extractedValues: Optional[ExtractedValues] + + +MetricFilterMatches = List[MetricFilterMatchRecord] + + +class OpenSearchResourceConfig(TypedDict, total=False): + kmsKeyArn: Optional[Arn] + dataSourceRoleArn: Arn + dashboardViewerPrincipals: DashboardViewerPrincipals + applicationArn: Optional[Arn] + retentionDays: CollectionRetentionDays + + +class PutAccountPolicyRequest(ServiceRequest): + policyName: PolicyName + policyDocument: AccountPolicyDocument + policyType: PolicyType + scope: Optional[Scope] + selectionCriteria: Optional[SelectionCriteria] + + +class PutAccountPolicyResponse(TypedDict, total=False): + accountPolicy: Optional[AccountPolicy] + + +class PutDataProtectionPolicyRequest(ServiceRequest): + logGroupIdentifier: LogGroupIdentifier + policyDocument: DataProtectionPolicyDocument + + +class PutDataProtectionPolicyResponse(TypedDict, total=False): + logGroupIdentifier: Optional[LogGroupIdentifier] + policyDocument: Optional[DataProtectionPolicyDocument] + lastUpdatedTime: Optional[Timestamp] + + +class PutDeliveryDestinationPolicyRequest(ServiceRequest): + deliveryDestinationName: DeliveryDestinationName + deliveryDestinationPolicy: DeliveryDestinationPolicy + + +class PutDeliveryDestinationPolicyResponse(TypedDict, total=False): + policy: Optional[Policy] + + +class PutDeliveryDestinationRequest(ServiceRequest): + name: DeliveryDestinationName + outputFormat: Optional[OutputFormat] + deliveryDestinationConfiguration: DeliveryDestinationConfiguration + tags: Optional[Tags] + + +class PutDeliveryDestinationResponse(TypedDict, total=False): + deliveryDestination: Optional[DeliveryDestination] + + +class PutDeliverySourceRequest(ServiceRequest): + name: DeliverySourceName + resourceArn: Arn + logType: LogType + tags: Optional[Tags] + + +class PutDeliverySourceResponse(TypedDict, total=False): + deliverySource: Optional[DeliverySource] + + +class PutDestinationPolicyRequest(ServiceRequest): + destinationName: DestinationName + accessPolicy: AccessPolicy + forceUpdate: Optional[ForceUpdate] + + +class PutDestinationRequest(ServiceRequest): + destinationName: DestinationName + targetArn: TargetArn + roleArn: RoleArn + tags: Optional[Tags] + + +class PutDestinationResponse(TypedDict, total=False): + destination: Optional[Destination] + + +class PutIndexPolicyRequest(ServiceRequest): + logGroupIdentifier: LogGroupIdentifier + policyDocument: PolicyDocument + + +class 
PutIndexPolicyResponse(TypedDict, total=False): + indexPolicy: Optional[IndexPolicy] + + +class ResourceConfig(TypedDict, total=False): + openSearchResourceConfig: Optional[OpenSearchResourceConfig] + + +class PutIntegrationRequest(ServiceRequest): + integrationName: IntegrationName + resourceConfig: ResourceConfig + integrationType: IntegrationType + + +class PutIntegrationResponse(TypedDict, total=False): + integrationName: Optional[IntegrationName] + integrationStatus: Optional[IntegrationStatus] + + +class PutLogEventsRequest(ServiceRequest): + logGroupName: LogGroupName + logStreamName: LogStreamName + logEvents: InputLogEvents + sequenceToken: Optional[SequenceToken] + entity: Optional[Entity] + + +class RejectedEntityInfo(TypedDict, total=False): + errorType: EntityRejectionErrorType + + +class RejectedLogEventsInfo(TypedDict, total=False): + tooNewLogEventStartIndex: Optional[LogEventIndex] + tooOldLogEventEndIndex: Optional[LogEventIndex] + expiredLogEventEndIndex: Optional[LogEventIndex] + + +class PutLogEventsResponse(TypedDict, total=False): + nextSequenceToken: Optional[SequenceToken] + rejectedLogEventsInfo: Optional[RejectedLogEventsInfo] + rejectedEntityInfo: Optional[RejectedEntityInfo] + + +class PutMetricFilterRequest(ServiceRequest): + logGroupName: LogGroupName + filterName: FilterName + filterPattern: FilterPattern + metricTransformations: MetricTransformations + applyOnTransformedLogs: Optional[ApplyOnTransformedLogs] + + +class PutQueryDefinitionRequest(ServiceRequest): + queryLanguage: Optional[QueryLanguage] + name: QueryDefinitionName + queryDefinitionId: Optional[QueryId] + logGroupNames: Optional[LogGroupNames] + queryString: QueryDefinitionString + clientToken: Optional[ClientToken] + + +class PutQueryDefinitionResponse(TypedDict, total=False): + queryDefinitionId: Optional[QueryId] + + +class PutResourcePolicyRequest(ServiceRequest): + policyName: Optional[PolicyName] + policyDocument: Optional[PolicyDocument] + + +class PutResourcePolicyResponse(TypedDict, total=False): + resourcePolicy: Optional[ResourcePolicy] + + +class PutRetentionPolicyRequest(ServiceRequest): + logGroupName: LogGroupName + retentionInDays: Days + + +class PutSubscriptionFilterRequest(ServiceRequest): + logGroupName: LogGroupName + filterName: FilterName + filterPattern: FilterPattern + destinationArn: DestinationArn + roleArn: Optional[RoleArn] + distribution: Optional[Distribution] + applyOnTransformedLogs: Optional[ApplyOnTransformedLogs] + + +class PutTransformerRequest(ServiceRequest): + logGroupIdentifier: LogGroupIdentifier + transformerConfig: Processors + + +class StartLiveTailRequest(ServiceRequest): + logGroupIdentifiers: StartLiveTailLogGroupIdentifiers + logStreamNames: Optional[InputLogStreamNames] + logStreamNamePrefixes: Optional[InputLogStreamNames] + logEventFilterPattern: Optional[FilterPattern] + + +class StartLiveTailResponseStream(TypedDict, total=False): + sessionStart: Optional[LiveTailSessionStart] + sessionUpdate: Optional[LiveTailSessionUpdate] + SessionTimeoutException: Optional[SessionTimeoutException] + SessionStreamingException: Optional[SessionStreamingException] + + +class StartLiveTailResponse(TypedDict, total=False): + responseStream: Iterator[StartLiveTailResponseStream] + + +class StartQueryRequest(ServiceRequest): + queryLanguage: Optional[QueryLanguage] + logGroupName: Optional[LogGroupName] + logGroupNames: Optional[LogGroupNames] + logGroupIdentifiers: Optional[LogGroupIdentifiers] + startTime: Timestamp + endTime: Timestamp + queryString: 
+    limit: Optional[EventsLimit]
+
+
+class StartQueryResponse(TypedDict, total=False):
+    queryId: Optional[QueryId]
+
+
+class StopQueryRequest(ServiceRequest):
+    queryId: QueryId
+
+
+class StopQueryResponse(TypedDict, total=False):
+    success: Optional[Success]
+
+
+class SuppressionPeriod(TypedDict, total=False):
+    value: Optional[Integer]
+    suppressionUnit: Optional[SuppressionUnit]
+
+
+TagKeyList = List[TagKey]
+TagList = List[TagKey]
+
+
+class TagLogGroupRequest(ServiceRequest):
+    logGroupName: LogGroupName
+    tags: Tags
+
+
+class TagResourceRequest(ServiceRequest):
+    resourceArn: AmazonResourceName
+    tags: Tags
+
+
+TestEventMessages = List[EventMessage]
+
+
+class TestMetricFilterRequest(ServiceRequest):
+    filterPattern: FilterPattern
+    logEventMessages: TestEventMessages
+
+
+class TestMetricFilterResponse(TypedDict, total=False):
+    matches: Optional[MetricFilterMatches]
+
+
+class TestTransformerRequest(ServiceRequest):
+    transformerConfig: Processors
+    logEventMessages: TestEventMessages
+
+
+class TransformedLogRecord(TypedDict, total=False):
+    eventNumber: Optional[EventNumber]
+    eventMessage: Optional[EventMessage]
+    transformedEventMessage: Optional[TransformedEventMessage]
+
+
+TransformedLogs = List[TransformedLogRecord]
+
+
+class TestTransformerResponse(TypedDict, total=False):
+    transformedLogs: Optional[TransformedLogs]
+
+
+class UntagLogGroupRequest(ServiceRequest):
+    logGroupName: LogGroupName
+    tags: TagList
+
+
+class UntagResourceRequest(ServiceRequest):
+    resourceArn: AmazonResourceName
+    tagKeys: TagKeyList
+
+
+class UpdateAnomalyRequest(ServiceRequest):
+    anomalyId: Optional[AnomalyId]
+    patternId: Optional[PatternId]
+    anomalyDetectorArn: AnomalyDetectorArn
+    suppressionType: Optional[SuppressionType]
+    suppressionPeriod: Optional[SuppressionPeriod]
+    baseline: Optional[Baseline]
+
+
+class UpdateDeliveryConfigurationRequest(ServiceRequest):
+    id: DeliveryId
+    recordFields: Optional[RecordFields]
+    fieldDelimiter: Optional[FieldDelimiter]
+    s3DeliveryConfiguration: Optional[S3DeliveryConfiguration]
+
+
+class UpdateDeliveryConfigurationResponse(TypedDict, total=False):
+    pass
+
+
+class UpdateLogAnomalyDetectorRequest(ServiceRequest):
+    anomalyDetectorArn: AnomalyDetectorArn
+    evaluationFrequency: Optional[EvaluationFrequency]
+    filterPattern: Optional[FilterPattern]
+    anomalyVisibilityTime: Optional[AnomalyVisibilityTime]
+    enabled: Boolean
+
+
+class LogsApi:
+    service = "logs"
+    version = "2014-03-28"
+
+    @handler("AssociateKmsKey")
+    def associate_kms_key(
+        self,
+        context: RequestContext,
+        kms_key_id: KmsKeyId,
+        log_group_name: LogGroupName | None = None,
+        resource_identifier: ResourceIdentifier | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("CancelExportTask")
+    def cancel_export_task(self, context: RequestContext, task_id: ExportTaskId, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("CreateDelivery")
+    def create_delivery(
+        self,
+        context: RequestContext,
+        delivery_source_name: DeliverySourceName,
+        delivery_destination_arn: Arn,
+        record_fields: RecordFields | None = None,
+        field_delimiter: FieldDelimiter | None = None,
+        s3_delivery_configuration: S3DeliveryConfiguration | None = None,
+        tags: Tags | None = None,
+        **kwargs,
+    ) -> CreateDeliveryResponse:
+        raise NotImplementedError
+
+    @handler("CreateExportTask", expand=False)
+    def create_export_task(
+        self, context: RequestContext, request: CreateExportTaskRequest, **kwargs
+    ) -> CreateExportTaskResponse:
+        raise NotImplementedError
+
+    @handler("CreateLogAnomalyDetector")
+    def create_log_anomaly_detector(
+        self,
+        context: RequestContext,
+        log_group_arn_list: LogGroupArnList,
+        detector_name: DetectorName | None = None,
+        evaluation_frequency: EvaluationFrequency | None = None,
+        filter_pattern: FilterPattern | None = None,
+        kms_key_id: DetectorKmsKeyArn | None = None,
+        anomaly_visibility_time: AnomalyVisibilityTime | None = None,
+        tags: Tags | None = None,
+        **kwargs,
+    ) -> CreateLogAnomalyDetectorResponse:
+        raise NotImplementedError
+
+    @handler("CreateLogGroup")
+    def create_log_group(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        kms_key_id: KmsKeyId | None = None,
+        tags: Tags | None = None,
+        log_group_class: LogGroupClass | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("CreateLogStream")
+    def create_log_stream(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        log_stream_name: LogStreamName,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteAccountPolicy")
+    def delete_account_policy(
+        self, context: RequestContext, policy_name: PolicyName, policy_type: PolicyType, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDataProtectionPolicy")
+    def delete_data_protection_policy(
+        self, context: RequestContext, log_group_identifier: LogGroupIdentifier, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDelivery")
+    def delete_delivery(self, context: RequestContext, id: DeliveryId, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDeliveryDestination")
+    def delete_delivery_destination(
+        self, context: RequestContext, name: DeliveryDestinationName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDeliveryDestinationPolicy")
+    def delete_delivery_destination_policy(
+        self, context: RequestContext, delivery_destination_name: DeliveryDestinationName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDeliverySource")
+    def delete_delivery_source(
+        self, context: RequestContext, name: DeliverySourceName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteDestination")
+    def delete_destination(
+        self, context: RequestContext, destination_name: DestinationName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteIndexPolicy")
+    def delete_index_policy(
+        self, context: RequestContext, log_group_identifier: LogGroupIdentifier, **kwargs
+    ) -> DeleteIndexPolicyResponse:
+        raise NotImplementedError
+
+    @handler("DeleteIntegration")
+    def delete_integration(
+        self,
+        context: RequestContext,
+        integration_name: IntegrationName,
+        force: Force | None = None,
+        **kwargs,
+    ) -> DeleteIntegrationResponse:
+        raise NotImplementedError
+
+    @handler("DeleteLogAnomalyDetector")
+    def delete_log_anomaly_detector(
+        self, context: RequestContext, anomaly_detector_arn: AnomalyDetectorArn, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteLogGroup")
+    def delete_log_group(
+        self, context: RequestContext, log_group_name: LogGroupName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteLogStream")
+    def delete_log_stream(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        log_stream_name: LogStreamName,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteMetricFilter")
+    def delete_metric_filter(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        filter_name: FilterName,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteQueryDefinition")
+    def delete_query_definition(
+        self, context: RequestContext, query_definition_id: QueryId, **kwargs
+    ) -> DeleteQueryDefinitionResponse:
+        raise NotImplementedError
+
+    @handler("DeleteResourcePolicy")
+    def delete_resource_policy(
+        self, context: RequestContext, policy_name: PolicyName | None = None, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteRetentionPolicy")
+    def delete_retention_policy(
+        self, context: RequestContext, log_group_name: LogGroupName, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSubscriptionFilter")
+    def delete_subscription_filter(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        filter_name: FilterName,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteTransformer")
+    def delete_transformer(
+        self, context: RequestContext, log_group_identifier: LogGroupIdentifier, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DescribeAccountPolicies")
+    def describe_account_policies(
+        self,
+        context: RequestContext,
+        policy_type: PolicyType,
+        policy_name: PolicyName | None = None,
+        account_identifiers: AccountIds | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeAccountPoliciesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeConfigurationTemplates")
+    def describe_configuration_templates(
+        self,
+        context: RequestContext,
+        service: Service | None = None,
+        log_types: LogTypes | None = None,
+        resource_types: ResourceTypes | None = None,
+        delivery_destination_types: DeliveryDestinationTypes | None = None,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeConfigurationTemplatesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeDeliveries")
+    def describe_deliveries(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeDeliveriesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeDeliveryDestinations")
+    def describe_delivery_destinations(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeDeliveryDestinationsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeDeliverySources")
+    def describe_delivery_sources(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeDeliverySourcesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeDestinations")
+    def describe_destinations(
+        self,
+        context: RequestContext,
+        destination_name_prefix: DestinationName | None = None,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeDestinationsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeExportTasks")
+    def describe_export_tasks(
+        self,
+        context: RequestContext,
+        task_id: ExportTaskId | None = None,
+        status_code: ExportTaskStatusCode | None = None,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeExportTasksResponse:
+        raise NotImplementedError
+
+    @handler("DescribeFieldIndexes")
+    def describe_field_indexes(
+        self,
+        context: RequestContext,
+        log_group_identifiers: DescribeFieldIndexesLogGroupIdentifiers,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeFieldIndexesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeIndexPolicies")
+    def describe_index_policies(
+        self,
+        context: RequestContext,
+        log_group_identifiers: DescribeIndexPoliciesLogGroupIdentifiers,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeIndexPoliciesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeLogGroups")
+    def describe_log_groups(
+        self,
+        context: RequestContext,
+        account_identifiers: AccountIds | None = None,
+        log_group_name_prefix: LogGroupName | None = None,
+        log_group_name_pattern: LogGroupNamePattern | None = None,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        include_linked_accounts: IncludeLinkedAccounts | None = None,
+        log_group_class: LogGroupClass | None = None,
+        log_group_identifiers: DescribeLogGroupsLogGroupIdentifiers | None = None,
+        **kwargs,
+    ) -> DescribeLogGroupsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeLogStreams")
+    def describe_log_streams(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName | None = None,
+        log_group_identifier: LogGroupIdentifier | None = None,
+        log_stream_name_prefix: LogStreamName | None = None,
+        order_by: OrderBy | None = None,
+        descending: Descending | None = None,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeLogStreamsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeMetricFilters")
+    def describe_metric_filters(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName | None = None,
+        filter_name_prefix: FilterName | None = None,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        metric_name: MetricName | None = None,
+        metric_namespace: MetricNamespace | None = None,
+        **kwargs,
+    ) -> DescribeMetricFiltersResponse:
+        raise NotImplementedError
+
+    @handler("DescribeQueries")
+    def describe_queries(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName | None = None,
+        status: QueryStatus | None = None,
+        max_results: DescribeQueriesMaxResults | None = None,
+        next_token: NextToken | None = None,
+        query_language: QueryLanguage | None = None,
+        **kwargs,
+    ) -> DescribeQueriesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeQueryDefinitions")
+    def describe_query_definitions(
+        self,
+        context: RequestContext,
+        query_language: QueryLanguage | None = None,
+        query_definition_name_prefix: QueryDefinitionName | None = None,
+        max_results: QueryListMaxResults | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> DescribeQueryDefinitionsResponse:
+        raise NotImplementedError
+
+    @handler("DescribeResourcePolicies")
+    def describe_resource_policies(
+        self,
+        context: RequestContext,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeResourcePoliciesResponse:
+        raise NotImplementedError
+
+    @handler("DescribeSubscriptionFilters")
+    def describe_subscription_filters(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        filter_name_prefix: FilterName | None = None,
+        next_token: NextToken | None = None,
+        limit: DescribeLimit | None = None,
+        **kwargs,
+    ) -> DescribeSubscriptionFiltersResponse:
+        raise NotImplementedError
+
+    @handler("DisassociateKmsKey")
+    def disassociate_kms_key(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName | None = None,
+        resource_identifier: ResourceIdentifier | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("FilterLogEvents")
+    def filter_log_events(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName | None = None,
+        log_group_identifier: LogGroupIdentifier | None = None,
+        log_stream_names: InputLogStreamNames | None = None,
+        log_stream_name_prefix: LogStreamName | None = None,
+        start_time: Timestamp | None = None,
+        end_time: Timestamp | None = None,
+        filter_pattern: FilterPattern | None = None,
+        next_token: NextToken | None = None,
+        limit: EventsLimit | None = None,
+        interleaved: Interleaved | None = None,
+        unmask: Unmask | None = None,
+        **kwargs,
+    ) -> FilterLogEventsResponse:
+        raise NotImplementedError
+
+    @handler("GetDataProtectionPolicy")
+    def get_data_protection_policy(
+        self, context: RequestContext, log_group_identifier: LogGroupIdentifier, **kwargs
+    ) -> GetDataProtectionPolicyResponse:
+        raise NotImplementedError
+
+    @handler("GetDelivery")
+    def get_delivery(
+        self, context: RequestContext, id: DeliveryId, **kwargs
+    ) -> GetDeliveryResponse:
+        raise NotImplementedError
+
+    @handler("GetDeliveryDestination")
+    def get_delivery_destination(
+        self, context: RequestContext, name: DeliveryDestinationName, **kwargs
+    ) -> GetDeliveryDestinationResponse:
+        raise NotImplementedError
+
+    @handler("GetDeliveryDestinationPolicy")
+    def get_delivery_destination_policy(
+        self, context: RequestContext, delivery_destination_name: DeliveryDestinationName, **kwargs
+    ) -> GetDeliveryDestinationPolicyResponse:
+        raise NotImplementedError
+
+    @handler("GetDeliverySource")
+    def get_delivery_source(
+        self, context: RequestContext, name: DeliverySourceName, **kwargs
+    ) -> GetDeliverySourceResponse:
+        raise NotImplementedError
+
+    @handler("GetIntegration")
+    def get_integration(
+        self, context: RequestContext, integration_name: IntegrationName, **kwargs
+    ) -> GetIntegrationResponse:
+        raise NotImplementedError
+
+    @handler("GetLogAnomalyDetector")
+    def get_log_anomaly_detector(
+        self, context: RequestContext, anomaly_detector_arn: AnomalyDetectorArn, **kwargs
+    ) -> GetLogAnomalyDetectorResponse:
+        raise NotImplementedError
+
+    @handler("GetLogEvents")
+    def get_log_events(
+        self,
+        context: RequestContext,
+        log_stream_name: LogStreamName,
+        log_group_name: LogGroupName | None = None,
+        log_group_identifier: LogGroupIdentifier | None = None,
+        start_time: Timestamp | None = None,
+        end_time: Timestamp | None = None,
+        next_token: NextToken | None = None,
+        limit: EventsLimit | None = None,
+        start_from_head: StartFromHead | None = None,
+        unmask: Unmask | None = None,
+        **kwargs,
+    ) -> GetLogEventsResponse:
+        raise NotImplementedError
+
+    @handler("GetLogGroupFields")
+    def get_log_group_fields(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName | None = None,
+        time: Timestamp | None = None,
+        log_group_identifier: LogGroupIdentifier | None = None,
+        **kwargs,
+    ) -> GetLogGroupFieldsResponse:
+        raise NotImplementedError
+
+    @handler("GetLogRecord")
+    def get_log_record(
+        self,
+        context: RequestContext,
+        log_record_pointer: LogRecordPointer,
+        unmask: Unmask | None = None,
+        **kwargs,
+    ) -> GetLogRecordResponse:
+        raise NotImplementedError
+
+    @handler("GetQueryResults")
+    def get_query_results(
+        self, context: RequestContext, query_id: QueryId, **kwargs
+    ) -> GetQueryResultsResponse:
+        raise NotImplementedError
+
+    @handler("GetTransformer")
+    def get_transformer(
+        self, context: RequestContext, log_group_identifier: LogGroupIdentifier, **kwargs
+    ) -> GetTransformerResponse:
+        raise NotImplementedError
+
+    @handler("ListAnomalies")
+    def list_anomalies(
+        self,
+        context: RequestContext,
+        anomaly_detector_arn: AnomalyDetectorArn | None = None,
+        suppression_state: SuppressionState | None = None,
+        limit: ListAnomaliesLimit | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> ListAnomaliesResponse:
+        raise NotImplementedError
+
+    @handler("ListIntegrations")
+    def list_integrations(
+        self,
+        context: RequestContext,
+        integration_name_prefix: IntegrationNamePrefix | None = None,
+        integration_type: IntegrationType | None = None,
+        integration_status: IntegrationStatus | None = None,
+        **kwargs,
+    ) -> ListIntegrationsResponse:
+        raise NotImplementedError
+
+    @handler("ListLogAnomalyDetectors")
+    def list_log_anomaly_detectors(
+        self,
+        context: RequestContext,
+        filter_log_group_arn: LogGroupArn | None = None,
+        limit: ListLogAnomalyDetectorsLimit | None = None,
+        next_token: NextToken | None = None,
+        **kwargs,
+    ) -> ListLogAnomalyDetectorsResponse:
+        raise NotImplementedError
+
+    @handler("ListLogGroups")
+    def list_log_groups(
+        self,
+        context: RequestContext,
+        log_group_name_pattern: LogGroupNameRegexPattern | None = None,
+        log_group_class: LogGroupClass | None = None,
+        include_linked_accounts: IncludeLinkedAccounts | None = None,
+        account_identifiers: AccountIds | None = None,
+        next_token: NextToken | None = None,
+        limit: ListLimit | None = None,
+        **kwargs,
+    ) -> ListLogGroupsResponse:
+        raise NotImplementedError
+
+    @handler("ListLogGroupsForQuery")
+    def list_log_groups_for_query(
+        self,
+        context: RequestContext,
+        query_id: QueryId,
+        next_token: NextToken | None = None,
+        max_results: ListLogGroupsForQueryMaxResults | None = None,
+        **kwargs,
+    ) -> ListLogGroupsForQueryResponse:
+        raise NotImplementedError
+
+    @handler("ListTagsForResource")
+    def list_tags_for_resource(
+        self, context: RequestContext, resource_arn: AmazonResourceName, **kwargs
+    ) -> ListTagsForResourceResponse:
+        raise NotImplementedError
+
+    @handler("ListTagsLogGroup")
+    def list_tags_log_group(
+        self, context: RequestContext, log_group_name: LogGroupName, **kwargs
+    ) -> ListTagsLogGroupResponse:
+        raise NotImplementedError
+
+    @handler("PutAccountPolicy")
+    def put_account_policy(
+        self,
+        context: RequestContext,
+        policy_name: PolicyName,
+        policy_document: AccountPolicyDocument,
+        policy_type: PolicyType,
+        scope: Scope | None = None,
+        selection_criteria: SelectionCriteria | None = None,
+        **kwargs,
+    ) -> PutAccountPolicyResponse:
+        raise NotImplementedError
+
+    @handler("PutDataProtectionPolicy")
+    def put_data_protection_policy(
+        self,
+        context: RequestContext,
+        log_group_identifier: LogGroupIdentifier,
+        policy_document: DataProtectionPolicyDocument,
+        **kwargs,
+    ) -> PutDataProtectionPolicyResponse:
+        raise NotImplementedError
+
+    @handler("PutDeliveryDestination")
+    def put_delivery_destination(
+        self,
+        context: RequestContext,
+        name: DeliveryDestinationName,
+        delivery_destination_configuration: DeliveryDestinationConfiguration,
+        output_format: OutputFormat | None = None,
+        tags: Tags | None = None,
+        **kwargs,
+    ) -> PutDeliveryDestinationResponse:
+        raise NotImplementedError
+
+    @handler("PutDeliveryDestinationPolicy")
+    def put_delivery_destination_policy(
+        self,
+        context: RequestContext,
+        delivery_destination_name: DeliveryDestinationName,
+        delivery_destination_policy: DeliveryDestinationPolicy,
+        **kwargs,
+    ) -> PutDeliveryDestinationPolicyResponse:
+        raise NotImplementedError
+
+    @handler("PutDeliverySource")
+    def put_delivery_source(
+        self,
+        context: RequestContext,
+        name: DeliverySourceName,
+        resource_arn: Arn,
+        log_type: LogType,
+        tags: Tags | None = None,
+        **kwargs,
+    ) -> PutDeliverySourceResponse:
+        raise NotImplementedError
+
+    @handler("PutDestination")
+    def put_destination(
+        self,
+        context: RequestContext,
+        destination_name: DestinationName,
+        target_arn: TargetArn,
+        role_arn: RoleArn,
+        tags: Tags | None = None,
+        **kwargs,
+    ) -> PutDestinationResponse:
+        raise NotImplementedError
+
+    @handler("PutDestinationPolicy")
+    def put_destination_policy(
+        self,
+        context: RequestContext,
+        destination_name: DestinationName,
+        access_policy: AccessPolicy,
+        force_update: ForceUpdate | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("PutIndexPolicy")
+    def put_index_policy(
+        self,
+        context: RequestContext,
+        log_group_identifier: LogGroupIdentifier,
+        policy_document: PolicyDocument,
+        **kwargs,
+    ) -> PutIndexPolicyResponse:
+        raise NotImplementedError
+
+    @handler("PutIntegration")
+    def put_integration(
+        self,
+        context: RequestContext,
+        integration_name: IntegrationName,
+        resource_config: ResourceConfig,
+        integration_type: IntegrationType,
+        **kwargs,
+    ) -> PutIntegrationResponse:
+        raise NotImplementedError
+
+    @handler("PutLogEvents")
+    def put_log_events(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        log_stream_name: LogStreamName,
+        log_events: InputLogEvents,
+        sequence_token: SequenceToken | None = None,
+        entity: Entity | None = None,
+        **kwargs,
+    ) -> PutLogEventsResponse:
+        raise NotImplementedError
+
+    @handler("PutMetricFilter")
+    def put_metric_filter(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        filter_name: FilterName,
+        filter_pattern: FilterPattern,
+        metric_transformations: MetricTransformations,
+        apply_on_transformed_logs: ApplyOnTransformedLogs | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("PutQueryDefinition")
+    def put_query_definition(
+        self,
+        context: RequestContext,
+        name: QueryDefinitionName,
+        query_string: QueryDefinitionString,
+        query_language: QueryLanguage | None = None,
+        query_definition_id: QueryId | None = None,
+        log_group_names: LogGroupNames | None = None,
+        client_token: ClientToken | None = None,
+        **kwargs,
+    ) -> PutQueryDefinitionResponse:
+        raise NotImplementedError
+
+    @handler("PutResourcePolicy")
+    def put_resource_policy(
+        self,
+        context: RequestContext,
+        policy_name: PolicyName | None = None,
+        policy_document: PolicyDocument | None = None,
+        **kwargs,
+    ) -> PutResourcePolicyResponse:
+        raise NotImplementedError
+
+    @handler("PutRetentionPolicy")
+    def put_retention_policy(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        retention_in_days: Days,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("PutSubscriptionFilter")
+    def put_subscription_filter(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        filter_name: FilterName,
+        filter_pattern: FilterPattern,
+        destination_arn: DestinationArn,
+        role_arn: RoleArn | None = None,
+        distribution: Distribution | None = None,
+        apply_on_transformed_logs: ApplyOnTransformedLogs | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("PutTransformer")
+    def put_transformer(
+        self,
+        context: RequestContext,
+        log_group_identifier: LogGroupIdentifier,
+        transformer_config: Processors,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("StartLiveTail")
+    def start_live_tail(
+        self,
+        context: RequestContext,
+        log_group_identifiers: StartLiveTailLogGroupIdentifiers,
+        log_stream_names: InputLogStreamNames | None = None,
+        log_stream_name_prefixes: InputLogStreamNames | None = None,
+        log_event_filter_pattern: FilterPattern | None = None,
+        **kwargs,
+    ) -> StartLiveTailResponse:
+        raise NotImplementedError
+
+    @handler("StartQuery")
+    def start_query(
+        self,
+        context: RequestContext,
+        start_time: Timestamp,
+        end_time: Timestamp,
+        query_string: QueryString,
+        query_language: QueryLanguage | None = None,
+        log_group_name: LogGroupName | None = None,
+        log_group_names: LogGroupNames | None = None,
+        log_group_identifiers: LogGroupIdentifiers | None = None,
+        limit: EventsLimit | None = None,
+        **kwargs,
+    ) -> StartQueryResponse:
+        raise NotImplementedError
+
+    @handler("StopQuery")
+    def stop_query(self, context: RequestContext, query_id: QueryId, **kwargs) -> StopQueryResponse:
+        raise NotImplementedError
+
+    @handler("TagLogGroup")
+    def tag_log_group(
+        self, context: RequestContext, log_group_name: LogGroupName, tags: Tags, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TagResource")
+    def tag_resource(
+        self, context: RequestContext, resource_arn: AmazonResourceName, tags: Tags, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("TestMetricFilter")
+    def test_metric_filter(
+        self,
+        context: RequestContext,
+        filter_pattern: FilterPattern,
+        log_event_messages: TestEventMessages,
+        **kwargs,
+    ) -> TestMetricFilterResponse:
+        raise NotImplementedError
+
+    @handler("TestTransformer")
+    def test_transformer(
+        self,
+        context: RequestContext,
+        transformer_config: Processors,
+        log_event_messages: TestEventMessages,
+        **kwargs,
+    ) -> TestTransformerResponse:
+        raise NotImplementedError
+
+    @handler("UntagLogGroup")
+    def untag_log_group(
+        self, context: RequestContext, log_group_name: LogGroupName, tags: TagList, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UntagResource")
+    def untag_resource(
+        self,
+        context: RequestContext,
+        resource_arn: AmazonResourceName,
+        tag_keys: TagKeyList,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateAnomaly")
+    def update_anomaly(
+        self,
+        context: RequestContext,
+        anomaly_detector_arn: AnomalyDetectorArn,
+        anomaly_id: AnomalyId | None = None,
+        pattern_id: PatternId | None = None,
+        suppression_type: SuppressionType | None = None,
+        suppression_period: SuppressionPeriod | None = None,
+        baseline: Baseline | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("UpdateDeliveryConfiguration")
+    def update_delivery_configuration(
+        self,
+        context: RequestContext,
+        id: DeliveryId,
+        record_fields: RecordFields | None = None,
+        field_delimiter: FieldDelimiter | None = None,
+        s3_delivery_configuration: S3DeliveryConfiguration | None = None,
+        **kwargs,
+    ) -> UpdateDeliveryConfigurationResponse:
+        raise NotImplementedError
+
+    @handler("UpdateLogAnomalyDetector")
+    def update_log_anomaly_detector(
+        self,
+        context: RequestContext,
+        anomaly_detector_arn: AnomalyDetectorArn,
+        enabled: Boolean,
+        evaluation_frequency: EvaluationFrequency | None = None,
+        filter_pattern: FilterPattern | None = None,
+        anomaly_visibility_time: AnomalyVisibilityTime | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
diff --git a/localstack-core/localstack/aws/api/opensearch/__init__.py b/localstack-core/localstack/aws/api/opensearch/__init__.py
new file mode 100644
index 0000000000000..73c9074d0a619
--- /dev/null
+++ b/localstack-core/localstack/aws/api/opensearch/__init__.py
@@ -0,0 +1,3338 @@
+from datetime import datetime
+from enum import StrEnum
+from typing import Dict, List, Optional, TypedDict
+
+from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
+
+ARN = str
+AWSAccount = str
+AppConfigValue = str
+ApplicationName = str
+AvailabilityZone = str
+BackendRole = str
+Boolean = bool
+ChangeProgressStageName = str
+ChangeProgressStageStatus = str
+ClientToken = str
+CloudWatchLogsLogGroupArn = str
+CommitMessage = str
+ConnectionAlias = str
+ConnectionId = str
+ConnectionStatusMessage = str
+DataSourceDescription = str
+DataSourceName = str
+DeploymentType = str
+DescribePackagesFilterValue = str
+Description = str
+DirectQueryDataSourceDescription = str
+DirectQueryDataSourceName = str
+DirectQueryDataSourceRoleArn = str
+DomainArn = str
+DomainId = str
+DomainName = str
+DomainNameFqdn = str
+Double = float
+DryRun = bool
+Endpoint = str
+EngineVersion = str
+ErrorMessage = str
+ErrorType = str
+GUID = str
+HostedZoneId = str
+Id = str
+IdentityCenterApplicationARN = str
+IdentityCenterInstanceARN = str
+IdentityPoolId = str
+IdentityStoreId = str
+InstanceCount = int
+InstanceRole = str
+InstanceTypeString = str
+Integer = int
+IntegerClass = int
+Issue = str
+KmsKeyId = str
+LicenseFilepath = str
+LimitName = str
+LimitValue = str
+MaintenanceStatusMessage = str
+MaxResults = int
+MaximumInstanceCount = int
+Message = str
+MinimumInstanceCount = int
+NextToken = str
+NodeId = str
+NonEmptyString = str
+NumberOfAZs = str
+NumberOfNodes = str
+NumberOfShards = str
+OwnerId = str
+PackageDescription = str
+PackageID = str
+PackageName = str
+PackageOwner = str
+PackageUser = str
+PackageVersion = str
+Password = str
+PluginClassName = str
+PluginDescription = str
+PluginName = str
+PluginVersion = str
+PolicyDocument = str
+ReferencePath = str
+Region = str
+RequestId = str
+ReservationToken = str
+RoleArn = str
+RolesKey = str
+S3BucketName = str
+S3Key = str
+SAMLEntityId = str
+SAMLMetadata = str
+ScheduledAutoTuneDescription = str
+ServiceUrl = str
+StorageSubTypeName = str
+StorageTypeName = str
+String = str
+SubjectKey = str
+TagKey = str
+TagValue = str
+TotalNumberOfStages = int
+UIntValue = int
+UpgradeName = str
+UserPoolId = str
+Username = str
+VersionString = str
+VolumeSize = str
+VpcEndpointId = str
+
+
+class AWSServicePrincipal(StrEnum):
+    application_opensearchservice_amazonaws_com = "application.opensearchservice.amazonaws.com"
+
+
+class ActionSeverity(StrEnum):
+    HIGH = "HIGH"
+    MEDIUM = "MEDIUM"
+    LOW = "LOW"
+
+
+class ActionStatus(StrEnum):
+    PENDING_UPDATE = "PENDING_UPDATE"
+    IN_PROGRESS = "IN_PROGRESS"
+    FAILED = "FAILED"
+    COMPLETED = "COMPLETED"
+    NOT_ELIGIBLE = "NOT_ELIGIBLE"
+    ELIGIBLE = "ELIGIBLE"
+
+
+class ActionType(StrEnum):
+    SERVICE_SOFTWARE_UPDATE = "SERVICE_SOFTWARE_UPDATE"
+    JVM_HEAP_SIZE_TUNING = "JVM_HEAP_SIZE_TUNING"
+    JVM_YOUNG_GEN_TUNING = "JVM_YOUNG_GEN_TUNING"
+
+
+class AppConfigType(StrEnum):
+    opensearchDashboards_dashboardAdmin_users = "opensearchDashboards.dashboardAdmin.users"
+    opensearchDashboards_dashboardAdmin_groups = "opensearchDashboards.dashboardAdmin.groups"
+
+
+class ApplicationStatus(StrEnum):
+    CREATING = "CREATING"
+    UPDATING = "UPDATING"
+    DELETING = "DELETING"
+    ACTIVE = "ACTIVE"
+    FAILED = "FAILED"
+
+
+class AutoTuneDesiredState(StrEnum):
+    ENABLED = "ENABLED"
+    DISABLED = "DISABLED"
+
+
+class AutoTuneState(StrEnum):
+    ENABLED = "ENABLED"
+    DISABLED = "DISABLED"
+    ENABLE_IN_PROGRESS = "ENABLE_IN_PROGRESS"
+    DISABLE_IN_PROGRESS = "DISABLE_IN_PROGRESS"
+    DISABLED_AND_ROLLBACK_SCHEDULED = "DISABLED_AND_ROLLBACK_SCHEDULED"
+    DISABLED_AND_ROLLBACK_IN_PROGRESS = "DISABLED_AND_ROLLBACK_IN_PROGRESS"
+    DISABLED_AND_ROLLBACK_COMPLETE = "DISABLED_AND_ROLLBACK_COMPLETE"
+    DISABLED_AND_ROLLBACK_ERROR = "DISABLED_AND_ROLLBACK_ERROR"
+    ERROR = "ERROR"
+
+
+class AutoTuneType(StrEnum):
+    SCHEDULED_ACTION = "SCHEDULED_ACTION"
+
+
+class ConfigChangeStatus(StrEnum):
+    Pending = "Pending"
+    Initializing = "Initializing"
+    Validating = "Validating"
+    ValidationFailed = "ValidationFailed"
+    ApplyingChanges = "ApplyingChanges"
+    Completed = "Completed"
+    PendingUserInput = "PendingUserInput"
+    Cancelled = "Cancelled"
+
+
+class ConnectionMode(StrEnum):
+    DIRECT = "DIRECT"
+    VPC_ENDPOINT = "VPC_ENDPOINT"
+
+
+class DataSourceStatus(StrEnum):
+    ACTIVE = "ACTIVE"
+    DISABLED = "DISABLED"
+
+
+class DeploymentStatus(StrEnum):
+    PENDING_UPDATE = "PENDING_UPDATE"
+    IN_PROGRESS = "IN_PROGRESS"
+    COMPLETED = "COMPLETED"
+    NOT_ELIGIBLE = "NOT_ELIGIBLE"
+    ELIGIBLE = "ELIGIBLE"
+
+
+class DescribePackagesFilterName(StrEnum):
+    PackageID = "PackageID"
+    PackageName = "PackageName"
+    PackageStatus = "PackageStatus"
+    PackageType = "PackageType"
+    EngineVersion = "EngineVersion"
+    PackageOwner = "PackageOwner"
+
+
+class DomainHealth(StrEnum):
+    Red = "Red"
+    Yellow = "Yellow"
+    Green = "Green"
+    NotAvailable = "NotAvailable"
+
+
+class DomainPackageStatus(StrEnum):
+    ASSOCIATING = "ASSOCIATING"
+    ASSOCIATION_FAILED = "ASSOCIATION_FAILED"
+    ACTIVE = "ACTIVE"
+    DISSOCIATING = "DISSOCIATING"
+    DISSOCIATION_FAILED = "DISSOCIATION_FAILED"
+
+
+class DomainProcessingStatusType(StrEnum):
+    Creating = "Creating"
+    Active = "Active"
+    Modifying = "Modifying"
+    UpgradingEngineVersion = "UpgradingEngineVersion"
+    UpdatingServiceSoftware = "UpdatingServiceSoftware"
+    Isolated = "Isolated"
+    Deleting = "Deleting"
+
+
+class DomainState(StrEnum):
+    Active = "Active"
+    Processing = "Processing"
+    NotAvailable = "NotAvailable"
+
+
+class DryRunMode(StrEnum):
+    Basic = "Basic"
+    Verbose = "Verbose"
+
+
+class EngineType(StrEnum):
+    OpenSearch = "OpenSearch"
+    Elasticsearch = "Elasticsearch"
+
+
+class IPAddressType(StrEnum):
+    ipv4 = "ipv4"
+    dualstack = "dualstack"
+
+
+class InboundConnectionStatusCode(StrEnum):
+    PENDING_ACCEPTANCE = "PENDING_ACCEPTANCE"
+    APPROVED = "APPROVED"
+    PROVISIONING = "PROVISIONING"
+    ACTIVE = "ACTIVE"
+    REJECTING = "REJECTING"
+    REJECTED = "REJECTED"
+    DELETING = "DELETING"
+    DELETED = "DELETED"
+
+
+class InitiatedBy(StrEnum):
+    CUSTOMER = "CUSTOMER"
+    SERVICE = "SERVICE"
+
+
+class LogType(StrEnum):
+    INDEX_SLOW_LOGS = "INDEX_SLOW_LOGS"
+    SEARCH_SLOW_LOGS = "SEARCH_SLOW_LOGS"
+    ES_APPLICATION_LOGS = "ES_APPLICATION_LOGS"
+    AUDIT_LOGS = "AUDIT_LOGS"
+
+
+class MaintenanceStatus(StrEnum):
+    PENDING = "PENDING"
+    IN_PROGRESS = "IN_PROGRESS"
+    COMPLETED = "COMPLETED"
+    FAILED = "FAILED"
+    TIMED_OUT = "TIMED_OUT"
+
+
+class MaintenanceType(StrEnum):
+    REBOOT_NODE = "REBOOT_NODE"
+    RESTART_SEARCH_PROCESS = "RESTART_SEARCH_PROCESS"
+    RESTART_DASHBOARD = "RESTART_DASHBOARD"
+
+
+class MasterNodeStatus(StrEnum):
+    Available = "Available"
+    UnAvailable = "UnAvailable"
+
+
+class NaturalLanguageQueryGenerationCurrentState(StrEnum):
+    NOT_ENABLED = "NOT_ENABLED"
+    ENABLE_COMPLETE = "ENABLE_COMPLETE"
+    ENABLE_IN_PROGRESS = "ENABLE_IN_PROGRESS"
+    ENABLE_FAILED = "ENABLE_FAILED"
+    DISABLE_COMPLETE = "DISABLE_COMPLETE"
+    DISABLE_IN_PROGRESS = "DISABLE_IN_PROGRESS"
+    DISABLE_FAILED = "DISABLE_FAILED"
+
+
+class NaturalLanguageQueryGenerationDesiredState(StrEnum):
+    ENABLED = "ENABLED"
+    DISABLED = "DISABLED"
+
+
+class NodeOptionsNodeType(StrEnum):
+    coordinator = "coordinator"
+
+
+class NodeStatus(StrEnum):
+    Active = "Active"
+    StandBy = "StandBy"
+    NotAvailable = "NotAvailable"
+
+
+class NodeType(StrEnum):
+    Data = "Data"
+    Ultrawarm = "Ultrawarm"
+    Master = "Master"
+
+
+class OpenSearchPartitionInstanceType(StrEnum):
+    m3_medium_search = "m3.medium.search"
+    m3_large_search = "m3.large.search"
+    m3_xlarge_search = "m3.xlarge.search"
+    m3_2xlarge_search = "m3.2xlarge.search"
+    m4_large_search = "m4.large.search"
+    m4_xlarge_search = "m4.xlarge.search"
+    m4_2xlarge_search = "m4.2xlarge.search"
+    m4_4xlarge_search = "m4.4xlarge.search"
+    m4_10xlarge_search = "m4.10xlarge.search"
+    m5_large_search = "m5.large.search"
+    m5_xlarge_search = "m5.xlarge.search"
+    m5_2xlarge_search = "m5.2xlarge.search"
+    m5_4xlarge_search = "m5.4xlarge.search"
+    m5_12xlarge_search = "m5.12xlarge.search"
+    m5_24xlarge_search = "m5.24xlarge.search"
+    r5_large_search = "r5.large.search"
+    r5_xlarge_search = "r5.xlarge.search"
+    r5_2xlarge_search = "r5.2xlarge.search"
+    r5_4xlarge_search = "r5.4xlarge.search"
+    r5_12xlarge_search = "r5.12xlarge.search"
+    r5_24xlarge_search = "r5.24xlarge.search"
+    c5_large_search = "c5.large.search"
+    c5_xlarge_search = "c5.xlarge.search"
+    c5_2xlarge_search = "c5.2xlarge.search"
+    c5_4xlarge_search = "c5.4xlarge.search"
+    c5_9xlarge_search = "c5.9xlarge.search"
+    c5_18xlarge_search = "c5.18xlarge.search"
+    t3_nano_search = "t3.nano.search"
+    t3_micro_search = "t3.micro.search"
+    t3_small_search = "t3.small.search"
+    t3_medium_search = "t3.medium.search"
+    t3_large_search = "t3.large.search"
+    t3_xlarge_search = "t3.xlarge.search"
+    t3_2xlarge_search = "t3.2xlarge.search"
+    or1_medium_search = "or1.medium.search"
+    or1_large_search = "or1.large.search"
+    or1_xlarge_search = "or1.xlarge.search"
+    or1_2xlarge_search = "or1.2xlarge.search"
+    or1_4xlarge_search = "or1.4xlarge.search"
+    or1_8xlarge_search = "or1.8xlarge.search"
+    or1_12xlarge_search = "or1.12xlarge.search"
+    or1_16xlarge_search = "or1.16xlarge.search"
+    ultrawarm1_medium_search = "ultrawarm1.medium.search"
+    ultrawarm1_large_search = "ultrawarm1.large.search"
+    ultrawarm1_xlarge_search = "ultrawarm1.xlarge.search"
+    t2_micro_search = "t2.micro.search"
+    t2_small_search = "t2.small.search"
+    t2_medium_search = "t2.medium.search"
+    r3_large_search = "r3.large.search"
+    r3_xlarge_search = "r3.xlarge.search"
+    r3_2xlarge_search = "r3.2xlarge.search"
+    r3_4xlarge_search = "r3.4xlarge.search"
+    r3_8xlarge_search = "r3.8xlarge.search"
+    i2_xlarge_search = "i2.xlarge.search"
+    i2_2xlarge_search = "i2.2xlarge.search"
+    d2_xlarge_search = "d2.xlarge.search"
+    d2_2xlarge_search = "d2.2xlarge.search"
+    d2_4xlarge_search = "d2.4xlarge.search"
+    d2_8xlarge_search = "d2.8xlarge.search"
+    c4_large_search = "c4.large.search"
+    c4_xlarge_search = "c4.xlarge.search"
+    c4_2xlarge_search = "c4.2xlarge.search"
+    c4_4xlarge_search = "c4.4xlarge.search"
+    c4_8xlarge_search = "c4.8xlarge.search"
+    r4_large_search = "r4.large.search"
+    r4_xlarge_search = "r4.xlarge.search"
+    r4_2xlarge_search = "r4.2xlarge.search"
+    r4_4xlarge_search = "r4.4xlarge.search"
+    r4_8xlarge_search = "r4.8xlarge.search"
+    r4_16xlarge_search = "r4.16xlarge.search"
+    i3_large_search = "i3.large.search"
+    i3_xlarge_search = "i3.xlarge.search"
+    i3_2xlarge_search = "i3.2xlarge.search"
+    i3_4xlarge_search = "i3.4xlarge.search"
+    i3_8xlarge_search = "i3.8xlarge.search"
+    i3_16xlarge_search = "i3.16xlarge.search"
+    r6g_large_search = "r6g.large.search"
"r6g.large.search" + r6g_xlarge_search = "r6g.xlarge.search" + r6g_2xlarge_search = "r6g.2xlarge.search" + r6g_4xlarge_search = "r6g.4xlarge.search" + r6g_8xlarge_search = "r6g.8xlarge.search" + r6g_12xlarge_search = "r6g.12xlarge.search" + m6g_large_search = "m6g.large.search" + m6g_xlarge_search = "m6g.xlarge.search" + m6g_2xlarge_search = "m6g.2xlarge.search" + m6g_4xlarge_search = "m6g.4xlarge.search" + m6g_8xlarge_search = "m6g.8xlarge.search" + m6g_12xlarge_search = "m6g.12xlarge.search" + c6g_large_search = "c6g.large.search" + c6g_xlarge_search = "c6g.xlarge.search" + c6g_2xlarge_search = "c6g.2xlarge.search" + c6g_4xlarge_search = "c6g.4xlarge.search" + c6g_8xlarge_search = "c6g.8xlarge.search" + c6g_12xlarge_search = "c6g.12xlarge.search" + r6gd_large_search = "r6gd.large.search" + r6gd_xlarge_search = "r6gd.xlarge.search" + r6gd_2xlarge_search = "r6gd.2xlarge.search" + r6gd_4xlarge_search = "r6gd.4xlarge.search" + r6gd_8xlarge_search = "r6gd.8xlarge.search" + r6gd_12xlarge_search = "r6gd.12xlarge.search" + r6gd_16xlarge_search = "r6gd.16xlarge.search" + t4g_small_search = "t4g.small.search" + t4g_medium_search = "t4g.medium.search" + + +class OpenSearchWarmPartitionInstanceType(StrEnum): + ultrawarm1_medium_search = "ultrawarm1.medium.search" + ultrawarm1_large_search = "ultrawarm1.large.search" + ultrawarm1_xlarge_search = "ultrawarm1.xlarge.search" + + +class OptionState(StrEnum): + RequiresIndexDocuments = "RequiresIndexDocuments" + Processing = "Processing" + Active = "Active" + + +class OutboundConnectionStatusCode(StrEnum): + VALIDATING = "VALIDATING" + VALIDATION_FAILED = "VALIDATION_FAILED" + PENDING_ACCEPTANCE = "PENDING_ACCEPTANCE" + APPROVED = "APPROVED" + PROVISIONING = "PROVISIONING" + ACTIVE = "ACTIVE" + REJECTING = "REJECTING" + REJECTED = "REJECTED" + DELETING = "DELETING" + DELETED = "DELETED" + + +class OverallChangeStatus(StrEnum): + PENDING = "PENDING" + PROCESSING = "PROCESSING" + COMPLETED = "COMPLETED" + FAILED = "FAILED" + + +class PackageScopeOperationEnum(StrEnum): + ADD = "ADD" + OVERRIDE = "OVERRIDE" + REMOVE = "REMOVE" + + +class PackageStatus(StrEnum): + COPYING = "COPYING" + COPY_FAILED = "COPY_FAILED" + VALIDATING = "VALIDATING" + VALIDATION_FAILED = "VALIDATION_FAILED" + AVAILABLE = "AVAILABLE" + DELETING = "DELETING" + DELETED = "DELETED" + DELETE_FAILED = "DELETE_FAILED" + + +class PackageType(StrEnum): + TXT_DICTIONARY = "TXT-DICTIONARY" + ZIP_PLUGIN = "ZIP-PLUGIN" + PACKAGE_LICENSE = "PACKAGE-LICENSE" + PACKAGE_CONFIG = "PACKAGE-CONFIG" + + +class PrincipalType(StrEnum): + AWS_ACCOUNT = "AWS_ACCOUNT" + AWS_SERVICE = "AWS_SERVICE" + + +class PropertyValueType(StrEnum): + PLAIN_TEXT = "PLAIN_TEXT" + STRINGIFIED_JSON = "STRINGIFIED_JSON" + + +class RequirementLevel(StrEnum): + REQUIRED = "REQUIRED" + OPTIONAL = "OPTIONAL" + NONE = "NONE" + + +class ReservedInstancePaymentOption(StrEnum): + ALL_UPFRONT = "ALL_UPFRONT" + PARTIAL_UPFRONT = "PARTIAL_UPFRONT" + NO_UPFRONT = "NO_UPFRONT" + + +class RolesKeyIdCOption(StrEnum): + GroupName = "GroupName" + GroupId = "GroupId" + + +class RollbackOnDisable(StrEnum): + NO_ROLLBACK = "NO_ROLLBACK" + DEFAULT_ROLLBACK = "DEFAULT_ROLLBACK" + + +class ScheduleAt(StrEnum): + NOW = "NOW" + TIMESTAMP = "TIMESTAMP" + OFF_PEAK_WINDOW = "OFF_PEAK_WINDOW" + + +class ScheduledAutoTuneActionType(StrEnum): + JVM_HEAP_SIZE_TUNING = "JVM_HEAP_SIZE_TUNING" + JVM_YOUNG_GEN_TUNING = "JVM_YOUNG_GEN_TUNING" + + +class ScheduledAutoTuneSeverityType(StrEnum): + LOW = "LOW" + MEDIUM = "MEDIUM" + HIGH = "HIGH" + + +class 
+    CUSTOMER = "CUSTOMER"
+    SYSTEM = "SYSTEM"
+
+
+class SkipUnavailableStatus(StrEnum):
+    ENABLED = "ENABLED"
+    DISABLED = "DISABLED"
+
+
+class SubjectKeyIdCOption(StrEnum):
+    UserName = "UserName"
+    UserId = "UserId"
+    Email = "Email"
+
+
+class TLSSecurityPolicy(StrEnum):
+    Policy_Min_TLS_1_0_2019_07 = "Policy-Min-TLS-1-0-2019-07"
+    Policy_Min_TLS_1_2_2019_07 = "Policy-Min-TLS-1-2-2019-07"
+    Policy_Min_TLS_1_2_PFS_2023_10 = "Policy-Min-TLS-1-2-PFS-2023-10"
+
+
+class TimeUnit(StrEnum):
+    HOURS = "HOURS"
+
+
+class UpgradeStatus(StrEnum):
+    IN_PROGRESS = "IN_PROGRESS"
+    SUCCEEDED = "SUCCEEDED"
+    SUCCEEDED_WITH_ISSUES = "SUCCEEDED_WITH_ISSUES"
+    FAILED = "FAILED"
+
+
+class UpgradeStep(StrEnum):
+    PRE_UPGRADE_CHECK = "PRE_UPGRADE_CHECK"
+    SNAPSHOT = "SNAPSHOT"
+    UPGRADE = "UPGRADE"
+
+
+class VolumeType(StrEnum):
+    standard = "standard"
+    gp2 = "gp2"
+    io1 = "io1"
+    gp3 = "gp3"
+
+
+class VpcEndpointErrorCode(StrEnum):
+    ENDPOINT_NOT_FOUND = "ENDPOINT_NOT_FOUND"
+    SERVER_ERROR = "SERVER_ERROR"
+
+
+class VpcEndpointStatus(StrEnum):
+    CREATING = "CREATING"
+    CREATE_FAILED = "CREATE_FAILED"
+    ACTIVE = "ACTIVE"
+    UPDATING = "UPDATING"
+    UPDATE_FAILED = "UPDATE_FAILED"
+    DELETING = "DELETING"
+    DELETE_FAILED = "DELETE_FAILED"
+
+
+class ZoneStatus(StrEnum):
+    Active = "Active"
+    StandBy = "StandBy"
+    NotAvailable = "NotAvailable"
+
+
+class AccessDeniedException(ServiceException):
+    code: str = "AccessDeniedException"
+    sender_fault: bool = False
+    status_code: int = 403
+
+
+class BaseException(ServiceException):
+    code: str = "BaseException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class ConflictException(ServiceException):
+    code: str = "ConflictException"
+    sender_fault: bool = False
+    status_code: int = 409
+
+
+class DependencyFailureException(ServiceException):
+    code: str = "DependencyFailureException"
+    sender_fault: bool = False
+    status_code: int = 424
+
+
+class DisabledOperationException(ServiceException):
+    code: str = "DisabledOperationException"
+    sender_fault: bool = False
+    status_code: int = 409
+
+
+class InternalException(ServiceException):
+    code: str = "InternalException"
+    sender_fault: bool = False
+    status_code: int = 500
+
+
+class InvalidPaginationTokenException(ServiceException):
+    code: str = "InvalidPaginationTokenException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidTypeException(ServiceException):
+    code: str = "InvalidTypeException"
+    sender_fault: bool = False
+    status_code: int = 409
+
+
+class LimitExceededException(ServiceException):
+    code: str = "LimitExceededException"
+    sender_fault: bool = False
+    status_code: int = 409
+
+
+class ResourceAlreadyExistsException(ServiceException):
+    code: str = "ResourceAlreadyExistsException"
+    sender_fault: bool = False
+    status_code: int = 409
+
+
+class ResourceNotFoundException(ServiceException):
+    code: str = "ResourceNotFoundException"
+    sender_fault: bool = False
+    status_code: int = 409
+
+
+Long = int
+SlotList = List[Long]
+
+
+class SlotNotAvailableException(ServiceException):
+    code: str = "SlotNotAvailableException"
+    sender_fault: bool = False
+    status_code: int = 409
+    SlotSuggestions: Optional[SlotList]
+
+
+class ValidationException(ServiceException):
+    code: str = "ValidationException"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class NaturalLanguageQueryGenerationOptionsInput(TypedDict, total=False):
+    DesiredState: Optional[NaturalLanguageQueryGenerationDesiredState]
+
+
+class AIMLOptionsInput(TypedDict, total=False):
+    NaturalLanguageQueryGenerationOptions: Optional[NaturalLanguageQueryGenerationOptionsInput]
+
+
+class NaturalLanguageQueryGenerationOptionsOutput(TypedDict, total=False):
+    DesiredState: Optional[NaturalLanguageQueryGenerationDesiredState]
+    CurrentState: Optional[NaturalLanguageQueryGenerationCurrentState]
+
+
+class AIMLOptionsOutput(TypedDict, total=False):
+    NaturalLanguageQueryGenerationOptions: Optional[NaturalLanguageQueryGenerationOptionsOutput]
+
+
+UpdateTimestamp = datetime
+
+
+class OptionStatus(TypedDict, total=False):
+    CreationDate: UpdateTimestamp
+    UpdateDate: UpdateTimestamp
+    UpdateVersion: Optional[UIntValue]
+    State: OptionState
+    PendingDeletion: Optional[Boolean]
+
+
+class AIMLOptionsStatus(TypedDict, total=False):
+    Options: Optional[AIMLOptionsOutput]
+    Status: Optional[OptionStatus]
+
+
+class AWSDomainInformation(TypedDict, total=False):
+    OwnerId: Optional[OwnerId]
+    DomainName: DomainName
+    Region: Optional[Region]
+
+
+class AcceptInboundConnectionRequest(ServiceRequest):
+    ConnectionId: ConnectionId
+
+
+class InboundConnectionStatus(TypedDict, total=False):
+    StatusCode: Optional[InboundConnectionStatusCode]
+    Message: Optional[ConnectionStatusMessage]
+
+
+class DomainInformationContainer(TypedDict, total=False):
+    AWSDomainInformation: Optional[AWSDomainInformation]
+
+
+class InboundConnection(TypedDict, total=False):
+    LocalDomainInfo: Optional[DomainInformationContainer]
+    RemoteDomainInfo: Optional[DomainInformationContainer]
+    ConnectionId: Optional[ConnectionId]
+    ConnectionStatus: Optional[InboundConnectionStatus]
+    ConnectionMode: Optional[ConnectionMode]
+
+
+class AcceptInboundConnectionResponse(TypedDict, total=False):
+    Connection: Optional[InboundConnection]
+
+
+class AccessPoliciesStatus(TypedDict, total=False):
+    Options: PolicyDocument
+    Status: OptionStatus
+
+
+class S3GlueDataCatalog(TypedDict, total=False):
+    RoleArn: Optional[RoleArn]
+
+
+class DataSourceType(TypedDict, total=False):
+    S3GlueDataCatalog: Optional[S3GlueDataCatalog]
+
+
+class AddDataSourceRequest(ServiceRequest):
+    DomainName: DomainName
+    Name: DataSourceName
+    DataSourceType: DataSourceType
+    Description: Optional[DataSourceDescription]
+
+
+class AddDataSourceResponse(TypedDict, total=False):
+    Message: Optional[String]
+
+
+class Tag(TypedDict, total=False):
+    Key: TagKey
+    Value: TagValue
+
+
+TagList = List[Tag]
+DirectQueryOpenSearchARNList = List[ARN]
+
+
+class SecurityLakeDirectQueryDataSource(TypedDict, total=False):
+    RoleArn: DirectQueryDataSourceRoleArn
+
+
+class CloudWatchDirectQueryDataSource(TypedDict, total=False):
+    RoleArn: DirectQueryDataSourceRoleArn
+
+
+class DirectQueryDataSourceType(TypedDict, total=False):
+    CloudWatchLog: Optional[CloudWatchDirectQueryDataSource]
+    SecurityLake: Optional[SecurityLakeDirectQueryDataSource]
+
+
+class AddDirectQueryDataSourceRequest(ServiceRequest):
+    DataSourceName: DirectQueryDataSourceName
+    DataSourceType: DirectQueryDataSourceType
+    Description: Optional[DirectQueryDataSourceDescription]
+    OpenSearchArns: DirectQueryOpenSearchARNList
+    TagList: Optional[TagList]
+
+
+class AddDirectQueryDataSourceResponse(TypedDict, total=False):
+    DataSourceArn: Optional[String]
+
+
+class AddTagsRequest(ServiceRequest):
+    ARN: ARN
+    TagList: TagList
+
+
+LimitValueList = List[LimitValue]
+
+
+class AdditionalLimit(TypedDict, total=False):
+    LimitName: Optional[LimitName]
+    LimitValues: Optional[LimitValueList]
+
+
+AdditionalLimitList = List[AdditionalLimit]
+AdvancedOptions = Dict[String, String]
+
+
+class AdvancedOptionsStatus(TypedDict, total=False):
+    Options: AdvancedOptions
+    Status: OptionStatus
+
+
+DisableTimestamp = datetime
+
+
+class JWTOptionsOutput(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    SubjectKey: Optional[String]
+    RolesKey: Optional[String]
+    PublicKey: Optional[String]
+
+
+class SAMLIdp(TypedDict, total=False):
+    MetadataContent: SAMLMetadata
+    EntityId: SAMLEntityId
+
+
+class SAMLOptionsOutput(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    Idp: Optional[SAMLIdp]
+    SubjectKey: Optional[String]
+    RolesKey: Optional[String]
+    SessionTimeoutMinutes: Optional[IntegerClass]
+
+
+class AdvancedSecurityOptions(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    InternalUserDatabaseEnabled: Optional[Boolean]
+    SAMLOptions: Optional[SAMLOptionsOutput]
+    JWTOptions: Optional[JWTOptionsOutput]
+    AnonymousAuthDisableDate: Optional[DisableTimestamp]
+    AnonymousAuthEnabled: Optional[Boolean]
+
+
+class JWTOptionsInput(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    SubjectKey: Optional[SubjectKey]
+    RolesKey: Optional[RolesKey]
+    PublicKey: Optional[String]
+
+
+class SAMLOptionsInput(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    Idp: Optional[SAMLIdp]
+    MasterUserName: Optional[Username]
+    MasterBackendRole: Optional[BackendRole]
+    SubjectKey: Optional[String]
+    RolesKey: Optional[String]
+    SessionTimeoutMinutes: Optional[IntegerClass]
+
+
+class MasterUserOptions(TypedDict, total=False):
+    MasterUserARN: Optional[ARN]
+    MasterUserName: Optional[Username]
+    MasterUserPassword: Optional[Password]
+
+
+class AdvancedSecurityOptionsInput(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    InternalUserDatabaseEnabled: Optional[Boolean]
+    MasterUserOptions: Optional[MasterUserOptions]
+    SAMLOptions: Optional[SAMLOptionsInput]
+    JWTOptions: Optional[JWTOptionsInput]
+    AnonymousAuthEnabled: Optional[Boolean]
+
+
+class AdvancedSecurityOptionsStatus(TypedDict, total=False):
+    Options: AdvancedSecurityOptions
+    Status: OptionStatus
+
+
+class AppConfig(TypedDict, total=False):
+    key: Optional[AppConfigType]
+    value: Optional[AppConfigValue]
+
+
+AppConfigs = List[AppConfig]
+ApplicationStatuses = List[ApplicationStatus]
+Timestamp = datetime
+
+
+class ApplicationSummary(TypedDict, total=False):
+    id: Optional[Id]
+    arn: Optional[ARN]
+    name: Optional[ApplicationName]
+    endpoint: Optional[String]
+    status: Optional[ApplicationStatus]
+    createdAt: Optional[Timestamp]
+    lastUpdatedAt: Optional[Timestamp]
+
+
+ApplicationSummaries = List[ApplicationSummary]
+
+
+class KeyStoreAccessOption(TypedDict, total=False):
+    KeyAccessRoleArn: Optional[RoleArn]
+    KeyStoreAccessEnabled: Boolean
+
+
+class PackageAssociationConfiguration(TypedDict, total=False):
+    KeyStoreAccessOption: Optional[KeyStoreAccessOption]
+
+
+PackageIDList = List[PackageID]
+
+
+class AssociatePackageRequest(ServiceRequest):
+    PackageID: PackageID
+    DomainName: DomainName
+    PrerequisitePackageIDList: Optional[PackageIDList]
+    AssociationConfiguration: Optional[PackageAssociationConfiguration]
+
+
+class ErrorDetails(TypedDict, total=False):
+    ErrorType: Optional[ErrorType]
+    ErrorMessage: Optional[ErrorMessage]
+
+
+LastUpdated = datetime
+
+
+class DomainPackageDetails(TypedDict, total=False):
+    PackageID: Optional[PackageID]
+    PackageName: Optional[PackageName]
+    PackageType: Optional[PackageType]
+    LastUpdated: Optional[LastUpdated]
+    DomainName: Optional[DomainName]
+    DomainPackageStatus: Optional[DomainPackageStatus]
+    PackageVersion: Optional[PackageVersion]
+    PrerequisitePackageIDList: Optional[PackageIDList]
+    ReferencePath: Optional[ReferencePath]
+    ErrorDetails: Optional[ErrorDetails]
+    AssociationConfiguration: Optional[PackageAssociationConfiguration]
+
+
+class AssociatePackageResponse(TypedDict, total=False):
+    DomainPackageDetails: Optional[DomainPackageDetails]
+
+
+class PackageDetailsForAssociation(TypedDict, total=False):
+    PackageID: PackageID
+    PrerequisitePackageIDList: Optional[PackageIDList]
+    AssociationConfiguration: Optional[PackageAssociationConfiguration]
+
+
+PackageDetailsForAssociationList = List[PackageDetailsForAssociation]
+
+
+class AssociatePackagesRequest(ServiceRequest):
+    PackageList: PackageDetailsForAssociationList
+    DomainName: DomainName
+
+
+DomainPackageDetailsList = List[DomainPackageDetails]
+
+
+class AssociatePackagesResponse(TypedDict, total=False):
+    DomainPackageDetailsList: Optional[DomainPackageDetailsList]
+
+
+class AuthorizeVpcEndpointAccessRequest(ServiceRequest):
+    DomainName: DomainName
+    Account: Optional[AWSAccount]
+    Service: Optional[AWSServicePrincipal]
+
+
+class AuthorizedPrincipal(TypedDict, total=False):
+    PrincipalType: Optional[PrincipalType]
+    Principal: Optional[String]
+
+
+class AuthorizeVpcEndpointAccessResponse(TypedDict, total=False):
+    AuthorizedPrincipal: AuthorizedPrincipal
+
+
+AuthorizedPrincipalList = List[AuthorizedPrincipal]
+AutoTuneDate = datetime
+
+
+class ScheduledAutoTuneDetails(TypedDict, total=False):
+    Date: Optional[AutoTuneDate]
+    ActionType: Optional[ScheduledAutoTuneActionType]
+    Action: Optional[ScheduledAutoTuneDescription]
+    Severity: Optional[ScheduledAutoTuneSeverityType]
+
+
+class AutoTuneDetails(TypedDict, total=False):
+    ScheduledAutoTuneDetails: Optional[ScheduledAutoTuneDetails]
+
+
+class AutoTune(TypedDict, total=False):
+    AutoTuneType: Optional[AutoTuneType]
+    AutoTuneDetails: Optional[AutoTuneDetails]
+
+
+AutoTuneList = List[AutoTune]
+DurationValue = int
+
+
+class Duration(TypedDict, total=False):
+    Value: Optional[DurationValue]
+    Unit: Optional[TimeUnit]
+
+
+StartAt = datetime
+
+
+class AutoTuneMaintenanceSchedule(TypedDict, total=False):
+    StartAt: Optional[StartAt]
+    Duration: Optional[Duration]
+    CronExpressionForRecurrence: Optional[String]
+
+
+AutoTuneMaintenanceScheduleList = List[AutoTuneMaintenanceSchedule]
+
+
+class AutoTuneOptions(TypedDict, total=False):
+    DesiredState: Optional[AutoTuneDesiredState]
+    RollbackOnDisable: Optional[RollbackOnDisable]
+    MaintenanceSchedules: Optional[AutoTuneMaintenanceScheduleList]
+    UseOffPeakWindow: Optional[Boolean]
+
+
+class AutoTuneOptionsInput(TypedDict, total=False):
+    DesiredState: Optional[AutoTuneDesiredState]
+    MaintenanceSchedules: Optional[AutoTuneMaintenanceScheduleList]
+    UseOffPeakWindow: Optional[Boolean]
+
+
+class AutoTuneOptionsOutput(TypedDict, total=False):
+    State: Optional[AutoTuneState]
+    ErrorMessage: Optional[String]
+    UseOffPeakWindow: Optional[Boolean]
+
+
+class AutoTuneStatus(TypedDict, total=False):
+    CreationDate: UpdateTimestamp
+    UpdateDate: UpdateTimestamp
+    UpdateVersion: Optional[UIntValue]
+    State: AutoTuneState
+    ErrorMessage: Optional[String]
+    PendingDeletion: Optional[Boolean]
+
+
+class AutoTuneOptionsStatus(TypedDict, total=False):
+    Options: Optional[AutoTuneOptions]
+    Status: Optional[AutoTuneStatus]
+
+
+class AvailabilityZoneInfo(TypedDict, total=False):
+    AvailabilityZoneName: Optional[AvailabilityZone]
+    ZoneStatus: Optional[ZoneStatus]
+    ConfiguredDataNodeCount: Optional[NumberOfNodes]
+    AvailableDataNodeCount: Optional[NumberOfNodes]
+    TotalShards: Optional[NumberOfShards]
+    TotalUnAssignedShards: Optional[NumberOfShards]
+
+
+AvailabilityZoneInfoList = List[AvailabilityZoneInfo]
+AvailabilityZoneList = List[AvailabilityZone]
+
+
+class CancelDomainConfigChangeRequest(ServiceRequest):
+    DomainName: DomainName
+    DryRun: Optional[DryRun]
+
+
+class CancelledChangeProperty(TypedDict, total=False):
+    PropertyName: Optional[String]
+    CancelledValue: Optional[String]
+    ActiveValue: Optional[String]
+
+
+CancelledChangePropertyList = List[CancelledChangeProperty]
+GUIDList = List[GUID]
+
+
+class CancelDomainConfigChangeResponse(TypedDict, total=False):
+    CancelledChangeIds: Optional[GUIDList]
+    CancelledChangeProperties: Optional[CancelledChangePropertyList]
+    DryRun: Optional[DryRun]
+
+
+class CancelServiceSoftwareUpdateRequest(ServiceRequest):
+    DomainName: DomainName
+
+
+DeploymentCloseDateTimeStamp = datetime
+
+
+class ServiceSoftwareOptions(TypedDict, total=False):
+    CurrentVersion: Optional[String]
+    NewVersion: Optional[String]
+    UpdateAvailable: Optional[Boolean]
+    Cancellable: Optional[Boolean]
+    UpdateStatus: Optional[DeploymentStatus]
+    Description: Optional[String]
+    AutomatedUpdateDate: Optional[DeploymentCloseDateTimeStamp]
+    OptionalDeployment: Optional[Boolean]
+
+
+class CancelServiceSoftwareUpdateResponse(TypedDict, total=False):
+    ServiceSoftwareOptions: Optional[ServiceSoftwareOptions]
+
+
+class ChangeProgressDetails(TypedDict, total=False):
+    ChangeId: Optional[GUID]
+    Message: Optional[Message]
+    ConfigChangeStatus: Optional[ConfigChangeStatus]
+    InitiatedBy: Optional[InitiatedBy]
+    StartTime: Optional[UpdateTimestamp]
+    LastUpdatedTime: Optional[UpdateTimestamp]
+
+
+class ChangeProgressStage(TypedDict, total=False):
+    Name: Optional[ChangeProgressStageName]
+    Status: Optional[ChangeProgressStageStatus]
+    Description: Optional[Description]
+    LastUpdated: Optional[LastUpdated]
+
+
+ChangeProgressStageList = List[ChangeProgressStage]
+StringList = List[String]
+
+
+class ChangeProgressStatusDetails(TypedDict, total=False):
+    ChangeId: Optional[GUID]
+    StartTime: Optional[UpdateTimestamp]
+    Status: Optional[OverallChangeStatus]
+    PendingProperties: Optional[StringList]
+    CompletedProperties: Optional[StringList]
+    TotalNumberOfStages: Optional[TotalNumberOfStages]
+    ChangeProgressStages: Optional[ChangeProgressStageList]
+    LastUpdatedTime: Optional[UpdateTimestamp]
+    ConfigChangeStatus: Optional[ConfigChangeStatus]
+    InitiatedBy: Optional[InitiatedBy]
+
+
+class NodeConfig(TypedDict, total=False):
+    Enabled: Optional[Boolean]
+    Type: Optional[OpenSearchPartitionInstanceType]
+    Count: Optional[IntegerClass]
+
+
+class NodeOption(TypedDict, total=False):
+    NodeType: Optional[NodeOptionsNodeType]
+    NodeConfig: Optional[NodeConfig]
+
+
+NodeOptionsList = List[NodeOption]
+
+
+class ColdStorageOptions(TypedDict, total=False):
+    Enabled: Boolean
+
+
+class ZoneAwarenessConfig(TypedDict, total=False):
+    AvailabilityZoneCount: Optional[IntegerClass]
+
+
+class ClusterConfig(TypedDict, total=False):
+    InstanceType: Optional[OpenSearchPartitionInstanceType]
+    InstanceCount: Optional[IntegerClass]
+    DedicatedMasterEnabled: Optional[Boolean]
+    ZoneAwarenessEnabled: Optional[Boolean]
+    ZoneAwarenessConfig: Optional[ZoneAwarenessConfig]
+    DedicatedMasterType: Optional[OpenSearchPartitionInstanceType]
+    DedicatedMasterCount: Optional[IntegerClass]
+    WarmEnabled: Optional[Boolean]
+    WarmType: Optional[OpenSearchWarmPartitionInstanceType]
+    WarmCount: Optional[IntegerClass]
+    ColdStorageOptions: Optional[ColdStorageOptions]
Optional[ColdStorageOptions] + MultiAZWithStandbyEnabled: Optional[Boolean] + NodeOptions: Optional[NodeOptionsList] + + +class ClusterConfigStatus(TypedDict, total=False): + Options: ClusterConfig + Status: OptionStatus + + +class CognitoOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + UserPoolId: Optional[UserPoolId] + IdentityPoolId: Optional[IdentityPoolId] + RoleArn: Optional[RoleArn] + + +class CognitoOptionsStatus(TypedDict, total=False): + Options: CognitoOptions + Status: OptionStatus + + +VersionList = List[VersionString] + + +class CompatibleVersionsMap(TypedDict, total=False): + SourceVersion: Optional[VersionString] + TargetVersions: Optional[VersionList] + + +CompatibleVersionsList = List[CompatibleVersionsMap] + + +class CrossClusterSearchConnectionProperties(TypedDict, total=False): + SkipUnavailable: Optional[SkipUnavailableStatus] + + +class ConnectionProperties(TypedDict, total=False): + Endpoint: Optional[Endpoint] + CrossClusterSearch: Optional[CrossClusterSearchConnectionProperties] + + +class IamIdentityCenterOptionsInput(TypedDict, total=False): + enabled: Optional[Boolean] + iamIdentityCenterInstanceArn: Optional[ARN] + iamRoleForIdentityCenterApplicationArn: Optional[RoleArn] + + +class DataSource(TypedDict, total=False): + dataSourceArn: Optional[ARN] + dataSourceDescription: Optional[DataSourceDescription] + + +DataSources = List[DataSource] + + +class CreateApplicationRequest(ServiceRequest): + clientToken: Optional[ClientToken] + name: ApplicationName + dataSources: Optional[DataSources] + iamIdentityCenterOptions: Optional[IamIdentityCenterOptionsInput] + appConfigs: Optional[AppConfigs] + tagList: Optional[TagList] + + +class IamIdentityCenterOptions(TypedDict, total=False): + enabled: Optional[Boolean] + iamIdentityCenterInstanceArn: Optional[ARN] + iamRoleForIdentityCenterApplicationArn: Optional[RoleArn] + iamIdentityCenterApplicationArn: Optional[ARN] + + +class CreateApplicationResponse(TypedDict, total=False): + id: Optional[Id] + name: Optional[ApplicationName] + arn: Optional[ARN] + dataSources: Optional[DataSources] + iamIdentityCenterOptions: Optional[IamIdentityCenterOptions] + appConfigs: Optional[AppConfigs] + tagList: Optional[TagList] + createdAt: Optional[Timestamp] + + +class SoftwareUpdateOptions(TypedDict, total=False): + AutoSoftwareUpdateEnabled: Optional[Boolean] + + +StartTimeMinutes = int +StartTimeHours = int + + +class WindowStartTime(TypedDict, total=False): + Hours: StartTimeHours + Minutes: StartTimeMinutes + + +class OffPeakWindow(TypedDict, total=False): + WindowStartTime: Optional[WindowStartTime] + + +class OffPeakWindowOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + OffPeakWindow: Optional[OffPeakWindow] + + +class IdentityCenterOptionsInput(TypedDict, total=False): + EnabledAPIAccess: Optional[Boolean] + IdentityCenterInstanceARN: Optional[IdentityCenterInstanceARN] + SubjectKey: Optional[SubjectKeyIdCOption] + RolesKey: Optional[RolesKeyIdCOption] + + +class DomainEndpointOptions(TypedDict, total=False): + EnforceHTTPS: Optional[Boolean] + TLSSecurityPolicy: Optional[TLSSecurityPolicy] + CustomEndpointEnabled: Optional[Boolean] + CustomEndpoint: Optional[DomainNameFqdn] + CustomEndpointCertificateArn: Optional[ARN] + + +class LogPublishingOption(TypedDict, total=False): + CloudWatchLogsLogGroupArn: Optional[CloudWatchLogsLogGroupArn] + Enabled: Optional[Boolean] + + +LogPublishingOptions = Dict[LogType, LogPublishingOption] + + +class NodeToNodeEncryptionOptions(TypedDict, total=False): + 
Enabled: Optional[Boolean] + + +class EncryptionAtRestOptions(TypedDict, total=False): + Enabled: Optional[Boolean] + KmsKeyId: Optional[KmsKeyId] + + +class VPCOptions(TypedDict, total=False): + SubnetIds: Optional[StringList] + SecurityGroupIds: Optional[StringList] + + +class SnapshotOptions(TypedDict, total=False): + AutomatedSnapshotStartHour: Optional[IntegerClass] + + +class EBSOptions(TypedDict, total=False): + EBSEnabled: Optional[Boolean] + VolumeType: Optional[VolumeType] + VolumeSize: Optional[IntegerClass] + Iops: Optional[IntegerClass] + Throughput: Optional[IntegerClass] + + +class CreateDomainRequest(ServiceRequest): + DomainName: DomainName + EngineVersion: Optional[VersionString] + ClusterConfig: Optional[ClusterConfig] + EBSOptions: Optional[EBSOptions] + AccessPolicies: Optional[PolicyDocument] + IPAddressType: Optional[IPAddressType] + SnapshotOptions: Optional[SnapshotOptions] + VPCOptions: Optional[VPCOptions] + CognitoOptions: Optional[CognitoOptions] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptions] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptions] + AdvancedOptions: Optional[AdvancedOptions] + LogPublishingOptions: Optional[LogPublishingOptions] + DomainEndpointOptions: Optional[DomainEndpointOptions] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptionsInput] + IdentityCenterOptions: Optional[IdentityCenterOptionsInput] + TagList: Optional[TagList] + AutoTuneOptions: Optional[AutoTuneOptionsInput] + OffPeakWindowOptions: Optional[OffPeakWindowOptions] + SoftwareUpdateOptions: Optional[SoftwareUpdateOptions] + AIMLOptions: Optional[AIMLOptionsInput] + + +class ModifyingProperties(TypedDict, total=False): + Name: Optional[String] + ActiveValue: Optional[String] + PendingValue: Optional[String] + ValueType: Optional[PropertyValueType] + + +ModifyingPropertiesList = List[ModifyingProperties] + + +class IdentityCenterOptions(TypedDict, total=False): + EnabledAPIAccess: Optional[Boolean] + IdentityCenterInstanceARN: Optional[IdentityCenterInstanceARN] + SubjectKey: Optional[SubjectKeyIdCOption] + RolesKey: Optional[RolesKeyIdCOption] + IdentityCenterApplicationARN: Optional[IdentityCenterApplicationARN] + IdentityStoreId: Optional[IdentityStoreId] + + +class VPCDerivedInfo(TypedDict, total=False): + VPCId: Optional[String] + SubnetIds: Optional[StringList] + AvailabilityZones: Optional[StringList] + SecurityGroupIds: Optional[StringList] + + +EndpointsMap = Dict[String, ServiceUrl] + + +class DomainStatus(TypedDict, total=False): + DomainId: DomainId + DomainName: DomainName + ARN: ARN + Created: Optional[Boolean] + Deleted: Optional[Boolean] + Endpoint: Optional[ServiceUrl] + EndpointV2: Optional[ServiceUrl] + Endpoints: Optional[EndpointsMap] + DomainEndpointV2HostedZoneId: Optional[HostedZoneId] + Processing: Optional[Boolean] + UpgradeProcessing: Optional[Boolean] + EngineVersion: Optional[VersionString] + ClusterConfig: ClusterConfig + EBSOptions: Optional[EBSOptions] + AccessPolicies: Optional[PolicyDocument] + IPAddressType: Optional[IPAddressType] + SnapshotOptions: Optional[SnapshotOptions] + VPCOptions: Optional[VPCDerivedInfo] + CognitoOptions: Optional[CognitoOptions] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptions] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptions] + AdvancedOptions: Optional[AdvancedOptions] + LogPublishingOptions: Optional[LogPublishingOptions] + ServiceSoftwareOptions: Optional[ServiceSoftwareOptions] + DomainEndpointOptions: Optional[DomainEndpointOptions] + 
AdvancedSecurityOptions: Optional[AdvancedSecurityOptions] + IdentityCenterOptions: Optional[IdentityCenterOptions] + AutoTuneOptions: Optional[AutoTuneOptionsOutput] + ChangeProgressDetails: Optional[ChangeProgressDetails] + OffPeakWindowOptions: Optional[OffPeakWindowOptions] + SoftwareUpdateOptions: Optional[SoftwareUpdateOptions] + DomainProcessingStatus: Optional[DomainProcessingStatusType] + ModifyingProperties: Optional[ModifyingPropertiesList] + AIMLOptions: Optional[AIMLOptionsOutput] + + +class CreateDomainResponse(TypedDict, total=False): + DomainStatus: Optional[DomainStatus] + + +class CreateOutboundConnectionRequest(ServiceRequest): + LocalDomainInfo: DomainInformationContainer + RemoteDomainInfo: DomainInformationContainer + ConnectionAlias: ConnectionAlias + ConnectionMode: Optional[ConnectionMode] + ConnectionProperties: Optional[ConnectionProperties] + + +class OutboundConnectionStatus(TypedDict, total=False): + StatusCode: Optional[OutboundConnectionStatusCode] + Message: Optional[ConnectionStatusMessage] + + +class CreateOutboundConnectionResponse(TypedDict, total=False): + LocalDomainInfo: Optional[DomainInformationContainer] + RemoteDomainInfo: Optional[DomainInformationContainer] + ConnectionAlias: Optional[ConnectionAlias] + ConnectionStatus: Optional[OutboundConnectionStatus] + ConnectionId: Optional[ConnectionId] + ConnectionMode: Optional[ConnectionMode] + ConnectionProperties: Optional[ConnectionProperties] + + +class PackageEncryptionOptions(TypedDict, total=False): + KmsKeyIdentifier: Optional[KmsKeyId] + EncryptionEnabled: Boolean + + +class PackageVendingOptions(TypedDict, total=False): + VendingEnabled: Boolean + + +class PackageConfiguration(TypedDict, total=False): + LicenseRequirement: RequirementLevel + LicenseFilepath: Optional[LicenseFilepath] + ConfigurationRequirement: RequirementLevel + RequiresRestartForConfigurationUpdate: Optional[Boolean] + + +class PackageSource(TypedDict, total=False): + S3BucketName: Optional[S3BucketName] + S3Key: Optional[S3Key] + + +class CreatePackageRequest(ServiceRequest): + PackageName: PackageName + PackageType: PackageType + PackageDescription: Optional[PackageDescription] + PackageSource: PackageSource + PackageConfiguration: Optional[PackageConfiguration] + EngineVersion: Optional[EngineVersion] + PackageVendingOptions: Optional[PackageVendingOptions] + PackageEncryptionOptions: Optional[PackageEncryptionOptions] + + +PackageUserList = List[PackageUser] +UncompressedPluginSizeInBytes = int + + +class PluginProperties(TypedDict, total=False): + Name: Optional[PluginName] + Description: Optional[PluginDescription] + Version: Optional[PluginVersion] + ClassName: Optional[PluginClassName] + UncompressedSizeInBytes: Optional[UncompressedPluginSizeInBytes] + + +CreatedAt = datetime + + +class PackageDetails(TypedDict, total=False): + PackageID: Optional[PackageID] + PackageName: Optional[PackageName] + PackageType: Optional[PackageType] + PackageDescription: Optional[PackageDescription] + PackageStatus: Optional[PackageStatus] + CreatedAt: Optional[CreatedAt] + LastUpdatedAt: Optional[LastUpdated] + AvailablePackageVersion: Optional[PackageVersion] + ErrorDetails: Optional[ErrorDetails] + EngineVersion: Optional[EngineVersion] + AvailablePluginProperties: Optional[PluginProperties] + AvailablePackageConfiguration: Optional[PackageConfiguration] + AllowListedUserList: Optional[PackageUserList] + PackageOwner: Optional[PackageOwner] + PackageVendingOptions: Optional[PackageVendingOptions] + PackageEncryptionOptions: 
Optional[PackageEncryptionOptions] + + +class CreatePackageResponse(TypedDict, total=False): + PackageDetails: Optional[PackageDetails] + + +class CreateVpcEndpointRequest(ServiceRequest): + DomainArn: DomainArn + VpcOptions: VPCOptions + ClientToken: Optional[ClientToken] + + +class VpcEndpoint(TypedDict, total=False): + VpcEndpointId: Optional[VpcEndpointId] + VpcEndpointOwner: Optional[AWSAccount] + DomainArn: Optional[DomainArn] + VpcOptions: Optional[VPCDerivedInfo] + Status: Optional[VpcEndpointStatus] + Endpoint: Optional[Endpoint] + + +class CreateVpcEndpointResponse(TypedDict, total=False): + VpcEndpoint: VpcEndpoint + + +class DataSourceDetails(TypedDict, total=False): + DataSourceType: Optional[DataSourceType] + Name: Optional[DataSourceName] + Description: Optional[DataSourceDescription] + Status: Optional[DataSourceStatus] + + +DataSourceList = List[DataSourceDetails] + + +class DeleteApplicationRequest(ServiceRequest): + id: Id + + +class DeleteApplicationResponse(TypedDict, total=False): + pass + + +class DeleteDataSourceRequest(ServiceRequest): + DomainName: DomainName + Name: DataSourceName + + +class DeleteDataSourceResponse(TypedDict, total=False): + Message: Optional[String] + + +class DeleteDirectQueryDataSourceRequest(ServiceRequest): + DataSourceName: DirectQueryDataSourceName + + +class DeleteDomainRequest(ServiceRequest): + DomainName: DomainName + + +class DeleteDomainResponse(TypedDict, total=False): + DomainStatus: Optional[DomainStatus] + + +class DeleteInboundConnectionRequest(ServiceRequest): + ConnectionId: ConnectionId + + +class DeleteInboundConnectionResponse(TypedDict, total=False): + Connection: Optional[InboundConnection] + + +class DeleteOutboundConnectionRequest(ServiceRequest): + ConnectionId: ConnectionId + + +class OutboundConnection(TypedDict, total=False): + LocalDomainInfo: Optional[DomainInformationContainer] + RemoteDomainInfo: Optional[DomainInformationContainer] + ConnectionId: Optional[ConnectionId] + ConnectionAlias: Optional[ConnectionAlias] + ConnectionStatus: Optional[OutboundConnectionStatus] + ConnectionMode: Optional[ConnectionMode] + ConnectionProperties: Optional[ConnectionProperties] + + +class DeleteOutboundConnectionResponse(TypedDict, total=False): + Connection: Optional[OutboundConnection] + + +class DeletePackageRequest(ServiceRequest): + PackageID: PackageID + + +class DeletePackageResponse(TypedDict, total=False): + PackageDetails: Optional[PackageDetails] + + +class DeleteVpcEndpointRequest(ServiceRequest): + VpcEndpointId: VpcEndpointId + + +class VpcEndpointSummary(TypedDict, total=False): + VpcEndpointId: Optional[VpcEndpointId] + VpcEndpointOwner: Optional[String] + DomainArn: Optional[DomainArn] + Status: Optional[VpcEndpointStatus] + + +class DeleteVpcEndpointResponse(TypedDict, total=False): + VpcEndpointSummary: VpcEndpointSummary + + +class DescribeDomainAutoTunesRequest(ServiceRequest): + DomainName: DomainName + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class DescribeDomainAutoTunesResponse(TypedDict, total=False): + AutoTunes: Optional[AutoTuneList] + NextToken: Optional[NextToken] + + +class DescribeDomainChangeProgressRequest(ServiceRequest): + DomainName: DomainName + ChangeId: Optional[GUID] + + +class DescribeDomainChangeProgressResponse(TypedDict, total=False): + ChangeProgressStatus: Optional[ChangeProgressStatusDetails] + + +class DescribeDomainConfigRequest(ServiceRequest): + DomainName: DomainName + + +class SoftwareUpdateOptionsStatus(TypedDict, total=False): + Options: 
Optional[SoftwareUpdateOptions] + Status: Optional[OptionStatus] + + +class OffPeakWindowOptionsStatus(TypedDict, total=False): + Options: Optional[OffPeakWindowOptions] + Status: Optional[OptionStatus] + + +class IdentityCenterOptionsStatus(TypedDict, total=False): + Options: IdentityCenterOptions + Status: OptionStatus + + +class DomainEndpointOptionsStatus(TypedDict, total=False): + Options: DomainEndpointOptions + Status: OptionStatus + + +class LogPublishingOptionsStatus(TypedDict, total=False): + Options: Optional[LogPublishingOptions] + Status: Optional[OptionStatus] + + +class NodeToNodeEncryptionOptionsStatus(TypedDict, total=False): + Options: NodeToNodeEncryptionOptions + Status: OptionStatus + + +class EncryptionAtRestOptionsStatus(TypedDict, total=False): + Options: EncryptionAtRestOptions + Status: OptionStatus + + +class VPCDerivedInfoStatus(TypedDict, total=False): + Options: VPCDerivedInfo + Status: OptionStatus + + +class SnapshotOptionsStatus(TypedDict, total=False): + Options: SnapshotOptions + Status: OptionStatus + + +class IPAddressTypeStatus(TypedDict, total=False): + Options: IPAddressType + Status: OptionStatus + + +class EBSOptionsStatus(TypedDict, total=False): + Options: EBSOptions + Status: OptionStatus + + +class VersionStatus(TypedDict, total=False): + Options: VersionString + Status: OptionStatus + + +class DomainConfig(TypedDict, total=False): + EngineVersion: Optional[VersionStatus] + ClusterConfig: Optional[ClusterConfigStatus] + EBSOptions: Optional[EBSOptionsStatus] + AccessPolicies: Optional[AccessPoliciesStatus] + IPAddressType: Optional[IPAddressTypeStatus] + SnapshotOptions: Optional[SnapshotOptionsStatus] + VPCOptions: Optional[VPCDerivedInfoStatus] + CognitoOptions: Optional[CognitoOptionsStatus] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptionsStatus] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptionsStatus] + AdvancedOptions: Optional[AdvancedOptionsStatus] + LogPublishingOptions: Optional[LogPublishingOptionsStatus] + DomainEndpointOptions: Optional[DomainEndpointOptionsStatus] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptionsStatus] + IdentityCenterOptions: Optional[IdentityCenterOptionsStatus] + AutoTuneOptions: Optional[AutoTuneOptionsStatus] + ChangeProgressDetails: Optional[ChangeProgressDetails] + OffPeakWindowOptions: Optional[OffPeakWindowOptionsStatus] + SoftwareUpdateOptions: Optional[SoftwareUpdateOptionsStatus] + ModifyingProperties: Optional[ModifyingPropertiesList] + AIMLOptions: Optional[AIMLOptionsStatus] + + +class DescribeDomainConfigResponse(TypedDict, total=False): + DomainConfig: DomainConfig + + +class DescribeDomainHealthRequest(ServiceRequest): + DomainName: DomainName + + +class EnvironmentInfo(TypedDict, total=False): + AvailabilityZoneInformation: Optional[AvailabilityZoneInfoList] + + +EnvironmentInfoList = List[EnvironmentInfo] + + +class DescribeDomainHealthResponse(TypedDict, total=False): + DomainState: Optional[DomainState] + AvailabilityZoneCount: Optional[NumberOfAZs] + ActiveAvailabilityZoneCount: Optional[NumberOfAZs] + StandByAvailabilityZoneCount: Optional[NumberOfAZs] + DataNodeCount: Optional[NumberOfNodes] + DedicatedMaster: Optional[Boolean] + MasterEligibleNodeCount: Optional[NumberOfNodes] + WarmNodeCount: Optional[NumberOfNodes] + MasterNode: Optional[MasterNodeStatus] + ClusterHealth: Optional[DomainHealth] + TotalShards: Optional[NumberOfShards] + TotalUnAssignedShards: Optional[NumberOfShards] + EnvironmentInformation: Optional[EnvironmentInfoList] + + +class 
DescribeDomainNodesRequest(ServiceRequest): + DomainName: DomainName + + +class DomainNodesStatus(TypedDict, total=False): + NodeId: Optional[NodeId] + NodeType: Optional[NodeType] + AvailabilityZone: Optional[AvailabilityZone] + InstanceType: Optional[OpenSearchPartitionInstanceType] + NodeStatus: Optional[NodeStatus] + StorageType: Optional[StorageTypeName] + StorageVolumeType: Optional[VolumeType] + StorageSize: Optional[VolumeSize] + + +DomainNodesStatusList = List[DomainNodesStatus] + + +class DescribeDomainNodesResponse(TypedDict, total=False): + DomainNodesStatusList: Optional[DomainNodesStatusList] + + +class DescribeDomainRequest(ServiceRequest): + DomainName: DomainName + + +class DescribeDomainResponse(TypedDict, total=False): + DomainStatus: DomainStatus + + +DomainNameList = List[DomainName] + + +class DescribeDomainsRequest(ServiceRequest): + DomainNames: DomainNameList + + +DomainStatusList = List[DomainStatus] + + +class DescribeDomainsResponse(TypedDict, total=False): + DomainStatusList: DomainStatusList + + +class DescribeDryRunProgressRequest(ServiceRequest): + DomainName: DomainName + DryRunId: Optional[GUID] + LoadDryRunConfig: Optional[Boolean] + + +class DryRunResults(TypedDict, total=False): + DeploymentType: Optional[DeploymentType] + Message: Optional[Message] + + +class ValidationFailure(TypedDict, total=False): + Code: Optional[String] + Message: Optional[String] + + +ValidationFailures = List[ValidationFailure] + + +class DryRunProgressStatus(TypedDict, total=False): + DryRunId: GUID + DryRunStatus: String + CreationDate: String + UpdateDate: String + ValidationFailures: Optional[ValidationFailures] + + +class DescribeDryRunProgressResponse(TypedDict, total=False): + DryRunProgressStatus: Optional[DryRunProgressStatus] + DryRunConfig: Optional[DomainStatus] + DryRunResults: Optional[DryRunResults] + + +ValueStringList = List[NonEmptyString] + + +class Filter(TypedDict, total=False): + Name: Optional[NonEmptyString] + Values: Optional[ValueStringList] + + +FilterList = List[Filter] + + +class DescribeInboundConnectionsRequest(ServiceRequest): + Filters: Optional[FilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +InboundConnections = List[InboundConnection] + + +class DescribeInboundConnectionsResponse(TypedDict, total=False): + Connections: Optional[InboundConnections] + NextToken: Optional[NextToken] + + +class DescribeInstanceTypeLimitsRequest(ServiceRequest): + DomainName: Optional[DomainName] + InstanceType: OpenSearchPartitionInstanceType + EngineVersion: VersionString + + +class InstanceCountLimits(TypedDict, total=False): + MinimumInstanceCount: Optional[MinimumInstanceCount] + MaximumInstanceCount: Optional[MaximumInstanceCount] + + +class InstanceLimits(TypedDict, total=False): + InstanceCountLimits: Optional[InstanceCountLimits] + + +class StorageTypeLimit(TypedDict, total=False): + LimitName: Optional[LimitName] + LimitValues: Optional[LimitValueList] + + +StorageTypeLimitList = List[StorageTypeLimit] + + +class StorageType(TypedDict, total=False): + StorageTypeName: Optional[StorageTypeName] + StorageSubTypeName: Optional[StorageSubTypeName] + StorageTypeLimits: Optional[StorageTypeLimitList] + + +StorageTypeList = List[StorageType] + + +class Limits(TypedDict, total=False): + StorageTypes: Optional[StorageTypeList] + InstanceLimits: Optional[InstanceLimits] + AdditionalLimits: Optional[AdditionalLimitList] + + +LimitsByRole = Dict[InstanceRole, Limits] + + +class DescribeInstanceTypeLimitsResponse(TypedDict, 
total=False): + LimitsByRole: Optional[LimitsByRole] + + +class DescribeOutboundConnectionsRequest(ServiceRequest): + Filters: Optional[FilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +OutboundConnections = List[OutboundConnection] + + +class DescribeOutboundConnectionsResponse(TypedDict, total=False): + Connections: Optional[OutboundConnections] + NextToken: Optional[NextToken] + + +DescribePackagesFilterValues = List[DescribePackagesFilterValue] + + +class DescribePackagesFilter(TypedDict, total=False): + Name: Optional[DescribePackagesFilterName] + Value: Optional[DescribePackagesFilterValues] + + +DescribePackagesFilterList = List[DescribePackagesFilter] + + +class DescribePackagesRequest(ServiceRequest): + Filters: Optional[DescribePackagesFilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +PackageDetailsList = List[PackageDetails] + + +class DescribePackagesResponse(TypedDict, total=False): + PackageDetailsList: Optional[PackageDetailsList] + NextToken: Optional[String] + + +class DescribeReservedInstanceOfferingsRequest(ServiceRequest): + ReservedInstanceOfferingId: Optional[GUID] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class RecurringCharge(TypedDict, total=False): + RecurringChargeAmount: Optional[Double] + RecurringChargeFrequency: Optional[String] + + +RecurringChargeList = List[RecurringCharge] + + +class ReservedInstanceOffering(TypedDict, total=False): + ReservedInstanceOfferingId: Optional[GUID] + InstanceType: Optional[OpenSearchPartitionInstanceType] + Duration: Optional[Integer] + FixedPrice: Optional[Double] + UsagePrice: Optional[Double] + CurrencyCode: Optional[String] + PaymentOption: Optional[ReservedInstancePaymentOption] + RecurringCharges: Optional[RecurringChargeList] + + +ReservedInstanceOfferingList = List[ReservedInstanceOffering] + + +class DescribeReservedInstanceOfferingsResponse(TypedDict, total=False): + NextToken: Optional[NextToken] + ReservedInstanceOfferings: Optional[ReservedInstanceOfferingList] + + +class DescribeReservedInstancesRequest(ServiceRequest): + ReservedInstanceId: Optional[GUID] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ReservedInstance(TypedDict, total=False): + ReservationName: Optional[ReservationToken] + ReservedInstanceId: Optional[GUID] + BillingSubscriptionId: Optional[Long] + ReservedInstanceOfferingId: Optional[String] + InstanceType: Optional[OpenSearchPartitionInstanceType] + StartTime: Optional[UpdateTimestamp] + Duration: Optional[Integer] + FixedPrice: Optional[Double] + UsagePrice: Optional[Double] + CurrencyCode: Optional[String] + InstanceCount: Optional[Integer] + State: Optional[String] + PaymentOption: Optional[ReservedInstancePaymentOption] + RecurringCharges: Optional[RecurringChargeList] + + +ReservedInstanceList = List[ReservedInstance] + + +class DescribeReservedInstancesResponse(TypedDict, total=False): + NextToken: Optional[String] + ReservedInstances: Optional[ReservedInstanceList] + + +VpcEndpointIdList = List[VpcEndpointId] + + +class DescribeVpcEndpointsRequest(ServiceRequest): + VpcEndpointIds: VpcEndpointIdList + + +class VpcEndpointError(TypedDict, total=False): + VpcEndpointId: Optional[VpcEndpointId] + ErrorCode: Optional[VpcEndpointErrorCode] + ErrorMessage: Optional[String] + + +VpcEndpointErrorList = List[VpcEndpointError] +VpcEndpoints = List[VpcEndpoint] + + +class DescribeVpcEndpointsResponse(TypedDict, total=False): + VpcEndpoints: VpcEndpoints + 
VpcEndpointErrors: VpcEndpointErrorList + + +class DirectQueryDataSource(TypedDict, total=False): + DataSourceName: Optional[DirectQueryDataSourceName] + DataSourceType: Optional[DirectQueryDataSourceType] + Description: Optional[DirectQueryDataSourceDescription] + OpenSearchArns: Optional[DirectQueryOpenSearchARNList] + DataSourceArn: Optional[String] + TagList: Optional[TagList] + + +DirectQueryDataSourceList = List[DirectQueryDataSource] + + +class DissociatePackageRequest(ServiceRequest): + PackageID: PackageID + DomainName: DomainName + + +class DissociatePackageResponse(TypedDict, total=False): + DomainPackageDetails: Optional[DomainPackageDetails] + + +class DissociatePackagesRequest(ServiceRequest): + PackageList: PackageIDList + DomainName: DomainName + + +class DissociatePackagesResponse(TypedDict, total=False): + DomainPackageDetailsList: Optional[DomainPackageDetailsList] + + +class DomainInfo(TypedDict, total=False): + DomainName: Optional[DomainName] + EngineType: Optional[EngineType] + + +DomainInfoList = List[DomainInfo] + + +class DomainMaintenanceDetails(TypedDict, total=False): + MaintenanceId: Optional[RequestId] + DomainName: Optional[DomainName] + Action: Optional[MaintenanceType] + NodeId: Optional[NodeId] + Status: Optional[MaintenanceStatus] + StatusMessage: Optional[MaintenanceStatusMessage] + CreatedAt: Optional[UpdateTimestamp] + UpdatedAt: Optional[UpdateTimestamp] + + +DomainMaintenanceList = List[DomainMaintenanceDetails] + + +class GetApplicationRequest(ServiceRequest): + id: Id + + +class GetApplicationResponse(TypedDict, total=False): + id: Optional[Id] + arn: Optional[ARN] + name: Optional[ApplicationName] + endpoint: Optional[String] + status: Optional[ApplicationStatus] + iamIdentityCenterOptions: Optional[IamIdentityCenterOptions] + dataSources: Optional[DataSources] + appConfigs: Optional[AppConfigs] + createdAt: Optional[Timestamp] + lastUpdatedAt: Optional[Timestamp] + + +class GetCompatibleVersionsRequest(ServiceRequest): + DomainName: Optional[DomainName] + + +class GetCompatibleVersionsResponse(TypedDict, total=False): + CompatibleVersions: Optional[CompatibleVersionsList] + + +class GetDataSourceRequest(ServiceRequest): + DomainName: DomainName + Name: DataSourceName + + +class GetDataSourceResponse(TypedDict, total=False): + DataSourceType: Optional[DataSourceType] + Name: Optional[DataSourceName] + Description: Optional[DataSourceDescription] + Status: Optional[DataSourceStatus] + + +class GetDirectQueryDataSourceRequest(ServiceRequest): + DataSourceName: DirectQueryDataSourceName + + +class GetDirectQueryDataSourceResponse(TypedDict, total=False): + DataSourceName: Optional[DirectQueryDataSourceName] + DataSourceType: Optional[DirectQueryDataSourceType] + Description: Optional[DirectQueryDataSourceDescription] + OpenSearchArns: Optional[DirectQueryOpenSearchARNList] + DataSourceArn: Optional[String] + + +class GetDomainMaintenanceStatusRequest(ServiceRequest): + DomainName: DomainName + MaintenanceId: RequestId + + +class GetDomainMaintenanceStatusResponse(TypedDict, total=False): + Status: Optional[MaintenanceStatus] + StatusMessage: Optional[MaintenanceStatusMessage] + NodeId: Optional[NodeId] + Action: Optional[MaintenanceType] + CreatedAt: Optional[UpdateTimestamp] + UpdatedAt: Optional[UpdateTimestamp] + + +class GetPackageVersionHistoryRequest(ServiceRequest): + PackageID: PackageID + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class PackageVersionHistory(TypedDict, total=False): + PackageVersion: 
Optional[PackageVersion] + CommitMessage: Optional[CommitMessage] + CreatedAt: Optional[CreatedAt] + PluginProperties: Optional[PluginProperties] + PackageConfiguration: Optional[PackageConfiguration] + + +PackageVersionHistoryList = List[PackageVersionHistory] + + +class GetPackageVersionHistoryResponse(TypedDict, total=False): + PackageID: Optional[PackageID] + PackageVersionHistoryList: Optional[PackageVersionHistoryList] + NextToken: Optional[String] + + +class GetUpgradeHistoryRequest(ServiceRequest): + DomainName: DomainName + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +Issues = List[Issue] + + +class UpgradeStepItem(TypedDict, total=False): + UpgradeStep: Optional[UpgradeStep] + UpgradeStepStatus: Optional[UpgradeStatus] + Issues: Optional[Issues] + ProgressPercent: Optional[Double] + + +UpgradeStepsList = List[UpgradeStepItem] +StartTimestamp = datetime + + +class UpgradeHistory(TypedDict, total=False): + UpgradeName: Optional[UpgradeName] + StartTimestamp: Optional[StartTimestamp] + UpgradeStatus: Optional[UpgradeStatus] + StepsList: Optional[UpgradeStepsList] + + +UpgradeHistoryList = List[UpgradeHistory] + + +class GetUpgradeHistoryResponse(TypedDict, total=False): + UpgradeHistories: Optional[UpgradeHistoryList] + NextToken: Optional[String] + + +class GetUpgradeStatusRequest(ServiceRequest): + DomainName: DomainName + + +class GetUpgradeStatusResponse(TypedDict, total=False): + UpgradeStep: Optional[UpgradeStep] + StepStatus: Optional[UpgradeStatus] + UpgradeName: Optional[UpgradeName] + + +InstanceRoleList = List[InstanceRole] + + +class InstanceTypeDetails(TypedDict, total=False): + InstanceType: Optional[OpenSearchPartitionInstanceType] + EncryptionEnabled: Optional[Boolean] + CognitoEnabled: Optional[Boolean] + AppLogsEnabled: Optional[Boolean] + AdvancedSecurityEnabled: Optional[Boolean] + WarmEnabled: Optional[Boolean] + InstanceRole: Optional[InstanceRoleList] + AvailabilityZones: Optional[AvailabilityZoneList] + + +InstanceTypeDetailsList = List[InstanceTypeDetails] + + +class ListApplicationsRequest(ServiceRequest): + nextToken: Optional[NextToken] + statuses: Optional[ApplicationStatuses] + maxResults: Optional[MaxResults] + + +class ListApplicationsResponse(TypedDict, total=False): + ApplicationSummaries: Optional[ApplicationSummaries] + nextToken: Optional[NextToken] + + +class ListDataSourcesRequest(ServiceRequest): + DomainName: DomainName + + +class ListDataSourcesResponse(TypedDict, total=False): + DataSources: Optional[DataSourceList] + + +class ListDirectQueryDataSourcesRequest(ServiceRequest): + NextToken: Optional[NextToken] + + +class ListDirectQueryDataSourcesResponse(TypedDict, total=False): + NextToken: Optional[NextToken] + DirectQueryDataSources: Optional[DirectQueryDataSourceList] + + +class ListDomainMaintenancesRequest(ServiceRequest): + DomainName: DomainName + Action: Optional[MaintenanceType] + Status: Optional[MaintenanceStatus] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListDomainMaintenancesResponse(TypedDict, total=False): + DomainMaintenances: Optional[DomainMaintenanceList] + NextToken: Optional[NextToken] + + +class ListDomainNamesRequest(ServiceRequest): + EngineType: Optional[EngineType] + + +class ListDomainNamesResponse(TypedDict, total=False): + DomainNames: Optional[DomainInfoList] + + +class ListDomainsForPackageRequest(ServiceRequest): + PackageID: PackageID + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class 
ListDomainsForPackageResponse(TypedDict, total=False): + DomainPackageDetailsList: Optional[DomainPackageDetailsList] + NextToken: Optional[String] + + +class ListInstanceTypeDetailsRequest(ServiceRequest): + EngineVersion: VersionString + DomainName: Optional[DomainName] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + RetrieveAZs: Optional[Boolean] + InstanceType: Optional[InstanceTypeString] + + +class ListInstanceTypeDetailsResponse(TypedDict, total=False): + InstanceTypeDetails: Optional[InstanceTypeDetailsList] + NextToken: Optional[NextToken] + + +class ListPackagesForDomainRequest(ServiceRequest): + DomainName: DomainName + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListPackagesForDomainResponse(TypedDict, total=False): + DomainPackageDetailsList: Optional[DomainPackageDetailsList] + NextToken: Optional[String] + + +class ListScheduledActionsRequest(ServiceRequest): + DomainName: DomainName + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ScheduledAction(TypedDict, total=False): + Id: String + Type: ActionType + Severity: ActionSeverity + ScheduledTime: Long + Description: Optional[String] + ScheduledBy: Optional[ScheduledBy] + Status: Optional[ActionStatus] + Mandatory: Optional[Boolean] + Cancellable: Optional[Boolean] + + +ScheduledActionsList = List[ScheduledAction] + + +class ListScheduledActionsResponse(TypedDict, total=False): + ScheduledActions: Optional[ScheduledActionsList] + NextToken: Optional[NextToken] + + +class ListTagsRequest(ServiceRequest): + ARN: ARN + + +class ListTagsResponse(TypedDict, total=False): + TagList: Optional[TagList] + + +class ListVersionsRequest(ServiceRequest): + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListVersionsResponse(TypedDict, total=False): + Versions: Optional[VersionList] + NextToken: Optional[NextToken] + + +class ListVpcEndpointAccessRequest(ServiceRequest): + DomainName: DomainName + NextToken: Optional[NextToken] + + +class ListVpcEndpointAccessResponse(TypedDict, total=False): + AuthorizedPrincipalList: AuthorizedPrincipalList + NextToken: NextToken + + +class ListVpcEndpointsForDomainRequest(ServiceRequest): + DomainName: DomainName + NextToken: Optional[NextToken] + + +VpcEndpointSummaryList = List[VpcEndpointSummary] + + +class ListVpcEndpointsForDomainResponse(TypedDict, total=False): + VpcEndpointSummaryList: VpcEndpointSummaryList + NextToken: NextToken + + +class ListVpcEndpointsRequest(ServiceRequest): + NextToken: Optional[NextToken] + + +class ListVpcEndpointsResponse(TypedDict, total=False): + VpcEndpointSummaryList: VpcEndpointSummaryList + NextToken: NextToken + + +class PurchaseReservedInstanceOfferingRequest(ServiceRequest): + ReservedInstanceOfferingId: GUID + ReservationName: ReservationToken + InstanceCount: Optional[InstanceCount] + + +class PurchaseReservedInstanceOfferingResponse(TypedDict, total=False): + ReservedInstanceId: Optional[GUID] + ReservationName: Optional[ReservationToken] + + +class RejectInboundConnectionRequest(ServiceRequest): + ConnectionId: ConnectionId + + +class RejectInboundConnectionResponse(TypedDict, total=False): + Connection: Optional[InboundConnection] + + +class RemoveTagsRequest(ServiceRequest): + ARN: ARN + TagKeys: StringList + + +class RevokeVpcEndpointAccessRequest(ServiceRequest): + DomainName: DomainName + Account: Optional[AWSAccount] + Service: Optional[AWSServicePrincipal] + + +class RevokeVpcEndpointAccessResponse(TypedDict, total=False): + pass + + 
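An illustrative aside on how these shapes are consumed (a sketch, not part of the generated stubs: the Tag shape is assumed to be the Key/Value TypedDict defined earlier in this module, and the tag values are made up): because every class above is a TypedDict declared with total=False, all keys are optional at construction time, so a response can be built with keyword arguments or populated incrementally.

def build_list_tags_response() -> ListTagsResponse:
    # TypedDicts are plain dicts at runtime; keyword construction and
    # item assignment are equivalent ways to fill in a response.
    response = ListTagsResponse(TagList=[Tag(Key="env", Value="test")])
    response["TagList"].append(Tag(Key="team", Value="search"))
    return response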
+class StartDomainMaintenanceRequest(ServiceRequest): + DomainName: DomainName + Action: MaintenanceType + NodeId: Optional[NodeId] + + +class StartDomainMaintenanceResponse(TypedDict, total=False): + MaintenanceId: Optional[RequestId] + + +class StartServiceSoftwareUpdateRequest(ServiceRequest): + DomainName: DomainName + ScheduleAt: Optional[ScheduleAt] + DesiredStartTime: Optional[Long] + + +class StartServiceSoftwareUpdateResponse(TypedDict, total=False): + ServiceSoftwareOptions: Optional[ServiceSoftwareOptions] + + +class UpdateApplicationRequest(ServiceRequest): + id: Id + dataSources: Optional[DataSources] + appConfigs: Optional[AppConfigs] + + +class UpdateApplicationResponse(TypedDict, total=False): + id: Optional[Id] + name: Optional[ApplicationName] + arn: Optional[ARN] + dataSources: Optional[DataSources] + iamIdentityCenterOptions: Optional[IamIdentityCenterOptions] + appConfigs: Optional[AppConfigs] + createdAt: Optional[Timestamp] + lastUpdatedAt: Optional[Timestamp] + + +class UpdateDataSourceRequest(ServiceRequest): + DomainName: DomainName + Name: DataSourceName + DataSourceType: DataSourceType + Description: Optional[DataSourceDescription] + Status: Optional[DataSourceStatus] + + +class UpdateDataSourceResponse(TypedDict, total=False): + Message: Optional[String] + + +class UpdateDirectQueryDataSourceRequest(ServiceRequest): + DataSourceName: DirectQueryDataSourceName + DataSourceType: DirectQueryDataSourceType + Description: Optional[DirectQueryDataSourceDescription] + OpenSearchArns: DirectQueryOpenSearchARNList + + +class UpdateDirectQueryDataSourceResponse(TypedDict, total=False): + DataSourceArn: Optional[String] + + +class UpdateDomainConfigRequest(ServiceRequest): + DomainName: DomainName + ClusterConfig: Optional[ClusterConfig] + EBSOptions: Optional[EBSOptions] + SnapshotOptions: Optional[SnapshotOptions] + VPCOptions: Optional[VPCOptions] + CognitoOptions: Optional[CognitoOptions] + AdvancedOptions: Optional[AdvancedOptions] + AccessPolicies: Optional[PolicyDocument] + IPAddressType: Optional[IPAddressType] + LogPublishingOptions: Optional[LogPublishingOptions] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptions] + DomainEndpointOptions: Optional[DomainEndpointOptions] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptions] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptionsInput] + IdentityCenterOptions: Optional[IdentityCenterOptionsInput] + AutoTuneOptions: Optional[AutoTuneOptions] + DryRun: Optional[DryRun] + DryRunMode: Optional[DryRunMode] + OffPeakWindowOptions: Optional[OffPeakWindowOptions] + SoftwareUpdateOptions: Optional[SoftwareUpdateOptions] + AIMLOptions: Optional[AIMLOptionsInput] + + +class UpdateDomainConfigResponse(TypedDict, total=False): + DomainConfig: DomainConfig + DryRunResults: Optional[DryRunResults] + DryRunProgressStatus: Optional[DryRunProgressStatus] + + +class UpdatePackageRequest(ServiceRequest): + PackageID: PackageID + PackageSource: PackageSource + PackageDescription: Optional[PackageDescription] + CommitMessage: Optional[CommitMessage] + PackageConfiguration: Optional[PackageConfiguration] + PackageEncryptionOptions: Optional[PackageEncryptionOptions] + + +class UpdatePackageResponse(TypedDict, total=False): + PackageDetails: Optional[PackageDetails] + + +class UpdatePackageScopeRequest(ServiceRequest): + PackageID: PackageID + Operation: PackageScopeOperationEnum + PackageUserList: PackageUserList + + +class UpdatePackageScopeResponse(TypedDict, total=False): + PackageID: Optional[PackageID] + 
Operation: Optional[PackageScopeOperationEnum] + PackageUserList: Optional[PackageUserList] + + +class UpdateScheduledActionRequest(ServiceRequest): + DomainName: DomainName + ActionID: String + ActionType: ActionType + ScheduleAt: ScheduleAt + DesiredStartTime: Optional[Long] + + +class UpdateScheduledActionResponse(TypedDict, total=False): + ScheduledAction: Optional[ScheduledAction] + + +class UpdateVpcEndpointRequest(ServiceRequest): + VpcEndpointId: VpcEndpointId + VpcOptions: VPCOptions + + +class UpdateVpcEndpointResponse(TypedDict, total=False): + VpcEndpoint: VpcEndpoint + + +class UpgradeDomainRequest(ServiceRequest): + DomainName: DomainName + TargetVersion: VersionString + PerformCheckOnly: Optional[Boolean] + AdvancedOptions: Optional[AdvancedOptions] + + +class UpgradeDomainResponse(TypedDict, total=False): + UpgradeId: Optional[String] + DomainName: Optional[DomainName] + TargetVersion: Optional[VersionString] + PerformCheckOnly: Optional[Boolean] + AdvancedOptions: Optional[AdvancedOptions] + ChangeProgressDetails: Optional[ChangeProgressDetails] + + +class OpensearchApi: + service = "opensearch" + version = "2021-01-01" + + @handler("AcceptInboundConnection") + def accept_inbound_connection( + self, context: RequestContext, connection_id: ConnectionId, **kwargs + ) -> AcceptInboundConnectionResponse: + raise NotImplementedError + + @handler("AddDataSource") + def add_data_source( + self, + context: RequestContext, + domain_name: DomainName, + name: DataSourceName, + data_source_type: DataSourceType, + description: DataSourceDescription | None = None, + **kwargs, + ) -> AddDataSourceResponse: + raise NotImplementedError + + @handler("AddDirectQueryDataSource") + def add_direct_query_data_source( + self, + context: RequestContext, + data_source_name: DirectQueryDataSourceName, + data_source_type: DirectQueryDataSourceType, + open_search_arns: DirectQueryOpenSearchARNList, + description: DirectQueryDataSourceDescription | None = None, + tag_list: TagList | None = None, + **kwargs, + ) -> AddDirectQueryDataSourceResponse: + raise NotImplementedError + + @handler("AddTags") + def add_tags(self, context: RequestContext, arn: ARN, tag_list: TagList, **kwargs) -> None: + raise NotImplementedError + + @handler("AssociatePackage") + def associate_package( + self, + context: RequestContext, + package_id: PackageID, + domain_name: DomainName, + prerequisite_package_id_list: PackageIDList | None = None, + association_configuration: PackageAssociationConfiguration | None = None, + **kwargs, + ) -> AssociatePackageResponse: + raise NotImplementedError + + @handler("AssociatePackages") + def associate_packages( + self, + context: RequestContext, + package_list: PackageDetailsForAssociationList, + domain_name: DomainName, + **kwargs, + ) -> AssociatePackagesResponse: + raise NotImplementedError + + @handler("AuthorizeVpcEndpointAccess") + def authorize_vpc_endpoint_access( + self, + context: RequestContext, + domain_name: DomainName, + account: AWSAccount | None = None, + service: AWSServicePrincipal | None = None, + **kwargs, + ) -> AuthorizeVpcEndpointAccessResponse: + raise NotImplementedError + + @handler("CancelDomainConfigChange") + def cancel_domain_config_change( + self, + context: RequestContext, + domain_name: DomainName, + dry_run: DryRun | None = None, + **kwargs, + ) -> CancelDomainConfigChangeResponse: + raise NotImplementedError + + @handler("CancelServiceSoftwareUpdate") + def cancel_service_software_update( + self, context: RequestContext, domain_name: DomainName, **kwargs 
+ ) -> CancelServiceSoftwareUpdateResponse: + raise NotImplementedError + + @handler("CreateApplication") + def create_application( + self, + context: RequestContext, + name: ApplicationName, + client_token: ClientToken | None = None, + data_sources: DataSources | None = None, + iam_identity_center_options: IamIdentityCenterOptionsInput | None = None, + app_configs: AppConfigs | None = None, + tag_list: TagList | None = None, + **kwargs, + ) -> CreateApplicationResponse: + raise NotImplementedError + + @handler("CreateDomain") + def create_domain( + self, + context: RequestContext, + domain_name: DomainName, + engine_version: VersionString | None = None, + cluster_config: ClusterConfig | None = None, + ebs_options: EBSOptions | None = None, + access_policies: PolicyDocument | None = None, + ip_address_type: IPAddressType | None = None, + snapshot_options: SnapshotOptions | None = None, + vpc_options: VPCOptions | None = None, + cognito_options: CognitoOptions | None = None, + encryption_at_rest_options: EncryptionAtRestOptions | None = None, + node_to_node_encryption_options: NodeToNodeEncryptionOptions | None = None, + advanced_options: AdvancedOptions | None = None, + log_publishing_options: LogPublishingOptions | None = None, + domain_endpoint_options: DomainEndpointOptions | None = None, + advanced_security_options: AdvancedSecurityOptionsInput | None = None, + identity_center_options: IdentityCenterOptionsInput | None = None, + tag_list: TagList | None = None, + auto_tune_options: AutoTuneOptionsInput | None = None, + off_peak_window_options: OffPeakWindowOptions | None = None, + software_update_options: SoftwareUpdateOptions | None = None, + aiml_options: AIMLOptionsInput | None = None, + **kwargs, + ) -> CreateDomainResponse: + raise NotImplementedError + + @handler("CreateOutboundConnection") + def create_outbound_connection( + self, + context: RequestContext, + local_domain_info: DomainInformationContainer, + remote_domain_info: DomainInformationContainer, + connection_alias: ConnectionAlias, + connection_mode: ConnectionMode | None = None, + connection_properties: ConnectionProperties | None = None, + **kwargs, + ) -> CreateOutboundConnectionResponse: + raise NotImplementedError + + @handler("CreatePackage") + def create_package( + self, + context: RequestContext, + package_name: PackageName, + package_type: PackageType, + package_source: PackageSource, + package_description: PackageDescription | None = None, + package_configuration: PackageConfiguration | None = None, + engine_version: EngineVersion | None = None, + package_vending_options: PackageVendingOptions | None = None, + package_encryption_options: PackageEncryptionOptions | None = None, + **kwargs, + ) -> CreatePackageResponse: + raise NotImplementedError + + @handler("CreateVpcEndpoint") + def create_vpc_endpoint( + self, + context: RequestContext, + domain_arn: DomainArn, + vpc_options: VPCOptions, + client_token: ClientToken | None = None, + **kwargs, + ) -> CreateVpcEndpointResponse: + raise NotImplementedError + + @handler("DeleteApplication") + def delete_application( + self, context: RequestContext, id: Id, **kwargs + ) -> DeleteApplicationResponse: + raise NotImplementedError + + @handler("DeleteDataSource") + def delete_data_source( + self, context: RequestContext, domain_name: DomainName, name: DataSourceName, **kwargs + ) -> DeleteDataSourceResponse: + raise NotImplementedError + + @handler("DeleteDirectQueryDataSource") + def delete_direct_query_data_source( + self, context: RequestContext, data_source_name: 
DirectQueryDataSourceName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteDomain") + def delete_domain( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DeleteDomainResponse: + raise NotImplementedError + + @handler("DeleteInboundConnection") + def delete_inbound_connection( + self, context: RequestContext, connection_id: ConnectionId, **kwargs + ) -> DeleteInboundConnectionResponse: + raise NotImplementedError + + @handler("DeleteOutboundConnection") + def delete_outbound_connection( + self, context: RequestContext, connection_id: ConnectionId, **kwargs + ) -> DeleteOutboundConnectionResponse: + raise NotImplementedError + + @handler("DeletePackage") + def delete_package( + self, context: RequestContext, package_id: PackageID, **kwargs + ) -> DeletePackageResponse: + raise NotImplementedError + + @handler("DeleteVpcEndpoint") + def delete_vpc_endpoint( + self, context: RequestContext, vpc_endpoint_id: VpcEndpointId, **kwargs + ) -> DeleteVpcEndpointResponse: + raise NotImplementedError + + @handler("DescribeDomain") + def describe_domain( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeDomainResponse: + raise NotImplementedError + + @handler("DescribeDomainAutoTunes") + def describe_domain_auto_tunes( + self, + context: RequestContext, + domain_name: DomainName, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeDomainAutoTunesResponse: + raise NotImplementedError + + @handler("DescribeDomainChangeProgress") + def describe_domain_change_progress( + self, + context: RequestContext, + domain_name: DomainName, + change_id: GUID | None = None, + **kwargs, + ) -> DescribeDomainChangeProgressResponse: + raise NotImplementedError + + @handler("DescribeDomainConfig") + def describe_domain_config( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeDomainConfigResponse: + raise NotImplementedError + + @handler("DescribeDomainHealth") + def describe_domain_health( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeDomainHealthResponse: + raise NotImplementedError + + @handler("DescribeDomainNodes") + def describe_domain_nodes( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeDomainNodesResponse: + raise NotImplementedError + + @handler("DescribeDomains") + def describe_domains( + self, context: RequestContext, domain_names: DomainNameList, **kwargs + ) -> DescribeDomainsResponse: + raise NotImplementedError + + @handler("DescribeDryRunProgress") + def describe_dry_run_progress( + self, + context: RequestContext, + domain_name: DomainName, + dry_run_id: GUID | None = None, + load_dry_run_config: Boolean | None = None, + **kwargs, + ) -> DescribeDryRunProgressResponse: + raise NotImplementedError + + @handler("DescribeInboundConnections") + def describe_inbound_connections( + self, + context: RequestContext, + filters: FilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeInboundConnectionsResponse: + raise NotImplementedError + + @handler("DescribeInstanceTypeLimits") + def describe_instance_type_limits( + self, + context: RequestContext, + instance_type: OpenSearchPartitionInstanceType, + engine_version: VersionString, + domain_name: DomainName | None = None, + **kwargs, + ) -> DescribeInstanceTypeLimitsResponse: + raise NotImplementedError + + @handler("DescribeOutboundConnections") + def 
describe_outbound_connections( + self, + context: RequestContext, + filters: FilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeOutboundConnectionsResponse: + raise NotImplementedError + + @handler("DescribePackages") + def describe_packages( + self, + context: RequestContext, + filters: DescribePackagesFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribePackagesResponse: + raise NotImplementedError + + @handler("DescribeReservedInstanceOfferings") + def describe_reserved_instance_offerings( + self, + context: RequestContext, + reserved_instance_offering_id: GUID | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeReservedInstanceOfferingsResponse: + raise NotImplementedError + + @handler("DescribeReservedInstances") + def describe_reserved_instances( + self, + context: RequestContext, + reserved_instance_id: GUID | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeReservedInstancesResponse: + raise NotImplementedError + + @handler("DescribeVpcEndpoints") + def describe_vpc_endpoints( + self, context: RequestContext, vpc_endpoint_ids: VpcEndpointIdList, **kwargs + ) -> DescribeVpcEndpointsResponse: + raise NotImplementedError + + @handler("DissociatePackage") + def dissociate_package( + self, context: RequestContext, package_id: PackageID, domain_name: DomainName, **kwargs + ) -> DissociatePackageResponse: + raise NotImplementedError + + @handler("DissociatePackages") + def dissociate_packages( + self, + context: RequestContext, + package_list: PackageIDList, + domain_name: DomainName, + **kwargs, + ) -> DissociatePackagesResponse: + raise NotImplementedError + + @handler("GetApplication") + def get_application(self, context: RequestContext, id: Id, **kwargs) -> GetApplicationResponse: + raise NotImplementedError + + @handler("GetCompatibleVersions") + def get_compatible_versions( + self, context: RequestContext, domain_name: DomainName | None = None, **kwargs + ) -> GetCompatibleVersionsResponse: + raise NotImplementedError + + @handler("GetDataSource") + def get_data_source( + self, context: RequestContext, domain_name: DomainName, name: DataSourceName, **kwargs + ) -> GetDataSourceResponse: + raise NotImplementedError + + @handler("GetDirectQueryDataSource") + def get_direct_query_data_source( + self, context: RequestContext, data_source_name: DirectQueryDataSourceName, **kwargs + ) -> GetDirectQueryDataSourceResponse: + raise NotImplementedError + + @handler("GetDomainMaintenanceStatus") + def get_domain_maintenance_status( + self, context: RequestContext, domain_name: DomainName, maintenance_id: RequestId, **kwargs + ) -> GetDomainMaintenanceStatusResponse: + raise NotImplementedError + + @handler("GetPackageVersionHistory") + def get_package_version_history( + self, + context: RequestContext, + package_id: PackageID, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetPackageVersionHistoryResponse: + raise NotImplementedError + + @handler("GetUpgradeHistory") + def get_upgrade_history( + self, + context: RequestContext, + domain_name: DomainName, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetUpgradeHistoryResponse: + raise NotImplementedError + + @handler("GetUpgradeStatus") + def 
get_upgrade_status( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> GetUpgradeStatusResponse: + raise NotImplementedError + + @handler("ListApplications") + def list_applications( + self, + context: RequestContext, + next_token: NextToken | None = None, + statuses: ApplicationStatuses | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListApplicationsResponse: + raise NotImplementedError + + @handler("ListDataSources") + def list_data_sources( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> ListDataSourcesResponse: + raise NotImplementedError + + @handler("ListDirectQueryDataSources") + def list_direct_query_data_sources( + self, context: RequestContext, next_token: NextToken | None = None, **kwargs + ) -> ListDirectQueryDataSourcesResponse: + raise NotImplementedError + + @handler("ListDomainMaintenances") + def list_domain_maintenances( + self, + context: RequestContext, + domain_name: DomainName, + action: MaintenanceType | None = None, + status: MaintenanceStatus | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListDomainMaintenancesResponse: + raise NotImplementedError + + @handler("ListDomainNames") + def list_domain_names( + self, context: RequestContext, engine_type: EngineType | None = None, **kwargs + ) -> ListDomainNamesResponse: + raise NotImplementedError + + @handler("ListDomainsForPackage") + def list_domains_for_package( + self, + context: RequestContext, + package_id: PackageID, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListDomainsForPackageResponse: + raise NotImplementedError + + @handler("ListInstanceTypeDetails") + def list_instance_type_details( + self, + context: RequestContext, + engine_version: VersionString, + domain_name: DomainName | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + retrieve_azs: Boolean | None = None, + instance_type: InstanceTypeString | None = None, + **kwargs, + ) -> ListInstanceTypeDetailsResponse: + raise NotImplementedError + + @handler("ListPackagesForDomain") + def list_packages_for_domain( + self, + context: RequestContext, + domain_name: DomainName, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListPackagesForDomainResponse: + raise NotImplementedError + + @handler("ListScheduledActions") + def list_scheduled_actions( + self, + context: RequestContext, + domain_name: DomainName, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListScheduledActionsResponse: + raise NotImplementedError + + @handler("ListTags") + def list_tags(self, context: RequestContext, arn: ARN, **kwargs) -> ListTagsResponse: + raise NotImplementedError + + @handler("ListVersions") + def list_versions( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListVersionsResponse: + raise NotImplementedError + + @handler("ListVpcEndpointAccess") + def list_vpc_endpoint_access( + self, + context: RequestContext, + domain_name: DomainName, + next_token: NextToken | None = None, + **kwargs, + ) -> ListVpcEndpointAccessResponse: + raise NotImplementedError + + @handler("ListVpcEndpoints") + def list_vpc_endpoints( + self, context: RequestContext, next_token: NextToken | None = None, **kwargs + ) -> ListVpcEndpointsResponse: + raise NotImplementedError + + 
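For context on how these generated stubs are typically used (a minimal sketch under the assumption that a concrete provider subclasses the generated API and overrides individual handlers; OpensearchProvider and its in-memory tag store are hypothetical names, not part of this diff): any operation the provider does not override keeps raising NotImplementedError, which surfaces as an unimplemented operation to the caller.

class OpensearchProvider(OpensearchApi):
    def __init__(self):
        # Hypothetical in-memory store mapping a resource ARN to its tags.
        self.tags_by_arn: Dict[ARN, TagList] = {}

    def add_tags(self, context: RequestContext, arn: ARN, tag_list: TagList, **kwargs) -> None:
        # Same signature as the stub above; the @handler decorator on the
        # base class is assumed to handle operation dispatch.
        self.tags_by_arn.setdefault(arn, []).extend(tag_list)

    def list_tags(self, context: RequestContext, arn: ARN, **kwargs) -> ListTagsResponse:
        return ListTagsResponse(TagList=self.tags_by_arn.get(arn, []))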
@handler("ListVpcEndpointsForDomain") + def list_vpc_endpoints_for_domain( + self, + context: RequestContext, + domain_name: DomainName, + next_token: NextToken | None = None, + **kwargs, + ) -> ListVpcEndpointsForDomainResponse: + raise NotImplementedError + + @handler("PurchaseReservedInstanceOffering") + def purchase_reserved_instance_offering( + self, + context: RequestContext, + reserved_instance_offering_id: GUID, + reservation_name: ReservationToken, + instance_count: InstanceCount | None = None, + **kwargs, + ) -> PurchaseReservedInstanceOfferingResponse: + raise NotImplementedError + + @handler("RejectInboundConnection") + def reject_inbound_connection( + self, context: RequestContext, connection_id: ConnectionId, **kwargs + ) -> RejectInboundConnectionResponse: + raise NotImplementedError + + @handler("RemoveTags") + def remove_tags( + self, context: RequestContext, arn: ARN, tag_keys: StringList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("RevokeVpcEndpointAccess") + def revoke_vpc_endpoint_access( + self, + context: RequestContext, + domain_name: DomainName, + account: AWSAccount | None = None, + service: AWSServicePrincipal | None = None, + **kwargs, + ) -> RevokeVpcEndpointAccessResponse: + raise NotImplementedError + + @handler("StartDomainMaintenance") + def start_domain_maintenance( + self, + context: RequestContext, + domain_name: DomainName, + action: MaintenanceType, + node_id: NodeId | None = None, + **kwargs, + ) -> StartDomainMaintenanceResponse: + raise NotImplementedError + + @handler("StartServiceSoftwareUpdate") + def start_service_software_update( + self, + context: RequestContext, + domain_name: DomainName, + schedule_at: ScheduleAt | None = None, + desired_start_time: Long | None = None, + **kwargs, + ) -> StartServiceSoftwareUpdateResponse: + raise NotImplementedError + + @handler("UpdateApplication") + def update_application( + self, + context: RequestContext, + id: Id, + data_sources: DataSources | None = None, + app_configs: AppConfigs | None = None, + **kwargs, + ) -> UpdateApplicationResponse: + raise NotImplementedError + + @handler("UpdateDataSource") + def update_data_source( + self, + context: RequestContext, + domain_name: DomainName, + name: DataSourceName, + data_source_type: DataSourceType, + description: DataSourceDescription | None = None, + status: DataSourceStatus | None = None, + **kwargs, + ) -> UpdateDataSourceResponse: + raise NotImplementedError + + @handler("UpdateDirectQueryDataSource") + def update_direct_query_data_source( + self, + context: RequestContext, + data_source_name: DirectQueryDataSourceName, + data_source_type: DirectQueryDataSourceType, + open_search_arns: DirectQueryOpenSearchARNList, + description: DirectQueryDataSourceDescription | None = None, + **kwargs, + ) -> UpdateDirectQueryDataSourceResponse: + raise NotImplementedError + + @handler("UpdateDomainConfig") + def update_domain_config( + self, + context: RequestContext, + domain_name: DomainName, + cluster_config: ClusterConfig | None = None, + ebs_options: EBSOptions | None = None, + snapshot_options: SnapshotOptions | None = None, + vpc_options: VPCOptions | None = None, + cognito_options: CognitoOptions | None = None, + advanced_options: AdvancedOptions | None = None, + access_policies: PolicyDocument | None = None, + ip_address_type: IPAddressType | None = None, + log_publishing_options: LogPublishingOptions | None = None, + encryption_at_rest_options: EncryptionAtRestOptions | None = None, + domain_endpoint_options: DomainEndpointOptions | 
None = None, + node_to_node_encryption_options: NodeToNodeEncryptionOptions | None = None, + advanced_security_options: AdvancedSecurityOptionsInput | None = None, + identity_center_options: IdentityCenterOptionsInput | None = None, + auto_tune_options: AutoTuneOptions | None = None, + dry_run: DryRun | None = None, + dry_run_mode: DryRunMode | None = None, + off_peak_window_options: OffPeakWindowOptions | None = None, + software_update_options: SoftwareUpdateOptions | None = None, + aiml_options: AIMLOptionsInput | None = None, + **kwargs, + ) -> UpdateDomainConfigResponse: + raise NotImplementedError + + @handler("UpdatePackage") + def update_package( + self, + context: RequestContext, + package_id: PackageID, + package_source: PackageSource, + package_description: PackageDescription | None = None, + commit_message: CommitMessage | None = None, + package_configuration: PackageConfiguration | None = None, + package_encryption_options: PackageEncryptionOptions | None = None, + **kwargs, + ) -> UpdatePackageResponse: + raise NotImplementedError + + @handler("UpdatePackageScope") + def update_package_scope( + self, + context: RequestContext, + package_id: PackageID, + operation: PackageScopeOperationEnum, + package_user_list: PackageUserList, + **kwargs, + ) -> UpdatePackageScopeResponse: + raise NotImplementedError + + @handler("UpdateScheduledAction") + def update_scheduled_action( + self, + context: RequestContext, + domain_name: DomainName, + action_id: String, + action_type: ActionType, + schedule_at: ScheduleAt, + desired_start_time: Long | None = None, + **kwargs, + ) -> UpdateScheduledActionResponse: + raise NotImplementedError + + @handler("UpdateVpcEndpoint") + def update_vpc_endpoint( + self, + context: RequestContext, + vpc_endpoint_id: VpcEndpointId, + vpc_options: VPCOptions, + **kwargs, + ) -> UpdateVpcEndpointResponse: + raise NotImplementedError + + @handler("UpgradeDomain") + def upgrade_domain( + self, + context: RequestContext, + domain_name: DomainName, + target_version: VersionString, + perform_check_only: Boolean | None = None, + advanced_options: AdvancedOptions | None = None, + **kwargs, + ) -> UpgradeDomainResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/pipes/__init__.py b/localstack-core/localstack/aws/api/pipes/__init__.py new file mode 100644 index 0000000000000..6fe68d846fa23 --- /dev/null +++ b/localstack-core/localstack/aws/api/pipes/__init__.py @@ -0,0 +1,1114 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +Arn = str +ArnOrJsonPath = str +ArnOrUrl = str +BatchArraySize = int +BatchRetryAttempts = int +Boolean = bool +CapacityProvider = str +CapacityProviderStrategyItemBase = int +CapacityProviderStrategyItemWeight = int +CloudwatchLogGroupArn = str +Database = str +DbUser = str +DimensionName = str +DimensionValue = str +EndpointString = str +EphemeralStorageSize = int +ErrorMessage = str +EventBridgeDetailType = str +EventBridgeEndpointId = str +EventBridgeEventSource = str +EventPattern = str +FirehoseArn = str +HeaderKey = str +HeaderValue = str +InputTemplate = str +Integer = int +JsonPath = str +KafkaTopicName = str +KinesisPartitionKey = str +KmsKeyIdentifier = str +LimitMax10 = int +LimitMax100 = int +LimitMax10000 = int +LimitMin1 = int +LogStreamName = str +MQBrokerQueueName = str +MaximumBatchingWindowInSeconds = int +MaximumRecordAgeInSeconds = int 
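+# NOTE (editorial, illustrative): the aliases in this block mirror constrained
+# shapes from the Pipes service model, but at runtime they are plain str/int
+# and enforce nothing (e.g. LimitMax10000 is just int), so a provider has to
+# validate ranges itself, roughly:
+#
+#   def _check_batch_size(batch_size: LimitMax10000) -> None:
+#       if not 1 <= batch_size <= 10_000:
+#           raise ValidationException("BatchSize must be between 1 and 10000")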
+MaximumRetryAttemptsESM = int +MeasureName = str +MeasureValue = str +MessageDeduplicationId = str +MessageGroupId = str +MultiMeasureAttributeName = str +MultiMeasureName = str +NextToken = str +OptionalArn = str +PathParameter = str +PipeArn = str +PipeDescription = str +PipeName = str +PipeStateReason = str +PlacementConstraintExpression = str +PlacementStrategyField = str +QueryStringKey = str +QueryStringValue = str +ReferenceId = str +ResourceArn = str +RoleArn = str +S3LogDestinationParametersBucketNameString = str +S3LogDestinationParametersBucketOwnerString = str +S3LogDestinationParametersPrefixString = str +SageMakerPipelineParameterName = str +SageMakerPipelineParameterValue = str +SecretManagerArn = str +SecretManagerArnOrJsonPath = str +SecurityGroup = str +SecurityGroupId = str +Sql = str +StatementName = str +String = str +Subnet = str +SubnetId = str +TagKey = str +TagValue = str +TimeValue = str +TimestampFormat = str +URI = str +VersionValue = str + + +class AssignPublicIp(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class BatchJobDependencyType(StrEnum): + N_TO_N = "N_TO_N" + SEQUENTIAL = "SEQUENTIAL" + + +class BatchResourceRequirementType(StrEnum): + GPU = "GPU" + MEMORY = "MEMORY" + VCPU = "VCPU" + + +class DimensionValueType(StrEnum): + VARCHAR = "VARCHAR" + + +class DynamoDBStreamStartPosition(StrEnum): + TRIM_HORIZON = "TRIM_HORIZON" + LATEST = "LATEST" + + +class EcsEnvironmentFileType(StrEnum): + s3 = "s3" + + +class EcsResourceRequirementType(StrEnum): + GPU = "GPU" + InferenceAccelerator = "InferenceAccelerator" + + +class EpochTimeUnit(StrEnum): + MILLISECONDS = "MILLISECONDS" + SECONDS = "SECONDS" + MICROSECONDS = "MICROSECONDS" + NANOSECONDS = "NANOSECONDS" + + +class IncludeExecutionDataOption(StrEnum): + ALL = "ALL" + + +class KinesisStreamStartPosition(StrEnum): + TRIM_HORIZON = "TRIM_HORIZON" + LATEST = "LATEST" + AT_TIMESTAMP = "AT_TIMESTAMP" + + +class LaunchType(StrEnum): + EC2 = "EC2" + FARGATE = "FARGATE" + EXTERNAL = "EXTERNAL" + + +class LogLevel(StrEnum): + OFF = "OFF" + ERROR = "ERROR" + INFO = "INFO" + TRACE = "TRACE" + + +class MSKStartPosition(StrEnum): + TRIM_HORIZON = "TRIM_HORIZON" + LATEST = "LATEST" + + +class MeasureValueType(StrEnum): + DOUBLE = "DOUBLE" + BIGINT = "BIGINT" + VARCHAR = "VARCHAR" + BOOLEAN = "BOOLEAN" + TIMESTAMP = "TIMESTAMP" + + +class OnPartialBatchItemFailureStreams(StrEnum): + AUTOMATIC_BISECT = "AUTOMATIC_BISECT" + + +class PipeState(StrEnum): + RUNNING = "RUNNING" + STOPPED = "STOPPED" + CREATING = "CREATING" + UPDATING = "UPDATING" + DELETING = "DELETING" + STARTING = "STARTING" + STOPPING = "STOPPING" + CREATE_FAILED = "CREATE_FAILED" + UPDATE_FAILED = "UPDATE_FAILED" + START_FAILED = "START_FAILED" + STOP_FAILED = "STOP_FAILED" + DELETE_FAILED = "DELETE_FAILED" + CREATE_ROLLBACK_FAILED = "CREATE_ROLLBACK_FAILED" + DELETE_ROLLBACK_FAILED = "DELETE_ROLLBACK_FAILED" + UPDATE_ROLLBACK_FAILED = "UPDATE_ROLLBACK_FAILED" + + +class PipeTargetInvocationType(StrEnum): + REQUEST_RESPONSE = "REQUEST_RESPONSE" + FIRE_AND_FORGET = "FIRE_AND_FORGET" + + +class PlacementConstraintType(StrEnum): + distinctInstance = "distinctInstance" + memberOf = "memberOf" + + +class PlacementStrategyType(StrEnum): + random = "random" + spread = "spread" + binpack = "binpack" + + +class PropagateTags(StrEnum): + TASK_DEFINITION = "TASK_DEFINITION" + + +class RequestedPipeState(StrEnum): + RUNNING = "RUNNING" + STOPPED = "STOPPED" + + +class RequestedPipeStateDescribeResponse(StrEnum): + RUNNING = "RUNNING" + STOPPED = 
"STOPPED" + DELETED = "DELETED" + + +class S3OutputFormat(StrEnum): + json = "json" + plain = "plain" + w3c = "w3c" + + +class SelfManagedKafkaStartPosition(StrEnum): + TRIM_HORIZON = "TRIM_HORIZON" + LATEST = "LATEST" + + +class TimeFieldType(StrEnum): + EPOCH = "EPOCH" + TIMESTAMP_FORMAT = "TIMESTAMP_FORMAT" + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = True + status_code: int = 409 + resourceId: String + resourceType: String + + +class InternalException(ServiceException): + code: str = "InternalException" + sender_fault: bool = False + status_code: int = 500 + retryAfterSeconds: Optional[Integer] + + +class NotFoundException(ServiceException): + code: str = "NotFoundException" + sender_fault: bool = True + status_code: int = 404 + + +class ServiceQuotaExceededException(ServiceException): + code: str = "ServiceQuotaExceededException" + sender_fault: bool = True + status_code: int = 402 + resourceId: String + resourceType: String + serviceCode: String + quotaCode: String + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = True + status_code: int = 429 + serviceCode: Optional[String] + quotaCode: Optional[String] + retryAfterSeconds: Optional[Integer] + + +class ValidationExceptionField(TypedDict, total=False): + name: String + message: ErrorMessage + + +ValidationExceptionFieldList = List[ValidationExceptionField] + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = True + status_code: int = 400 + fieldList: Optional[ValidationExceptionFieldList] + + +SecurityGroups = List[SecurityGroup] +Subnets = List[Subnet] + + +class AwsVpcConfiguration(TypedDict, total=False): + Subnets: Subnets + SecurityGroups: Optional[SecurityGroups] + AssignPublicIp: Optional[AssignPublicIp] + + +class BatchArrayProperties(TypedDict, total=False): + Size: Optional[BatchArraySize] + + +class BatchResourceRequirement(TypedDict, total=False): + Type: BatchResourceRequirementType + Value: String + + +BatchResourceRequirementsList = List[BatchResourceRequirement] + + +class BatchEnvironmentVariable(TypedDict, total=False): + Name: Optional[String] + Value: Optional[String] + + +BatchEnvironmentVariableList = List[BatchEnvironmentVariable] +StringList = List[String] + + +class BatchContainerOverrides(TypedDict, total=False): + Command: Optional[StringList] + Environment: Optional[BatchEnvironmentVariableList] + InstanceType: Optional[String] + ResourceRequirements: Optional[BatchResourceRequirementsList] + + +class BatchJobDependency(TypedDict, total=False): + JobId: Optional[String] + Type: Optional[BatchJobDependencyType] + + +BatchDependsOn = List[BatchJobDependency] +BatchParametersMap = Dict[String, String] + + +class BatchRetryStrategy(TypedDict, total=False): + Attempts: Optional[BatchRetryAttempts] + + +class CapacityProviderStrategyItem(TypedDict, total=False): + capacityProvider: CapacityProvider + weight: Optional[CapacityProviderStrategyItemWeight] + base: Optional[CapacityProviderStrategyItemBase] + + +CapacityProviderStrategy = List[CapacityProviderStrategyItem] + + +class CloudwatchLogsLogDestination(TypedDict, total=False): + LogGroupArn: Optional[CloudwatchLogGroupArn] + + +class CloudwatchLogsLogDestinationParameters(TypedDict, total=False): + LogGroupArn: CloudwatchLogGroupArn + + +IncludeExecutionData = List[IncludeExecutionDataOption] + + +class FirehoseLogDestinationParameters(TypedDict, total=False): + DeliveryStreamArn: 
FirehoseArn + + +class S3LogDestinationParameters(TypedDict, total=False): + BucketName: S3LogDestinationParametersBucketNameString + BucketOwner: S3LogDestinationParametersBucketOwnerString + OutputFormat: Optional[S3OutputFormat] + Prefix: Optional[S3LogDestinationParametersPrefixString] + + +class PipeLogConfigurationParameters(TypedDict, total=False): + S3LogDestination: Optional[S3LogDestinationParameters] + FirehoseLogDestination: Optional[FirehoseLogDestinationParameters] + CloudwatchLogsLogDestination: Optional[CloudwatchLogsLogDestinationParameters] + Level: LogLevel + IncludeExecutionData: Optional[IncludeExecutionData] + + +TagMap = Dict[TagKey, TagValue] + + +class MultiMeasureAttributeMapping(TypedDict, total=False): + MeasureValue: MeasureValue + MeasureValueType: MeasureValueType + MultiMeasureAttributeName: MultiMeasureAttributeName + + +MultiMeasureAttributeMappings = List[MultiMeasureAttributeMapping] + + +class MultiMeasureMapping(TypedDict, total=False): + MultiMeasureName: MultiMeasureName + MultiMeasureAttributeMappings: MultiMeasureAttributeMappings + + +MultiMeasureMappings = List[MultiMeasureMapping] + + +class SingleMeasureMapping(TypedDict, total=False): + MeasureValue: MeasureValue + MeasureValueType: MeasureValueType + MeasureName: MeasureName + + +SingleMeasureMappings = List[SingleMeasureMapping] + + +class DimensionMapping(TypedDict, total=False): + DimensionValue: DimensionValue + DimensionValueType: DimensionValueType + DimensionName: DimensionName + + +DimensionMappings = List[DimensionMapping] + + +class PipeTargetTimestreamParameters(TypedDict, total=False): + TimeValue: TimeValue + EpochTimeUnit: Optional[EpochTimeUnit] + TimeFieldType: Optional[TimeFieldType] + TimestampFormat: Optional[TimestampFormat] + VersionValue: VersionValue + DimensionMappings: DimensionMappings + SingleMeasureMappings: Optional[SingleMeasureMappings] + MultiMeasureMappings: Optional[MultiMeasureMappings] + + +class PipeTargetCloudWatchLogsParameters(TypedDict, total=False): + LogStreamName: Optional[LogStreamName] + Timestamp: Optional[JsonPath] + + +EventBridgeEventResourceList = List[ArnOrJsonPath] + + +class PipeTargetEventBridgeEventBusParameters(TypedDict, total=False): + EndpointId: Optional[EventBridgeEndpointId] + DetailType: Optional[EventBridgeDetailType] + Source: Optional[EventBridgeEventSource] + Resources: Optional[EventBridgeEventResourceList] + Time: Optional[JsonPath] + + +class SageMakerPipelineParameter(TypedDict, total=False): + Name: SageMakerPipelineParameterName + Value: SageMakerPipelineParameterValue + + +SageMakerPipelineParameterList = List[SageMakerPipelineParameter] + + +class PipeTargetSageMakerPipelineParameters(TypedDict, total=False): + PipelineParameterList: Optional[SageMakerPipelineParameterList] + + +Sqls = List[Sql] + + +class PipeTargetRedshiftDataParameters(TypedDict, total=False): + SecretManagerArn: Optional[SecretManagerArnOrJsonPath] + Database: Database + DbUser: Optional[DbUser] + StatementName: Optional[StatementName] + WithEvent: Optional[Boolean] + Sqls: Sqls + + +QueryStringParametersMap = Dict[QueryStringKey, QueryStringValue] +HeaderParametersMap = Dict[HeaderKey, HeaderValue] +PathParameterList = List[PathParameter] + + +class PipeTargetHttpParameters(TypedDict, total=False): + PathParameterValues: Optional[PathParameterList] + HeaderParameters: Optional[HeaderParametersMap] + QueryStringParameters: Optional[QueryStringParametersMap] + + +class PipeTargetSqsQueueParameters(TypedDict, total=False): + MessageGroupId: 
Optional[MessageGroupId] + MessageDeduplicationId: Optional[MessageDeduplicationId] + + +class PipeTargetBatchJobParameters(TypedDict, total=False): + JobDefinition: String + JobName: String + ArrayProperties: Optional[BatchArrayProperties] + RetryStrategy: Optional[BatchRetryStrategy] + ContainerOverrides: Optional[BatchContainerOverrides] + DependsOn: Optional[BatchDependsOn] + Parameters: Optional[BatchParametersMap] + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] + + +class EcsInferenceAcceleratorOverride(TypedDict, total=False): + deviceName: Optional[String] + deviceType: Optional[String] + + +EcsInferenceAcceleratorOverrideList = List[EcsInferenceAcceleratorOverride] + + +class EcsEphemeralStorage(TypedDict, total=False): + sizeInGiB: EphemeralStorageSize + + +EcsResourceRequirement = TypedDict( + "EcsResourceRequirement", + { + "type": EcsResourceRequirementType, + "value": String, + }, + total=False, +) +EcsResourceRequirementsList = List[EcsResourceRequirement] +EcsEnvironmentFile = TypedDict( + "EcsEnvironmentFile", + { + "type": EcsEnvironmentFileType, + "value": String, + }, + total=False, +) +EcsEnvironmentFileList = List[EcsEnvironmentFile] + + +class EcsEnvironmentVariable(TypedDict, total=False): + name: Optional[String] + value: Optional[String] + + +EcsEnvironmentVariableList = List[EcsEnvironmentVariable] + + +class EcsContainerOverride(TypedDict, total=False): + Command: Optional[StringList] + Cpu: Optional[Integer] + Environment: Optional[EcsEnvironmentVariableList] + EnvironmentFiles: Optional[EcsEnvironmentFileList] + Memory: Optional[Integer] + MemoryReservation: Optional[Integer] + Name: Optional[String] + ResourceRequirements: Optional[EcsResourceRequirementsList] + + +EcsContainerOverrideList = List[EcsContainerOverride] + + +class EcsTaskOverride(TypedDict, total=False): + ContainerOverrides: Optional[EcsContainerOverrideList] + Cpu: Optional[String] + EphemeralStorage: Optional[EcsEphemeralStorage] + ExecutionRoleArn: Optional[ArnOrJsonPath] + InferenceAcceleratorOverrides: Optional[EcsInferenceAcceleratorOverrideList] + Memory: Optional[String] + TaskRoleArn: Optional[ArnOrJsonPath] + + +PlacementStrategy = TypedDict( + "PlacementStrategy", + { + "type": Optional[PlacementStrategyType], + "field": Optional[PlacementStrategyField], + }, + total=False, +) +PlacementStrategies = List[PlacementStrategy] +PlacementConstraint = TypedDict( + "PlacementConstraint", + { + "type": Optional[PlacementConstraintType], + "expression": Optional[PlacementConstraintExpression], + }, + total=False, +) +PlacementConstraints = List[PlacementConstraint] + + +class NetworkConfiguration(TypedDict, total=False): + awsvpcConfiguration: Optional[AwsVpcConfiguration] + + +class PipeTargetEcsTaskParameters(TypedDict, total=False): + TaskDefinitionArn: ArnOrJsonPath + TaskCount: Optional[LimitMin1] + LaunchType: Optional[LaunchType] + NetworkConfiguration: Optional[NetworkConfiguration] + PlatformVersion: Optional[String] + Group: Optional[String] + CapacityProviderStrategy: Optional[CapacityProviderStrategy] + EnableECSManagedTags: Optional[Boolean] + EnableExecuteCommand: Optional[Boolean] + PlacementConstraints: Optional[PlacementConstraints] + PlacementStrategy: Optional[PlacementStrategies] + PropagateTags: Optional[PropagateTags] + ReferenceId: Optional[ReferenceId] + Overrides: Optional[EcsTaskOverride] + Tags: Optional[TagList] + + +class PipeTargetKinesisStreamParameters(TypedDict, total=False): + PartitionKey: KinesisPartitionKey 
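+
+
+# Illustrative note (editorial, not generated code): the TypedDicts in this
+# module compose into request/response payloads. total=False makes every key
+# omittable for type checkers, while Optional[...] marks spec-level
+# optionality. Assuming a JSONPath partition key, a Kinesis target could be
+# written as:
+#
+#   target_params: PipeTargetParameters = {
+#       "KinesisStreamParameters": {"PartitionKey": "$.detail.id"},
+#   }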
+ + +class PipeTargetStateMachineParameters(TypedDict, total=False): + InvocationType: Optional[PipeTargetInvocationType] + + +class PipeTargetLambdaFunctionParameters(TypedDict, total=False): + InvocationType: Optional[PipeTargetInvocationType] + + +class PipeTargetParameters(TypedDict, total=False): + InputTemplate: Optional[InputTemplate] + LambdaFunctionParameters: Optional[PipeTargetLambdaFunctionParameters] + StepFunctionStateMachineParameters: Optional[PipeTargetStateMachineParameters] + KinesisStreamParameters: Optional[PipeTargetKinesisStreamParameters] + EcsTaskParameters: Optional[PipeTargetEcsTaskParameters] + BatchJobParameters: Optional[PipeTargetBatchJobParameters] + SqsQueueParameters: Optional[PipeTargetSqsQueueParameters] + HttpParameters: Optional[PipeTargetHttpParameters] + RedshiftDataParameters: Optional[PipeTargetRedshiftDataParameters] + SageMakerPipelineParameters: Optional[PipeTargetSageMakerPipelineParameters] + EventBridgeEventBusParameters: Optional[PipeTargetEventBridgeEventBusParameters] + CloudWatchLogsParameters: Optional[PipeTargetCloudWatchLogsParameters] + TimestreamParameters: Optional[PipeTargetTimestreamParameters] + + +class PipeEnrichmentHttpParameters(TypedDict, total=False): + PathParameterValues: Optional[PathParameterList] + HeaderParameters: Optional[HeaderParametersMap] + QueryStringParameters: Optional[QueryStringParametersMap] + + +class PipeEnrichmentParameters(TypedDict, total=False): + InputTemplate: Optional[InputTemplate] + HttpParameters: Optional[PipeEnrichmentHttpParameters] + + +SecurityGroupIds = List[SecurityGroupId] +SubnetIds = List[SubnetId] + + +class SelfManagedKafkaAccessConfigurationVpc(TypedDict, total=False): + Subnets: Optional[SubnetIds] + SecurityGroup: Optional[SecurityGroupIds] + + +class SelfManagedKafkaAccessConfigurationCredentials(TypedDict, total=False): + BasicAuth: Optional[SecretManagerArn] + SaslScram512Auth: Optional[SecretManagerArn] + SaslScram256Auth: Optional[SecretManagerArn] + ClientCertificateTlsAuth: Optional[SecretManagerArn] + + +KafkaBootstrapServers = List[EndpointString] + + +class PipeSourceSelfManagedKafkaParameters(TypedDict, total=False): + TopicName: KafkaTopicName + StartingPosition: Optional[SelfManagedKafkaStartPosition] + AdditionalBootstrapServers: Optional[KafkaBootstrapServers] + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + ConsumerGroupID: Optional[URI] + Credentials: Optional[SelfManagedKafkaAccessConfigurationCredentials] + ServerRootCaCertificate: Optional[SecretManagerArn] + Vpc: Optional[SelfManagedKafkaAccessConfigurationVpc] + + +class MSKAccessCredentials(TypedDict, total=False): + SaslScram512Auth: Optional[SecretManagerArn] + ClientCertificateTlsAuth: Optional[SecretManagerArn] + + +class PipeSourceManagedStreamingKafkaParameters(TypedDict, total=False): + TopicName: KafkaTopicName + StartingPosition: Optional[MSKStartPosition] + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + ConsumerGroupID: Optional[URI] + Credentials: Optional[MSKAccessCredentials] + + +class MQBrokerAccessCredentials(TypedDict, total=False): + BasicAuth: Optional[SecretManagerArn] + + +class PipeSourceRabbitMQBrokerParameters(TypedDict, total=False): + Credentials: MQBrokerAccessCredentials + QueueName: MQBrokerQueueName + VirtualHost: Optional[URI] + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + + +class 
PipeSourceActiveMQBrokerParameters(TypedDict, total=False): + Credentials: MQBrokerAccessCredentials + QueueName: MQBrokerQueueName + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + + +class PipeSourceSqsQueueParameters(TypedDict, total=False): + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + + +class DeadLetterConfig(TypedDict, total=False): + Arn: Optional[Arn] + + +class PipeSourceDynamoDBStreamParameters(TypedDict, total=False): + BatchSize: Optional[LimitMax10000] + DeadLetterConfig: Optional[DeadLetterConfig] + OnPartialBatchItemFailure: Optional[OnPartialBatchItemFailureStreams] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + MaximumRecordAgeInSeconds: Optional[MaximumRecordAgeInSeconds] + MaximumRetryAttempts: Optional[MaximumRetryAttemptsESM] + ParallelizationFactor: Optional[LimitMax10] + StartingPosition: DynamoDBStreamStartPosition + + +Timestamp = datetime + + +class PipeSourceKinesisStreamParameters(TypedDict, total=False): + BatchSize: Optional[LimitMax10000] + DeadLetterConfig: Optional[DeadLetterConfig] + OnPartialBatchItemFailure: Optional[OnPartialBatchItemFailureStreams] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + MaximumRecordAgeInSeconds: Optional[MaximumRecordAgeInSeconds] + MaximumRetryAttempts: Optional[MaximumRetryAttemptsESM] + ParallelizationFactor: Optional[LimitMax10] + StartingPosition: KinesisStreamStartPosition + StartingPositionTimestamp: Optional[Timestamp] + + +class Filter(TypedDict, total=False): + Pattern: Optional[EventPattern] + + +FilterList = List[Filter] + + +class FilterCriteria(TypedDict, total=False): + Filters: Optional[FilterList] + + +class PipeSourceParameters(TypedDict, total=False): + FilterCriteria: Optional[FilterCriteria] + KinesisStreamParameters: Optional[PipeSourceKinesisStreamParameters] + DynamoDBStreamParameters: Optional[PipeSourceDynamoDBStreamParameters] + SqsQueueParameters: Optional[PipeSourceSqsQueueParameters] + ActiveMQBrokerParameters: Optional[PipeSourceActiveMQBrokerParameters] + RabbitMQBrokerParameters: Optional[PipeSourceRabbitMQBrokerParameters] + ManagedStreamingKafkaParameters: Optional[PipeSourceManagedStreamingKafkaParameters] + SelfManagedKafkaParameters: Optional[PipeSourceSelfManagedKafkaParameters] + + +class CreatePipeRequest(ServiceRequest): + Name: PipeName + Description: Optional[PipeDescription] + DesiredState: Optional[RequestedPipeState] + Source: ArnOrUrl + SourceParameters: Optional[PipeSourceParameters] + Enrichment: Optional[OptionalArn] + EnrichmentParameters: Optional[PipeEnrichmentParameters] + Target: Arn + TargetParameters: Optional[PipeTargetParameters] + RoleArn: RoleArn + Tags: Optional[TagMap] + LogConfiguration: Optional[PipeLogConfigurationParameters] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + + +class CreatePipeResponse(TypedDict, total=False): + Arn: Optional[PipeArn] + Name: Optional[PipeName] + DesiredState: Optional[RequestedPipeState] + CurrentState: Optional[PipeState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +class DeletePipeRequest(ServiceRequest): + Name: PipeName + + +class DeletePipeResponse(TypedDict, total=False): + Arn: Optional[PipeArn] + Name: Optional[PipeName] + DesiredState: Optional[RequestedPipeStateDescribeResponse] + CurrentState: Optional[PipeState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + 
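+# Illustrative sketch (editorial): these stubs raise NotImplementedError; a
+# LocalStack provider typically subclasses the generated PipesApi skeleton
+# (defined at the end of this file) and overrides individual handlers. The
+# store lookup below is hypothetical:
+#
+#   class PipesProvider(PipesApi):
+#       def describe_pipe(self, context, name, **kwargs):
+#           pipe = self.pipes_store(context).get(name)  # hypothetical helper
+#           if pipe is None:
+#               raise NotFoundException(f"Pipe {name} does not exist.")
+#           return DescribePipeResponse(Name=name, Arn=pipe["Arn"])
+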
+class DescribePipeRequest(ServiceRequest): + Name: PipeName + + +class FirehoseLogDestination(TypedDict, total=False): + DeliveryStreamArn: Optional[FirehoseArn] + + +class S3LogDestination(TypedDict, total=False): + BucketName: Optional[String] + Prefix: Optional[String] + BucketOwner: Optional[String] + OutputFormat: Optional[S3OutputFormat] + + +class PipeLogConfiguration(TypedDict, total=False): + S3LogDestination: Optional[S3LogDestination] + FirehoseLogDestination: Optional[FirehoseLogDestination] + CloudwatchLogsLogDestination: Optional[CloudwatchLogsLogDestination] + Level: Optional[LogLevel] + IncludeExecutionData: Optional[IncludeExecutionData] + + +class DescribePipeResponse(TypedDict, total=False): + Arn: Optional[PipeArn] + Name: Optional[PipeName] + Description: Optional[PipeDescription] + DesiredState: Optional[RequestedPipeStateDescribeResponse] + CurrentState: Optional[PipeState] + StateReason: Optional[PipeStateReason] + Source: Optional[ArnOrUrl] + SourceParameters: Optional[PipeSourceParameters] + Enrichment: Optional[OptionalArn] + EnrichmentParameters: Optional[PipeEnrichmentParameters] + Target: Optional[Arn] + TargetParameters: Optional[PipeTargetParameters] + RoleArn: Optional[RoleArn] + Tags: Optional[TagMap] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + LogConfiguration: Optional[PipeLogConfiguration] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + + +class ListPipesRequest(ServiceRequest): + NamePrefix: Optional[PipeName] + DesiredState: Optional[RequestedPipeState] + CurrentState: Optional[PipeState] + SourcePrefix: Optional[ResourceArn] + TargetPrefix: Optional[ResourceArn] + NextToken: Optional[NextToken] + Limit: Optional[LimitMax100] + + +class Pipe(TypedDict, total=False): + Name: Optional[PipeName] + Arn: Optional[PipeArn] + DesiredState: Optional[RequestedPipeState] + CurrentState: Optional[PipeState] + StateReason: Optional[PipeStateReason] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + Source: Optional[ArnOrUrl] + Target: Optional[Arn] + Enrichment: Optional[OptionalArn] + + +PipeList = List[Pipe] + + +class ListPipesResponse(TypedDict, total=False): + Pipes: Optional[PipeList] + NextToken: Optional[NextToken] + + +class ListTagsForResourceRequest(ServiceRequest): + resourceArn: PipeArn + + +class ListTagsForResourceResponse(TypedDict, total=False): + tags: Optional[TagMap] + + +class StartPipeRequest(ServiceRequest): + Name: PipeName + + +class StartPipeResponse(TypedDict, total=False): + Arn: Optional[PipeArn] + Name: Optional[PipeName] + DesiredState: Optional[RequestedPipeState] + CurrentState: Optional[PipeState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +class StopPipeRequest(ServiceRequest): + Name: PipeName + + +class StopPipeResponse(TypedDict, total=False): + Arn: Optional[PipeArn] + Name: Optional[PipeName] + DesiredState: Optional[RequestedPipeState] + CurrentState: Optional[PipeState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +TagKeyList = List[TagKey] + + +class TagResourceRequest(ServiceRequest): + resourceArn: PipeArn + tags: TagMap + + +class TagResourceResponse(TypedDict, total=False): + pass + + +class UntagResourceRequest(ServiceRequest): + resourceArn: PipeArn + tagKeys: TagKeyList + + +class UntagResourceResponse(TypedDict, total=False): + pass + + +class UpdatePipeSourceSelfManagedKafkaParameters(TypedDict, total=False): + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: 
Optional[MaximumBatchingWindowInSeconds] + Credentials: Optional[SelfManagedKafkaAccessConfigurationCredentials] + ServerRootCaCertificate: Optional[SecretManagerArn] + Vpc: Optional[SelfManagedKafkaAccessConfigurationVpc] + + +class UpdatePipeSourceManagedStreamingKafkaParameters(TypedDict, total=False): + BatchSize: Optional[LimitMax10000] + Credentials: Optional[MSKAccessCredentials] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + + +class UpdatePipeSourceRabbitMQBrokerParameters(TypedDict, total=False): + Credentials: MQBrokerAccessCredentials + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + + +class UpdatePipeSourceActiveMQBrokerParameters(TypedDict, total=False): + Credentials: MQBrokerAccessCredentials + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + + +class UpdatePipeSourceSqsQueueParameters(TypedDict, total=False): + BatchSize: Optional[LimitMax10000] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + + +class UpdatePipeSourceDynamoDBStreamParameters(TypedDict, total=False): + BatchSize: Optional[LimitMax10000] + DeadLetterConfig: Optional[DeadLetterConfig] + OnPartialBatchItemFailure: Optional[OnPartialBatchItemFailureStreams] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + MaximumRecordAgeInSeconds: Optional[MaximumRecordAgeInSeconds] + MaximumRetryAttempts: Optional[MaximumRetryAttemptsESM] + ParallelizationFactor: Optional[LimitMax10] + + +class UpdatePipeSourceKinesisStreamParameters(TypedDict, total=False): + BatchSize: Optional[LimitMax10000] + DeadLetterConfig: Optional[DeadLetterConfig] + OnPartialBatchItemFailure: Optional[OnPartialBatchItemFailureStreams] + MaximumBatchingWindowInSeconds: Optional[MaximumBatchingWindowInSeconds] + MaximumRecordAgeInSeconds: Optional[MaximumRecordAgeInSeconds] + MaximumRetryAttempts: Optional[MaximumRetryAttemptsESM] + ParallelizationFactor: Optional[LimitMax10] + + +class UpdatePipeSourceParameters(TypedDict, total=False): + FilterCriteria: Optional[FilterCriteria] + KinesisStreamParameters: Optional[UpdatePipeSourceKinesisStreamParameters] + DynamoDBStreamParameters: Optional[UpdatePipeSourceDynamoDBStreamParameters] + SqsQueueParameters: Optional[UpdatePipeSourceSqsQueueParameters] + ActiveMQBrokerParameters: Optional[UpdatePipeSourceActiveMQBrokerParameters] + RabbitMQBrokerParameters: Optional[UpdatePipeSourceRabbitMQBrokerParameters] + ManagedStreamingKafkaParameters: Optional[UpdatePipeSourceManagedStreamingKafkaParameters] + SelfManagedKafkaParameters: Optional[UpdatePipeSourceSelfManagedKafkaParameters] + + +class UpdatePipeRequest(ServiceRequest): + Name: PipeName + Description: Optional[PipeDescription] + DesiredState: Optional[RequestedPipeState] + SourceParameters: Optional[UpdatePipeSourceParameters] + Enrichment: Optional[OptionalArn] + EnrichmentParameters: Optional[PipeEnrichmentParameters] + Target: Optional[Arn] + TargetParameters: Optional[PipeTargetParameters] + RoleArn: RoleArn + LogConfiguration: Optional[PipeLogConfigurationParameters] + KmsKeyIdentifier: Optional[KmsKeyIdentifier] + + +class UpdatePipeResponse(TypedDict, total=False): + Arn: Optional[PipeArn] + Name: Optional[PipeName] + DesiredState: Optional[RequestedPipeState] + CurrentState: Optional[PipeState] + CreationTime: Optional[Timestamp] + LastModifiedTime: Optional[Timestamp] + + +class PipesApi: + service = "pipes" + version = 
"2015-10-07" + + @handler("CreatePipe") + def create_pipe( + self, + context: RequestContext, + name: PipeName, + source: ArnOrUrl, + target: Arn, + role_arn: RoleArn, + description: PipeDescription | None = None, + desired_state: RequestedPipeState | None = None, + source_parameters: PipeSourceParameters | None = None, + enrichment: OptionalArn | None = None, + enrichment_parameters: PipeEnrichmentParameters | None = None, + target_parameters: PipeTargetParameters | None = None, + tags: TagMap | None = None, + log_configuration: PipeLogConfigurationParameters | None = None, + kms_key_identifier: KmsKeyIdentifier | None = None, + **kwargs, + ) -> CreatePipeResponse: + raise NotImplementedError + + @handler("DeletePipe") + def delete_pipe(self, context: RequestContext, name: PipeName, **kwargs) -> DeletePipeResponse: + raise NotImplementedError + + @handler("DescribePipe") + def describe_pipe( + self, context: RequestContext, name: PipeName, **kwargs + ) -> DescribePipeResponse: + raise NotImplementedError + + @handler("ListPipes") + def list_pipes( + self, + context: RequestContext, + name_prefix: PipeName | None = None, + desired_state: RequestedPipeState | None = None, + current_state: PipeState | None = None, + source_prefix: ResourceArn | None = None, + target_prefix: ResourceArn | None = None, + next_token: NextToken | None = None, + limit: LimitMax100 | None = None, + **kwargs, + ) -> ListPipesResponse: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: PipeArn, **kwargs + ) -> ListTagsForResourceResponse: + raise NotImplementedError + + @handler("StartPipe") + def start_pipe(self, context: RequestContext, name: PipeName, **kwargs) -> StartPipeResponse: + raise NotImplementedError + + @handler("StopPipe") + def stop_pipe(self, context: RequestContext, name: PipeName, **kwargs) -> StopPipeResponse: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: PipeArn, tags: TagMap, **kwargs + ) -> TagResourceResponse: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, resource_arn: PipeArn, tag_keys: TagKeyList, **kwargs + ) -> UntagResourceResponse: + raise NotImplementedError + + @handler("UpdatePipe") + def update_pipe( + self, + context: RequestContext, + name: PipeName, + role_arn: RoleArn, + description: PipeDescription | None = None, + desired_state: RequestedPipeState | None = None, + source_parameters: UpdatePipeSourceParameters | None = None, + enrichment: OptionalArn | None = None, + enrichment_parameters: PipeEnrichmentParameters | None = None, + target: Arn | None = None, + target_parameters: PipeTargetParameters | None = None, + log_configuration: PipeLogConfigurationParameters | None = None, + kms_key_identifier: KmsKeyIdentifier | None = None, + **kwargs, + ) -> UpdatePipeResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/redshift/__init__.py b/localstack-core/localstack/aws/api/redshift/__init__.py new file mode 100644 index 0000000000000..1bcc3ad7816ad --- /dev/null +++ b/localstack-core/localstack/aws/api/redshift/__init__.py @@ -0,0 +1,5186 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AuthenticationProfileNameString = str +Boolean = bool +BooleanOptional = bool 
+CustomDomainCertificateArnString = str +CustomDomainNameString = str +Description = str +Double = float +DoubleOptional = float +IdcDisplayNameString = str +IdentityNamespaceString = str +InboundIntegrationArn = str +Integer = int +IntegerOptional = int +IntegrationArn = str +IntegrationDescription = str +IntegrationName = str +PartnerIntegrationAccountId = str +PartnerIntegrationClusterIdentifier = str +PartnerIntegrationDatabaseName = str +PartnerIntegrationPartnerName = str +PartnerIntegrationStatusMessage = str +RedshiftIdcApplicationName = str +S3KeyPrefixValue = str +SensitiveString = str +SourceArn = str +String = str +TargetArn = str + + +class ActionType(StrEnum): + restore_cluster = "restore-cluster" + recommend_node_config = "recommend-node-config" + resize_cluster = "resize-cluster" + + +class AquaConfigurationStatus(StrEnum): + enabled = "enabled" + disabled = "disabled" + auto = "auto" + + +class AquaStatus(StrEnum): + enabled = "enabled" + disabled = "disabled" + applying = "applying" + + +class AuthorizationStatus(StrEnum): + Authorized = "Authorized" + Revoking = "Revoking" + + +class DataShareStatus(StrEnum): + ACTIVE = "ACTIVE" + PENDING_AUTHORIZATION = "PENDING_AUTHORIZATION" + AUTHORIZED = "AUTHORIZED" + DEAUTHORIZED = "DEAUTHORIZED" + REJECTED = "REJECTED" + AVAILABLE = "AVAILABLE" + + +class DataShareStatusForConsumer(StrEnum): + ACTIVE = "ACTIVE" + AVAILABLE = "AVAILABLE" + + +class DataShareStatusForProducer(StrEnum): + ACTIVE = "ACTIVE" + AUTHORIZED = "AUTHORIZED" + PENDING_AUTHORIZATION = "PENDING_AUTHORIZATION" + DEAUTHORIZED = "DEAUTHORIZED" + REJECTED = "REJECTED" + + +class DataShareType(StrEnum): + INTERNAL = "INTERNAL" + + +class DescribeIntegrationsFilterName(StrEnum): + integration_arn = "integration-arn" + source_arn = "source-arn" + source_types = "source-types" + status = "status" + + +class ImpactRankingType(StrEnum): + HIGH = "HIGH" + MEDIUM = "MEDIUM" + LOW = "LOW" + + +class LogDestinationType(StrEnum): + s3 = "s3" + cloudwatch = "cloudwatch" + + +class Mode(StrEnum): + standard = "standard" + high_performance = "high-performance" + + +class NamespaceRegistrationStatus(StrEnum): + Registering = "Registering" + Deregistering = "Deregistering" + + +class NodeConfigurationOptionsFilterName(StrEnum): + NodeType = "NodeType" + NumberOfNodes = "NumberOfNodes" + EstimatedDiskUtilizationPercent = "EstimatedDiskUtilizationPercent" + Mode = "Mode" + + +class OperatorType(StrEnum): + eq = "eq" + lt = "lt" + gt = "gt" + le = "le" + ge = "ge" + in_ = "in" + between = "between" + + +class ParameterApplyType(StrEnum): + static = "static" + dynamic = "dynamic" + + +class PartnerIntegrationStatus(StrEnum): + Active = "Active" + Inactive = "Inactive" + RuntimeFailure = "RuntimeFailure" + ConnectionFailure = "ConnectionFailure" + + +class RecommendedActionType(StrEnum): + SQL = "SQL" + CLI = "CLI" + + +class ReservedNodeExchangeActionType(StrEnum): + restore_cluster = "restore-cluster" + resize_cluster = "resize-cluster" + + +class ReservedNodeExchangeStatusType(StrEnum): + REQUESTED = "REQUESTED" + PENDING = "PENDING" + IN_PROGRESS = "IN_PROGRESS" + RETRYING = "RETRYING" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + + +class ReservedNodeOfferingType(StrEnum): + Regular = "Regular" + Upgradable = "Upgradable" + + +class ScheduleState(StrEnum): + MODIFYING = "MODIFYING" + ACTIVE = "ACTIVE" + FAILED = "FAILED" + + +class ScheduledActionFilterName(StrEnum): + cluster_identifier = "cluster-identifier" + iam_role = "iam-role" + + +class ScheduledActionState(StrEnum): + 
ACTIVE = "ACTIVE" + DISABLED = "DISABLED" + + +class ScheduledActionTypeValues(StrEnum): + ResizeCluster = "ResizeCluster" + PauseCluster = "PauseCluster" + ResumeCluster = "ResumeCluster" + + +class ServiceAuthorization(StrEnum): + Enabled = "Enabled" + Disabled = "Disabled" + + +class SnapshotAttributeToSortBy(StrEnum): + SOURCE_TYPE = "SOURCE_TYPE" + TOTAL_SIZE = "TOTAL_SIZE" + CREATE_TIME = "CREATE_TIME" + + +class SortByOrder(StrEnum): + ASC = "ASC" + DESC = "DESC" + + +class SourceType(StrEnum): + cluster = "cluster" + cluster_parameter_group = "cluster-parameter-group" + cluster_security_group = "cluster-security-group" + cluster_snapshot = "cluster-snapshot" + scheduled_action = "scheduled-action" + + +class TableRestoreStatusType(StrEnum): + PENDING = "PENDING" + IN_PROGRESS = "IN_PROGRESS" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + CANCELED = "CANCELED" + + +class UsageLimitBreachAction(StrEnum): + log = "log" + emit_metric = "emit-metric" + disable = "disable" + + +class UsageLimitFeatureType(StrEnum): + spectrum = "spectrum" + concurrency_scaling = "concurrency-scaling" + cross_region_datasharing = "cross-region-datasharing" + + +class UsageLimitLimitType(StrEnum): + time = "time" + data_scanned = "data-scanned" + + +class UsageLimitPeriod(StrEnum): + daily = "daily" + weekly = "weekly" + monthly = "monthly" + + +class ZeroETLIntegrationStatus(StrEnum): + creating = "creating" + active = "active" + modifying = "modifying" + failed = "failed" + deleting = "deleting" + syncing = "syncing" + needs_attention = "needs_attention" + + +class AccessToClusterDeniedFault(ServiceException): + code: str = "AccessToClusterDenied" + sender_fault: bool = True + status_code: int = 400 + + +class AccessToSnapshotDeniedFault(ServiceException): + code: str = "AccessToSnapshotDenied" + sender_fault: bool = True + status_code: int = 400 + + +class AuthenticationProfileAlreadyExistsFault(ServiceException): + code: str = "AuthenticationProfileAlreadyExistsFault" + sender_fault: bool = True + status_code: int = 400 + + +class AuthenticationProfileNotFoundFault(ServiceException): + code: str = "AuthenticationProfileNotFoundFault" + sender_fault: bool = True + status_code: int = 404 + + +class AuthenticationProfileQuotaExceededFault(ServiceException): + code: str = "AuthenticationProfileQuotaExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class AuthorizationAlreadyExistsFault(ServiceException): + code: str = "AuthorizationAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class AuthorizationNotFoundFault(ServiceException): + code: str = "AuthorizationNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class AuthorizationQuotaExceededFault(ServiceException): + code: str = "AuthorizationQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class BatchDeleteRequestSizeExceededFault(ServiceException): + code: str = "BatchDeleteRequestSizeExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class BatchModifyClusterSnapshotsLimitExceededFault(ServiceException): + code: str = "BatchModifyClusterSnapshotsLimitExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class BucketNotFoundFault(ServiceException): + code: str = "BucketNotFoundFault" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterAlreadyExistsFault(ServiceException): + code: str = "ClusterAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterNotFoundFault(ServiceException): + 
code: str = "ClusterNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ClusterOnLatestRevisionFault(ServiceException): + code: str = "ClusterOnLatestRevision" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterParameterGroupAlreadyExistsFault(ServiceException): + code: str = "ClusterParameterGroupAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterParameterGroupNotFoundFault(ServiceException): + code: str = "ClusterParameterGroupNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ClusterParameterGroupQuotaExceededFault(ServiceException): + code: str = "ClusterParameterGroupQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterQuotaExceededFault(ServiceException): + code: str = "ClusterQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterSecurityGroupAlreadyExistsFault(ServiceException): + code: str = "ClusterSecurityGroupAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterSecurityGroupNotFoundFault(ServiceException): + code: str = "ClusterSecurityGroupNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ClusterSecurityGroupQuotaExceededFault(ServiceException): + code: str = "QuotaExceeded.ClusterSecurityGroup" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterSnapshotAlreadyExistsFault(ServiceException): + code: str = "ClusterSnapshotAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterSnapshotNotFoundFault(ServiceException): + code: str = "ClusterSnapshotNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ClusterSnapshotQuotaExceededFault(ServiceException): + code: str = "ClusterSnapshotQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterSubnetGroupAlreadyExistsFault(ServiceException): + code: str = "ClusterSubnetGroupAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterSubnetGroupNotFoundFault(ServiceException): + code: str = "ClusterSubnetGroupNotFoundFault" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterSubnetGroupQuotaExceededFault(ServiceException): + code: str = "ClusterSubnetGroupQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class ClusterSubnetQuotaExceededFault(ServiceException): + code: str = "ClusterSubnetQuotaExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class ConflictPolicyUpdateFault(ServiceException): + code: str = "ConflictPolicyUpdateFault" + sender_fault: bool = True + status_code: int = 409 + + +class CopyToRegionDisabledFault(ServiceException): + code: str = "CopyToRegionDisabledFault" + sender_fault: bool = True + status_code: int = 400 + + +class CustomCnameAssociationFault(ServiceException): + code: str = "CustomCnameAssociationFault" + sender_fault: bool = True + status_code: int = 400 + + +class CustomDomainAssociationNotFoundFault(ServiceException): + code: str = "CustomDomainAssociationNotFoundFault" + sender_fault: bool = True + status_code: int = 404 + + +class DependentServiceAccessDeniedFault(ServiceException): + code: str = "DependentServiceAccessDenied" + sender_fault: bool = True + status_code: int = 403 + + +class DependentServiceRequestThrottlingFault(ServiceException): + code: str = "DependentServiceRequestThrottlingFault" + sender_fault: bool = True + status_code: int = 400 + + +class 
DependentServiceUnavailableFault(ServiceException): + code: str = "DependentServiceUnavailableFault" + sender_fault: bool = True + status_code: int = 400 + + +class EndpointAlreadyExistsFault(ServiceException): + code: str = "EndpointAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class EndpointAuthorizationAlreadyExistsFault(ServiceException): + code: str = "EndpointAuthorizationAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class EndpointAuthorizationNotFoundFault(ServiceException): + code: str = "EndpointAuthorizationNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class EndpointAuthorizationsPerClusterLimitExceededFault(ServiceException): + code: str = "EndpointAuthorizationsPerClusterLimitExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class EndpointNotFoundFault(ServiceException): + code: str = "EndpointNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class EndpointsPerAuthorizationLimitExceededFault(ServiceException): + code: str = "EndpointsPerAuthorizationLimitExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class EndpointsPerClusterLimitExceededFault(ServiceException): + code: str = "EndpointsPerClusterLimitExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class EventSubscriptionQuotaExceededFault(ServiceException): + code: str = "EventSubscriptionQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class HsmClientCertificateAlreadyExistsFault(ServiceException): + code: str = "HsmClientCertificateAlreadyExistsFault" + sender_fault: bool = True + status_code: int = 400 + + +class HsmClientCertificateNotFoundFault(ServiceException): + code: str = "HsmClientCertificateNotFoundFault" + sender_fault: bool = True + status_code: int = 400 + + +class HsmClientCertificateQuotaExceededFault(ServiceException): + code: str = "HsmClientCertificateQuotaExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class HsmConfigurationAlreadyExistsFault(ServiceException): + code: str = "HsmConfigurationAlreadyExistsFault" + sender_fault: bool = True + status_code: int = 400 + + +class HsmConfigurationNotFoundFault(ServiceException): + code: str = "HsmConfigurationNotFoundFault" + sender_fault: bool = True + status_code: int = 400 + + +class HsmConfigurationQuotaExceededFault(ServiceException): + code: str = "HsmConfigurationQuotaExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class InProgressTableRestoreQuotaExceededFault(ServiceException): + code: str = "InProgressTableRestoreQuotaExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class IncompatibleOrderableOptions(ServiceException): + code: str = "IncompatibleOrderableOptions" + sender_fault: bool = True + status_code: int = 400 + + +class InsufficientClusterCapacityFault(ServiceException): + code: str = "InsufficientClusterCapacity" + sender_fault: bool = True + status_code: int = 400 + + +class InsufficientS3BucketPolicyFault(ServiceException): + code: str = "InsufficientS3BucketPolicyFault" + sender_fault: bool = True + status_code: int = 400 + + +class IntegrationAlreadyExistsFault(ServiceException): + code: str = "IntegrationAlreadyExistsFault" + sender_fault: bool = True + status_code: int = 400 + + +class IntegrationConflictOperationFault(ServiceException): + code: str = "IntegrationConflictOperationFault" + sender_fault: bool = True + status_code: int = 400 + + +class 
IntegrationConflictStateFault(ServiceException): + code: str = "IntegrationConflictStateFault" + sender_fault: bool = True + status_code: int = 400 + + +class IntegrationNotFoundFault(ServiceException): + code: str = "IntegrationNotFoundFault" + sender_fault: bool = True + status_code: int = 404 + + +class IntegrationQuotaExceededFault(ServiceException): + code: str = "IntegrationQuotaExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class IntegrationSourceNotFoundFault(ServiceException): + code: str = "IntegrationSourceNotFoundFault" + sender_fault: bool = True + status_code: int = 404 + + +class IntegrationTargetNotFoundFault(ServiceException): + code: str = "IntegrationTargetNotFoundFault" + sender_fault: bool = True + status_code: int = 404 + + +class InvalidAuthenticationProfileRequestFault(ServiceException): + code: str = "InvalidAuthenticationProfileRequestFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidAuthorizationStateFault(ServiceException): + code: str = "InvalidAuthorizationState" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidClusterParameterGroupStateFault(ServiceException): + code: str = "InvalidClusterParameterGroupState" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidClusterSecurityGroupStateFault(ServiceException): + code: str = "InvalidClusterSecurityGroupState" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidClusterSnapshotScheduleStateFault(ServiceException): + code: str = "InvalidClusterSnapshotScheduleState" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidClusterSnapshotStateFault(ServiceException): + code: str = "InvalidClusterSnapshotState" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidClusterStateFault(ServiceException): + code: str = "InvalidClusterState" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidClusterSubnetGroupStateFault(ServiceException): + code: str = "InvalidClusterSubnetGroupStateFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidClusterSubnetStateFault(ServiceException): + code: str = "InvalidClusterSubnetStateFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidClusterTrackFault(ServiceException): + code: str = "InvalidClusterTrack" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidDataShareFault(ServiceException): + code: str = "InvalidDataShareFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidElasticIpFault(ServiceException): + code: str = "InvalidElasticIpFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidEndpointStateFault(ServiceException): + code: str = "InvalidEndpointState" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidHsmClientCertificateStateFault(ServiceException): + code: str = "InvalidHsmClientCertificateStateFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidHsmConfigurationStateFault(ServiceException): + code: str = "InvalidHsmConfigurationStateFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidNamespaceFault(ServiceException): + code: str = "InvalidNamespaceFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidPolicyFault(ServiceException): + code: str = "InvalidPolicyFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidReservedNodeStateFault(ServiceException): + code: str = 
"InvalidReservedNodeState" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidRestoreFault(ServiceException): + code: str = "InvalidRestore" + sender_fault: bool = True + status_code: int = 406 + + +class InvalidRetentionPeriodFault(ServiceException): + code: str = "InvalidRetentionPeriodFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidS3BucketNameFault(ServiceException): + code: str = "InvalidS3BucketNameFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidS3KeyPrefixFault(ServiceException): + code: str = "InvalidS3KeyPrefixFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidScheduleFault(ServiceException): + code: str = "InvalidSchedule" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidScheduledActionFault(ServiceException): + code: str = "InvalidScheduledAction" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidSnapshotCopyGrantStateFault(ServiceException): + code: str = "InvalidSnapshotCopyGrantStateFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidSubnet(ServiceException): + code: str = "InvalidSubnet" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidSubscriptionStateFault(ServiceException): + code: str = "InvalidSubscriptionStateFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidTableRestoreArgumentFault(ServiceException): + code: str = "InvalidTableRestoreArgument" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidTagFault(ServiceException): + code: str = "InvalidTagFault" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidUsageLimitFault(ServiceException): + code: str = "InvalidUsageLimit" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidVPCNetworkStateFault(ServiceException): + code: str = "InvalidVPCNetworkStateFault" + sender_fault: bool = True + status_code: int = 400 + + +class Ipv6CidrBlockNotFoundFault(ServiceException): + code: str = "Ipv6CidrBlockNotFoundFault" + sender_fault: bool = True + status_code: int = 400 + + +class LimitExceededFault(ServiceException): + code: str = "LimitExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class NumberOfNodesPerClusterLimitExceededFault(ServiceException): + code: str = "NumberOfNodesPerClusterLimitExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class NumberOfNodesQuotaExceededFault(ServiceException): + code: str = "NumberOfNodesQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class PartnerNotFoundFault(ServiceException): + code: str = "PartnerNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class RedshiftIdcApplicationAlreadyExistsFault(ServiceException): + code: str = "RedshiftIdcApplicationAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class RedshiftIdcApplicationNotExistsFault(ServiceException): + code: str = "RedshiftIdcApplicationNotExists" + sender_fault: bool = True + status_code: int = 404 + + +class RedshiftIdcApplicationQuotaExceededFault(ServiceException): + code: str = "RedshiftIdcApplicationQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class ReservedNodeAlreadyExistsFault(ServiceException): + code: str = "ReservedNodeAlreadyExists" + sender_fault: bool = True + status_code: int = 404 + + +class ReservedNodeAlreadyMigratedFault(ServiceException): + code: str = "ReservedNodeAlreadyMigrated" + sender_fault: 
bool = True + status_code: int = 400 + + +class ReservedNodeExchangeNotFoundFault(ServiceException): + code: str = "ReservedNodeExchangeNotFond" + sender_fault: bool = True + status_code: int = 404 + + +class ReservedNodeNotFoundFault(ServiceException): + code: str = "ReservedNodeNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ReservedNodeOfferingNotFoundFault(ServiceException): + code: str = "ReservedNodeOfferingNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ReservedNodeQuotaExceededFault(ServiceException): + code: str = "ReservedNodeQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class ResizeNotFoundFault(ServiceException): + code: str = "ResizeNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ResourceNotFoundFault(ServiceException): + code: str = "ResourceNotFoundFault" + sender_fault: bool = True + status_code: int = 404 + + +class SNSInvalidTopicFault(ServiceException): + code: str = "SNSInvalidTopic" + sender_fault: bool = True + status_code: int = 400 + + +class SNSNoAuthorizationFault(ServiceException): + code: str = "SNSNoAuthorization" + sender_fault: bool = True + status_code: int = 400 + + +class SNSTopicArnNotFoundFault(ServiceException): + code: str = "SNSTopicArnNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class ScheduleDefinitionTypeUnsupportedFault(ServiceException): + code: str = "ScheduleDefinitionTypeUnsupported" + sender_fault: bool = True + status_code: int = 400 + + +class ScheduledActionAlreadyExistsFault(ServiceException): + code: str = "ScheduledActionAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class ScheduledActionNotFoundFault(ServiceException): + code: str = "ScheduledActionNotFound" + sender_fault: bool = True + status_code: int = 400 + + +class ScheduledActionQuotaExceededFault(ServiceException): + code: str = "ScheduledActionQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class ScheduledActionTypeUnsupportedFault(ServiceException): + code: str = "ScheduledActionTypeUnsupported" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotCopyAlreadyDisabledFault(ServiceException): + code: str = "SnapshotCopyAlreadyDisabledFault" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotCopyAlreadyEnabledFault(ServiceException): + code: str = "SnapshotCopyAlreadyEnabledFault" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotCopyDisabledFault(ServiceException): + code: str = "SnapshotCopyDisabledFault" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotCopyGrantAlreadyExistsFault(ServiceException): + code: str = "SnapshotCopyGrantAlreadyExistsFault" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotCopyGrantNotFoundFault(ServiceException): + code: str = "SnapshotCopyGrantNotFoundFault" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotCopyGrantQuotaExceededFault(ServiceException): + code: str = "SnapshotCopyGrantQuotaExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotScheduleAlreadyExistsFault(ServiceException): + code: str = "SnapshotScheduleAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotScheduleNotFoundFault(ServiceException): + code: str = "SnapshotScheduleNotFound" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotScheduleQuotaExceededFault(ServiceException): + code: str = 
"SnapshotScheduleQuotaExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class SnapshotScheduleUpdateInProgressFault(ServiceException): + code: str = "SnapshotScheduleUpdateInProgress" + sender_fault: bool = True + status_code: int = 400 + + +class SourceNotFoundFault(ServiceException): + code: str = "SourceNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class SubnetAlreadyInUse(ServiceException): + code: str = "SubnetAlreadyInUse" + sender_fault: bool = True + status_code: int = 400 + + +class SubscriptionAlreadyExistFault(ServiceException): + code: str = "SubscriptionAlreadyExist" + sender_fault: bool = True + status_code: int = 400 + + +class SubscriptionCategoryNotFoundFault(ServiceException): + code: str = "SubscriptionCategoryNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class SubscriptionEventIdNotFoundFault(ServiceException): + code: str = "SubscriptionEventIdNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class SubscriptionNotFoundFault(ServiceException): + code: str = "SubscriptionNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class SubscriptionSeverityNotFoundFault(ServiceException): + code: str = "SubscriptionSeverityNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class TableLimitExceededFault(ServiceException): + code: str = "TableLimitExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class TableRestoreNotFoundFault(ServiceException): + code: str = "TableRestoreNotFoundFault" + sender_fault: bool = True + status_code: int = 400 + + +class TagLimitExceededFault(ServiceException): + code: str = "TagLimitExceededFault" + sender_fault: bool = True + status_code: int = 400 + + +class UnauthorizedOperation(ServiceException): + code: str = "UnauthorizedOperation" + sender_fault: bool = True + status_code: int = 400 + + +class UnauthorizedPartnerIntegrationFault(ServiceException): + code: str = "UnauthorizedPartnerIntegration" + sender_fault: bool = True + status_code: int = 401 + + +class UnknownSnapshotCopyRegionFault(ServiceException): + code: str = "UnknownSnapshotCopyRegionFault" + sender_fault: bool = True + status_code: int = 404 + + +class UnsupportedOperationFault(ServiceException): + code: str = "UnsupportedOperation" + sender_fault: bool = True + status_code: int = 400 + + +class UnsupportedOptionFault(ServiceException): + code: str = "UnsupportedOptionFault" + sender_fault: bool = True + status_code: int = 400 + + +class UsageLimitAlreadyExistsFault(ServiceException): + code: str = "UsageLimitAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + + +class UsageLimitNotFoundFault(ServiceException): + code: str = "UsageLimitNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class AcceptReservedNodeExchangeInputMessage(ServiceRequest): + ReservedNodeId: String + TargetReservedNodeOfferingId: String + + +class RecurringCharge(TypedDict, total=False): + RecurringChargeAmount: Optional[Double] + RecurringChargeFrequency: Optional[String] + + +RecurringChargeList = List[RecurringCharge] +TStamp = datetime + + +class ReservedNode(TypedDict, total=False): + ReservedNodeId: Optional[String] + ReservedNodeOfferingId: Optional[String] + NodeType: Optional[String] + StartTime: Optional[TStamp] + Duration: Optional[Integer] + FixedPrice: Optional[Double] + UsagePrice: Optional[Double] + CurrencyCode: Optional[String] + NodeCount: Optional[Integer] + State: Optional[String] + OfferingType: Optional[String] + 
RecurringCharges: Optional[RecurringChargeList] + ReservedNodeOfferingType: Optional[ReservedNodeOfferingType] + + +class AcceptReservedNodeExchangeOutputMessage(TypedDict, total=False): + ExchangedReservedNode: Optional[ReservedNode] + + +class AttributeValueTarget(TypedDict, total=False): + AttributeValue: Optional[String] + + +AttributeValueList = List[AttributeValueTarget] + + +class AccountAttribute(TypedDict, total=False): + AttributeName: Optional[String] + AttributeValues: Optional[AttributeValueList] + + +AttributeList = List[AccountAttribute] + + +class AccountAttributeList(TypedDict, total=False): + AccountAttributes: Optional[AttributeList] + + +class AccountWithRestoreAccess(TypedDict, total=False): + AccountId: Optional[String] + AccountAlias: Optional[String] + + +AccountsWithRestoreAccessList = List[AccountWithRestoreAccess] + + +class AquaConfiguration(TypedDict, total=False): + AquaStatus: Optional[AquaStatus] + AquaConfigurationStatus: Optional[AquaConfigurationStatus] + + +class AssociateDataShareConsumerMessage(ServiceRequest): + DataShareArn: String + AssociateEntireAccount: Optional[BooleanOptional] + ConsumerArn: Optional[String] + ConsumerRegion: Optional[String] + AllowWrites: Optional[BooleanOptional] + + +class ClusterAssociatedToSchedule(TypedDict, total=False): + ClusterIdentifier: Optional[String] + ScheduleAssociationState: Optional[ScheduleState] + + +AssociatedClusterList = List[ClusterAssociatedToSchedule] + + +class CertificateAssociation(TypedDict, total=False): + CustomDomainName: Optional[String] + ClusterIdentifier: Optional[String] + + +CertificateAssociationList = List[CertificateAssociation] + + +class Association(TypedDict, total=False): + CustomDomainCertificateArn: Optional[String] + CustomDomainCertificateExpiryDate: Optional[TStamp] + CertificateAssociations: Optional[CertificateAssociationList] + + +AssociationList = List[Association] +AttributeNameList = List[String] + + +class AuthenticationProfile(TypedDict, total=False): + AuthenticationProfileName: Optional[AuthenticationProfileNameString] + AuthenticationProfileContent: Optional[String] + + +AuthenticationProfileList = List[AuthenticationProfile] + + +class AuthorizeClusterSecurityGroupIngressMessage(ServiceRequest): + ClusterSecurityGroupName: String + CIDRIP: Optional[String] + EC2SecurityGroupName: Optional[String] + EC2SecurityGroupOwnerId: Optional[String] + + +class Tag(TypedDict, total=False): + Key: Optional[String] + Value: Optional[String] + + +TagList = List[Tag] + + +class IPRange(TypedDict, total=False): + Status: Optional[String] + CIDRIP: Optional[String] + Tags: Optional[TagList] + + +IPRangeList = List[IPRange] + + +class EC2SecurityGroup(TypedDict, total=False): + Status: Optional[String] + EC2SecurityGroupName: Optional[String] + EC2SecurityGroupOwnerId: Optional[String] + Tags: Optional[TagList] + + +EC2SecurityGroupList = List[EC2SecurityGroup] + + +class ClusterSecurityGroup(TypedDict, total=False): + ClusterSecurityGroupName: Optional[String] + Description: Optional[String] + EC2SecurityGroups: Optional[EC2SecurityGroupList] + IPRanges: Optional[IPRangeList] + Tags: Optional[TagList] + + +class AuthorizeClusterSecurityGroupIngressResult(TypedDict, total=False): + ClusterSecurityGroup: Optional[ClusterSecurityGroup] + + +class AuthorizeDataShareMessage(ServiceRequest): + DataShareArn: String + ConsumerIdentifier: String + AllowWrites: Optional[BooleanOptional] + + +VpcIdentifierList = List[String] + + +class AuthorizeEndpointAccessMessage(ServiceRequest): + 
ClusterIdentifier: Optional[String] + Account: String + VpcIds: Optional[VpcIdentifierList] + + +class AuthorizeSnapshotAccessMessage(ServiceRequest): + SnapshotIdentifier: Optional[String] + SnapshotArn: Optional[String] + SnapshotClusterIdentifier: Optional[String] + AccountWithRestoreAccess: String + + +RestorableNodeTypeList = List[String] +Long = int + + +class Snapshot(TypedDict, total=False): + SnapshotIdentifier: Optional[String] + ClusterIdentifier: Optional[String] + SnapshotCreateTime: Optional[TStamp] + Status: Optional[String] + Port: Optional[Integer] + AvailabilityZone: Optional[String] + ClusterCreateTime: Optional[TStamp] + MasterUsername: Optional[String] + ClusterVersion: Optional[String] + EngineFullVersion: Optional[String] + SnapshotType: Optional[String] + NodeType: Optional[String] + NumberOfNodes: Optional[Integer] + DBName: Optional[String] + VpcId: Optional[String] + Encrypted: Optional[Boolean] + KmsKeyId: Optional[String] + EncryptedWithHSM: Optional[Boolean] + AccountsWithRestoreAccess: Optional[AccountsWithRestoreAccessList] + OwnerAccount: Optional[String] + TotalBackupSizeInMegaBytes: Optional[Double] + ActualIncrementalBackupSizeInMegaBytes: Optional[Double] + BackupProgressInMegaBytes: Optional[Double] + CurrentBackupRateInMegaBytesPerSecond: Optional[Double] + EstimatedSecondsToCompletion: Optional[Long] + ElapsedTimeInSeconds: Optional[Long] + SourceRegion: Optional[String] + Tags: Optional[TagList] + RestorableNodeTypes: Optional[RestorableNodeTypeList] + EnhancedVpcRouting: Optional[Boolean] + MaintenanceTrackName: Optional[String] + ManualSnapshotRetentionPeriod: Optional[IntegerOptional] + ManualSnapshotRemainingDays: Optional[IntegerOptional] + SnapshotRetentionStartTime: Optional[TStamp] + MasterPasswordSecretArn: Optional[String] + MasterPasswordSecretKmsKeyId: Optional[String] + SnapshotArn: Optional[String] + + +class AuthorizeSnapshotAccessResult(TypedDict, total=False): + Snapshot: Optional[Snapshot] + + +AuthorizedAudienceList = List[String] + + +class AuthorizedTokenIssuer(TypedDict, total=False): + TrustedTokenIssuerArn: Optional[String] + AuthorizedAudiencesList: Optional[AuthorizedAudienceList] + + +AuthorizedTokenIssuerList = List[AuthorizedTokenIssuer] + + +class SupportedPlatform(TypedDict, total=False): + Name: Optional[String] + + +SupportedPlatformsList = List[SupportedPlatform] + + +class AvailabilityZone(TypedDict, total=False): + Name: Optional[String] + SupportedPlatforms: Optional[SupportedPlatformsList] + + +AvailabilityZoneList = List[AvailabilityZone] + + +class DeleteClusterSnapshotMessage(ServiceRequest): + SnapshotIdentifier: String + SnapshotClusterIdentifier: Optional[String] + + +DeleteClusterSnapshotMessageList = List[DeleteClusterSnapshotMessage] + + +class BatchDeleteClusterSnapshotsRequest(ServiceRequest): + Identifiers: DeleteClusterSnapshotMessageList + + +class SnapshotErrorMessage(TypedDict, total=False): + SnapshotIdentifier: Optional[String] + SnapshotClusterIdentifier: Optional[String] + FailureCode: Optional[String] + FailureReason: Optional[String] + + +BatchSnapshotOperationErrorList = List[SnapshotErrorMessage] +SnapshotIdentifierList = List[String] + + +class BatchDeleteClusterSnapshotsResult(TypedDict, total=False): + Resources: Optional[SnapshotIdentifierList] + Errors: Optional[BatchSnapshotOperationErrorList] + + +class BatchModifyClusterSnapshotsMessage(ServiceRequest): + SnapshotIdentifierList: SnapshotIdentifierList + ManualSnapshotRetentionPeriod: Optional[IntegerOptional] + Force: 
Optional[Boolean] + + +BatchSnapshotOperationErrors = List[SnapshotErrorMessage] + + +class BatchModifyClusterSnapshotsOutputMessage(TypedDict, total=False): + Resources: Optional[SnapshotIdentifierList] + Errors: Optional[BatchSnapshotOperationErrors] + + +class CancelResizeMessage(ServiceRequest): + ClusterIdentifier: String + + +class ClusterNode(TypedDict, total=False): + NodeRole: Optional[String] + PrivateIPAddress: Optional[String] + PublicIPAddress: Optional[String] + + +ClusterNodesList = List[ClusterNode] + + +class SecondaryClusterInfo(TypedDict, total=False): + AvailabilityZone: Optional[String] + ClusterNodes: Optional[ClusterNodesList] + + +class ReservedNodeExchangeStatus(TypedDict, total=False): + ReservedNodeExchangeRequestId: Optional[String] + Status: Optional[ReservedNodeExchangeStatusType] + RequestTime: Optional[TStamp] + SourceReservedNodeId: Optional[String] + SourceReservedNodeType: Optional[String] + SourceReservedNodeCount: Optional[Integer] + TargetReservedNodeOfferingId: Optional[String] + TargetReservedNodeType: Optional[String] + TargetReservedNodeCount: Optional[Integer] + + +LongOptional = int + + +class ResizeInfo(TypedDict, total=False): + ResizeType: Optional[String] + AllowCancelResize: Optional[Boolean] + + +class DeferredMaintenanceWindow(TypedDict, total=False): + DeferMaintenanceIdentifier: Optional[String] + DeferMaintenanceStartTime: Optional[TStamp] + DeferMaintenanceEndTime: Optional[TStamp] + + +DeferredMaintenanceWindowsList = List[DeferredMaintenanceWindow] +PendingActionsList = List[String] + + +class ClusterIamRole(TypedDict, total=False): + IamRoleArn: Optional[String] + ApplyStatus: Optional[String] + + +ClusterIamRoleList = List[ClusterIamRole] + + +class ElasticIpStatus(TypedDict, total=False): + ElasticIp: Optional[String] + Status: Optional[String] + + +class ClusterSnapshotCopyStatus(TypedDict, total=False): + DestinationRegion: Optional[String] + RetentionPeriod: Optional[Long] + ManualSnapshotRetentionPeriod: Optional[Integer] + SnapshotCopyGrantName: Optional[String] + + +class HsmStatus(TypedDict, total=False): + HsmClientCertificateIdentifier: Optional[String] + HsmConfigurationIdentifier: Optional[String] + Status: Optional[String] + + +class DataTransferProgress(TypedDict, total=False): + Status: Optional[String] + CurrentRateInMegaBytesPerSecond: Optional[DoubleOptional] + TotalDataInMegaBytes: Optional[Long] + DataTransferredInMegaBytes: Optional[Long] + EstimatedTimeToCompletionInSeconds: Optional[LongOptional] + ElapsedTimeInSeconds: Optional[LongOptional] + + +class RestoreStatus(TypedDict, total=False): + Status: Optional[String] + CurrentRestoreRateInMegaBytesPerSecond: Optional[Double] + SnapshotSizeInMegaBytes: Optional[Long] + ProgressInMegaBytes: Optional[Long] + ElapsedTimeInSeconds: Optional[Long] + EstimatedTimeToCompletionInSeconds: Optional[Long] + + +class PendingModifiedValues(TypedDict, total=False): + MasterUserPassword: Optional[SensitiveString] + NodeType: Optional[String] + NumberOfNodes: Optional[IntegerOptional] + ClusterType: Optional[String] + ClusterVersion: Optional[String] + AutomatedSnapshotRetentionPeriod: Optional[IntegerOptional] + ClusterIdentifier: Optional[String] + PubliclyAccessible: Optional[BooleanOptional] + EnhancedVpcRouting: Optional[BooleanOptional] + MaintenanceTrackName: Optional[String] + EncryptionType: Optional[String] + + +class ClusterParameterStatus(TypedDict, total=False): + ParameterName: Optional[String] + ParameterApplyStatus: Optional[String] + 
ParameterApplyErrorDescription: Optional[String] + + +ClusterParameterStatusList = List[ClusterParameterStatus] + + +class ClusterParameterGroupStatus(TypedDict, total=False): + ParameterGroupName: Optional[String] + ParameterApplyStatus: Optional[String] + ClusterParameterStatusList: Optional[ClusterParameterStatusList] + + +ClusterParameterGroupStatusList = List[ClusterParameterGroupStatus] + + +class VpcSecurityGroupMembership(TypedDict, total=False): + VpcSecurityGroupId: Optional[String] + Status: Optional[String] + + +VpcSecurityGroupMembershipList = List[VpcSecurityGroupMembership] + + +class ClusterSecurityGroupMembership(TypedDict, total=False): + ClusterSecurityGroupName: Optional[String] + Status: Optional[String] + + +ClusterSecurityGroupMembershipList = List[ClusterSecurityGroupMembership] + + +class NetworkInterface(TypedDict, total=False): + NetworkInterfaceId: Optional[String] + SubnetId: Optional[String] + PrivateIpAddress: Optional[String] + AvailabilityZone: Optional[String] + Ipv6Address: Optional[String] + + +NetworkInterfaceList = List[NetworkInterface] + + +class VpcEndpoint(TypedDict, total=False): + VpcEndpointId: Optional[String] + VpcId: Optional[String] + NetworkInterfaces: Optional[NetworkInterfaceList] + + +VpcEndpointsList = List[VpcEndpoint] + + +class Endpoint(TypedDict, total=False): + Address: Optional[String] + Port: Optional[Integer] + VpcEndpoints: Optional[VpcEndpointsList] + + +class Cluster(TypedDict, total=False): + ClusterIdentifier: Optional[String] + NodeType: Optional[String] + ClusterStatus: Optional[String] + ClusterAvailabilityStatus: Optional[String] + ModifyStatus: Optional[String] + MasterUsername: Optional[String] + DBName: Optional[String] + Endpoint: Optional[Endpoint] + ClusterCreateTime: Optional[TStamp] + AutomatedSnapshotRetentionPeriod: Optional[Integer] + ManualSnapshotRetentionPeriod: Optional[Integer] + ClusterSecurityGroups: Optional[ClusterSecurityGroupMembershipList] + VpcSecurityGroups: Optional[VpcSecurityGroupMembershipList] + ClusterParameterGroups: Optional[ClusterParameterGroupStatusList] + ClusterSubnetGroupName: Optional[String] + VpcId: Optional[String] + AvailabilityZone: Optional[String] + PreferredMaintenanceWindow: Optional[String] + PendingModifiedValues: Optional[PendingModifiedValues] + ClusterVersion: Optional[String] + AllowVersionUpgrade: Optional[Boolean] + NumberOfNodes: Optional[Integer] + PubliclyAccessible: Optional[Boolean] + Encrypted: Optional[Boolean] + RestoreStatus: Optional[RestoreStatus] + DataTransferProgress: Optional[DataTransferProgress] + HsmStatus: Optional[HsmStatus] + ClusterSnapshotCopyStatus: Optional[ClusterSnapshotCopyStatus] + ClusterPublicKey: Optional[String] + ClusterNodes: Optional[ClusterNodesList] + ElasticIpStatus: Optional[ElasticIpStatus] + ClusterRevisionNumber: Optional[String] + Tags: Optional[TagList] + KmsKeyId: Optional[String] + EnhancedVpcRouting: Optional[Boolean] + IamRoles: Optional[ClusterIamRoleList] + PendingActions: Optional[PendingActionsList] + MaintenanceTrackName: Optional[String] + ElasticResizeNumberOfNodeOptions: Optional[String] + DeferredMaintenanceWindows: Optional[DeferredMaintenanceWindowsList] + SnapshotScheduleIdentifier: Optional[String] + SnapshotScheduleState: Optional[ScheduleState] + ExpectedNextSnapshotScheduleTime: Optional[TStamp] + ExpectedNextSnapshotScheduleTimeStatus: Optional[String] + NextMaintenanceWindowStartTime: Optional[TStamp] + ResizeInfo: Optional[ResizeInfo] + AvailabilityZoneRelocationStatus: Optional[String] + 
ClusterNamespaceArn: Optional[String] + TotalStorageCapacityInMegaBytes: Optional[LongOptional] + AquaConfiguration: Optional[AquaConfiguration] + DefaultIamRoleArn: Optional[String] + ReservedNodeExchangeStatus: Optional[ReservedNodeExchangeStatus] + CustomDomainName: Optional[String] + CustomDomainCertificateArn: Optional[String] + CustomDomainCertificateExpiryDate: Optional[TStamp] + MasterPasswordSecretArn: Optional[String] + MasterPasswordSecretKmsKeyId: Optional[String] + IpAddressType: Optional[String] + MultiAZ: Optional[String] + MultiAZSecondary: Optional[SecondaryClusterInfo] + + +class ClusterCredentials(TypedDict, total=False): + DbUser: Optional[String] + DbPassword: Optional[SensitiveString] + Expiration: Optional[TStamp] + + +class RevisionTarget(TypedDict, total=False): + DatabaseRevision: Optional[String] + Description: Optional[String] + DatabaseRevisionReleaseDate: Optional[TStamp] + + +RevisionTargetsList = List[RevisionTarget] + + +class ClusterDbRevision(TypedDict, total=False): + ClusterIdentifier: Optional[String] + CurrentDatabaseRevision: Optional[String] + DatabaseRevisionReleaseDate: Optional[TStamp] + RevisionTargets: Optional[RevisionTargetsList] + + +ClusterDbRevisionsList = List[ClusterDbRevision] + + +class ClusterDbRevisionsMessage(TypedDict, total=False): + Marker: Optional[String] + ClusterDbRevisions: Optional[ClusterDbRevisionsList] + + +class ClusterExtendedCredentials(TypedDict, total=False): + DbUser: Optional[String] + DbPassword: Optional[SensitiveString] + Expiration: Optional[TStamp] + NextRefreshTime: Optional[TStamp] + + +ClusterList = List[Cluster] + + +class ClusterParameterGroup(TypedDict, total=False): + ParameterGroupName: Optional[String] + ParameterGroupFamily: Optional[String] + Description: Optional[String] + Tags: Optional[TagList] + + +class Parameter(TypedDict, total=False): + ParameterName: Optional[String] + ParameterValue: Optional[String] + Description: Optional[String] + Source: Optional[String] + DataType: Optional[String] + AllowedValues: Optional[String] + ApplyType: Optional[ParameterApplyType] + IsModifiable: Optional[Boolean] + MinimumEngineVersion: Optional[String] + + +ParametersList = List[Parameter] + + +class ClusterParameterGroupDetails(TypedDict, total=False): + Parameters: Optional[ParametersList] + Marker: Optional[String] + + +class ClusterParameterGroupNameMessage(TypedDict, total=False): + ParameterGroupName: Optional[String] + ParameterGroupStatus: Optional[String] + + +ParameterGroupList = List[ClusterParameterGroup] + + +class ClusterParameterGroupsMessage(TypedDict, total=False): + Marker: Optional[String] + ParameterGroups: Optional[ParameterGroupList] + + +ClusterSecurityGroups = List[ClusterSecurityGroup] + + +class ClusterSecurityGroupMessage(TypedDict, total=False): + Marker: Optional[String] + ClusterSecurityGroups: Optional[ClusterSecurityGroups] + + +ClusterSecurityGroupNameList = List[String] +ValueStringList = List[String] + + +class Subnet(TypedDict, total=False): + SubnetIdentifier: Optional[String] + SubnetAvailabilityZone: Optional[AvailabilityZone] + SubnetStatus: Optional[String] + + +SubnetList = List[Subnet] + + +class ClusterSubnetGroup(TypedDict, total=False): + ClusterSubnetGroupName: Optional[String] + Description: Optional[String] + VpcId: Optional[String] + SubnetGroupStatus: Optional[String] + Subnets: Optional[SubnetList] + Tags: Optional[TagList] + SupportedClusterIpAddressTypes: Optional[ValueStringList] + + +ClusterSubnetGroups = List[ClusterSubnetGroup] + + +class 
ClusterSubnetGroupMessage(TypedDict, total=False): + Marker: Optional[String] + ClusterSubnetGroups: Optional[ClusterSubnetGroups] + + +class ClusterVersion(TypedDict, total=False): + ClusterVersion: Optional[String] + ClusterParameterGroupFamily: Optional[String] + Description: Optional[String] + + +ClusterVersionList = List[ClusterVersion] + + +class ClusterVersionsMessage(TypedDict, total=False): + Marker: Optional[String] + ClusterVersions: Optional[ClusterVersionList] + + +class ClustersMessage(TypedDict, total=False): + Marker: Optional[String] + Clusters: Optional[ClusterList] + + +ConsumerIdentifierList = List[String] + + +class CopyClusterSnapshotMessage(ServiceRequest): + SourceSnapshotIdentifier: String + SourceSnapshotClusterIdentifier: Optional[String] + TargetSnapshotIdentifier: String + ManualSnapshotRetentionPeriod: Optional[IntegerOptional] + + +class CopyClusterSnapshotResult(TypedDict, total=False): + Snapshot: Optional[Snapshot] + + +class CreateAuthenticationProfileMessage(ServiceRequest): + AuthenticationProfileName: AuthenticationProfileNameString + AuthenticationProfileContent: String + + +class CreateAuthenticationProfileResult(TypedDict, total=False): + AuthenticationProfileName: Optional[AuthenticationProfileNameString] + AuthenticationProfileContent: Optional[String] + + +IamRoleArnList = List[String] +VpcSecurityGroupIdList = List[String] + + +class CreateClusterMessage(ServiceRequest): + DBName: Optional[String] + ClusterIdentifier: String + ClusterType: Optional[String] + NodeType: String + MasterUsername: String + MasterUserPassword: Optional[SensitiveString] + ClusterSecurityGroups: Optional[ClusterSecurityGroupNameList] + VpcSecurityGroupIds: Optional[VpcSecurityGroupIdList] + ClusterSubnetGroupName: Optional[String] + AvailabilityZone: Optional[String] + PreferredMaintenanceWindow: Optional[String] + ClusterParameterGroupName: Optional[String] + AutomatedSnapshotRetentionPeriod: Optional[IntegerOptional] + ManualSnapshotRetentionPeriod: Optional[IntegerOptional] + Port: Optional[IntegerOptional] + ClusterVersion: Optional[String] + AllowVersionUpgrade: Optional[BooleanOptional] + NumberOfNodes: Optional[IntegerOptional] + PubliclyAccessible: Optional[BooleanOptional] + Encrypted: Optional[BooleanOptional] + HsmClientCertificateIdentifier: Optional[String] + HsmConfigurationIdentifier: Optional[String] + ElasticIp: Optional[String] + Tags: Optional[TagList] + KmsKeyId: Optional[String] + EnhancedVpcRouting: Optional[BooleanOptional] + AdditionalInfo: Optional[String] + IamRoles: Optional[IamRoleArnList] + MaintenanceTrackName: Optional[String] + SnapshotScheduleIdentifier: Optional[String] + AvailabilityZoneRelocation: Optional[BooleanOptional] + AquaConfigurationStatus: Optional[AquaConfigurationStatus] + DefaultIamRoleArn: Optional[String] + LoadSampleData: Optional[String] + ManageMasterPassword: Optional[BooleanOptional] + MasterPasswordSecretKmsKeyId: Optional[String] + IpAddressType: Optional[String] + MultiAZ: Optional[BooleanOptional] + RedshiftIdcApplicationArn: Optional[String] + + +class CreateClusterParameterGroupMessage(ServiceRequest): + ParameterGroupName: String + ParameterGroupFamily: String + Description: String + Tags: Optional[TagList] + + +class CreateClusterParameterGroupResult(TypedDict, total=False): + ClusterParameterGroup: Optional[ClusterParameterGroup] + + +class CreateClusterResult(TypedDict, total=False): + Cluster: Optional[Cluster] + + +class CreateClusterSecurityGroupMessage(ServiceRequest): + ClusterSecurityGroupName: 
String + Description: String + Tags: Optional[TagList] + + +class CreateClusterSecurityGroupResult(TypedDict, total=False): + ClusterSecurityGroup: Optional[ClusterSecurityGroup] + + +class CreateClusterSnapshotMessage(ServiceRequest): + SnapshotIdentifier: String + ClusterIdentifier: String + ManualSnapshotRetentionPeriod: Optional[IntegerOptional] + Tags: Optional[TagList] + + +class CreateClusterSnapshotResult(TypedDict, total=False): + Snapshot: Optional[Snapshot] + + +SubnetIdentifierList = List[String] + + +class CreateClusterSubnetGroupMessage(ServiceRequest): + ClusterSubnetGroupName: String + Description: String + SubnetIds: SubnetIdentifierList + Tags: Optional[TagList] + + +class CreateClusterSubnetGroupResult(TypedDict, total=False): + ClusterSubnetGroup: Optional[ClusterSubnetGroup] + + +class CreateCustomDomainAssociationMessage(ServiceRequest): + CustomDomainName: CustomDomainNameString + CustomDomainCertificateArn: CustomDomainCertificateArnString + ClusterIdentifier: String + + +class CreateCustomDomainAssociationResult(TypedDict, total=False): + CustomDomainName: Optional[CustomDomainNameString] + CustomDomainCertificateArn: Optional[CustomDomainCertificateArnString] + ClusterIdentifier: Optional[String] + CustomDomainCertExpiryTime: Optional[String] + + +class CreateEndpointAccessMessage(ServiceRequest): + ClusterIdentifier: Optional[String] + ResourceOwner: Optional[String] + EndpointName: String + SubnetGroupName: String + VpcSecurityGroupIds: Optional[VpcSecurityGroupIdList] + + +EventCategoriesList = List[String] +SourceIdsList = List[String] + + +class CreateEventSubscriptionMessage(ServiceRequest): + SubscriptionName: String + SnsTopicArn: String + SourceType: Optional[String] + SourceIds: Optional[SourceIdsList] + EventCategories: Optional[EventCategoriesList] + Severity: Optional[String] + Enabled: Optional[BooleanOptional] + Tags: Optional[TagList] + + +class EventSubscription(TypedDict, total=False): + CustomerAwsId: Optional[String] + CustSubscriptionId: Optional[String] + SnsTopicArn: Optional[String] + Status: Optional[String] + SubscriptionCreationTime: Optional[TStamp] + SourceType: Optional[String] + SourceIdsList: Optional[SourceIdsList] + EventCategoriesList: Optional[EventCategoriesList] + Severity: Optional[String] + Enabled: Optional[Boolean] + Tags: Optional[TagList] + + +class CreateEventSubscriptionResult(TypedDict, total=False): + EventSubscription: Optional[EventSubscription] + + +class CreateHsmClientCertificateMessage(ServiceRequest): + HsmClientCertificateIdentifier: String + Tags: Optional[TagList] + + +class HsmClientCertificate(TypedDict, total=False): + HsmClientCertificateIdentifier: Optional[String] + HsmClientCertificatePublicKey: Optional[String] + Tags: Optional[TagList] + + +class CreateHsmClientCertificateResult(TypedDict, total=False): + HsmClientCertificate: Optional[HsmClientCertificate] + + +class CreateHsmConfigurationMessage(ServiceRequest): + HsmConfigurationIdentifier: String + Description: String + HsmIpAddress: String + HsmPartitionName: String + HsmPartitionPassword: String + HsmServerPublicCertificate: String + Tags: Optional[TagList] + + +class HsmConfiguration(TypedDict, total=False): + HsmConfigurationIdentifier: Optional[String] + Description: Optional[String] + HsmIpAddress: Optional[String] + HsmPartitionName: Optional[String] + Tags: Optional[TagList] + + +class CreateHsmConfigurationResult(TypedDict, total=False): + HsmConfiguration: Optional[HsmConfiguration] + + +EncryptionContextMap = Dict[String, String] + + 
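
Since both the `ServiceRequest` subclasses and the response shapes in this file are `TypedDict`s, a payload is just a plain dict that satisfies the annotations. A minimal sketch, assuming the definitions above are importable from LocalStack's generated `localstack.aws.api.redshift` module:

```python
from datetime import datetime, timezone

# Assumed import path for the generated stubs shown in this diff.
from localstack.aws.api.redshift import (
    CreateClusterSnapshotMessage,
    CreateClusterSnapshotResult,
    Snapshot,
)

# Request shapes subclass ServiceRequest (a TypedDict), so keyword
# construction yields a type-checked plain dict.
request = CreateClusterSnapshotMessage(
    SnapshotIdentifier="my-snapshot",  # required String
    ClusterIdentifier="my-cluster",    # required String
    ManualSnapshotRetentionPeriod=7,   # Optional[IntegerOptional]
    Tags=[{"Key": "env", "Value": "test"}],
)

# Response shapes are declared with total=False, so every key may be omitted.
snapshot = Snapshot(
    SnapshotIdentifier="my-snapshot",
    ClusterIdentifier="my-cluster",
    SnapshotCreateTime=datetime.now(timezone.utc),
    Status="available",
)
result = CreateClusterSnapshotResult(Snapshot=snapshot)
```
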
+class CreateIntegrationMessage(ServiceRequest): + SourceArn: SourceArn + TargetArn: TargetArn + IntegrationName: IntegrationName + KMSKeyId: Optional[String] + TagList: Optional[TagList] + AdditionalEncryptionContext: Optional[EncryptionContextMap] + Description: Optional[IntegrationDescription] + + +class ReadWriteAccess(TypedDict, total=False): + Authorization: ServiceAuthorization + + +class S3AccessGrantsScopeUnion(TypedDict, total=False): + ReadWriteAccess: Optional[ReadWriteAccess] + + +S3AccessGrantsServiceIntegrations = List[S3AccessGrantsScopeUnion] + + +class LakeFormationQuery(TypedDict, total=False): + Authorization: ServiceAuthorization + + +class LakeFormationScopeUnion(TypedDict, total=False): + LakeFormationQuery: Optional[LakeFormationQuery] + + +LakeFormationServiceIntegrations = List[LakeFormationScopeUnion] + + +class ServiceIntegrationsUnion(TypedDict, total=False): + LakeFormation: Optional[LakeFormationServiceIntegrations] + S3AccessGrants: Optional[S3AccessGrantsServiceIntegrations] + + +ServiceIntegrationList = List[ServiceIntegrationsUnion] + + +class CreateRedshiftIdcApplicationMessage(ServiceRequest): + IdcInstanceArn: String + RedshiftIdcApplicationName: RedshiftIdcApplicationName + IdentityNamespace: Optional[IdentityNamespaceString] + IdcDisplayName: IdcDisplayNameString + IamRoleArn: String + AuthorizedTokenIssuerList: Optional[AuthorizedTokenIssuerList] + ServiceIntegrations: Optional[ServiceIntegrationList] + + +class RedshiftIdcApplication(TypedDict, total=False): + IdcInstanceArn: Optional[String] + RedshiftIdcApplicationName: Optional[RedshiftIdcApplicationName] + RedshiftIdcApplicationArn: Optional[String] + IdentityNamespace: Optional[IdentityNamespaceString] + IdcDisplayName: Optional[IdcDisplayNameString] + IamRoleArn: Optional[String] + IdcManagedApplicationArn: Optional[String] + IdcOnboardStatus: Optional[String] + AuthorizedTokenIssuerList: Optional[AuthorizedTokenIssuerList] + ServiceIntegrations: Optional[ServiceIntegrationList] + + +class CreateRedshiftIdcApplicationResult(TypedDict, total=False): + RedshiftIdcApplication: Optional[RedshiftIdcApplication] + + +class ResumeClusterMessage(ServiceRequest): + ClusterIdentifier: String + + +class PauseClusterMessage(ServiceRequest): + ClusterIdentifier: String + + +class ResizeClusterMessage(ServiceRequest): + ClusterIdentifier: String + ClusterType: Optional[String] + NodeType: Optional[String] + NumberOfNodes: Optional[IntegerOptional] + Classic: Optional[BooleanOptional] + ReservedNodeId: Optional[String] + TargetReservedNodeOfferingId: Optional[String] + + +class ScheduledActionType(TypedDict, total=False): + ResizeCluster: Optional[ResizeClusterMessage] + PauseCluster: Optional[PauseClusterMessage] + ResumeCluster: Optional[ResumeClusterMessage] + + +class CreateScheduledActionMessage(ServiceRequest): + ScheduledActionName: String + TargetAction: ScheduledActionType + Schedule: String + IamRole: String + ScheduledActionDescription: Optional[String] + StartTime: Optional[TStamp] + EndTime: Optional[TStamp] + Enable: Optional[BooleanOptional] + + +class CreateSnapshotCopyGrantMessage(ServiceRequest): + SnapshotCopyGrantName: String + KmsKeyId: Optional[String] + Tags: Optional[TagList] + + +class SnapshotCopyGrant(TypedDict, total=False): + SnapshotCopyGrantName: Optional[String] + KmsKeyId: Optional[String] + Tags: Optional[TagList] + + +class CreateSnapshotCopyGrantResult(TypedDict, total=False): + SnapshotCopyGrant: Optional[SnapshotCopyGrant] + + +ScheduleDefinitionList = List[String] + + 
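
`ScheduledActionType` above acts as a union-style shape: exactly one of `ResizeCluster`, `PauseCluster`, or `ResumeCluster` is expected to be populated. A hedged sketch of building a pause schedule with these shapes (same assumed import path as before; the ARN and cron expression are placeholders):

```python
from localstack.aws.api.redshift import (
    CreateScheduledActionMessage,
    PauseClusterMessage,
    ScheduledActionType,
)

# Only one member of the union-style TypedDict is set.
target = ScheduledActionType(
    PauseCluster=PauseClusterMessage(ClusterIdentifier="my-cluster"),
)

request = CreateScheduledActionMessage(
    ScheduledActionName="nightly-pause",
    TargetAction=target,
    Schedule="cron(0 22 * * ? *)",  # Redshift scheduler cron syntax
    IamRole="arn:aws:iam::000000000000:role/redshift-scheduler",  # placeholder ARN
)
```
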
+class CreateSnapshotScheduleMessage(ServiceRequest): + ScheduleDefinitions: Optional[ScheduleDefinitionList] + ScheduleIdentifier: Optional[String] + ScheduleDescription: Optional[String] + Tags: Optional[TagList] + DryRun: Optional[BooleanOptional] + NextInvocations: Optional[IntegerOptional] + + +class CreateTagsMessage(ServiceRequest): + ResourceName: String + Tags: TagList + + +class CreateUsageLimitMessage(ServiceRequest): + ClusterIdentifier: String + FeatureType: UsageLimitFeatureType + LimitType: UsageLimitLimitType + Amount: Long + Period: Optional[UsageLimitPeriod] + BreachAction: Optional[UsageLimitBreachAction] + Tags: Optional[TagList] + + +class CustomDomainAssociationsMessage(TypedDict, total=False): + Marker: Optional[String] + Associations: Optional[AssociationList] + + +class CustomerStorageMessage(TypedDict, total=False): + TotalBackupSizeInMegaBytes: Optional[Double] + TotalProvisionedStorageInMegaBytes: Optional[Double] + + +class DataShareAssociation(TypedDict, total=False): + ConsumerIdentifier: Optional[String] + Status: Optional[DataShareStatus] + ConsumerRegion: Optional[String] + CreatedDate: Optional[TStamp] + StatusChangeDate: Optional[TStamp] + ProducerAllowedWrites: Optional[BooleanOptional] + ConsumerAcceptedWrites: Optional[BooleanOptional] + + +DataShareAssociationList = List[DataShareAssociation] + + +class DataShare(TypedDict, total=False): + DataShareArn: Optional[String] + ProducerArn: Optional[String] + AllowPubliclyAccessibleConsumers: Optional[Boolean] + DataShareAssociations: Optional[DataShareAssociationList] + ManagedBy: Optional[String] + DataShareType: Optional[DataShareType] + + +DataShareList = List[DataShare] +DbGroupList = List[String] + + +class DeauthorizeDataShareMessage(ServiceRequest): + DataShareArn: String + ConsumerIdentifier: String + + +class DefaultClusterParameters(TypedDict, total=False): + ParameterGroupFamily: Optional[String] + Marker: Optional[String] + Parameters: Optional[ParametersList] + + +class DeleteAuthenticationProfileMessage(ServiceRequest): + AuthenticationProfileName: AuthenticationProfileNameString + + +class DeleteAuthenticationProfileResult(TypedDict, total=False): + AuthenticationProfileName: Optional[AuthenticationProfileNameString] + + +class DeleteClusterMessage(ServiceRequest): + ClusterIdentifier: String + SkipFinalClusterSnapshot: Optional[Boolean] + FinalClusterSnapshotIdentifier: Optional[String] + FinalClusterSnapshotRetentionPeriod: Optional[IntegerOptional] + + +class DeleteClusterParameterGroupMessage(ServiceRequest): + ParameterGroupName: String + + +class DeleteClusterResult(TypedDict, total=False): + Cluster: Optional[Cluster] + + +class DeleteClusterSecurityGroupMessage(ServiceRequest): + ClusterSecurityGroupName: String + + +class DeleteClusterSnapshotResult(TypedDict, total=False): + Snapshot: Optional[Snapshot] + + +class DeleteClusterSubnetGroupMessage(ServiceRequest): + ClusterSubnetGroupName: String + + +class DeleteCustomDomainAssociationMessage(ServiceRequest): + ClusterIdentifier: String + CustomDomainName: CustomDomainNameString + + +class DeleteEndpointAccessMessage(ServiceRequest): + EndpointName: String + + +class DeleteEventSubscriptionMessage(ServiceRequest): + SubscriptionName: String + + +class DeleteHsmClientCertificateMessage(ServiceRequest): + HsmClientCertificateIdentifier: String + + +class DeleteHsmConfigurationMessage(ServiceRequest): + HsmConfigurationIdentifier: String + + +class DeleteIntegrationMessage(ServiceRequest): + IntegrationArn: IntegrationArn + + +class 
DeleteRedshiftIdcApplicationMessage(ServiceRequest): + RedshiftIdcApplicationArn: String + + +class DeleteResourcePolicyMessage(ServiceRequest): + ResourceArn: String + + +class DeleteScheduledActionMessage(ServiceRequest): + ScheduledActionName: String + + +class DeleteSnapshotCopyGrantMessage(ServiceRequest): + SnapshotCopyGrantName: String + + +class DeleteSnapshotScheduleMessage(ServiceRequest): + ScheduleIdentifier: String + + +TagKeyList = List[String] + + +class DeleteTagsMessage(ServiceRequest): + ResourceName: String + TagKeys: TagKeyList + + +class DeleteUsageLimitMessage(ServiceRequest): + UsageLimitId: String + + +class ProvisionedIdentifier(TypedDict, total=False): + ClusterIdentifier: String + + +class ServerlessIdentifier(TypedDict, total=False): + NamespaceIdentifier: String + WorkgroupIdentifier: String + + +class NamespaceIdentifierUnion(TypedDict, total=False): + ServerlessIdentifier: Optional[ServerlessIdentifier] + ProvisionedIdentifier: Optional[ProvisionedIdentifier] + + +class DeregisterNamespaceInputMessage(ServiceRequest): + NamespaceIdentifier: NamespaceIdentifierUnion + ConsumerIdentifiers: ConsumerIdentifierList + + +class DeregisterNamespaceOutputMessage(TypedDict, total=False): + Status: Optional[NamespaceRegistrationStatus] + + +class DescribeAccountAttributesMessage(ServiceRequest): + AttributeNames: Optional[AttributeNameList] + + +class DescribeAuthenticationProfilesMessage(ServiceRequest): + AuthenticationProfileName: Optional[AuthenticationProfileNameString] + + +class DescribeAuthenticationProfilesResult(TypedDict, total=False): + AuthenticationProfiles: Optional[AuthenticationProfileList] + + +class DescribeClusterDbRevisionsMessage(ServiceRequest): + ClusterIdentifier: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +TagValueList = List[String] + + +class DescribeClusterParameterGroupsMessage(ServiceRequest): + ParameterGroupName: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DescribeClusterParametersMessage(ServiceRequest): + ParameterGroupName: String + Source: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeClusterSecurityGroupsMessage(ServiceRequest): + ClusterSecurityGroupName: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class SnapshotSortingEntity(TypedDict, total=False): + Attribute: SnapshotAttributeToSortBy + SortOrder: Optional[SortByOrder] + + +SnapshotSortingEntityList = List[SnapshotSortingEntity] + + +class DescribeClusterSnapshotsMessage(ServiceRequest): + ClusterIdentifier: Optional[String] + SnapshotIdentifier: Optional[String] + SnapshotArn: Optional[String] + SnapshotType: Optional[String] + StartTime: Optional[TStamp] + EndTime: Optional[TStamp] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + OwnerAccount: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + ClusterExists: Optional[BooleanOptional] + SortingEntities: Optional[SnapshotSortingEntityList] + + +class DescribeClusterSubnetGroupsMessage(ServiceRequest): + ClusterSubnetGroupName: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DescribeClusterTracksMessage(ServiceRequest): + 
MaintenanceTrackName: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeClusterVersionsMessage(ServiceRequest): + ClusterVersion: Optional[String] + ClusterParameterGroupFamily: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeClustersMessage(ServiceRequest): + ClusterIdentifier: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DescribeCustomDomainAssociationsMessage(ServiceRequest): + CustomDomainName: Optional[CustomDomainNameString] + CustomDomainCertificateArn: Optional[CustomDomainCertificateArnString] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeDataSharesForConsumerMessage(ServiceRequest): + ConsumerArn: Optional[String] + Status: Optional[DataShareStatusForConsumer] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeDataSharesForConsumerResult(TypedDict, total=False): + DataShares: Optional[DataShareList] + Marker: Optional[String] + + +class DescribeDataSharesForProducerMessage(ServiceRequest): + ProducerArn: Optional[String] + Status: Optional[DataShareStatusForProducer] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeDataSharesForProducerResult(TypedDict, total=False): + DataShares: Optional[DataShareList] + Marker: Optional[String] + + +class DescribeDataSharesMessage(ServiceRequest): + DataShareArn: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeDataSharesResult(TypedDict, total=False): + DataShares: Optional[DataShareList] + Marker: Optional[String] + + +class DescribeDefaultClusterParametersMessage(ServiceRequest): + ParameterGroupFamily: String + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeDefaultClusterParametersResult(TypedDict, total=False): + DefaultClusterParameters: Optional[DefaultClusterParameters] + + +class DescribeEndpointAccessMessage(ServiceRequest): + ClusterIdentifier: Optional[String] + ResourceOwner: Optional[String] + EndpointName: Optional[String] + VpcId: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeEndpointAuthorizationMessage(ServiceRequest): + ClusterIdentifier: Optional[String] + Account: Optional[String] + Grantee: Optional[BooleanOptional] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeEventCategoriesMessage(ServiceRequest): + SourceType: Optional[String] + + +class DescribeEventSubscriptionsMessage(ServiceRequest): + SubscriptionName: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DescribeEventsMessage(ServiceRequest): + SourceIdentifier: Optional[String] + SourceType: Optional[SourceType] + StartTime: Optional[TStamp] + EndTime: Optional[TStamp] + Duration: Optional[IntegerOptional] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeHsmClientCertificatesMessage(ServiceRequest): + HsmClientCertificateIdentifier: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DescribeHsmConfigurationsMessage(ServiceRequest): + HsmConfigurationIdentifier: Optional[String] + MaxRecords: 
Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DescribeInboundIntegrationsMessage(ServiceRequest): + IntegrationArn: Optional[InboundIntegrationArn] + TargetArn: Optional[TargetArn] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +DescribeIntegrationsFilterValueList = List[String] + + +class DescribeIntegrationsFilter(TypedDict, total=False): + Name: DescribeIntegrationsFilterName + Values: DescribeIntegrationsFilterValueList + + +DescribeIntegrationsFilterList = List[DescribeIntegrationsFilter] + + +class DescribeIntegrationsMessage(ServiceRequest): + IntegrationArn: Optional[IntegrationArn] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + Filters: Optional[DescribeIntegrationsFilterList] + + +class DescribeLoggingStatusMessage(ServiceRequest): + ClusterIdentifier: String + + +class NodeConfigurationOptionsFilter(TypedDict, total=False): + Name: Optional[NodeConfigurationOptionsFilterName] + Operator: Optional[OperatorType] + Values: Optional[ValueStringList] + + +NodeConfigurationOptionsFilterList = List[NodeConfigurationOptionsFilter] + + +class DescribeNodeConfigurationOptionsMessage(ServiceRequest): + ActionType: ActionType + ClusterIdentifier: Optional[String] + SnapshotIdentifier: Optional[String] + SnapshotArn: Optional[String] + OwnerAccount: Optional[String] + Filters: Optional[NodeConfigurationOptionsFilterList] + Marker: Optional[String] + MaxRecords: Optional[IntegerOptional] + + +class DescribeOrderableClusterOptionsMessage(ServiceRequest): + ClusterVersion: Optional[String] + NodeType: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribePartnersInputMessage(ServiceRequest): + AccountId: PartnerIntegrationAccountId + ClusterIdentifier: PartnerIntegrationClusterIdentifier + DatabaseName: Optional[PartnerIntegrationDatabaseName] + PartnerName: Optional[PartnerIntegrationPartnerName] + + +class PartnerIntegrationInfo(TypedDict, total=False): + DatabaseName: Optional[PartnerIntegrationDatabaseName] + PartnerName: Optional[PartnerIntegrationPartnerName] + Status: Optional[PartnerIntegrationStatus] + StatusMessage: Optional[PartnerIntegrationStatusMessage] + CreatedAt: Optional[TStamp] + UpdatedAt: Optional[TStamp] + + +PartnerIntegrationInfoList = List[PartnerIntegrationInfo] + + +class DescribePartnersOutputMessage(TypedDict, total=False): + PartnerIntegrationInfoList: Optional[PartnerIntegrationInfoList] + + +class DescribeRedshiftIdcApplicationsMessage(ServiceRequest): + RedshiftIdcApplicationArn: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +RedshiftIdcApplicationList = List[RedshiftIdcApplication] + + +class DescribeRedshiftIdcApplicationsResult(TypedDict, total=False): + RedshiftIdcApplications: Optional[RedshiftIdcApplicationList] + Marker: Optional[String] + + +class DescribeReservedNodeExchangeStatusInputMessage(ServiceRequest): + ReservedNodeId: Optional[String] + ReservedNodeExchangeRequestId: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +ReservedNodeExchangeStatusList = List[ReservedNodeExchangeStatus] + + +class DescribeReservedNodeExchangeStatusOutputMessage(TypedDict, total=False): + ReservedNodeExchangeStatusDetails: Optional[ReservedNodeExchangeStatusList] + Marker: Optional[String] + + +class DescribeReservedNodeOfferingsMessage(ServiceRequest): + ReservedNodeOfferingId: Optional[String] + MaxRecords: 
Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeReservedNodesMessage(ServiceRequest): + ReservedNodeId: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeResizeMessage(ServiceRequest): + ClusterIdentifier: String + + +class ScheduledActionFilter(TypedDict, total=False): + Name: ScheduledActionFilterName + Values: ValueStringList + + +ScheduledActionFilterList = List[ScheduledActionFilter] + + +class DescribeScheduledActionsMessage(ServiceRequest): + ScheduledActionName: Optional[String] + TargetActionType: Optional[ScheduledActionTypeValues] + StartTime: Optional[TStamp] + EndTime: Optional[TStamp] + Active: Optional[BooleanOptional] + Filters: Optional[ScheduledActionFilterList] + Marker: Optional[String] + MaxRecords: Optional[IntegerOptional] + + +class DescribeSnapshotCopyGrantsMessage(ServiceRequest): + SnapshotCopyGrantName: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DescribeSnapshotSchedulesMessage(ServiceRequest): + ClusterIdentifier: Optional[String] + ScheduleIdentifier: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + Marker: Optional[String] + MaxRecords: Optional[IntegerOptional] + + +ScheduledSnapshotTimeList = List[TStamp] + + +class SnapshotSchedule(TypedDict, total=False): + ScheduleDefinitions: Optional[ScheduleDefinitionList] + ScheduleIdentifier: Optional[String] + ScheduleDescription: Optional[String] + Tags: Optional[TagList] + NextInvocations: Optional[ScheduledSnapshotTimeList] + AssociatedClusterCount: Optional[IntegerOptional] + AssociatedClusters: Optional[AssociatedClusterList] + + +SnapshotScheduleList = List[SnapshotSchedule] + + +class DescribeSnapshotSchedulesOutputMessage(TypedDict, total=False): + SnapshotSchedules: Optional[SnapshotScheduleList] + Marker: Optional[String] + + +class DescribeTableRestoreStatusMessage(ServiceRequest): + ClusterIdentifier: Optional[String] + TableRestoreRequestId: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class DescribeTagsMessage(ServiceRequest): + ResourceName: Optional[String] + ResourceType: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DescribeUsageLimitsMessage(ServiceRequest): + UsageLimitId: Optional[String] + ClusterIdentifier: Optional[String] + FeatureType: Optional[UsageLimitFeatureType] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + TagKeys: Optional[TagKeyList] + TagValues: Optional[TagValueList] + + +class DisableLoggingMessage(ServiceRequest): + ClusterIdentifier: String + + +class DisableSnapshotCopyMessage(ServiceRequest): + ClusterIdentifier: String + + +class DisableSnapshotCopyResult(TypedDict, total=False): + Cluster: Optional[Cluster] + + +class DisassociateDataShareConsumerMessage(ServiceRequest): + DataShareArn: String + DisassociateEntireAccount: Optional[BooleanOptional] + ConsumerArn: Optional[String] + ConsumerRegion: Optional[String] + + +class SupportedOperation(TypedDict, total=False): + OperationName: Optional[String] + + +SupportedOperationList = List[SupportedOperation] + + +class UpdateTarget(TypedDict, total=False): + MaintenanceTrackName: Optional[String] + DatabaseVersion: Optional[String] + SupportedOperations: Optional[SupportedOperationList] + + 
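
The `Describe*` requests in this block all share the `Marker`/`MaxRecords` pagination convention. A minimal sketch of draining such a paginated call; `describe_clusters` is a hypothetical stand-in for any client function that returns a `ClustersMessage`-shaped dict:

```python
from typing import Callable, Iterator

from localstack.aws.api.redshift import (
    Cluster,
    ClustersMessage,
    DescribeClustersMessage,
)

def iter_clusters(
    describe_clusters: Callable[[DescribeClustersMessage], ClustersMessage],
) -> Iterator[Cluster]:
    """Follow the Marker until the service stops returning one."""
    request = DescribeClustersMessage(MaxRecords=100)
    while True:
        page = describe_clusters(request)
        yield from page.get("Clusters", [])
        marker = page.get("Marker")
        if not marker:
            return
        request["Marker"] = marker
```
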
+EligibleTracksToUpdateList = List[UpdateTarget] +LogTypeList = List[String] + + +class EnableLoggingMessage(ServiceRequest): + ClusterIdentifier: String + BucketName: Optional[String] + S3KeyPrefix: Optional[S3KeyPrefixValue] + LogDestinationType: Optional[LogDestinationType] + LogExports: Optional[LogTypeList] + + +class EnableSnapshotCopyMessage(ServiceRequest): + ClusterIdentifier: String + DestinationRegion: String + RetentionPeriod: Optional[IntegerOptional] + SnapshotCopyGrantName: Optional[String] + ManualSnapshotRetentionPeriod: Optional[IntegerOptional] + + +class EnableSnapshotCopyResult(TypedDict, total=False): + Cluster: Optional[Cluster] + + +class EndpointAccess(TypedDict, total=False): + ClusterIdentifier: Optional[String] + ResourceOwner: Optional[String] + SubnetGroupName: Optional[String] + EndpointStatus: Optional[String] + EndpointName: Optional[String] + EndpointCreateTime: Optional[TStamp] + Port: Optional[Integer] + Address: Optional[String] + VpcSecurityGroups: Optional[VpcSecurityGroupMembershipList] + VpcEndpoint: Optional[VpcEndpoint] + + +EndpointAccesses = List[EndpointAccess] + + +class EndpointAccessList(TypedDict, total=False): + EndpointAccessList: Optional[EndpointAccesses] + Marker: Optional[String] + + +class EndpointAuthorization(TypedDict, total=False): + Grantor: Optional[String] + Grantee: Optional[String] + ClusterIdentifier: Optional[String] + AuthorizeTime: Optional[TStamp] + ClusterStatus: Optional[String] + Status: Optional[AuthorizationStatus] + AllowedAllVPCs: Optional[Boolean] + AllowedVPCs: Optional[VpcIdentifierList] + EndpointCount: Optional[Integer] + + +EndpointAuthorizations = List[EndpointAuthorization] + + +class EndpointAuthorizationList(TypedDict, total=False): + EndpointAuthorizationList: Optional[EndpointAuthorizations] + Marker: Optional[String] + + +class Event(TypedDict, total=False): + SourceIdentifier: Optional[String] + SourceType: Optional[SourceType] + Message: Optional[String] + EventCategories: Optional[EventCategoriesList] + Severity: Optional[String] + Date: Optional[TStamp] + EventId: Optional[String] + + +class EventInfoMap(TypedDict, total=False): + EventId: Optional[String] + EventCategories: Optional[EventCategoriesList] + EventDescription: Optional[String] + Severity: Optional[String] + + +EventInfoMapList = List[EventInfoMap] + + +class EventCategoriesMap(TypedDict, total=False): + SourceType: Optional[String] + Events: Optional[EventInfoMapList] + + +EventCategoriesMapList = List[EventCategoriesMap] + + +class EventCategoriesMessage(TypedDict, total=False): + EventCategoriesMapList: Optional[EventCategoriesMapList] + + +EventList = List[Event] +EventSubscriptionsList = List[EventSubscription] + + +class EventSubscriptionsMessage(TypedDict, total=False): + Marker: Optional[String] + EventSubscriptionsList: Optional[EventSubscriptionsList] + + +class EventsMessage(TypedDict, total=False): + Marker: Optional[String] + Events: Optional[EventList] + + +class FailoverPrimaryComputeInputMessage(ServiceRequest): + ClusterIdentifier: String + + +class FailoverPrimaryComputeResult(TypedDict, total=False): + Cluster: Optional[Cluster] + + +class GetClusterCredentialsMessage(ServiceRequest): + DbUser: String + DbName: Optional[String] + ClusterIdentifier: Optional[String] + DurationSeconds: Optional[IntegerOptional] + AutoCreate: Optional[BooleanOptional] + DbGroups: Optional[DbGroupList] + CustomDomainName: Optional[String] + + +class GetClusterCredentialsWithIAMMessage(ServiceRequest): + DbName: Optional[String] + 
ClusterIdentifier: Optional[String] + DurationSeconds: Optional[IntegerOptional] + CustomDomainName: Optional[String] + + +class GetReservedNodeExchangeConfigurationOptionsInputMessage(ServiceRequest): + ActionType: ReservedNodeExchangeActionType + ClusterIdentifier: Optional[String] + SnapshotIdentifier: Optional[String] + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +class ReservedNodeOffering(TypedDict, total=False): + ReservedNodeOfferingId: Optional[String] + NodeType: Optional[String] + Duration: Optional[Integer] + FixedPrice: Optional[Double] + UsagePrice: Optional[Double] + CurrencyCode: Optional[String] + OfferingType: Optional[String] + RecurringCharges: Optional[RecurringChargeList] + ReservedNodeOfferingType: Optional[ReservedNodeOfferingType] + + +class ReservedNodeConfigurationOption(TypedDict, total=False): + SourceReservedNode: Optional[ReservedNode] + TargetReservedNodeCount: Optional[Integer] + TargetReservedNodeOffering: Optional[ReservedNodeOffering] + + +ReservedNodeConfigurationOptionList = List[ReservedNodeConfigurationOption] + + +class GetReservedNodeExchangeConfigurationOptionsOutputMessage(TypedDict, total=False): + Marker: Optional[String] + ReservedNodeConfigurationOptionList: Optional[ReservedNodeConfigurationOptionList] + + +class GetReservedNodeExchangeOfferingsInputMessage(ServiceRequest): + ReservedNodeId: String + MaxRecords: Optional[IntegerOptional] + Marker: Optional[String] + + +ReservedNodeOfferingList = List[ReservedNodeOffering] + + +class GetReservedNodeExchangeOfferingsOutputMessage(TypedDict, total=False): + Marker: Optional[String] + ReservedNodeOfferings: Optional[ReservedNodeOfferingList] + + +class GetResourcePolicyMessage(ServiceRequest): + ResourceArn: String + + +class ResourcePolicy(TypedDict, total=False): + ResourceArn: Optional[String] + Policy: Optional[String] + + +class GetResourcePolicyResult(TypedDict, total=False): + ResourcePolicy: Optional[ResourcePolicy] + + +HsmClientCertificateList = List[HsmClientCertificate] + + +class HsmClientCertificateMessage(TypedDict, total=False): + Marker: Optional[String] + HsmClientCertificates: Optional[HsmClientCertificateList] + + +HsmConfigurationList = List[HsmConfiguration] + + +class HsmConfigurationMessage(TypedDict, total=False): + Marker: Optional[String] + HsmConfigurations: Optional[HsmConfigurationList] + + +ImportTablesCompleted = List[String] +ImportTablesInProgress = List[String] +ImportTablesNotStarted = List[String] + + +class IntegrationError(TypedDict, total=False): + ErrorCode: String + ErrorMessage: Optional[String] + + +IntegrationErrorList = List[IntegrationError] + + +class InboundIntegration(TypedDict, total=False): + IntegrationArn: Optional[InboundIntegrationArn] + SourceArn: Optional[String] + TargetArn: Optional[TargetArn] + Status: Optional[ZeroETLIntegrationStatus] + Errors: Optional[IntegrationErrorList] + CreateTime: Optional[TStamp] + + +InboundIntegrationList = List[InboundIntegration] + + +class InboundIntegrationsMessage(TypedDict, total=False): + Marker: Optional[String] + InboundIntegrations: Optional[InboundIntegrationList] + + +class Integration(TypedDict, total=False): + IntegrationArn: Optional[IntegrationArn] + IntegrationName: Optional[IntegrationName] + SourceArn: Optional[SourceArn] + TargetArn: Optional[TargetArn] + Status: Optional[ZeroETLIntegrationStatus] + Errors: Optional[IntegrationErrorList] + CreateTime: Optional[TStamp] + Description: Optional[Description] + KMSKeyId: Optional[String] + 
+    AdditionalEncryptionContext: Optional[EncryptionContextMap]
+    Tags: Optional[TagList]
+
+
+IntegrationList = List[Integration]
+
+
+class IntegrationsMessage(TypedDict, total=False):
+    Marker: Optional[String]
+    Integrations: Optional[IntegrationList]
+
+
+class ListRecommendationsMessage(ServiceRequest):
+    ClusterIdentifier: Optional[String]
+    NamespaceArn: Optional[String]
+    MaxRecords: Optional[IntegerOptional]
+    Marker: Optional[String]
+
+
+class ReferenceLink(TypedDict, total=False):
+    Text: Optional[String]
+    Link: Optional[String]
+
+
+ReferenceLinkList = List[ReferenceLink]
+
+
+class RecommendedAction(TypedDict, total=False):
+    Text: Optional[String]
+    Database: Optional[String]
+    Command: Optional[String]
+    Type: Optional[RecommendedActionType]
+
+
+RecommendedActionList = List[RecommendedAction]
+
+
+class Recommendation(TypedDict, total=False):
+    Id: Optional[String]
+    ClusterIdentifier: Optional[String]
+    NamespaceArn: Optional[String]
+    CreatedAt: Optional[TStamp]
+    RecommendationType: Optional[String]
+    Title: Optional[String]
+    Description: Optional[String]
+    Observation: Optional[String]
+    ImpactRanking: Optional[ImpactRankingType]
+    RecommendationText: Optional[String]
+    RecommendedActions: Optional[RecommendedActionList]
+    ReferenceLinks: Optional[ReferenceLinkList]
+
+
+RecommendationList = List[Recommendation]
+
+
+class ListRecommendationsResult(TypedDict, total=False):
+    Recommendations: Optional[RecommendationList]
+    Marker: Optional[String]
+
+
+class LoggingStatus(TypedDict, total=False):
+    LoggingEnabled: Optional[Boolean]
+    BucketName: Optional[String]
+    S3KeyPrefix: Optional[S3KeyPrefixValue]
+    LastSuccessfulDeliveryTime: Optional[TStamp]
+    LastFailureTime: Optional[TStamp]
+    LastFailureMessage: Optional[String]
+    LogDestinationType: Optional[LogDestinationType]
+    LogExports: Optional[LogTypeList]
+
+
+class MaintenanceTrack(TypedDict, total=False):
+    MaintenanceTrackName: Optional[String]
+    DatabaseVersion: Optional[String]
+    UpdateTargets: Optional[EligibleTracksToUpdateList]
+
+
+class ModifyAquaInputMessage(ServiceRequest):
+    ClusterIdentifier: String
+    AquaConfigurationStatus: Optional[AquaConfigurationStatus]
+
+
+class ModifyAquaOutputMessage(TypedDict, total=False):
+    AquaConfiguration: Optional[AquaConfiguration]
+
+
+class ModifyAuthenticationProfileMessage(ServiceRequest):
+    AuthenticationProfileName: AuthenticationProfileNameString
+    AuthenticationProfileContent: String
+
+
+class ModifyAuthenticationProfileResult(TypedDict, total=False):
+    AuthenticationProfileName: Optional[AuthenticationProfileNameString]
+    AuthenticationProfileContent: Optional[String]
+
+
+class ModifyClusterDbRevisionMessage(ServiceRequest):
+    ClusterIdentifier: String
+    RevisionTarget: String
+
+
+class ModifyClusterDbRevisionResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class ModifyClusterIamRolesMessage(ServiceRequest):
+    ClusterIdentifier: String
+    AddIamRoles: Optional[IamRoleArnList]
+    RemoveIamRoles: Optional[IamRoleArnList]
+    DefaultIamRoleArn: Optional[String]
+
+
+class ModifyClusterIamRolesResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class ModifyClusterMaintenanceMessage(ServiceRequest):
+    ClusterIdentifier: String
+    DeferMaintenance: Optional[BooleanOptional]
+    DeferMaintenanceIdentifier: Optional[String]
+    DeferMaintenanceStartTime: Optional[TStamp]
+    DeferMaintenanceEndTime: Optional[TStamp]
+    DeferMaintenanceDuration: Optional[IntegerOptional]
+
+
+class ModifyClusterMaintenanceResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class ModifyClusterMessage(ServiceRequest):
+    ClusterIdentifier: String
+    ClusterType: Optional[String]
+    NodeType: Optional[String]
+    NumberOfNodes: Optional[IntegerOptional]
+    ClusterSecurityGroups: Optional[ClusterSecurityGroupNameList]
+    VpcSecurityGroupIds: Optional[VpcSecurityGroupIdList]
+    MasterUserPassword: Optional[SensitiveString]
+    ClusterParameterGroupName: Optional[String]
+    AutomatedSnapshotRetentionPeriod: Optional[IntegerOptional]
+    ManualSnapshotRetentionPeriod: Optional[IntegerOptional]
+    PreferredMaintenanceWindow: Optional[String]
+    ClusterVersion: Optional[String]
+    AllowVersionUpgrade: Optional[BooleanOptional]
+    HsmClientCertificateIdentifier: Optional[String]
+    HsmConfigurationIdentifier: Optional[String]
+    NewClusterIdentifier: Optional[String]
+    PubliclyAccessible: Optional[BooleanOptional]
+    ElasticIp: Optional[String]
+    EnhancedVpcRouting: Optional[BooleanOptional]
+    MaintenanceTrackName: Optional[String]
+    Encrypted: Optional[BooleanOptional]
+    KmsKeyId: Optional[String]
+    AvailabilityZoneRelocation: Optional[BooleanOptional]
+    AvailabilityZone: Optional[String]
+    Port: Optional[IntegerOptional]
+    ManageMasterPassword: Optional[BooleanOptional]
+    MasterPasswordSecretKmsKeyId: Optional[String]
+    IpAddressType: Optional[String]
+    MultiAZ: Optional[BooleanOptional]
+
+
+class ModifyClusterParameterGroupMessage(ServiceRequest):
+    ParameterGroupName: String
+    Parameters: ParametersList
+
+
+class ModifyClusterResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class ModifyClusterSnapshotMessage(ServiceRequest):
+    SnapshotIdentifier: String
+    ManualSnapshotRetentionPeriod: Optional[IntegerOptional]
+    Force: Optional[Boolean]
+
+
+class ModifyClusterSnapshotResult(TypedDict, total=False):
+    Snapshot: Optional[Snapshot]
+
+
+class ModifyClusterSnapshotScheduleMessage(ServiceRequest):
+    ClusterIdentifier: String
+    ScheduleIdentifier: Optional[String]
+    DisassociateSchedule: Optional[BooleanOptional]
+
+
+class ModifyClusterSubnetGroupMessage(ServiceRequest):
+    ClusterSubnetGroupName: String
+    Description: Optional[String]
+    SubnetIds: SubnetIdentifierList
+
+
+class ModifyClusterSubnetGroupResult(TypedDict, total=False):
+    ClusterSubnetGroup: Optional[ClusterSubnetGroup]
+
+
+class ModifyCustomDomainAssociationMessage(ServiceRequest):
+    CustomDomainName: CustomDomainNameString
+    CustomDomainCertificateArn: CustomDomainCertificateArnString
+    ClusterIdentifier: String
+
+
+class ModifyCustomDomainAssociationResult(TypedDict, total=False):
+    CustomDomainName: Optional[CustomDomainNameString]
+    CustomDomainCertificateArn: Optional[CustomDomainCertificateArnString]
+    ClusterIdentifier: Optional[String]
+    CustomDomainCertExpiryTime: Optional[String]
+
+
+class ModifyEndpointAccessMessage(ServiceRequest):
+    EndpointName: String
+    VpcSecurityGroupIds: Optional[VpcSecurityGroupIdList]
+
+
+class ModifyEventSubscriptionMessage(ServiceRequest):
+    SubscriptionName: String
+    SnsTopicArn: Optional[String]
+    SourceType: Optional[String]
+    SourceIds: Optional[SourceIdsList]
+    EventCategories: Optional[EventCategoriesList]
+    Severity: Optional[String]
+    Enabled: Optional[BooleanOptional]
+
+
+class ModifyEventSubscriptionResult(TypedDict, total=False):
+    EventSubscription: Optional[EventSubscription]
+
+
+class ModifyIntegrationMessage(ServiceRequest):
+    IntegrationArn: IntegrationArn
+    Description: Optional[IntegrationDescription]
+    IntegrationName: Optional[IntegrationName]
+
+
+class ModifyRedshiftIdcApplicationMessage(ServiceRequest):
+    RedshiftIdcApplicationArn: String
+    IdentityNamespace: Optional[IdentityNamespaceString]
+    IamRoleArn: Optional[String]
+    IdcDisplayName: Optional[IdcDisplayNameString]
+    AuthorizedTokenIssuerList: Optional[AuthorizedTokenIssuerList]
+    ServiceIntegrations: Optional[ServiceIntegrationList]
+
+
+class ModifyRedshiftIdcApplicationResult(TypedDict, total=False):
+    RedshiftIdcApplication: Optional[RedshiftIdcApplication]
+
+
+class ModifyScheduledActionMessage(ServiceRequest):
+    ScheduledActionName: String
+    TargetAction: Optional[ScheduledActionType]
+    Schedule: Optional[String]
+    IamRole: Optional[String]
+    ScheduledActionDescription: Optional[String]
+    StartTime: Optional[TStamp]
+    EndTime: Optional[TStamp]
+    Enable: Optional[BooleanOptional]
+
+
+class ModifySnapshotCopyRetentionPeriodMessage(ServiceRequest):
+    ClusterIdentifier: String
+    RetentionPeriod: Integer
+    Manual: Optional[Boolean]
+
+
+class ModifySnapshotCopyRetentionPeriodResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class ModifySnapshotScheduleMessage(ServiceRequest):
+    ScheduleIdentifier: String
+    ScheduleDefinitions: ScheduleDefinitionList
+
+
+class ModifyUsageLimitMessage(ServiceRequest):
+    UsageLimitId: String
+    Amount: Optional[LongOptional]
+    BreachAction: Optional[UsageLimitBreachAction]
+
+
+class NodeConfigurationOption(TypedDict, total=False):
+    NodeType: Optional[String]
+    NumberOfNodes: Optional[Integer]
+    EstimatedDiskUtilizationPercent: Optional[DoubleOptional]
+    Mode: Optional[Mode]
+
+
+NodeConfigurationOptionList = List[NodeConfigurationOption]
+
+
+class NodeConfigurationOptionsMessage(TypedDict, total=False):
+    NodeConfigurationOptionList: Optional[NodeConfigurationOptionList]
+    Marker: Optional[String]
+
+
+class OrderableClusterOption(TypedDict, total=False):
+    ClusterVersion: Optional[String]
+    ClusterType: Optional[String]
+    NodeType: Optional[String]
+    AvailabilityZones: Optional[AvailabilityZoneList]
+
+
+OrderableClusterOptionsList = List[OrderableClusterOption]
+
+
+class OrderableClusterOptionsMessage(TypedDict, total=False):
+    OrderableClusterOptions: Optional[OrderableClusterOptionsList]
+    Marker: Optional[String]
+
+
+class PartnerIntegrationInputMessage(ServiceRequest):
+    AccountId: PartnerIntegrationAccountId
+    ClusterIdentifier: PartnerIntegrationClusterIdentifier
+    DatabaseName: PartnerIntegrationDatabaseName
+    PartnerName: PartnerIntegrationPartnerName
+
+
+class PartnerIntegrationOutputMessage(TypedDict, total=False):
+    DatabaseName: Optional[PartnerIntegrationDatabaseName]
+    PartnerName: Optional[PartnerIntegrationPartnerName]
+
+
+class PauseClusterResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class PurchaseReservedNodeOfferingMessage(ServiceRequest):
+    ReservedNodeOfferingId: String
+    NodeCount: Optional[IntegerOptional]
+
+
+class PurchaseReservedNodeOfferingResult(TypedDict, total=False):
+    ReservedNode: Optional[ReservedNode]
+
+
+class PutResourcePolicyMessage(ServiceRequest):
+    ResourceArn: String
+    Policy: String
+
+
+class PutResourcePolicyResult(TypedDict, total=False):
+    ResourcePolicy: Optional[ResourcePolicy]
+
+
+class RebootClusterMessage(ServiceRequest):
+    ClusterIdentifier: String
+
+
+class RebootClusterResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class RegisterNamespaceInputMessage(ServiceRequest):
+    NamespaceIdentifier: NamespaceIdentifierUnion
+    ConsumerIdentifiers: ConsumerIdentifierList
+
+
+class RegisterNamespaceOutputMessage(TypedDict, total=False):
+    Status: Optional[NamespaceRegistrationStatus]
+
+
+class RejectDataShareMessage(ServiceRequest):
+    DataShareArn: String
+
+
+ReservedNodeList = List[ReservedNode]
+
+
+class ReservedNodeOfferingsMessage(TypedDict, total=False):
+    Marker: Optional[String]
+    ReservedNodeOfferings: Optional[ReservedNodeOfferingList]
+
+
+class ReservedNodesMessage(TypedDict, total=False):
+    Marker: Optional[String]
+    ReservedNodes: Optional[ReservedNodeList]
+
+
+class ResetClusterParameterGroupMessage(ServiceRequest):
+    ParameterGroupName: String
+    ResetAllParameters: Optional[Boolean]
+    Parameters: Optional[ParametersList]
+
+
+class ResizeClusterResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class ResizeProgressMessage(TypedDict, total=False):
+    TargetNodeType: Optional[String]
+    TargetNumberOfNodes: Optional[IntegerOptional]
+    TargetClusterType: Optional[String]
+    Status: Optional[String]
+    ImportTablesCompleted: Optional[ImportTablesCompleted]
+    ImportTablesInProgress: Optional[ImportTablesInProgress]
+    ImportTablesNotStarted: Optional[ImportTablesNotStarted]
+    AvgResizeRateInMegaBytesPerSecond: Optional[DoubleOptional]
+    TotalResizeDataInMegaBytes: Optional[LongOptional]
+    ProgressInMegaBytes: Optional[LongOptional]
+    ElapsedTimeInSeconds: Optional[LongOptional]
+    EstimatedTimeToCompletionInSeconds: Optional[LongOptional]
+    ResizeType: Optional[String]
+    Message: Optional[String]
+    TargetEncryptionType: Optional[String]
+    DataTransferProgressPercent: Optional[DoubleOptional]
+
+
+class RestoreFromClusterSnapshotMessage(ServiceRequest):
+    ClusterIdentifier: String
+    SnapshotIdentifier: Optional[String]
+    SnapshotArn: Optional[String]
+    SnapshotClusterIdentifier: Optional[String]
+    Port: Optional[IntegerOptional]
+    AvailabilityZone: Optional[String]
+    AllowVersionUpgrade: Optional[BooleanOptional]
+    ClusterSubnetGroupName: Optional[String]
+    PubliclyAccessible: Optional[BooleanOptional]
+    OwnerAccount: Optional[String]
+    HsmClientCertificateIdentifier: Optional[String]
+    HsmConfigurationIdentifier: Optional[String]
+    ElasticIp: Optional[String]
+    ClusterParameterGroupName: Optional[String]
+    ClusterSecurityGroups: Optional[ClusterSecurityGroupNameList]
+    VpcSecurityGroupIds: Optional[VpcSecurityGroupIdList]
+    PreferredMaintenanceWindow: Optional[String]
+    AutomatedSnapshotRetentionPeriod: Optional[IntegerOptional]
+    ManualSnapshotRetentionPeriod: Optional[IntegerOptional]
+    KmsKeyId: Optional[String]
+    NodeType: Optional[String]
+    EnhancedVpcRouting: Optional[BooleanOptional]
+    AdditionalInfo: Optional[String]
+    IamRoles: Optional[IamRoleArnList]
+    MaintenanceTrackName: Optional[String]
+    SnapshotScheduleIdentifier: Optional[String]
+    NumberOfNodes: Optional[IntegerOptional]
+    AvailabilityZoneRelocation: Optional[BooleanOptional]
+    AquaConfigurationStatus: Optional[AquaConfigurationStatus]
+    DefaultIamRoleArn: Optional[String]
+    ReservedNodeId: Optional[String]
+    TargetReservedNodeOfferingId: Optional[String]
+    Encrypted: Optional[BooleanOptional]
+    ManageMasterPassword: Optional[BooleanOptional]
+    MasterPasswordSecretKmsKeyId: Optional[String]
+    IpAddressType: Optional[String]
+    MultiAZ: Optional[BooleanOptional]
+
+
+class RestoreFromClusterSnapshotResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class RestoreTableFromClusterSnapshotMessage(ServiceRequest):
+    ClusterIdentifier: String
+    SnapshotIdentifier: String
+    SourceDatabaseName: String
+    SourceSchemaName: Optional[String]
+    SourceTableName: String
+    TargetDatabaseName: Optional[String]
+    TargetSchemaName: Optional[String]
+    NewTableName: String
+    EnableCaseSensitiveIdentifier: Optional[BooleanOptional]
+
+
+class TableRestoreStatus(TypedDict, total=False):
+    TableRestoreRequestId: Optional[String]
+    Status: Optional[TableRestoreStatusType]
+    Message: Optional[String]
+    RequestTime: Optional[TStamp]
+    ProgressInMegaBytes: Optional[LongOptional]
+    TotalDataInMegaBytes: Optional[LongOptional]
+    ClusterIdentifier: Optional[String]
+    SnapshotIdentifier: Optional[String]
+    SourceDatabaseName: Optional[String]
+    SourceSchemaName: Optional[String]
+    SourceTableName: Optional[String]
+    TargetDatabaseName: Optional[String]
+    TargetSchemaName: Optional[String]
+    NewTableName: Optional[String]
+
+
+class RestoreTableFromClusterSnapshotResult(TypedDict, total=False):
+    TableRestoreStatus: Optional[TableRestoreStatus]
+
+
+class ResumeClusterResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+class RevokeClusterSecurityGroupIngressMessage(ServiceRequest):
+    ClusterSecurityGroupName: String
+    CIDRIP: Optional[String]
+    EC2SecurityGroupName: Optional[String]
+    EC2SecurityGroupOwnerId: Optional[String]
+
+
+class RevokeClusterSecurityGroupIngressResult(TypedDict, total=False):
+    ClusterSecurityGroup: Optional[ClusterSecurityGroup]
+
+
+class RevokeEndpointAccessMessage(ServiceRequest):
+    ClusterIdentifier: Optional[String]
+    Account: Optional[String]
+    VpcIds: Optional[VpcIdentifierList]
+    Force: Optional[Boolean]
+
+
+class RevokeSnapshotAccessMessage(ServiceRequest):
+    SnapshotIdentifier: Optional[String]
+    SnapshotArn: Optional[String]
+    SnapshotClusterIdentifier: Optional[String]
+    AccountWithRestoreAccess: String
+
+
+class RevokeSnapshotAccessResult(TypedDict, total=False):
+    Snapshot: Optional[Snapshot]
+
+
+class RotateEncryptionKeyMessage(ServiceRequest):
+    ClusterIdentifier: String
+
+
+class RotateEncryptionKeyResult(TypedDict, total=False):
+    Cluster: Optional[Cluster]
+
+
+ScheduledActionTimeList = List[TStamp]
+
+
+class ScheduledAction(TypedDict, total=False):
+    ScheduledActionName: Optional[String]
+    TargetAction: Optional[ScheduledActionType]
+    Schedule: Optional[String]
+    IamRole: Optional[String]
+    ScheduledActionDescription: Optional[String]
+    State: Optional[ScheduledActionState]
+    NextInvocations: Optional[ScheduledActionTimeList]
+    StartTime: Optional[TStamp]
+    EndTime: Optional[TStamp]
+
+
+ScheduledActionList = List[ScheduledAction]
+
+
+class ScheduledActionsMessage(TypedDict, total=False):
+    Marker: Optional[String]
+    ScheduledActions: Optional[ScheduledActionList]
+
+
+SnapshotCopyGrantList = List[SnapshotCopyGrant]
+
+
+class SnapshotCopyGrantMessage(TypedDict, total=False):
+    Marker: Optional[String]
+    SnapshotCopyGrants: Optional[SnapshotCopyGrantList]
+
+
+SnapshotList = List[Snapshot]
+
+
+class SnapshotMessage(TypedDict, total=False):
+    Marker: Optional[String]
+    Snapshots: Optional[SnapshotList]
+
+
+TableRestoreStatusList = List[TableRestoreStatus]
+
+
+class TableRestoreStatusMessage(TypedDict, total=False):
+    TableRestoreStatusDetails: Optional[TableRestoreStatusList]
+    Marker: Optional[String]
+
+
+class TaggedResource(TypedDict, total=False):
+    Tag: Optional[Tag]
+    ResourceName: Optional[String]
+    ResourceType: Optional[String]
+
+
+TaggedResourceList = List[TaggedResource]
+
+
+class TaggedResourceListMessage(TypedDict, total=False):
+    TaggedResources: Optional[TaggedResourceList]
+    Marker: Optional[String]
+
+
+TrackList = List[MaintenanceTrack]
+
+
+class TrackListMessage(TypedDict, total=False):
+    MaintenanceTracks: Optional[TrackList]
+    Marker: Optional[String]
+
+
+class UpdatePartnerStatusInputMessage(ServiceRequest):
+    AccountId: PartnerIntegrationAccountId
+    ClusterIdentifier: PartnerIntegrationClusterIdentifier
+    DatabaseName: PartnerIntegrationDatabaseName
+    PartnerName: PartnerIntegrationPartnerName
+    Status: PartnerIntegrationStatus
+    StatusMessage: Optional[PartnerIntegrationStatusMessage]
+
+
+class UsageLimit(TypedDict, total=False):
+    UsageLimitId: Optional[String]
+    ClusterIdentifier: Optional[String]
+    FeatureType: Optional[UsageLimitFeatureType]
+    LimitType: Optional[UsageLimitLimitType]
+    Amount: Optional[Long]
+    Period: Optional[UsageLimitPeriod]
+    BreachAction: Optional[UsageLimitBreachAction]
+    Tags: Optional[TagList]
+
+
+UsageLimits = List[UsageLimit]
+
+
+class UsageLimitList(TypedDict, total=False):
+    UsageLimits: Optional[UsageLimits]
+    Marker: Optional[String]
+
+
+class RedshiftApi:
+    service = "redshift"
+    version = "2012-12-01"
+
+    @handler("AcceptReservedNodeExchange")
+    def accept_reserved_node_exchange(
+        self,
+        context: RequestContext,
+        reserved_node_id: String,
+        target_reserved_node_offering_id: String,
+        **kwargs,
+    ) -> AcceptReservedNodeExchangeOutputMessage:
+        raise NotImplementedError
+
+    @handler("AddPartner")
+    def add_partner(
+        self,
+        context: RequestContext,
+        account_id: PartnerIntegrationAccountId,
+        cluster_identifier: PartnerIntegrationClusterIdentifier,
+        database_name: PartnerIntegrationDatabaseName,
+        partner_name: PartnerIntegrationPartnerName,
+        **kwargs,
+    ) -> PartnerIntegrationOutputMessage:
+        raise NotImplementedError
+
+    @handler("AssociateDataShareConsumer")
+    def associate_data_share_consumer(
+        self,
+        context: RequestContext,
+        data_share_arn: String,
+        associate_entire_account: BooleanOptional | None = None,
+        consumer_arn: String | None = None,
+        consumer_region: String | None = None,
+        allow_writes: BooleanOptional | None = None,
+        **kwargs,
+    ) -> DataShare:
+        raise NotImplementedError
+
+    @handler("AuthorizeClusterSecurityGroupIngress")
+    def authorize_cluster_security_group_ingress(
+        self,
+        context: RequestContext,
+        cluster_security_group_name: String,
+        cidrip: String | None = None,
+        ec2_security_group_name: String | None = None,
+        ec2_security_group_owner_id: String | None = None,
+        **kwargs,
+    ) -> AuthorizeClusterSecurityGroupIngressResult:
+        raise NotImplementedError
+
+    @handler("AuthorizeDataShare")
+    def authorize_data_share(
+        self,
+        context: RequestContext,
+        data_share_arn: String,
+        consumer_identifier: String,
+        allow_writes: BooleanOptional | None = None,
+        **kwargs,
+    ) -> DataShare:
+        raise NotImplementedError
+
+    @handler("AuthorizeEndpointAccess")
+    def authorize_endpoint_access(
+        self,
+        context: RequestContext,
+        account: String,
+        cluster_identifier: String | None = None,
+        vpc_ids: VpcIdentifierList | None = None,
+        **kwargs,
+    ) -> EndpointAuthorization:
+        raise NotImplementedError
+
+    @handler("AuthorizeSnapshotAccess")
+    def authorize_snapshot_access(
+        self,
+        context: RequestContext,
+        account_with_restore_access: String,
+        snapshot_identifier: String | None = None,
+        snapshot_arn: String | None = None,
+        snapshot_cluster_identifier: String | None = None,
+        **kwargs,
+    ) -> AuthorizeSnapshotAccessResult:
+        raise NotImplementedError
+
+    @handler("BatchDeleteClusterSnapshots")
+    def batch_delete_cluster_snapshots(
+        self, context: RequestContext, identifiers: DeleteClusterSnapshotMessageList, **kwargs
+    ) -> BatchDeleteClusterSnapshotsResult:
+        raise NotImplementedError
+
+    @handler("BatchModifyClusterSnapshots")
+    def batch_modify_cluster_snapshots(
+        self,
+        context: RequestContext,
+        snapshot_identifier_list: SnapshotIdentifierList,
+        manual_snapshot_retention_period: IntegerOptional | None = None,
+        force: Boolean | None = None,
+        **kwargs,
+    ) -> BatchModifyClusterSnapshotsOutputMessage:
+        raise NotImplementedError
+
+    @handler("CancelResize")
+    def cancel_resize(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> ResizeProgressMessage:
+        raise NotImplementedError
+
+    @handler("CopyClusterSnapshot")
+    def copy_cluster_snapshot(
+        self,
+        context: RequestContext,
+        source_snapshot_identifier: String,
+        target_snapshot_identifier: String,
+        source_snapshot_cluster_identifier: String | None = None,
+        manual_snapshot_retention_period: IntegerOptional | None = None,
+        **kwargs,
+    ) -> CopyClusterSnapshotResult:
+        raise NotImplementedError
+
+    @handler("CreateAuthenticationProfile")
+    def create_authentication_profile(
+        self,
+        context: RequestContext,
+        authentication_profile_name: AuthenticationProfileNameString,
+        authentication_profile_content: String,
+        **kwargs,
+    ) -> CreateAuthenticationProfileResult:
+        raise NotImplementedError
+
+    @handler("CreateCluster")
+    def create_cluster(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        node_type: String,
+        master_username: String,
+        db_name: String | None = None,
+        cluster_type: String | None = None,
+        master_user_password: SensitiveString | None = None,
+        cluster_security_groups: ClusterSecurityGroupNameList | None = None,
+        vpc_security_group_ids: VpcSecurityGroupIdList | None = None,
+        cluster_subnet_group_name: String | None = None,
+        availability_zone: String | None = None,
+        preferred_maintenance_window: String | None = None,
+        cluster_parameter_group_name: String | None = None,
+        automated_snapshot_retention_period: IntegerOptional | None = None,
+        manual_snapshot_retention_period: IntegerOptional | None = None,
+        port: IntegerOptional | None = None,
+        cluster_version: String | None = None,
+        allow_version_upgrade: BooleanOptional | None = None,
+        number_of_nodes: IntegerOptional | None = None,
+        publicly_accessible: BooleanOptional | None = None,
+        encrypted: BooleanOptional | None = None,
+        hsm_client_certificate_identifier: String | None = None,
+        hsm_configuration_identifier: String | None = None,
+        elastic_ip: String | None = None,
+        tags: TagList | None = None,
+        kms_key_id: String | None = None,
+        enhanced_vpc_routing: BooleanOptional | None = None,
+        additional_info: String | None = None,
+        iam_roles: IamRoleArnList | None = None,
+        maintenance_track_name: String | None = None,
+        snapshot_schedule_identifier: String | None = None,
+        availability_zone_relocation: BooleanOptional | None = None,
+        aqua_configuration_status: AquaConfigurationStatus | None = None,
+        default_iam_role_arn: String | None = None,
+        load_sample_data: String | None = None,
+        manage_master_password: BooleanOptional | None = None,
+        master_password_secret_kms_key_id: String | None = None,
+        ip_address_type: String | None = None,
+        multi_az: BooleanOptional | None = None,
+        redshift_idc_application_arn: String | None = None,
+        **kwargs,
+    ) -> CreateClusterResult:
+        raise NotImplementedError
+
+    @handler("CreateClusterParameterGroup")
+    def create_cluster_parameter_group(
+        self,
+        context: RequestContext,
+        parameter_group_name: String,
+        parameter_group_family: String,
+        description: String,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateClusterParameterGroupResult:
+        raise NotImplementedError
+
+    @handler("CreateClusterSecurityGroup")
+    def create_cluster_security_group(
+        self,
+        context: RequestContext,
+        cluster_security_group_name: String,
+        description: String,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateClusterSecurityGroupResult:
+        raise NotImplementedError
+
+    @handler("CreateClusterSnapshot")
+    def create_cluster_snapshot(
+        self,
+        context: RequestContext,
+        snapshot_identifier: String,
+        cluster_identifier: String,
+        manual_snapshot_retention_period: IntegerOptional | None = None,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateClusterSnapshotResult:
+        raise NotImplementedError
+
+    @handler("CreateClusterSubnetGroup")
+    def create_cluster_subnet_group(
+        self,
+        context: RequestContext,
+        cluster_subnet_group_name: String,
+        description: String,
+        subnet_ids: SubnetIdentifierList,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateClusterSubnetGroupResult:
+        raise NotImplementedError
+
+    @handler("CreateCustomDomainAssociation")
+    def create_custom_domain_association(
+        self,
+        context: RequestContext,
+        custom_domain_name: CustomDomainNameString,
+        custom_domain_certificate_arn: CustomDomainCertificateArnString,
+        cluster_identifier: String,
+        **kwargs,
+    ) -> CreateCustomDomainAssociationResult:
+        raise NotImplementedError
+
+    @handler("CreateEndpointAccess")
+    def create_endpoint_access(
+        self,
+        context: RequestContext,
+        endpoint_name: String,
+        subnet_group_name: String,
+        cluster_identifier: String | None = None,
+        resource_owner: String | None = None,
+        vpc_security_group_ids: VpcSecurityGroupIdList | None = None,
+        **kwargs,
+    ) -> EndpointAccess:
+        raise NotImplementedError
+
+    @handler("CreateEventSubscription")
+    def create_event_subscription(
+        self,
+        context: RequestContext,
+        subscription_name: String,
+        sns_topic_arn: String,
+        source_type: String | None = None,
+        source_ids: SourceIdsList | None = None,
+        event_categories: EventCategoriesList | None = None,
+        severity: String | None = None,
+        enabled: BooleanOptional | None = None,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateEventSubscriptionResult:
+        raise NotImplementedError
+
+    @handler("CreateHsmClientCertificate")
+    def create_hsm_client_certificate(
+        self,
+        context: RequestContext,
+        hsm_client_certificate_identifier: String,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateHsmClientCertificateResult:
+        raise NotImplementedError
+
+    @handler("CreateHsmConfiguration")
+    def create_hsm_configuration(
+        self,
+        context: RequestContext,
+        hsm_configuration_identifier: String,
+        description: String,
+        hsm_ip_address: String,
+        hsm_partition_name: String,
+        hsm_partition_password: String,
+        hsm_server_public_certificate: String,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateHsmConfigurationResult:
+        raise NotImplementedError
+
+    @handler("CreateIntegration")
+    def create_integration(
+        self,
+        context: RequestContext,
+        source_arn: SourceArn,
+        target_arn: TargetArn,
+        integration_name: IntegrationName,
+        kms_key_id: String | None = None,
+        tag_list: TagList | None = None,
+        additional_encryption_context: EncryptionContextMap | None = None,
+        description: IntegrationDescription | None = None,
+        **kwargs,
+    ) -> Integration:
+        raise NotImplementedError
+
+    @handler("CreateRedshiftIdcApplication")
+    def create_redshift_idc_application(
+        self,
+        context: RequestContext,
+        idc_instance_arn: String,
+        redshift_idc_application_name: RedshiftIdcApplicationName,
+        idc_display_name: IdcDisplayNameString,
+        iam_role_arn: String,
+        identity_namespace: IdentityNamespaceString | None = None,
+        authorized_token_issuer_list: AuthorizedTokenIssuerList | None = None,
+        service_integrations: ServiceIntegrationList | None = None,
+        **kwargs,
+    ) -> CreateRedshiftIdcApplicationResult:
+        raise NotImplementedError
+
+    @handler("CreateScheduledAction")
+    def create_scheduled_action(
+        self,
+        context: RequestContext,
+        scheduled_action_name: String,
+        target_action: ScheduledActionType,
+        schedule: String,
+        iam_role: String,
+        scheduled_action_description: String | None = None,
+        start_time: TStamp | None = None,
+        end_time: TStamp | None = None,
+        enable: BooleanOptional | None = None,
+        **kwargs,
+    ) -> ScheduledAction:
+        raise NotImplementedError
+
+    @handler("CreateSnapshotCopyGrant")
+    def create_snapshot_copy_grant(
+        self,
+        context: RequestContext,
+        snapshot_copy_grant_name: String,
+        kms_key_id: String | None = None,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateSnapshotCopyGrantResult:
+        raise NotImplementedError
+
+    @handler("CreateSnapshotSchedule")
+    def create_snapshot_schedule(
+        self,
+        context: RequestContext,
+        schedule_definitions: ScheduleDefinitionList | None = None,
+        schedule_identifier: String | None = None,
+        schedule_description: String | None = None,
+        tags: TagList | None = None,
+        dry_run: BooleanOptional | None = None,
+        next_invocations: IntegerOptional | None = None,
+        **kwargs,
+    ) -> SnapshotSchedule:
+        raise NotImplementedError
+
+    @handler("CreateTags")
+    def create_tags(
+        self, context: RequestContext, resource_name: String, tags: TagList, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("CreateUsageLimit")
+    def create_usage_limit(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        feature_type: UsageLimitFeatureType,
+        limit_type: UsageLimitLimitType,
+        amount: Long,
+        period: UsageLimitPeriod | None = None,
+        breach_action: UsageLimitBreachAction | None = None,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> UsageLimit:
+        raise NotImplementedError
+
+    @handler("DeauthorizeDataShare")
+    def deauthorize_data_share(
+        self, context: RequestContext, data_share_arn: String, consumer_identifier: String, **kwargs
+    ) -> DataShare:
+        raise NotImplementedError
+
+    @handler("DeleteAuthenticationProfile")
+    def delete_authentication_profile(
+        self,
+        context: RequestContext,
+        authentication_profile_name: AuthenticationProfileNameString,
+        **kwargs,
+    ) -> DeleteAuthenticationProfileResult:
+        raise NotImplementedError
+
+    @handler("DeleteCluster")
+    def delete_cluster(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        skip_final_cluster_snapshot: Boolean | None = None,
+        final_cluster_snapshot_identifier: String | None = None,
+        final_cluster_snapshot_retention_period: IntegerOptional | None = None,
+        **kwargs,
+    ) -> DeleteClusterResult:
+        raise NotImplementedError
+
+    @handler("DeleteClusterParameterGroup")
+    def delete_cluster_parameter_group(
+        self, context: RequestContext, parameter_group_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteClusterSecurityGroup")
+    def delete_cluster_security_group(
+        self, context: RequestContext, cluster_security_group_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteClusterSnapshot")
+    def delete_cluster_snapshot(
+        self,
+        context: RequestContext,
+        snapshot_identifier: String,
+        snapshot_cluster_identifier: String | None = None,
+        **kwargs,
+    ) -> DeleteClusterSnapshotResult:
+        raise NotImplementedError
+
+    @handler("DeleteClusterSubnetGroup")
+    def delete_cluster_subnet_group(
+        self, context: RequestContext, cluster_subnet_group_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteCustomDomainAssociation")
+    def delete_custom_domain_association(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        custom_domain_name: CustomDomainNameString,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteEndpointAccess")
+    def delete_endpoint_access(
+        self, context: RequestContext, endpoint_name: String, **kwargs
+    ) -> EndpointAccess:
+        raise NotImplementedError
+
+    @handler("DeleteEventSubscription")
+    def delete_event_subscription(
+        self, context: RequestContext, subscription_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteHsmClientCertificate")
+    def delete_hsm_client_certificate(
+        self, context: RequestContext, hsm_client_certificate_identifier: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteHsmConfiguration")
+    def delete_hsm_configuration(
+        self, context: RequestContext, hsm_configuration_identifier: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteIntegration")
+    def delete_integration(
+        self, context: RequestContext, integration_arn: IntegrationArn, **kwargs
+    ) -> Integration:
+        raise NotImplementedError
+
+    @handler("DeletePartner")
+    def delete_partner(
+        self,
+        context: RequestContext,
+        account_id: PartnerIntegrationAccountId,
+        cluster_identifier: PartnerIntegrationClusterIdentifier,
+        database_name: PartnerIntegrationDatabaseName,
+        partner_name: PartnerIntegrationPartnerName,
+        **kwargs,
+    ) -> PartnerIntegrationOutputMessage:
+        raise NotImplementedError
+
+    @handler("DeleteRedshiftIdcApplication")
+    def delete_redshift_idc_application(
+        self, context: RequestContext, redshift_idc_application_arn: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteResourcePolicy")
+    def delete_resource_policy(
+        self, context: RequestContext, resource_arn: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteScheduledAction")
+    def delete_scheduled_action(
+        self, context: RequestContext, scheduled_action_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSnapshotCopyGrant")
+    def delete_snapshot_copy_grant(
+        self, context: RequestContext, snapshot_copy_grant_name: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteSnapshotSchedule")
+    def delete_snapshot_schedule(
+        self, context: RequestContext, schedule_identifier: String, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteTags")
+    def delete_tags(
+        self, context: RequestContext, resource_name: String, tag_keys: TagKeyList, **kwargs
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("DeleteUsageLimit")
+    def delete_usage_limit(self, context: RequestContext, usage_limit_id: String, **kwargs) -> None:
+        raise NotImplementedError
+
+    @handler("DeregisterNamespace")
+    def deregister_namespace(
+        self,
+        context: RequestContext,
+        namespace_identifier: NamespaceIdentifierUnion,
+        consumer_identifiers: ConsumerIdentifierList,
+        **kwargs,
+    ) -> DeregisterNamespaceOutputMessage:
+        raise NotImplementedError
+
+    @handler("DescribeAccountAttributes")
+    def describe_account_attributes(
+        self, context: RequestContext, attribute_names: AttributeNameList | None = None, **kwargs
+    ) -> AccountAttributeList:
+        raise NotImplementedError
+
+    @handler("DescribeAuthenticationProfiles")
+    def describe_authentication_profiles(
+        self,
+        context: RequestContext,
+        authentication_profile_name: AuthenticationProfileNameString | None = None,
+        **kwargs,
+    ) -> DescribeAuthenticationProfilesResult:
+        raise NotImplementedError
+
+    @handler("DescribeClusterDbRevisions")
+    def describe_cluster_db_revisions(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> ClusterDbRevisionsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeClusterParameterGroups")
+    def describe_cluster_parameter_groups(
+        self,
+        context: RequestContext,
+        parameter_group_name: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> ClusterParameterGroupsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeClusterParameters")
+    def describe_cluster_parameters(
+        self,
+        context: RequestContext,
+        parameter_group_name: String,
+        source: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> ClusterParameterGroupDetails:
+        raise NotImplementedError
+
+    @handler("DescribeClusterSecurityGroups")
+    def describe_cluster_security_groups(
+        self,
+        context: RequestContext,
+        cluster_security_group_name: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> ClusterSecurityGroupMessage:
+        raise NotImplementedError
+
+    @handler("DescribeClusterSnapshots")
+    def describe_cluster_snapshots(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        snapshot_identifier: String | None = None,
+        snapshot_arn: String | None = None,
+        snapshot_type: String | None = None,
+        start_time: TStamp | None = None,
+        end_time: TStamp | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        owner_account: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        cluster_exists: BooleanOptional | None = None,
+        sorting_entities: SnapshotSortingEntityList | None = None,
+        **kwargs,
+    ) -> SnapshotMessage:
+        raise NotImplementedError
+
+    @handler("DescribeClusterSubnetGroups")
+    def describe_cluster_subnet_groups(
+        self,
+        context: RequestContext,
+        cluster_subnet_group_name: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> ClusterSubnetGroupMessage:
+        raise NotImplementedError
+
+    @handler("DescribeClusterTracks")
+    def describe_cluster_tracks(
+        self,
+        context: RequestContext,
+        maintenance_track_name: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> TrackListMessage:
+        raise NotImplementedError
+
+    @handler("DescribeClusterVersions")
+    def describe_cluster_versions(
+        self,
+        context: RequestContext,
+        cluster_version: String | None = None,
+        cluster_parameter_group_family: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> ClusterVersionsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeClusters")
+    def describe_clusters(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> ClustersMessage:
+        raise NotImplementedError
+
+    @handler("DescribeCustomDomainAssociations")
+    def describe_custom_domain_associations(
+        self,
+        context: RequestContext,
+        custom_domain_name: CustomDomainNameString | None = None,
+        custom_domain_certificate_arn: CustomDomainCertificateArnString | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> CustomDomainAssociationsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeDataShares")
+    def describe_data_shares(
+        self,
+        context: RequestContext,
+        data_share_arn: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> DescribeDataSharesResult:
+        raise NotImplementedError
+
+    @handler("DescribeDataSharesForConsumer")
+    def describe_data_shares_for_consumer(
+        self,
+        context: RequestContext,
+        consumer_arn: String | None = None,
+        status: DataShareStatusForConsumer | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> DescribeDataSharesForConsumerResult:
+        raise NotImplementedError
+
+    @handler("DescribeDataSharesForProducer")
+    def describe_data_shares_for_producer(
+        self,
+        context: RequestContext,
+        producer_arn: String | None = None,
+        status: DataShareStatusForProducer | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> DescribeDataSharesForProducerResult:
+        raise NotImplementedError
+
+    @handler("DescribeDefaultClusterParameters")
+    def describe_default_cluster_parameters(
+        self,
+        context: RequestContext,
+        parameter_group_family: String,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> DescribeDefaultClusterParametersResult:
+        raise NotImplementedError
+
+    @handler("DescribeEndpointAccess")
+    def describe_endpoint_access(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        resource_owner: String | None = None,
+        endpoint_name: String | None = None,
+        vpc_id: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> EndpointAccessList:
+        raise NotImplementedError
+
+    @handler("DescribeEndpointAuthorization")
+    def describe_endpoint_authorization(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        account: String | None = None,
+        grantee: BooleanOptional | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> EndpointAuthorizationList:
+        raise NotImplementedError
+
+    @handler("DescribeEventCategories")
+    def describe_event_categories(
+        self, context: RequestContext, source_type: String | None = None, **kwargs
+    ) -> EventCategoriesMessage:
+        raise NotImplementedError
+
+    @handler("DescribeEventSubscriptions")
+    def describe_event_subscriptions(
+        self,
+        context: RequestContext,
+        subscription_name: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> EventSubscriptionsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeEvents")
+    def describe_events(
+        self,
+        context: RequestContext,
+        source_identifier: String | None = None,
+        source_type: SourceType | None = None,
+        start_time: TStamp | None = None,
+        end_time: TStamp | None = None,
+        duration: IntegerOptional | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> EventsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeHsmClientCertificates")
+    def describe_hsm_client_certificates(
+        self,
+        context: RequestContext,
+        hsm_client_certificate_identifier: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> HsmClientCertificateMessage:
+        raise NotImplementedError
+
+    @handler("DescribeHsmConfigurations")
+    def describe_hsm_configurations(
+        self,
+        context: RequestContext,
+        hsm_configuration_identifier: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> HsmConfigurationMessage:
+        raise NotImplementedError
+
+    @handler("DescribeInboundIntegrations")
+    def describe_inbound_integrations(
+        self,
+        context: RequestContext,
+        integration_arn: InboundIntegrationArn | None = None,
+        target_arn: TargetArn | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> InboundIntegrationsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeIntegrations")
+    def describe_integrations(
+        self,
+        context: RequestContext,
+        integration_arn: IntegrationArn | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        filters: DescribeIntegrationsFilterList | None = None,
+        **kwargs,
+    ) -> IntegrationsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeLoggingStatus")
+    def describe_logging_status(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> LoggingStatus:
+        raise NotImplementedError
+
+    @handler("DescribeNodeConfigurationOptions")
+    def describe_node_configuration_options(
+        self,
+        context: RequestContext,
+        action_type: ActionType,
+        cluster_identifier: String | None = None,
+        snapshot_identifier: String | None = None,
+        snapshot_arn: String | None = None,
+        owner_account: String | None = None,
+        filters: NodeConfigurationOptionsFilterList | None = None,
+        marker: String | None = None,
+        max_records: IntegerOptional | None = None,
+        **kwargs,
+    ) -> NodeConfigurationOptionsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeOrderableClusterOptions")
+    def describe_orderable_cluster_options(
+        self,
+        context: RequestContext,
+        cluster_version: String | None = None,
+        node_type: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> OrderableClusterOptionsMessage:
+        raise NotImplementedError
+
+    @handler("DescribePartners")
+    def describe_partners(
+        self,
+        context: RequestContext,
+        account_id: PartnerIntegrationAccountId,
+        cluster_identifier: PartnerIntegrationClusterIdentifier,
+        database_name: PartnerIntegrationDatabaseName | None = None,
+        partner_name: PartnerIntegrationPartnerName | None = None,
+        **kwargs,
+    ) -> DescribePartnersOutputMessage:
+        raise NotImplementedError
+
+    @handler("DescribeRedshiftIdcApplications")
+    def describe_redshift_idc_applications(
+        self,
+        context: RequestContext,
+        redshift_idc_application_arn: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> DescribeRedshiftIdcApplicationsResult:
+        raise NotImplementedError
+
+    @handler("DescribeReservedNodeExchangeStatus")
+    def describe_reserved_node_exchange_status(
+        self,
+        context: RequestContext,
+        reserved_node_id: String | None = None,
+        reserved_node_exchange_request_id: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> DescribeReservedNodeExchangeStatusOutputMessage:
+        raise NotImplementedError
+
+    @handler("DescribeReservedNodeOfferings")
+    def describe_reserved_node_offerings(
+        self,
+        context: RequestContext,
+        reserved_node_offering_id: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> ReservedNodeOfferingsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeReservedNodes")
+    def describe_reserved_nodes(
+        self,
+        context: RequestContext,
+        reserved_node_id: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> ReservedNodesMessage:
+        raise NotImplementedError
+
+    @handler("DescribeResize")
+    def describe_resize(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> ResizeProgressMessage:
+        raise NotImplementedError
+
+    @handler("DescribeScheduledActions")
+    def describe_scheduled_actions(
+        self,
+        context: RequestContext,
+        scheduled_action_name: String | None = None,
+        target_action_type: ScheduledActionTypeValues | None = None,
+        start_time: TStamp | None = None,
+        end_time: TStamp | None = None,
+        active: BooleanOptional | None = None,
+        filters: ScheduledActionFilterList | None = None,
+        marker: String | None = None,
+        max_records: IntegerOptional | None = None,
+        **kwargs,
+    ) -> ScheduledActionsMessage:
+        raise NotImplementedError
+
+    @handler("DescribeSnapshotCopyGrants")
+    def describe_snapshot_copy_grants(
+        self,
+        context: RequestContext,
+        snapshot_copy_grant_name: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> SnapshotCopyGrantMessage:
+        raise NotImplementedError
+
+    @handler("DescribeSnapshotSchedules")
+    def describe_snapshot_schedules(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        schedule_identifier: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        marker: String | None = None,
+        max_records: IntegerOptional | None = None,
+        **kwargs,
+    ) -> DescribeSnapshotSchedulesOutputMessage:
+        raise NotImplementedError
+
+    @handler("DescribeStorage")
+    def describe_storage(self, context: RequestContext, **kwargs) -> CustomerStorageMessage:
+        raise NotImplementedError
+
+    @handler("DescribeTableRestoreStatus")
+    def describe_table_restore_status(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        table_restore_request_id: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> TableRestoreStatusMessage:
+        raise NotImplementedError
+
+    @handler("DescribeTags")
+    def describe_tags(
+        self,
+        context: RequestContext,
+        resource_name: String | None = None,
+        resource_type: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> TaggedResourceListMessage:
+        raise NotImplementedError
+
+    @handler("DescribeUsageLimits")
+    def describe_usage_limits(
+        self,
+        context: RequestContext,
+        usage_limit_id: String | None = None,
+        cluster_identifier: String | None = None,
+        feature_type: UsageLimitFeatureType | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        tag_keys: TagKeyList | None = None,
+        tag_values: TagValueList | None = None,
+        **kwargs,
+    ) -> UsageLimitList:
+        raise NotImplementedError
+
+    @handler("DisableLogging")
+    def disable_logging(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> LoggingStatus:
+        raise NotImplementedError
+
+    @handler("DisableSnapshotCopy")
+    def disable_snapshot_copy(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> DisableSnapshotCopyResult:
+        raise NotImplementedError
+
+    @handler("DisassociateDataShareConsumer")
+    def disassociate_data_share_consumer(
+        self,
+        context: RequestContext,
+        data_share_arn: String,
+        disassociate_entire_account: BooleanOptional | None = None,
+        consumer_arn: String | None = None,
+        consumer_region: String | None = None,
+        **kwargs,
+    ) -> DataShare:
+        raise NotImplementedError
+
+    @handler("EnableLogging")
+    def enable_logging(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        bucket_name: String | None = None,
+        s3_key_prefix: S3KeyPrefixValue | None = None,
+        log_destination_type: LogDestinationType | None = None,
+        log_exports: LogTypeList | None = None,
+        **kwargs,
+    ) -> LoggingStatus:
+        raise NotImplementedError
+
+    @handler("EnableSnapshotCopy")
+    def enable_snapshot_copy(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        destination_region: String,
+        retention_period: IntegerOptional | None = None,
+        snapshot_copy_grant_name: String | None = None,
+        manual_snapshot_retention_period: IntegerOptional | None = None,
+        **kwargs,
+    ) -> EnableSnapshotCopyResult:
+        raise NotImplementedError
+
+    @handler("FailoverPrimaryCompute")
+    def failover_primary_compute(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> FailoverPrimaryComputeResult:
+        raise NotImplementedError
+
+    @handler("GetClusterCredentials")
+    def get_cluster_credentials(
+        self,
+        context: RequestContext,
+        db_user: String,
+        db_name: String | None = None,
+        cluster_identifier: String | None = None,
+        duration_seconds: IntegerOptional | None = None,
+        auto_create: BooleanOptional | None = None,
+        db_groups: DbGroupList | None = None,
+        custom_domain_name: String | None = None,
+        **kwargs,
+    ) -> ClusterCredentials:
+        raise NotImplementedError
+
+    @handler("GetClusterCredentialsWithIAM")
+    def get_cluster_credentials_with_iam(
+        self,
+        context: RequestContext,
+        db_name: String | None = None,
+        cluster_identifier: String | None = None,
+        duration_seconds: IntegerOptional | None = None,
+        custom_domain_name: String | None = None,
+        **kwargs,
+    ) -> ClusterExtendedCredentials:
+        raise NotImplementedError
+
+    @handler("GetReservedNodeExchangeConfigurationOptions")
+    def get_reserved_node_exchange_configuration_options(
+        self,
+        context: RequestContext,
+        action_type: ReservedNodeExchangeActionType,
+        cluster_identifier: String | None = None,
+        snapshot_identifier: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> GetReservedNodeExchangeConfigurationOptionsOutputMessage:
+        raise NotImplementedError
+
+    @handler("GetReservedNodeExchangeOfferings")
+    def get_reserved_node_exchange_offerings(
+        self,
+        context: RequestContext,
+        reserved_node_id: String,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> GetReservedNodeExchangeOfferingsOutputMessage:
+        raise NotImplementedError
+
+    @handler("GetResourcePolicy")
+    def get_resource_policy(
+        self, context: RequestContext, resource_arn: String, **kwargs
+    ) -> GetResourcePolicyResult:
+        raise NotImplementedError
+
+    @handler("ListRecommendations")
+    def list_recommendations(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        namespace_arn: String | None = None,
+        max_records: IntegerOptional | None = None,
+        marker: String | None = None,
+        **kwargs,
+    ) -> ListRecommendationsResult:
+        raise NotImplementedError
+
+    @handler("ModifyAquaConfiguration")
+    def modify_aqua_configuration(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        aqua_configuration_status: AquaConfigurationStatus | None = None,
+        **kwargs,
+    ) -> ModifyAquaOutputMessage:
+        raise NotImplementedError
+
+    @handler("ModifyAuthenticationProfile")
+    def modify_authentication_profile(
+        self,
+        context: RequestContext,
+        authentication_profile_name: AuthenticationProfileNameString,
+        authentication_profile_content: String,
+        **kwargs,
+    ) -> ModifyAuthenticationProfileResult:
+        raise NotImplementedError
+
+    @handler("ModifyCluster")
+    def modify_cluster(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        cluster_type: String | None = None,
+        node_type: String | None = None,
+        number_of_nodes: IntegerOptional | None = None,
+        cluster_security_groups: ClusterSecurityGroupNameList | None = None,
+        vpc_security_group_ids: VpcSecurityGroupIdList | None = None,
+        master_user_password: SensitiveString | None = None,
+        cluster_parameter_group_name: String | None = None,
+        automated_snapshot_retention_period: IntegerOptional | None = None,
+        manual_snapshot_retention_period: IntegerOptional | None = None,
+        preferred_maintenance_window: String | None = None,
+        cluster_version: String | None = None,
+        allow_version_upgrade: BooleanOptional | None = None,
+        hsm_client_certificate_identifier: String | None = None,
+        hsm_configuration_identifier: String | None = None,
+        new_cluster_identifier: String | None = None,
+        publicly_accessible: BooleanOptional | None = None,
+        elastic_ip: String | None = None,
+        enhanced_vpc_routing: BooleanOptional | None = None,
+        maintenance_track_name: String | None = None,
+        encrypted: BooleanOptional | None = None,
+        kms_key_id: String | None = None,
+        availability_zone_relocation: BooleanOptional | None = None,
+        availability_zone: String | None = None,
+        port: IntegerOptional | None = None,
+        manage_master_password: BooleanOptional | None = None,
+        master_password_secret_kms_key_id: String | None = None,
+        ip_address_type: String | None = None,
+        multi_az: BooleanOptional | None = None,
+        **kwargs,
+    ) -> ModifyClusterResult:
+        raise NotImplementedError
+
+    @handler("ModifyClusterDbRevision")
+    def modify_cluster_db_revision(
+        self, context: RequestContext, cluster_identifier: String, revision_target: String, **kwargs
+    ) -> ModifyClusterDbRevisionResult:
+        raise NotImplementedError
+
+    @handler("ModifyClusterIamRoles")
+    def modify_cluster_iam_roles(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        add_iam_roles: IamRoleArnList | None = None,
+        remove_iam_roles: IamRoleArnList | None = None,
+        default_iam_role_arn: String | None = None,
+        **kwargs,
+    ) -> ModifyClusterIamRolesResult:
+        raise NotImplementedError
+
+    @handler("ModifyClusterMaintenance")
+    def modify_cluster_maintenance(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        defer_maintenance: BooleanOptional | None = None,
+        defer_maintenance_identifier: String | None = None,
+        defer_maintenance_start_time: TStamp | None = None,
+        defer_maintenance_end_time: TStamp | None = None,
+        defer_maintenance_duration: IntegerOptional | None = None,
+        **kwargs,
+    ) -> ModifyClusterMaintenanceResult:
+        raise NotImplementedError
+
+    @handler("ModifyClusterParameterGroup")
+    def modify_cluster_parameter_group(
+        self,
+        context: RequestContext,
+        parameter_group_name: String,
+        parameters: ParametersList,
+        **kwargs,
+    ) -> ClusterParameterGroupNameMessage:
+        raise NotImplementedError
+
+    @handler("ModifyClusterSnapshot")
+    def modify_cluster_snapshot(
+        self,
+        context: RequestContext,
+        snapshot_identifier: String,
+        manual_snapshot_retention_period: IntegerOptional | None = None,
+        force: Boolean | None = None,
+        **kwargs,
+    ) -> ModifyClusterSnapshotResult:
+        raise NotImplementedError
+
+    @handler("ModifyClusterSnapshotSchedule")
+    def modify_cluster_snapshot_schedule(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        schedule_identifier: String | None = None,
+        disassociate_schedule: BooleanOptional | None = None,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    @handler("ModifyClusterSubnetGroup")
+    def modify_cluster_subnet_group(
+        self,
+        context: RequestContext,
+        cluster_subnet_group_name: String,
+        subnet_ids: SubnetIdentifierList,
+        description: String | None = None,
+        **kwargs,
+    ) -> ModifyClusterSubnetGroupResult:
+        raise NotImplementedError
+
+    @handler("ModifyCustomDomainAssociation")
+    def modify_custom_domain_association(
+        self,
+        context: RequestContext,
+        custom_domain_name: CustomDomainNameString,
+        custom_domain_certificate_arn: CustomDomainCertificateArnString,
+        cluster_identifier: String,
+        **kwargs,
+    ) -> ModifyCustomDomainAssociationResult:
+        raise NotImplementedError
+
+    @handler("ModifyEndpointAccess")
+    def modify_endpoint_access(
+        self,
+        context: RequestContext,
+        endpoint_name: String,
+        vpc_security_group_ids: VpcSecurityGroupIdList | None = None,
+        **kwargs,
+    ) -> EndpointAccess:
+        raise NotImplementedError
+
+    @handler("ModifyEventSubscription")
+    def modify_event_subscription(
+        self,
+        context: RequestContext,
+        subscription_name: String,
+        sns_topic_arn: String | None = None,
+        source_type: String | None = None,
+        source_ids: SourceIdsList | None = None,
+        event_categories: EventCategoriesList | None = None,
+        severity: String | None = None,
+        enabled: BooleanOptional | None = None,
+        **kwargs,
+    ) -> ModifyEventSubscriptionResult:
+        raise NotImplementedError
+
+    @handler("ModifyIntegration")
+    def modify_integration(
+        self,
+        context: RequestContext,
+        integration_arn: IntegrationArn,
+        description: IntegrationDescription | None = None,
+        integration_name: IntegrationName | None = None,
+        **kwargs,
+    ) -> Integration:
+        raise NotImplementedError
+
+    @handler("ModifyRedshiftIdcApplication")
+    def modify_redshift_idc_application(
+        self,
+        context: RequestContext,
+        redshift_idc_application_arn: String,
+        identity_namespace: IdentityNamespaceString | None = None,
+        iam_role_arn: String | None = None,
+        idc_display_name: IdcDisplayNameString | None = None,
+        authorized_token_issuer_list: AuthorizedTokenIssuerList | None = None,
+        service_integrations: ServiceIntegrationList | None = None,
+        **kwargs,
+    ) -> ModifyRedshiftIdcApplicationResult:
+        raise NotImplementedError
+
+    @handler("ModifyScheduledAction")
+    def modify_scheduled_action(
+        self,
+        context: RequestContext,
+        scheduled_action_name: String,
+        target_action: ScheduledActionType | None = None,
+        schedule: String | None = None,
+        iam_role: String | None = None,
+        scheduled_action_description: String | None = None,
+        start_time: TStamp | None = None,
+        end_time: TStamp | None = None,
+        enable: BooleanOptional | None = None,
+        **kwargs,
+    ) -> ScheduledAction:
+        raise NotImplementedError
+
+    @handler("ModifySnapshotCopyRetentionPeriod")
+    def modify_snapshot_copy_retention_period(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        retention_period: Integer,
+        manual: Boolean | None = None,
+        **kwargs,
+    ) -> ModifySnapshotCopyRetentionPeriodResult:
+        raise NotImplementedError
+
+    @handler("ModifySnapshotSchedule")
+    def modify_snapshot_schedule(
+        self,
+        context: RequestContext,
+        schedule_identifier: String,
+        schedule_definitions: ScheduleDefinitionList,
+        **kwargs,
+    ) -> SnapshotSchedule:
+        raise NotImplementedError
+
+    @handler("ModifyUsageLimit")
+    def modify_usage_limit(
+        self,
+        context: RequestContext,
+        usage_limit_id: String,
+        amount: LongOptional | None = None,
+        breach_action: UsageLimitBreachAction | None = None,
+        **kwargs,
+    ) -> UsageLimit:
+        raise NotImplementedError
+
+    @handler("PauseCluster")
+    def pause_cluster(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> PauseClusterResult:
+        raise NotImplementedError
+
+    @handler("PurchaseReservedNodeOffering")
+    def purchase_reserved_node_offering(
+        self,
+        context: RequestContext,
+        reserved_node_offering_id: String,
+        node_count: IntegerOptional | None = None,
+        **kwargs,
+    ) -> PurchaseReservedNodeOfferingResult:
+        raise NotImplementedError
+
+    @handler("PutResourcePolicy")
+    def put_resource_policy(
+        self, context: RequestContext, resource_arn: String, policy: String, **kwargs
+    ) -> PutResourcePolicyResult:
+        raise NotImplementedError
+
+    @handler("RebootCluster")
+    def reboot_cluster(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> RebootClusterResult:
+        raise NotImplementedError
+
+    @handler("RegisterNamespace")
+    def register_namespace(
+        self,
+        context: RequestContext,
+        namespace_identifier: NamespaceIdentifierUnion,
+        consumer_identifiers: ConsumerIdentifierList,
+        **kwargs,
+    ) -> RegisterNamespaceOutputMessage:
+        raise NotImplementedError
+
+    @handler("RejectDataShare")
+    def reject_data_share(
+        self, context: RequestContext, data_share_arn: String, **kwargs
+    ) -> DataShare:
+        raise NotImplementedError
+
+    @handler("ResetClusterParameterGroup")
+    def reset_cluster_parameter_group(
+        self,
+        context: RequestContext,
+        parameter_group_name: String,
+        reset_all_parameters: Boolean | None = None,
+        parameters: ParametersList | None = None,
+        **kwargs,
+    ) -> ClusterParameterGroupNameMessage:
+        raise NotImplementedError
+
+    @handler("ResizeCluster")
+    def resize_cluster(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        cluster_type: String | None = None,
+        node_type: String | None = None,
+        number_of_nodes: IntegerOptional | None = None,
+        classic: BooleanOptional | None = None,
+        reserved_node_id: String | None = None,
+        target_reserved_node_offering_id: String | None = None,
+        **kwargs,
+    ) -> ResizeClusterResult:
+        raise NotImplementedError
+
+    @handler("RestoreFromClusterSnapshot")
+    def restore_from_cluster_snapshot(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        snapshot_identifier: String | None = None,
+        snapshot_arn: String | None = None,
+        snapshot_cluster_identifier: String | None = None,
+        port: IntegerOptional | None = None,
+        availability_zone: String | None = None,
+        allow_version_upgrade: BooleanOptional | None = None,
+        cluster_subnet_group_name: String | None = None,
+        publicly_accessible: BooleanOptional | None = None,
+        owner_account: String | None = None,
+        hsm_client_certificate_identifier: String | None = None,
+        hsm_configuration_identifier: String | None = None,
+        elastic_ip: String | None = None,
+        cluster_parameter_group_name: String | None = None,
+        cluster_security_groups: ClusterSecurityGroupNameList | None = None,
+        vpc_security_group_ids: VpcSecurityGroupIdList | None = None,
+        preferred_maintenance_window: String | None = None,
+        automated_snapshot_retention_period: IntegerOptional | None = None,
+        manual_snapshot_retention_period: IntegerOptional | None = None,
+        kms_key_id: String | None = None,
+        node_type: String | None = None,
+        enhanced_vpc_routing: BooleanOptional | None = None,
+        additional_info: String | None = None,
+        iam_roles: IamRoleArnList | None = None,
+        maintenance_track_name: String | None = None,
+        snapshot_schedule_identifier: String | None = None,
+        number_of_nodes: IntegerOptional | None = None,
+        availability_zone_relocation: BooleanOptional | None = None,
+        aqua_configuration_status: AquaConfigurationStatus | None = None,
+        default_iam_role_arn: String | None = None,
+        reserved_node_id: String | None = None,
+        target_reserved_node_offering_id: String | None = None,
+        encrypted: BooleanOptional | None = None,
+        manage_master_password: BooleanOptional | None = None,
+        master_password_secret_kms_key_id: String | None = None,
+        ip_address_type: String | None = None,
+        multi_az: BooleanOptional | None = None,
+        **kwargs,
+    ) -> RestoreFromClusterSnapshotResult:
+        raise NotImplementedError
+
+    @handler("RestoreTableFromClusterSnapshot")
+    def restore_table_from_cluster_snapshot(
+        self,
+        context: RequestContext,
+        cluster_identifier: String,
+        snapshot_identifier: String,
+        source_database_name: String,
+        source_table_name: String,
+        new_table_name: String,
+        source_schema_name: String | None = None,
+        target_database_name: String | None = None,
+        target_schema_name: String | None = None,
+        enable_case_sensitive_identifier: BooleanOptional | None = None,
+        **kwargs,
+    ) -> RestoreTableFromClusterSnapshotResult:
+        raise NotImplementedError
+
+    @handler("ResumeCluster")
+    def resume_cluster(
+        self, context: RequestContext, cluster_identifier: String, **kwargs
+    ) -> ResumeClusterResult:
+        raise NotImplementedError
+
+    @handler("RevokeClusterSecurityGroupIngress")
+    def revoke_cluster_security_group_ingress(
+        self,
+        context: RequestContext,
+        cluster_security_group_name: String,
+        cidrip: String | None = None,
+        ec2_security_group_name: String | None = None,
+        ec2_security_group_owner_id: String | None = None,
+        **kwargs,
+    ) -> RevokeClusterSecurityGroupIngressResult:
+        raise NotImplementedError
+
+    @handler("RevokeEndpointAccess")
+    def revoke_endpoint_access(
+        self,
+        context: RequestContext,
+        cluster_identifier: String | None = None,
+        account: String | None = None,
+        vpc_ids: VpcIdentifierList | None = None,
+        force: Boolean | None = None,
+        **kwargs,
+    ) -> EndpointAuthorization:
+        raise NotImplementedError
+
+    @handler("RevokeSnapshotAccess")
+    def revoke_snapshot_access(
+        self,
+        context: RequestContext,
+        account_with_restore_access: String,
+        snapshot_identifier: String | None = None,
+        snapshot_arn: String |
None = None, + snapshot_cluster_identifier: String | None = None, + **kwargs, + ) -> RevokeSnapshotAccessResult: + raise NotImplementedError + + @handler("RotateEncryptionKey") + def rotate_encryption_key( + self, context: RequestContext, cluster_identifier: String, **kwargs + ) -> RotateEncryptionKeyResult: + raise NotImplementedError + + @handler("UpdatePartnerStatus") + def update_partner_status( + self, + context: RequestContext, + account_id: PartnerIntegrationAccountId, + cluster_identifier: PartnerIntegrationClusterIdentifier, + database_name: PartnerIntegrationDatabaseName, + partner_name: PartnerIntegrationPartnerName, + status: PartnerIntegrationStatus, + status_message: PartnerIntegrationStatusMessage | None = None, + **kwargs, + ) -> PartnerIntegrationOutputMessage: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/resource_groups/__init__.py b/localstack-core/localstack/aws/api/resource_groups/__init__.py new file mode 100644 index 0000000000000..b7511726ef579 --- /dev/null +++ b/localstack-core/localstack/aws/api/resource_groups/__init__.py @@ -0,0 +1,805 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +ApplicationArn = str +ApplicationTagKey = str +CreateGroupName = str +Criticality = int +Description = str +DisplayName = str +ErrorCode = str +ErrorMessage = str +GroupArn = str +GroupArnV2 = str +GroupConfigurationFailureReason = str +GroupConfigurationParameterName = str +GroupConfigurationParameterValue = str +GroupConfigurationType = str +GroupFilterValue = str +GroupLifecycleEventsStatusMessage = str +GroupName = str +GroupString = str +GroupStringV2 = str +ListGroupingStatusesFilterValue = str +MaxResults = int +NextToken = str +Owner = str +Query = str +QueryErrorMessage = str +ResourceArn = str +ResourceFilterValue = str +ResourceType = str +RoleArn = str +TagKey = str +TagSyncTaskArn = str +TagValue = str + + +class GroupConfigurationStatus(StrEnum): + UPDATING = "UPDATING" + UPDATE_COMPLETE = "UPDATE_COMPLETE" + UPDATE_FAILED = "UPDATE_FAILED" + + +class GroupFilterName(StrEnum): + resource_type = "resource-type" + configuration_type = "configuration-type" + owner = "owner" + display_name = "display-name" + criticality = "criticality" + + +class GroupLifecycleEventsDesiredStatus(StrEnum): + ACTIVE = "ACTIVE" + INACTIVE = "INACTIVE" + + +class GroupLifecycleEventsStatus(StrEnum): + ACTIVE = "ACTIVE" + INACTIVE = "INACTIVE" + IN_PROGRESS = "IN_PROGRESS" + ERROR = "ERROR" + + +class GroupingStatus(StrEnum): + SUCCESS = "SUCCESS" + FAILED = "FAILED" + IN_PROGRESS = "IN_PROGRESS" + SKIPPED = "SKIPPED" + + +class GroupingType(StrEnum): + GROUP = "GROUP" + UNGROUP = "UNGROUP" + + +class ListGroupingStatusesFilterName(StrEnum): + status = "status" + resource_arn = "resource-arn" + + +class QueryErrorCode(StrEnum): + CLOUDFORMATION_STACK_INACTIVE = "CLOUDFORMATION_STACK_INACTIVE" + CLOUDFORMATION_STACK_NOT_EXISTING = "CLOUDFORMATION_STACK_NOT_EXISTING" + CLOUDFORMATION_STACK_UNASSUMABLE_ROLE = "CLOUDFORMATION_STACK_UNASSUMABLE_ROLE" + RESOURCE_TYPE_NOT_SUPPORTED = "RESOURCE_TYPE_NOT_SUPPORTED" + + +class QueryType(StrEnum): + TAG_FILTERS_1_0 = "TAG_FILTERS_1_0" + CLOUDFORMATION_STACK_1_0 = "CLOUDFORMATION_STACK_1_0" + + +class ResourceFilterName(StrEnum): + resource_type = "resource-type" + + +class ResourceStatusValue(StrEnum): + PENDING = "PENDING" + + +class TagSyncTaskStatus(StrEnum): + 
ACTIVE = "ACTIVE" + ERROR = "ERROR" + + +class BadRequestException(ServiceException): + code: str = "BadRequestException" + sender_fault: bool = False + status_code: int = 400 + + +class ForbiddenException(ServiceException): + code: str = "ForbiddenException" + sender_fault: bool = False + status_code: int = 403 + + +class InternalServerErrorException(ServiceException): + code: str = "InternalServerErrorException" + sender_fault: bool = False + status_code: int = 500 + + +class MethodNotAllowedException(ServiceException): + code: str = "MethodNotAllowedException" + sender_fault: bool = False + status_code: int = 405 + + +class NotFoundException(ServiceException): + code: str = "NotFoundException" + sender_fault: bool = False + status_code: int = 404 + + +class TooManyRequestsException(ServiceException): + code: str = "TooManyRequestsException" + sender_fault: bool = False + status_code: int = 429 + + +class UnauthorizedException(ServiceException): + code: str = "UnauthorizedException" + sender_fault: bool = False + status_code: int = 401 + + +class AccountSettings(TypedDict, total=False): + GroupLifecycleEventsDesiredStatus: Optional[GroupLifecycleEventsDesiredStatus] + GroupLifecycleEventsStatus: Optional[GroupLifecycleEventsStatus] + GroupLifecycleEventsStatusMessage: Optional[GroupLifecycleEventsStatusMessage] + + +ApplicationTag = Dict[ApplicationTagKey, ApplicationArn] + + +class CancelTagSyncTaskInput(ServiceRequest): + TaskArn: TagSyncTaskArn + + +GroupConfigurationParameterValueList = List[GroupConfigurationParameterValue] + + +class GroupConfigurationParameter(TypedDict, total=False): + Name: GroupConfigurationParameterName + Values: Optional[GroupConfigurationParameterValueList] + + +GroupParameterList = List[GroupConfigurationParameter] + + +class GroupConfigurationItem(TypedDict, total=False): + Type: GroupConfigurationType + Parameters: Optional[GroupParameterList] + + +GroupConfigurationList = List[GroupConfigurationItem] +Tags = Dict[TagKey, TagValue] + + +class ResourceQuery(TypedDict, total=False): + Type: QueryType + Query: Query + + +class CreateGroupInput(ServiceRequest): + Name: CreateGroupName + Description: Optional[Description] + ResourceQuery: Optional[ResourceQuery] + Tags: Optional[Tags] + Configuration: Optional[GroupConfigurationList] + Criticality: Optional[Criticality] + Owner: Optional[Owner] + DisplayName: Optional[DisplayName] + + +class GroupConfiguration(TypedDict, total=False): + Configuration: Optional[GroupConfigurationList] + ProposedConfiguration: Optional[GroupConfigurationList] + Status: Optional[GroupConfigurationStatus] + FailureReason: Optional[GroupConfigurationFailureReason] + + +class Group(TypedDict, total=False): + GroupArn: GroupArnV2 + Name: GroupName + Description: Optional[Description] + Criticality: Optional[Criticality] + Owner: Optional[Owner] + DisplayName: Optional[DisplayName] + ApplicationTag: Optional[ApplicationTag] + + +class CreateGroupOutput(TypedDict, total=False): + Group: Optional[Group] + ResourceQuery: Optional[ResourceQuery] + Tags: Optional[Tags] + GroupConfiguration: Optional[GroupConfiguration] + + +class DeleteGroupInput(ServiceRequest): + GroupName: Optional[GroupName] + Group: Optional[GroupStringV2] + + +class DeleteGroupOutput(TypedDict, total=False): + Group: Optional[Group] + + +class FailedResource(TypedDict, total=False): + ResourceArn: Optional[ResourceArn] + ErrorMessage: Optional[ErrorMessage] + ErrorCode: Optional[ErrorCode] + + +FailedResourceList = List[FailedResource] + + +class 
GetAccountSettingsOutput(TypedDict, total=False): + AccountSettings: Optional[AccountSettings] + + +class GetGroupConfigurationInput(ServiceRequest): + Group: Optional[GroupString] + + +class GetGroupConfigurationOutput(TypedDict, total=False): + GroupConfiguration: Optional[GroupConfiguration] + + +class GetGroupInput(ServiceRequest): + GroupName: Optional[GroupName] + Group: Optional[GroupStringV2] + + +class GetGroupOutput(TypedDict, total=False): + Group: Optional[Group] + + +class GetGroupQueryInput(ServiceRequest): + GroupName: Optional[GroupName] + Group: Optional[GroupString] + + +class GroupQuery(TypedDict, total=False): + GroupName: GroupName + ResourceQuery: ResourceQuery + + +class GetGroupQueryOutput(TypedDict, total=False): + GroupQuery: Optional[GroupQuery] + + +class GetTagSyncTaskInput(ServiceRequest): + TaskArn: TagSyncTaskArn + + +timestamp = datetime + + +class GetTagSyncTaskOutput(TypedDict, total=False): + GroupArn: Optional[GroupArnV2] + GroupName: Optional[GroupName] + TaskArn: Optional[TagSyncTaskArn] + TagKey: Optional[TagKey] + TagValue: Optional[TagValue] + ResourceQuery: Optional[ResourceQuery] + RoleArn: Optional[RoleArn] + Status: Optional[TagSyncTaskStatus] + ErrorMessage: Optional[ErrorMessage] + CreatedAt: Optional[timestamp] + + +class GetTagsInput(ServiceRequest): + Arn: GroupArnV2 + + +class GetTagsOutput(TypedDict, total=False): + Arn: Optional[GroupArnV2] + Tags: Optional[Tags] + + +GroupFilterValues = List[GroupFilterValue] + + +class GroupFilter(TypedDict, total=False): + Name: GroupFilterName + Values: GroupFilterValues + + +GroupFilterList = List[GroupFilter] + + +class GroupIdentifier(TypedDict, total=False): + GroupName: Optional[GroupName] + GroupArn: Optional[GroupArn] + Description: Optional[Description] + Criticality: Optional[Criticality] + Owner: Optional[Owner] + DisplayName: Optional[DisplayName] + + +GroupIdentifierList = List[GroupIdentifier] +GroupList = List[Group] +ResourceArnList = List[ResourceArn] + + +class GroupResourcesInput(ServiceRequest): + Group: GroupStringV2 + ResourceArns: ResourceArnList + + +class PendingResource(TypedDict, total=False): + ResourceArn: Optional[ResourceArn] + + +PendingResourceList = List[PendingResource] + + +class GroupResourcesOutput(TypedDict, total=False): + Succeeded: Optional[ResourceArnList] + Failed: Optional[FailedResourceList] + Pending: Optional[PendingResourceList] + + +class GroupingStatusesItem(TypedDict, total=False): + ResourceArn: Optional[ResourceArn] + Action: Optional[GroupingType] + Status: Optional[GroupingStatus] + ErrorMessage: Optional[ErrorMessage] + ErrorCode: Optional[ErrorCode] + UpdatedAt: Optional[timestamp] + + +GroupingStatusesList = List[GroupingStatusesItem] +ResourceFilterValues = List[ResourceFilterValue] + + +class ResourceFilter(TypedDict, total=False): + Name: ResourceFilterName + Values: ResourceFilterValues + + +ResourceFilterList = List[ResourceFilter] + + +class ListGroupResourcesInput(ServiceRequest): + GroupName: Optional[GroupName] + Group: Optional[GroupStringV2] + Filters: Optional[ResourceFilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ResourceStatus(TypedDict, total=False): + Name: Optional[ResourceStatusValue] + + +class ResourceIdentifier(TypedDict, total=False): + ResourceArn: Optional[ResourceArn] + ResourceType: Optional[ResourceType] + + +class ListGroupResourcesItem(TypedDict, total=False): + Identifier: Optional[ResourceIdentifier] + Status: Optional[ResourceStatus] + + +ListGroupResourcesItemList = 
List[ListGroupResourcesItem] + + +class QueryError(TypedDict, total=False): + ErrorCode: Optional[QueryErrorCode] + Message: Optional[QueryErrorMessage] + + +QueryErrorList = List[QueryError] +ResourceIdentifierList = List[ResourceIdentifier] + + +class ListGroupResourcesOutput(TypedDict, total=False): + Resources: Optional[ListGroupResourcesItemList] + ResourceIdentifiers: Optional[ResourceIdentifierList] + NextToken: Optional[NextToken] + QueryErrors: Optional[QueryErrorList] + + +ListGroupingStatusesFilterValues = List[ListGroupingStatusesFilterValue] + + +class ListGroupingStatusesFilter(TypedDict, total=False): + Name: ListGroupingStatusesFilterName + Values: ListGroupingStatusesFilterValues + + +ListGroupingStatusesFilterList = List[ListGroupingStatusesFilter] + + +class ListGroupingStatusesInput(ServiceRequest): + Group: GroupStringV2 + MaxResults: Optional[MaxResults] + Filters: Optional[ListGroupingStatusesFilterList] + NextToken: Optional[NextToken] + + +class ListGroupingStatusesOutput(TypedDict, total=False): + Group: Optional[GroupStringV2] + GroupingStatuses: Optional[GroupingStatusesList] + NextToken: Optional[NextToken] + + +class ListGroupsInput(ServiceRequest): + Filters: Optional[GroupFilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListGroupsOutput(TypedDict, total=False): + GroupIdentifiers: Optional[GroupIdentifierList] + Groups: Optional[GroupList] + NextToken: Optional[NextToken] + + +class ListTagSyncTasksFilter(TypedDict, total=False): + GroupArn: Optional[GroupArnV2] + GroupName: Optional[GroupName] + + +ListTagSyncTasksFilterList = List[ListTagSyncTasksFilter] + + +class ListTagSyncTasksInput(ServiceRequest): + Filters: Optional[ListTagSyncTasksFilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class TagSyncTaskItem(TypedDict, total=False): + GroupArn: Optional[GroupArnV2] + GroupName: Optional[GroupName] + TaskArn: Optional[TagSyncTaskArn] + TagKey: Optional[TagKey] + TagValue: Optional[TagValue] + ResourceQuery: Optional[ResourceQuery] + RoleArn: Optional[RoleArn] + Status: Optional[TagSyncTaskStatus] + ErrorMessage: Optional[ErrorMessage] + CreatedAt: Optional[timestamp] + + +TagSyncTaskList = List[TagSyncTaskItem] + + +class ListTagSyncTasksOutput(TypedDict, total=False): + TagSyncTasks: Optional[TagSyncTaskList] + NextToken: Optional[NextToken] + + +class PutGroupConfigurationInput(ServiceRequest): + Group: Optional[GroupString] + Configuration: Optional[GroupConfigurationList] + + +class PutGroupConfigurationOutput(TypedDict, total=False): + pass + + +class SearchResourcesInput(ServiceRequest): + ResourceQuery: ResourceQuery + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class SearchResourcesOutput(TypedDict, total=False): + ResourceIdentifiers: Optional[ResourceIdentifierList] + NextToken: Optional[NextToken] + QueryErrors: Optional[QueryErrorList] + + +class StartTagSyncTaskInput(ServiceRequest): + Group: GroupStringV2 + TagKey: Optional[TagKey] + TagValue: Optional[TagValue] + ResourceQuery: Optional[ResourceQuery] + RoleArn: RoleArn + + +class StartTagSyncTaskOutput(TypedDict, total=False): + GroupArn: Optional[GroupArnV2] + GroupName: Optional[GroupName] + TaskArn: Optional[TagSyncTaskArn] + TagKey: Optional[TagKey] + TagValue: Optional[TagValue] + ResourceQuery: Optional[ResourceQuery] + RoleArn: Optional[RoleArn] + + +class TagInput(ServiceRequest): + Arn: GroupArnV2 + Tags: Tags + + +TagKeyList = List[TagKey] + + +class TagOutput(TypedDict, 
total=False): + Arn: Optional[GroupArnV2] + Tags: Optional[Tags] + + +class UngroupResourcesInput(ServiceRequest): + Group: GroupStringV2 + ResourceArns: ResourceArnList + + +class UngroupResourcesOutput(TypedDict, total=False): + Succeeded: Optional[ResourceArnList] + Failed: Optional[FailedResourceList] + Pending: Optional[PendingResourceList] + + +class UntagInput(ServiceRequest): + Arn: GroupArnV2 + Keys: TagKeyList + + +class UntagOutput(TypedDict, total=False): + Arn: Optional[GroupArnV2] + Keys: Optional[TagKeyList] + + +class UpdateAccountSettingsInput(ServiceRequest): + GroupLifecycleEventsDesiredStatus: Optional[GroupLifecycleEventsDesiredStatus] + + +class UpdateAccountSettingsOutput(TypedDict, total=False): + AccountSettings: Optional[AccountSettings] + + +class UpdateGroupInput(ServiceRequest): + GroupName: Optional[GroupName] + Group: Optional[GroupStringV2] + Description: Optional[Description] + Criticality: Optional[Criticality] + Owner: Optional[Owner] + DisplayName: Optional[DisplayName] + + +class UpdateGroupOutput(TypedDict, total=False): + Group: Optional[Group] + + +class UpdateGroupQueryInput(ServiceRequest): + GroupName: Optional[GroupName] + Group: Optional[GroupString] + ResourceQuery: ResourceQuery + + +class UpdateGroupQueryOutput(TypedDict, total=False): + GroupQuery: Optional[GroupQuery] + + +class ResourceGroupsApi: + service = "resource-groups" + version = "2017-11-27" + + @handler("CancelTagSyncTask") + def cancel_tag_sync_task( + self, context: RequestContext, task_arn: TagSyncTaskArn, **kwargs + ) -> None: + raise NotImplementedError + + @handler("CreateGroup") + def create_group( + self, + context: RequestContext, + name: CreateGroupName, + description: Description | None = None, + resource_query: ResourceQuery | None = None, + tags: Tags | None = None, + configuration: GroupConfigurationList | None = None, + criticality: Criticality | None = None, + owner: Owner | None = None, + display_name: DisplayName | None = None, + **kwargs, + ) -> CreateGroupOutput: + raise NotImplementedError + + @handler("DeleteGroup") + def delete_group( + self, + context: RequestContext, + group_name: GroupName | None = None, + group: GroupStringV2 | None = None, + **kwargs, + ) -> DeleteGroupOutput: + raise NotImplementedError + + @handler("GetAccountSettings") + def get_account_settings(self, context: RequestContext, **kwargs) -> GetAccountSettingsOutput: + raise NotImplementedError + + @handler("GetGroup") + def get_group( + self, + context: RequestContext, + group_name: GroupName | None = None, + group: GroupStringV2 | None = None, + **kwargs, + ) -> GetGroupOutput: + raise NotImplementedError + + @handler("GetGroupConfiguration") + def get_group_configuration( + self, context: RequestContext, group: GroupString | None = None, **kwargs + ) -> GetGroupConfigurationOutput: + raise NotImplementedError + + @handler("GetGroupQuery") + def get_group_query( + self, + context: RequestContext, + group_name: GroupName | None = None, + group: GroupString | None = None, + **kwargs, + ) -> GetGroupQueryOutput: + raise NotImplementedError + + @handler("GetTagSyncTask") + def get_tag_sync_task( + self, context: RequestContext, task_arn: TagSyncTaskArn, **kwargs + ) -> GetTagSyncTaskOutput: + raise NotImplementedError + + @handler("GetTags") + def get_tags(self, context: RequestContext, arn: GroupArnV2, **kwargs) -> GetTagsOutput: + raise NotImplementedError + + @handler("GroupResources") + def group_resources( + self, + context: RequestContext, + group: GroupStringV2, + resource_arns: 
ResourceArnList, + **kwargs, + ) -> GroupResourcesOutput: + raise NotImplementedError + + @handler("ListGroupResources") + def list_group_resources( + self, + context: RequestContext, + group_name: GroupName | None = None, + group: GroupStringV2 | None = None, + filters: ResourceFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListGroupResourcesOutput: + raise NotImplementedError + + @handler("ListGroupingStatuses") + def list_grouping_statuses( + self, + context: RequestContext, + group: GroupStringV2, + max_results: MaxResults | None = None, + filters: ListGroupingStatusesFilterList | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListGroupingStatusesOutput: + raise NotImplementedError + + @handler("ListGroups") + def list_groups( + self, + context: RequestContext, + filters: GroupFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListGroupsOutput: + raise NotImplementedError + + @handler("ListTagSyncTasks") + def list_tag_sync_tasks( + self, + context: RequestContext, + filters: ListTagSyncTasksFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListTagSyncTasksOutput: + raise NotImplementedError + + @handler("PutGroupConfiguration") + def put_group_configuration( + self, + context: RequestContext, + group: GroupString | None = None, + configuration: GroupConfigurationList | None = None, + **kwargs, + ) -> PutGroupConfigurationOutput: + raise NotImplementedError + + @handler("SearchResources") + def search_resources( + self, + context: RequestContext, + resource_query: ResourceQuery, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> SearchResourcesOutput: + raise NotImplementedError + + @handler("StartTagSyncTask") + def start_tag_sync_task( + self, + context: RequestContext, + group: GroupStringV2, + role_arn: RoleArn, + tag_key: TagKey | None = None, + tag_value: TagValue | None = None, + resource_query: ResourceQuery | None = None, + **kwargs, + ) -> StartTagSyncTaskOutput: + raise NotImplementedError + + @handler("Tag") + def tag(self, context: RequestContext, arn: GroupArnV2, tags: Tags, **kwargs) -> TagOutput: + raise NotImplementedError + + @handler("UngroupResources") + def ungroup_resources( + self, + context: RequestContext, + group: GroupStringV2, + resource_arns: ResourceArnList, + **kwargs, + ) -> UngroupResourcesOutput: + raise NotImplementedError + + @handler("Untag") + def untag( + self, context: RequestContext, arn: GroupArnV2, keys: TagKeyList, **kwargs + ) -> UntagOutput: + raise NotImplementedError + + @handler("UpdateAccountSettings") + def update_account_settings( + self, + context: RequestContext, + group_lifecycle_events_desired_status: GroupLifecycleEventsDesiredStatus | None = None, + **kwargs, + ) -> UpdateAccountSettingsOutput: + raise NotImplementedError + + @handler("UpdateGroup") + def update_group( + self, + context: RequestContext, + group_name: GroupName | None = None, + group: GroupStringV2 | None = None, + description: Description | None = None, + criticality: Criticality | None = None, + owner: Owner | None = None, + display_name: DisplayName | None = None, + **kwargs, + ) -> UpdateGroupOutput: + raise NotImplementedError + + @handler("UpdateGroupQuery") + def update_group_query( + self, + context: RequestContext, + resource_query: ResourceQuery, + group_name: 
GroupName | None = None, + group: GroupString | None = None, + **kwargs, + ) -> UpdateGroupQueryOutput: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/resourcegroupstaggingapi/__init__.py b/localstack-core/localstack/aws/api/resourcegroupstaggingapi/__init__.py new file mode 100644 index 0000000000000..cc496818d3120 --- /dev/null +++ b/localstack-core/localstack/aws/api/resourcegroupstaggingapi/__init__.py @@ -0,0 +1,325 @@ +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AmazonResourceType = str +ComplianceStatus = bool +ErrorMessage = str +ExceptionMessage = str +ExcludeCompliantResources = bool +IncludeComplianceDetails = bool +LastUpdated = str +MaxResultsGetComplianceSummary = int +PaginationToken = str +Region = str +ResourceARN = str +ResourcesPerPage = int +S3Bucket = str +S3Location = str +Status = str +StatusCode = int +TagKey = str +TagValue = str +TagsPerPage = int +TargetId = str + + +class ErrorCode(StrEnum): + InternalServiceException = "InternalServiceException" + InvalidParameterException = "InvalidParameterException" + + +class GroupByAttribute(StrEnum): + TARGET_ID = "TARGET_ID" + REGION = "REGION" + RESOURCE_TYPE = "RESOURCE_TYPE" + + +class TargetIdType(StrEnum): + ACCOUNT = "ACCOUNT" + OU = "OU" + ROOT = "ROOT" + + +class ConcurrentModificationException(ServiceException): + code: str = "ConcurrentModificationException" + sender_fault: bool = False + status_code: int = 400 + + +class ConstraintViolationException(ServiceException): + code: str = "ConstraintViolationException" + sender_fault: bool = False + status_code: int = 400 + + +class InternalServiceException(ServiceException): + code: str = "InternalServiceException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidParameterException(ServiceException): + code: str = "InvalidParameterException" + sender_fault: bool = False + status_code: int = 400 + + +class PaginationTokenExpiredException(ServiceException): + code: str = "PaginationTokenExpiredException" + sender_fault: bool = False + status_code: int = 400 + + +class ThrottledException(ServiceException): + code: str = "ThrottledException" + sender_fault: bool = False + status_code: int = 400 + + +TagKeyList = List[TagKey] + + +class ComplianceDetails(TypedDict, total=False): + NoncompliantKeys: Optional[TagKeyList] + KeysWithNoncompliantValues: Optional[TagKeyList] + ComplianceStatus: Optional[ComplianceStatus] + + +class DescribeReportCreationInput(ServiceRequest): + pass + + +class DescribeReportCreationOutput(TypedDict, total=False): + Status: Optional[Status] + S3Location: Optional[S3Location] + ErrorMessage: Optional[ErrorMessage] + + +class FailureInfo(TypedDict, total=False): + StatusCode: Optional[StatusCode] + ErrorCode: Optional[ErrorCode] + ErrorMessage: Optional[ErrorMessage] + + +FailedResourcesMap = Dict[ResourceARN, FailureInfo] +GroupBy = List[GroupByAttribute] +TagKeyFilterList = List[TagKey] +ResourceTypeFilterList = List[AmazonResourceType] +RegionFilterList = List[Region] +TargetIdFilterList = List[TargetId] + + +class GetComplianceSummaryInput(ServiceRequest): + TargetIdFilters: Optional[TargetIdFilterList] + RegionFilters: Optional[RegionFilterList] + ResourceTypeFilters: Optional[ResourceTypeFilterList] + TagKeyFilters: Optional[TagKeyFilterList] + GroupBy: Optional[GroupBy] + MaxResults: Optional[MaxResultsGetComplianceSummary] + PaginationToken: 
Optional[PaginationToken] + + +NonCompliantResources = int + + +class Summary(TypedDict, total=False): + LastUpdated: Optional[LastUpdated] + TargetId: Optional[TargetId] + TargetIdType: Optional[TargetIdType] + Region: Optional[Region] + ResourceType: Optional[AmazonResourceType] + NonCompliantResources: Optional[NonCompliantResources] + + +SummaryList = List[Summary] + + +class GetComplianceSummaryOutput(TypedDict, total=False): + SummaryList: Optional[SummaryList] + PaginationToken: Optional[PaginationToken] + + +ResourceARNListForGet = List[ResourceARN] +TagValueList = List[TagValue] + + +class TagFilter(TypedDict, total=False): + Key: Optional[TagKey] + Values: Optional[TagValueList] + + +TagFilterList = List[TagFilter] + + +class GetResourcesInput(ServiceRequest): + PaginationToken: Optional[PaginationToken] + TagFilters: Optional[TagFilterList] + ResourcesPerPage: Optional[ResourcesPerPage] + TagsPerPage: Optional[TagsPerPage] + ResourceTypeFilters: Optional[ResourceTypeFilterList] + IncludeComplianceDetails: Optional[IncludeComplianceDetails] + ExcludeCompliantResources: Optional[ExcludeCompliantResources] + ResourceARNList: Optional[ResourceARNListForGet] + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] + + +class ResourceTagMapping(TypedDict, total=False): + ResourceARN: Optional[ResourceARN] + Tags: Optional[TagList] + ComplianceDetails: Optional[ComplianceDetails] + + +ResourceTagMappingList = List[ResourceTagMapping] + + +class GetResourcesOutput(TypedDict, total=False): + PaginationToken: Optional[PaginationToken] + ResourceTagMappingList: Optional[ResourceTagMappingList] + + +class GetTagKeysInput(ServiceRequest): + PaginationToken: Optional[PaginationToken] + + +class GetTagKeysOutput(TypedDict, total=False): + PaginationToken: Optional[PaginationToken] + TagKeys: Optional[TagKeyList] + + +class GetTagValuesInput(ServiceRequest): + PaginationToken: Optional[PaginationToken] + Key: TagKey + + +TagValuesOutputList = List[TagValue] + + +class GetTagValuesOutput(TypedDict, total=False): + PaginationToken: Optional[PaginationToken] + TagValues: Optional[TagValuesOutputList] + + +ResourceARNListForTagUntag = List[ResourceARN] + + +class StartReportCreationInput(ServiceRequest): + S3Bucket: S3Bucket + + +class StartReportCreationOutput(TypedDict, total=False): + pass + + +TagKeyListForUntag = List[TagKey] +TagMap = Dict[TagKey, TagValue] + + +class TagResourcesInput(ServiceRequest): + ResourceARNList: ResourceARNListForTagUntag + Tags: TagMap + + +class TagResourcesOutput(TypedDict, total=False): + FailedResourcesMap: Optional[FailedResourcesMap] + + +class UntagResourcesInput(ServiceRequest): + ResourceARNList: ResourceARNListForTagUntag + TagKeys: TagKeyListForUntag + + +class UntagResourcesOutput(TypedDict, total=False): + FailedResourcesMap: Optional[FailedResourcesMap] + + +class ResourcegroupstaggingapiApi: + service = "resourcegroupstaggingapi" + version = "2017-01-26" + + @handler("DescribeReportCreation") + def describe_report_creation( + self, context: RequestContext, **kwargs + ) -> DescribeReportCreationOutput: + raise NotImplementedError + + @handler("GetComplianceSummary") + def get_compliance_summary( + self, + context: RequestContext, + target_id_filters: TargetIdFilterList | None = None, + region_filters: RegionFilterList | None = None, + resource_type_filters: ResourceTypeFilterList | None = None, + tag_key_filters: TagKeyFilterList | None = None, + group_by: GroupBy | None = None, + max_results: 
MaxResultsGetComplianceSummary | None = None, + pagination_token: PaginationToken | None = None, + **kwargs, + ) -> GetComplianceSummaryOutput: + raise NotImplementedError + + @handler("GetResources") + def get_resources( + self, + context: RequestContext, + pagination_token: PaginationToken | None = None, + tag_filters: TagFilterList | None = None, + resources_per_page: ResourcesPerPage | None = None, + tags_per_page: TagsPerPage | None = None, + resource_type_filters: ResourceTypeFilterList | None = None, + include_compliance_details: IncludeComplianceDetails | None = None, + exclude_compliant_resources: ExcludeCompliantResources | None = None, + resource_arn_list: ResourceARNListForGet | None = None, + **kwargs, + ) -> GetResourcesOutput: + raise NotImplementedError + + @handler("GetTagKeys") + def get_tag_keys( + self, context: RequestContext, pagination_token: PaginationToken | None = None, **kwargs + ) -> GetTagKeysOutput: + raise NotImplementedError + + @handler("GetTagValues") + def get_tag_values( + self, + context: RequestContext, + key: TagKey, + pagination_token: PaginationToken | None = None, + **kwargs, + ) -> GetTagValuesOutput: + raise NotImplementedError + + @handler("StartReportCreation") + def start_report_creation( + self, context: RequestContext, s3_bucket: S3Bucket, **kwargs + ) -> StartReportCreationOutput: + raise NotImplementedError + + @handler("TagResources") + def tag_resources( + self, + context: RequestContext, + resource_arn_list: ResourceARNListForTagUntag, + tags: TagMap, + **kwargs, + ) -> TagResourcesOutput: + raise NotImplementedError + + @handler("UntagResources") + def untag_resources( + self, + context: RequestContext, + resource_arn_list: ResourceARNListForTagUntag, + tag_keys: TagKeyListForUntag, + **kwargs, + ) -> UntagResourcesOutput: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/route53/__init__.py b/localstack-core/localstack/aws/api/route53/__init__.py new file mode 100644 index 0000000000000..a2c3b810aa20b --- /dev/null +++ b/localstack-core/localstack/aws/api/route53/__init__.py @@ -0,0 +1,2557 @@ +from datetime import datetime +from enum import StrEnum +from typing import List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +ARN = str +AWSAccountID = str +AWSRegion = str +AlarmName = str +AliasHealthEnabled = bool +AssociateVPCComment = str +Bias = int +ChangeId = str +Cidr = str +CidrLocationNameDefaultAllowed = str +CidrLocationNameDefaultNotAllowed = str +CidrNonce = str +CloudWatchLogsLogGroupArn = str +CollectionName = str +DNSName = str +DNSRCode = str +DimensionField = str +Disabled = bool +DisassociateVPCComment = str +EnableSNI = bool +ErrorMessage = str +EvaluationPeriods = int +FailureThreshold = int +FullyQualifiedDomainName = str +GeoLocationContinentCode = str +GeoLocationContinentName = str +GeoLocationCountryCode = str +GeoLocationCountryName = str +GeoLocationSubdivisionCode = str +GeoLocationSubdivisionName = str +HealthCheckId = str +HealthCheckNonce = str +HealthThreshold = int +HostedZoneOwningService = str +IPAddress = str +IPAddressCidr = str +Inverted = bool +IsPrivateZone = bool +Latitude = str +LocalZoneGroup = str +Longitude = str +MaxResults = str +MeasureLatency = bool +Message = str +MetricName = str +Nameserver = str +Namespace = str +Nonce = str +PageMarker = str +PageMaxItems = str +PageTruncated = bool +PaginationToken = str +Period = int +Port = int +QueryLoggingConfigId = str +RData = str +RecordDataEntry = str 
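+# --- Illustrative note (editor's sketch, not part of the generated file) -----
+# Every generated API module in this diff follows the same layout: scalar type
+# aliases (as above), StrEnum value sets, ServiceException subclasses, TypedDict
+# request/response shapes, and finally an Api class whose @handler methods raise
+# NotImplementedError until a provider overrides them. Assuming this module also
+# ends in a Route53Api class (mirroring ResourceGroupsApi above; the class name
+# and handler signature here are assumptions for illustration), a provider stub
+# would look roughly like:
+#
+#     class Route53Provider(Route53Api):
+#         def get_hosted_zone(self, context: RequestContext, id: ResourceId, **kwargs):
+#             ...  # look up the zone in the emulator's store and return the shape
+# ------------------------------------------------------------------------------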
+RequestInterval = int +ResourceDescription = str +ResourceId = str +ResourcePath = str +ResourceRecordSetIdentifier = str +ResourceRecordSetMultiValueAnswer = bool +ResourceURI = str +RoutingControlArn = str +SearchString = str +ServeSignature = str +ServicePrincipal = str +SigningKeyInteger = int +SigningKeyName = str +SigningKeyStatus = str +SigningKeyStatusMessage = str +SigningKeyString = str +SigningKeyTag = int +Status = str +SubnetMask = str +TagKey = str +TagResourceId = str +TagValue = str +Threshold = float +TrafficPolicyComment = str +TrafficPolicyDocument = str +TrafficPolicyId = str +TrafficPolicyInstanceCount = int +TrafficPolicyInstanceId = str +TrafficPolicyInstanceState = str +TrafficPolicyName = str +TrafficPolicyVersion = int +TrafficPolicyVersionMarker = str +TransportProtocol = str +UUID = str +VPCId = str + + +class AccountLimitType(StrEnum): + MAX_HEALTH_CHECKS_BY_OWNER = "MAX_HEALTH_CHECKS_BY_OWNER" + MAX_HOSTED_ZONES_BY_OWNER = "MAX_HOSTED_ZONES_BY_OWNER" + MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER = "MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER" + MAX_REUSABLE_DELEGATION_SETS_BY_OWNER = "MAX_REUSABLE_DELEGATION_SETS_BY_OWNER" + MAX_TRAFFIC_POLICIES_BY_OWNER = "MAX_TRAFFIC_POLICIES_BY_OWNER" + + +class ChangeAction(StrEnum): + CREATE = "CREATE" + DELETE = "DELETE" + UPSERT = "UPSERT" + + +class ChangeStatus(StrEnum): + PENDING = "PENDING" + INSYNC = "INSYNC" + + +class CidrCollectionChangeAction(StrEnum): + PUT = "PUT" + DELETE_IF_EXISTS = "DELETE_IF_EXISTS" + + +class CloudWatchRegion(StrEnum): + us_east_1 = "us-east-1" + us_east_2 = "us-east-2" + us_west_1 = "us-west-1" + us_west_2 = "us-west-2" + ca_central_1 = "ca-central-1" + eu_central_1 = "eu-central-1" + eu_central_2 = "eu-central-2" + eu_west_1 = "eu-west-1" + eu_west_2 = "eu-west-2" + eu_west_3 = "eu-west-3" + ap_east_1 = "ap-east-1" + me_south_1 = "me-south-1" + me_central_1 = "me-central-1" + ap_south_1 = "ap-south-1" + ap_south_2 = "ap-south-2" + ap_southeast_1 = "ap-southeast-1" + ap_southeast_2 = "ap-southeast-2" + ap_southeast_3 = "ap-southeast-3" + ap_northeast_1 = "ap-northeast-1" + ap_northeast_2 = "ap-northeast-2" + ap_northeast_3 = "ap-northeast-3" + eu_north_1 = "eu-north-1" + sa_east_1 = "sa-east-1" + cn_northwest_1 = "cn-northwest-1" + cn_north_1 = "cn-north-1" + af_south_1 = "af-south-1" + eu_south_1 = "eu-south-1" + eu_south_2 = "eu-south-2" + us_gov_west_1 = "us-gov-west-1" + us_gov_east_1 = "us-gov-east-1" + us_iso_east_1 = "us-iso-east-1" + us_iso_west_1 = "us-iso-west-1" + us_isob_east_1 = "us-isob-east-1" + ap_southeast_4 = "ap-southeast-4" + il_central_1 = "il-central-1" + ca_west_1 = "ca-west-1" + ap_southeast_5 = "ap-southeast-5" + mx_central_1 = "mx-central-1" + us_isof_south_1 = "us-isof-south-1" + us_isof_east_1 = "us-isof-east-1" + ap_southeast_7 = "ap-southeast-7" + + +class ComparisonOperator(StrEnum): + GreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + GreaterThanThreshold = "GreaterThanThreshold" + LessThanThreshold = "LessThanThreshold" + LessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" + + +class HealthCheckRegion(StrEnum): + us_east_1 = "us-east-1" + us_west_1 = "us-west-1" + us_west_2 = "us-west-2" + eu_west_1 = "eu-west-1" + ap_southeast_1 = "ap-southeast-1" + ap_southeast_2 = "ap-southeast-2" + ap_northeast_1 = "ap-northeast-1" + sa_east_1 = "sa-east-1" + + +class HealthCheckType(StrEnum): + HTTP = "HTTP" + HTTPS = "HTTPS" + HTTP_STR_MATCH = "HTTP_STR_MATCH" + HTTPS_STR_MATCH = "HTTPS_STR_MATCH" + TCP = "TCP" + CALCULATED = "CALCULATED" + 
CLOUDWATCH_METRIC = "CLOUDWATCH_METRIC" + RECOVERY_CONTROL = "RECOVERY_CONTROL" + + +class HostedZoneLimitType(StrEnum): + MAX_RRSETS_BY_ZONE = "MAX_RRSETS_BY_ZONE" + MAX_VPCS_ASSOCIATED_BY_ZONE = "MAX_VPCS_ASSOCIATED_BY_ZONE" + + +class HostedZoneType(StrEnum): + PrivateHostedZone = "PrivateHostedZone" + + +class InsufficientDataHealthStatus(StrEnum): + Healthy = "Healthy" + Unhealthy = "Unhealthy" + LastKnownStatus = "LastKnownStatus" + + +class RRType(StrEnum): + SOA = "SOA" + A = "A" + TXT = "TXT" + NS = "NS" + CNAME = "CNAME" + MX = "MX" + NAPTR = "NAPTR" + PTR = "PTR" + SRV = "SRV" + SPF = "SPF" + AAAA = "AAAA" + CAA = "CAA" + DS = "DS" + TLSA = "TLSA" + SSHFP = "SSHFP" + SVCB = "SVCB" + HTTPS = "HTTPS" + + +class ResettableElementName(StrEnum): + FullyQualifiedDomainName = "FullyQualifiedDomainName" + Regions = "Regions" + ResourcePath = "ResourcePath" + ChildHealthChecks = "ChildHealthChecks" + + +class ResourceRecordSetFailover(StrEnum): + PRIMARY = "PRIMARY" + SECONDARY = "SECONDARY" + + +class ResourceRecordSetRegion(StrEnum): + us_east_1 = "us-east-1" + us_east_2 = "us-east-2" + us_west_1 = "us-west-1" + us_west_2 = "us-west-2" + ca_central_1 = "ca-central-1" + eu_west_1 = "eu-west-1" + eu_west_2 = "eu-west-2" + eu_west_3 = "eu-west-3" + eu_central_1 = "eu-central-1" + eu_central_2 = "eu-central-2" + ap_southeast_1 = "ap-southeast-1" + ap_southeast_2 = "ap-southeast-2" + ap_southeast_3 = "ap-southeast-3" + ap_northeast_1 = "ap-northeast-1" + ap_northeast_2 = "ap-northeast-2" + ap_northeast_3 = "ap-northeast-3" + eu_north_1 = "eu-north-1" + sa_east_1 = "sa-east-1" + cn_north_1 = "cn-north-1" + cn_northwest_1 = "cn-northwest-1" + ap_east_1 = "ap-east-1" + me_south_1 = "me-south-1" + me_central_1 = "me-central-1" + ap_south_1 = "ap-south-1" + ap_south_2 = "ap-south-2" + af_south_1 = "af-south-1" + eu_south_1 = "eu-south-1" + eu_south_2 = "eu-south-2" + ap_southeast_4 = "ap-southeast-4" + il_central_1 = "il-central-1" + ca_west_1 = "ca-west-1" + ap_southeast_5 = "ap-southeast-5" + mx_central_1 = "mx-central-1" + ap_southeast_7 = "ap-southeast-7" + us_gov_east_1 = "us-gov-east-1" + us_gov_west_1 = "us-gov-west-1" + + +class ReusableDelegationSetLimitType(StrEnum): + MAX_ZONES_BY_REUSABLE_DELEGATION_SET = "MAX_ZONES_BY_REUSABLE_DELEGATION_SET" + + +class Statistic(StrEnum): + Average = "Average" + Sum = "Sum" + SampleCount = "SampleCount" + Maximum = "Maximum" + Minimum = "Minimum" + + +class TagResourceType(StrEnum): + healthcheck = "healthcheck" + hostedzone = "hostedzone" + + +class VPCRegion(StrEnum): + us_east_1 = "us-east-1" + us_east_2 = "us-east-2" + us_west_1 = "us-west-1" + us_west_2 = "us-west-2" + eu_west_1 = "eu-west-1" + eu_west_2 = "eu-west-2" + eu_west_3 = "eu-west-3" + eu_central_1 = "eu-central-1" + eu_central_2 = "eu-central-2" + ap_east_1 = "ap-east-1" + me_south_1 = "me-south-1" + us_gov_west_1 = "us-gov-west-1" + us_gov_east_1 = "us-gov-east-1" + us_iso_east_1 = "us-iso-east-1" + us_iso_west_1 = "us-iso-west-1" + us_isob_east_1 = "us-isob-east-1" + me_central_1 = "me-central-1" + ap_southeast_1 = "ap-southeast-1" + ap_southeast_2 = "ap-southeast-2" + ap_southeast_3 = "ap-southeast-3" + ap_south_1 = "ap-south-1" + ap_south_2 = "ap-south-2" + ap_northeast_1 = "ap-northeast-1" + ap_northeast_2 = "ap-northeast-2" + ap_northeast_3 = "ap-northeast-3" + eu_north_1 = "eu-north-1" + sa_east_1 = "sa-east-1" + ca_central_1 = "ca-central-1" + cn_north_1 = "cn-north-1" + cn_northwest_1 = "cn-northwest-1" + af_south_1 = "af-south-1" + eu_south_1 = "eu-south-1" + eu_south_2 = 
"eu-south-2" + ap_southeast_4 = "ap-southeast-4" + il_central_1 = "il-central-1" + ca_west_1 = "ca-west-1" + ap_southeast_5 = "ap-southeast-5" + mx_central_1 = "mx-central-1" + us_isof_south_1 = "us-isof-south-1" + us_isof_east_1 = "us-isof-east-1" + ap_southeast_7 = "ap-southeast-7" + + +class CidrBlockInUseException(ServiceException): + code: str = "CidrBlockInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class CidrCollectionAlreadyExistsException(ServiceException): + code: str = "CidrCollectionAlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + + +class CidrCollectionInUseException(ServiceException): + code: str = "CidrCollectionInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class CidrCollectionVersionMismatchException(ServiceException): + code: str = "CidrCollectionVersionMismatchException" + sender_fault: bool = False + status_code: int = 409 + + +class ConcurrentModification(ServiceException): + code: str = "ConcurrentModification" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictingDomainExists(ServiceException): + code: str = "ConflictingDomainExists" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictingTypes(ServiceException): + code: str = "ConflictingTypes" + sender_fault: bool = False + status_code: int = 400 + + +class DNSSECNotFound(ServiceException): + code: str = "DNSSECNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class DelegationSetAlreadyCreated(ServiceException): + code: str = "DelegationSetAlreadyCreated" + sender_fault: bool = False + status_code: int = 400 + + +class DelegationSetAlreadyReusable(ServiceException): + code: str = "DelegationSetAlreadyReusable" + sender_fault: bool = False + status_code: int = 400 + + +class DelegationSetInUse(ServiceException): + code: str = "DelegationSetInUse" + sender_fault: bool = False + status_code: int = 400 + + +class DelegationSetNotAvailable(ServiceException): + code: str = "DelegationSetNotAvailable" + sender_fault: bool = False + status_code: int = 400 + + +class DelegationSetNotReusable(ServiceException): + code: str = "DelegationSetNotReusable" + sender_fault: bool = False + status_code: int = 400 + + +class HealthCheckAlreadyExists(ServiceException): + code: str = "HealthCheckAlreadyExists" + sender_fault: bool = False + status_code: int = 409 + + +class HealthCheckInUse(ServiceException): + code: str = "HealthCheckInUse" + sender_fault: bool = False + status_code: int = 400 + + +class HealthCheckVersionMismatch(ServiceException): + code: str = "HealthCheckVersionMismatch" + sender_fault: bool = False + status_code: int = 409 + + +class HostedZoneAlreadyExists(ServiceException): + code: str = "HostedZoneAlreadyExists" + sender_fault: bool = False + status_code: int = 409 + + +class HostedZoneNotEmpty(ServiceException): + code: str = "HostedZoneNotEmpty" + sender_fault: bool = False + status_code: int = 400 + + +class HostedZoneNotFound(ServiceException): + code: str = "HostedZoneNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class HostedZoneNotPrivate(ServiceException): + code: str = "HostedZoneNotPrivate" + sender_fault: bool = False + status_code: int = 400 + + +class HostedZonePartiallyDelegated(ServiceException): + code: str = "HostedZonePartiallyDelegated" + sender_fault: bool = False + status_code: int = 400 + + +class IncompatibleVersion(ServiceException): + code: str = "IncompatibleVersion" + sender_fault: bool = False + status_code: int = 
400 + + +class InsufficientCloudWatchLogsResourcePolicy(ServiceException): + code: str = "InsufficientCloudWatchLogsResourcePolicy" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidArgument(ServiceException): + code: str = "InvalidArgument" + sender_fault: bool = False + status_code: int = 400 + + +ErrorMessages = List[ErrorMessage] + + +class InvalidChangeBatch(ServiceException): + code: str = "InvalidChangeBatch" + sender_fault: bool = False + status_code: int = 400 + messages: Optional[ErrorMessages] + + +class InvalidDomainName(ServiceException): + code: str = "InvalidDomainName" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidInput(ServiceException): + code: str = "InvalidInput" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidKMSArn(ServiceException): + code: str = "InvalidKMSArn" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidKeySigningKeyName(ServiceException): + code: str = "InvalidKeySigningKeyName" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidKeySigningKeyStatus(ServiceException): + code: str = "InvalidKeySigningKeyStatus" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidPaginationToken(ServiceException): + code: str = "InvalidPaginationToken" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidSigningStatus(ServiceException): + code: str = "InvalidSigningStatus" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTrafficPolicyDocument(ServiceException): + code: str = "InvalidTrafficPolicyDocument" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidVPCId(ServiceException): + code: str = "InvalidVPCId" + sender_fault: bool = False + status_code: int = 400 + + +class KeySigningKeyAlreadyExists(ServiceException): + code: str = "KeySigningKeyAlreadyExists" + sender_fault: bool = False + status_code: int = 409 + + +class KeySigningKeyInParentDSRecord(ServiceException): + code: str = "KeySigningKeyInParentDSRecord" + sender_fault: bool = False + status_code: int = 400 + + +class KeySigningKeyInUse(ServiceException): + code: str = "KeySigningKeyInUse" + sender_fault: bool = False + status_code: int = 400 + + +class KeySigningKeyWithActiveStatusNotFound(ServiceException): + code: str = "KeySigningKeyWithActiveStatusNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class LastVPCAssociation(ServiceException): + code: str = "LastVPCAssociation" + sender_fault: bool = False + status_code: int = 400 + + +class LimitsExceeded(ServiceException): + code: str = "LimitsExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchChange(ServiceException): + code: str = "NoSuchChange" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchCidrCollectionException(ServiceException): + code: str = "NoSuchCidrCollectionException" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchCidrLocationException(ServiceException): + code: str = "NoSuchCidrLocationException" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchCloudWatchLogsLogGroup(ServiceException): + code: str = "NoSuchCloudWatchLogsLogGroup" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchDelegationSet(ServiceException): + code: str = "NoSuchDelegationSet" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchGeoLocation(ServiceException): + code: str = "NoSuchGeoLocation" + sender_fault: bool = False + 
status_code: int = 404 + + +class NoSuchHealthCheck(ServiceException): + code: str = "NoSuchHealthCheck" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchHostedZone(ServiceException): + code: str = "NoSuchHostedZone" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchKeySigningKey(ServiceException): + code: str = "NoSuchKeySigningKey" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchQueryLoggingConfig(ServiceException): + code: str = "NoSuchQueryLoggingConfig" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchTrafficPolicy(ServiceException): + code: str = "NoSuchTrafficPolicy" + sender_fault: bool = False + status_code: int = 404 + + +class NoSuchTrafficPolicyInstance(ServiceException): + code: str = "NoSuchTrafficPolicyInstance" + sender_fault: bool = False + status_code: int = 404 + + +class NotAuthorizedException(ServiceException): + code: str = "NotAuthorizedException" + sender_fault: bool = False + status_code: int = 401 + + +class PriorRequestNotComplete(ServiceException): + code: str = "PriorRequestNotComplete" + sender_fault: bool = False + status_code: int = 400 + + +class PublicZoneVPCAssociation(ServiceException): + code: str = "PublicZoneVPCAssociation" + sender_fault: bool = False + status_code: int = 400 + + +class QueryLoggingConfigAlreadyExists(ServiceException): + code: str = "QueryLoggingConfigAlreadyExists" + sender_fault: bool = False + status_code: int = 409 + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyHealthChecks(ServiceException): + code: str = "TooManyHealthChecks" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyHostedZones(ServiceException): + code: str = "TooManyHostedZones" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyKeySigningKeys(ServiceException): + code: str = "TooManyKeySigningKeys" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTrafficPolicies(ServiceException): + code: str = "TooManyTrafficPolicies" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTrafficPolicyInstances(ServiceException): + code: str = "TooManyTrafficPolicyInstances" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTrafficPolicyVersionsForCurrentPolicy(ServiceException): + code: str = "TooManyTrafficPolicyVersionsForCurrentPolicy" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyVPCAssociationAuthorizations(ServiceException): + code: str = "TooManyVPCAssociationAuthorizations" + sender_fault: bool = False + status_code: int = 400 + + +class TrafficPolicyAlreadyExists(ServiceException): + code: str = "TrafficPolicyAlreadyExists" + sender_fault: bool = False + status_code: int = 409 + + +class TrafficPolicyInUse(ServiceException): + code: str = "TrafficPolicyInUse" + sender_fault: bool = False + status_code: int = 400 + + +class TrafficPolicyInstanceAlreadyExists(ServiceException): + code: str = "TrafficPolicyInstanceAlreadyExists" + sender_fault: bool = False + status_code: int = 409 + + +class VPCAssociationAuthorizationNotFound(ServiceException): + code: str = "VPCAssociationAuthorizationNotFound" + sender_fault: bool = False + status_code: int = 404 + + +class VPCAssociationNotFound(ServiceException): + code: str = "VPCAssociationNotFound" + sender_fault: bool = False + status_code: int = 404 + + +LimitValue = int + + +class AccountLimit(TypedDict, 
total=False): + Type: AccountLimitType + Value: LimitValue + + +class ActivateKeySigningKeyRequest(ServiceRequest): + HostedZoneId: ResourceId + Name: SigningKeyName + + +TimeStamp = datetime + + +class ChangeInfo(TypedDict, total=False): + Id: ResourceId + Status: ChangeStatus + SubmittedAt: TimeStamp + Comment: Optional[ResourceDescription] + + +class ActivateKeySigningKeyResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +class AlarmIdentifier(TypedDict, total=False): + Region: CloudWatchRegion + Name: AlarmName + + +class AliasTarget(TypedDict, total=False): + HostedZoneId: ResourceId + DNSName: DNSName + EvaluateTargetHealth: AliasHealthEnabled + + +class VPC(TypedDict, total=False): + VPCRegion: Optional[VPCRegion] + VPCId: Optional[VPCId] + + +class AssociateVPCWithHostedZoneRequest(ServiceRequest): + HostedZoneId: ResourceId + VPC: VPC + Comment: Optional[AssociateVPCComment] + + +class AssociateVPCWithHostedZoneResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +class Coordinates(TypedDict, total=False): + Latitude: Latitude + Longitude: Longitude + + +class GeoProximityLocation(TypedDict, total=False): + AWSRegion: Optional[AWSRegion] + LocalZoneGroup: Optional[LocalZoneGroup] + Coordinates: Optional[Coordinates] + Bias: Optional[Bias] + + +class CidrRoutingConfig(TypedDict, total=False): + CollectionId: UUID + LocationName: CidrLocationNameDefaultAllowed + + +class ResourceRecord(TypedDict, total=False): + Value: RData + + +ResourceRecords = List[ResourceRecord] +TTL = int + + +class GeoLocation(TypedDict, total=False): + ContinentCode: Optional[GeoLocationContinentCode] + CountryCode: Optional[GeoLocationCountryCode] + SubdivisionCode: Optional[GeoLocationSubdivisionCode] + + +ResourceRecordSetWeight = int + + +class ResourceRecordSet(TypedDict, total=False): + Name: DNSName + Type: RRType + SetIdentifier: Optional[ResourceRecordSetIdentifier] + Weight: Optional[ResourceRecordSetWeight] + Region: Optional[ResourceRecordSetRegion] + GeoLocation: Optional[GeoLocation] + Failover: Optional[ResourceRecordSetFailover] + MultiValueAnswer: Optional[ResourceRecordSetMultiValueAnswer] + TTL: Optional[TTL] + ResourceRecords: Optional[ResourceRecords] + AliasTarget: Optional[AliasTarget] + HealthCheckId: Optional[HealthCheckId] + TrafficPolicyInstanceId: Optional[TrafficPolicyInstanceId] + CidrRoutingConfig: Optional[CidrRoutingConfig] + GeoProximityLocation: Optional[GeoProximityLocation] + + +class Change(TypedDict, total=False): + Action: ChangeAction + ResourceRecordSet: ResourceRecordSet + + +Changes = List[Change] + + +class ChangeBatch(TypedDict, total=False): + Comment: Optional[ResourceDescription] + Changes: Changes + + +CidrList = List[Cidr] + + +class CidrCollectionChange(TypedDict, total=False): + LocationName: CidrLocationNameDefaultNotAllowed + Action: CidrCollectionChangeAction + CidrList: CidrList + + +CidrCollectionChanges = List[CidrCollectionChange] +CollectionVersion = int + + +class ChangeCidrCollectionRequest(ServiceRequest): + Id: UUID + CollectionVersion: Optional[CollectionVersion] + Changes: CidrCollectionChanges + + +class ChangeCidrCollectionResponse(TypedDict, total=False): + Id: ChangeId + + +class ChangeResourceRecordSetsRequest(ServiceRequest): + HostedZoneId: ResourceId + ChangeBatch: ChangeBatch + + +class ChangeResourceRecordSetsResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +TagKeyList = List[TagKey] + + +class Tag(TypedDict, total=False): + Key: Optional[TagKey] + Value: Optional[TagValue] + + +TagList = 
List[Tag] + + +class ChangeTagsForResourceRequest(ServiceRequest): + ResourceType: TagResourceType + ResourceId: TagResourceId + AddTags: Optional[TagList] + RemoveTagKeys: Optional[TagKeyList] + + +class ChangeTagsForResourceResponse(TypedDict, total=False): + pass + + +CheckerIpRanges = List[IPAddressCidr] +ChildHealthCheckList = List[HealthCheckId] + + +class CidrBlockSummary(TypedDict, total=False): + CidrBlock: Optional[Cidr] + LocationName: Optional[CidrLocationNameDefaultNotAllowed] + + +CidrBlockSummaries = List[CidrBlockSummary] + + +class CidrCollection(TypedDict, total=False): + Arn: Optional[ARN] + Id: Optional[UUID] + Name: Optional[CollectionName] + Version: Optional[CollectionVersion] + + +class Dimension(TypedDict, total=False): + Name: DimensionField + Value: DimensionField + + +DimensionList = List[Dimension] + + +class CloudWatchAlarmConfiguration(TypedDict, total=False): + EvaluationPeriods: EvaluationPeriods + Threshold: Threshold + ComparisonOperator: ComparisonOperator + Period: Period + MetricName: MetricName + Namespace: Namespace + Statistic: Statistic + Dimensions: Optional[DimensionList] + + +class CollectionSummary(TypedDict, total=False): + Arn: Optional[ARN] + Id: Optional[UUID] + Name: Optional[CollectionName] + Version: Optional[CollectionVersion] + + +CollectionSummaries = List[CollectionSummary] + + +class CreateCidrCollectionRequest(ServiceRequest): + Name: CollectionName + CallerReference: CidrNonce + + +class CreateCidrCollectionResponse(TypedDict, total=False): + Collection: Optional[CidrCollection] + Location: Optional[ResourceURI] + + +HealthCheckRegionList = List[HealthCheckRegion] + + +class HealthCheckConfig(TypedDict, total=False): + IPAddress: Optional[IPAddress] + Port: Optional[Port] + Type: HealthCheckType + ResourcePath: Optional[ResourcePath] + FullyQualifiedDomainName: Optional[FullyQualifiedDomainName] + SearchString: Optional[SearchString] + RequestInterval: Optional[RequestInterval] + FailureThreshold: Optional[FailureThreshold] + MeasureLatency: Optional[MeasureLatency] + Inverted: Optional[Inverted] + Disabled: Optional[Disabled] + HealthThreshold: Optional[HealthThreshold] + ChildHealthChecks: Optional[ChildHealthCheckList] + EnableSNI: Optional[EnableSNI] + Regions: Optional[HealthCheckRegionList] + AlarmIdentifier: Optional[AlarmIdentifier] + InsufficientDataHealthStatus: Optional[InsufficientDataHealthStatus] + RoutingControlArn: Optional[RoutingControlArn] + + +class CreateHealthCheckRequest(ServiceRequest): + CallerReference: HealthCheckNonce + HealthCheckConfig: HealthCheckConfig + + +HealthCheckVersion = int + + +class LinkedService(TypedDict, total=False): + ServicePrincipal: Optional[ServicePrincipal] + Description: Optional[ResourceDescription] + + +class HealthCheck(TypedDict, total=False): + Id: HealthCheckId + CallerReference: HealthCheckNonce + LinkedService: Optional[LinkedService] + HealthCheckConfig: HealthCheckConfig + HealthCheckVersion: HealthCheckVersion + CloudWatchAlarmConfiguration: Optional[CloudWatchAlarmConfiguration] + + +class CreateHealthCheckResponse(TypedDict, total=False): + HealthCheck: HealthCheck + Location: ResourceURI + + +class HostedZoneConfig(TypedDict, total=False): + Comment: Optional[ResourceDescription] + PrivateZone: Optional[IsPrivateZone] + + +class CreateHostedZoneRequest(ServiceRequest): + Name: DNSName + VPC: Optional[VPC] + CallerReference: Nonce + HostedZoneConfig: Optional[HostedZoneConfig] + DelegationSetId: Optional[ResourceId] + + +DelegationSetNameServers = List[DNSName] + + 
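+# Illustrative sketch (editor's note, not generated code): because these shapes
+# are plain TypedDicts, a provider can assemble responses as ordinary dicts.
+# Using the CreateHostedZoneResponse shape defined just below (all values are
+# made up for the example):
+#
+#     response = CreateHostedZoneResponse(
+#         HostedZone=HostedZone(
+#             Id="/hostedzone/Z123", Name="example.com.", CallerReference="ref-1"
+#         ),
+#         ChangeInfo=ChangeInfo(
+#             Id="C123", Status=ChangeStatus.PENDING, SubmittedAt=datetime.now()
+#         ),
+#         DelegationSet=DelegationSet(NameServers=["ns-1.example.net"]),
+#         Location="/2013-04-01/hostedzone/Z123",
+#     )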
+class DelegationSet(TypedDict, total=False): + Id: Optional[ResourceId] + CallerReference: Optional[Nonce] + NameServers: DelegationSetNameServers + + +HostedZoneRRSetCount = int + + +class HostedZone(TypedDict, total=False): + Id: ResourceId + Name: DNSName + CallerReference: Nonce + Config: Optional[HostedZoneConfig] + ResourceRecordSetCount: Optional[HostedZoneRRSetCount] + LinkedService: Optional[LinkedService] + + +class CreateHostedZoneResponse(TypedDict, total=False): + HostedZone: HostedZone + ChangeInfo: ChangeInfo + DelegationSet: DelegationSet + VPC: Optional[VPC] + Location: ResourceURI + + +class CreateKeySigningKeyRequest(ServiceRequest): + CallerReference: Nonce + HostedZoneId: ResourceId + KeyManagementServiceArn: SigningKeyString + Name: SigningKeyName + Status: SigningKeyStatus + + +class KeySigningKey(TypedDict, total=False): + Name: Optional[SigningKeyName] + KmsArn: Optional[SigningKeyString] + Flag: Optional[SigningKeyInteger] + SigningAlgorithmMnemonic: Optional[SigningKeyString] + SigningAlgorithmType: Optional[SigningKeyInteger] + DigestAlgorithmMnemonic: Optional[SigningKeyString] + DigestAlgorithmType: Optional[SigningKeyInteger] + KeyTag: Optional[SigningKeyTag] + DigestValue: Optional[SigningKeyString] + PublicKey: Optional[SigningKeyString] + DSRecord: Optional[SigningKeyString] + DNSKEYRecord: Optional[SigningKeyString] + Status: Optional[SigningKeyStatus] + StatusMessage: Optional[SigningKeyStatusMessage] + CreatedDate: Optional[TimeStamp] + LastModifiedDate: Optional[TimeStamp] + + +class CreateKeySigningKeyResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + KeySigningKey: KeySigningKey + Location: ResourceURI + + +class CreateQueryLoggingConfigRequest(ServiceRequest): + HostedZoneId: ResourceId + CloudWatchLogsLogGroupArn: CloudWatchLogsLogGroupArn + + +class QueryLoggingConfig(TypedDict, total=False): + Id: QueryLoggingConfigId + HostedZoneId: ResourceId + CloudWatchLogsLogGroupArn: CloudWatchLogsLogGroupArn + + +class CreateQueryLoggingConfigResponse(TypedDict, total=False): + QueryLoggingConfig: QueryLoggingConfig + Location: ResourceURI + + +class CreateReusableDelegationSetRequest(ServiceRequest): + CallerReference: Nonce + HostedZoneId: Optional[ResourceId] + + +class CreateReusableDelegationSetResponse(TypedDict, total=False): + DelegationSet: DelegationSet + Location: ResourceURI + + +class CreateTrafficPolicyInstanceRequest(ServiceRequest): + HostedZoneId: ResourceId + Name: DNSName + TTL: TTL + TrafficPolicyId: TrafficPolicyId + TrafficPolicyVersion: TrafficPolicyVersion + + +class TrafficPolicyInstance(TypedDict, total=False): + Id: TrafficPolicyInstanceId + HostedZoneId: ResourceId + Name: DNSName + TTL: TTL + State: TrafficPolicyInstanceState + Message: Message + TrafficPolicyId: TrafficPolicyId + TrafficPolicyVersion: TrafficPolicyVersion + TrafficPolicyType: RRType + + +class CreateTrafficPolicyInstanceResponse(TypedDict, total=False): + TrafficPolicyInstance: TrafficPolicyInstance + Location: ResourceURI + + +class CreateTrafficPolicyRequest(ServiceRequest): + Name: TrafficPolicyName + Document: TrafficPolicyDocument + Comment: Optional[TrafficPolicyComment] + + +class TrafficPolicy(TypedDict, total=False): + Id: TrafficPolicyId + Version: TrafficPolicyVersion + Name: TrafficPolicyName + Type: RRType + Document: TrafficPolicyDocument + Comment: Optional[TrafficPolicyComment] + + +class CreateTrafficPolicyResponse(TypedDict, total=False): + TrafficPolicy: TrafficPolicy + Location: ResourceURI + + +class 
CreateTrafficPolicyVersionRequest(ServiceRequest): + Id: TrafficPolicyId + Document: TrafficPolicyDocument + Comment: Optional[TrafficPolicyComment] + + +class CreateTrafficPolicyVersionResponse(TypedDict, total=False): + TrafficPolicy: TrafficPolicy + Location: ResourceURI + + +class CreateVPCAssociationAuthorizationRequest(ServiceRequest): + HostedZoneId: ResourceId + VPC: VPC + + +class CreateVPCAssociationAuthorizationResponse(TypedDict, total=False): + HostedZoneId: ResourceId + VPC: VPC + + +class DNSSECStatus(TypedDict, total=False): + ServeSignature: Optional[ServeSignature] + StatusMessage: Optional[SigningKeyStatusMessage] + + +class DeactivateKeySigningKeyRequest(ServiceRequest): + HostedZoneId: ResourceId + Name: SigningKeyName + + +class DeactivateKeySigningKeyResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +DelegationSets = List[DelegationSet] + + +class DeleteCidrCollectionRequest(ServiceRequest): + Id: UUID + + +class DeleteCidrCollectionResponse(TypedDict, total=False): + pass + + +class DeleteHealthCheckRequest(ServiceRequest): + HealthCheckId: HealthCheckId + + +class DeleteHealthCheckResponse(TypedDict, total=False): + pass + + +class DeleteHostedZoneRequest(ServiceRequest): + Id: ResourceId + + +class DeleteHostedZoneResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +class DeleteKeySigningKeyRequest(ServiceRequest): + HostedZoneId: ResourceId + Name: SigningKeyName + + +class DeleteKeySigningKeyResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +class DeleteQueryLoggingConfigRequest(ServiceRequest): + Id: QueryLoggingConfigId + + +class DeleteQueryLoggingConfigResponse(TypedDict, total=False): + pass + + +class DeleteReusableDelegationSetRequest(ServiceRequest): + Id: ResourceId + + +class DeleteReusableDelegationSetResponse(TypedDict, total=False): + pass + + +class DeleteTrafficPolicyInstanceRequest(ServiceRequest): + Id: TrafficPolicyInstanceId + + +class DeleteTrafficPolicyInstanceResponse(TypedDict, total=False): + pass + + +class DeleteTrafficPolicyRequest(ServiceRequest): + Id: TrafficPolicyId + Version: TrafficPolicyVersion + + +class DeleteTrafficPolicyResponse(TypedDict, total=False): + pass + + +class DeleteVPCAssociationAuthorizationRequest(ServiceRequest): + HostedZoneId: ResourceId + VPC: VPC + + +class DeleteVPCAssociationAuthorizationResponse(TypedDict, total=False): + pass + + +class DisableHostedZoneDNSSECRequest(ServiceRequest): + HostedZoneId: ResourceId + + +class DisableHostedZoneDNSSECResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +class DisassociateVPCFromHostedZoneRequest(ServiceRequest): + HostedZoneId: ResourceId + VPC: VPC + Comment: Optional[DisassociateVPCComment] + + +class DisassociateVPCFromHostedZoneResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +class EnableHostedZoneDNSSECRequest(ServiceRequest): + HostedZoneId: ResourceId + + +class EnableHostedZoneDNSSECResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +class GeoLocationDetails(TypedDict, total=False): + ContinentCode: Optional[GeoLocationContinentCode] + ContinentName: Optional[GeoLocationContinentName] + CountryCode: Optional[GeoLocationCountryCode] + CountryName: Optional[GeoLocationCountryName] + SubdivisionCode: Optional[GeoLocationSubdivisionCode] + SubdivisionName: Optional[GeoLocationSubdivisionName] + + +GeoLocationDetailsList = List[GeoLocationDetails] + + +class GetAccountLimitRequest(ServiceRequest): + Type: AccountLimitType + + +UsageCount = int + + +class 
GetAccountLimitResponse(TypedDict, total=False): + Limit: AccountLimit + Count: UsageCount + + +class GetChangeRequest(ServiceRequest): + Id: ChangeId + + +class GetChangeResponse(TypedDict, total=False): + ChangeInfo: ChangeInfo + + +class GetCheckerIpRangesRequest(ServiceRequest): + pass + + +class GetCheckerIpRangesResponse(TypedDict, total=False): + CheckerIpRanges: CheckerIpRanges + + +class GetDNSSECRequest(ServiceRequest): + HostedZoneId: ResourceId + + +KeySigningKeys = List[KeySigningKey] + + +class GetDNSSECResponse(TypedDict, total=False): + Status: DNSSECStatus + KeySigningKeys: KeySigningKeys + + +class GetGeoLocationRequest(ServiceRequest): + ContinentCode: Optional[GeoLocationContinentCode] + CountryCode: Optional[GeoLocationCountryCode] + SubdivisionCode: Optional[GeoLocationSubdivisionCode] + + +class GetGeoLocationResponse(TypedDict, total=False): + GeoLocationDetails: GeoLocationDetails + + +class GetHealthCheckCountRequest(ServiceRequest): + pass + + +HealthCheckCount = int + + +class GetHealthCheckCountResponse(TypedDict, total=False): + HealthCheckCount: HealthCheckCount + + +class GetHealthCheckLastFailureReasonRequest(ServiceRequest): + HealthCheckId: HealthCheckId + + +class StatusReport(TypedDict, total=False): + Status: Optional[Status] + CheckedTime: Optional[TimeStamp] + + +class HealthCheckObservation(TypedDict, total=False): + Region: Optional[HealthCheckRegion] + IPAddress: Optional[IPAddress] + StatusReport: Optional[StatusReport] + + +HealthCheckObservations = List[HealthCheckObservation] + + +class GetHealthCheckLastFailureReasonResponse(TypedDict, total=False): + HealthCheckObservations: HealthCheckObservations + + +class GetHealthCheckRequest(ServiceRequest): + HealthCheckId: HealthCheckId + + +class GetHealthCheckResponse(TypedDict, total=False): + HealthCheck: HealthCheck + + +class GetHealthCheckStatusRequest(ServiceRequest): + HealthCheckId: HealthCheckId + + +class GetHealthCheckStatusResponse(TypedDict, total=False): + HealthCheckObservations: HealthCheckObservations + + +class GetHostedZoneCountRequest(ServiceRequest): + pass + + +HostedZoneCount = int + + +class GetHostedZoneCountResponse(TypedDict, total=False): + HostedZoneCount: HostedZoneCount + + +class GetHostedZoneLimitRequest(ServiceRequest): + Type: HostedZoneLimitType + HostedZoneId: ResourceId + + +class HostedZoneLimit(TypedDict, total=False): + Type: HostedZoneLimitType + Value: LimitValue + + +class GetHostedZoneLimitResponse(TypedDict, total=False): + Limit: HostedZoneLimit + Count: UsageCount + + +class GetHostedZoneRequest(ServiceRequest): + Id: ResourceId + + +VPCs = List[VPC] + + +class GetHostedZoneResponse(TypedDict, total=False): + HostedZone: HostedZone + DelegationSet: Optional[DelegationSet] + VPCs: Optional[VPCs] + + +class GetQueryLoggingConfigRequest(ServiceRequest): + Id: QueryLoggingConfigId + + +class GetQueryLoggingConfigResponse(TypedDict, total=False): + QueryLoggingConfig: QueryLoggingConfig + + +class GetReusableDelegationSetLimitRequest(ServiceRequest): + Type: ReusableDelegationSetLimitType + DelegationSetId: ResourceId + + +class ReusableDelegationSetLimit(TypedDict, total=False): + Type: ReusableDelegationSetLimitType + Value: LimitValue + + +class GetReusableDelegationSetLimitResponse(TypedDict, total=False): + Limit: ReusableDelegationSetLimit + Count: UsageCount + + +class GetReusableDelegationSetRequest(ServiceRequest): + Id: ResourceId + + +class GetReusableDelegationSetResponse(TypedDict, total=False): + DelegationSet: DelegationSet + + +class 
GetTrafficPolicyInstanceCountRequest(ServiceRequest): + pass + + +class GetTrafficPolicyInstanceCountResponse(TypedDict, total=False): + TrafficPolicyInstanceCount: TrafficPolicyInstanceCount + + +class GetTrafficPolicyInstanceRequest(ServiceRequest): + Id: TrafficPolicyInstanceId + + +class GetTrafficPolicyInstanceResponse(TypedDict, total=False): + TrafficPolicyInstance: TrafficPolicyInstance + + +class GetTrafficPolicyRequest(ServiceRequest): + Id: TrafficPolicyId + Version: TrafficPolicyVersion + + +class GetTrafficPolicyResponse(TypedDict, total=False): + TrafficPolicy: TrafficPolicy + + +HealthChecks = List[HealthCheck] + + +class HostedZoneOwner(TypedDict, total=False): + OwningAccount: Optional[AWSAccountID] + OwningService: Optional[HostedZoneOwningService] + + +class HostedZoneSummary(TypedDict, total=False): + HostedZoneId: ResourceId + Name: DNSName + Owner: HostedZoneOwner + + +HostedZoneSummaries = List[HostedZoneSummary] +HostedZones = List[HostedZone] + + +class ListCidrBlocksRequest(ServiceRequest): + CollectionId: UUID + LocationName: Optional[CidrLocationNameDefaultNotAllowed] + NextToken: Optional[PaginationToken] + MaxResults: Optional[MaxResults] + + +class ListCidrBlocksResponse(TypedDict, total=False): + NextToken: Optional[PaginationToken] + CidrBlocks: Optional[CidrBlockSummaries] + + +class ListCidrCollectionsRequest(ServiceRequest): + NextToken: Optional[PaginationToken] + MaxResults: Optional[MaxResults] + + +class ListCidrCollectionsResponse(TypedDict, total=False): + NextToken: Optional[PaginationToken] + CidrCollections: Optional[CollectionSummaries] + + +class ListCidrLocationsRequest(ServiceRequest): + CollectionId: UUID + NextToken: Optional[PaginationToken] + MaxResults: Optional[MaxResults] + + +class LocationSummary(TypedDict, total=False): + LocationName: Optional[CidrLocationNameDefaultAllowed] + + +LocationSummaries = List[LocationSummary] + + +class ListCidrLocationsResponse(TypedDict, total=False): + NextToken: Optional[PaginationToken] + CidrLocations: Optional[LocationSummaries] + + +class ListGeoLocationsRequest(ServiceRequest): + StartContinentCode: Optional[GeoLocationContinentCode] + StartCountryCode: Optional[GeoLocationCountryCode] + StartSubdivisionCode: Optional[GeoLocationSubdivisionCode] + MaxItems: Optional[PageMaxItems] + + +class ListGeoLocationsResponse(TypedDict, total=False): + GeoLocationDetailsList: GeoLocationDetailsList + IsTruncated: PageTruncated + NextContinentCode: Optional[GeoLocationContinentCode] + NextCountryCode: Optional[GeoLocationCountryCode] + NextSubdivisionCode: Optional[GeoLocationSubdivisionCode] + MaxItems: PageMaxItems + + +class ListHealthChecksRequest(ServiceRequest): + Marker: Optional[PageMarker] + MaxItems: Optional[PageMaxItems] + + +class ListHealthChecksResponse(TypedDict, total=False): + HealthChecks: HealthChecks + Marker: PageMarker + IsTruncated: PageTruncated + NextMarker: Optional[PageMarker] + MaxItems: PageMaxItems + + +class ListHostedZonesByNameRequest(ServiceRequest): + DNSName: Optional[DNSName] + HostedZoneId: Optional[ResourceId] + MaxItems: Optional[PageMaxItems] + + +class ListHostedZonesByNameResponse(TypedDict, total=False): + HostedZones: HostedZones + DNSName: Optional[DNSName] + HostedZoneId: Optional[ResourceId] + IsTruncated: PageTruncated + NextDNSName: Optional[DNSName] + NextHostedZoneId: Optional[ResourceId] + MaxItems: PageMaxItems + + +class ListHostedZonesByVPCRequest(ServiceRequest): + VPCId: VPCId + VPCRegion: VPCRegion + MaxItems: Optional[PageMaxItems] + NextToken: 
Optional[PaginationToken] + + +class ListHostedZonesByVPCResponse(TypedDict, total=False): + HostedZoneSummaries: HostedZoneSummaries + MaxItems: PageMaxItems + NextToken: Optional[PaginationToken] + + +class ListHostedZonesRequest(ServiceRequest): + Marker: Optional[PageMarker] + MaxItems: Optional[PageMaxItems] + DelegationSetId: Optional[ResourceId] + HostedZoneType: Optional[HostedZoneType] + + +class ListHostedZonesResponse(TypedDict, total=False): + HostedZones: HostedZones + Marker: PageMarker + IsTruncated: PageTruncated + NextMarker: Optional[PageMarker] + MaxItems: PageMaxItems + + +class ListQueryLoggingConfigsRequest(ServiceRequest): + HostedZoneId: Optional[ResourceId] + NextToken: Optional[PaginationToken] + MaxResults: Optional[MaxResults] + + +QueryLoggingConfigs = List[QueryLoggingConfig] + + +class ListQueryLoggingConfigsResponse(TypedDict, total=False): + QueryLoggingConfigs: QueryLoggingConfigs + NextToken: Optional[PaginationToken] + + +class ListResourceRecordSetsRequest(ServiceRequest): + HostedZoneId: ResourceId + StartRecordName: Optional[DNSName] + StartRecordType: Optional[RRType] + StartRecordIdentifier: Optional[ResourceRecordSetIdentifier] + MaxItems: Optional[PageMaxItems] + + +ResourceRecordSets = List[ResourceRecordSet] + + +class ListResourceRecordSetsResponse(TypedDict, total=False): + ResourceRecordSets: ResourceRecordSets + IsTruncated: PageTruncated + NextRecordName: Optional[DNSName] + NextRecordType: Optional[RRType] + NextRecordIdentifier: Optional[ResourceRecordSetIdentifier] + MaxItems: PageMaxItems + + +class ListReusableDelegationSetsRequest(ServiceRequest): + Marker: Optional[PageMarker] + MaxItems: Optional[PageMaxItems] + + +class ListReusableDelegationSetsResponse(TypedDict, total=False): + DelegationSets: DelegationSets + Marker: PageMarker + IsTruncated: PageTruncated + NextMarker: Optional[PageMarker] + MaxItems: PageMaxItems + + +class ListTagsForResourceRequest(ServiceRequest): + ResourceType: TagResourceType + ResourceId: TagResourceId + + +class ResourceTagSet(TypedDict, total=False): + ResourceType: Optional[TagResourceType] + ResourceId: Optional[TagResourceId] + Tags: Optional[TagList] + + +class ListTagsForResourceResponse(TypedDict, total=False): + ResourceTagSet: ResourceTagSet + + +TagResourceIdList = List[TagResourceId] + + +class ListTagsForResourcesRequest(ServiceRequest): + ResourceType: TagResourceType + ResourceIds: TagResourceIdList + + +ResourceTagSetList = List[ResourceTagSet] + + +class ListTagsForResourcesResponse(TypedDict, total=False): + ResourceTagSets: ResourceTagSetList + + +class ListTrafficPoliciesRequest(ServiceRequest): + TrafficPolicyIdMarker: Optional[TrafficPolicyId] + MaxItems: Optional[PageMaxItems] + + +class TrafficPolicySummary(TypedDict, total=False): + Id: TrafficPolicyId + Name: TrafficPolicyName + Type: RRType + LatestVersion: TrafficPolicyVersion + TrafficPolicyCount: TrafficPolicyVersion + + +TrafficPolicySummaries = List[TrafficPolicySummary] + + +class ListTrafficPoliciesResponse(TypedDict, total=False): + TrafficPolicySummaries: TrafficPolicySummaries + IsTruncated: PageTruncated + TrafficPolicyIdMarker: TrafficPolicyId + MaxItems: PageMaxItems + + +class ListTrafficPolicyInstancesByHostedZoneRequest(ServiceRequest): + HostedZoneId: ResourceId + TrafficPolicyInstanceNameMarker: Optional[DNSName] + TrafficPolicyInstanceTypeMarker: Optional[RRType] + MaxItems: Optional[PageMaxItems] + + +TrafficPolicyInstances = List[TrafficPolicyInstance] + + +class 
ListTrafficPolicyInstancesByHostedZoneResponse(TypedDict, total=False): + TrafficPolicyInstances: TrafficPolicyInstances + TrafficPolicyInstanceNameMarker: Optional[DNSName] + TrafficPolicyInstanceTypeMarker: Optional[RRType] + IsTruncated: PageTruncated + MaxItems: PageMaxItems + + +class ListTrafficPolicyInstancesByPolicyRequest(ServiceRequest): + TrafficPolicyId: TrafficPolicyId + TrafficPolicyVersion: TrafficPolicyVersion + HostedZoneIdMarker: Optional[ResourceId] + TrafficPolicyInstanceNameMarker: Optional[DNSName] + TrafficPolicyInstanceTypeMarker: Optional[RRType] + MaxItems: Optional[PageMaxItems] + + +class ListTrafficPolicyInstancesByPolicyResponse(TypedDict, total=False): + TrafficPolicyInstances: TrafficPolicyInstances + HostedZoneIdMarker: Optional[ResourceId] + TrafficPolicyInstanceNameMarker: Optional[DNSName] + TrafficPolicyInstanceTypeMarker: Optional[RRType] + IsTruncated: PageTruncated + MaxItems: PageMaxItems + + +class ListTrafficPolicyInstancesRequest(ServiceRequest): + HostedZoneIdMarker: Optional[ResourceId] + TrafficPolicyInstanceNameMarker: Optional[DNSName] + TrafficPolicyInstanceTypeMarker: Optional[RRType] + MaxItems: Optional[PageMaxItems] + + +class ListTrafficPolicyInstancesResponse(TypedDict, total=False): + TrafficPolicyInstances: TrafficPolicyInstances + HostedZoneIdMarker: Optional[ResourceId] + TrafficPolicyInstanceNameMarker: Optional[DNSName] + TrafficPolicyInstanceTypeMarker: Optional[RRType] + IsTruncated: PageTruncated + MaxItems: PageMaxItems + + +class ListTrafficPolicyVersionsRequest(ServiceRequest): + Id: TrafficPolicyId + TrafficPolicyVersionMarker: Optional[TrafficPolicyVersionMarker] + MaxItems: Optional[PageMaxItems] + + +TrafficPolicies = List[TrafficPolicy] + + +class ListTrafficPolicyVersionsResponse(TypedDict, total=False): + TrafficPolicies: TrafficPolicies + IsTruncated: PageTruncated + TrafficPolicyVersionMarker: TrafficPolicyVersionMarker + MaxItems: PageMaxItems + + +class ListVPCAssociationAuthorizationsRequest(ServiceRequest): + HostedZoneId: ResourceId + NextToken: Optional[PaginationToken] + MaxResults: Optional[MaxResults] + + +class ListVPCAssociationAuthorizationsResponse(TypedDict, total=False): + HostedZoneId: ResourceId + NextToken: Optional[PaginationToken] + VPCs: VPCs + + +RecordData = List[RecordDataEntry] +ResettableElementNameList = List[ResettableElementName] + + +class TestDNSAnswerRequest(ServiceRequest): + HostedZoneId: ResourceId + RecordName: DNSName + RecordType: RRType + ResolverIP: Optional[IPAddress] + EDNS0ClientSubnetIP: Optional[IPAddress] + EDNS0ClientSubnetMask: Optional[SubnetMask] + + +class TestDNSAnswerResponse(TypedDict, total=False): + Nameserver: Nameserver + RecordName: DNSName + RecordType: RRType + RecordData: RecordData + ResponseCode: DNSRCode + Protocol: TransportProtocol + + +class UpdateHealthCheckRequest(ServiceRequest): + HealthCheckId: HealthCheckId + HealthCheckVersion: Optional[HealthCheckVersion] + IPAddress: Optional[IPAddress] + Port: Optional[Port] + ResourcePath: Optional[ResourcePath] + FullyQualifiedDomainName: Optional[FullyQualifiedDomainName] + SearchString: Optional[SearchString] + FailureThreshold: Optional[FailureThreshold] + Inverted: Optional[Inverted] + Disabled: Optional[Disabled] + HealthThreshold: Optional[HealthThreshold] + ChildHealthChecks: Optional[ChildHealthCheckList] + EnableSNI: Optional[EnableSNI] + Regions: Optional[HealthCheckRegionList] + AlarmIdentifier: Optional[AlarmIdentifier] + InsufficientDataHealthStatus: Optional[InsufficientDataHealthStatus] 
+ ResetElements: Optional[ResettableElementNameList] + + +class UpdateHealthCheckResponse(TypedDict, total=False): + HealthCheck: HealthCheck + + +class UpdateHostedZoneCommentRequest(ServiceRequest): + Id: ResourceId + Comment: Optional[ResourceDescription] + + +class UpdateHostedZoneCommentResponse(TypedDict, total=False): + HostedZone: HostedZone + + +class UpdateTrafficPolicyCommentRequest(ServiceRequest): + Id: TrafficPolicyId + Version: TrafficPolicyVersion + Comment: TrafficPolicyComment + + +class UpdateTrafficPolicyCommentResponse(TypedDict, total=False): + TrafficPolicy: TrafficPolicy + + +class UpdateTrafficPolicyInstanceRequest(ServiceRequest): + Id: TrafficPolicyInstanceId + TTL: TTL + TrafficPolicyId: TrafficPolicyId + TrafficPolicyVersion: TrafficPolicyVersion + + +class UpdateTrafficPolicyInstanceResponse(TypedDict, total=False): + TrafficPolicyInstance: TrafficPolicyInstance + + +class Route53Api: + service = "route53" + version = "2013-04-01" + + @handler("ActivateKeySigningKey") + def activate_key_signing_key( + self, context: RequestContext, hosted_zone_id: ResourceId, name: SigningKeyName, **kwargs + ) -> ActivateKeySigningKeyResponse: + raise NotImplementedError + + @handler("AssociateVPCWithHostedZone") + def associate_vpc_with_hosted_zone( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + vpc: VPC, + comment: AssociateVPCComment | None = None, + **kwargs, + ) -> AssociateVPCWithHostedZoneResponse: + raise NotImplementedError + + @handler("ChangeCidrCollection") + def change_cidr_collection( + self, + context: RequestContext, + id: UUID, + changes: CidrCollectionChanges, + collection_version: CollectionVersion | None = None, + **kwargs, + ) -> ChangeCidrCollectionResponse: + raise NotImplementedError + + @handler("ChangeResourceRecordSets") + def change_resource_record_sets( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + change_batch: ChangeBatch, + **kwargs, + ) -> ChangeResourceRecordSetsResponse: + raise NotImplementedError + + @handler("ChangeTagsForResource") + def change_tags_for_resource( + self, + context: RequestContext, + resource_type: TagResourceType, + resource_id: TagResourceId, + add_tags: TagList | None = None, + remove_tag_keys: TagKeyList | None = None, + **kwargs, + ) -> ChangeTagsForResourceResponse: + raise NotImplementedError + + @handler("CreateCidrCollection") + def create_cidr_collection( + self, context: RequestContext, name: CollectionName, caller_reference: CidrNonce, **kwargs + ) -> CreateCidrCollectionResponse: + raise NotImplementedError + + @handler("CreateHealthCheck") + def create_health_check( + self, + context: RequestContext, + caller_reference: HealthCheckNonce, + health_check_config: HealthCheckConfig, + **kwargs, + ) -> CreateHealthCheckResponse: + raise NotImplementedError + + @handler("CreateHostedZone") + def create_hosted_zone( + self, + context: RequestContext, + name: DNSName, + caller_reference: Nonce, + vpc: VPC | None = None, + hosted_zone_config: HostedZoneConfig | None = None, + delegation_set_id: ResourceId | None = None, + **kwargs, + ) -> CreateHostedZoneResponse: + raise NotImplementedError + + @handler("CreateKeySigningKey") + def create_key_signing_key( + self, + context: RequestContext, + caller_reference: Nonce, + hosted_zone_id: ResourceId, + key_management_service_arn: SigningKeyString, + name: SigningKeyName, + status: SigningKeyStatus, + **kwargs, + ) -> CreateKeySigningKeyResponse: + raise NotImplementedError + + @handler("CreateQueryLoggingConfig") + def 
create_query_logging_config( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + cloud_watch_logs_log_group_arn: CloudWatchLogsLogGroupArn, + **kwargs, + ) -> CreateQueryLoggingConfigResponse: + raise NotImplementedError + + @handler("CreateReusableDelegationSet") + def create_reusable_delegation_set( + self, + context: RequestContext, + caller_reference: Nonce, + hosted_zone_id: ResourceId | None = None, + **kwargs, + ) -> CreateReusableDelegationSetResponse: + raise NotImplementedError + + @handler("CreateTrafficPolicy") + def create_traffic_policy( + self, + context: RequestContext, + name: TrafficPolicyName, + document: TrafficPolicyDocument, + comment: TrafficPolicyComment | None = None, + **kwargs, + ) -> CreateTrafficPolicyResponse: + raise NotImplementedError + + @handler("CreateTrafficPolicyInstance") + def create_traffic_policy_instance( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + name: DNSName, + ttl: TTL, + traffic_policy_id: TrafficPolicyId, + traffic_policy_version: TrafficPolicyVersion, + **kwargs, + ) -> CreateTrafficPolicyInstanceResponse: + raise NotImplementedError + + @handler("CreateTrafficPolicyVersion") + def create_traffic_policy_version( + self, + context: RequestContext, + id: TrafficPolicyId, + document: TrafficPolicyDocument, + comment: TrafficPolicyComment | None = None, + **kwargs, + ) -> CreateTrafficPolicyVersionResponse: + raise NotImplementedError + + @handler("CreateVPCAssociationAuthorization") + def create_vpc_association_authorization( + self, context: RequestContext, hosted_zone_id: ResourceId, vpc: VPC, **kwargs + ) -> CreateVPCAssociationAuthorizationResponse: + raise NotImplementedError + + @handler("DeactivateKeySigningKey") + def deactivate_key_signing_key( + self, context: RequestContext, hosted_zone_id: ResourceId, name: SigningKeyName, **kwargs + ) -> DeactivateKeySigningKeyResponse: + raise NotImplementedError + + @handler("DeleteCidrCollection") + def delete_cidr_collection( + self, context: RequestContext, id: UUID, **kwargs + ) -> DeleteCidrCollectionResponse: + raise NotImplementedError + + @handler("DeleteHealthCheck") + def delete_health_check( + self, context: RequestContext, health_check_id: HealthCheckId, **kwargs + ) -> DeleteHealthCheckResponse: + raise NotImplementedError + + @handler("DeleteHostedZone") + def delete_hosted_zone( + self, context: RequestContext, id: ResourceId, **kwargs + ) -> DeleteHostedZoneResponse: + raise NotImplementedError + + @handler("DeleteKeySigningKey") + def delete_key_signing_key( + self, context: RequestContext, hosted_zone_id: ResourceId, name: SigningKeyName, **kwargs + ) -> DeleteKeySigningKeyResponse: + raise NotImplementedError + + @handler("DeleteQueryLoggingConfig") + def delete_query_logging_config( + self, context: RequestContext, id: QueryLoggingConfigId, **kwargs + ) -> DeleteQueryLoggingConfigResponse: + raise NotImplementedError + + @handler("DeleteReusableDelegationSet") + def delete_reusable_delegation_set( + self, context: RequestContext, id: ResourceId, **kwargs + ) -> DeleteReusableDelegationSetResponse: + raise NotImplementedError + + @handler("DeleteTrafficPolicy") + def delete_traffic_policy( + self, context: RequestContext, id: TrafficPolicyId, version: TrafficPolicyVersion, **kwargs + ) -> DeleteTrafficPolicyResponse: + raise NotImplementedError + + @handler("DeleteTrafficPolicyInstance") + def delete_traffic_policy_instance( + self, context: RequestContext, id: TrafficPolicyInstanceId, **kwargs + ) -> DeleteTrafficPolicyInstanceResponse: 
+ raise NotImplementedError + + @handler("DeleteVPCAssociationAuthorization") + def delete_vpc_association_authorization( + self, context: RequestContext, hosted_zone_id: ResourceId, vpc: VPC, **kwargs + ) -> DeleteVPCAssociationAuthorizationResponse: + raise NotImplementedError + + @handler("DisableHostedZoneDNSSEC") + def disable_hosted_zone_dnssec( + self, context: RequestContext, hosted_zone_id: ResourceId, **kwargs + ) -> DisableHostedZoneDNSSECResponse: + raise NotImplementedError + + @handler("DisassociateVPCFromHostedZone") + def disassociate_vpc_from_hosted_zone( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + vpc: VPC, + comment: DisassociateVPCComment | None = None, + **kwargs, + ) -> DisassociateVPCFromHostedZoneResponse: + raise NotImplementedError + + @handler("EnableHostedZoneDNSSEC") + def enable_hosted_zone_dnssec( + self, context: RequestContext, hosted_zone_id: ResourceId, **kwargs + ) -> EnableHostedZoneDNSSECResponse: + raise NotImplementedError + + @handler("GetAccountLimit", expand=False) + def get_account_limit( + self, context: RequestContext, request: GetAccountLimitRequest, **kwargs + ) -> GetAccountLimitResponse: + raise NotImplementedError + + @handler("GetChange") + def get_change(self, context: RequestContext, id: ChangeId, **kwargs) -> GetChangeResponse: + raise NotImplementedError + + @handler("GetCheckerIpRanges") + def get_checker_ip_ranges( + self, context: RequestContext, **kwargs + ) -> GetCheckerIpRangesResponse: + raise NotImplementedError + + @handler("GetDNSSEC") + def get_dnssec( + self, context: RequestContext, hosted_zone_id: ResourceId, **kwargs + ) -> GetDNSSECResponse: + raise NotImplementedError + + @handler("GetGeoLocation") + def get_geo_location( + self, + context: RequestContext, + continent_code: GeoLocationContinentCode | None = None, + country_code: GeoLocationCountryCode | None = None, + subdivision_code: GeoLocationSubdivisionCode | None = None, + **kwargs, + ) -> GetGeoLocationResponse: + raise NotImplementedError + + @handler("GetHealthCheck") + def get_health_check( + self, context: RequestContext, health_check_id: HealthCheckId, **kwargs + ) -> GetHealthCheckResponse: + raise NotImplementedError + + @handler("GetHealthCheckCount") + def get_health_check_count( + self, context: RequestContext, **kwargs + ) -> GetHealthCheckCountResponse: + raise NotImplementedError + + @handler("GetHealthCheckLastFailureReason") + def get_health_check_last_failure_reason( + self, context: RequestContext, health_check_id: HealthCheckId, **kwargs + ) -> GetHealthCheckLastFailureReasonResponse: + raise NotImplementedError + + @handler("GetHealthCheckStatus") + def get_health_check_status( + self, context: RequestContext, health_check_id: HealthCheckId, **kwargs + ) -> GetHealthCheckStatusResponse: + raise NotImplementedError + + @handler("GetHostedZone") + def get_hosted_zone( + self, context: RequestContext, id: ResourceId, **kwargs + ) -> GetHostedZoneResponse: + raise NotImplementedError + + @handler("GetHostedZoneCount") + def get_hosted_zone_count( + self, context: RequestContext, **kwargs + ) -> GetHostedZoneCountResponse: + raise NotImplementedError + + @handler("GetHostedZoneLimit", expand=False) + def get_hosted_zone_limit( + self, context: RequestContext, request: GetHostedZoneLimitRequest, **kwargs + ) -> GetHostedZoneLimitResponse: + raise NotImplementedError + + @handler("GetQueryLoggingConfig") + def get_query_logging_config( + self, context: RequestContext, id: QueryLoggingConfigId, **kwargs + ) -> 
GetQueryLoggingConfigResponse: + raise NotImplementedError + + @handler("GetReusableDelegationSet") + def get_reusable_delegation_set( + self, context: RequestContext, id: ResourceId, **kwargs + ) -> GetReusableDelegationSetResponse: + raise NotImplementedError + + @handler("GetReusableDelegationSetLimit", expand=False) + def get_reusable_delegation_set_limit( + self, context: RequestContext, request: GetReusableDelegationSetLimitRequest, **kwargs + ) -> GetReusableDelegationSetLimitResponse: + raise NotImplementedError + + @handler("GetTrafficPolicy") + def get_traffic_policy( + self, context: RequestContext, id: TrafficPolicyId, version: TrafficPolicyVersion, **kwargs + ) -> GetTrafficPolicyResponse: + raise NotImplementedError + + @handler("GetTrafficPolicyInstance") + def get_traffic_policy_instance( + self, context: RequestContext, id: TrafficPolicyInstanceId, **kwargs + ) -> GetTrafficPolicyInstanceResponse: + raise NotImplementedError + + @handler("GetTrafficPolicyInstanceCount") + def get_traffic_policy_instance_count( + self, context: RequestContext, **kwargs + ) -> GetTrafficPolicyInstanceCountResponse: + raise NotImplementedError + + @handler("ListCidrBlocks") + def list_cidr_blocks( + self, + context: RequestContext, + collection_id: UUID, + location_name: CidrLocationNameDefaultNotAllowed | None = None, + next_token: PaginationToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListCidrBlocksResponse: + raise NotImplementedError + + @handler("ListCidrCollections") + def list_cidr_collections( + self, + context: RequestContext, + next_token: PaginationToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListCidrCollectionsResponse: + raise NotImplementedError + + @handler("ListCidrLocations") + def list_cidr_locations( + self, + context: RequestContext, + collection_id: UUID, + next_token: PaginationToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListCidrLocationsResponse: + raise NotImplementedError + + @handler("ListGeoLocations") + def list_geo_locations( + self, + context: RequestContext, + start_continent_code: GeoLocationContinentCode | None = None, + start_country_code: GeoLocationCountryCode | None = None, + start_subdivision_code: GeoLocationSubdivisionCode | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListGeoLocationsResponse: + raise NotImplementedError + + @handler("ListHealthChecks") + def list_health_checks( + self, + context: RequestContext, + marker: PageMarker | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListHealthChecksResponse: + raise NotImplementedError + + @handler("ListHostedZones") + def list_hosted_zones( + self, + context: RequestContext, + marker: PageMarker | None = None, + max_items: PageMaxItems | None = None, + delegation_set_id: ResourceId | None = None, + hosted_zone_type: HostedZoneType | None = None, + **kwargs, + ) -> ListHostedZonesResponse: + raise NotImplementedError + + @handler("ListHostedZonesByName") + def list_hosted_zones_by_name( + self, + context: RequestContext, + dns_name: DNSName | None = None, + hosted_zone_id: ResourceId | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListHostedZonesByNameResponse: + raise NotImplementedError + + @handler("ListHostedZonesByVPC") + def list_hosted_zones_by_vpc( + self, + context: RequestContext, + vpc_id: VPCId, + vpc_region: VPCRegion, + max_items: PageMaxItems | None = None, + next_token: PaginationToken | None = None, 
+ **kwargs, + ) -> ListHostedZonesByVPCResponse: + raise NotImplementedError + + @handler("ListQueryLoggingConfigs") + def list_query_logging_configs( + self, + context: RequestContext, + hosted_zone_id: ResourceId | None = None, + next_token: PaginationToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListQueryLoggingConfigsResponse: + raise NotImplementedError + + @handler("ListResourceRecordSets") + def list_resource_record_sets( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + start_record_name: DNSName | None = None, + start_record_type: RRType | None = None, + start_record_identifier: ResourceRecordSetIdentifier | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListResourceRecordSetsResponse: + raise NotImplementedError + + @handler("ListReusableDelegationSets") + def list_reusable_delegation_sets( + self, + context: RequestContext, + marker: PageMarker | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListReusableDelegationSetsResponse: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, + context: RequestContext, + resource_type: TagResourceType, + resource_id: TagResourceId, + **kwargs, + ) -> ListTagsForResourceResponse: + raise NotImplementedError + + @handler("ListTagsForResources") + def list_tags_for_resources( + self, + context: RequestContext, + resource_type: TagResourceType, + resource_ids: TagResourceIdList, + **kwargs, + ) -> ListTagsForResourcesResponse: + raise NotImplementedError + + @handler("ListTrafficPolicies") + def list_traffic_policies( + self, + context: RequestContext, + traffic_policy_id_marker: TrafficPolicyId | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListTrafficPoliciesResponse: + raise NotImplementedError + + @handler("ListTrafficPolicyInstances") + def list_traffic_policy_instances( + self, + context: RequestContext, + hosted_zone_id_marker: ResourceId | None = None, + traffic_policy_instance_name_marker: DNSName | None = None, + traffic_policy_instance_type_marker: RRType | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListTrafficPolicyInstancesResponse: + raise NotImplementedError + + @handler("ListTrafficPolicyInstancesByHostedZone") + def list_traffic_policy_instances_by_hosted_zone( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + traffic_policy_instance_name_marker: DNSName | None = None, + traffic_policy_instance_type_marker: RRType | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListTrafficPolicyInstancesByHostedZoneResponse: + raise NotImplementedError + + @handler("ListTrafficPolicyInstancesByPolicy") + def list_traffic_policy_instances_by_policy( + self, + context: RequestContext, + traffic_policy_id: TrafficPolicyId, + traffic_policy_version: TrafficPolicyVersion, + hosted_zone_id_marker: ResourceId | None = None, + traffic_policy_instance_name_marker: DNSName | None = None, + traffic_policy_instance_type_marker: RRType | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListTrafficPolicyInstancesByPolicyResponse: + raise NotImplementedError + + @handler("ListTrafficPolicyVersions") + def list_traffic_policy_versions( + self, + context: RequestContext, + id: TrafficPolicyId, + traffic_policy_version_marker: TrafficPolicyVersionMarker | None = None, + max_items: PageMaxItems | None = None, + **kwargs, + ) -> ListTrafficPolicyVersionsResponse: + raise NotImplementedError 
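        # Illustrative aside (not part of this diff): each stub above raises
        # NotImplementedError; a concrete provider subclasses Route53Api and
        # overrides the @handler-decorated methods it supports, returning the
        # matching TypedDict response shape, e.g. (hypothetical provider):
        #
        #   class Route53Provider(Route53Api):
        #       def get_hosted_zone_count(
        #           self, context: RequestContext, **kwargs
        #       ) -> GetHostedZoneCountResponse:
        #           return GetHostedZoneCountResponse(HostedZoneCount=0)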
+ + @handler("ListVPCAssociationAuthorizations") + def list_vpc_association_authorizations( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + next_token: PaginationToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListVPCAssociationAuthorizationsResponse: + raise NotImplementedError + + @handler("TestDNSAnswer") + def test_dns_answer( + self, + context: RequestContext, + hosted_zone_id: ResourceId, + record_name: DNSName, + record_type: RRType, + resolver_ip: IPAddress | None = None, + edns0_client_subnet_ip: IPAddress | None = None, + edns0_client_subnet_mask: SubnetMask | None = None, + **kwargs, + ) -> TestDNSAnswerResponse: + raise NotImplementedError + + @handler("UpdateHealthCheck") + def update_health_check( + self, + context: RequestContext, + health_check_id: HealthCheckId, + health_check_version: HealthCheckVersion | None = None, + ip_address: IPAddress | None = None, + port: Port | None = None, + resource_path: ResourcePath | None = None, + fully_qualified_domain_name: FullyQualifiedDomainName | None = None, + search_string: SearchString | None = None, + failure_threshold: FailureThreshold | None = None, + inverted: Inverted | None = None, + disabled: Disabled | None = None, + health_threshold: HealthThreshold | None = None, + child_health_checks: ChildHealthCheckList | None = None, + enable_sni: EnableSNI | None = None, + regions: HealthCheckRegionList | None = None, + alarm_identifier: AlarmIdentifier | None = None, + insufficient_data_health_status: InsufficientDataHealthStatus | None = None, + reset_elements: ResettableElementNameList | None = None, + **kwargs, + ) -> UpdateHealthCheckResponse: + raise NotImplementedError + + @handler("UpdateHostedZoneComment") + def update_hosted_zone_comment( + self, + context: RequestContext, + id: ResourceId, + comment: ResourceDescription | None = None, + **kwargs, + ) -> UpdateHostedZoneCommentResponse: + raise NotImplementedError + + @handler("UpdateTrafficPolicyComment") + def update_traffic_policy_comment( + self, + context: RequestContext, + id: TrafficPolicyId, + version: TrafficPolicyVersion, + comment: TrafficPolicyComment, + **kwargs, + ) -> UpdateTrafficPolicyCommentResponse: + raise NotImplementedError + + @handler("UpdateTrafficPolicyInstance") + def update_traffic_policy_instance( + self, + context: RequestContext, + id: TrafficPolicyInstanceId, + ttl: TTL, + traffic_policy_id: TrafficPolicyId, + traffic_policy_version: TrafficPolicyVersion, + **kwargs, + ) -> UpdateTrafficPolicyInstanceResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/route53resolver/__init__.py b/localstack-core/localstack/aws/api/route53resolver/__init__.py new file mode 100644 index 0000000000000..29bb80aa29a4b --- /dev/null +++ b/localstack-core/localstack/aws/api/route53resolver/__init__.py @@ -0,0 +1,2034 @@ +from enum import StrEnum +from typing import List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AccountId = str +Arn = str +BlockOverrideDomain = str +BlockOverrideTtl = int +Boolean = bool +Count = int +CreatorRequestId = str +DestinationArn = str +DomainListFileUrl = str +DomainName = str +ExceptionMessage = str +FilterName = str +FilterValue = str +FirewallDomainName = str +FirewallRuleGroupPolicy = str +InstanceCount = int +Ip = str +IpAddressCount = int +Ipv6 = str +ListDomainMaxResults = int +ListFirewallConfigsMaxResult = int +ListResolverConfigsMaxResult = int +MaxResults = int +Name = 
str +NextToken = str +OutpostArn = str +OutpostInstanceType = str +OutpostResolverName = str +OutpostResolverStatusMessage = str +Port = int +Priority = int +Qtype = str +ResolverQueryLogConfigAssociationErrorMessage = str +ResolverQueryLogConfigName = str +ResolverQueryLogConfigPolicy = str +ResolverRulePolicy = str +ResourceId = str +Rfc3339TimeString = str +ServerNameIndication = str +ServicePrinciple = str +SortByKey = str +StatusMessage = str +String = str +SubnetId = str +TagKey = str +TagValue = str +Unsigned = int + + +class Action(StrEnum): + ALLOW = "ALLOW" + BLOCK = "BLOCK" + ALERT = "ALERT" + + +class AutodefinedReverseFlag(StrEnum): + ENABLE = "ENABLE" + DISABLE = "DISABLE" + USE_LOCAL_RESOURCE_SETTING = "USE_LOCAL_RESOURCE_SETTING" + + +class BlockOverrideDnsType(StrEnum): + CNAME = "CNAME" + + +class BlockResponse(StrEnum): + NODATA = "NODATA" + NXDOMAIN = "NXDOMAIN" + OVERRIDE = "OVERRIDE" + + +class ConfidenceThreshold(StrEnum): + LOW = "LOW" + MEDIUM = "MEDIUM" + HIGH = "HIGH" + + +class DnsThreatProtection(StrEnum): + DGA = "DGA" + DNS_TUNNELING = "DNS_TUNNELING" + + +class FirewallDomainImportOperation(StrEnum): + REPLACE = "REPLACE" + + +class FirewallDomainListStatus(StrEnum): + COMPLETE = "COMPLETE" + COMPLETE_IMPORT_FAILED = "COMPLETE_IMPORT_FAILED" + IMPORTING = "IMPORTING" + DELETING = "DELETING" + UPDATING = "UPDATING" + + +class FirewallDomainRedirectionAction(StrEnum): + INSPECT_REDIRECTION_DOMAIN = "INSPECT_REDIRECTION_DOMAIN" + TRUST_REDIRECTION_DOMAIN = "TRUST_REDIRECTION_DOMAIN" + + +class FirewallDomainUpdateOperation(StrEnum): + ADD = "ADD" + REMOVE = "REMOVE" + REPLACE = "REPLACE" + + +class FirewallFailOpenStatus(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + USE_LOCAL_RESOURCE_SETTING = "USE_LOCAL_RESOURCE_SETTING" + + +class FirewallRuleGroupAssociationStatus(StrEnum): + COMPLETE = "COMPLETE" + DELETING = "DELETING" + UPDATING = "UPDATING" + + +class FirewallRuleGroupStatus(StrEnum): + COMPLETE = "COMPLETE" + DELETING = "DELETING" + UPDATING = "UPDATING" + + +class IpAddressStatus(StrEnum): + CREATING = "CREATING" + FAILED_CREATION = "FAILED_CREATION" + ATTACHING = "ATTACHING" + ATTACHED = "ATTACHED" + REMAP_DETACHING = "REMAP_DETACHING" + REMAP_ATTACHING = "REMAP_ATTACHING" + DETACHING = "DETACHING" + FAILED_RESOURCE_GONE = "FAILED_RESOURCE_GONE" + DELETING = "DELETING" + DELETE_FAILED_FAS_EXPIRED = "DELETE_FAILED_FAS_EXPIRED" + UPDATING = "UPDATING" + UPDATE_FAILED = "UPDATE_FAILED" + + +class MutationProtectionStatus(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class OutpostResolverStatus(StrEnum): + CREATING = "CREATING" + OPERATIONAL = "OPERATIONAL" + UPDATING = "UPDATING" + DELETING = "DELETING" + ACTION_NEEDED = "ACTION_NEEDED" + FAILED_CREATION = "FAILED_CREATION" + FAILED_DELETION = "FAILED_DELETION" + + +class Protocol(StrEnum): + DoH = "DoH" + Do53 = "Do53" + DoH_FIPS = "DoH-FIPS" + + +class ResolverAutodefinedReverseStatus(StrEnum): + ENABLING = "ENABLING" + ENABLED = "ENABLED" + DISABLING = "DISABLING" + DISABLED = "DISABLED" + UPDATING_TO_USE_LOCAL_RESOURCE_SETTING = "UPDATING_TO_USE_LOCAL_RESOURCE_SETTING" + USE_LOCAL_RESOURCE_SETTING = "USE_LOCAL_RESOURCE_SETTING" + + +class ResolverDNSSECValidationStatus(StrEnum): + ENABLING = "ENABLING" + ENABLED = "ENABLED" + DISABLING = "DISABLING" + DISABLED = "DISABLED" + UPDATING_TO_USE_LOCAL_RESOURCE_SETTING = "UPDATING_TO_USE_LOCAL_RESOURCE_SETTING" + USE_LOCAL_RESOURCE_SETTING = "USE_LOCAL_RESOURCE_SETTING" + + +class ResolverEndpointDirection(StrEnum): + INBOUND = 
"INBOUND" + OUTBOUND = "OUTBOUND" + + +class ResolverEndpointStatus(StrEnum): + CREATING = "CREATING" + OPERATIONAL = "OPERATIONAL" + UPDATING = "UPDATING" + AUTO_RECOVERING = "AUTO_RECOVERING" + ACTION_NEEDED = "ACTION_NEEDED" + DELETING = "DELETING" + + +class ResolverEndpointType(StrEnum): + IPV6 = "IPV6" + IPV4 = "IPV4" + DUALSTACK = "DUALSTACK" + + +class ResolverQueryLogConfigAssociationError(StrEnum): + NONE = "NONE" + DESTINATION_NOT_FOUND = "DESTINATION_NOT_FOUND" + ACCESS_DENIED = "ACCESS_DENIED" + INTERNAL_SERVICE_ERROR = "INTERNAL_SERVICE_ERROR" + + +class ResolverQueryLogConfigAssociationStatus(StrEnum): + CREATING = "CREATING" + ACTIVE = "ACTIVE" + ACTION_NEEDED = "ACTION_NEEDED" + DELETING = "DELETING" + FAILED = "FAILED" + + +class ResolverQueryLogConfigStatus(StrEnum): + CREATING = "CREATING" + CREATED = "CREATED" + DELETING = "DELETING" + FAILED = "FAILED" + + +class ResolverRuleAssociationStatus(StrEnum): + CREATING = "CREATING" + COMPLETE = "COMPLETE" + DELETING = "DELETING" + FAILED = "FAILED" + OVERRIDDEN = "OVERRIDDEN" + + +class ResolverRuleStatus(StrEnum): + COMPLETE = "COMPLETE" + DELETING = "DELETING" + UPDATING = "UPDATING" + FAILED = "FAILED" + + +class RuleTypeOption(StrEnum): + FORWARD = "FORWARD" + SYSTEM = "SYSTEM" + RECURSIVE = "RECURSIVE" + + +class ShareStatus(StrEnum): + NOT_SHARED = "NOT_SHARED" + SHARED_WITH_ME = "SHARED_WITH_ME" + SHARED_BY_ME = "SHARED_BY_ME" + + +class SortOrder(StrEnum): + ASCENDING = "ASCENDING" + DESCENDING = "DESCENDING" + + +class Validation(StrEnum): + ENABLE = "ENABLE" + DISABLE = "DISABLE" + USE_LOCAL_RESOURCE_SETTING = "USE_LOCAL_RESOURCE_SETTING" + + +class AccessDeniedException(ServiceException): + code: str = "AccessDeniedException" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class InternalServiceErrorException(ServiceException): + code: str = "InternalServiceErrorException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidNextTokenException(ServiceException): + code: str = "InvalidNextTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidParameterException(ServiceException): + code: str = "InvalidParameterException" + sender_fault: bool = False + status_code: int = 400 + FieldName: Optional[String] + + +class InvalidPolicyDocument(ServiceException): + code: str = "InvalidPolicyDocument" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidRequestException(ServiceException): + code: str = "InvalidRequestException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTagException(ServiceException): + code: str = "InvalidTagException" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + ResourceType: Optional[String] + + +class ResourceExistsException(ServiceException): + code: str = "ResourceExistsException" + sender_fault: bool = False + status_code: int = 400 + ResourceType: Optional[String] + + +class ResourceInUseException(ServiceException): + code: str = "ResourceInUseException" + sender_fault: bool = False + status_code: int = 400 + ResourceType: Optional[String] + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + ResourceType: 
Optional[String] + + +class ResourceUnavailableException(ServiceException): + code: str = "ResourceUnavailableException" + sender_fault: bool = False + status_code: int = 400 + ResourceType: Optional[String] + + +class ServiceQuotaExceededException(ServiceException): + code: str = "ServiceQuotaExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = False + status_code: int = 400 + + +class UnknownResourceException(ServiceException): + code: str = "UnknownResourceException" + sender_fault: bool = False + status_code: int = 400 + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = False + status_code: int = 400 + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] + + +class AssociateFirewallRuleGroupRequest(ServiceRequest): + CreatorRequestId: CreatorRequestId + FirewallRuleGroupId: ResourceId + VpcId: ResourceId + Priority: Priority + Name: Name + MutationProtection: Optional[MutationProtectionStatus] + Tags: Optional[TagList] + + +class FirewallRuleGroupAssociation(TypedDict, total=False): + Id: Optional[ResourceId] + Arn: Optional[Arn] + FirewallRuleGroupId: Optional[ResourceId] + VpcId: Optional[ResourceId] + Name: Optional[Name] + Priority: Optional[Priority] + MutationProtection: Optional[MutationProtectionStatus] + ManagedOwnerName: Optional[ServicePrinciple] + Status: Optional[FirewallRuleGroupAssociationStatus] + StatusMessage: Optional[StatusMessage] + CreatorRequestId: Optional[CreatorRequestId] + CreationTime: Optional[Rfc3339TimeString] + ModificationTime: Optional[Rfc3339TimeString] + + +class AssociateFirewallRuleGroupResponse(TypedDict, total=False): + FirewallRuleGroupAssociation: Optional[FirewallRuleGroupAssociation] + + +class IpAddressUpdate(TypedDict, total=False): + IpId: Optional[ResourceId] + SubnetId: Optional[SubnetId] + Ip: Optional[Ip] + Ipv6: Optional[Ipv6] + + +class AssociateResolverEndpointIpAddressRequest(ServiceRequest): + ResolverEndpointId: ResourceId + IpAddress: IpAddressUpdate + + +ProtocolList = List[Protocol] +SecurityGroupIds = List[ResourceId] + + +class ResolverEndpoint(TypedDict, total=False): + Id: Optional[ResourceId] + CreatorRequestId: Optional[CreatorRequestId] + Arn: Optional[Arn] + Name: Optional[Name] + SecurityGroupIds: Optional[SecurityGroupIds] + Direction: Optional[ResolverEndpointDirection] + IpAddressCount: Optional[IpAddressCount] + HostVPCId: Optional[ResourceId] + Status: Optional[ResolverEndpointStatus] + StatusMessage: Optional[StatusMessage] + CreationTime: Optional[Rfc3339TimeString] + ModificationTime: Optional[Rfc3339TimeString] + OutpostArn: Optional[OutpostArn] + PreferredInstanceType: Optional[OutpostInstanceType] + ResolverEndpointType: Optional[ResolverEndpointType] + Protocols: Optional[ProtocolList] + + +class AssociateResolverEndpointIpAddressResponse(TypedDict, total=False): + ResolverEndpoint: Optional[ResolverEndpoint] + + +class AssociateResolverQueryLogConfigRequest(ServiceRequest): + ResolverQueryLogConfigId: ResourceId + ResourceId: ResourceId + + +class ResolverQueryLogConfigAssociation(TypedDict, total=False): + Id: Optional[ResourceId] + ResolverQueryLogConfigId: Optional[ResourceId] + ResourceId: Optional[ResourceId] + Status: Optional[ResolverQueryLogConfigAssociationStatus] + Error: Optional[ResolverQueryLogConfigAssociationError] + ErrorMessage: 
Optional[ResolverQueryLogConfigAssociationErrorMessage] + CreationTime: Optional[Rfc3339TimeString] + + +class AssociateResolverQueryLogConfigResponse(TypedDict, total=False): + ResolverQueryLogConfigAssociation: Optional[ResolverQueryLogConfigAssociation] + + +class AssociateResolverRuleRequest(ServiceRequest): + ResolverRuleId: ResourceId + Name: Optional[Name] + VPCId: ResourceId + + +class ResolverRuleAssociation(TypedDict, total=False): + Id: Optional[ResourceId] + ResolverRuleId: Optional[ResourceId] + Name: Optional[Name] + VPCId: Optional[ResourceId] + Status: Optional[ResolverRuleAssociationStatus] + StatusMessage: Optional[StatusMessage] + + +class AssociateResolverRuleResponse(TypedDict, total=False): + ResolverRuleAssociation: Optional[ResolverRuleAssociation] + + +class CreateFirewallDomainListRequest(ServiceRequest): + CreatorRequestId: CreatorRequestId + Name: Name + Tags: Optional[TagList] + + +class FirewallDomainList(TypedDict, total=False): + Id: Optional[ResourceId] + Arn: Optional[Arn] + Name: Optional[Name] + DomainCount: Optional[Unsigned] + Status: Optional[FirewallDomainListStatus] + StatusMessage: Optional[StatusMessage] + ManagedOwnerName: Optional[ServicePrinciple] + CreatorRequestId: Optional[CreatorRequestId] + CreationTime: Optional[Rfc3339TimeString] + ModificationTime: Optional[Rfc3339TimeString] + + +class CreateFirewallDomainListResponse(TypedDict, total=False): + FirewallDomainList: Optional[FirewallDomainList] + + +class CreateFirewallRuleGroupRequest(ServiceRequest): + CreatorRequestId: CreatorRequestId + Name: Name + Tags: Optional[TagList] + + +class FirewallRuleGroup(TypedDict, total=False): + Id: Optional[ResourceId] + Arn: Optional[Arn] + Name: Optional[Name] + RuleCount: Optional[Unsigned] + Status: Optional[FirewallRuleGroupStatus] + StatusMessage: Optional[StatusMessage] + OwnerId: Optional[AccountId] + CreatorRequestId: Optional[CreatorRequestId] + ShareStatus: Optional[ShareStatus] + CreationTime: Optional[Rfc3339TimeString] + ModificationTime: Optional[Rfc3339TimeString] + + +class CreateFirewallRuleGroupResponse(TypedDict, total=False): + FirewallRuleGroup: Optional[FirewallRuleGroup] + + +class CreateFirewallRuleRequest(ServiceRequest): + CreatorRequestId: CreatorRequestId + FirewallRuleGroupId: ResourceId + FirewallDomainListId: Optional[ResourceId] + Priority: Priority + Action: Action + BlockResponse: Optional[BlockResponse] + BlockOverrideDomain: Optional[BlockOverrideDomain] + BlockOverrideDnsType: Optional[BlockOverrideDnsType] + BlockOverrideTtl: Optional[BlockOverrideTtl] + Name: Name + FirewallDomainRedirectionAction: Optional[FirewallDomainRedirectionAction] + Qtype: Optional[Qtype] + DnsThreatProtection: Optional[DnsThreatProtection] + ConfidenceThreshold: Optional[ConfidenceThreshold] + + +class FirewallRule(TypedDict, total=False): + FirewallRuleGroupId: Optional[ResourceId] + FirewallDomainListId: Optional[ResourceId] + FirewallThreatProtectionId: Optional[ResourceId] + Name: Optional[Name] + Priority: Optional[Priority] + Action: Optional[Action] + BlockResponse: Optional[BlockResponse] + BlockOverrideDomain: Optional[BlockOverrideDomain] + BlockOverrideDnsType: Optional[BlockOverrideDnsType] + BlockOverrideTtl: Optional[Unsigned] + CreatorRequestId: Optional[CreatorRequestId] + CreationTime: Optional[Rfc3339TimeString] + ModificationTime: Optional[Rfc3339TimeString] + FirewallDomainRedirectionAction: Optional[FirewallDomainRedirectionAction] + Qtype: Optional[Qtype] + DnsThreatProtection: Optional[DnsThreatProtection] + 
ConfidenceThreshold: Optional[ConfidenceThreshold]
+
+
+class CreateFirewallRuleResponse(TypedDict, total=False):
+    FirewallRule: Optional[FirewallRule]
+
+
+class CreateOutpostResolverRequest(ServiceRequest):
+    CreatorRequestId: CreatorRequestId
+    Name: OutpostResolverName
+    InstanceCount: Optional[InstanceCount]
+    PreferredInstanceType: OutpostInstanceType
+    OutpostArn: OutpostArn
+    Tags: Optional[TagList]
+
+
+class OutpostResolver(TypedDict, total=False):
+    Arn: Optional[Arn]
+    CreationTime: Optional[Rfc3339TimeString]
+    ModificationTime: Optional[Rfc3339TimeString]
+    CreatorRequestId: Optional[CreatorRequestId]
+    Id: Optional[ResourceId]
+    InstanceCount: Optional[InstanceCount]
+    PreferredInstanceType: Optional[OutpostInstanceType]
+    Name: Optional[OutpostResolverName]
+    Status: Optional[OutpostResolverStatus]
+    StatusMessage: Optional[OutpostResolverStatusMessage]
+    OutpostArn: Optional[OutpostArn]
+
+
+class CreateOutpostResolverResponse(TypedDict, total=False):
+    OutpostResolver: Optional[OutpostResolver]
+
+
+class IpAddressRequest(TypedDict, total=False):
+    SubnetId: SubnetId
+    Ip: Optional[Ip]
+    Ipv6: Optional[Ipv6]
+
+
+IpAddressesRequest = List[IpAddressRequest]
+
+
+class CreateResolverEndpointRequest(ServiceRequest):
+    CreatorRequestId: CreatorRequestId
+    Name: Optional[Name]
+    SecurityGroupIds: SecurityGroupIds
+    Direction: ResolverEndpointDirection
+    IpAddresses: IpAddressesRequest
+    OutpostArn: Optional[OutpostArn]
+    PreferredInstanceType: Optional[OutpostInstanceType]
+    Tags: Optional[TagList]
+    ResolverEndpointType: Optional[ResolverEndpointType]
+    Protocols: Optional[ProtocolList]
+
+
+class CreateResolverEndpointResponse(TypedDict, total=False):
+    ResolverEndpoint: Optional[ResolverEndpoint]
+
+
+class CreateResolverQueryLogConfigRequest(ServiceRequest):
+    Name: ResolverQueryLogConfigName
+    DestinationArn: DestinationArn
+    CreatorRequestId: CreatorRequestId
+    Tags: Optional[TagList]
+
+
+class ResolverQueryLogConfig(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    OwnerId: Optional[AccountId]
+    Status: Optional[ResolverQueryLogConfigStatus]
+    ShareStatus: Optional[ShareStatus]
+    AssociationCount: Optional[Count]
+    Arn: Optional[Arn]
+    Name: Optional[ResolverQueryLogConfigName]
+    DestinationArn: Optional[DestinationArn]
+    CreatorRequestId: Optional[CreatorRequestId]
+    CreationTime: Optional[Rfc3339TimeString]
+
+
+class CreateResolverQueryLogConfigResponse(TypedDict, total=False):
+    ResolverQueryLogConfig: Optional[ResolverQueryLogConfig]
+
+
+class TargetAddress(TypedDict, total=False):
+    Ip: Optional[Ip]
+    Port: Optional[Port]
+    Ipv6: Optional[Ipv6]
+    Protocol: Optional[Protocol]
+    ServerNameIndication: Optional[ServerNameIndication]
+
+
+TargetList = List[TargetAddress]
+
+
+class CreateResolverRuleRequest(ServiceRequest):
+    CreatorRequestId: CreatorRequestId
+    Name: Optional[Name]
+    RuleType: RuleTypeOption
+    DomainName: Optional[DomainName]
+    TargetIps: Optional[TargetList]
+    ResolverEndpointId: Optional[ResourceId]
+    Tags: Optional[TagList]
+
+
+class ResolverRule(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    CreatorRequestId: Optional[CreatorRequestId]
+    Arn: Optional[Arn]
+    DomainName: Optional[DomainName]
+    Status: Optional[ResolverRuleStatus]
+    StatusMessage: Optional[StatusMessage]
+    RuleType: Optional[RuleTypeOption]
+    Name: Optional[Name]
+    TargetIps: Optional[TargetList]
+    ResolverEndpointId: Optional[ResourceId]
+    OwnerId: Optional[AccountId]
+    ShareStatus: Optional[ShareStatus]
+    CreationTime: Optional[Rfc3339TimeString]
+    ModificationTime: Optional[Rfc3339TimeString]
+
+
+class CreateResolverRuleResponse(TypedDict, total=False):
+    ResolverRule: Optional[ResolverRule]
+
+
+class DeleteFirewallDomainListRequest(ServiceRequest):
+    FirewallDomainListId: ResourceId
+
+
+class DeleteFirewallDomainListResponse(TypedDict, total=False):
+    FirewallDomainList: Optional[FirewallDomainList]
+
+
+class DeleteFirewallRuleGroupRequest(ServiceRequest):
+    FirewallRuleGroupId: ResourceId
+
+
+class DeleteFirewallRuleGroupResponse(TypedDict, total=False):
+    FirewallRuleGroup: Optional[FirewallRuleGroup]
+
+
+class DeleteFirewallRuleRequest(ServiceRequest):
+    FirewallRuleGroupId: ResourceId
+    FirewallDomainListId: Optional[ResourceId]
+    FirewallThreatProtectionId: Optional[ResourceId]
+    Qtype: Optional[Qtype]
+
+
+class DeleteFirewallRuleResponse(TypedDict, total=False):
+    FirewallRule: Optional[FirewallRule]
+
+
+class DeleteOutpostResolverRequest(ServiceRequest):
+    Id: ResourceId
+
+
+class DeleteOutpostResolverResponse(TypedDict, total=False):
+    OutpostResolver: Optional[OutpostResolver]
+
+
+class DeleteResolverEndpointRequest(ServiceRequest):
+    ResolverEndpointId: ResourceId
+
+
+class DeleteResolverEndpointResponse(TypedDict, total=False):
+    ResolverEndpoint: Optional[ResolverEndpoint]
+
+
+class DeleteResolverQueryLogConfigRequest(ServiceRequest):
+    ResolverQueryLogConfigId: ResourceId
+
+
+class DeleteResolverQueryLogConfigResponse(TypedDict, total=False):
+    ResolverQueryLogConfig: Optional[ResolverQueryLogConfig]
+
+
+class DeleteResolverRuleRequest(ServiceRequest):
+    ResolverRuleId: ResourceId
+
+
+class DeleteResolverRuleResponse(TypedDict, total=False):
+    ResolverRule: Optional[ResolverRule]
+
+
+class DisassociateFirewallRuleGroupRequest(ServiceRequest):
+    FirewallRuleGroupAssociationId: ResourceId
+
+
+class DisassociateFirewallRuleGroupResponse(TypedDict, total=False):
+    FirewallRuleGroupAssociation: Optional[FirewallRuleGroupAssociation]
+
+
+class DisassociateResolverEndpointIpAddressRequest(ServiceRequest):
+    ResolverEndpointId: ResourceId
+    IpAddress: IpAddressUpdate
+
+
+class DisassociateResolverEndpointIpAddressResponse(TypedDict, total=False):
+    ResolverEndpoint: Optional[ResolverEndpoint]
+
+
+class DisassociateResolverQueryLogConfigRequest(ServiceRequest):
+    ResolverQueryLogConfigId: ResourceId
+    ResourceId: ResourceId
+
+
+class DisassociateResolverQueryLogConfigResponse(TypedDict, total=False):
+    ResolverQueryLogConfigAssociation: Optional[ResolverQueryLogConfigAssociation]
+
+
+class DisassociateResolverRuleRequest(ServiceRequest):
+    VPCId: ResourceId
+    ResolverRuleId: ResourceId
+
+
+class DisassociateResolverRuleResponse(TypedDict, total=False):
+    ResolverRuleAssociation: Optional[ResolverRuleAssociation]
+
+
+FilterValues = List[FilterValue]
+
+
+class Filter(TypedDict, total=False):
+    Name: Optional[FilterName]
+    Values: Optional[FilterValues]
+
+
+Filters = List[Filter]
+
+
+class FirewallConfig(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    ResourceId: Optional[ResourceId]
+    OwnerId: Optional[AccountId]
+    FirewallFailOpen: Optional[FirewallFailOpenStatus]
+
+
+FirewallConfigList = List[FirewallConfig]
+
+
+class FirewallDomainListMetadata(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    Arn: Optional[Arn]
+    Name: Optional[Name]
+    CreatorRequestId: Optional[CreatorRequestId]
+    ManagedOwnerName: Optional[ServicePrinciple]
+
+
+FirewallDomainListMetadataList = List[FirewallDomainListMetadata]
+FirewallDomains = List[FirewallDomainName]
+FirewallRuleGroupAssociations = List[FirewallRuleGroupAssociation]
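
Note on the generated shapes above: they are plain TypedDicts declared with total=False, so every key is optional to the type checker and a response can be assembled field by field. A minimal sketch of how a return value type-checks against these shapes; the ID, ARN, and name below are made-up placeholders, not values produced by any real implementation:

from localstack.aws.api.route53resolver import (
    CreateOutpostResolverResponse,
    OutpostResolver,
)

# Every key of OutpostResolver is optional (total=False), so a partially
# populated dict still satisfies the annotation.
resolver: OutpostResolver = {
    "Id": "rslvr-op-0000",  # hypothetical resource ID
    "Arn": "arn:aws:route53resolver:us-east-1:000000000000:outpost-resolver/rslvr-op-0000",
    "Name": "example-outpost-resolver",  # hypothetical name
}
response: CreateOutpostResolverResponse = {"OutpostResolver": resolver}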
+
+
+class FirewallRuleGroupMetadata(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    Arn: Optional[Arn]
+    Name: Optional[Name]
+    OwnerId: Optional[AccountId]
+    CreatorRequestId: Optional[CreatorRequestId]
+    ShareStatus: Optional[ShareStatus]
+
+
+FirewallRuleGroupMetadataList = List[FirewallRuleGroupMetadata]
+FirewallRules = List[FirewallRule]
+
+
+class GetFirewallConfigRequest(ServiceRequest):
+    ResourceId: ResourceId
+
+
+class GetFirewallConfigResponse(TypedDict, total=False):
+    FirewallConfig: Optional[FirewallConfig]
+
+
+class GetFirewallDomainListRequest(ServiceRequest):
+    FirewallDomainListId: ResourceId
+
+
+class GetFirewallDomainListResponse(TypedDict, total=False):
+    FirewallDomainList: Optional[FirewallDomainList]
+
+
+class GetFirewallRuleGroupAssociationRequest(ServiceRequest):
+    FirewallRuleGroupAssociationId: ResourceId
+
+
+class GetFirewallRuleGroupAssociationResponse(TypedDict, total=False):
+    FirewallRuleGroupAssociation: Optional[FirewallRuleGroupAssociation]
+
+
+class GetFirewallRuleGroupPolicyRequest(ServiceRequest):
+    Arn: Arn
+
+
+class GetFirewallRuleGroupPolicyResponse(TypedDict, total=False):
+    FirewallRuleGroupPolicy: Optional[FirewallRuleGroupPolicy]
+
+
+class GetFirewallRuleGroupRequest(ServiceRequest):
+    FirewallRuleGroupId: ResourceId
+
+
+class GetFirewallRuleGroupResponse(TypedDict, total=False):
+    FirewallRuleGroup: Optional[FirewallRuleGroup]
+
+
+class GetOutpostResolverRequest(ServiceRequest):
+    Id: ResourceId
+
+
+class GetOutpostResolverResponse(TypedDict, total=False):
+    OutpostResolver: Optional[OutpostResolver]
+
+
+class GetResolverConfigRequest(ServiceRequest):
+    ResourceId: ResourceId
+
+
+class ResolverConfig(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    ResourceId: Optional[ResourceId]
+    OwnerId: Optional[AccountId]
+    AutodefinedReverse: Optional[ResolverAutodefinedReverseStatus]
+
+
+class GetResolverConfigResponse(TypedDict, total=False):
+    ResolverConfig: Optional[ResolverConfig]
+
+
+class GetResolverDnssecConfigRequest(ServiceRequest):
+    ResourceId: ResourceId
+
+
+class ResolverDnssecConfig(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    OwnerId: Optional[AccountId]
+    ResourceId: Optional[ResourceId]
+    ValidationStatus: Optional[ResolverDNSSECValidationStatus]
+
+
+class GetResolverDnssecConfigResponse(TypedDict, total=False):
+    ResolverDNSSECConfig: Optional[ResolverDnssecConfig]
+
+
+class GetResolverEndpointRequest(ServiceRequest):
+    ResolverEndpointId: ResourceId
+
+
+class GetResolverEndpointResponse(TypedDict, total=False):
+    ResolverEndpoint: Optional[ResolverEndpoint]
+
+
+class GetResolverQueryLogConfigAssociationRequest(ServiceRequest):
+    ResolverQueryLogConfigAssociationId: ResourceId
+
+
+class GetResolverQueryLogConfigAssociationResponse(TypedDict, total=False):
+    ResolverQueryLogConfigAssociation: Optional[ResolverQueryLogConfigAssociation]
+
+
+class GetResolverQueryLogConfigPolicyRequest(ServiceRequest):
+    Arn: Arn
+
+
+class GetResolverQueryLogConfigPolicyResponse(TypedDict, total=False):
+    ResolverQueryLogConfigPolicy: Optional[ResolverQueryLogConfigPolicy]
+
+
+class GetResolverQueryLogConfigRequest(ServiceRequest):
+    ResolverQueryLogConfigId: ResourceId
+
+
+class GetResolverQueryLogConfigResponse(TypedDict, total=False):
+    ResolverQueryLogConfig: Optional[ResolverQueryLogConfig]
+
+
+class GetResolverRuleAssociationRequest(ServiceRequest):
+    ResolverRuleAssociationId: ResourceId
+
+
+class GetResolverRuleAssociationResponse(TypedDict, total=False):
+    ResolverRuleAssociation: Optional[ResolverRuleAssociation]
+
+
+class GetResolverRulePolicyRequest(ServiceRequest):
+    Arn: Arn
+
+
+class GetResolverRulePolicyResponse(TypedDict, total=False):
+    ResolverRulePolicy: Optional[ResolverRulePolicy]
+
+
+class GetResolverRuleRequest(ServiceRequest):
+    ResolverRuleId: ResourceId
+
+
+class GetResolverRuleResponse(TypedDict, total=False):
+    ResolverRule: Optional[ResolverRule]
+
+
+class ImportFirewallDomainsRequest(ServiceRequest):
+    FirewallDomainListId: ResourceId
+    Operation: FirewallDomainImportOperation
+    DomainFileUrl: DomainListFileUrl
+
+
+class ImportFirewallDomainsResponse(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    Name: Optional[Name]
+    Status: Optional[FirewallDomainListStatus]
+    StatusMessage: Optional[StatusMessage]
+
+
+class IpAddressResponse(TypedDict, total=False):
+    IpId: Optional[ResourceId]
+    SubnetId: Optional[SubnetId]
+    Ip: Optional[Ip]
+    Ipv6: Optional[Ipv6]
+    Status: Optional[IpAddressStatus]
+    StatusMessage: Optional[StatusMessage]
+    CreationTime: Optional[Rfc3339TimeString]
+    ModificationTime: Optional[Rfc3339TimeString]
+
+
+IpAddressesResponse = List[IpAddressResponse]
+
+
+class ListFirewallConfigsRequest(ServiceRequest):
+    MaxResults: Optional[ListFirewallConfigsMaxResult]
+    NextToken: Optional[NextToken]
+
+
+class ListFirewallConfigsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    FirewallConfigs: Optional[FirewallConfigList]
+
+
+class ListFirewallDomainListsRequest(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ListFirewallDomainListsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    FirewallDomainLists: Optional[FirewallDomainListMetadataList]
+
+
+class ListFirewallDomainsRequest(ServiceRequest):
+    FirewallDomainListId: ResourceId
+    MaxResults: Optional[ListDomainMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ListFirewallDomainsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    Domains: Optional[FirewallDomains]
+
+
+class ListFirewallRuleGroupAssociationsRequest(ServiceRequest):
+    FirewallRuleGroupId: Optional[ResourceId]
+    VpcId: Optional[ResourceId]
+    Priority: Optional[Priority]
+    Status: Optional[FirewallRuleGroupAssociationStatus]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ListFirewallRuleGroupAssociationsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    FirewallRuleGroupAssociations: Optional[FirewallRuleGroupAssociations]
+
+
+class ListFirewallRuleGroupsRequest(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ListFirewallRuleGroupsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    FirewallRuleGroups: Optional[FirewallRuleGroupMetadataList]
+
+
+class ListFirewallRulesRequest(ServiceRequest):
+    FirewallRuleGroupId: ResourceId
+    Priority: Optional[Priority]
+    Action: Optional[Action]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ListFirewallRulesResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    FirewallRules: Optional[FirewallRules]
+
+
+class ListOutpostResolversRequest(ServiceRequest):
+    OutpostArn: Optional[OutpostArn]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+OutpostResolverList = List[OutpostResolver]
+
+
+class ListOutpostResolversResponse(TypedDict, total=False):
+    OutpostResolvers: Optional[OutpostResolverList]
+    NextToken: Optional[NextToken]
+
+
+class ListResolverConfigsRequest(ServiceRequest):
+    MaxResults: Optional[ListResolverConfigsMaxResult]
+    NextToken: Optional[NextToken]
+
+
+ResolverConfigList = List[ResolverConfig]
+
+
+class ListResolverConfigsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    ResolverConfigs: Optional[ResolverConfigList]
+
+
+class ListResolverDnssecConfigsRequest(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[Filters]
+
+
+ResolverDnssecConfigList = List[ResolverDnssecConfig]
+
+
+class ListResolverDnssecConfigsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    ResolverDnssecConfigs: Optional[ResolverDnssecConfigList]
+
+
+class ListResolverEndpointIpAddressesRequest(ServiceRequest):
+    ResolverEndpointId: ResourceId
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ListResolverEndpointIpAddressesResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[MaxResults]
+    IpAddresses: Optional[IpAddressesResponse]
+
+
+class ListResolverEndpointsRequest(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[Filters]
+
+
+ResolverEndpoints = List[ResolverEndpoint]
+
+
+class ListResolverEndpointsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[MaxResults]
+    ResolverEndpoints: Optional[ResolverEndpoints]
+
+
+class ListResolverQueryLogConfigAssociationsRequest(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[Filters]
+    SortBy: Optional[SortByKey]
+    SortOrder: Optional[SortOrder]
+
+
+ResolverQueryLogConfigAssociationList = List[ResolverQueryLogConfigAssociation]
+
+
+class ListResolverQueryLogConfigAssociationsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    TotalCount: Optional[Count]
+    TotalFilteredCount: Optional[Count]
+    ResolverQueryLogConfigAssociations: Optional[ResolverQueryLogConfigAssociationList]
+
+
+class ListResolverQueryLogConfigsRequest(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[Filters]
+    SortBy: Optional[SortByKey]
+    SortOrder: Optional[SortOrder]
+
+
+ResolverQueryLogConfigList = List[ResolverQueryLogConfig]
+
+
+class ListResolverQueryLogConfigsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    TotalCount: Optional[Count]
+    TotalFilteredCount: Optional[Count]
+    ResolverQueryLogConfigs: Optional[ResolverQueryLogConfigList]
+
+
+class ListResolverRuleAssociationsRequest(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[Filters]
+
+
+ResolverRuleAssociations = List[ResolverRuleAssociation]
+
+
+class ListResolverRuleAssociationsResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[MaxResults]
+    ResolverRuleAssociations: Optional[ResolverRuleAssociations]
+
+
+class ListResolverRulesRequest(ServiceRequest):
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[Filters]
+
+
+ResolverRules = List[ResolverRule]
+
+
+class ListResolverRulesResponse(TypedDict, total=False):
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[MaxResults]
+    ResolverRules: Optional[ResolverRules]
+
+
+class ListTagsForResourceRequest(ServiceRequest):
+    ResourceArn: Arn
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ListTagsForResourceResponse(TypedDict, total=False):
+    Tags: Optional[TagList]
+    NextToken: Optional[NextToken]
+
+
+class PutFirewallRuleGroupPolicyRequest(ServiceRequest):
+    Arn: Arn
+    FirewallRuleGroupPolicy: FirewallRuleGroupPolicy
+
+
+class PutFirewallRuleGroupPolicyResponse(TypedDict, total=False):
+    ReturnValue: Optional[Boolean]
+
+
+class PutResolverQueryLogConfigPolicyRequest(ServiceRequest):
+    Arn: Arn
+    ResolverQueryLogConfigPolicy: ResolverQueryLogConfigPolicy
+
+
+class PutResolverQueryLogConfigPolicyResponse(TypedDict, total=False):
+    ReturnValue: Optional[Boolean]
+
+
+class PutResolverRulePolicyRequest(ServiceRequest):
+    Arn: Arn
+    ResolverRulePolicy: ResolverRulePolicy
+
+
+class PutResolverRulePolicyResponse(TypedDict, total=False):
+    ReturnValue: Optional[Boolean]
+
+
+class ResolverRuleConfig(TypedDict, total=False):
+    Name: Optional[Name]
+    TargetIps: Optional[TargetList]
+    ResolverEndpointId: Optional[ResourceId]
+
+
+TagKeyList = List[TagKey]
+
+
+class TagResourceRequest(ServiceRequest):
+    ResourceArn: Arn
+    Tags: TagList
+
+
+class TagResourceResponse(TypedDict, total=False):
+    pass
+
+
+class UntagResourceRequest(ServiceRequest):
+    ResourceArn: Arn
+    TagKeys: TagKeyList
+
+
+class UntagResourceResponse(TypedDict, total=False):
+    pass
+
+
+class UpdateFirewallConfigRequest(ServiceRequest):
+    ResourceId: ResourceId
+    FirewallFailOpen: FirewallFailOpenStatus
+
+
+class UpdateFirewallConfigResponse(TypedDict, total=False):
+    FirewallConfig: Optional[FirewallConfig]
+
+
+class UpdateFirewallDomainsRequest(ServiceRequest):
+    FirewallDomainListId: ResourceId
+    Operation: FirewallDomainUpdateOperation
+    Domains: FirewallDomains
+
+
+class UpdateFirewallDomainsResponse(TypedDict, total=False):
+    Id: Optional[ResourceId]
+    Name: Optional[Name]
+    Status: Optional[FirewallDomainListStatus]
+    StatusMessage: Optional[StatusMessage]
+
+
+class UpdateFirewallRuleGroupAssociationRequest(ServiceRequest):
+    FirewallRuleGroupAssociationId: ResourceId
+    Priority: Optional[Priority]
+    MutationProtection: Optional[MutationProtectionStatus]
+    Name: Optional[Name]
+
+
+class UpdateFirewallRuleGroupAssociationResponse(TypedDict, total=False):
+    FirewallRuleGroupAssociation: Optional[FirewallRuleGroupAssociation]
+
+
+class UpdateFirewallRuleRequest(ServiceRequest):
+    FirewallRuleGroupId: ResourceId
+    FirewallDomainListId: Optional[ResourceId]
+    FirewallThreatProtectionId: Optional[ResourceId]
+    Priority: Optional[Priority]
+    Action: Optional[Action]
+    BlockResponse: Optional[BlockResponse]
+    BlockOverrideDomain: Optional[BlockOverrideDomain]
+    BlockOverrideDnsType: Optional[BlockOverrideDnsType]
+    BlockOverrideTtl: Optional[BlockOverrideTtl]
+    Name: Optional[Name]
+    FirewallDomainRedirectionAction: Optional[FirewallDomainRedirectionAction]
+    Qtype: Optional[Qtype]
+    DnsThreatProtection: Optional[DnsThreatProtection]
+    ConfidenceThreshold: Optional[ConfidenceThreshold]
+
+
+class UpdateFirewallRuleResponse(TypedDict, total=False):
+    FirewallRule: Optional[FirewallRule]
+
+
+class UpdateIpAddress(TypedDict, total=False):
+    IpId: ResourceId
+    Ipv6: Ipv6
+
+
+UpdateIpAddresses = List[UpdateIpAddress]
+
+
+class UpdateOutpostResolverRequest(ServiceRequest):
+    Id: ResourceId
+    Name: Optional[OutpostResolverName]
+    InstanceCount: Optional[InstanceCount]
+    PreferredInstanceType: Optional[OutpostInstanceType]
+
+
+class UpdateOutpostResolverResponse(TypedDict, total=False):
+    OutpostResolver: Optional[OutpostResolver]
+
+
+class UpdateResolverConfigRequest(ServiceRequest):
+    ResourceId: ResourceId
+    AutodefinedReverseFlag: AutodefinedReverseFlag
+
+
+class UpdateResolverConfigResponse(TypedDict, total=False):
+    ResolverConfig: Optional[ResolverConfig]
+
+
+class UpdateResolverDnssecConfigRequest(ServiceRequest):
+    ResourceId: ResourceId
+    Validation: Validation
+
+
+class UpdateResolverDnssecConfigResponse(TypedDict, total=False):
+    ResolverDNSSECConfig: Optional[ResolverDnssecConfig]
+
+
+class UpdateResolverEndpointRequest(ServiceRequest):
+    ResolverEndpointId: ResourceId
+    Name: Optional[Name]
+    ResolverEndpointType: Optional[ResolverEndpointType]
+    UpdateIpAddresses: Optional[UpdateIpAddresses]
+    Protocols: Optional[ProtocolList]
+
+
+class UpdateResolverEndpointResponse(TypedDict, total=False):
+    ResolverEndpoint: Optional[ResolverEndpoint]
+
+
+class UpdateResolverRuleRequest(ServiceRequest):
+    ResolverRuleId: ResourceId
+    Config: ResolverRuleConfig
+
+
+class UpdateResolverRuleResponse(TypedDict, total=False):
+    ResolverRule: Optional[ResolverRule]
+
+
+class Route53ResolverApi:
+    service = "route53resolver"
+    version = "2018-04-01"
+
+    @handler("AssociateFirewallRuleGroup")
+    def associate_firewall_rule_group(
+        self,
+        context: RequestContext,
+        creator_request_id: CreatorRequestId,
+        firewall_rule_group_id: ResourceId,
+        vpc_id: ResourceId,
+        priority: Priority,
+        name: Name,
+        mutation_protection: MutationProtectionStatus | None = None,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> AssociateFirewallRuleGroupResponse:
+        raise NotImplementedError
+
+    @handler("AssociateResolverEndpointIpAddress")
+    def associate_resolver_endpoint_ip_address(
+        self,
+        context: RequestContext,
+        resolver_endpoint_id: ResourceId,
+        ip_address: IpAddressUpdate,
+        **kwargs,
+    ) -> AssociateResolverEndpointIpAddressResponse:
+        raise NotImplementedError
+
+    @handler("AssociateResolverQueryLogConfig")
+    def associate_resolver_query_log_config(
+        self,
+        context: RequestContext,
+        resolver_query_log_config_id: ResourceId,
+        resource_id: ResourceId,
+        **kwargs,
+    ) -> AssociateResolverQueryLogConfigResponse:
+        raise NotImplementedError
+
+    @handler("AssociateResolverRule")
+    def associate_resolver_rule(
+        self,
+        context: RequestContext,
+        resolver_rule_id: ResourceId,
+        vpc_id: ResourceId,
+        name: Name | None = None,
+        **kwargs,
+    ) -> AssociateResolverRuleResponse:
+        raise NotImplementedError
+
+    @handler("CreateFirewallDomainList")
+    def create_firewall_domain_list(
+        self,
+        context: RequestContext,
+        creator_request_id: CreatorRequestId,
+        name: Name,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateFirewallDomainListResponse:
+        raise NotImplementedError
+
+    @handler("CreateFirewallRule")
+    def create_firewall_rule(
+        self,
+        context: RequestContext,
+        creator_request_id: CreatorRequestId,
+        firewall_rule_group_id: ResourceId,
+        priority: Priority,
+        action: Action,
+        name: Name,
+        firewall_domain_list_id: ResourceId | None = None,
+        block_response: BlockResponse | None = None,
+        block_override_domain: BlockOverrideDomain | None = None,
+        block_override_dns_type: BlockOverrideDnsType | None = None,
+        block_override_ttl: BlockOverrideTtl | None = None,
+        firewall_domain_redirection_action: FirewallDomainRedirectionAction | None = None,
+        qtype: Qtype | None = None,
+        dns_threat_protection: DnsThreatProtection | None = None,
+        confidence_threshold: ConfidenceThreshold | None = None,
+        **kwargs,
+    ) -> CreateFirewallRuleResponse:
+        raise NotImplementedError
+
+    @handler("CreateFirewallRuleGroup")
+    def create_firewall_rule_group(
+        self,
+        context: RequestContext,
+        creator_request_id: CreatorRequestId,
+        name: Name,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateFirewallRuleGroupResponse:
+        raise NotImplementedError
+
+    @handler("CreateOutpostResolver")
+    def create_outpost_resolver(
+        self,
+        context: RequestContext,
+        creator_request_id: CreatorRequestId,
+        name: OutpostResolverName,
+        preferred_instance_type: OutpostInstanceType,
+        outpost_arn: OutpostArn,
+        instance_count: InstanceCount | None = None,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateOutpostResolverResponse:
+        raise NotImplementedError
+
+    @handler("CreateResolverEndpoint")
+    def create_resolver_endpoint(
+        self,
+        context: RequestContext,
+        creator_request_id: CreatorRequestId,
+        security_group_ids: SecurityGroupIds,
+        direction: ResolverEndpointDirection,
+        ip_addresses: IpAddressesRequest,
+        name: Name | None = None,
+        outpost_arn: OutpostArn | None = None,
+        preferred_instance_type: OutpostInstanceType | None = None,
+        tags: TagList | None = None,
+        resolver_endpoint_type: ResolverEndpointType | None = None,
+        protocols: ProtocolList | None = None,
+        **kwargs,
+    ) -> CreateResolverEndpointResponse:
+        raise NotImplementedError
+
+    @handler("CreateResolverQueryLogConfig")
+    def create_resolver_query_log_config(
+        self,
+        context: RequestContext,
+        name: ResolverQueryLogConfigName,
+        destination_arn: DestinationArn,
+        creator_request_id: CreatorRequestId,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateResolverQueryLogConfigResponse:
+        raise NotImplementedError
+
+    @handler("CreateResolverRule")
+    def create_resolver_rule(
+        self,
+        context: RequestContext,
+        creator_request_id: CreatorRequestId,
+        rule_type: RuleTypeOption,
+        name: Name | None = None,
+        domain_name: DomainName | None = None,
+        target_ips: TargetList | None = None,
+        resolver_endpoint_id: ResourceId | None = None,
+        tags: TagList | None = None,
+        **kwargs,
+    ) -> CreateResolverRuleResponse:
+        raise NotImplementedError
+
+    @handler("DeleteFirewallDomainList")
+    def delete_firewall_domain_list(
+        self, context: RequestContext, firewall_domain_list_id: ResourceId, **kwargs
+    ) -> DeleteFirewallDomainListResponse:
+        raise NotImplementedError
+
+    @handler("DeleteFirewallRule")
+    def delete_firewall_rule(
+        self,
+        context: RequestContext,
+        firewall_rule_group_id: ResourceId,
+        firewall_domain_list_id: ResourceId | None = None,
+        firewall_threat_protection_id: ResourceId | None = None,
+        qtype: Qtype | None = None,
+        **kwargs,
+    ) -> DeleteFirewallRuleResponse:
+        raise NotImplementedError
+
+    @handler("DeleteFirewallRuleGroup")
+    def delete_firewall_rule_group(
+        self, context: RequestContext, firewall_rule_group_id: ResourceId, **kwargs
+    ) -> DeleteFirewallRuleGroupResponse:
+        raise NotImplementedError
+
+    @handler("DeleteOutpostResolver")
+    def delete_outpost_resolver(
+        self, context: RequestContext, id: ResourceId, **kwargs
+    ) -> DeleteOutpostResolverResponse:
+        raise NotImplementedError
+
+    @handler("DeleteResolverEndpoint")
+    def delete_resolver_endpoint(
+        self, context: RequestContext, resolver_endpoint_id: ResourceId, **kwargs
+    ) -> DeleteResolverEndpointResponse:
+        raise NotImplementedError
+
+    @handler("DeleteResolverQueryLogConfig")
+    def delete_resolver_query_log_config(
+        self, context: RequestContext, resolver_query_log_config_id: ResourceId, **kwargs
+    ) -> DeleteResolverQueryLogConfigResponse:
+        raise NotImplementedError
+
+    @handler("DeleteResolverRule")
+    def delete_resolver_rule(
+        self, context: RequestContext, resolver_rule_id: ResourceId, **kwargs
+    ) -> DeleteResolverRuleResponse:
+        raise NotImplementedError
+
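For orientation: every method of this Route53ResolverApi skeleton raises NotImplementedError; a concrete provider subclasses it and overrides the operations it supports. A minimal, hypothetical sketch of such an override, reusing the @handler convention visible in the generated code (the InMemoryProvider class and its rules dict are illustrative only, not LocalStack's actual provider; real error handling is elided):

from localstack.aws.api import RequestContext, handler
from localstack.aws.api.route53resolver import (
    DeleteResolverRuleResponse,
    ResolverRule,
    ResourceId,
    Route53ResolverApi,
)


class InMemoryProvider(Route53ResolverApi):
    # Illustrative store: resolver rules keyed by their resource ID.
    def __init__(self):
        self.rules: dict[ResourceId, ResolverRule] = {}

    @handler("DeleteResolverRule")
    def delete_resolver_rule(
        self, context: RequestContext, resolver_rule_id: ResourceId, **kwargs
    ) -> DeleteResolverRuleResponse:
        # Remove the stored rule and echo it back in the response, mirroring
        # the AWS API; a real provider would raise the service's not-found
        # error for an unknown ID instead of returning None.
        rule = self.rules.pop(resolver_rule_id, None)
        return DeleteResolverRuleResponse(ResolverRule=rule)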
@handler("DisassociateFirewallRuleGroup") + def disassociate_firewall_rule_group( + self, context: RequestContext, firewall_rule_group_association_id: ResourceId, **kwargs + ) -> DisassociateFirewallRuleGroupResponse: + raise NotImplementedError + + @handler("DisassociateResolverEndpointIpAddress") + def disassociate_resolver_endpoint_ip_address( + self, + context: RequestContext, + resolver_endpoint_id: ResourceId, + ip_address: IpAddressUpdate, + **kwargs, + ) -> DisassociateResolverEndpointIpAddressResponse: + raise NotImplementedError + + @handler("DisassociateResolverQueryLogConfig") + def disassociate_resolver_query_log_config( + self, + context: RequestContext, + resolver_query_log_config_id: ResourceId, + resource_id: ResourceId, + **kwargs, + ) -> DisassociateResolverQueryLogConfigResponse: + raise NotImplementedError + + @handler("DisassociateResolverRule") + def disassociate_resolver_rule( + self, context: RequestContext, vpc_id: ResourceId, resolver_rule_id: ResourceId, **kwargs + ) -> DisassociateResolverRuleResponse: + raise NotImplementedError + + @handler("GetFirewallConfig") + def get_firewall_config( + self, context: RequestContext, resource_id: ResourceId, **kwargs + ) -> GetFirewallConfigResponse: + raise NotImplementedError + + @handler("GetFirewallDomainList") + def get_firewall_domain_list( + self, context: RequestContext, firewall_domain_list_id: ResourceId, **kwargs + ) -> GetFirewallDomainListResponse: + raise NotImplementedError + + @handler("GetFirewallRuleGroup") + def get_firewall_rule_group( + self, context: RequestContext, firewall_rule_group_id: ResourceId, **kwargs + ) -> GetFirewallRuleGroupResponse: + raise NotImplementedError + + @handler("GetFirewallRuleGroupAssociation") + def get_firewall_rule_group_association( + self, context: RequestContext, firewall_rule_group_association_id: ResourceId, **kwargs + ) -> GetFirewallRuleGroupAssociationResponse: + raise NotImplementedError + + @handler("GetFirewallRuleGroupPolicy") + def get_firewall_rule_group_policy( + self, context: RequestContext, arn: Arn, **kwargs + ) -> GetFirewallRuleGroupPolicyResponse: + raise NotImplementedError + + @handler("GetOutpostResolver") + def get_outpost_resolver( + self, context: RequestContext, id: ResourceId, **kwargs + ) -> GetOutpostResolverResponse: + raise NotImplementedError + + @handler("GetResolverConfig") + def get_resolver_config( + self, context: RequestContext, resource_id: ResourceId, **kwargs + ) -> GetResolverConfigResponse: + raise NotImplementedError + + @handler("GetResolverDnssecConfig") + def get_resolver_dnssec_config( + self, context: RequestContext, resource_id: ResourceId, **kwargs + ) -> GetResolverDnssecConfigResponse: + raise NotImplementedError + + @handler("GetResolverEndpoint") + def get_resolver_endpoint( + self, context: RequestContext, resolver_endpoint_id: ResourceId, **kwargs + ) -> GetResolverEndpointResponse: + raise NotImplementedError + + @handler("GetResolverQueryLogConfig") + def get_resolver_query_log_config( + self, context: RequestContext, resolver_query_log_config_id: ResourceId, **kwargs + ) -> GetResolverQueryLogConfigResponse: + raise NotImplementedError + + @handler("GetResolverQueryLogConfigAssociation") + def get_resolver_query_log_config_association( + self, + context: RequestContext, + resolver_query_log_config_association_id: ResourceId, + **kwargs, + ) -> GetResolverQueryLogConfigAssociationResponse: + raise NotImplementedError + + @handler("GetResolverQueryLogConfigPolicy") + def get_resolver_query_log_config_policy( + 
self, context: RequestContext, arn: Arn, **kwargs + ) -> GetResolverQueryLogConfigPolicyResponse: + raise NotImplementedError + + @handler("GetResolverRule") + def get_resolver_rule( + self, context: RequestContext, resolver_rule_id: ResourceId, **kwargs + ) -> GetResolverRuleResponse: + raise NotImplementedError + + @handler("GetResolverRuleAssociation") + def get_resolver_rule_association( + self, context: RequestContext, resolver_rule_association_id: ResourceId, **kwargs + ) -> GetResolverRuleAssociationResponse: + raise NotImplementedError + + @handler("GetResolverRulePolicy") + def get_resolver_rule_policy( + self, context: RequestContext, arn: Arn, **kwargs + ) -> GetResolverRulePolicyResponse: + raise NotImplementedError + + @handler("ImportFirewallDomains") + def import_firewall_domains( + self, + context: RequestContext, + firewall_domain_list_id: ResourceId, + operation: FirewallDomainImportOperation, + domain_file_url: DomainListFileUrl, + **kwargs, + ) -> ImportFirewallDomainsResponse: + raise NotImplementedError + + @handler("ListFirewallConfigs") + def list_firewall_configs( + self, + context: RequestContext, + max_results: ListFirewallConfigsMaxResult | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListFirewallConfigsResponse: + raise NotImplementedError + + @handler("ListFirewallDomainLists") + def list_firewall_domain_lists( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListFirewallDomainListsResponse: + raise NotImplementedError + + @handler("ListFirewallDomains") + def list_firewall_domains( + self, + context: RequestContext, + firewall_domain_list_id: ResourceId, + max_results: ListDomainMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListFirewallDomainsResponse: + raise NotImplementedError + + @handler("ListFirewallRuleGroupAssociations") + def list_firewall_rule_group_associations( + self, + context: RequestContext, + firewall_rule_group_id: ResourceId | None = None, + vpc_id: ResourceId | None = None, + priority: Priority | None = None, + status: FirewallRuleGroupAssociationStatus | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListFirewallRuleGroupAssociationsResponse: + raise NotImplementedError + + @handler("ListFirewallRuleGroups") + def list_firewall_rule_groups( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListFirewallRuleGroupsResponse: + raise NotImplementedError + + @handler("ListFirewallRules") + def list_firewall_rules( + self, + context: RequestContext, + firewall_rule_group_id: ResourceId, + priority: Priority | None = None, + action: Action | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListFirewallRulesResponse: + raise NotImplementedError + + @handler("ListOutpostResolvers") + def list_outpost_resolvers( + self, + context: RequestContext, + outpost_arn: OutpostArn | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListOutpostResolversResponse: + raise NotImplementedError + + @handler("ListResolverConfigs") + def list_resolver_configs( + self, + context: RequestContext, + max_results: ListResolverConfigsMaxResult | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListResolverConfigsResponse: + raise 
NotImplementedError + + @handler("ListResolverDnssecConfigs") + def list_resolver_dnssec_configs( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + filters: Filters | None = None, + **kwargs, + ) -> ListResolverDnssecConfigsResponse: + raise NotImplementedError + + @handler("ListResolverEndpointIpAddresses") + def list_resolver_endpoint_ip_addresses( + self, + context: RequestContext, + resolver_endpoint_id: ResourceId, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListResolverEndpointIpAddressesResponse: + raise NotImplementedError + + @handler("ListResolverEndpoints") + def list_resolver_endpoints( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + filters: Filters | None = None, + **kwargs, + ) -> ListResolverEndpointsResponse: + raise NotImplementedError + + @handler("ListResolverQueryLogConfigAssociations") + def list_resolver_query_log_config_associations( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + filters: Filters | None = None, + sort_by: SortByKey | None = None, + sort_order: SortOrder | None = None, + **kwargs, + ) -> ListResolverQueryLogConfigAssociationsResponse: + raise NotImplementedError + + @handler("ListResolverQueryLogConfigs") + def list_resolver_query_log_configs( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + filters: Filters | None = None, + sort_by: SortByKey | None = None, + sort_order: SortOrder | None = None, + **kwargs, + ) -> ListResolverQueryLogConfigsResponse: + raise NotImplementedError + + @handler("ListResolverRuleAssociations") + def list_resolver_rule_associations( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + filters: Filters | None = None, + **kwargs, + ) -> ListResolverRuleAssociationsResponse: + raise NotImplementedError + + @handler("ListResolverRules") + def list_resolver_rules( + self, + context: RequestContext, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + filters: Filters | None = None, + **kwargs, + ) -> ListResolverRulesResponse: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, + context: RequestContext, + resource_arn: Arn, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListTagsForResourceResponse: + raise NotImplementedError + + @handler("PutFirewallRuleGroupPolicy") + def put_firewall_rule_group_policy( + self, + context: RequestContext, + arn: Arn, + firewall_rule_group_policy: FirewallRuleGroupPolicy, + **kwargs, + ) -> PutFirewallRuleGroupPolicyResponse: + raise NotImplementedError + + @handler("PutResolverQueryLogConfigPolicy") + def put_resolver_query_log_config_policy( + self, + context: RequestContext, + arn: Arn, + resolver_query_log_config_policy: ResolverQueryLogConfigPolicy, + **kwargs, + ) -> PutResolverQueryLogConfigPolicyResponse: + raise NotImplementedError + + @handler("PutResolverRulePolicy") + def put_resolver_rule_policy( + self, context: RequestContext, arn: Arn, resolver_rule_policy: ResolverRulePolicy, **kwargs + ) -> PutResolverRulePolicyResponse: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: Arn, tags: TagList, 
**kwargs + ) -> TagResourceResponse: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, resource_arn: Arn, tag_keys: TagKeyList, **kwargs + ) -> UntagResourceResponse: + raise NotImplementedError + + @handler("UpdateFirewallConfig") + def update_firewall_config( + self, + context: RequestContext, + resource_id: ResourceId, + firewall_fail_open: FirewallFailOpenStatus, + **kwargs, + ) -> UpdateFirewallConfigResponse: + raise NotImplementedError + + @handler("UpdateFirewallDomains") + def update_firewall_domains( + self, + context: RequestContext, + firewall_domain_list_id: ResourceId, + operation: FirewallDomainUpdateOperation, + domains: FirewallDomains, + **kwargs, + ) -> UpdateFirewallDomainsResponse: + raise NotImplementedError + + @handler("UpdateFirewallRule") + def update_firewall_rule( + self, + context: RequestContext, + firewall_rule_group_id: ResourceId, + firewall_domain_list_id: ResourceId | None = None, + firewall_threat_protection_id: ResourceId | None = None, + priority: Priority | None = None, + action: Action | None = None, + block_response: BlockResponse | None = None, + block_override_domain: BlockOverrideDomain | None = None, + block_override_dns_type: BlockOverrideDnsType | None = None, + block_override_ttl: BlockOverrideTtl | None = None, + name: Name | None = None, + firewall_domain_redirection_action: FirewallDomainRedirectionAction | None = None, + qtype: Qtype | None = None, + dns_threat_protection: DnsThreatProtection | None = None, + confidence_threshold: ConfidenceThreshold | None = None, + **kwargs, + ) -> UpdateFirewallRuleResponse: + raise NotImplementedError + + @handler("UpdateFirewallRuleGroupAssociation") + def update_firewall_rule_group_association( + self, + context: RequestContext, + firewall_rule_group_association_id: ResourceId, + priority: Priority | None = None, + mutation_protection: MutationProtectionStatus | None = None, + name: Name | None = None, + **kwargs, + ) -> UpdateFirewallRuleGroupAssociationResponse: + raise NotImplementedError + + @handler("UpdateOutpostResolver") + def update_outpost_resolver( + self, + context: RequestContext, + id: ResourceId, + name: OutpostResolverName | None = None, + instance_count: InstanceCount | None = None, + preferred_instance_type: OutpostInstanceType | None = None, + **kwargs, + ) -> UpdateOutpostResolverResponse: + raise NotImplementedError + + @handler("UpdateResolverConfig") + def update_resolver_config( + self, + context: RequestContext, + resource_id: ResourceId, + autodefined_reverse_flag: AutodefinedReverseFlag, + **kwargs, + ) -> UpdateResolverConfigResponse: + raise NotImplementedError + + @handler("UpdateResolverDnssecConfig") + def update_resolver_dnssec_config( + self, context: RequestContext, resource_id: ResourceId, validation: Validation, **kwargs + ) -> UpdateResolverDnssecConfigResponse: + raise NotImplementedError + + @handler("UpdateResolverEndpoint") + def update_resolver_endpoint( + self, + context: RequestContext, + resolver_endpoint_id: ResourceId, + name: Name | None = None, + resolver_endpoint_type: ResolverEndpointType | None = None, + update_ip_addresses: UpdateIpAddresses | None = None, + protocols: ProtocolList | None = None, + **kwargs, + ) -> UpdateResolverEndpointResponse: + raise NotImplementedError + + @handler("UpdateResolverRule") + def update_resolver_rule( + self, + context: RequestContext, + resolver_rule_id: ResourceId, + config: ResolverRuleConfig, + **kwargs, + ) -> UpdateResolverRuleResponse: + raise 
NotImplementedError diff --git a/localstack-core/localstack/aws/api/s3/__init__.py b/localstack-core/localstack/aws/api/s3/__init__.py new file mode 100644 index 0000000000000..55e5b0771dd8b --- /dev/null +++ b/localstack-core/localstack/aws/api/s3/__init__.py @@ -0,0 +1,5074 @@ +from datetime import datetime +from enum import StrEnum +from typing import IO, Dict, Iterable, Iterator, List, Optional, TypedDict, Union + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AbortRuleId = str +AcceptRanges = str +AccessKeyIdValue = str +AccessPointAlias = bool +AccessPointArn = str +AccountId = str +AllowQuotedRecordDelimiter = bool +AllowedHeader = str +AllowedMethod = str +AllowedOrigin = str +AnalyticsId = str +BucketKeyEnabled = bool +BucketLocationName = str +BucketName = str +BucketRegion = str +BypassGovernanceRetention = bool +CacheControl = str +ChecksumCRC32 = str +ChecksumCRC32C = str +ChecksumCRC64NVME = str +ChecksumSHA1 = str +ChecksumSHA256 = str +CloudFunction = str +CloudFunctionInvocationRole = str +Code = str +Comments = str +ConfirmRemoveSelfBucketAccess = bool +ContentDisposition = str +ContentEncoding = str +ContentLanguage = str +ContentMD5 = str +ContentRange = str +ContentType = str +CopySource = str +CopySourceIfMatch = str +CopySourceIfNoneMatch = str +CopySourceRange = str +CopySourceSSECustomerAlgorithm = str +CopySourceSSECustomerKey = str +CopySourceSSECustomerKeyMD5 = str +CopySourceVersionId = str +Days = int +DaysAfterInitiation = int +DeleteMarker = bool +DeleteMarkerVersionId = str +Delimiter = str +Description = str +DirectoryBucketToken = str +DisplayName = str +ETag = str +EmailAddress = str +EnableRequestProgress = bool +ErrorCode = str +ErrorMessage = str +Expiration = str +ExpiredObjectDeleteMarker = bool +ExposeHeader = str +Expression = str +FetchOwner = bool +FieldDelimiter = str +FilterRuleValue = str +GetObjectResponseStatusCode = int +GrantFullControl = str +GrantRead = str +GrantReadACP = str +GrantWrite = str +GrantWriteACP = str +HostName = str +HttpErrorCodeReturnedEquals = str +HttpRedirectCode = str +ID = str +IfMatch = str +IfNoneMatch = str +IntelligentTieringDays = int +IntelligentTieringId = str +InventoryId = str +IsEnabled = bool +IsLatest = bool +IsPublic = bool +IsRestoreInProgress = bool +IsTruncated = bool +KMSContext = str +KeyCount = int +KeyMarker = str +KeyPrefixEquals = str +LambdaFunctionArn = str +Location = str +LocationNameAsString = str +LocationPrefix = str +MFA = str +Marker = str +MaxAgeSeconds = int +MaxBuckets = int +MaxDirectoryBuckets = int +MaxKeys = int +MaxParts = int +MaxUploads = int +Message = str +MetadataKey = str +MetadataTableStatus = str +MetadataValue = str +MetricsId = str +Minutes = int +MissingMeta = int +MultipartUploadId = str +NextKeyMarker = str +NextMarker = str +NextPartNumberMarker = int +NextToken = str +NextUploadIdMarker = str +NextVersionIdMarker = str +NotificationId = str +ObjectKey = str +ObjectLockEnabledForBucket = bool +ObjectLockToken = str +ObjectVersionId = str +PartNumber = int +PartNumberMarker = int +PartsCount = int +Policy = str +Prefix = str +Priority = int +QueueArn = str +Quiet = bool +QuoteCharacter = str +QuoteEscapeCharacter = str +Range = str +RecordDelimiter = str +Region = str +ReplaceKeyPrefixWith = str +ReplaceKeyWith = str +ReplicaKmsKeyID = str +RequestRoute = str +RequestToken = str +ResponseCacheControl = str +ResponseContentDisposition = str +ResponseContentEncoding = str +ResponseContentLanguage = str 
+ResponseContentType = str
+Restore = str
+RestoreOutputPath = str
+Role = str
+S3TablesArn = str
+S3TablesBucketArn = str
+S3TablesName = str
+S3TablesNamespace = str
+SSECustomerAlgorithm = str
+SSECustomerKey = str
+SSECustomerKeyMD5 = str
+SSEKMSEncryptionContext = str
+SSEKMSKeyId = str
+SessionCredentialValue = str
+Setting = bool
+SkipValidation = bool
+StartAfter = str
+Suffix = str
+TagCount = int
+TaggingHeader = str
+TargetBucket = str
+TargetPrefix = str
+Token = str
+TopicArn = str
+URI = str
+UploadIdMarker = str
+Value = str
+VersionCount = int
+VersionIdMarker = str
+WebsiteRedirectLocation = str
+Years = int
+BucketContentType = str
+IfCondition = str
+RestoreObjectOutputStatusCode = int
+ArgumentName = str
+ArgumentValue = str
+AWSAccessKeyId = str
+HostId = str
+HeadersNotSigned = str
+SignatureProvided = str
+StringToSign = str
+StringToSignBytes = str
+CanonicalRequest = str
+CanonicalRequestBytes = str
+X_Amz_Expires = int
+HttpMethod = str
+ResourceType = str
+MissingHeaderName = str
+KeyLength = str
+Header = str
+additionalMessage = str
+
+
+class AnalyticsS3ExportFileFormat(StrEnum):
+    CSV = "CSV"
+
+
+class ArchiveStatus(StrEnum):
+    ARCHIVE_ACCESS = "ARCHIVE_ACCESS"
+    DEEP_ARCHIVE_ACCESS = "DEEP_ARCHIVE_ACCESS"
+
+
+class BucketAccelerateStatus(StrEnum):
+    Enabled = "Enabled"
+    Suspended = "Suspended"
+
+
+class BucketCannedACL(StrEnum):
+    private = "private"
+    public_read = "public-read"
+    public_read_write = "public-read-write"
+    authenticated_read = "authenticated-read"
+    log_delivery_write = "log-delivery-write"
+
+
+class BucketLocationConstraint(StrEnum):
+    af_south_1 = "af-south-1"
+    ap_east_1 = "ap-east-1"
+    ap_northeast_1 = "ap-northeast-1"
+    ap_northeast_2 = "ap-northeast-2"
+    ap_northeast_3 = "ap-northeast-3"
+    ap_south_1 = "ap-south-1"
+    ap_south_2 = "ap-south-2"
+    ap_southeast_1 = "ap-southeast-1"
+    ap_southeast_2 = "ap-southeast-2"
+    ap_southeast_3 = "ap-southeast-3"
+    ap_southeast_4 = "ap-southeast-4"
+    ap_southeast_5 = "ap-southeast-5"
+    ca_central_1 = "ca-central-1"
+    cn_north_1 = "cn-north-1"
+    cn_northwest_1 = "cn-northwest-1"
+    EU = "EU"
+    eu_central_1 = "eu-central-1"
+    eu_central_2 = "eu-central-2"
+    eu_north_1 = "eu-north-1"
+    eu_south_1 = "eu-south-1"
+    eu_south_2 = "eu-south-2"
+    eu_west_1 = "eu-west-1"
+    eu_west_2 = "eu-west-2"
+    eu_west_3 = "eu-west-3"
+    il_central_1 = "il-central-1"
+    me_central_1 = "me-central-1"
+    me_south_1 = "me-south-1"
+    sa_east_1 = "sa-east-1"
+    us_east_2 = "us-east-2"
+    us_gov_east_1 = "us-gov-east-1"
+    us_gov_west_1 = "us-gov-west-1"
+    us_west_1 = "us-west-1"
+    us_west_2 = "us-west-2"
+
+
+class BucketLogsPermission(StrEnum):
+    FULL_CONTROL = "FULL_CONTROL"
+    READ = "READ"
+    WRITE = "WRITE"
+
+
+class BucketType(StrEnum):
+    Directory = "Directory"
+
+
+class BucketVersioningStatus(StrEnum):
+    Enabled = "Enabled"
+    Suspended = "Suspended"
+
+
+class ChecksumAlgorithm(StrEnum):
+    CRC32 = "CRC32"
+    CRC32C = "CRC32C"
+    SHA1 = "SHA1"
+    SHA256 = "SHA256"
+    CRC64NVME = "CRC64NVME"
+
+
+class ChecksumMode(StrEnum):
+    ENABLED = "ENABLED"
+
+
+class ChecksumType(StrEnum):
+    COMPOSITE = "COMPOSITE"
+    FULL_OBJECT = "FULL_OBJECT"
+
+
+class CompressionType(StrEnum):
+    NONE = "NONE"
+    GZIP = "GZIP"
+    BZIP2 = "BZIP2"
+
+
+class DataRedundancy(StrEnum):
+    SingleAvailabilityZone = "SingleAvailabilityZone"
+    SingleLocalZone = "SingleLocalZone"
+
+
+class DeleteMarkerReplicationStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class EncodingType(StrEnum):
+    url = "url"
+
+
+class Event(StrEnum):
+    s3_ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+    s3_ObjectCreated_ = "s3:ObjectCreated:*"
+    s3_ObjectCreated_Put = "s3:ObjectCreated:Put"
+    s3_ObjectCreated_Post = "s3:ObjectCreated:Post"
+    s3_ObjectCreated_Copy = "s3:ObjectCreated:Copy"
+    s3_ObjectCreated_CompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+    s3_ObjectRemoved_ = "s3:ObjectRemoved:*"
+    s3_ObjectRemoved_Delete = "s3:ObjectRemoved:Delete"
+    s3_ObjectRemoved_DeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+    s3_ObjectRestore_ = "s3:ObjectRestore:*"
+    s3_ObjectRestore_Post = "s3:ObjectRestore:Post"
+    s3_ObjectRestore_Completed = "s3:ObjectRestore:Completed"
+    s3_Replication_ = "s3:Replication:*"
+    s3_Replication_OperationFailedReplication = "s3:Replication:OperationFailedReplication"
+    s3_Replication_OperationNotTracked = "s3:Replication:OperationNotTracked"
+    s3_Replication_OperationMissedThreshold = "s3:Replication:OperationMissedThreshold"
+    s3_Replication_OperationReplicatedAfterThreshold = (
+        "s3:Replication:OperationReplicatedAfterThreshold"
+    )
+    s3_ObjectRestore_Delete = "s3:ObjectRestore:Delete"
+    s3_LifecycleTransition = "s3:LifecycleTransition"
+    s3_IntelligentTiering = "s3:IntelligentTiering"
+    s3_ObjectAcl_Put = "s3:ObjectAcl:Put"
+    s3_LifecycleExpiration_ = "s3:LifecycleExpiration:*"
+    s3_LifecycleExpiration_Delete = "s3:LifecycleExpiration:Delete"
+    s3_LifecycleExpiration_DeleteMarkerCreated = "s3:LifecycleExpiration:DeleteMarkerCreated"
+    s3_ObjectTagging_ = "s3:ObjectTagging:*"
+    s3_ObjectTagging_Put = "s3:ObjectTagging:Put"
+    s3_ObjectTagging_Delete = "s3:ObjectTagging:Delete"
+
+
+class ExistingObjectReplicationStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class ExpirationStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class ExpressionType(StrEnum):
+    SQL = "SQL"
+
+
+class FileHeaderInfo(StrEnum):
+    USE = "USE"
+    IGNORE = "IGNORE"
+    NONE = "NONE"
+
+
+class FilterRuleName(StrEnum):
+    prefix = "prefix"
+    suffix = "suffix"
+
+
+class IntelligentTieringAccessTier(StrEnum):
+    ARCHIVE_ACCESS = "ARCHIVE_ACCESS"
+    DEEP_ARCHIVE_ACCESS = "DEEP_ARCHIVE_ACCESS"
+
+
+class IntelligentTieringStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class InventoryFormat(StrEnum):
+    CSV = "CSV"
+    ORC = "ORC"
+    Parquet = "Parquet"
+
+
+class InventoryFrequency(StrEnum):
+    Daily = "Daily"
+    Weekly = "Weekly"
+
+
+class InventoryIncludedObjectVersions(StrEnum):
+    All = "All"
+    Current = "Current"
+
+
+class InventoryOptionalField(StrEnum):
+    Size = "Size"
+    LastModifiedDate = "LastModifiedDate"
+    StorageClass = "StorageClass"
+    ETag = "ETag"
+    IsMultipartUploaded = "IsMultipartUploaded"
+    ReplicationStatus = "ReplicationStatus"
+    EncryptionStatus = "EncryptionStatus"
+    ObjectLockRetainUntilDate = "ObjectLockRetainUntilDate"
+    ObjectLockMode = "ObjectLockMode"
+    ObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus"
+    IntelligentTieringAccessTier = "IntelligentTieringAccessTier"
+    BucketKeyStatus = "BucketKeyStatus"
+    ChecksumAlgorithm = "ChecksumAlgorithm"
+    ObjectAccessControlList = "ObjectAccessControlList"
+    ObjectOwner = "ObjectOwner"
+
+
+class JSONType(StrEnum):
+    DOCUMENT = "DOCUMENT"
+    LINES = "LINES"
+
+
+class LocationType(StrEnum):
+    AvailabilityZone = "AvailabilityZone"
+    LocalZone = "LocalZone"
+
+
+class MFADelete(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class MFADeleteStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class MetadataDirective(StrEnum):
+    COPY = "COPY"
+    REPLACE = "REPLACE"
+
+
+class MetricsStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class ObjectAttributes(StrEnum):
+    ETag = "ETag"
+    Checksum = "Checksum"
+    ObjectParts = "ObjectParts"
+    StorageClass = "StorageClass"
+    ObjectSize = "ObjectSize"
+
+
+class ObjectCannedACL(StrEnum):
+    private = "private"
+    public_read = "public-read"
+    public_read_write = "public-read-write"
+    authenticated_read = "authenticated-read"
+    aws_exec_read = "aws-exec-read"
+    bucket_owner_read = "bucket-owner-read"
+    bucket_owner_full_control = "bucket-owner-full-control"
+
+
+class ObjectLockEnabled(StrEnum):
+    Enabled = "Enabled"
+
+
+class ObjectLockLegalHoldStatus(StrEnum):
+    ON = "ON"
+    OFF = "OFF"
+
+
+class ObjectLockMode(StrEnum):
+    GOVERNANCE = "GOVERNANCE"
+    COMPLIANCE = "COMPLIANCE"
+
+
+class ObjectLockRetentionMode(StrEnum):
+    GOVERNANCE = "GOVERNANCE"
+    COMPLIANCE = "COMPLIANCE"
+
+
+class ObjectOwnership(StrEnum):
+    BucketOwnerPreferred = "BucketOwnerPreferred"
+    ObjectWriter = "ObjectWriter"
+    BucketOwnerEnforced = "BucketOwnerEnforced"
+
+
+class ObjectStorageClass(StrEnum):
+    STANDARD = "STANDARD"
+    REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
+    GLACIER = "GLACIER"
+    STANDARD_IA = "STANDARD_IA"
+    ONEZONE_IA = "ONEZONE_IA"
+    INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
+    DEEP_ARCHIVE = "DEEP_ARCHIVE"
+    OUTPOSTS = "OUTPOSTS"
+    GLACIER_IR = "GLACIER_IR"
+    SNOW = "SNOW"
+    EXPRESS_ONEZONE = "EXPRESS_ONEZONE"
+
+
+class ObjectVersionStorageClass(StrEnum):
+    STANDARD = "STANDARD"
+
+
+class OptionalObjectAttributes(StrEnum):
+    RestoreStatus = "RestoreStatus"
+
+
+class OwnerOverride(StrEnum):
+    Destination = "Destination"
+
+
+class PartitionDateSource(StrEnum):
+    EventTime = "EventTime"
+    DeliveryTime = "DeliveryTime"
+
+
+class Payer(StrEnum):
+    Requester = "Requester"
+    BucketOwner = "BucketOwner"
+
+
+class Permission(StrEnum):
+    FULL_CONTROL = "FULL_CONTROL"
+    WRITE = "WRITE"
+    WRITE_ACP = "WRITE_ACP"
+    READ = "READ"
+    READ_ACP = "READ_ACP"
+
+
+class Protocol(StrEnum):
+    http = "http"
+    https = "https"
+
+
+class QuoteFields(StrEnum):
+    ALWAYS = "ALWAYS"
+    ASNEEDED = "ASNEEDED"
+
+
+class ReplicaModificationsStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class ReplicationRuleStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class ReplicationStatus(StrEnum):
+    COMPLETE = "COMPLETE"
+    PENDING = "PENDING"
+    FAILED = "FAILED"
+    REPLICA = "REPLICA"
+    COMPLETED = "COMPLETED"
+
+
+class ReplicationTimeStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class RequestCharged(StrEnum):
+    requester = "requester"
+
+
+class RequestPayer(StrEnum):
+    requester = "requester"
+
+
+class RestoreRequestType(StrEnum):
+    SELECT = "SELECT"
+
+
+class ServerSideEncryption(StrEnum):
+    AES256 = "AES256"
+    aws_kms = "aws:kms"
+    aws_kms_dsse = "aws:kms:dsse"
+
+
+class SessionMode(StrEnum):
+    ReadOnly = "ReadOnly"
+    ReadWrite = "ReadWrite"
+
+
+class SseKmsEncryptedObjectsStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class StorageClass(StrEnum):
+    STANDARD = "STANDARD"
+    REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
+    STANDARD_IA = "STANDARD_IA"
+    ONEZONE_IA = "ONEZONE_IA"
+    INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
+    GLACIER = "GLACIER"
+    DEEP_ARCHIVE = "DEEP_ARCHIVE"
+    OUTPOSTS = "OUTPOSTS"
+    GLACIER_IR = "GLACIER_IR"
+    SNOW = "SNOW"
+    EXPRESS_ONEZONE = "EXPRESS_ONEZONE"
+
+
+class StorageClassAnalysisSchemaVersion(StrEnum):
+    V_1 = "V_1"
+
+
+class TaggingDirective(StrEnum):
+    COPY = "COPY"
+    REPLACE = "REPLACE"
+
+
+class Tier(StrEnum):
+    Standard = "Standard"
+    Bulk = "Bulk"
+    Expedited = "Expedited"
+
+
+class TransitionDefaultMinimumObjectSize(StrEnum):
+    varies_by_storage_class = "varies_by_storage_class"
+    all_storage_classes_128K = "all_storage_classes_128K"
+
+
+class TransitionStorageClass(StrEnum):
+    GLACIER = "GLACIER"
+    STANDARD_IA = "STANDARD_IA"
+    ONEZONE_IA = "ONEZONE_IA"
+    INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
+    DEEP_ARCHIVE = "DEEP_ARCHIVE"
+    GLACIER_IR = "GLACIER_IR"
+
+
+class Type(StrEnum):
+    CanonicalUser = "CanonicalUser"
+    AmazonCustomerByEmail = "AmazonCustomerByEmail"
+    Group = "Group"
+
+
+class BucketAlreadyExists(ServiceException):
+    code: str = "BucketAlreadyExists"
+    sender_fault: bool = False
+    status_code: int = 409
+
+
+class BucketAlreadyOwnedByYou(ServiceException):
+    code: str = "BucketAlreadyOwnedByYou"
+    sender_fault: bool = False
+    status_code: int = 409
+    BucketName: Optional[BucketName]
+
+
+class EncryptionTypeMismatch(ServiceException):
+    code: str = "EncryptionTypeMismatch"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidObjectState(ServiceException):
+    code: str = "InvalidObjectState"
+    sender_fault: bool = False
+    status_code: int = 403
+    StorageClass: Optional[StorageClass]
+    AccessTier: Optional[IntelligentTieringAccessTier]
+
+
+class InvalidRequest(ServiceException):
+    code: str = "InvalidRequest"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class InvalidWriteOffset(ServiceException):
+    code: str = "InvalidWriteOffset"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class NoSuchBucket(ServiceException):
+    code: str = "NoSuchBucket"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class NoSuchKey(ServiceException):
+    code: str = "NoSuchKey"
+    sender_fault: bool = False
+    status_code: int = 404
+    Key: Optional[ObjectKey]
+    DeleteMarker: Optional[DeleteMarker]
+    VersionId: Optional[ObjectVersionId]
+
+
+class NoSuchUpload(ServiceException):
+    code: str = "NoSuchUpload"
+    sender_fault: bool = False
+    status_code: int = 404
+    UploadId: Optional[MultipartUploadId]
+
+
+class ObjectAlreadyInActiveTierError(ServiceException):
+    code: str = "ObjectAlreadyInActiveTierError"
+    sender_fault: bool = False
+    status_code: int = 403
+
+
+class ObjectNotInActiveTierError(ServiceException):
+    code: str = "ObjectNotInActiveTierError"
+    sender_fault: bool = False
+    status_code: int = 403
+
+
+class TooManyParts(ServiceException):
+    code: str = "TooManyParts"
+    sender_fault: bool = False
+    status_code: int = 400
+
+
+class NoSuchLifecycleConfiguration(ServiceException):
+    code: str = "NoSuchLifecycleConfiguration"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class InvalidBucketName(ServiceException):
+    code: str = "InvalidBucketName"
+    sender_fault: bool = False
+    status_code: int = 400
+    BucketName: Optional[BucketName]
+
+
+class NoSuchVersion(ServiceException):
+    code: str = "NoSuchVersion"
+    sender_fault: bool = False
+    status_code: int = 404
+    VersionId: Optional[ObjectVersionId]
+    Key: Optional[ObjectKey]
+
+
+class PreconditionFailed(ServiceException):
+    code: str = "PreconditionFailed"
+    sender_fault: bool = False
+    status_code: int = 412
+    Condition: Optional[IfCondition]
+
+
+ObjectSize = int
+
+
+class InvalidRange(ServiceException):
+    code: str = "InvalidRange"
+    sender_fault: bool = False
+    status_code: int = 416
+    ActualObjectSize: Optional[ObjectSize]
+    RangeRequested: Optional[ContentRange]
+
+
+class InvalidArgument(ServiceException):
+    code: str = "InvalidArgument"
+    sender_fault: bool = False
+    status_code: int = 400
+    ArgumentName: Optional[ArgumentName]
+    ArgumentValue: Optional[ArgumentValue]
+    HostId: Optional[HostId]
+
+
+class SignatureDoesNotMatch(ServiceException):
+    code: str = "SignatureDoesNotMatch"
+    sender_fault: bool = False
+    status_code: int = 403
+    AWSAccessKeyId: Optional[AWSAccessKeyId]
+    CanonicalRequest: Optional[CanonicalRequest]
+    CanonicalRequestBytes: Optional[CanonicalRequestBytes]
+    HostId: Optional[HostId]
+    SignatureProvided: Optional[SignatureProvided]
+    StringToSign: Optional[StringToSign]
+    StringToSignBytes: Optional[StringToSignBytes]
+
+
+ServerTime = datetime
+Expires = datetime
+
+
+class AccessDenied(ServiceException):
+    code: str = "AccessDenied"
+    sender_fault: bool = False
+    status_code: int = 403
+    Expires: Optional[Expires]
+    ServerTime: Optional[ServerTime]
+    X_Amz_Expires: Optional[X_Amz_Expires]
+    HostId: Optional[HostId]
+    HeadersNotSigned: Optional[HeadersNotSigned]
+
+
+class AuthorizationQueryParametersError(ServiceException):
+    code: str = "AuthorizationQueryParametersError"
+    sender_fault: bool = False
+    status_code: int = 400
+    HostId: Optional[HostId]
+
+
+class NoSuchWebsiteConfiguration(ServiceException):
+    code: str = "NoSuchWebsiteConfiguration"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class ReplicationConfigurationNotFoundError(ServiceException):
+    code: str = "ReplicationConfigurationNotFoundError"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class BadRequest(ServiceException):
+    code: str = "BadRequest"
+    sender_fault: bool = False
+    status_code: int = 400
+    HostId: Optional[HostId]
+
+
+class AccessForbidden(ServiceException):
+    code: str = "AccessForbidden"
+    sender_fault: bool = False
+    status_code: int = 403
+    HostId: Optional[HostId]
+    Method: Optional[HttpMethod]
+    ResourceType: Optional[ResourceType]
+
+
+class NoSuchCORSConfiguration(ServiceException):
+    code: str = "NoSuchCORSConfiguration"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class MissingSecurityHeader(ServiceException):
+    code: str = "MissingSecurityHeader"
+    sender_fault: bool = False
+    status_code: int = 400
+    MissingHeaderName: Optional[MissingHeaderName]
+
+
+class InvalidPartOrder(ServiceException):
+    code: str = "InvalidPartOrder"
+    sender_fault: bool = False
+    status_code: int = 400
+    UploadId: Optional[MultipartUploadId]
+
+
+class InvalidStorageClass(ServiceException):
+    code: str = "InvalidStorageClass"
+    sender_fault: bool = False
+    status_code: int = 400
+    StorageClassRequested: Optional[StorageClass]
+
+
+class MethodNotAllowed(ServiceException):
+    code: str = "MethodNotAllowed"
+    sender_fault: bool = False
+    status_code: int = 405
+    Method: Optional[HttpMethod]
+    ResourceType: Optional[ResourceType]
+    DeleteMarker: Optional[DeleteMarker]
+    VersionId: Optional[ObjectVersionId]
+    Allow: Optional[HttpMethod]
+
+
+class CrossLocationLoggingProhibitted(ServiceException):
+    code: str = "CrossLocationLoggingProhibitted"
+    sender_fault: bool = False
+    status_code: int = 403
+    TargetBucketLocation: Optional[BucketRegion]
+    SourceBucketLocation: Optional[BucketRegion]
+
+
+class InvalidTargetBucketForLogging(ServiceException):
+    code: str = "InvalidTargetBucketForLogging"
+    sender_fault: bool = False
+    status_code: int = 400
+    TargetBucket: Optional[BucketName]
+
+
+class BucketNotEmpty(ServiceException):
+    code: str = "BucketNotEmpty"
+    sender_fault: bool = False
+    status_code: int = 409
+    BucketName: Optional[BucketName]
+
+
+ProposedSize = int
+MinSizeAllowed = int
+
+
+class EntityTooSmall(ServiceException):
+    code: str = "EntityTooSmall"
+    sender_fault: bool = False
+    status_code: int = 400
+    ETag: Optional[ETag]
+    MinSizeAllowed: Optional[MinSizeAllowed]
+    PartNumber: Optional[PartNumber]
+    ProposedSize: Optional[ProposedSize]
+
+
+class InvalidPart(ServiceException):
+    code: str = "InvalidPart"
+    sender_fault: bool = False
+    status_code: int = 400
+    ETag: Optional[ETag]
+    UploadId: Optional[MultipartUploadId]
+    PartNumber: Optional[PartNumber]
+
+
+class NoSuchTagSet(ServiceException):
+    code: str = "NoSuchTagSet"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class InvalidTag(ServiceException):
+    code: str = "InvalidTag"
+    sender_fault: bool = False
+    status_code: int = 400
+    TagKey: Optional[ObjectKey]
+    TagValue: Optional[Value]
+
+
+class ObjectLockConfigurationNotFoundError(ServiceException):
+    code: str = "ObjectLockConfigurationNotFoundError"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class InvalidPartNumber(ServiceException):
+    code: str = "InvalidPartNumber"
+    sender_fault: bool = False
+    status_code: int = 416
+    PartNumberRequested: Optional[PartNumber]
+    ActualPartCount: Optional[PartNumber]
+
+
+class OwnershipControlsNotFoundError(ServiceException):
+    code: str = "OwnershipControlsNotFoundError"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class NoSuchPublicAccessBlockConfiguration(ServiceException):
+    code: str = "NoSuchPublicAccessBlockConfiguration"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class NoSuchBucketPolicy(ServiceException):
+    code: str = "NoSuchBucketPolicy"
+    sender_fault: bool = False
+    status_code: int = 404
+    BucketName: Optional[BucketName]
+
+
+class InvalidDigest(ServiceException):
+    code: str = "InvalidDigest"
+    sender_fault: bool = False
+    status_code: int = 400
+    Content_MD5: Optional[ContentMD5]
+
+
+class KeyTooLongError(ServiceException):
+    code: str = "KeyTooLongError"
+    sender_fault: bool = False
+    status_code: int = 400
+    MaxSizeAllowed: Optional[KeyLength]
+    Size: Optional[KeyLength]
+
+
+class InvalidLocationConstraint(ServiceException):
+    code: str = "InvalidLocationConstraint"
+    sender_fault: bool = False
+    status_code: int = 400
+    LocationConstraint: Optional[BucketRegion]
+
+
+class EntityTooLarge(ServiceException):
+    code: str = "EntityTooLarge"
+    sender_fault: bool = False
+    status_code: int = 400
+    MaxSizeAllowed: Optional[KeyLength]
+    HostId: Optional[HostId]
+    ProposedSize: Optional[ProposedSize]
+
+
+class InvalidEncryptionAlgorithmError(ServiceException):
+    code: str = "InvalidEncryptionAlgorithmError"
+    sender_fault: bool = False
+    status_code: int = 400
+    ArgumentName: Optional[ArgumentName]
+    ArgumentValue: Optional[ArgumentValue]
+
+
+class NotImplemented(ServiceException):
+    code: str = "NotImplemented"
+    sender_fault: bool = False
+    status_code: int = 501
+    Header: Optional[Header]
+    additionalMessage: Optional[additionalMessage]
+
+
+class ConditionalRequestConflict(ServiceException):
+    code: str = "ConditionalRequestConflict"
+    sender_fault: bool = False
+    status_code: int = 409
+    Condition: Optional[IfCondition]
+    Key: Optional[ObjectKey]
+
+
+class BadDigest(ServiceException):
+    code: str =
"BadDigest" + sender_fault: bool = False + status_code: int = 400 + ExpectedDigest: Optional[ContentMD5] + CalculatedDigest: Optional[ContentMD5] + + +AbortDate = datetime + + +class AbortIncompleteMultipartUpload(TypedDict, total=False): + DaysAfterInitiation: Optional[DaysAfterInitiation] + + +class AbortMultipartUploadOutput(TypedDict, total=False): + RequestCharged: Optional[RequestCharged] + + +IfMatchInitiatedTime = datetime + + +class AbortMultipartUploadRequest(ServiceRequest): + Bucket: BucketName + Key: ObjectKey + UploadId: MultipartUploadId + RequestPayer: Optional[RequestPayer] + ExpectedBucketOwner: Optional[AccountId] + IfMatchInitiatedTime: Optional[IfMatchInitiatedTime] + + +class AccelerateConfiguration(TypedDict, total=False): + Status: Optional[BucketAccelerateStatus] + + +class Owner(TypedDict, total=False): + DisplayName: Optional[DisplayName] + ID: Optional[ID] + + +class Grantee(TypedDict, total=False): + DisplayName: Optional[DisplayName] + EmailAddress: Optional[EmailAddress] + ID: Optional[ID] + Type: Type + URI: Optional[URI] + + +class Grant(TypedDict, total=False): + Grantee: Optional[Grantee] + Permission: Optional[Permission] + + +Grants = List[Grant] + + +class AccessControlPolicy(TypedDict, total=False): + Grants: Optional[Grants] + Owner: Optional[Owner] + + +class AccessControlTranslation(TypedDict, total=False): + Owner: OwnerOverride + + +AllowedHeaders = List[AllowedHeader] +AllowedMethods = List[AllowedMethod] +AllowedOrigins = List[AllowedOrigin] + + +class Tag(TypedDict, total=False): + Key: ObjectKey + Value: Value + + +TagSet = List[Tag] + + +class AnalyticsAndOperator(TypedDict, total=False): + Prefix: Optional[Prefix] + Tags: Optional[TagSet] + + +class AnalyticsS3BucketDestination(TypedDict, total=False): + Format: AnalyticsS3ExportFileFormat + BucketAccountId: Optional[AccountId] + Bucket: BucketName + Prefix: Optional[Prefix] + + +class AnalyticsExportDestination(TypedDict, total=False): + S3BucketDestination: AnalyticsS3BucketDestination + + +class StorageClassAnalysisDataExport(TypedDict, total=False): + OutputSchemaVersion: StorageClassAnalysisSchemaVersion + Destination: AnalyticsExportDestination + + +class StorageClassAnalysis(TypedDict, total=False): + DataExport: Optional[StorageClassAnalysisDataExport] + + +class AnalyticsFilter(TypedDict, total=False): + Prefix: Optional[Prefix] + Tag: Optional[Tag] + And: Optional[AnalyticsAndOperator] + + +class AnalyticsConfiguration(TypedDict, total=False): + Id: AnalyticsId + Filter: Optional[AnalyticsFilter] + StorageClassAnalysis: StorageClassAnalysis + + +AnalyticsConfigurationList = List[AnalyticsConfiguration] +Body = bytes +CreationDate = datetime + + +class Bucket(TypedDict, total=False): + Name: Optional[BucketName] + CreationDate: Optional[CreationDate] + BucketRegion: Optional[BucketRegion] + + +class BucketInfo(TypedDict, total=False): + DataRedundancy: Optional[DataRedundancy] + Type: Optional[BucketType] + + +class NoncurrentVersionExpiration(TypedDict, total=False): + NoncurrentDays: Optional[Days] + NewerNoncurrentVersions: Optional[VersionCount] + + +class NoncurrentVersionTransition(TypedDict, total=False): + NoncurrentDays: Optional[Days] + StorageClass: Optional[TransitionStorageClass] + NewerNoncurrentVersions: Optional[VersionCount] + + +NoncurrentVersionTransitionList = List[NoncurrentVersionTransition] +Date = datetime + + +class Transition(TypedDict, total=False): + Date: Optional[Date] + Days: Optional[Days] + StorageClass: Optional[TransitionStorageClass] + + 
+TransitionList = List[Transition] +ObjectSizeLessThanBytes = int +ObjectSizeGreaterThanBytes = int + + +class LifecycleRuleAndOperator(TypedDict, total=False): + Prefix: Optional[Prefix] + Tags: Optional[TagSet] + ObjectSizeGreaterThan: Optional[ObjectSizeGreaterThanBytes] + ObjectSizeLessThan: Optional[ObjectSizeLessThanBytes] + + +class LifecycleRuleFilter(TypedDict, total=False): + Prefix: Optional[Prefix] + Tag: Optional[Tag] + ObjectSizeGreaterThan: Optional[ObjectSizeGreaterThanBytes] + ObjectSizeLessThan: Optional[ObjectSizeLessThanBytes] + And: Optional[LifecycleRuleAndOperator] + + +class LifecycleExpiration(TypedDict, total=False): + Date: Optional[Date] + Days: Optional[Days] + ExpiredObjectDeleteMarker: Optional[ExpiredObjectDeleteMarker] + + +class LifecycleRule(TypedDict, total=False): + Expiration: Optional[LifecycleExpiration] + ID: Optional[ID] + Prefix: Optional[Prefix] + Filter: Optional[LifecycleRuleFilter] + Status: ExpirationStatus + Transitions: Optional[TransitionList] + NoncurrentVersionTransitions: Optional[NoncurrentVersionTransitionList] + NoncurrentVersionExpiration: Optional[NoncurrentVersionExpiration] + AbortIncompleteMultipartUpload: Optional[AbortIncompleteMultipartUpload] + + +LifecycleRules = List[LifecycleRule] + + +class BucketLifecycleConfiguration(TypedDict, total=False): + Rules: LifecycleRules + + +class PartitionedPrefix(TypedDict, total=False): + PartitionDateSource: Optional[PartitionDateSource] + + +class SimplePrefix(TypedDict, total=False): + pass + + +class TargetObjectKeyFormat(TypedDict, total=False): + SimplePrefix: Optional[SimplePrefix] + PartitionedPrefix: Optional[PartitionedPrefix] + + +class TargetGrant(TypedDict, total=False): + Grantee: Optional[Grantee] + Permission: Optional[BucketLogsPermission] + + +TargetGrants = List[TargetGrant] + + +class LoggingEnabled(TypedDict, total=False): + TargetBucket: TargetBucket + TargetGrants: Optional[TargetGrants] + TargetPrefix: TargetPrefix + TargetObjectKeyFormat: Optional[TargetObjectKeyFormat] + + +class BucketLoggingStatus(TypedDict, total=False): + LoggingEnabled: Optional[LoggingEnabled] + + +Buckets = List[Bucket] +BytesProcessed = int +BytesReturned = int +BytesScanned = int +ExposeHeaders = List[ExposeHeader] + + +class CORSRule(TypedDict, total=False): + ID: Optional[ID] + AllowedHeaders: Optional[AllowedHeaders] + AllowedMethods: AllowedMethods + AllowedOrigins: AllowedOrigins + ExposeHeaders: Optional[ExposeHeaders] + MaxAgeSeconds: Optional[MaxAgeSeconds] + + +CORSRules = List[CORSRule] + + +class CORSConfiguration(TypedDict, total=False): + CORSRules: CORSRules + + +class CSVInput(TypedDict, total=False): + FileHeaderInfo: Optional[FileHeaderInfo] + Comments: Optional[Comments] + QuoteEscapeCharacter: Optional[QuoteEscapeCharacter] + RecordDelimiter: Optional[RecordDelimiter] + FieldDelimiter: Optional[FieldDelimiter] + QuoteCharacter: Optional[QuoteCharacter] + AllowQuotedRecordDelimiter: Optional[AllowQuotedRecordDelimiter] + + +class CSVOutput(TypedDict, total=False): + QuoteFields: Optional[QuoteFields] + QuoteEscapeCharacter: Optional[QuoteEscapeCharacter] + RecordDelimiter: Optional[RecordDelimiter] + FieldDelimiter: Optional[FieldDelimiter] + QuoteCharacter: Optional[QuoteCharacter] + + +class Checksum(TypedDict, total=False): + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + ChecksumType: 
Optional[ChecksumType] + + +ChecksumAlgorithmList = List[ChecksumAlgorithm] +EventList = List[Event] + + +class CloudFunctionConfiguration(TypedDict, total=False): + Id: Optional[NotificationId] + Event: Optional[Event] + Events: Optional[EventList] + CloudFunction: Optional[CloudFunction] + InvocationRole: Optional[CloudFunctionInvocationRole] + + +class CommonPrefix(TypedDict, total=False): + Prefix: Optional[Prefix] + + +CommonPrefixList = List[CommonPrefix] + + +class CompleteMultipartUploadOutput(TypedDict, total=False): + Location: Optional[Location] + Bucket: Optional[BucketName] + Key: Optional[ObjectKey] + Expiration: Optional[Expiration] + ETag: Optional[ETag] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + ChecksumType: Optional[ChecksumType] + ServerSideEncryption: Optional[ServerSideEncryption] + VersionId: Optional[ObjectVersionId] + SSEKMSKeyId: Optional[SSEKMSKeyId] + BucketKeyEnabled: Optional[BucketKeyEnabled] + RequestCharged: Optional[RequestCharged] + + +MpuObjectSize = int + + +class CompletedPart(TypedDict, total=False): + ETag: Optional[ETag] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + PartNumber: Optional[PartNumber] + + +CompletedPartList = List[CompletedPart] + + +class CompletedMultipartUpload(TypedDict, total=False): + Parts: Optional[CompletedPartList] + + +class CompleteMultipartUploadRequest(ServiceRequest): + Bucket: BucketName + Key: ObjectKey + MultipartUpload: Optional[CompletedMultipartUpload] + UploadId: MultipartUploadId + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + ChecksumType: Optional[ChecksumType] + MpuObjectSize: Optional[MpuObjectSize] + RequestPayer: Optional[RequestPayer] + ExpectedBucketOwner: Optional[AccountId] + IfMatch: Optional[IfMatch] + IfNoneMatch: Optional[IfNoneMatch] + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKey: Optional[SSECustomerKey] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + + +class Condition(TypedDict, total=False): + HttpErrorCodeReturnedEquals: Optional[HttpErrorCodeReturnedEquals] + KeyPrefixEquals: Optional[KeyPrefixEquals] + + +ContentLength = int + + +class ContinuationEvent(TypedDict, total=False): + pass + + +LastModified = datetime + + +class CopyObjectResult(TypedDict, total=False): + ETag: Optional[ETag] + LastModified: Optional[LastModified] + ChecksumType: Optional[ChecksumType] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + + +class CopyObjectOutput(TypedDict, total=False): + CopyObjectResult: Optional[CopyObjectResult] + Expiration: Optional[Expiration] + CopySourceVersionId: Optional[CopySourceVersionId] + VersionId: Optional[ObjectVersionId] + ServerSideEncryption: Optional[ServerSideEncryption] + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + SSEKMSKeyId: Optional[SSEKMSKeyId] + SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext] + 
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+    RequestCharged: Optional[RequestCharged]
+
+
+ObjectLockRetainUntilDate = datetime
+Metadata = Dict[MetadataKey, MetadataValue]
+CopySourceIfUnmodifiedSince = datetime
+CopySourceIfModifiedSince = datetime
+
+
+class CopyObjectRequest(ServiceRequest):
+    ACL: Optional[ObjectCannedACL]
+    Bucket: BucketName
+    CacheControl: Optional[CacheControl]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ContentDisposition: Optional[ContentDisposition]
+    ContentEncoding: Optional[ContentEncoding]
+    ContentLanguage: Optional[ContentLanguage]
+    ContentType: Optional[ContentType]
+    CopySource: CopySource
+    CopySourceIfMatch: Optional[CopySourceIfMatch]
+    CopySourceIfModifiedSince: Optional[CopySourceIfModifiedSince]
+    CopySourceIfNoneMatch: Optional[CopySourceIfNoneMatch]
+    CopySourceIfUnmodifiedSince: Optional[CopySourceIfUnmodifiedSince]
+    Expires: Optional[Expires]
+    GrantFullControl: Optional[GrantFullControl]
+    GrantRead: Optional[GrantRead]
+    GrantReadACP: Optional[GrantReadACP]
+    GrantWriteACP: Optional[GrantWriteACP]
+    Key: ObjectKey
+    Metadata: Optional[Metadata]
+    MetadataDirective: Optional[MetadataDirective]
+    TaggingDirective: Optional[TaggingDirective]
+    ServerSideEncryption: Optional[ServerSideEncryption]
+    StorageClass: Optional[StorageClass]
+    WebsiteRedirectLocation: Optional[WebsiteRedirectLocation]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKey: Optional[SSECustomerKey]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+    SSEKMSKeyId: Optional[SSEKMSKeyId]
+    SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext]
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+    CopySourceSSECustomerAlgorithm: Optional[CopySourceSSECustomerAlgorithm]
+    CopySourceSSECustomerKey: Optional[CopySourceSSECustomerKey]
+    CopySourceSSECustomerKeyMD5: Optional[CopySourceSSECustomerKeyMD5]
+    RequestPayer: Optional[RequestPayer]
+    Tagging: Optional[TaggingHeader]
+    ObjectLockMode: Optional[ObjectLockMode]
+    ObjectLockRetainUntilDate: Optional[ObjectLockRetainUntilDate]
+    ObjectLockLegalHoldStatus: Optional[ObjectLockLegalHoldStatus]
+    ExpectedBucketOwner: Optional[AccountId]
+    ExpectedSourceBucketOwner: Optional[AccountId]
+
+
+class CopyPartResult(TypedDict, total=False):
+    ETag: Optional[ETag]
+    LastModified: Optional[LastModified]
+    ChecksumCRC32: Optional[ChecksumCRC32]
+    ChecksumCRC32C: Optional[ChecksumCRC32C]
+    ChecksumCRC64NVME: Optional[ChecksumCRC64NVME]
+    ChecksumSHA1: Optional[ChecksumSHA1]
+    ChecksumSHA256: Optional[ChecksumSHA256]
+
+
+class LocationInfo(TypedDict, total=False):
+    Type: Optional[LocationType]
+    Name: Optional[LocationNameAsString]
+
+
+class CreateBucketConfiguration(TypedDict, total=False):
+    LocationConstraint: Optional[BucketLocationConstraint]
+    Location: Optional[LocationInfo]
+    Bucket: Optional[BucketInfo]
+
+
+class S3TablesDestination(TypedDict, total=False):
+    TableBucketArn: S3TablesBucketArn
+    TableName: S3TablesName
+
+
+class MetadataTableConfiguration(TypedDict, total=False):
+    S3TablesDestination: S3TablesDestination
+
+
+class CreateBucketMetadataTableConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    MetadataTableConfiguration: MetadataTableConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class CreateBucketOutput(TypedDict, total=False):
+    Location: Optional[Location]
+
+
+class CreateBucketRequest(ServiceRequest):
+    ACL: Optional[BucketCannedACL]
+    Bucket: BucketName
+    CreateBucketConfiguration: Optional[CreateBucketConfiguration]
+    GrantFullControl: Optional[GrantFullControl]
+    GrantRead: Optional[GrantRead]
+    GrantReadACP: Optional[GrantReadACP]
+    GrantWrite: Optional[GrantWrite]
+    GrantWriteACP: Optional[GrantWriteACP]
+    ObjectLockEnabledForBucket: Optional[ObjectLockEnabledForBucket]
+    ObjectOwnership: Optional[ObjectOwnership]
+
+
+class CreateMultipartUploadOutput(TypedDict, total=False):
+    AbortDate: Optional[AbortDate]
+    AbortRuleId: Optional[AbortRuleId]
+    Bucket: Optional[BucketName]
+    Key: Optional[ObjectKey]
+    UploadId: Optional[MultipartUploadId]
+    ServerSideEncryption: Optional[ServerSideEncryption]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+    SSEKMSKeyId: Optional[SSEKMSKeyId]
+    SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext]
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+    RequestCharged: Optional[RequestCharged]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ChecksumType: Optional[ChecksumType]
+
+
+class CreateMultipartUploadRequest(ServiceRequest):
+    ACL: Optional[ObjectCannedACL]
+    Bucket: BucketName
+    CacheControl: Optional[CacheControl]
+    ContentDisposition: Optional[ContentDisposition]
+    ContentEncoding: Optional[ContentEncoding]
+    ContentLanguage: Optional[ContentLanguage]
+    ContentType: Optional[ContentType]
+    Expires: Optional[Expires]
+    GrantFullControl: Optional[GrantFullControl]
+    GrantRead: Optional[GrantRead]
+    GrantReadACP: Optional[GrantReadACP]
+    GrantWriteACP: Optional[GrantWriteACP]
+    Key: ObjectKey
+    Metadata: Optional[Metadata]
+    ServerSideEncryption: Optional[ServerSideEncryption]
+    StorageClass: Optional[StorageClass]
+    WebsiteRedirectLocation: Optional[WebsiteRedirectLocation]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKey: Optional[SSECustomerKey]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+    SSEKMSKeyId: Optional[SSEKMSKeyId]
+    SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext]
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+    RequestPayer: Optional[RequestPayer]
+    Tagging: Optional[TaggingHeader]
+    ObjectLockMode: Optional[ObjectLockMode]
+    ObjectLockRetainUntilDate: Optional[ObjectLockRetainUntilDate]
+    ObjectLockLegalHoldStatus: Optional[ObjectLockLegalHoldStatus]
+    ExpectedBucketOwner: Optional[AccountId]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ChecksumType: Optional[ChecksumType]
+
+
+SessionExpiration = datetime
+
+
+class SessionCredentials(TypedDict, total=False):
+    AccessKeyId: AccessKeyIdValue
+    SecretAccessKey: SessionCredentialValue
+    SessionToken: SessionCredentialValue
+    Expiration: SessionExpiration
+
+
+class CreateSessionOutput(TypedDict, total=False):
+    ServerSideEncryption: Optional[ServerSideEncryption]
+    SSEKMSKeyId: Optional[SSEKMSKeyId]
+    SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext]
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+    Credentials: SessionCredentials
+
+
+class CreateSessionRequest(ServiceRequest):
+    SessionMode: Optional[SessionMode]
+    Bucket: BucketName
+    ServerSideEncryption: Optional[ServerSideEncryption]
+    SSEKMSKeyId: Optional[SSEKMSKeyId]
+    SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext]
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+
+
+class DefaultRetention(TypedDict, total=False):
+    Mode: Optional[ObjectLockRetentionMode]
+    Days: Optional[Days]
+    Years: Optional[Years]
+
+
+Size = int
+LastModifiedTime = datetime
+
+
+class ObjectIdentifier(TypedDict, total=False):
+    Key: ObjectKey
+    VersionId: Optional[ObjectVersionId]
+    ETag: Optional[ETag]
+    LastModifiedTime: Optional[LastModifiedTime]
+    Size: Optional[Size]
+
+
+ObjectIdentifierList = List[ObjectIdentifier]
+
+
+class Delete(TypedDict, total=False):
+    Objects: ObjectIdentifierList
+    Quiet: Optional[Quiet]
+
+
+class DeleteBucketAnalyticsConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: AnalyticsId
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketCorsRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketEncryptionRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketIntelligentTieringConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: IntelligentTieringId
+
+
+class DeleteBucketInventoryConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: InventoryId
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketLifecycleRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketMetadataTableConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketMetricsConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: MetricsId
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketOwnershipControlsRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketPolicyRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketReplicationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketTaggingRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteBucketWebsiteRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class DeleteMarkerEntry(TypedDict, total=False):
+    Owner: Optional[Owner]
+    Key: Optional[ObjectKey]
+    VersionId: Optional[ObjectVersionId]
+    IsLatest: Optional[IsLatest]
+    LastModified: Optional[LastModified]
+
+
+class DeleteMarkerReplication(TypedDict, total=False):
+    Status: Optional[DeleteMarkerReplicationStatus]
+
+
+DeleteMarkers = List[DeleteMarkerEntry]
+
+
+class DeleteObjectOutput(TypedDict, total=False):
+    DeleteMarker: Optional[DeleteMarker]
+    VersionId: Optional[ObjectVersionId]
+    RequestCharged: Optional[RequestCharged]
+
+
+IfMatchSize = int
+IfMatchLastModifiedTime = datetime
+
+
+class DeleteObjectRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    MFA: Optional[MFA]
+    VersionId: Optional[ObjectVersionId]
+    RequestPayer: Optional[RequestPayer]
+    BypassGovernanceRetention: Optional[BypassGovernanceRetention]
+    ExpectedBucketOwner: Optional[AccountId]
+    IfMatch: Optional[IfMatch]
+    IfMatchLastModifiedTime: Optional[IfMatchLastModifiedTime]
+    IfMatchSize: Optional[IfMatchSize]
+
+
+class DeleteObjectTaggingOutput(TypedDict, total=False):
+    VersionId: Optional[ObjectVersionId]
+
+
+class DeleteObjectTaggingRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    VersionId: Optional[ObjectVersionId]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class Error(TypedDict, total=False):
+    Key: Optional[ObjectKey]
+    VersionId: Optional[ObjectVersionId]
+    Code: Optional[Code]
+    Message: Optional[Message]
+
+
+Errors = List[Error]
+
+
+class DeletedObject(TypedDict, total=False):
+    Key: Optional[ObjectKey]
+    VersionId: Optional[ObjectVersionId]
+    DeleteMarker: Optional[DeleteMarker]
+    DeleteMarkerVersionId: Optional[DeleteMarkerVersionId]
+
+
+DeletedObjects = List[DeletedObject]
+
+
+class DeleteObjectsOutput(TypedDict, total=False):
+    Deleted: Optional[DeletedObjects]
+    RequestCharged: Optional[RequestCharged]
+    Errors: Optional[Errors]
+
+
+class DeleteObjectsRequest(ServiceRequest):
+    Bucket: BucketName
+    Delete: Delete
+    MFA: Optional[MFA]
+    RequestPayer: Optional[RequestPayer]
+    BypassGovernanceRetention: Optional[BypassGovernanceRetention]
+    ExpectedBucketOwner: Optional[AccountId]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+
+
+class DeletePublicAccessBlockRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class ReplicationTimeValue(TypedDict, total=False):
+    Minutes: Optional[Minutes]
+
+
+class Metrics(TypedDict, total=False):
+    Status: MetricsStatus
+    EventThreshold: Optional[ReplicationTimeValue]
+
+
+class ReplicationTime(TypedDict, total=False):
+    Status: ReplicationTimeStatus
+    Time: ReplicationTimeValue
+
+
+class EncryptionConfiguration(TypedDict, total=False):
+    ReplicaKmsKeyID: Optional[ReplicaKmsKeyID]
+
+
+class Destination(TypedDict, total=False):
+    Bucket: BucketName
+    Account: Optional[AccountId]
+    StorageClass: Optional[StorageClass]
+    AccessControlTranslation: Optional[AccessControlTranslation]
+    EncryptionConfiguration: Optional[EncryptionConfiguration]
+    ReplicationTime: Optional[ReplicationTime]
+    Metrics: Optional[Metrics]
+
+
+class Encryption(TypedDict, total=False):
+    EncryptionType: ServerSideEncryption
+    KMSKeyId: Optional[SSEKMSKeyId]
+    KMSContext: Optional[KMSContext]
+
+
+End = int
+
+
+class EndEvent(TypedDict, total=False):
+    pass
+
+
+class ErrorDetails(TypedDict, total=False):
+    ErrorCode: Optional[ErrorCode]
+    ErrorMessage: Optional[ErrorMessage]
+
+
+class ErrorDocument(TypedDict, total=False):
+    Key: ObjectKey
+
+
+class EventBridgeConfiguration(TypedDict, total=False):
+    pass
+
+
+class ExistingObjectReplication(TypedDict, total=False):
+    Status: ExistingObjectReplicationStatus
+
+
+class FilterRule(TypedDict, total=False):
+    Name: Optional[FilterRuleName]
+    Value: Optional[FilterRuleValue]
+
+
+FilterRuleList = List[FilterRule]
+
+
+class GetBucketAccelerateConfigurationOutput(TypedDict, total=False):
+    Status: Optional[BucketAccelerateStatus]
+    RequestCharged: Optional[RequestCharged]
+
+
+class GetBucketAccelerateConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+    RequestPayer: Optional[RequestPayer]
+
+
+class GetBucketAclOutput(TypedDict, total=False):
+    Owner: Optional[Owner]
+    Grants: Optional[Grants]
+
+
+class GetBucketAclRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketAnalyticsConfigurationOutput(TypedDict, total=False):
+    AnalyticsConfiguration: Optional[AnalyticsConfiguration]
+
+
+class GetBucketAnalyticsConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: AnalyticsId
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketCorsOutput(TypedDict, total=False):
+    CORSRules: Optional[CORSRules]
+
+
+class GetBucketCorsRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class ServerSideEncryptionByDefault(TypedDict, total=False):
+    SSEAlgorithm: ServerSideEncryption
+    KMSMasterKeyID: Optional[SSEKMSKeyId]
+
+
+class ServerSideEncryptionRule(TypedDict, total=False):
+    ApplyServerSideEncryptionByDefault: Optional[ServerSideEncryptionByDefault]
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+
+
+ServerSideEncryptionRules = List[ServerSideEncryptionRule]
+
+
+class ServerSideEncryptionConfiguration(TypedDict, total=False):
+    Rules: ServerSideEncryptionRules
+
+
+class GetBucketEncryptionOutput(TypedDict, total=False):
+    ServerSideEncryptionConfiguration: Optional[ServerSideEncryptionConfiguration]
+
+
+class GetBucketEncryptionRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class Tiering(TypedDict, total=False):
+    Days: IntelligentTieringDays
+    AccessTier: IntelligentTieringAccessTier
+
+
+TieringList = List[Tiering]
+
+
+class IntelligentTieringAndOperator(TypedDict, total=False):
+    Prefix: Optional[Prefix]
+    Tags: Optional[TagSet]
+
+
+class IntelligentTieringFilter(TypedDict, total=False):
+    Prefix: Optional[Prefix]
+    Tag: Optional[Tag]
+    And: Optional[IntelligentTieringAndOperator]
+
+
+class IntelligentTieringConfiguration(TypedDict, total=False):
+    Id: IntelligentTieringId
+    Filter: Optional[IntelligentTieringFilter]
+    Status: IntelligentTieringStatus
+    Tierings: TieringList
+
+
+class GetBucketIntelligentTieringConfigurationOutput(TypedDict, total=False):
+    IntelligentTieringConfiguration: Optional[IntelligentTieringConfiguration]
+
+
+class GetBucketIntelligentTieringConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: IntelligentTieringId
+
+
+class InventorySchedule(TypedDict, total=False):
+    Frequency: InventoryFrequency
+
+
+InventoryOptionalFields = List[InventoryOptionalField]
+
+
+class InventoryFilter(TypedDict, total=False):
+    Prefix: Prefix
+
+
+class SSEKMS(TypedDict, total=False):
+    KeyId: SSEKMSKeyId
+
+
+class SSES3(TypedDict, total=False):
+    pass
+
+
+class InventoryEncryption(TypedDict, total=False):
+    SSES3: Optional[SSES3]
+    SSEKMS: Optional[SSEKMS]
+
+
+class InventoryS3BucketDestination(TypedDict, total=False):
+    AccountId: Optional[AccountId]
+    Bucket: BucketName
+    Format: InventoryFormat
+    Prefix: Optional[Prefix]
+    Encryption: Optional[InventoryEncryption]
+
+
+class InventoryDestination(TypedDict, total=False):
+    S3BucketDestination: InventoryS3BucketDestination
+
+
+class InventoryConfiguration(TypedDict, total=False):
+    Destination: InventoryDestination
+    IsEnabled: IsEnabled
+    Filter: Optional[InventoryFilter]
+    Id: InventoryId
+    IncludedObjectVersions: InventoryIncludedObjectVersions
+    OptionalFields: Optional[InventoryOptionalFields]
+    Schedule: InventorySchedule
+
+
+class GetBucketInventoryConfigurationOutput(TypedDict, total=False):
+    InventoryConfiguration: Optional[InventoryConfiguration]
+
+
+class GetBucketInventoryConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: InventoryId
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketLifecycleConfigurationOutput(TypedDict, total=False):
+    Rules: Optional[LifecycleRules]
+    TransitionDefaultMinimumObjectSize: Optional[TransitionDefaultMinimumObjectSize]
+
+
+class GetBucketLifecycleConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class Rule(TypedDict, total=False):
+    Expiration: Optional[LifecycleExpiration]
+    ID: Optional[ID]
+    Prefix: Prefix
+    Status: ExpirationStatus
+    Transition: Optional[Transition]
+    NoncurrentVersionTransition: Optional[NoncurrentVersionTransition]
+    NoncurrentVersionExpiration: Optional[NoncurrentVersionExpiration]
+    AbortIncompleteMultipartUpload: Optional[AbortIncompleteMultipartUpload]
+
+
+Rules = List[Rule]
+
+
+class GetBucketLifecycleOutput(TypedDict, total=False):
+    Rules: Optional[Rules]
+
+
+class GetBucketLifecycleRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketLocationOutput(TypedDict, total=False):
+    LocationConstraint: Optional[BucketLocationConstraint]
+
+
+class GetBucketLocationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketLoggingOutput(TypedDict, total=False):
+    LoggingEnabled: Optional[LoggingEnabled]
+
+
+class GetBucketLoggingRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class S3TablesDestinationResult(TypedDict, total=False):
+    TableBucketArn: S3TablesBucketArn
+    TableName: S3TablesName
+    TableArn: S3TablesArn
+    TableNamespace: S3TablesNamespace
+
+
+class MetadataTableConfigurationResult(TypedDict, total=False):
+    S3TablesDestinationResult: S3TablesDestinationResult
+
+
+class GetBucketMetadataTableConfigurationResult(TypedDict, total=False):
+    MetadataTableConfigurationResult: MetadataTableConfigurationResult
+    Status: MetadataTableStatus
+    Error: Optional[ErrorDetails]
+
+
+class GetBucketMetadataTableConfigurationOutput(TypedDict, total=False):
+    GetBucketMetadataTableConfigurationResult: Optional[GetBucketMetadataTableConfigurationResult]
+
+
+class GetBucketMetadataTableConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class MetricsAndOperator(TypedDict, total=False):
+    Prefix: Optional[Prefix]
+    Tags: Optional[TagSet]
+    AccessPointArn: Optional[AccessPointArn]
+
+
+class MetricsFilter(TypedDict, total=False):
+    Prefix: Optional[Prefix]
+    Tag: Optional[Tag]
+    AccessPointArn: Optional[AccessPointArn]
+    And: Optional[MetricsAndOperator]
+
+
+class MetricsConfiguration(TypedDict, total=False):
+    Id: MetricsId
+    Filter: Optional[MetricsFilter]
+
+
+class GetBucketMetricsConfigurationOutput(TypedDict, total=False):
+    MetricsConfiguration: Optional[MetricsConfiguration]
+
+
+class GetBucketMetricsConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: MetricsId
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketNotificationConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class OwnershipControlsRule(TypedDict, total=False):
+    ObjectOwnership: ObjectOwnership
+
+
+OwnershipControlsRules = List[OwnershipControlsRule]
+
+
+class OwnershipControls(TypedDict, total=False):
+    Rules: OwnershipControlsRules
+
+
+class GetBucketOwnershipControlsOutput(TypedDict, total=False):
+    OwnershipControls: Optional[OwnershipControls]
+
+
+class GetBucketOwnershipControlsRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketPolicyOutput(TypedDict, total=False):
+    Policy: Optional[Policy]
+
+
+class GetBucketPolicyRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PolicyStatus(TypedDict, total=False):
+    IsPublic: Optional[IsPublic]
+
+
+class GetBucketPolicyStatusOutput(TypedDict, total=False):
+    PolicyStatus: Optional[PolicyStatus]
+
+
+class GetBucketPolicyStatusRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class ReplicaModifications(TypedDict, total=False):
+    Status: ReplicaModificationsStatus
+
+
+class SseKmsEncryptedObjects(TypedDict, total=False):
+    Status: SseKmsEncryptedObjectsStatus
+
+
+class SourceSelectionCriteria(TypedDict, total=False):
+    SseKmsEncryptedObjects: Optional[SseKmsEncryptedObjects]
+    ReplicaModifications: Optional[ReplicaModifications]
+
+
+class ReplicationRuleAndOperator(TypedDict, total=False):
+    Prefix: Optional[Prefix]
+    Tags: Optional[TagSet]
+
+
+class ReplicationRuleFilter(TypedDict, total=False):
+    Prefix: Optional[Prefix]
+    Tag: Optional[Tag]
+    And: Optional[ReplicationRuleAndOperator]
+
+
+class ReplicationRule(TypedDict, total=False):
+    ID: Optional[ID]
+    Priority: Optional[Priority]
+    Prefix: Optional[Prefix]
+    Filter: Optional[ReplicationRuleFilter]
+    Status: ReplicationRuleStatus
+    SourceSelectionCriteria: Optional[SourceSelectionCriteria]
+    ExistingObjectReplication: Optional[ExistingObjectReplication]
+    Destination: Destination
+    DeleteMarkerReplication: Optional[DeleteMarkerReplication]
+
+
+ReplicationRules = List[ReplicationRule]
+
+
+class ReplicationConfiguration(TypedDict, total=False):
+    Role: Role
+    Rules: ReplicationRules
+
+
+class GetBucketReplicationOutput(TypedDict, total=False):
+    ReplicationConfiguration: Optional[ReplicationConfiguration]
+
+
+class GetBucketReplicationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketRequestPaymentOutput(TypedDict, total=False):
+    Payer: Optional[Payer]
+
+
+class GetBucketRequestPaymentRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketTaggingOutput(TypedDict, total=False):
+    TagSet: TagSet
+
+
+class GetBucketTaggingRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetBucketVersioningOutput(TypedDict, total=False):
+    Status: Optional[BucketVersioningStatus]
+    MFADelete: Optional[MFADeleteStatus]
+
+
+class GetBucketVersioningRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class Redirect(TypedDict, total=False):
+    HostName: Optional[HostName]
+    HttpRedirectCode: Optional[HttpRedirectCode]
+    Protocol: Optional[Protocol]
+    ReplaceKeyPrefixWith: Optional[ReplaceKeyPrefixWith]
+    ReplaceKeyWith: Optional[ReplaceKeyWith]
+
+
+class RoutingRule(TypedDict, total=False):
+    Condition: Optional[Condition]
+    Redirect: Redirect
+
+
+RoutingRules = List[RoutingRule]
+
+
+class IndexDocument(TypedDict, total=False):
+    Suffix: Suffix
+
+
+class RedirectAllRequestsTo(TypedDict, total=False):
+    HostName: HostName
+    Protocol: Optional[Protocol]
+
+
+class GetBucketWebsiteOutput(TypedDict, total=False):
+    RedirectAllRequestsTo: Optional[RedirectAllRequestsTo]
+    IndexDocument: Optional[IndexDocument]
+    ErrorDocument: Optional[ErrorDocument]
+    RoutingRules: Optional[RoutingRules]
+
+
+class GetBucketWebsiteRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetObjectAclOutput(TypedDict, total=False):
+    Owner: Optional[Owner]
+    Grants: Optional[Grants]
+    RequestCharged: Optional[RequestCharged]
+
+
+class GetObjectAclRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    VersionId: Optional[ObjectVersionId]
+    RequestPayer: Optional[RequestPayer]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class ObjectPart(TypedDict, total=False):
+    PartNumber: Optional[PartNumber]
+    Size: Optional[Size]
+    ChecksumCRC32: Optional[ChecksumCRC32]
+    ChecksumCRC32C: Optional[ChecksumCRC32C]
+    ChecksumCRC64NVME: Optional[ChecksumCRC64NVME]
+    ChecksumSHA1: Optional[ChecksumSHA1]
+    ChecksumSHA256: Optional[ChecksumSHA256]
+
+
+PartsList = List[ObjectPart]
+
+
+class GetObjectAttributesParts(TypedDict, total=False):
+    TotalPartsCount: Optional[PartsCount]
+    PartNumberMarker: Optional[PartNumberMarker]
+    NextPartNumberMarker: Optional[NextPartNumberMarker]
+    MaxParts: Optional[MaxParts]
+    IsTruncated: Optional[IsTruncated]
+    Parts: Optional[PartsList]
+
+
+class GetObjectAttributesOutput(TypedDict, total=False):
+    DeleteMarker: Optional[DeleteMarker]
+    LastModified: Optional[LastModified]
+    VersionId: Optional[ObjectVersionId]
+    RequestCharged: Optional[RequestCharged]
+    ETag: Optional[ETag]
+    Checksum: Optional[Checksum]
+    ObjectParts: Optional[GetObjectAttributesParts]
+    StorageClass: Optional[StorageClass]
+    ObjectSize: Optional[ObjectSize]
+
+
+ObjectAttributesList = List[ObjectAttributes]
+
+
+class GetObjectAttributesRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    VersionId: Optional[ObjectVersionId]
+    MaxParts: Optional[MaxParts]
+    PartNumberMarker: Optional[PartNumberMarker]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKey: Optional[SSECustomerKey]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+    RequestPayer: Optional[RequestPayer]
+    ExpectedBucketOwner: Optional[AccountId]
+    ObjectAttributes: ObjectAttributesList
+
+
+class ObjectLockLegalHold(TypedDict, total=False):
+    Status: Optional[ObjectLockLegalHoldStatus]
+
+
+class GetObjectLegalHoldOutput(TypedDict, total=False):
+    LegalHold: Optional[ObjectLockLegalHold]
+
+
+class GetObjectLegalHoldRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    VersionId: Optional[ObjectVersionId]
+    RequestPayer: Optional[RequestPayer]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class ObjectLockRule(TypedDict, total=False):
+    DefaultRetention: Optional[DefaultRetention]
+
+
+class ObjectLockConfiguration(TypedDict, total=False):
+    ObjectLockEnabled: Optional[ObjectLockEnabled]
+    Rule: Optional[ObjectLockRule]
+
+
+class GetObjectLockConfigurationOutput(TypedDict, total=False):
+    ObjectLockConfiguration: Optional[ObjectLockConfiguration]
+
+
+class GetObjectLockConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetObjectOutput(TypedDict, total=False):
+    Body: Optional[Union[Body, IO[Body], Iterable[Body]]]
+    DeleteMarker: Optional[DeleteMarker]
+    AcceptRanges: Optional[AcceptRanges]
+    Expiration: Optional[Expiration]
+    Restore: Optional[Restore]
+    LastModified: Optional[LastModified]
+    ContentLength: Optional[ContentLength]
+    ETag: Optional[ETag]
+    ChecksumCRC32: Optional[ChecksumCRC32]
+    ChecksumCRC32C: Optional[ChecksumCRC32C]
+    ChecksumCRC64NVME: Optional[ChecksumCRC64NVME]
+    ChecksumSHA1: Optional[ChecksumSHA1]
+    ChecksumSHA256: Optional[ChecksumSHA256]
+    ChecksumType: Optional[ChecksumType]
+    MissingMeta: Optional[MissingMeta]
+    VersionId: Optional[ObjectVersionId]
+    CacheControl: Optional[CacheControl]
+    ContentDisposition: Optional[ContentDisposition]
+    ContentEncoding: Optional[ContentEncoding]
+    ContentLanguage: Optional[ContentLanguage]
+    ContentRange: Optional[ContentRange]
+    ContentType: Optional[ContentType]
+    Expires: Optional[Expires]
+    WebsiteRedirectLocation: Optional[WebsiteRedirectLocation]
+    ServerSideEncryption: Optional[ServerSideEncryption]
+    Metadata: Optional[Metadata]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+    SSEKMSKeyId: Optional[SSEKMSKeyId]
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+    StorageClass: Optional[StorageClass]
+    RequestCharged: Optional[RequestCharged]
+    ReplicationStatus: Optional[ReplicationStatus]
+    PartsCount: Optional[PartsCount]
+    TagCount: Optional[TagCount]
+    ObjectLockMode: Optional[ObjectLockMode]
+    ObjectLockRetainUntilDate: Optional[ObjectLockRetainUntilDate]
+    ObjectLockLegalHoldStatus: Optional[ObjectLockLegalHoldStatus]
+    StatusCode: Optional[GetObjectResponseStatusCode]
+
+
+ResponseExpires = datetime
+IfUnmodifiedSince = datetime
+IfModifiedSince = datetime
+
+
+class GetObjectRequest(ServiceRequest):
+    Bucket: BucketName
+    IfMatch: Optional[IfMatch]
+    IfModifiedSince: Optional[IfModifiedSince]
+    IfNoneMatch: Optional[IfNoneMatch]
+    IfUnmodifiedSince: Optional[IfUnmodifiedSince]
+    Key: ObjectKey
+    Range: Optional[Range]
+    ResponseCacheControl: Optional[ResponseCacheControl]
+    ResponseContentDisposition: Optional[ResponseContentDisposition]
+    ResponseContentEncoding: Optional[ResponseContentEncoding]
+    ResponseContentLanguage: Optional[ResponseContentLanguage]
+    ResponseContentType: Optional[ResponseContentType]
+    ResponseExpires: Optional[ResponseExpires]
+    VersionId: Optional[ObjectVersionId]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKey: Optional[SSECustomerKey]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+    RequestPayer: Optional[RequestPayer]
+    PartNumber: Optional[PartNumber]
+    ExpectedBucketOwner: Optional[AccountId]
+    ChecksumMode: Optional[ChecksumMode]
+
+
+class ObjectLockRetention(TypedDict, total=False):
+    Mode: Optional[ObjectLockRetentionMode]
+    RetainUntilDate: Optional[Date]
+
+
+class GetObjectRetentionOutput(TypedDict, total=False):
+    Retention: Optional[ObjectLockRetention]
+
+
+class GetObjectRetentionRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    VersionId: Optional[ObjectVersionId]
+    RequestPayer: Optional[RequestPayer]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GetObjectTaggingOutput(TypedDict, total=False):
+    VersionId: Optional[ObjectVersionId]
+    TagSet: TagSet
+
+
+class GetObjectTaggingRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    VersionId: Optional[ObjectVersionId]
+    ExpectedBucketOwner: Optional[AccountId]
+    RequestPayer: Optional[RequestPayer]
+
+
+class GetObjectTorrentOutput(TypedDict, total=False):
+    Body: Optional[Union[Body, IO[Body], Iterable[Body]]]
+    RequestCharged: Optional[RequestCharged]
+
+
+class GetObjectTorrentRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    RequestPayer: Optional[RequestPayer]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PublicAccessBlockConfiguration(TypedDict, total=False):
+    BlockPublicAcls: Optional[Setting]
+    IgnorePublicAcls: Optional[Setting]
+    BlockPublicPolicy: Optional[Setting]
+    RestrictPublicBuckets: Optional[Setting]
+
+
+class GetPublicAccessBlockOutput(TypedDict, total=False):
+    PublicAccessBlockConfiguration: Optional[PublicAccessBlockConfiguration]
+
+
+class GetPublicAccessBlockRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class GlacierJobParameters(TypedDict, total=False):
+    Tier: Tier
+
+
+class HeadBucketOutput(TypedDict, total=False):
+    BucketRegion: Optional[BucketRegion]
+    BucketContentType: Optional[BucketContentType]
+
+
+class HeadBucketRequest(ServiceRequest):
+    Bucket: BucketName
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class HeadObjectOutput(TypedDict, total=False):
+    DeleteMarker: Optional[DeleteMarker]
+    AcceptRanges: Optional[AcceptRanges]
+    Expiration: Optional[Expiration]
+    Restore: Optional[Restore]
+    ArchiveStatus: Optional[ArchiveStatus]
+    LastModified: Optional[LastModified]
+    ContentLength: Optional[ContentLength]
+    ChecksumCRC32: Optional[ChecksumCRC32]
+    ChecksumCRC32C: Optional[ChecksumCRC32C]
+    ChecksumCRC64NVME: Optional[ChecksumCRC64NVME]
+    ChecksumSHA1: Optional[ChecksumSHA1]
+    ChecksumSHA256: Optional[ChecksumSHA256]
+    ChecksumType: Optional[ChecksumType]
+    ETag: Optional[ETag]
+    MissingMeta: Optional[MissingMeta]
+    VersionId: Optional[ObjectVersionId]
+    CacheControl: Optional[CacheControl]
+    ContentDisposition: Optional[ContentDisposition]
+    ContentEncoding: Optional[ContentEncoding]
+    ContentLanguage: Optional[ContentLanguage]
+    ContentType: Optional[ContentType]
+    ContentRange: Optional[ContentRange]
+    Expires: Optional[Expires]
+    WebsiteRedirectLocation: Optional[WebsiteRedirectLocation]
+    ServerSideEncryption: Optional[ServerSideEncryption]
+    Metadata: Optional[Metadata]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+    SSEKMSKeyId: Optional[SSEKMSKeyId]
+    BucketKeyEnabled: Optional[BucketKeyEnabled]
+    StorageClass: Optional[StorageClass]
+    RequestCharged: Optional[RequestCharged]
+    ReplicationStatus: Optional[ReplicationStatus]
+    PartsCount: Optional[PartsCount]
+    ObjectLockMode: Optional[ObjectLockMode]
+    ObjectLockRetainUntilDate: Optional[ObjectLockRetainUntilDate]
+    ObjectLockLegalHoldStatus: Optional[ObjectLockLegalHoldStatus]
+    StatusCode: Optional[GetObjectResponseStatusCode]
+
+
+class HeadObjectRequest(ServiceRequest):
+    Bucket: BucketName
+    IfMatch: Optional[IfMatch]
+    IfModifiedSince: Optional[IfModifiedSince]
+    IfNoneMatch: Optional[IfNoneMatch]
+    IfUnmodifiedSince: Optional[IfUnmodifiedSince]
+    Key: ObjectKey
+    Range: Optional[Range]
+    ResponseCacheControl: Optional[ResponseCacheControl]
+    ResponseContentDisposition: Optional[ResponseContentDisposition]
+    ResponseContentEncoding: Optional[ResponseContentEncoding]
+    ResponseContentLanguage: Optional[ResponseContentLanguage]
+    ResponseContentType: Optional[ResponseContentType]
+    ResponseExpires: Optional[ResponseExpires]
+    VersionId: Optional[ObjectVersionId]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKey: Optional[SSECustomerKey]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+    RequestPayer: Optional[RequestPayer]
+    PartNumber: Optional[PartNumber]
+    ExpectedBucketOwner: Optional[AccountId]
+    ChecksumMode: Optional[ChecksumMode]
+
+
+Initiated = datetime
+
+
+class Initiator(TypedDict, total=False):
+    ID: Optional[ID]
+    DisplayName: Optional[DisplayName]
+
+
+class ParquetInput(TypedDict, total=False):
+    pass
+
+
+class JSONInput(TypedDict, total=False):
+    Type: Optional[JSONType]
+
+
+class InputSerialization(TypedDict, total=False):
+    CSV: Optional[CSVInput]
+    CompressionType: Optional[CompressionType]
+    JSON: Optional[JSONInput]
+    Parquet: Optional[ParquetInput]
+
+
+IntelligentTieringConfigurationList = List[IntelligentTieringConfiguration]
+InventoryConfigurationList = List[InventoryConfiguration]
+
+
+class JSONOutput(TypedDict, total=False):
+    RecordDelimiter: Optional[RecordDelimiter]
+
+
+class S3KeyFilter(TypedDict, total=False):
+    FilterRules: Optional[FilterRuleList]
+
+
+class NotificationConfigurationFilter(TypedDict, total=False):
+    Key: Optional[S3KeyFilter]
+
+
+class LambdaFunctionConfiguration(TypedDict, total=False):
+    Id: Optional[NotificationId]
+    LambdaFunctionArn: LambdaFunctionArn
+    Events: EventList
+    Filter: Optional[NotificationConfigurationFilter]
+
+
+LambdaFunctionConfigurationList = List[LambdaFunctionConfiguration]
+
+
+class LifecycleConfiguration(TypedDict, total=False):
+    Rules: Rules
+
+
+class ListBucketAnalyticsConfigurationsOutput(TypedDict, total=False):
+    IsTruncated: Optional[IsTruncated]
+    ContinuationToken: Optional[Token]
+    NextContinuationToken: Optional[NextToken]
+    AnalyticsConfigurationList: Optional[AnalyticsConfigurationList]
+
+
+class ListBucketAnalyticsConfigurationsRequest(ServiceRequest):
+    Bucket: BucketName
+    ContinuationToken: Optional[Token]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class ListBucketIntelligentTieringConfigurationsOutput(TypedDict, total=False):
+    IsTruncated: Optional[IsTruncated]
+    ContinuationToken: Optional[Token]
+    NextContinuationToken: Optional[NextToken]
+    IntelligentTieringConfigurationList: Optional[IntelligentTieringConfigurationList]
+
+
+class ListBucketIntelligentTieringConfigurationsRequest(ServiceRequest):
+    Bucket: BucketName
+    ContinuationToken: Optional[Token]
+
+
+class ListBucketInventoryConfigurationsOutput(TypedDict, total=False):
+    ContinuationToken: Optional[Token]
+    InventoryConfigurationList: Optional[InventoryConfigurationList]
+    IsTruncated: Optional[IsTruncated]
+    NextContinuationToken: Optional[NextToken]
+
+
+class ListBucketInventoryConfigurationsRequest(ServiceRequest):
+    Bucket: BucketName
+    ContinuationToken: Optional[Token]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+MetricsConfigurationList = List[MetricsConfiguration]
+
+
+class ListBucketMetricsConfigurationsOutput(TypedDict, total=False):
+    IsTruncated: Optional[IsTruncated]
+    ContinuationToken: Optional[Token]
+    NextContinuationToken: Optional[NextToken]
+    MetricsConfigurationList: Optional[MetricsConfigurationList]
+
+
+class ListBucketMetricsConfigurationsRequest(ServiceRequest):
+    Bucket: BucketName
+    ContinuationToken: Optional[Token]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class ListBucketsOutput(TypedDict, total=False):
+    Owner: Optional[Owner]
+    ContinuationToken: Optional[NextToken]
+    Prefix: Optional[Prefix]
+    Buckets: Optional[Buckets]
+
+
+class ListBucketsRequest(ServiceRequest):
+    MaxBuckets: Optional[MaxBuckets]
+    ContinuationToken: Optional[Token]
+    Prefix: Optional[Prefix]
+    BucketRegion: Optional[BucketRegion]
+
+
+class ListDirectoryBucketsOutput(TypedDict, total=False):
+    Buckets: Optional[Buckets]
+    ContinuationToken: Optional[DirectoryBucketToken]
+
+
+class ListDirectoryBucketsRequest(ServiceRequest):
+    ContinuationToken: Optional[DirectoryBucketToken]
+    MaxDirectoryBuckets: Optional[MaxDirectoryBuckets]
+
+
+class MultipartUpload(TypedDict, total=False):
+    UploadId: Optional[MultipartUploadId]
+    Key: Optional[ObjectKey]
+    Initiated: Optional[Initiated]
+    StorageClass: Optional[StorageClass]
+    Owner: Optional[Owner]
+    Initiator: Optional[Initiator]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ChecksumType: Optional[ChecksumType]
+
+
+MultipartUploadList = List[MultipartUpload]
+
+
+class ListMultipartUploadsOutput(TypedDict, total=False):
+    Bucket: Optional[BucketName]
+    KeyMarker: Optional[KeyMarker]
+    UploadIdMarker: Optional[UploadIdMarker]
+    NextKeyMarker: Optional[NextKeyMarker]
+    Prefix: Optional[Prefix]
+    Delimiter: Optional[Delimiter]
+    NextUploadIdMarker: Optional[NextUploadIdMarker]
+    MaxUploads: Optional[MaxUploads]
+    IsTruncated: Optional[IsTruncated]
+    Uploads: Optional[MultipartUploadList]
+    CommonPrefixes: Optional[CommonPrefixList]
+    EncodingType: Optional[EncodingType]
+    RequestCharged: Optional[RequestCharged]
+
+
+class ListMultipartUploadsRequest(ServiceRequest):
+    Bucket: BucketName
+    Delimiter: Optional[Delimiter]
+    EncodingType: Optional[EncodingType]
+    KeyMarker: Optional[KeyMarker]
+    MaxUploads: Optional[MaxUploads]
+    Prefix: Optional[Prefix]
+    UploadIdMarker: Optional[UploadIdMarker]
+    ExpectedBucketOwner: Optional[AccountId]
+    RequestPayer: Optional[RequestPayer]
+
+
+RestoreExpiryDate = datetime
+
+
+class RestoreStatus(TypedDict, total=False):
+    IsRestoreInProgress: Optional[IsRestoreInProgress]
+    RestoreExpiryDate: Optional[RestoreExpiryDate]
+
+
+class ObjectVersion(TypedDict, total=False):
+    ETag: Optional[ETag]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithmList]
+    ChecksumType: Optional[ChecksumType]
+    Size: Optional[Size]
+    StorageClass: Optional[ObjectVersionStorageClass]
+    Key: Optional[ObjectKey]
+    VersionId: Optional[ObjectVersionId]
+    IsLatest: Optional[IsLatest]
+    LastModified: Optional[LastModified]
+    Owner: Optional[Owner]
+    RestoreStatus: Optional[RestoreStatus]
+
+
+ObjectVersionList = List[ObjectVersion]
+
+
+class ListObjectVersionsOutput(TypedDict, total=False):
+    IsTruncated: Optional[IsTruncated]
+    KeyMarker: Optional[KeyMarker]
+    VersionIdMarker: Optional[VersionIdMarker]
+    NextKeyMarker: Optional[NextKeyMarker]
+    NextVersionIdMarker: Optional[NextVersionIdMarker]
+    DeleteMarkers: Optional[DeleteMarkers]
+    Name: Optional[BucketName]
+    Prefix: Optional[Prefix]
+    Delimiter: Optional[Delimiter]
+    MaxKeys: Optional[MaxKeys]
+    CommonPrefixes: Optional[CommonPrefixList]
+    EncodingType: Optional[EncodingType]
+    RequestCharged: Optional[RequestCharged]
+    Versions: Optional[ObjectVersionList]
+
+
+OptionalObjectAttributesList = List[OptionalObjectAttributes]
+
+
+class ListObjectVersionsRequest(ServiceRequest):
+    Bucket: BucketName
+    Delimiter: Optional[Delimiter]
+    EncodingType: Optional[EncodingType]
+    KeyMarker: Optional[KeyMarker]
+    MaxKeys: Optional[MaxKeys]
+    Prefix: Optional[Prefix]
+    VersionIdMarker: Optional[VersionIdMarker]
+    ExpectedBucketOwner: Optional[AccountId]
+    RequestPayer: Optional[RequestPayer]
+    OptionalObjectAttributes: Optional[OptionalObjectAttributesList]
+
+
+class Object(TypedDict, total=False):
+    Key: Optional[ObjectKey]
+    LastModified: Optional[LastModified]
+    ETag: Optional[ETag]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithmList]
+    ChecksumType: Optional[ChecksumType]
+    Size: Optional[Size]
+    StorageClass: Optional[ObjectStorageClass]
+    Owner: Optional[Owner]
+    RestoreStatus: Optional[RestoreStatus]
+
+
+ObjectList = List[Object]
+
+
+class ListObjectsOutput(TypedDict, total=False):
+    IsTruncated: Optional[IsTruncated]
+    Marker: Optional[Marker]
+    NextMarker: Optional[NextMarker]
+    Name: Optional[BucketName]
+    Prefix: Optional[Prefix]
+    Delimiter: Optional[Delimiter]
+    MaxKeys: Optional[MaxKeys]
+    CommonPrefixes: Optional[CommonPrefixList]
+    EncodingType: Optional[EncodingType]
+    RequestCharged: Optional[RequestCharged]
+    BucketRegion: Optional[BucketRegion]
+    Contents: Optional[ObjectList]
+
+
+class ListObjectsRequest(ServiceRequest):
+    Bucket: BucketName
+    Delimiter: Optional[Delimiter]
+    EncodingType: Optional[EncodingType]
+    Marker: Optional[Marker]
+    MaxKeys: Optional[MaxKeys]
+    Prefix: Optional[Prefix]
+    RequestPayer: Optional[RequestPayer]
+    ExpectedBucketOwner: Optional[AccountId]
+    OptionalObjectAttributes: Optional[OptionalObjectAttributesList]
+
+
+class ListObjectsV2Output(TypedDict, total=False):
+    IsTruncated: Optional[IsTruncated]
+    Name: Optional[BucketName]
+    Prefix: Optional[Prefix]
+    Delimiter: Optional[Delimiter]
+    MaxKeys: Optional[MaxKeys]
+    CommonPrefixes: Optional[CommonPrefixList]
+    EncodingType: Optional[EncodingType]
+    KeyCount: Optional[KeyCount]
+    ContinuationToken: Optional[Token]
+    NextContinuationToken: Optional[NextToken]
+    StartAfter: Optional[StartAfter]
+    RequestCharged: Optional[RequestCharged]
+    BucketRegion: Optional[BucketRegion]
+    Contents: Optional[ObjectList]
+
+
+class ListObjectsV2Request(ServiceRequest):
+    Bucket: BucketName
+    Delimiter: Optional[Delimiter]
+    EncodingType: Optional[EncodingType]
+    MaxKeys: Optional[MaxKeys]
+    Prefix: Optional[Prefix]
+    ContinuationToken: Optional[Token]
+    FetchOwner: Optional[FetchOwner]
+    StartAfter: Optional[StartAfter]
+    RequestPayer: Optional[RequestPayer]
+    ExpectedBucketOwner: Optional[AccountId]
+    OptionalObjectAttributes: Optional[OptionalObjectAttributesList]
+
+
+class Part(TypedDict, total=False):
+    PartNumber: Optional[PartNumber]
+    LastModified: Optional[LastModified]
+    ETag: Optional[ETag]
+    Size: Optional[Size]
+    ChecksumCRC32: Optional[ChecksumCRC32]
+    ChecksumCRC32C: Optional[ChecksumCRC32C]
+    ChecksumCRC64NVME: Optional[ChecksumCRC64NVME]
+    ChecksumSHA1: Optional[ChecksumSHA1]
+    ChecksumSHA256: Optional[ChecksumSHA256]
+
+
+Parts = List[Part]
+
+
+class ListPartsOutput(TypedDict, total=False):
+    AbortDate: Optional[AbortDate]
+    AbortRuleId: Optional[AbortRuleId]
+    Bucket: Optional[BucketName]
+    Key: Optional[ObjectKey]
+    UploadId: Optional[MultipartUploadId]
+    PartNumberMarker: Optional[PartNumberMarker]
+    NextPartNumberMarker: Optional[NextPartNumberMarker]
+    MaxParts: Optional[MaxParts]
+    IsTruncated: Optional[IsTruncated]
+    Parts: Optional[Parts]
+    Initiator: Optional[Initiator]
+    Owner: Optional[Owner]
+    StorageClass: Optional[StorageClass]
+    RequestCharged: Optional[RequestCharged]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ChecksumType: Optional[ChecksumType]
+
+
+class ListPartsRequest(ServiceRequest):
+    Bucket: BucketName
+    Key: ObjectKey
+    MaxParts: Optional[MaxParts]
+    PartNumberMarker: Optional[PartNumberMarker]
+    UploadId: MultipartUploadId
+    RequestPayer: Optional[RequestPayer]
+    ExpectedBucketOwner: Optional[AccountId]
+    SSECustomerAlgorithm: Optional[SSECustomerAlgorithm]
+    SSECustomerKey: Optional[SSECustomerKey]
+    SSECustomerKeyMD5: Optional[SSECustomerKeyMD5]
+
+
+class MetadataEntry(TypedDict, total=False):
+    Name: Optional[MetadataKey]
+    Value: Optional[MetadataValue]
+
+
+class QueueConfiguration(TypedDict, total=False):
+    Id: Optional[NotificationId]
+    QueueArn: QueueArn
+    Events: EventList
+    Filter: Optional[NotificationConfigurationFilter]
+
+
+QueueConfigurationList = List[QueueConfiguration]
+
+
+class TopicConfiguration(TypedDict, total=False):
+    Id: Optional[NotificationId]
+    TopicArn: TopicArn
+    Events: EventList
+    Filter: Optional[NotificationConfigurationFilter]
+
+
+TopicConfigurationList = List[TopicConfiguration]
+
+
+class NotificationConfiguration(TypedDict, total=False):
+    TopicConfigurations: Optional[TopicConfigurationList]
+    QueueConfigurations: Optional[QueueConfigurationList]
+    LambdaFunctionConfigurations: Optional[LambdaFunctionConfigurationList]
+    EventBridgeConfiguration: Optional[EventBridgeConfiguration]
+
+
+class QueueConfigurationDeprecated(TypedDict, total=False):
+    Id: Optional[NotificationId]
+    Event: Optional[Event]
+    Events: Optional[EventList]
+    Queue: Optional[QueueArn]
+
+
+class TopicConfigurationDeprecated(TypedDict, total=False):
+    Id: Optional[NotificationId]
+    Events: Optional[EventList]
+    Event: Optional[Event]
+    Topic: Optional[TopicArn]
+
+
+class NotificationConfigurationDeprecated(TypedDict, total=False):
+    TopicConfiguration: Optional[TopicConfigurationDeprecated]
+    QueueConfiguration: Optional[QueueConfigurationDeprecated]
+    CloudFunctionConfiguration: Optional[CloudFunctionConfiguration]
+
+
+UserMetadata = List[MetadataEntry]
+
+
+class Tagging(TypedDict, total=False):
+    TagSet: TagSet
+
+
+class S3Location(TypedDict, total=False):
+    BucketName: BucketName
+    Prefix: LocationPrefix
+    Encryption: Optional[Encryption]
+    CannedACL: Optional[ObjectCannedACL]
+    AccessControlList: Optional[Grants]
+    Tagging: Optional[Tagging]
+    UserMetadata: Optional[UserMetadata]
+    StorageClass: Optional[StorageClass]
+
+
+class OutputLocation(TypedDict, total=False):
+    S3: Optional[S3Location]
+
+
+class OutputSerialization(TypedDict, total=False):
+    CSV: Optional[CSVOutput]
+    JSON: Optional[JSONOutput]
+
+
+class Progress(TypedDict, total=False):
+    BytesScanned: Optional[BytesScanned]
+    BytesProcessed: Optional[BytesProcessed]
+    BytesReturned: Optional[BytesReturned]
+
+
+class ProgressEvent(TypedDict, total=False):
+    Details: Optional[Progress]
+
+
+class PutBucketAccelerateConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    AccelerateConfiguration: AccelerateConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+
+
+class PutBucketAclRequest(ServiceRequest):
+    ACL: Optional[BucketCannedACL]
+    AccessControlPolicy: Optional[AccessControlPolicy]
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    GrantFullControl: Optional[GrantFullControl]
+    GrantRead: Optional[GrantRead]
+    GrantReadACP: Optional[GrantReadACP]
+    GrantWrite: Optional[GrantWrite]
+    GrantWriteACP: Optional[GrantWriteACP]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketAnalyticsConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: AnalyticsId
+    AnalyticsConfiguration: AnalyticsConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketCorsRequest(ServiceRequest):
+    Bucket: BucketName
+    CORSConfiguration: CORSConfiguration
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketEncryptionRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ServerSideEncryptionConfiguration: ServerSideEncryptionConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketIntelligentTieringConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: IntelligentTieringId
+    IntelligentTieringConfiguration: IntelligentTieringConfiguration
+
+
+class PutBucketInventoryConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: InventoryId
+    InventoryConfiguration: InventoryConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketLifecycleConfigurationOutput(TypedDict, total=False):
+    TransitionDefaultMinimumObjectSize: Optional[TransitionDefaultMinimumObjectSize]
+
+
+class PutBucketLifecycleConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    LifecycleConfiguration: Optional[BucketLifecycleConfiguration]
+    ExpectedBucketOwner: Optional[AccountId]
+    TransitionDefaultMinimumObjectSize: Optional[TransitionDefaultMinimumObjectSize]
+
+
+class PutBucketLifecycleRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    LifecycleConfiguration: Optional[LifecycleConfiguration]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketLoggingRequest(ServiceRequest):
+    Bucket: BucketName
+    BucketLoggingStatus: BucketLoggingStatus
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketMetricsConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    Id: MetricsId
+    MetricsConfiguration: MetricsConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketNotificationConfigurationRequest(ServiceRequest):
+    Bucket: BucketName
+    NotificationConfiguration: NotificationConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+    SkipDestinationValidation: Optional[SkipValidation]
+
+
+class PutBucketNotificationRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    NotificationConfiguration: NotificationConfigurationDeprecated
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketOwnershipControlsRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ExpectedBucketOwner: Optional[AccountId]
+    OwnershipControls: OwnershipControls
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+
+
+class PutBucketPolicyRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ConfirmRemoveSelfBucketAccess: Optional[ConfirmRemoveSelfBucketAccess]
+    Policy: Policy
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketReplicationRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    ReplicationConfiguration: ReplicationConfiguration
+    Token: Optional[ObjectLockToken]
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class RequestPaymentConfiguration(TypedDict, total=False):
+    Payer: Payer
+
+
+class PutBucketRequestPaymentRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    RequestPaymentConfiguration: RequestPaymentConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutBucketTaggingRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    Tagging: Tagging
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class VersioningConfiguration(TypedDict, total=False):
+    MFADelete: Optional[MFADelete]
+    Status: Optional[BucketVersioningStatus]
+
+
+class PutBucketVersioningRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    MFA: Optional[MFA]
+    VersioningConfiguration: VersioningConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class WebsiteConfiguration(TypedDict, total=False):
+    ErrorDocument: Optional[ErrorDocument]
+    IndexDocument: Optional[IndexDocument]
+    RedirectAllRequestsTo: Optional[RedirectAllRequestsTo]
+    RoutingRules: Optional[RoutingRules]
+
+
+class PutBucketWebsiteRequest(ServiceRequest):
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm: Optional[ChecksumAlgorithm]
+    WebsiteConfiguration: WebsiteConfiguration
+    ExpectedBucketOwner: Optional[AccountId]
+
+
+class PutObjectAclOutput(TypedDict, total=False):
+    RequestCharged: Optional[RequestCharged]
+
+
+class PutObjectAclRequest(ServiceRequest):
+    ACL: Optional[ObjectCannedACL]
+    AccessControlPolicy: Optional[AccessControlPolicy]
+    Bucket: BucketName
+    ContentMD5: Optional[ContentMD5]
+    ChecksumAlgorithm:
Optional[ChecksumAlgorithm] + GrantFullControl: Optional[GrantFullControl] + GrantRead: Optional[GrantRead] + GrantReadACP: Optional[GrantReadACP] + GrantWrite: Optional[GrantWrite] + GrantWriteACP: Optional[GrantWriteACP] + Key: ObjectKey + RequestPayer: Optional[RequestPayer] + VersionId: Optional[ObjectVersionId] + ExpectedBucketOwner: Optional[AccountId] + + +class PutObjectLegalHoldOutput(TypedDict, total=False): + RequestCharged: Optional[RequestCharged] + + +class PutObjectLegalHoldRequest(ServiceRequest): + Bucket: BucketName + Key: ObjectKey + LegalHold: Optional[ObjectLockLegalHold] + RequestPayer: Optional[RequestPayer] + VersionId: Optional[ObjectVersionId] + ContentMD5: Optional[ContentMD5] + ChecksumAlgorithm: Optional[ChecksumAlgorithm] + ExpectedBucketOwner: Optional[AccountId] + + +class PutObjectLockConfigurationOutput(TypedDict, total=False): + RequestCharged: Optional[RequestCharged] + + +class PutObjectLockConfigurationRequest(ServiceRequest): + Bucket: BucketName + ObjectLockConfiguration: Optional[ObjectLockConfiguration] + RequestPayer: Optional[RequestPayer] + Token: Optional[ObjectLockToken] + ContentMD5: Optional[ContentMD5] + ChecksumAlgorithm: Optional[ChecksumAlgorithm] + ExpectedBucketOwner: Optional[AccountId] + + +class PutObjectOutput(TypedDict, total=False): + Expiration: Optional[Expiration] + ETag: Optional[ETag] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + ChecksumType: Optional[ChecksumType] + ServerSideEncryption: Optional[ServerSideEncryption] + VersionId: Optional[ObjectVersionId] + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + SSEKMSKeyId: Optional[SSEKMSKeyId] + SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext] + BucketKeyEnabled: Optional[BucketKeyEnabled] + Size: Optional[Size] + RequestCharged: Optional[RequestCharged] + + +WriteOffsetBytes = int + + +class PutObjectRequest(ServiceRequest): + Body: Optional[IO[Body]] + ACL: Optional[ObjectCannedACL] + Bucket: BucketName + CacheControl: Optional[CacheControl] + ContentDisposition: Optional[ContentDisposition] + ContentEncoding: Optional[ContentEncoding] + ContentLanguage: Optional[ContentLanguage] + ContentLength: Optional[ContentLength] + ContentMD5: Optional[ContentMD5] + ContentType: Optional[ContentType] + ChecksumAlgorithm: Optional[ChecksumAlgorithm] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + Expires: Optional[Expires] + IfMatch: Optional[IfMatch] + IfNoneMatch: Optional[IfNoneMatch] + GrantFullControl: Optional[GrantFullControl] + GrantRead: Optional[GrantRead] + GrantReadACP: Optional[GrantReadACP] + GrantWriteACP: Optional[GrantWriteACP] + Key: ObjectKey + WriteOffsetBytes: Optional[WriteOffsetBytes] + Metadata: Optional[Metadata] + ServerSideEncryption: Optional[ServerSideEncryption] + StorageClass: Optional[StorageClass] + WebsiteRedirectLocation: Optional[WebsiteRedirectLocation] + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKey: Optional[SSECustomerKey] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + SSEKMSKeyId: Optional[SSEKMSKeyId] + SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext] + BucketKeyEnabled: Optional[BucketKeyEnabled] + 
RequestPayer: Optional[RequestPayer] + Tagging: Optional[TaggingHeader] + ObjectLockMode: Optional[ObjectLockMode] + ObjectLockRetainUntilDate: Optional[ObjectLockRetainUntilDate] + ObjectLockLegalHoldStatus: Optional[ObjectLockLegalHoldStatus] + ExpectedBucketOwner: Optional[AccountId] + + +class PutObjectRetentionOutput(TypedDict, total=False): + RequestCharged: Optional[RequestCharged] + + +class PutObjectRetentionRequest(ServiceRequest): + Bucket: BucketName + Key: ObjectKey + Retention: Optional[ObjectLockRetention] + RequestPayer: Optional[RequestPayer] + VersionId: Optional[ObjectVersionId] + BypassGovernanceRetention: Optional[BypassGovernanceRetention] + ContentMD5: Optional[ContentMD5] + ChecksumAlgorithm: Optional[ChecksumAlgorithm] + ExpectedBucketOwner: Optional[AccountId] + + +class PutObjectTaggingOutput(TypedDict, total=False): + VersionId: Optional[ObjectVersionId] + + +class PutObjectTaggingRequest(ServiceRequest): + Bucket: BucketName + Key: ObjectKey + VersionId: Optional[ObjectVersionId] + ContentMD5: Optional[ContentMD5] + ChecksumAlgorithm: Optional[ChecksumAlgorithm] + Tagging: Tagging + ExpectedBucketOwner: Optional[AccountId] + RequestPayer: Optional[RequestPayer] + + +class PutPublicAccessBlockRequest(ServiceRequest): + Bucket: BucketName + ContentMD5: Optional[ContentMD5] + ChecksumAlgorithm: Optional[ChecksumAlgorithm] + PublicAccessBlockConfiguration: PublicAccessBlockConfiguration + ExpectedBucketOwner: Optional[AccountId] + + +class RecordsEvent(TypedDict, total=False): + Payload: Optional[Body] + + +class RequestProgress(TypedDict, total=False): + Enabled: Optional[EnableRequestProgress] + + +class RestoreObjectOutput(TypedDict, total=False): + RequestCharged: Optional[RequestCharged] + RestoreOutputPath: Optional[RestoreOutputPath] + StatusCode: Optional[RestoreObjectOutputStatusCode] + + +class SelectParameters(TypedDict, total=False): + InputSerialization: InputSerialization + ExpressionType: ExpressionType + Expression: Expression + OutputSerialization: OutputSerialization + + +class RestoreRequest(TypedDict, total=False): + Days: Optional[Days] + GlacierJobParameters: Optional[GlacierJobParameters] + Type: Optional[RestoreRequestType] + Tier: Optional[Tier] + Description: Optional[Description] + SelectParameters: Optional[SelectParameters] + OutputLocation: Optional[OutputLocation] + + +class RestoreObjectRequest(ServiceRequest): + Bucket: BucketName + Key: ObjectKey + VersionId: Optional[ObjectVersionId] + RestoreRequest: Optional[RestoreRequest] + RequestPayer: Optional[RequestPayer] + ChecksumAlgorithm: Optional[ChecksumAlgorithm] + ExpectedBucketOwner: Optional[AccountId] + + +Start = int + + +class ScanRange(TypedDict, total=False): + Start: Optional[Start] + End: Optional[End] + + +class Stats(TypedDict, total=False): + BytesScanned: Optional[BytesScanned] + BytesProcessed: Optional[BytesProcessed] + BytesReturned: Optional[BytesReturned] + + +class StatsEvent(TypedDict, total=False): + Details: Optional[Stats] + + +class SelectObjectContentEventStream(TypedDict, total=False): + Records: Optional[RecordsEvent] + Stats: Optional[StatsEvent] + Progress: Optional[ProgressEvent] + Cont: Optional[ContinuationEvent] + End: Optional[EndEvent] + + +class SelectObjectContentOutput(TypedDict, total=False): + Payload: Iterator[SelectObjectContentEventStream] + + +class SelectObjectContentRequest(ServiceRequest): + Bucket: BucketName + Key: ObjectKey + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKey: Optional[SSECustomerKey] + 
SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + Expression: Expression + ExpressionType: ExpressionType + RequestProgress: Optional[RequestProgress] + InputSerialization: InputSerialization + OutputSerialization: OutputSerialization + ScanRange: Optional[ScanRange] + ExpectedBucketOwner: Optional[AccountId] + + +class UploadPartCopyOutput(TypedDict, total=False): + CopySourceVersionId: Optional[CopySourceVersionId] + CopyPartResult: Optional[CopyPartResult] + ServerSideEncryption: Optional[ServerSideEncryption] + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + SSEKMSKeyId: Optional[SSEKMSKeyId] + BucketKeyEnabled: Optional[BucketKeyEnabled] + RequestCharged: Optional[RequestCharged] + + +class UploadPartCopyRequest(ServiceRequest): + Bucket: BucketName + CopySource: CopySource + CopySourceIfMatch: Optional[CopySourceIfMatch] + CopySourceIfModifiedSince: Optional[CopySourceIfModifiedSince] + CopySourceIfNoneMatch: Optional[CopySourceIfNoneMatch] + CopySourceIfUnmodifiedSince: Optional[CopySourceIfUnmodifiedSince] + CopySourceRange: Optional[CopySourceRange] + Key: ObjectKey + PartNumber: PartNumber + UploadId: MultipartUploadId + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKey: Optional[SSECustomerKey] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + CopySourceSSECustomerAlgorithm: Optional[CopySourceSSECustomerAlgorithm] + CopySourceSSECustomerKey: Optional[CopySourceSSECustomerKey] + CopySourceSSECustomerKeyMD5: Optional[CopySourceSSECustomerKeyMD5] + RequestPayer: Optional[RequestPayer] + ExpectedBucketOwner: Optional[AccountId] + ExpectedSourceBucketOwner: Optional[AccountId] + + +class UploadPartOutput(TypedDict, total=False): + ServerSideEncryption: Optional[ServerSideEncryption] + ETag: Optional[ETag] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + SSEKMSKeyId: Optional[SSEKMSKeyId] + BucketKeyEnabled: Optional[BucketKeyEnabled] + RequestCharged: Optional[RequestCharged] + + +class UploadPartRequest(ServiceRequest): + Body: Optional[IO[Body]] + Bucket: BucketName + ContentLength: Optional[ContentLength] + ContentMD5: Optional[ContentMD5] + ChecksumAlgorithm: Optional[ChecksumAlgorithm] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + Key: ObjectKey + PartNumber: PartNumber + UploadId: MultipartUploadId + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKey: Optional[SSECustomerKey] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + RequestPayer: Optional[RequestPayer] + ExpectedBucketOwner: Optional[AccountId] + + +class WriteGetObjectResponseRequest(ServiceRequest): + Body: Optional[IO[Body]] + RequestRoute: RequestRoute + RequestToken: RequestToken + StatusCode: Optional[GetObjectResponseStatusCode] + ErrorCode: Optional[ErrorCode] + ErrorMessage: Optional[ErrorMessage] + AcceptRanges: Optional[AcceptRanges] + CacheControl: Optional[CacheControl] + ContentDisposition: Optional[ContentDisposition] + ContentEncoding: Optional[ContentEncoding] + ContentLanguage: Optional[ContentLanguage] + ContentLength: Optional[ContentLength] + ContentRange: 
Optional[ContentRange] + ContentType: Optional[ContentType] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + DeleteMarker: Optional[DeleteMarker] + ETag: Optional[ETag] + Expires: Optional[Expires] + Expiration: Optional[Expiration] + LastModified: Optional[LastModified] + MissingMeta: Optional[MissingMeta] + Metadata: Optional[Metadata] + ObjectLockMode: Optional[ObjectLockMode] + ObjectLockLegalHoldStatus: Optional[ObjectLockLegalHoldStatus] + ObjectLockRetainUntilDate: Optional[ObjectLockRetainUntilDate] + PartsCount: Optional[PartsCount] + ReplicationStatus: Optional[ReplicationStatus] + RequestCharged: Optional[RequestCharged] + Restore: Optional[Restore] + ServerSideEncryption: Optional[ServerSideEncryption] + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSEKMSKeyId: Optional[SSEKMSKeyId] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + StorageClass: Optional[StorageClass] + TagCount: Optional[TagCount] + VersionId: Optional[ObjectVersionId] + BucketKeyEnabled: Optional[BucketKeyEnabled] + + +class PostObjectRequest(ServiceRequest): + Body: Optional[IO[Body]] + Bucket: BucketName + + +class PostResponse(TypedDict, total=False): + StatusCode: Optional[GetObjectResponseStatusCode] + Location: Optional[Location] + LocationHeader: Optional[Location] + Bucket: Optional[BucketName] + Key: Optional[ObjectKey] + Expiration: Optional[Expiration] + ETag: Optional[ETag] + ETagHeader: Optional[ETag] + ChecksumCRC32: Optional[ChecksumCRC32] + ChecksumCRC32C: Optional[ChecksumCRC32C] + ChecksumCRC64NVME: Optional[ChecksumCRC64NVME] + ChecksumSHA1: Optional[ChecksumSHA1] + ChecksumSHA256: Optional[ChecksumSHA256] + ChecksumType: Optional[ChecksumType] + ServerSideEncryption: Optional[ServerSideEncryption] + VersionId: Optional[ObjectVersionId] + SSECustomerAlgorithm: Optional[SSECustomerAlgorithm] + SSECustomerKeyMD5: Optional[SSECustomerKeyMD5] + SSEKMSKeyId: Optional[SSEKMSKeyId] + SSEKMSEncryptionContext: Optional[SSEKMSEncryptionContext] + BucketKeyEnabled: Optional[BucketKeyEnabled] + RequestCharged: Optional[RequestCharged] + + +class S3Api: + service = "s3" + version = "2006-03-01" + + @handler("AbortMultipartUpload") + def abort_multipart_upload( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + upload_id: MultipartUploadId, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + if_match_initiated_time: IfMatchInitiatedTime | None = None, + **kwargs, + ) -> AbortMultipartUploadOutput: + raise NotImplementedError + + @handler("CompleteMultipartUpload") + def complete_multipart_upload( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + upload_id: MultipartUploadId, + multipart_upload: CompletedMultipartUpload | None = None, + checksum_crc32: ChecksumCRC32 | None = None, + checksum_crc32_c: ChecksumCRC32C | None = None, + checksum_crc64_nvme: ChecksumCRC64NVME | None = None, + checksum_sha1: ChecksumSHA1 | None = None, + checksum_sha256: ChecksumSHA256 | None = None, + checksum_type: ChecksumType | None = None, + mpu_object_size: MpuObjectSize | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + if_match: IfMatch | None = None, + if_none_match: IfNoneMatch | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey 
| None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + **kwargs, + ) -> CompleteMultipartUploadOutput: + raise NotImplementedError + + @handler("CopyObject") + def copy_object( + self, + context: RequestContext, + bucket: BucketName, + copy_source: CopySource, + key: ObjectKey, + acl: ObjectCannedACL | None = None, + cache_control: CacheControl | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + content_disposition: ContentDisposition | None = None, + content_encoding: ContentEncoding | None = None, + content_language: ContentLanguage | None = None, + content_type: ContentType | None = None, + copy_source_if_match: CopySourceIfMatch | None = None, + copy_source_if_modified_since: CopySourceIfModifiedSince | None = None, + copy_source_if_none_match: CopySourceIfNoneMatch | None = None, + copy_source_if_unmodified_since: CopySourceIfUnmodifiedSince | None = None, + expires: Expires | None = None, + grant_full_control: GrantFullControl | None = None, + grant_read: GrantRead | None = None, + grant_read_acp: GrantReadACP | None = None, + grant_write_acp: GrantWriteACP | None = None, + metadata: Metadata | None = None, + metadata_directive: MetadataDirective | None = None, + tagging_directive: TaggingDirective | None = None, + server_side_encryption: ServerSideEncryption | None = None, + storage_class: StorageClass | None = None, + website_redirect_location: WebsiteRedirectLocation | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + ssekms_key_id: SSEKMSKeyId | None = None, + ssekms_encryption_context: SSEKMSEncryptionContext | None = None, + bucket_key_enabled: BucketKeyEnabled | None = None, + copy_source_sse_customer_algorithm: CopySourceSSECustomerAlgorithm | None = None, + copy_source_sse_customer_key: CopySourceSSECustomerKey | None = None, + copy_source_sse_customer_key_md5: CopySourceSSECustomerKeyMD5 | None = None, + request_payer: RequestPayer | None = None, + tagging: TaggingHeader | None = None, + object_lock_mode: ObjectLockMode | None = None, + object_lock_retain_until_date: ObjectLockRetainUntilDate | None = None, + object_lock_legal_hold_status: ObjectLockLegalHoldStatus | None = None, + expected_bucket_owner: AccountId | None = None, + expected_source_bucket_owner: AccountId | None = None, + **kwargs, + ) -> CopyObjectOutput: + raise NotImplementedError + + @handler("CreateBucket") + def create_bucket( + self, + context: RequestContext, + bucket: BucketName, + acl: BucketCannedACL | None = None, + create_bucket_configuration: CreateBucketConfiguration | None = None, + grant_full_control: GrantFullControl | None = None, + grant_read: GrantRead | None = None, + grant_read_acp: GrantReadACP | None = None, + grant_write: GrantWrite | None = None, + grant_write_acp: GrantWriteACP | None = None, + object_lock_enabled_for_bucket: ObjectLockEnabledForBucket | None = None, + object_ownership: ObjectOwnership | None = None, + **kwargs, + ) -> CreateBucketOutput: + raise NotImplementedError + + @handler("CreateBucketMetadataTableConfiguration") + def create_bucket_metadata_table_configuration( + self, + context: RequestContext, + bucket: BucketName, + metadata_table_configuration: MetadataTableConfiguration, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + 
@handler("CreateMultipartUpload") + def create_multipart_upload( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + acl: ObjectCannedACL | None = None, + cache_control: CacheControl | None = None, + content_disposition: ContentDisposition | None = None, + content_encoding: ContentEncoding | None = None, + content_language: ContentLanguage | None = None, + content_type: ContentType | None = None, + expires: Expires | None = None, + grant_full_control: GrantFullControl | None = None, + grant_read: GrantRead | None = None, + grant_read_acp: GrantReadACP | None = None, + grant_write_acp: GrantWriteACP | None = None, + metadata: Metadata | None = None, + server_side_encryption: ServerSideEncryption | None = None, + storage_class: StorageClass | None = None, + website_redirect_location: WebsiteRedirectLocation | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + ssekms_key_id: SSEKMSKeyId | None = None, + ssekms_encryption_context: SSEKMSEncryptionContext | None = None, + bucket_key_enabled: BucketKeyEnabled | None = None, + request_payer: RequestPayer | None = None, + tagging: TaggingHeader | None = None, + object_lock_mode: ObjectLockMode | None = None, + object_lock_retain_until_date: ObjectLockRetainUntilDate | None = None, + object_lock_legal_hold_status: ObjectLockLegalHoldStatus | None = None, + expected_bucket_owner: AccountId | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + checksum_type: ChecksumType | None = None, + **kwargs, + ) -> CreateMultipartUploadOutput: + raise NotImplementedError + + @handler("CreateSession") + def create_session( + self, + context: RequestContext, + bucket: BucketName, + session_mode: SessionMode | None = None, + server_side_encryption: ServerSideEncryption | None = None, + ssekms_key_id: SSEKMSKeyId | None = None, + ssekms_encryption_context: SSEKMSEncryptionContext | None = None, + bucket_key_enabled: BucketKeyEnabled | None = None, + **kwargs, + ) -> CreateSessionOutput: + raise NotImplementedError + + @handler("DeleteBucket") + def delete_bucket( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketAnalyticsConfiguration") + def delete_bucket_analytics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: AnalyticsId, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketCors") + def delete_bucket_cors( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketEncryption") + def delete_bucket_encryption( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketIntelligentTieringConfiguration") + def delete_bucket_intelligent_tiering_configuration( + self, context: RequestContext, bucket: BucketName, id: IntelligentTieringId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketInventoryConfiguration") + def delete_bucket_inventory_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: InventoryId, + expected_bucket_owner: AccountId | None = None, + 
**kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketLifecycle") + def delete_bucket_lifecycle( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketMetadataTableConfiguration") + def delete_bucket_metadata_table_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketMetricsConfiguration") + def delete_bucket_metrics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: MetricsId, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketOwnershipControls") + def delete_bucket_ownership_controls( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketPolicy") + def delete_bucket_policy( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketReplication") + def delete_bucket_replication( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketTagging") + def delete_bucket_tagging( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketWebsite") + def delete_bucket_website( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteObject") + def delete_object( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + mfa: MFA | None = None, + version_id: ObjectVersionId | None = None, + request_payer: RequestPayer | None = None, + bypass_governance_retention: BypassGovernanceRetention | None = None, + expected_bucket_owner: AccountId | None = None, + if_match: IfMatch | None = None, + if_match_last_modified_time: IfMatchLastModifiedTime | None = None, + if_match_size: IfMatchSize | None = None, + **kwargs, + ) -> DeleteObjectOutput: + raise NotImplementedError + + @handler("DeleteObjectTagging") + def delete_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> DeleteObjectTaggingOutput: + raise NotImplementedError + + @handler("DeleteObjects") + def delete_objects( + self, + context: RequestContext, + bucket: BucketName, + delete: Delete, + mfa: MFA | None = None, + request_payer: RequestPayer | None = None, + bypass_governance_retention: BypassGovernanceRetention | None = None, + expected_bucket_owner: AccountId | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + **kwargs, + ) -> DeleteObjectsOutput: + raise NotImplementedError + + @handler("DeletePublicAccessBlock") + def delete_public_access_block( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + 
@handler("GetBucketAccelerateConfiguration") + def get_bucket_accelerate_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + request_payer: RequestPayer | None = None, + **kwargs, + ) -> GetBucketAccelerateConfigurationOutput: + raise NotImplementedError + + @handler("GetBucketAcl") + def get_bucket_acl( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketAclOutput: + raise NotImplementedError + + @handler("GetBucketAnalyticsConfiguration") + def get_bucket_analytics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: AnalyticsId, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketAnalyticsConfigurationOutput: + raise NotImplementedError + + @handler("GetBucketCors") + def get_bucket_cors( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketCorsOutput: + raise NotImplementedError + + @handler("GetBucketEncryption") + def get_bucket_encryption( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketEncryptionOutput: + raise NotImplementedError + + @handler("GetBucketIntelligentTieringConfiguration") + def get_bucket_intelligent_tiering_configuration( + self, context: RequestContext, bucket: BucketName, id: IntelligentTieringId, **kwargs + ) -> GetBucketIntelligentTieringConfigurationOutput: + raise NotImplementedError + + @handler("GetBucketInventoryConfiguration") + def get_bucket_inventory_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: InventoryId, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketInventoryConfigurationOutput: + raise NotImplementedError + + @handler("GetBucketLifecycle") + def get_bucket_lifecycle( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketLifecycleOutput: + raise NotImplementedError + + @handler("GetBucketLifecycleConfiguration") + def get_bucket_lifecycle_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketLifecycleConfigurationOutput: + raise NotImplementedError + + @handler("GetBucketLocation") + def get_bucket_location( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketLocationOutput: + raise NotImplementedError + + @handler("GetBucketLogging") + def get_bucket_logging( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketLoggingOutput: + raise NotImplementedError + + @handler("GetBucketMetadataTableConfiguration") + def get_bucket_metadata_table_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketMetadataTableConfigurationOutput: + raise NotImplementedError + + @handler("GetBucketMetricsConfiguration") + def get_bucket_metrics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: MetricsId, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketMetricsConfigurationOutput: + raise NotImplementedError + + @handler("GetBucketNotification") + def 
get_bucket_notification( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> NotificationConfigurationDeprecated: + raise NotImplementedError + + @handler("GetBucketNotificationConfiguration") + def get_bucket_notification_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> NotificationConfiguration: + raise NotImplementedError + + @handler("GetBucketOwnershipControls") + def get_bucket_ownership_controls( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketOwnershipControlsOutput: + raise NotImplementedError + + @handler("GetBucketPolicy") + def get_bucket_policy( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketPolicyOutput: + raise NotImplementedError + + @handler("GetBucketPolicyStatus") + def get_bucket_policy_status( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketPolicyStatusOutput: + raise NotImplementedError + + @handler("GetBucketReplication") + def get_bucket_replication( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketReplicationOutput: + raise NotImplementedError + + @handler("GetBucketRequestPayment") + def get_bucket_request_payment( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketRequestPaymentOutput: + raise NotImplementedError + + @handler("GetBucketTagging") + def get_bucket_tagging( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketTaggingOutput: + raise NotImplementedError + + @handler("GetBucketVersioning") + def get_bucket_versioning( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketVersioningOutput: + raise NotImplementedError + + @handler("GetBucketWebsite") + def get_bucket_website( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetBucketWebsiteOutput: + raise NotImplementedError + + @handler("GetObject") + def get_object( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + if_match: IfMatch | None = None, + if_modified_since: IfModifiedSince | None = None, + if_none_match: IfNoneMatch | None = None, + if_unmodified_since: IfUnmodifiedSince | None = None, + range: Range | None = None, + response_cache_control: ResponseCacheControl | None = None, + response_content_disposition: ResponseContentDisposition | None = None, + response_content_encoding: ResponseContentEncoding | None = None, + response_content_language: ResponseContentLanguage | None = None, + response_content_type: ResponseContentType | None = None, + response_expires: ResponseExpires | None = None, + version_id: ObjectVersionId | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + request_payer: RequestPayer | None = None, + part_number: PartNumber | None = None, + expected_bucket_owner: AccountId | None = None, + 
checksum_mode: ChecksumMode | None = None, + **kwargs, + ) -> GetObjectOutput: + raise NotImplementedError + + @handler("GetObjectAcl") + def get_object_acl( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetObjectAclOutput: + raise NotImplementedError + + @handler("GetObjectAttributes") + def get_object_attributes( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + object_attributes: ObjectAttributesList, + version_id: ObjectVersionId | None = None, + max_parts: MaxParts | None = None, + part_number_marker: PartNumberMarker | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetObjectAttributesOutput: + raise NotImplementedError + + @handler("GetObjectLegalHold") + def get_object_legal_hold( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetObjectLegalHoldOutput: + raise NotImplementedError + + @handler("GetObjectLockConfiguration") + def get_object_lock_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetObjectLockConfigurationOutput: + raise NotImplementedError + + @handler("GetObjectRetention") + def get_object_retention( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetObjectRetentionOutput: + raise NotImplementedError + + @handler("GetObjectTagging") + def get_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId | None = None, + expected_bucket_owner: AccountId | None = None, + request_payer: RequestPayer | None = None, + **kwargs, + ) -> GetObjectTaggingOutput: + raise NotImplementedError + + @handler("GetObjectTorrent") + def get_object_torrent( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetObjectTorrentOutput: + raise NotImplementedError + + @handler("GetPublicAccessBlock") + def get_public_access_block( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> GetPublicAccessBlockOutput: + raise NotImplementedError + + @handler("HeadBucket") + def head_bucket( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> HeadBucketOutput: + raise NotImplementedError + + @handler("HeadObject") + def head_object( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + if_match: IfMatch | None = None, + if_modified_since: IfModifiedSince | None = None, + if_none_match: IfNoneMatch | None = None, + if_unmodified_since: IfUnmodifiedSince | None = None, + range: Range | None = None, + response_cache_control: 
ResponseCacheControl | None = None, + response_content_disposition: ResponseContentDisposition | None = None, + response_content_encoding: ResponseContentEncoding | None = None, + response_content_language: ResponseContentLanguage | None = None, + response_content_type: ResponseContentType | None = None, + response_expires: ResponseExpires | None = None, + version_id: ObjectVersionId | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + request_payer: RequestPayer | None = None, + part_number: PartNumber | None = None, + expected_bucket_owner: AccountId | None = None, + checksum_mode: ChecksumMode | None = None, + **kwargs, + ) -> HeadObjectOutput: + raise NotImplementedError + + @handler("ListBucketAnalyticsConfigurations") + def list_bucket_analytics_configurations( + self, + context: RequestContext, + bucket: BucketName, + continuation_token: Token | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> ListBucketAnalyticsConfigurationsOutput: + raise NotImplementedError + + @handler("ListBucketIntelligentTieringConfigurations") + def list_bucket_intelligent_tiering_configurations( + self, + context: RequestContext, + bucket: BucketName, + continuation_token: Token | None = None, + **kwargs, + ) -> ListBucketIntelligentTieringConfigurationsOutput: + raise NotImplementedError + + @handler("ListBucketInventoryConfigurations") + def list_bucket_inventory_configurations( + self, + context: RequestContext, + bucket: BucketName, + continuation_token: Token | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> ListBucketInventoryConfigurationsOutput: + raise NotImplementedError + + @handler("ListBucketMetricsConfigurations") + def list_bucket_metrics_configurations( + self, + context: RequestContext, + bucket: BucketName, + continuation_token: Token | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> ListBucketMetricsConfigurationsOutput: + raise NotImplementedError + + @handler("ListBuckets") + def list_buckets( + self, + context: RequestContext, + max_buckets: MaxBuckets | None = None, + continuation_token: Token | None = None, + prefix: Prefix | None = None, + bucket_region: BucketRegion | None = None, + **kwargs, + ) -> ListBucketsOutput: + raise NotImplementedError + + @handler("ListDirectoryBuckets") + def list_directory_buckets( + self, + context: RequestContext, + continuation_token: DirectoryBucketToken | None = None, + max_directory_buckets: MaxDirectoryBuckets | None = None, + **kwargs, + ) -> ListDirectoryBucketsOutput: + raise NotImplementedError + + @handler("ListMultipartUploads") + def list_multipart_uploads( + self, + context: RequestContext, + bucket: BucketName, + delimiter: Delimiter | None = None, + encoding_type: EncodingType | None = None, + key_marker: KeyMarker | None = None, + max_uploads: MaxUploads | None = None, + prefix: Prefix | None = None, + upload_id_marker: UploadIdMarker | None = None, + expected_bucket_owner: AccountId | None = None, + request_payer: RequestPayer | None = None, + **kwargs, + ) -> ListMultipartUploadsOutput: + raise NotImplementedError + + @handler("ListObjectVersions") + def list_object_versions( + self, + context: RequestContext, + bucket: BucketName, + delimiter: Delimiter | None = None, + encoding_type: EncodingType | None = None, + key_marker: KeyMarker | None = None, + max_keys: MaxKeys | None = None, + prefix: Prefix 
| None = None, + version_id_marker: VersionIdMarker | None = None, + expected_bucket_owner: AccountId | None = None, + request_payer: RequestPayer | None = None, + optional_object_attributes: OptionalObjectAttributesList | None = None, + **kwargs, + ) -> ListObjectVersionsOutput: + raise NotImplementedError + + @handler("ListObjects") + def list_objects( + self, + context: RequestContext, + bucket: BucketName, + delimiter: Delimiter | None = None, + encoding_type: EncodingType | None = None, + marker: Marker | None = None, + max_keys: MaxKeys | None = None, + prefix: Prefix | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + optional_object_attributes: OptionalObjectAttributesList | None = None, + **kwargs, + ) -> ListObjectsOutput: + raise NotImplementedError + + @handler("ListObjectsV2") + def list_objects_v2( + self, + context: RequestContext, + bucket: BucketName, + delimiter: Delimiter | None = None, + encoding_type: EncodingType | None = None, + max_keys: MaxKeys | None = None, + prefix: Prefix | None = None, + continuation_token: Token | None = None, + fetch_owner: FetchOwner | None = None, + start_after: StartAfter | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + optional_object_attributes: OptionalObjectAttributesList | None = None, + **kwargs, + ) -> ListObjectsV2Output: + raise NotImplementedError + + @handler("ListParts") + def list_parts( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + upload_id: MultipartUploadId, + max_parts: MaxParts | None = None, + part_number_marker: PartNumberMarker | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + **kwargs, + ) -> ListPartsOutput: + raise NotImplementedError + + @handler("PutBucketAccelerateConfiguration") + def put_bucket_accelerate_configuration( + self, + context: RequestContext, + bucket: BucketName, + accelerate_configuration: AccelerateConfiguration, + expected_bucket_owner: AccountId | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketAcl") + def put_bucket_acl( + self, + context: RequestContext, + bucket: BucketName, + acl: BucketCannedACL | None = None, + access_control_policy: AccessControlPolicy | None = None, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + grant_full_control: GrantFullControl | None = None, + grant_read: GrantRead | None = None, + grant_read_acp: GrantReadACP | None = None, + grant_write: GrantWrite | None = None, + grant_write_acp: GrantWriteACP | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketAnalyticsConfiguration") + def put_bucket_analytics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: AnalyticsId, + analytics_configuration: AnalyticsConfiguration, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketCors") + def put_bucket_cors( + self, + context: RequestContext, + bucket: BucketName, + cors_configuration: CORSConfiguration, + content_md5: ContentMD5 | None = None, + checksum_algorithm: 
ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketEncryption") + def put_bucket_encryption( + self, + context: RequestContext, + bucket: BucketName, + server_side_encryption_configuration: ServerSideEncryptionConfiguration, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketIntelligentTieringConfiguration") + def put_bucket_intelligent_tiering_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: IntelligentTieringId, + intelligent_tiering_configuration: IntelligentTieringConfiguration, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketInventoryConfiguration") + def put_bucket_inventory_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: InventoryId, + inventory_configuration: InventoryConfiguration, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketLifecycle") + def put_bucket_lifecycle( + self, + context: RequestContext, + bucket: BucketName, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + lifecycle_configuration: LifecycleConfiguration | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketLifecycleConfiguration") + def put_bucket_lifecycle_configuration( + self, + context: RequestContext, + bucket: BucketName, + checksum_algorithm: ChecksumAlgorithm | None = None, + lifecycle_configuration: BucketLifecycleConfiguration | None = None, + expected_bucket_owner: AccountId | None = None, + transition_default_minimum_object_size: TransitionDefaultMinimumObjectSize | None = None, + **kwargs, + ) -> PutBucketLifecycleConfigurationOutput: + raise NotImplementedError + + @handler("PutBucketLogging") + def put_bucket_logging( + self, + context: RequestContext, + bucket: BucketName, + bucket_logging_status: BucketLoggingStatus, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketMetricsConfiguration") + def put_bucket_metrics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: MetricsId, + metrics_configuration: MetricsConfiguration, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketNotification") + def put_bucket_notification( + self, + context: RequestContext, + bucket: BucketName, + notification_configuration: NotificationConfigurationDeprecated, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketNotificationConfiguration") + def put_bucket_notification_configuration( + self, + context: RequestContext, + bucket: BucketName, + notification_configuration: NotificationConfiguration, + expected_bucket_owner: AccountId | None = None, + skip_destination_validation: SkipValidation | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketOwnershipControls") + def 
put_bucket_ownership_controls( + self, + context: RequestContext, + bucket: BucketName, + ownership_controls: OwnershipControls, + content_md5: ContentMD5 | None = None, + expected_bucket_owner: AccountId | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketPolicy") + def put_bucket_policy( + self, + context: RequestContext, + bucket: BucketName, + policy: Policy, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + confirm_remove_self_bucket_access: ConfirmRemoveSelfBucketAccess | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketReplication") + def put_bucket_replication( + self, + context: RequestContext, + bucket: BucketName, + replication_configuration: ReplicationConfiguration, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + token: ObjectLockToken | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketRequestPayment") + def put_bucket_request_payment( + self, + context: RequestContext, + bucket: BucketName, + request_payment_configuration: RequestPaymentConfiguration, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketTagging") + def put_bucket_tagging( + self, + context: RequestContext, + bucket: BucketName, + tagging: Tagging, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketVersioning") + def put_bucket_versioning( + self, + context: RequestContext, + bucket: BucketName, + versioning_configuration: VersioningConfiguration, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + mfa: MFA | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketWebsite") + def put_bucket_website( + self, + context: RequestContext, + bucket: BucketName, + website_configuration: WebsiteConfiguration, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutObject") + def put_object( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + acl: ObjectCannedACL | None = None, + body: IO[Body] | None = None, + cache_control: CacheControl | None = None, + content_disposition: ContentDisposition | None = None, + content_encoding: ContentEncoding | None = None, + content_language: ContentLanguage | None = None, + content_length: ContentLength | None = None, + content_md5: ContentMD5 | None = None, + content_type: ContentType | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + checksum_crc32: ChecksumCRC32 | None = None, + checksum_crc32_c: ChecksumCRC32C | None = None, + checksum_crc64_nvme: ChecksumCRC64NVME | None = None, + checksum_sha1: ChecksumSHA1 | None = None, + checksum_sha256: ChecksumSHA256 | None = None, + expires: Expires | None = None, + if_match: IfMatch | None = None, + 
if_none_match: IfNoneMatch | None = None, + grant_full_control: GrantFullControl | None = None, + grant_read: GrantRead | None = None, + grant_read_acp: GrantReadACP | None = None, + grant_write_acp: GrantWriteACP | None = None, + write_offset_bytes: WriteOffsetBytes | None = None, + metadata: Metadata | None = None, + server_side_encryption: ServerSideEncryption | None = None, + storage_class: StorageClass | None = None, + website_redirect_location: WebsiteRedirectLocation | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + ssekms_key_id: SSEKMSKeyId | None = None, + ssekms_encryption_context: SSEKMSEncryptionContext | None = None, + bucket_key_enabled: BucketKeyEnabled | None = None, + request_payer: RequestPayer | None = None, + tagging: TaggingHeader | None = None, + object_lock_mode: ObjectLockMode | None = None, + object_lock_retain_until_date: ObjectLockRetainUntilDate | None = None, + object_lock_legal_hold_status: ObjectLockLegalHoldStatus | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> PutObjectOutput: + raise NotImplementedError + + @handler("PutObjectAcl") + def put_object_acl( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + acl: ObjectCannedACL | None = None, + access_control_policy: AccessControlPolicy | None = None, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + grant_full_control: GrantFullControl | None = None, + grant_read: GrantRead | None = None, + grant_read_acp: GrantReadACP | None = None, + grant_write: GrantWrite | None = None, + grant_write_acp: GrantWriteACP | None = None, + request_payer: RequestPayer | None = None, + version_id: ObjectVersionId | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> PutObjectAclOutput: + raise NotImplementedError + + @handler("PutObjectLegalHold") + def put_object_legal_hold( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + legal_hold: ObjectLockLegalHold | None = None, + request_payer: RequestPayer | None = None, + version_id: ObjectVersionId | None = None, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> PutObjectLegalHoldOutput: + raise NotImplementedError + + @handler("PutObjectLockConfiguration") + def put_object_lock_configuration( + self, + context: RequestContext, + bucket: BucketName, + object_lock_configuration: ObjectLockConfiguration | None = None, + request_payer: RequestPayer | None = None, + token: ObjectLockToken | None = None, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> PutObjectLockConfigurationOutput: + raise NotImplementedError + + @handler("PutObjectRetention") + def put_object_retention( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + retention: ObjectLockRetention | None = None, + request_payer: RequestPayer | None = None, + version_id: ObjectVersionId | None = None, + bypass_governance_retention: BypassGovernanceRetention | None = None, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> PutObjectRetentionOutput: + raise 
NotImplementedError + + @handler("PutObjectTagging") + def put_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + tagging: Tagging, + version_id: ObjectVersionId | None = None, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + request_payer: RequestPayer | None = None, + **kwargs, + ) -> PutObjectTaggingOutput: + raise NotImplementedError + + @handler("PutPublicAccessBlock") + def put_public_access_block( + self, + context: RequestContext, + bucket: BucketName, + public_access_block_configuration: PublicAccessBlockConfiguration, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RestoreObject") + def restore_object( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId | None = None, + restore_request: RestoreRequest | None = None, + request_payer: RequestPayer | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> RestoreObjectOutput: + raise NotImplementedError + + @handler("SelectObjectContent") + def select_object_content( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + expression: Expression, + expression_type: ExpressionType, + input_serialization: InputSerialization, + output_serialization: OutputSerialization, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + request_progress: RequestProgress | None = None, + scan_range: ScanRange | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> SelectObjectContentOutput: + raise NotImplementedError + + @handler("UploadPart") + def upload_part( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + part_number: PartNumber, + upload_id: MultipartUploadId, + body: IO[Body] | None = None, + content_length: ContentLength | None = None, + content_md5: ContentMD5 | None = None, + checksum_algorithm: ChecksumAlgorithm | None = None, + checksum_crc32: ChecksumCRC32 | None = None, + checksum_crc32_c: ChecksumCRC32C | None = None, + checksum_crc64_nvme: ChecksumCRC64NVME | None = None, + checksum_sha1: ChecksumSHA1 | None = None, + checksum_sha256: ChecksumSHA256 | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + **kwargs, + ) -> UploadPartOutput: + raise NotImplementedError + + @handler("UploadPartCopy") + def upload_part_copy( + self, + context: RequestContext, + bucket: BucketName, + copy_source: CopySource, + key: ObjectKey, + part_number: PartNumber, + upload_id: MultipartUploadId, + copy_source_if_match: CopySourceIfMatch | None = None, + copy_source_if_modified_since: CopySourceIfModifiedSince | None = None, + copy_source_if_none_match: CopySourceIfNoneMatch | None = None, + copy_source_if_unmodified_since: CopySourceIfUnmodifiedSince | None = None, + copy_source_range: CopySourceRange | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + sse_customer_key: 
SSECustomerKey | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + copy_source_sse_customer_algorithm: CopySourceSSECustomerAlgorithm | None = None, + copy_source_sse_customer_key: CopySourceSSECustomerKey | None = None, + copy_source_sse_customer_key_md5: CopySourceSSECustomerKeyMD5 | None = None, + request_payer: RequestPayer | None = None, + expected_bucket_owner: AccountId | None = None, + expected_source_bucket_owner: AccountId | None = None, + **kwargs, + ) -> UploadPartCopyOutput: + raise NotImplementedError + + @handler("WriteGetObjectResponse") + def write_get_object_response( + self, + context: RequestContext, + request_route: RequestRoute, + request_token: RequestToken, + body: IO[Body] | None = None, + status_code: GetObjectResponseStatusCode | None = None, + error_code: ErrorCode | None = None, + error_message: ErrorMessage | None = None, + accept_ranges: AcceptRanges | None = None, + cache_control: CacheControl | None = None, + content_disposition: ContentDisposition | None = None, + content_encoding: ContentEncoding | None = None, + content_language: ContentLanguage | None = None, + content_length: ContentLength | None = None, + content_range: ContentRange | None = None, + content_type: ContentType | None = None, + checksum_crc32: ChecksumCRC32 | None = None, + checksum_crc32_c: ChecksumCRC32C | None = None, + checksum_crc64_nvme: ChecksumCRC64NVME | None = None, + checksum_sha1: ChecksumSHA1 | None = None, + checksum_sha256: ChecksumSHA256 | None = None, + delete_marker: DeleteMarker | None = None, + e_tag: ETag | None = None, + expires: Expires | None = None, + expiration: Expiration | None = None, + last_modified: LastModified | None = None, + missing_meta: MissingMeta | None = None, + metadata: Metadata | None = None, + object_lock_mode: ObjectLockMode | None = None, + object_lock_legal_hold_status: ObjectLockLegalHoldStatus | None = None, + object_lock_retain_until_date: ObjectLockRetainUntilDate | None = None, + parts_count: PartsCount | None = None, + replication_status: ReplicationStatus | None = None, + request_charged: RequestCharged | None = None, + restore: Restore | None = None, + server_side_encryption: ServerSideEncryption | None = None, + sse_customer_algorithm: SSECustomerAlgorithm | None = None, + ssekms_key_id: SSEKMSKeyId | None = None, + sse_customer_key_md5: SSECustomerKeyMD5 | None = None, + storage_class: StorageClass | None = None, + tag_count: TagCount | None = None, + version_id: ObjectVersionId | None = None, + bucket_key_enabled: BucketKeyEnabled | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PostObject") + def post_object( + self, context: RequestContext, bucket: BucketName, body: IO[Body] | None = None, **kwargs + ) -> PostResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/s3control/__init__.py b/localstack-core/localstack/aws/api/s3control/__init__.py new file mode 100644 index 0000000000000..2f8768c4892c1 --- /dev/null +++ b/localstack-core/localstack/aws/api/s3control/__init__.py @@ -0,0 +1,3267 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AccessGrantArn = str +AccessGrantId = str +AccessGrantsInstanceArn = str +AccessGrantsInstanceId = str +AccessGrantsLocationArn = str +AccessGrantsLocationId = str +AccessKeyId = str +AccessPointName = str +AccountId = str +Alias = str +AsyncRequestStatus = 
str
+AsyncRequestTokenARN = str
+AwsLambdaTransformationPayload = str
+AwsOrgArn = str
+Boolean = bool
+BucketIdentifierString = str
+BucketName = str
+ConfigId = str
+ConfirmRemoveSelfBucketAccess = bool
+ConfirmationRequired = bool
+ContinuationToken = str
+Days = int
+DaysAfterInitiation = int
+DurationSeconds = int
+ExceptionMessage = str
+ExpiredObjectDeleteMarker = bool
+FunctionArnString = str
+GrantFullControl = str
+GrantRead = str
+GrantReadACP = str
+GrantWrite = str
+GrantWriteACP = str
+GranteeIdentifier = str
+IAMRoleArn = str
+ID = str
+IdentityCenterApplicationArn = str
+IdentityCenterArn = str
+IsEnabled = bool
+IsPublic = bool
+JobArn = str
+JobFailureCode = str
+JobFailureReason = str
+JobId = str
+JobPriority = int
+JobStatusUpdateReason = str
+KmsKeyArnString = str
+Location = str
+MFA = str
+ManifestPrefixString = str
+MaxLength1024String = str
+MaxResults = int
+MinStorageBytesPercentage = float
+Minutes = int
+MultiRegionAccessPointAlias = str
+MultiRegionAccessPointClientToken = str
+MultiRegionAccessPointId = str
+MultiRegionAccessPointName = str
+NoSuchPublicAccessBlockConfigurationMessage = str
+NonEmptyMaxLength1024String = str
+NonEmptyMaxLength2048String = str
+NonEmptyMaxLength256String = str
+NonEmptyMaxLength64String = str
+NoncurrentVersionCount = int
+ObjectAgeValue = int
+ObjectLambdaAccessPointAliasValue = str
+ObjectLambdaAccessPointArn = str
+ObjectLambdaAccessPointName = str
+ObjectLambdaPolicy = str
+ObjectLambdaSupportingAccessPointArn = str
+ObjectLockEnabledForBucket = bool
+Organization = str
+Policy = str
+PolicyDocument = str
+Prefix = str
+Priority = int
+PublicAccessBlockEnabled = bool
+RegionName = str
+ReplicaKmsKeyID = str
+ReportPrefixString = str
+Role = str
+S3AWSRegion = str
+S3AccessPointArn = str
+S3BucketArnString = str
+S3ExpirationInDays = int
+S3KeyArnString = str
+S3ObjectVersionId = str
+S3Prefix = str
+S3RegionalBucketArn = str
+S3RegionalOrS3ExpressBucketArnString = str
+S3ResourceArn = str
+SSEKMSKeyId = str
+SecretAccessKey = str
+SessionToken = str
+Setting = bool
+StorageLensArn = str
+StorageLensGroupArn = str
+StorageLensGroupName = str
+StorageLensPrefixLevelDelimiter = str
+StorageLensPrefixLevelMaxDepth = int
+StringForNextToken = str
+Suffix = str
+SuspendedCause = str
+TagKeyString = str
+TagValueString = str
+TrafficDialPercentage = int
+VpcId = str
+
+
+class AsyncOperationName(StrEnum):
+    CreateMultiRegionAccessPoint = "CreateMultiRegionAccessPoint"
+    DeleteMultiRegionAccessPoint = "DeleteMultiRegionAccessPoint"
+    PutMultiRegionAccessPointPolicy = "PutMultiRegionAccessPointPolicy"
+
+
+class BucketCannedACL(StrEnum):
+    private = "private"
+    public_read = "public-read"
+    public_read_write = "public-read-write"
+    authenticated_read = "authenticated-read"
+
+
+class BucketLocationConstraint(StrEnum):
+    EU = "EU"
+    eu_west_1 = "eu-west-1"
+    us_west_1 = "us-west-1"
+    us_west_2 = "us-west-2"
+    ap_south_1 = "ap-south-1"
+    ap_southeast_1 = "ap-southeast-1"
+    ap_southeast_2 = "ap-southeast-2"
+    ap_northeast_1 = "ap-northeast-1"
+    sa_east_1 = "sa-east-1"
+    cn_north_1 = "cn-north-1"
+    eu_central_1 = "eu-central-1"
+
+
+class BucketVersioningStatus(StrEnum):
+    Enabled = "Enabled"
+    Suspended = "Suspended"
+
+
+class DeleteMarkerReplicationStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class ExistingObjectReplicationStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class ExpirationStatus(StrEnum):
+    Enabled = "Enabled"
+    Disabled = "Disabled"
+
+
+class Format(StrEnum):
+    CSV = "CSV"
+ Parquet = "Parquet" + + +class GeneratedManifestFormat(StrEnum): + S3InventoryReport_CSV_20211130 = "S3InventoryReport_CSV_20211130" + + +class GranteeType(StrEnum): + DIRECTORY_USER = "DIRECTORY_USER" + DIRECTORY_GROUP = "DIRECTORY_GROUP" + IAM = "IAM" + + +class JobManifestFieldName(StrEnum): + Ignore = "Ignore" + Bucket = "Bucket" + Key = "Key" + VersionId = "VersionId" + + +class JobManifestFormat(StrEnum): + S3BatchOperations_CSV_20180820 = "S3BatchOperations_CSV_20180820" + S3InventoryReport_CSV_20161130 = "S3InventoryReport_CSV_20161130" + + +class JobReportFormat(StrEnum): + Report_CSV_20180820 = "Report_CSV_20180820" + + +class JobReportScope(StrEnum): + AllTasks = "AllTasks" + FailedTasksOnly = "FailedTasksOnly" + + +class JobStatus(StrEnum): + Active = "Active" + Cancelled = "Cancelled" + Cancelling = "Cancelling" + Complete = "Complete" + Completing = "Completing" + Failed = "Failed" + Failing = "Failing" + New = "New" + Paused = "Paused" + Pausing = "Pausing" + Preparing = "Preparing" + Ready = "Ready" + Suspended = "Suspended" + + +class MFADelete(StrEnum): + Enabled = "Enabled" + Disabled = "Disabled" + + +class MFADeleteStatus(StrEnum): + Enabled = "Enabled" + Disabled = "Disabled" + + +class MetricsStatus(StrEnum): + Enabled = "Enabled" + Disabled = "Disabled" + + +class MultiRegionAccessPointStatus(StrEnum): + READY = "READY" + INCONSISTENT_ACROSS_REGIONS = "INCONSISTENT_ACROSS_REGIONS" + CREATING = "CREATING" + PARTIALLY_CREATED = "PARTIALLY_CREATED" + PARTIALLY_DELETED = "PARTIALLY_DELETED" + DELETING = "DELETING" + + +class NetworkOrigin(StrEnum): + Internet = "Internet" + VPC = "VPC" + + +class ObjectLambdaAccessPointAliasStatus(StrEnum): + PROVISIONING = "PROVISIONING" + READY = "READY" + + +class ObjectLambdaAllowedFeature(StrEnum): + GetObject_Range = "GetObject-Range" + GetObject_PartNumber = "GetObject-PartNumber" + HeadObject_Range = "HeadObject-Range" + HeadObject_PartNumber = "HeadObject-PartNumber" + + +class ObjectLambdaTransformationConfigurationAction(StrEnum): + GetObject = "GetObject" + HeadObject = "HeadObject" + ListObjects = "ListObjects" + ListObjectsV2 = "ListObjectsV2" + + +class OperationName(StrEnum): + LambdaInvoke = "LambdaInvoke" + S3PutObjectCopy = "S3PutObjectCopy" + S3PutObjectAcl = "S3PutObjectAcl" + S3PutObjectTagging = "S3PutObjectTagging" + S3DeleteObjectTagging = "S3DeleteObjectTagging" + S3InitiateRestoreObject = "S3InitiateRestoreObject" + S3PutObjectLegalHold = "S3PutObjectLegalHold" + S3PutObjectRetention = "S3PutObjectRetention" + S3ReplicateObject = "S3ReplicateObject" + + +class OutputSchemaVersion(StrEnum): + V_1 = "V_1" + + +class OwnerOverride(StrEnum): + Destination = "Destination" + + +class Permission(StrEnum): + READ = "READ" + WRITE = "WRITE" + READWRITE = "READWRITE" + + +class Privilege(StrEnum): + Minimal = "Minimal" + Default = "Default" + + +class ReplicaModificationsStatus(StrEnum): + Enabled = "Enabled" + Disabled = "Disabled" + + +class ReplicationRuleStatus(StrEnum): + Enabled = "Enabled" + Disabled = "Disabled" + + +class ReplicationStatus(StrEnum): + COMPLETED = "COMPLETED" + FAILED = "FAILED" + REPLICA = "REPLICA" + NONE = "NONE" + + +class ReplicationStorageClass(StrEnum): + STANDARD = "STANDARD" + REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY" + STANDARD_IA = "STANDARD_IA" + ONEZONE_IA = "ONEZONE_IA" + INTELLIGENT_TIERING = "INTELLIGENT_TIERING" + GLACIER = "GLACIER" + DEEP_ARCHIVE = "DEEP_ARCHIVE" + OUTPOSTS = "OUTPOSTS" + GLACIER_IR = "GLACIER_IR" + + +class ReplicationTimeStatus(StrEnum): + Enabled = 
"Enabled" + Disabled = "Disabled" + + +class RequestedJobStatus(StrEnum): + Cancelled = "Cancelled" + Ready = "Ready" + + +class S3CannedAccessControlList(StrEnum): + private = "private" + public_read = "public-read" + public_read_write = "public-read-write" + aws_exec_read = "aws-exec-read" + authenticated_read = "authenticated-read" + bucket_owner_read = "bucket-owner-read" + bucket_owner_full_control = "bucket-owner-full-control" + + +class S3ChecksumAlgorithm(StrEnum): + CRC32 = "CRC32" + CRC32C = "CRC32C" + SHA1 = "SHA1" + SHA256 = "SHA256" + CRC64NVME = "CRC64NVME" + + +class S3GlacierJobTier(StrEnum): + BULK = "BULK" + STANDARD = "STANDARD" + + +class S3GranteeTypeIdentifier(StrEnum): + id = "id" + emailAddress = "emailAddress" + uri = "uri" + + +class S3MetadataDirective(StrEnum): + COPY = "COPY" + REPLACE = "REPLACE" + + +class S3ObjectLockLegalHoldStatus(StrEnum): + OFF = "OFF" + ON = "ON" + + +class S3ObjectLockMode(StrEnum): + COMPLIANCE = "COMPLIANCE" + GOVERNANCE = "GOVERNANCE" + + +class S3ObjectLockRetentionMode(StrEnum): + COMPLIANCE = "COMPLIANCE" + GOVERNANCE = "GOVERNANCE" + + +class S3Permission(StrEnum): + FULL_CONTROL = "FULL_CONTROL" + READ = "READ" + WRITE = "WRITE" + READ_ACP = "READ_ACP" + WRITE_ACP = "WRITE_ACP" + + +class S3PrefixType(StrEnum): + Object = "Object" + + +class S3SSEAlgorithm(StrEnum): + AES256 = "AES256" + KMS = "KMS" + + +class S3StorageClass(StrEnum): + STANDARD = "STANDARD" + STANDARD_IA = "STANDARD_IA" + ONEZONE_IA = "ONEZONE_IA" + GLACIER = "GLACIER" + INTELLIGENT_TIERING = "INTELLIGENT_TIERING" + DEEP_ARCHIVE = "DEEP_ARCHIVE" + GLACIER_IR = "GLACIER_IR" + + +class ScopePermission(StrEnum): + GetObject = "GetObject" + GetObjectAttributes = "GetObjectAttributes" + ListMultipartUploadParts = "ListMultipartUploadParts" + ListBucket = "ListBucket" + ListBucketMultipartUploads = "ListBucketMultipartUploads" + PutObject = "PutObject" + DeleteObject = "DeleteObject" + AbortMultipartUpload = "AbortMultipartUpload" + + +class SseKmsEncryptedObjectsStatus(StrEnum): + Enabled = "Enabled" + Disabled = "Disabled" + + +class TransitionStorageClass(StrEnum): + GLACIER = "GLACIER" + STANDARD_IA = "STANDARD_IA" + ONEZONE_IA = "ONEZONE_IA" + INTELLIGENT_TIERING = "INTELLIGENT_TIERING" + DEEP_ARCHIVE = "DEEP_ARCHIVE" + + +class BadRequestException(ServiceException): + code: str = "BadRequestException" + sender_fault: bool = False + status_code: int = 400 + + +class BucketAlreadyExists(ServiceException): + code: str = "BucketAlreadyExists" + sender_fault: bool = False + status_code: int = 400 + + +class BucketAlreadyOwnedByYou(ServiceException): + code: str = "BucketAlreadyOwnedByYou" + sender_fault: bool = False + status_code: int = 400 + + +class IdempotencyException(ServiceException): + code: str = "IdempotencyException" + sender_fault: bool = False + status_code: int = 400 + + +class InternalServiceException(ServiceException): + code: str = "InternalServiceException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidNextTokenException(ServiceException): + code: str = "InvalidNextTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidRequestException(ServiceException): + code: str = "InvalidRequestException" + sender_fault: bool = False + status_code: int = 400 + + +class JobStatusException(ServiceException): + code: str = "JobStatusException" + sender_fault: bool = False + status_code: int = 400 + + +class NoSuchPublicAccessBlockConfiguration(ServiceException): + code: str = 
"NoSuchPublicAccessBlockConfiguration" + sender_fault: bool = False + status_code: int = 404 + + +class NotFoundException(ServiceException): + code: str = "NotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyRequestsException(ServiceException): + code: str = "TooManyRequestsException" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTagsException(ServiceException): + code: str = "TooManyTagsException" + sender_fault: bool = False + status_code: int = 400 + + +class AbortIncompleteMultipartUpload(TypedDict, total=False): + DaysAfterInitiation: Optional[DaysAfterInitiation] + + +class AccessControlTranslation(TypedDict, total=False): + Owner: OwnerOverride + + +CreationTimestamp = datetime + + +class ListAccessGrantsInstanceEntry(TypedDict, total=False): + AccessGrantsInstanceId: Optional[AccessGrantsInstanceId] + AccessGrantsInstanceArn: Optional[AccessGrantsInstanceArn] + CreatedAt: Optional[CreationTimestamp] + IdentityCenterArn: Optional[IdentityCenterArn] + IdentityCenterInstanceArn: Optional[IdentityCenterArn] + IdentityCenterApplicationArn: Optional[IdentityCenterApplicationArn] + + +AccessGrantsInstancesList = List[ListAccessGrantsInstanceEntry] + + +class AccessGrantsLocationConfiguration(TypedDict, total=False): + S3SubPrefix: Optional[S3Prefix] + + +class Grantee(TypedDict, total=False): + GranteeType: Optional[GranteeType] + GranteeIdentifier: Optional[GranteeIdentifier] + + +class ListAccessGrantEntry(TypedDict, total=False): + CreatedAt: Optional[CreationTimestamp] + AccessGrantId: Optional[AccessGrantId] + AccessGrantArn: Optional[AccessGrantArn] + Grantee: Optional[Grantee] + Permission: Optional[Permission] + AccessGrantsLocationId: Optional[AccessGrantsLocationId] + AccessGrantsLocationConfiguration: Optional[AccessGrantsLocationConfiguration] + GrantScope: Optional[S3Prefix] + ApplicationArn: Optional[IdentityCenterApplicationArn] + + +AccessGrantsList = List[ListAccessGrantEntry] + + +class ListAccessGrantsLocationsEntry(TypedDict, total=False): + CreatedAt: Optional[CreationTimestamp] + AccessGrantsLocationId: Optional[AccessGrantsLocationId] + AccessGrantsLocationArn: Optional[AccessGrantsLocationArn] + LocationScope: Optional[S3Prefix] + IAMRoleArn: Optional[IAMRoleArn] + + +AccessGrantsLocationsList = List[ListAccessGrantsLocationsEntry] + + +class VpcConfiguration(TypedDict, total=False): + VpcId: VpcId + + +class AccessPoint(TypedDict, total=False): + Name: AccessPointName + NetworkOrigin: NetworkOrigin + VpcConfiguration: Optional[VpcConfiguration] + Bucket: BucketName + AccessPointArn: Optional[S3AccessPointArn] + Alias: Optional[Alias] + BucketAccountId: Optional[AccountId] + + +AccessPointList = List[AccessPoint] +StorageLensGroupLevelExclude = List[StorageLensGroupArn] +StorageLensGroupLevelInclude = List[StorageLensGroupArn] + + +class StorageLensGroupLevelSelectionCriteria(TypedDict, total=False): + Include: Optional[StorageLensGroupLevelInclude] + Exclude: Optional[StorageLensGroupLevelExclude] + + +class StorageLensGroupLevel(TypedDict, total=False): + SelectionCriteria: Optional[StorageLensGroupLevelSelectionCriteria] + + +class DetailedStatusCodesMetrics(TypedDict, total=False): + IsEnabled: Optional[IsEnabled] + + +class AdvancedDataProtectionMetrics(TypedDict, total=False): + IsEnabled: Optional[IsEnabled] + + +class AdvancedCostOptimizationMetrics(TypedDict, total=False): + IsEnabled: Optional[IsEnabled] + + +class SelectionCriteria(TypedDict, total=False): + Delimiter: 
Optional[StorageLensPrefixLevelDelimiter] + MaxDepth: Optional[StorageLensPrefixLevelMaxDepth] + MinStorageBytesPercentage: Optional[MinStorageBytesPercentage] + + +class PrefixLevelStorageMetrics(TypedDict, total=False): + IsEnabled: Optional[IsEnabled] + SelectionCriteria: Optional[SelectionCriteria] + + +class PrefixLevel(TypedDict, total=False): + StorageMetrics: PrefixLevelStorageMetrics + + +class ActivityMetrics(TypedDict, total=False): + IsEnabled: Optional[IsEnabled] + + +class BucketLevel(TypedDict, total=False): + ActivityMetrics: Optional[ActivityMetrics] + PrefixLevel: Optional[PrefixLevel] + AdvancedCostOptimizationMetrics: Optional[AdvancedCostOptimizationMetrics] + AdvancedDataProtectionMetrics: Optional[AdvancedDataProtectionMetrics] + DetailedStatusCodesMetrics: Optional[DetailedStatusCodesMetrics] + + +class AccountLevel(TypedDict, total=False): + ActivityMetrics: Optional[ActivityMetrics] + BucketLevel: BucketLevel + AdvancedCostOptimizationMetrics: Optional[AdvancedCostOptimizationMetrics] + AdvancedDataProtectionMetrics: Optional[AdvancedDataProtectionMetrics] + DetailedStatusCodesMetrics: Optional[DetailedStatusCodesMetrics] + StorageLensGroupLevel: Optional[StorageLensGroupLevel] + + +class AssociateAccessGrantsIdentityCenterRequest(ServiceRequest): + AccountId: AccountId + IdentityCenterArn: IdentityCenterArn + + +AsyncCreationTimestamp = datetime + + +class AsyncErrorDetails(TypedDict, total=False): + Code: Optional[MaxLength1024String] + Message: Optional[MaxLength1024String] + Resource: Optional[MaxLength1024String] + RequestId: Optional[MaxLength1024String] + + +class MultiRegionAccessPointRegionalResponse(TypedDict, total=False): + Name: Optional[RegionName] + RequestStatus: Optional[AsyncRequestStatus] + + +MultiRegionAccessPointRegionalResponseList = List[MultiRegionAccessPointRegionalResponse] + + +class MultiRegionAccessPointsAsyncResponse(TypedDict, total=False): + Regions: Optional[MultiRegionAccessPointRegionalResponseList] + + +class AsyncResponseDetails(TypedDict, total=False): + MultiRegionAccessPointDetails: Optional[MultiRegionAccessPointsAsyncResponse] + ErrorDetails: Optional[AsyncErrorDetails] + + +class PutMultiRegionAccessPointPolicyInput(TypedDict, total=False): + Name: MultiRegionAccessPointName + Policy: Policy + + +class DeleteMultiRegionAccessPointInput(TypedDict, total=False): + Name: MultiRegionAccessPointName + + +class Region(TypedDict, total=False): + Bucket: BucketName + BucketAccountId: Optional[AccountId] + + +RegionCreationList = List[Region] + + +class PublicAccessBlockConfiguration(TypedDict, total=False): + BlockPublicAcls: Optional[Setting] + IgnorePublicAcls: Optional[Setting] + BlockPublicPolicy: Optional[Setting] + RestrictPublicBuckets: Optional[Setting] + + +class CreateMultiRegionAccessPointInput(TypedDict, total=False): + Name: MultiRegionAccessPointName + PublicAccessBlock: Optional[PublicAccessBlockConfiguration] + Regions: RegionCreationList + + +class AsyncRequestParameters(TypedDict, total=False): + CreateMultiRegionAccessPointRequest: Optional[CreateMultiRegionAccessPointInput] + DeleteMultiRegionAccessPointRequest: Optional[DeleteMultiRegionAccessPointInput] + PutMultiRegionAccessPointPolicyRequest: Optional[PutMultiRegionAccessPointPolicyInput] + + +class AsyncOperation(TypedDict, total=False): + CreationTime: Optional[AsyncCreationTimestamp] + Operation: Optional[AsyncOperationName] + RequestTokenARN: Optional[AsyncRequestTokenARN] + RequestParameters: Optional[AsyncRequestParameters] + RequestStatus: 
Optional[AsyncRequestStatus] + ResponseDetails: Optional[AsyncResponseDetails] + + +class AwsLambdaTransformation(TypedDict, total=False): + FunctionArn: FunctionArnString + FunctionPayload: Optional[AwsLambdaTransformationPayload] + + +Buckets = List[S3BucketArnString] + + +class ListCallerAccessGrantsEntry(TypedDict, total=False): + Permission: Optional[Permission] + GrantScope: Optional[S3Prefix] + ApplicationArn: Optional[IdentityCenterApplicationArn] + + +CallerAccessGrantsList = List[ListCallerAccessGrantsEntry] + + +class CloudWatchMetrics(TypedDict, total=False): + IsEnabled: IsEnabled + + +class Tag(TypedDict, total=False): + Key: TagKeyString + Value: TagValueString + + +TagList = List[Tag] + + +class CreateAccessGrantRequest(ServiceRequest): + AccountId: AccountId + AccessGrantsLocationId: AccessGrantsLocationId + AccessGrantsLocationConfiguration: Optional[AccessGrantsLocationConfiguration] + Grantee: Grantee + Permission: Permission + ApplicationArn: Optional[IdentityCenterApplicationArn] + S3PrefixType: Optional[S3PrefixType] + Tags: Optional[TagList] + + +class CreateAccessGrantResult(TypedDict, total=False): + CreatedAt: Optional[CreationTimestamp] + AccessGrantId: Optional[AccessGrantId] + AccessGrantArn: Optional[AccessGrantArn] + Grantee: Optional[Grantee] + AccessGrantsLocationId: Optional[AccessGrantsLocationId] + AccessGrantsLocationConfiguration: Optional[AccessGrantsLocationConfiguration] + Permission: Optional[Permission] + ApplicationArn: Optional[IdentityCenterApplicationArn] + GrantScope: Optional[S3Prefix] + + +class CreateAccessGrantsInstanceRequest(ServiceRequest): + AccountId: AccountId + IdentityCenterArn: Optional[IdentityCenterArn] + Tags: Optional[TagList] + + +class CreateAccessGrantsInstanceResult(TypedDict, total=False): + CreatedAt: Optional[CreationTimestamp] + AccessGrantsInstanceId: Optional[AccessGrantsInstanceId] + AccessGrantsInstanceArn: Optional[AccessGrantsInstanceArn] + IdentityCenterArn: Optional[IdentityCenterArn] + IdentityCenterInstanceArn: Optional[IdentityCenterArn] + IdentityCenterApplicationArn: Optional[IdentityCenterApplicationArn] + + +class CreateAccessGrantsLocationRequest(ServiceRequest): + AccountId: AccountId + LocationScope: S3Prefix + IAMRoleArn: IAMRoleArn + Tags: Optional[TagList] + + +class CreateAccessGrantsLocationResult(TypedDict, total=False): + CreatedAt: Optional[CreationTimestamp] + AccessGrantsLocationId: Optional[AccessGrantsLocationId] + AccessGrantsLocationArn: Optional[AccessGrantsLocationArn] + LocationScope: Optional[S3Prefix] + IAMRoleArn: Optional[IAMRoleArn] + + +class ObjectLambdaContentTransformation(TypedDict, total=False): + AwsLambda: Optional[AwsLambdaTransformation] + + +ObjectLambdaTransformationConfigurationActionsList = List[ + ObjectLambdaTransformationConfigurationAction +] + + +class ObjectLambdaTransformationConfiguration(TypedDict, total=False): + Actions: ObjectLambdaTransformationConfigurationActionsList + ContentTransformation: ObjectLambdaContentTransformation + + +ObjectLambdaTransformationConfigurationsList = List[ObjectLambdaTransformationConfiguration] +ObjectLambdaAllowedFeaturesList = List[ObjectLambdaAllowedFeature] + + +class ObjectLambdaConfiguration(TypedDict, total=False): + SupportingAccessPoint: ObjectLambdaSupportingAccessPointArn + CloudWatchMetricsEnabled: Optional[Boolean] + AllowedFeatures: Optional[ObjectLambdaAllowedFeaturesList] + TransformationConfigurations: ObjectLambdaTransformationConfigurationsList + + +class 
CreateAccessPointForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + Configuration: ObjectLambdaConfiguration + + +class ObjectLambdaAccessPointAlias(TypedDict, total=False): + Value: Optional[ObjectLambdaAccessPointAliasValue] + Status: Optional[ObjectLambdaAccessPointAliasStatus] + + +class CreateAccessPointForObjectLambdaResult(TypedDict, total=False): + ObjectLambdaAccessPointArn: Optional[ObjectLambdaAccessPointArn] + Alias: Optional[ObjectLambdaAccessPointAlias] + + +ScopePermissionList = List[ScopePermission] +PrefixesList = List[Prefix] + + +class Scope(TypedDict, total=False): + Prefixes: Optional[PrefixesList] + Permissions: Optional[ScopePermissionList] + + +class CreateAccessPointRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + Bucket: BucketName + VpcConfiguration: Optional[VpcConfiguration] + PublicAccessBlockConfiguration: Optional[PublicAccessBlockConfiguration] + BucketAccountId: Optional[AccountId] + Scope: Optional[Scope] + + +class CreateAccessPointResult(TypedDict, total=False): + AccessPointArn: Optional[S3AccessPointArn] + Alias: Optional[Alias] + + +class CreateBucketConfiguration(TypedDict, total=False): + LocationConstraint: Optional[BucketLocationConstraint] + + +class CreateBucketRequest(ServiceRequest): + ACL: Optional[BucketCannedACL] + Bucket: BucketName + CreateBucketConfiguration: Optional[CreateBucketConfiguration] + GrantFullControl: Optional[GrantFullControl] + GrantRead: Optional[GrantRead] + GrantReadACP: Optional[GrantReadACP] + GrantWrite: Optional[GrantWrite] + GrantWriteACP: Optional[GrantWriteACP] + ObjectLockEnabledForBucket: Optional[ObjectLockEnabledForBucket] + OutpostId: Optional[NonEmptyMaxLength64String] + + +class CreateBucketResult(TypedDict, total=False): + Location: Optional[Location] + BucketArn: Optional[S3RegionalBucketArn] + + +StorageClassList = List[S3StorageClass] +ObjectSizeLessThanBytes = int +ObjectSizeGreaterThanBytes = int +NonEmptyMaxLength1024StringList = List[NonEmptyMaxLength1024String] + + +class KeyNameConstraint(TypedDict, total=False): + MatchAnyPrefix: Optional[NonEmptyMaxLength1024StringList] + MatchAnySuffix: Optional[NonEmptyMaxLength1024StringList] + MatchAnySubstring: Optional[NonEmptyMaxLength1024StringList] + + +ReplicationStatusFilterList = List[ReplicationStatus] +ObjectCreationTime = datetime + + +class JobManifestGeneratorFilter(TypedDict, total=False): + EligibleForReplication: Optional[Boolean] + CreatedAfter: Optional[ObjectCreationTime] + CreatedBefore: Optional[ObjectCreationTime] + ObjectReplicationStatuses: Optional[ReplicationStatusFilterList] + KeyNameConstraint: Optional[KeyNameConstraint] + ObjectSizeGreaterThanBytes: Optional[ObjectSizeGreaterThanBytes] + ObjectSizeLessThanBytes: Optional[ObjectSizeLessThanBytes] + MatchAnyStorageClass: Optional[StorageClassList] + + +class SSEKMSEncryption(TypedDict, total=False): + KeyId: KmsKeyArnString + + +class SSES3Encryption(TypedDict, total=False): + pass + + +class GeneratedManifestEncryption(TypedDict, total=False): + SSES3: Optional[SSES3Encryption] + SSEKMS: Optional[SSEKMSEncryption] + + +class S3ManifestOutputLocation(TypedDict, total=False): + ExpectedManifestBucketOwner: Optional[AccountId] + Bucket: S3BucketArnString + ManifestPrefix: Optional[ManifestPrefixString] + ManifestEncryption: Optional[GeneratedManifestEncryption] + ManifestFormat: GeneratedManifestFormat + + +class S3JobManifestGenerator(TypedDict, total=False): + ExpectedBucketOwner: Optional[AccountId] + 
SourceBucket: S3BucketArnString + ManifestOutputLocation: Optional[S3ManifestOutputLocation] + Filter: Optional[JobManifestGeneratorFilter] + EnableManifestOutput: Boolean + + +class JobManifestGenerator(TypedDict, total=False): + S3JobManifestGenerator: Optional[S3JobManifestGenerator] + + +class S3Tag(TypedDict, total=False): + Key: TagKeyString + Value: TagValueString + + +S3TagSet = List[S3Tag] + + +class JobManifestLocation(TypedDict, total=False): + ObjectArn: S3KeyArnString + ObjectVersionId: Optional[S3ObjectVersionId] + ETag: NonEmptyMaxLength1024String + + +JobManifestFieldList = List[JobManifestFieldName] + + +class JobManifestSpec(TypedDict, total=False): + Format: JobManifestFormat + Fields: Optional[JobManifestFieldList] + + +class JobManifest(TypedDict, total=False): + Spec: JobManifestSpec + Location: JobManifestLocation + + +class JobReport(TypedDict, total=False): + Bucket: Optional[S3BucketArnString] + Format: Optional[JobReportFormat] + Enabled: Boolean + Prefix: Optional[ReportPrefixString] + ReportScope: Optional[JobReportScope] + + +class S3ReplicateObjectOperation(TypedDict, total=False): + pass + + +TimeStamp = datetime + + +class S3Retention(TypedDict, total=False): + RetainUntilDate: Optional[TimeStamp] + Mode: Optional[S3ObjectLockRetentionMode] + + +class S3SetObjectRetentionOperation(TypedDict, total=False): + BypassGovernanceRetention: Optional[Boolean] + Retention: S3Retention + + +class S3ObjectLockLegalHold(TypedDict, total=False): + Status: S3ObjectLockLegalHoldStatus + + +class S3SetObjectLegalHoldOperation(TypedDict, total=False): + LegalHold: S3ObjectLockLegalHold + + +class S3InitiateRestoreObjectOperation(TypedDict, total=False): + ExpirationInDays: Optional[S3ExpirationInDays] + GlacierJobTier: Optional[S3GlacierJobTier] + + +class S3DeleteObjectTaggingOperation(TypedDict, total=False): + pass + + +class S3SetObjectTaggingOperation(TypedDict, total=False): + TagSet: Optional[S3TagSet] + + +class S3Grantee(TypedDict, total=False): + TypeIdentifier: Optional[S3GranteeTypeIdentifier] + Identifier: Optional[NonEmptyMaxLength1024String] + DisplayName: Optional[NonEmptyMaxLength1024String] + + +class S3Grant(TypedDict, total=False): + Grantee: Optional[S3Grantee] + Permission: Optional[S3Permission] + + +S3GrantList = List[S3Grant] + + +class S3ObjectOwner(TypedDict, total=False): + ID: Optional[NonEmptyMaxLength1024String] + DisplayName: Optional[NonEmptyMaxLength1024String] + + +class S3AccessControlList(TypedDict, total=False): + Owner: S3ObjectOwner + Grants: Optional[S3GrantList] + + +class S3AccessControlPolicy(TypedDict, total=False): + AccessControlList: Optional[S3AccessControlList] + CannedAccessControlList: Optional[S3CannedAccessControlList] + + +class S3SetObjectAclOperation(TypedDict, total=False): + AccessControlPolicy: Optional[S3AccessControlPolicy] + + +S3ContentLength = int +S3UserMetadata = Dict[NonEmptyMaxLength1024String, MaxLength1024String] + + +class S3ObjectMetadata(TypedDict, total=False): + CacheControl: Optional[NonEmptyMaxLength1024String] + ContentDisposition: Optional[NonEmptyMaxLength1024String] + ContentEncoding: Optional[NonEmptyMaxLength1024String] + ContentLanguage: Optional[NonEmptyMaxLength1024String] + UserMetadata: Optional[S3UserMetadata] + ContentLength: Optional[S3ContentLength] + ContentMD5: Optional[NonEmptyMaxLength1024String] + ContentType: Optional[NonEmptyMaxLength1024String] + HttpExpiresDate: Optional[TimeStamp] + RequesterCharged: Optional[Boolean] + SSEAlgorithm: Optional[S3SSEAlgorithm] + + +class 
S3CopyObjectOperation(TypedDict, total=False):
+    TargetResource: Optional[S3RegionalOrS3ExpressBucketArnString]
+    CannedAccessControlList: Optional[S3CannedAccessControlList]
+    AccessControlGrants: Optional[S3GrantList]
+    MetadataDirective: Optional[S3MetadataDirective]
+    ModifiedSinceConstraint: Optional[TimeStamp]
+    NewObjectMetadata: Optional[S3ObjectMetadata]
+    NewObjectTagging: Optional[S3TagSet]
+    RedirectLocation: Optional[NonEmptyMaxLength2048String]
+    RequesterPays: Optional[Boolean]
+    StorageClass: Optional[S3StorageClass]
+    UnModifiedSinceConstraint: Optional[TimeStamp]
+    SSEAwsKmsKeyId: Optional[KmsKeyArnString]
+    TargetKeyPrefix: Optional[NonEmptyMaxLength1024String]
+    ObjectLockLegalHoldStatus: Optional[S3ObjectLockLegalHoldStatus]
+    ObjectLockMode: Optional[S3ObjectLockMode]
+    ObjectLockRetainUntilDate: Optional[TimeStamp]
+    BucketKeyEnabled: Optional[Boolean]
+    ChecksumAlgorithm: Optional[S3ChecksumAlgorithm]
+
+
+UserArguments = Dict[NonEmptyMaxLength64String, MaxLength1024String]
+
+
+class LambdaInvokeOperation(TypedDict, total=False):
+    FunctionArn: Optional[FunctionArnString]
+    InvocationSchemaVersion: Optional[NonEmptyMaxLength64String]
+    UserArguments: Optional[UserArguments]
+
+
+class JobOperation(TypedDict, total=False):
+    LambdaInvoke: Optional[LambdaInvokeOperation]
+    S3PutObjectCopy: Optional[S3CopyObjectOperation]
+    S3PutObjectAcl: Optional[S3SetObjectAclOperation]
+    S3PutObjectTagging: Optional[S3SetObjectTaggingOperation]
+    S3DeleteObjectTagging: Optional[S3DeleteObjectTaggingOperation]
+    S3InitiateRestoreObject: Optional[S3InitiateRestoreObjectOperation]
+    S3PutObjectLegalHold: Optional[S3SetObjectLegalHoldOperation]
+    S3PutObjectRetention: Optional[S3SetObjectRetentionOperation]
+    S3ReplicateObject: Optional[S3ReplicateObjectOperation]
+
+
+class CreateJobRequest(ServiceRequest):
+    AccountId: AccountId
+    ConfirmationRequired: Optional[ConfirmationRequired]
+    Operation: JobOperation
+    Report: JobReport
+    ClientRequestToken: NonEmptyMaxLength64String
+    Manifest: Optional[JobManifest]
+    Description: Optional[NonEmptyMaxLength256String]
+    Priority: JobPriority
+    RoleArn: IAMRoleArn
+    Tags: Optional[S3TagSet]
+    ManifestGenerator: Optional[JobManifestGenerator]
+
+
+class CreateJobResult(TypedDict, total=False):
+    JobId: Optional[JobId]
+
+
+class CreateMultiRegionAccessPointRequest(ServiceRequest):
+    AccountId: AccountId
+    ClientToken: MultiRegionAccessPointClientToken
+    Details: CreateMultiRegionAccessPointInput
+
+
+class CreateMultiRegionAccessPointResult(TypedDict, total=False):
+    RequestTokenARN: Optional[AsyncRequestTokenARN]
+
+
+ObjectSizeValue = int
+
+
+class MatchObjectSize(TypedDict, total=False):
+    BytesGreaterThan: Optional[ObjectSizeValue]
+    BytesLessThan: Optional[ObjectSizeValue]
+
+
+class MatchObjectAge(TypedDict, total=False):
+    DaysGreaterThan: Optional[ObjectAgeValue]
+    DaysLessThan: Optional[ObjectAgeValue]
+
+
+MatchAnyTag = List[S3Tag]
+MatchAnySuffix = List[Suffix]
+MatchAnyPrefix = List[Prefix]
+
+
+class StorageLensGroupOrOperator(TypedDict, total=False):
+    MatchAnyPrefix: Optional[MatchAnyPrefix]
+    MatchAnySuffix: Optional[MatchAnySuffix]
+    MatchAnyTag: Optional[MatchAnyTag]
+    MatchObjectAge: Optional[MatchObjectAge]
+    MatchObjectSize: Optional[MatchObjectSize]
+
+
+class StorageLensGroupAndOperator(TypedDict, total=False):
+    MatchAnyPrefix: Optional[MatchAnyPrefix]
+    MatchAnySuffix: Optional[MatchAnySuffix]
+    MatchAnyTag: Optional[MatchAnyTag]
+    MatchObjectAge: Optional[MatchObjectAge]
+    MatchObjectSize: Optional[MatchObjectSize]
+
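The generated shapes above are ordinary Python objects at runtime: the `TypedDict` classes construct plain dicts, and `StrEnum` members compare equal to their string values, so request payloads can be assembled literally. A minimal sketch, assuming the patched `localstack-core` package is importable:

```python
from localstack.aws.api.s3control import (
    JobStatus,
    MatchObjectAge,
    S3Tag,
    StorageLensGroupAndOperator,
)

# Match objects tagged env=prod that are also older than 30 days,
# combining two criteria with the And operator defined above.
and_operator = StorageLensGroupAndOperator(
    MatchAnyTag=[S3Tag(Key="env", Value="prod")],
    MatchObjectAge=MatchObjectAge(DaysGreaterThan=30),
)

assert isinstance(and_operator, dict)  # TypedDicts construct plain dicts
assert JobStatus.Active == "Active"    # StrEnum members are str values
```

Because these classes are declared with `total=False`, any subset of keys type-checks; which members a request actually requires is enforced against the AWS service model at request time, not by the Python type itself.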
+ +class StorageLensGroupFilter(TypedDict, total=False): + MatchAnyPrefix: Optional[MatchAnyPrefix] + MatchAnySuffix: Optional[MatchAnySuffix] + MatchAnyTag: Optional[MatchAnyTag] + MatchObjectAge: Optional[MatchObjectAge] + MatchObjectSize: Optional[MatchObjectSize] + And: Optional[StorageLensGroupAndOperator] + Or: Optional[StorageLensGroupOrOperator] + + +class StorageLensGroup(TypedDict, total=False): + Name: StorageLensGroupName + Filter: StorageLensGroupFilter + StorageLensGroupArn: Optional[StorageLensGroupArn] + + +class CreateStorageLensGroupRequest(ServiceRequest): + AccountId: AccountId + StorageLensGroup: StorageLensGroup + Tags: Optional[TagList] + + +CreationDate = datetime +Expiration = datetime + + +class Credentials(TypedDict, total=False): + AccessKeyId: Optional[AccessKeyId] + SecretAccessKey: Optional[SecretAccessKey] + SessionToken: Optional[SessionToken] + Expiration: Optional[Expiration] + + +Date = datetime + + +class DeleteAccessGrantRequest(ServiceRequest): + AccountId: AccountId + AccessGrantId: AccessGrantId + + +class DeleteAccessGrantsInstanceRequest(ServiceRequest): + AccountId: AccountId + + +class DeleteAccessGrantsInstanceResourcePolicyRequest(ServiceRequest): + AccountId: AccountId + + +class DeleteAccessGrantsLocationRequest(ServiceRequest): + AccountId: AccountId + AccessGrantsLocationId: AccessGrantsLocationId + + +class DeleteAccessPointForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + + +class DeleteAccessPointPolicyForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + + +class DeleteAccessPointPolicyRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + + +class DeleteAccessPointRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + + +class DeleteAccessPointScopeRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + + +class DeleteBucketLifecycleConfigurationRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class DeleteBucketPolicyRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class DeleteBucketReplicationRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class DeleteBucketRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class DeleteBucketTaggingRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class DeleteJobTaggingRequest(ServiceRequest): + AccountId: AccountId + JobId: JobId + + +class DeleteJobTaggingResult(TypedDict, total=False): + pass + + +class DeleteMarkerReplication(TypedDict, total=False): + Status: DeleteMarkerReplicationStatus + + +class DeleteMultiRegionAccessPointRequest(ServiceRequest): + AccountId: AccountId + ClientToken: MultiRegionAccessPointClientToken + Details: DeleteMultiRegionAccessPointInput + + +class DeleteMultiRegionAccessPointResult(TypedDict, total=False): + RequestTokenARN: Optional[AsyncRequestTokenARN] + + +class DeletePublicAccessBlockRequest(ServiceRequest): + AccountId: AccountId + + +class DeleteStorageLensConfigurationRequest(ServiceRequest): + ConfigId: ConfigId + AccountId: AccountId + + +class DeleteStorageLensConfigurationTaggingRequest(ServiceRequest): + ConfigId: ConfigId + AccountId: AccountId + + +class DeleteStorageLensConfigurationTaggingResult(TypedDict, total=False): + pass + + +class DeleteStorageLensGroupRequest(ServiceRequest): + Name: StorageLensGroupName + AccountId: AccountId + + +class 
DescribeJobRequest(ServiceRequest): + AccountId: AccountId + JobId: JobId + + +class S3GeneratedManifestDescriptor(TypedDict, total=False): + Format: Optional[GeneratedManifestFormat] + Location: Optional[JobManifestLocation] + + +SuspendedDate = datetime +JobTerminationDate = datetime +JobCreationTime = datetime + + +class JobFailure(TypedDict, total=False): + FailureCode: Optional[JobFailureCode] + FailureReason: Optional[JobFailureReason] + + +JobFailureList = List[JobFailure] +JobTimeInStateSeconds = int + + +class JobTimers(TypedDict, total=False): + ElapsedTimeInActiveSeconds: Optional[JobTimeInStateSeconds] + + +JobNumberOfTasksFailed = int +JobNumberOfTasksSucceeded = int +JobTotalNumberOfTasks = int + + +class JobProgressSummary(TypedDict, total=False): + TotalNumberOfTasks: Optional[JobTotalNumberOfTasks] + NumberOfTasksSucceeded: Optional[JobNumberOfTasksSucceeded] + NumberOfTasksFailed: Optional[JobNumberOfTasksFailed] + Timers: Optional[JobTimers] + + +class JobDescriptor(TypedDict, total=False): + JobId: Optional[JobId] + ConfirmationRequired: Optional[ConfirmationRequired] + Description: Optional[NonEmptyMaxLength256String] + JobArn: Optional[JobArn] + Status: Optional[JobStatus] + Manifest: Optional[JobManifest] + Operation: Optional[JobOperation] + Priority: Optional[JobPriority] + ProgressSummary: Optional[JobProgressSummary] + StatusUpdateReason: Optional[JobStatusUpdateReason] + FailureReasons: Optional[JobFailureList] + Report: Optional[JobReport] + CreationTime: Optional[JobCreationTime] + TerminationDate: Optional[JobTerminationDate] + RoleArn: Optional[IAMRoleArn] + SuspendedDate: Optional[SuspendedDate] + SuspendedCause: Optional[SuspendedCause] + ManifestGenerator: Optional[JobManifestGenerator] + GeneratedManifestDescriptor: Optional[S3GeneratedManifestDescriptor] + + +class DescribeJobResult(TypedDict, total=False): + Job: Optional[JobDescriptor] + + +class DescribeMultiRegionAccessPointOperationRequest(ServiceRequest): + AccountId: AccountId + RequestTokenARN: AsyncRequestTokenARN + + +class DescribeMultiRegionAccessPointOperationResult(TypedDict, total=False): + AsyncOperation: Optional[AsyncOperation] + + +class ReplicationTimeValue(TypedDict, total=False): + Minutes: Optional[Minutes] + + +class Metrics(TypedDict, total=False): + Status: MetricsStatus + EventThreshold: Optional[ReplicationTimeValue] + + +class EncryptionConfiguration(TypedDict, total=False): + ReplicaKmsKeyID: Optional[ReplicaKmsKeyID] + + +class ReplicationTime(TypedDict, total=False): + Status: ReplicationTimeStatus + Time: ReplicationTimeValue + + +class Destination(TypedDict, total=False): + Account: Optional[AccountId] + Bucket: BucketIdentifierString + ReplicationTime: Optional[ReplicationTime] + AccessControlTranslation: Optional[AccessControlTranslation] + EncryptionConfiguration: Optional[EncryptionConfiguration] + Metrics: Optional[Metrics] + StorageClass: Optional[ReplicationStorageClass] + + +class DissociateAccessGrantsIdentityCenterRequest(ServiceRequest): + AccountId: AccountId + + +Endpoints = Dict[NonEmptyMaxLength64String, NonEmptyMaxLength1024String] + + +class EstablishedMultiRegionAccessPointPolicy(TypedDict, total=False): + Policy: Optional[Policy] + + +Regions = List[S3AWSRegion] + + +class Exclude(TypedDict, total=False): + Buckets: Optional[Buckets] + Regions: Optional[Regions] + + +class ExistingObjectReplication(TypedDict, total=False): + Status: ExistingObjectReplicationStatus + + +class GetAccessGrantRequest(ServiceRequest): + AccountId: AccountId + 
AccessGrantId: AccessGrantId + + +class GetAccessGrantResult(TypedDict, total=False): + CreatedAt: Optional[CreationTimestamp] + AccessGrantId: Optional[AccessGrantId] + AccessGrantArn: Optional[AccessGrantArn] + Grantee: Optional[Grantee] + Permission: Optional[Permission] + AccessGrantsLocationId: Optional[AccessGrantsLocationId] + AccessGrantsLocationConfiguration: Optional[AccessGrantsLocationConfiguration] + GrantScope: Optional[S3Prefix] + ApplicationArn: Optional[IdentityCenterApplicationArn] + + +class GetAccessGrantsInstanceForPrefixRequest(ServiceRequest): + AccountId: AccountId + S3Prefix: S3Prefix + + +class GetAccessGrantsInstanceForPrefixResult(TypedDict, total=False): + AccessGrantsInstanceArn: Optional[AccessGrantsInstanceArn] + AccessGrantsInstanceId: Optional[AccessGrantsInstanceId] + + +class GetAccessGrantsInstanceRequest(ServiceRequest): + AccountId: AccountId + + +class GetAccessGrantsInstanceResourcePolicyRequest(ServiceRequest): + AccountId: AccountId + + +class GetAccessGrantsInstanceResourcePolicyResult(TypedDict, total=False): + Policy: Optional[PolicyDocument] + Organization: Optional[Organization] + CreatedAt: Optional[CreationTimestamp] + + +class GetAccessGrantsInstanceResult(TypedDict, total=False): + AccessGrantsInstanceArn: Optional[AccessGrantsInstanceArn] + AccessGrantsInstanceId: Optional[AccessGrantsInstanceId] + IdentityCenterArn: Optional[IdentityCenterArn] + IdentityCenterInstanceArn: Optional[IdentityCenterArn] + IdentityCenterApplicationArn: Optional[IdentityCenterApplicationArn] + CreatedAt: Optional[CreationTimestamp] + + +class GetAccessGrantsLocationRequest(ServiceRequest): + AccountId: AccountId + AccessGrantsLocationId: AccessGrantsLocationId + + +class GetAccessGrantsLocationResult(TypedDict, total=False): + CreatedAt: Optional[CreationTimestamp] + AccessGrantsLocationId: Optional[AccessGrantsLocationId] + AccessGrantsLocationArn: Optional[AccessGrantsLocationArn] + LocationScope: Optional[S3Prefix] + IAMRoleArn: Optional[IAMRoleArn] + + +class GetAccessPointConfigurationForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + + +class GetAccessPointConfigurationForObjectLambdaResult(TypedDict, total=False): + Configuration: Optional[ObjectLambdaConfiguration] + + +class GetAccessPointForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + + +class GetAccessPointForObjectLambdaResult(TypedDict, total=False): + Name: Optional[ObjectLambdaAccessPointName] + PublicAccessBlockConfiguration: Optional[PublicAccessBlockConfiguration] + CreationDate: Optional[CreationDate] + Alias: Optional[ObjectLambdaAccessPointAlias] + + +class GetAccessPointPolicyForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + + +class GetAccessPointPolicyForObjectLambdaResult(TypedDict, total=False): + Policy: Optional[ObjectLambdaPolicy] + + +class GetAccessPointPolicyRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + + +class GetAccessPointPolicyResult(TypedDict, total=False): + Policy: Optional[Policy] + + +class GetAccessPointPolicyStatusForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + + +class PolicyStatus(TypedDict, total=False): + IsPublic: Optional[IsPublic] + + +class GetAccessPointPolicyStatusForObjectLambdaResult(TypedDict, total=False): + PolicyStatus: Optional[PolicyStatus] + + +class GetAccessPointPolicyStatusRequest(ServiceRequest): + AccountId: 
AccountId + Name: AccessPointName + + +class GetAccessPointPolicyStatusResult(TypedDict, total=False): + PolicyStatus: Optional[PolicyStatus] + + +class GetAccessPointRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + + +class GetAccessPointResult(TypedDict, total=False): + Name: Optional[AccessPointName] + Bucket: Optional[BucketName] + NetworkOrigin: Optional[NetworkOrigin] + VpcConfiguration: Optional[VpcConfiguration] + PublicAccessBlockConfiguration: Optional[PublicAccessBlockConfiguration] + CreationDate: Optional[CreationDate] + Alias: Optional[Alias] + AccessPointArn: Optional[S3AccessPointArn] + Endpoints: Optional[Endpoints] + BucketAccountId: Optional[AccountId] + + +class GetAccessPointScopeRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + + +class GetAccessPointScopeResult(TypedDict, total=False): + Scope: Optional[Scope] + + +class GetBucketLifecycleConfigurationRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class NoncurrentVersionExpiration(TypedDict, total=False): + NoncurrentDays: Optional[Days] + NewerNoncurrentVersions: Optional[NoncurrentVersionCount] + + +class NoncurrentVersionTransition(TypedDict, total=False): + NoncurrentDays: Optional[Days] + StorageClass: Optional[TransitionStorageClass] + + +NoncurrentVersionTransitionList = List[NoncurrentVersionTransition] + + +class Transition(TypedDict, total=False): + Date: Optional[Date] + Days: Optional[Days] + StorageClass: Optional[TransitionStorageClass] + + +TransitionList = List[Transition] + + +class LifecycleRuleAndOperator(TypedDict, total=False): + Prefix: Optional[Prefix] + Tags: Optional[S3TagSet] + ObjectSizeGreaterThan: Optional[ObjectSizeGreaterThanBytes] + ObjectSizeLessThan: Optional[ObjectSizeLessThanBytes] + + +class LifecycleRuleFilter(TypedDict, total=False): + Prefix: Optional[Prefix] + Tag: Optional[S3Tag] + And: Optional[LifecycleRuleAndOperator] + ObjectSizeGreaterThan: Optional[ObjectSizeGreaterThanBytes] + ObjectSizeLessThan: Optional[ObjectSizeLessThanBytes] + + +class LifecycleExpiration(TypedDict, total=False): + Date: Optional[Date] + Days: Optional[Days] + ExpiredObjectDeleteMarker: Optional[ExpiredObjectDeleteMarker] + + +class LifecycleRule(TypedDict, total=False): + Expiration: Optional[LifecycleExpiration] + ID: Optional[ID] + Filter: Optional[LifecycleRuleFilter] + Status: ExpirationStatus + Transitions: Optional[TransitionList] + NoncurrentVersionTransitions: Optional[NoncurrentVersionTransitionList] + NoncurrentVersionExpiration: Optional[NoncurrentVersionExpiration] + AbortIncompleteMultipartUpload: Optional[AbortIncompleteMultipartUpload] + + +LifecycleRules = List[LifecycleRule] + + +class GetBucketLifecycleConfigurationResult(TypedDict, total=False): + Rules: Optional[LifecycleRules] + + +class GetBucketPolicyRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class GetBucketPolicyResult(TypedDict, total=False): + Policy: Optional[Policy] + + +class GetBucketReplicationRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class ReplicaModifications(TypedDict, total=False): + Status: ReplicaModificationsStatus + + +class SseKmsEncryptedObjects(TypedDict, total=False): + Status: SseKmsEncryptedObjectsStatus + + +class SourceSelectionCriteria(TypedDict, total=False): + SseKmsEncryptedObjects: Optional[SseKmsEncryptedObjects] + ReplicaModifications: Optional[ReplicaModifications] + + +class ReplicationRuleAndOperator(TypedDict, total=False): + Prefix: Optional[Prefix] + 
Tags: Optional[S3TagSet] + + +class ReplicationRuleFilter(TypedDict, total=False): + Prefix: Optional[Prefix] + Tag: Optional[S3Tag] + And: Optional[ReplicationRuleAndOperator] + + +class ReplicationRule(TypedDict, total=False): + ID: Optional[ID] + Priority: Optional[Priority] + Prefix: Optional[Prefix] + Filter: Optional[ReplicationRuleFilter] + Status: ReplicationRuleStatus + SourceSelectionCriteria: Optional[SourceSelectionCriteria] + ExistingObjectReplication: Optional[ExistingObjectReplication] + Destination: Destination + DeleteMarkerReplication: Optional[DeleteMarkerReplication] + Bucket: BucketIdentifierString + + +ReplicationRules = List[ReplicationRule] + + +class ReplicationConfiguration(TypedDict, total=False): + Role: Role + Rules: ReplicationRules + + +class GetBucketReplicationResult(TypedDict, total=False): + ReplicationConfiguration: Optional[ReplicationConfiguration] + + +class GetBucketRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class GetBucketResult(TypedDict, total=False): + Bucket: Optional[BucketName] + PublicAccessBlockEnabled: Optional[PublicAccessBlockEnabled] + CreationDate: Optional[CreationDate] + + +class GetBucketTaggingRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class GetBucketTaggingResult(TypedDict, total=False): + TagSet: S3TagSet + + +class GetBucketVersioningRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + + +class GetBucketVersioningResult(TypedDict, total=False): + Status: Optional[BucketVersioningStatus] + MFADelete: Optional[MFADeleteStatus] + + +class GetDataAccessRequest(ServiceRequest): + AccountId: AccountId + Target: S3Prefix + Permission: Permission + DurationSeconds: Optional[DurationSeconds] + Privilege: Optional[Privilege] + TargetType: Optional[S3PrefixType] + + +class GetDataAccessResult(TypedDict, total=False): + Credentials: Optional[Credentials] + MatchedGrantTarget: Optional[S3Prefix] + Grantee: Optional[Grantee] + + +class GetJobTaggingRequest(ServiceRequest): + AccountId: AccountId + JobId: JobId + + +class GetJobTaggingResult(TypedDict, total=False): + Tags: Optional[S3TagSet] + + +class GetMultiRegionAccessPointPolicyRequest(ServiceRequest): + AccountId: AccountId + Name: MultiRegionAccessPointName + + +class ProposedMultiRegionAccessPointPolicy(TypedDict, total=False): + Policy: Optional[Policy] + + +class MultiRegionAccessPointPolicyDocument(TypedDict, total=False): + Established: Optional[EstablishedMultiRegionAccessPointPolicy] + Proposed: Optional[ProposedMultiRegionAccessPointPolicy] + + +class GetMultiRegionAccessPointPolicyResult(TypedDict, total=False): + Policy: Optional[MultiRegionAccessPointPolicyDocument] + + +class GetMultiRegionAccessPointPolicyStatusRequest(ServiceRequest): + AccountId: AccountId + Name: MultiRegionAccessPointName + + +class GetMultiRegionAccessPointPolicyStatusResult(TypedDict, total=False): + Established: Optional[PolicyStatus] + + +class GetMultiRegionAccessPointRequest(ServiceRequest): + AccountId: AccountId + Name: MultiRegionAccessPointName + + +class RegionReport(TypedDict, total=False): + Bucket: Optional[BucketName] + Region: Optional[RegionName] + BucketAccountId: Optional[AccountId] + + +RegionReportList = List[RegionReport] + + +class MultiRegionAccessPointReport(TypedDict, total=False): + Name: Optional[MultiRegionAccessPointName] + Alias: Optional[MultiRegionAccessPointAlias] + CreatedAt: Optional[CreationTimestamp] + PublicAccessBlock: Optional[PublicAccessBlockConfiguration] + Status: 
Optional[MultiRegionAccessPointStatus] + Regions: Optional[RegionReportList] + + +class GetMultiRegionAccessPointResult(TypedDict, total=False): + AccessPoint: Optional[MultiRegionAccessPointReport] + + +class GetMultiRegionAccessPointRoutesRequest(ServiceRequest): + AccountId: AccountId + Mrap: MultiRegionAccessPointId + + +class MultiRegionAccessPointRoute(TypedDict, total=False): + Bucket: Optional[BucketName] + Region: Optional[RegionName] + TrafficDialPercentage: TrafficDialPercentage + + +RouteList = List[MultiRegionAccessPointRoute] + + +class GetMultiRegionAccessPointRoutesResult(TypedDict, total=False): + Mrap: Optional[MultiRegionAccessPointId] + Routes: Optional[RouteList] + + +class GetPublicAccessBlockOutput(TypedDict, total=False): + PublicAccessBlockConfiguration: Optional[PublicAccessBlockConfiguration] + + +class GetPublicAccessBlockRequest(ServiceRequest): + AccountId: AccountId + + +class GetStorageLensConfigurationRequest(ServiceRequest): + ConfigId: ConfigId + AccountId: AccountId + + +class StorageLensAwsOrg(TypedDict, total=False): + Arn: AwsOrgArn + + +class SSEKMS(TypedDict, total=False): + KeyId: SSEKMSKeyId + + +class SSES3(TypedDict, total=False): + pass + + +class StorageLensDataExportEncryption(TypedDict, total=False): + SSES3: Optional[SSES3] + SSEKMS: Optional[SSEKMS] + + +class S3BucketDestination(TypedDict, total=False): + Format: Format + OutputSchemaVersion: OutputSchemaVersion + AccountId: AccountId + Arn: S3BucketArnString + Prefix: Optional[Prefix] + Encryption: Optional[StorageLensDataExportEncryption] + + +class StorageLensDataExport(TypedDict, total=False): + S3BucketDestination: Optional[S3BucketDestination] + CloudWatchMetrics: Optional[CloudWatchMetrics] + + +class Include(TypedDict, total=False): + Buckets: Optional[Buckets] + Regions: Optional[Regions] + + +class StorageLensConfiguration(TypedDict, total=False): + Id: ConfigId + AccountLevel: AccountLevel + Include: Optional[Include] + Exclude: Optional[Exclude] + DataExport: Optional[StorageLensDataExport] + IsEnabled: IsEnabled + AwsOrg: Optional[StorageLensAwsOrg] + StorageLensArn: Optional[StorageLensArn] + + +class GetStorageLensConfigurationResult(TypedDict, total=False): + StorageLensConfiguration: Optional[StorageLensConfiguration] + + +class GetStorageLensConfigurationTaggingRequest(ServiceRequest): + ConfigId: ConfigId + AccountId: AccountId + + +class StorageLensTag(TypedDict, total=False): + Key: TagKeyString + Value: TagValueString + + +StorageLensTags = List[StorageLensTag] + + +class GetStorageLensConfigurationTaggingResult(TypedDict, total=False): + Tags: Optional[StorageLensTags] + + +class GetStorageLensGroupRequest(ServiceRequest): + Name: StorageLensGroupName + AccountId: AccountId + + +class GetStorageLensGroupResult(TypedDict, total=False): + StorageLensGroup: Optional[StorageLensGroup] + + +class JobListDescriptor(TypedDict, total=False): + JobId: Optional[JobId] + Description: Optional[NonEmptyMaxLength256String] + Operation: Optional[OperationName] + Priority: Optional[JobPriority] + Status: Optional[JobStatus] + CreationTime: Optional[JobCreationTime] + TerminationDate: Optional[JobTerminationDate] + ProgressSummary: Optional[JobProgressSummary] + + +JobListDescriptorList = List[JobListDescriptor] +JobStatusList = List[JobStatus] + + +class LifecycleConfiguration(TypedDict, total=False): + Rules: Optional[LifecycleRules] + + +class ListAccessGrantsInstancesRequest(ServiceRequest): + AccountId: AccountId + NextToken: Optional[ContinuationToken] + MaxResults: 
Optional[MaxResults] + + +class ListAccessGrantsInstancesResult(TypedDict, total=False): + NextToken: Optional[ContinuationToken] + AccessGrantsInstancesList: Optional[AccessGrantsInstancesList] + + +class ListAccessGrantsLocationsRequest(ServiceRequest): + AccountId: AccountId + NextToken: Optional[ContinuationToken] + MaxResults: Optional[MaxResults] + LocationScope: Optional[S3Prefix] + + +class ListAccessGrantsLocationsResult(TypedDict, total=False): + NextToken: Optional[ContinuationToken] + AccessGrantsLocationsList: Optional[AccessGrantsLocationsList] + + +class ListAccessGrantsRequest(ServiceRequest): + AccountId: AccountId + NextToken: Optional[ContinuationToken] + MaxResults: Optional[MaxResults] + GranteeType: Optional[GranteeType] + GranteeIdentifier: Optional[GranteeIdentifier] + Permission: Optional[Permission] + GrantScope: Optional[S3Prefix] + ApplicationArn: Optional[IdentityCenterApplicationArn] + + +class ListAccessGrantsResult(TypedDict, total=False): + NextToken: Optional[ContinuationToken] + AccessGrantsList: Optional[AccessGrantsList] + + +class ListAccessPointsForDirectoryBucketsRequest(ServiceRequest): + AccountId: AccountId + DirectoryBucket: Optional[BucketName] + NextToken: Optional[NonEmptyMaxLength1024String] + MaxResults: Optional[MaxResults] + + +class ListAccessPointsForDirectoryBucketsResult(TypedDict, total=False): + AccessPointList: Optional[AccessPointList] + NextToken: Optional[NonEmptyMaxLength1024String] + + +class ListAccessPointsForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + NextToken: Optional[NonEmptyMaxLength1024String] + MaxResults: Optional[MaxResults] + + +class ObjectLambdaAccessPoint(TypedDict, total=False): + Name: ObjectLambdaAccessPointName + ObjectLambdaAccessPointArn: Optional[ObjectLambdaAccessPointArn] + Alias: Optional[ObjectLambdaAccessPointAlias] + + +ObjectLambdaAccessPointList = List[ObjectLambdaAccessPoint] + + +class ListAccessPointsForObjectLambdaResult(TypedDict, total=False): + ObjectLambdaAccessPointList: Optional[ObjectLambdaAccessPointList] + NextToken: Optional[NonEmptyMaxLength1024String] + + +class ListAccessPointsRequest(ServiceRequest): + AccountId: AccountId + Bucket: Optional[BucketName] + NextToken: Optional[NonEmptyMaxLength1024String] + MaxResults: Optional[MaxResults] + + +class ListAccessPointsResult(TypedDict, total=False): + AccessPointList: Optional[AccessPointList] + NextToken: Optional[NonEmptyMaxLength1024String] + + +class ListCallerAccessGrantsRequest(ServiceRequest): + AccountId: AccountId + GrantScope: Optional[S3Prefix] + NextToken: Optional[ContinuationToken] + MaxResults: Optional[MaxResults] + AllowedByApplication: Optional[Boolean] + + +class ListCallerAccessGrantsResult(TypedDict, total=False): + NextToken: Optional[ContinuationToken] + CallerAccessGrantsList: Optional[CallerAccessGrantsList] + + +class ListJobsRequest(ServiceRequest): + AccountId: AccountId + JobStatuses: Optional[JobStatusList] + NextToken: Optional[StringForNextToken] + MaxResults: Optional[MaxResults] + + +class ListJobsResult(TypedDict, total=False): + NextToken: Optional[StringForNextToken] + Jobs: Optional[JobListDescriptorList] + + +class ListMultiRegionAccessPointsRequest(ServiceRequest): + AccountId: AccountId + NextToken: Optional[NonEmptyMaxLength1024String] + MaxResults: Optional[MaxResults] + + +MultiRegionAccessPointReportList = List[MultiRegionAccessPointReport] + + +class ListMultiRegionAccessPointsResult(TypedDict, total=False): + AccessPoints: Optional[MultiRegionAccessPointReportList] + 
NextToken: Optional[NonEmptyMaxLength1024String] + + +class ListRegionalBucketsRequest(ServiceRequest): + AccountId: AccountId + NextToken: Optional[NonEmptyMaxLength1024String] + MaxResults: Optional[MaxResults] + OutpostId: Optional[NonEmptyMaxLength64String] + + +class RegionalBucket(TypedDict, total=False): + Bucket: BucketName + BucketArn: Optional[S3RegionalBucketArn] + PublicAccessBlockEnabled: PublicAccessBlockEnabled + CreationDate: CreationDate + OutpostId: Optional[NonEmptyMaxLength64String] + + +RegionalBucketList = List[RegionalBucket] + + +class ListRegionalBucketsResult(TypedDict, total=False): + RegionalBucketList: Optional[RegionalBucketList] + NextToken: Optional[NonEmptyMaxLength1024String] + + +class ListStorageLensConfigurationEntry(TypedDict, total=False): + Id: ConfigId + StorageLensArn: StorageLensArn + HomeRegion: S3AWSRegion + IsEnabled: Optional[IsEnabled] + + +class ListStorageLensConfigurationsRequest(ServiceRequest): + AccountId: AccountId + NextToken: Optional[ContinuationToken] + + +StorageLensConfigurationList = List[ListStorageLensConfigurationEntry] + + +class ListStorageLensConfigurationsResult(TypedDict, total=False): + NextToken: Optional[ContinuationToken] + StorageLensConfigurationList: Optional[StorageLensConfigurationList] + + +class ListStorageLensGroupEntry(TypedDict, total=False): + Name: StorageLensGroupName + StorageLensGroupArn: StorageLensGroupArn + HomeRegion: S3AWSRegion + + +class ListStorageLensGroupsRequest(ServiceRequest): + AccountId: AccountId + NextToken: Optional[ContinuationToken] + + +StorageLensGroupList = List[ListStorageLensGroupEntry] + + +class ListStorageLensGroupsResult(TypedDict, total=False): + NextToken: Optional[ContinuationToken] + StorageLensGroupList: Optional[StorageLensGroupList] + + +class ListTagsForResourceRequest(ServiceRequest): + AccountId: AccountId + ResourceArn: S3ResourceArn + + +class ListTagsForResourceResult(TypedDict, total=False): + Tags: Optional[TagList] + + +class PutAccessGrantsInstanceResourcePolicyRequest(ServiceRequest): + AccountId: AccountId + Policy: PolicyDocument + Organization: Optional[Organization] + + +class PutAccessGrantsInstanceResourcePolicyResult(TypedDict, total=False): + Policy: Optional[PolicyDocument] + Organization: Optional[Organization] + CreatedAt: Optional[CreationTimestamp] + + +class PutAccessPointConfigurationForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + Configuration: ObjectLambdaConfiguration + + +class PutAccessPointPolicyForObjectLambdaRequest(ServiceRequest): + AccountId: AccountId + Name: ObjectLambdaAccessPointName + Policy: ObjectLambdaPolicy + + +class PutAccessPointPolicyRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + Policy: Policy + + +class PutAccessPointScopeRequest(ServiceRequest): + AccountId: AccountId + Name: AccessPointName + Scope: Scope + + +class PutBucketLifecycleConfigurationRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + LifecycleConfiguration: Optional[LifecycleConfiguration] + + +class PutBucketPolicyRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + ConfirmRemoveSelfBucketAccess: Optional[ConfirmRemoveSelfBucketAccess] + Policy: Policy + + +class PutBucketReplicationRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + ReplicationConfiguration: ReplicationConfiguration + + +class Tagging(TypedDict, total=False): + TagSet: S3TagSet + + +class PutBucketTaggingRequest(ServiceRequest): + AccountId: 
AccountId + Bucket: BucketName + Tagging: Tagging + + +class VersioningConfiguration(TypedDict, total=False): + MFADelete: Optional[MFADelete] + Status: Optional[BucketVersioningStatus] + + +class PutBucketVersioningRequest(ServiceRequest): + AccountId: AccountId + Bucket: BucketName + MFA: Optional[MFA] + VersioningConfiguration: VersioningConfiguration + + +class PutJobTaggingRequest(ServiceRequest): + AccountId: AccountId + JobId: JobId + Tags: S3TagSet + + +class PutJobTaggingResult(TypedDict, total=False): + pass + + +class PutMultiRegionAccessPointPolicyRequest(ServiceRequest): + AccountId: AccountId + ClientToken: MultiRegionAccessPointClientToken + Details: PutMultiRegionAccessPointPolicyInput + + +class PutMultiRegionAccessPointPolicyResult(TypedDict, total=False): + RequestTokenARN: Optional[AsyncRequestTokenARN] + + +class PutPublicAccessBlockRequest(ServiceRequest): + PublicAccessBlockConfiguration: PublicAccessBlockConfiguration + AccountId: AccountId + + +class PutStorageLensConfigurationRequest(ServiceRequest): + ConfigId: ConfigId + AccountId: AccountId + StorageLensConfiguration: StorageLensConfiguration + Tags: Optional[StorageLensTags] + + +class PutStorageLensConfigurationTaggingRequest(ServiceRequest): + ConfigId: ConfigId + AccountId: AccountId + Tags: StorageLensTags + + +class PutStorageLensConfigurationTaggingResult(TypedDict, total=False): + pass + + +class SubmitMultiRegionAccessPointRoutesRequest(ServiceRequest): + AccountId: AccountId + Mrap: MultiRegionAccessPointId + RouteUpdates: RouteList + + +class SubmitMultiRegionAccessPointRoutesResult(TypedDict, total=False): + pass + + +TagKeyList = List[TagKeyString] + + +class TagResourceRequest(ServiceRequest): + AccountId: AccountId + ResourceArn: S3ResourceArn + Tags: TagList + + +class TagResourceResult(TypedDict, total=False): + pass + + +class UntagResourceRequest(ServiceRequest): + AccountId: AccountId + ResourceArn: S3ResourceArn + TagKeys: TagKeyList + + +class UntagResourceResult(TypedDict, total=False): + pass + + +class UpdateAccessGrantsLocationRequest(ServiceRequest): + AccountId: AccountId + AccessGrantsLocationId: AccessGrantsLocationId + IAMRoleArn: IAMRoleArn + + +class UpdateAccessGrantsLocationResult(TypedDict, total=False): + CreatedAt: Optional[CreationTimestamp] + AccessGrantsLocationId: Optional[AccessGrantsLocationId] + AccessGrantsLocationArn: Optional[AccessGrantsLocationArn] + LocationScope: Optional[S3Prefix] + IAMRoleArn: Optional[IAMRoleArn] + + +class UpdateJobPriorityRequest(ServiceRequest): + AccountId: AccountId + JobId: JobId + Priority: JobPriority + + +class UpdateJobPriorityResult(TypedDict, total=False): + JobId: JobId + Priority: JobPriority + + +class UpdateJobStatusRequest(ServiceRequest): + AccountId: AccountId + JobId: JobId + RequestedJobStatus: RequestedJobStatus + StatusUpdateReason: Optional[JobStatusUpdateReason] + + +class UpdateJobStatusResult(TypedDict, total=False): + JobId: Optional[JobId] + Status: Optional[JobStatus] + StatusUpdateReason: Optional[JobStatusUpdateReason] + + +class UpdateStorageLensGroupRequest(ServiceRequest): + Name: StorageLensGroupName + AccountId: AccountId + StorageLensGroup: StorageLensGroup + + +class S3ControlApi: + service = "s3control" + version = "2018-08-20" + + @handler("AssociateAccessGrantsIdentityCenter") + def associate_access_grants_identity_center( + self, + context: RequestContext, + account_id: AccountId, + identity_center_arn: IdentityCenterArn, + **kwargs, + ) -> None: + raise NotImplementedError + + 
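+ # A note on the pattern below: every operation in S3ControlApi is a generated
+ # stub that raises NotImplementedError; a concrete provider subclasses this
+ # API class and overrides the operations it supports. Minimal sketch, purely
+ # illustrative ("S3ControlProvider" and "self.tag_store" are hypothetical
+ # names, not part of this file):
+ #
+ #   class S3ControlProvider(S3ControlApi):
+ #       def get_bucket_tagging(
+ #           self, context: RequestContext, account_id: AccountId,
+ #           bucket: BucketName, **kwargs,
+ #       ) -> GetBucketTaggingResult:
+ #           # serve tags from a hypothetical in-memory store
+ #           tags = self.tag_store.get((account_id, bucket), [])
+ #           return GetBucketTaggingResult(TagSet=tags)
+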
@handler("CreateAccessGrant") + def create_access_grant( + self, + context: RequestContext, + account_id: AccountId, + access_grants_location_id: AccessGrantsLocationId, + grantee: Grantee, + permission: Permission, + access_grants_location_configuration: AccessGrantsLocationConfiguration | None = None, + application_arn: IdentityCenterApplicationArn | None = None, + s3_prefix_type: S3PrefixType | None = None, + tags: TagList | None = None, + **kwargs, + ) -> CreateAccessGrantResult: + raise NotImplementedError + + @handler("CreateAccessGrantsInstance") + def create_access_grants_instance( + self, + context: RequestContext, + account_id: AccountId, + identity_center_arn: IdentityCenterArn | None = None, + tags: TagList | None = None, + **kwargs, + ) -> CreateAccessGrantsInstanceResult: + raise NotImplementedError + + @handler("CreateAccessGrantsLocation") + def create_access_grants_location( + self, + context: RequestContext, + account_id: AccountId, + location_scope: S3Prefix, + iam_role_arn: IAMRoleArn, + tags: TagList | None = None, + **kwargs, + ) -> CreateAccessGrantsLocationResult: + raise NotImplementedError + + @handler("CreateAccessPoint") + def create_access_point( + self, + context: RequestContext, + account_id: AccountId, + name: AccessPointName, + bucket: BucketName, + vpc_configuration: VpcConfiguration | None = None, + public_access_block_configuration: PublicAccessBlockConfiguration | None = None, + bucket_account_id: AccountId | None = None, + scope: Scope | None = None, + **kwargs, + ) -> CreateAccessPointResult: + raise NotImplementedError + + @handler("CreateAccessPointForObjectLambda") + def create_access_point_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + configuration: ObjectLambdaConfiguration, + **kwargs, + ) -> CreateAccessPointForObjectLambdaResult: + raise NotImplementedError + + @handler("CreateBucket") + def create_bucket( + self, + context: RequestContext, + bucket: BucketName, + acl: BucketCannedACL | None = None, + create_bucket_configuration: CreateBucketConfiguration | None = None, + grant_full_control: GrantFullControl | None = None, + grant_read: GrantRead | None = None, + grant_read_acp: GrantReadACP | None = None, + grant_write: GrantWrite | None = None, + grant_write_acp: GrantWriteACP | None = None, + object_lock_enabled_for_bucket: ObjectLockEnabledForBucket | None = None, + outpost_id: NonEmptyMaxLength64String | None = None, + **kwargs, + ) -> CreateBucketResult: + raise NotImplementedError + + @handler("CreateJob") + def create_job( + self, + context: RequestContext, + account_id: AccountId, + operation: JobOperation, + report: JobReport, + client_request_token: NonEmptyMaxLength64String, + priority: JobPriority, + role_arn: IAMRoleArn, + confirmation_required: ConfirmationRequired | None = None, + manifest: JobManifest | None = None, + description: NonEmptyMaxLength256String | None = None, + tags: S3TagSet | None = None, + manifest_generator: JobManifestGenerator | None = None, + **kwargs, + ) -> CreateJobResult: + raise NotImplementedError + + @handler("CreateMultiRegionAccessPoint") + def create_multi_region_access_point( + self, + context: RequestContext, + account_id: AccountId, + client_token: MultiRegionAccessPointClientToken, + details: CreateMultiRegionAccessPointInput, + **kwargs, + ) -> CreateMultiRegionAccessPointResult: + raise NotImplementedError + + @handler("CreateStorageLensGroup") + def create_storage_lens_group( + self, + context: RequestContext, + 
account_id: AccountId, + storage_lens_group: StorageLensGroup, + tags: TagList | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessGrant") + def delete_access_grant( + self, + context: RequestContext, + account_id: AccountId, + access_grant_id: AccessGrantId, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessGrantsInstance") + def delete_access_grants_instance( + self, context: RequestContext, account_id: AccountId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessGrantsInstanceResourcePolicy") + def delete_access_grants_instance_resource_policy( + self, context: RequestContext, account_id: AccountId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessGrantsLocation") + def delete_access_grants_location( + self, + context: RequestContext, + account_id: AccountId, + access_grants_location_id: AccessGrantsLocationId, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessPoint") + def delete_access_point( + self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessPointForObjectLambda") + def delete_access_point_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessPointPolicy") + def delete_access_point_policy( + self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessPointPolicyForObjectLambda") + def delete_access_point_policy_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteAccessPointScope") + def delete_access_point_scope( + self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteBucket") + def delete_bucket( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketLifecycleConfiguration") + def delete_bucket_lifecycle_configuration( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketPolicy") + def delete_bucket_policy( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketReplication") + def delete_bucket_replication( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteBucketTagging") + def delete_bucket_tagging( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteJobTagging") + def delete_job_tagging( + self, context: RequestContext, account_id: AccountId, job_id: JobId, **kwargs + ) -> DeleteJobTaggingResult: + raise NotImplementedError + + @handler("DeleteMultiRegionAccessPoint") + def delete_multi_region_access_point( + self, + context: RequestContext, + account_id: AccountId, + client_token: MultiRegionAccessPointClientToken, + details: DeleteMultiRegionAccessPointInput, + **kwargs, + ) -> 
DeleteMultiRegionAccessPointResult: + raise NotImplementedError + + @handler("DeletePublicAccessBlock") + def delete_public_access_block( + self, context: RequestContext, account_id: AccountId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteStorageLensConfiguration") + def delete_storage_lens_configuration( + self, context: RequestContext, config_id: ConfigId, account_id: AccountId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteStorageLensConfigurationTagging") + def delete_storage_lens_configuration_tagging( + self, context: RequestContext, config_id: ConfigId, account_id: AccountId, **kwargs + ) -> DeleteStorageLensConfigurationTaggingResult: + raise NotImplementedError + + @handler("DeleteStorageLensGroup") + def delete_storage_lens_group( + self, context: RequestContext, name: StorageLensGroupName, account_id: AccountId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DescribeJob") + def describe_job( + self, context: RequestContext, account_id: AccountId, job_id: JobId, **kwargs + ) -> DescribeJobResult: + raise NotImplementedError + + @handler("DescribeMultiRegionAccessPointOperation") + def describe_multi_region_access_point_operation( + self, + context: RequestContext, + account_id: AccountId, + request_token_arn: AsyncRequestTokenARN, + **kwargs, + ) -> DescribeMultiRegionAccessPointOperationResult: + raise NotImplementedError + + @handler("DissociateAccessGrantsIdentityCenter") + def dissociate_access_grants_identity_center( + self, context: RequestContext, account_id: AccountId, **kwargs + ) -> None: + raise NotImplementedError + + @handler("GetAccessGrant") + def get_access_grant( + self, + context: RequestContext, + account_id: AccountId, + access_grant_id: AccessGrantId, + **kwargs, + ) -> GetAccessGrantResult: + raise NotImplementedError + + @handler("GetAccessGrantsInstance") + def get_access_grants_instance( + self, context: RequestContext, account_id: AccountId, **kwargs + ) -> GetAccessGrantsInstanceResult: + raise NotImplementedError + + @handler("GetAccessGrantsInstanceForPrefix") + def get_access_grants_instance_for_prefix( + self, context: RequestContext, account_id: AccountId, s3_prefix: S3Prefix, **kwargs + ) -> GetAccessGrantsInstanceForPrefixResult: + raise NotImplementedError + + @handler("GetAccessGrantsInstanceResourcePolicy") + def get_access_grants_instance_resource_policy( + self, context: RequestContext, account_id: AccountId, **kwargs + ) -> GetAccessGrantsInstanceResourcePolicyResult: + raise NotImplementedError + + @handler("GetAccessGrantsLocation") + def get_access_grants_location( + self, + context: RequestContext, + account_id: AccountId, + access_grants_location_id: AccessGrantsLocationId, + **kwargs, + ) -> GetAccessGrantsLocationResult: + raise NotImplementedError + + @handler("GetAccessPoint") + def get_access_point( + self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs + ) -> GetAccessPointResult: + raise NotImplementedError + + @handler("GetAccessPointConfigurationForObjectLambda") + def get_access_point_configuration_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + **kwargs, + ) -> GetAccessPointConfigurationForObjectLambdaResult: + raise NotImplementedError + + @handler("GetAccessPointForObjectLambda") + def get_access_point_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + **kwargs, + ) -> 
GetAccessPointForObjectLambdaResult: + raise NotImplementedError + + @handler("GetAccessPointPolicy") + def get_access_point_policy( + self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs + ) -> GetAccessPointPolicyResult: + raise NotImplementedError + + @handler("GetAccessPointPolicyForObjectLambda") + def get_access_point_policy_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + **kwargs, + ) -> GetAccessPointPolicyForObjectLambdaResult: + raise NotImplementedError + + @handler("GetAccessPointPolicyStatus") + def get_access_point_policy_status( + self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs + ) -> GetAccessPointPolicyStatusResult: + raise NotImplementedError + + @handler("GetAccessPointPolicyStatusForObjectLambda") + def get_access_point_policy_status_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + **kwargs, + ) -> GetAccessPointPolicyStatusForObjectLambdaResult: + raise NotImplementedError + + @handler("GetAccessPointScope") + def get_access_point_scope( + self, context: RequestContext, account_id: AccountId, name: AccessPointName, **kwargs + ) -> GetAccessPointScopeResult: + raise NotImplementedError + + @handler("GetBucket") + def get_bucket( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> GetBucketResult: + raise NotImplementedError + + @handler("GetBucketLifecycleConfiguration") + def get_bucket_lifecycle_configuration( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> GetBucketLifecycleConfigurationResult: + raise NotImplementedError + + @handler("GetBucketPolicy") + def get_bucket_policy( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> GetBucketPolicyResult: + raise NotImplementedError + + @handler("GetBucketReplication") + def get_bucket_replication( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> GetBucketReplicationResult: + raise NotImplementedError + + @handler("GetBucketTagging") + def get_bucket_tagging( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> GetBucketTaggingResult: + raise NotImplementedError + + @handler("GetBucketVersioning") + def get_bucket_versioning( + self, context: RequestContext, account_id: AccountId, bucket: BucketName, **kwargs + ) -> GetBucketVersioningResult: + raise NotImplementedError + + @handler("GetDataAccess") + def get_data_access( + self, + context: RequestContext, + account_id: AccountId, + target: S3Prefix, + permission: Permission, + duration_seconds: DurationSeconds | None = None, + privilege: Privilege | None = None, + target_type: S3PrefixType | None = None, + **kwargs, + ) -> GetDataAccessResult: + raise NotImplementedError + + @handler("GetJobTagging") + def get_job_tagging( + self, context: RequestContext, account_id: AccountId, job_id: JobId, **kwargs + ) -> GetJobTaggingResult: + raise NotImplementedError + + @handler("GetMultiRegionAccessPoint") + def get_multi_region_access_point( + self, + context: RequestContext, + account_id: AccountId, + name: MultiRegionAccessPointName, + **kwargs, + ) -> GetMultiRegionAccessPointResult: + raise NotImplementedError + + @handler("GetMultiRegionAccessPointPolicy") + def get_multi_region_access_point_policy( + self, + context: RequestContext, + account_id: AccountId, + 
name: MultiRegionAccessPointName, + **kwargs, + ) -> GetMultiRegionAccessPointPolicyResult: + raise NotImplementedError + + @handler("GetMultiRegionAccessPointPolicyStatus") + def get_multi_region_access_point_policy_status( + self, + context: RequestContext, + account_id: AccountId, + name: MultiRegionAccessPointName, + **kwargs, + ) -> GetMultiRegionAccessPointPolicyStatusResult: + raise NotImplementedError + + @handler("GetMultiRegionAccessPointRoutes") + def get_multi_region_access_point_routes( + self, + context: RequestContext, + account_id: AccountId, + mrap: MultiRegionAccessPointId, + **kwargs, + ) -> GetMultiRegionAccessPointRoutesResult: + raise NotImplementedError + + @handler("GetPublicAccessBlock") + def get_public_access_block( + self, context: RequestContext, account_id: AccountId, **kwargs + ) -> GetPublicAccessBlockOutput: + raise NotImplementedError + + @handler("GetStorageLensConfiguration") + def get_storage_lens_configuration( + self, context: RequestContext, config_id: ConfigId, account_id: AccountId, **kwargs + ) -> GetStorageLensConfigurationResult: + raise NotImplementedError + + @handler("GetStorageLensConfigurationTagging") + def get_storage_lens_configuration_tagging( + self, context: RequestContext, config_id: ConfigId, account_id: AccountId, **kwargs + ) -> GetStorageLensConfigurationTaggingResult: + raise NotImplementedError + + @handler("GetStorageLensGroup") + def get_storage_lens_group( + self, context: RequestContext, name: StorageLensGroupName, account_id: AccountId, **kwargs + ) -> GetStorageLensGroupResult: + raise NotImplementedError + + @handler("ListAccessGrants") + def list_access_grants( + self, + context: RequestContext, + account_id: AccountId, + next_token: ContinuationToken | None = None, + max_results: MaxResults | None = None, + grantee_type: GranteeType | None = None, + grantee_identifier: GranteeIdentifier | None = None, + permission: Permission | None = None, + grant_scope: S3Prefix | None = None, + application_arn: IdentityCenterApplicationArn | None = None, + **kwargs, + ) -> ListAccessGrantsResult: + raise NotImplementedError + + @handler("ListAccessGrantsInstances") + def list_access_grants_instances( + self, + context: RequestContext, + account_id: AccountId, + next_token: ContinuationToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListAccessGrantsInstancesResult: + raise NotImplementedError + + @handler("ListAccessGrantsLocations") + def list_access_grants_locations( + self, + context: RequestContext, + account_id: AccountId, + next_token: ContinuationToken | None = None, + max_results: MaxResults | None = None, + location_scope: S3Prefix | None = None, + **kwargs, + ) -> ListAccessGrantsLocationsResult: + raise NotImplementedError + + @handler("ListAccessPoints") + def list_access_points( + self, + context: RequestContext, + account_id: AccountId, + bucket: BucketName | None = None, + next_token: NonEmptyMaxLength1024String | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListAccessPointsResult: + raise NotImplementedError + + @handler("ListAccessPointsForDirectoryBuckets") + def list_access_points_for_directory_buckets( + self, + context: RequestContext, + account_id: AccountId, + directory_bucket: BucketName | None = None, + next_token: NonEmptyMaxLength1024String | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListAccessPointsForDirectoryBucketsResult: + raise NotImplementedError + + @handler("ListAccessPointsForObjectLambda") + def 
list_access_points_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + next_token: NonEmptyMaxLength1024String | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListAccessPointsForObjectLambdaResult: + raise NotImplementedError + + @handler("ListCallerAccessGrants") + def list_caller_access_grants( + self, + context: RequestContext, + account_id: AccountId, + grant_scope: S3Prefix | None = None, + next_token: ContinuationToken | None = None, + max_results: MaxResults | None = None, + allowed_by_application: Boolean | None = None, + **kwargs, + ) -> ListCallerAccessGrantsResult: + raise NotImplementedError + + @handler("ListJobs") + def list_jobs( + self, + context: RequestContext, + account_id: AccountId, + job_statuses: JobStatusList | None = None, + next_token: StringForNextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListJobsResult: + raise NotImplementedError + + @handler("ListMultiRegionAccessPoints") + def list_multi_region_access_points( + self, + context: RequestContext, + account_id: AccountId, + next_token: NonEmptyMaxLength1024String | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListMultiRegionAccessPointsResult: + raise NotImplementedError + + @handler("ListRegionalBuckets") + def list_regional_buckets( + self, + context: RequestContext, + account_id: AccountId, + next_token: NonEmptyMaxLength1024String | None = None, + max_results: MaxResults | None = None, + outpost_id: NonEmptyMaxLength64String | None = None, + **kwargs, + ) -> ListRegionalBucketsResult: + raise NotImplementedError + + @handler("ListStorageLensConfigurations") + def list_storage_lens_configurations( + self, + context: RequestContext, + account_id: AccountId, + next_token: ContinuationToken | None = None, + **kwargs, + ) -> ListStorageLensConfigurationsResult: + raise NotImplementedError + + @handler("ListStorageLensGroups") + def list_storage_lens_groups( + self, + context: RequestContext, + account_id: AccountId, + next_token: ContinuationToken | None = None, + **kwargs, + ) -> ListStorageLensGroupsResult: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, account_id: AccountId, resource_arn: S3ResourceArn, **kwargs + ) -> ListTagsForResourceResult: + raise NotImplementedError + + @handler("PutAccessGrantsInstanceResourcePolicy") + def put_access_grants_instance_resource_policy( + self, + context: RequestContext, + account_id: AccountId, + policy: PolicyDocument, + organization: Organization | None = None, + **kwargs, + ) -> PutAccessGrantsInstanceResourcePolicyResult: + raise NotImplementedError + + @handler("PutAccessPointConfigurationForObjectLambda") + def put_access_point_configuration_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + configuration: ObjectLambdaConfiguration, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutAccessPointPolicy") + def put_access_point_policy( + self, + context: RequestContext, + account_id: AccountId, + name: AccessPointName, + policy: Policy, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutAccessPointPolicyForObjectLambda") + def put_access_point_policy_for_object_lambda( + self, + context: RequestContext, + account_id: AccountId, + name: ObjectLambdaAccessPointName, + policy: ObjectLambdaPolicy, + **kwargs, + ) -> None: + raise NotImplementedError + + 
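+ # The @handler("OperationName") decorator above each stub records the wire
+ # operation name from the service definition, so the request dispatcher can
+ # route a parsed request to the matching provider method with the operation's
+ # members expanded into snake_case keyword arguments. Hedged usage sketch
+ # (the provider instance and all values are illustrative assumptions):
+ #
+ #   provider = S3ControlProvider()
+ #   provider.put_access_point_policy(
+ #       context,
+ #       account_id="000000000000",
+ #       name="example-ap",
+ #       policy='{"Version": "2012-10-17", "Statement": []}',
+ #   )
+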
@handler("PutAccessPointScope") + def put_access_point_scope( + self, + context: RequestContext, + account_id: AccountId, + name: AccessPointName, + scope: Scope, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketLifecycleConfiguration") + def put_bucket_lifecycle_configuration( + self, + context: RequestContext, + account_id: AccountId, + bucket: BucketName, + lifecycle_configuration: LifecycleConfiguration | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketPolicy") + def put_bucket_policy( + self, + context: RequestContext, + account_id: AccountId, + bucket: BucketName, + policy: Policy, + confirm_remove_self_bucket_access: ConfirmRemoveSelfBucketAccess | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketReplication") + def put_bucket_replication( + self, + context: RequestContext, + account_id: AccountId, + bucket: BucketName, + replication_configuration: ReplicationConfiguration, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketTagging") + def put_bucket_tagging( + self, + context: RequestContext, + account_id: AccountId, + bucket: BucketName, + tagging: Tagging, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutBucketVersioning") + def put_bucket_versioning( + self, + context: RequestContext, + account_id: AccountId, + bucket: BucketName, + versioning_configuration: VersioningConfiguration, + mfa: MFA | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutJobTagging") + def put_job_tagging( + self, + context: RequestContext, + account_id: AccountId, + job_id: JobId, + tags: S3TagSet, + **kwargs, + ) -> PutJobTaggingResult: + raise NotImplementedError + + @handler("PutMultiRegionAccessPointPolicy") + def put_multi_region_access_point_policy( + self, + context: RequestContext, + account_id: AccountId, + client_token: MultiRegionAccessPointClientToken, + details: PutMultiRegionAccessPointPolicyInput, + **kwargs, + ) -> PutMultiRegionAccessPointPolicyResult: + raise NotImplementedError + + @handler("PutPublicAccessBlock") + def put_public_access_block( + self, + context: RequestContext, + public_access_block_configuration: PublicAccessBlockConfiguration, + account_id: AccountId, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutStorageLensConfiguration") + def put_storage_lens_configuration( + self, + context: RequestContext, + config_id: ConfigId, + account_id: AccountId, + storage_lens_configuration: StorageLensConfiguration, + tags: StorageLensTags | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("PutStorageLensConfigurationTagging") + def put_storage_lens_configuration_tagging( + self, + context: RequestContext, + config_id: ConfigId, + account_id: AccountId, + tags: StorageLensTags, + **kwargs, + ) -> PutStorageLensConfigurationTaggingResult: + raise NotImplementedError + + @handler("SubmitMultiRegionAccessPointRoutes") + def submit_multi_region_access_point_routes( + self, + context: RequestContext, + account_id: AccountId, + mrap: MultiRegionAccessPointId, + route_updates: RouteList, + **kwargs, + ) -> SubmitMultiRegionAccessPointRoutesResult: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, + context: RequestContext, + account_id: AccountId, + resource_arn: S3ResourceArn, + tags: TagList, + **kwargs, + ) -> TagResourceResult: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, + 
context: RequestContext, + account_id: AccountId, + resource_arn: S3ResourceArn, + tag_keys: TagKeyList, + **kwargs, + ) -> UntagResourceResult: + raise NotImplementedError + + @handler("UpdateAccessGrantsLocation") + def update_access_grants_location( + self, + context: RequestContext, + account_id: AccountId, + access_grants_location_id: AccessGrantsLocationId, + iam_role_arn: IAMRoleArn, + **kwargs, + ) -> UpdateAccessGrantsLocationResult: + raise NotImplementedError + + @handler("UpdateJobPriority") + def update_job_priority( + self, + context: RequestContext, + account_id: AccountId, + job_id: JobId, + priority: JobPriority, + **kwargs, + ) -> UpdateJobPriorityResult: + raise NotImplementedError + + @handler("UpdateJobStatus") + def update_job_status( + self, + context: RequestContext, + account_id: AccountId, + job_id: JobId, + requested_job_status: RequestedJobStatus, + status_update_reason: JobStatusUpdateReason | None = None, + **kwargs, + ) -> UpdateJobStatusResult: + raise NotImplementedError + + @handler("UpdateStorageLensGroup") + def update_storage_lens_group( + self, + context: RequestContext, + name: StorageLensGroupName, + account_id: AccountId, + storage_lens_group: StorageLensGroup, + **kwargs, + ) -> None: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/scheduler/__init__.py b/localstack-core/localstack/aws/api/scheduler/__init__.py new file mode 100644 index 0000000000000..696814447cd11 --- /dev/null +++ b/localstack-core/localstack/aws/api/scheduler/__init__.py @@ -0,0 +1,588 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +CapacityProvider = str +CapacityProviderStrategyItemBase = int +CapacityProviderStrategyItemWeight = int +ClientToken = str +DeadLetterConfigArnString = str +Description = str +DetailType = str +EnableECSManagedTags = bool +EnableExecuteCommand = bool +Group = str +KmsKeyArn = str +MaxResults = int +MaximumEventAgeInSeconds = int +MaximumRetryAttempts = int +MaximumWindowInMinutes = int +MessageGroupId = str +Name = str +NamePrefix = str +NextToken = str +PlacementConstraintExpression = str +PlacementStrategyField = str +PlatformVersion = str +ReferenceId = str +RoleArn = str +SageMakerPipelineParameterName = str +SageMakerPipelineParameterValue = str +ScheduleArn = str +ScheduleExpression = str +ScheduleExpressionTimezone = str +ScheduleGroupArn = str +ScheduleGroupName = str +ScheduleGroupNamePrefix = str +SecurityGroup = str +Source = str +String = str +Subnet = str +TagKey = str +TagResourceArn = str +TagValue = str +TargetArn = str +TargetInput = str +TargetPartitionKey = str +TaskCount = int +TaskDefinitionArn = str + + +class ActionAfterCompletion(StrEnum): + NONE = "NONE" + DELETE = "DELETE" + + +class AssignPublicIp(StrEnum): + ENABLED = "ENABLED" + DISABLED = "DISABLED" + + +class FlexibleTimeWindowMode(StrEnum): + OFF = "OFF" + FLEXIBLE = "FLEXIBLE" + + +class LaunchType(StrEnum): + EC2 = "EC2" + FARGATE = "FARGATE" + EXTERNAL = "EXTERNAL" + + +class PlacementConstraintType(StrEnum): + distinctInstance = "distinctInstance" + memberOf = "memberOf" + + +class PlacementStrategyType(StrEnum): + random = "random" + spread = "spread" + binpack = "binpack" + + +class PropagateTags(StrEnum): + TASK_DEFINITION = "TASK_DEFINITION" + + +class ScheduleGroupState(StrEnum): + ACTIVE = "ACTIVE" + DELETING = "DELETING" + + +class ScheduleState(StrEnum): + ENABLED = 
"ENABLED" + DISABLED = "DISABLED" + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = True + status_code: int = 409 + + +class InternalServerException(ServiceException): + code: str = "InternalServerException" + sender_fault: bool = False + status_code: int = 500 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = True + status_code: int = 404 + + +class ServiceQuotaExceededException(ServiceException): + code: str = "ServiceQuotaExceededException" + sender_fault: bool = True + status_code: int = 402 + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = True + status_code: int = 429 + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = True + status_code: int = 400 + + +Subnets = List[Subnet] +SecurityGroups = List[SecurityGroup] + + +class AwsVpcConfiguration(TypedDict, total=False): + AssignPublicIp: Optional[AssignPublicIp] + SecurityGroups: Optional[SecurityGroups] + Subnets: Subnets + + +class CapacityProviderStrategyItem(TypedDict, total=False): + base: Optional[CapacityProviderStrategyItemBase] + capacityProvider: CapacityProvider + weight: Optional[CapacityProviderStrategyItemWeight] + + +CapacityProviderStrategy = List[CapacityProviderStrategyItem] + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] + + +class CreateScheduleGroupInput(ServiceRequest): + ClientToken: Optional[ClientToken] + Name: ScheduleGroupName + Tags: Optional[TagList] + + +class CreateScheduleGroupOutput(TypedDict, total=False): + ScheduleGroupArn: ScheduleGroupArn + + +class SqsParameters(TypedDict, total=False): + MessageGroupId: Optional[MessageGroupId] + + +class SageMakerPipelineParameter(TypedDict, total=False): + Name: SageMakerPipelineParameterName + Value: SageMakerPipelineParameterValue + + +SageMakerPipelineParameterList = List[SageMakerPipelineParameter] + + +class SageMakerPipelineParameters(TypedDict, total=False): + PipelineParameterList: Optional[SageMakerPipelineParameterList] + + +class RetryPolicy(TypedDict, total=False): + MaximumEventAgeInSeconds: Optional[MaximumEventAgeInSeconds] + MaximumRetryAttempts: Optional[MaximumRetryAttempts] + + +class KinesisParameters(TypedDict, total=False): + PartitionKey: TargetPartitionKey + + +class EventBridgeParameters(TypedDict, total=False): + DetailType: DetailType + Source: Source + + +TagMap = Dict[TagKey, TagValue] +Tags = List[TagMap] +PlacementStrategy = TypedDict( + "PlacementStrategy", + { + "field": Optional[PlacementStrategyField], + "type": Optional[PlacementStrategyType], + }, + total=False, +) +PlacementStrategies = List[PlacementStrategy] +PlacementConstraint = TypedDict( + "PlacementConstraint", + { + "expression": Optional[PlacementConstraintExpression], + "type": Optional[PlacementConstraintType], + }, + total=False, +) +PlacementConstraints = List[PlacementConstraint] + + +class NetworkConfiguration(TypedDict, total=False): + awsvpcConfiguration: Optional[AwsVpcConfiguration] + + +class EcsParameters(TypedDict, total=False): + CapacityProviderStrategy: Optional[CapacityProviderStrategy] + EnableECSManagedTags: Optional[EnableECSManagedTags] + EnableExecuteCommand: Optional[EnableExecuteCommand] + Group: Optional[Group] + LaunchType: Optional[LaunchType] + NetworkConfiguration: Optional[NetworkConfiguration] + PlacementConstraints: Optional[PlacementConstraints] + 
PlacementStrategy: Optional[PlacementStrategies] + PlatformVersion: Optional[PlatformVersion] + PropagateTags: Optional[PropagateTags] + ReferenceId: Optional[ReferenceId] + Tags: Optional[Tags] + TaskCount: Optional[TaskCount] + TaskDefinitionArn: TaskDefinitionArn + + +class DeadLetterConfig(TypedDict, total=False): + Arn: Optional[DeadLetterConfigArnString] + + +class Target(TypedDict, total=False): + Arn: TargetArn + DeadLetterConfig: Optional[DeadLetterConfig] + EcsParameters: Optional[EcsParameters] + EventBridgeParameters: Optional[EventBridgeParameters] + Input: Optional[TargetInput] + KinesisParameters: Optional[KinesisParameters] + RetryPolicy: Optional[RetryPolicy] + RoleArn: RoleArn + SageMakerPipelineParameters: Optional[SageMakerPipelineParameters] + SqsParameters: Optional[SqsParameters] + + +StartDate = datetime + + +class FlexibleTimeWindow(TypedDict, total=False): + MaximumWindowInMinutes: Optional[MaximumWindowInMinutes] + Mode: FlexibleTimeWindowMode + + +EndDate = datetime + + +class CreateScheduleInput(ServiceRequest): + ActionAfterCompletion: Optional[ActionAfterCompletion] + ClientToken: Optional[ClientToken] + Description: Optional[Description] + EndDate: Optional[EndDate] + FlexibleTimeWindow: FlexibleTimeWindow + GroupName: Optional[ScheduleGroupName] + KmsKeyArn: Optional[KmsKeyArn] + Name: Name + ScheduleExpression: ScheduleExpression + ScheduleExpressionTimezone: Optional[ScheduleExpressionTimezone] + StartDate: Optional[StartDate] + State: Optional[ScheduleState] + Target: Target + + +class CreateScheduleOutput(TypedDict, total=False): + ScheduleArn: ScheduleArn + + +CreationDate = datetime + + +class DeleteScheduleGroupInput(ServiceRequest): + ClientToken: Optional[ClientToken] + Name: ScheduleGroupName + + +class DeleteScheduleGroupOutput(TypedDict, total=False): + pass + + +class DeleteScheduleInput(ServiceRequest): + ClientToken: Optional[ClientToken] + GroupName: Optional[ScheduleGroupName] + Name: Name + + +class DeleteScheduleOutput(TypedDict, total=False): + pass + + +class GetScheduleGroupInput(ServiceRequest): + Name: ScheduleGroupName + + +LastModificationDate = datetime + + +class GetScheduleGroupOutput(TypedDict, total=False): + Arn: Optional[ScheduleGroupArn] + CreationDate: Optional[CreationDate] + LastModificationDate: Optional[LastModificationDate] + Name: Optional[ScheduleGroupName] + State: Optional[ScheduleGroupState] + + +class GetScheduleInput(ServiceRequest): + GroupName: Optional[ScheduleGroupName] + Name: Name + + +class GetScheduleOutput(TypedDict, total=False): + ActionAfterCompletion: Optional[ActionAfterCompletion] + Arn: Optional[ScheduleArn] + CreationDate: Optional[CreationDate] + Description: Optional[Description] + EndDate: Optional[EndDate] + FlexibleTimeWindow: Optional[FlexibleTimeWindow] + GroupName: Optional[ScheduleGroupName] + KmsKeyArn: Optional[KmsKeyArn] + LastModificationDate: Optional[LastModificationDate] + Name: Optional[Name] + ScheduleExpression: Optional[ScheduleExpression] + ScheduleExpressionTimezone: Optional[ScheduleExpressionTimezone] + StartDate: Optional[StartDate] + State: Optional[ScheduleState] + Target: Optional[Target] + + +class ListScheduleGroupsInput(ServiceRequest): + MaxResults: Optional[MaxResults] + NamePrefix: Optional[ScheduleGroupNamePrefix] + NextToken: Optional[NextToken] + + +class ScheduleGroupSummary(TypedDict, total=False): + Arn: Optional[ScheduleGroupArn] + CreationDate: Optional[CreationDate] + LastModificationDate: Optional[LastModificationDate] + Name: 
Optional[ScheduleGroupName] + State: Optional[ScheduleGroupState] + + +ScheduleGroupList = List[ScheduleGroupSummary] + + +class ListScheduleGroupsOutput(TypedDict, total=False): + NextToken: Optional[NextToken] + ScheduleGroups: ScheduleGroupList + + +class ListSchedulesInput(ServiceRequest): + GroupName: Optional[ScheduleGroupName] + MaxResults: Optional[MaxResults] + NamePrefix: Optional[NamePrefix] + NextToken: Optional[NextToken] + State: Optional[ScheduleState] + + +class TargetSummary(TypedDict, total=False): + Arn: TargetArn + + +class ScheduleSummary(TypedDict, total=False): + Arn: Optional[ScheduleArn] + CreationDate: Optional[CreationDate] + GroupName: Optional[ScheduleGroupName] + LastModificationDate: Optional[LastModificationDate] + Name: Optional[Name] + State: Optional[ScheduleState] + Target: Optional[TargetSummary] + + +ScheduleList = List[ScheduleSummary] + + +class ListSchedulesOutput(TypedDict, total=False): + NextToken: Optional[NextToken] + Schedules: ScheduleList + + +class ListTagsForResourceInput(ServiceRequest): + ResourceArn: TagResourceArn + + +class ListTagsForResourceOutput(TypedDict, total=False): + Tags: Optional[TagList] + + +TagKeyList = List[TagKey] + + +class TagResourceInput(ServiceRequest): + ResourceArn: TagResourceArn + Tags: TagList + + +class TagResourceOutput(TypedDict, total=False): + pass + + +class UntagResourceInput(ServiceRequest): + ResourceArn: TagResourceArn + TagKeys: TagKeyList + + +class UntagResourceOutput(TypedDict, total=False): + pass + + +class UpdateScheduleInput(ServiceRequest): + ActionAfterCompletion: Optional[ActionAfterCompletion] + ClientToken: Optional[ClientToken] + Description: Optional[Description] + EndDate: Optional[EndDate] + FlexibleTimeWindow: FlexibleTimeWindow + GroupName: Optional[ScheduleGroupName] + KmsKeyArn: Optional[KmsKeyArn] + Name: Name + ScheduleExpression: ScheduleExpression + ScheduleExpressionTimezone: Optional[ScheduleExpressionTimezone] + StartDate: Optional[StartDate] + State: Optional[ScheduleState] + Target: Target + + +class UpdateScheduleOutput(TypedDict, total=False): + ScheduleArn: ScheduleArn + + +class SchedulerApi: + service = "scheduler" + version = "2021-06-30" + + @handler("CreateSchedule") + def create_schedule( + self, + context: RequestContext, + flexible_time_window: FlexibleTimeWindow, + name: Name, + schedule_expression: ScheduleExpression, + target: Target, + action_after_completion: ActionAfterCompletion | None = None, + client_token: ClientToken | None = None, + description: Description | None = None, + end_date: EndDate | None = None, + group_name: ScheduleGroupName | None = None, + kms_key_arn: KmsKeyArn | None = None, + schedule_expression_timezone: ScheduleExpressionTimezone | None = None, + start_date: StartDate | None = None, + state: ScheduleState | None = None, + **kwargs, + ) -> CreateScheduleOutput: + raise NotImplementedError + + @handler("CreateScheduleGroup") + def create_schedule_group( + self, + context: RequestContext, + name: ScheduleGroupName, + client_token: ClientToken | None = None, + tags: TagList | None = None, + **kwargs, + ) -> CreateScheduleGroupOutput: + raise NotImplementedError + + @handler("DeleteSchedule") + def delete_schedule( + self, + context: RequestContext, + name: Name, + client_token: ClientToken | None = None, + group_name: ScheduleGroupName | None = None, + **kwargs, + ) -> DeleteScheduleOutput: + raise NotImplementedError + + @handler("DeleteScheduleGroup") + def delete_schedule_group( + self, + context: RequestContext, + name: 
ScheduleGroupName, + client_token: ClientToken | None = None, + **kwargs, + ) -> DeleteScheduleGroupOutput: + raise NotImplementedError + + @handler("GetSchedule") + def get_schedule( + self, + context: RequestContext, + name: Name, + group_name: ScheduleGroupName | None = None, + **kwargs, + ) -> GetScheduleOutput: + raise NotImplementedError + + @handler("GetScheduleGroup") + def get_schedule_group( + self, context: RequestContext, name: ScheduleGroupName, **kwargs + ) -> GetScheduleGroupOutput: + raise NotImplementedError + + @handler("ListScheduleGroups") + def list_schedule_groups( + self, + context: RequestContext, + max_results: MaxResults | None = None, + name_prefix: ScheduleGroupNamePrefix | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListScheduleGroupsOutput: + raise NotImplementedError + + @handler("ListSchedules") + def list_schedules( + self, + context: RequestContext, + group_name: ScheduleGroupName | None = None, + max_results: MaxResults | None = None, + name_prefix: NamePrefix | None = None, + next_token: NextToken | None = None, + state: ScheduleState | None = None, + **kwargs, + ) -> ListSchedulesOutput: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: TagResourceArn, **kwargs + ) -> ListTagsForResourceOutput: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: TagResourceArn, tags: TagList, **kwargs + ) -> TagResourceOutput: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, resource_arn: TagResourceArn, tag_keys: TagKeyList, **kwargs + ) -> UntagResourceOutput: + raise NotImplementedError + + @handler("UpdateSchedule") + def update_schedule( + self, + context: RequestContext, + flexible_time_window: FlexibleTimeWindow, + name: Name, + schedule_expression: ScheduleExpression, + target: Target, + action_after_completion: ActionAfterCompletion | None = None, + client_token: ClientToken | None = None, + description: Description | None = None, + end_date: EndDate | None = None, + group_name: ScheduleGroupName | None = None, + kms_key_arn: KmsKeyArn | None = None, + schedule_expression_timezone: ScheduleExpressionTimezone | None = None, + start_date: StartDate | None = None, + state: ScheduleState | None = None, + **kwargs, + ) -> UpdateScheduleOutput: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/secretsmanager/__init__.py b/localstack-core/localstack/aws/api/secretsmanager/__init__.py new file mode 100644 index 0000000000000..7e4704d8f34ac --- /dev/null +++ b/localstack-core/localstack/aws/api/secretsmanager/__init__.py @@ -0,0 +1,801 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +BooleanType = bool +ClientRequestTokenType = str +DescriptionType = str +DurationType = str +ErrorCode = str +ErrorMessage = str +ExcludeCharactersType = str +ExcludeLowercaseType = bool +ExcludeNumbersType = bool +ExcludePunctuationType = bool +ExcludeUppercaseType = bool +FilterValueStringType = str +IncludeSpaceType = bool +KmsKeyIdType = str +MaxResultsBatchType = int +MaxResultsType = int +NameType = str +NextTokenType = str +NonEmptyResourcePolicyType = str +OwningServiceType = str +RandomPasswordType = str +RegionType = str +RequireEachIncludedTypeType = 
bool +RotationEnabledType = bool +RotationLambdaARNType = str +RotationTokenType = str +ScheduleExpressionType = str +SecretARNType = str +SecretIdType = str +SecretNameType = str +SecretStringType = str +SecretVersionIdType = str +SecretVersionStageType = str +StatusMessageType = str +TagKeyType = str +TagValueType = str + + +class FilterNameStringType(StrEnum): + description = "description" + name = "name" + tag_key = "tag-key" + tag_value = "tag-value" + primary_region = "primary-region" + owning_service = "owning-service" + all = "all" + + +class SortOrderType(StrEnum): + asc = "asc" + desc = "desc" + + +class StatusType(StrEnum): + InSync = "InSync" + Failed = "Failed" + InProgress = "InProgress" + + +class DecryptionFailure(ServiceException): + code: str = "DecryptionFailure" + sender_fault: bool = False + status_code: int = 400 + + +class EncryptionFailure(ServiceException): + code: str = "EncryptionFailure" + sender_fault: bool = False + status_code: int = 400 + + +class InternalServiceError(ServiceException): + code: str = "InternalServiceError" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidNextTokenException(ServiceException): + code: str = "InvalidNextTokenException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidParameterException(ServiceException): + code: str = "InvalidParameterException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidRequestException(ServiceException): + code: str = "InvalidRequestException" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class MalformedPolicyDocumentException(ServiceException): + code: str = "MalformedPolicyDocumentException" + sender_fault: bool = False + status_code: int = 400 + + +class PreconditionNotMetException(ServiceException): + code: str = "PreconditionNotMetException" + sender_fault: bool = False + status_code: int = 400 + + +class PublicPolicyException(ServiceException): + code: str = "PublicPolicyException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceExistsException(ServiceException): + code: str = "ResourceExistsException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class APIErrorType(TypedDict, total=False): + SecretId: Optional[SecretIdType] + ErrorCode: Optional[ErrorCode] + Message: Optional[ErrorMessage] + + +APIErrorListType = List[APIErrorType] + + +class ReplicaRegionType(TypedDict, total=False): + Region: Optional[RegionType] + KmsKeyId: Optional[KmsKeyIdType] + + +AddReplicaRegionListType = List[ReplicaRegionType] +AutomaticallyRotateAfterDaysType = int +FilterValuesStringList = List[FilterValueStringType] + + +class Filter(TypedDict, total=False): + Key: Optional[FilterNameStringType] + Values: Optional[FilterValuesStringList] + + +FiltersListType = List[Filter] +SecretIdListType = List[SecretIdType] + + +class BatchGetSecretValueRequest(ServiceRequest): + SecretIdList: Optional[SecretIdListType] + Filters: Optional[FiltersListType] + MaxResults: Optional[MaxResultsBatchType] + NextToken: Optional[NextTokenType] + + +CreatedDateType = datetime +SecretVersionStagesType = List[SecretVersionStageType] +SecretBinaryType = bytes + + +class SecretValueEntry(TypedDict, total=False): + ARN: 
Optional[SecretARNType] + Name: Optional[SecretNameType] + VersionId: Optional[SecretVersionIdType] + SecretBinary: Optional[SecretBinaryType] + SecretString: Optional[SecretStringType] + VersionStages: Optional[SecretVersionStagesType] + CreatedDate: Optional[CreatedDateType] + + +SecretValuesType = List[SecretValueEntry] + + +class BatchGetSecretValueResponse(TypedDict, total=False): + SecretValues: Optional[SecretValuesType] + NextToken: Optional[NextTokenType] + Errors: Optional[APIErrorListType] + + +class CancelRotateSecretRequest(ServiceRequest): + SecretId: SecretIdType + + +class CancelRotateSecretResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + VersionId: Optional[SecretVersionIdType] + + +class Tag(TypedDict, total=False): + Key: Optional[TagKeyType] + Value: Optional[TagValueType] + + +TagListType = List[Tag] + + +class CreateSecretRequest(ServiceRequest): + Name: NameType + ClientRequestToken: Optional[ClientRequestTokenType] + Description: Optional[DescriptionType] + KmsKeyId: Optional[KmsKeyIdType] + SecretBinary: Optional[SecretBinaryType] + SecretString: Optional[SecretStringType] + Tags: Optional[TagListType] + AddReplicaRegions: Optional[AddReplicaRegionListType] + ForceOverwriteReplicaSecret: Optional[BooleanType] + + +LastAccessedDateType = datetime + + +class ReplicationStatusType(TypedDict, total=False): + Region: Optional[RegionType] + KmsKeyId: Optional[KmsKeyIdType] + Status: Optional[StatusType] + StatusMessage: Optional[StatusMessageType] + LastAccessedDate: Optional[LastAccessedDateType] + + +ReplicationStatusListType = List[ReplicationStatusType] + + +class CreateSecretResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + VersionId: Optional[SecretVersionIdType] + ReplicationStatus: Optional[ReplicationStatusListType] + + +class DeleteResourcePolicyRequest(ServiceRequest): + SecretId: SecretIdType + + +class DeleteResourcePolicyResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[NameType] + + +RecoveryWindowInDaysType = int + + +class DeleteSecretRequest(ServiceRequest): + SecretId: SecretIdType + RecoveryWindowInDays: Optional[RecoveryWindowInDaysType] + ForceDeleteWithoutRecovery: Optional[BooleanType] + + +DeletionDateType = datetime + + +class DeleteSecretResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + DeletionDate: Optional[DeletionDateType] + + +DeletedDateType = datetime + + +class DescribeSecretRequest(ServiceRequest): + SecretId: SecretIdType + + +TimestampType = datetime +SecretVersionsToStagesMapType = Dict[SecretVersionIdType, SecretVersionStagesType] +NextRotationDateType = datetime +LastChangedDateType = datetime +LastRotatedDateType = datetime + + +class RotationRulesType(TypedDict, total=False): + AutomaticallyAfterDays: Optional[AutomaticallyRotateAfterDaysType] + Duration: Optional[DurationType] + ScheduleExpression: Optional[ScheduleExpressionType] + + +class DescribeSecretResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + Description: Optional[DescriptionType] + KmsKeyId: Optional[KmsKeyIdType] + RotationEnabled: Optional[RotationEnabledType] + RotationLambdaARN: Optional[RotationLambdaARNType] + RotationRules: Optional[RotationRulesType] + LastRotatedDate: Optional[LastRotatedDateType] + LastChangedDate: Optional[LastChangedDateType] + LastAccessedDate: Optional[LastAccessedDateType] + DeletedDate: 
Optional[DeletedDateType] + NextRotationDate: Optional[NextRotationDateType] + Tags: Optional[TagListType] + VersionIdsToStages: Optional[SecretVersionsToStagesMapType] + OwningService: Optional[OwningServiceType] + CreatedDate: Optional[TimestampType] + PrimaryRegion: Optional[RegionType] + ReplicationStatus: Optional[ReplicationStatusListType] + + +PasswordLengthType = int + + +class GetRandomPasswordRequest(ServiceRequest): + PasswordLength: Optional[PasswordLengthType] + ExcludeCharacters: Optional[ExcludeCharactersType] + ExcludeNumbers: Optional[ExcludeNumbersType] + ExcludePunctuation: Optional[ExcludePunctuationType] + ExcludeUppercase: Optional[ExcludeUppercaseType] + ExcludeLowercase: Optional[ExcludeLowercaseType] + IncludeSpace: Optional[IncludeSpaceType] + RequireEachIncludedType: Optional[RequireEachIncludedTypeType] + + +class GetRandomPasswordResponse(TypedDict, total=False): + RandomPassword: Optional[RandomPasswordType] + + +class GetResourcePolicyRequest(ServiceRequest): + SecretId: SecretIdType + + +class GetResourcePolicyResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[NameType] + ResourcePolicy: Optional[NonEmptyResourcePolicyType] + + +class GetSecretValueRequest(ServiceRequest): + SecretId: SecretIdType + VersionId: Optional[SecretVersionIdType] + VersionStage: Optional[SecretVersionStageType] + + +class GetSecretValueResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + VersionId: Optional[SecretVersionIdType] + SecretBinary: Optional[SecretBinaryType] + SecretString: Optional[SecretStringType] + VersionStages: Optional[SecretVersionStagesType] + CreatedDate: Optional[CreatedDateType] + + +KmsKeyIdListType = List[KmsKeyIdType] + + +class ListSecretVersionIdsRequest(ServiceRequest): + SecretId: SecretIdType + MaxResults: Optional[MaxResultsType] + NextToken: Optional[NextTokenType] + IncludeDeprecated: Optional[BooleanType] + + +class SecretVersionsListEntry(TypedDict, total=False): + VersionId: Optional[SecretVersionIdType] + VersionStages: Optional[SecretVersionStagesType] + LastAccessedDate: Optional[LastAccessedDateType] + CreatedDate: Optional[CreatedDateType] + KmsKeyIds: Optional[KmsKeyIdListType] + + +SecretVersionsListType = List[SecretVersionsListEntry] + + +class ListSecretVersionIdsResponse(TypedDict, total=False): + Versions: Optional[SecretVersionsListType] + NextToken: Optional[NextTokenType] + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + + +class ListSecretsRequest(ServiceRequest): + IncludePlannedDeletion: Optional[BooleanType] + MaxResults: Optional[MaxResultsType] + NextToken: Optional[NextTokenType] + Filters: Optional[FiltersListType] + SortOrder: Optional[SortOrderType] + + +class SecretListEntry(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + Description: Optional[DescriptionType] + KmsKeyId: Optional[KmsKeyIdType] + RotationEnabled: Optional[RotationEnabledType] + RotationLambdaARN: Optional[RotationLambdaARNType] + RotationRules: Optional[RotationRulesType] + LastRotatedDate: Optional[LastRotatedDateType] + LastChangedDate: Optional[LastChangedDateType] + LastAccessedDate: Optional[LastAccessedDateType] + DeletedDate: Optional[DeletedDateType] + NextRotationDate: Optional[NextRotationDateType] + Tags: Optional[TagListType] + SecretVersionsToStages: Optional[SecretVersionsToStagesMapType] + OwningService: Optional[OwningServiceType] + CreatedDate: Optional[TimestampType] + PrimaryRegion: 
Optional[RegionType] + + +SecretListType = List[SecretListEntry] + + +class ListSecretsResponse(TypedDict, total=False): + SecretList: Optional[SecretListType] + NextToken: Optional[NextTokenType] + + +class PutResourcePolicyRequest(ServiceRequest): + SecretId: SecretIdType + ResourcePolicy: NonEmptyResourcePolicyType + BlockPublicPolicy: Optional[BooleanType] + + +class PutResourcePolicyResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[NameType] + + +class PutSecretValueRequest(ServiceRequest): + SecretId: SecretIdType + ClientRequestToken: Optional[ClientRequestTokenType] + SecretBinary: Optional[SecretBinaryType] + SecretString: Optional[SecretStringType] + VersionStages: Optional[SecretVersionStagesType] + RotationToken: Optional[RotationTokenType] + + +class PutSecretValueResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + VersionId: Optional[SecretVersionIdType] + VersionStages: Optional[SecretVersionStagesType] + + +RemoveReplicaRegionListType = List[RegionType] + + +class RemoveRegionsFromReplicationRequest(ServiceRequest): + SecretId: SecretIdType + RemoveReplicaRegions: RemoveReplicaRegionListType + + +class RemoveRegionsFromReplicationResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + ReplicationStatus: Optional[ReplicationStatusListType] + + +class ReplicateSecretToRegionsRequest(ServiceRequest): + SecretId: SecretIdType + AddReplicaRegions: AddReplicaRegionListType + ForceOverwriteReplicaSecret: Optional[BooleanType] + + +class ReplicateSecretToRegionsResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + ReplicationStatus: Optional[ReplicationStatusListType] + + +class RestoreSecretRequest(ServiceRequest): + SecretId: SecretIdType + + +class RestoreSecretResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + + +class RotateSecretRequest(ServiceRequest): + SecretId: SecretIdType + ClientRequestToken: Optional[ClientRequestTokenType] + RotationLambdaARN: Optional[RotationLambdaARNType] + RotationRules: Optional[RotationRulesType] + RotateImmediately: Optional[BooleanType] + + +class RotateSecretResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + VersionId: Optional[SecretVersionIdType] + + +class StopReplicationToReplicaRequest(ServiceRequest): + SecretId: SecretIdType + + +class StopReplicationToReplicaResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + + +TagKeyListType = List[TagKeyType] + + +class TagResourceRequest(ServiceRequest): + SecretId: SecretIdType + Tags: TagListType + + +class UntagResourceRequest(ServiceRequest): + SecretId: SecretIdType + TagKeys: TagKeyListType + + +class UpdateSecretRequest(ServiceRequest): + SecretId: SecretIdType + ClientRequestToken: Optional[ClientRequestTokenType] + Description: Optional[DescriptionType] + KmsKeyId: Optional[KmsKeyIdType] + SecretBinary: Optional[SecretBinaryType] + SecretString: Optional[SecretStringType] + + +class UpdateSecretResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: Optional[SecretNameType] + VersionId: Optional[SecretVersionIdType] + + +class UpdateSecretVersionStageRequest(ServiceRequest): + SecretId: SecretIdType + VersionStage: SecretVersionStageType + RemoveFromVersionId: Optional[SecretVersionIdType] + MoveToVersionId: Optional[SecretVersionIdType] + + +class UpdateSecretVersionStageResponse(TypedDict, total=False): + ARN: Optional[SecretARNType] + Name: 
Optional[SecretNameType] + + +class ValidateResourcePolicyRequest(ServiceRequest): + SecretId: Optional[SecretIdType] + ResourcePolicy: NonEmptyResourcePolicyType + + +class ValidationErrorsEntry(TypedDict, total=False): + CheckName: Optional[NameType] + ErrorMessage: Optional[ErrorMessage] + + +ValidationErrorsType = List[ValidationErrorsEntry] + + +class ValidateResourcePolicyResponse(TypedDict, total=False): + PolicyValidationPassed: Optional[BooleanType] + ValidationErrors: Optional[ValidationErrorsType] + + +class SecretsmanagerApi: + service = "secretsmanager" + version = "2017-10-17" + + @handler("BatchGetSecretValue") + def batch_get_secret_value( + self, + context: RequestContext, + secret_id_list: SecretIdListType | None = None, + filters: FiltersListType | None = None, + max_results: MaxResultsBatchType | None = None, + next_token: NextTokenType | None = None, + **kwargs, + ) -> BatchGetSecretValueResponse: + raise NotImplementedError + + @handler("CancelRotateSecret") + def cancel_rotate_secret( + self, context: RequestContext, secret_id: SecretIdType, **kwargs + ) -> CancelRotateSecretResponse: + raise NotImplementedError + + @handler("CreateSecret") + def create_secret( + self, + context: RequestContext, + name: NameType, + client_request_token: ClientRequestTokenType | None = None, + description: DescriptionType | None = None, + kms_key_id: KmsKeyIdType | None = None, + secret_binary: SecretBinaryType | None = None, + secret_string: SecretStringType | None = None, + tags: TagListType | None = None, + add_replica_regions: AddReplicaRegionListType | None = None, + force_overwrite_replica_secret: BooleanType | None = None, + **kwargs, + ) -> CreateSecretResponse: + raise NotImplementedError + + @handler("DeleteResourcePolicy") + def delete_resource_policy( + self, context: RequestContext, secret_id: SecretIdType, **kwargs + ) -> DeleteResourcePolicyResponse: + raise NotImplementedError + + @handler("DeleteSecret") + def delete_secret( + self, + context: RequestContext, + secret_id: SecretIdType, + recovery_window_in_days: RecoveryWindowInDaysType | None = None, + force_delete_without_recovery: BooleanType | None = None, + **kwargs, + ) -> DeleteSecretResponse: + raise NotImplementedError + + @handler("DescribeSecret") + def describe_secret( + self, context: RequestContext, secret_id: SecretIdType, **kwargs + ) -> DescribeSecretResponse: + raise NotImplementedError + + @handler("GetRandomPassword") + def get_random_password( + self, + context: RequestContext, + password_length: PasswordLengthType | None = None, + exclude_characters: ExcludeCharactersType | None = None, + exclude_numbers: ExcludeNumbersType | None = None, + exclude_punctuation: ExcludePunctuationType | None = None, + exclude_uppercase: ExcludeUppercaseType | None = None, + exclude_lowercase: ExcludeLowercaseType | None = None, + include_space: IncludeSpaceType | None = None, + require_each_included_type: RequireEachIncludedTypeType | None = None, + **kwargs, + ) -> GetRandomPasswordResponse: + raise NotImplementedError + + @handler("GetResourcePolicy") + def get_resource_policy( + self, context: RequestContext, secret_id: SecretIdType, **kwargs + ) -> GetResourcePolicyResponse: + raise NotImplementedError + + @handler("GetSecretValue") + def get_secret_value( + self, + context: RequestContext, + secret_id: SecretIdType, + version_id: SecretVersionIdType | None = None, + version_stage: SecretVersionStageType | None = None, + **kwargs, + ) -> GetSecretValueResponse: + raise NotImplementedError + + 
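
The SecretsmanagerApi skeleton being added here wires each method to its wire-protocol operation via the @handler decorator and raises NotImplementedError until a concrete provider overrides it. A minimal sketch of what such an override could look like, assuming a purely illustrative in-memory dict as the backing store (the provider class name and the _secrets attribute are hypothetical, not part of this patch; the operation-name mapping comes from the decorated stub in the generated base class, so the override itself needs no decorator):

from localstack.aws.api import RequestContext
from localstack.aws.api.secretsmanager import (
    GetSecretValueResponse,
    ResourceNotFoundException,
    SecretIdType,
    SecretsmanagerApi,
    SecretVersionIdType,
    SecretVersionStageType,
)


class InMemorySecretsmanagerProvider(SecretsmanagerApi):
    # Hypothetical store: secret name -> secret string.
    def __init__(self):
        self._secrets: dict[str, str] = {}

    def get_secret_value(
        self,
        context: RequestContext,
        secret_id: SecretIdType,
        version_id: SecretVersionIdType | None = None,
        version_stage: SecretVersionStageType | None = None,
        **kwargs,
    ) -> GetSecretValueResponse:
        if secret_id not in self._secrets:
            raise ResourceNotFoundException("Secrets Manager can't find the specified secret.")
        # Response shapes are TypedDicts declared with total=False,
        # so returning any subset of the declared keys is valid.
        return GetSecretValueResponse(Name=secret_id, SecretString=self._secrets[secret_id])
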
@handler("ListSecretVersionIds") + def list_secret_version_ids( + self, + context: RequestContext, + secret_id: SecretIdType, + max_results: MaxResultsType | None = None, + next_token: NextTokenType | None = None, + include_deprecated: BooleanType | None = None, + **kwargs, + ) -> ListSecretVersionIdsResponse: + raise NotImplementedError + + @handler("ListSecrets") + def list_secrets( + self, + context: RequestContext, + include_planned_deletion: BooleanType | None = None, + max_results: MaxResultsType | None = None, + next_token: NextTokenType | None = None, + filters: FiltersListType | None = None, + sort_order: SortOrderType | None = None, + **kwargs, + ) -> ListSecretsResponse: + raise NotImplementedError + + @handler("PutResourcePolicy") + def put_resource_policy( + self, + context: RequestContext, + secret_id: SecretIdType, + resource_policy: NonEmptyResourcePolicyType, + block_public_policy: BooleanType | None = None, + **kwargs, + ) -> PutResourcePolicyResponse: + raise NotImplementedError + + @handler("PutSecretValue") + def put_secret_value( + self, + context: RequestContext, + secret_id: SecretIdType, + client_request_token: ClientRequestTokenType | None = None, + secret_binary: SecretBinaryType | None = None, + secret_string: SecretStringType | None = None, + version_stages: SecretVersionStagesType | None = None, + rotation_token: RotationTokenType | None = None, + **kwargs, + ) -> PutSecretValueResponse: + raise NotImplementedError + + @handler("RemoveRegionsFromReplication") + def remove_regions_from_replication( + self, + context: RequestContext, + secret_id: SecretIdType, + remove_replica_regions: RemoveReplicaRegionListType, + **kwargs, + ) -> RemoveRegionsFromReplicationResponse: + raise NotImplementedError + + @handler("ReplicateSecretToRegions") + def replicate_secret_to_regions( + self, + context: RequestContext, + secret_id: SecretIdType, + add_replica_regions: AddReplicaRegionListType, + force_overwrite_replica_secret: BooleanType | None = None, + **kwargs, + ) -> ReplicateSecretToRegionsResponse: + raise NotImplementedError + + @handler("RestoreSecret") + def restore_secret( + self, context: RequestContext, secret_id: SecretIdType, **kwargs + ) -> RestoreSecretResponse: + raise NotImplementedError + + @handler("RotateSecret") + def rotate_secret( + self, + context: RequestContext, + secret_id: SecretIdType, + client_request_token: ClientRequestTokenType | None = None, + rotation_lambda_arn: RotationLambdaARNType | None = None, + rotation_rules: RotationRulesType | None = None, + rotate_immediately: BooleanType | None = None, + **kwargs, + ) -> RotateSecretResponse: + raise NotImplementedError + + @handler("StopReplicationToReplica") + def stop_replication_to_replica( + self, context: RequestContext, secret_id: SecretIdType, **kwargs + ) -> StopReplicationToReplicaResponse: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, secret_id: SecretIdType, tags: TagListType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, secret_id: SecretIdType, tag_keys: TagKeyListType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UpdateSecret") + def update_secret( + self, + context: RequestContext, + secret_id: SecretIdType, + client_request_token: ClientRequestTokenType | None = None, + description: DescriptionType | None = None, + kms_key_id: KmsKeyIdType | None = None, + secret_binary: SecretBinaryType | None = None, + 
secret_string: SecretStringType | None = None, + **kwargs, + ) -> UpdateSecretResponse: + raise NotImplementedError + + @handler("UpdateSecretVersionStage") + def update_secret_version_stage( + self, + context: RequestContext, + secret_id: SecretIdType, + version_stage: SecretVersionStageType, + remove_from_version_id: SecretVersionIdType | None = None, + move_to_version_id: SecretVersionIdType | None = None, + **kwargs, + ) -> UpdateSecretVersionStageResponse: + raise NotImplementedError + + @handler("ValidateResourcePolicy") + def validate_resource_policy( + self, + context: RequestContext, + resource_policy: NonEmptyResourcePolicyType, + secret_id: SecretIdType | None = None, + **kwargs, + ) -> ValidateResourcePolicyResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/ses/__init__.py b/localstack-core/localstack/aws/api/ses/__init__.py new file mode 100644 index 0000000000000..26e3b38f45cf1 --- /dev/null +++ b/localstack-core/localstack/aws/api/ses/__init__.py @@ -0,0 +1,1991 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +Address = str +AmazonResourceName = str +BounceMessage = str +BounceSmtpReplyCode = str +BounceStatusCode = str +Charset = str +Cidr = str +ConfigurationSetName = str +ConnectInstanceArn = str +CustomRedirectDomain = str +DefaultDimensionValue = str +DiagnosticCode = str +DimensionName = str +Domain = str +DsnStatus = str +Enabled = bool +Error = str +EventDestinationName = str +Explanation = str +ExtensionFieldName = str +ExtensionFieldValue = str +FailureRedirectionURL = str +FromAddress = str +HeaderName = str +HeaderValue = str +HtmlPart = str +IAMRoleARN = str +Identity = str +MailFromDomainName = str +Max24HourSend = float +MaxItems = int +MaxResults = int +MaxSendRate = float +MessageData = str +MessageId = str +MessageTagName = str +MessageTagValue = str +NextToken = str +NotificationTopic = str +Policy = str +PolicyName = str +ReceiptFilterName = str +ReceiptRuleName = str +ReceiptRuleSetName = str +Recipient = str +RemoteMta = str +RenderedTemplate = str +ReportingMta = str +RuleOrRuleSetName = str +S3BucketName = str +S3KeyPrefix = str +SentLast24Hours = float +Subject = str +SubjectPart = str +SuccessRedirectionURL = str +TemplateContent = str +TemplateData = str +TemplateName = str +TextPart = str +VerificationToken = str + + +class BehaviorOnMXFailure(StrEnum): + UseDefaultValue = "UseDefaultValue" + RejectMessage = "RejectMessage" + + +class BounceType(StrEnum): + DoesNotExist = "DoesNotExist" + MessageTooLarge = "MessageTooLarge" + ExceededQuota = "ExceededQuota" + ContentRejected = "ContentRejected" + Undefined = "Undefined" + TemporaryFailure = "TemporaryFailure" + + +class BulkEmailStatus(StrEnum): + Success = "Success" + MessageRejected = "MessageRejected" + MailFromDomainNotVerified = "MailFromDomainNotVerified" + ConfigurationSetDoesNotExist = "ConfigurationSetDoesNotExist" + TemplateDoesNotExist = "TemplateDoesNotExist" + AccountSuspended = "AccountSuspended" + AccountThrottled = "AccountThrottled" + AccountDailyQuotaExceeded = "AccountDailyQuotaExceeded" + InvalidSendingPoolName = "InvalidSendingPoolName" + AccountSendingPaused = "AccountSendingPaused" + ConfigurationSetSendingPaused = "ConfigurationSetSendingPaused" + InvalidParameterValue = "InvalidParameterValue" + TransientFailure = "TransientFailure" + Failed = "Failed" + + +class 
ConfigurationSetAttribute(StrEnum): + eventDestinations = "eventDestinations" + trackingOptions = "trackingOptions" + deliveryOptions = "deliveryOptions" + reputationOptions = "reputationOptions" + + +class CustomMailFromStatus(StrEnum): + Pending = "Pending" + Success = "Success" + Failed = "Failed" + TemporaryFailure = "TemporaryFailure" + + +class DimensionValueSource(StrEnum): + messageTag = "messageTag" + emailHeader = "emailHeader" + linkTag = "linkTag" + + +class DsnAction(StrEnum): + failed = "failed" + delayed = "delayed" + delivered = "delivered" + relayed = "relayed" + expanded = "expanded" + + +class EventType(StrEnum): + send = "send" + reject = "reject" + bounce = "bounce" + complaint = "complaint" + delivery = "delivery" + open = "open" + click = "click" + renderingFailure = "renderingFailure" + + +class IdentityType(StrEnum): + EmailAddress = "EmailAddress" + Domain = "Domain" + + +class InvocationType(StrEnum): + Event = "Event" + RequestResponse = "RequestResponse" + + +class NotificationType(StrEnum): + Bounce = "Bounce" + Complaint = "Complaint" + Delivery = "Delivery" + + +class ReceiptFilterPolicy(StrEnum): + Block = "Block" + Allow = "Allow" + + +class SNSActionEncoding(StrEnum): + UTF_8 = "UTF-8" + Base64 = "Base64" + + +class StopScope(StrEnum): + RuleSet = "RuleSet" + + +class TlsPolicy(StrEnum): + Require = "Require" + Optional_ = "Optional" + + +class VerificationStatus(StrEnum): + Pending = "Pending" + Success = "Success" + Failed = "Failed" + TemporaryFailure = "TemporaryFailure" + NotStarted = "NotStarted" + + +class AccountSendingPausedException(ServiceException): + code: str = "AccountSendingPausedException" + sender_fault: bool = True + status_code: int = 400 + + +class AlreadyExistsException(ServiceException): + code: str = "AlreadyExists" + sender_fault: bool = True + status_code: int = 400 + Name: Optional[RuleOrRuleSetName] + + +class CannotDeleteException(ServiceException): + code: str = "CannotDelete" + sender_fault: bool = True + status_code: int = 400 + Name: Optional[RuleOrRuleSetName] + + +class ConfigurationSetAlreadyExistsException(ServiceException): + code: str = "ConfigurationSetAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + + +class ConfigurationSetDoesNotExistException(ServiceException): + code: str = "ConfigurationSetDoesNotExist" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + + +class ConfigurationSetSendingPausedException(ServiceException): + code: str = "ConfigurationSetSendingPausedException" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + + +class CustomVerificationEmailInvalidContentException(ServiceException): + code: str = "CustomVerificationEmailInvalidContent" + sender_fault: bool = True + status_code: int = 400 + + +class CustomVerificationEmailTemplateAlreadyExistsException(ServiceException): + code: str = "CustomVerificationEmailTemplateAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + CustomVerificationEmailTemplateName: Optional[TemplateName] + + +class CustomVerificationEmailTemplateDoesNotExistException(ServiceException): + code: str = "CustomVerificationEmailTemplateDoesNotExist" + sender_fault: bool = True + status_code: int = 400 + CustomVerificationEmailTemplateName: Optional[TemplateName] + + +class EventDestinationAlreadyExistsException(ServiceException): + code: str = 
"EventDestinationAlreadyExists" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + EventDestinationName: Optional[EventDestinationName] + + +class EventDestinationDoesNotExistException(ServiceException): + code: str = "EventDestinationDoesNotExist" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + EventDestinationName: Optional[EventDestinationName] + + +class FromEmailAddressNotVerifiedException(ServiceException): + code: str = "FromEmailAddressNotVerified" + sender_fault: bool = True + status_code: int = 400 + FromEmailAddress: Optional[FromAddress] + + +class InvalidCloudWatchDestinationException(ServiceException): + code: str = "InvalidCloudWatchDestination" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + EventDestinationName: Optional[EventDestinationName] + + +class InvalidConfigurationSetException(ServiceException): + code: str = "InvalidConfigurationSet" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidDeliveryOptionsException(ServiceException): + code: str = "InvalidDeliveryOptions" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidFirehoseDestinationException(ServiceException): + code: str = "InvalidFirehoseDestination" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + EventDestinationName: Optional[EventDestinationName] + + +class InvalidLambdaFunctionException(ServiceException): + code: str = "InvalidLambdaFunction" + sender_fault: bool = True + status_code: int = 400 + FunctionArn: Optional[AmazonResourceName] + + +class InvalidPolicyException(ServiceException): + code: str = "InvalidPolicy" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidRenderingParameterException(ServiceException): + code: str = "InvalidRenderingParameter" + sender_fault: bool = True + status_code: int = 400 + TemplateName: Optional[TemplateName] + + +class InvalidS3ConfigurationException(ServiceException): + code: str = "InvalidS3Configuration" + sender_fault: bool = True + status_code: int = 400 + Bucket: Optional[S3BucketName] + + +class InvalidSNSDestinationException(ServiceException): + code: str = "InvalidSNSDestination" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + EventDestinationName: Optional[EventDestinationName] + + +class InvalidSnsTopicException(ServiceException): + code: str = "InvalidSnsTopic" + sender_fault: bool = True + status_code: int = 400 + Topic: Optional[AmazonResourceName] + + +class InvalidTemplateException(ServiceException): + code: str = "InvalidTemplate" + sender_fault: bool = True + status_code: int = 400 + TemplateName: Optional[TemplateName] + + +class InvalidTrackingOptionsException(ServiceException): + code: str = "InvalidTrackingOptions" + sender_fault: bool = True + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class MailFromDomainNotVerifiedException(ServiceException): + code: str = "MailFromDomainNotVerifiedException" + sender_fault: bool = True + status_code: int = 400 + + +class MessageRejected(ServiceException): + code: str = "MessageRejected" + sender_fault: bool = True + status_code: int = 400 + + +class MissingRenderingAttributeException(ServiceException): + code: str = 
"MissingRenderingAttribute" + sender_fault: bool = True + status_code: int = 400 + TemplateName: Optional[TemplateName] + + +class ProductionAccessNotGrantedException(ServiceException): + code: str = "ProductionAccessNotGranted" + sender_fault: bool = True + status_code: int = 400 + + +class RuleDoesNotExistException(ServiceException): + code: str = "RuleDoesNotExist" + sender_fault: bool = True + status_code: int = 400 + Name: Optional[RuleOrRuleSetName] + + +class RuleSetDoesNotExistException(ServiceException): + code: str = "RuleSetDoesNotExist" + sender_fault: bool = True + status_code: int = 400 + Name: Optional[RuleOrRuleSetName] + + +class TemplateDoesNotExistException(ServiceException): + code: str = "TemplateDoesNotExist" + sender_fault: bool = True + status_code: int = 400 + TemplateName: Optional[TemplateName] + + +class TrackingOptionsAlreadyExistsException(ServiceException): + code: str = "TrackingOptionsAlreadyExistsException" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + + +class TrackingOptionsDoesNotExistException(ServiceException): + code: str = "TrackingOptionsDoesNotExistException" + sender_fault: bool = True + status_code: int = 400 + ConfigurationSetName: Optional[ConfigurationSetName] + + +class AddHeaderAction(TypedDict, total=False): + HeaderName: HeaderName + HeaderValue: HeaderValue + + +AddressList = List[Address] +ArrivalDate = datetime + + +class Content(TypedDict, total=False): + Data: MessageData + Charset: Optional[Charset] + + +class Body(TypedDict, total=False): + Text: Optional[Content] + Html: Optional[Content] + + +class BounceAction(TypedDict, total=False): + TopicArn: Optional[AmazonResourceName] + SmtpReplyCode: BounceSmtpReplyCode + StatusCode: Optional[BounceStatusCode] + Message: BounceMessage + Sender: Address + + +class ExtensionField(TypedDict, total=False): + Name: ExtensionFieldName + Value: ExtensionFieldValue + + +ExtensionFieldList = List[ExtensionField] +LastAttemptDate = datetime + + +class RecipientDsnFields(TypedDict, total=False): + FinalRecipient: Optional[Address] + Action: DsnAction + RemoteMta: Optional[RemoteMta] + Status: DsnStatus + DiagnosticCode: Optional[DiagnosticCode] + LastAttemptDate: Optional[LastAttemptDate] + ExtensionFields: Optional[ExtensionFieldList] + + +class BouncedRecipientInfo(TypedDict, total=False): + Recipient: Address + RecipientArn: Optional[AmazonResourceName] + BounceType: Optional[BounceType] + RecipientDsnFields: Optional[RecipientDsnFields] + + +BouncedRecipientInfoList = List[BouncedRecipientInfo] + + +class MessageTag(TypedDict, total=False): + Name: MessageTagName + Value: MessageTagValue + + +MessageTagList = List[MessageTag] + + +class Destination(TypedDict, total=False): + ToAddresses: Optional[AddressList] + CcAddresses: Optional[AddressList] + BccAddresses: Optional[AddressList] + + +class BulkEmailDestination(TypedDict, total=False): + Destination: Destination + ReplacementTags: Optional[MessageTagList] + ReplacementTemplateData: Optional[TemplateData] + + +BulkEmailDestinationList = List[BulkEmailDestination] + + +class BulkEmailDestinationStatus(TypedDict, total=False): + Status: Optional[BulkEmailStatus] + Error: Optional[Error] + MessageId: Optional[MessageId] + + +BulkEmailDestinationStatusList = List[BulkEmailDestinationStatus] + + +class CloneReceiptRuleSetRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + OriginalRuleSetName: ReceiptRuleSetName + + +class CloneReceiptRuleSetResponse(TypedDict, total=False): + 
pass + + +class CloudWatchDimensionConfiguration(TypedDict, total=False): + DimensionName: DimensionName + DimensionValueSource: DimensionValueSource + DefaultDimensionValue: DefaultDimensionValue + + +CloudWatchDimensionConfigurations = List[CloudWatchDimensionConfiguration] + + +class CloudWatchDestination(TypedDict, total=False): + DimensionConfigurations: CloudWatchDimensionConfigurations + + +class ConfigurationSet(TypedDict, total=False): + Name: ConfigurationSetName + + +ConfigurationSetAttributeList = List[ConfigurationSetAttribute] +ConfigurationSets = List[ConfigurationSet] + + +class ConnectAction(TypedDict, total=False): + InstanceARN: ConnectInstanceArn + IAMRoleARN: IAMRoleARN + + +Counter = int + + +class SNSDestination(TypedDict, total=False): + TopicARN: AmazonResourceName + + +class KinesisFirehoseDestination(TypedDict, total=False): + IAMRoleARN: AmazonResourceName + DeliveryStreamARN: AmazonResourceName + + +EventTypes = List[EventType] + + +class EventDestination(TypedDict, total=False): + Name: EventDestinationName + Enabled: Optional[Enabled] + MatchingEventTypes: EventTypes + KinesisFirehoseDestination: Optional[KinesisFirehoseDestination] + CloudWatchDestination: Optional[CloudWatchDestination] + SNSDestination: Optional[SNSDestination] + + +class CreateConfigurationSetEventDestinationRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + EventDestination: EventDestination + + +class CreateConfigurationSetEventDestinationResponse(TypedDict, total=False): + pass + + +class CreateConfigurationSetRequest(ServiceRequest): + ConfigurationSet: ConfigurationSet + + +class CreateConfigurationSetResponse(TypedDict, total=False): + pass + + +class TrackingOptions(TypedDict, total=False): + CustomRedirectDomain: Optional[CustomRedirectDomain] + + +class CreateConfigurationSetTrackingOptionsRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + TrackingOptions: TrackingOptions + + +class CreateConfigurationSetTrackingOptionsResponse(TypedDict, total=False): + pass + + +class CreateCustomVerificationEmailTemplateRequest(ServiceRequest): + TemplateName: TemplateName + FromEmailAddress: FromAddress + TemplateSubject: Subject + TemplateContent: TemplateContent + SuccessRedirectionURL: SuccessRedirectionURL + FailureRedirectionURL: FailureRedirectionURL + + +class ReceiptIpFilter(TypedDict, total=False): + Policy: ReceiptFilterPolicy + Cidr: Cidr + + +class ReceiptFilter(TypedDict, total=False): + Name: ReceiptFilterName + IpFilter: ReceiptIpFilter + + +class CreateReceiptFilterRequest(ServiceRequest): + Filter: ReceiptFilter + + +class CreateReceiptFilterResponse(TypedDict, total=False): + pass + + +class SNSAction(TypedDict, total=False): + TopicArn: AmazonResourceName + Encoding: Optional[SNSActionEncoding] + + +class StopAction(TypedDict, total=False): + Scope: StopScope + TopicArn: Optional[AmazonResourceName] + + +class LambdaAction(TypedDict, total=False): + TopicArn: Optional[AmazonResourceName] + FunctionArn: AmazonResourceName + InvocationType: Optional[InvocationType] + + +class WorkmailAction(TypedDict, total=False): + TopicArn: Optional[AmazonResourceName] + OrganizationArn: AmazonResourceName + + +class S3Action(TypedDict, total=False): + TopicArn: Optional[AmazonResourceName] + BucketName: S3BucketName + ObjectKeyPrefix: Optional[S3KeyPrefix] + KmsKeyArn: Optional[AmazonResourceName] + IamRoleArn: Optional[IAMRoleARN] + + +class ReceiptAction(TypedDict, total=False): + S3Action: Optional[S3Action] + BounceAction: 
Optional[BounceAction] + WorkmailAction: Optional[WorkmailAction] + LambdaAction: Optional[LambdaAction] + StopAction: Optional[StopAction] + AddHeaderAction: Optional[AddHeaderAction] + SNSAction: Optional[SNSAction] + ConnectAction: Optional[ConnectAction] + + +ReceiptActionsList = List[ReceiptAction] +RecipientsList = List[Recipient] + + +class ReceiptRule(TypedDict, total=False): + Name: ReceiptRuleName + Enabled: Optional[Enabled] + TlsPolicy: Optional[TlsPolicy] + Recipients: Optional[RecipientsList] + Actions: Optional[ReceiptActionsList] + ScanEnabled: Optional[Enabled] + + +class CreateReceiptRuleRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + After: Optional[ReceiptRuleName] + Rule: ReceiptRule + + +class CreateReceiptRuleResponse(TypedDict, total=False): + pass + + +class CreateReceiptRuleSetRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + + +class CreateReceiptRuleSetResponse(TypedDict, total=False): + pass + + +class Template(TypedDict, total=False): + TemplateName: TemplateName + SubjectPart: Optional[SubjectPart] + TextPart: Optional[TextPart] + HtmlPart: Optional[HtmlPart] + + +class CreateTemplateRequest(ServiceRequest): + Template: Template + + +class CreateTemplateResponse(TypedDict, total=False): + pass + + +class CustomVerificationEmailTemplate(TypedDict, total=False): + TemplateName: Optional[TemplateName] + FromEmailAddress: Optional[FromAddress] + TemplateSubject: Optional[Subject] + SuccessRedirectionURL: Optional[SuccessRedirectionURL] + FailureRedirectionURL: Optional[FailureRedirectionURL] + + +CustomVerificationEmailTemplates = List[CustomVerificationEmailTemplate] + + +class DeleteConfigurationSetEventDestinationRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + EventDestinationName: EventDestinationName + + +class DeleteConfigurationSetEventDestinationResponse(TypedDict, total=False): + pass + + +class DeleteConfigurationSetRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + + +class DeleteConfigurationSetResponse(TypedDict, total=False): + pass + + +class DeleteConfigurationSetTrackingOptionsRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + + +class DeleteConfigurationSetTrackingOptionsResponse(TypedDict, total=False): + pass + + +class DeleteCustomVerificationEmailTemplateRequest(ServiceRequest): + TemplateName: TemplateName + + +class DeleteIdentityPolicyRequest(ServiceRequest): + Identity: Identity + PolicyName: PolicyName + + +class DeleteIdentityPolicyResponse(TypedDict, total=False): + pass + + +class DeleteIdentityRequest(ServiceRequest): + Identity: Identity + + +class DeleteIdentityResponse(TypedDict, total=False): + pass + + +class DeleteReceiptFilterRequest(ServiceRequest): + FilterName: ReceiptFilterName + + +class DeleteReceiptFilterResponse(TypedDict, total=False): + pass + + +class DeleteReceiptRuleRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + RuleName: ReceiptRuleName + + +class DeleteReceiptRuleResponse(TypedDict, total=False): + pass + + +class DeleteReceiptRuleSetRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + + +class DeleteReceiptRuleSetResponse(TypedDict, total=False): + pass + + +class DeleteTemplateRequest(ServiceRequest): + TemplateName: TemplateName + + +class DeleteTemplateResponse(TypedDict, total=False): + pass + + +class DeleteVerifiedEmailAddressRequest(ServiceRequest): + EmailAddress: Address + + +class DeliveryOptions(TypedDict, total=False): + TlsPolicy: Optional[TlsPolicy] + + +class 
DescribeActiveReceiptRuleSetRequest(ServiceRequest): + pass + + +ReceiptRulesList = List[ReceiptRule] +Timestamp = datetime + + +class ReceiptRuleSetMetadata(TypedDict, total=False): + Name: Optional[ReceiptRuleSetName] + CreatedTimestamp: Optional[Timestamp] + + +class DescribeActiveReceiptRuleSetResponse(TypedDict, total=False): + Metadata: Optional[ReceiptRuleSetMetadata] + Rules: Optional[ReceiptRulesList] + + +class DescribeConfigurationSetRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + ConfigurationSetAttributeNames: Optional[ConfigurationSetAttributeList] + + +LastFreshStart = datetime + + +class ReputationOptions(TypedDict, total=False): + SendingEnabled: Optional[Enabled] + ReputationMetricsEnabled: Optional[Enabled] + LastFreshStart: Optional[LastFreshStart] + + +EventDestinations = List[EventDestination] + + +class DescribeConfigurationSetResponse(TypedDict, total=False): + ConfigurationSet: Optional[ConfigurationSet] + EventDestinations: Optional[EventDestinations] + TrackingOptions: Optional[TrackingOptions] + DeliveryOptions: Optional[DeliveryOptions] + ReputationOptions: Optional[ReputationOptions] + + +class DescribeReceiptRuleRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + RuleName: ReceiptRuleName + + +class DescribeReceiptRuleResponse(TypedDict, total=False): + Rule: Optional[ReceiptRule] + + +class DescribeReceiptRuleSetRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + + +class DescribeReceiptRuleSetResponse(TypedDict, total=False): + Metadata: Optional[ReceiptRuleSetMetadata] + Rules: Optional[ReceiptRulesList] + + +VerificationTokenList = List[VerificationToken] + + +class IdentityDkimAttributes(TypedDict, total=False): + DkimEnabled: Enabled + DkimVerificationStatus: VerificationStatus + DkimTokens: Optional[VerificationTokenList] + + +DkimAttributes = Dict[Identity, IdentityDkimAttributes] + + +class GetAccountSendingEnabledResponse(TypedDict, total=False): + Enabled: Optional[Enabled] + + +class GetCustomVerificationEmailTemplateRequest(ServiceRequest): + TemplateName: TemplateName + + +class GetCustomVerificationEmailTemplateResponse(TypedDict, total=False): + TemplateName: Optional[TemplateName] + FromEmailAddress: Optional[FromAddress] + TemplateSubject: Optional[Subject] + TemplateContent: Optional[TemplateContent] + SuccessRedirectionURL: Optional[SuccessRedirectionURL] + FailureRedirectionURL: Optional[FailureRedirectionURL] + + +IdentityList = List[Identity] + + +class GetIdentityDkimAttributesRequest(ServiceRequest): + Identities: IdentityList + + +class GetIdentityDkimAttributesResponse(TypedDict, total=False): + DkimAttributes: DkimAttributes + + +class GetIdentityMailFromDomainAttributesRequest(ServiceRequest): + Identities: IdentityList + + +class IdentityMailFromDomainAttributes(TypedDict, total=False): + MailFromDomain: MailFromDomainName + MailFromDomainStatus: CustomMailFromStatus + BehaviorOnMXFailure: BehaviorOnMXFailure + + +MailFromDomainAttributes = Dict[Identity, IdentityMailFromDomainAttributes] + + +class GetIdentityMailFromDomainAttributesResponse(TypedDict, total=False): + MailFromDomainAttributes: MailFromDomainAttributes + + +class GetIdentityNotificationAttributesRequest(ServiceRequest): + Identities: IdentityList + + +class IdentityNotificationAttributes(TypedDict, total=False): + BounceTopic: NotificationTopic + ComplaintTopic: NotificationTopic + DeliveryTopic: NotificationTopic + ForwardingEnabled: Enabled + HeadersInBounceNotificationsEnabled: Optional[Enabled] + 
HeadersInComplaintNotificationsEnabled: Optional[Enabled] + HeadersInDeliveryNotificationsEnabled: Optional[Enabled] + + +NotificationAttributes = Dict[Identity, IdentityNotificationAttributes] + + +class GetIdentityNotificationAttributesResponse(TypedDict, total=False): + NotificationAttributes: NotificationAttributes + + +PolicyNameList = List[PolicyName] + + +class GetIdentityPoliciesRequest(ServiceRequest): + Identity: Identity + PolicyNames: PolicyNameList + + +PolicyMap = Dict[PolicyName, Policy] + + +class GetIdentityPoliciesResponse(TypedDict, total=False): + Policies: PolicyMap + + +class GetIdentityVerificationAttributesRequest(ServiceRequest): + Identities: IdentityList + + +class IdentityVerificationAttributes(TypedDict, total=False): + VerificationStatus: VerificationStatus + VerificationToken: Optional[VerificationToken] + + +VerificationAttributes = Dict[Identity, IdentityVerificationAttributes] + + +class GetIdentityVerificationAttributesResponse(TypedDict, total=False): + VerificationAttributes: VerificationAttributes + + +class GetSendQuotaResponse(TypedDict, total=False): + Max24HourSend: Optional[Max24HourSend] + MaxSendRate: Optional[MaxSendRate] + SentLast24Hours: Optional[SentLast24Hours] + + +class SendDataPoint(TypedDict, total=False): + Timestamp: Optional[Timestamp] + DeliveryAttempts: Optional[Counter] + Bounces: Optional[Counter] + Complaints: Optional[Counter] + Rejects: Optional[Counter] + + +SendDataPointList = List[SendDataPoint] + + +class GetSendStatisticsResponse(TypedDict, total=False): + SendDataPoints: Optional[SendDataPointList] + + +class GetTemplateRequest(ServiceRequest): + TemplateName: TemplateName + + +class GetTemplateResponse(TypedDict, total=False): + Template: Optional[Template] + + +class ListConfigurationSetsRequest(ServiceRequest): + NextToken: Optional[NextToken] + MaxItems: Optional[MaxItems] + + +class ListConfigurationSetsResponse(TypedDict, total=False): + ConfigurationSets: Optional[ConfigurationSets] + NextToken: Optional[NextToken] + + +class ListCustomVerificationEmailTemplatesRequest(ServiceRequest): + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class ListCustomVerificationEmailTemplatesResponse(TypedDict, total=False): + CustomVerificationEmailTemplates: Optional[CustomVerificationEmailTemplates] + NextToken: Optional[NextToken] + + +class ListIdentitiesRequest(ServiceRequest): + IdentityType: Optional[IdentityType] + NextToken: Optional[NextToken] + MaxItems: Optional[MaxItems] + + +class ListIdentitiesResponse(TypedDict, total=False): + Identities: IdentityList + NextToken: Optional[NextToken] + + +class ListIdentityPoliciesRequest(ServiceRequest): + Identity: Identity + + +class ListIdentityPoliciesResponse(TypedDict, total=False): + PolicyNames: PolicyNameList + + +class ListReceiptFiltersRequest(ServiceRequest): + pass + + +ReceiptFilterList = List[ReceiptFilter] + + +class ListReceiptFiltersResponse(TypedDict, total=False): + Filters: Optional[ReceiptFilterList] + + +class ListReceiptRuleSetsRequest(ServiceRequest): + NextToken: Optional[NextToken] + + +ReceiptRuleSetsLists = List[ReceiptRuleSetMetadata] + + +class ListReceiptRuleSetsResponse(TypedDict, total=False): + RuleSets: Optional[ReceiptRuleSetsLists] + NextToken: Optional[NextToken] + + +class ListTemplatesRequest(ServiceRequest): + NextToken: Optional[NextToken] + MaxItems: Optional[MaxItems] + + +class TemplateMetadata(TypedDict, total=False): + Name: Optional[TemplateName] + CreatedTimestamp: Optional[Timestamp] + + 
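
All of these response shapes are plain TypedDicts, so nested structures are just nested dicts; in particular, the per-identity attribute maps above (DkimAttributes, MailFromDomainAttributes, NotificationAttributes, VerificationAttributes) are Dict[Identity, ...] keyed by the identities passed in the request. A short sketch of building one such response by hand, with made-up identities and token:

from localstack.aws.api.ses import (
    GetIdentityVerificationAttributesResponse,
    IdentityVerificationAttributes,
    VerificationStatus,
)

# Illustrative response for a GetIdentityVerificationAttributes call on two identities.
response = GetIdentityVerificationAttributesResponse(
    VerificationAttributes={
        "user@example.com": IdentityVerificationAttributes(
            VerificationStatus=VerificationStatus.Success,
        ),
        "example.com": IdentityVerificationAttributes(
            VerificationStatus=VerificationStatus.Pending,
            VerificationToken="made-up-verification-token",
        ),
    }
)
# StrEnum members compare equal to their string values.
assert response["VerificationAttributes"]["example.com"]["VerificationStatus"] == "Pending"
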
+TemplateMetadataList = List[TemplateMetadata] + + +class ListTemplatesResponse(TypedDict, total=False): + TemplatesMetadata: Optional[TemplateMetadataList] + NextToken: Optional[NextToken] + + +class ListVerifiedEmailAddressesResponse(TypedDict, total=False): + VerifiedEmailAddresses: Optional[AddressList] + + +class Message(TypedDict, total=False): + Subject: Content + Body: Body + + +class MessageDsn(TypedDict, total=False): + ReportingMta: ReportingMta + ArrivalDate: Optional[ArrivalDate] + ExtensionFields: Optional[ExtensionFieldList] + + +class PutConfigurationSetDeliveryOptionsRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + DeliveryOptions: Optional[DeliveryOptions] + + +class PutConfigurationSetDeliveryOptionsResponse(TypedDict, total=False): + pass + + +class PutIdentityPolicyRequest(ServiceRequest): + Identity: Identity + PolicyName: PolicyName + Policy: Policy + + +class PutIdentityPolicyResponse(TypedDict, total=False): + pass + + +RawMessageData = bytes + + +class RawMessage(TypedDict, total=False): + Data: RawMessageData + + +ReceiptRuleNamesList = List[ReceiptRuleName] + + +class ReorderReceiptRuleSetRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + RuleNames: ReceiptRuleNamesList + + +class ReorderReceiptRuleSetResponse(TypedDict, total=False): + pass + + +class SendBounceRequest(ServiceRequest): + OriginalMessageId: MessageId + BounceSender: Address + Explanation: Optional[Explanation] + MessageDsn: Optional[MessageDsn] + BouncedRecipientInfoList: BouncedRecipientInfoList + BounceSenderArn: Optional[AmazonResourceName] + + +class SendBounceResponse(TypedDict, total=False): + MessageId: Optional[MessageId] + + +class SendBulkTemplatedEmailRequest(ServiceRequest): + Source: Address + SourceArn: Optional[AmazonResourceName] + ReplyToAddresses: Optional[AddressList] + ReturnPath: Optional[Address] + ReturnPathArn: Optional[AmazonResourceName] + ConfigurationSetName: Optional[ConfigurationSetName] + DefaultTags: Optional[MessageTagList] + Template: TemplateName + TemplateArn: Optional[AmazonResourceName] + DefaultTemplateData: TemplateData + Destinations: BulkEmailDestinationList + + +class SendBulkTemplatedEmailResponse(TypedDict, total=False): + Status: BulkEmailDestinationStatusList + + +class SendCustomVerificationEmailRequest(ServiceRequest): + EmailAddress: Address + TemplateName: TemplateName + ConfigurationSetName: Optional[ConfigurationSetName] + + +class SendCustomVerificationEmailResponse(TypedDict, total=False): + MessageId: Optional[MessageId] + + +class SendEmailRequest(ServiceRequest): + Source: Address + Destination: Destination + Message: Message + ReplyToAddresses: Optional[AddressList] + ReturnPath: Optional[Address] + SourceArn: Optional[AmazonResourceName] + ReturnPathArn: Optional[AmazonResourceName] + Tags: Optional[MessageTagList] + ConfigurationSetName: Optional[ConfigurationSetName] + + +class SendEmailResponse(TypedDict, total=False): + MessageId: MessageId + + +class SendRawEmailRequest(ServiceRequest): + Source: Optional[Address] + Destinations: Optional[AddressList] + RawMessage: RawMessage + FromArn: Optional[AmazonResourceName] + SourceArn: Optional[AmazonResourceName] + ReturnPathArn: Optional[AmazonResourceName] + Tags: Optional[MessageTagList] + ConfigurationSetName: Optional[ConfigurationSetName] + + +class SendRawEmailResponse(TypedDict, total=False): + MessageId: MessageId + + +class SendTemplatedEmailRequest(ServiceRequest): + Source: Address + Destination: Destination + ReplyToAddresses: 
Optional[AddressList] + ReturnPath: Optional[Address] + SourceArn: Optional[AmazonResourceName] + ReturnPathArn: Optional[AmazonResourceName] + Tags: Optional[MessageTagList] + ConfigurationSetName: Optional[ConfigurationSetName] + Template: TemplateName + TemplateArn: Optional[AmazonResourceName] + TemplateData: TemplateData + + +class SendTemplatedEmailResponse(TypedDict, total=False): + MessageId: MessageId + + +class SetActiveReceiptRuleSetRequest(ServiceRequest): + RuleSetName: Optional[ReceiptRuleSetName] + + +class SetActiveReceiptRuleSetResponse(TypedDict, total=False): + pass + + +class SetIdentityDkimEnabledRequest(ServiceRequest): + Identity: Identity + DkimEnabled: Enabled + + +class SetIdentityDkimEnabledResponse(TypedDict, total=False): + pass + + +class SetIdentityFeedbackForwardingEnabledRequest(ServiceRequest): + Identity: Identity + ForwardingEnabled: Enabled + + +class SetIdentityFeedbackForwardingEnabledResponse(TypedDict, total=False): + pass + + +class SetIdentityHeadersInNotificationsEnabledRequest(ServiceRequest): + Identity: Identity + NotificationType: NotificationType + Enabled: Enabled + + +class SetIdentityHeadersInNotificationsEnabledResponse(TypedDict, total=False): + pass + + +class SetIdentityMailFromDomainRequest(ServiceRequest): + Identity: Identity + MailFromDomain: Optional[MailFromDomainName] + BehaviorOnMXFailure: Optional[BehaviorOnMXFailure] + + +class SetIdentityMailFromDomainResponse(TypedDict, total=False): + pass + + +class SetIdentityNotificationTopicRequest(ServiceRequest): + Identity: Identity + NotificationType: NotificationType + SnsTopic: Optional[NotificationTopic] + + +class SetIdentityNotificationTopicResponse(TypedDict, total=False): + pass + + +class SetReceiptRulePositionRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + RuleName: ReceiptRuleName + After: Optional[ReceiptRuleName] + + +class SetReceiptRulePositionResponse(TypedDict, total=False): + pass + + +class TestRenderTemplateRequest(ServiceRequest): + TemplateName: TemplateName + TemplateData: TemplateData + + +class TestRenderTemplateResponse(TypedDict, total=False): + RenderedTemplate: Optional[RenderedTemplate] + + +class UpdateAccountSendingEnabledRequest(ServiceRequest): + Enabled: Optional[Enabled] + + +class UpdateConfigurationSetEventDestinationRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + EventDestination: EventDestination + + +class UpdateConfigurationSetEventDestinationResponse(TypedDict, total=False): + pass + + +class UpdateConfigurationSetReputationMetricsEnabledRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + Enabled: Enabled + + +class UpdateConfigurationSetSendingEnabledRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + Enabled: Enabled + + +class UpdateConfigurationSetTrackingOptionsRequest(ServiceRequest): + ConfigurationSetName: ConfigurationSetName + TrackingOptions: TrackingOptions + + +class UpdateConfigurationSetTrackingOptionsResponse(TypedDict, total=False): + pass + + +class UpdateCustomVerificationEmailTemplateRequest(ServiceRequest): + TemplateName: TemplateName + FromEmailAddress: Optional[FromAddress] + TemplateSubject: Optional[Subject] + TemplateContent: Optional[TemplateContent] + SuccessRedirectionURL: Optional[SuccessRedirectionURL] + FailureRedirectionURL: Optional[FailureRedirectionURL] + + +class UpdateReceiptRuleRequest(ServiceRequest): + RuleSetName: ReceiptRuleSetName + Rule: ReceiptRule + + +class UpdateReceiptRuleResponse(TypedDict, total=False): + pass + 
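
The request shapes follow the same pattern, assuming ServiceRequest's TypedDict-style construction holds in this codebase: building a request is plain dict construction (members are enforced by type checkers, not at runtime), and operations with no meaningful output use the empty `pass` response classes above, which are simply empty dicts. A small sketch with made-up rule names:

from localstack.aws.api.ses import (
    ReceiptRule,
    UpdateReceiptRuleRequest,
    UpdateReceiptRuleResponse,
)

# Illustrative request: nested shapes are nested dicts.
request = UpdateReceiptRuleRequest(
    RuleSetName="default-rule-set",
    Rule=ReceiptRule(Name="store-to-s3", Enabled=True),
)
assert request["Rule"]["Name"] == "store-to-s3"

# Empty response classes are empty TypedDicts, i.e. just {} at runtime.
assert UpdateReceiptRuleResponse() == {}
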
+ +class UpdateTemplateRequest(ServiceRequest): + Template: Template + + +class UpdateTemplateResponse(TypedDict, total=False): + pass + + +class VerifyDomainDkimRequest(ServiceRequest): + Domain: Domain + + +class VerifyDomainDkimResponse(TypedDict, total=False): + DkimTokens: VerificationTokenList + + +class VerifyDomainIdentityRequest(ServiceRequest): + Domain: Domain + + +class VerifyDomainIdentityResponse(TypedDict, total=False): + VerificationToken: VerificationToken + + +class VerifyEmailAddressRequest(ServiceRequest): + EmailAddress: Address + + +class VerifyEmailIdentityRequest(ServiceRequest): + EmailAddress: Address + + +class VerifyEmailIdentityResponse(TypedDict, total=False): + pass + + +class SesApi: + service = "ses" + version = "2010-12-01" + + @handler("CloneReceiptRuleSet") + def clone_receipt_rule_set( + self, + context: RequestContext, + rule_set_name: ReceiptRuleSetName, + original_rule_set_name: ReceiptRuleSetName, + **kwargs, + ) -> CloneReceiptRuleSetResponse: + raise NotImplementedError + + @handler("CreateConfigurationSet") + def create_configuration_set( + self, context: RequestContext, configuration_set: ConfigurationSet, **kwargs + ) -> CreateConfigurationSetResponse: + raise NotImplementedError + + @handler("CreateConfigurationSetEventDestination") + def create_configuration_set_event_destination( + self, + context: RequestContext, + configuration_set_name: ConfigurationSetName, + event_destination: EventDestination, + **kwargs, + ) -> CreateConfigurationSetEventDestinationResponse: + raise NotImplementedError + + @handler("CreateConfigurationSetTrackingOptions") + def create_configuration_set_tracking_options( + self, + context: RequestContext, + configuration_set_name: ConfigurationSetName, + tracking_options: TrackingOptions, + **kwargs, + ) -> CreateConfigurationSetTrackingOptionsResponse: + raise NotImplementedError + + @handler("CreateCustomVerificationEmailTemplate") + def create_custom_verification_email_template( + self, + context: RequestContext, + template_name: TemplateName, + from_email_address: FromAddress, + template_subject: Subject, + template_content: TemplateContent, + success_redirection_url: SuccessRedirectionURL, + failure_redirection_url: FailureRedirectionURL, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("CreateReceiptFilter") + def create_receipt_filter( + self, context: RequestContext, filter: ReceiptFilter, **kwargs + ) -> CreateReceiptFilterResponse: + raise NotImplementedError + + @handler("CreateReceiptRule") + def create_receipt_rule( + self, + context: RequestContext, + rule_set_name: ReceiptRuleSetName, + rule: ReceiptRule, + after: ReceiptRuleName | None = None, + **kwargs, + ) -> CreateReceiptRuleResponse: + raise NotImplementedError + + @handler("CreateReceiptRuleSet") + def create_receipt_rule_set( + self, context: RequestContext, rule_set_name: ReceiptRuleSetName, **kwargs + ) -> CreateReceiptRuleSetResponse: + raise NotImplementedError + + @handler("CreateTemplate") + def create_template( + self, context: RequestContext, template: Template, **kwargs + ) -> CreateTemplateResponse: + raise NotImplementedError + + @handler("DeleteConfigurationSet") + def delete_configuration_set( + self, context: RequestContext, configuration_set_name: ConfigurationSetName, **kwargs + ) -> DeleteConfigurationSetResponse: + raise NotImplementedError + + @handler("DeleteConfigurationSetEventDestination") + def delete_configuration_set_event_destination( + self, + context: RequestContext, + configuration_set_name: 
ConfigurationSetName, + event_destination_name: EventDestinationName, + **kwargs, + ) -> DeleteConfigurationSetEventDestinationResponse: + raise NotImplementedError + + @handler("DeleteConfigurationSetTrackingOptions") + def delete_configuration_set_tracking_options( + self, context: RequestContext, configuration_set_name: ConfigurationSetName, **kwargs + ) -> DeleteConfigurationSetTrackingOptionsResponse: + raise NotImplementedError + + @handler("DeleteCustomVerificationEmailTemplate") + def delete_custom_verification_email_template( + self, context: RequestContext, template_name: TemplateName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteIdentity") + def delete_identity( + self, context: RequestContext, identity: Identity, **kwargs + ) -> DeleteIdentityResponse: + raise NotImplementedError + + @handler("DeleteIdentityPolicy") + def delete_identity_policy( + self, context: RequestContext, identity: Identity, policy_name: PolicyName, **kwargs + ) -> DeleteIdentityPolicyResponse: + raise NotImplementedError + + @handler("DeleteReceiptFilter") + def delete_receipt_filter( + self, context: RequestContext, filter_name: ReceiptFilterName, **kwargs + ) -> DeleteReceiptFilterResponse: + raise NotImplementedError + + @handler("DeleteReceiptRule") + def delete_receipt_rule( + self, + context: RequestContext, + rule_set_name: ReceiptRuleSetName, + rule_name: ReceiptRuleName, + **kwargs, + ) -> DeleteReceiptRuleResponse: + raise NotImplementedError + + @handler("DeleteReceiptRuleSet") + def delete_receipt_rule_set( + self, context: RequestContext, rule_set_name: ReceiptRuleSetName, **kwargs + ) -> DeleteReceiptRuleSetResponse: + raise NotImplementedError + + @handler("DeleteTemplate") + def delete_template( + self, context: RequestContext, template_name: TemplateName, **kwargs + ) -> DeleteTemplateResponse: + raise NotImplementedError + + @handler("DeleteVerifiedEmailAddress") + def delete_verified_email_address( + self, context: RequestContext, email_address: Address, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DescribeActiveReceiptRuleSet") + def describe_active_receipt_rule_set( + self, context: RequestContext, **kwargs + ) -> DescribeActiveReceiptRuleSetResponse: + raise NotImplementedError + + @handler("DescribeConfigurationSet") + def describe_configuration_set( + self, + context: RequestContext, + configuration_set_name: ConfigurationSetName, + configuration_set_attribute_names: ConfigurationSetAttributeList | None = None, + **kwargs, + ) -> DescribeConfigurationSetResponse: + raise NotImplementedError + + @handler("DescribeReceiptRule") + def describe_receipt_rule( + self, + context: RequestContext, + rule_set_name: ReceiptRuleSetName, + rule_name: ReceiptRuleName, + **kwargs, + ) -> DescribeReceiptRuleResponse: + raise NotImplementedError + + @handler("DescribeReceiptRuleSet") + def describe_receipt_rule_set( + self, context: RequestContext, rule_set_name: ReceiptRuleSetName, **kwargs + ) -> DescribeReceiptRuleSetResponse: + raise NotImplementedError + + @handler("GetAccountSendingEnabled") + def get_account_sending_enabled( + self, context: RequestContext, **kwargs + ) -> GetAccountSendingEnabledResponse: + raise NotImplementedError + + @handler("GetCustomVerificationEmailTemplate") + def get_custom_verification_email_template( + self, context: RequestContext, template_name: TemplateName, **kwargs + ) -> GetCustomVerificationEmailTemplateResponse: + raise NotImplementedError + + @handler("GetIdentityDkimAttributes") + def 
get_identity_dkim_attributes( + self, context: RequestContext, identities: IdentityList, **kwargs + ) -> GetIdentityDkimAttributesResponse: + raise NotImplementedError + + @handler("GetIdentityMailFromDomainAttributes") + def get_identity_mail_from_domain_attributes( + self, context: RequestContext, identities: IdentityList, **kwargs + ) -> GetIdentityMailFromDomainAttributesResponse: + raise NotImplementedError + + @handler("GetIdentityNotificationAttributes") + def get_identity_notification_attributes( + self, context: RequestContext, identities: IdentityList, **kwargs + ) -> GetIdentityNotificationAttributesResponse: + raise NotImplementedError + + @handler("GetIdentityPolicies") + def get_identity_policies( + self, context: RequestContext, identity: Identity, policy_names: PolicyNameList, **kwargs + ) -> GetIdentityPoliciesResponse: + raise NotImplementedError + + @handler("GetIdentityVerificationAttributes") + def get_identity_verification_attributes( + self, context: RequestContext, identities: IdentityList, **kwargs + ) -> GetIdentityVerificationAttributesResponse: + raise NotImplementedError + + @handler("GetSendQuota") + def get_send_quota(self, context: RequestContext, **kwargs) -> GetSendQuotaResponse: + raise NotImplementedError + + @handler("GetSendStatistics") + def get_send_statistics(self, context: RequestContext, **kwargs) -> GetSendStatisticsResponse: + raise NotImplementedError + + @handler("GetTemplate") + def get_template( + self, context: RequestContext, template_name: TemplateName, **kwargs + ) -> GetTemplateResponse: + raise NotImplementedError + + @handler("ListConfigurationSets") + def list_configuration_sets( + self, + context: RequestContext, + next_token: NextToken | None = None, + max_items: MaxItems | None = None, + **kwargs, + ) -> ListConfigurationSetsResponse: + raise NotImplementedError + + @handler("ListCustomVerificationEmailTemplates") + def list_custom_verification_email_templates( + self, + context: RequestContext, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListCustomVerificationEmailTemplatesResponse: + raise NotImplementedError + + @handler("ListIdentities") + def list_identities( + self, + context: RequestContext, + identity_type: IdentityType | None = None, + next_token: NextToken | None = None, + max_items: MaxItems | None = None, + **kwargs, + ) -> ListIdentitiesResponse: + raise NotImplementedError + + @handler("ListIdentityPolicies") + def list_identity_policies( + self, context: RequestContext, identity: Identity, **kwargs + ) -> ListIdentityPoliciesResponse: + raise NotImplementedError + + @handler("ListReceiptFilters") + def list_receipt_filters(self, context: RequestContext, **kwargs) -> ListReceiptFiltersResponse: + raise NotImplementedError + + @handler("ListReceiptRuleSets") + def list_receipt_rule_sets( + self, context: RequestContext, next_token: NextToken | None = None, **kwargs + ) -> ListReceiptRuleSetsResponse: + raise NotImplementedError + + @handler("ListTemplates") + def list_templates( + self, + context: RequestContext, + next_token: NextToken | None = None, + max_items: MaxItems | None = None, + **kwargs, + ) -> ListTemplatesResponse: + raise NotImplementedError + + @handler("ListVerifiedEmailAddresses") + def list_verified_email_addresses( + self, context: RequestContext, **kwargs + ) -> ListVerifiedEmailAddressesResponse: + raise NotImplementedError + + @handler("PutConfigurationSetDeliveryOptions") + def put_configuration_set_delivery_options( + self, + context: 
RequestContext, + configuration_set_name: ConfigurationSetName, + delivery_options: DeliveryOptions | None = None, + **kwargs, + ) -> PutConfigurationSetDeliveryOptionsResponse: + raise NotImplementedError + + @handler("PutIdentityPolicy") + def put_identity_policy( + self, + context: RequestContext, + identity: Identity, + policy_name: PolicyName, + policy: Policy, + **kwargs, + ) -> PutIdentityPolicyResponse: + raise NotImplementedError + + @handler("ReorderReceiptRuleSet") + def reorder_receipt_rule_set( + self, + context: RequestContext, + rule_set_name: ReceiptRuleSetName, + rule_names: ReceiptRuleNamesList, + **kwargs, + ) -> ReorderReceiptRuleSetResponse: + raise NotImplementedError + + @handler("SendBounce") + def send_bounce( + self, + context: RequestContext, + original_message_id: MessageId, + bounce_sender: Address, + bounced_recipient_info_list: BouncedRecipientInfoList, + explanation: Explanation | None = None, + message_dsn: MessageDsn | None = None, + bounce_sender_arn: AmazonResourceName | None = None, + **kwargs, + ) -> SendBounceResponse: + raise NotImplementedError + + @handler("SendBulkTemplatedEmail") + def send_bulk_templated_email( + self, + context: RequestContext, + source: Address, + template: TemplateName, + default_template_data: TemplateData, + destinations: BulkEmailDestinationList, + source_arn: AmazonResourceName | None = None, + reply_to_addresses: AddressList | None = None, + return_path: Address | None = None, + return_path_arn: AmazonResourceName | None = None, + configuration_set_name: ConfigurationSetName | None = None, + default_tags: MessageTagList | None = None, + template_arn: AmazonResourceName | None = None, + **kwargs, + ) -> SendBulkTemplatedEmailResponse: + raise NotImplementedError + + @handler("SendCustomVerificationEmail") + def send_custom_verification_email( + self, + context: RequestContext, + email_address: Address, + template_name: TemplateName, + configuration_set_name: ConfigurationSetName | None = None, + **kwargs, + ) -> SendCustomVerificationEmailResponse: + raise NotImplementedError + + @handler("SendEmail") + def send_email( + self, + context: RequestContext, + source: Address, + destination: Destination, + message: Message, + reply_to_addresses: AddressList | None = None, + return_path: Address | None = None, + source_arn: AmazonResourceName | None = None, + return_path_arn: AmazonResourceName | None = None, + tags: MessageTagList | None = None, + configuration_set_name: ConfigurationSetName | None = None, + **kwargs, + ) -> SendEmailResponse: + raise NotImplementedError + + @handler("SendRawEmail") + def send_raw_email( + self, + context: RequestContext, + raw_message: RawMessage, + source: Address | None = None, + destinations: AddressList | None = None, + from_arn: AmazonResourceName | None = None, + source_arn: AmazonResourceName | None = None, + return_path_arn: AmazonResourceName | None = None, + tags: MessageTagList | None = None, + configuration_set_name: ConfigurationSetName | None = None, + **kwargs, + ) -> SendRawEmailResponse: + raise NotImplementedError + + @handler("SendTemplatedEmail") + def send_templated_email( + self, + context: RequestContext, + source: Address, + destination: Destination, + template: TemplateName, + template_data: TemplateData, + reply_to_addresses: AddressList | None = None, + return_path: Address | None = None, + source_arn: AmazonResourceName | None = None, + return_path_arn: AmazonResourceName | None = None, + tags: MessageTagList | None = None, + configuration_set_name: 
ConfigurationSetName | None = None, + template_arn: AmazonResourceName | None = None, + **kwargs, + ) -> SendTemplatedEmailResponse: + raise NotImplementedError + + @handler("SetActiveReceiptRuleSet") + def set_active_receipt_rule_set( + self, context: RequestContext, rule_set_name: ReceiptRuleSetName | None = None, **kwargs + ) -> SetActiveReceiptRuleSetResponse: + raise NotImplementedError + + @handler("SetIdentityDkimEnabled") + def set_identity_dkim_enabled( + self, context: RequestContext, identity: Identity, dkim_enabled: Enabled, **kwargs + ) -> SetIdentityDkimEnabledResponse: + raise NotImplementedError + + @handler("SetIdentityFeedbackForwardingEnabled") + def set_identity_feedback_forwarding_enabled( + self, context: RequestContext, identity: Identity, forwarding_enabled: Enabled, **kwargs + ) -> SetIdentityFeedbackForwardingEnabledResponse: + raise NotImplementedError + + @handler("SetIdentityHeadersInNotificationsEnabled") + def set_identity_headers_in_notifications_enabled( + self, + context: RequestContext, + identity: Identity, + notification_type: NotificationType, + enabled: Enabled, + **kwargs, + ) -> SetIdentityHeadersInNotificationsEnabledResponse: + raise NotImplementedError + + @handler("SetIdentityMailFromDomain") + def set_identity_mail_from_domain( + self, + context: RequestContext, + identity: Identity, + mail_from_domain: MailFromDomainName | None = None, + behavior_on_mx_failure: BehaviorOnMXFailure | None = None, + **kwargs, + ) -> SetIdentityMailFromDomainResponse: + raise NotImplementedError + + @handler("SetIdentityNotificationTopic") + def set_identity_notification_topic( + self, + context: RequestContext, + identity: Identity, + notification_type: NotificationType, + sns_topic: NotificationTopic | None = None, + **kwargs, + ) -> SetIdentityNotificationTopicResponse: + raise NotImplementedError + + @handler("SetReceiptRulePosition") + def set_receipt_rule_position( + self, + context: RequestContext, + rule_set_name: ReceiptRuleSetName, + rule_name: ReceiptRuleName, + after: ReceiptRuleName | None = None, + **kwargs, + ) -> SetReceiptRulePositionResponse: + raise NotImplementedError + + @handler("TestRenderTemplate") + def test_render_template( + self, + context: RequestContext, + template_name: TemplateName, + template_data: TemplateData, + **kwargs, + ) -> TestRenderTemplateResponse: + raise NotImplementedError + + @handler("UpdateAccountSendingEnabled") + def update_account_sending_enabled( + self, context: RequestContext, enabled: Enabled | None = None, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UpdateConfigurationSetEventDestination") + def update_configuration_set_event_destination( + self, + context: RequestContext, + configuration_set_name: ConfigurationSetName, + event_destination: EventDestination, + **kwargs, + ) -> UpdateConfigurationSetEventDestinationResponse: + raise NotImplementedError + + @handler("UpdateConfigurationSetReputationMetricsEnabled") + def update_configuration_set_reputation_metrics_enabled( + self, + context: RequestContext, + configuration_set_name: ConfigurationSetName, + enabled: Enabled, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("UpdateConfigurationSetSendingEnabled") + def update_configuration_set_sending_enabled( + self, + context: RequestContext, + configuration_set_name: ConfigurationSetName, + enabled: Enabled, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("UpdateConfigurationSetTrackingOptions") + def update_configuration_set_tracking_options( + self, + 
context: RequestContext, + configuration_set_name: ConfigurationSetName, + tracking_options: TrackingOptions, + **kwargs, + ) -> UpdateConfigurationSetTrackingOptionsResponse: + raise NotImplementedError + + @handler("UpdateCustomVerificationEmailTemplate") + def update_custom_verification_email_template( + self, + context: RequestContext, + template_name: TemplateName, + from_email_address: FromAddress | None = None, + template_subject: Subject | None = None, + template_content: TemplateContent | None = None, + success_redirection_url: SuccessRedirectionURL | None = None, + failure_redirection_url: FailureRedirectionURL | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("UpdateReceiptRule") + def update_receipt_rule( + self, + context: RequestContext, + rule_set_name: ReceiptRuleSetName, + rule: ReceiptRule, + **kwargs, + ) -> UpdateReceiptRuleResponse: + raise NotImplementedError + + @handler("UpdateTemplate") + def update_template( + self, context: RequestContext, template: Template, **kwargs + ) -> UpdateTemplateResponse: + raise NotImplementedError + + @handler("VerifyDomainDkim") + def verify_domain_dkim( + self, context: RequestContext, domain: Domain, **kwargs + ) -> VerifyDomainDkimResponse: + raise NotImplementedError + + @handler("VerifyDomainIdentity") + def verify_domain_identity( + self, context: RequestContext, domain: Domain, **kwargs + ) -> VerifyDomainIdentityResponse: + raise NotImplementedError + + @handler("VerifyEmailAddress") + def verify_email_address( + self, context: RequestContext, email_address: Address, **kwargs + ) -> None: + raise NotImplementedError + + @handler("VerifyEmailIdentity") + def verify_email_identity( + self, context: RequestContext, email_address: Address, **kwargs + ) -> VerifyEmailIdentityResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/sns/__init__.py b/localstack-core/localstack/aws/api/sns/__init__.py new file mode 100644 index 0000000000000..df5f5618138b5 --- /dev/null +++ b/localstack-core/localstack/aws/api/sns/__init__.py @@ -0,0 +1,1095 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AmazonResourceName = str +Iso2CountryCode = str +MaxItems = int +MaxItemsListOriginationNumbers = int +OTPCode = str +PhoneNumber = str +PhoneNumberString = str +String = str +TagKey = str +TagValue = str +account = str +action = str +attributeName = str +attributeValue = str +authenticateOnUnsubscribe = str +boolean = bool +delegate = str +endpoint = str +label = str +message = str +messageId = str +messageStructure = str +nextToken = str +protocol = str +string = str +subject = str +subscriptionARN = str +token = str +topicARN = str +topicName = str + + +class LanguageCodeString(StrEnum): + en_US = "en-US" + en_GB = "en-GB" + es_419 = "es-419" + es_ES = "es-ES" + de_DE = "de-DE" + fr_CA = "fr-CA" + fr_FR = "fr-FR" + it_IT = "it-IT" + ja_JP = "ja-JP" + pt_BR = "pt-BR" + kr_KR = "kr-KR" + zh_CN = "zh-CN" + zh_TW = "zh-TW" + + +class NumberCapability(StrEnum): + SMS = "SMS" + MMS = "MMS" + VOICE = "VOICE" + + +class RouteType(StrEnum): + Transactional = "Transactional" + Promotional = "Promotional" + Premium = "Premium" + + +class SMSSandboxPhoneNumberVerificationStatus(StrEnum): + Pending = "Pending" + Verified = "Verified" + + +class AuthorizationErrorException(ServiceException): + code: str = "AuthorizationError" + sender_fault: 
bool = True + status_code: int = 403 + + +class BatchEntryIdsNotDistinctException(ServiceException): + code: str = "BatchEntryIdsNotDistinct" + sender_fault: bool = True + status_code: int = 400 + + +class BatchRequestTooLongException(ServiceException): + code: str = "BatchRequestTooLong" + sender_fault: bool = True + status_code: int = 400 + + +class ConcurrentAccessException(ServiceException): + code: str = "ConcurrentAccess" + sender_fault: bool = True + status_code: int = 400 + + +class EmptyBatchRequestException(ServiceException): + code: str = "EmptyBatchRequest" + sender_fault: bool = True + status_code: int = 400 + + +class EndpointDisabledException(ServiceException): + code: str = "EndpointDisabled" + sender_fault: bool = True + status_code: int = 400 + + +class FilterPolicyLimitExceededException(ServiceException): + code: str = "FilterPolicyLimitExceeded" + sender_fault: bool = True + status_code: int = 403 + + +class InternalErrorException(ServiceException): + code: str = "InternalError" + sender_fault: bool = False + status_code: int = 500 + + +class InvalidBatchEntryIdException(ServiceException): + code: str = "InvalidBatchEntryId" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidParameterException(ServiceException): + code: str = "InvalidParameter" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidParameterValueException(ServiceException): + code: str = "ParameterValueInvalid" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidSecurityException(ServiceException): + code: str = "InvalidSecurity" + sender_fault: bool = True + status_code: int = 403 + + +class InvalidStateException(ServiceException): + code: str = "InvalidState" + sender_fault: bool = True + status_code: int = 400 + + +class KMSAccessDeniedException(ServiceException): + code: str = "KMSAccessDenied" + sender_fault: bool = True + status_code: int = 400 + + +class KMSDisabledException(ServiceException): + code: str = "KMSDisabled" + sender_fault: bool = True + status_code: int = 400 + + +class KMSInvalidStateException(ServiceException): + code: str = "KMSInvalidState" + sender_fault: bool = True + status_code: int = 400 + + +class KMSNotFoundException(ServiceException): + code: str = "KMSNotFound" + sender_fault: bool = True + status_code: int = 400 + + +class KMSOptInRequired(ServiceException): + code: str = "KMSOptInRequired" + sender_fault: bool = True + status_code: int = 403 + + +class KMSThrottlingException(ServiceException): + code: str = "KMSThrottling" + sender_fault: bool = True + status_code: int = 400 + + +class NotFoundException(ServiceException): + code: str = "NotFound" + sender_fault: bool = True + status_code: int = 404 + + +class OptedOutException(ServiceException): + code: str = "OptedOut" + sender_fault: bool = True + status_code: int = 400 + + +class PlatformApplicationDisabledException(ServiceException): + code: str = "PlatformApplicationDisabled" + sender_fault: bool = True + status_code: int = 400 + + +class ReplayLimitExceededException(ServiceException): + code: str = "ReplayLimitExceeded" + sender_fault: bool = True + status_code: int = 403 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFound" + sender_fault: bool = True + status_code: int = 404 + + +class StaleTagException(ServiceException): + code: str = "StaleTag" + sender_fault: bool = True + status_code: int = 400 + + +class SubscriptionLimitExceededException(ServiceException): + code: str = "SubscriptionLimitExceeded" + sender_fault: 
bool = True + status_code: int = 403 + + +class TagLimitExceededException(ServiceException): + code: str = "TagLimitExceeded" + sender_fault: bool = True + status_code: int = 400 + + +class TagPolicyException(ServiceException): + code: str = "TagPolicy" + sender_fault: bool = True + status_code: int = 400 + + +class ThrottledException(ServiceException): + code: str = "Throttled" + sender_fault: bool = True + status_code: int = 429 + + +class TooManyEntriesInBatchRequestException(ServiceException): + code: str = "TooManyEntriesInBatchRequest" + sender_fault: bool = True + status_code: int = 400 + + +class TopicLimitExceededException(ServiceException): + code: str = "TopicLimitExceeded" + sender_fault: bool = True + status_code: int = 403 + + +class UserErrorException(ServiceException): + code: str = "UserError" + sender_fault: bool = True + status_code: int = 400 + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = True + status_code: int = 400 + + +class VerificationException(ServiceException): + code: str = "VerificationException" + sender_fault: bool = False + status_code: int = 400 + Status: string + + +ActionsList = List[action] +DelegatesList = List[delegate] + + +class AddPermissionInput(ServiceRequest): + TopicArn: topicARN + Label: label + AWSAccountId: DelegatesList + ActionName: ActionsList + + +class BatchResultErrorEntry(TypedDict, total=False): + Id: String + Code: String + Message: Optional[String] + SenderFault: boolean + + +BatchResultErrorEntryList = List[BatchResultErrorEntry] +Binary = bytes + + +class CheckIfPhoneNumberIsOptedOutInput(ServiceRequest): + phoneNumber: PhoneNumber + + +class CheckIfPhoneNumberIsOptedOutResponse(TypedDict, total=False): + isOptedOut: Optional[boolean] + + +class ConfirmSubscriptionInput(ServiceRequest): + TopicArn: topicARN + Token: token + AuthenticateOnUnsubscribe: Optional[authenticateOnUnsubscribe] + + +class ConfirmSubscriptionResponse(TypedDict, total=False): + SubscriptionArn: Optional[subscriptionARN] + + +class CreateEndpointResponse(TypedDict, total=False): + EndpointArn: Optional[String] + + +MapStringToString = Dict[String, String] + + +class CreatePlatformApplicationInput(ServiceRequest): + Name: String + Platform: String + Attributes: MapStringToString + + +class CreatePlatformApplicationResponse(TypedDict, total=False): + PlatformApplicationArn: Optional[String] + + +class CreatePlatformEndpointInput(ServiceRequest): + PlatformApplicationArn: String + Token: String + CustomUserData: Optional[String] + Attributes: Optional[MapStringToString] + + +class CreateSMSSandboxPhoneNumberInput(ServiceRequest): + PhoneNumber: PhoneNumberString + LanguageCode: Optional[LanguageCodeString] + + +class CreateSMSSandboxPhoneNumberResult(TypedDict, total=False): + pass + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] +TopicAttributesMap = Dict[attributeName, attributeValue] + + +class CreateTopicInput(ServiceRequest): + Name: topicName + Attributes: Optional[TopicAttributesMap] + Tags: Optional[TagList] + DataProtectionPolicy: Optional[attributeValue] + + +class CreateTopicResponse(TypedDict, total=False): + TopicArn: Optional[topicARN] + + +class DeleteEndpointInput(ServiceRequest): + EndpointArn: String + + +class DeletePlatformApplicationInput(ServiceRequest): + PlatformApplicationArn: String + + +class DeleteSMSSandboxPhoneNumberInput(ServiceRequest): + PhoneNumber: PhoneNumberString + + +class 
DeleteSMSSandboxPhoneNumberResult(TypedDict, total=False): + pass + + +class DeleteTopicInput(ServiceRequest): + TopicArn: topicARN + + +class Endpoint(TypedDict, total=False): + EndpointArn: Optional[String] + Attributes: Optional[MapStringToString] + + +class GetDataProtectionPolicyInput(ServiceRequest): + ResourceArn: topicARN + + +class GetDataProtectionPolicyResponse(TypedDict, total=False): + DataProtectionPolicy: Optional[attributeValue] + + +class GetEndpointAttributesInput(ServiceRequest): + EndpointArn: String + + +class GetEndpointAttributesResponse(TypedDict, total=False): + Attributes: Optional[MapStringToString] + + +class GetPlatformApplicationAttributesInput(ServiceRequest): + PlatformApplicationArn: String + + +class GetPlatformApplicationAttributesResponse(TypedDict, total=False): + Attributes: Optional[MapStringToString] + + +ListString = List[String] + + +class GetSMSAttributesInput(ServiceRequest): + attributes: Optional[ListString] + + +class GetSMSAttributesResponse(TypedDict, total=False): + attributes: Optional[MapStringToString] + + +class GetSMSSandboxAccountStatusInput(ServiceRequest): + pass + + +class GetSMSSandboxAccountStatusResult(TypedDict, total=False): + IsInSandbox: boolean + + +class GetSubscriptionAttributesInput(ServiceRequest): + SubscriptionArn: subscriptionARN + + +SubscriptionAttributesMap = Dict[attributeName, attributeValue] + + +class GetSubscriptionAttributesResponse(TypedDict, total=False): + Attributes: Optional[SubscriptionAttributesMap] + + +class GetTopicAttributesInput(ServiceRequest): + TopicArn: topicARN + + +class GetTopicAttributesResponse(TypedDict, total=False): + Attributes: Optional[TopicAttributesMap] + + +class ListEndpointsByPlatformApplicationInput(ServiceRequest): + PlatformApplicationArn: String + NextToken: Optional[String] + + +ListOfEndpoints = List[Endpoint] + + +class ListEndpointsByPlatformApplicationResponse(TypedDict, total=False): + Endpoints: Optional[ListOfEndpoints] + NextToken: Optional[String] + + +class PlatformApplication(TypedDict, total=False): + PlatformApplicationArn: Optional[String] + Attributes: Optional[MapStringToString] + + +ListOfPlatformApplications = List[PlatformApplication] + + +class ListOriginationNumbersRequest(ServiceRequest): + NextToken: Optional[nextToken] + MaxResults: Optional[MaxItemsListOriginationNumbers] + + +NumberCapabilityList = List[NumberCapability] +Timestamp = datetime + + +class PhoneNumberInformation(TypedDict, total=False): + CreatedAt: Optional[Timestamp] + PhoneNumber: Optional[PhoneNumber] + Status: Optional[String] + Iso2CountryCode: Optional[Iso2CountryCode] + RouteType: Optional[RouteType] + NumberCapabilities: Optional[NumberCapabilityList] + + +PhoneNumberInformationList = List[PhoneNumberInformation] + + +class ListOriginationNumbersResult(TypedDict, total=False): + NextToken: Optional[nextToken] + PhoneNumbers: Optional[PhoneNumberInformationList] + + +class ListPhoneNumbersOptedOutInput(ServiceRequest): + nextToken: Optional[string] + + +PhoneNumberList = List[PhoneNumber] + + +class ListPhoneNumbersOptedOutResponse(TypedDict, total=False): + phoneNumbers: Optional[PhoneNumberList] + nextToken: Optional[string] + + +class ListPlatformApplicationsInput(ServiceRequest): + NextToken: Optional[String] + + +class ListPlatformApplicationsResponse(TypedDict, total=False): + PlatformApplications: Optional[ListOfPlatformApplications] + NextToken: Optional[String] + + +class ListSMSSandboxPhoneNumbersInput(ServiceRequest): + NextToken: Optional[nextToken] + MaxResults: 
Optional[MaxItems] + + +class SMSSandboxPhoneNumber(TypedDict, total=False): + PhoneNumber: Optional[PhoneNumberString] + Status: Optional[SMSSandboxPhoneNumberVerificationStatus] + + +SMSSandboxPhoneNumberList = List[SMSSandboxPhoneNumber] + + +class ListSMSSandboxPhoneNumbersResult(TypedDict, total=False): + PhoneNumbers: SMSSandboxPhoneNumberList + NextToken: Optional[string] + + +class ListSubscriptionsByTopicInput(ServiceRequest): + TopicArn: topicARN + NextToken: Optional[nextToken] + + +class Subscription(TypedDict, total=False): + SubscriptionArn: Optional[subscriptionARN] + Owner: Optional[account] + Protocol: Optional[protocol] + Endpoint: Optional[endpoint] + TopicArn: Optional[topicARN] + + +SubscriptionsList = List[Subscription] + + +class ListSubscriptionsByTopicResponse(TypedDict, total=False): + Subscriptions: Optional[SubscriptionsList] + NextToken: Optional[nextToken] + + +class ListSubscriptionsInput(ServiceRequest): + NextToken: Optional[nextToken] + + +class ListSubscriptionsResponse(TypedDict, total=False): + Subscriptions: Optional[SubscriptionsList] + NextToken: Optional[nextToken] + + +class ListTagsForResourceRequest(ServiceRequest): + ResourceArn: AmazonResourceName + + +class ListTagsForResourceResponse(TypedDict, total=False): + Tags: Optional[TagList] + + +class ListTopicsInput(ServiceRequest): + NextToken: Optional[nextToken] + + +class Topic(TypedDict, total=False): + TopicArn: Optional[topicARN] + + +TopicsList = List[Topic] + + +class ListTopicsResponse(TypedDict, total=False): + Topics: Optional[TopicsList] + NextToken: Optional[nextToken] + + +class MessageAttributeValue(TypedDict, total=False): + DataType: String + StringValue: Optional[String] + BinaryValue: Optional[Binary] + + +MessageAttributeMap = Dict[String, MessageAttributeValue] + + +class OptInPhoneNumberInput(ServiceRequest): + phoneNumber: PhoneNumber + + +class OptInPhoneNumberResponse(TypedDict, total=False): + pass + + +class PublishBatchRequestEntry(TypedDict, total=False): + Id: String + Message: message + Subject: Optional[subject] + MessageStructure: Optional[messageStructure] + MessageAttributes: Optional[MessageAttributeMap] + MessageDeduplicationId: Optional[String] + MessageGroupId: Optional[String] + + +PublishBatchRequestEntryList = List[PublishBatchRequestEntry] + + +class PublishBatchInput(ServiceRequest): + TopicArn: topicARN + PublishBatchRequestEntries: PublishBatchRequestEntryList + + +class PublishBatchResultEntry(TypedDict, total=False): + Id: Optional[String] + MessageId: Optional[messageId] + SequenceNumber: Optional[String] + + +PublishBatchResultEntryList = List[PublishBatchResultEntry] + + +class PublishBatchResponse(TypedDict, total=False): + Successful: Optional[PublishBatchResultEntryList] + Failed: Optional[BatchResultErrorEntryList] + + +class PublishInput(ServiceRequest): + TopicArn: Optional[topicARN] + TargetArn: Optional[String] + PhoneNumber: Optional[PhoneNumber] + Message: message + Subject: Optional[subject] + MessageStructure: Optional[messageStructure] + MessageAttributes: Optional[MessageAttributeMap] + MessageDeduplicationId: Optional[String] + MessageGroupId: Optional[String] + + +class PublishResponse(TypedDict, total=False): + MessageId: Optional[messageId] + SequenceNumber: Optional[String] + + +class PutDataProtectionPolicyInput(ServiceRequest): + ResourceArn: topicARN + DataProtectionPolicy: attributeValue + + +class RemovePermissionInput(ServiceRequest): + TopicArn: topicARN + Label: label + + +class 
SetEndpointAttributesInput(ServiceRequest): + EndpointArn: String + Attributes: MapStringToString + + +class SetPlatformApplicationAttributesInput(ServiceRequest): + PlatformApplicationArn: String + Attributes: MapStringToString + + +class SetSMSAttributesInput(ServiceRequest): + attributes: MapStringToString + + +class SetSMSAttributesResponse(TypedDict, total=False): + pass + + +class SetSubscriptionAttributesInput(ServiceRequest): + SubscriptionArn: subscriptionARN + AttributeName: attributeName + AttributeValue: Optional[attributeValue] + + +class SetTopicAttributesInput(ServiceRequest): + TopicArn: topicARN + AttributeName: attributeName + AttributeValue: Optional[attributeValue] + + +class SubscribeInput(ServiceRequest): + TopicArn: topicARN + Protocol: protocol + Endpoint: Optional[endpoint] + Attributes: Optional[SubscriptionAttributesMap] + ReturnSubscriptionArn: Optional[boolean] + + +class SubscribeResponse(TypedDict, total=False): + SubscriptionArn: Optional[subscriptionARN] + + +TagKeyList = List[TagKey] + + +class TagResourceRequest(ServiceRequest): + ResourceArn: AmazonResourceName + Tags: TagList + + +class TagResourceResponse(TypedDict, total=False): + pass + + +class UnsubscribeInput(ServiceRequest): + SubscriptionArn: subscriptionARN + + +class UntagResourceRequest(ServiceRequest): + ResourceArn: AmazonResourceName + TagKeys: TagKeyList + + +class UntagResourceResponse(TypedDict, total=False): + pass + + +class VerifySMSSandboxPhoneNumberInput(ServiceRequest): + PhoneNumber: PhoneNumberString + OneTimePassword: OTPCode + + +class VerifySMSSandboxPhoneNumberResult(TypedDict, total=False): + pass + + +class SnsApi: + service = "sns" + version = "2010-03-31" + + @handler("AddPermission") + def add_permission( + self, + context: RequestContext, + topic_arn: topicARN, + label: label, + aws_account_id: DelegatesList, + action_name: ActionsList, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("CheckIfPhoneNumberIsOptedOut") + def check_if_phone_number_is_opted_out( + self, context: RequestContext, phone_number: PhoneNumber, **kwargs + ) -> CheckIfPhoneNumberIsOptedOutResponse: + raise NotImplementedError + + @handler("ConfirmSubscription") + def confirm_subscription( + self, + context: RequestContext, + topic_arn: topicARN, + token: token, + authenticate_on_unsubscribe: authenticateOnUnsubscribe | None = None, + **kwargs, + ) -> ConfirmSubscriptionResponse: + raise NotImplementedError + + @handler("CreatePlatformApplication") + def create_platform_application( + self, + context: RequestContext, + name: String, + platform: String, + attributes: MapStringToString, + **kwargs, + ) -> CreatePlatformApplicationResponse: + raise NotImplementedError + + @handler("CreatePlatformEndpoint") + def create_platform_endpoint( + self, + context: RequestContext, + platform_application_arn: String, + token: String, + custom_user_data: String | None = None, + attributes: MapStringToString | None = None, + **kwargs, + ) -> CreateEndpointResponse: + raise NotImplementedError + + @handler("CreateSMSSandboxPhoneNumber") + def create_sms_sandbox_phone_number( + self, + context: RequestContext, + phone_number: PhoneNumberString, + language_code: LanguageCodeString | None = None, + **kwargs, + ) -> CreateSMSSandboxPhoneNumberResult: + raise NotImplementedError + + @handler("CreateTopic") + def create_topic( + self, + context: RequestContext, + name: topicName, + attributes: TopicAttributesMap | None = None, + tags: TagList | None = None, + data_protection_policy: attributeValue | None 
= None, + **kwargs, + ) -> CreateTopicResponse: + raise NotImplementedError + + @handler("DeleteEndpoint") + def delete_endpoint(self, context: RequestContext, endpoint_arn: String, **kwargs) -> None: + raise NotImplementedError + + @handler("DeletePlatformApplication") + def delete_platform_application( + self, context: RequestContext, platform_application_arn: String, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteSMSSandboxPhoneNumber") + def delete_sms_sandbox_phone_number( + self, context: RequestContext, phone_number: PhoneNumberString, **kwargs + ) -> DeleteSMSSandboxPhoneNumberResult: + raise NotImplementedError + + @handler("DeleteTopic") + def delete_topic(self, context: RequestContext, topic_arn: topicARN, **kwargs) -> None: + raise NotImplementedError + + @handler("GetDataProtectionPolicy") + def get_data_protection_policy( + self, context: RequestContext, resource_arn: topicARN, **kwargs + ) -> GetDataProtectionPolicyResponse: + raise NotImplementedError + + @handler("GetEndpointAttributes") + def get_endpoint_attributes( + self, context: RequestContext, endpoint_arn: String, **kwargs + ) -> GetEndpointAttributesResponse: + raise NotImplementedError + + @handler("GetPlatformApplicationAttributes") + def get_platform_application_attributes( + self, context: RequestContext, platform_application_arn: String, **kwargs + ) -> GetPlatformApplicationAttributesResponse: + raise NotImplementedError + + @handler("GetSMSAttributes") + def get_sms_attributes( + self, context: RequestContext, attributes: ListString | None = None, **kwargs + ) -> GetSMSAttributesResponse: + raise NotImplementedError + + @handler("GetSMSSandboxAccountStatus") + def get_sms_sandbox_account_status( + self, context: RequestContext, **kwargs + ) -> GetSMSSandboxAccountStatusResult: + raise NotImplementedError + + @handler("GetSubscriptionAttributes") + def get_subscription_attributes( + self, context: RequestContext, subscription_arn: subscriptionARN, **kwargs + ) -> GetSubscriptionAttributesResponse: + raise NotImplementedError + + @handler("GetTopicAttributes") + def get_topic_attributes( + self, context: RequestContext, topic_arn: topicARN, **kwargs + ) -> GetTopicAttributesResponse: + raise NotImplementedError + + @handler("ListEndpointsByPlatformApplication") + def list_endpoints_by_platform_application( + self, + context: RequestContext, + platform_application_arn: String, + next_token: String | None = None, + **kwargs, + ) -> ListEndpointsByPlatformApplicationResponse: + raise NotImplementedError + + @handler("ListOriginationNumbers") + def list_origination_numbers( + self, + context: RequestContext, + next_token: nextToken | None = None, + max_results: MaxItemsListOriginationNumbers | None = None, + **kwargs, + ) -> ListOriginationNumbersResult: + raise NotImplementedError + + @handler("ListPhoneNumbersOptedOut") + def list_phone_numbers_opted_out( + self, context: RequestContext, next_token: string | None = None, **kwargs + ) -> ListPhoneNumbersOptedOutResponse: + raise NotImplementedError + + @handler("ListPlatformApplications") + def list_platform_applications( + self, context: RequestContext, next_token: String | None = None, **kwargs + ) -> ListPlatformApplicationsResponse: + raise NotImplementedError + + @handler("ListSMSSandboxPhoneNumbers") + def list_sms_sandbox_phone_numbers( + self, + context: RequestContext, + next_token: nextToken | None = None, + max_results: MaxItems | None = None, + **kwargs, + ) -> ListSMSSandboxPhoneNumbersResult: + raise NotImplementedError + + 
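+    # Illustrative sketch (hedged; not part of the generated scaffold): a
+    # concrete provider implements an operation by subclassing the generated
+    # API class and overriding the corresponding stub, while anything left
+    # untouched keeps raising NotImplementedError and is reported to clients
+    # as not implemented. The provider name below is hypothetical, and the
+    # sketch assumes RequestContext exposes `account_id` and `region` as it
+    # does elsewhere in LocalStack providers.
+    #
+    #   class SnsProvider(SnsApi):
+    #       def create_topic(self, context, name, attributes=None, tags=None,
+    #                        data_protection_policy=None, **kwargs):
+    #           # Build the topic ARN from the calling account and region,
+    #           # then answer with the generated TypedDict response shape.
+    #           arn = f"arn:aws:sns:{context.region}:{context.account_id}:{name}"
+    #           return CreateTopicResponse(TopicArn=arn)
+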
@handler("ListSubscriptions") + def list_subscriptions( + self, context: RequestContext, next_token: nextToken | None = None, **kwargs + ) -> ListSubscriptionsResponse: + raise NotImplementedError + + @handler("ListSubscriptionsByTopic") + def list_subscriptions_by_topic( + self, + context: RequestContext, + topic_arn: topicARN, + next_token: nextToken | None = None, + **kwargs, + ) -> ListSubscriptionsByTopicResponse: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, **kwargs + ) -> ListTagsForResourceResponse: + raise NotImplementedError + + @handler("ListTopics") + def list_topics( + self, context: RequestContext, next_token: nextToken | None = None, **kwargs + ) -> ListTopicsResponse: + raise NotImplementedError + + @handler("OptInPhoneNumber") + def opt_in_phone_number( + self, context: RequestContext, phone_number: PhoneNumber, **kwargs + ) -> OptInPhoneNumberResponse: + raise NotImplementedError + + @handler("Publish") + def publish( + self, + context: RequestContext, + message: message, + topic_arn: topicARN | None = None, + target_arn: String | None = None, + phone_number: PhoneNumber | None = None, + subject: subject | None = None, + message_structure: messageStructure | None = None, + message_attributes: MessageAttributeMap | None = None, + message_deduplication_id: String | None = None, + message_group_id: String | None = None, + **kwargs, + ) -> PublishResponse: + raise NotImplementedError + + @handler("PublishBatch") + def publish_batch( + self, + context: RequestContext, + topic_arn: topicARN, + publish_batch_request_entries: PublishBatchRequestEntryList, + **kwargs, + ) -> PublishBatchResponse: + raise NotImplementedError + + @handler("PutDataProtectionPolicy") + def put_data_protection_policy( + self, + context: RequestContext, + resource_arn: topicARN, + data_protection_policy: attributeValue, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RemovePermission") + def remove_permission( + self, context: RequestContext, topic_arn: topicARN, label: label, **kwargs + ) -> None: + raise NotImplementedError + + @handler("SetEndpointAttributes") + def set_endpoint_attributes( + self, context: RequestContext, endpoint_arn: String, attributes: MapStringToString, **kwargs + ) -> None: + raise NotImplementedError + + @handler("SetPlatformApplicationAttributes") + def set_platform_application_attributes( + self, + context: RequestContext, + platform_application_arn: String, + attributes: MapStringToString, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("SetSMSAttributes") + def set_sms_attributes( + self, context: RequestContext, attributes: MapStringToString, **kwargs + ) -> SetSMSAttributesResponse: + raise NotImplementedError + + @handler("SetSubscriptionAttributes") + def set_subscription_attributes( + self, + context: RequestContext, + subscription_arn: subscriptionARN, + attribute_name: attributeName, + attribute_value: attributeValue | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("SetTopicAttributes") + def set_topic_attributes( + self, + context: RequestContext, + topic_arn: topicARN, + attribute_name: attributeName, + attribute_value: attributeValue | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("Subscribe") + def subscribe( + self, + context: RequestContext, + topic_arn: topicARN, + protocol: protocol, + endpoint: endpoint | None = None, + attributes: 
SubscriptionAttributesMap | None = None, + return_subscription_arn: boolean | None = None, + **kwargs, + ) -> SubscribeResponse: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, tags: TagList, **kwargs + ) -> TagResourceResponse: + raise NotImplementedError + + @handler("Unsubscribe") + def unsubscribe( + self, context: RequestContext, subscription_arn: subscriptionARN, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, + context: RequestContext, + resource_arn: AmazonResourceName, + tag_keys: TagKeyList, + **kwargs, + ) -> UntagResourceResponse: + raise NotImplementedError + + @handler("VerifySMSSandboxPhoneNumber") + def verify_sms_sandbox_phone_number( + self, + context: RequestContext, + phone_number: PhoneNumberString, + one_time_password: OTPCode, + **kwargs, + ) -> VerifySMSSandboxPhoneNumberResult: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/sqs/__init__.py b/localstack-core/localstack/aws/api/sqs/__init__.py new file mode 100644 index 0000000000000..a09978ffe8046 --- /dev/null +++ b/localstack-core/localstack/aws/api/sqs/__init__.py @@ -0,0 +1,778 @@ +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +Boolean = bool +BoxedInteger = int +ExceptionMessage = str +MessageAttributeName = str +NullableInteger = int +String = str +TagKey = str +TagValue = str +Token = str + + +class MessageSystemAttributeName(StrEnum): + All = "All" + SenderId = "SenderId" + SentTimestamp = "SentTimestamp" + ApproximateReceiveCount = "ApproximateReceiveCount" + ApproximateFirstReceiveTimestamp = "ApproximateFirstReceiveTimestamp" + SequenceNumber = "SequenceNumber" + MessageDeduplicationId = "MessageDeduplicationId" + MessageGroupId = "MessageGroupId" + AWSTraceHeader = "AWSTraceHeader" + DeadLetterQueueSourceArn = "DeadLetterQueueSourceArn" + + +class MessageSystemAttributeNameForSends(StrEnum): + AWSTraceHeader = "AWSTraceHeader" + + +class QueueAttributeName(StrEnum): + All = "All" + Policy = "Policy" + VisibilityTimeout = "VisibilityTimeout" + MaximumMessageSize = "MaximumMessageSize" + MessageRetentionPeriod = "MessageRetentionPeriod" + ApproximateNumberOfMessages = "ApproximateNumberOfMessages" + ApproximateNumberOfMessagesNotVisible = "ApproximateNumberOfMessagesNotVisible" + CreatedTimestamp = "CreatedTimestamp" + LastModifiedTimestamp = "LastModifiedTimestamp" + QueueArn = "QueueArn" + ApproximateNumberOfMessagesDelayed = "ApproximateNumberOfMessagesDelayed" + DelaySeconds = "DelaySeconds" + ReceiveMessageWaitTimeSeconds = "ReceiveMessageWaitTimeSeconds" + RedrivePolicy = "RedrivePolicy" + FifoQueue = "FifoQueue" + ContentBasedDeduplication = "ContentBasedDeduplication" + KmsMasterKeyId = "KmsMasterKeyId" + KmsDataKeyReusePeriodSeconds = "KmsDataKeyReusePeriodSeconds" + DeduplicationScope = "DeduplicationScope" + FifoThroughputLimit = "FifoThroughputLimit" + RedriveAllowPolicy = "RedriveAllowPolicy" + SqsManagedSseEnabled = "SqsManagedSseEnabled" + + +class BatchEntryIdsNotDistinct(ServiceException): + code: str = "BatchEntryIdsNotDistinct" + sender_fault: bool = False + status_code: int = 400 + + +class BatchRequestTooLong(ServiceException): + code: str = "BatchRequestTooLong" + sender_fault: bool = False + status_code: int = 400 + + +class EmptyBatchRequest(ServiceException): + code: str = 
"EmptyBatchRequest" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAddress(ServiceException): + code: str = "InvalidAddress" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAttributeName(ServiceException): + code: str = "InvalidAttributeName" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAttributeValue(ServiceException): + code: str = "InvalidAttributeValue" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidBatchEntryId(ServiceException): + code: str = "InvalidBatchEntryId" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidIdFormat(ServiceException): + code: str = "InvalidIdFormat" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidMessageContents(ServiceException): + code: str = "InvalidMessageContents" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidSecurity(ServiceException): + code: str = "InvalidSecurity" + sender_fault: bool = False + status_code: int = 400 + + +class KmsAccessDenied(ServiceException): + code: str = "KmsAccessDenied" + sender_fault: bool = False + status_code: int = 400 + + +class KmsDisabled(ServiceException): + code: str = "KmsDisabled" + sender_fault: bool = False + status_code: int = 400 + + +class KmsInvalidKeyUsage(ServiceException): + code: str = "KmsInvalidKeyUsage" + sender_fault: bool = False + status_code: int = 400 + + +class KmsInvalidState(ServiceException): + code: str = "KmsInvalidState" + sender_fault: bool = False + status_code: int = 400 + + +class KmsNotFound(ServiceException): + code: str = "KmsNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class KmsOptInRequired(ServiceException): + code: str = "KmsOptInRequired" + sender_fault: bool = False + status_code: int = 400 + + +class KmsThrottled(ServiceException): + code: str = "KmsThrottled" + sender_fault: bool = False + status_code: int = 400 + + +class MessageNotInflight(ServiceException): + code: str = "MessageNotInflight" + sender_fault: bool = False + status_code: int = 400 + + +class OverLimit(ServiceException): + code: str = "OverLimit" + sender_fault: bool = False + status_code: int = 400 + + +class PurgeQueueInProgress(ServiceException): + code: str = "PurgeQueueInProgress" + sender_fault: bool = False + status_code: int = 400 + + +class QueueDeletedRecently(ServiceException): + code: str = "QueueDeletedRecently" + sender_fault: bool = False + status_code: int = 400 + + +class QueueDoesNotExist(ServiceException): + code: str = "QueueDoesNotExist" + sender_fault: bool = False + status_code: int = 400 + + +class QueueNameExists(ServiceException): + code: str = "QueueNameExists" + sender_fault: bool = False + status_code: int = 400 + + +class ReceiptHandleIsInvalid(ServiceException): + code: str = "ReceiptHandleIsInvalid" + sender_fault: bool = False + status_code: int = 400 + + +class RequestThrottled(ServiceException): + code: str = "RequestThrottled" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyEntriesInBatchRequest(ServiceException): + code: str = "TooManyEntriesInBatchRequest" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedOperation(ServiceException): + code: str = "UnsupportedOperation" + sender_fault: bool = False + status_code: int = 400 + + +AWSAccountIdList = List[String] +ActionNameList = 
List[String] + + +class AddPermissionRequest(ServiceRequest): + QueueUrl: String + Label: String + AWSAccountIds: AWSAccountIdList + Actions: ActionNameList + + +AttributeNameList = List[QueueAttributeName] + + +class BatchResultErrorEntry(TypedDict, total=False): + Id: String + SenderFault: Boolean + Code: String + Message: Optional[String] + + +BatchResultErrorEntryList = List[BatchResultErrorEntry] +Binary = bytes +BinaryList = List[Binary] + + +class CancelMessageMoveTaskRequest(ServiceRequest): + TaskHandle: String + + +Long = int + + +class CancelMessageMoveTaskResult(TypedDict, total=False): + ApproximateNumberOfMessagesMoved: Optional[Long] + + +class ChangeMessageVisibilityBatchRequestEntry(TypedDict, total=False): + Id: String + ReceiptHandle: String + VisibilityTimeout: Optional[NullableInteger] + + +ChangeMessageVisibilityBatchRequestEntryList = List[ChangeMessageVisibilityBatchRequestEntry] + + +class ChangeMessageVisibilityBatchRequest(ServiceRequest): + QueueUrl: String + Entries: ChangeMessageVisibilityBatchRequestEntryList + + +class ChangeMessageVisibilityBatchResultEntry(TypedDict, total=False): + Id: String + + +ChangeMessageVisibilityBatchResultEntryList = List[ChangeMessageVisibilityBatchResultEntry] + + +class ChangeMessageVisibilityBatchResult(TypedDict, total=False): + Successful: ChangeMessageVisibilityBatchResultEntryList + Failed: BatchResultErrorEntryList + + +class ChangeMessageVisibilityRequest(ServiceRequest): + QueueUrl: String + ReceiptHandle: String + VisibilityTimeout: NullableInteger + + +TagMap = Dict[TagKey, TagValue] +QueueAttributeMap = Dict[QueueAttributeName, String] + + +class CreateQueueRequest(ServiceRequest): + QueueName: String + Attributes: Optional[QueueAttributeMap] + tags: Optional[TagMap] + + +class CreateQueueResult(TypedDict, total=False): + QueueUrl: Optional[String] + + +class DeleteMessageBatchRequestEntry(TypedDict, total=False): + Id: String + ReceiptHandle: String + + +DeleteMessageBatchRequestEntryList = List[DeleteMessageBatchRequestEntry] + + +class DeleteMessageBatchRequest(ServiceRequest): + QueueUrl: String + Entries: DeleteMessageBatchRequestEntryList + + +class DeleteMessageBatchResultEntry(TypedDict, total=False): + Id: String + + +DeleteMessageBatchResultEntryList = List[DeleteMessageBatchResultEntry] + + +class DeleteMessageBatchResult(TypedDict, total=False): + Successful: DeleteMessageBatchResultEntryList + Failed: BatchResultErrorEntryList + + +class DeleteMessageRequest(ServiceRequest): + QueueUrl: String + ReceiptHandle: String + + +class DeleteQueueRequest(ServiceRequest): + QueueUrl: String + + +class GetQueueAttributesRequest(ServiceRequest): + QueueUrl: String + AttributeNames: Optional[AttributeNameList] + + +class GetQueueAttributesResult(TypedDict, total=False): + Attributes: Optional[QueueAttributeMap] + + +class GetQueueUrlRequest(ServiceRequest): + QueueName: String + QueueOwnerAWSAccountId: Optional[String] + + +class GetQueueUrlResult(TypedDict, total=False): + QueueUrl: Optional[String] + + +class ListDeadLetterSourceQueuesRequest(ServiceRequest): + QueueUrl: String + NextToken: Optional[Token] + MaxResults: Optional[BoxedInteger] + + +QueueUrlList = List[String] + + +class ListDeadLetterSourceQueuesResult(TypedDict, total=False): + queueUrls: QueueUrlList + NextToken: Optional[Token] + + +class ListMessageMoveTasksRequest(ServiceRequest): + SourceArn: String + MaxResults: Optional[NullableInteger] + + +NullableLong = int + + +class ListMessageMoveTasksResultEntry(TypedDict, total=False): + 
TaskHandle: Optional[String] + Status: Optional[String] + SourceArn: Optional[String] + DestinationArn: Optional[String] + MaxNumberOfMessagesPerSecond: Optional[NullableInteger] + ApproximateNumberOfMessagesMoved: Optional[Long] + ApproximateNumberOfMessagesToMove: Optional[NullableLong] + FailureReason: Optional[String] + StartedTimestamp: Optional[Long] + + +ListMessageMoveTasksResultEntryList = List[ListMessageMoveTasksResultEntry] + + +class ListMessageMoveTasksResult(TypedDict, total=False): + Results: Optional[ListMessageMoveTasksResultEntryList] + + +class ListQueueTagsRequest(ServiceRequest): + QueueUrl: String + + +class ListQueueTagsResult(TypedDict, total=False): + Tags: Optional[TagMap] + + +class ListQueuesRequest(ServiceRequest): + QueueNamePrefix: Optional[String] + NextToken: Optional[Token] + MaxResults: Optional[BoxedInteger] + + +class ListQueuesResult(TypedDict, total=False): + QueueUrls: Optional[QueueUrlList] + NextToken: Optional[Token] + + +StringList = List[String] + + +class MessageAttributeValue(TypedDict, total=False): + StringValue: Optional[String] + BinaryValue: Optional[Binary] + StringListValues: Optional[StringList] + BinaryListValues: Optional[BinaryList] + DataType: String + + +MessageBodyAttributeMap = Dict[String, MessageAttributeValue] +MessageSystemAttributeMap = Dict[MessageSystemAttributeName, String] + + +class Message(TypedDict, total=False): + MessageId: Optional[String] + ReceiptHandle: Optional[String] + MD5OfBody: Optional[String] + Body: Optional[String] + Attributes: Optional[MessageSystemAttributeMap] + MD5OfMessageAttributes: Optional[String] + MessageAttributes: Optional[MessageBodyAttributeMap] + + +MessageAttributeNameList = List[MessageAttributeName] + + +class MessageSystemAttributeValue(TypedDict, total=False): + StringValue: Optional[String] + BinaryValue: Optional[Binary] + StringListValues: Optional[StringList] + BinaryListValues: Optional[BinaryList] + DataType: String + + +MessageBodySystemAttributeMap = Dict[ + MessageSystemAttributeNameForSends, MessageSystemAttributeValue +] +MessageList = List[Message] +MessageSystemAttributeList = List[MessageSystemAttributeName] + + +class PurgeQueueRequest(ServiceRequest): + QueueUrl: String + + +class ReceiveMessageRequest(ServiceRequest): + QueueUrl: String + AttributeNames: Optional[AttributeNameList] + MessageSystemAttributeNames: Optional[MessageSystemAttributeList] + MessageAttributeNames: Optional[MessageAttributeNameList] + MaxNumberOfMessages: Optional[NullableInteger] + VisibilityTimeout: Optional[NullableInteger] + WaitTimeSeconds: Optional[NullableInteger] + ReceiveRequestAttemptId: Optional[String] + + +class ReceiveMessageResult(TypedDict, total=False): + Messages: Optional[MessageList] + + +class RemovePermissionRequest(ServiceRequest): + QueueUrl: String + Label: String + + +class SendMessageBatchRequestEntry(TypedDict, total=False): + Id: String + MessageBody: String + DelaySeconds: Optional[NullableInteger] + MessageAttributes: Optional[MessageBodyAttributeMap] + MessageSystemAttributes: Optional[MessageBodySystemAttributeMap] + MessageDeduplicationId: Optional[String] + MessageGroupId: Optional[String] + + +SendMessageBatchRequestEntryList = List[SendMessageBatchRequestEntry] + + +class SendMessageBatchRequest(ServiceRequest): + QueueUrl: String + Entries: SendMessageBatchRequestEntryList + + +class SendMessageBatchResultEntry(TypedDict, total=False): + Id: String + MessageId: String + MD5OfMessageBody: String + MD5OfMessageAttributes: Optional[String] + 
MD5OfMessageSystemAttributes: Optional[String] + SequenceNumber: Optional[String] + + +SendMessageBatchResultEntryList = List[SendMessageBatchResultEntry] + + +class SendMessageBatchResult(TypedDict, total=False): + Successful: SendMessageBatchResultEntryList + Failed: BatchResultErrorEntryList + + +class SendMessageRequest(ServiceRequest): + QueueUrl: String + MessageBody: String + DelaySeconds: Optional[NullableInteger] + MessageAttributes: Optional[MessageBodyAttributeMap] + MessageSystemAttributes: Optional[MessageBodySystemAttributeMap] + MessageDeduplicationId: Optional[String] + MessageGroupId: Optional[String] + + +class SendMessageResult(TypedDict, total=False): + MD5OfMessageBody: Optional[String] + MD5OfMessageAttributes: Optional[String] + MD5OfMessageSystemAttributes: Optional[String] + MessageId: Optional[String] + SequenceNumber: Optional[String] + + +class SetQueueAttributesRequest(ServiceRequest): + QueueUrl: String + Attributes: QueueAttributeMap + + +class StartMessageMoveTaskRequest(ServiceRequest): + SourceArn: String + DestinationArn: Optional[String] + MaxNumberOfMessagesPerSecond: Optional[NullableInteger] + + +class StartMessageMoveTaskResult(TypedDict, total=False): + TaskHandle: Optional[String] + + +TagKeyList = List[TagKey] + + +class TagQueueRequest(ServiceRequest): + QueueUrl: String + Tags: TagMap + + +class UntagQueueRequest(ServiceRequest): + QueueUrl: String + TagKeys: TagKeyList + + +class SqsApi: + service = "sqs" + version = "2012-11-05" + + @handler("AddPermission") + def add_permission( + self, + context: RequestContext, + queue_url: String, + label: String, + aws_account_ids: AWSAccountIdList, + actions: ActionNameList, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("CancelMessageMoveTask") + def cancel_message_move_task( + self, context: RequestContext, task_handle: String, **kwargs + ) -> CancelMessageMoveTaskResult: + raise NotImplementedError + + @handler("ChangeMessageVisibility") + def change_message_visibility( + self, + context: RequestContext, + queue_url: String, + receipt_handle: String, + visibility_timeout: NullableInteger, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("ChangeMessageVisibilityBatch") + def change_message_visibility_batch( + self, + context: RequestContext, + queue_url: String, + entries: ChangeMessageVisibilityBatchRequestEntryList, + **kwargs, + ) -> ChangeMessageVisibilityBatchResult: + raise NotImplementedError + + @handler("CreateQueue") + def create_queue( + self, + context: RequestContext, + queue_name: String, + attributes: QueueAttributeMap | None = None, + tags: TagMap | None = None, + **kwargs, + ) -> CreateQueueResult: + raise NotImplementedError + + @handler("DeleteMessage") + def delete_message( + self, context: RequestContext, queue_url: String, receipt_handle: String, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteMessageBatch") + def delete_message_batch( + self, + context: RequestContext, + queue_url: String, + entries: DeleteMessageBatchRequestEntryList, + **kwargs, + ) -> DeleteMessageBatchResult: + raise NotImplementedError + + @handler("DeleteQueue") + def delete_queue(self, context: RequestContext, queue_url: String, **kwargs) -> None: + raise NotImplementedError + + @handler("GetQueueAttributes") + def get_queue_attributes( + self, + context: RequestContext, + queue_url: String, + attribute_names: AttributeNameList | None = None, + **kwargs, + ) -> GetQueueAttributesResult: + raise NotImplementedError + + @handler("GetQueueUrl") + def 
get_queue_url( + self, + context: RequestContext, + queue_name: String, + queue_owner_aws_account_id: String | None = None, + **kwargs, + ) -> GetQueueUrlResult: + raise NotImplementedError + + @handler("ListDeadLetterSourceQueues") + def list_dead_letter_source_queues( + self, + context: RequestContext, + queue_url: String, + next_token: Token | None = None, + max_results: BoxedInteger | None = None, + **kwargs, + ) -> ListDeadLetterSourceQueuesResult: + raise NotImplementedError + + @handler("ListMessageMoveTasks") + def list_message_move_tasks( + self, + context: RequestContext, + source_arn: String, + max_results: NullableInteger | None = None, + **kwargs, + ) -> ListMessageMoveTasksResult: + raise NotImplementedError + + @handler("ListQueueTags") + def list_queue_tags( + self, context: RequestContext, queue_url: String, **kwargs + ) -> ListQueueTagsResult: + raise NotImplementedError + + @handler("ListQueues") + def list_queues( + self, + context: RequestContext, + queue_name_prefix: String | None = None, + next_token: Token | None = None, + max_results: BoxedInteger | None = None, + **kwargs, + ) -> ListQueuesResult: + raise NotImplementedError + + @handler("PurgeQueue") + def purge_queue(self, context: RequestContext, queue_url: String, **kwargs) -> None: + raise NotImplementedError + + @handler("ReceiveMessage") + def receive_message( + self, + context: RequestContext, + queue_url: String, + attribute_names: AttributeNameList | None = None, + message_system_attribute_names: MessageSystemAttributeList | None = None, + message_attribute_names: MessageAttributeNameList | None = None, + max_number_of_messages: NullableInteger | None = None, + visibility_timeout: NullableInteger | None = None, + wait_time_seconds: NullableInteger | None = None, + receive_request_attempt_id: String | None = None, + **kwargs, + ) -> ReceiveMessageResult: + raise NotImplementedError + + @handler("RemovePermission") + def remove_permission( + self, context: RequestContext, queue_url: String, label: String, **kwargs + ) -> None: + raise NotImplementedError + + @handler("SendMessage") + def send_message( + self, + context: RequestContext, + queue_url: String, + message_body: String, + delay_seconds: NullableInteger | None = None, + message_attributes: MessageBodyAttributeMap | None = None, + message_system_attributes: MessageBodySystemAttributeMap | None = None, + message_deduplication_id: String | None = None, + message_group_id: String | None = None, + **kwargs, + ) -> SendMessageResult: + raise NotImplementedError + + @handler("SendMessageBatch") + def send_message_batch( + self, + context: RequestContext, + queue_url: String, + entries: SendMessageBatchRequestEntryList, + **kwargs, + ) -> SendMessageBatchResult: + raise NotImplementedError + + @handler("SetQueueAttributes") + def set_queue_attributes( + self, context: RequestContext, queue_url: String, attributes: QueueAttributeMap, **kwargs + ) -> None: + raise NotImplementedError + + @handler("StartMessageMoveTask") + def start_message_move_task( + self, + context: RequestContext, + source_arn: String, + destination_arn: String | None = None, + max_number_of_messages_per_second: NullableInteger | None = None, + **kwargs, + ) -> StartMessageMoveTaskResult: + raise NotImplementedError + + @handler("TagQueue") + def tag_queue(self, context: RequestContext, queue_url: String, tags: TagMap, **kwargs) -> None: + raise NotImplementedError + + @handler("UntagQueue") + def untag_queue( + self, context: RequestContext, queue_url: String, tag_keys: TagKeyList, 
**kwargs + ) -> None: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/ssm/__init__.py b/localstack-core/localstack/aws/api/ssm/__init__.py new file mode 100644 index 0000000000000..53f430b7ce18a --- /dev/null +++ b/localstack-core/localstack/aws/api/ssm/__init__.py @@ -0,0 +1,7659 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AccessKeyIdType = str +AccessKeySecretType = str +AccessRequestId = str +Account = str +AccountId = str +ActivationCode = str +ActivationDescription = str +ActivationId = str +AgentErrorCode = str +AgentType = str +AgentVersion = str +AggregatorSchemaOnly = bool +AlarmName = str +AllowedPattern = str +ApplyOnlyAtCronInterval = bool +ApproveAfterDays = int +Architecture = str +AssociationExecutionFilterValue = str +AssociationExecutionId = str +AssociationExecutionTargetsFilterValue = str +AssociationFilterValue = str +AssociationId = str +AssociationName = str +AssociationResourceId = str +AssociationResourceType = str +AssociationVersion = str +AttachmentHash = str +AttachmentIdentifier = str +AttachmentName = str +AttachmentUrl = str +AttachmentsSourceValue = str +AttributeName = str +AttributeValue = str +AutomationActionName = str +AutomationExecutionFilterValue = str +AutomationExecutionId = str +AutomationParameterKey = str +AutomationParameterValue = str +AutomationTargetParameterName = str +BaselineDescription = str +BaselineId = str +BaselineName = str +BatchErrorMessage = str +Boolean = bool +CalendarNameOrARN = str +Category = str +ChangeDetailsValue = str +ChangeRequestName = str +ClientToken = str +CloudWatchLogGroupName = str +CloudWatchOutputEnabled = bool +CommandFilterValue = str +CommandId = str +CommandMaxResults = int +CommandPluginName = str +CommandPluginOutput = str +Comment = str +CompletedCount = int +ComplianceExecutionId = str +ComplianceExecutionType = str +ComplianceFilterValue = str +ComplianceItemContentHash = str +ComplianceItemId = str +ComplianceItemTitle = str +ComplianceResourceId = str +ComplianceResourceType = str +ComplianceStringFilterKey = str +ComplianceSummaryCount = int +ComplianceTypeName = str +ComputerName = str +DefaultBaseline = bool +DefaultInstanceName = str +DeliveryTimedOutCount = int +DescribeInstancePropertiesMaxResults = int +DescriptionInDocument = str +DocumentARN = str +DocumentAuthor = str +DocumentContent = str +DocumentDisplayName = str +DocumentFilterValue = str +DocumentHash = str +DocumentKeyValuesFilterKey = str +DocumentKeyValuesFilterValue = str +DocumentName = str +DocumentOwner = str +DocumentParameterDefaultValue = str +DocumentParameterDescrption = str +DocumentParameterName = str +DocumentPermissionMaxResults = int +DocumentReviewComment = str +DocumentSchemaVersion = str +DocumentSha1 = str +DocumentStatusInformation = str +DocumentVersion = str +DocumentVersionName = str +DocumentVersionNumber = str +DryRun = bool +Duration = int +EffectiveInstanceAssociationMaxResults = int +ErrorCount = int +ExcludeAccount = str +ExecutionPreviewId = str +ExecutionRoleName = str +GetInventorySchemaMaxResults = int +GetOpsMetadataMaxResults = int +GetParametersByPathMaxResults = int +IPAddress = str +ISO8601String = str +IamRole = str +IdempotencyToken = str +InstallOverrideList = str +InstanceAssociationExecutionSummary = str +InstanceCount = int +InstanceId = str +InstanceInformationFilterValue = str 
+InstanceInformationStringFilterKey = str +InstanceName = str +InstancePatchStateFilterKey = str +InstancePatchStateFilterValue = str +InstancePropertyFilterValue = str +InstancePropertyStringFilterKey = str +InstanceRole = str +InstanceState = str +InstanceStatus = str +InstanceTagName = str +InstanceType = str +InstancesCount = int +Integer = int +InventoryAggregatorExpression = str +InventoryDeletionLastStatusMessage = str +InventoryFilterKey = str +InventoryFilterValue = str +InventoryGroupName = str +InventoryItemAttributeName = str +InventoryItemCaptureTime = str +InventoryItemContentHash = str +InventoryItemSchemaVersion = str +InventoryItemTypeName = str +InventoryItemTypeNameFilter = str +InventoryResultEntityId = str +InventoryResultItemKey = str +InventoryTypeDisplayName = str +InvocationTraceOutput = str +IpAddress = str +IsSubTypeSchema = bool +KeyName = str +LastResourceDataSyncMessage = str +ListOpsMetadataMaxResults = int +MaintenanceWindowAllowUnassociatedTargets = bool +MaintenanceWindowCutoff = int +MaintenanceWindowDescription = str +MaintenanceWindowDurationHours = int +MaintenanceWindowEnabled = bool +MaintenanceWindowExecutionId = str +MaintenanceWindowExecutionStatusDetails = str +MaintenanceWindowExecutionTaskExecutionId = str +MaintenanceWindowExecutionTaskId = str +MaintenanceWindowExecutionTaskInvocationId = str +MaintenanceWindowExecutionTaskInvocationParameters = str +MaintenanceWindowFilterKey = str +MaintenanceWindowFilterValue = str +MaintenanceWindowId = str +MaintenanceWindowLambdaClientContext = str +MaintenanceWindowLambdaQualifier = str +MaintenanceWindowMaxResults = int +MaintenanceWindowName = str +MaintenanceWindowOffset = int +MaintenanceWindowSchedule = str +MaintenanceWindowSearchMaxResults = int +MaintenanceWindowStepFunctionsInput = str +MaintenanceWindowStepFunctionsName = str +MaintenanceWindowStringDateTime = str +MaintenanceWindowTargetId = str +MaintenanceWindowTaskArn = str +MaintenanceWindowTaskId = str +MaintenanceWindowTaskParameterName = str +MaintenanceWindowTaskParameterValue = str +MaintenanceWindowTaskPriority = int +MaintenanceWindowTaskTargetId = str +MaintenanceWindowTimezone = str +ManagedInstanceId = str +MaxConcurrency = str +MaxErrors = str +MaxResults = int +MaxResultsEC2Compatible = int +MaxSessionDuration = str +MetadataKey = str +MetadataValueString = str +NextToken = str +NodeAccountId = str +NodeFilterValue = str +NodeId = str +NodeOrganizationalUnitId = str +NodeOrganizationalUnitPath = str +NodeRegion = str +NotificationArn = str +OpsAggregatorType = str +OpsAggregatorValue = str +OpsAggregatorValueKey = str +OpsDataAttributeName = str +OpsDataTypeName = str +OpsEntityId = str +OpsEntityItemCaptureTime = str +OpsEntityItemKey = str +OpsFilterKey = str +OpsFilterValue = str +OpsItemAccountId = str +OpsItemArn = str +OpsItemCategory = str +OpsItemDataKey = str +OpsItemDataValueString = str +OpsItemDescription = str +OpsItemEventFilterValue = str +OpsItemEventMaxResults = int +OpsItemFilterValue = str +OpsItemId = str +OpsItemMaxResults = int +OpsItemPriority = int +OpsItemRelatedItemAssociationId = str +OpsItemRelatedItemAssociationResourceType = str +OpsItemRelatedItemAssociationResourceUri = str +OpsItemRelatedItemAssociationType = str +OpsItemRelatedItemsFilterValue = str +OpsItemRelatedItemsMaxResults = int +OpsItemSeverity = str +OpsItemSource = str +OpsItemTitle = str +OpsItemType = str +OpsMetadataArn = str +OpsMetadataFilterKey = str +OpsMetadataFilterValue = str +OpsMetadataResourceId = str +OutputSourceId = 
str +OutputSourceType = str +OwnerInformation = str +PSParameterName = str +PSParameterSelector = str +PSParameterValue = str +ParameterDataType = str +ParameterDescription = str +ParameterKeyId = str +ParameterLabel = str +ParameterName = str +ParameterPolicies = str +ParameterStringFilterKey = str +ParameterStringFilterValue = str +ParameterStringQueryOption = str +ParameterValue = str +ParametersFilterValue = str +PatchAdvisoryId = str +PatchArch = str +PatchAvailableSecurityUpdateCount = int +PatchBaselineMaxResults = int +PatchBugzillaId = str +PatchCVEId = str +PatchCVEIds = str +PatchClassification = str +PatchComplianceMaxResults = int +PatchContentUrl = str +PatchCriticalNonCompliantCount = int +PatchDescription = str +PatchEpoch = int +PatchFailedCount = int +PatchFilterValue = str +PatchGroup = str +PatchId = str +PatchInstalledCount = int +PatchInstalledOtherCount = int +PatchInstalledPendingRebootCount = int +PatchInstalledRejectedCount = int +PatchKbNumber = str +PatchLanguage = str +PatchMissingCount = int +PatchMsrcNumber = str +PatchMsrcSeverity = str +PatchName = str +PatchNotApplicableCount = int +PatchOrchestratorFilterKey = str +PatchOrchestratorFilterValue = str +PatchOtherNonCompliantCount = int +PatchProduct = str +PatchProductFamily = str +PatchRelease = str +PatchRepository = str +PatchSecurityNonCompliantCount = int +PatchSeverity = str +PatchSourceConfiguration = str +PatchSourceName = str +PatchSourceProduct = str +PatchStringDateTime = str +PatchTitle = str +PatchUnreportedNotApplicableCount = int +PatchVendor = str +PatchVersion = str +PlatformName = str +PlatformVersion = str +Policy = str +PolicyHash = str +PolicyId = str +Product = str +PutInventoryMessage = str +Region = str +RegistrationLimit = int +RegistrationMetadataKey = str +RegistrationMetadataValue = str +RegistrationsCount = int +RemainingCount = int +RequireType = str +ResourceArnString = str +ResourceCount = int +ResourceCountByStatus = str +ResourceDataSyncAWSKMSKeyARN = str +ResourceDataSyncDestinationDataSharingType = str +ResourceDataSyncEnableAllOpsDataSources = bool +ResourceDataSyncIncludeFutureRegions = bool +ResourceDataSyncName = str +ResourceDataSyncOrganizationSourceType = str +ResourceDataSyncOrganizationalUnitId = str +ResourceDataSyncS3BucketName = str +ResourceDataSyncS3Prefix = str +ResourceDataSyncS3Region = str +ResourceDataSyncSourceRegion = str +ResourceDataSyncSourceType = str +ResourceDataSyncState = str +ResourceDataSyncType = str +ResourceId = str +ResourcePolicyMaxResults = int +ResponseCode = int +Reviewer = str +S3BucketName = str +S3KeyPrefix = str +S3Region = str +ScheduleExpression = str +ScheduleOffset = int +ServiceRole = str +ServiceSettingId = str +ServiceSettingValue = str +SessionDetails = str +SessionFilterValue = str +SessionId = str +SessionManagerCloudWatchOutputUrl = str +SessionManagerParameterName = str +SessionManagerParameterValue = str +SessionManagerS3OutputUrl = str +SessionMaxResults = int +SessionOwner = str +SessionReason = str +SessionTarget = str +SessionTokenType = str +SharedDocumentVersion = str +SnapshotDownloadUrl = str +SnapshotId = str +SourceId = str +StandardErrorContent = str +StandardOutputContent = str +StatusAdditionalInfo = str +StatusDetails = str +StatusMessage = str +StatusName = str +StepExecutionFilterValue = str +StreamUrl = str +String = str +String1to256 = str +StringDateTime = str +TagKey = str +TagValue = str +TargetCount = int +TargetKey = str +TargetLocationsURL = str +TargetMapKey = str +TargetMapValue = str 
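
An illustrative aside on the alias block being generated here: these names are plain assignments, so at runtime they are the underlying builtins and add no validation; they exist to carry the AWS shape name into signatures. A self-contained sketch, redefining two of the aliases from above for demonstration:

```python
# As generated above: transparent aliases of the builtins.
MaintenanceWindowName = str
MaintenanceWindowCutoff = int


def describe_window(name: MaintenanceWindowName, cutoff: MaintenanceWindowCutoff) -> str:
    # Any str/int is accepted; the aliases document which AWS shape a value
    # belongs to rather than enforcing it.
    return f"window {name} stops scheduling {cutoff}h before the end"


assert MaintenanceWindowName is str  # alias, not a subclass
print(describe_window("nightly-patching", 2))
```
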
+TargetType = str +TargetValue = str +TimeoutSeconds = int +TokenValue = str +TotalCount = int +UUID = str +Url = str +ValidNextStep = str +Version = str + + +class AccessRequestStatus(StrEnum): + Approved = "Approved" + Rejected = "Rejected" + Revoked = "Revoked" + Expired = "Expired" + Pending = "Pending" + + +class AssociationComplianceSeverity(StrEnum): + CRITICAL = "CRITICAL" + HIGH = "HIGH" + MEDIUM = "MEDIUM" + LOW = "LOW" + UNSPECIFIED = "UNSPECIFIED" + + +class AssociationExecutionFilterKey(StrEnum): + ExecutionId = "ExecutionId" + Status = "Status" + CreatedTime = "CreatedTime" + + +class AssociationExecutionTargetsFilterKey(StrEnum): + Status = "Status" + ResourceId = "ResourceId" + ResourceType = "ResourceType" + + +class AssociationFilterKey(StrEnum): + InstanceId = "InstanceId" + Name = "Name" + AssociationId = "AssociationId" + AssociationStatusName = "AssociationStatusName" + LastExecutedBefore = "LastExecutedBefore" + LastExecutedAfter = "LastExecutedAfter" + AssociationName = "AssociationName" + ResourceGroupName = "ResourceGroupName" + + +class AssociationFilterOperatorType(StrEnum): + EQUAL = "EQUAL" + LESS_THAN = "LESS_THAN" + GREATER_THAN = "GREATER_THAN" + + +class AssociationStatusName(StrEnum): + Pending = "Pending" + Success = "Success" + Failed = "Failed" + + +class AssociationSyncCompliance(StrEnum): + AUTO = "AUTO" + MANUAL = "MANUAL" + + +class AttachmentHashType(StrEnum): + Sha256 = "Sha256" + + +class AttachmentsSourceKey(StrEnum): + SourceUrl = "SourceUrl" + S3FileUrl = "S3FileUrl" + AttachmentReference = "AttachmentReference" + + +class AutomationExecutionFilterKey(StrEnum): + DocumentNamePrefix = "DocumentNamePrefix" + ExecutionStatus = "ExecutionStatus" + ExecutionId = "ExecutionId" + ParentExecutionId = "ParentExecutionId" + CurrentAction = "CurrentAction" + StartTimeBefore = "StartTimeBefore" + StartTimeAfter = "StartTimeAfter" + AutomationType = "AutomationType" + TagKey = "TagKey" + TargetResourceGroup = "TargetResourceGroup" + AutomationSubtype = "AutomationSubtype" + OpsItemId = "OpsItemId" + + +class AutomationExecutionStatus(StrEnum): + Pending = "Pending" + InProgress = "InProgress" + Waiting = "Waiting" + Success = "Success" + TimedOut = "TimedOut" + Cancelling = "Cancelling" + Cancelled = "Cancelled" + Failed = "Failed" + PendingApproval = "PendingApproval" + Approved = "Approved" + Rejected = "Rejected" + Scheduled = "Scheduled" + RunbookInProgress = "RunbookInProgress" + PendingChangeCalendarOverride = "PendingChangeCalendarOverride" + ChangeCalendarOverrideApproved = "ChangeCalendarOverrideApproved" + ChangeCalendarOverrideRejected = "ChangeCalendarOverrideRejected" + CompletedWithSuccess = "CompletedWithSuccess" + CompletedWithFailure = "CompletedWithFailure" + Exited = "Exited" + + +class AutomationSubtype(StrEnum): + ChangeRequest = "ChangeRequest" + AccessRequest = "AccessRequest" + + +class AutomationType(StrEnum): + CrossAccount = "CrossAccount" + Local = "Local" + + +class CalendarState(StrEnum): + OPEN = "OPEN" + CLOSED = "CLOSED" + + +class CommandFilterKey(StrEnum): + InvokedAfter = "InvokedAfter" + InvokedBefore = "InvokedBefore" + Status = "Status" + ExecutionStage = "ExecutionStage" + DocumentName = "DocumentName" + + +class CommandInvocationStatus(StrEnum): + Pending = "Pending" + InProgress = "InProgress" + Delayed = "Delayed" + Success = "Success" + Cancelled = "Cancelled" + TimedOut = "TimedOut" + Failed = "Failed" + Cancelling = "Cancelling" + + +class CommandPluginStatus(StrEnum): + Pending = "Pending" + InProgress = 
"InProgress" + Success = "Success" + TimedOut = "TimedOut" + Cancelled = "Cancelled" + Failed = "Failed" + + +class CommandStatus(StrEnum): + Pending = "Pending" + InProgress = "InProgress" + Success = "Success" + Cancelled = "Cancelled" + Failed = "Failed" + TimedOut = "TimedOut" + Cancelling = "Cancelling" + + +class ComplianceQueryOperatorType(StrEnum): + EQUAL = "EQUAL" + NOT_EQUAL = "NOT_EQUAL" + BEGIN_WITH = "BEGIN_WITH" + LESS_THAN = "LESS_THAN" + GREATER_THAN = "GREATER_THAN" + + +class ComplianceSeverity(StrEnum): + CRITICAL = "CRITICAL" + HIGH = "HIGH" + MEDIUM = "MEDIUM" + LOW = "LOW" + INFORMATIONAL = "INFORMATIONAL" + UNSPECIFIED = "UNSPECIFIED" + + +class ComplianceStatus(StrEnum): + COMPLIANT = "COMPLIANT" + NON_COMPLIANT = "NON_COMPLIANT" + + +class ComplianceUploadType(StrEnum): + COMPLETE = "COMPLETE" + PARTIAL = "PARTIAL" + + +class ConnectionStatus(StrEnum): + connected = "connected" + notconnected = "notconnected" + + +class DescribeActivationsFilterKeys(StrEnum): + ActivationIds = "ActivationIds" + DefaultInstanceName = "DefaultInstanceName" + IamRole = "IamRole" + + +class DocumentFilterKey(StrEnum): + Name = "Name" + Owner = "Owner" + PlatformTypes = "PlatformTypes" + DocumentType = "DocumentType" + + +class DocumentFormat(StrEnum): + YAML = "YAML" + JSON = "JSON" + TEXT = "TEXT" + + +class DocumentHashType(StrEnum): + Sha256 = "Sha256" + Sha1 = "Sha1" + + +class DocumentMetadataEnum(StrEnum): + DocumentReviews = "DocumentReviews" + + +class DocumentParameterType(StrEnum): + String = "String" + StringList = "StringList" + + +class DocumentPermissionType(StrEnum): + Share = "Share" + + +class DocumentReviewAction(StrEnum): + SendForReview = "SendForReview" + UpdateReview = "UpdateReview" + Approve = "Approve" + Reject = "Reject" + + +class DocumentReviewCommentType(StrEnum): + Comment = "Comment" + + +class DocumentStatus(StrEnum): + Creating = "Creating" + Active = "Active" + Updating = "Updating" + Deleting = "Deleting" + Failed = "Failed" + + +class DocumentType(StrEnum): + Command = "Command" + Policy = "Policy" + Automation = "Automation" + Session = "Session" + Package = "Package" + ApplicationConfiguration = "ApplicationConfiguration" + ApplicationConfigurationSchema = "ApplicationConfigurationSchema" + DeploymentStrategy = "DeploymentStrategy" + ChangeCalendar = "ChangeCalendar" + Automation_ChangeTemplate = "Automation.ChangeTemplate" + ProblemAnalysis = "ProblemAnalysis" + ProblemAnalysisTemplate = "ProblemAnalysisTemplate" + CloudFormation = "CloudFormation" + ConformancePackTemplate = "ConformancePackTemplate" + QuickSetup = "QuickSetup" + ManualApprovalPolicy = "ManualApprovalPolicy" + AutoApprovalPolicy = "AutoApprovalPolicy" + + +class ExecutionMode(StrEnum): + Auto = "Auto" + Interactive = "Interactive" + + +class ExecutionPreviewStatus(StrEnum): + Pending = "Pending" + InProgress = "InProgress" + Success = "Success" + Failed = "Failed" + + +class ExternalAlarmState(StrEnum): + UNKNOWN = "UNKNOWN" + ALARM = "ALARM" + + +class Fault(StrEnum): + Client = "Client" + Server = "Server" + Unknown = "Unknown" + + +class ImpactType(StrEnum): + Mutating = "Mutating" + NonMutating = "NonMutating" + Undetermined = "Undetermined" + + +class InstanceInformationFilterKey(StrEnum): + InstanceIds = "InstanceIds" + AgentVersion = "AgentVersion" + PingStatus = "PingStatus" + PlatformTypes = "PlatformTypes" + ActivationIds = "ActivationIds" + IamRole = "IamRole" + ResourceType = "ResourceType" + AssociationStatus = "AssociationStatus" + + +class 
InstancePatchStateOperatorType(StrEnum): + Equal = "Equal" + NotEqual = "NotEqual" + LessThan = "LessThan" + GreaterThan = "GreaterThan" + + +class InstancePropertyFilterKey(StrEnum): + InstanceIds = "InstanceIds" + AgentVersion = "AgentVersion" + PingStatus = "PingStatus" + PlatformTypes = "PlatformTypes" + DocumentName = "DocumentName" + ActivationIds = "ActivationIds" + IamRole = "IamRole" + ResourceType = "ResourceType" + AssociationStatus = "AssociationStatus" + + +class InstancePropertyFilterOperator(StrEnum): + Equal = "Equal" + NotEqual = "NotEqual" + BeginWith = "BeginWith" + LessThan = "LessThan" + GreaterThan = "GreaterThan" + + +class InventoryAttributeDataType(StrEnum): + string = "string" + number = "number" + + +class InventoryDeletionStatus(StrEnum): + InProgress = "InProgress" + Complete = "Complete" + + +class InventoryQueryOperatorType(StrEnum): + Equal = "Equal" + NotEqual = "NotEqual" + BeginWith = "BeginWith" + LessThan = "LessThan" + GreaterThan = "GreaterThan" + Exists = "Exists" + + +class InventorySchemaDeleteOption(StrEnum): + DisableSchema = "DisableSchema" + DeleteSchema = "DeleteSchema" + + +class LastResourceDataSyncStatus(StrEnum): + Successful = "Successful" + Failed = "Failed" + InProgress = "InProgress" + + +class MaintenanceWindowExecutionStatus(StrEnum): + PENDING = "PENDING" + IN_PROGRESS = "IN_PROGRESS" + SUCCESS = "SUCCESS" + FAILED = "FAILED" + TIMED_OUT = "TIMED_OUT" + CANCELLING = "CANCELLING" + CANCELLED = "CANCELLED" + SKIPPED_OVERLAPPING = "SKIPPED_OVERLAPPING" + + +class MaintenanceWindowResourceType(StrEnum): + INSTANCE = "INSTANCE" + RESOURCE_GROUP = "RESOURCE_GROUP" + + +class MaintenanceWindowTaskCutoffBehavior(StrEnum): + CONTINUE_TASK = "CONTINUE_TASK" + CANCEL_TASK = "CANCEL_TASK" + + +class MaintenanceWindowTaskType(StrEnum): + RUN_COMMAND = "RUN_COMMAND" + AUTOMATION = "AUTOMATION" + STEP_FUNCTIONS = "STEP_FUNCTIONS" + LAMBDA = "LAMBDA" + + +class ManagedStatus(StrEnum): + All = "All" + Managed = "Managed" + Unmanaged = "Unmanaged" + + +class NodeAggregatorType(StrEnum): + Count = "Count" + + +class NodeAttributeName(StrEnum): + AgentVersion = "AgentVersion" + PlatformName = "PlatformName" + PlatformType = "PlatformType" + PlatformVersion = "PlatformVersion" + Region = "Region" + ResourceType = "ResourceType" + + +class NodeFilterKey(StrEnum): + AgentType = "AgentType" + AgentVersion = "AgentVersion" + ComputerName = "ComputerName" + InstanceId = "InstanceId" + InstanceStatus = "InstanceStatus" + IpAddress = "IpAddress" + ManagedStatus = "ManagedStatus" + PlatformName = "PlatformName" + PlatformType = "PlatformType" + PlatformVersion = "PlatformVersion" + ResourceType = "ResourceType" + OrganizationalUnitId = "OrganizationalUnitId" + OrganizationalUnitPath = "OrganizationalUnitPath" + Region = "Region" + AccountId = "AccountId" + + +class NodeFilterOperatorType(StrEnum): + Equal = "Equal" + NotEqual = "NotEqual" + BeginWith = "BeginWith" + + +class NodeTypeName(StrEnum): + Instance = "Instance" + + +class NotificationEvent(StrEnum): + All = "All" + InProgress = "InProgress" + Success = "Success" + TimedOut = "TimedOut" + Cancelled = "Cancelled" + Failed = "Failed" + + +class NotificationType(StrEnum): + Command = "Command" + Invocation = "Invocation" + + +class OperatingSystem(StrEnum): + WINDOWS = "WINDOWS" + AMAZON_LINUX = "AMAZON_LINUX" + AMAZON_LINUX_2 = "AMAZON_LINUX_2" + AMAZON_LINUX_2022 = "AMAZON_LINUX_2022" + UBUNTU = "UBUNTU" + REDHAT_ENTERPRISE_LINUX = "REDHAT_ENTERPRISE_LINUX" + SUSE = "SUSE" + CENTOS = "CENTOS" + 
ORACLE_LINUX = "ORACLE_LINUX" + DEBIAN = "DEBIAN" + MACOS = "MACOS" + RASPBIAN = "RASPBIAN" + ROCKY_LINUX = "ROCKY_LINUX" + ALMA_LINUX = "ALMA_LINUX" + AMAZON_LINUX_2023 = "AMAZON_LINUX_2023" + + +class OpsFilterOperatorType(StrEnum): + Equal = "Equal" + NotEqual = "NotEqual" + BeginWith = "BeginWith" + LessThan = "LessThan" + GreaterThan = "GreaterThan" + Exists = "Exists" + + +class OpsItemDataType(StrEnum): + SearchableString = "SearchableString" + String = "String" + + +class OpsItemEventFilterKey(StrEnum): + OpsItemId = "OpsItemId" + + +class OpsItemEventFilterOperator(StrEnum): + Equal = "Equal" + + +class OpsItemFilterKey(StrEnum): + Status = "Status" + CreatedBy = "CreatedBy" + Source = "Source" + Priority = "Priority" + Title = "Title" + OpsItemId = "OpsItemId" + CreatedTime = "CreatedTime" + LastModifiedTime = "LastModifiedTime" + ActualStartTime = "ActualStartTime" + ActualEndTime = "ActualEndTime" + PlannedStartTime = "PlannedStartTime" + PlannedEndTime = "PlannedEndTime" + OperationalData = "OperationalData" + OperationalDataKey = "OperationalDataKey" + OperationalDataValue = "OperationalDataValue" + ResourceId = "ResourceId" + AutomationId = "AutomationId" + Category = "Category" + Severity = "Severity" + OpsItemType = "OpsItemType" + AccessRequestByRequesterArn = "AccessRequestByRequesterArn" + AccessRequestByRequesterId = "AccessRequestByRequesterId" + AccessRequestByApproverArn = "AccessRequestByApproverArn" + AccessRequestByApproverId = "AccessRequestByApproverId" + AccessRequestBySourceAccountId = "AccessRequestBySourceAccountId" + AccessRequestBySourceOpsItemId = "AccessRequestBySourceOpsItemId" + AccessRequestBySourceRegion = "AccessRequestBySourceRegion" + AccessRequestByIsReplica = "AccessRequestByIsReplica" + AccessRequestByTargetResourceId = "AccessRequestByTargetResourceId" + ChangeRequestByRequesterArn = "ChangeRequestByRequesterArn" + ChangeRequestByRequesterName = "ChangeRequestByRequesterName" + ChangeRequestByApproverArn = "ChangeRequestByApproverArn" + ChangeRequestByApproverName = "ChangeRequestByApproverName" + ChangeRequestByTemplate = "ChangeRequestByTemplate" + ChangeRequestByTargetsResourceGroup = "ChangeRequestByTargetsResourceGroup" + InsightByType = "InsightByType" + AccountId = "AccountId" + + +class OpsItemFilterOperator(StrEnum): + Equal = "Equal" + Contains = "Contains" + GreaterThan = "GreaterThan" + LessThan = "LessThan" + + +class OpsItemRelatedItemsFilterKey(StrEnum): + ResourceType = "ResourceType" + AssociationId = "AssociationId" + ResourceUri = "ResourceUri" + + +class OpsItemRelatedItemsFilterOperator(StrEnum): + Equal = "Equal" + + +class OpsItemStatus(StrEnum): + Open = "Open" + InProgress = "InProgress" + Resolved = "Resolved" + Pending = "Pending" + TimedOut = "TimedOut" + Cancelling = "Cancelling" + Cancelled = "Cancelled" + Failed = "Failed" + CompletedWithSuccess = "CompletedWithSuccess" + CompletedWithFailure = "CompletedWithFailure" + Scheduled = "Scheduled" + RunbookInProgress = "RunbookInProgress" + PendingChangeCalendarOverride = "PendingChangeCalendarOverride" + ChangeCalendarOverrideApproved = "ChangeCalendarOverrideApproved" + ChangeCalendarOverrideRejected = "ChangeCalendarOverrideRejected" + PendingApproval = "PendingApproval" + Approved = "Approved" + Revoked = "Revoked" + Rejected = "Rejected" + Closed = "Closed" + + +class ParameterTier(StrEnum): + Standard = "Standard" + Advanced = "Advanced" + Intelligent_Tiering = "Intelligent-Tiering" + + +class ParameterType(StrEnum): + String = "String" + StringList = 
"StringList" + SecureString = "SecureString" + + +class ParametersFilterKey(StrEnum): + Name = "Name" + Type = "Type" + KeyId = "KeyId" + + +class PatchAction(StrEnum): + ALLOW_AS_DEPENDENCY = "ALLOW_AS_DEPENDENCY" + BLOCK = "BLOCK" + + +class PatchComplianceDataState(StrEnum): + INSTALLED = "INSTALLED" + INSTALLED_OTHER = "INSTALLED_OTHER" + INSTALLED_PENDING_REBOOT = "INSTALLED_PENDING_REBOOT" + INSTALLED_REJECTED = "INSTALLED_REJECTED" + MISSING = "MISSING" + NOT_APPLICABLE = "NOT_APPLICABLE" + FAILED = "FAILED" + AVAILABLE_SECURITY_UPDATE = "AVAILABLE_SECURITY_UPDATE" + + +class PatchComplianceLevel(StrEnum): + CRITICAL = "CRITICAL" + HIGH = "HIGH" + MEDIUM = "MEDIUM" + LOW = "LOW" + INFORMATIONAL = "INFORMATIONAL" + UNSPECIFIED = "UNSPECIFIED" + + +class PatchComplianceStatus(StrEnum): + COMPLIANT = "COMPLIANT" + NON_COMPLIANT = "NON_COMPLIANT" + + +class PatchDeploymentStatus(StrEnum): + APPROVED = "APPROVED" + PENDING_APPROVAL = "PENDING_APPROVAL" + EXPLICIT_APPROVED = "EXPLICIT_APPROVED" + EXPLICIT_REJECTED = "EXPLICIT_REJECTED" + + +class PatchFilterKey(StrEnum): + ARCH = "ARCH" + ADVISORY_ID = "ADVISORY_ID" + BUGZILLA_ID = "BUGZILLA_ID" + PATCH_SET = "PATCH_SET" + PRODUCT = "PRODUCT" + PRODUCT_FAMILY = "PRODUCT_FAMILY" + CLASSIFICATION = "CLASSIFICATION" + CVE_ID = "CVE_ID" + EPOCH = "EPOCH" + MSRC_SEVERITY = "MSRC_SEVERITY" + NAME = "NAME" + PATCH_ID = "PATCH_ID" + SECTION = "SECTION" + PRIORITY = "PRIORITY" + REPOSITORY = "REPOSITORY" + RELEASE = "RELEASE" + SEVERITY = "SEVERITY" + SECURITY = "SECURITY" + VERSION = "VERSION" + + +class PatchOperationType(StrEnum): + Scan = "Scan" + Install = "Install" + + +class PatchProperty(StrEnum): + PRODUCT = "PRODUCT" + PRODUCT_FAMILY = "PRODUCT_FAMILY" + CLASSIFICATION = "CLASSIFICATION" + MSRC_SEVERITY = "MSRC_SEVERITY" + PRIORITY = "PRIORITY" + SEVERITY = "SEVERITY" + + +class PatchSet(StrEnum): + OS = "OS" + APPLICATION = "APPLICATION" + + +class PingStatus(StrEnum): + Online = "Online" + ConnectionLost = "ConnectionLost" + Inactive = "Inactive" + + +class PlatformType(StrEnum): + Windows = "Windows" + Linux = "Linux" + MacOS = "MacOS" + + +class RebootOption(StrEnum): + RebootIfNeeded = "RebootIfNeeded" + NoReboot = "NoReboot" + + +class ResourceDataSyncS3Format(StrEnum): + JsonSerDe = "JsonSerDe" + + +class ResourceType(StrEnum): + ManagedInstance = "ManagedInstance" + EC2Instance = "EC2Instance" + + +class ResourceTypeForTagging(StrEnum): + Document = "Document" + ManagedInstance = "ManagedInstance" + MaintenanceWindow = "MaintenanceWindow" + Parameter = "Parameter" + PatchBaseline = "PatchBaseline" + OpsItem = "OpsItem" + OpsMetadata = "OpsMetadata" + Automation = "Automation" + Association = "Association" + + +class ReviewStatus(StrEnum): + APPROVED = "APPROVED" + NOT_REVIEWED = "NOT_REVIEWED" + PENDING = "PENDING" + REJECTED = "REJECTED" + + +class SessionFilterKey(StrEnum): + InvokedAfter = "InvokedAfter" + InvokedBefore = "InvokedBefore" + Target = "Target" + Owner = "Owner" + Status = "Status" + SessionId = "SessionId" + + +class SessionState(StrEnum): + Active = "Active" + History = "History" + + +class SessionStatus(StrEnum): + Connected = "Connected" + Connecting = "Connecting" + Disconnected = "Disconnected" + Terminated = "Terminated" + Terminating = "Terminating" + Failed = "Failed" + + +class SignalType(StrEnum): + Approve = "Approve" + Reject = "Reject" + StartStep = "StartStep" + StopStep = "StopStep" + Resume = "Resume" + Revoke = "Revoke" + + +class SourceType(StrEnum): + AWS_EC2_Instance = "AWS::EC2::Instance" + 
AWS_IoT_Thing = "AWS::IoT::Thing" + AWS_SSM_ManagedInstance = "AWS::SSM::ManagedInstance" + + +class StepExecutionFilterKey(StrEnum): + StartTimeBefore = "StartTimeBefore" + StartTimeAfter = "StartTimeAfter" + StepExecutionStatus = "StepExecutionStatus" + StepExecutionId = "StepExecutionId" + StepName = "StepName" + Action = "Action" + ParentStepExecutionId = "ParentStepExecutionId" + ParentStepIteration = "ParentStepIteration" + ParentStepIteratorValue = "ParentStepIteratorValue" + + +class StopType(StrEnum): + Complete = "Complete" + Cancel = "Cancel" + + +class AccessDeniedException(ServiceException): + code: str = "AccessDeniedException" + sender_fault: bool = False + status_code: int = 400 + + +class AlreadyExistsException(ServiceException): + code: str = "AlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + + +class AssociatedInstances(ServiceException): + code: str = "AssociatedInstances" + sender_fault: bool = False + status_code: int = 400 + + +class AssociationAlreadyExists(ServiceException): + code: str = "AssociationAlreadyExists" + sender_fault: bool = False + status_code: int = 400 + + +class AssociationDoesNotExist(ServiceException): + code: str = "AssociationDoesNotExist" + sender_fault: bool = False + status_code: int = 400 + + +class AssociationExecutionDoesNotExist(ServiceException): + code: str = "AssociationExecutionDoesNotExist" + sender_fault: bool = False + status_code: int = 400 + + +class AssociationLimitExceeded(ServiceException): + code: str = "AssociationLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class AssociationVersionLimitExceeded(ServiceException): + code: str = "AssociationVersionLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class AutomationDefinitionNotApprovedException(ServiceException): + code: str = "AutomationDefinitionNotApprovedException" + sender_fault: bool = False + status_code: int = 400 + + +class AutomationDefinitionNotFoundException(ServiceException): + code: str = "AutomationDefinitionNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class AutomationDefinitionVersionNotFoundException(ServiceException): + code: str = "AutomationDefinitionVersionNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class AutomationExecutionLimitExceededException(ServiceException): + code: str = "AutomationExecutionLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class AutomationExecutionNotFoundException(ServiceException): + code: str = "AutomationExecutionNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class AutomationStepNotFoundException(ServiceException): + code: str = "AutomationStepNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class ComplianceTypeCountLimitExceededException(ServiceException): + code: str = "ComplianceTypeCountLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class CustomSchemaCountLimitExceededException(ServiceException): + code: str = "CustomSchemaCountLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class DocumentAlreadyExists(ServiceException): + code: str = "DocumentAlreadyExists" + sender_fault: bool = False + status_code: int = 400 + + +class DocumentLimitExceeded(ServiceException): + code: str = "DocumentLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class DocumentPermissionLimit(ServiceException): + code: 
str = "DocumentPermissionLimit" + sender_fault: bool = False + status_code: int = 400 + + +class DocumentVersionLimitExceeded(ServiceException): + code: str = "DocumentVersionLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class DoesNotExistException(ServiceException): + code: str = "DoesNotExistException" + sender_fault: bool = False + status_code: int = 400 + + +class DuplicateDocumentContent(ServiceException): + code: str = "DuplicateDocumentContent" + sender_fault: bool = False + status_code: int = 400 + + +class DuplicateDocumentVersionName(ServiceException): + code: str = "DuplicateDocumentVersionName" + sender_fault: bool = False + status_code: int = 400 + + +class DuplicateInstanceId(ServiceException): + code: str = "DuplicateInstanceId" + sender_fault: bool = False + status_code: int = 400 + + +class FeatureNotAvailableException(ServiceException): + code: str = "FeatureNotAvailableException" + sender_fault: bool = False + status_code: int = 400 + + +class HierarchyLevelLimitExceededException(ServiceException): + code: str = "HierarchyLevelLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class HierarchyTypeMismatchException(ServiceException): + code: str = "HierarchyTypeMismatchException" + sender_fault: bool = False + status_code: int = 400 + + +class IdempotentParameterMismatch(ServiceException): + code: str = "IdempotentParameterMismatch" + sender_fault: bool = False + status_code: int = 400 + + +class IncompatiblePolicyException(ServiceException): + code: str = "IncompatiblePolicyException" + sender_fault: bool = False + status_code: int = 400 + + +class InternalServerError(ServiceException): + code: str = "InternalServerError" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidActivation(ServiceException): + code: str = "InvalidActivation" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidActivationId(ServiceException): + code: str = "InvalidActivationId" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAggregatorException(ServiceException): + code: str = "InvalidAggregatorException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAllowedPatternException(ServiceException): + code: str = "InvalidAllowedPatternException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAssociation(ServiceException): + code: str = "InvalidAssociation" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAssociationVersion(ServiceException): + code: str = "InvalidAssociationVersion" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAutomationExecutionParametersException(ServiceException): + code: str = "InvalidAutomationExecutionParametersException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAutomationSignalException(ServiceException): + code: str = "InvalidAutomationSignalException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidAutomationStatusUpdateException(ServiceException): + code: str = "InvalidAutomationStatusUpdateException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidCommandId(ServiceException): + code: str = "InvalidCommandId" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDeleteInventoryParametersException(ServiceException): + code: str = "InvalidDeleteInventoryParametersException" + sender_fault: bool = False + status_code: int = 400 + + +class 
InvalidDeletionIdException(ServiceException): + code: str = "InvalidDeletionIdException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDocument(ServiceException): + code: str = "InvalidDocument" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDocumentContent(ServiceException): + code: str = "InvalidDocumentContent" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDocumentOperation(ServiceException): + code: str = "InvalidDocumentOperation" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDocumentSchemaVersion(ServiceException): + code: str = "InvalidDocumentSchemaVersion" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDocumentType(ServiceException): + code: str = "InvalidDocumentType" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDocumentVersion(ServiceException): + code: str = "InvalidDocumentVersion" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidFilter(ServiceException): + code: str = "InvalidFilter" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidFilterKey(ServiceException): + code: str = "InvalidFilterKey" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidFilterOption(ServiceException): + code: str = "InvalidFilterOption" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidFilterValue(ServiceException): + code: str = "InvalidFilterValue" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidInstanceId(ServiceException): + code: str = "InvalidInstanceId" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidInstanceInformationFilterValue(ServiceException): + code: str = "InvalidInstanceInformationFilterValue" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidInstancePropertyFilterValue(ServiceException): + code: str = "InvalidInstancePropertyFilterValue" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidInventoryGroupException(ServiceException): + code: str = "InvalidInventoryGroupException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidInventoryItemContextException(ServiceException): + code: str = "InvalidInventoryItemContextException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidInventoryRequestException(ServiceException): + code: str = "InvalidInventoryRequestException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidItemContentException(ServiceException): + code: str = "InvalidItemContentException" + sender_fault: bool = False + status_code: int = 400 + TypeName: Optional[InventoryItemTypeName] + + +class InvalidKeyId(ServiceException): + code: str = "InvalidKeyId" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidNextToken(ServiceException): + code: str = "InvalidNextToken" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidNotificationConfig(ServiceException): + code: str = "InvalidNotificationConfig" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidOptionException(ServiceException): + code: str = "InvalidOptionException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidOutputFolder(ServiceException): + code: str = "InvalidOutputFolder" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidOutputLocation(ServiceException): + code: str = "InvalidOutputLocation" + sender_fault: bool = 
False + status_code: int = 400 + + +class InvalidParameters(ServiceException): + code: str = "InvalidParameters" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidPermissionType(ServiceException): + code: str = "InvalidPermissionType" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidPluginName(ServiceException): + code: str = "InvalidPluginName" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidPolicyAttributeException(ServiceException): + code: str = "InvalidPolicyAttributeException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidPolicyTypeException(ServiceException): + code: str = "InvalidPolicyTypeException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidResourceId(ServiceException): + code: str = "InvalidResourceId" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidResourceType(ServiceException): + code: str = "InvalidResourceType" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidResultAttributeException(ServiceException): + code: str = "InvalidResultAttributeException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidRole(ServiceException): + code: str = "InvalidRole" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidSchedule(ServiceException): + code: str = "InvalidSchedule" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTag(ServiceException): + code: str = "InvalidTag" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTarget(ServiceException): + code: str = "InvalidTarget" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTargetMaps(ServiceException): + code: str = "InvalidTargetMaps" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidTypeNameException(ServiceException): + code: str = "InvalidTypeNameException" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidUpdate(ServiceException): + code: str = "InvalidUpdate" + sender_fault: bool = False + status_code: int = 400 + + +class InvocationDoesNotExist(ServiceException): + code: str = "InvocationDoesNotExist" + sender_fault: bool = False + status_code: int = 400 + + +class ItemContentMismatchException(ServiceException): + code: str = "ItemContentMismatchException" + sender_fault: bool = False + status_code: int = 400 + TypeName: Optional[InventoryItemTypeName] + + +class ItemSizeLimitExceededException(ServiceException): + code: str = "ItemSizeLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + TypeName: Optional[InventoryItemTypeName] + + +class MalformedResourcePolicyDocumentException(ServiceException): + code: str = "MalformedResourcePolicyDocumentException" + sender_fault: bool = False + status_code: int = 400 + + +class MaxDocumentSizeExceeded(ServiceException): + code: str = "MaxDocumentSizeExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class OpsItemAccessDeniedException(ServiceException): + code: str = "OpsItemAccessDeniedException" + sender_fault: bool = False + status_code: int = 400 + + +class OpsItemAlreadyExistsException(ServiceException): + code: str = "OpsItemAlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + OpsItemId: Optional[String] + + +class OpsItemConflictException(ServiceException): + code: str = "OpsItemConflictException" + sender_fault: bool = False + status_code: int = 400 + + +OpsItemParameterNamesList = List[String] + 
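
Each error shape in this file becomes a `ServiceException` subclass whose class attributes (`code`, `sender_fault`, `status_code`, all 400/client-style here) drive serialization, with any shape-specific members declared as optional class-level annotations (e.g. `OpsItemAlreadyExistsException.OpsItemId` above). A hedged sketch of raising one from provider code, assuming the base `ServiceException` accepts a human-readable message like any `Exception`:

```python
from localstack.aws.api.ssm import OpsItemAlreadyExistsException


def create_ops_item(ops_item_id: str, existing_ids: set[str]) -> None:
    if ops_item_id in existing_ids:
        error = OpsItemAlreadyExistsException(
            f"OpsItem with id {ops_item_id} already exists"
        )
        # attach the optional typed member declared on the generated class
        error.OpsItemId = ops_item_id
        raise error
    existing_ids.add(ops_item_id)
```
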
+ +class OpsItemInvalidParameterException(ServiceException): + code: str = "OpsItemInvalidParameterException" + sender_fault: bool = False + status_code: int = 400 + ParameterNames: Optional[OpsItemParameterNamesList] + + +class OpsItemLimitExceededException(ServiceException): + code: str = "OpsItemLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + ResourceTypes: Optional[OpsItemParameterNamesList] + Limit: Optional[Integer] + LimitType: Optional[String] + + +class OpsItemNotFoundException(ServiceException): + code: str = "OpsItemNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class OpsItemRelatedItemAlreadyExistsException(ServiceException): + code: str = "OpsItemRelatedItemAlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + ResourceUri: Optional[OpsItemRelatedItemAssociationResourceUri] + OpsItemId: Optional[OpsItemId] + + +class OpsItemRelatedItemAssociationNotFoundException(ServiceException): + code: str = "OpsItemRelatedItemAssociationNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class OpsMetadataAlreadyExistsException(ServiceException): + code: str = "OpsMetadataAlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + + +class OpsMetadataInvalidArgumentException(ServiceException): + code: str = "OpsMetadataInvalidArgumentException" + sender_fault: bool = False + status_code: int = 400 + + +class OpsMetadataKeyLimitExceededException(ServiceException): + code: str = "OpsMetadataKeyLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class OpsMetadataLimitExceededException(ServiceException): + code: str = "OpsMetadataLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class OpsMetadataNotFoundException(ServiceException): + code: str = "OpsMetadataNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class OpsMetadataTooManyUpdatesException(ServiceException): + code: str = "OpsMetadataTooManyUpdatesException" + sender_fault: bool = False + status_code: int = 400 + + +class ParameterAlreadyExists(ServiceException): + code: str = "ParameterAlreadyExists" + sender_fault: bool = False + status_code: int = 400 + + +class ParameterLimitExceeded(ServiceException): + code: str = "ParameterLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class ParameterMaxVersionLimitExceeded(ServiceException): + code: str = "ParameterMaxVersionLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class ParameterNotFound(ServiceException): + code: str = "ParameterNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class ParameterPatternMismatchException(ServiceException): + code: str = "ParameterPatternMismatchException" + sender_fault: bool = False + status_code: int = 400 + + +class ParameterVersionLabelLimitExceeded(ServiceException): + code: str = "ParameterVersionLabelLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class ParameterVersionNotFound(ServiceException): + code: str = "ParameterVersionNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class PoliciesLimitExceededException(ServiceException): + code: str = "PoliciesLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceDataSyncAlreadyExistsException(ServiceException): + code: str = "ResourceDataSyncAlreadyExistsException" + sender_fault: bool = False + status_code: int = 400 + 
SyncName: Optional[ResourceDataSyncName] + + +class ResourceDataSyncConflictException(ServiceException): + code: str = "ResourceDataSyncConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceDataSyncCountExceededException(ServiceException): + code: str = "ResourceDataSyncCountExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceDataSyncInvalidConfigurationException(ServiceException): + code: str = "ResourceDataSyncInvalidConfigurationException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceDataSyncNotFoundException(ServiceException): + code: str = "ResourceDataSyncNotFoundException" + sender_fault: bool = False + status_code: int = 400 + SyncName: Optional[ResourceDataSyncName] + SyncType: Optional[ResourceDataSyncType] + + +class ResourceInUseException(ServiceException): + code: str = "ResourceInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceLimitExceededException(ServiceException): + code: str = "ResourceLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFoundException(ServiceException): + code: str = "ResourceNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class ResourcePolicyConflictException(ServiceException): + code: str = "ResourcePolicyConflictException" + sender_fault: bool = False + status_code: int = 400 + + +ResourcePolicyParameterNamesList = List[String] + + +class ResourcePolicyInvalidParameterException(ServiceException): + code: str = "ResourcePolicyInvalidParameterException" + sender_fault: bool = False + status_code: int = 400 + ParameterNames: Optional[ResourcePolicyParameterNamesList] + + +class ResourcePolicyLimitExceededException(ServiceException): + code: str = "ResourcePolicyLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + Limit: Optional[Integer] + LimitType: Optional[String] + + +class ResourcePolicyNotFoundException(ServiceException): + code: str = "ResourcePolicyNotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +class ServiceQuotaExceededException(ServiceException): + code: str = "ServiceQuotaExceededException" + sender_fault: bool = False + status_code: int = 400 + ResourceId: Optional[String] + ResourceType: Optional[String] + QuotaCode: String + ServiceCode: String + + +class ServiceSettingNotFound(ServiceException): + code: str = "ServiceSettingNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class StatusUnchanged(ServiceException): + code: str = "StatusUnchanged" + sender_fault: bool = False + status_code: int = 400 + + +class SubTypeCountLimitExceededException(ServiceException): + code: str = "SubTypeCountLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class TargetInUseException(ServiceException): + code: str = "TargetInUseException" + sender_fault: bool = False + status_code: int = 400 + + +class TargetNotConnected(ServiceException): + code: str = "TargetNotConnected" + sender_fault: bool = False + status_code: int = 400 + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = False + status_code: int = 400 + QuotaCode: Optional[String] + ServiceCode: Optional[String] + + +class TooManyTagsError(ServiceException): + code: str = "TooManyTagsError" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyUpdates(ServiceException): + code: str = "TooManyUpdates" + 
sender_fault: bool = False + status_code: int = 400 + + +class TotalSizeLimitExceededException(ServiceException): + code: str = "TotalSizeLimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedCalendarException(ServiceException): + code: str = "UnsupportedCalendarException" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedFeatureRequiredException(ServiceException): + code: str = "UnsupportedFeatureRequiredException" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedInventoryItemContextException(ServiceException): + code: str = "UnsupportedInventoryItemContextException" + sender_fault: bool = False + status_code: int = 400 + TypeName: Optional[InventoryItemTypeName] + + +class UnsupportedInventorySchemaVersionException(ServiceException): + code: str = "UnsupportedInventorySchemaVersionException" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedOperatingSystem(ServiceException): + code: str = "UnsupportedOperatingSystem" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedOperationException(ServiceException): + code: str = "UnsupportedOperationException" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedParameterType(ServiceException): + code: str = "UnsupportedParameterType" + sender_fault: bool = False + status_code: int = 400 + + +class UnsupportedPlatformType(ServiceException): + code: str = "UnsupportedPlatformType" + sender_fault: bool = False + status_code: int = 400 + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = False + status_code: int = 400 + ReasonCode: Optional[String] + + +AccountIdList = List[AccountId] + + +class AccountSharingInfo(TypedDict, total=False): + AccountId: Optional[AccountId] + SharedDocumentVersion: Optional[SharedDocumentVersion] + + +AccountSharingInfoList = List[AccountSharingInfo] +Accounts = List[Account] + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] +CreatedDate = datetime +ExpirationDate = datetime + + +class Activation(TypedDict, total=False): + ActivationId: Optional[ActivationId] + Description: Optional[ActivationDescription] + DefaultInstanceName: Optional[DefaultInstanceName] + IamRole: Optional[IamRole] + RegistrationLimit: Optional[RegistrationLimit] + RegistrationsCount: Optional[RegistrationsCount] + ExpirationDate: Optional[ExpirationDate] + Expired: Optional[Boolean] + CreatedDate: Optional[CreatedDate] + Tags: Optional[TagList] + + +ActivationList = List[Activation] + + +class AddTagsToResourceRequest(ServiceRequest): + ResourceType: ResourceTypeForTagging + ResourceId: ResourceId + Tags: TagList + + +class AddTagsToResourceResult(TypedDict, total=False): + pass + + +class Alarm(TypedDict, total=False): + Name: AlarmName + + +AlarmList = List[Alarm] + + +class AlarmConfiguration(TypedDict, total=False): + IgnorePollAlarmFailure: Optional[Boolean] + Alarms: AlarmList + + +class AlarmStateInformation(TypedDict, total=False): + Name: AlarmName + State: ExternalAlarmState + + +AlarmStateInformationList = List[AlarmStateInformation] + + +class AssociateOpsItemRelatedItemRequest(ServiceRequest): + OpsItemId: OpsItemId + AssociationType: OpsItemRelatedItemAssociationType + ResourceType: OpsItemRelatedItemAssociationResourceType + ResourceUri: OpsItemRelatedItemAssociationResourceUri + + +class AssociateOpsItemRelatedItemResponse(TypedDict, total=False): + 
AssociationId: Optional[OpsItemRelatedItemAssociationId] + + +TargetMapValueList = List[TargetMapValue] +TargetMap = Dict[TargetMapKey, TargetMapValueList] +TargetMaps = List[TargetMap] +AssociationStatusAggregatedCount = Dict[StatusName, InstanceCount] + + +class AssociationOverview(TypedDict, total=False): + Status: Optional[StatusName] + DetailedStatus: Optional[StatusName] + AssociationStatusAggregatedCount: Optional[AssociationStatusAggregatedCount] + + +DateTime = datetime +TargetValues = List[TargetValue] + + +class Target(TypedDict, total=False): + Key: Optional[TargetKey] + Values: Optional[TargetValues] + + +Targets = List[Target] + + +class Association(TypedDict, total=False): + Name: Optional[DocumentARN] + InstanceId: Optional[InstanceId] + AssociationId: Optional[AssociationId] + AssociationVersion: Optional[AssociationVersion] + DocumentVersion: Optional[DocumentVersion] + Targets: Optional[Targets] + LastExecutionDate: Optional[DateTime] + Overview: Optional[AssociationOverview] + ScheduleExpression: Optional[ScheduleExpression] + AssociationName: Optional[AssociationName] + ScheduleOffset: Optional[ScheduleOffset] + Duration: Optional[Duration] + TargetMaps: Optional[TargetMaps] + + +ExcludeAccounts = List[ExcludeAccount] +Regions = List[Region] + + +class TargetLocation(TypedDict, total=False): + Accounts: Optional[Accounts] + Regions: Optional[Regions] + TargetLocationMaxConcurrency: Optional[MaxConcurrency] + TargetLocationMaxErrors: Optional[MaxErrors] + ExecutionRoleName: Optional[ExecutionRoleName] + TargetLocationAlarmConfiguration: Optional[AlarmConfiguration] + IncludeChildOrganizationUnits: Optional[Boolean] + ExcludeAccounts: Optional[ExcludeAccounts] + Targets: Optional[Targets] + TargetsMaxConcurrency: Optional[MaxConcurrency] + TargetsMaxErrors: Optional[MaxErrors] + + +TargetLocations = List[TargetLocation] +CalendarNameOrARNList = List[CalendarNameOrARN] + + +class S3OutputLocation(TypedDict, total=False): + OutputS3Region: Optional[S3Region] + OutputS3BucketName: Optional[S3BucketName] + OutputS3KeyPrefix: Optional[S3KeyPrefix] + + +class InstanceAssociationOutputLocation(TypedDict, total=False): + S3Location: Optional[S3OutputLocation] + + +ParameterValueList = List[ParameterValue] +Parameters = Dict[ParameterName, ParameterValueList] + + +class AssociationStatus(TypedDict, total=False): + Date: DateTime + Name: AssociationStatusName + Message: StatusMessage + AdditionalInfo: Optional[StatusAdditionalInfo] + + +class AssociationDescription(TypedDict, total=False): + Name: Optional[DocumentARN] + InstanceId: Optional[InstanceId] + AssociationVersion: Optional[AssociationVersion] + Date: Optional[DateTime] + LastUpdateAssociationDate: Optional[DateTime] + Status: Optional[AssociationStatus] + Overview: Optional[AssociationOverview] + DocumentVersion: Optional[DocumentVersion] + AutomationTargetParameterName: Optional[AutomationTargetParameterName] + Parameters: Optional[Parameters] + AssociationId: Optional[AssociationId] + Targets: Optional[Targets] + ScheduleExpression: Optional[ScheduleExpression] + OutputLocation: Optional[InstanceAssociationOutputLocation] + LastExecutionDate: Optional[DateTime] + LastSuccessfulExecutionDate: Optional[DateTime] + AssociationName: Optional[AssociationName] + MaxErrors: Optional[MaxErrors] + MaxConcurrency: Optional[MaxConcurrency] + ComplianceSeverity: Optional[AssociationComplianceSeverity] + SyncCompliance: Optional[AssociationSyncCompliance] + ApplyOnlyAtCronInterval: Optional[ApplyOnlyAtCronInterval] + 
CalendarNames: Optional[CalendarNameOrARNList] + TargetLocations: Optional[TargetLocations] + ScheduleOffset: Optional[ScheduleOffset] + Duration: Optional[Duration] + TargetMaps: Optional[TargetMaps] + AlarmConfiguration: Optional[AlarmConfiguration] + TriggeredAlarms: Optional[AlarmStateInformationList] + + +AssociationDescriptionList = List[AssociationDescription] + + +class AssociationExecution(TypedDict, total=False): + AssociationId: Optional[AssociationId] + AssociationVersion: Optional[AssociationVersion] + ExecutionId: Optional[AssociationExecutionId] + Status: Optional[StatusName] + DetailedStatus: Optional[StatusName] + CreatedTime: Optional[DateTime] + LastExecutionDate: Optional[DateTime] + ResourceCountByStatus: Optional[ResourceCountByStatus] + AlarmConfiguration: Optional[AlarmConfiguration] + TriggeredAlarms: Optional[AlarmStateInformationList] + + +class AssociationExecutionFilter(TypedDict, total=False): + Key: AssociationExecutionFilterKey + Value: AssociationExecutionFilterValue + Type: AssociationFilterOperatorType + + +AssociationExecutionFilterList = List[AssociationExecutionFilter] + + +class OutputSource(TypedDict, total=False): + OutputSourceId: Optional[OutputSourceId] + OutputSourceType: Optional[OutputSourceType] + + +class AssociationExecutionTarget(TypedDict, total=False): + AssociationId: Optional[AssociationId] + AssociationVersion: Optional[AssociationVersion] + ExecutionId: Optional[AssociationExecutionId] + ResourceId: Optional[AssociationResourceId] + ResourceType: Optional[AssociationResourceType] + Status: Optional[StatusName] + DetailedStatus: Optional[StatusName] + LastExecutionDate: Optional[DateTime] + OutputSource: Optional[OutputSource] + + +class AssociationExecutionTargetsFilter(TypedDict, total=False): + Key: AssociationExecutionTargetsFilterKey + Value: AssociationExecutionTargetsFilterValue + + +AssociationExecutionTargetsFilterList = List[AssociationExecutionTargetsFilter] +AssociationExecutionTargetsList = List[AssociationExecutionTarget] +AssociationExecutionsList = List[AssociationExecution] + + +class AssociationFilter(TypedDict, total=False): + key: AssociationFilterKey + value: AssociationFilterValue + + +AssociationFilterList = List[AssociationFilter] +AssociationIdList = List[AssociationId] +AssociationList = List[Association] + + +class AssociationVersionInfo(TypedDict, total=False): + AssociationId: Optional[AssociationId] + AssociationVersion: Optional[AssociationVersion] + CreatedDate: Optional[DateTime] + Name: Optional[DocumentARN] + DocumentVersion: Optional[DocumentVersion] + Parameters: Optional[Parameters] + Targets: Optional[Targets] + ScheduleExpression: Optional[ScheduleExpression] + OutputLocation: Optional[InstanceAssociationOutputLocation] + AssociationName: Optional[AssociationName] + MaxErrors: Optional[MaxErrors] + MaxConcurrency: Optional[MaxConcurrency] + ComplianceSeverity: Optional[AssociationComplianceSeverity] + SyncCompliance: Optional[AssociationSyncCompliance] + ApplyOnlyAtCronInterval: Optional[ApplyOnlyAtCronInterval] + CalendarNames: Optional[CalendarNameOrARNList] + TargetLocations: Optional[TargetLocations] + ScheduleOffset: Optional[ScheduleOffset] + Duration: Optional[Duration] + TargetMaps: Optional[TargetMaps] + + +AssociationVersionList = List[AssociationVersionInfo] +ContentLength = int + + +class AttachmentContent(TypedDict, total=False): + Name: Optional[AttachmentName] + Size: Optional[ContentLength] + Hash: Optional[AttachmentHash] + HashType: Optional[AttachmentHashType] + Url: 
+
+
+class AttachmentContent(TypedDict, total=False):
+    Name: Optional[AttachmentName]
+    Size: Optional[ContentLength]
+    Hash: Optional[AttachmentHash]
+    HashType: Optional[AttachmentHashType]
+    Url: Optional[AttachmentUrl]
+
+
+AttachmentContentList = List[AttachmentContent]
+
+
+class AttachmentInformation(TypedDict, total=False):
+    Name: Optional[AttachmentName]
+
+
+AttachmentInformationList = List[AttachmentInformation]
+AttachmentsSourceValues = List[AttachmentsSourceValue]
+
+
+class AttachmentsSource(TypedDict, total=False):
+    Key: Optional[AttachmentsSourceKey]
+    Values: Optional[AttachmentsSourceValues]
+    Name: Optional[AttachmentIdentifier]
+
+
+AttachmentsSourceList = List[AttachmentsSource]
+AutomationParameterValueList = List[AutomationParameterValue]
+AutomationParameterMap = Dict[AutomationParameterKey, AutomationParameterValueList]
+
+
+class Runbook(TypedDict, total=False):
+    DocumentName: DocumentARN
+    DocumentVersion: Optional[DocumentVersion]
+    Parameters: Optional[AutomationParameterMap]
+    TargetParameterName: Optional[AutomationParameterKey]
+    Targets: Optional[Targets]
+    TargetMaps: Optional[TargetMaps]
+    MaxConcurrency: Optional[MaxConcurrency]
+    MaxErrors: Optional[MaxErrors]
+    TargetLocations: Optional[TargetLocations]
+
+
+Runbooks = List[Runbook]
+
+
+class ProgressCounters(TypedDict, total=False):
+    TotalSteps: Optional[Integer]
+    SuccessSteps: Optional[Integer]
+    FailedSteps: Optional[Integer]
+    CancelledSteps: Optional[Integer]
+    TimedOutSteps: Optional[Integer]
+
+
+TargetParameterList = List[ParameterValue]
+
+
+class ResolvedTargets(TypedDict, total=False):
+    ParameterValues: Optional[TargetParameterList]
+    Truncated: Optional[Boolean]
+
+
+class ParentStepDetails(TypedDict, total=False):
+    StepExecutionId: Optional[String]
+    StepName: Optional[String]
+    Action: Optional[AutomationActionName]
+    Iteration: Optional[Integer]
+    IteratorValue: Optional[String]
+
+
+ValidNextStepList = List[ValidNextStep]
+
+
+class FailureDetails(TypedDict, total=False):
+    FailureStage: Optional[String]
+    FailureType: Optional[String]
+    Details: Optional[AutomationParameterMap]
+
+
+NormalStringMap = Dict[String, String]
+Long = int
+
+
+class StepExecution(TypedDict, total=False):
+    StepName: Optional[String]
+    Action: Optional[AutomationActionName]
+    TimeoutSeconds: Optional[Long]
+    OnFailure: Optional[String]
+    MaxAttempts: Optional[Integer]
+    ExecutionStartTime: Optional[DateTime]
+    ExecutionEndTime: Optional[DateTime]
+    StepStatus: Optional[AutomationExecutionStatus]
+    ResponseCode: Optional[String]
+    Inputs: Optional[NormalStringMap]
+    Outputs: Optional[AutomationParameterMap]
+    Response: Optional[String]
+    FailureMessage: Optional[String]
+    FailureDetails: Optional[FailureDetails]
+    StepExecutionId: Optional[String]
+    OverriddenParameters: Optional[AutomationParameterMap]
+    IsEnd: Optional[Boolean]
+    NextStep: Optional[String]
+    IsCritical: Optional[Boolean]
+    ValidNextSteps: Optional[ValidNextStepList]
+    Targets: Optional[Targets]
+    TargetLocation: Optional[TargetLocation]
+    TriggeredAlarms: Optional[AlarmStateInformationList]
+    ParentStepDetails: Optional[ParentStepDetails]
+
+
+StepExecutionList = List[StepExecution]
+
+
+class AutomationExecution(TypedDict, total=False):
+    AutomationExecutionId: Optional[AutomationExecutionId]
+    DocumentName: Optional[DocumentName]
+    DocumentVersion: Optional[DocumentVersion]
+    ExecutionStartTime: Optional[DateTime]
+    ExecutionEndTime: Optional[DateTime]
+    AutomationExecutionStatus: Optional[AutomationExecutionStatus]
+    StepExecutions: Optional[StepExecutionList]
+    StepExecutionsTruncated: Optional[Boolean]
+    Parameters: Optional[AutomationParameterMap]
+    Outputs: Optional[AutomationParameterMap]
+    FailureMessage: Optional[String]
+    Mode: Optional[ExecutionMode]
+    ParentAutomationExecutionId: Optional[AutomationExecutionId]
+    ExecutedBy: Optional[String]
+    CurrentStepName: Optional[String]
+    CurrentAction: Optional[String]
+    TargetParameterName: Optional[AutomationParameterKey]
+    Targets: Optional[Targets]
+    TargetMaps: Optional[TargetMaps]
+    ResolvedTargets: Optional[ResolvedTargets]
+    MaxConcurrency: Optional[MaxConcurrency]
+    MaxErrors: Optional[MaxErrors]
+    Target: Optional[String]
+    TargetLocations: Optional[TargetLocations]
+    ProgressCounters: Optional[ProgressCounters]
+    AlarmConfiguration: Optional[AlarmConfiguration]
+    TriggeredAlarms: Optional[AlarmStateInformationList]
+    TargetLocationsURL: Optional[TargetLocationsURL]
+    AutomationSubtype: Optional[AutomationSubtype]
+    ScheduledTime: Optional[DateTime]
+    Runbooks: Optional[Runbooks]
+    OpsItemId: Optional[String]
+    AssociationId: Optional[String]
+    ChangeRequestName: Optional[ChangeRequestName]
+    Variables: Optional[AutomationParameterMap]
+
+
+AutomationExecutionFilterValueList = List[AutomationExecutionFilterValue]
+
+
+class AutomationExecutionFilter(TypedDict, total=False):
+    Key: AutomationExecutionFilterKey
+    Values: AutomationExecutionFilterValueList
+
+
+AutomationExecutionFilterList = List[AutomationExecutionFilter]
+
+
+class AutomationExecutionInputs(TypedDict, total=False):
+    Parameters: Optional[AutomationParameterMap]
+    TargetParameterName: Optional[AutomationParameterKey]
+    Targets: Optional[Targets]
+    TargetMaps: Optional[TargetMaps]
+    TargetLocations: Optional[TargetLocations]
+    TargetLocationsURL: Optional[TargetLocationsURL]
+
+
+class AutomationExecutionMetadata(TypedDict, total=False):
+    AutomationExecutionId: Optional[AutomationExecutionId]
+    DocumentName: Optional[DocumentName]
+    DocumentVersion: Optional[DocumentVersion]
+    AutomationExecutionStatus: Optional[AutomationExecutionStatus]
+    ExecutionStartTime: Optional[DateTime]
+    ExecutionEndTime: Optional[DateTime]
+    ExecutedBy: Optional[String]
+    LogFile: Optional[String]
+    Outputs: Optional[AutomationParameterMap]
+    Mode: Optional[ExecutionMode]
+    ParentAutomationExecutionId: Optional[AutomationExecutionId]
+    CurrentStepName: Optional[String]
+    CurrentAction: Optional[String]
+    FailureMessage: Optional[String]
+    TargetParameterName: Optional[AutomationParameterKey]
+    Targets: Optional[Targets]
+    TargetMaps: Optional[TargetMaps]
+    ResolvedTargets: Optional[ResolvedTargets]
+    MaxConcurrency: Optional[MaxConcurrency]
+    MaxErrors: Optional[MaxErrors]
+    Target: Optional[String]
+    AutomationType: Optional[AutomationType]
+    AlarmConfiguration: Optional[AlarmConfiguration]
+    TriggeredAlarms: Optional[AlarmStateInformationList]
+    TargetLocationsURL: Optional[TargetLocationsURL]
+    AutomationSubtype: Optional[AutomationSubtype]
+    ScheduledTime: Optional[DateTime]
+    Runbooks: Optional[Runbooks]
+    OpsItemId: Optional[String]
+    AssociationId: Optional[String]
+    ChangeRequestName: Optional[ChangeRequestName]
+
+
+AutomationExecutionMetadataList = List[AutomationExecutionMetadata]
+
+
+class TargetPreview(TypedDict, total=False):
+    Count: Optional[Integer]
+    TargetType: Optional[String]
+
+
+TargetPreviewList = List[TargetPreview]
+RegionList = List[Region]
+StepPreviewMap = Dict[ImpactType, Integer]
+
+
+class AutomationExecutionPreview(TypedDict, total=False):
+    StepPreviews: Optional[StepPreviewMap]
+    Regions: Optional[RegionList]
+    TargetPreviews: Optional[TargetPreviewList]
+    TotalAccounts: Optional[Integer]
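
Editor's note: AutomationParameterMap resolves to Dict[str, List[str]] through the aliases above, so every automation parameter value is a list even when a single value is supplied. A hedged sketch of a Runbook-shaped dict (the document name is a real AWS-managed runbook; the instance ID is hypothetical):

    runbook = {
        "DocumentName": "AWS-RestartEC2Instance",
        "Parameters": {"InstanceId": ["i-0123456789abcdef0"]},  # scalar still wrapped in a list
    }
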
+
+
+PatchSourceProductList = List[PatchSourceProduct]
+
+
+class PatchSource(TypedDict, total=False):
+    Name: PatchSourceName
+    Products: PatchSourceProductList
+    Configuration: PatchSourceConfiguration
+
+
+PatchSourceList = List[PatchSource]
+PatchIdList = List[PatchId]
+PatchFilterValueList = List[PatchFilterValue]
+
+
+class PatchFilter(TypedDict, total=False):
+    Key: PatchFilterKey
+    Values: PatchFilterValueList
+
+
+PatchFilterList = List[PatchFilter]
+
+
+class PatchFilterGroup(TypedDict, total=False):
+    PatchFilters: PatchFilterList
+
+
+class PatchRule(TypedDict, total=False):
+    PatchFilterGroup: PatchFilterGroup
+    ComplianceLevel: Optional[PatchComplianceLevel]
+    ApproveAfterDays: Optional[ApproveAfterDays]
+    ApproveUntilDate: Optional[PatchStringDateTime]
+    EnableNonSecurity: Optional[Boolean]
+
+
+PatchRuleList = List[PatchRule]
+
+
+class PatchRuleGroup(TypedDict, total=False):
+    PatchRules: PatchRuleList
+
+
+class BaselineOverride(TypedDict, total=False):
+    OperatingSystem: Optional[OperatingSystem]
+    GlobalFilters: Optional[PatchFilterGroup]
+    ApprovalRules: Optional[PatchRuleGroup]
+    ApprovedPatches: Optional[PatchIdList]
+    ApprovedPatchesComplianceLevel: Optional[PatchComplianceLevel]
+    RejectedPatches: Optional[PatchIdList]
+    RejectedPatchesAction: Optional[PatchAction]
+    ApprovedPatchesEnableNonSecurity: Optional[Boolean]
+    Sources: Optional[PatchSourceList]
+    AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus]
+
+
+InstanceIdList = List[InstanceId]
+
+
+class CancelCommandRequest(ServiceRequest):
+    CommandId: CommandId
+    InstanceIds: Optional[InstanceIdList]
+
+
+class CancelCommandResult(TypedDict, total=False):
+    pass
+
+
+class CancelMaintenanceWindowExecutionRequest(ServiceRequest):
+    WindowExecutionId: MaintenanceWindowExecutionId
+
+
+class CancelMaintenanceWindowExecutionResult(TypedDict, total=False):
+    WindowExecutionId: Optional[MaintenanceWindowExecutionId]
+
+
+CategoryEnumList = List[Category]
+CategoryList = List[Category]
+
+
+class CloudWatchOutputConfig(TypedDict, total=False):
+    CloudWatchLogGroupName: Optional[CloudWatchLogGroupName]
+    CloudWatchOutputEnabled: Optional[CloudWatchOutputEnabled]
+
+
+NotificationEventList = List[NotificationEvent]
+
+
+class NotificationConfig(TypedDict, total=False):
+    NotificationArn: Optional[NotificationArn]
+    NotificationEvents: Optional[NotificationEventList]
+    NotificationType: Optional[NotificationType]
+
+
+class Command(TypedDict, total=False):
+    CommandId: Optional[CommandId]
+    DocumentName: Optional[DocumentName]
+    DocumentVersion: Optional[DocumentVersion]
+    Comment: Optional[Comment]
+    ExpiresAfter: Optional[DateTime]
+    Parameters: Optional[Parameters]
+    InstanceIds: Optional[InstanceIdList]
+    Targets: Optional[Targets]
+    RequestedDateTime: Optional[DateTime]
+    Status: Optional[CommandStatus]
+    StatusDetails: Optional[StatusDetails]
+    OutputS3Region: Optional[S3Region]
+    OutputS3BucketName: Optional[S3BucketName]
+    OutputS3KeyPrefix: Optional[S3KeyPrefix]
+    MaxConcurrency: Optional[MaxConcurrency]
+    MaxErrors: Optional[MaxErrors]
+    TargetCount: Optional[TargetCount]
+    CompletedCount: Optional[CompletedCount]
+    ErrorCount: Optional[ErrorCount]
+    DeliveryTimedOutCount: Optional[DeliveryTimedOutCount]
+    ServiceRole: Optional[ServiceRole]
+    NotificationConfig: Optional[NotificationConfig]
+    CloudWatchOutputConfig: Optional[CloudWatchOutputConfig]
+    TimeoutSeconds: Optional[TimeoutSeconds]
+    AlarmConfiguration: Optional[AlarmConfiguration]
+    TriggeredAlarms: Optional[AlarmStateInformationList]
+
+
+class CommandFilter(TypedDict, total=False):
+    key: CommandFilterKey
+    value: CommandFilterValue
+
+
+CommandFilterList = List[CommandFilter]
+
+
+class CommandPlugin(TypedDict, total=False):
+    Name: Optional[CommandPluginName]
+    Status: Optional[CommandPluginStatus]
+    StatusDetails: Optional[StatusDetails]
+    ResponseCode: Optional[ResponseCode]
+    ResponseStartDateTime: Optional[DateTime]
+    ResponseFinishDateTime: Optional[DateTime]
+    Output: Optional[CommandPluginOutput]
+    StandardOutputUrl: Optional[Url]
+    StandardErrorUrl: Optional[Url]
+    OutputS3Region: Optional[S3Region]
+    OutputS3BucketName: Optional[S3BucketName]
+    OutputS3KeyPrefix: Optional[S3KeyPrefix]
+
+
+CommandPluginList = List[CommandPlugin]
+
+
+class CommandInvocation(TypedDict, total=False):
+    CommandId: Optional[CommandId]
+    InstanceId: Optional[InstanceId]
+    InstanceName: Optional[InstanceTagName]
+    Comment: Optional[Comment]
+    DocumentName: Optional[DocumentName]
+    DocumentVersion: Optional[DocumentVersion]
+    RequestedDateTime: Optional[DateTime]
+    Status: Optional[CommandInvocationStatus]
+    StatusDetails: Optional[StatusDetails]
+    TraceOutput: Optional[InvocationTraceOutput]
+    StandardOutputUrl: Optional[Url]
+    StandardErrorUrl: Optional[Url]
+    CommandPlugins: Optional[CommandPluginList]
+    ServiceRole: Optional[ServiceRole]
+    NotificationConfig: Optional[NotificationConfig]
+    CloudWatchOutputConfig: Optional[CloudWatchOutputConfig]
+
+
+CommandInvocationList = List[CommandInvocation]
+CommandList = List[Command]
+
+
+class ComplianceExecutionSummary(TypedDict, total=False):
+    ExecutionTime: DateTime
+    ExecutionId: Optional[ComplianceExecutionId]
+    ExecutionType: Optional[ComplianceExecutionType]
+
+
+ComplianceItemDetails = Dict[AttributeName, AttributeValue]
+
+
+class ComplianceItem(TypedDict, total=False):
+    ComplianceType: Optional[ComplianceTypeName]
+    ResourceType: Optional[ComplianceResourceType]
+    ResourceId: Optional[ComplianceResourceId]
+    Id: Optional[ComplianceItemId]
+    Title: Optional[ComplianceItemTitle]
+    Status: Optional[ComplianceStatus]
+    Severity: Optional[ComplianceSeverity]
+    ExecutionSummary: Optional[ComplianceExecutionSummary]
+    Details: Optional[ComplianceItemDetails]
+
+
+class ComplianceItemEntry(TypedDict, total=False):
+    Id: Optional[ComplianceItemId]
+    Title: Optional[ComplianceItemTitle]
+    Severity: ComplianceSeverity
+    Status: ComplianceStatus
+    Details: Optional[ComplianceItemDetails]
+
+
+ComplianceItemEntryList = List[ComplianceItemEntry]
+ComplianceItemList = List[ComplianceItem]
+ComplianceResourceIdList = List[ComplianceResourceId]
+ComplianceResourceTypeList = List[ComplianceResourceType]
+ComplianceStringFilterValueList = List[ComplianceFilterValue]
+
+
+class ComplianceStringFilter(TypedDict, total=False):
+    Key: Optional[ComplianceStringFilterKey]
+    Values: Optional[ComplianceStringFilterValueList]
+    Type: Optional[ComplianceQueryOperatorType]
+
+
+ComplianceStringFilterList = List[ComplianceStringFilter]
+
+
+class SeveritySummary(TypedDict, total=False):
+    CriticalCount: Optional[ComplianceSummaryCount]
+    HighCount: Optional[ComplianceSummaryCount]
+    MediumCount: Optional[ComplianceSummaryCount]
+    LowCount: Optional[ComplianceSummaryCount]
+    InformationalCount: Optional[ComplianceSummaryCount]
+    UnspecifiedCount: Optional[ComplianceSummaryCount]
+
+
+class NonCompliantSummary(TypedDict, total=False):
+    NonCompliantCount: Optional[ComplianceSummaryCount]
+    SeveritySummary: Optional[SeveritySummary]
+
+
+class CompliantSummary(TypedDict, total=False):
+    CompliantCount: Optional[ComplianceSummaryCount]
+    SeveritySummary: Optional[SeveritySummary]
+
+
+class ComplianceSummaryItem(TypedDict, total=False):
+    ComplianceType: Optional[ComplianceTypeName]
+    CompliantSummary: Optional[CompliantSummary]
+    NonCompliantSummary: Optional[NonCompliantSummary]
+
+
+ComplianceSummaryItemList = List[ComplianceSummaryItem]
+
+
+class RegistrationMetadataItem(TypedDict, total=False):
+    Key: RegistrationMetadataKey
+    Value: RegistrationMetadataValue
+
+
+RegistrationMetadataList = List[RegistrationMetadataItem]
+
+
+class CreateActivationRequest(ServiceRequest):
+    Description: Optional[ActivationDescription]
+    DefaultInstanceName: Optional[DefaultInstanceName]
+    IamRole: IamRole
+    RegistrationLimit: Optional[RegistrationLimit]
+    ExpirationDate: Optional[ExpirationDate]
+    Tags: Optional[TagList]
+    RegistrationMetadata: Optional[RegistrationMetadataList]
+
+
+class CreateActivationResult(TypedDict, total=False):
+    ActivationId: Optional[ActivationId]
+    ActivationCode: Optional[ActivationCode]
+
+
+class CreateAssociationBatchRequestEntry(TypedDict, total=False):
+    Name: DocumentARN
+    InstanceId: Optional[InstanceId]
+    Parameters: Optional[Parameters]
+    AutomationTargetParameterName: Optional[AutomationTargetParameterName]
+    DocumentVersion: Optional[DocumentVersion]
+    Targets: Optional[Targets]
+    ScheduleExpression: Optional[ScheduleExpression]
+    OutputLocation: Optional[InstanceAssociationOutputLocation]
+    AssociationName: Optional[AssociationName]
+    MaxErrors: Optional[MaxErrors]
+    MaxConcurrency: Optional[MaxConcurrency]
+    ComplianceSeverity: Optional[AssociationComplianceSeverity]
+    SyncCompliance: Optional[AssociationSyncCompliance]
+    ApplyOnlyAtCronInterval: Optional[ApplyOnlyAtCronInterval]
+    CalendarNames: Optional[CalendarNameOrARNList]
+    TargetLocations: Optional[TargetLocations]
+    ScheduleOffset: Optional[ScheduleOffset]
+    Duration: Optional[Duration]
+    TargetMaps: Optional[TargetMaps]
+    AlarmConfiguration: Optional[AlarmConfiguration]
+
+
+CreateAssociationBatchRequestEntries = List[CreateAssociationBatchRequestEntry]
+
+
+class CreateAssociationBatchRequest(ServiceRequest):
+    Entries: CreateAssociationBatchRequestEntries
+
+
+class FailedCreateAssociation(TypedDict, total=False):
+    Entry: Optional[CreateAssociationBatchRequestEntry]
+    Message: Optional[BatchErrorMessage]
+    Fault: Optional[Fault]
+
+
+FailedCreateAssociationList = List[FailedCreateAssociation]
+
+
+class CreateAssociationBatchResult(TypedDict, total=False):
+    Successful: Optional[AssociationDescriptionList]
+    Failed: Optional[FailedCreateAssociationList]
+
+
+class CreateAssociationRequest(ServiceRequest):
+    Name: DocumentARN
+    DocumentVersion: Optional[DocumentVersion]
+    InstanceId: Optional[InstanceId]
+    Parameters: Optional[Parameters]
+    Targets: Optional[Targets]
+    ScheduleExpression: Optional[ScheduleExpression]
+    OutputLocation: Optional[InstanceAssociationOutputLocation]
+    AssociationName: Optional[AssociationName]
+    AutomationTargetParameterName: Optional[AutomationTargetParameterName]
+    MaxErrors: Optional[MaxErrors]
+    MaxConcurrency: Optional[MaxConcurrency]
+    ComplianceSeverity: Optional[AssociationComplianceSeverity]
+    SyncCompliance: Optional[AssociationSyncCompliance]
+    ApplyOnlyAtCronInterval: Optional[ApplyOnlyAtCronInterval]
+    CalendarNames: Optional[CalendarNameOrARNList]
+    TargetLocations: Optional[TargetLocations]
+    ScheduleOffset: Optional[ScheduleOffset]
+    Duration: Optional[Duration]
+    TargetMaps: Optional[TargetMaps]
+    Tags: Optional[TagList]
+    AlarmConfiguration: Optional[AlarmConfiguration]
+
+
+class CreateAssociationResult(TypedDict, total=False):
+    AssociationDescription: Optional[AssociationDescription]
+
+
+class DocumentRequires(TypedDict, total=False):
+    Name: DocumentARN
+    Version: Optional[DocumentVersion]
+    RequireType: Optional[RequireType]
+    VersionName: Optional[DocumentVersionName]
+
+
+DocumentRequiresList = List[DocumentRequires]
+
+
+class CreateDocumentRequest(ServiceRequest):
+    Content: DocumentContent
+    Requires: Optional[DocumentRequiresList]
+    Attachments: Optional[AttachmentsSourceList]
+    Name: DocumentName
+    DisplayName: Optional[DocumentDisplayName]
+    VersionName: Optional[DocumentVersionName]
+    DocumentType: Optional[DocumentType]
+    DocumentFormat: Optional[DocumentFormat]
+    TargetType: Optional[TargetType]
+    Tags: Optional[TagList]
+
+
+class ReviewInformation(TypedDict, total=False):
+    ReviewedTime: Optional[DateTime]
+    Status: Optional[ReviewStatus]
+    Reviewer: Optional[Reviewer]
+
+
+ReviewInformationList = List[ReviewInformation]
+PlatformTypeList = List[PlatformType]
+
+
+class DocumentParameter(TypedDict, total=False):
+    Name: Optional[DocumentParameterName]
+    Type: Optional[DocumentParameterType]
+    Description: Optional[DocumentParameterDescrption]
+    DefaultValue: Optional[DocumentParameterDefaultValue]
+
+
+DocumentParameterList = List[DocumentParameter]
+
+
+class DocumentDescription(TypedDict, total=False):
+    Sha1: Optional[DocumentSha1]
+    Hash: Optional[DocumentHash]
+    HashType: Optional[DocumentHashType]
+    Name: Optional[DocumentARN]
+    DisplayName: Optional[DocumentDisplayName]
+    VersionName: Optional[DocumentVersionName]
+    Owner: Optional[DocumentOwner]
+    CreatedDate: Optional[DateTime]
+    Status: Optional[DocumentStatus]
+    StatusInformation: Optional[DocumentStatusInformation]
+    DocumentVersion: Optional[DocumentVersion]
+    Description: Optional[DescriptionInDocument]
+    Parameters: Optional[DocumentParameterList]
+    PlatformTypes: Optional[PlatformTypeList]
+    DocumentType: Optional[DocumentType]
+    SchemaVersion: Optional[DocumentSchemaVersion]
+    LatestVersion: Optional[DocumentVersion]
+    DefaultVersion: Optional[DocumentVersion]
+    DocumentFormat: Optional[DocumentFormat]
+    TargetType: Optional[TargetType]
+    Tags: Optional[TagList]
+    AttachmentsInformation: Optional[AttachmentInformationList]
+    Requires: Optional[DocumentRequiresList]
+    Author: Optional[DocumentAuthor]
+    ReviewInformation: Optional[ReviewInformationList]
+    ApprovedVersion: Optional[DocumentVersion]
+    PendingReviewVersion: Optional[DocumentVersion]
+    ReviewStatus: Optional[ReviewStatus]
+    Category: Optional[CategoryList]
+    CategoryEnum: Optional[CategoryEnumList]
+
+
+class CreateDocumentResult(TypedDict, total=False):
+    DocumentDescription: Optional[DocumentDescription]
+
+
+class CreateMaintenanceWindowRequest(ServiceRequest):
+    Name: MaintenanceWindowName
+    Description: Optional[MaintenanceWindowDescription]
+    StartDate: Optional[MaintenanceWindowStringDateTime]
+    EndDate: Optional[MaintenanceWindowStringDateTime]
+    Schedule: MaintenanceWindowSchedule
+    ScheduleTimezone: Optional[MaintenanceWindowTimezone]
+    ScheduleOffset: Optional[MaintenanceWindowOffset]
+    Duration: MaintenanceWindowDurationHours
+    Cutoff: MaintenanceWindowCutoff
+    AllowUnassociatedTargets: MaintenanceWindowAllowUnassociatedTargets
+    ClientToken: Optional[ClientToken]
+    Tags: Optional[TagList]
+
+
+class CreateMaintenanceWindowResult(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+
+
+class RelatedOpsItem(TypedDict, total=False):
+    OpsItemId: String
+
+
+RelatedOpsItems = List[RelatedOpsItem]
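
Editor's note: CreateAssociationBatch reports partial failure through the Successful/Failed pair in its result rather than raising on the first bad entry. A minimal sketch of consuming a CreateAssociationBatchResult-shaped dict (the helper is illustrative only, not part of this module):

    def summarize_batch(result: dict) -> str:
        # result is shaped like CreateAssociationBatchResult above
        succeeded = len(result.get("Successful", []))
        failures = [entry.get("Message", "unknown") for entry in result.get("Failed", [])]
        return f"{succeeded} associations created, {len(failures)} failed: {failures}"

    print(summarize_batch({"Successful": [], "Failed": [{"Message": "document not found", "Fault": "Client"}]}))
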
+
+
+class OpsItemNotification(TypedDict, total=False):
+    Arn: Optional[String]
+
+
+OpsItemNotifications = List[OpsItemNotification]
+
+
+class OpsItemDataValue(TypedDict, total=False):
+    Value: Optional[OpsItemDataValueString]
+    Type: Optional[OpsItemDataType]
+
+
+OpsItemOperationalData = Dict[OpsItemDataKey, OpsItemDataValue]
+
+
+class CreateOpsItemRequest(ServiceRequest):
+    Description: OpsItemDescription
+    OpsItemType: Optional[OpsItemType]
+    OperationalData: Optional[OpsItemOperationalData]
+    Notifications: Optional[OpsItemNotifications]
+    Priority: Optional[OpsItemPriority]
+    RelatedOpsItems: Optional[RelatedOpsItems]
+    Source: OpsItemSource
+    Title: OpsItemTitle
+    Tags: Optional[TagList]
+    Category: Optional[OpsItemCategory]
+    Severity: Optional[OpsItemSeverity]
+    ActualStartTime: Optional[DateTime]
+    ActualEndTime: Optional[DateTime]
+    PlannedStartTime: Optional[DateTime]
+    PlannedEndTime: Optional[DateTime]
+    AccountId: Optional[OpsItemAccountId]
+
+
+class CreateOpsItemResponse(TypedDict, total=False):
+    OpsItemId: Optional[String]
+    OpsItemArn: Optional[OpsItemArn]
+
+
+class MetadataValue(TypedDict, total=False):
+    Value: Optional[MetadataValueString]
+
+
+MetadataMap = Dict[MetadataKey, MetadataValue]
+
+
+class CreateOpsMetadataRequest(ServiceRequest):
+    ResourceId: OpsMetadataResourceId
+    Metadata: Optional[MetadataMap]
+    Tags: Optional[TagList]
+
+
+class CreateOpsMetadataResult(TypedDict, total=False):
+    OpsMetadataArn: Optional[OpsMetadataArn]
+
+
+class CreatePatchBaselineRequest(ServiceRequest):
+    OperatingSystem: Optional[OperatingSystem]
+    Name: BaselineName
+    GlobalFilters: Optional[PatchFilterGroup]
+    ApprovalRules: Optional[PatchRuleGroup]
+    ApprovedPatches: Optional[PatchIdList]
+    ApprovedPatchesComplianceLevel: Optional[PatchComplianceLevel]
+    ApprovedPatchesEnableNonSecurity: Optional[Boolean]
+    RejectedPatches: Optional[PatchIdList]
+    RejectedPatchesAction: Optional[PatchAction]
+    Description: Optional[BaselineDescription]
+    Sources: Optional[PatchSourceList]
+    AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus]
+    ClientToken: Optional[ClientToken]
+    Tags: Optional[TagList]
+
+
+class CreatePatchBaselineResult(TypedDict, total=False):
+    BaselineId: Optional[BaselineId]
+
+
+ResourceDataSyncSourceRegionList = List[ResourceDataSyncSourceRegion]
+
+
+class ResourceDataSyncOrganizationalUnit(TypedDict, total=False):
+    OrganizationalUnitId: Optional[ResourceDataSyncOrganizationalUnitId]
+
+
+ResourceDataSyncOrganizationalUnitList = List[ResourceDataSyncOrganizationalUnit]
+
+
+class ResourceDataSyncAwsOrganizationsSource(TypedDict, total=False):
+    OrganizationSourceType: ResourceDataSyncOrganizationSourceType
+    OrganizationalUnits: Optional[ResourceDataSyncOrganizationalUnitList]
+
+
+class ResourceDataSyncSource(TypedDict, total=False):
+    SourceType: ResourceDataSyncSourceType
+    AwsOrganizationsSource: Optional[ResourceDataSyncAwsOrganizationsSource]
+    SourceRegions: ResourceDataSyncSourceRegionList
+    IncludeFutureRegions: Optional[ResourceDataSyncIncludeFutureRegions]
+    EnableAllOpsDataSources: Optional[ResourceDataSyncEnableAllOpsDataSources]
+
+
+class ResourceDataSyncDestinationDataSharing(TypedDict, total=False):
+    DestinationDataSharingType: Optional[ResourceDataSyncDestinationDataSharingType]
+
+
+class ResourceDataSyncS3Destination(TypedDict, total=False):
+    BucketName: ResourceDataSyncS3BucketName
+    Prefix: Optional[ResourceDataSyncS3Prefix]
+    SyncFormat: ResourceDataSyncS3Format
+    Region: ResourceDataSyncS3Region
+    AWSKMSKeyARN: Optional[ResourceDataSyncAWSKMSKeyARN]
+    DestinationDataSharing: Optional[ResourceDataSyncDestinationDataSharing]
+
+
+class CreateResourceDataSyncRequest(ServiceRequest):
+    SyncName: ResourceDataSyncName
+    S3Destination: Optional[ResourceDataSyncS3Destination]
+    SyncType: Optional[ResourceDataSyncType]
+    SyncSource: Optional[ResourceDataSyncSource]
+
+
+class CreateResourceDataSyncResult(TypedDict, total=False):
+    pass
+
+
+class Credentials(TypedDict, total=False):
+    AccessKeyId: AccessKeyIdType
+    SecretAccessKey: AccessKeySecretType
+    SessionToken: SessionTokenType
+    ExpirationTime: DateTime
+
+
+class DeleteActivationRequest(ServiceRequest):
+    ActivationId: ActivationId
+
+
+class DeleteActivationResult(TypedDict, total=False):
+    pass
+
+
+class DeleteAssociationRequest(ServiceRequest):
+    Name: Optional[DocumentARN]
+    InstanceId: Optional[InstanceId]
+    AssociationId: Optional[AssociationId]
+
+
+class DeleteAssociationResult(TypedDict, total=False):
+    pass
+
+
+class DeleteDocumentRequest(ServiceRequest):
+    Name: DocumentName
+    DocumentVersion: Optional[DocumentVersion]
+    VersionName: Optional[DocumentVersionName]
+    Force: Optional[Boolean]
+
+
+class DeleteDocumentResult(TypedDict, total=False):
+    pass
+
+
+class DeleteInventoryRequest(ServiceRequest):
+    TypeName: InventoryItemTypeName
+    SchemaDeleteOption: Optional[InventorySchemaDeleteOption]
+    DryRun: Optional[DryRun]
+    ClientToken: Optional[UUID]
+
+
+class InventoryDeletionSummaryItem(TypedDict, total=False):
+    Version: Optional[InventoryItemSchemaVersion]
+    Count: Optional[ResourceCount]
+    RemainingCount: Optional[RemainingCount]
+
+
+InventoryDeletionSummaryItems = List[InventoryDeletionSummaryItem]
+
+
+class InventoryDeletionSummary(TypedDict, total=False):
+    TotalCount: Optional[TotalCount]
+    RemainingCount: Optional[RemainingCount]
+    SummaryItems: Optional[InventoryDeletionSummaryItems]
+
+
+class DeleteInventoryResult(TypedDict, total=False):
+    DeletionId: Optional[UUID]
+    TypeName: Optional[InventoryItemTypeName]
+    DeletionSummary: Optional[InventoryDeletionSummary]
+
+
+class DeleteMaintenanceWindowRequest(ServiceRequest):
+    WindowId: MaintenanceWindowId
+
+
+class DeleteMaintenanceWindowResult(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+
+
+class DeleteOpsItemRequest(ServiceRequest):
+    OpsItemId: OpsItemId
+
+
+class DeleteOpsItemResponse(TypedDict, total=False):
+    pass
+
+
+class DeleteOpsMetadataRequest(ServiceRequest):
+    OpsMetadataArn: OpsMetadataArn
+
+
+class DeleteOpsMetadataResult(TypedDict, total=False):
+    pass
+
+
+class DeleteParameterRequest(ServiceRequest):
+    Name: PSParameterName
+
+
+class DeleteParameterResult(TypedDict, total=False):
+    pass
+
+
+ParameterNameList = List[PSParameterName]
+
+
+class DeleteParametersRequest(ServiceRequest):
+    Names: ParameterNameList
+
+
+class DeleteParametersResult(TypedDict, total=False):
+    DeletedParameters: Optional[ParameterNameList]
+    InvalidParameters: Optional[ParameterNameList]
+
+
+class DeletePatchBaselineRequest(ServiceRequest):
+    BaselineId: BaselineId
+
+
+class DeletePatchBaselineResult(TypedDict, total=False):
+    BaselineId: Optional[BaselineId]
+
+
+class DeleteResourceDataSyncRequest(ServiceRequest):
+    SyncName: ResourceDataSyncName
+    SyncType: Optional[ResourceDataSyncType]
+
+
+class DeleteResourceDataSyncResult(TypedDict, total=False):
+    pass
+
+
+class DeleteResourcePolicyRequest(ServiceRequest):
+    ResourceArn: ResourceArnString
+    PolicyId: PolicyId
+    PolicyHash: PolicyHash
+
+
+class DeleteResourcePolicyResponse(TypedDict, total=False):
+    pass
+
+
+class DeregisterManagedInstanceRequest(ServiceRequest):
+    InstanceId: ManagedInstanceId
+
+
+class DeregisterManagedInstanceResult(TypedDict, total=False):
+    pass
+
+
+class DeregisterPatchBaselineForPatchGroupRequest(ServiceRequest):
+    BaselineId: BaselineId
+    PatchGroup: PatchGroup
+
+
+class DeregisterPatchBaselineForPatchGroupResult(TypedDict, total=False):
+    BaselineId: Optional[BaselineId]
+    PatchGroup: Optional[PatchGroup]
+
+
+class DeregisterTargetFromMaintenanceWindowRequest(ServiceRequest):
+    WindowId: MaintenanceWindowId
+    WindowTargetId: MaintenanceWindowTargetId
+    Safe: Optional[Boolean]
+
+
+class DeregisterTargetFromMaintenanceWindowResult(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+    WindowTargetId: Optional[MaintenanceWindowTargetId]
+
+
+class DeregisterTaskFromMaintenanceWindowRequest(ServiceRequest):
+    WindowId: MaintenanceWindowId
+    WindowTaskId: MaintenanceWindowTaskId
+
+
+class DeregisterTaskFromMaintenanceWindowResult(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+    WindowTaskId: Optional[MaintenanceWindowTaskId]
+
+
+StringList = List[String]
+
+
+class DescribeActivationsFilter(TypedDict, total=False):
+    FilterKey: Optional[DescribeActivationsFilterKeys]
+    FilterValues: Optional[StringList]
+
+
+DescribeActivationsFilterList = List[DescribeActivationsFilter]
+
+
+class DescribeActivationsRequest(ServiceRequest):
+    Filters: Optional[DescribeActivationsFilterList]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class DescribeActivationsResult(TypedDict, total=False):
+    ActivationList: Optional[ActivationList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAssociationExecutionTargetsRequest(ServiceRequest):
+    AssociationId: AssociationId
+    ExecutionId: AssociationExecutionId
+    Filters: Optional[AssociationExecutionTargetsFilterList]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAssociationExecutionTargetsResult(TypedDict, total=False):
+    AssociationExecutionTargets: Optional[AssociationExecutionTargetsList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAssociationExecutionsRequest(ServiceRequest):
+    AssociationId: AssociationId
+    Filters: Optional[AssociationExecutionFilterList]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAssociationExecutionsResult(TypedDict, total=False):
+    AssociationExecutions: Optional[AssociationExecutionsList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAssociationRequest(ServiceRequest):
+    Name: Optional[DocumentARN]
+    InstanceId: Optional[InstanceId]
+    AssociationId: Optional[AssociationId]
+    AssociationVersion: Optional[AssociationVersion]
+
+
+class DescribeAssociationResult(TypedDict, total=False):
+    AssociationDescription: Optional[AssociationDescription]
+
+
+class DescribeAutomationExecutionsRequest(ServiceRequest):
+    Filters: Optional[AutomationExecutionFilterList]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class DescribeAutomationExecutionsResult(TypedDict, total=False):
+    AutomationExecutionMetadataList: Optional[AutomationExecutionMetadataList]
+    NextToken: Optional[NextToken]
+
+
+StepExecutionFilterValueList = List[StepExecutionFilterValue]
+
+
+class StepExecutionFilter(TypedDict, total=False):
+    Key: StepExecutionFilterKey
+    Values: StepExecutionFilterValueList
+
+
+StepExecutionFilterList = List[StepExecutionFilter]
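
Editor's note: the Describe* request/result pairs above all share the MaxResults/NextToken pagination contract. A sketch of driving it with boto3 against LocalStack (boto3 and the endpoint URL are assumptions, not part of this module):

    import boto3

    ssm = boto3.client("ssm", endpoint_url="http://localhost:4566")  # assumed LocalStack edge port

    token = None
    while True:
        kwargs = {"NextToken": token} if token else {}
        page = ssm.describe_activations(MaxResults=10, **kwargs)
        for activation in page.get("ActivationList", []):
            print(activation.get("ActivationId"))
        token = page.get("NextToken")
        if not token:
            break
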
+
+
+class DescribeAutomationStepExecutionsRequest(ServiceRequest):
+    AutomationExecutionId: AutomationExecutionId
+    Filters: Optional[StepExecutionFilterList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[MaxResults]
+    ReverseOrder: Optional[Boolean]
+
+
+class DescribeAutomationStepExecutionsResult(TypedDict, total=False):
+    StepExecutions: Optional[StepExecutionList]
+    NextToken: Optional[NextToken]
+
+
+PatchOrchestratorFilterValues = List[PatchOrchestratorFilterValue]
+
+
+class PatchOrchestratorFilter(TypedDict, total=False):
+    Key: Optional[PatchOrchestratorFilterKey]
+    Values: Optional[PatchOrchestratorFilterValues]
+
+
+PatchOrchestratorFilterList = List[PatchOrchestratorFilter]
+
+
+class DescribeAvailablePatchesRequest(ServiceRequest):
+    Filters: Optional[PatchOrchestratorFilterList]
+    MaxResults: Optional[PatchBaselineMaxResults]
+    NextToken: Optional[NextToken]
+
+
+PatchCVEIdList = List[PatchCVEId]
+PatchBugzillaIdList = List[PatchBugzillaId]
+PatchAdvisoryIdList = List[PatchAdvisoryId]
+
+
+class Patch(TypedDict, total=False):
+    Id: Optional[PatchId]
+    ReleaseDate: Optional[DateTime]
+    Title: Optional[PatchTitle]
+    Description: Optional[PatchDescription]
+    ContentUrl: Optional[PatchContentUrl]
+    Vendor: Optional[PatchVendor]
+    ProductFamily: Optional[PatchProductFamily]
+    Product: Optional[PatchProduct]
+    Classification: Optional[PatchClassification]
+    MsrcSeverity: Optional[PatchMsrcSeverity]
+    KbNumber: Optional[PatchKbNumber]
+    MsrcNumber: Optional[PatchMsrcNumber]
+    Language: Optional[PatchLanguage]
+    AdvisoryIds: Optional[PatchAdvisoryIdList]
+    BugzillaIds: Optional[PatchBugzillaIdList]
+    CVEIds: Optional[PatchCVEIdList]
+    Name: Optional[PatchName]
+    Epoch: Optional[PatchEpoch]
+    Version: Optional[PatchVersion]
+    Release: Optional[PatchRelease]
+    Arch: Optional[PatchArch]
+    Severity: Optional[PatchSeverity]
+    Repository: Optional[PatchRepository]
+
+
+PatchList = List[Patch]
+
+
+class DescribeAvailablePatchesResult(TypedDict, total=False):
+    Patches: Optional[PatchList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeDocumentPermissionRequest(ServiceRequest):
+    Name: DocumentName
+    PermissionType: DocumentPermissionType
+    MaxResults: Optional[DocumentPermissionMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class DescribeDocumentPermissionResponse(TypedDict, total=False):
+    AccountIds: Optional[AccountIdList]
+    AccountSharingInfoList: Optional[AccountSharingInfoList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeDocumentRequest(ServiceRequest):
+    Name: DocumentARN
+    DocumentVersion: Optional[DocumentVersion]
+    VersionName: Optional[DocumentVersionName]
+
+
+class DescribeDocumentResult(TypedDict, total=False):
+    Document: Optional[DocumentDescription]
+
+
+class DescribeEffectiveInstanceAssociationsRequest(ServiceRequest):
+    InstanceId: InstanceId
+    MaxResults: Optional[EffectiveInstanceAssociationMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class InstanceAssociation(TypedDict, total=False):
+    AssociationId: Optional[AssociationId]
+    InstanceId: Optional[InstanceId]
+    Content: Optional[DocumentContent]
+    AssociationVersion: Optional[AssociationVersion]
+
+
+InstanceAssociationList = List[InstanceAssociation]
+
+
+class DescribeEffectiveInstanceAssociationsResult(TypedDict, total=False):
+    Associations: Optional[InstanceAssociationList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeEffectivePatchesForPatchBaselineRequest(ServiceRequest):
+    BaselineId: BaselineId
+    MaxResults: Optional[PatchBaselineMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class PatchStatus(TypedDict, total=False):
+    DeploymentStatus: Optional[PatchDeploymentStatus]
+    ComplianceLevel: Optional[PatchComplianceLevel]
+    ApprovalDate: Optional[DateTime]
+
+
+class EffectivePatch(TypedDict, total=False):
+    Patch: Optional[Patch]
+    PatchStatus: Optional[PatchStatus]
+
+
+EffectivePatchList = List[EffectivePatch]
+
+
+class DescribeEffectivePatchesForPatchBaselineResult(TypedDict, total=False):
+    EffectivePatches: Optional[EffectivePatchList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeInstanceAssociationsStatusRequest(ServiceRequest):
+    InstanceId: InstanceId
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+class S3OutputUrl(TypedDict, total=False):
+    OutputUrl: Optional[Url]
+
+
+class InstanceAssociationOutputUrl(TypedDict, total=False):
+    S3OutputUrl: Optional[S3OutputUrl]
+
+
+class InstanceAssociationStatusInfo(TypedDict, total=False):
+    AssociationId: Optional[AssociationId]
+    Name: Optional[DocumentARN]
+    DocumentVersion: Optional[DocumentVersion]
+    AssociationVersion: Optional[AssociationVersion]
+    InstanceId: Optional[InstanceId]
+    ExecutionDate: Optional[DateTime]
+    Status: Optional[StatusName]
+    DetailedStatus: Optional[StatusName]
+    ExecutionSummary: Optional[InstanceAssociationExecutionSummary]
+    ErrorCode: Optional[AgentErrorCode]
+    OutputUrl: Optional[InstanceAssociationOutputUrl]
+    AssociationName: Optional[AssociationName]
+
+
+InstanceAssociationStatusInfos = List[InstanceAssociationStatusInfo]
+
+
+class DescribeInstanceAssociationsStatusResult(TypedDict, total=False):
+    InstanceAssociationStatusInfos: Optional[InstanceAssociationStatusInfos]
+    NextToken: Optional[NextToken]
+
+
+InstanceInformationFilterValueSet = List[InstanceInformationFilterValue]
+
+
+class InstanceInformationStringFilter(TypedDict, total=False):
+    Key: InstanceInformationStringFilterKey
+    Values: InstanceInformationFilterValueSet
+
+
+InstanceInformationStringFilterList = List[InstanceInformationStringFilter]
+
+
+class InstanceInformationFilter(TypedDict, total=False):
+    key: InstanceInformationFilterKey
+    valueSet: InstanceInformationFilterValueSet
+
+
+InstanceInformationFilterList = List[InstanceInformationFilter]
+
+
+class DescribeInstanceInformationRequest(ServiceRequest):
+    InstanceInformationFilterList: Optional[InstanceInformationFilterList]
+    Filters: Optional[InstanceInformationStringFilterList]
+    MaxResults: Optional[MaxResultsEC2Compatible]
+    NextToken: Optional[NextToken]
+
+
+InstanceAssociationStatusAggregatedCount = Dict[StatusName, InstanceCount]
+
+
+class InstanceAggregatedAssociationOverview(TypedDict, total=False):
+    DetailedStatus: Optional[StatusName]
+    InstanceAssociationStatusAggregatedCount: Optional[InstanceAssociationStatusAggregatedCount]
+
+
+class InstanceInformation(TypedDict, total=False):
+    InstanceId: Optional[InstanceId]
+    PingStatus: Optional[PingStatus]
+    LastPingDateTime: Optional[DateTime]
+    AgentVersion: Optional[Version]
+    IsLatestVersion: Optional[Boolean]
+    PlatformType: Optional[PlatformType]
+    PlatformName: Optional[String]
+    PlatformVersion: Optional[String]
+    ActivationId: Optional[ActivationId]
+    IamRole: Optional[IamRole]
+    RegistrationDate: Optional[DateTime]
+    ResourceType: Optional[ResourceType]
+    Name: Optional[String]
+    IPAddress: Optional[IPAddress]
+    ComputerName: Optional[ComputerName]
+    AssociationStatus: Optional[StatusName]
+    LastAssociationExecutionDate: Optional[DateTime]
+    LastSuccessfulAssociationExecutionDate: Optional[DateTime]
+    AssociationOverview: Optional[InstanceAggregatedAssociationOverview]
+    SourceId: Optional[SourceId]
+    SourceType: Optional[SourceType]
+
+
+InstanceInformationList = List[InstanceInformation]
+
+
+class DescribeInstanceInformationResult(TypedDict, total=False):
+    InstanceInformationList: Optional[InstanceInformationList]
+    NextToken: Optional[NextToken]
+
+
+InstancePatchStateFilterValues = List[InstancePatchStateFilterValue]
+
+
+class InstancePatchStateFilter(TypedDict, total=False):
+    Key: InstancePatchStateFilterKey
+    Values: InstancePatchStateFilterValues
+    Type: InstancePatchStateOperatorType
+
+
+InstancePatchStateFilterList = List[InstancePatchStateFilter]
+
+
+class DescribeInstancePatchStatesForPatchGroupRequest(ServiceRequest):
+    PatchGroup: PatchGroup
+    Filters: Optional[InstancePatchStateFilterList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[PatchComplianceMaxResults]
+
+
+class InstancePatchState(TypedDict, total=False):
+    InstanceId: InstanceId
+    PatchGroup: PatchGroup
+    BaselineId: BaselineId
+    SnapshotId: Optional[SnapshotId]
+    InstallOverrideList: Optional[InstallOverrideList]
+    OwnerInformation: Optional[OwnerInformation]
+    InstalledCount: Optional[PatchInstalledCount]
+    InstalledOtherCount: Optional[PatchInstalledOtherCount]
+    InstalledPendingRebootCount: Optional[PatchInstalledPendingRebootCount]
+    InstalledRejectedCount: Optional[PatchInstalledRejectedCount]
+    MissingCount: Optional[PatchMissingCount]
+    FailedCount: Optional[PatchFailedCount]
+    UnreportedNotApplicableCount: Optional[PatchUnreportedNotApplicableCount]
+    NotApplicableCount: Optional[PatchNotApplicableCount]
+    AvailableSecurityUpdateCount: Optional[PatchAvailableSecurityUpdateCount]
+    OperationStartTime: DateTime
+    OperationEndTime: DateTime
+    Operation: PatchOperationType
+    LastNoRebootInstallOperationTime: Optional[DateTime]
+    RebootOption: Optional[RebootOption]
+    CriticalNonCompliantCount: Optional[PatchCriticalNonCompliantCount]
+    SecurityNonCompliantCount: Optional[PatchSecurityNonCompliantCount]
+    OtherNonCompliantCount: Optional[PatchOtherNonCompliantCount]
+
+
+InstancePatchStatesList = List[InstancePatchState]
+
+
+class DescribeInstancePatchStatesForPatchGroupResult(TypedDict, total=False):
+    InstancePatchStates: Optional[InstancePatchStatesList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeInstancePatchStatesRequest(ServiceRequest):
+    InstanceIds: InstanceIdList
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[PatchComplianceMaxResults]
+
+
+InstancePatchStateList = List[InstancePatchState]
+
+
+class DescribeInstancePatchStatesResult(TypedDict, total=False):
+    InstancePatchStates: Optional[InstancePatchStateList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeInstancePatchesRequest(ServiceRequest):
+    InstanceId: InstanceId
+    Filters: Optional[PatchOrchestratorFilterList]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[PatchComplianceMaxResults]
+
+
+class PatchComplianceData(TypedDict, total=False):
+    Title: PatchTitle
+    KBId: PatchKbNumber
+    Classification: PatchClassification
+    Severity: PatchSeverity
+    State: PatchComplianceDataState
+    InstalledTime: DateTime
+    CVEIds: Optional[PatchCVEIds]
+
+
+PatchComplianceDataList = List[PatchComplianceData]
+
+
+class DescribeInstancePatchesResult(TypedDict, total=False):
+    Patches: Optional[PatchComplianceDataList]
+    NextToken: Optional[NextToken]
+
+
+InstancePropertyFilterValueSet = List[InstancePropertyFilterValue]
+
+
+class InstancePropertyStringFilter(TypedDict, total=False):
+    Key: InstancePropertyStringFilterKey
+    Values: InstancePropertyFilterValueSet
+    Operator: Optional[InstancePropertyFilterOperator]
+
+
+InstancePropertyStringFilterList = List[InstancePropertyStringFilter]
+
+
+class InstancePropertyFilter(TypedDict, total=False):
+    key: InstancePropertyFilterKey
+    valueSet: InstancePropertyFilterValueSet
+
+
+InstancePropertyFilterList = List[InstancePropertyFilter]
+
+
+class DescribeInstancePropertiesRequest(ServiceRequest):
+    InstancePropertyFilterList: Optional[InstancePropertyFilterList]
+    FiltersWithOperator: Optional[InstancePropertyStringFilterList]
+    MaxResults: Optional[DescribeInstancePropertiesMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class InstanceProperty(TypedDict, total=False):
+    Name: Optional[InstanceName]
+    InstanceId: Optional[InstanceId]
+    InstanceType: Optional[InstanceType]
+    InstanceRole: Optional[InstanceRole]
+    KeyName: Optional[KeyName]
+    InstanceState: Optional[InstanceState]
+    Architecture: Optional[Architecture]
+    IPAddress: Optional[IPAddress]
+    LaunchTime: Optional[DateTime]
+    PingStatus: Optional[PingStatus]
+    LastPingDateTime: Optional[DateTime]
+    AgentVersion: Optional[Version]
+    PlatformType: Optional[PlatformType]
+    PlatformName: Optional[PlatformName]
+    PlatformVersion: Optional[PlatformVersion]
+    ActivationId: Optional[ActivationId]
+    IamRole: Optional[IamRole]
+    RegistrationDate: Optional[DateTime]
+    ResourceType: Optional[String]
+    ComputerName: Optional[ComputerName]
+    AssociationStatus: Optional[StatusName]
+    LastAssociationExecutionDate: Optional[DateTime]
+    LastSuccessfulAssociationExecutionDate: Optional[DateTime]
+    AssociationOverview: Optional[InstanceAggregatedAssociationOverview]
+    SourceId: Optional[SourceId]
+    SourceType: Optional[SourceType]
+
+
+InstanceProperties = List[InstanceProperty]
+
+
+class DescribeInstancePropertiesResult(TypedDict, total=False):
+    InstanceProperties: Optional[InstanceProperties]
+    NextToken: Optional[NextToken]
+
+
+class DescribeInventoryDeletionsRequest(ServiceRequest):
+    DeletionId: Optional[UUID]
+    NextToken: Optional[NextToken]
+    MaxResults: Optional[MaxResults]
+
+
+InventoryDeletionLastStatusUpdateTime = datetime
+InventoryDeletionStartTime = datetime
+
+
+class InventoryDeletionStatusItem(TypedDict, total=False):
+    DeletionId: Optional[UUID]
+    TypeName: Optional[InventoryItemTypeName]
+    DeletionStartTime: Optional[InventoryDeletionStartTime]
+    LastStatus: Optional[InventoryDeletionStatus]
+    LastStatusMessage: Optional[InventoryDeletionLastStatusMessage]
+    DeletionSummary: Optional[InventoryDeletionSummary]
+    LastStatusUpdateTime: Optional[InventoryDeletionLastStatusUpdateTime]
+
+
+InventoryDeletionsList = List[InventoryDeletionStatusItem]
+
+
+class DescribeInventoryDeletionsResult(TypedDict, total=False):
+    InventoryDeletions: Optional[InventoryDeletionsList]
+    NextToken: Optional[NextToken]
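
Editor's note: two filter generations coexist in this API. The newer shapes (e.g. InstanceInformationStringFilter above) use Key/Values, while the legacy shapes (InstanceInformationFilter, InstancePropertyFilter) keep lowercase key/valueSet, and the request classes accept both side by side. A hedged illustration of the two spellings:

    new_style = {"Key": "PingStatus", "Values": ["Online"]}       # InstanceInformationStringFilter shape
    legacy_style = {"key": "PingStatus", "valueSet": ["Online"]}  # legacy InstanceInformationFilter shape
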
+
+
+MaintenanceWindowFilterValues = List[MaintenanceWindowFilterValue]
+
+
+class MaintenanceWindowFilter(TypedDict, total=False):
+    Key: Optional[MaintenanceWindowFilterKey]
+    Values: Optional[MaintenanceWindowFilterValues]
+
+
+MaintenanceWindowFilterList = List[MaintenanceWindowFilter]
+
+
+class DescribeMaintenanceWindowExecutionTaskInvocationsRequest(ServiceRequest):
+    WindowExecutionId: MaintenanceWindowExecutionId
+    TaskId: MaintenanceWindowExecutionTaskId
+    Filters: Optional[MaintenanceWindowFilterList]
+    MaxResults: Optional[MaintenanceWindowMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class MaintenanceWindowExecutionTaskInvocationIdentity(TypedDict, total=False):
+    WindowExecutionId: Optional[MaintenanceWindowExecutionId]
+    TaskExecutionId: Optional[MaintenanceWindowExecutionTaskId]
+    InvocationId: Optional[MaintenanceWindowExecutionTaskInvocationId]
+    ExecutionId: Optional[MaintenanceWindowExecutionTaskExecutionId]
+    TaskType: Optional[MaintenanceWindowTaskType]
+    Parameters: Optional[MaintenanceWindowExecutionTaskInvocationParameters]
+    Status: Optional[MaintenanceWindowExecutionStatus]
+    StatusDetails: Optional[MaintenanceWindowExecutionStatusDetails]
+    StartTime: Optional[DateTime]
+    EndTime: Optional[DateTime]
+    OwnerInformation: Optional[OwnerInformation]
+    WindowTargetId: Optional[MaintenanceWindowTaskTargetId]
+
+
+MaintenanceWindowExecutionTaskInvocationIdentityList = List[
+    MaintenanceWindowExecutionTaskInvocationIdentity
+]
+
+
+class DescribeMaintenanceWindowExecutionTaskInvocationsResult(TypedDict, total=False):
+    WindowExecutionTaskInvocationIdentities: Optional[
+        MaintenanceWindowExecutionTaskInvocationIdentityList
+    ]
+    NextToken: Optional[NextToken]
+
+
+class DescribeMaintenanceWindowExecutionTasksRequest(ServiceRequest):
+    WindowExecutionId: MaintenanceWindowExecutionId
+    Filters: Optional[MaintenanceWindowFilterList]
+    MaxResults: Optional[MaintenanceWindowMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class MaintenanceWindowExecutionTaskIdentity(TypedDict, total=False):
+    WindowExecutionId: Optional[MaintenanceWindowExecutionId]
+    TaskExecutionId: Optional[MaintenanceWindowExecutionTaskId]
+    Status: Optional[MaintenanceWindowExecutionStatus]
+    StatusDetails: Optional[MaintenanceWindowExecutionStatusDetails]
+    StartTime: Optional[DateTime]
+    EndTime: Optional[DateTime]
+    TaskArn: Optional[MaintenanceWindowTaskArn]
+    TaskType: Optional[MaintenanceWindowTaskType]
+    AlarmConfiguration: Optional[AlarmConfiguration]
+    TriggeredAlarms: Optional[AlarmStateInformationList]
+
+
+MaintenanceWindowExecutionTaskIdentityList = List[MaintenanceWindowExecutionTaskIdentity]
+
+
+class DescribeMaintenanceWindowExecutionTasksResult(TypedDict, total=False):
+    WindowExecutionTaskIdentities: Optional[MaintenanceWindowExecutionTaskIdentityList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeMaintenanceWindowExecutionsRequest(ServiceRequest):
+    WindowId: MaintenanceWindowId
+    Filters: Optional[MaintenanceWindowFilterList]
+    MaxResults: Optional[MaintenanceWindowMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class MaintenanceWindowExecution(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+    WindowExecutionId: Optional[MaintenanceWindowExecutionId]
+    Status: Optional[MaintenanceWindowExecutionStatus]
+    StatusDetails: Optional[MaintenanceWindowExecutionStatusDetails]
+    StartTime: Optional[DateTime]
+    EndTime: Optional[DateTime]
+
+
+MaintenanceWindowExecutionList = List[MaintenanceWindowExecution]
+
+
+class DescribeMaintenanceWindowExecutionsResult(TypedDict, total=False):
+    WindowExecutions: Optional[MaintenanceWindowExecutionList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeMaintenanceWindowScheduleRequest(ServiceRequest):
+    WindowId: Optional[MaintenanceWindowId]
+    Targets: Optional[Targets]
+    ResourceType: Optional[MaintenanceWindowResourceType]
+    Filters: Optional[PatchOrchestratorFilterList]
+    MaxResults: Optional[MaintenanceWindowSearchMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class ScheduledWindowExecution(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+    Name: Optional[MaintenanceWindowName]
+    ExecutionTime: Optional[MaintenanceWindowStringDateTime]
+
+
+ScheduledWindowExecutionList = List[ScheduledWindowExecution]
+
+
+class DescribeMaintenanceWindowScheduleResult(TypedDict, total=False):
+    ScheduledWindowExecutions: Optional[ScheduledWindowExecutionList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeMaintenanceWindowTargetsRequest(ServiceRequest):
+    WindowId: MaintenanceWindowId
+    Filters: Optional[MaintenanceWindowFilterList]
+    MaxResults: Optional[MaintenanceWindowMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class MaintenanceWindowTarget(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+    WindowTargetId: Optional[MaintenanceWindowTargetId]
+    ResourceType: Optional[MaintenanceWindowResourceType]
+    Targets: Optional[Targets]
+    OwnerInformation: Optional[OwnerInformation]
+    Name: Optional[MaintenanceWindowName]
+    Description: Optional[MaintenanceWindowDescription]
+
+
+MaintenanceWindowTargetList = List[MaintenanceWindowTarget]
+
+
+class DescribeMaintenanceWindowTargetsResult(TypedDict, total=False):
+    Targets: Optional[MaintenanceWindowTargetList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeMaintenanceWindowTasksRequest(ServiceRequest):
+    WindowId: MaintenanceWindowId
+    Filters: Optional[MaintenanceWindowFilterList]
+    MaxResults: Optional[MaintenanceWindowMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class LoggingInfo(TypedDict, total=False):
+    S3BucketName: S3BucketName
+    S3KeyPrefix: Optional[S3KeyPrefix]
+    S3Region: S3Region
+
+
+MaintenanceWindowTaskParameterValueList = List[MaintenanceWindowTaskParameterValue]
+
+
+class MaintenanceWindowTaskParameterValueExpression(TypedDict, total=False):
+    Values: Optional[MaintenanceWindowTaskParameterValueList]
+
+
+MaintenanceWindowTaskParameters = Dict[
+    MaintenanceWindowTaskParameterName, MaintenanceWindowTaskParameterValueExpression
+]
+
+
+class MaintenanceWindowTask(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+    WindowTaskId: Optional[MaintenanceWindowTaskId]
+    TaskArn: Optional[MaintenanceWindowTaskArn]
+    Type: Optional[MaintenanceWindowTaskType]
+    Targets: Optional[Targets]
+    TaskParameters: Optional[MaintenanceWindowTaskParameters]
+    Priority: Optional[MaintenanceWindowTaskPriority]
+    LoggingInfo: Optional[LoggingInfo]
+    ServiceRoleArn: Optional[ServiceRole]
+    MaxConcurrency: Optional[MaxConcurrency]
+    MaxErrors: Optional[MaxErrors]
+    Name: Optional[MaintenanceWindowName]
+    Description: Optional[MaintenanceWindowDescription]
+    CutoffBehavior: Optional[MaintenanceWindowTaskCutoffBehavior]
+    AlarmConfiguration: Optional[AlarmConfiguration]
+
+
+MaintenanceWindowTaskList = List[MaintenanceWindowTask]
+
+
+class DescribeMaintenanceWindowTasksResult(TypedDict, total=False):
+    Tasks: Optional[MaintenanceWindowTaskList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeMaintenanceWindowsForTargetRequest(ServiceRequest):
+    Targets: Targets
+    ResourceType: MaintenanceWindowResourceType
+    MaxResults: Optional[MaintenanceWindowSearchMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class MaintenanceWindowIdentityForTarget(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+    Name: Optional[MaintenanceWindowName]
+
+
+MaintenanceWindowsForTargetList = List[MaintenanceWindowIdentityForTarget]
+
+
+class DescribeMaintenanceWindowsForTargetResult(TypedDict, total=False):
+    WindowIdentities: Optional[MaintenanceWindowsForTargetList]
+    NextToken: Optional[NextToken]
+
+
+class DescribeMaintenanceWindowsRequest(ServiceRequest):
+    Filters: Optional[MaintenanceWindowFilterList]
+    MaxResults: Optional[MaintenanceWindowMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class MaintenanceWindowIdentity(TypedDict, total=False):
+    WindowId: Optional[MaintenanceWindowId]
+    Name: Optional[MaintenanceWindowName]
+    Description: Optional[MaintenanceWindowDescription]
+    Enabled: Optional[MaintenanceWindowEnabled]
+    Duration: Optional[MaintenanceWindowDurationHours]
+    Cutoff: Optional[MaintenanceWindowCutoff]
+    Schedule: Optional[MaintenanceWindowSchedule]
+    ScheduleTimezone: Optional[MaintenanceWindowTimezone]
+    ScheduleOffset: Optional[MaintenanceWindowOffset]
+    EndDate: Optional[MaintenanceWindowStringDateTime]
+    StartDate: Optional[MaintenanceWindowStringDateTime]
+    NextExecutionTime: Optional[MaintenanceWindowStringDateTime]
+
+
+MaintenanceWindowIdentityList = List[MaintenanceWindowIdentity]
+
+
+class DescribeMaintenanceWindowsResult(TypedDict, total=False):
+    WindowIdentities: Optional[MaintenanceWindowIdentityList]
+    NextToken: Optional[NextToken]
+
+
+OpsItemFilterValues = List[OpsItemFilterValue]
+
+
+class OpsItemFilter(TypedDict, total=False):
+    Key: OpsItemFilterKey
+    Values: OpsItemFilterValues
+    Operator: OpsItemFilterOperator
+
+
+OpsItemFilters = List[OpsItemFilter]
+
+
+class DescribeOpsItemsRequest(ServiceRequest):
+    OpsItemFilters: Optional[OpsItemFilters]
+    MaxResults: Optional[OpsItemMaxResults]
+    NextToken: Optional[String]
+
+
+class OpsItemSummary(TypedDict, total=False):
+    CreatedBy: Optional[String]
+    CreatedTime: Optional[DateTime]
+    LastModifiedBy: Optional[String]
+    LastModifiedTime: Optional[DateTime]
+    Priority: Optional[OpsItemPriority]
+    Source: Optional[OpsItemSource]
+    Status: Optional[OpsItemStatus]
+    OpsItemId: Optional[OpsItemId]
+    Title: Optional[OpsItemTitle]
+    OperationalData: Optional[OpsItemOperationalData]
+    Category: Optional[OpsItemCategory]
+    Severity: Optional[OpsItemSeverity]
+    OpsItemType: Optional[OpsItemType]
+    ActualStartTime: Optional[DateTime]
+    ActualEndTime: Optional[DateTime]
+    PlannedStartTime: Optional[DateTime]
+    PlannedEndTime: Optional[DateTime]
+
+
+OpsItemSummaries = List[OpsItemSummary]
+
+
+class DescribeOpsItemsResponse(TypedDict, total=False):
+    NextToken: Optional[String]
+    OpsItemSummaries: Optional[OpsItemSummaries]
+
+
+ParameterStringFilterValueList = List[ParameterStringFilterValue]
+
+
+class ParameterStringFilter(TypedDict, total=False):
+    Key: ParameterStringFilterKey
+    Option: Optional[ParameterStringQueryOption]
+    Values: Optional[ParameterStringFilterValueList]
+
+
+ParameterStringFilterList = List[ParameterStringFilter]
+ParametersFilterValueList = List[ParametersFilterValue]
+
+
+class ParametersFilter(TypedDict, total=False):
+    Key: ParametersFilterKey
+    Values: ParametersFilterValueList
+
+
+ParametersFilterList = List[ParametersFilter]
+
+
+class DescribeParametersRequest(ServiceRequest):
+    Filters: Optional[ParametersFilterList]
+    ParameterFilters: Optional[ParameterStringFilterList]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+    Shared: Optional[Boolean]
+
+
+class ParameterInlinePolicy(TypedDict, total=False):
+    PolicyText: Optional[String]
+    PolicyType: Optional[String]
+    PolicyStatus: Optional[String]
+
+
+ParameterPolicyList = List[ParameterInlinePolicy]
+PSParameterVersion = int
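
Editor's note: DescribeParametersRequest accepts both the legacy ParametersFilter list (Filters) and the richer ParameterStringFilter list (ParameterFilters), whose Option field selects the comparison. A sketch of a ParameterStringFilter-shaped dict (the path prefix is hypothetical):

    prefix_filter = {
        "Key": "Name",
        "Option": "BeginsWith",
        "Values": ["/localstack/"],  # hypothetical parameter name prefix
    }
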
+
+
+class ParameterMetadata(TypedDict, total=False):
+    Name: Optional[PSParameterName]
+    ARN: Optional[String]
+    Type: Optional[ParameterType]
+    KeyId: Optional[ParameterKeyId]
+    LastModifiedDate: Optional[DateTime]
+    LastModifiedUser: Optional[String]
+    Description: Optional[ParameterDescription]
+    AllowedPattern: Optional[AllowedPattern]
+    Version: Optional[PSParameterVersion]
+    Tier: Optional[ParameterTier]
+    Policies: Optional[ParameterPolicyList]
+    DataType: Optional[ParameterDataType]
+
+
+ParameterMetadataList = List[ParameterMetadata]
+
+
+class DescribeParametersResult(TypedDict, total=False):
+    Parameters: Optional[ParameterMetadataList]
+    NextToken: Optional[NextToken]
+
+
+class DescribePatchBaselinesRequest(ServiceRequest):
+    Filters: Optional[PatchOrchestratorFilterList]
+    MaxResults: Optional[PatchBaselineMaxResults]
+    NextToken: Optional[NextToken]
+
+
+class PatchBaselineIdentity(TypedDict, total=False):
+    BaselineId: Optional[BaselineId]
+    BaselineName: Optional[BaselineName]
+    OperatingSystem: Optional[OperatingSystem]
+    BaselineDescription: Optional[BaselineDescription]
+    DefaultBaseline: Optional[DefaultBaseline]
+
+
+PatchBaselineIdentityList = List[PatchBaselineIdentity]
+
+
+class DescribePatchBaselinesResult(TypedDict, total=False):
+    BaselineIdentities: Optional[PatchBaselineIdentityList]
+    NextToken: Optional[NextToken]
+
+
+class DescribePatchGroupStateRequest(ServiceRequest):
+    PatchGroup: PatchGroup
+
+
+class DescribePatchGroupStateResult(TypedDict, total=False):
+    Instances: Optional[Integer]
+    InstancesWithInstalledPatches: Optional[Integer]
+    InstancesWithInstalledOtherPatches: Optional[Integer]
+    InstancesWithInstalledPendingRebootPatches: Optional[InstancesCount]
+    InstancesWithInstalledRejectedPatches: Optional[InstancesCount]
+    InstancesWithMissingPatches: Optional[Integer]
+    InstancesWithFailedPatches: Optional[Integer]
+    InstancesWithNotApplicablePatches: Optional[Integer]
+    InstancesWithUnreportedNotApplicablePatches: Optional[Integer]
+    InstancesWithCriticalNonCompliantPatches: Optional[InstancesCount]
+    InstancesWithSecurityNonCompliantPatches: Optional[InstancesCount]
+    InstancesWithOtherNonCompliantPatches: Optional[InstancesCount]
+    InstancesWithAvailableSecurityUpdates: Optional[Integer]
+
+
+class DescribePatchGroupsRequest(ServiceRequest):
+    MaxResults: Optional[PatchBaselineMaxResults]
+    Filters: Optional[PatchOrchestratorFilterList]
+    NextToken: Optional[NextToken]
+
+
+class PatchGroupPatchBaselineMapping(TypedDict, total=False):
+    PatchGroup: Optional[PatchGroup]
+    BaselineIdentity: Optional[PatchBaselineIdentity]
+
+
+PatchGroupPatchBaselineMappingList = List[PatchGroupPatchBaselineMapping]
+
+
+class DescribePatchGroupsResult(TypedDict, total=False):
+    Mappings: Optional[PatchGroupPatchBaselineMappingList]
+    NextToken: Optional[NextToken]
+
+
+class DescribePatchPropertiesRequest(ServiceRequest):
+    OperatingSystem: OperatingSystem
+    Property: PatchProperty
+    PatchSet: Optional[PatchSet]
+    MaxResults: Optional[MaxResults]
+    NextToken: Optional[NextToken]
+
+
+PatchPropertyEntry = Dict[AttributeName, AttributeValue]
+PatchPropertiesList = List[PatchPropertyEntry]
+
+
+class DescribePatchPropertiesResult(TypedDict, total=False):
+    Properties: Optional[PatchPropertiesList]
+    NextToken: Optional[NextToken]
+
+
+class SessionFilter(TypedDict, total=False):
+    key: SessionFilterKey
+    value: SessionFilterValue
+
+
+SessionFilterList = List[SessionFilter]
+
+
+class DescribeSessionsRequest(ServiceRequest):
+    State: SessionState
+    MaxResults: Optional[SessionMaxResults]
+    NextToken: Optional[NextToken]
+    Filters: Optional[SessionFilterList]
+
+
+class SessionManagerOutputUrl(TypedDict, total=False):
+    S3OutputUrl: Optional[SessionManagerS3OutputUrl]
+    CloudWatchOutputUrl: Optional[SessionManagerCloudWatchOutputUrl]
+
+
+class Session(TypedDict, total=False):
+    SessionId: Optional[SessionId]
+    Target: Optional[SessionTarget]
+    Status: Optional[SessionStatus]
+    StartDate: Optional[DateTime]
+    EndDate: Optional[DateTime]
+    DocumentName: Optional[DocumentName]
+    Owner: Optional[SessionOwner]
+    Reason: Optional[SessionReason]
+    Details: Optional[SessionDetails]
+    OutputUrl: Optional[SessionManagerOutputUrl]
+    MaxSessionDuration: Optional[MaxSessionDuration]
+
+
+SessionList = List[Session]
+
+
+class DescribeSessionsResponse(TypedDict, total=False):
+    Sessions: Optional[SessionList]
+    NextToken: Optional[NextToken]
+
+
+class DisassociateOpsItemRelatedItemRequest(ServiceRequest):
+    OpsItemId: OpsItemId
+    AssociationId: OpsItemRelatedItemAssociationId
+
+
+class DisassociateOpsItemRelatedItemResponse(TypedDict, total=False):
+    pass
+
+
+class DocumentDefaultVersionDescription(TypedDict, total=False):
+    Name: Optional[DocumentName]
+    DefaultVersion: Optional[DocumentVersion]
+    DefaultVersionName: Optional[DocumentVersionName]
+
+
+class DocumentFilter(TypedDict, total=False):
+    key: DocumentFilterKey
+    value: DocumentFilterValue
+
+
+DocumentFilterList = List[DocumentFilter]
+
+
+class DocumentIdentifier(TypedDict, total=False):
+    Name: Optional[DocumentARN]
+    CreatedDate: Optional[DateTime]
+    DisplayName: Optional[DocumentDisplayName]
+    Owner: Optional[DocumentOwner]
+    VersionName: Optional[DocumentVersionName]
+    PlatformTypes: Optional[PlatformTypeList]
+    DocumentVersion: Optional[DocumentVersion]
+    DocumentType: Optional[DocumentType]
+    SchemaVersion: Optional[DocumentSchemaVersion]
+    DocumentFormat: Optional[DocumentFormat]
+    TargetType: Optional[TargetType]
+    Tags: Optional[TagList]
+    Requires: Optional[DocumentRequiresList]
+    ReviewStatus: Optional[ReviewStatus]
+    Author: Optional[DocumentAuthor]
+
+
+DocumentIdentifierList = List[DocumentIdentifier]
+DocumentKeyValuesFilterValues = List[DocumentKeyValuesFilterValue]
+
+
+class DocumentKeyValuesFilter(TypedDict, total=False):
+    Key: Optional[DocumentKeyValuesFilterKey]
+    Values: Optional[DocumentKeyValuesFilterValues]
+
+
+DocumentKeyValuesFilterList = List[DocumentKeyValuesFilter]
+
+
+class DocumentReviewCommentSource(TypedDict, total=False):
+    Type: Optional[DocumentReviewCommentType]
+    Content: Optional[DocumentReviewComment]
+
+
+DocumentReviewCommentList = List[DocumentReviewCommentSource]
+
+
+class DocumentReviewerResponseSource(TypedDict, total=False):
+    CreateTime: Optional[DateTime]
+    UpdatedTime: Optional[DateTime]
+    ReviewStatus: Optional[ReviewStatus]
+    Comment: Optional[DocumentReviewCommentList]
+    Reviewer: Optional[Reviewer]
+
+
+DocumentReviewerResponseList = List[DocumentReviewerResponseSource]
+
+
+class DocumentMetadataResponseInfo(TypedDict, total=False):
+    ReviewerResponse: Optional[DocumentReviewerResponseList]
+
+
+class DocumentReviews(TypedDict, total=False):
+    Action: DocumentReviewAction
+    Comment: Optional[DocumentReviewCommentList]
+
+
+class DocumentVersionInfo(TypedDict, total=False):
+    Name: Optional[DocumentName]
+    DisplayName: Optional[DocumentDisplayName]
+    DocumentVersion: Optional[DocumentVersion]
+    VersionName: Optional[DocumentVersionName]
+    CreatedDate: Optional[DateTime]
+    IsDefaultVersion: Optional[Boolean]
+    DocumentFormat: Optional[DocumentFormat]
+    Status: Optional[DocumentStatus]
+    StatusInformation: Optional[DocumentStatusInformation]
+    ReviewStatus: Optional[ReviewStatus]
+
+
+DocumentVersionList = List[DocumentVersionInfo]
+
+
+class ExecutionInputs(TypedDict, total=False):
+    Automation: Optional[AutomationExecutionInputs]
+
+
+class ExecutionPreview(TypedDict, total=False):
+    Automation: Optional[AutomationExecutionPreview]
+
+
+class GetAccessTokenRequest(ServiceRequest):
+    AccessRequestId: AccessRequestId
+
+
+class GetAccessTokenResponse(TypedDict, total=False):
+    Credentials: Optional[Credentials]
+    AccessRequestStatus: Optional[AccessRequestStatus]
+
+
+class GetAutomationExecutionRequest(ServiceRequest):
+    AutomationExecutionId: AutomationExecutionId
+
+
+class GetAutomationExecutionResult(TypedDict, total=False):
+    AutomationExecution: Optional[AutomationExecution]
+
+
+class GetCalendarStateRequest(ServiceRequest):
+    CalendarNames: CalendarNameOrARNList
+    AtTime: Optional[ISO8601String]
+
+
+class GetCalendarStateResponse(TypedDict, total=False):
+    State: Optional[CalendarState]
+    AtTime: Optional[ISO8601String]
+    NextTransitionTime: Optional[ISO8601String]
+
+
+class GetCommandInvocationRequest(ServiceRequest):
+    CommandId: CommandId
+    InstanceId: InstanceId
+    PluginName: Optional[CommandPluginName]
+
+
+class GetCommandInvocationResult(TypedDict, total=False):
+    CommandId: Optional[CommandId]
+    InstanceId: Optional[InstanceId]
+    Comment: Optional[Comment]
+    DocumentName: Optional[DocumentName]
+    DocumentVersion: Optional[DocumentVersion]
+    PluginName: Optional[CommandPluginName]
+    ResponseCode: Optional[ResponseCode]
+    ExecutionStartDateTime: Optional[StringDateTime]
+    ExecutionElapsedTime: Optional[StringDateTime]
+    ExecutionEndDateTime: Optional[StringDateTime]
+    Status: Optional[CommandInvocationStatus]
+    StatusDetails: Optional[StatusDetails]
+    StandardOutputContent: Optional[StandardOutputContent]
+    StandardOutputUrl: Optional[Url]
+    StandardErrorContent: Optional[StandardErrorContent]
+    StandardErrorUrl: Optional[Url]
+    CloudWatchOutputConfig: Optional[CloudWatchOutputConfig]
+
+
+class GetConnectionStatusRequest(ServiceRequest):
+    Target: SessionTarget
+
+
+class GetConnectionStatusResponse(TypedDict, total=False):
+    Target: Optional[SessionTarget]
+    Status: Optional[ConnectionStatus]
+
+
+class GetDefaultPatchBaselineRequest(ServiceRequest):
+    OperatingSystem: Optional[OperatingSystem]
+
+
+class GetDefaultPatchBaselineResult(TypedDict, total=False):
+    BaselineId: Optional[BaselineId]
+    OperatingSystem: Optional[OperatingSystem]
+
+
+class GetDeployablePatchSnapshotForInstanceRequest(ServiceRequest):
+    InstanceId: InstanceId
+    SnapshotId: SnapshotId
+    BaselineOverride: Optional[BaselineOverride]
+
+
+class GetDeployablePatchSnapshotForInstanceResult(TypedDict, total=False):
+    InstanceId: Optional[InstanceId]
+    SnapshotId: Optional[SnapshotId]
+    SnapshotDownloadUrl: Optional[SnapshotDownloadUrl]
+    Product: Optional[Product]
+
+
+class GetDocumentRequest(ServiceRequest):
+    Name: DocumentARN
+    VersionName: Optional[DocumentVersionName]
+    DocumentVersion: Optional[DocumentVersion]
+    DocumentFormat: Optional[DocumentFormat]
+
+
+class GetDocumentResult(TypedDict, total=False):
+    Name: Optional[DocumentARN]
+    CreatedDate: Optional[DateTime]
+    DisplayName: Optional[DocumentDisplayName]
+    VersionName: Optional[DocumentVersionName]
+    DocumentVersion: Optional[DocumentVersion]
+    Status: Optional[DocumentStatus]
+    StatusInformation: Optional[DocumentStatusInformation]
+    Content: Optional[DocumentContent]
+    DocumentType: Optional[DocumentType]
+    DocumentFormat: Optional[DocumentFormat]
+    Requires: Optional[DocumentRequiresList]
+    AttachmentsContent: Optional[AttachmentContentList]
+    ReviewStatus: Optional[ReviewStatus]
+
+
+class GetExecutionPreviewRequest(ServiceRequest):
+    ExecutionPreviewId: ExecutionPreviewId
GetExecutionPreviewResponse(TypedDict, total=False): + ExecutionPreviewId: Optional[ExecutionPreviewId] + EndedAt: Optional[DateTime] + Status: Optional[ExecutionPreviewStatus] + StatusMessage: Optional[String] + ExecutionPreview: Optional[ExecutionPreview] + + +class ResultAttribute(TypedDict, total=False): + TypeName: InventoryItemTypeName + + +ResultAttributeList = List[ResultAttribute] +InventoryFilterValueList = List[InventoryFilterValue] + + +class InventoryFilter(TypedDict, total=False): + Key: InventoryFilterKey + Values: InventoryFilterValueList + Type: Optional[InventoryQueryOperatorType] + + +InventoryFilterList = List[InventoryFilter] + + +class InventoryGroup(TypedDict, total=False): + Name: InventoryGroupName + Filters: InventoryFilterList + + +InventoryGroupList = List[InventoryGroup] +InventoryAggregatorList = List["InventoryAggregator"] + + +class InventoryAggregator(TypedDict, total=False): + Expression: Optional[InventoryAggregatorExpression] + Aggregators: Optional[InventoryAggregatorList] + Groups: Optional[InventoryGroupList] + + +class GetInventoryRequest(ServiceRequest): + Filters: Optional[InventoryFilterList] + Aggregators: Optional[InventoryAggregatorList] + ResultAttributes: Optional[ResultAttributeList] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +InventoryItemEntry = Dict[AttributeName, AttributeValue] +InventoryItemEntryList = List[InventoryItemEntry] + + +class InventoryResultItem(TypedDict, total=False): + TypeName: InventoryItemTypeName + SchemaVersion: InventoryItemSchemaVersion + CaptureTime: Optional[InventoryItemCaptureTime] + ContentHash: Optional[InventoryItemContentHash] + Content: InventoryItemEntryList + + +InventoryResultItemMap = Dict[InventoryResultItemKey, InventoryResultItem] + + +class InventoryResultEntity(TypedDict, total=False): + Id: Optional[InventoryResultEntityId] + Data: Optional[InventoryResultItemMap] + + +InventoryResultEntityList = List[InventoryResultEntity] + + +class GetInventoryResult(TypedDict, total=False): + Entities: Optional[InventoryResultEntityList] + NextToken: Optional[NextToken] + + +class GetInventorySchemaRequest(ServiceRequest): + TypeName: Optional[InventoryItemTypeNameFilter] + NextToken: Optional[NextToken] + MaxResults: Optional[GetInventorySchemaMaxResults] + Aggregator: Optional[AggregatorSchemaOnly] + SubType: Optional[IsSubTypeSchema] + + +class InventoryItemAttribute(TypedDict, total=False): + Name: InventoryItemAttributeName + DataType: InventoryAttributeDataType + + +InventoryItemAttributeList = List[InventoryItemAttribute] + + +class InventoryItemSchema(TypedDict, total=False): + TypeName: InventoryItemTypeName + Version: Optional[InventoryItemSchemaVersion] + Attributes: InventoryItemAttributeList + DisplayName: Optional[InventoryTypeDisplayName] + + +InventoryItemSchemaResultList = List[InventoryItemSchema] + + +class GetInventorySchemaResult(TypedDict, total=False): + Schemas: Optional[InventoryItemSchemaResultList] + NextToken: Optional[NextToken] + + +class GetMaintenanceWindowExecutionRequest(ServiceRequest): + WindowExecutionId: MaintenanceWindowExecutionId + + +MaintenanceWindowExecutionTaskIdList = List[MaintenanceWindowExecutionTaskId] + + +class GetMaintenanceWindowExecutionResult(TypedDict, total=False): + WindowExecutionId: Optional[MaintenanceWindowExecutionId] + TaskIds: Optional[MaintenanceWindowExecutionTaskIdList] + Status: Optional[MaintenanceWindowExecutionStatus] + StatusDetails: Optional[MaintenanceWindowExecutionStatusDetails] + StartTime: 
Optional[DateTime] + EndTime: Optional[DateTime] + + +class GetMaintenanceWindowExecutionTaskInvocationRequest(ServiceRequest): + WindowExecutionId: MaintenanceWindowExecutionId + TaskId: MaintenanceWindowExecutionTaskId + InvocationId: MaintenanceWindowExecutionTaskInvocationId + + +class GetMaintenanceWindowExecutionTaskInvocationResult(TypedDict, total=False): + WindowExecutionId: Optional[MaintenanceWindowExecutionId] + TaskExecutionId: Optional[MaintenanceWindowExecutionTaskId] + InvocationId: Optional[MaintenanceWindowExecutionTaskInvocationId] + ExecutionId: Optional[MaintenanceWindowExecutionTaskExecutionId] + TaskType: Optional[MaintenanceWindowTaskType] + Parameters: Optional[MaintenanceWindowExecutionTaskInvocationParameters] + Status: Optional[MaintenanceWindowExecutionStatus] + StatusDetails: Optional[MaintenanceWindowExecutionStatusDetails] + StartTime: Optional[DateTime] + EndTime: Optional[DateTime] + OwnerInformation: Optional[OwnerInformation] + WindowTargetId: Optional[MaintenanceWindowTaskTargetId] + + +class GetMaintenanceWindowExecutionTaskRequest(ServiceRequest): + WindowExecutionId: MaintenanceWindowExecutionId + TaskId: MaintenanceWindowExecutionTaskId + + +MaintenanceWindowTaskParametersList = List[MaintenanceWindowTaskParameters] + + +class GetMaintenanceWindowExecutionTaskResult(TypedDict, total=False): + WindowExecutionId: Optional[MaintenanceWindowExecutionId] + TaskExecutionId: Optional[MaintenanceWindowExecutionTaskId] + TaskArn: Optional[MaintenanceWindowTaskArn] + ServiceRole: Optional[ServiceRole] + Type: Optional[MaintenanceWindowTaskType] + TaskParameters: Optional[MaintenanceWindowTaskParametersList] + Priority: Optional[MaintenanceWindowTaskPriority] + MaxConcurrency: Optional[MaxConcurrency] + MaxErrors: Optional[MaxErrors] + Status: Optional[MaintenanceWindowExecutionStatus] + StatusDetails: Optional[MaintenanceWindowExecutionStatusDetails] + StartTime: Optional[DateTime] + EndTime: Optional[DateTime] + AlarmConfiguration: Optional[AlarmConfiguration] + TriggeredAlarms: Optional[AlarmStateInformationList] + + +class GetMaintenanceWindowRequest(ServiceRequest): + WindowId: MaintenanceWindowId + + +class GetMaintenanceWindowResult(TypedDict, total=False): + WindowId: Optional[MaintenanceWindowId] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + StartDate: Optional[MaintenanceWindowStringDateTime] + EndDate: Optional[MaintenanceWindowStringDateTime] + Schedule: Optional[MaintenanceWindowSchedule] + ScheduleTimezone: Optional[MaintenanceWindowTimezone] + ScheduleOffset: Optional[MaintenanceWindowOffset] + NextExecutionTime: Optional[MaintenanceWindowStringDateTime] + Duration: Optional[MaintenanceWindowDurationHours] + Cutoff: Optional[MaintenanceWindowCutoff] + AllowUnassociatedTargets: Optional[MaintenanceWindowAllowUnassociatedTargets] + Enabled: Optional[MaintenanceWindowEnabled] + CreatedDate: Optional[DateTime] + ModifiedDate: Optional[DateTime] + + +class GetMaintenanceWindowTaskRequest(ServiceRequest): + WindowId: MaintenanceWindowId + WindowTaskId: MaintenanceWindowTaskId + + +MaintenanceWindowLambdaPayload = bytes + + +class MaintenanceWindowLambdaParameters(TypedDict, total=False): + ClientContext: Optional[MaintenanceWindowLambdaClientContext] + Qualifier: Optional[MaintenanceWindowLambdaQualifier] + Payload: Optional[MaintenanceWindowLambdaPayload] + + +class MaintenanceWindowStepFunctionsParameters(TypedDict, total=False): + Input: Optional[MaintenanceWindowStepFunctionsInput] + Name: 
Optional[MaintenanceWindowStepFunctionsName] + + +class MaintenanceWindowAutomationParameters(TypedDict, total=False): + DocumentVersion: Optional[DocumentVersion] + Parameters: Optional[AutomationParameterMap] + + +class MaintenanceWindowRunCommandParameters(TypedDict, total=False): + Comment: Optional[Comment] + CloudWatchOutputConfig: Optional[CloudWatchOutputConfig] + DocumentHash: Optional[DocumentHash] + DocumentHashType: Optional[DocumentHashType] + DocumentVersion: Optional[DocumentVersion] + NotificationConfig: Optional[NotificationConfig] + OutputS3BucketName: Optional[S3BucketName] + OutputS3KeyPrefix: Optional[S3KeyPrefix] + Parameters: Optional[Parameters] + ServiceRoleArn: Optional[ServiceRole] + TimeoutSeconds: Optional[TimeoutSeconds] + + +class MaintenanceWindowTaskInvocationParameters(TypedDict, total=False): + RunCommand: Optional[MaintenanceWindowRunCommandParameters] + Automation: Optional[MaintenanceWindowAutomationParameters] + StepFunctions: Optional[MaintenanceWindowStepFunctionsParameters] + Lambda: Optional[MaintenanceWindowLambdaParameters] + + +class GetMaintenanceWindowTaskResult(TypedDict, total=False): + WindowId: Optional[MaintenanceWindowId] + WindowTaskId: Optional[MaintenanceWindowTaskId] + Targets: Optional[Targets] + TaskArn: Optional[MaintenanceWindowTaskArn] + ServiceRoleArn: Optional[ServiceRole] + TaskType: Optional[MaintenanceWindowTaskType] + TaskParameters: Optional[MaintenanceWindowTaskParameters] + TaskInvocationParameters: Optional[MaintenanceWindowTaskInvocationParameters] + Priority: Optional[MaintenanceWindowTaskPriority] + MaxConcurrency: Optional[MaxConcurrency] + MaxErrors: Optional[MaxErrors] + LoggingInfo: Optional[LoggingInfo] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + CutoffBehavior: Optional[MaintenanceWindowTaskCutoffBehavior] + AlarmConfiguration: Optional[AlarmConfiguration] + + +class GetOpsItemRequest(ServiceRequest): + OpsItemId: OpsItemId + OpsItemArn: Optional[OpsItemArn] + + +class OpsItem(TypedDict, total=False): + CreatedBy: Optional[String] + OpsItemType: Optional[OpsItemType] + CreatedTime: Optional[DateTime] + Description: Optional[OpsItemDescription] + LastModifiedBy: Optional[String] + LastModifiedTime: Optional[DateTime] + Notifications: Optional[OpsItemNotifications] + Priority: Optional[OpsItemPriority] + RelatedOpsItems: Optional[RelatedOpsItems] + Status: Optional[OpsItemStatus] + OpsItemId: Optional[OpsItemId] + Version: Optional[String] + Title: Optional[OpsItemTitle] + Source: Optional[OpsItemSource] + OperationalData: Optional[OpsItemOperationalData] + Category: Optional[OpsItemCategory] + Severity: Optional[OpsItemSeverity] + ActualStartTime: Optional[DateTime] + ActualEndTime: Optional[DateTime] + PlannedStartTime: Optional[DateTime] + PlannedEndTime: Optional[DateTime] + OpsItemArn: Optional[OpsItemArn] + + +class GetOpsItemResponse(TypedDict, total=False): + OpsItem: Optional[OpsItem] + + +class GetOpsMetadataRequest(ServiceRequest): + OpsMetadataArn: OpsMetadataArn + MaxResults: Optional[GetOpsMetadataMaxResults] + NextToken: Optional[NextToken] + + +class GetOpsMetadataResult(TypedDict, total=False): + ResourceId: Optional[OpsMetadataResourceId] + Metadata: Optional[MetadataMap] + NextToken: Optional[NextToken] + + +class OpsResultAttribute(TypedDict, total=False): + TypeName: OpsDataTypeName + + +OpsResultAttributeList = List[OpsResultAttribute] +OpsAggregatorList = List["OpsAggregator"] +OpsFilterValueList = List[OpsFilterValue] + + +class 
OpsFilter(TypedDict, total=False): + Key: OpsFilterKey + Values: OpsFilterValueList + Type: Optional[OpsFilterOperatorType] + + +OpsFilterList = List[OpsFilter] +OpsAggregatorValueMap = Dict[OpsAggregatorValueKey, OpsAggregatorValue] + + +class OpsAggregator(TypedDict, total=False): + AggregatorType: Optional[OpsAggregatorType] + TypeName: Optional[OpsDataTypeName] + AttributeName: Optional[OpsDataAttributeName] + Values: Optional[OpsAggregatorValueMap] + Filters: Optional[OpsFilterList] + Aggregators: Optional[OpsAggregatorList] + + +class GetOpsSummaryRequest(ServiceRequest): + SyncName: Optional[ResourceDataSyncName] + Filters: Optional[OpsFilterList] + Aggregators: Optional[OpsAggregatorList] + ResultAttributes: Optional[OpsResultAttributeList] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +OpsEntityItemEntry = Dict[AttributeName, AttributeValue] +OpsEntityItemEntryList = List[OpsEntityItemEntry] + + +class OpsEntityItem(TypedDict, total=False): + CaptureTime: Optional[OpsEntityItemCaptureTime] + Content: Optional[OpsEntityItemEntryList] + + +OpsEntityItemMap = Dict[OpsEntityItemKey, OpsEntityItem] + + +class OpsEntity(TypedDict, total=False): + Id: Optional[OpsEntityId] + Data: Optional[OpsEntityItemMap] + + +OpsEntityList = List[OpsEntity] + + +class GetOpsSummaryResult(TypedDict, total=False): + Entities: Optional[OpsEntityList] + NextToken: Optional[NextToken] + + +class GetParameterHistoryRequest(ServiceRequest): + Name: PSParameterName + WithDecryption: Optional[Boolean] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +ParameterLabelList = List[ParameterLabel] + + +class ParameterHistory(TypedDict, total=False): + Name: Optional[PSParameterName] + Type: Optional[ParameterType] + KeyId: Optional[ParameterKeyId] + LastModifiedDate: Optional[DateTime] + LastModifiedUser: Optional[String] + Description: Optional[ParameterDescription] + Value: Optional[PSParameterValue] + AllowedPattern: Optional[AllowedPattern] + Version: Optional[PSParameterVersion] + Labels: Optional[ParameterLabelList] + Tier: Optional[ParameterTier] + Policies: Optional[ParameterPolicyList] + DataType: Optional[ParameterDataType] + + +ParameterHistoryList = List[ParameterHistory] + + +class GetParameterHistoryResult(TypedDict, total=False): + Parameters: Optional[ParameterHistoryList] + NextToken: Optional[NextToken] + + +class GetParameterRequest(ServiceRequest): + Name: PSParameterName + WithDecryption: Optional[Boolean] + + +class Parameter(TypedDict, total=False): + Name: Optional[PSParameterName] + Type: Optional[ParameterType] + Value: Optional[PSParameterValue] + Version: Optional[PSParameterVersion] + Selector: Optional[PSParameterSelector] + SourceResult: Optional[String] + LastModifiedDate: Optional[DateTime] + ARN: Optional[String] + DataType: Optional[ParameterDataType] + + +class GetParameterResult(TypedDict, total=False): + Parameter: Optional[Parameter] + + +class GetParametersByPathRequest(ServiceRequest): + Path: PSParameterName + Recursive: Optional[Boolean] + ParameterFilters: Optional[ParameterStringFilterList] + WithDecryption: Optional[Boolean] + MaxResults: Optional[GetParametersByPathMaxResults] + NextToken: Optional[NextToken] + + +ParameterList = List[Parameter] + + +class GetParametersByPathResult(TypedDict, total=False): + Parameters: Optional[ParameterList] + NextToken: Optional[NextToken] + + +class GetParametersRequest(ServiceRequest): + Names: ParameterNameList + WithDecryption: Optional[Boolean] + + +class 
GetParametersResult(TypedDict, total=False): + Parameters: Optional[ParameterList] + InvalidParameters: Optional[ParameterNameList] + + +class GetPatchBaselineForPatchGroupRequest(ServiceRequest): + PatchGroup: PatchGroup + OperatingSystem: Optional[OperatingSystem] + + +class GetPatchBaselineForPatchGroupResult(TypedDict, total=False): + BaselineId: Optional[BaselineId] + PatchGroup: Optional[PatchGroup] + OperatingSystem: Optional[OperatingSystem] + + +class GetPatchBaselineRequest(ServiceRequest): + BaselineId: BaselineId + + +PatchGroupList = List[PatchGroup] + + +class GetPatchBaselineResult(TypedDict, total=False): + BaselineId: Optional[BaselineId] + Name: Optional[BaselineName] + OperatingSystem: Optional[OperatingSystem] + GlobalFilters: Optional[PatchFilterGroup] + ApprovalRules: Optional[PatchRuleGroup] + ApprovedPatches: Optional[PatchIdList] + ApprovedPatchesComplianceLevel: Optional[PatchComplianceLevel] + ApprovedPatchesEnableNonSecurity: Optional[Boolean] + RejectedPatches: Optional[PatchIdList] + RejectedPatchesAction: Optional[PatchAction] + PatchGroups: Optional[PatchGroupList] + CreatedDate: Optional[DateTime] + ModifiedDate: Optional[DateTime] + Description: Optional[BaselineDescription] + Sources: Optional[PatchSourceList] + AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus] + + +class GetResourcePoliciesRequest(ServiceRequest): + ResourceArn: ResourceArnString + NextToken: Optional[String] + MaxResults: Optional[ResourcePolicyMaxResults] + + +class GetResourcePoliciesResponseEntry(TypedDict, total=False): + PolicyId: Optional[PolicyId] + PolicyHash: Optional[PolicyHash] + Policy: Optional[Policy] + + +GetResourcePoliciesResponseEntries = List[GetResourcePoliciesResponseEntry] + + +class GetResourcePoliciesResponse(TypedDict, total=False): + NextToken: Optional[String] + Policies: Optional[GetResourcePoliciesResponseEntries] + + +class GetServiceSettingRequest(ServiceRequest): + SettingId: ServiceSettingId + + +class ServiceSetting(TypedDict, total=False): + SettingId: Optional[ServiceSettingId] + SettingValue: Optional[ServiceSettingValue] + LastModifiedDate: Optional[DateTime] + LastModifiedUser: Optional[String] + ARN: Optional[String] + Status: Optional[String] + + +class GetServiceSettingResult(TypedDict, total=False): + ServiceSetting: Optional[ServiceSetting] + + +class InstanceInfo(TypedDict, total=False): + AgentType: Optional[AgentType] + AgentVersion: Optional[AgentVersion] + ComputerName: Optional[ComputerName] + InstanceStatus: Optional[InstanceStatus] + IpAddress: Optional[IpAddress] + ManagedStatus: Optional[ManagedStatus] + PlatformType: Optional[PlatformType] + PlatformName: Optional[PlatformName] + PlatformVersion: Optional[PlatformVersion] + ResourceType: Optional[ResourceType] + + +InventoryItemContentContext = Dict[AttributeName, AttributeValue] + + +class InventoryItem(TypedDict, total=False): + TypeName: InventoryItemTypeName + SchemaVersion: InventoryItemSchemaVersion + CaptureTime: InventoryItemCaptureTime + ContentHash: Optional[InventoryItemContentHash] + Content: Optional[InventoryItemEntryList] + Context: Optional[InventoryItemContentContext] + + +InventoryItemList = List[InventoryItem] +KeyList = List[TagKey] + + +class LabelParameterVersionRequest(ServiceRequest): + Name: PSParameterName + ParameterVersion: Optional[PSParameterVersion] + Labels: ParameterLabelList + + +class LabelParameterVersionResult(TypedDict, total=False): + InvalidLabels: Optional[ParameterLabelList] + ParameterVersion: 
Optional[PSParameterVersion] + + +LastResourceDataSyncTime = datetime +LastSuccessfulResourceDataSyncTime = datetime + + +class ListAssociationVersionsRequest(ServiceRequest): + AssociationId: AssociationId + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListAssociationVersionsResult(TypedDict, total=False): + AssociationVersions: Optional[AssociationVersionList] + NextToken: Optional[NextToken] + + +class ListAssociationsRequest(ServiceRequest): + AssociationFilterList: Optional[AssociationFilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListAssociationsResult(TypedDict, total=False): + Associations: Optional[AssociationList] + NextToken: Optional[NextToken] + + +class ListCommandInvocationsRequest(ServiceRequest): + CommandId: Optional[CommandId] + InstanceId: Optional[InstanceId] + MaxResults: Optional[CommandMaxResults] + NextToken: Optional[NextToken] + Filters: Optional[CommandFilterList] + Details: Optional[Boolean] + + +class ListCommandInvocationsResult(TypedDict, total=False): + CommandInvocations: Optional[CommandInvocationList] + NextToken: Optional[NextToken] + + +class ListCommandsRequest(ServiceRequest): + CommandId: Optional[CommandId] + InstanceId: Optional[InstanceId] + MaxResults: Optional[CommandMaxResults] + NextToken: Optional[NextToken] + Filters: Optional[CommandFilterList] + + +class ListCommandsResult(TypedDict, total=False): + Commands: Optional[CommandList] + NextToken: Optional[NextToken] + + +class ListComplianceItemsRequest(ServiceRequest): + Filters: Optional[ComplianceStringFilterList] + ResourceIds: Optional[ComplianceResourceIdList] + ResourceTypes: Optional[ComplianceResourceTypeList] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class ListComplianceItemsResult(TypedDict, total=False): + ComplianceItems: Optional[ComplianceItemList] + NextToken: Optional[NextToken] + + +class ListComplianceSummariesRequest(ServiceRequest): + Filters: Optional[ComplianceStringFilterList] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class ListComplianceSummariesResult(TypedDict, total=False): + ComplianceSummaryItems: Optional[ComplianceSummaryItemList] + NextToken: Optional[NextToken] + + +class ListDocumentMetadataHistoryRequest(ServiceRequest): + Name: DocumentName + DocumentVersion: Optional[DocumentVersion] + Metadata: DocumentMetadataEnum + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class ListDocumentMetadataHistoryResponse(TypedDict, total=False): + Name: Optional[DocumentName] + DocumentVersion: Optional[DocumentVersion] + Author: Optional[DocumentAuthor] + Metadata: Optional[DocumentMetadataResponseInfo] + NextToken: Optional[NextToken] + + +class ListDocumentVersionsRequest(ServiceRequest): + Name: DocumentARN + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListDocumentVersionsResult(TypedDict, total=False): + DocumentVersions: Optional[DocumentVersionList] + NextToken: Optional[NextToken] + + +class ListDocumentsRequest(ServiceRequest): + DocumentFilterList: Optional[DocumentFilterList] + Filters: Optional[DocumentKeyValuesFilterList] + MaxResults: Optional[MaxResults] + NextToken: Optional[NextToken] + + +class ListDocumentsResult(TypedDict, total=False): + DocumentIdentifiers: Optional[DocumentIdentifierList] + NextToken: Optional[NextToken] + + +class ListInventoryEntriesRequest(ServiceRequest): + InstanceId: InstanceId + TypeName: InventoryItemTypeName + Filters: 
Optional[InventoryFilterList] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class ListInventoryEntriesResult(TypedDict, total=False): + TypeName: Optional[InventoryItemTypeName] + InstanceId: Optional[InstanceId] + SchemaVersion: Optional[InventoryItemSchemaVersion] + CaptureTime: Optional[InventoryItemCaptureTime] + Entries: Optional[InventoryItemEntryList] + NextToken: Optional[NextToken] + + +NodeFilterValueList = List[NodeFilterValue] + + +class NodeFilter(TypedDict, total=False): + Key: NodeFilterKey + Values: NodeFilterValueList + Type: Optional[NodeFilterOperatorType] + + +NodeFilterList = List[NodeFilter] + + +class ListNodesRequest(ServiceRequest): + SyncName: Optional[ResourceDataSyncName] + Filters: Optional[NodeFilterList] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class NodeType(TypedDict, total=False): + Instance: Optional[InstanceInfo] + + +class NodeOwnerInfo(TypedDict, total=False): + AccountId: Optional[NodeAccountId] + OrganizationalUnitId: Optional[NodeOrganizationalUnitId] + OrganizationalUnitPath: Optional[NodeOrganizationalUnitPath] + + +NodeCaptureTime = datetime + + +class Node(TypedDict, total=False): + CaptureTime: Optional[NodeCaptureTime] + Id: Optional[NodeId] + Owner: Optional[NodeOwnerInfo] + Region: Optional[NodeRegion] + NodeType: Optional[NodeType] + + +NodeList = List[Node] + + +class ListNodesResult(TypedDict, total=False): + Nodes: Optional[NodeList] + NextToken: Optional[NextToken] + + +NodeAggregatorList = List["NodeAggregator"] + + +class NodeAggregator(TypedDict, total=False): + AggregatorType: NodeAggregatorType + TypeName: NodeTypeName + AttributeName: NodeAttributeName + Aggregators: Optional[NodeAggregatorList] + + +class ListNodesSummaryRequest(ServiceRequest): + SyncName: Optional[ResourceDataSyncName] + Filters: Optional[NodeFilterList] + Aggregators: NodeAggregatorList + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +NodeSummary = Dict[AttributeName, AttributeValue] +NodeSummaryList = List[NodeSummary] + + +class ListNodesSummaryResult(TypedDict, total=False): + Summary: Optional[NodeSummaryList] + NextToken: Optional[NextToken] + + +OpsItemEventFilterValues = List[OpsItemEventFilterValue] + + +class OpsItemEventFilter(TypedDict, total=False): + Key: OpsItemEventFilterKey + Values: OpsItemEventFilterValues + Operator: OpsItemEventFilterOperator + + +OpsItemEventFilters = List[OpsItemEventFilter] + + +class ListOpsItemEventsRequest(ServiceRequest): + Filters: Optional[OpsItemEventFilters] + MaxResults: Optional[OpsItemEventMaxResults] + NextToken: Optional[String] + + +class OpsItemIdentity(TypedDict, total=False): + Arn: Optional[String] + + +class OpsItemEventSummary(TypedDict, total=False): + OpsItemId: Optional[String] + EventId: Optional[String] + Source: Optional[String] + DetailType: Optional[String] + Detail: Optional[String] + CreatedBy: Optional[OpsItemIdentity] + CreatedTime: Optional[DateTime] + + +OpsItemEventSummaries = List[OpsItemEventSummary] + + +class ListOpsItemEventsResponse(TypedDict, total=False): + NextToken: Optional[String] + Summaries: Optional[OpsItemEventSummaries] + + +OpsItemRelatedItemsFilterValues = List[OpsItemRelatedItemsFilterValue] + + +class OpsItemRelatedItemsFilter(TypedDict, total=False): + Key: OpsItemRelatedItemsFilterKey + Values: OpsItemRelatedItemsFilterValues + Operator: OpsItemRelatedItemsFilterOperator + + +OpsItemRelatedItemsFilters = List[OpsItemRelatedItemsFilter] + + +class 
ListOpsItemRelatedItemsRequest(ServiceRequest): + OpsItemId: Optional[OpsItemId] + Filters: Optional[OpsItemRelatedItemsFilters] + MaxResults: Optional[OpsItemRelatedItemsMaxResults] + NextToken: Optional[String] + + +class OpsItemRelatedItemSummary(TypedDict, total=False): + OpsItemId: Optional[OpsItemId] + AssociationId: Optional[OpsItemRelatedItemAssociationId] + ResourceType: Optional[OpsItemRelatedItemAssociationResourceType] + AssociationType: Optional[OpsItemRelatedItemAssociationType] + ResourceUri: Optional[OpsItemRelatedItemAssociationResourceUri] + CreatedBy: Optional[OpsItemIdentity] + CreatedTime: Optional[DateTime] + LastModifiedBy: Optional[OpsItemIdentity] + LastModifiedTime: Optional[DateTime] + + +OpsItemRelatedItemSummaries = List[OpsItemRelatedItemSummary] + + +class ListOpsItemRelatedItemsResponse(TypedDict, total=False): + NextToken: Optional[String] + Summaries: Optional[OpsItemRelatedItemSummaries] + + +OpsMetadataFilterValueList = List[OpsMetadataFilterValue] + + +class OpsMetadataFilter(TypedDict, total=False): + Key: OpsMetadataFilterKey + Values: OpsMetadataFilterValueList + + +OpsMetadataFilterList = List[OpsMetadataFilter] + + +class ListOpsMetadataRequest(ServiceRequest): + Filters: Optional[OpsMetadataFilterList] + MaxResults: Optional[ListOpsMetadataMaxResults] + NextToken: Optional[NextToken] + + +class OpsMetadata(TypedDict, total=False): + ResourceId: Optional[OpsMetadataResourceId] + OpsMetadataArn: Optional[OpsMetadataArn] + LastModifiedDate: Optional[DateTime] + LastModifiedUser: Optional[String] + CreationDate: Optional[DateTime] + + +OpsMetadataList = List[OpsMetadata] + + +class ListOpsMetadataResult(TypedDict, total=False): + OpsMetadataList: Optional[OpsMetadataList] + NextToken: Optional[NextToken] + + +class ListResourceComplianceSummariesRequest(ServiceRequest): + Filters: Optional[ComplianceStringFilterList] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class ResourceComplianceSummaryItem(TypedDict, total=False): + ComplianceType: Optional[ComplianceTypeName] + ResourceType: Optional[ComplianceResourceType] + ResourceId: Optional[ComplianceResourceId] + Status: Optional[ComplianceStatus] + OverallSeverity: Optional[ComplianceSeverity] + ExecutionSummary: Optional[ComplianceExecutionSummary] + CompliantSummary: Optional[CompliantSummary] + NonCompliantSummary: Optional[NonCompliantSummary] + + +ResourceComplianceSummaryItemList = List[ResourceComplianceSummaryItem] + + +class ListResourceComplianceSummariesResult(TypedDict, total=False): + ResourceComplianceSummaryItems: Optional[ResourceComplianceSummaryItemList] + NextToken: Optional[NextToken] + + +class ListResourceDataSyncRequest(ServiceRequest): + SyncType: Optional[ResourceDataSyncType] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +ResourceDataSyncCreatedTime = datetime +ResourceDataSyncLastModifiedTime = datetime + + +class ResourceDataSyncSourceWithState(TypedDict, total=False): + SourceType: Optional[ResourceDataSyncSourceType] + AwsOrganizationsSource: Optional[ResourceDataSyncAwsOrganizationsSource] + SourceRegions: Optional[ResourceDataSyncSourceRegionList] + IncludeFutureRegions: Optional[ResourceDataSyncIncludeFutureRegions] + State: Optional[ResourceDataSyncState] + EnableAllOpsDataSources: Optional[ResourceDataSyncEnableAllOpsDataSources] + + +class ResourceDataSyncItem(TypedDict, total=False): + SyncName: Optional[ResourceDataSyncName] + SyncType: Optional[ResourceDataSyncType] + SyncSource: 
Optional[ResourceDataSyncSourceWithState] + S3Destination: Optional[ResourceDataSyncS3Destination] + LastSyncTime: Optional[LastResourceDataSyncTime] + LastSuccessfulSyncTime: Optional[LastSuccessfulResourceDataSyncTime] + SyncLastModifiedTime: Optional[ResourceDataSyncLastModifiedTime] + LastStatus: Optional[LastResourceDataSyncStatus] + SyncCreatedTime: Optional[ResourceDataSyncCreatedTime] + LastSyncStatusMessage: Optional[LastResourceDataSyncMessage] + + +ResourceDataSyncItemList = List[ResourceDataSyncItem] + + +class ListResourceDataSyncResult(TypedDict, total=False): + ResourceDataSyncItems: Optional[ResourceDataSyncItemList] + NextToken: Optional[NextToken] + + +class ListTagsForResourceRequest(ServiceRequest): + ResourceType: ResourceTypeForTagging + ResourceId: ResourceId + + +class ListTagsForResourceResult(TypedDict, total=False): + TagList: Optional[TagList] + + +MetadataKeysToDeleteList = List[MetadataKey] + + +class ModifyDocumentPermissionRequest(ServiceRequest): + Name: DocumentName + PermissionType: DocumentPermissionType + AccountIdsToAdd: Optional[AccountIdList] + AccountIdsToRemove: Optional[AccountIdList] + SharedDocumentVersion: Optional[SharedDocumentVersion] + + +class ModifyDocumentPermissionResponse(TypedDict, total=False): + pass + + +OpsItemOpsDataKeysList = List[String] + + +class PutComplianceItemsRequest(ServiceRequest): + ResourceId: ComplianceResourceId + ResourceType: ComplianceResourceType + ComplianceType: ComplianceTypeName + ExecutionSummary: ComplianceExecutionSummary + Items: ComplianceItemEntryList + ItemContentHash: Optional[ComplianceItemContentHash] + UploadType: Optional[ComplianceUploadType] + + +class PutComplianceItemsResult(TypedDict, total=False): + pass + + +class PutInventoryRequest(ServiceRequest): + InstanceId: InstanceId + Items: InventoryItemList + + +class PutInventoryResult(TypedDict, total=False): + Message: Optional[PutInventoryMessage] + + +class PutParameterRequest(ServiceRequest): + Name: PSParameterName + Description: Optional[ParameterDescription] + Value: PSParameterValue + Type: Optional[ParameterType] + KeyId: Optional[ParameterKeyId] + Overwrite: Optional[Boolean] + AllowedPattern: Optional[AllowedPattern] + Tags: Optional[TagList] + Tier: Optional[ParameterTier] + Policies: Optional[ParameterPolicies] + DataType: Optional[ParameterDataType] + + +class PutParameterResult(TypedDict, total=False): + Version: Optional[PSParameterVersion] + Tier: Optional[ParameterTier] + + +class PutResourcePolicyRequest(ServiceRequest): + ResourceArn: ResourceArnString + Policy: Policy + PolicyId: Optional[PolicyId] + PolicyHash: Optional[PolicyHash] + + +class PutResourcePolicyResponse(TypedDict, total=False): + PolicyId: Optional[PolicyId] + PolicyHash: Optional[PolicyHash] + + +class RegisterDefaultPatchBaselineRequest(ServiceRequest): + BaselineId: BaselineId + + +class RegisterDefaultPatchBaselineResult(TypedDict, total=False): + BaselineId: Optional[BaselineId] + + +class RegisterPatchBaselineForPatchGroupRequest(ServiceRequest): + BaselineId: BaselineId + PatchGroup: PatchGroup + + +class RegisterPatchBaselineForPatchGroupResult(TypedDict, total=False): + BaselineId: Optional[BaselineId] + PatchGroup: Optional[PatchGroup] + + +class RegisterTargetWithMaintenanceWindowRequest(ServiceRequest): + WindowId: MaintenanceWindowId + ResourceType: MaintenanceWindowResourceType + Targets: Targets + OwnerInformation: Optional[OwnerInformation] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + 
ClientToken: Optional[ClientToken] + + +class RegisterTargetWithMaintenanceWindowResult(TypedDict, total=False): + WindowTargetId: Optional[MaintenanceWindowTargetId] + + +class RegisterTaskWithMaintenanceWindowRequest(ServiceRequest): + WindowId: MaintenanceWindowId + Targets: Optional[Targets] + TaskArn: MaintenanceWindowTaskArn + ServiceRoleArn: Optional[ServiceRole] + TaskType: MaintenanceWindowTaskType + TaskParameters: Optional[MaintenanceWindowTaskParameters] + TaskInvocationParameters: Optional[MaintenanceWindowTaskInvocationParameters] + Priority: Optional[MaintenanceWindowTaskPriority] + MaxConcurrency: Optional[MaxConcurrency] + MaxErrors: Optional[MaxErrors] + LoggingInfo: Optional[LoggingInfo] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + ClientToken: Optional[ClientToken] + CutoffBehavior: Optional[MaintenanceWindowTaskCutoffBehavior] + AlarmConfiguration: Optional[AlarmConfiguration] + + +class RegisterTaskWithMaintenanceWindowResult(TypedDict, total=False): + WindowTaskId: Optional[MaintenanceWindowTaskId] + + +class RemoveTagsFromResourceRequest(ServiceRequest): + ResourceType: ResourceTypeForTagging + ResourceId: ResourceId + TagKeys: KeyList + + +class RemoveTagsFromResourceResult(TypedDict, total=False): + pass + + +class ResetServiceSettingRequest(ServiceRequest): + SettingId: ServiceSettingId + + +class ResetServiceSettingResult(TypedDict, total=False): + ServiceSetting: Optional[ServiceSetting] + + +class ResumeSessionRequest(ServiceRequest): + SessionId: SessionId + + +class ResumeSessionResponse(TypedDict, total=False): + SessionId: Optional[SessionId] + TokenValue: Optional[TokenValue] + StreamUrl: Optional[StreamUrl] + + +class SendAutomationSignalRequest(ServiceRequest): + AutomationExecutionId: AutomationExecutionId + SignalType: SignalType + Payload: Optional[AutomationParameterMap] + + +class SendAutomationSignalResult(TypedDict, total=False): + pass + + +class SendCommandRequest(ServiceRequest): + InstanceIds: Optional[InstanceIdList] + Targets: Optional[Targets] + DocumentName: DocumentARN + DocumentVersion: Optional[DocumentVersion] + DocumentHash: Optional[DocumentHash] + DocumentHashType: Optional[DocumentHashType] + TimeoutSeconds: Optional[TimeoutSeconds] + Comment: Optional[Comment] + Parameters: Optional[Parameters] + OutputS3Region: Optional[S3Region] + OutputS3BucketName: Optional[S3BucketName] + OutputS3KeyPrefix: Optional[S3KeyPrefix] + MaxConcurrency: Optional[MaxConcurrency] + MaxErrors: Optional[MaxErrors] + ServiceRoleArn: Optional[ServiceRole] + NotificationConfig: Optional[NotificationConfig] + CloudWatchOutputConfig: Optional[CloudWatchOutputConfig] + AlarmConfiguration: Optional[AlarmConfiguration] + + +class SendCommandResult(TypedDict, total=False): + Command: Optional[Command] + + +SessionManagerParameterValueList = List[SessionManagerParameterValue] +SessionManagerParameters = Dict[SessionManagerParameterName, SessionManagerParameterValueList] + + +class StartAccessRequestRequest(ServiceRequest): + Reason: String1to256 + Targets: Targets + Tags: Optional[TagList] + + +class StartAccessRequestResponse(TypedDict, total=False): + AccessRequestId: Optional[AccessRequestId] + + +class StartAssociationsOnceRequest(ServiceRequest): + AssociationIds: AssociationIdList + + +class StartAssociationsOnceResult(TypedDict, total=False): + pass + + +class StartAutomationExecutionRequest(ServiceRequest): + DocumentName: DocumentARN + DocumentVersion: Optional[DocumentVersion] + Parameters: 
Optional[AutomationParameterMap] + ClientToken: Optional[IdempotencyToken] + Mode: Optional[ExecutionMode] + TargetParameterName: Optional[AutomationParameterKey] + Targets: Optional[Targets] + TargetMaps: Optional[TargetMaps] + MaxConcurrency: Optional[MaxConcurrency] + MaxErrors: Optional[MaxErrors] + TargetLocations: Optional[TargetLocations] + Tags: Optional[TagList] + AlarmConfiguration: Optional[AlarmConfiguration] + TargetLocationsURL: Optional[TargetLocationsURL] + + +class StartAutomationExecutionResult(TypedDict, total=False): + AutomationExecutionId: Optional[AutomationExecutionId] + + +class StartChangeRequestExecutionRequest(ServiceRequest): + ScheduledTime: Optional[DateTime] + DocumentName: DocumentARN + DocumentVersion: Optional[DocumentVersion] + Parameters: Optional[AutomationParameterMap] + ChangeRequestName: Optional[ChangeRequestName] + ClientToken: Optional[IdempotencyToken] + AutoApprove: Optional[Boolean] + Runbooks: Runbooks + Tags: Optional[TagList] + ScheduledEndTime: Optional[DateTime] + ChangeDetails: Optional[ChangeDetailsValue] + + +class StartChangeRequestExecutionResult(TypedDict, total=False): + AutomationExecutionId: Optional[AutomationExecutionId] + + +class StartExecutionPreviewRequest(ServiceRequest): + DocumentName: DocumentName + DocumentVersion: Optional[DocumentVersion] + ExecutionInputs: Optional[ExecutionInputs] + + +class StartExecutionPreviewResponse(TypedDict, total=False): + ExecutionPreviewId: Optional[ExecutionPreviewId] + + +class StartSessionRequest(ServiceRequest): + Target: SessionTarget + DocumentName: Optional[DocumentARN] + Reason: Optional[SessionReason] + Parameters: Optional[SessionManagerParameters] + + +class StartSessionResponse(TypedDict, total=False): + SessionId: Optional[SessionId] + TokenValue: Optional[TokenValue] + StreamUrl: Optional[StreamUrl] + + +class StopAutomationExecutionRequest(ServiceRequest): + AutomationExecutionId: AutomationExecutionId + Type: Optional[StopType] + + +class StopAutomationExecutionResult(TypedDict, total=False): + pass + + +class TerminateSessionRequest(ServiceRequest): + SessionId: SessionId + + +class TerminateSessionResponse(TypedDict, total=False): + SessionId: Optional[SessionId] + + +class UnlabelParameterVersionRequest(ServiceRequest): + Name: PSParameterName + ParameterVersion: PSParameterVersion + Labels: ParameterLabelList + + +class UnlabelParameterVersionResult(TypedDict, total=False): + RemovedLabels: Optional[ParameterLabelList] + InvalidLabels: Optional[ParameterLabelList] + + +class UpdateAssociationRequest(ServiceRequest): + AssociationId: AssociationId + Parameters: Optional[Parameters] + DocumentVersion: Optional[DocumentVersion] + ScheduleExpression: Optional[ScheduleExpression] + OutputLocation: Optional[InstanceAssociationOutputLocation] + Name: Optional[DocumentARN] + Targets: Optional[Targets] + AssociationName: Optional[AssociationName] + AssociationVersion: Optional[AssociationVersion] + AutomationTargetParameterName: Optional[AutomationTargetParameterName] + MaxErrors: Optional[MaxErrors] + MaxConcurrency: Optional[MaxConcurrency] + ComplianceSeverity: Optional[AssociationComplianceSeverity] + SyncCompliance: Optional[AssociationSyncCompliance] + ApplyOnlyAtCronInterval: Optional[ApplyOnlyAtCronInterval] + CalendarNames: Optional[CalendarNameOrARNList] + TargetLocations: Optional[TargetLocations] + ScheduleOffset: Optional[ScheduleOffset] + Duration: Optional[Duration] + TargetMaps: Optional[TargetMaps] + AlarmConfiguration: Optional[AlarmConfiguration] + + +class 
UpdateAssociationResult(TypedDict, total=False): + AssociationDescription: Optional[AssociationDescription] + + +class UpdateAssociationStatusRequest(ServiceRequest): + Name: DocumentARN + InstanceId: InstanceId + AssociationStatus: AssociationStatus + + +class UpdateAssociationStatusResult(TypedDict, total=False): + AssociationDescription: Optional[AssociationDescription] + + +class UpdateDocumentDefaultVersionRequest(ServiceRequest): + Name: DocumentName + DocumentVersion: DocumentVersionNumber + + +class UpdateDocumentDefaultVersionResult(TypedDict, total=False): + Description: Optional[DocumentDefaultVersionDescription] + + +class UpdateDocumentMetadataRequest(ServiceRequest): + Name: DocumentName + DocumentVersion: Optional[DocumentVersion] + DocumentReviews: DocumentReviews + + +class UpdateDocumentMetadataResponse(TypedDict, total=False): + pass + + +class UpdateDocumentRequest(ServiceRequest): + Content: DocumentContent + Attachments: Optional[AttachmentsSourceList] + Name: DocumentName + DisplayName: Optional[DocumentDisplayName] + VersionName: Optional[DocumentVersionName] + DocumentVersion: Optional[DocumentVersion] + DocumentFormat: Optional[DocumentFormat] + TargetType: Optional[TargetType] + + +class UpdateDocumentResult(TypedDict, total=False): + DocumentDescription: Optional[DocumentDescription] + + +class UpdateMaintenanceWindowRequest(ServiceRequest): + WindowId: MaintenanceWindowId + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + StartDate: Optional[MaintenanceWindowStringDateTime] + EndDate: Optional[MaintenanceWindowStringDateTime] + Schedule: Optional[MaintenanceWindowSchedule] + ScheduleTimezone: Optional[MaintenanceWindowTimezone] + ScheduleOffset: Optional[MaintenanceWindowOffset] + Duration: Optional[MaintenanceWindowDurationHours] + Cutoff: Optional[MaintenanceWindowCutoff] + AllowUnassociatedTargets: Optional[MaintenanceWindowAllowUnassociatedTargets] + Enabled: Optional[MaintenanceWindowEnabled] + Replace: Optional[Boolean] + + +class UpdateMaintenanceWindowResult(TypedDict, total=False): + WindowId: Optional[MaintenanceWindowId] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + StartDate: Optional[MaintenanceWindowStringDateTime] + EndDate: Optional[MaintenanceWindowStringDateTime] + Schedule: Optional[MaintenanceWindowSchedule] + ScheduleTimezone: Optional[MaintenanceWindowTimezone] + ScheduleOffset: Optional[MaintenanceWindowOffset] + Duration: Optional[MaintenanceWindowDurationHours] + Cutoff: Optional[MaintenanceWindowCutoff] + AllowUnassociatedTargets: Optional[MaintenanceWindowAllowUnassociatedTargets] + Enabled: Optional[MaintenanceWindowEnabled] + + +class UpdateMaintenanceWindowTargetRequest(ServiceRequest): + WindowId: MaintenanceWindowId + WindowTargetId: MaintenanceWindowTargetId + Targets: Optional[Targets] + OwnerInformation: Optional[OwnerInformation] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + Replace: Optional[Boolean] + + +class UpdateMaintenanceWindowTargetResult(TypedDict, total=False): + WindowId: Optional[MaintenanceWindowId] + WindowTargetId: Optional[MaintenanceWindowTargetId] + Targets: Optional[Targets] + OwnerInformation: Optional[OwnerInformation] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + + +class UpdateMaintenanceWindowTaskRequest(ServiceRequest): + WindowId: MaintenanceWindowId + WindowTaskId: MaintenanceWindowTaskId + Targets: 
Optional[Targets] + TaskArn: Optional[MaintenanceWindowTaskArn] + ServiceRoleArn: Optional[ServiceRole] + TaskParameters: Optional[MaintenanceWindowTaskParameters] + TaskInvocationParameters: Optional[MaintenanceWindowTaskInvocationParameters] + Priority: Optional[MaintenanceWindowTaskPriority] + MaxConcurrency: Optional[MaxConcurrency] + MaxErrors: Optional[MaxErrors] + LoggingInfo: Optional[LoggingInfo] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + Replace: Optional[Boolean] + CutoffBehavior: Optional[MaintenanceWindowTaskCutoffBehavior] + AlarmConfiguration: Optional[AlarmConfiguration] + + +class UpdateMaintenanceWindowTaskResult(TypedDict, total=False): + WindowId: Optional[MaintenanceWindowId] + WindowTaskId: Optional[MaintenanceWindowTaskId] + Targets: Optional[Targets] + TaskArn: Optional[MaintenanceWindowTaskArn] + ServiceRoleArn: Optional[ServiceRole] + TaskParameters: Optional[MaintenanceWindowTaskParameters] + TaskInvocationParameters: Optional[MaintenanceWindowTaskInvocationParameters] + Priority: Optional[MaintenanceWindowTaskPriority] + MaxConcurrency: Optional[MaxConcurrency] + MaxErrors: Optional[MaxErrors] + LoggingInfo: Optional[LoggingInfo] + Name: Optional[MaintenanceWindowName] + Description: Optional[MaintenanceWindowDescription] + CutoffBehavior: Optional[MaintenanceWindowTaskCutoffBehavior] + AlarmConfiguration: Optional[AlarmConfiguration] + + +class UpdateManagedInstanceRoleRequest(ServiceRequest): + InstanceId: ManagedInstanceId + IamRole: IamRole + + +class UpdateManagedInstanceRoleResult(TypedDict, total=False): + pass + + +class UpdateOpsItemRequest(ServiceRequest): + Description: Optional[OpsItemDescription] + OperationalData: Optional[OpsItemOperationalData] + OperationalDataToDelete: Optional[OpsItemOpsDataKeysList] + Notifications: Optional[OpsItemNotifications] + Priority: Optional[OpsItemPriority] + RelatedOpsItems: Optional[RelatedOpsItems] + Status: Optional[OpsItemStatus] + OpsItemId: OpsItemId + Title: Optional[OpsItemTitle] + Category: Optional[OpsItemCategory] + Severity: Optional[OpsItemSeverity] + ActualStartTime: Optional[DateTime] + ActualEndTime: Optional[DateTime] + PlannedStartTime: Optional[DateTime] + PlannedEndTime: Optional[DateTime] + OpsItemArn: Optional[OpsItemArn] + + +class UpdateOpsItemResponse(TypedDict, total=False): + pass + + +class UpdateOpsMetadataRequest(ServiceRequest): + OpsMetadataArn: OpsMetadataArn + MetadataToUpdate: Optional[MetadataMap] + KeysToDelete: Optional[MetadataKeysToDeleteList] + + +class UpdateOpsMetadataResult(TypedDict, total=False): + OpsMetadataArn: Optional[OpsMetadataArn] + + +class UpdatePatchBaselineRequest(ServiceRequest): + BaselineId: BaselineId + Name: Optional[BaselineName] + GlobalFilters: Optional[PatchFilterGroup] + ApprovalRules: Optional[PatchRuleGroup] + ApprovedPatches: Optional[PatchIdList] + ApprovedPatchesComplianceLevel: Optional[PatchComplianceLevel] + ApprovedPatchesEnableNonSecurity: Optional[Boolean] + RejectedPatches: Optional[PatchIdList] + RejectedPatchesAction: Optional[PatchAction] + Description: Optional[BaselineDescription] + Sources: Optional[PatchSourceList] + AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus] + Replace: Optional[Boolean] + + +class UpdatePatchBaselineResult(TypedDict, total=False): + BaselineId: Optional[BaselineId] + Name: Optional[BaselineName] + OperatingSystem: Optional[OperatingSystem] + GlobalFilters: Optional[PatchFilterGroup] + ApprovalRules: Optional[PatchRuleGroup] + 
ApprovedPatches: Optional[PatchIdList] + ApprovedPatchesComplianceLevel: Optional[PatchComplianceLevel] + ApprovedPatchesEnableNonSecurity: Optional[Boolean] + RejectedPatches: Optional[PatchIdList] + RejectedPatchesAction: Optional[PatchAction] + CreatedDate: Optional[DateTime] + ModifiedDate: Optional[DateTime] + Description: Optional[BaselineDescription] + Sources: Optional[PatchSourceList] + AvailableSecurityUpdatesComplianceStatus: Optional[PatchComplianceStatus] + + +class UpdateResourceDataSyncRequest(ServiceRequest): + SyncName: ResourceDataSyncName + SyncType: ResourceDataSyncType + SyncSource: ResourceDataSyncSource + + +class UpdateResourceDataSyncResult(TypedDict, total=False): + pass + + +class UpdateServiceSettingRequest(ServiceRequest): + SettingId: ServiceSettingId + SettingValue: ServiceSettingValue + + +class UpdateServiceSettingResult(TypedDict, total=False): + pass + + +class SsmApi: + service = "ssm" + version = "2014-11-06" + + @handler("AddTagsToResource") + def add_tags_to_resource( + self, + context: RequestContext, + resource_type: ResourceTypeForTagging, + resource_id: ResourceId, + tags: TagList, + **kwargs, + ) -> AddTagsToResourceResult: + raise NotImplementedError + + @handler("AssociateOpsItemRelatedItem") + def associate_ops_item_related_item( + self, + context: RequestContext, + ops_item_id: OpsItemId, + association_type: OpsItemRelatedItemAssociationType, + resource_type: OpsItemRelatedItemAssociationResourceType, + resource_uri: OpsItemRelatedItemAssociationResourceUri, + **kwargs, + ) -> AssociateOpsItemRelatedItemResponse: + raise NotImplementedError + + @handler("CancelCommand") + def cancel_command( + self, + context: RequestContext, + command_id: CommandId, + instance_ids: InstanceIdList | None = None, + **kwargs, + ) -> CancelCommandResult: + raise NotImplementedError + + @handler("CancelMaintenanceWindowExecution") + def cancel_maintenance_window_execution( + self, context: RequestContext, window_execution_id: MaintenanceWindowExecutionId, **kwargs + ) -> CancelMaintenanceWindowExecutionResult: + raise NotImplementedError + + @handler("CreateActivation") + def create_activation( + self, + context: RequestContext, + iam_role: IamRole, + description: ActivationDescription | None = None, + default_instance_name: DefaultInstanceName | None = None, + registration_limit: RegistrationLimit | None = None, + expiration_date: ExpirationDate | None = None, + tags: TagList | None = None, + registration_metadata: RegistrationMetadataList | None = None, + **kwargs, + ) -> CreateActivationResult: + raise NotImplementedError + + @handler("CreateAssociation") + def create_association( + self, + context: RequestContext, + name: DocumentARN, + document_version: DocumentVersion | None = None, + instance_id: InstanceId | None = None, + parameters: Parameters | None = None, + targets: Targets | None = None, + schedule_expression: ScheduleExpression | None = None, + output_location: InstanceAssociationOutputLocation | None = None, + association_name: AssociationName | None = None, + automation_target_parameter_name: AutomationTargetParameterName | None = None, + max_errors: MaxErrors | None = None, + max_concurrency: MaxConcurrency | None = None, + compliance_severity: AssociationComplianceSeverity | None = None, + sync_compliance: AssociationSyncCompliance | None = None, + apply_only_at_cron_interval: ApplyOnlyAtCronInterval | None = None, + calendar_names: CalendarNameOrARNList | None = None, + target_locations: TargetLocations | None = None, + schedule_offset: 
ScheduleOffset | None = None, + duration: Duration | None = None, + target_maps: TargetMaps | None = None, + tags: TagList | None = None, + alarm_configuration: AlarmConfiguration | None = None, + **kwargs, + ) -> CreateAssociationResult: + raise NotImplementedError + + @handler("CreateAssociationBatch") + def create_association_batch( + self, context: RequestContext, entries: CreateAssociationBatchRequestEntries, **kwargs + ) -> CreateAssociationBatchResult: + raise NotImplementedError + + @handler("CreateDocument") + def create_document( + self, + context: RequestContext, + content: DocumentContent, + name: DocumentName, + requires: DocumentRequiresList | None = None, + attachments: AttachmentsSourceList | None = None, + display_name: DocumentDisplayName | None = None, + version_name: DocumentVersionName | None = None, + document_type: DocumentType | None = None, + document_format: DocumentFormat | None = None, + target_type: TargetType | None = None, + tags: TagList | None = None, + **kwargs, + ) -> CreateDocumentResult: + raise NotImplementedError + + @handler("CreateMaintenanceWindow") + def create_maintenance_window( + self, + context: RequestContext, + name: MaintenanceWindowName, + schedule: MaintenanceWindowSchedule, + duration: MaintenanceWindowDurationHours, + cutoff: MaintenanceWindowCutoff, + allow_unassociated_targets: MaintenanceWindowAllowUnassociatedTargets, + description: MaintenanceWindowDescription | None = None, + start_date: MaintenanceWindowStringDateTime | None = None, + end_date: MaintenanceWindowStringDateTime | None = None, + schedule_timezone: MaintenanceWindowTimezone | None = None, + schedule_offset: MaintenanceWindowOffset | None = None, + client_token: ClientToken | None = None, + tags: TagList | None = None, + **kwargs, + ) -> CreateMaintenanceWindowResult: + raise NotImplementedError + + @handler("CreateOpsItem") + def create_ops_item( + self, + context: RequestContext, + description: OpsItemDescription, + source: OpsItemSource, + title: OpsItemTitle, + ops_item_type: OpsItemType | None = None, + operational_data: OpsItemOperationalData | None = None, + notifications: OpsItemNotifications | None = None, + priority: OpsItemPriority | None = None, + related_ops_items: RelatedOpsItems | None = None, + tags: TagList | None = None, + category: OpsItemCategory | None = None, + severity: OpsItemSeverity | None = None, + actual_start_time: DateTime | None = None, + actual_end_time: DateTime | None = None, + planned_start_time: DateTime | None = None, + planned_end_time: DateTime | None = None, + account_id: OpsItemAccountId | None = None, + **kwargs, + ) -> CreateOpsItemResponse: + raise NotImplementedError + + @handler("CreateOpsMetadata") + def create_ops_metadata( + self, + context: RequestContext, + resource_id: OpsMetadataResourceId, + metadata: MetadataMap | None = None, + tags: TagList | None = None, + **kwargs, + ) -> CreateOpsMetadataResult: + raise NotImplementedError + + @handler("CreatePatchBaseline") + def create_patch_baseline( + self, + context: RequestContext, + name: BaselineName, + operating_system: OperatingSystem | None = None, + global_filters: PatchFilterGroup | None = None, + approval_rules: PatchRuleGroup | None = None, + approved_patches: PatchIdList | None = None, + approved_patches_compliance_level: PatchComplianceLevel | None = None, + approved_patches_enable_non_security: Boolean | None = None, + rejected_patches: PatchIdList | None = None, + rejected_patches_action: PatchAction | None = None, + description: BaselineDescription | None 
= None, + sources: PatchSourceList | None = None, + available_security_updates_compliance_status: PatchComplianceStatus | None = None, + client_token: ClientToken | None = None, + tags: TagList | None = None, + **kwargs, + ) -> CreatePatchBaselineResult: + raise NotImplementedError + + @handler("CreateResourceDataSync") + def create_resource_data_sync( + self, + context: RequestContext, + sync_name: ResourceDataSyncName, + s3_destination: ResourceDataSyncS3Destination | None = None, + sync_type: ResourceDataSyncType | None = None, + sync_source: ResourceDataSyncSource | None = None, + **kwargs, + ) -> CreateResourceDataSyncResult: + raise NotImplementedError + + @handler("DeleteActivation") + def delete_activation( + self, context: RequestContext, activation_id: ActivationId, **kwargs + ) -> DeleteActivationResult: + raise NotImplementedError + + @handler("DeleteAssociation") + def delete_association( + self, + context: RequestContext, + name: DocumentARN | None = None, + instance_id: InstanceId | None = None, + association_id: AssociationId | None = None, + **kwargs, + ) -> DeleteAssociationResult: + raise NotImplementedError + + @handler("DeleteDocument") + def delete_document( + self, + context: RequestContext, + name: DocumentName, + document_version: DocumentVersion | None = None, + version_name: DocumentVersionName | None = None, + force: Boolean | None = None, + **kwargs, + ) -> DeleteDocumentResult: + raise NotImplementedError + + @handler("DeleteInventory") + def delete_inventory( + self, + context: RequestContext, + type_name: InventoryItemTypeName, + schema_delete_option: InventorySchemaDeleteOption | None = None, + dry_run: DryRun | None = None, + client_token: UUID | None = None, + **kwargs, + ) -> DeleteInventoryResult: + raise NotImplementedError + + @handler("DeleteMaintenanceWindow") + def delete_maintenance_window( + self, context: RequestContext, window_id: MaintenanceWindowId, **kwargs + ) -> DeleteMaintenanceWindowResult: + raise NotImplementedError + + @handler("DeleteOpsItem") + def delete_ops_item( + self, context: RequestContext, ops_item_id: OpsItemId, **kwargs + ) -> DeleteOpsItemResponse: + raise NotImplementedError + + @handler("DeleteOpsMetadata") + def delete_ops_metadata( + self, context: RequestContext, ops_metadata_arn: OpsMetadataArn, **kwargs + ) -> DeleteOpsMetadataResult: + raise NotImplementedError + + @handler("DeleteParameter") + def delete_parameter( + self, context: RequestContext, name: PSParameterName, **kwargs + ) -> DeleteParameterResult: + raise NotImplementedError + + @handler("DeleteParameters") + def delete_parameters( + self, context: RequestContext, names: ParameterNameList, **kwargs + ) -> DeleteParametersResult: + raise NotImplementedError + + @handler("DeletePatchBaseline") + def delete_patch_baseline( + self, context: RequestContext, baseline_id: BaselineId, **kwargs + ) -> DeletePatchBaselineResult: + raise NotImplementedError + + @handler("DeleteResourceDataSync") + def delete_resource_data_sync( + self, + context: RequestContext, + sync_name: ResourceDataSyncName, + sync_type: ResourceDataSyncType | None = None, + **kwargs, + ) -> DeleteResourceDataSyncResult: + raise NotImplementedError + + @handler("DeleteResourcePolicy") + def delete_resource_policy( + self, + context: RequestContext, + resource_arn: ResourceArnString, + policy_id: PolicyId, + policy_hash: PolicyHash, + **kwargs, + ) -> DeleteResourcePolicyResponse: + raise NotImplementedError + + @handler("DeregisterManagedInstance") + def deregister_managed_instance( + self, 
context: RequestContext, instance_id: ManagedInstanceId, **kwargs + ) -> DeregisterManagedInstanceResult: + raise NotImplementedError + + @handler("DeregisterPatchBaselineForPatchGroup") + def deregister_patch_baseline_for_patch_group( + self, context: RequestContext, baseline_id: BaselineId, patch_group: PatchGroup, **kwargs + ) -> DeregisterPatchBaselineForPatchGroupResult: + raise NotImplementedError + + @handler("DeregisterTargetFromMaintenanceWindow") + def deregister_target_from_maintenance_window( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + window_target_id: MaintenanceWindowTargetId, + safe: Boolean | None = None, + **kwargs, + ) -> DeregisterTargetFromMaintenanceWindowResult: + raise NotImplementedError + + @handler("DeregisterTaskFromMaintenanceWindow") + def deregister_task_from_maintenance_window( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + window_task_id: MaintenanceWindowTaskId, + **kwargs, + ) -> DeregisterTaskFromMaintenanceWindowResult: + raise NotImplementedError + + @handler("DescribeActivations") + def describe_activations( + self, + context: RequestContext, + filters: DescribeActivationsFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeActivationsResult: + raise NotImplementedError + + @handler("DescribeAssociation") + def describe_association( + self, + context: RequestContext, + name: DocumentARN | None = None, + instance_id: InstanceId | None = None, + association_id: AssociationId | None = None, + association_version: AssociationVersion | None = None, + **kwargs, + ) -> DescribeAssociationResult: + raise NotImplementedError + + @handler("DescribeAssociationExecutionTargets") + def describe_association_execution_targets( + self, + context: RequestContext, + association_id: AssociationId, + execution_id: AssociationExecutionId, + filters: AssociationExecutionTargetsFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeAssociationExecutionTargetsResult: + raise NotImplementedError + + @handler("DescribeAssociationExecutions") + def describe_association_executions( + self, + context: RequestContext, + association_id: AssociationId, + filters: AssociationExecutionFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeAssociationExecutionsResult: + raise NotImplementedError + + @handler("DescribeAutomationExecutions") + def describe_automation_executions( + self, + context: RequestContext, + filters: AutomationExecutionFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeAutomationExecutionsResult: + raise NotImplementedError + + @handler("DescribeAutomationStepExecutions") + def describe_automation_step_executions( + self, + context: RequestContext, + automation_execution_id: AutomationExecutionId, + filters: StepExecutionFilterList | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + reverse_order: Boolean | None = None, + **kwargs, + ) -> DescribeAutomationStepExecutionsResult: + raise NotImplementedError + + @handler("DescribeAvailablePatches") + def describe_available_patches( + self, + context: RequestContext, + filters: PatchOrchestratorFilterList | None = None, + max_results: PatchBaselineMaxResults | None = None, + next_token: NextToken | None = None, 
+ **kwargs, + ) -> DescribeAvailablePatchesResult: + raise NotImplementedError + + @handler("DescribeDocument") + def describe_document( + self, + context: RequestContext, + name: DocumentARN, + document_version: DocumentVersion | None = None, + version_name: DocumentVersionName | None = None, + **kwargs, + ) -> DescribeDocumentResult: + raise NotImplementedError + + @handler("DescribeDocumentPermission") + def describe_document_permission( + self, + context: RequestContext, + name: DocumentName, + permission_type: DocumentPermissionType, + max_results: DocumentPermissionMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeDocumentPermissionResponse: + raise NotImplementedError + + @handler("DescribeEffectiveInstanceAssociations") + def describe_effective_instance_associations( + self, + context: RequestContext, + instance_id: InstanceId, + max_results: EffectiveInstanceAssociationMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeEffectiveInstanceAssociationsResult: + raise NotImplementedError + + @handler("DescribeEffectivePatchesForPatchBaseline") + def describe_effective_patches_for_patch_baseline( + self, + context: RequestContext, + baseline_id: BaselineId, + max_results: PatchBaselineMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeEffectivePatchesForPatchBaselineResult: + raise NotImplementedError + + @handler("DescribeInstanceAssociationsStatus") + def describe_instance_associations_status( + self, + context: RequestContext, + instance_id: InstanceId, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeInstanceAssociationsStatusResult: + raise NotImplementedError + + @handler("DescribeInstanceInformation") + def describe_instance_information( + self, + context: RequestContext, + instance_information_filter_list: InstanceInformationFilterList | None = None, + filters: InstanceInformationStringFilterList | None = None, + max_results: MaxResultsEC2Compatible | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeInstanceInformationResult: + raise NotImplementedError + + @handler("DescribeInstancePatchStates") + def describe_instance_patch_states( + self, + context: RequestContext, + instance_ids: InstanceIdList, + next_token: NextToken | None = None, + max_results: PatchComplianceMaxResults | None = None, + **kwargs, + ) -> DescribeInstancePatchStatesResult: + raise NotImplementedError + + @handler("DescribeInstancePatchStatesForPatchGroup") + def describe_instance_patch_states_for_patch_group( + self, + context: RequestContext, + patch_group: PatchGroup, + filters: InstancePatchStateFilterList | None = None, + next_token: NextToken | None = None, + max_results: PatchComplianceMaxResults | None = None, + **kwargs, + ) -> DescribeInstancePatchStatesForPatchGroupResult: + raise NotImplementedError + + @handler("DescribeInstancePatches") + def describe_instance_patches( + self, + context: RequestContext, + instance_id: InstanceId, + filters: PatchOrchestratorFilterList | None = None, + next_token: NextToken | None = None, + max_results: PatchComplianceMaxResults | None = None, + **kwargs, + ) -> DescribeInstancePatchesResult: + raise NotImplementedError + + @handler("DescribeInstanceProperties") + def describe_instance_properties( + self, + context: RequestContext, + instance_property_filter_list: InstancePropertyFilterList | None = None, + filters_with_operator: 
InstancePropertyStringFilterList | None = None, + max_results: DescribeInstancePropertiesMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeInstancePropertiesResult: + raise NotImplementedError + + @handler("DescribeInventoryDeletions") + def describe_inventory_deletions( + self, + context: RequestContext, + deletion_id: UUID | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> DescribeInventoryDeletionsResult: + raise NotImplementedError + + @handler("DescribeMaintenanceWindowExecutionTaskInvocations") + def describe_maintenance_window_execution_task_invocations( + self, + context: RequestContext, + window_execution_id: MaintenanceWindowExecutionId, + task_id: MaintenanceWindowExecutionTaskId, + filters: MaintenanceWindowFilterList | None = None, + max_results: MaintenanceWindowMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeMaintenanceWindowExecutionTaskInvocationsResult: + raise NotImplementedError + + @handler("DescribeMaintenanceWindowExecutionTasks") + def describe_maintenance_window_execution_tasks( + self, + context: RequestContext, + window_execution_id: MaintenanceWindowExecutionId, + filters: MaintenanceWindowFilterList | None = None, + max_results: MaintenanceWindowMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeMaintenanceWindowExecutionTasksResult: + raise NotImplementedError + + @handler("DescribeMaintenanceWindowExecutions") + def describe_maintenance_window_executions( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + filters: MaintenanceWindowFilterList | None = None, + max_results: MaintenanceWindowMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeMaintenanceWindowExecutionsResult: + raise NotImplementedError + + @handler("DescribeMaintenanceWindowSchedule") + def describe_maintenance_window_schedule( + self, + context: RequestContext, + window_id: MaintenanceWindowId | None = None, + targets: Targets | None = None, + resource_type: MaintenanceWindowResourceType | None = None, + filters: PatchOrchestratorFilterList | None = None, + max_results: MaintenanceWindowSearchMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeMaintenanceWindowScheduleResult: + raise NotImplementedError + + @handler("DescribeMaintenanceWindowTargets") + def describe_maintenance_window_targets( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + filters: MaintenanceWindowFilterList | None = None, + max_results: MaintenanceWindowMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeMaintenanceWindowTargetsResult: + raise NotImplementedError + + @handler("DescribeMaintenanceWindowTasks") + def describe_maintenance_window_tasks( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + filters: MaintenanceWindowFilterList | None = None, + max_results: MaintenanceWindowMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeMaintenanceWindowTasksResult: + raise NotImplementedError + + @handler("DescribeMaintenanceWindows") + def describe_maintenance_windows( + self, + context: RequestContext, + filters: MaintenanceWindowFilterList | None = None, + max_results: MaintenanceWindowMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeMaintenanceWindowsResult: + raise 
NotImplementedError + + @handler("DescribeMaintenanceWindowsForTarget") + def describe_maintenance_windows_for_target( + self, + context: RequestContext, + targets: Targets, + resource_type: MaintenanceWindowResourceType, + max_results: MaintenanceWindowSearchMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribeMaintenanceWindowsForTargetResult: + raise NotImplementedError + + @handler("DescribeOpsItems") + def describe_ops_items( + self, + context: RequestContext, + ops_item_filters: OpsItemFilters | None = None, + max_results: OpsItemMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> DescribeOpsItemsResponse: + raise NotImplementedError + + @handler("DescribeParameters") + def describe_parameters( + self, + context: RequestContext, + filters: ParametersFilterList | None = None, + parameter_filters: ParameterStringFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + shared: Boolean | None = None, + **kwargs, + ) -> DescribeParametersResult: + raise NotImplementedError + + @handler("DescribePatchBaselines") + def describe_patch_baselines( + self, + context: RequestContext, + filters: PatchOrchestratorFilterList | None = None, + max_results: PatchBaselineMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribePatchBaselinesResult: + raise NotImplementedError + + @handler("DescribePatchGroupState") + def describe_patch_group_state( + self, context: RequestContext, patch_group: PatchGroup, **kwargs + ) -> DescribePatchGroupStateResult: + raise NotImplementedError + + @handler("DescribePatchGroups") + def describe_patch_groups( + self, + context: RequestContext, + max_results: PatchBaselineMaxResults | None = None, + filters: PatchOrchestratorFilterList | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribePatchGroupsResult: + raise NotImplementedError + + @handler("DescribePatchProperties") + def describe_patch_properties( + self, + context: RequestContext, + operating_system: OperatingSystem, + property: PatchProperty, + patch_set: PatchSet | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> DescribePatchPropertiesResult: + raise NotImplementedError + + @handler("DescribeSessions") + def describe_sessions( + self, + context: RequestContext, + state: SessionState, + max_results: SessionMaxResults | None = None, + next_token: NextToken | None = None, + filters: SessionFilterList | None = None, + **kwargs, + ) -> DescribeSessionsResponse: + raise NotImplementedError + + @handler("DisassociateOpsItemRelatedItem") + def disassociate_ops_item_related_item( + self, + context: RequestContext, + ops_item_id: OpsItemId, + association_id: OpsItemRelatedItemAssociationId, + **kwargs, + ) -> DisassociateOpsItemRelatedItemResponse: + raise NotImplementedError + + @handler("GetAccessToken") + def get_access_token( + self, context: RequestContext, access_request_id: AccessRequestId, **kwargs + ) -> GetAccessTokenResponse: + raise NotImplementedError + + @handler("GetAutomationExecution") + def get_automation_execution( + self, context: RequestContext, automation_execution_id: AutomationExecutionId, **kwargs + ) -> GetAutomationExecutionResult: + raise NotImplementedError + + @handler("GetCalendarState") + def get_calendar_state( + self, + context: RequestContext, + calendar_names: CalendarNameOrARNList, + at_time: ISO8601String | None = None, + **kwargs, + ) -> 
GetCalendarStateResponse: + raise NotImplementedError + + @handler("GetCommandInvocation") + def get_command_invocation( + self, + context: RequestContext, + command_id: CommandId, + instance_id: InstanceId, + plugin_name: CommandPluginName | None = None, + **kwargs, + ) -> GetCommandInvocationResult: + raise NotImplementedError + + @handler("GetConnectionStatus") + def get_connection_status( + self, context: RequestContext, target: SessionTarget, **kwargs + ) -> GetConnectionStatusResponse: + raise NotImplementedError + + @handler("GetDefaultPatchBaseline") + def get_default_patch_baseline( + self, context: RequestContext, operating_system: OperatingSystem | None = None, **kwargs + ) -> GetDefaultPatchBaselineResult: + raise NotImplementedError + + @handler("GetDeployablePatchSnapshotForInstance") + def get_deployable_patch_snapshot_for_instance( + self, + context: RequestContext, + instance_id: InstanceId, + snapshot_id: SnapshotId, + baseline_override: BaselineOverride | None = None, + **kwargs, + ) -> GetDeployablePatchSnapshotForInstanceResult: + raise NotImplementedError + + @handler("GetDocument") + def get_document( + self, + context: RequestContext, + name: DocumentARN, + version_name: DocumentVersionName | None = None, + document_version: DocumentVersion | None = None, + document_format: DocumentFormat | None = None, + **kwargs, + ) -> GetDocumentResult: + raise NotImplementedError + + @handler("GetExecutionPreview") + def get_execution_preview( + self, context: RequestContext, execution_preview_id: ExecutionPreviewId, **kwargs + ) -> GetExecutionPreviewResponse: + raise NotImplementedError + + @handler("GetInventory") + def get_inventory( + self, + context: RequestContext, + filters: InventoryFilterList | None = None, + aggregators: InventoryAggregatorList | None = None, + result_attributes: ResultAttributeList | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> GetInventoryResult: + raise NotImplementedError + + @handler("GetInventorySchema") + def get_inventory_schema( + self, + context: RequestContext, + type_name: InventoryItemTypeNameFilter | None = None, + next_token: NextToken | None = None, + max_results: GetInventorySchemaMaxResults | None = None, + aggregator: AggregatorSchemaOnly | None = None, + sub_type: IsSubTypeSchema | None = None, + **kwargs, + ) -> GetInventorySchemaResult: + raise NotImplementedError + + @handler("GetMaintenanceWindow") + def get_maintenance_window( + self, context: RequestContext, window_id: MaintenanceWindowId, **kwargs + ) -> GetMaintenanceWindowResult: + raise NotImplementedError + + @handler("GetMaintenanceWindowExecution") + def get_maintenance_window_execution( + self, context: RequestContext, window_execution_id: MaintenanceWindowExecutionId, **kwargs + ) -> GetMaintenanceWindowExecutionResult: + raise NotImplementedError + + @handler("GetMaintenanceWindowExecutionTask") + def get_maintenance_window_execution_task( + self, + context: RequestContext, + window_execution_id: MaintenanceWindowExecutionId, + task_id: MaintenanceWindowExecutionTaskId, + **kwargs, + ) -> GetMaintenanceWindowExecutionTaskResult: + raise NotImplementedError + + @handler("GetMaintenanceWindowExecutionTaskInvocation") + def get_maintenance_window_execution_task_invocation( + self, + context: RequestContext, + window_execution_id: MaintenanceWindowExecutionId, + task_id: MaintenanceWindowExecutionTaskId, + invocation_id: MaintenanceWindowExecutionTaskInvocationId, + **kwargs, + ) -> 
GetMaintenanceWindowExecutionTaskInvocationResult: + raise NotImplementedError + + @handler("GetMaintenanceWindowTask") + def get_maintenance_window_task( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + window_task_id: MaintenanceWindowTaskId, + **kwargs, + ) -> GetMaintenanceWindowTaskResult: + raise NotImplementedError + + @handler("GetOpsItem") + def get_ops_item( + self, + context: RequestContext, + ops_item_id: OpsItemId, + ops_item_arn: OpsItemArn | None = None, + **kwargs, + ) -> GetOpsItemResponse: + raise NotImplementedError + + @handler("GetOpsMetadata") + def get_ops_metadata( + self, + context: RequestContext, + ops_metadata_arn: OpsMetadataArn, + max_results: GetOpsMetadataMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetOpsMetadataResult: + raise NotImplementedError + + @handler("GetOpsSummary") + def get_ops_summary( + self, + context: RequestContext, + sync_name: ResourceDataSyncName | None = None, + filters: OpsFilterList | None = None, + aggregators: OpsAggregatorList | None = None, + result_attributes: OpsResultAttributeList | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> GetOpsSummaryResult: + raise NotImplementedError + + @handler("GetParameter") + def get_parameter( + self, + context: RequestContext, + name: PSParameterName, + with_decryption: Boolean | None = None, + **kwargs, + ) -> GetParameterResult: + raise NotImplementedError + + @handler("GetParameterHistory") + def get_parameter_history( + self, + context: RequestContext, + name: PSParameterName, + with_decryption: Boolean | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetParameterHistoryResult: + raise NotImplementedError + + @handler("GetParameters") + def get_parameters( + self, + context: RequestContext, + names: ParameterNameList, + with_decryption: Boolean | None = None, + **kwargs, + ) -> GetParametersResult: + raise NotImplementedError + + @handler("GetParametersByPath") + def get_parameters_by_path( + self, + context: RequestContext, + path: PSParameterName, + recursive: Boolean | None = None, + parameter_filters: ParameterStringFilterList | None = None, + with_decryption: Boolean | None = None, + max_results: GetParametersByPathMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> GetParametersByPathResult: + raise NotImplementedError + + @handler("GetPatchBaseline") + def get_patch_baseline( + self, context: RequestContext, baseline_id: BaselineId, **kwargs + ) -> GetPatchBaselineResult: + raise NotImplementedError + + @handler("GetPatchBaselineForPatchGroup") + def get_patch_baseline_for_patch_group( + self, + context: RequestContext, + patch_group: PatchGroup, + operating_system: OperatingSystem | None = None, + **kwargs, + ) -> GetPatchBaselineForPatchGroupResult: + raise NotImplementedError + + @handler("GetResourcePolicies") + def get_resource_policies( + self, + context: RequestContext, + resource_arn: ResourceArnString, + next_token: String | None = None, + max_results: ResourcePolicyMaxResults | None = None, + **kwargs, + ) -> GetResourcePoliciesResponse: + raise NotImplementedError + + @handler("GetServiceSetting") + def get_service_setting( + self, context: RequestContext, setting_id: ServiceSettingId, **kwargs + ) -> GetServiceSettingResult: + raise NotImplementedError + + @handler("LabelParameterVersion") + def label_parameter_version( + self, + context: 
RequestContext, + name: PSParameterName, + labels: ParameterLabelList, + parameter_version: PSParameterVersion | None = None, + **kwargs, + ) -> LabelParameterVersionResult: + raise NotImplementedError + + @handler("ListAssociationVersions") + def list_association_versions( + self, + context: RequestContext, + association_id: AssociationId, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListAssociationVersionsResult: + raise NotImplementedError + + @handler("ListAssociations") + def list_associations( + self, + context: RequestContext, + association_filter_list: AssociationFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListAssociationsResult: + raise NotImplementedError + + @handler("ListCommandInvocations") + def list_command_invocations( + self, + context: RequestContext, + command_id: CommandId | None = None, + instance_id: InstanceId | None = None, + max_results: CommandMaxResults | None = None, + next_token: NextToken | None = None, + filters: CommandFilterList | None = None, + details: Boolean | None = None, + **kwargs, + ) -> ListCommandInvocationsResult: + raise NotImplementedError + + @handler("ListCommands") + def list_commands( + self, + context: RequestContext, + command_id: CommandId | None = None, + instance_id: InstanceId | None = None, + max_results: CommandMaxResults | None = None, + next_token: NextToken | None = None, + filters: CommandFilterList | None = None, + **kwargs, + ) -> ListCommandsResult: + raise NotImplementedError + + @handler("ListComplianceItems") + def list_compliance_items( + self, + context: RequestContext, + filters: ComplianceStringFilterList | None = None, + resource_ids: ComplianceResourceIdList | None = None, + resource_types: ComplianceResourceTypeList | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListComplianceItemsResult: + raise NotImplementedError + + @handler("ListComplianceSummaries") + def list_compliance_summaries( + self, + context: RequestContext, + filters: ComplianceStringFilterList | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListComplianceSummariesResult: + raise NotImplementedError + + @handler("ListDocumentMetadataHistory") + def list_document_metadata_history( + self, + context: RequestContext, + name: DocumentName, + metadata: DocumentMetadataEnum, + document_version: DocumentVersion | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListDocumentMetadataHistoryResponse: + raise NotImplementedError + + @handler("ListDocumentVersions") + def list_document_versions( + self, + context: RequestContext, + name: DocumentARN, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListDocumentVersionsResult: + raise NotImplementedError + + @handler("ListDocuments") + def list_documents( + self, + context: RequestContext, + document_filter_list: DocumentFilterList | None = None, + filters: DocumentKeyValuesFilterList | None = None, + max_results: MaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListDocumentsResult: + raise NotImplementedError + + @handler("ListInventoryEntries") + def list_inventory_entries( + self, + context: RequestContext, + instance_id: InstanceId, + type_name: InventoryItemTypeName, + filters: InventoryFilterList | 
None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListInventoryEntriesResult: + raise NotImplementedError + + @handler("ListNodes") + def list_nodes( + self, + context: RequestContext, + sync_name: ResourceDataSyncName | None = None, + filters: NodeFilterList | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListNodesResult: + raise NotImplementedError + + @handler("ListNodesSummary") + def list_nodes_summary( + self, + context: RequestContext, + aggregators: NodeAggregatorList, + sync_name: ResourceDataSyncName | None = None, + filters: NodeFilterList | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListNodesSummaryResult: + raise NotImplementedError + + @handler("ListOpsItemEvents") + def list_ops_item_events( + self, + context: RequestContext, + filters: OpsItemEventFilters | None = None, + max_results: OpsItemEventMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> ListOpsItemEventsResponse: + raise NotImplementedError + + @handler("ListOpsItemRelatedItems") + def list_ops_item_related_items( + self, + context: RequestContext, + ops_item_id: OpsItemId | None = None, + filters: OpsItemRelatedItemsFilters | None = None, + max_results: OpsItemRelatedItemsMaxResults | None = None, + next_token: String | None = None, + **kwargs, + ) -> ListOpsItemRelatedItemsResponse: + raise NotImplementedError + + @handler("ListOpsMetadata") + def list_ops_metadata( + self, + context: RequestContext, + filters: OpsMetadataFilterList | None = None, + max_results: ListOpsMetadataMaxResults | None = None, + next_token: NextToken | None = None, + **kwargs, + ) -> ListOpsMetadataResult: + raise NotImplementedError + + @handler("ListResourceComplianceSummaries") + def list_resource_compliance_summaries( + self, + context: RequestContext, + filters: ComplianceStringFilterList | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListResourceComplianceSummariesResult: + raise NotImplementedError + + @handler("ListResourceDataSync") + def list_resource_data_sync( + self, + context: RequestContext, + sync_type: ResourceDataSyncType | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListResourceDataSyncResult: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, + context: RequestContext, + resource_type: ResourceTypeForTagging, + resource_id: ResourceId, + **kwargs, + ) -> ListTagsForResourceResult: + raise NotImplementedError + + @handler("ModifyDocumentPermission") + def modify_document_permission( + self, + context: RequestContext, + name: DocumentName, + permission_type: DocumentPermissionType, + account_ids_to_add: AccountIdList | None = None, + account_ids_to_remove: AccountIdList | None = None, + shared_document_version: SharedDocumentVersion | None = None, + **kwargs, + ) -> ModifyDocumentPermissionResponse: + raise NotImplementedError + + @handler("PutComplianceItems") + def put_compliance_items( + self, + context: RequestContext, + resource_id: ComplianceResourceId, + resource_type: ComplianceResourceType, + compliance_type: ComplianceTypeName, + execution_summary: ComplianceExecutionSummary, + items: ComplianceItemEntryList, + item_content_hash: ComplianceItemContentHash | None = None, + upload_type: 
ComplianceUploadType | None = None, + **kwargs, + ) -> PutComplianceItemsResult: + raise NotImplementedError + + @handler("PutInventory") + def put_inventory( + self, context: RequestContext, instance_id: InstanceId, items: InventoryItemList, **kwargs + ) -> PutInventoryResult: + raise NotImplementedError + + @handler("PutParameter", expand=False) + def put_parameter( + self, context: RequestContext, request: PutParameterRequest, **kwargs + ) -> PutParameterResult: + raise NotImplementedError + + @handler("PutResourcePolicy") + def put_resource_policy( + self, + context: RequestContext, + resource_arn: ResourceArnString, + policy: Policy, + policy_id: PolicyId | None = None, + policy_hash: PolicyHash | None = None, + **kwargs, + ) -> PutResourcePolicyResponse: + raise NotImplementedError + + @handler("RegisterDefaultPatchBaseline") + def register_default_patch_baseline( + self, context: RequestContext, baseline_id: BaselineId, **kwargs + ) -> RegisterDefaultPatchBaselineResult: + raise NotImplementedError + + @handler("RegisterPatchBaselineForPatchGroup") + def register_patch_baseline_for_patch_group( + self, context: RequestContext, baseline_id: BaselineId, patch_group: PatchGroup, **kwargs + ) -> RegisterPatchBaselineForPatchGroupResult: + raise NotImplementedError + + @handler("RegisterTargetWithMaintenanceWindow") + def register_target_with_maintenance_window( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + resource_type: MaintenanceWindowResourceType, + targets: Targets, + owner_information: OwnerInformation | None = None, + name: MaintenanceWindowName | None = None, + description: MaintenanceWindowDescription | None = None, + client_token: ClientToken | None = None, + **kwargs, + ) -> RegisterTargetWithMaintenanceWindowResult: + raise NotImplementedError + + @handler("RegisterTaskWithMaintenanceWindow") + def register_task_with_maintenance_window( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + task_arn: MaintenanceWindowTaskArn, + task_type: MaintenanceWindowTaskType, + targets: Targets | None = None, + service_role_arn: ServiceRole | None = None, + task_parameters: MaintenanceWindowTaskParameters | None = None, + task_invocation_parameters: MaintenanceWindowTaskInvocationParameters | None = None, + priority: MaintenanceWindowTaskPriority | None = None, + max_concurrency: MaxConcurrency | None = None, + max_errors: MaxErrors | None = None, + logging_info: LoggingInfo | None = None, + name: MaintenanceWindowName | None = None, + description: MaintenanceWindowDescription | None = None, + client_token: ClientToken | None = None, + cutoff_behavior: MaintenanceWindowTaskCutoffBehavior | None = None, + alarm_configuration: AlarmConfiguration | None = None, + **kwargs, + ) -> RegisterTaskWithMaintenanceWindowResult: + raise NotImplementedError + + @handler("RemoveTagsFromResource") + def remove_tags_from_resource( + self, + context: RequestContext, + resource_type: ResourceTypeForTagging, + resource_id: ResourceId, + tag_keys: KeyList, + **kwargs, + ) -> RemoveTagsFromResourceResult: + raise NotImplementedError + + @handler("ResetServiceSetting") + def reset_service_setting( + self, context: RequestContext, setting_id: ServiceSettingId, **kwargs + ) -> ResetServiceSettingResult: + raise NotImplementedError + + @handler("ResumeSession") + def resume_session( + self, context: RequestContext, session_id: SessionId, **kwargs + ) -> ResumeSessionResponse: + raise NotImplementedError + + @handler("SendAutomationSignal") + def send_automation_signal( 
+ self, + context: RequestContext, + automation_execution_id: AutomationExecutionId, + signal_type: SignalType, + payload: AutomationParameterMap | None = None, + **kwargs, + ) -> SendAutomationSignalResult: + raise NotImplementedError + + @handler("SendCommand") + def send_command( + self, + context: RequestContext, + document_name: DocumentARN, + instance_ids: InstanceIdList | None = None, + targets: Targets | None = None, + document_version: DocumentVersion | None = None, + document_hash: DocumentHash | None = None, + document_hash_type: DocumentHashType | None = None, + timeout_seconds: TimeoutSeconds | None = None, + comment: Comment | None = None, + parameters: Parameters | None = None, + output_s3_region: S3Region | None = None, + output_s3_bucket_name: S3BucketName | None = None, + output_s3_key_prefix: S3KeyPrefix | None = None, + max_concurrency: MaxConcurrency | None = None, + max_errors: MaxErrors | None = None, + service_role_arn: ServiceRole | None = None, + notification_config: NotificationConfig | None = None, + cloud_watch_output_config: CloudWatchOutputConfig | None = None, + alarm_configuration: AlarmConfiguration | None = None, + **kwargs, + ) -> SendCommandResult: + raise NotImplementedError + + @handler("StartAccessRequest") + def start_access_request( + self, + context: RequestContext, + reason: String1to256, + targets: Targets, + tags: TagList | None = None, + **kwargs, + ) -> StartAccessRequestResponse: + raise NotImplementedError + + @handler("StartAssociationsOnce") + def start_associations_once( + self, context: RequestContext, association_ids: AssociationIdList, **kwargs + ) -> StartAssociationsOnceResult: + raise NotImplementedError + + @handler("StartAutomationExecution") + def start_automation_execution( + self, + context: RequestContext, + document_name: DocumentARN, + document_version: DocumentVersion | None = None, + parameters: AutomationParameterMap | None = None, + client_token: IdempotencyToken | None = None, + mode: ExecutionMode | None = None, + target_parameter_name: AutomationParameterKey | None = None, + targets: Targets | None = None, + target_maps: TargetMaps | None = None, + max_concurrency: MaxConcurrency | None = None, + max_errors: MaxErrors | None = None, + target_locations: TargetLocations | None = None, + tags: TagList | None = None, + alarm_configuration: AlarmConfiguration | None = None, + target_locations_url: TargetLocationsURL | None = None, + **kwargs, + ) -> StartAutomationExecutionResult: + raise NotImplementedError + + @handler("StartChangeRequestExecution") + def start_change_request_execution( + self, + context: RequestContext, + document_name: DocumentARN, + runbooks: Runbooks, + scheduled_time: DateTime | None = None, + document_version: DocumentVersion | None = None, + parameters: AutomationParameterMap | None = None, + change_request_name: ChangeRequestName | None = None, + client_token: IdempotencyToken | None = None, + auto_approve: Boolean | None = None, + tags: TagList | None = None, + scheduled_end_time: DateTime | None = None, + change_details: ChangeDetailsValue | None = None, + **kwargs, + ) -> StartChangeRequestExecutionResult: + raise NotImplementedError + + @handler("StartExecutionPreview") + def start_execution_preview( + self, + context: RequestContext, + document_name: DocumentName, + document_version: DocumentVersion | None = None, + execution_inputs: ExecutionInputs | None = None, + **kwargs, + ) -> StartExecutionPreviewResponse: + raise NotImplementedError + + @handler("StartSession") + def start_session( 
+ self, + context: RequestContext, + target: SessionTarget, + document_name: DocumentARN | None = None, + reason: SessionReason | None = None, + parameters: SessionManagerParameters | None = None, + **kwargs, + ) -> StartSessionResponse: + raise NotImplementedError + + @handler("StopAutomationExecution", expand=False) + def stop_automation_execution( + self, context: RequestContext, request: StopAutomationExecutionRequest, **kwargs + ) -> StopAutomationExecutionResult: + raise NotImplementedError + + @handler("TerminateSession") + def terminate_session( + self, context: RequestContext, session_id: SessionId, **kwargs + ) -> TerminateSessionResponse: + raise NotImplementedError + + @handler("UnlabelParameterVersion") + def unlabel_parameter_version( + self, + context: RequestContext, + name: PSParameterName, + parameter_version: PSParameterVersion, + labels: ParameterLabelList, + **kwargs, + ) -> UnlabelParameterVersionResult: + raise NotImplementedError + + @handler("UpdateAssociation") + def update_association( + self, + context: RequestContext, + association_id: AssociationId, + parameters: Parameters | None = None, + document_version: DocumentVersion | None = None, + schedule_expression: ScheduleExpression | None = None, + output_location: InstanceAssociationOutputLocation | None = None, + name: DocumentARN | None = None, + targets: Targets | None = None, + association_name: AssociationName | None = None, + association_version: AssociationVersion | None = None, + automation_target_parameter_name: AutomationTargetParameterName | None = None, + max_errors: MaxErrors | None = None, + max_concurrency: MaxConcurrency | None = None, + compliance_severity: AssociationComplianceSeverity | None = None, + sync_compliance: AssociationSyncCompliance | None = None, + apply_only_at_cron_interval: ApplyOnlyAtCronInterval | None = None, + calendar_names: CalendarNameOrARNList | None = None, + target_locations: TargetLocations | None = None, + schedule_offset: ScheduleOffset | None = None, + duration: Duration | None = None, + target_maps: TargetMaps | None = None, + alarm_configuration: AlarmConfiguration | None = None, + **kwargs, + ) -> UpdateAssociationResult: + raise NotImplementedError + + @handler("UpdateAssociationStatus") + def update_association_status( + self, + context: RequestContext, + name: DocumentARN, + instance_id: InstanceId, + association_status: AssociationStatus, + **kwargs, + ) -> UpdateAssociationStatusResult: + raise NotImplementedError + + @handler("UpdateDocument") + def update_document( + self, + context: RequestContext, + content: DocumentContent, + name: DocumentName, + attachments: AttachmentsSourceList | None = None, + display_name: DocumentDisplayName | None = None, + version_name: DocumentVersionName | None = None, + document_version: DocumentVersion | None = None, + document_format: DocumentFormat | None = None, + target_type: TargetType | None = None, + **kwargs, + ) -> UpdateDocumentResult: + raise NotImplementedError + + @handler("UpdateDocumentDefaultVersion") + def update_document_default_version( + self, + context: RequestContext, + name: DocumentName, + document_version: DocumentVersionNumber, + **kwargs, + ) -> UpdateDocumentDefaultVersionResult: + raise NotImplementedError + + @handler("UpdateDocumentMetadata") + def update_document_metadata( + self, + context: RequestContext, + name: DocumentName, + document_reviews: DocumentReviews, + document_version: DocumentVersion | None = None, + **kwargs, + ) -> UpdateDocumentMetadataResponse: + raise 
NotImplementedError + + @handler("UpdateMaintenanceWindow") + def update_maintenance_window( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + name: MaintenanceWindowName | None = None, + description: MaintenanceWindowDescription | None = None, + start_date: MaintenanceWindowStringDateTime | None = None, + end_date: MaintenanceWindowStringDateTime | None = None, + schedule: MaintenanceWindowSchedule | None = None, + schedule_timezone: MaintenanceWindowTimezone | None = None, + schedule_offset: MaintenanceWindowOffset | None = None, + duration: MaintenanceWindowDurationHours | None = None, + cutoff: MaintenanceWindowCutoff | None = None, + allow_unassociated_targets: MaintenanceWindowAllowUnassociatedTargets | None = None, + enabled: MaintenanceWindowEnabled | None = None, + replace: Boolean | None = None, + **kwargs, + ) -> UpdateMaintenanceWindowResult: + raise NotImplementedError + + @handler("UpdateMaintenanceWindowTarget") + def update_maintenance_window_target( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + window_target_id: MaintenanceWindowTargetId, + targets: Targets | None = None, + owner_information: OwnerInformation | None = None, + name: MaintenanceWindowName | None = None, + description: MaintenanceWindowDescription | None = None, + replace: Boolean | None = None, + **kwargs, + ) -> UpdateMaintenanceWindowTargetResult: + raise NotImplementedError + + @handler("UpdateMaintenanceWindowTask") + def update_maintenance_window_task( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + window_task_id: MaintenanceWindowTaskId, + targets: Targets | None = None, + task_arn: MaintenanceWindowTaskArn | None = None, + service_role_arn: ServiceRole | None = None, + task_parameters: MaintenanceWindowTaskParameters | None = None, + task_invocation_parameters: MaintenanceWindowTaskInvocationParameters | None = None, + priority: MaintenanceWindowTaskPriority | None = None, + max_concurrency: MaxConcurrency | None = None, + max_errors: MaxErrors | None = None, + logging_info: LoggingInfo | None = None, + name: MaintenanceWindowName | None = None, + description: MaintenanceWindowDescription | None = None, + replace: Boolean | None = None, + cutoff_behavior: MaintenanceWindowTaskCutoffBehavior | None = None, + alarm_configuration: AlarmConfiguration | None = None, + **kwargs, + ) -> UpdateMaintenanceWindowTaskResult: + raise NotImplementedError + + @handler("UpdateManagedInstanceRole") + def update_managed_instance_role( + self, context: RequestContext, instance_id: ManagedInstanceId, iam_role: IamRole, **kwargs + ) -> UpdateManagedInstanceRoleResult: + raise NotImplementedError + + @handler("UpdateOpsItem") + def update_ops_item( + self, + context: RequestContext, + ops_item_id: OpsItemId, + description: OpsItemDescription | None = None, + operational_data: OpsItemOperationalData | None = None, + operational_data_to_delete: OpsItemOpsDataKeysList | None = None, + notifications: OpsItemNotifications | None = None, + priority: OpsItemPriority | None = None, + related_ops_items: RelatedOpsItems | None = None, + status: OpsItemStatus | None = None, + title: OpsItemTitle | None = None, + category: OpsItemCategory | None = None, + severity: OpsItemSeverity | None = None, + actual_start_time: DateTime | None = None, + actual_end_time: DateTime | None = None, + planned_start_time: DateTime | None = None, + planned_end_time: DateTime | None = None, + ops_item_arn: OpsItemArn | None = None, + **kwargs, + ) -> UpdateOpsItemResponse: + raise 
NotImplementedError + + @handler("UpdateOpsMetadata") + def update_ops_metadata( + self, + context: RequestContext, + ops_metadata_arn: OpsMetadataArn, + metadata_to_update: MetadataMap | None = None, + keys_to_delete: MetadataKeysToDeleteList | None = None, + **kwargs, + ) -> UpdateOpsMetadataResult: + raise NotImplementedError + + @handler("UpdatePatchBaseline") + def update_patch_baseline( + self, + context: RequestContext, + baseline_id: BaselineId, + name: BaselineName | None = None, + global_filters: PatchFilterGroup | None = None, + approval_rules: PatchRuleGroup | None = None, + approved_patches: PatchIdList | None = None, + approved_patches_compliance_level: PatchComplianceLevel | None = None, + approved_patches_enable_non_security: Boolean | None = None, + rejected_patches: PatchIdList | None = None, + rejected_patches_action: PatchAction | None = None, + description: BaselineDescription | None = None, + sources: PatchSourceList | None = None, + available_security_updates_compliance_status: PatchComplianceStatus | None = None, + replace: Boolean | None = None, + **kwargs, + ) -> UpdatePatchBaselineResult: + raise NotImplementedError + + @handler("UpdateResourceDataSync") + def update_resource_data_sync( + self, + context: RequestContext, + sync_name: ResourceDataSyncName, + sync_type: ResourceDataSyncType, + sync_source: ResourceDataSyncSource, + **kwargs, + ) -> UpdateResourceDataSyncResult: + raise NotImplementedError + + @handler("UpdateServiceSetting") + def update_service_setting( + self, + context: RequestContext, + setting_id: ServiceSettingId, + setting_value: ServiceSettingValue, + **kwargs, + ) -> UpdateServiceSettingResult: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/stepfunctions/__init__.py b/localstack-core/localstack/aws/api/stepfunctions/__init__.py new file mode 100644 index 0000000000000..c1dca160d5ffe --- /dev/null +++ b/localstack-core/localstack/aws/api/stepfunctions/__init__.py @@ -0,0 +1,1733 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AliasDescription = str +Arn = str +CharacterRestrictedName = str +ClientToken = str +ConnectorParameters = str +Definition = str +Enabled = bool +ErrorMessage = str +EvaluationFailureLocation = str +HTTPBody = str +HTTPHeaders = str +HTTPMethod = str +HTTPProtocol = str +HTTPStatusCode = str +HTTPStatusMessage = str +Identity = str +IncludeExecutionData = bool +IncludeExecutionDataGetExecutionHistory = bool +KmsDataKeyReusePeriodSeconds = int +KmsKeyId = str +ListExecutionsPageToken = str +LongArn = str +MapRunLabel = str +MaxConcurrency = int +Name = str +PageSize = int +PageToken = str +Publish = bool +RedriveCount = int +RevealSecrets = bool +ReverseOrder = bool +RevisionId = str +SensitiveCause = str +SensitiveData = str +SensitiveDataJobInput = str +SensitiveError = str +StateName = str +TagKey = str +TagValue = str +TaskToken = str +ToleratedFailurePercentage = float +TraceHeader = str +URL = str +UnsignedInteger = int +ValidateStateMachineDefinitionCode = str +ValidateStateMachineDefinitionLocation = str +ValidateStateMachineDefinitionMaxResult = int +ValidateStateMachineDefinitionMessage = str +ValidateStateMachineDefinitionTruncated = bool +VariableName = str +VariableValue = str +VersionDescription = str +VersionWeight = int +includedDetails = bool +truncated = bool + + +class EncryptionType(StrEnum): + AWS_OWNED_KEY = 
"AWS_OWNED_KEY" + CUSTOMER_MANAGED_KMS_KEY = "CUSTOMER_MANAGED_KMS_KEY" + + +class ExecutionRedriveFilter(StrEnum): + REDRIVEN = "REDRIVEN" + NOT_REDRIVEN = "NOT_REDRIVEN" + + +class ExecutionRedriveStatus(StrEnum): + REDRIVABLE = "REDRIVABLE" + NOT_REDRIVABLE = "NOT_REDRIVABLE" + REDRIVABLE_BY_MAP_RUN = "REDRIVABLE_BY_MAP_RUN" + + +class ExecutionStatus(StrEnum): + RUNNING = "RUNNING" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + TIMED_OUT = "TIMED_OUT" + ABORTED = "ABORTED" + PENDING_REDRIVE = "PENDING_REDRIVE" + + +class HistoryEventType(StrEnum): + ActivityFailed = "ActivityFailed" + ActivityScheduled = "ActivityScheduled" + ActivityScheduleFailed = "ActivityScheduleFailed" + ActivityStarted = "ActivityStarted" + ActivitySucceeded = "ActivitySucceeded" + ActivityTimedOut = "ActivityTimedOut" + ChoiceStateEntered = "ChoiceStateEntered" + ChoiceStateExited = "ChoiceStateExited" + ExecutionAborted = "ExecutionAborted" + ExecutionFailed = "ExecutionFailed" + ExecutionStarted = "ExecutionStarted" + ExecutionSucceeded = "ExecutionSucceeded" + ExecutionTimedOut = "ExecutionTimedOut" + FailStateEntered = "FailStateEntered" + LambdaFunctionFailed = "LambdaFunctionFailed" + LambdaFunctionScheduled = "LambdaFunctionScheduled" + LambdaFunctionScheduleFailed = "LambdaFunctionScheduleFailed" + LambdaFunctionStarted = "LambdaFunctionStarted" + LambdaFunctionStartFailed = "LambdaFunctionStartFailed" + LambdaFunctionSucceeded = "LambdaFunctionSucceeded" + LambdaFunctionTimedOut = "LambdaFunctionTimedOut" + MapIterationAborted = "MapIterationAborted" + MapIterationFailed = "MapIterationFailed" + MapIterationStarted = "MapIterationStarted" + MapIterationSucceeded = "MapIterationSucceeded" + MapStateAborted = "MapStateAborted" + MapStateEntered = "MapStateEntered" + MapStateExited = "MapStateExited" + MapStateFailed = "MapStateFailed" + MapStateStarted = "MapStateStarted" + MapStateSucceeded = "MapStateSucceeded" + ParallelStateAborted = "ParallelStateAborted" + ParallelStateEntered = "ParallelStateEntered" + ParallelStateExited = "ParallelStateExited" + ParallelStateFailed = "ParallelStateFailed" + ParallelStateStarted = "ParallelStateStarted" + ParallelStateSucceeded = "ParallelStateSucceeded" + PassStateEntered = "PassStateEntered" + PassStateExited = "PassStateExited" + SucceedStateEntered = "SucceedStateEntered" + SucceedStateExited = "SucceedStateExited" + TaskFailed = "TaskFailed" + TaskScheduled = "TaskScheduled" + TaskStarted = "TaskStarted" + TaskStartFailed = "TaskStartFailed" + TaskStateAborted = "TaskStateAborted" + TaskStateEntered = "TaskStateEntered" + TaskStateExited = "TaskStateExited" + TaskSubmitFailed = "TaskSubmitFailed" + TaskSubmitted = "TaskSubmitted" + TaskSucceeded = "TaskSucceeded" + TaskTimedOut = "TaskTimedOut" + WaitStateAborted = "WaitStateAborted" + WaitStateEntered = "WaitStateEntered" + WaitStateExited = "WaitStateExited" + MapRunAborted = "MapRunAborted" + MapRunFailed = "MapRunFailed" + MapRunStarted = "MapRunStarted" + MapRunSucceeded = "MapRunSucceeded" + ExecutionRedriven = "ExecutionRedriven" + MapRunRedriven = "MapRunRedriven" + EvaluationFailed = "EvaluationFailed" + + +class IncludedData(StrEnum): + ALL_DATA = "ALL_DATA" + METADATA_ONLY = "METADATA_ONLY" + + +class InspectionLevel(StrEnum): + INFO = "INFO" + DEBUG = "DEBUG" + TRACE = "TRACE" + + +class KmsKeyState(StrEnum): + DISABLED = "DISABLED" + PENDING_DELETION = "PENDING_DELETION" + PENDING_IMPORT = "PENDING_IMPORT" + UNAVAILABLE = "UNAVAILABLE" + CREATING = "CREATING" + + +class LogLevel(StrEnum): + ALL 
= "ALL" + ERROR = "ERROR" + FATAL = "FATAL" + OFF = "OFF" + + +class MapRunStatus(StrEnum): + RUNNING = "RUNNING" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + ABORTED = "ABORTED" + + +class StateMachineStatus(StrEnum): + ACTIVE = "ACTIVE" + DELETING = "DELETING" + + +class StateMachineType(StrEnum): + STANDARD = "STANDARD" + EXPRESS = "EXPRESS" + + +class SyncExecutionStatus(StrEnum): + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + TIMED_OUT = "TIMED_OUT" + + +class TestExecutionStatus(StrEnum): + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + RETRIABLE = "RETRIABLE" + CAUGHT_ERROR = "CAUGHT_ERROR" + + +class ValidateStateMachineDefinitionResultCode(StrEnum): + OK = "OK" + FAIL = "FAIL" + + +class ValidateStateMachineDefinitionSeverity(StrEnum): + ERROR = "ERROR" + WARNING = "WARNING" + + +class ValidationExceptionReason(StrEnum): + API_DOES_NOT_SUPPORT_LABELED_ARNS = "API_DOES_NOT_SUPPORT_LABELED_ARNS" + MISSING_REQUIRED_PARAMETER = "MISSING_REQUIRED_PARAMETER" + CANNOT_UPDATE_COMPLETED_MAP_RUN = "CANNOT_UPDATE_COMPLETED_MAP_RUN" + INVALID_ROUTING_CONFIGURATION = "INVALID_ROUTING_CONFIGURATION" + + +class ActivityAlreadyExists(ServiceException): + code: str = "ActivityAlreadyExists" + sender_fault: bool = False + status_code: int = 400 + + +class ActivityDoesNotExist(ServiceException): + code: str = "ActivityDoesNotExist" + sender_fault: bool = False + status_code: int = 400 + + +class ActivityLimitExceeded(ServiceException): + code: str = "ActivityLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class ActivityWorkerLimitExceeded(ServiceException): + code: str = "ActivityWorkerLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class ExecutionAlreadyExists(ServiceException): + code: str = "ExecutionAlreadyExists" + sender_fault: bool = False + status_code: int = 400 + + +class ExecutionDoesNotExist(ServiceException): + code: str = "ExecutionDoesNotExist" + sender_fault: bool = False + status_code: int = 400 + + +class ExecutionLimitExceeded(ServiceException): + code: str = "ExecutionLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class ExecutionNotRedrivable(ServiceException): + code: str = "ExecutionNotRedrivable" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidArn(ServiceException): + code: str = "InvalidArn" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidDefinition(ServiceException): + code: str = "InvalidDefinition" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidEncryptionConfiguration(ServiceException): + code: str = "InvalidEncryptionConfiguration" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidExecutionInput(ServiceException): + code: str = "InvalidExecutionInput" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidLoggingConfiguration(ServiceException): + code: str = "InvalidLoggingConfiguration" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidName(ServiceException): + code: str = "InvalidName" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidOutput(ServiceException): + code: str = "InvalidOutput" + sender_fault: bool = False + status_code: int = 400 + + +class InvalidToken(ServiceException): + code: str = "InvalidToken" + sender_fault: bool = False + status_code: int = 400 + + +class 
InvalidTracingConfiguration(ServiceException): + code: str = "InvalidTracingConfiguration" + sender_fault: bool = False + status_code: int = 400 + + +class KmsAccessDeniedException(ServiceException): + code: str = "KmsAccessDeniedException" + sender_fault: bool = False + status_code: int = 400 + + +class KmsInvalidStateException(ServiceException): + code: str = "KmsInvalidStateException" + sender_fault: bool = False + status_code: int = 400 + kmsKeyState: Optional[KmsKeyState] + + +class KmsThrottlingException(ServiceException): + code: str = "KmsThrottlingException" + sender_fault: bool = False + status_code: int = 400 + + +class MissingRequiredParameter(ServiceException): + code: str = "MissingRequiredParameter" + sender_fault: bool = False + status_code: int = 400 + + +class ResourceNotFound(ServiceException): + code: str = "ResourceNotFound" + sender_fault: bool = False + status_code: int = 400 + resourceName: Optional[Arn] + + +class ServiceQuotaExceededException(ServiceException): + code: str = "ServiceQuotaExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class StateMachineAlreadyExists(ServiceException): + code: str = "StateMachineAlreadyExists" + sender_fault: bool = False + status_code: int = 400 + + +class StateMachineDeleting(ServiceException): + code: str = "StateMachineDeleting" + sender_fault: bool = False + status_code: int = 400 + + +class StateMachineDoesNotExist(ServiceException): + code: str = "StateMachineDoesNotExist" + sender_fault: bool = False + status_code: int = 400 + + +class StateMachineLimitExceeded(ServiceException): + code: str = "StateMachineLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class StateMachineTypeNotSupported(ServiceException): + code: str = "StateMachineTypeNotSupported" + sender_fault: bool = False + status_code: int = 400 + + +class TaskDoesNotExist(ServiceException): + code: str = "TaskDoesNotExist" + sender_fault: bool = False + status_code: int = 400 + + +class TaskTimedOut(ServiceException): + code: str = "TaskTimedOut" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTags(ServiceException): + code: str = "TooManyTags" + sender_fault: bool = False + status_code: int = 400 + resourceName: Optional[Arn] + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = False + status_code: int = 400 + reason: Optional[ValidationExceptionReason] + + +class ActivityFailedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +Timestamp = datetime + + +class ActivityListItem(TypedDict, total=False): + activityArn: Arn + name: Name + creationDate: Timestamp + + +ActivityList = List[ActivityListItem] + + +class ActivityScheduleFailedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +TimeoutInSeconds = int + + +class HistoryEventExecutionDataDetails(TypedDict, total=False): + truncated: Optional[truncated] + + +class ActivityScheduledEventDetails(TypedDict, total=False): + resource: Arn + input: Optional[SensitiveData] + inputDetails: Optional[HistoryEventExecutionDataDetails] + timeoutInSeconds: Optional[TimeoutInSeconds] + heartbeatInSeconds: Optional[TimeoutInSeconds] + + +class ActivityStartedEventDetails(TypedDict, total=False): + workerName: Optional[Identity] + + +class ActivitySucceededEventDetails(TypedDict, total=False): + output: Optional[SensitiveData] + outputDetails: 
Optional[HistoryEventExecutionDataDetails] + + +class ActivityTimedOutEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +AssignedVariables = Dict[VariableName, VariableValue] + + +class AssignedVariablesDetails(TypedDict, total=False): + truncated: Optional[truncated] + + +BilledDuration = int +BilledMemoryUsed = int + + +class BillingDetails(TypedDict, total=False): + billedMemoryUsedInMB: Optional[BilledMemoryUsed] + billedDurationInMilliseconds: Optional[BilledDuration] + + +class CloudWatchEventsExecutionDataDetails(TypedDict, total=False): + included: Optional[includedDetails] + + +class CloudWatchLogsLogGroup(TypedDict, total=False): + logGroupArn: Optional[Arn] + + +EncryptionConfiguration = TypedDict( + "EncryptionConfiguration", + { + "kmsKeyId": Optional[KmsKeyId], + "kmsDataKeyReusePeriodSeconds": Optional[KmsDataKeyReusePeriodSeconds], + "type": EncryptionType, + }, + total=False, +) + + +class Tag(TypedDict, total=False): + key: Optional[TagKey] + value: Optional[TagValue] + + +TagList = List[Tag] + + +class CreateActivityInput(ServiceRequest): + name: Name + tags: Optional[TagList] + encryptionConfiguration: Optional[EncryptionConfiguration] + + +class CreateActivityOutput(TypedDict, total=False): + activityArn: Arn + creationDate: Timestamp + + +class RoutingConfigurationListItem(TypedDict, total=False): + stateMachineVersionArn: Arn + weight: VersionWeight + + +RoutingConfigurationList = List[RoutingConfigurationListItem] + + +class CreateStateMachineAliasInput(ServiceRequest): + description: Optional[AliasDescription] + name: CharacterRestrictedName + routingConfiguration: RoutingConfigurationList + + +class CreateStateMachineAliasOutput(TypedDict, total=False): + stateMachineAliasArn: Arn + creationDate: Timestamp + + +class TracingConfiguration(TypedDict, total=False): + enabled: Optional[Enabled] + + +class LogDestination(TypedDict, total=False): + cloudWatchLogsLogGroup: Optional[CloudWatchLogsLogGroup] + + +LogDestinationList = List[LogDestination] + + +class LoggingConfiguration(TypedDict, total=False): + level: Optional[LogLevel] + includeExecutionData: Optional[IncludeExecutionData] + destinations: Optional[LogDestinationList] + + +CreateStateMachineInput = TypedDict( + "CreateStateMachineInput", + { + "name": Name, + "definition": Definition, + "roleArn": Arn, + "type": Optional[StateMachineType], + "loggingConfiguration": Optional[LoggingConfiguration], + "tags": Optional[TagList], + "tracingConfiguration": Optional[TracingConfiguration], + "publish": Optional[Publish], + "versionDescription": Optional[VersionDescription], + "encryptionConfiguration": Optional[EncryptionConfiguration], + }, + total=False, +) + + +class CreateStateMachineOutput(TypedDict, total=False): + stateMachineArn: Arn + creationDate: Timestamp + stateMachineVersionArn: Optional[Arn] + + +class DeleteActivityInput(ServiceRequest): + activityArn: Arn + + +class DeleteActivityOutput(TypedDict, total=False): + pass + + +class DeleteStateMachineAliasInput(ServiceRequest): + stateMachineAliasArn: Arn + + +class DeleteStateMachineAliasOutput(TypedDict, total=False): + pass + + +class DeleteStateMachineInput(ServiceRequest): + stateMachineArn: Arn + + +class DeleteStateMachineOutput(TypedDict, total=False): + pass + + +class DeleteStateMachineVersionInput(ServiceRequest): + stateMachineVersionArn: LongArn + + +class DeleteStateMachineVersionOutput(TypedDict, total=False): + pass + + +class DescribeActivityInput(ServiceRequest): + 
activityArn: Arn + + +class DescribeActivityOutput(TypedDict, total=False): + activityArn: Arn + name: Name + creationDate: Timestamp + encryptionConfiguration: Optional[EncryptionConfiguration] + + +class DescribeExecutionInput(ServiceRequest): + executionArn: Arn + includedData: Optional[IncludedData] + + +class DescribeExecutionOutput(TypedDict, total=False): + executionArn: Arn + stateMachineArn: Arn + name: Optional[Name] + status: ExecutionStatus + startDate: Timestamp + stopDate: Optional[Timestamp] + input: Optional[SensitiveData] + inputDetails: Optional[CloudWatchEventsExecutionDataDetails] + output: Optional[SensitiveData] + outputDetails: Optional[CloudWatchEventsExecutionDataDetails] + traceHeader: Optional[TraceHeader] + mapRunArn: Optional[LongArn] + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + stateMachineVersionArn: Optional[Arn] + stateMachineAliasArn: Optional[Arn] + redriveCount: Optional[RedriveCount] + redriveDate: Optional[Timestamp] + redriveStatus: Optional[ExecutionRedriveStatus] + redriveStatusReason: Optional[SensitiveData] + + +class DescribeMapRunInput(ServiceRequest): + mapRunArn: LongArn + + +LongObject = int +UnsignedLong = int + + +class MapRunExecutionCounts(TypedDict, total=False): + pending: UnsignedLong + running: UnsignedLong + succeeded: UnsignedLong + failed: UnsignedLong + timedOut: UnsignedLong + aborted: UnsignedLong + total: UnsignedLong + resultsWritten: UnsignedLong + failuresNotRedrivable: Optional[LongObject] + pendingRedrive: Optional[LongObject] + + +class MapRunItemCounts(TypedDict, total=False): + pending: UnsignedLong + running: UnsignedLong + succeeded: UnsignedLong + failed: UnsignedLong + timedOut: UnsignedLong + aborted: UnsignedLong + total: UnsignedLong + resultsWritten: UnsignedLong + failuresNotRedrivable: Optional[LongObject] + pendingRedrive: Optional[LongObject] + + +ToleratedFailureCount = int + + +class DescribeMapRunOutput(TypedDict, total=False): + mapRunArn: LongArn + executionArn: Arn + status: MapRunStatus + startDate: Timestamp + stopDate: Optional[Timestamp] + maxConcurrency: MaxConcurrency + toleratedFailurePercentage: ToleratedFailurePercentage + toleratedFailureCount: ToleratedFailureCount + itemCounts: MapRunItemCounts + executionCounts: MapRunExecutionCounts + redriveCount: Optional[RedriveCount] + redriveDate: Optional[Timestamp] + + +class DescribeStateMachineAliasInput(ServiceRequest): + stateMachineAliasArn: Arn + + +class DescribeStateMachineAliasOutput(TypedDict, total=False): + stateMachineAliasArn: Optional[Arn] + name: Optional[Name] + description: Optional[AliasDescription] + routingConfiguration: Optional[RoutingConfigurationList] + creationDate: Optional[Timestamp] + updateDate: Optional[Timestamp] + + +class DescribeStateMachineForExecutionInput(ServiceRequest): + executionArn: Arn + includedData: Optional[IncludedData] + + +VariableNameList = List[VariableName] +VariableReferences = Dict[StateName, VariableNameList] + + +class DescribeStateMachineForExecutionOutput(TypedDict, total=False): + stateMachineArn: Arn + name: Name + definition: Definition + roleArn: Arn + updateDate: Timestamp + loggingConfiguration: Optional[LoggingConfiguration] + tracingConfiguration: Optional[TracingConfiguration] + mapRunArn: Optional[LongArn] + label: Optional[MapRunLabel] + revisionId: Optional[RevisionId] + encryptionConfiguration: Optional[EncryptionConfiguration] + variableReferences: Optional[VariableReferences] + + +class DescribeStateMachineInput(ServiceRequest): + stateMachineArn: Arn 
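+    # Assumption based on the AWS Step Functions API docs: includedData set to
+    # METADATA_ONLY omits the state machine definition from the response,
+    # while ALL_DATA (the default) returns it.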
+ includedData: Optional[IncludedData] + + +DescribeStateMachineOutput = TypedDict( + "DescribeStateMachineOutput", + { + "stateMachineArn": Arn, + "name": Name, + "status": Optional[StateMachineStatus], + "definition": Definition, + "roleArn": Arn, + "type": StateMachineType, + "creationDate": Timestamp, + "loggingConfiguration": Optional[LoggingConfiguration], + "tracingConfiguration": Optional[TracingConfiguration], + "label": Optional[MapRunLabel], + "revisionId": Optional[RevisionId], + "description": Optional[VersionDescription], + "encryptionConfiguration": Optional[EncryptionConfiguration], + "variableReferences": Optional[VariableReferences], + }, + total=False, +) + + +class EvaluationFailedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + location: Optional[EvaluationFailureLocation] + state: StateName + + +EventId = int + + +class ExecutionAbortedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class ExecutionFailedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class ExecutionListItem(TypedDict, total=False): + executionArn: Arn + stateMachineArn: Arn + name: Name + status: ExecutionStatus + startDate: Timestamp + stopDate: Optional[Timestamp] + mapRunArn: Optional[LongArn] + itemCount: Optional[UnsignedInteger] + stateMachineVersionArn: Optional[Arn] + stateMachineAliasArn: Optional[Arn] + redriveCount: Optional[RedriveCount] + redriveDate: Optional[Timestamp] + + +ExecutionList = List[ExecutionListItem] + + +class ExecutionRedrivenEventDetails(TypedDict, total=False): + redriveCount: Optional[RedriveCount] + + +class ExecutionStartedEventDetails(TypedDict, total=False): + input: Optional[SensitiveData] + inputDetails: Optional[HistoryEventExecutionDataDetails] + roleArn: Optional[Arn] + stateMachineAliasArn: Optional[Arn] + stateMachineVersionArn: Optional[Arn] + + +class ExecutionSucceededEventDetails(TypedDict, total=False): + output: Optional[SensitiveData] + outputDetails: Optional[HistoryEventExecutionDataDetails] + + +class ExecutionTimedOutEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class GetActivityTaskInput(ServiceRequest): + activityArn: Arn + workerName: Optional[Name] + + +class GetActivityTaskOutput(TypedDict, total=False): + taskToken: Optional[TaskToken] + input: Optional[SensitiveDataJobInput] + + +class GetExecutionHistoryInput(ServiceRequest): + executionArn: Arn + maxResults: Optional[PageSize] + reverseOrder: Optional[ReverseOrder] + nextToken: Optional[PageToken] + includeExecutionData: Optional[IncludeExecutionDataGetExecutionHistory] + + +class MapRunRedrivenEventDetails(TypedDict, total=False): + mapRunArn: Optional[LongArn] + redriveCount: Optional[RedriveCount] + + +class MapRunFailedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class MapRunStartedEventDetails(TypedDict, total=False): + mapRunArn: Optional[LongArn] + + +class StateExitedEventDetails(TypedDict, total=False): + name: Name + output: Optional[SensitiveData] + outputDetails: Optional[HistoryEventExecutionDataDetails] + assignedVariables: Optional[AssignedVariables] + assignedVariablesDetails: Optional[AssignedVariablesDetails] + + +class StateEnteredEventDetails(TypedDict, total=False): + name: Name + input: Optional[SensitiveData] + inputDetails: 
Optional[HistoryEventExecutionDataDetails] + + +class LambdaFunctionTimedOutEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class LambdaFunctionSucceededEventDetails(TypedDict, total=False): + output: Optional[SensitiveData] + outputDetails: Optional[HistoryEventExecutionDataDetails] + + +class LambdaFunctionStartFailedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class TaskCredentials(TypedDict, total=False): + roleArn: Optional[LongArn] + + +class LambdaFunctionScheduledEventDetails(TypedDict, total=False): + resource: Arn + input: Optional[SensitiveData] + inputDetails: Optional[HistoryEventExecutionDataDetails] + timeoutInSeconds: Optional[TimeoutInSeconds] + taskCredentials: Optional[TaskCredentials] + + +class LambdaFunctionScheduleFailedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class LambdaFunctionFailedEventDetails(TypedDict, total=False): + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class MapIterationEventDetails(TypedDict, total=False): + name: Optional[Name] + index: Optional[UnsignedInteger] + + +class MapStateStartedEventDetails(TypedDict, total=False): + length: Optional[UnsignedInteger] + + +class TaskTimedOutEventDetails(TypedDict, total=False): + resourceType: Name + resource: Name + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class TaskSucceededEventDetails(TypedDict, total=False): + resourceType: Name + resource: Name + output: Optional[SensitiveData] + outputDetails: Optional[HistoryEventExecutionDataDetails] + + +class TaskSubmittedEventDetails(TypedDict, total=False): + resourceType: Name + resource: Name + output: Optional[SensitiveData] + outputDetails: Optional[HistoryEventExecutionDataDetails] + + +class TaskSubmitFailedEventDetails(TypedDict, total=False): + resourceType: Name + resource: Name + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class TaskStartedEventDetails(TypedDict, total=False): + resourceType: Name + resource: Name + + +class TaskStartFailedEventDetails(TypedDict, total=False): + resourceType: Name + resource: Name + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class TaskScheduledEventDetails(TypedDict, total=False): + resourceType: Name + resource: Name + region: Name + parameters: ConnectorParameters + timeoutInSeconds: Optional[TimeoutInSeconds] + heartbeatInSeconds: Optional[TimeoutInSeconds] + taskCredentials: Optional[TaskCredentials] + + +class TaskFailedEventDetails(TypedDict, total=False): + resourceType: Name + resource: Name + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +HistoryEvent = TypedDict( + "HistoryEvent", + { + "timestamp": Timestamp, + "type": HistoryEventType, + "id": EventId, + "previousEventId": Optional[EventId], + "activityFailedEventDetails": Optional[ActivityFailedEventDetails], + "activityScheduleFailedEventDetails": Optional[ActivityScheduleFailedEventDetails], + "activityScheduledEventDetails": Optional[ActivityScheduledEventDetails], + "activityStartedEventDetails": Optional[ActivityStartedEventDetails], + "activitySucceededEventDetails": Optional[ActivitySucceededEventDetails], + "activityTimedOutEventDetails": Optional[ActivityTimedOutEventDetails], + "taskFailedEventDetails": Optional[TaskFailedEventDetails], + "taskScheduledEventDetails": Optional[TaskScheduledEventDetails], + 
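+        # Per the AWS GetExecutionHistory data model (assumption): only the
+        # single *EventDetails entry matching "type" above is populated on
+        # any given event.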
"taskStartFailedEventDetails": Optional[TaskStartFailedEventDetails], + "taskStartedEventDetails": Optional[TaskStartedEventDetails], + "taskSubmitFailedEventDetails": Optional[TaskSubmitFailedEventDetails], + "taskSubmittedEventDetails": Optional[TaskSubmittedEventDetails], + "taskSucceededEventDetails": Optional[TaskSucceededEventDetails], + "taskTimedOutEventDetails": Optional[TaskTimedOutEventDetails], + "executionFailedEventDetails": Optional[ExecutionFailedEventDetails], + "executionStartedEventDetails": Optional[ExecutionStartedEventDetails], + "executionSucceededEventDetails": Optional[ExecutionSucceededEventDetails], + "executionAbortedEventDetails": Optional[ExecutionAbortedEventDetails], + "executionTimedOutEventDetails": Optional[ExecutionTimedOutEventDetails], + "executionRedrivenEventDetails": Optional[ExecutionRedrivenEventDetails], + "mapStateStartedEventDetails": Optional[MapStateStartedEventDetails], + "mapIterationStartedEventDetails": Optional[MapIterationEventDetails], + "mapIterationSucceededEventDetails": Optional[MapIterationEventDetails], + "mapIterationFailedEventDetails": Optional[MapIterationEventDetails], + "mapIterationAbortedEventDetails": Optional[MapIterationEventDetails], + "lambdaFunctionFailedEventDetails": Optional[LambdaFunctionFailedEventDetails], + "lambdaFunctionScheduleFailedEventDetails": Optional[ + LambdaFunctionScheduleFailedEventDetails + ], + "lambdaFunctionScheduledEventDetails": Optional[LambdaFunctionScheduledEventDetails], + "lambdaFunctionStartFailedEventDetails": Optional[LambdaFunctionStartFailedEventDetails], + "lambdaFunctionSucceededEventDetails": Optional[LambdaFunctionSucceededEventDetails], + "lambdaFunctionTimedOutEventDetails": Optional[LambdaFunctionTimedOutEventDetails], + "stateEnteredEventDetails": Optional[StateEnteredEventDetails], + "stateExitedEventDetails": Optional[StateExitedEventDetails], + "mapRunStartedEventDetails": Optional[MapRunStartedEventDetails], + "mapRunFailedEventDetails": Optional[MapRunFailedEventDetails], + "mapRunRedrivenEventDetails": Optional[MapRunRedrivenEventDetails], + "evaluationFailedEventDetails": Optional[EvaluationFailedEventDetails], + }, + total=False, +) +HistoryEventList = List[HistoryEvent] + + +class GetExecutionHistoryOutput(TypedDict, total=False): + events: HistoryEventList + nextToken: Optional[PageToken] + + +class InspectionDataResponse(TypedDict, total=False): + protocol: Optional[HTTPProtocol] + statusCode: Optional[HTTPStatusCode] + statusMessage: Optional[HTTPStatusMessage] + headers: Optional[HTTPHeaders] + body: Optional[HTTPBody] + + +class InspectionDataRequest(TypedDict, total=False): + protocol: Optional[HTTPProtocol] + method: Optional[HTTPMethod] + url: Optional[URL] + headers: Optional[HTTPHeaders] + body: Optional[HTTPBody] + + +class InspectionData(TypedDict, total=False): + input: Optional[SensitiveData] + afterArguments: Optional[SensitiveData] + afterInputPath: Optional[SensitiveData] + afterParameters: Optional[SensitiveData] + result: Optional[SensitiveData] + afterResultSelector: Optional[SensitiveData] + afterResultPath: Optional[SensitiveData] + request: Optional[InspectionDataRequest] + response: Optional[InspectionDataResponse] + variables: Optional[SensitiveData] + + +class ListActivitiesInput(ServiceRequest): + maxResults: Optional[PageSize] + nextToken: Optional[PageToken] + + +class ListActivitiesOutput(TypedDict, total=False): + activities: ActivityList + nextToken: Optional[PageToken] + + +class ListExecutionsInput(ServiceRequest): + 
stateMachineArn: Optional[Arn] + statusFilter: Optional[ExecutionStatus] + maxResults: Optional[PageSize] + nextToken: Optional[ListExecutionsPageToken] + mapRunArn: Optional[LongArn] + redriveFilter: Optional[ExecutionRedriveFilter] + + +class ListExecutionsOutput(TypedDict, total=False): + executions: ExecutionList + nextToken: Optional[ListExecutionsPageToken] + + +class ListMapRunsInput(ServiceRequest): + executionArn: Arn + maxResults: Optional[PageSize] + nextToken: Optional[PageToken] + + +class MapRunListItem(TypedDict, total=False): + executionArn: Arn + mapRunArn: LongArn + stateMachineArn: Arn + startDate: Timestamp + stopDate: Optional[Timestamp] + + +MapRunList = List[MapRunListItem] + + +class ListMapRunsOutput(TypedDict, total=False): + mapRuns: MapRunList + nextToken: Optional[PageToken] + + +class ListStateMachineAliasesInput(ServiceRequest): + stateMachineArn: Arn + nextToken: Optional[PageToken] + maxResults: Optional[PageSize] + + +class StateMachineAliasListItem(TypedDict, total=False): + stateMachineAliasArn: LongArn + creationDate: Timestamp + + +StateMachineAliasList = List[StateMachineAliasListItem] + + +class ListStateMachineAliasesOutput(TypedDict, total=False): + stateMachineAliases: StateMachineAliasList + nextToken: Optional[PageToken] + + +class ListStateMachineVersionsInput(ServiceRequest): + stateMachineArn: Arn + nextToken: Optional[PageToken] + maxResults: Optional[PageSize] + + +class StateMachineVersionListItem(TypedDict, total=False): + stateMachineVersionArn: LongArn + creationDate: Timestamp + + +StateMachineVersionList = List[StateMachineVersionListItem] + + +class ListStateMachineVersionsOutput(TypedDict, total=False): + stateMachineVersions: StateMachineVersionList + nextToken: Optional[PageToken] + + +class ListStateMachinesInput(ServiceRequest): + maxResults: Optional[PageSize] + nextToken: Optional[PageToken] + + +StateMachineListItem = TypedDict( + "StateMachineListItem", + { + "stateMachineArn": Arn, + "name": Name, + "type": StateMachineType, + "creationDate": Timestamp, + }, + total=False, +) +StateMachineList = List[StateMachineListItem] + + +class ListStateMachinesOutput(TypedDict, total=False): + stateMachines: StateMachineList + nextToken: Optional[PageToken] + + +class ListTagsForResourceInput(ServiceRequest): + resourceArn: Arn + + +class ListTagsForResourceOutput(TypedDict, total=False): + tags: Optional[TagList] + + +class PublishStateMachineVersionInput(ServiceRequest): + stateMachineArn: Arn + revisionId: Optional[RevisionId] + description: Optional[VersionDescription] + + +class PublishStateMachineVersionOutput(TypedDict, total=False): + creationDate: Timestamp + stateMachineVersionArn: Arn + + +class RedriveExecutionInput(ServiceRequest): + executionArn: Arn + clientToken: Optional[ClientToken] + + +class RedriveExecutionOutput(TypedDict, total=False): + redriveDate: Timestamp + + +class SendTaskFailureInput(ServiceRequest): + taskToken: TaskToken + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class SendTaskFailureOutput(TypedDict, total=False): + pass + + +class SendTaskHeartbeatInput(ServiceRequest): + taskToken: TaskToken + + +class SendTaskHeartbeatOutput(TypedDict, total=False): + pass + + +class SendTaskSuccessInput(ServiceRequest): + taskToken: TaskToken + output: SensitiveData + + +class SendTaskSuccessOutput(TypedDict, total=False): + pass + + +class StartExecutionInput(ServiceRequest): + stateMachineArn: Arn + name: Optional[Name] + input: Optional[SensitiveData] + traceHeader: 
Optional[TraceHeader] + + +class StartExecutionOutput(TypedDict, total=False): + executionArn: Arn + startDate: Timestamp + + +class StartSyncExecutionInput(ServiceRequest): + stateMachineArn: Arn + name: Optional[Name] + input: Optional[SensitiveData] + traceHeader: Optional[TraceHeader] + includedData: Optional[IncludedData] + + +class StartSyncExecutionOutput(TypedDict, total=False): + executionArn: Arn + stateMachineArn: Optional[Arn] + name: Optional[Name] + startDate: Timestamp + stopDate: Timestamp + status: SyncExecutionStatus + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + input: Optional[SensitiveData] + inputDetails: Optional[CloudWatchEventsExecutionDataDetails] + output: Optional[SensitiveData] + outputDetails: Optional[CloudWatchEventsExecutionDataDetails] + traceHeader: Optional[TraceHeader] + billingDetails: Optional[BillingDetails] + + +class StopExecutionInput(ServiceRequest): + executionArn: Arn + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + +class StopExecutionOutput(TypedDict, total=False): + stopDate: Timestamp + + +TagKeyList = List[TagKey] + + +class TagResourceInput(ServiceRequest): + resourceArn: Arn + tags: TagList + + +class TagResourceOutput(TypedDict, total=False): + pass + + +class TestStateInput(ServiceRequest): + definition: Definition + roleArn: Optional[Arn] + input: Optional[SensitiveData] + inspectionLevel: Optional[InspectionLevel] + revealSecrets: Optional[RevealSecrets] + variables: Optional[SensitiveData] + + +class TestStateOutput(TypedDict, total=False): + output: Optional[SensitiveData] + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + inspectionData: Optional[InspectionData] + nextState: Optional[StateName] + status: Optional[TestExecutionStatus] + + +class UntagResourceInput(ServiceRequest): + resourceArn: Arn + tagKeys: TagKeyList + + +class UntagResourceOutput(TypedDict, total=False): + pass + + +class UpdateMapRunInput(ServiceRequest): + mapRunArn: LongArn + maxConcurrency: Optional[MaxConcurrency] + toleratedFailurePercentage: Optional[ToleratedFailurePercentage] + toleratedFailureCount: Optional[ToleratedFailureCount] + + +class UpdateMapRunOutput(TypedDict, total=False): + pass + + +class UpdateStateMachineAliasInput(ServiceRequest): + stateMachineAliasArn: Arn + description: Optional[AliasDescription] + routingConfiguration: Optional[RoutingConfigurationList] + + +class UpdateStateMachineAliasOutput(TypedDict, total=False): + updateDate: Timestamp + + +class UpdateStateMachineInput(ServiceRequest): + stateMachineArn: Arn + definition: Optional[Definition] + roleArn: Optional[Arn] + loggingConfiguration: Optional[LoggingConfiguration] + tracingConfiguration: Optional[TracingConfiguration] + publish: Optional[Publish] + versionDescription: Optional[VersionDescription] + encryptionConfiguration: Optional[EncryptionConfiguration] + + +class UpdateStateMachineOutput(TypedDict, total=False): + updateDate: Timestamp + revisionId: Optional[RevisionId] + stateMachineVersionArn: Optional[Arn] + + +class ValidateStateMachineDefinitionDiagnostic(TypedDict, total=False): + severity: ValidateStateMachineDefinitionSeverity + code: ValidateStateMachineDefinitionCode + message: ValidateStateMachineDefinitionMessage + location: Optional[ValidateStateMachineDefinitionLocation] + + +ValidateStateMachineDefinitionDiagnosticList = List[ValidateStateMachineDefinitionDiagnostic] +ValidateStateMachineDefinitionInput = TypedDict( + "ValidateStateMachineDefinitionInput", + { + "definition": 
Definition, + "type": Optional[StateMachineType], + "severity": Optional[ValidateStateMachineDefinitionSeverity], + "maxResults": Optional[ValidateStateMachineDefinitionMaxResult], + }, + total=False, +) + + +class ValidateStateMachineDefinitionOutput(TypedDict, total=False): + result: ValidateStateMachineDefinitionResultCode + diagnostics: ValidateStateMachineDefinitionDiagnosticList + truncated: Optional[ValidateStateMachineDefinitionTruncated] + + +class StepfunctionsApi: + service = "stepfunctions" + version = "2016-11-23" + + @handler("CreateActivity") + def create_activity( + self, + context: RequestContext, + name: Name, + tags: TagList | None = None, + encryption_configuration: EncryptionConfiguration | None = None, + **kwargs, + ) -> CreateActivityOutput: + raise NotImplementedError + + @handler("CreateStateMachine", expand=False) + def create_state_machine( + self, context: RequestContext, request: CreateStateMachineInput, **kwargs + ) -> CreateStateMachineOutput: + raise NotImplementedError + + @handler("CreateStateMachineAlias") + def create_state_machine_alias( + self, + context: RequestContext, + name: CharacterRestrictedName, + routing_configuration: RoutingConfigurationList, + description: AliasDescription | None = None, + **kwargs, + ) -> CreateStateMachineAliasOutput: + raise NotImplementedError + + @handler("DeleteActivity") + def delete_activity( + self, context: RequestContext, activity_arn: Arn, **kwargs + ) -> DeleteActivityOutput: + raise NotImplementedError + + @handler("DeleteStateMachine") + def delete_state_machine( + self, context: RequestContext, state_machine_arn: Arn, **kwargs + ) -> DeleteStateMachineOutput: + raise NotImplementedError + + @handler("DeleteStateMachineAlias") + def delete_state_machine_alias( + self, context: RequestContext, state_machine_alias_arn: Arn, **kwargs + ) -> DeleteStateMachineAliasOutput: + raise NotImplementedError + + @handler("DeleteStateMachineVersion") + def delete_state_machine_version( + self, context: RequestContext, state_machine_version_arn: LongArn, **kwargs + ) -> DeleteStateMachineVersionOutput: + raise NotImplementedError + + @handler("DescribeActivity") + def describe_activity( + self, context: RequestContext, activity_arn: Arn, **kwargs + ) -> DescribeActivityOutput: + raise NotImplementedError + + @handler("DescribeExecution") + def describe_execution( + self, + context: RequestContext, + execution_arn: Arn, + included_data: IncludedData | None = None, + **kwargs, + ) -> DescribeExecutionOutput: + raise NotImplementedError + + @handler("DescribeMapRun") + def describe_map_run( + self, context: RequestContext, map_run_arn: LongArn, **kwargs + ) -> DescribeMapRunOutput: + raise NotImplementedError + + @handler("DescribeStateMachine") + def describe_state_machine( + self, + context: RequestContext, + state_machine_arn: Arn, + included_data: IncludedData | None = None, + **kwargs, + ) -> DescribeStateMachineOutput: + raise NotImplementedError + + @handler("DescribeStateMachineAlias") + def describe_state_machine_alias( + self, context: RequestContext, state_machine_alias_arn: Arn, **kwargs + ) -> DescribeStateMachineAliasOutput: + raise NotImplementedError + + @handler("DescribeStateMachineForExecution") + def describe_state_machine_for_execution( + self, + context: RequestContext, + execution_arn: Arn, + included_data: IncludedData | None = None, + **kwargs, + ) -> DescribeStateMachineForExecutionOutput: + raise NotImplementedError + + @handler("GetActivityTask") + def get_activity_task( + self, context: 
RequestContext, activity_arn: Arn, worker_name: Name | None = None, **kwargs + ) -> GetActivityTaskOutput: + raise NotImplementedError + + @handler("GetExecutionHistory") + def get_execution_history( + self, + context: RequestContext, + execution_arn: Arn, + max_results: PageSize | None = None, + reverse_order: ReverseOrder | None = None, + next_token: PageToken | None = None, + include_execution_data: IncludeExecutionDataGetExecutionHistory | None = None, + **kwargs, + ) -> GetExecutionHistoryOutput: + raise NotImplementedError + + @handler("ListActivities") + def list_activities( + self, + context: RequestContext, + max_results: PageSize | None = None, + next_token: PageToken | None = None, + **kwargs, + ) -> ListActivitiesOutput: + raise NotImplementedError + + @handler("ListExecutions") + def list_executions( + self, + context: RequestContext, + state_machine_arn: Arn | None = None, + status_filter: ExecutionStatus | None = None, + max_results: PageSize | None = None, + next_token: ListExecutionsPageToken | None = None, + map_run_arn: LongArn | None = None, + redrive_filter: ExecutionRedriveFilter | None = None, + **kwargs, + ) -> ListExecutionsOutput: + raise NotImplementedError + + @handler("ListMapRuns") + def list_map_runs( + self, + context: RequestContext, + execution_arn: Arn, + max_results: PageSize | None = None, + next_token: PageToken | None = None, + **kwargs, + ) -> ListMapRunsOutput: + raise NotImplementedError + + @handler("ListStateMachineAliases") + def list_state_machine_aliases( + self, + context: RequestContext, + state_machine_arn: Arn, + next_token: PageToken | None = None, + max_results: PageSize | None = None, + **kwargs, + ) -> ListStateMachineAliasesOutput: + raise NotImplementedError + + @handler("ListStateMachineVersions") + def list_state_machine_versions( + self, + context: RequestContext, + state_machine_arn: Arn, + next_token: PageToken | None = None, + max_results: PageSize | None = None, + **kwargs, + ) -> ListStateMachineVersionsOutput: + raise NotImplementedError + + @handler("ListStateMachines") + def list_state_machines( + self, + context: RequestContext, + max_results: PageSize | None = None, + next_token: PageToken | None = None, + **kwargs, + ) -> ListStateMachinesOutput: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: Arn, **kwargs + ) -> ListTagsForResourceOutput: + raise NotImplementedError + + @handler("PublishStateMachineVersion") + def publish_state_machine_version( + self, + context: RequestContext, + state_machine_arn: Arn, + revision_id: RevisionId | None = None, + description: VersionDescription | None = None, + **kwargs, + ) -> PublishStateMachineVersionOutput: + raise NotImplementedError + + @handler("RedriveExecution") + def redrive_execution( + self, + context: RequestContext, + execution_arn: Arn, + client_token: ClientToken | None = None, + **kwargs, + ) -> RedriveExecutionOutput: + raise NotImplementedError + + @handler("SendTaskFailure") + def send_task_failure( + self, + context: RequestContext, + task_token: TaskToken, + error: SensitiveError | None = None, + cause: SensitiveCause | None = None, + **kwargs, + ) -> SendTaskFailureOutput: + raise NotImplementedError + + @handler("SendTaskHeartbeat") + def send_task_heartbeat( + self, context: RequestContext, task_token: TaskToken, **kwargs + ) -> SendTaskHeartbeatOutput: + raise NotImplementedError + + @handler("SendTaskSuccess") + def send_task_success( + self, context: RequestContext, 
task_token: TaskToken, output: SensitiveData, **kwargs + ) -> SendTaskSuccessOutput: + raise NotImplementedError + + @handler("StartExecution") + def start_execution( + self, + context: RequestContext, + state_machine_arn: Arn, + name: Name | None = None, + input: SensitiveData | None = None, + trace_header: TraceHeader | None = None, + **kwargs, + ) -> StartExecutionOutput: + raise NotImplementedError + + @handler("StartSyncExecution") + def start_sync_execution( + self, + context: RequestContext, + state_machine_arn: Arn, + name: Name | None = None, + input: SensitiveData | None = None, + trace_header: TraceHeader | None = None, + included_data: IncludedData | None = None, + **kwargs, + ) -> StartSyncExecutionOutput: + raise NotImplementedError + + @handler("StopExecution") + def stop_execution( + self, + context: RequestContext, + execution_arn: Arn, + error: SensitiveError | None = None, + cause: SensitiveCause | None = None, + **kwargs, + ) -> StopExecutionOutput: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: Arn, tags: TagList, **kwargs + ) -> TagResourceOutput: + raise NotImplementedError + + @handler("TestState") + def test_state( + self, + context: RequestContext, + definition: Definition, + role_arn: Arn | None = None, + input: SensitiveData | None = None, + inspection_level: InspectionLevel | None = None, + reveal_secrets: RevealSecrets | None = None, + variables: SensitiveData | None = None, + **kwargs, + ) -> TestStateOutput: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, resource_arn: Arn, tag_keys: TagKeyList, **kwargs + ) -> UntagResourceOutput: + raise NotImplementedError + + @handler("UpdateMapRun") + def update_map_run( + self, + context: RequestContext, + map_run_arn: LongArn, + max_concurrency: MaxConcurrency | None = None, + tolerated_failure_percentage: ToleratedFailurePercentage | None = None, + tolerated_failure_count: ToleratedFailureCount | None = None, + **kwargs, + ) -> UpdateMapRunOutput: + raise NotImplementedError + + @handler("UpdateStateMachine") + def update_state_machine( + self, + context: RequestContext, + state_machine_arn: Arn, + definition: Definition | None = None, + role_arn: Arn | None = None, + logging_configuration: LoggingConfiguration | None = None, + tracing_configuration: TracingConfiguration | None = None, + publish: Publish | None = None, + version_description: VersionDescription | None = None, + encryption_configuration: EncryptionConfiguration | None = None, + **kwargs, + ) -> UpdateStateMachineOutput: + raise NotImplementedError + + @handler("UpdateStateMachineAlias") + def update_state_machine_alias( + self, + context: RequestContext, + state_machine_alias_arn: Arn, + description: AliasDescription | None = None, + routing_configuration: RoutingConfigurationList | None = None, + **kwargs, + ) -> UpdateStateMachineAliasOutput: + raise NotImplementedError + + @handler("ValidateStateMachineDefinition", expand=False) + def validate_state_machine_definition( + self, context: RequestContext, request: ValidateStateMachineDefinitionInput, **kwargs + ) -> ValidateStateMachineDefinitionOutput: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/sts/__init__.py b/localstack-core/localstack/aws/api/sts/__init__.py new file mode 100644 index 0000000000000..3a5e4c337c738 --- /dev/null +++ b/localstack-core/localstack/aws/api/sts/__init__.py @@ -0,0 +1,369 @@ +from datetime import 
datetime +from typing import List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +Audience = str +Issuer = str +NameQualifier = str +RootDurationSecondsType = int +SAMLAssertionType = str +Subject = str +SubjectType = str +TargetPrincipalType = str +accessKeyIdType = str +accessKeySecretType = str +accountType = str +arnType = str +assumedRoleIdType = str +clientTokenType = str +contextAssertionType = str +decodedMessageType = str +durationSecondsType = int +encodedMessageType = str +expiredIdentityTokenMessage = str +externalIdType = str +federatedIdType = str +idpCommunicationErrorMessage = str +idpRejectedClaimMessage = str +invalidAuthorizationMessage = str +invalidIdentityTokenMessage = str +malformedPolicyDocumentMessage = str +nonNegativeIntegerType = int +packedPolicyTooLargeMessage = str +regionDisabledMessage = str +roleDurationSecondsType = int +roleSessionNameType = str +serialNumberType = str +sessionPolicyDocumentType = str +sourceIdentityType = str +tagKeyType = str +tagValueType = str +tokenCodeType = str +tokenType = str +unrestrictedSessionPolicyDocumentType = str +urlType = str +userIdType = str +userNameType = str +webIdentitySubjectType = str + + +class ExpiredTokenException(ServiceException): + code: str = "ExpiredTokenException" + sender_fault: bool = True + status_code: int = 400 + + +class IDPCommunicationErrorException(ServiceException): + code: str = "IDPCommunicationError" + sender_fault: bool = True + status_code: int = 400 + + +class IDPRejectedClaimException(ServiceException): + code: str = "IDPRejectedClaim" + sender_fault: bool = True + status_code: int = 403 + + +class InvalidAuthorizationMessageException(ServiceException): + code: str = "InvalidAuthorizationMessageException" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidIdentityTokenException(ServiceException): + code: str = "InvalidIdentityToken" + sender_fault: bool = True + status_code: int = 400 + + +class MalformedPolicyDocumentException(ServiceException): + code: str = "MalformedPolicyDocument" + sender_fault: bool = True + status_code: int = 400 + + +class PackedPolicyTooLargeException(ServiceException): + code: str = "PackedPolicyTooLarge" + sender_fault: bool = True + status_code: int = 400 + + +class RegionDisabledException(ServiceException): + code: str = "RegionDisabledException" + sender_fault: bool = True + status_code: int = 403 + + +class ProvidedContext(TypedDict, total=False): + ProviderArn: Optional[arnType] + ContextAssertion: Optional[contextAssertionType] + + +ProvidedContextsListType = List[ProvidedContext] +tagKeyListType = List[tagKeyType] + + +class Tag(TypedDict, total=False): + Key: tagKeyType + Value: tagValueType + + +tagListType = List[Tag] + + +class PolicyDescriptorType(TypedDict, total=False): + arn: Optional[arnType] + + +policyDescriptorListType = List[PolicyDescriptorType] + + +class AssumeRoleRequest(ServiceRequest): + RoleArn: arnType + RoleSessionName: roleSessionNameType + PolicyArns: Optional[policyDescriptorListType] + Policy: Optional[unrestrictedSessionPolicyDocumentType] + DurationSeconds: Optional[roleDurationSecondsType] + Tags: Optional[tagListType] + TransitiveTagKeys: Optional[tagKeyListType] + ExternalId: Optional[externalIdType] + SerialNumber: Optional[serialNumberType] + TokenCode: Optional[tokenCodeType] + SourceIdentity: Optional[sourceIdentityType] + ProvidedContexts: Optional[ProvidedContextsListType] + + +class AssumedRoleUser(TypedDict, total=False): + 
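+    # Illustrative shape (hypothetical values): AssumedRoleId
+    # "AROAEXAMPLEID:session-name", Arn
+    # "arn:aws:sts::111122223333:assumed-role/role-name/session-name".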
AssumedRoleId: assumedRoleIdType + Arn: arnType + + +dateType = datetime + + +class Credentials(TypedDict, total=False): + AccessKeyId: accessKeyIdType + SecretAccessKey: accessKeySecretType + SessionToken: tokenType + Expiration: dateType + + +class AssumeRoleResponse(TypedDict, total=False): + Credentials: Optional[Credentials] + AssumedRoleUser: Optional[AssumedRoleUser] + PackedPolicySize: Optional[nonNegativeIntegerType] + SourceIdentity: Optional[sourceIdentityType] + + +class AssumeRoleWithSAMLRequest(ServiceRequest): + RoleArn: arnType + PrincipalArn: arnType + SAMLAssertion: SAMLAssertionType + PolicyArns: Optional[policyDescriptorListType] + Policy: Optional[sessionPolicyDocumentType] + DurationSeconds: Optional[roleDurationSecondsType] + + +class AssumeRoleWithSAMLResponse(TypedDict, total=False): + Credentials: Optional[Credentials] + AssumedRoleUser: Optional[AssumedRoleUser] + PackedPolicySize: Optional[nonNegativeIntegerType] + Subject: Optional[Subject] + SubjectType: Optional[SubjectType] + Issuer: Optional[Issuer] + Audience: Optional[Audience] + NameQualifier: Optional[NameQualifier] + SourceIdentity: Optional[sourceIdentityType] + + +class AssumeRoleWithWebIdentityRequest(ServiceRequest): + RoleArn: arnType + RoleSessionName: roleSessionNameType + WebIdentityToken: clientTokenType + ProviderId: Optional[urlType] + PolicyArns: Optional[policyDescriptorListType] + Policy: Optional[sessionPolicyDocumentType] + DurationSeconds: Optional[roleDurationSecondsType] + + +class AssumeRoleWithWebIdentityResponse(TypedDict, total=False): + Credentials: Optional[Credentials] + SubjectFromWebIdentityToken: Optional[webIdentitySubjectType] + AssumedRoleUser: Optional[AssumedRoleUser] + PackedPolicySize: Optional[nonNegativeIntegerType] + Provider: Optional[Issuer] + Audience: Optional[Audience] + SourceIdentity: Optional[sourceIdentityType] + + +class AssumeRootRequest(ServiceRequest): + TargetPrincipal: TargetPrincipalType + TaskPolicyArn: PolicyDescriptorType + DurationSeconds: Optional[RootDurationSecondsType] + + +class AssumeRootResponse(TypedDict, total=False): + Credentials: Optional[Credentials] + SourceIdentity: Optional[sourceIdentityType] + + +class DecodeAuthorizationMessageRequest(ServiceRequest): + EncodedMessage: encodedMessageType + + +class DecodeAuthorizationMessageResponse(TypedDict, total=False): + DecodedMessage: Optional[decodedMessageType] + + +class FederatedUser(TypedDict, total=False): + FederatedUserId: federatedIdType + Arn: arnType + + +class GetAccessKeyInfoRequest(ServiceRequest): + AccessKeyId: accessKeyIdType + + +class GetAccessKeyInfoResponse(TypedDict, total=False): + Account: Optional[accountType] + + +class GetCallerIdentityRequest(ServiceRequest): + pass + + +class GetCallerIdentityResponse(TypedDict, total=False): + UserId: Optional[userIdType] + Account: Optional[accountType] + Arn: Optional[arnType] + + +class GetFederationTokenRequest(ServiceRequest): + Name: userNameType + Policy: Optional[sessionPolicyDocumentType] + PolicyArns: Optional[policyDescriptorListType] + DurationSeconds: Optional[durationSecondsType] + Tags: Optional[tagListType] + + +class GetFederationTokenResponse(TypedDict, total=False): + Credentials: Optional[Credentials] + FederatedUser: Optional[FederatedUser] + PackedPolicySize: Optional[nonNegativeIntegerType] + + +class GetSessionTokenRequest(ServiceRequest): + DurationSeconds: Optional[durationSecondsType] + SerialNumber: Optional[serialNumberType] + TokenCode: Optional[tokenCodeType] + + +class 
GetSessionTokenResponse(TypedDict, total=False): + Credentials: Optional[Credentials] + + +class StsApi: + service = "sts" + version = "2011-06-15" + + @handler("AssumeRole") + def assume_role( + self, + context: RequestContext, + role_arn: arnType, + role_session_name: roleSessionNameType, + policy_arns: policyDescriptorListType | None = None, + policy: unrestrictedSessionPolicyDocumentType | None = None, + duration_seconds: roleDurationSecondsType | None = None, + tags: tagListType | None = None, + transitive_tag_keys: tagKeyListType | None = None, + external_id: externalIdType | None = None, + serial_number: serialNumberType | None = None, + token_code: tokenCodeType | None = None, + source_identity: sourceIdentityType | None = None, + provided_contexts: ProvidedContextsListType | None = None, + **kwargs, + ) -> AssumeRoleResponse: + raise NotImplementedError + + @handler("AssumeRoleWithSAML") + def assume_role_with_saml( + self, + context: RequestContext, + role_arn: arnType, + principal_arn: arnType, + saml_assertion: SAMLAssertionType, + policy_arns: policyDescriptorListType | None = None, + policy: sessionPolicyDocumentType | None = None, + duration_seconds: roleDurationSecondsType | None = None, + **kwargs, + ) -> AssumeRoleWithSAMLResponse: + raise NotImplementedError + + @handler("AssumeRoleWithWebIdentity") + def assume_role_with_web_identity( + self, + context: RequestContext, + role_arn: arnType, + role_session_name: roleSessionNameType, + web_identity_token: clientTokenType, + provider_id: urlType | None = None, + policy_arns: policyDescriptorListType | None = None, + policy: sessionPolicyDocumentType | None = None, + duration_seconds: roleDurationSecondsType | None = None, + **kwargs, + ) -> AssumeRoleWithWebIdentityResponse: + raise NotImplementedError + + @handler("AssumeRoot") + def assume_root( + self, + context: RequestContext, + target_principal: TargetPrincipalType, + task_policy_arn: PolicyDescriptorType, + duration_seconds: RootDurationSecondsType | None = None, + **kwargs, + ) -> AssumeRootResponse: + raise NotImplementedError + + @handler("DecodeAuthorizationMessage") + def decode_authorization_message( + self, context: RequestContext, encoded_message: encodedMessageType, **kwargs + ) -> DecodeAuthorizationMessageResponse: + raise NotImplementedError + + @handler("GetAccessKeyInfo") + def get_access_key_info( + self, context: RequestContext, access_key_id: accessKeyIdType, **kwargs + ) -> GetAccessKeyInfoResponse: + raise NotImplementedError + + @handler("GetCallerIdentity") + def get_caller_identity(self, context: RequestContext, **kwargs) -> GetCallerIdentityResponse: + raise NotImplementedError + + @handler("GetFederationToken") + def get_federation_token( + self, + context: RequestContext, + name: userNameType, + policy: sessionPolicyDocumentType | None = None, + policy_arns: policyDescriptorListType | None = None, + duration_seconds: durationSecondsType | None = None, + tags: tagListType | None = None, + **kwargs, + ) -> GetFederationTokenResponse: + raise NotImplementedError + + @handler("GetSessionToken") + def get_session_token( + self, + context: RequestContext, + duration_seconds: durationSecondsType | None = None, + serial_number: serialNumberType | None = None, + token_code: tokenCodeType | None = None, + **kwargs, + ) -> GetSessionTokenResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/support/__init__.py b/localstack-core/localstack/aws/api/support/__init__.py new file mode 100644 index 0000000000000..c1575127c69e6 
--- /dev/null +++ b/localstack-core/localstack/aws/api/support/__init__.py @@ -0,0 +1,622 @@ +from typing import List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +AfterTime = str +AttachmentId = str +AttachmentSetId = str +AvailabilityErrorMessage = str +BeforeTime = str +Boolean = bool +CaseId = str +CaseStatus = str +CategoryCode = str +CategoryName = str +CcEmailAddress = str +Code = str +CommunicationBody = str +Display = str +DisplayId = str +Double = float +EndTime = str +ErrorMessage = str +ExpiryTime = str +FileName = str +IncludeCommunications = bool +IncludeResolvedCases = bool +IssueType = str +Language = str +MaxResults = int +NextToken = str +Result = bool +ServiceCode = str +ServiceName = str +SeverityCode = str +SeverityLevelCode = str +SeverityLevelName = str +StartTime = str +Status = str +String = str +Subject = str +SubmittedBy = str +TimeCreated = str +Type = str +ValidatedCategoryCode = str +ValidatedCommunicationBody = str +ValidatedDateTime = str +ValidatedIssueTypeString = str +ValidatedLanguageAvailability = str +ValidatedServiceCode = str + + +class AttachmentIdNotFound(ServiceException): + code: str = "AttachmentIdNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class AttachmentLimitExceeded(ServiceException): + code: str = "AttachmentLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class AttachmentSetExpired(ServiceException): + code: str = "AttachmentSetExpired" + sender_fault: bool = False + status_code: int = 400 + + +class AttachmentSetIdNotFound(ServiceException): + code: str = "AttachmentSetIdNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class AttachmentSetSizeLimitExceeded(ServiceException): + code: str = "AttachmentSetSizeLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class CaseCreationLimitExceeded(ServiceException): + code: str = "CaseCreationLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class CaseIdNotFound(ServiceException): + code: str = "CaseIdNotFound" + sender_fault: bool = False + status_code: int = 400 + + +class DescribeAttachmentLimitExceeded(ServiceException): + code: str = "DescribeAttachmentLimitExceeded" + sender_fault: bool = False + status_code: int = 400 + + +class InternalServerError(ServiceException): + code: str = "InternalServerError" + sender_fault: bool = False + status_code: int = 400 + + +class ThrottlingException(ServiceException): + code: str = "ThrottlingException" + sender_fault: bool = False + status_code: int = 400 + + +Data = bytes + + +class Attachment(TypedDict, total=False): + fileName: Optional[FileName] + data: Optional[Data] + + +Attachments = List[Attachment] + + +class AddAttachmentsToSetRequest(ServiceRequest): + attachmentSetId: Optional[AttachmentSetId] + attachments: Attachments + + +class AddAttachmentsToSetResponse(TypedDict, total=False): + attachmentSetId: Optional[AttachmentSetId] + expiryTime: Optional[ExpiryTime] + + +CcEmailAddressList = List[CcEmailAddress] + + +class AddCommunicationToCaseRequest(ServiceRequest): + caseId: Optional[CaseId] + communicationBody: CommunicationBody + ccEmailAddresses: Optional[CcEmailAddressList] + attachmentSetId: Optional[AttachmentSetId] + + +class AddCommunicationToCaseResponse(TypedDict, total=False): + result: Optional[Result] + + +class AttachmentDetails(TypedDict, total=False): + attachmentId: Optional[AttachmentId] + fileName: Optional[FileName] + + +AttachmentSet = 
List[AttachmentDetails] + + +class Communication(TypedDict, total=False): + caseId: Optional[CaseId] + body: Optional[ValidatedCommunicationBody] + submittedBy: Optional[SubmittedBy] + timeCreated: Optional[TimeCreated] + attachmentSet: Optional[AttachmentSet] + + +CommunicationList = List[Communication] + + +class RecentCaseCommunications(TypedDict, total=False): + communications: Optional[CommunicationList] + nextToken: Optional[NextToken] + + +class CaseDetails(TypedDict, total=False): + caseId: Optional[CaseId] + displayId: Optional[DisplayId] + subject: Optional[Subject] + status: Optional[Status] + serviceCode: Optional[ServiceCode] + categoryCode: Optional[CategoryCode] + severityCode: Optional[SeverityCode] + submittedBy: Optional[SubmittedBy] + timeCreated: Optional[TimeCreated] + recentCommunications: Optional[RecentCaseCommunications] + ccEmailAddresses: Optional[CcEmailAddressList] + language: Optional[Language] + + +CaseIdList = List[CaseId] +CaseList = List[CaseDetails] + + +class Category(TypedDict, total=False): + code: Optional[CategoryCode] + name: Optional[CategoryName] + + +CategoryList = List[Category] + + +class DateInterval(TypedDict, total=False): + startDateTime: Optional[ValidatedDateTime] + endDateTime: Optional[ValidatedDateTime] + + +DatesWithoutSupportList = List[DateInterval] + + +class SupportedHour(TypedDict, total=False): + startTime: Optional[StartTime] + endTime: Optional[EndTime] + + +SupportedHoursList = List[SupportedHour] +CommunicationTypeOptions = TypedDict( + "CommunicationTypeOptions", + { + "type": Optional[Type], + "supportedHours": Optional[SupportedHoursList], + "datesWithoutSupport": Optional[DatesWithoutSupportList], + }, + total=False, +) +CommunicationTypeOptionsList = List[CommunicationTypeOptions] + + +class CreateCaseRequest(ServiceRequest): + subject: Subject + serviceCode: Optional[ServiceCode] + severityCode: Optional[SeverityCode] + categoryCode: Optional[CategoryCode] + communicationBody: CommunicationBody + ccEmailAddresses: Optional[CcEmailAddressList] + language: Optional[Language] + issueType: Optional[IssueType] + attachmentSetId: Optional[AttachmentSetId] + + +class CreateCaseResponse(TypedDict, total=False): + caseId: Optional[CaseId] + + +class DescribeAttachmentRequest(ServiceRequest): + attachmentId: AttachmentId + + +class DescribeAttachmentResponse(TypedDict, total=False): + attachment: Optional[Attachment] + + +class DescribeCasesRequest(ServiceRequest): + caseIdList: Optional[CaseIdList] + displayId: Optional[DisplayId] + afterTime: Optional[AfterTime] + beforeTime: Optional[BeforeTime] + includeResolvedCases: Optional[IncludeResolvedCases] + nextToken: Optional[NextToken] + maxResults: Optional[MaxResults] + language: Optional[Language] + includeCommunications: Optional[IncludeCommunications] + + +class DescribeCasesResponse(TypedDict, total=False): + cases: Optional[CaseList] + nextToken: Optional[NextToken] + + +class DescribeCommunicationsRequest(ServiceRequest): + caseId: CaseId + beforeTime: Optional[BeforeTime] + afterTime: Optional[AfterTime] + nextToken: Optional[NextToken] + maxResults: Optional[MaxResults] + + +class DescribeCommunicationsResponse(TypedDict, total=False): + communications: Optional[CommunicationList] + nextToken: Optional[NextToken] + + +class DescribeCreateCaseOptionsRequest(ServiceRequest): + issueType: IssueType + serviceCode: ServiceCode + language: Language + categoryCode: CategoryCode + + +class DescribeCreateCaseOptionsResponse(TypedDict, total=False): + languageAvailability: 
Optional[ValidatedLanguageAvailability] + communicationTypes: Optional[CommunicationTypeOptionsList] + + +ServiceCodeList = List[ServiceCode] + + +class DescribeServicesRequest(ServiceRequest): + serviceCodeList: Optional[ServiceCodeList] + language: Optional[Language] + + +class Service(TypedDict, total=False): + code: Optional[ServiceCode] + name: Optional[ServiceName] + categories: Optional[CategoryList] + + +ServiceList = List[Service] + + +class DescribeServicesResponse(TypedDict, total=False): + services: Optional[ServiceList] + + +class DescribeSeverityLevelsRequest(ServiceRequest): + language: Optional[Language] + + +class SeverityLevel(TypedDict, total=False): + code: Optional[SeverityLevelCode] + name: Optional[SeverityLevelName] + + +SeverityLevelsList = List[SeverityLevel] + + +class DescribeSeverityLevelsResponse(TypedDict, total=False): + severityLevels: Optional[SeverityLevelsList] + + +class DescribeSupportedLanguagesRequest(ServiceRequest): + issueType: ValidatedIssueTypeString + serviceCode: ValidatedServiceCode + categoryCode: ValidatedCategoryCode + + +class SupportedLanguage(TypedDict, total=False): + code: Optional[Code] + language: Optional[Language] + display: Optional[Display] + + +SupportedLanguagesList = List[SupportedLanguage] + + +class DescribeSupportedLanguagesResponse(TypedDict, total=False): + supportedLanguages: Optional[SupportedLanguagesList] + + +StringList = List[String] + + +class DescribeTrustedAdvisorCheckRefreshStatusesRequest(ServiceRequest): + checkIds: StringList + + +Long = int + + +class TrustedAdvisorCheckRefreshStatus(TypedDict, total=False): + checkId: String + status: String + millisUntilNextRefreshable: Long + + +TrustedAdvisorCheckRefreshStatusList = List[TrustedAdvisorCheckRefreshStatus] + + +class DescribeTrustedAdvisorCheckRefreshStatusesResponse(TypedDict, total=False): + statuses: TrustedAdvisorCheckRefreshStatusList + + +class DescribeTrustedAdvisorCheckResultRequest(ServiceRequest): + checkId: String + language: Optional[String] + + +class TrustedAdvisorResourceDetail(TypedDict, total=False): + status: String + region: Optional[String] + resourceId: String + isSuppressed: Optional[Boolean] + metadata: StringList + + +TrustedAdvisorResourceDetailList = List[TrustedAdvisorResourceDetail] + + +class TrustedAdvisorCostOptimizingSummary(TypedDict, total=False): + estimatedMonthlySavings: Double + estimatedPercentMonthlySavings: Double + + +class TrustedAdvisorCategorySpecificSummary(TypedDict, total=False): + costOptimizing: Optional[TrustedAdvisorCostOptimizingSummary] + + +class TrustedAdvisorResourcesSummary(TypedDict, total=False): + resourcesProcessed: Long + resourcesFlagged: Long + resourcesIgnored: Long + resourcesSuppressed: Long + + +class TrustedAdvisorCheckResult(TypedDict, total=False): + checkId: String + timestamp: String + status: String + resourcesSummary: TrustedAdvisorResourcesSummary + categorySpecificSummary: TrustedAdvisorCategorySpecificSummary + flaggedResources: TrustedAdvisorResourceDetailList + + +class DescribeTrustedAdvisorCheckResultResponse(TypedDict, total=False): + result: Optional[TrustedAdvisorCheckResult] + + +class DescribeTrustedAdvisorCheckSummariesRequest(ServiceRequest): + checkIds: StringList + + +class TrustedAdvisorCheckSummary(TypedDict, total=False): + checkId: String + timestamp: String + status: String + hasFlaggedResources: Optional[Boolean] + resourcesSummary: TrustedAdvisorResourcesSummary + categorySpecificSummary: TrustedAdvisorCategorySpecificSummary + + 
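+# Illustrative sketch only (the provider name below is hypothetical, not part
+# of this file): a LocalStack provider implements these operations by
+# subclassing the SupportApi scaffold defined further below and overriding
+# its NotImplementedError stubs, e.g.:
+#
+#   class SupportProvider(SupportApi):
+#       def resolve_case(self, context, case_id=None, **kwargs):
+#           # mark the case resolved in the provider's own case store
+#           return ResolveCaseResponse(
+#               initialCaseStatus="opened", finalCaseStatus="resolved"
+#           )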
+TrustedAdvisorCheckSummaryList = List[TrustedAdvisorCheckSummary] + + +class DescribeTrustedAdvisorCheckSummariesResponse(TypedDict, total=False): + summaries: TrustedAdvisorCheckSummaryList + + +class DescribeTrustedAdvisorChecksRequest(ServiceRequest): + language: String + + +class TrustedAdvisorCheckDescription(TypedDict, total=False): + id: String + name: String + description: String + category: String + metadata: StringList + + +TrustedAdvisorCheckList = List[TrustedAdvisorCheckDescription] + + +class DescribeTrustedAdvisorChecksResponse(TypedDict, total=False): + checks: TrustedAdvisorCheckList + + +class RefreshTrustedAdvisorCheckRequest(ServiceRequest): + checkId: String + + +class RefreshTrustedAdvisorCheckResponse(TypedDict, total=False): + status: TrustedAdvisorCheckRefreshStatus + + +class ResolveCaseRequest(ServiceRequest): + caseId: Optional[CaseId] + + +class ResolveCaseResponse(TypedDict, total=False): + initialCaseStatus: Optional[CaseStatus] + finalCaseStatus: Optional[CaseStatus] + + +class SupportApi: + service = "support" + version = "2013-04-15" + + @handler("AddAttachmentsToSet") + def add_attachments_to_set( + self, + context: RequestContext, + attachments: Attachments, + attachment_set_id: AttachmentSetId | None = None, + **kwargs, + ) -> AddAttachmentsToSetResponse: + raise NotImplementedError + + @handler("AddCommunicationToCase") + def add_communication_to_case( + self, + context: RequestContext, + communication_body: CommunicationBody, + case_id: CaseId | None = None, + cc_email_addresses: CcEmailAddressList | None = None, + attachment_set_id: AttachmentSetId | None = None, + **kwargs, + ) -> AddCommunicationToCaseResponse: + raise NotImplementedError + + @handler("CreateCase") + def create_case( + self, + context: RequestContext, + subject: Subject, + communication_body: CommunicationBody, + service_code: ServiceCode | None = None, + severity_code: SeverityCode | None = None, + category_code: CategoryCode | None = None, + cc_email_addresses: CcEmailAddressList | None = None, + language: Language | None = None, + issue_type: IssueType | None = None, + attachment_set_id: AttachmentSetId | None = None, + **kwargs, + ) -> CreateCaseResponse: + raise NotImplementedError + + @handler("DescribeAttachment") + def describe_attachment( + self, context: RequestContext, attachment_id: AttachmentId, **kwargs + ) -> DescribeAttachmentResponse: + raise NotImplementedError + + @handler("DescribeCases") + def describe_cases( + self, + context: RequestContext, + case_id_list: CaseIdList | None = None, + display_id: DisplayId | None = None, + after_time: AfterTime | None = None, + before_time: BeforeTime | None = None, + include_resolved_cases: IncludeResolvedCases | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + language: Language | None = None, + include_communications: IncludeCommunications | None = None, + **kwargs, + ) -> DescribeCasesResponse: + raise NotImplementedError + + @handler("DescribeCommunications") + def describe_communications( + self, + context: RequestContext, + case_id: CaseId, + before_time: BeforeTime | None = None, + after_time: AfterTime | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> DescribeCommunicationsResponse: + raise NotImplementedError + + @handler("DescribeCreateCaseOptions") + def describe_create_case_options( + self, + context: RequestContext, + issue_type: IssueType, + service_code: ServiceCode, + language: Language, + 
category_code: CategoryCode, + **kwargs, + ) -> DescribeCreateCaseOptionsResponse: + raise NotImplementedError + + @handler("DescribeServices") + def describe_services( + self, + context: RequestContext, + service_code_list: ServiceCodeList | None = None, + language: Language | None = None, + **kwargs, + ) -> DescribeServicesResponse: + raise NotImplementedError + + @handler("DescribeSeverityLevels") + def describe_severity_levels( + self, context: RequestContext, language: Language | None = None, **kwargs + ) -> DescribeSeverityLevelsResponse: + raise NotImplementedError + + @handler("DescribeSupportedLanguages") + def describe_supported_languages( + self, + context: RequestContext, + issue_type: ValidatedIssueTypeString, + service_code: ValidatedServiceCode, + category_code: ValidatedCategoryCode, + **kwargs, + ) -> DescribeSupportedLanguagesResponse: + raise NotImplementedError + + @handler("DescribeTrustedAdvisorCheckRefreshStatuses") + def describe_trusted_advisor_check_refresh_statuses( + self, context: RequestContext, check_ids: StringList, **kwargs + ) -> DescribeTrustedAdvisorCheckRefreshStatusesResponse: + raise NotImplementedError + + @handler("DescribeTrustedAdvisorCheckResult") + def describe_trusted_advisor_check_result( + self, context: RequestContext, check_id: String, language: String | None = None, **kwargs + ) -> DescribeTrustedAdvisorCheckResultResponse: + raise NotImplementedError + + @handler("DescribeTrustedAdvisorCheckSummaries") + def describe_trusted_advisor_check_summaries( + self, context: RequestContext, check_ids: StringList, **kwargs + ) -> DescribeTrustedAdvisorCheckSummariesResponse: + raise NotImplementedError + + @handler("DescribeTrustedAdvisorChecks") + def describe_trusted_advisor_checks( + self, context: RequestContext, language: String, **kwargs + ) -> DescribeTrustedAdvisorChecksResponse: + raise NotImplementedError + + @handler("RefreshTrustedAdvisorCheck") + def refresh_trusted_advisor_check( + self, context: RequestContext, check_id: String, **kwargs + ) -> RefreshTrustedAdvisorCheckResponse: + raise NotImplementedError + + @handler("ResolveCase") + def resolve_case( + self, context: RequestContext, case_id: CaseId | None = None, **kwargs + ) -> ResolveCaseResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/swf/__init__.py b/localstack-core/localstack/aws/api/swf/__init__.py new file mode 100644 index 0000000000000..23653779f7e9f --- /dev/null +++ b/localstack-core/localstack/aws/api/swf/__init__.py @@ -0,0 +1,1861 @@ +from datetime import datetime +from enum import StrEnum +from typing import List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +ActivityId = str +Arn = str +Canceled = bool +CauseMessage = str +Count = int +Data = str +Description = str +DomainName = str +DurationInDays = str +DurationInSeconds = str +DurationInSecondsOptional = str +ErrorMessage = str +FailureReason = str +FunctionId = str +FunctionInput = str +FunctionName = str +Identity = str +LimitedData = str +MarkerName = str +Name = str +OpenDecisionTasksCount = int +PageSize = int +PageToken = str +ResourceTagKey = str +ResourceTagValue = str +ReverseOrder = bool +SignalName = str +StartAtPreviousStartedEvent = bool +Tag = str +TaskPriority = str +TaskToken = str +TerminateReason = str +TimerId = str +Truncated = bool +Version = str +VersionOptional = str +WorkflowId = str +WorkflowRunId = str +WorkflowRunIdOptional = str + + +class ActivityTaskTimeoutType(StrEnum): + 
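+    # Timeout kind reported as timeoutType in SWF ActivityTaskTimedOut
+    # history events.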
START_TO_CLOSE = "START_TO_CLOSE" + SCHEDULE_TO_START = "SCHEDULE_TO_START" + SCHEDULE_TO_CLOSE = "SCHEDULE_TO_CLOSE" + HEARTBEAT = "HEARTBEAT" + + +class CancelTimerFailedCause(StrEnum): + TIMER_ID_UNKNOWN = "TIMER_ID_UNKNOWN" + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class CancelWorkflowExecutionFailedCause(StrEnum): + UNHANDLED_DECISION = "UNHANDLED_DECISION" + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class ChildPolicy(StrEnum): + TERMINATE = "TERMINATE" + REQUEST_CANCEL = "REQUEST_CANCEL" + ABANDON = "ABANDON" + + +class CloseStatus(StrEnum): + COMPLETED = "COMPLETED" + FAILED = "FAILED" + CANCELED = "CANCELED" + TERMINATED = "TERMINATED" + CONTINUED_AS_NEW = "CONTINUED_AS_NEW" + TIMED_OUT = "TIMED_OUT" + + +class CompleteWorkflowExecutionFailedCause(StrEnum): + UNHANDLED_DECISION = "UNHANDLED_DECISION" + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class ContinueAsNewWorkflowExecutionFailedCause(StrEnum): + UNHANDLED_DECISION = "UNHANDLED_DECISION" + WORKFLOW_TYPE_DEPRECATED = "WORKFLOW_TYPE_DEPRECATED" + WORKFLOW_TYPE_DOES_NOT_EXIST = "WORKFLOW_TYPE_DOES_NOT_EXIST" + DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED = ( + "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" + ) + DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" + DEFAULT_TASK_LIST_UNDEFINED = "DEFAULT_TASK_LIST_UNDEFINED" + DEFAULT_CHILD_POLICY_UNDEFINED = "DEFAULT_CHILD_POLICY_UNDEFINED" + CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED = ( + "CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED" + ) + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class DecisionTaskTimeoutType(StrEnum): + START_TO_CLOSE = "START_TO_CLOSE" + SCHEDULE_TO_START = "SCHEDULE_TO_START" + + +class DecisionType(StrEnum): + ScheduleActivityTask = "ScheduleActivityTask" + RequestCancelActivityTask = "RequestCancelActivityTask" + CompleteWorkflowExecution = "CompleteWorkflowExecution" + FailWorkflowExecution = "FailWorkflowExecution" + CancelWorkflowExecution = "CancelWorkflowExecution" + ContinueAsNewWorkflowExecution = "ContinueAsNewWorkflowExecution" + RecordMarker = "RecordMarker" + StartTimer = "StartTimer" + CancelTimer = "CancelTimer" + SignalExternalWorkflowExecution = "SignalExternalWorkflowExecution" + RequestCancelExternalWorkflowExecution = "RequestCancelExternalWorkflowExecution" + StartChildWorkflowExecution = "StartChildWorkflowExecution" + ScheduleLambdaFunction = "ScheduleLambdaFunction" + + +class EventType(StrEnum): + WorkflowExecutionStarted = "WorkflowExecutionStarted" + WorkflowExecutionCancelRequested = "WorkflowExecutionCancelRequested" + WorkflowExecutionCompleted = "WorkflowExecutionCompleted" + CompleteWorkflowExecutionFailed = "CompleteWorkflowExecutionFailed" + WorkflowExecutionFailed = "WorkflowExecutionFailed" + FailWorkflowExecutionFailed = "FailWorkflowExecutionFailed" + WorkflowExecutionTimedOut = "WorkflowExecutionTimedOut" + WorkflowExecutionCanceled = "WorkflowExecutionCanceled" + CancelWorkflowExecutionFailed = "CancelWorkflowExecutionFailed" + WorkflowExecutionContinuedAsNew = "WorkflowExecutionContinuedAsNew" + ContinueAsNewWorkflowExecutionFailed = "ContinueAsNewWorkflowExecutionFailed" + WorkflowExecutionTerminated = "WorkflowExecutionTerminated" + DecisionTaskScheduled = "DecisionTaskScheduled" + DecisionTaskStarted = "DecisionTaskStarted" + DecisionTaskCompleted = "DecisionTaskCompleted" + DecisionTaskTimedOut = "DecisionTaskTimedOut" + ActivityTaskScheduled = "ActivityTaskScheduled" + 
ScheduleActivityTaskFailed = "ScheduleActivityTaskFailed" + ActivityTaskStarted = "ActivityTaskStarted" + ActivityTaskCompleted = "ActivityTaskCompleted" + ActivityTaskFailed = "ActivityTaskFailed" + ActivityTaskTimedOut = "ActivityTaskTimedOut" + ActivityTaskCanceled = "ActivityTaskCanceled" + ActivityTaskCancelRequested = "ActivityTaskCancelRequested" + RequestCancelActivityTaskFailed = "RequestCancelActivityTaskFailed" + WorkflowExecutionSignaled = "WorkflowExecutionSignaled" + MarkerRecorded = "MarkerRecorded" + RecordMarkerFailed = "RecordMarkerFailed" + TimerStarted = "TimerStarted" + StartTimerFailed = "StartTimerFailed" + TimerFired = "TimerFired" + TimerCanceled = "TimerCanceled" + CancelTimerFailed = "CancelTimerFailed" + StartChildWorkflowExecutionInitiated = "StartChildWorkflowExecutionInitiated" + StartChildWorkflowExecutionFailed = "StartChildWorkflowExecutionFailed" + ChildWorkflowExecutionStarted = "ChildWorkflowExecutionStarted" + ChildWorkflowExecutionCompleted = "ChildWorkflowExecutionCompleted" + ChildWorkflowExecutionFailed = "ChildWorkflowExecutionFailed" + ChildWorkflowExecutionTimedOut = "ChildWorkflowExecutionTimedOut" + ChildWorkflowExecutionCanceled = "ChildWorkflowExecutionCanceled" + ChildWorkflowExecutionTerminated = "ChildWorkflowExecutionTerminated" + SignalExternalWorkflowExecutionInitiated = "SignalExternalWorkflowExecutionInitiated" + SignalExternalWorkflowExecutionFailed = "SignalExternalWorkflowExecutionFailed" + ExternalWorkflowExecutionSignaled = "ExternalWorkflowExecutionSignaled" + RequestCancelExternalWorkflowExecutionInitiated = ( + "RequestCancelExternalWorkflowExecutionInitiated" + ) + RequestCancelExternalWorkflowExecutionFailed = "RequestCancelExternalWorkflowExecutionFailed" + ExternalWorkflowExecutionCancelRequested = "ExternalWorkflowExecutionCancelRequested" + LambdaFunctionScheduled = "LambdaFunctionScheduled" + LambdaFunctionStarted = "LambdaFunctionStarted" + LambdaFunctionCompleted = "LambdaFunctionCompleted" + LambdaFunctionFailed = "LambdaFunctionFailed" + LambdaFunctionTimedOut = "LambdaFunctionTimedOut" + ScheduleLambdaFunctionFailed = "ScheduleLambdaFunctionFailed" + StartLambdaFunctionFailed = "StartLambdaFunctionFailed" + + +class ExecutionStatus(StrEnum): + OPEN = "OPEN" + CLOSED = "CLOSED" + + +class FailWorkflowExecutionFailedCause(StrEnum): + UNHANDLED_DECISION = "UNHANDLED_DECISION" + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class LambdaFunctionTimeoutType(StrEnum): + START_TO_CLOSE = "START_TO_CLOSE" + + +class RecordMarkerFailedCause(StrEnum): + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class RegistrationStatus(StrEnum): + REGISTERED = "REGISTERED" + DEPRECATED = "DEPRECATED" + + +class RequestCancelActivityTaskFailedCause(StrEnum): + ACTIVITY_ID_UNKNOWN = "ACTIVITY_ID_UNKNOWN" + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class RequestCancelExternalWorkflowExecutionFailedCause(StrEnum): + UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" + REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED = ( + "REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" + ) + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class ScheduleActivityTaskFailedCause(StrEnum): + ACTIVITY_TYPE_DEPRECATED = "ACTIVITY_TYPE_DEPRECATED" + ACTIVITY_TYPE_DOES_NOT_EXIST = "ACTIVITY_TYPE_DOES_NOT_EXIST" + ACTIVITY_ID_ALREADY_IN_USE = "ACTIVITY_ID_ALREADY_IN_USE" + OPEN_ACTIVITIES_LIMIT_EXCEEDED = "OPEN_ACTIVITIES_LIMIT_EXCEEDED" + ACTIVITY_CREATION_RATE_EXCEEDED = 
"ACTIVITY_CREATION_RATE_EXCEEDED" + DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED = "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED" + DEFAULT_TASK_LIST_UNDEFINED = "DEFAULT_TASK_LIST_UNDEFINED" + DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED = "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED" + DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED = "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED" + DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED = "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED" + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class ScheduleLambdaFunctionFailedCause(StrEnum): + ID_ALREADY_IN_USE = "ID_ALREADY_IN_USE" + OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED = "OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED" + LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED = "LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED" + LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION = "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" + + +class SignalExternalWorkflowExecutionFailedCause(StrEnum): + UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" + SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED = ( + "SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" + ) + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class StartChildWorkflowExecutionFailedCause(StrEnum): + WORKFLOW_TYPE_DOES_NOT_EXIST = "WORKFLOW_TYPE_DOES_NOT_EXIST" + WORKFLOW_TYPE_DEPRECATED = "WORKFLOW_TYPE_DEPRECATED" + OPEN_CHILDREN_LIMIT_EXCEEDED = "OPEN_CHILDREN_LIMIT_EXCEEDED" + OPEN_WORKFLOWS_LIMIT_EXCEEDED = "OPEN_WORKFLOWS_LIMIT_EXCEEDED" + CHILD_CREATION_RATE_EXCEEDED = "CHILD_CREATION_RATE_EXCEEDED" + WORKFLOW_ALREADY_RUNNING = "WORKFLOW_ALREADY_RUNNING" + DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED = ( + "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" + ) + DEFAULT_TASK_LIST_UNDEFINED = "DEFAULT_TASK_LIST_UNDEFINED" + DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" + DEFAULT_CHILD_POLICY_UNDEFINED = "DEFAULT_CHILD_POLICY_UNDEFINED" + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class StartLambdaFunctionFailedCause(StrEnum): + ASSUME_ROLE_FAILED = "ASSUME_ROLE_FAILED" + + +class StartTimerFailedCause(StrEnum): + TIMER_ID_ALREADY_IN_USE = "TIMER_ID_ALREADY_IN_USE" + OPEN_TIMERS_LIMIT_EXCEEDED = "OPEN_TIMERS_LIMIT_EXCEEDED" + TIMER_CREATION_RATE_EXCEEDED = "TIMER_CREATION_RATE_EXCEEDED" + OPERATION_NOT_PERMITTED = "OPERATION_NOT_PERMITTED" + + +class WorkflowExecutionCancelRequestedCause(StrEnum): + CHILD_POLICY_APPLIED = "CHILD_POLICY_APPLIED" + + +class WorkflowExecutionTerminatedCause(StrEnum): + CHILD_POLICY_APPLIED = "CHILD_POLICY_APPLIED" + EVENT_LIMIT_EXCEEDED = "EVENT_LIMIT_EXCEEDED" + OPERATOR_INITIATED = "OPERATOR_INITIATED" + + +class WorkflowExecutionTimeoutType(StrEnum): + START_TO_CLOSE = "START_TO_CLOSE" + + +class DefaultUndefinedFault(ServiceException): + code: str = "DefaultUndefinedFault" + sender_fault: bool = False + status_code: int = 400 + + +class DomainAlreadyExistsFault(ServiceException): + code: str = "DomainAlreadyExistsFault" + sender_fault: bool = False + status_code: int = 400 + + +class DomainDeprecatedFault(ServiceException): + code: str = "DomainDeprecatedFault" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededFault(ServiceException): + code: str = "LimitExceededFault" + sender_fault: bool = False + status_code: int = 400 + + +class OperationNotPermittedFault(ServiceException): + code: str = "OperationNotPermittedFault" + sender_fault: bool = False + status_code: int = 400 + + +class TooManyTagsFault(ServiceException): + code: str = "TooManyTagsFault" + 
sender_fault: bool = False + status_code: int = 400 + + +class TypeAlreadyExistsFault(ServiceException): + code: str = "TypeAlreadyExistsFault" + sender_fault: bool = False + status_code: int = 400 + + +class TypeDeprecatedFault(ServiceException): + code: str = "TypeDeprecatedFault" + sender_fault: bool = False + status_code: int = 400 + + +class TypeNotDeprecatedFault(ServiceException): + code: str = "TypeNotDeprecatedFault" + sender_fault: bool = False + status_code: int = 400 + + +class UnknownResourceFault(ServiceException): + code: str = "UnknownResourceFault" + sender_fault: bool = False + status_code: int = 400 + + +class WorkflowExecutionAlreadyStartedFault(ServiceException): + code: str = "WorkflowExecutionAlreadyStartedFault" + sender_fault: bool = False + status_code: int = 400 + + +class ActivityType(TypedDict, total=False): + name: Name + version: Version + + +class WorkflowExecution(TypedDict, total=False): + workflowId: WorkflowId + runId: WorkflowRunId + + +EventId = int + + +class ActivityTask(TypedDict, total=False): + taskToken: TaskToken + activityId: ActivityId + startedEventId: EventId + workflowExecution: WorkflowExecution + activityType: ActivityType + input: Optional[Data] + + +class ActivityTaskCancelRequestedEventAttributes(TypedDict, total=False): + decisionTaskCompletedEventId: EventId + activityId: ActivityId + + +class ActivityTaskCanceledEventAttributes(TypedDict, total=False): + details: Optional[Data] + scheduledEventId: EventId + startedEventId: EventId + latestCancelRequestedEventId: Optional[EventId] + + +class ActivityTaskCompletedEventAttributes(TypedDict, total=False): + result: Optional[Data] + scheduledEventId: EventId + startedEventId: EventId + + +class ActivityTaskFailedEventAttributes(TypedDict, total=False): + reason: Optional[FailureReason] + details: Optional[Data] + scheduledEventId: EventId + startedEventId: EventId + + +class TaskList(TypedDict, total=False): + name: Name + + +class ActivityTaskScheduledEventAttributes(TypedDict, total=False): + activityType: ActivityType + activityId: ActivityId + input: Optional[Data] + control: Optional[Data] + scheduleToStartTimeout: Optional[DurationInSecondsOptional] + scheduleToCloseTimeout: Optional[DurationInSecondsOptional] + startToCloseTimeout: Optional[DurationInSecondsOptional] + taskList: TaskList + taskPriority: Optional[TaskPriority] + decisionTaskCompletedEventId: EventId + heartbeatTimeout: Optional[DurationInSecondsOptional] + + +class ActivityTaskStartedEventAttributes(TypedDict, total=False): + identity: Optional[Identity] + scheduledEventId: EventId + + +class ActivityTaskStatus(TypedDict, total=False): + cancelRequested: Canceled + + +class ActivityTaskTimedOutEventAttributes(TypedDict, total=False): + timeoutType: ActivityTaskTimeoutType + scheduledEventId: EventId + startedEventId: EventId + details: Optional[LimitedData] + + +class ActivityTypeConfiguration(TypedDict, total=False): + defaultTaskStartToCloseTimeout: Optional[DurationInSecondsOptional] + defaultTaskHeartbeatTimeout: Optional[DurationInSecondsOptional] + defaultTaskList: Optional[TaskList] + defaultTaskPriority: Optional[TaskPriority] + defaultTaskScheduleToStartTimeout: Optional[DurationInSecondsOptional] + defaultTaskScheduleToCloseTimeout: Optional[DurationInSecondsOptional] + + +Timestamp = datetime + + +class ActivityTypeInfo(TypedDict, total=False): + activityType: ActivityType + status: RegistrationStatus + description: Optional[Description] + creationDate: Timestamp + deprecationDate: Optional[Timestamp] + 
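+
+# --- Editor's illustrative sketch (not part of the generated module) --------
+# At runtime these TypedDicts are plain dicts, so a provider backing
+# DescribeActivityType would assemble its response roughly as below. The
+# literal values ("my-activity", "1.0", "300") are placeholders invented for
+# this example; SWF duration fields are strings holding a number of seconds.
+#
+#   from datetime import datetime, timezone
+#
+#   detail = ActivityTypeDetail(
+#       typeInfo=ActivityTypeInfo(
+#           activityType=ActivityType(name="my-activity", version="1.0"),
+#           status=RegistrationStatus.REGISTERED,
+#           creationDate=datetime.now(timezone.utc),
+#       ),
+#       configuration=ActivityTypeConfiguration(
+#           defaultTaskStartToCloseTimeout="300",
+#       ),
+#   )
+# -----------------------------------------------------------------------------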
+ +class ActivityTypeDetail(TypedDict, total=False): + typeInfo: ActivityTypeInfo + configuration: ActivityTypeConfiguration + + +ActivityTypeInfoList = List[ActivityTypeInfo] + + +class ActivityTypeInfos(TypedDict, total=False): + typeInfos: ActivityTypeInfoList + nextPageToken: Optional[PageToken] + + +class CancelTimerDecisionAttributes(TypedDict, total=False): + timerId: TimerId + + +class CancelTimerFailedEventAttributes(TypedDict, total=False): + timerId: TimerId + cause: CancelTimerFailedCause + decisionTaskCompletedEventId: EventId + + +class CancelWorkflowExecutionDecisionAttributes(TypedDict, total=False): + details: Optional[Data] + + +class CancelWorkflowExecutionFailedEventAttributes(TypedDict, total=False): + cause: CancelWorkflowExecutionFailedCause + decisionTaskCompletedEventId: EventId + + +class WorkflowType(TypedDict, total=False): + name: Name + version: Version + + +class ChildWorkflowExecutionCanceledEventAttributes(TypedDict, total=False): + workflowExecution: WorkflowExecution + workflowType: WorkflowType + details: Optional[Data] + initiatedEventId: EventId + startedEventId: EventId + + +class ChildWorkflowExecutionCompletedEventAttributes(TypedDict, total=False): + workflowExecution: WorkflowExecution + workflowType: WorkflowType + result: Optional[Data] + initiatedEventId: EventId + startedEventId: EventId + + +class ChildWorkflowExecutionFailedEventAttributes(TypedDict, total=False): + workflowExecution: WorkflowExecution + workflowType: WorkflowType + reason: Optional[FailureReason] + details: Optional[Data] + initiatedEventId: EventId + startedEventId: EventId + + +class ChildWorkflowExecutionStartedEventAttributes(TypedDict, total=False): + workflowExecution: WorkflowExecution + workflowType: WorkflowType + initiatedEventId: EventId + + +class ChildWorkflowExecutionTerminatedEventAttributes(TypedDict, total=False): + workflowExecution: WorkflowExecution + workflowType: WorkflowType + initiatedEventId: EventId + startedEventId: EventId + + +class ChildWorkflowExecutionTimedOutEventAttributes(TypedDict, total=False): + workflowExecution: WorkflowExecution + workflowType: WorkflowType + timeoutType: WorkflowExecutionTimeoutType + initiatedEventId: EventId + startedEventId: EventId + + +class CloseStatusFilter(TypedDict, total=False): + status: CloseStatus + + +class CompleteWorkflowExecutionDecisionAttributes(TypedDict, total=False): + result: Optional[Data] + + +class CompleteWorkflowExecutionFailedEventAttributes(TypedDict, total=False): + cause: CompleteWorkflowExecutionFailedCause + decisionTaskCompletedEventId: EventId + + +TagList = List[Tag] + + +class ContinueAsNewWorkflowExecutionDecisionAttributes(TypedDict, total=False): + input: Optional[Data] + executionStartToCloseTimeout: Optional[DurationInSecondsOptional] + taskList: Optional[TaskList] + taskPriority: Optional[TaskPriority] + taskStartToCloseTimeout: Optional[DurationInSecondsOptional] + childPolicy: Optional[ChildPolicy] + tagList: Optional[TagList] + workflowTypeVersion: Optional[Version] + lambdaRole: Optional[Arn] + + +class ContinueAsNewWorkflowExecutionFailedEventAttributes(TypedDict, total=False): + cause: ContinueAsNewWorkflowExecutionFailedCause + decisionTaskCompletedEventId: EventId + + +class TagFilter(TypedDict, total=False): + tag: Tag + + +class WorkflowTypeFilter(TypedDict, total=False): + name: Name + version: Optional[VersionOptional] + + +class WorkflowExecutionFilter(TypedDict, total=False): + workflowId: WorkflowId + + +class ExecutionTimeFilter(TypedDict, total=False): + 
oldestDate: Timestamp + latestDate: Optional[Timestamp] + + +class CountClosedWorkflowExecutionsInput(ServiceRequest): + domain: DomainName + startTimeFilter: Optional[ExecutionTimeFilter] + closeTimeFilter: Optional[ExecutionTimeFilter] + executionFilter: Optional[WorkflowExecutionFilter] + typeFilter: Optional[WorkflowTypeFilter] + tagFilter: Optional[TagFilter] + closeStatusFilter: Optional[CloseStatusFilter] + + +class CountOpenWorkflowExecutionsInput(ServiceRequest): + domain: DomainName + startTimeFilter: ExecutionTimeFilter + typeFilter: Optional[WorkflowTypeFilter] + tagFilter: Optional[TagFilter] + executionFilter: Optional[WorkflowExecutionFilter] + + +class CountPendingActivityTasksInput(ServiceRequest): + domain: DomainName + taskList: TaskList + + +class CountPendingDecisionTasksInput(ServiceRequest): + domain: DomainName + taskList: TaskList + + +class ScheduleLambdaFunctionDecisionAttributes(TypedDict, total=False): + id: FunctionId + name: FunctionName + control: Optional[Data] + input: Optional[FunctionInput] + startToCloseTimeout: Optional[DurationInSecondsOptional] + + +class StartChildWorkflowExecutionDecisionAttributes(TypedDict, total=False): + workflowType: WorkflowType + workflowId: WorkflowId + control: Optional[Data] + input: Optional[Data] + executionStartToCloseTimeout: Optional[DurationInSecondsOptional] + taskList: Optional[TaskList] + taskPriority: Optional[TaskPriority] + taskStartToCloseTimeout: Optional[DurationInSecondsOptional] + childPolicy: Optional[ChildPolicy] + tagList: Optional[TagList] + lambdaRole: Optional[Arn] + + +class RequestCancelExternalWorkflowExecutionDecisionAttributes(TypedDict, total=False): + workflowId: WorkflowId + runId: Optional[WorkflowRunIdOptional] + control: Optional[Data] + + +class SignalExternalWorkflowExecutionDecisionAttributes(TypedDict, total=False): + workflowId: WorkflowId + runId: Optional[WorkflowRunIdOptional] + signalName: SignalName + input: Optional[Data] + control: Optional[Data] + + +class StartTimerDecisionAttributes(TypedDict, total=False): + timerId: TimerId + control: Optional[Data] + startToFireTimeout: DurationInSeconds + + +class RecordMarkerDecisionAttributes(TypedDict, total=False): + markerName: MarkerName + details: Optional[Data] + + +class FailWorkflowExecutionDecisionAttributes(TypedDict, total=False): + reason: Optional[FailureReason] + details: Optional[Data] + + +class RequestCancelActivityTaskDecisionAttributes(TypedDict, total=False): + activityId: ActivityId + + +class ScheduleActivityTaskDecisionAttributes(TypedDict, total=False): + activityType: ActivityType + activityId: ActivityId + control: Optional[Data] + input: Optional[Data] + scheduleToCloseTimeout: Optional[DurationInSecondsOptional] + taskList: Optional[TaskList] + taskPriority: Optional[TaskPriority] + scheduleToStartTimeout: Optional[DurationInSecondsOptional] + startToCloseTimeout: Optional[DurationInSecondsOptional] + heartbeatTimeout: Optional[DurationInSecondsOptional] + + +class Decision(TypedDict, total=False): + decisionType: DecisionType + scheduleActivityTaskDecisionAttributes: Optional[ScheduleActivityTaskDecisionAttributes] + requestCancelActivityTaskDecisionAttributes: Optional[ + RequestCancelActivityTaskDecisionAttributes + ] + completeWorkflowExecutionDecisionAttributes: Optional[ + CompleteWorkflowExecutionDecisionAttributes + ] + failWorkflowExecutionDecisionAttributes: Optional[FailWorkflowExecutionDecisionAttributes] + cancelWorkflowExecutionDecisionAttributes: 
Optional[CancelWorkflowExecutionDecisionAttributes] + continueAsNewWorkflowExecutionDecisionAttributes: Optional[ + ContinueAsNewWorkflowExecutionDecisionAttributes + ] + recordMarkerDecisionAttributes: Optional[RecordMarkerDecisionAttributes] + startTimerDecisionAttributes: Optional[StartTimerDecisionAttributes] + cancelTimerDecisionAttributes: Optional[CancelTimerDecisionAttributes] + signalExternalWorkflowExecutionDecisionAttributes: Optional[ + SignalExternalWorkflowExecutionDecisionAttributes + ] + requestCancelExternalWorkflowExecutionDecisionAttributes: Optional[ + RequestCancelExternalWorkflowExecutionDecisionAttributes + ] + startChildWorkflowExecutionDecisionAttributes: Optional[ + StartChildWorkflowExecutionDecisionAttributes + ] + scheduleLambdaFunctionDecisionAttributes: Optional[ScheduleLambdaFunctionDecisionAttributes] + + +DecisionList = List[Decision] + + +class StartLambdaFunctionFailedEventAttributes(TypedDict, total=False): + scheduledEventId: Optional[EventId] + cause: Optional[StartLambdaFunctionFailedCause] + message: Optional[CauseMessage] + + +class ScheduleLambdaFunctionFailedEventAttributes(TypedDict, total=False): + id: FunctionId + name: FunctionName + cause: ScheduleLambdaFunctionFailedCause + decisionTaskCompletedEventId: EventId + + +class LambdaFunctionTimedOutEventAttributes(TypedDict, total=False): + scheduledEventId: EventId + startedEventId: EventId + timeoutType: Optional[LambdaFunctionTimeoutType] + + +class LambdaFunctionFailedEventAttributes(TypedDict, total=False): + scheduledEventId: EventId + startedEventId: EventId + reason: Optional[FailureReason] + details: Optional[Data] + + +class LambdaFunctionCompletedEventAttributes(TypedDict, total=False): + scheduledEventId: EventId + startedEventId: EventId + result: Optional[Data] + + +class LambdaFunctionStartedEventAttributes(TypedDict, total=False): + scheduledEventId: EventId + + +class LambdaFunctionScheduledEventAttributes(TypedDict, total=False): + id: FunctionId + name: FunctionName + control: Optional[Data] + input: Optional[FunctionInput] + startToCloseTimeout: Optional[DurationInSecondsOptional] + decisionTaskCompletedEventId: EventId + + +class StartChildWorkflowExecutionFailedEventAttributes(TypedDict, total=False): + workflowType: WorkflowType + cause: StartChildWorkflowExecutionFailedCause + workflowId: WorkflowId + initiatedEventId: EventId + decisionTaskCompletedEventId: EventId + control: Optional[Data] + + +class StartTimerFailedEventAttributes(TypedDict, total=False): + timerId: TimerId + cause: StartTimerFailedCause + decisionTaskCompletedEventId: EventId + + +class RequestCancelActivityTaskFailedEventAttributes(TypedDict, total=False): + activityId: ActivityId + cause: RequestCancelActivityTaskFailedCause + decisionTaskCompletedEventId: EventId + + +class ScheduleActivityTaskFailedEventAttributes(TypedDict, total=False): + activityType: ActivityType + activityId: ActivityId + cause: ScheduleActivityTaskFailedCause + decisionTaskCompletedEventId: EventId + + +class RequestCancelExternalWorkflowExecutionFailedEventAttributes(TypedDict, total=False): + workflowId: WorkflowId + runId: Optional[WorkflowRunIdOptional] + cause: RequestCancelExternalWorkflowExecutionFailedCause + initiatedEventId: EventId + decisionTaskCompletedEventId: EventId + control: Optional[Data] + + +class RequestCancelExternalWorkflowExecutionInitiatedEventAttributes(TypedDict, total=False): + workflowId: WorkflowId + runId: Optional[WorkflowRunIdOptional] + decisionTaskCompletedEventId: EventId + control: 
Optional[Data] + + +class ExternalWorkflowExecutionCancelRequestedEventAttributes(TypedDict, total=False): + workflowExecution: WorkflowExecution + initiatedEventId: EventId + + +class SignalExternalWorkflowExecutionFailedEventAttributes(TypedDict, total=False): + workflowId: WorkflowId + runId: Optional[WorkflowRunIdOptional] + cause: SignalExternalWorkflowExecutionFailedCause + initiatedEventId: EventId + decisionTaskCompletedEventId: EventId + control: Optional[Data] + + +class ExternalWorkflowExecutionSignaledEventAttributes(TypedDict, total=False): + workflowExecution: WorkflowExecution + initiatedEventId: EventId + + +class SignalExternalWorkflowExecutionInitiatedEventAttributes(TypedDict, total=False): + workflowId: WorkflowId + runId: Optional[WorkflowRunIdOptional] + signalName: SignalName + input: Optional[Data] + decisionTaskCompletedEventId: EventId + control: Optional[Data] + + +class StartChildWorkflowExecutionInitiatedEventAttributes(TypedDict, total=False): + workflowId: WorkflowId + workflowType: WorkflowType + control: Optional[Data] + input: Optional[Data] + executionStartToCloseTimeout: Optional[DurationInSecondsOptional] + taskList: TaskList + taskPriority: Optional[TaskPriority] + decisionTaskCompletedEventId: EventId + childPolicy: ChildPolicy + taskStartToCloseTimeout: Optional[DurationInSecondsOptional] + tagList: Optional[TagList] + lambdaRole: Optional[Arn] + + +class TimerCanceledEventAttributes(TypedDict, total=False): + timerId: TimerId + startedEventId: EventId + decisionTaskCompletedEventId: EventId + + +class TimerFiredEventAttributes(TypedDict, total=False): + timerId: TimerId + startedEventId: EventId + + +class TimerStartedEventAttributes(TypedDict, total=False): + timerId: TimerId + control: Optional[Data] + startToFireTimeout: DurationInSeconds + decisionTaskCompletedEventId: EventId + + +class RecordMarkerFailedEventAttributes(TypedDict, total=False): + markerName: MarkerName + cause: RecordMarkerFailedCause + decisionTaskCompletedEventId: EventId + + +class MarkerRecordedEventAttributes(TypedDict, total=False): + markerName: MarkerName + details: Optional[Data] + decisionTaskCompletedEventId: EventId + + +class WorkflowExecutionSignaledEventAttributes(TypedDict, total=False): + signalName: SignalName + input: Optional[Data] + externalWorkflowExecution: Optional[WorkflowExecution] + externalInitiatedEventId: Optional[EventId] + + +class DecisionTaskTimedOutEventAttributes(TypedDict, total=False): + timeoutType: DecisionTaskTimeoutType + scheduledEventId: EventId + startedEventId: EventId + + +class DecisionTaskCompletedEventAttributes(TypedDict, total=False): + executionContext: Optional[Data] + scheduledEventId: EventId + startedEventId: EventId + taskList: Optional[TaskList] + taskListScheduleToStartTimeout: Optional[DurationInSecondsOptional] + + +class DecisionTaskStartedEventAttributes(TypedDict, total=False): + identity: Optional[Identity] + scheduledEventId: EventId + + +class DecisionTaskScheduledEventAttributes(TypedDict, total=False): + taskList: TaskList + taskPriority: Optional[TaskPriority] + startToCloseTimeout: Optional[DurationInSecondsOptional] + scheduleToStartTimeout: Optional[DurationInSecondsOptional] + + +class WorkflowExecutionCancelRequestedEventAttributes(TypedDict, total=False): + externalWorkflowExecution: Optional[WorkflowExecution] + externalInitiatedEventId: Optional[EventId] + cause: Optional[WorkflowExecutionCancelRequestedCause] + + +class WorkflowExecutionTerminatedEventAttributes(TypedDict, total=False): + reason: 
Optional[TerminateReason] + details: Optional[Data] + childPolicy: ChildPolicy + cause: Optional[WorkflowExecutionTerminatedCause] + + +class WorkflowExecutionContinuedAsNewEventAttributes(TypedDict, total=False): + input: Optional[Data] + decisionTaskCompletedEventId: EventId + newExecutionRunId: WorkflowRunId + executionStartToCloseTimeout: Optional[DurationInSecondsOptional] + taskList: TaskList + taskPriority: Optional[TaskPriority] + taskStartToCloseTimeout: Optional[DurationInSecondsOptional] + childPolicy: ChildPolicy + tagList: Optional[TagList] + workflowType: WorkflowType + lambdaRole: Optional[Arn] + + +class WorkflowExecutionCanceledEventAttributes(TypedDict, total=False): + details: Optional[Data] + decisionTaskCompletedEventId: EventId + + +class WorkflowExecutionTimedOutEventAttributes(TypedDict, total=False): + timeoutType: WorkflowExecutionTimeoutType + childPolicy: ChildPolicy + + +class FailWorkflowExecutionFailedEventAttributes(TypedDict, total=False): + cause: FailWorkflowExecutionFailedCause + decisionTaskCompletedEventId: EventId + + +class WorkflowExecutionFailedEventAttributes(TypedDict, total=False): + reason: Optional[FailureReason] + details: Optional[Data] + decisionTaskCompletedEventId: EventId + + +class WorkflowExecutionCompletedEventAttributes(TypedDict, total=False): + result: Optional[Data] + decisionTaskCompletedEventId: EventId + + +class WorkflowExecutionStartedEventAttributes(TypedDict, total=False): + input: Optional[Data] + executionStartToCloseTimeout: Optional[DurationInSecondsOptional] + taskStartToCloseTimeout: Optional[DurationInSecondsOptional] + childPolicy: ChildPolicy + taskList: TaskList + taskPriority: Optional[TaskPriority] + workflowType: WorkflowType + tagList: Optional[TagList] + continuedExecutionRunId: Optional[WorkflowRunIdOptional] + parentWorkflowExecution: Optional[WorkflowExecution] + parentInitiatedEventId: Optional[EventId] + lambdaRole: Optional[Arn] + + +class HistoryEvent(TypedDict, total=False): + eventTimestamp: Timestamp + eventType: EventType + eventId: EventId + workflowExecutionStartedEventAttributes: Optional[WorkflowExecutionStartedEventAttributes] + workflowExecutionCompletedEventAttributes: Optional[WorkflowExecutionCompletedEventAttributes] + completeWorkflowExecutionFailedEventAttributes: Optional[ + CompleteWorkflowExecutionFailedEventAttributes + ] + workflowExecutionFailedEventAttributes: Optional[WorkflowExecutionFailedEventAttributes] + failWorkflowExecutionFailedEventAttributes: Optional[FailWorkflowExecutionFailedEventAttributes] + workflowExecutionTimedOutEventAttributes: Optional[WorkflowExecutionTimedOutEventAttributes] + workflowExecutionCanceledEventAttributes: Optional[WorkflowExecutionCanceledEventAttributes] + cancelWorkflowExecutionFailedEventAttributes: Optional[ + CancelWorkflowExecutionFailedEventAttributes + ] + workflowExecutionContinuedAsNewEventAttributes: Optional[ + WorkflowExecutionContinuedAsNewEventAttributes + ] + continueAsNewWorkflowExecutionFailedEventAttributes: Optional[ + ContinueAsNewWorkflowExecutionFailedEventAttributes + ] + workflowExecutionTerminatedEventAttributes: Optional[WorkflowExecutionTerminatedEventAttributes] + workflowExecutionCancelRequestedEventAttributes: Optional[ + WorkflowExecutionCancelRequestedEventAttributes + ] + decisionTaskScheduledEventAttributes: Optional[DecisionTaskScheduledEventAttributes] + decisionTaskStartedEventAttributes: Optional[DecisionTaskStartedEventAttributes] + decisionTaskCompletedEventAttributes: 
Optional[DecisionTaskCompletedEventAttributes] + decisionTaskTimedOutEventAttributes: Optional[DecisionTaskTimedOutEventAttributes] + activityTaskScheduledEventAttributes: Optional[ActivityTaskScheduledEventAttributes] + activityTaskStartedEventAttributes: Optional[ActivityTaskStartedEventAttributes] + activityTaskCompletedEventAttributes: Optional[ActivityTaskCompletedEventAttributes] + activityTaskFailedEventAttributes: Optional[ActivityTaskFailedEventAttributes] + activityTaskTimedOutEventAttributes: Optional[ActivityTaskTimedOutEventAttributes] + activityTaskCanceledEventAttributes: Optional[ActivityTaskCanceledEventAttributes] + activityTaskCancelRequestedEventAttributes: Optional[ActivityTaskCancelRequestedEventAttributes] + workflowExecutionSignaledEventAttributes: Optional[WorkflowExecutionSignaledEventAttributes] + markerRecordedEventAttributes: Optional[MarkerRecordedEventAttributes] + recordMarkerFailedEventAttributes: Optional[RecordMarkerFailedEventAttributes] + timerStartedEventAttributes: Optional[TimerStartedEventAttributes] + timerFiredEventAttributes: Optional[TimerFiredEventAttributes] + timerCanceledEventAttributes: Optional[TimerCanceledEventAttributes] + startChildWorkflowExecutionInitiatedEventAttributes: Optional[ + StartChildWorkflowExecutionInitiatedEventAttributes + ] + childWorkflowExecutionStartedEventAttributes: Optional[ + ChildWorkflowExecutionStartedEventAttributes + ] + childWorkflowExecutionCompletedEventAttributes: Optional[ + ChildWorkflowExecutionCompletedEventAttributes + ] + childWorkflowExecutionFailedEventAttributes: Optional[ + ChildWorkflowExecutionFailedEventAttributes + ] + childWorkflowExecutionTimedOutEventAttributes: Optional[ + ChildWorkflowExecutionTimedOutEventAttributes + ] + childWorkflowExecutionCanceledEventAttributes: Optional[ + ChildWorkflowExecutionCanceledEventAttributes + ] + childWorkflowExecutionTerminatedEventAttributes: Optional[ + ChildWorkflowExecutionTerminatedEventAttributes + ] + signalExternalWorkflowExecutionInitiatedEventAttributes: Optional[ + SignalExternalWorkflowExecutionInitiatedEventAttributes + ] + externalWorkflowExecutionSignaledEventAttributes: Optional[ + ExternalWorkflowExecutionSignaledEventAttributes + ] + signalExternalWorkflowExecutionFailedEventAttributes: Optional[ + SignalExternalWorkflowExecutionFailedEventAttributes + ] + externalWorkflowExecutionCancelRequestedEventAttributes: Optional[ + ExternalWorkflowExecutionCancelRequestedEventAttributes + ] + requestCancelExternalWorkflowExecutionInitiatedEventAttributes: Optional[ + RequestCancelExternalWorkflowExecutionInitiatedEventAttributes + ] + requestCancelExternalWorkflowExecutionFailedEventAttributes: Optional[ + RequestCancelExternalWorkflowExecutionFailedEventAttributes + ] + scheduleActivityTaskFailedEventAttributes: Optional[ScheduleActivityTaskFailedEventAttributes] + requestCancelActivityTaskFailedEventAttributes: Optional[ + RequestCancelActivityTaskFailedEventAttributes + ] + startTimerFailedEventAttributes: Optional[StartTimerFailedEventAttributes] + cancelTimerFailedEventAttributes: Optional[CancelTimerFailedEventAttributes] + startChildWorkflowExecutionFailedEventAttributes: Optional[ + StartChildWorkflowExecutionFailedEventAttributes + ] + lambdaFunctionScheduledEventAttributes: Optional[LambdaFunctionScheduledEventAttributes] + lambdaFunctionStartedEventAttributes: Optional[LambdaFunctionStartedEventAttributes] + lambdaFunctionCompletedEventAttributes: Optional[LambdaFunctionCompletedEventAttributes] + 
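+    # Editor's note: mirroring the upstream SWF data model, each HistoryEvent is
+    # expected to populate exactly the one `*EventAttributes` member that matches
+    # its `eventType`; all other attribute fields remain unset.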
lambdaFunctionFailedEventAttributes: Optional[LambdaFunctionFailedEventAttributes] + lambdaFunctionTimedOutEventAttributes: Optional[LambdaFunctionTimedOutEventAttributes] + scheduleLambdaFunctionFailedEventAttributes: Optional[ + ScheduleLambdaFunctionFailedEventAttributes + ] + startLambdaFunctionFailedEventAttributes: Optional[StartLambdaFunctionFailedEventAttributes] + + +HistoryEventList = List[HistoryEvent] + + +class DecisionTask(TypedDict, total=False): + taskToken: TaskToken + startedEventId: EventId + workflowExecution: WorkflowExecution + workflowType: WorkflowType + events: HistoryEventList + nextPageToken: Optional[PageToken] + previousStartedEventId: Optional[EventId] + + +class DeleteActivityTypeInput(ServiceRequest): + domain: DomainName + activityType: ActivityType + + +class DeleteWorkflowTypeInput(ServiceRequest): + domain: DomainName + workflowType: WorkflowType + + +class DeprecateActivityTypeInput(ServiceRequest): + domain: DomainName + activityType: ActivityType + + +class DeprecateDomainInput(ServiceRequest): + name: DomainName + + +class DeprecateWorkflowTypeInput(ServiceRequest): + domain: DomainName + workflowType: WorkflowType + + +class DescribeActivityTypeInput(ServiceRequest): + domain: DomainName + activityType: ActivityType + + +class DescribeDomainInput(ServiceRequest): + name: DomainName + + +class DescribeWorkflowExecutionInput(ServiceRequest): + domain: DomainName + execution: WorkflowExecution + + +class DescribeWorkflowTypeInput(ServiceRequest): + domain: DomainName + workflowType: WorkflowType + + +class DomainConfiguration(TypedDict, total=False): + workflowExecutionRetentionPeriodInDays: DurationInDays + + +class DomainInfo(TypedDict, total=False): + name: DomainName + status: RegistrationStatus + description: Optional[Description] + arn: Optional[Arn] + + +class DomainDetail(TypedDict, total=False): + domainInfo: DomainInfo + configuration: DomainConfiguration + + +DomainInfoList = List[DomainInfo] + + +class DomainInfos(TypedDict, total=False): + domainInfos: DomainInfoList + nextPageToken: Optional[PageToken] + + +class GetWorkflowExecutionHistoryInput(ServiceRequest): + domain: DomainName + execution: WorkflowExecution + nextPageToken: Optional[PageToken] + maximumPageSize: Optional[PageSize] + reverseOrder: Optional[ReverseOrder] + + +class History(TypedDict, total=False): + events: HistoryEventList + nextPageToken: Optional[PageToken] + + +class ListActivityTypesInput(ServiceRequest): + domain: DomainName + name: Optional[Name] + registrationStatus: RegistrationStatus + nextPageToken: Optional[PageToken] + maximumPageSize: Optional[PageSize] + reverseOrder: Optional[ReverseOrder] + + +class ListClosedWorkflowExecutionsInput(ServiceRequest): + domain: DomainName + startTimeFilter: Optional[ExecutionTimeFilter] + closeTimeFilter: Optional[ExecutionTimeFilter] + executionFilter: Optional[WorkflowExecutionFilter] + closeStatusFilter: Optional[CloseStatusFilter] + typeFilter: Optional[WorkflowTypeFilter] + tagFilter: Optional[TagFilter] + nextPageToken: Optional[PageToken] + maximumPageSize: Optional[PageSize] + reverseOrder: Optional[ReverseOrder] + + +class ListDomainsInput(ServiceRequest): + nextPageToken: Optional[PageToken] + registrationStatus: RegistrationStatus + maximumPageSize: Optional[PageSize] + reverseOrder: Optional[ReverseOrder] + + +class ListOpenWorkflowExecutionsInput(ServiceRequest): + domain: DomainName + startTimeFilter: ExecutionTimeFilter + typeFilter: Optional[WorkflowTypeFilter] + tagFilter: Optional[TagFilter] + 
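+    # Editor's note: nextPageToken / maximumPageSize / reverseOrder below are the
+    # standard SWF pagination controls; a caller passes the nextPageToken returned
+    # by one page to retrieve the following page.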
nextPageToken: Optional[PageToken] + maximumPageSize: Optional[PageSize] + reverseOrder: Optional[ReverseOrder] + executionFilter: Optional[WorkflowExecutionFilter] + + +class ListTagsForResourceInput(ServiceRequest): + resourceArn: Arn + + +class ResourceTag(TypedDict, total=False): + key: ResourceTagKey + value: Optional[ResourceTagValue] + + +ResourceTagList = List[ResourceTag] + + +class ListTagsForResourceOutput(TypedDict, total=False): + tags: Optional[ResourceTagList] + + +class ListWorkflowTypesInput(ServiceRequest): + domain: DomainName + name: Optional[Name] + registrationStatus: RegistrationStatus + nextPageToken: Optional[PageToken] + maximumPageSize: Optional[PageSize] + reverseOrder: Optional[ReverseOrder] + + +class PendingTaskCount(TypedDict, total=False): + count: Count + truncated: Optional[Truncated] + + +class PollForActivityTaskInput(ServiceRequest): + domain: DomainName + taskList: TaskList + identity: Optional[Identity] + + +class PollForDecisionTaskInput(ServiceRequest): + domain: DomainName + taskList: TaskList + identity: Optional[Identity] + nextPageToken: Optional[PageToken] + maximumPageSize: Optional[PageSize] + reverseOrder: Optional[ReverseOrder] + startAtPreviousStartedEvent: Optional[StartAtPreviousStartedEvent] + + +class RecordActivityTaskHeartbeatInput(ServiceRequest): + taskToken: TaskToken + details: Optional[LimitedData] + + +class RegisterActivityTypeInput(ServiceRequest): + domain: DomainName + name: Name + version: Version + description: Optional[Description] + defaultTaskStartToCloseTimeout: Optional[DurationInSecondsOptional] + defaultTaskHeartbeatTimeout: Optional[DurationInSecondsOptional] + defaultTaskList: Optional[TaskList] + defaultTaskPriority: Optional[TaskPriority] + defaultTaskScheduleToStartTimeout: Optional[DurationInSecondsOptional] + defaultTaskScheduleToCloseTimeout: Optional[DurationInSecondsOptional] + + +class RegisterDomainInput(ServiceRequest): + name: DomainName + description: Optional[Description] + workflowExecutionRetentionPeriodInDays: DurationInDays + tags: Optional[ResourceTagList] + + +class RegisterWorkflowTypeInput(ServiceRequest): + domain: DomainName + name: Name + version: Version + description: Optional[Description] + defaultTaskStartToCloseTimeout: Optional[DurationInSecondsOptional] + defaultExecutionStartToCloseTimeout: Optional[DurationInSecondsOptional] + defaultTaskList: Optional[TaskList] + defaultTaskPriority: Optional[TaskPriority] + defaultChildPolicy: Optional[ChildPolicy] + defaultLambdaRole: Optional[Arn] + + +class RequestCancelWorkflowExecutionInput(ServiceRequest): + domain: DomainName + workflowId: WorkflowId + runId: Optional[WorkflowRunIdOptional] + + +ResourceTagKeyList = List[ResourceTagKey] + + +class RespondActivityTaskCanceledInput(ServiceRequest): + taskToken: TaskToken + details: Optional[Data] + + +class RespondActivityTaskCompletedInput(ServiceRequest): + taskToken: TaskToken + result: Optional[Data] + + +class RespondActivityTaskFailedInput(ServiceRequest): + taskToken: TaskToken + reason: Optional[FailureReason] + details: Optional[Data] + + +class RespondDecisionTaskCompletedInput(ServiceRequest): + taskToken: TaskToken + decisions: Optional[DecisionList] + executionContext: Optional[Data] + taskList: Optional[TaskList] + taskListScheduleToStartTimeout: Optional[DurationInSecondsOptional] + + +class Run(TypedDict, total=False): + runId: Optional[WorkflowRunId] + + +class SignalWorkflowExecutionInput(ServiceRequest): + domain: DomainName + workflowId: WorkflowId + runId: 
Optional[WorkflowRunIdOptional] + signalName: SignalName + input: Optional[Data] + + +class StartWorkflowExecutionInput(ServiceRequest): + domain: DomainName + workflowId: WorkflowId + workflowType: WorkflowType + taskList: Optional[TaskList] + taskPriority: Optional[TaskPriority] + input: Optional[Data] + executionStartToCloseTimeout: Optional[DurationInSecondsOptional] + tagList: Optional[TagList] + taskStartToCloseTimeout: Optional[DurationInSecondsOptional] + childPolicy: Optional[ChildPolicy] + lambdaRole: Optional[Arn] + + +class TagResourceInput(ServiceRequest): + resourceArn: Arn + tags: ResourceTagList + + +class TerminateWorkflowExecutionInput(ServiceRequest): + domain: DomainName + workflowId: WorkflowId + runId: Optional[WorkflowRunIdOptional] + reason: Optional[TerminateReason] + details: Optional[Data] + childPolicy: Optional[ChildPolicy] + + +class UndeprecateActivityTypeInput(ServiceRequest): + domain: DomainName + activityType: ActivityType + + +class UndeprecateDomainInput(ServiceRequest): + name: DomainName + + +class UndeprecateWorkflowTypeInput(ServiceRequest): + domain: DomainName + workflowType: WorkflowType + + +class UntagResourceInput(ServiceRequest): + resourceArn: Arn + tagKeys: ResourceTagKeyList + + +class WorkflowExecutionConfiguration(TypedDict, total=False): + taskStartToCloseTimeout: DurationInSeconds + executionStartToCloseTimeout: DurationInSeconds + taskList: TaskList + taskPriority: Optional[TaskPriority] + childPolicy: ChildPolicy + lambdaRole: Optional[Arn] + + +class WorkflowExecutionCount(TypedDict, total=False): + count: Count + truncated: Optional[Truncated] + + +class WorkflowExecutionOpenCounts(TypedDict, total=False): + openActivityTasks: Count + openDecisionTasks: OpenDecisionTasksCount + openTimers: Count + openChildWorkflowExecutions: Count + openLambdaFunctions: Optional[Count] + + +class WorkflowExecutionInfo(TypedDict, total=False): + execution: WorkflowExecution + workflowType: WorkflowType + startTimestamp: Timestamp + closeTimestamp: Optional[Timestamp] + executionStatus: ExecutionStatus + closeStatus: Optional[CloseStatus] + parent: Optional[WorkflowExecution] + tagList: Optional[TagList] + cancelRequested: Optional[Canceled] + + +class WorkflowExecutionDetail(TypedDict, total=False): + executionInfo: WorkflowExecutionInfo + executionConfiguration: WorkflowExecutionConfiguration + openCounts: WorkflowExecutionOpenCounts + latestActivityTaskTimestamp: Optional[Timestamp] + latestExecutionContext: Optional[Data] + + +WorkflowExecutionInfoList = List[WorkflowExecutionInfo] + + +class WorkflowExecutionInfos(TypedDict, total=False): + executionInfos: WorkflowExecutionInfoList + nextPageToken: Optional[PageToken] + + +class WorkflowTypeConfiguration(TypedDict, total=False): + defaultTaskStartToCloseTimeout: Optional[DurationInSecondsOptional] + defaultExecutionStartToCloseTimeout: Optional[DurationInSecondsOptional] + defaultTaskList: Optional[TaskList] + defaultTaskPriority: Optional[TaskPriority] + defaultChildPolicy: Optional[ChildPolicy] + defaultLambdaRole: Optional[Arn] + + +class WorkflowTypeInfo(TypedDict, total=False): + workflowType: WorkflowType + status: RegistrationStatus + description: Optional[Description] + creationDate: Timestamp + deprecationDate: Optional[Timestamp] + + +class WorkflowTypeDetail(TypedDict, total=False): + typeInfo: WorkflowTypeInfo + configuration: WorkflowTypeConfiguration + + +WorkflowTypeInfoList = List[WorkflowTypeInfo] + + +class WorkflowTypeInfos(TypedDict, total=False): + typeInfos: 
WorkflowTypeInfoList + nextPageToken: Optional[PageToken] + + +class SwfApi: + service = "swf" + version = "2012-01-25" + + @handler("CountClosedWorkflowExecutions") + def count_closed_workflow_executions( + self, + context: RequestContext, + domain: DomainName, + start_time_filter: ExecutionTimeFilter | None = None, + close_time_filter: ExecutionTimeFilter | None = None, + execution_filter: WorkflowExecutionFilter | None = None, + type_filter: WorkflowTypeFilter | None = None, + tag_filter: TagFilter | None = None, + close_status_filter: CloseStatusFilter | None = None, + **kwargs, + ) -> WorkflowExecutionCount: + raise NotImplementedError + + @handler("CountOpenWorkflowExecutions") + def count_open_workflow_executions( + self, + context: RequestContext, + domain: DomainName, + start_time_filter: ExecutionTimeFilter, + type_filter: WorkflowTypeFilter | None = None, + tag_filter: TagFilter | None = None, + execution_filter: WorkflowExecutionFilter | None = None, + **kwargs, + ) -> WorkflowExecutionCount: + raise NotImplementedError + + @handler("CountPendingActivityTasks") + def count_pending_activity_tasks( + self, context: RequestContext, domain: DomainName, task_list: TaskList, **kwargs + ) -> PendingTaskCount: + raise NotImplementedError + + @handler("CountPendingDecisionTasks") + def count_pending_decision_tasks( + self, context: RequestContext, domain: DomainName, task_list: TaskList, **kwargs + ) -> PendingTaskCount: + raise NotImplementedError + + @handler("DeleteActivityType") + def delete_activity_type( + self, context: RequestContext, domain: DomainName, activity_type: ActivityType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteWorkflowType") + def delete_workflow_type( + self, context: RequestContext, domain: DomainName, workflow_type: WorkflowType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeprecateActivityType") + def deprecate_activity_type( + self, context: RequestContext, domain: DomainName, activity_type: ActivityType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeprecateDomain") + def deprecate_domain(self, context: RequestContext, name: DomainName, **kwargs) -> None: + raise NotImplementedError + + @handler("DeprecateWorkflowType") + def deprecate_workflow_type( + self, context: RequestContext, domain: DomainName, workflow_type: WorkflowType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DescribeActivityType") + def describe_activity_type( + self, context: RequestContext, domain: DomainName, activity_type: ActivityType, **kwargs + ) -> ActivityTypeDetail: + raise NotImplementedError + + @handler("DescribeDomain") + def describe_domain(self, context: RequestContext, name: DomainName, **kwargs) -> DomainDetail: + raise NotImplementedError + + @handler("DescribeWorkflowExecution") + def describe_workflow_execution( + self, context: RequestContext, domain: DomainName, execution: WorkflowExecution, **kwargs + ) -> WorkflowExecutionDetail: + raise NotImplementedError + + @handler("DescribeWorkflowType") + def describe_workflow_type( + self, context: RequestContext, domain: DomainName, workflow_type: WorkflowType, **kwargs + ) -> WorkflowTypeDetail: + raise NotImplementedError + + @handler("GetWorkflowExecutionHistory") + def get_workflow_execution_history( + self, + context: RequestContext, + domain: DomainName, + execution: WorkflowExecution, + next_page_token: PageToken | None = None, + maximum_page_size: PageSize | None = None, + reverse_order: ReverseOrder | None = None, + **kwargs, + ) -> 
History: + raise NotImplementedError + + @handler("ListActivityTypes") + def list_activity_types( + self, + context: RequestContext, + domain: DomainName, + registration_status: RegistrationStatus, + name: Name | None = None, + next_page_token: PageToken | None = None, + maximum_page_size: PageSize | None = None, + reverse_order: ReverseOrder | None = None, + **kwargs, + ) -> ActivityTypeInfos: + raise NotImplementedError + + @handler("ListClosedWorkflowExecutions") + def list_closed_workflow_executions( + self, + context: RequestContext, + domain: DomainName, + start_time_filter: ExecutionTimeFilter | None = None, + close_time_filter: ExecutionTimeFilter | None = None, + execution_filter: WorkflowExecutionFilter | None = None, + close_status_filter: CloseStatusFilter | None = None, + type_filter: WorkflowTypeFilter | None = None, + tag_filter: TagFilter | None = None, + next_page_token: PageToken | None = None, + maximum_page_size: PageSize | None = None, + reverse_order: ReverseOrder | None = None, + **kwargs, + ) -> WorkflowExecutionInfos: + raise NotImplementedError + + @handler("ListDomains") + def list_domains( + self, + context: RequestContext, + registration_status: RegistrationStatus, + next_page_token: PageToken | None = None, + maximum_page_size: PageSize | None = None, + reverse_order: ReverseOrder | None = None, + **kwargs, + ) -> DomainInfos: + raise NotImplementedError + + @handler("ListOpenWorkflowExecutions") + def list_open_workflow_executions( + self, + context: RequestContext, + domain: DomainName, + start_time_filter: ExecutionTimeFilter, + type_filter: WorkflowTypeFilter | None = None, + tag_filter: TagFilter | None = None, + next_page_token: PageToken | None = None, + maximum_page_size: PageSize | None = None, + reverse_order: ReverseOrder | None = None, + execution_filter: WorkflowExecutionFilter | None = None, + **kwargs, + ) -> WorkflowExecutionInfos: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: Arn, **kwargs + ) -> ListTagsForResourceOutput: + raise NotImplementedError + + @handler("ListWorkflowTypes") + def list_workflow_types( + self, + context: RequestContext, + domain: DomainName, + registration_status: RegistrationStatus, + name: Name | None = None, + next_page_token: PageToken | None = None, + maximum_page_size: PageSize | None = None, + reverse_order: ReverseOrder | None = None, + **kwargs, + ) -> WorkflowTypeInfos: + raise NotImplementedError + + @handler("PollForActivityTask") + def poll_for_activity_task( + self, + context: RequestContext, + domain: DomainName, + task_list: TaskList, + identity: Identity | None = None, + **kwargs, + ) -> ActivityTask: + raise NotImplementedError + + @handler("PollForDecisionTask") + def poll_for_decision_task( + self, + context: RequestContext, + domain: DomainName, + task_list: TaskList, + identity: Identity | None = None, + next_page_token: PageToken | None = None, + maximum_page_size: PageSize | None = None, + reverse_order: ReverseOrder | None = None, + start_at_previous_started_event: StartAtPreviousStartedEvent | None = None, + **kwargs, + ) -> DecisionTask: + raise NotImplementedError + + @handler("RecordActivityTaskHeartbeat") + def record_activity_task_heartbeat( + self, + context: RequestContext, + task_token: TaskToken, + details: LimitedData | None = None, + **kwargs, + ) -> ActivityTaskStatus: + raise NotImplementedError + + @handler("RegisterActivityType") + def register_activity_type( + self, + context: 
RequestContext, + domain: DomainName, + name: Name, + version: Version, + description: Description | None = None, + default_task_start_to_close_timeout: DurationInSecondsOptional | None = None, + default_task_heartbeat_timeout: DurationInSecondsOptional | None = None, + default_task_list: TaskList | None = None, + default_task_priority: TaskPriority | None = None, + default_task_schedule_to_start_timeout: DurationInSecondsOptional | None = None, + default_task_schedule_to_close_timeout: DurationInSecondsOptional | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RegisterDomain") + def register_domain( + self, + context: RequestContext, + name: DomainName, + workflow_execution_retention_period_in_days: DurationInDays, + description: Description | None = None, + tags: ResourceTagList | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RegisterWorkflowType") + def register_workflow_type( + self, + context: RequestContext, + domain: DomainName, + name: Name, + version: Version, + description: Description | None = None, + default_task_start_to_close_timeout: DurationInSecondsOptional | None = None, + default_execution_start_to_close_timeout: DurationInSecondsOptional | None = None, + default_task_list: TaskList | None = None, + default_task_priority: TaskPriority | None = None, + default_child_policy: ChildPolicy | None = None, + default_lambda_role: Arn | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RequestCancelWorkflowExecution") + def request_cancel_workflow_execution( + self, + context: RequestContext, + domain: DomainName, + workflow_id: WorkflowId, + run_id: WorkflowRunIdOptional | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RespondActivityTaskCanceled") + def respond_activity_task_canceled( + self, context: RequestContext, task_token: TaskToken, details: Data | None = None, **kwargs + ) -> None: + raise NotImplementedError + + @handler("RespondActivityTaskCompleted") + def respond_activity_task_completed( + self, context: RequestContext, task_token: TaskToken, result: Data | None = None, **kwargs + ) -> None: + raise NotImplementedError + + @handler("RespondActivityTaskFailed") + def respond_activity_task_failed( + self, + context: RequestContext, + task_token: TaskToken, + reason: FailureReason | None = None, + details: Data | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("RespondDecisionTaskCompleted") + def respond_decision_task_completed( + self, + context: RequestContext, + task_token: TaskToken, + decisions: DecisionList | None = None, + execution_context: Data | None = None, + task_list: TaskList | None = None, + task_list_schedule_to_start_timeout: DurationInSecondsOptional | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("SignalWorkflowExecution") + def signal_workflow_execution( + self, + context: RequestContext, + domain: DomainName, + workflow_id: WorkflowId, + signal_name: SignalName, + run_id: WorkflowRunIdOptional | None = None, + input: Data | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("StartWorkflowExecution") + def start_workflow_execution( + self, + context: RequestContext, + domain: DomainName, + workflow_id: WorkflowId, + workflow_type: WorkflowType, + task_list: TaskList | None = None, + task_priority: TaskPriority | None = None, + input: Data | None = None, + execution_start_to_close_timeout: DurationInSecondsOptional | None = None, + 
tag_list: TagList | None = None, + task_start_to_close_timeout: DurationInSecondsOptional | None = None, + child_policy: ChildPolicy | None = None, + lambda_role: Arn | None = None, + **kwargs, + ) -> Run: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: Arn, tags: ResourceTagList, **kwargs + ) -> None: + raise NotImplementedError + + @handler("TerminateWorkflowExecution") + def terminate_workflow_execution( + self, + context: RequestContext, + domain: DomainName, + workflow_id: WorkflowId, + run_id: WorkflowRunIdOptional | None = None, + reason: TerminateReason | None = None, + details: Data | None = None, + child_policy: ChildPolicy | None = None, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("UndeprecateActivityType") + def undeprecate_activity_type( + self, context: RequestContext, domain: DomainName, activity_type: ActivityType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UndeprecateDomain") + def undeprecate_domain(self, context: RequestContext, name: DomainName, **kwargs) -> None: + raise NotImplementedError + + @handler("UndeprecateWorkflowType") + def undeprecate_workflow_type( + self, context: RequestContext, domain: DomainName, workflow_type: WorkflowType, **kwargs + ) -> None: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, resource_arn: Arn, tag_keys: ResourceTagKeyList, **kwargs + ) -> None: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/api/transcribe/__init__.py b/localstack-core/localstack/aws/api/transcribe/__init__.py new file mode 100644 index 0000000000000..6e1d666bcd326 --- /dev/null +++ b/localstack-core/localstack/aws/api/transcribe/__init__.py @@ -0,0 +1,1668 @@ +from datetime import datetime +from enum import StrEnum +from typing import Dict, List, Optional, TypedDict + +from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler + +Boolean = bool +CallAnalyticsJobName = str +CategoryName = str +ChannelId = int +DataAccessRoleArn = str +DurationInSeconds = float +FailureReason = str +IdentifiedLanguageScore = float +KMSKeyId = str +MaxAlternatives = int +MaxResults = int +MaxSpeakers = int +MediaSampleRateHertz = int +MedicalMediaSampleRateHertz = int +MedicalScribeChannelId = int +ModelName = str +NextToken = str +NonEmptyString = str +OutputBucketName = str +OutputKey = str +Percentage = int +Phrase = str +String = str +SubtitleOutputStartIndex = int +TagKey = str +TagValue = str +TranscribeArn = str +TranscriptionJobName = str +Uri = str +VocabularyFilterName = str +VocabularyName = str +Word = str + + +class BaseModelName(StrEnum): + NarrowBand = "NarrowBand" + WideBand = "WideBand" + + +class CLMLanguageCode(StrEnum): + en_US = "en-US" + hi_IN = "hi-IN" + es_US = "es-US" + en_GB = "en-GB" + en_AU = "en-AU" + de_DE = "de-DE" + ja_JP = "ja-JP" + + +class CallAnalyticsFeature(StrEnum): + GENERATIVE_SUMMARIZATION = "GENERATIVE_SUMMARIZATION" + + +class CallAnalyticsJobStatus(StrEnum): + QUEUED = "QUEUED" + IN_PROGRESS = "IN_PROGRESS" + FAILED = "FAILED" + COMPLETED = "COMPLETED" + + +class CallAnalyticsSkippedReasonCode(StrEnum): + INSUFFICIENT_CONVERSATION_CONTENT = "INSUFFICIENT_CONVERSATION_CONTENT" + FAILED_SAFETY_GUIDELINES = "FAILED_SAFETY_GUIDELINES" + + +class InputType(StrEnum): + REAL_TIME = "REAL_TIME" + POST_CALL = "POST_CALL" + + +class LanguageCode(StrEnum): + af_ZA = "af-ZA" + ar_AE = "ar-AE" + ar_SA = "ar-SA" + da_DK = 
"da-DK" + de_CH = "de-CH" + de_DE = "de-DE" + en_AB = "en-AB" + en_AU = "en-AU" + en_GB = "en-GB" + en_IE = "en-IE" + en_IN = "en-IN" + en_US = "en-US" + en_WL = "en-WL" + es_ES = "es-ES" + es_US = "es-US" + fa_IR = "fa-IR" + fr_CA = "fr-CA" + fr_FR = "fr-FR" + he_IL = "he-IL" + hi_IN = "hi-IN" + id_ID = "id-ID" + it_IT = "it-IT" + ja_JP = "ja-JP" + ko_KR = "ko-KR" + ms_MY = "ms-MY" + nl_NL = "nl-NL" + pt_BR = "pt-BR" + pt_PT = "pt-PT" + ru_RU = "ru-RU" + ta_IN = "ta-IN" + te_IN = "te-IN" + tr_TR = "tr-TR" + zh_CN = "zh-CN" + zh_TW = "zh-TW" + th_TH = "th-TH" + en_ZA = "en-ZA" + en_NZ = "en-NZ" + vi_VN = "vi-VN" + sv_SE = "sv-SE" + ab_GE = "ab-GE" + ast_ES = "ast-ES" + az_AZ = "az-AZ" + ba_RU = "ba-RU" + be_BY = "be-BY" + bg_BG = "bg-BG" + bn_IN = "bn-IN" + bs_BA = "bs-BA" + ca_ES = "ca-ES" + ckb_IQ = "ckb-IQ" + ckb_IR = "ckb-IR" + cs_CZ = "cs-CZ" + cy_WL = "cy-WL" + el_GR = "el-GR" + et_ET = "et-ET" + eu_ES = "eu-ES" + fi_FI = "fi-FI" + gl_ES = "gl-ES" + gu_IN = "gu-IN" + ha_NG = "ha-NG" + hr_HR = "hr-HR" + hu_HU = "hu-HU" + hy_AM = "hy-AM" + is_IS = "is-IS" + ka_GE = "ka-GE" + kab_DZ = "kab-DZ" + kk_KZ = "kk-KZ" + kn_IN = "kn-IN" + ky_KG = "ky-KG" + lg_IN = "lg-IN" + lt_LT = "lt-LT" + lv_LV = "lv-LV" + mhr_RU = "mhr-RU" + mi_NZ = "mi-NZ" + mk_MK = "mk-MK" + ml_IN = "ml-IN" + mn_MN = "mn-MN" + mr_IN = "mr-IN" + mt_MT = "mt-MT" + no_NO = "no-NO" + or_IN = "or-IN" + pa_IN = "pa-IN" + pl_PL = "pl-PL" + ps_AF = "ps-AF" + ro_RO = "ro-RO" + rw_RW = "rw-RW" + si_LK = "si-LK" + sk_SK = "sk-SK" + sl_SI = "sl-SI" + so_SO = "so-SO" + sr_RS = "sr-RS" + su_ID = "su-ID" + sw_BI = "sw-BI" + sw_KE = "sw-KE" + sw_RW = "sw-RW" + sw_TZ = "sw-TZ" + sw_UG = "sw-UG" + tl_PH = "tl-PH" + tt_RU = "tt-RU" + ug_CN = "ug-CN" + uk_UA = "uk-UA" + uz_UZ = "uz-UZ" + wo_SN = "wo-SN" + zh_HK = "zh-HK" + zu_ZA = "zu-ZA" + + +class MediaFormat(StrEnum): + mp3 = "mp3" + mp4 = "mp4" + wav = "wav" + flac = "flac" + ogg = "ogg" + amr = "amr" + webm = "webm" + m4a = "m4a" + + +class MedicalContentIdentificationType(StrEnum): + PHI = "PHI" + + +class MedicalScribeJobStatus(StrEnum): + QUEUED = "QUEUED" + IN_PROGRESS = "IN_PROGRESS" + FAILED = "FAILED" + COMPLETED = "COMPLETED" + + +class MedicalScribeLanguageCode(StrEnum): + en_US = "en-US" + + +class MedicalScribeNoteTemplate(StrEnum): + HISTORY_AND_PHYSICAL = "HISTORY_AND_PHYSICAL" + GIRPP = "GIRPP" + + +class MedicalScribeParticipantRole(StrEnum): + PATIENT = "PATIENT" + CLINICIAN = "CLINICIAN" + + +class ModelStatus(StrEnum): + IN_PROGRESS = "IN_PROGRESS" + FAILED = "FAILED" + COMPLETED = "COMPLETED" + + +class OutputLocationType(StrEnum): + CUSTOMER_BUCKET = "CUSTOMER_BUCKET" + SERVICE_BUCKET = "SERVICE_BUCKET" + + +class ParticipantRole(StrEnum): + AGENT = "AGENT" + CUSTOMER = "CUSTOMER" + + +class PiiEntityType(StrEnum): + BANK_ACCOUNT_NUMBER = "BANK_ACCOUNT_NUMBER" + BANK_ROUTING = "BANK_ROUTING" + CREDIT_DEBIT_NUMBER = "CREDIT_DEBIT_NUMBER" + CREDIT_DEBIT_CVV = "CREDIT_DEBIT_CVV" + CREDIT_DEBIT_EXPIRY = "CREDIT_DEBIT_EXPIRY" + PIN = "PIN" + EMAIL = "EMAIL" + ADDRESS = "ADDRESS" + NAME = "NAME" + PHONE = "PHONE" + SSN = "SSN" + ALL = "ALL" + + +class RedactionOutput(StrEnum): + redacted = "redacted" + redacted_and_unredacted = "redacted_and_unredacted" + + +class RedactionType(StrEnum): + PII = "PII" + + +class SentimentValue(StrEnum): + POSITIVE = "POSITIVE" + NEGATIVE = "NEGATIVE" + NEUTRAL = "NEUTRAL" + MIXED = "MIXED" + + +class Specialty(StrEnum): + PRIMARYCARE = "PRIMARYCARE" + + +class SubtitleFormat(StrEnum): + vtt = "vtt" + srt = "srt" + + +class 
ToxicityCategory(StrEnum): + ALL = "ALL" + + +class TranscriptFilterType(StrEnum): + EXACT = "EXACT" + + +class TranscriptionJobStatus(StrEnum): + QUEUED = "QUEUED" + IN_PROGRESS = "IN_PROGRESS" + FAILED = "FAILED" + COMPLETED = "COMPLETED" + + +class Type(StrEnum): + CONVERSATION = "CONVERSATION" + DICTATION = "DICTATION" + + +class VocabularyFilterMethod(StrEnum): + remove = "remove" + mask = "mask" + tag = "tag" + + +class VocabularyState(StrEnum): + PENDING = "PENDING" + READY = "READY" + FAILED = "FAILED" + + +class BadRequestException(ServiceException): + code: str = "BadRequestException" + sender_fault: bool = False + status_code: int = 400 + + +class ConflictException(ServiceException): + code: str = "ConflictException" + sender_fault: bool = False + status_code: int = 400 + + +class InternalFailureException(ServiceException): + code: str = "InternalFailureException" + sender_fault: bool = False + status_code: int = 400 + + +class LimitExceededException(ServiceException): + code: str = "LimitExceededException" + sender_fault: bool = False + status_code: int = 400 + + +class NotFoundException(ServiceException): + code: str = "NotFoundException" + sender_fault: bool = False + status_code: int = 400 + + +TimestampMilliseconds = int + + +class AbsoluteTimeRange(TypedDict, total=False): + StartTime: Optional[TimestampMilliseconds] + EndTime: Optional[TimestampMilliseconds] + First: Optional[TimestampMilliseconds] + Last: Optional[TimestampMilliseconds] + + +class Tag(TypedDict, total=False): + Key: TagKey + Value: TagValue + + +TagList = List[Tag] + + +class ChannelDefinition(TypedDict, total=False): + ChannelId: Optional[ChannelId] + ParticipantRole: Optional[ParticipantRole] + + +ChannelDefinitions = List[ChannelDefinition] + + +class Summarization(TypedDict, total=False): + GenerateAbstractiveSummary: Boolean + + +class LanguageIdSettings(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + VocabularyFilterName: Optional[VocabularyFilterName] + LanguageModelName: Optional[ModelName] + + +LanguageIdSettingsMap = Dict[LanguageCode, LanguageIdSettings] +LanguageOptions = List[LanguageCode] +PiiEntityTypes = List[PiiEntityType] + + +class ContentRedaction(TypedDict, total=False): + RedactionType: RedactionType + RedactionOutput: RedactionOutput + PiiEntityTypes: Optional[PiiEntityTypes] + + +class CallAnalyticsJobSettings(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + VocabularyFilterName: Optional[VocabularyFilterName] + VocabularyFilterMethod: Optional[VocabularyFilterMethod] + LanguageModelName: Optional[ModelName] + ContentRedaction: Optional[ContentRedaction] + LanguageOptions: Optional[LanguageOptions] + LanguageIdSettings: Optional[LanguageIdSettingsMap] + Summarization: Optional[Summarization] + + +DateTime = datetime + + +class Transcript(TypedDict, total=False): + TranscriptFileUri: Optional[Uri] + RedactedTranscriptFileUri: Optional[Uri] + + +class Media(TypedDict, total=False): + MediaFileUri: Optional[Uri] + RedactedMediaFileUri: Optional[Uri] + + +class CallAnalyticsSkippedFeature(TypedDict, total=False): + Feature: Optional[CallAnalyticsFeature] + ReasonCode: Optional[CallAnalyticsSkippedReasonCode] + Message: Optional[String] + + +CallAnalyticsSkippedFeatureList = List[CallAnalyticsSkippedFeature] + + +class CallAnalyticsJobDetails(TypedDict, total=False): + Skipped: Optional[CallAnalyticsSkippedFeatureList] + + +class CallAnalyticsJob(TypedDict, total=False): + CallAnalyticsJobName: Optional[CallAnalyticsJobName] + 
CallAnalyticsJobStatus: Optional[CallAnalyticsJobStatus] + CallAnalyticsJobDetails: Optional[CallAnalyticsJobDetails] + LanguageCode: Optional[LanguageCode] + MediaSampleRateHertz: Optional[MediaSampleRateHertz] + MediaFormat: Optional[MediaFormat] + Media: Optional[Media] + Transcript: Optional[Transcript] + StartTime: Optional[DateTime] + CreationTime: Optional[DateTime] + CompletionTime: Optional[DateTime] + FailureReason: Optional[FailureReason] + DataAccessRoleArn: Optional[DataAccessRoleArn] + IdentifiedLanguageScore: Optional[IdentifiedLanguageScore] + Settings: Optional[CallAnalyticsJobSettings] + ChannelDefinitions: Optional[ChannelDefinitions] + Tags: Optional[TagList] + + +class CallAnalyticsJobSummary(TypedDict, total=False): + CallAnalyticsJobName: Optional[CallAnalyticsJobName] + CreationTime: Optional[DateTime] + StartTime: Optional[DateTime] + CompletionTime: Optional[DateTime] + LanguageCode: Optional[LanguageCode] + CallAnalyticsJobStatus: Optional[CallAnalyticsJobStatus] + CallAnalyticsJobDetails: Optional[CallAnalyticsJobDetails] + FailureReason: Optional[FailureReason] + + +CallAnalyticsJobSummaries = List[CallAnalyticsJobSummary] + + +class RelativeTimeRange(TypedDict, total=False): + StartPercentage: Optional[Percentage] + EndPercentage: Optional[Percentage] + First: Optional[Percentage] + Last: Optional[Percentage] + + +SentimentValueList = List[SentimentValue] + + +class SentimentFilter(TypedDict, total=False): + Sentiments: SentimentValueList + AbsoluteTimeRange: Optional[AbsoluteTimeRange] + RelativeTimeRange: Optional[RelativeTimeRange] + ParticipantRole: Optional[ParticipantRole] + Negate: Optional[Boolean] + + +StringTargetList = List[NonEmptyString] + + +class TranscriptFilter(TypedDict, total=False): + TranscriptFilterType: TranscriptFilterType + AbsoluteTimeRange: Optional[AbsoluteTimeRange] + RelativeTimeRange: Optional[RelativeTimeRange] + ParticipantRole: Optional[ParticipantRole] + Negate: Optional[Boolean] + Targets: StringTargetList + + +class InterruptionFilter(TypedDict, total=False): + Threshold: Optional[TimestampMilliseconds] + ParticipantRole: Optional[ParticipantRole] + AbsoluteTimeRange: Optional[AbsoluteTimeRange] + RelativeTimeRange: Optional[RelativeTimeRange] + Negate: Optional[Boolean] + + +class NonTalkTimeFilter(TypedDict, total=False): + Threshold: Optional[TimestampMilliseconds] + AbsoluteTimeRange: Optional[AbsoluteTimeRange] + RelativeTimeRange: Optional[RelativeTimeRange] + Negate: Optional[Boolean] + + +class Rule(TypedDict, total=False): + NonTalkTimeFilter: Optional[NonTalkTimeFilter] + InterruptionFilter: Optional[InterruptionFilter] + TranscriptFilter: Optional[TranscriptFilter] + SentimentFilter: Optional[SentimentFilter] + + +RuleList = List[Rule] + + +class CategoryProperties(TypedDict, total=False): + CategoryName: Optional[CategoryName] + Rules: Optional[RuleList] + CreateTime: Optional[DateTime] + LastUpdateTime: Optional[DateTime] + Tags: Optional[TagList] + InputType: Optional[InputType] + + +CategoryPropertiesList = List[CategoryProperties] + + +class ClinicalNoteGenerationSettings(TypedDict, total=False): + NoteTemplate: Optional[MedicalScribeNoteTemplate] + + +class CreateCallAnalyticsCategoryRequest(ServiceRequest): + CategoryName: CategoryName + Rules: RuleList + Tags: Optional[TagList] + InputType: Optional[InputType] + + +class CreateCallAnalyticsCategoryResponse(TypedDict, total=False): + CategoryProperties: Optional[CategoryProperties] + + +class InputDataConfig(TypedDict, total=False): + S3Uri: Uri + 
TuningDataS3Uri: Optional[Uri] + DataAccessRoleArn: DataAccessRoleArn + + +class CreateLanguageModelRequest(ServiceRequest): + LanguageCode: CLMLanguageCode + BaseModelName: BaseModelName + ModelName: ModelName + InputDataConfig: InputDataConfig + Tags: Optional[TagList] + + +class CreateLanguageModelResponse(TypedDict, total=False): + LanguageCode: Optional[CLMLanguageCode] + BaseModelName: Optional[BaseModelName] + ModelName: Optional[ModelName] + InputDataConfig: Optional[InputDataConfig] + ModelStatus: Optional[ModelStatus] + + +class CreateMedicalVocabularyRequest(ServiceRequest): + VocabularyName: VocabularyName + LanguageCode: LanguageCode + VocabularyFileUri: Uri + Tags: Optional[TagList] + + +class CreateMedicalVocabularyResponse(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + LanguageCode: Optional[LanguageCode] + VocabularyState: Optional[VocabularyState] + LastModifiedTime: Optional[DateTime] + FailureReason: Optional[FailureReason] + + +Words = List[Word] + + +class CreateVocabularyFilterRequest(ServiceRequest): + VocabularyFilterName: VocabularyFilterName + LanguageCode: LanguageCode + Words: Optional[Words] + VocabularyFilterFileUri: Optional[Uri] + Tags: Optional[TagList] + DataAccessRoleArn: Optional[DataAccessRoleArn] + + +class CreateVocabularyFilterResponse(TypedDict, total=False): + VocabularyFilterName: Optional[VocabularyFilterName] + LanguageCode: Optional[LanguageCode] + LastModifiedTime: Optional[DateTime] + + +Phrases = List[Phrase] + + +class CreateVocabularyRequest(ServiceRequest): + VocabularyName: VocabularyName + LanguageCode: LanguageCode + Phrases: Optional[Phrases] + VocabularyFileUri: Optional[Uri] + Tags: Optional[TagList] + DataAccessRoleArn: Optional[DataAccessRoleArn] + + +class CreateVocabularyResponse(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + LanguageCode: Optional[LanguageCode] + VocabularyState: Optional[VocabularyState] + LastModifiedTime: Optional[DateTime] + FailureReason: Optional[FailureReason] + + +class DeleteCallAnalyticsCategoryRequest(ServiceRequest): + CategoryName: CategoryName + + +class DeleteCallAnalyticsCategoryResponse(TypedDict, total=False): + pass + + +class DeleteCallAnalyticsJobRequest(ServiceRequest): + CallAnalyticsJobName: CallAnalyticsJobName + + +class DeleteCallAnalyticsJobResponse(TypedDict, total=False): + pass + + +class DeleteLanguageModelRequest(ServiceRequest): + ModelName: ModelName + + +class DeleteMedicalScribeJobRequest(ServiceRequest): + MedicalScribeJobName: TranscriptionJobName + + +class DeleteMedicalTranscriptionJobRequest(ServiceRequest): + MedicalTranscriptionJobName: TranscriptionJobName + + +class DeleteMedicalVocabularyRequest(ServiceRequest): + VocabularyName: VocabularyName + + +class DeleteTranscriptionJobRequest(ServiceRequest): + TranscriptionJobName: TranscriptionJobName + + +class DeleteVocabularyFilterRequest(ServiceRequest): + VocabularyFilterName: VocabularyFilterName + + +class DeleteVocabularyRequest(ServiceRequest): + VocabularyName: VocabularyName + + +class DescribeLanguageModelRequest(ServiceRequest): + ModelName: ModelName + + +class LanguageModel(TypedDict, total=False): + ModelName: Optional[ModelName] + CreateTime: Optional[DateTime] + LastModifiedTime: Optional[DateTime] + LanguageCode: Optional[CLMLanguageCode] + BaseModelName: Optional[BaseModelName] + ModelStatus: Optional[ModelStatus] + UpgradeAvailability: Optional[Boolean] + FailureReason: Optional[FailureReason] + InputDataConfig: Optional[InputDataConfig] + + +class 
DescribeLanguageModelResponse(TypedDict, total=False): + LanguageModel: Optional[LanguageModel] + + +class GetCallAnalyticsCategoryRequest(ServiceRequest): + CategoryName: CategoryName + + +class GetCallAnalyticsCategoryResponse(TypedDict, total=False): + CategoryProperties: Optional[CategoryProperties] + + +class GetCallAnalyticsJobRequest(ServiceRequest): + CallAnalyticsJobName: CallAnalyticsJobName + + +class GetCallAnalyticsJobResponse(TypedDict, total=False): + CallAnalyticsJob: Optional[CallAnalyticsJob] + + +class GetMedicalScribeJobRequest(ServiceRequest): + MedicalScribeJobName: TranscriptionJobName + + +class MedicalScribeChannelDefinition(TypedDict, total=False): + ChannelId: MedicalScribeChannelId + ParticipantRole: MedicalScribeParticipantRole + + +MedicalScribeChannelDefinitions = List[MedicalScribeChannelDefinition] + + +class MedicalScribeSettings(TypedDict, total=False): + ShowSpeakerLabels: Optional[Boolean] + MaxSpeakerLabels: Optional[MaxSpeakers] + ChannelIdentification: Optional[Boolean] + VocabularyName: Optional[VocabularyName] + VocabularyFilterName: Optional[VocabularyFilterName] + VocabularyFilterMethod: Optional[VocabularyFilterMethod] + ClinicalNoteGenerationSettings: Optional[ClinicalNoteGenerationSettings] + + +class MedicalScribeOutput(TypedDict, total=False): + TranscriptFileUri: Uri + ClinicalDocumentUri: Uri + + +class MedicalScribeJob(TypedDict, total=False): + MedicalScribeJobName: Optional[TranscriptionJobName] + MedicalScribeJobStatus: Optional[MedicalScribeJobStatus] + LanguageCode: Optional[MedicalScribeLanguageCode] + Media: Optional[Media] + MedicalScribeOutput: Optional[MedicalScribeOutput] + StartTime: Optional[DateTime] + CreationTime: Optional[DateTime] + CompletionTime: Optional[DateTime] + FailureReason: Optional[FailureReason] + Settings: Optional[MedicalScribeSettings] + DataAccessRoleArn: Optional[DataAccessRoleArn] + ChannelDefinitions: Optional[MedicalScribeChannelDefinitions] + Tags: Optional[TagList] + + +class GetMedicalScribeJobResponse(TypedDict, total=False): + MedicalScribeJob: Optional[MedicalScribeJob] + + +class GetMedicalTranscriptionJobRequest(ServiceRequest): + MedicalTranscriptionJobName: TranscriptionJobName + + +class MedicalTranscriptionSetting(TypedDict, total=False): + ShowSpeakerLabels: Optional[Boolean] + MaxSpeakerLabels: Optional[MaxSpeakers] + ChannelIdentification: Optional[Boolean] + ShowAlternatives: Optional[Boolean] + MaxAlternatives: Optional[MaxAlternatives] + VocabularyName: Optional[VocabularyName] + + +class MedicalTranscript(TypedDict, total=False): + TranscriptFileUri: Optional[Uri] + + +class MedicalTranscriptionJob(TypedDict, total=False): + MedicalTranscriptionJobName: Optional[TranscriptionJobName] + TranscriptionJobStatus: Optional[TranscriptionJobStatus] + LanguageCode: Optional[LanguageCode] + MediaSampleRateHertz: Optional[MedicalMediaSampleRateHertz] + MediaFormat: Optional[MediaFormat] + Media: Optional[Media] + Transcript: Optional[MedicalTranscript] + StartTime: Optional[DateTime] + CreationTime: Optional[DateTime] + CompletionTime: Optional[DateTime] + FailureReason: Optional[FailureReason] + Settings: Optional[MedicalTranscriptionSetting] + ContentIdentificationType: Optional[MedicalContentIdentificationType] + Specialty: Optional[Specialty] + Type: Optional[Type] + Tags: Optional[TagList] + + +class GetMedicalTranscriptionJobResponse(TypedDict, total=False): + MedicalTranscriptionJob: Optional[MedicalTranscriptionJob] + + +class GetMedicalVocabularyRequest(ServiceRequest): + 
VocabularyName: VocabularyName + + +class GetMedicalVocabularyResponse(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + LanguageCode: Optional[LanguageCode] + VocabularyState: Optional[VocabularyState] + LastModifiedTime: Optional[DateTime] + FailureReason: Optional[FailureReason] + DownloadUri: Optional[Uri] + + +class GetTranscriptionJobRequest(ServiceRequest): + TranscriptionJobName: TranscriptionJobName + + +ToxicityCategories = List[ToxicityCategory] + + +class ToxicityDetectionSettings(TypedDict, total=False): + ToxicityCategories: ToxicityCategories + + +ToxicityDetection = List[ToxicityDetectionSettings] +SubtitleFileUris = List[Uri] +SubtitleFormats = List[SubtitleFormat] + + +class SubtitlesOutput(TypedDict, total=False): + Formats: Optional[SubtitleFormats] + SubtitleFileUris: Optional[SubtitleFileUris] + OutputStartIndex: Optional[SubtitleOutputStartIndex] + + +class LanguageCodeItem(TypedDict, total=False): + LanguageCode: Optional[LanguageCode] + DurationInSeconds: Optional[DurationInSeconds] + + +LanguageCodeList = List[LanguageCodeItem] + + +class JobExecutionSettings(TypedDict, total=False): + AllowDeferredExecution: Optional[Boolean] + DataAccessRoleArn: Optional[DataAccessRoleArn] + + +class ModelSettings(TypedDict, total=False): + LanguageModelName: Optional[ModelName] + + +class Settings(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + ShowSpeakerLabels: Optional[Boolean] + MaxSpeakerLabels: Optional[MaxSpeakers] + ChannelIdentification: Optional[Boolean] + ShowAlternatives: Optional[Boolean] + MaxAlternatives: Optional[MaxAlternatives] + VocabularyFilterName: Optional[VocabularyFilterName] + VocabularyFilterMethod: Optional[VocabularyFilterMethod] + + +class TranscriptionJob(TypedDict, total=False): + TranscriptionJobName: Optional[TranscriptionJobName] + TranscriptionJobStatus: Optional[TranscriptionJobStatus] + LanguageCode: Optional[LanguageCode] + MediaSampleRateHertz: Optional[MediaSampleRateHertz] + MediaFormat: Optional[MediaFormat] + Media: Optional[Media] + Transcript: Optional[Transcript] + StartTime: Optional[DateTime] + CreationTime: Optional[DateTime] + CompletionTime: Optional[DateTime] + FailureReason: Optional[FailureReason] + Settings: Optional[Settings] + ModelSettings: Optional[ModelSettings] + JobExecutionSettings: Optional[JobExecutionSettings] + ContentRedaction: Optional[ContentRedaction] + IdentifyLanguage: Optional[Boolean] + IdentifyMultipleLanguages: Optional[Boolean] + LanguageOptions: Optional[LanguageOptions] + IdentifiedLanguageScore: Optional[IdentifiedLanguageScore] + LanguageCodes: Optional[LanguageCodeList] + Tags: Optional[TagList] + Subtitles: Optional[SubtitlesOutput] + LanguageIdSettings: Optional[LanguageIdSettingsMap] + ToxicityDetection: Optional[ToxicityDetection] + + +class GetTranscriptionJobResponse(TypedDict, total=False): + TranscriptionJob: Optional[TranscriptionJob] + + +class GetVocabularyFilterRequest(ServiceRequest): + VocabularyFilterName: VocabularyFilterName + + +class GetVocabularyFilterResponse(TypedDict, total=False): + VocabularyFilterName: Optional[VocabularyFilterName] + LanguageCode: Optional[LanguageCode] + LastModifiedTime: Optional[DateTime] + DownloadUri: Optional[Uri] + + +class GetVocabularyRequest(ServiceRequest): + VocabularyName: VocabularyName + + +class GetVocabularyResponse(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + LanguageCode: Optional[LanguageCode] + VocabularyState: Optional[VocabularyState] + LastModifiedTime: 
Optional[DateTime] + FailureReason: Optional[FailureReason] + DownloadUri: Optional[Uri] + + +KMSEncryptionContextMap = Dict[NonEmptyString, NonEmptyString] + + +class ListCallAnalyticsCategoriesRequest(ServiceRequest): + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class ListCallAnalyticsCategoriesResponse(TypedDict, total=False): + NextToken: Optional[NextToken] + Categories: Optional[CategoryPropertiesList] + + +class ListCallAnalyticsJobsRequest(ServiceRequest): + Status: Optional[CallAnalyticsJobStatus] + JobNameContains: Optional[CallAnalyticsJobName] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class ListCallAnalyticsJobsResponse(TypedDict, total=False): + Status: Optional[CallAnalyticsJobStatus] + NextToken: Optional[NextToken] + CallAnalyticsJobSummaries: Optional[CallAnalyticsJobSummaries] + + +class ListLanguageModelsRequest(ServiceRequest): + StatusEquals: Optional[ModelStatus] + NameContains: Optional[ModelName] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +Models = List[LanguageModel] + + +class ListLanguageModelsResponse(TypedDict, total=False): + NextToken: Optional[NextToken] + Models: Optional[Models] + + +class ListMedicalScribeJobsRequest(ServiceRequest): + Status: Optional[MedicalScribeJobStatus] + JobNameContains: Optional[TranscriptionJobName] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class MedicalScribeJobSummary(TypedDict, total=False): + MedicalScribeJobName: Optional[TranscriptionJobName] + CreationTime: Optional[DateTime] + StartTime: Optional[DateTime] + CompletionTime: Optional[DateTime] + LanguageCode: Optional[MedicalScribeLanguageCode] + MedicalScribeJobStatus: Optional[MedicalScribeJobStatus] + FailureReason: Optional[FailureReason] + + +MedicalScribeJobSummaries = List[MedicalScribeJobSummary] + + +class ListMedicalScribeJobsResponse(TypedDict, total=False): + Status: Optional[MedicalScribeJobStatus] + NextToken: Optional[NextToken] + MedicalScribeJobSummaries: Optional[MedicalScribeJobSummaries] + + +class ListMedicalTranscriptionJobsRequest(ServiceRequest): + Status: Optional[TranscriptionJobStatus] + JobNameContains: Optional[TranscriptionJobName] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class MedicalTranscriptionJobSummary(TypedDict, total=False): + MedicalTranscriptionJobName: Optional[TranscriptionJobName] + CreationTime: Optional[DateTime] + StartTime: Optional[DateTime] + CompletionTime: Optional[DateTime] + LanguageCode: Optional[LanguageCode] + TranscriptionJobStatus: Optional[TranscriptionJobStatus] + FailureReason: Optional[FailureReason] + OutputLocationType: Optional[OutputLocationType] + Specialty: Optional[Specialty] + ContentIdentificationType: Optional[MedicalContentIdentificationType] + Type: Optional[Type] + + +MedicalTranscriptionJobSummaries = List[MedicalTranscriptionJobSummary] + + +class ListMedicalTranscriptionJobsResponse(TypedDict, total=False): + Status: Optional[TranscriptionJobStatus] + NextToken: Optional[NextToken] + MedicalTranscriptionJobSummaries: Optional[MedicalTranscriptionJobSummaries] + + +class ListMedicalVocabulariesRequest(ServiceRequest): + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + StateEquals: Optional[VocabularyState] + NameContains: Optional[VocabularyName] + + +class VocabularyInfo(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + LanguageCode: Optional[LanguageCode] + LastModifiedTime: Optional[DateTime] + 
VocabularyState: Optional[VocabularyState] + + +Vocabularies = List[VocabularyInfo] + + +class ListMedicalVocabulariesResponse(TypedDict, total=False): + Status: Optional[VocabularyState] + NextToken: Optional[NextToken] + Vocabularies: Optional[Vocabularies] + + +class ListTagsForResourceRequest(ServiceRequest): + ResourceArn: TranscribeArn + + +class ListTagsForResourceResponse(TypedDict, total=False): + ResourceArn: Optional[TranscribeArn] + Tags: Optional[TagList] + + +class ListTranscriptionJobsRequest(ServiceRequest): + Status: Optional[TranscriptionJobStatus] + JobNameContains: Optional[TranscriptionJobName] + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + + +class TranscriptionJobSummary(TypedDict, total=False): + TranscriptionJobName: Optional[TranscriptionJobName] + CreationTime: Optional[DateTime] + StartTime: Optional[DateTime] + CompletionTime: Optional[DateTime] + LanguageCode: Optional[LanguageCode] + TranscriptionJobStatus: Optional[TranscriptionJobStatus] + FailureReason: Optional[FailureReason] + OutputLocationType: Optional[OutputLocationType] + ContentRedaction: Optional[ContentRedaction] + ModelSettings: Optional[ModelSettings] + IdentifyLanguage: Optional[Boolean] + IdentifyMultipleLanguages: Optional[Boolean] + IdentifiedLanguageScore: Optional[IdentifiedLanguageScore] + LanguageCodes: Optional[LanguageCodeList] + ToxicityDetection: Optional[ToxicityDetection] + + +TranscriptionJobSummaries = List[TranscriptionJobSummary] + + +class ListTranscriptionJobsResponse(TypedDict, total=False): + Status: Optional[TranscriptionJobStatus] + NextToken: Optional[NextToken] + TranscriptionJobSummaries: Optional[TranscriptionJobSummaries] + + +class ListVocabulariesRequest(ServiceRequest): + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + StateEquals: Optional[VocabularyState] + NameContains: Optional[VocabularyName] + + +class ListVocabulariesResponse(TypedDict, total=False): + Status: Optional[VocabularyState] + NextToken: Optional[NextToken] + Vocabularies: Optional[Vocabularies] + + +class ListVocabularyFiltersRequest(ServiceRequest): + NextToken: Optional[NextToken] + MaxResults: Optional[MaxResults] + NameContains: Optional[VocabularyFilterName] + + +class VocabularyFilterInfo(TypedDict, total=False): + VocabularyFilterName: Optional[VocabularyFilterName] + LanguageCode: Optional[LanguageCode] + LastModifiedTime: Optional[DateTime] + + +VocabularyFilters = List[VocabularyFilterInfo] + + +class ListVocabularyFiltersResponse(TypedDict, total=False): + NextToken: Optional[NextToken] + VocabularyFilters: Optional[VocabularyFilters] + + +class StartCallAnalyticsJobRequest(ServiceRequest): + CallAnalyticsJobName: CallAnalyticsJobName + Media: Media + OutputLocation: Optional[Uri] + OutputEncryptionKMSKeyId: Optional[KMSKeyId] + DataAccessRoleArn: Optional[DataAccessRoleArn] + Settings: Optional[CallAnalyticsJobSettings] + Tags: Optional[TagList] + ChannelDefinitions: Optional[ChannelDefinitions] + + +class StartCallAnalyticsJobResponse(TypedDict, total=False): + CallAnalyticsJob: Optional[CallAnalyticsJob] + + +class StartMedicalScribeJobRequest(ServiceRequest): + MedicalScribeJobName: TranscriptionJobName + Media: Media + OutputBucketName: OutputBucketName + OutputEncryptionKMSKeyId: Optional[KMSKeyId] + KMSEncryptionContext: Optional[KMSEncryptionContextMap] + DataAccessRoleArn: DataAccessRoleArn + Settings: MedicalScribeSettings + ChannelDefinitions: Optional[MedicalScribeChannelDefinitions] + Tags: Optional[TagList] + + +class 
StartMedicalScribeJobResponse(TypedDict, total=False): + MedicalScribeJob: Optional[MedicalScribeJob] + + +class StartMedicalTranscriptionJobRequest(ServiceRequest): + MedicalTranscriptionJobName: TranscriptionJobName + LanguageCode: LanguageCode + MediaSampleRateHertz: Optional[MedicalMediaSampleRateHertz] + MediaFormat: Optional[MediaFormat] + Media: Media + OutputBucketName: OutputBucketName + OutputKey: Optional[OutputKey] + OutputEncryptionKMSKeyId: Optional[KMSKeyId] + KMSEncryptionContext: Optional[KMSEncryptionContextMap] + Settings: Optional[MedicalTranscriptionSetting] + ContentIdentificationType: Optional[MedicalContentIdentificationType] + Specialty: Specialty + Type: Type + Tags: Optional[TagList] + + +class StartMedicalTranscriptionJobResponse(TypedDict, total=False): + MedicalTranscriptionJob: Optional[MedicalTranscriptionJob] + + +class Subtitles(TypedDict, total=False): + Formats: Optional[SubtitleFormats] + OutputStartIndex: Optional[SubtitleOutputStartIndex] + + +class StartTranscriptionJobRequest(ServiceRequest): + TranscriptionJobName: TranscriptionJobName + LanguageCode: Optional[LanguageCode] + MediaSampleRateHertz: Optional[MediaSampleRateHertz] + MediaFormat: Optional[MediaFormat] + Media: Media + OutputBucketName: Optional[OutputBucketName] + OutputKey: Optional[OutputKey] + OutputEncryptionKMSKeyId: Optional[KMSKeyId] + KMSEncryptionContext: Optional[KMSEncryptionContextMap] + Settings: Optional[Settings] + ModelSettings: Optional[ModelSettings] + JobExecutionSettings: Optional[JobExecutionSettings] + ContentRedaction: Optional[ContentRedaction] + IdentifyLanguage: Optional[Boolean] + IdentifyMultipleLanguages: Optional[Boolean] + LanguageOptions: Optional[LanguageOptions] + Subtitles: Optional[Subtitles] + Tags: Optional[TagList] + LanguageIdSettings: Optional[LanguageIdSettingsMap] + ToxicityDetection: Optional[ToxicityDetection] + + +class StartTranscriptionJobResponse(TypedDict, total=False): + TranscriptionJob: Optional[TranscriptionJob] + + +TagKeyList = List[TagKey] + + +class TagResourceRequest(ServiceRequest): + ResourceArn: TranscribeArn + Tags: TagList + + +class TagResourceResponse(TypedDict, total=False): + pass + + +class UntagResourceRequest(ServiceRequest): + ResourceArn: TranscribeArn + TagKeys: TagKeyList + + +class UntagResourceResponse(TypedDict, total=False): + pass + + +class UpdateCallAnalyticsCategoryRequest(ServiceRequest): + CategoryName: CategoryName + Rules: RuleList + InputType: Optional[InputType] + + +class UpdateCallAnalyticsCategoryResponse(TypedDict, total=False): + CategoryProperties: Optional[CategoryProperties] + + +class UpdateMedicalVocabularyRequest(ServiceRequest): + VocabularyName: VocabularyName + LanguageCode: LanguageCode + VocabularyFileUri: Uri + + +class UpdateMedicalVocabularyResponse(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + LanguageCode: Optional[LanguageCode] + LastModifiedTime: Optional[DateTime] + VocabularyState: Optional[VocabularyState] + + +class UpdateVocabularyFilterRequest(ServiceRequest): + VocabularyFilterName: VocabularyFilterName + Words: Optional[Words] + VocabularyFilterFileUri: Optional[Uri] + DataAccessRoleArn: Optional[DataAccessRoleArn] + + +class UpdateVocabularyFilterResponse(TypedDict, total=False): + VocabularyFilterName: Optional[VocabularyFilterName] + LanguageCode: Optional[LanguageCode] + LastModifiedTime: Optional[DateTime] + + +class UpdateVocabularyRequest(ServiceRequest): + VocabularyName: VocabularyName + LanguageCode: LanguageCode + Phrases: 
Optional[Phrases] + VocabularyFileUri: Optional[Uri] + DataAccessRoleArn: Optional[DataAccessRoleArn] + + +class UpdateVocabularyResponse(TypedDict, total=False): + VocabularyName: Optional[VocabularyName] + LanguageCode: Optional[LanguageCode] + LastModifiedTime: Optional[DateTime] + VocabularyState: Optional[VocabularyState] + + +class TranscribeApi: + service = "transcribe" + version = "2017-10-26" + + @handler("CreateCallAnalyticsCategory") + def create_call_analytics_category( + self, + context: RequestContext, + category_name: CategoryName, + rules: RuleList, + tags: TagList | None = None, + input_type: InputType | None = None, + **kwargs, + ) -> CreateCallAnalyticsCategoryResponse: + raise NotImplementedError + + @handler("CreateLanguageModel") + def create_language_model( + self, + context: RequestContext, + language_code: CLMLanguageCode, + base_model_name: BaseModelName, + model_name: ModelName, + input_data_config: InputDataConfig, + tags: TagList | None = None, + **kwargs, + ) -> CreateLanguageModelResponse: + raise NotImplementedError + + @handler("CreateMedicalVocabulary") + def create_medical_vocabulary( + self, + context: RequestContext, + vocabulary_name: VocabularyName, + language_code: LanguageCode, + vocabulary_file_uri: Uri, + tags: TagList | None = None, + **kwargs, + ) -> CreateMedicalVocabularyResponse: + raise NotImplementedError + + @handler("CreateVocabulary") + def create_vocabulary( + self, + context: RequestContext, + vocabulary_name: VocabularyName, + language_code: LanguageCode, + phrases: Phrases | None = None, + vocabulary_file_uri: Uri | None = None, + tags: TagList | None = None, + data_access_role_arn: DataAccessRoleArn | None = None, + **kwargs, + ) -> CreateVocabularyResponse: + raise NotImplementedError + + @handler("CreateVocabularyFilter") + def create_vocabulary_filter( + self, + context: RequestContext, + vocabulary_filter_name: VocabularyFilterName, + language_code: LanguageCode, + words: Words | None = None, + vocabulary_filter_file_uri: Uri | None = None, + tags: TagList | None = None, + data_access_role_arn: DataAccessRoleArn | None = None, + **kwargs, + ) -> CreateVocabularyFilterResponse: + raise NotImplementedError + + @handler("DeleteCallAnalyticsCategory") + def delete_call_analytics_category( + self, context: RequestContext, category_name: CategoryName, **kwargs + ) -> DeleteCallAnalyticsCategoryResponse: + raise NotImplementedError + + @handler("DeleteCallAnalyticsJob") + def delete_call_analytics_job( + self, context: RequestContext, call_analytics_job_name: CallAnalyticsJobName, **kwargs + ) -> DeleteCallAnalyticsJobResponse: + raise NotImplementedError + + @handler("DeleteLanguageModel") + def delete_language_model( + self, context: RequestContext, model_name: ModelName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteMedicalScribeJob") + def delete_medical_scribe_job( + self, context: RequestContext, medical_scribe_job_name: TranscriptionJobName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteMedicalTranscriptionJob") + def delete_medical_transcription_job( + self, + context: RequestContext, + medical_transcription_job_name: TranscriptionJobName, + **kwargs, + ) -> None: + raise NotImplementedError + + @handler("DeleteMedicalVocabulary") + def delete_medical_vocabulary( + self, context: RequestContext, vocabulary_name: VocabularyName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteTranscriptionJob") + def delete_transcription_job( + self, context: RequestContext, 
transcription_job_name: TranscriptionJobName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteVocabulary") + def delete_vocabulary( + self, context: RequestContext, vocabulary_name: VocabularyName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DeleteVocabularyFilter") + def delete_vocabulary_filter( + self, context: RequestContext, vocabulary_filter_name: VocabularyFilterName, **kwargs + ) -> None: + raise NotImplementedError + + @handler("DescribeLanguageModel") + def describe_language_model( + self, context: RequestContext, model_name: ModelName, **kwargs + ) -> DescribeLanguageModelResponse: + raise NotImplementedError + + @handler("GetCallAnalyticsCategory") + def get_call_analytics_category( + self, context: RequestContext, category_name: CategoryName, **kwargs + ) -> GetCallAnalyticsCategoryResponse: + raise NotImplementedError + + @handler("GetCallAnalyticsJob") + def get_call_analytics_job( + self, context: RequestContext, call_analytics_job_name: CallAnalyticsJobName, **kwargs + ) -> GetCallAnalyticsJobResponse: + raise NotImplementedError + + @handler("GetMedicalScribeJob") + def get_medical_scribe_job( + self, context: RequestContext, medical_scribe_job_name: TranscriptionJobName, **kwargs + ) -> GetMedicalScribeJobResponse: + raise NotImplementedError + + @handler("GetMedicalTranscriptionJob") + def get_medical_transcription_job( + self, + context: RequestContext, + medical_transcription_job_name: TranscriptionJobName, + **kwargs, + ) -> GetMedicalTranscriptionJobResponse: + raise NotImplementedError + + @handler("GetMedicalVocabulary") + def get_medical_vocabulary( + self, context: RequestContext, vocabulary_name: VocabularyName, **kwargs + ) -> GetMedicalVocabularyResponse: + raise NotImplementedError + + @handler("GetTranscriptionJob") + def get_transcription_job( + self, context: RequestContext, transcription_job_name: TranscriptionJobName, **kwargs + ) -> GetTranscriptionJobResponse: + raise NotImplementedError + + @handler("GetVocabulary") + def get_vocabulary( + self, context: RequestContext, vocabulary_name: VocabularyName, **kwargs + ) -> GetVocabularyResponse: + raise NotImplementedError + + @handler("GetVocabularyFilter") + def get_vocabulary_filter( + self, context: RequestContext, vocabulary_filter_name: VocabularyFilterName, **kwargs + ) -> GetVocabularyFilterResponse: + raise NotImplementedError + + @handler("ListCallAnalyticsCategories") + def list_call_analytics_categories( + self, + context: RequestContext, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListCallAnalyticsCategoriesResponse: + raise NotImplementedError + + @handler("ListCallAnalyticsJobs") + def list_call_analytics_jobs( + self, + context: RequestContext, + status: CallAnalyticsJobStatus | None = None, + job_name_contains: CallAnalyticsJobName | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListCallAnalyticsJobsResponse: + raise NotImplementedError + + @handler("ListLanguageModels") + def list_language_models( + self, + context: RequestContext, + status_equals: ModelStatus | None = None, + name_contains: ModelName | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListLanguageModelsResponse: + raise NotImplementedError + + @handler("ListMedicalScribeJobs") + def list_medical_scribe_jobs( + self, + context: RequestContext, + status: MedicalScribeJobStatus | None = None, + 
job_name_contains: TranscriptionJobName | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListMedicalScribeJobsResponse: + raise NotImplementedError + + @handler("ListMedicalTranscriptionJobs") + def list_medical_transcription_jobs( + self, + context: RequestContext, + status: TranscriptionJobStatus | None = None, + job_name_contains: TranscriptionJobName | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListMedicalTranscriptionJobsResponse: + raise NotImplementedError + + @handler("ListMedicalVocabularies") + def list_medical_vocabularies( + self, + context: RequestContext, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + state_equals: VocabularyState | None = None, + name_contains: VocabularyName | None = None, + **kwargs, + ) -> ListMedicalVocabulariesResponse: + raise NotImplementedError + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: TranscribeArn, **kwargs + ) -> ListTagsForResourceResponse: + raise NotImplementedError + + @handler("ListTranscriptionJobs") + def list_transcription_jobs( + self, + context: RequestContext, + status: TranscriptionJobStatus | None = None, + job_name_contains: TranscriptionJobName | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs, + ) -> ListTranscriptionJobsResponse: + raise NotImplementedError + + @handler("ListVocabularies") + def list_vocabularies( + self, + context: RequestContext, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + state_equals: VocabularyState | None = None, + name_contains: VocabularyName | None = None, + **kwargs, + ) -> ListVocabulariesResponse: + raise NotImplementedError + + @handler("ListVocabularyFilters") + def list_vocabulary_filters( + self, + context: RequestContext, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + name_contains: VocabularyFilterName | None = None, + **kwargs, + ) -> ListVocabularyFiltersResponse: + raise NotImplementedError + + @handler("StartCallAnalyticsJob") + def start_call_analytics_job( + self, + context: RequestContext, + call_analytics_job_name: CallAnalyticsJobName, + media: Media, + output_location: Uri | None = None, + output_encryption_kms_key_id: KMSKeyId | None = None, + data_access_role_arn: DataAccessRoleArn | None = None, + settings: CallAnalyticsJobSettings | None = None, + tags: TagList | None = None, + channel_definitions: ChannelDefinitions | None = None, + **kwargs, + ) -> StartCallAnalyticsJobResponse: + raise NotImplementedError + + @handler("StartMedicalScribeJob") + def start_medical_scribe_job( + self, + context: RequestContext, + medical_scribe_job_name: TranscriptionJobName, + media: Media, + output_bucket_name: OutputBucketName, + data_access_role_arn: DataAccessRoleArn, + settings: MedicalScribeSettings, + output_encryption_kms_key_id: KMSKeyId | None = None, + kms_encryption_context: KMSEncryptionContextMap | None = None, + channel_definitions: MedicalScribeChannelDefinitions | None = None, + tags: TagList | None = None, + **kwargs, + ) -> StartMedicalScribeJobResponse: + raise NotImplementedError + + @handler("StartMedicalTranscriptionJob", expand=False) + def start_medical_transcription_job( + self, context: RequestContext, request: StartMedicalTranscriptionJobRequest, **kwargs + ) -> StartMedicalTranscriptionJobResponse: + raise 
NotImplementedError + + @handler("StartTranscriptionJob") + def start_transcription_job( + self, + context: RequestContext, + transcription_job_name: TranscriptionJobName, + media: Media, + language_code: LanguageCode | None = None, + media_sample_rate_hertz: MediaSampleRateHertz | None = None, + media_format: MediaFormat | None = None, + output_bucket_name: OutputBucketName | None = None, + output_key: OutputKey | None = None, + output_encryption_kms_key_id: KMSKeyId | None = None, + kms_encryption_context: KMSEncryptionContextMap | None = None, + settings: Settings | None = None, + model_settings: ModelSettings | None = None, + job_execution_settings: JobExecutionSettings | None = None, + content_redaction: ContentRedaction | None = None, + identify_language: Boolean | None = None, + identify_multiple_languages: Boolean | None = None, + language_options: LanguageOptions | None = None, + subtitles: Subtitles | None = None, + tags: TagList | None = None, + language_id_settings: LanguageIdSettingsMap | None = None, + toxicity_detection: ToxicityDetection | None = None, + **kwargs, + ) -> StartTranscriptionJobResponse: + raise NotImplementedError + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: TranscribeArn, tags: TagList, **kwargs + ) -> TagResourceResponse: + raise NotImplementedError + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, resource_arn: TranscribeArn, tag_keys: TagKeyList, **kwargs + ) -> UntagResourceResponse: + raise NotImplementedError + + @handler("UpdateCallAnalyticsCategory") + def update_call_analytics_category( + self, + context: RequestContext, + category_name: CategoryName, + rules: RuleList, + input_type: InputType | None = None, + **kwargs, + ) -> UpdateCallAnalyticsCategoryResponse: + raise NotImplementedError + + @handler("UpdateMedicalVocabulary") + def update_medical_vocabulary( + self, + context: RequestContext, + vocabulary_name: VocabularyName, + language_code: LanguageCode, + vocabulary_file_uri: Uri, + **kwargs, + ) -> UpdateMedicalVocabularyResponse: + raise NotImplementedError + + @handler("UpdateVocabulary") + def update_vocabulary( + self, + context: RequestContext, + vocabulary_name: VocabularyName, + language_code: LanguageCode, + phrases: Phrases | None = None, + vocabulary_file_uri: Uri | None = None, + data_access_role_arn: DataAccessRoleArn | None = None, + **kwargs, + ) -> UpdateVocabularyResponse: + raise NotImplementedError + + @handler("UpdateVocabularyFilter") + def update_vocabulary_filter( + self, + context: RequestContext, + vocabulary_filter_name: VocabularyFilterName, + words: Words | None = None, + vocabulary_filter_file_uri: Uri | None = None, + data_access_role_arn: DataAccessRoleArn | None = None, + **kwargs, + ) -> UpdateVocabularyFilterResponse: + raise NotImplementedError diff --git a/localstack-core/localstack/aws/app.py b/localstack-core/localstack/aws/app.py new file mode 100644 index 0000000000000..35249aae9d7cd --- /dev/null +++ b/localstack-core/localstack/aws/app.py @@ -0,0 +1,130 @@ +from localstack import config +from localstack.aws import handlers +from localstack.aws.api import RequestContext +from localstack.aws.chain import HandlerChain +from localstack.aws.handlers.metric_handler import MetricHandler +from localstack.aws.handlers.service_plugin import ServiceLoader, ServiceLoaderForDataPlane +from localstack.http.trace import TracingHandlerChain +from localstack.services.plugins import SERVICE_PLUGINS, ServiceManager, 
ServicePluginManager
+from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available
+
+from .gateway import Gateway
+from .handlers.fallback import EmptyResponseHandler
+from .handlers.service import ServiceRequestRouter
+
+
+class LocalstackAwsGateway(Gateway):
+    def __init__(self, service_manager: ServiceManager = None) -> None:
+        super().__init__(context_class=RequestContext)
+
+        # basic server components
+        self.service_manager = service_manager or ServicePluginManager()
+        self.service_request_router = ServiceRequestRouter()
+        # lazy-loads services into the router
+        load_service = ServiceLoader(self.service_manager, self.service_request_router)
+        load_service_for_data_plane = ServiceLoaderForDataPlane(load_service)
+
+        metric_collector = MetricHandler()
+        # the main request handler chain
+        self.request_handlers.extend(
+            [
+                handlers.add_internal_request_params,
+                handlers.handle_runtime_shutdown,
+                metric_collector.create_metric_handler_item,
+                load_service_for_data_plane,
+                handlers.preprocess_request,
+                handlers.enforce_cors,
+                handlers.content_decoder,  # depends on preprocess_request for the S3 service
+                handlers.validate_request_schema,  # validate request schema for public LS endpoints
+                handlers.serve_localstack_resources,  # try to serve endpoints in /_localstack
+                handlers.serve_edge_router_rules,
+                # start aws handler chain
+                handlers.parse_service_name,
+                handlers.parse_pre_signed_url_request,
+                handlers.inject_auth_header_if_missing,
+                handlers.add_region_from_header,
+                handlers.rewrite_region,
+                handlers.add_account_id,
+                handlers.parse_trace_context,
+                handlers.parse_service_request,
+                metric_collector.record_parsed_request,
+                handlers.serve_custom_service_request_handlers,
+                load_service,  # once we have the service request we can make sure we load the service
+                self.service_request_router,  # once we know the service is loaded we can route the request
+                # if the chain is still running, set an empty response
+                EmptyResponseHandler(404, b'{"message": "Not Found"}'),
+            ]
+        )
+
+        # exception handlers in the chain
+        self.exception_handlers.extend(
+            [
+                handlers.log_exception,
+                handlers.serve_custom_exception_handlers,
+                handlers.handle_service_exception,
+                handlers.handle_internal_failure,
+            ]
+        )
+
+        # response post-processing
+        self.response_handlers.extend(
+            [
+                handlers.validate_response_schema,  # validate response schema for public LS endpoints
+                handlers.modify_service_response,
+                handlers.parse_service_response,
+                handlers.run_custom_response_handlers,
+                handlers.add_cors_response_headers,
+                handlers.log_response,
+                handlers.count_service_request,
+                metric_collector.update_metric_collection,
+            ]
+        )
+
+        # request chain finalization
+        self.finalizers.extend(
+            [
+                handlers.set_close_connection_header,
+                handlers.run_custom_finalizers,
+            ]
+        )
+
+    def new_chain(self) -> HandlerChain:
+        if config.DEBUG_HANDLER_CHAIN:
+            return TracingHandlerChain(
+                self.request_handlers,
+                self.response_handlers,
+                self.finalizers,
+                self.exception_handlers,
+            )
+        return super().new_chain()
+
+
+def main():
+    """
+    Serve the LocalstackAwsGateway with the default configuration directly through hypercorn. This is
+    mostly for development purposes and documentation on how to serve the Gateway.
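+
+    A minimal sketch of the equivalent manual setup (illustrative only; it reuses
+    the names defined in this module and mirrors the body of this function)::
+
+        setup_logging()                             # from localstack.utils.bootstrap
+        gw = LocalstackAwsGateway(SERVICE_PLUGINS)  # gateway backed by the service plugins
+        serve(gw, use_reloader=False, port=4566, ssl_creds=None)  # plain HTTP, no reloader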
+ """ + from .serving.hypercorn import serve + + use_ssl = True + port = 4566 + + # serve the LocalStackAwsGateway in a dev app + from localstack.utils.bootstrap import setup_logging + + setup_logging() + + if use_ssl: + install_predefined_cert_if_available() + _, cert_file_name, key_file_name = create_ssl_cert(serial_number=port) + ssl_creds = (cert_file_name, key_file_name) + else: + ssl_creds = None + + gw = LocalstackAwsGateway(SERVICE_PLUGINS) + + serve(gw, use_reloader=True, port=port, ssl_creds=ssl_creds) + + +if __name__ == "__main__": + main() diff --git a/localstack-core/localstack/aws/chain.py b/localstack-core/localstack/aws/chain.py new file mode 100644 index 0000000000000..6702d154cefaf --- /dev/null +++ b/localstack-core/localstack/aws/chain.py @@ -0,0 +1,42 @@ +""" +The core concepts of the HandlerChain. +""" + +from __future__ import annotations + +import logging +from typing import Callable, Type + +from rolo.gateway import ( + CompositeExceptionHandler, + CompositeFinalizer, + CompositeHandler, + CompositeResponseHandler, +) +from rolo.gateway import HandlerChain as RoloHandlerChain +from werkzeug import Response + +from .api import RequestContext + +LOG = logging.getLogger(__name__) + +Handler = Callable[["HandlerChain", RequestContext, Response], None] +"""The signature of request or response handler in the handler chain. Receives the HandlerChain, the +RequestContext, and the Response object to be populated.""" + +ExceptionHandler = Callable[["HandlerChain", Exception, RequestContext, Response], None] +"""The signature of an exception handler in the handler chain. Receives the HandlerChain, the exception that +was raised by the request handler, the RequestContext, and the Response object to be populated.""" + + +HandlerChain: Type[RoloHandlerChain[RequestContext]] = RoloHandlerChain + +__all__ = [ + "HandlerChain", + "Handler", + "ExceptionHandler", + "CompositeHandler", + "CompositeResponseHandler", + "CompositeExceptionHandler", + "CompositeFinalizer", +] diff --git a/localstack-core/localstack/aws/client.py b/localstack-core/localstack/aws/client.py new file mode 100644 index 0000000000000..6d938c086a8cf --- /dev/null +++ b/localstack-core/localstack/aws/client.py @@ -0,0 +1,430 @@ +"""Utils to process AWS requests as a client.""" + +import io +import logging +from datetime import datetime, timezone +from typing import Dict, Iterable, Optional +from urllib.parse import urlsplit + +from botocore import awsrequest +from botocore.endpoint import Endpoint +from botocore.model import OperationModel +from botocore.parsers import ResponseParser, ResponseParserFactory +from werkzeug.datastructures import Headers + +from localstack import config +from localstack.http import Request, Response +from localstack.runtime import hooks +from localstack.utils.patch import Patch, patch +from localstack.utils.strings import to_str + +from .api import CommonServiceException, RequestContext, ServiceException, ServiceResponse +from .connect import get_service_endpoint +from .gateway import Gateway + +LOG = logging.getLogger(__name__) + + +def create_http_request(aws_request: awsrequest.AWSPreparedRequest) -> Request: + """ + Create an ASF HTTP Request from a botocore AWSPreparedRequest. 
+
+    :param aws_request: the botocore prepared request
+    :return: a new Request
+    """
+    split_url = urlsplit(aws_request.url)
+    host = split_url.netloc.split(":")
+    if len(host) == 1:
+        server = (to_str(host[0]), None)
+    elif len(host) == 2:
+        server = (to_str(host[0]), int(host[1]))
+    else:
+        raise ValueError
+
+    # prepare the RequestContext
+    headers = Headers()
+    for k, v in aws_request.headers.items():
+        headers[k] = to_str(v)
+
+    return Request(
+        method=aws_request.method,
+        path=split_url.path,
+        query_string=split_url.query,
+        headers=headers,
+        body=aws_request.body,
+        server=server,
+    )
+
+
+class _ResponseStream(io.RawIOBase):
+    """
+    Wraps a Response and makes it available as a readable IO stream. If the response stream is used as an
+    iterable, it will use the underlying response object directly.
+
+    Adapted from https://stackoverflow.com/a/20260030/804840
+    """
+
+    def __init__(self, response: Response):
+        self.response = response
+        self.iterator = response.iter_encoded()
+        self._buf = None
+
+    def stream(self) -> Iterable[bytes]:
+        # adds compatibility for botocore's client-side AWSResponse.raw attribute.
+        return self.iterator
+
+    def readable(self):
+        return True
+
+    def readinto(self, buffer):
+        try:
+            upto = len(buffer)  # We're supposed to return at most this much
+            chunk = self._buf or next(self.iterator)
+            # FIXME: this is very slow as it copies the entire chunk
+            output, self._buf = chunk[:upto], chunk[upto:]
+            buffer[: len(output)] = output
+            return len(output)
+        except StopIteration:
+            return 0  # indicate EOF
+
+    def read(self, amt=None) -> bytes | None:
+        # see https://github.com/python/cpython/blob/main/Lib/_pyio.py
+        # adds compatibility for botocore's client-side AWSResponse.raw attribute.
+        # the default implementation of RawIOBase.read does not seem to handle some cases well
+        if amt is None:
+            amt = -1
+        return super().read(amt)
+
+    def close(self) -> None:
+        return self.response.close()
+
+    def __iter__(self):
+        return self.iterator
+
+    def __next__(self):
+        return next(self.iterator)
+
+    def __str__(self):
+        length = self.response.content_length
+        if length is None:
+            length = "unknown"
+
+        return f"StreamedBytes({length})"
+
+    def __repr__(self):
+        return self.__str__()
+
+
+class _RawStream:
+    """This is a compatibility adapter for the raw_stream attribute passed to botocore's EventStream."""
+
+    def __init__(self, response: Response):
+        self.response = response
+        self.iterator = response.iter_encoded()
+
+    def stream(self) -> Iterable[bytes]:
+        return self.iterator
+
+    def close(self):
+        pass
+
+
+def _add_modeled_error_fields(
+    response_dict: Dict,
+    parsed_response: Dict,
+    operation_model: OperationModel,
+    parser: ResponseParser,
+):
+    """
+    This function adds additional error shape members (other than message, code, and type) to an already
+    parsed error response dict.
+    Port of botocore's Endpoint#_add_modeled_error_fields.
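+
+    For example (hypothetical shapes): if a service modeled an error code
+    "TooManyWidgets" with an extra "widgetLimit" member, a parsed error dict
+    {"Error": {"Code": "TooManyWidgets", ...}} would be enriched in place with
+    a top-level "widgetLimit" entry.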
+ """ + error_code = parsed_response.get("Error", {}).get("Code") + if error_code is None: + return + service_model = operation_model.service_model + error_shape = service_model.shape_for_error_code(error_code) + if error_shape is None: + return + modeled_parse = parser.parse(response_dict, error_shape) + parsed_response.update(modeled_parse) + + +def _cbor_timestamp_parser(value): + return datetime.fromtimestamp(value / 1000) + + +def _cbor_blob_parser(value): + return bytes(value) + + +@hooks.on_infra_start() +def _patch_botocore_json_parser(): + from botocore.parsers import BaseJSONParser + + @patch(BaseJSONParser._parse_body_as_json) + def _parse_body_as_json(fn, self, body_contents): + """ + botocore does not support CBOR encoded response parsing. Since we use the botocore parsers + to parse responses from external backends (like kinesis-mock), we need to patch botocore to + try CBOR decoding in case the JSON decoding fails. + """ + try: + return fn(self, body_contents) + except UnicodeDecodeError as json_exception: + # cbor2: explicitly load from private _decoder module to avoid using the (non-patched) C-version + from cbor2._decoder import loads + + try: + LOG.debug("botocore failed decoding JSON. Trying to decode as CBOR.") + return loads(body_contents) + except Exception as cbor_exception: + LOG.debug("CBOR fallback decoding failed.") + raise cbor_exception from json_exception + + +@hooks.on_infra_start() +def _patch_cbor2(): + """ + Patch fixing the AWS CBOR en-/decoding of datetime fields. + + Unfortunately, Kinesis (the only known service using CBOR) does not use the number of seconds (with floating-point + milliseconds - according to RFC8949), but uses milliseconds. + Python cbor2 is highly optimized by using a C-implementation by default, which cannot be patched. + Instead of `from cbor2 import loads`, directly import the python-native loads implementation to avoid loading the + unpatched C implementation: + ``` + from cbor2._decoder import loads + from cbor2._decoder import dumps + ``` + + See https://github.com/aws/aws-sdk-java-v2/issues/4661 + """ + from cbor2._decoder import CBORDecodeValueError, semantic_decoders + from cbor2._encoder import CBOREncodeValueError, default_encoders + from cbor2._types import CBORTag + + def _patched_decode_epoch_datetime(self) -> datetime: + """ + Replaces `cbor2._decoder.CBORDecoder.decode_epoch_datetime` as default datetime semantic_decoder. + """ + # Semantic tag 1 + value = self._decode() + + try: + # The next line is the only change in this patch compared to the original function. + # AWS breaks the CBOR spec by using the millis (instead of seconds with floating point support for millis) + # https://github.com/aws/aws-sdk-java-v2/issues/4661 + value = value / 1000 + tmp = datetime.fromtimestamp(value, timezone.utc) + except (OverflowError, OSError, ValueError) as exc: + raise CBORDecodeValueError("error decoding datetime from epoch") from exc + + return self.set_shareable(tmp) + + def _patched_encode_datetime(self, value: datetime) -> None: + """ + Replaces `cbor2._encoder.CBOREncoder.encode_datetime` as default datetime default_encoder. 
+ """ + if not value.tzinfo: + if self._timezone: + value = value.replace(tzinfo=self._timezone) + else: + raise CBOREncodeValueError( + f"naive datetime {value!r} encountered and no default timezone has been set" + ) + + if self.datetime_as_timestamp: + from calendar import timegm + + if not value.microsecond: + timestamp: float = timegm(value.utctimetuple()) + else: + timestamp = timegm(value.utctimetuple()) + value.microsecond / 1000000 + # The next line is the only change in this patch compared to the original function. + # - AWS breaks the CBOR spec by using the millis (instead of seconds with floating point support for millis) + # https://github.com/aws/aws-sdk-java-v2/issues/4661 + # - AWS SDKs in addition have very tight assumptions on the type. + # This needs to be an integer, and must not be a floating point number (CBOR is typed)! + timestamp = int(timestamp * 1000) + self.encode_semantic(CBORTag(1, timestamp)) + else: + datestring = value.isoformat().replace("+00:00", "Z") + self.encode_semantic(CBORTag(0, datestring)) + + # overwrite the default epoch datetime en-/decoder with patched versions + default_encoders[datetime] = _patched_encode_datetime + semantic_decoders[1] = _patched_decode_epoch_datetime + + +def _create_and_enrich_aws_request( + fn, self: Endpoint, params: dict, operation_model: OperationModel = None +): + """ + Patch that adds the botocore operation model and request parameters to a newly created AWSPreparedRequest, + which normally only holds low-level HTTP request information. + """ + request: awsrequest.AWSPreparedRequest = fn(self, params, operation_model) + + request.params = params + request.operation_model = operation_model + + return request + + +botocore_in_memory_endpoint_patch = Patch.function( + Endpoint.create_request, _create_and_enrich_aws_request +) + + +@hooks.on_infra_start(should_load=config.IN_MEMORY_CLIENT) +def _patch_botocore_endpoint_in_memory(): + botocore_in_memory_endpoint_patch.apply() + + +def parse_response( + operation: OperationModel, response: Response, include_response_metadata: bool = True +) -> ServiceResponse: + """ + Parses an HTTP Response object into an AWS response object using botocore. It does this by adapting the + procedure of ``botocore.endpoint.convert_to_response_dict`` to work with Werkzeug's server-side response object. 
+ + :param operation: the operation of the original request + :param response: the HTTP response object containing the response of the operation + :param include_response_metadata: True if the ResponseMetadata (typical for boto response dicts) should be included + :return: a parsed dictionary as it is returned by botocore + """ + # this is what botocore.endpoint.convert_to_response_dict normally does + response_dict = { + "headers": dict(response.headers.items()), # boto doesn't like werkzeug headers + "status_code": response.status_code, + "context": { + "operation_name": operation.name, + }, + } + + if response_dict["status_code"] >= 301: + response_dict["body"] = response.data + elif operation.has_event_stream_output: + # TODO test this + response_dict["body"] = _RawStream(response) + elif operation.has_streaming_output: + # for s3.GetObject for example, the Body attribute is actually a stream, not the raw bytes value + response_dict["body"] = _ResponseStream(response) + else: + response_dict["body"] = response.data + + factory = ResponseParserFactory() + if response.content_type and response.content_type.startswith("application/x-amz-cbor"): + # botocore cannot handle CBOR encoded responses (because it never sends them), we need to modify the parser + factory.set_parser_defaults( + timestamp_parser=_cbor_timestamp_parser, blob_parser=_cbor_blob_parser + ) + + parser = factory.create_parser(operation.service_model.protocol) + parsed_response = parser.parse(response_dict, operation.output_shape) + + if response.status_code >= 301: + # Add possible additional error shape members + _add_modeled_error_fields(response_dict, parsed_response, operation, parser) + + if not include_response_metadata: + parsed_response.pop("ResponseMetadata", None) + + return parsed_response + + +def parse_service_exception( + response: Response, parsed_response: Dict +) -> Optional[ServiceException]: + """ + Creates a ServiceException (one ASF can handle) from a parsed response (one that botocore would return). + It does not automatically raise the exception (see #raise_service_exception). + :param response: Un-parsed response + :param parsed_response: Parsed response + :return: ServiceException or None (if it's not an error response) + """ + if response.status_code < 301 or "Error" not in parsed_response: + return None + error = parsed_response["Error"] + service_exception = CommonServiceException( + code=error.get("Code", f"'{response.status_code}'"), + status_code=response.status_code, + message=error.get("Message", ""), + sender_fault=error.get("Type") == "Sender", + ) + # Add all additional fields in the parsed response as members of the exception + for key, value in parsed_response.items(): + if key.lower() not in ["code", "message", "type", "error"] and not hasattr( + service_exception, key + ): + setattr(service_exception, key, value) + return service_exception + + +def raise_service_exception(response: Response, parsed_response: Dict) -> None: + """ + Creates and raises a ServiceException from a parsed response (one that botocore would return). 
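Taken together, `parse_response`, `parse_service_exception`, and the `raise_service_exception` helper defined here support a calling pattern roughly like the following (a sketch only; the `operation` and `response` objects are assumed to come from the surrounding forwarding machinery):

```python
def call_backend_and_parse(operation, response):
    """Illustration only: parse a backend HTTP response and raise on AWS-style errors."""
    parsed = parse_response(operation, response)
    # a no-op for non-error responses; raises a ServiceException otherwise
    raise_service_exception(response, parsed)
    return parsed
```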
+    :param response: Un-parsed response
+    :param parsed_response: Parsed response
+    :raise ServiceException: If the response is an error response
+    :return: None if the response is not an error response
+    """
+    if service_exception := parse_service_exception(response, parsed_response):
+        raise service_exception
+
+
+class GatewayShortCircuit:
+    gateway: Gateway
+
+    def __init__(self, gateway: Gateway):
+        self.gateway = gateway
+        self._internal_url = get_service_endpoint()
+
+    def __call__(
+        self, event_name: str, request: awsrequest.AWSPreparedRequest, **kwargs
+    ) -> awsrequest.AWSResponse | None:
+        # TODO: we sometimes override the endpoint_url to direct calls to DynamoDBLocal directly;
+        # if the default endpoint_url is not in the request, just skip the in-memory forwarding
+        if self._internal_url not in request.url:
+            return
+
+        # extract extra data from the enriched AWSPreparedRequest
+        params = request.params
+        operation: OperationModel = request.operation_model
+
+        # create request
+        context = RequestContext(request=create_http_request(request))
+
+        # TODO: a hacky workaround for now to avoid the service model being set to `sqs-query`.
+        # This uses the same services as `localstack.aws.protocol.service_router.resolve_conflicts`,
+        # maybe consolidate. `docdb` and `neptune` use the RDS API and service.
+        if operation.service_model.service_name not in {
+            "sqs-query",
+            "docdb",
+            "neptune",
+            "timestream-write",
+        }:
+            context.service = operation.service_model
+
+        context.operation = operation
+        context.service_request = params["body"]
+
+        # perform request
+        response = Response()
+        self.gateway.handle(context, response)
+
+        # transform the Werkzeug response into a client-side botocore response
+        aws_response = awsrequest.AWSResponse(
+            url=context.request.url,
+            status_code=response.status_code,
+            headers=response.headers,
+            raw=_ResponseStream(response),
+        )
+
+        return aws_response
+
+    @staticmethod
+    def modify_client(client, gateway):
+        client.meta.events.register_first("before-send.*.*", GatewayShortCircuit(gateway))
diff --git a/localstack-core/localstack/aws/components.py b/localstack-core/localstack/aws/components.py
new file mode 100644
index 0000000000000..82b203741de60
--- /dev/null
+++ b/localstack-core/localstack/aws/components.py
@@ -0,0 +1,22 @@
+from functools import cached_property
+
+from rolo.gateway import Gateway
+
+from localstack.aws.app import LocalstackAwsGateway
+from localstack.runtime.components import BaseComponents
+
+
+class AwsComponents(BaseComponents):
+    """
+    Runtime components specific to the AWS emulator.
+    """
+
+    name = "aws"
+
+    @cached_property
+    def gateway(self) -> Gateway:
+        # FIXME: the ServiceManager should be reworked to be more generic, and then become part of the
+        #  components
+        from localstack.services.plugins import SERVICE_PLUGINS
+
+        return LocalstackAwsGateway(SERVICE_PLUGINS)
diff --git a/localstack-core/localstack/aws/connect.py b/localstack-core/localstack/aws/connect.py
new file mode 100644
index 0000000000000..6a04285e021a2
--- /dev/null
+++ b/localstack-core/localstack/aws/connect.py
@@ -0,0 +1,782 @@
+"""
+LocalStack client stack.
+
+This module provides the interface to perform cross-service communication between
+LocalStack providers.
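A note on the mechanism `GatewayShortCircuit` above builds on: botocore treats a non-`None` `AWSResponse` returned from a `before-send` handler as the final response and skips the network entirely. A minimal standalone sketch of that behaviour, with an invented `_Raw` helper and stub body:

```python
import boto3
from botocore.awsrequest import AWSResponse


class _Raw:
    """Minimal stand-in for the raw stream botocore reads the response body from."""

    def __init__(self, data: bytes):
        self._data = data

    def stream(self, **kwargs):
        yield self._data


def short_circuit(event_name, request, **kwargs):
    # returning an AWSResponse from a before-send handler makes botocore
    # use it as the result and skip the real HTTP call
    return AWSResponse(request.url, 200, {}, _Raw(b'{"TableNames": []}'))


client = boto3.client(
    "dynamodb",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)
client.meta.events.register_first("before-send.*.*", short_circuit)
print(client.list_tables())  # served by the handler, no network involved
```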
+"""
+
+import json
+import logging
+import re
+import threading
+from abc import ABC, abstractmethod
+from functools import lru_cache, partial
+from random import choice
+from socket import socket
+from typing import Any, Callable, Generic, Optional, TypedDict, TypeVar
+
+import dns.message
+import dns.query
+from boto3.session import Session
+from botocore.awsrequest import (
+    AWSHTTPConnection,
+    AWSHTTPConnectionPool,
+    AWSHTTPSConnection,
+    AWSHTTPSConnectionPool,
+)
+from botocore.client import BaseClient
+from botocore.config import Config
+from botocore.httpsession import URLLib3Session
+from botocore.waiter import Waiter
+
+from localstack import config as localstack_config
+from localstack.aws.spec import LOCALSTACK_BUILTIN_DATA_PATH
+from localstack.constants import (
+    AWS_REGION_US_EAST_1,
+    INTERNAL_AWS_ACCESS_KEY_ID,
+    INTERNAL_AWS_SECRET_ACCESS_KEY,
+    MAX_POOL_CONNECTIONS,
+)
+from localstack.utils.aws.aws_stack import get_s3_hostname
+from localstack.utils.aws.client_types import ServicePrincipal, TypedServiceClientFactory
+from localstack.utils.patch import patch
+from localstack.utils.strings import short_uid
+
+LOG = logging.getLogger(__name__)
+
+
+@patch(target=Waiter.wait, pass_target=True)
+def my_patch(fn, self, **kwargs):
+    """
+    We patch defaults in here that override the defaults specified in the waiter spec, since those
+    are usually way too long.
+
+    Alternatively, we could try to find a solution where we patch the loader used in the generated
+    clients, so that we can dynamically fix the waiter config when it is loaded instead of when it
+    is used for a wait execution.
+    """
+
+    if localstack_config.DISABLE_CUSTOM_BOTO_WAITER_CONFIG:
+        return fn(self, **kwargs)
+    else:
+        patched_kwargs = {
+            **kwargs,
+            "WaiterConfig": {
+                "Delay": localstack_config.BOTO_WAITER_DELAY,
+                "MaxAttempts": localstack_config.BOTO_WAITER_MAX_ATTEMPTS,
+                **kwargs.get(
+                    "WaiterConfig", {}
+                ),  # we still allow client users to override these defaults
+            },
+        }
+        return fn(self, **patched_kwargs)
+
+
+# Patch the botocore.Config object to be comparable and hashable.
+# Note: this does not strictly satisfy Python's definition of hashable
+# (https://docs.python.org/3/glossary.html#term-hashable): the hash would only break if someone
+# accessed the internals of the Config object and mutated its options dict directly.
+# Since that is not a proper way to use the config object (config.merge should be used instead),
+# this should be fine.
+def make_hash(o):
+    if isinstance(o, (set, tuple, list)):
+        return tuple([make_hash(e) for e in o])
+
+    elif not isinstance(o, dict):
+        return hash(o)
+
+    new_o = {}
+    for k, v in o.items():
+        new_o[k] = make_hash(v)
+
+    return hash(frozenset(sorted(new_o.items())))
+
+
+def config_equality_patch(self, other: object):
+    return type(self) == type(other) and self._user_provided_options == other._user_provided_options
+
+
+def config_hash_patch(self):
+    return make_hash(self._user_provided_options)
+
+
+Config.__eq__ = config_equality_patch
+Config.__hash__ = config_hash_patch
+
+
+def attribute_name_to_service_name(attribute_name):
+    """
+    Converts a Python-compatible attribute name to the boto service name
+    :param attribute_name: Python-compatible attribute name using the following replacements:
+        a) Add an underscore suffix `_` to any reserved Python keyword (PEP-8).
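The point of the equality/hash patch above is that `Config` instances can then act as by-value cache keys, e.g. for the `lru_cache` on `ClientFactory._get_client` further below. Assuming the patch has been applied (it runs when this module is imported):

```python
from botocore.config import Config

a = Config(region_name="us-east-1", retries={"max_attempts": 0})
b = Config(region_name="us-east-1", retries={"max_attempts": 0})

# unpatched, these would compare and hash by object identity;
# patched, they compare and hash by their user-provided options
assert a == b
assert hash(a) == hash(b)
assert len({a, b}) == 1
```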
+        b) Replace any dash `-` with an underscore `_`
+    :return: the boto service name
+    """
+    if attribute_name.endswith("_"):
+        # lambda_ -> lambda
+        attribute_name = attribute_name[:-1]
+    # replace all _ with -: cognito_idp -> cognito-idp
+    return attribute_name.replace("_", "-")
+
+
+def get_service_endpoint() -> str | None:
+    """
+    Returns the endpoint the client should target.
+
+    :return: Endpoint url
+    """
+    if localstack_config.DISTRIBUTED_MODE:
+        return None
+    return localstack_config.internal_service_url()
+
+
+#
+# Data transfer object
+#
+
+INTERNAL_REQUEST_PARAMS_HEADER = "x-localstack-data"
+"""Request header which contains the data transfer object."""
+
+
+class InternalRequestParameters(TypedDict):
+    """
+    LocalStack Data Transfer Object.
+
+    This is sent with every internal request and contains any additional information
+    LocalStack might need for the purpose of policy enforcement. It is serialised
+    into text and sent in the request header.
+
+    Attributes can be added as needed. The keys should roughly correspond to:
+    https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html
+    """
+
+    source_arn: str | None
+    """ARN of the resource which is triggering the call"""
+
+    service_principal: str | None
+    """Service principal making this call"""
+
+
+def dump_dto(data: InternalRequestParameters) -> str:
+    # To produce a compact JSON representation of the DTO, remove spaces from separators.
+    # If possible, we could use a custom encoder to further decrease the header size in the future.
+    return json.dumps(data, separators=(",", ":"))
+
+
+def load_dto(data: str) -> InternalRequestParameters:
+    return json.loads(data)
+
+
+T = TypeVar("T")
+
+
+class MetadataRequestInjector(Generic[T]):
+    def __init__(self, client: T, params: dict[str, str] | None = None):
+        self._client = client
+        self._params = params
+
+    def __getattr__(self, item):
+        target = getattr(self._client, item)
+        if not isinstance(target, Callable):
+            return target
+        if self._params:
+            return partial(target, **self._params)
+        else:
+            return target
+
+    def request_metadata(
+        self, source_arn: str | None = None, service_principal: str | None = None
+    ) -> T:
+        """
+        Returns a new client instance preset with the given request metadata.
+        Identical to providing _ServicePrincipal and _SourceArn directly as operation arguments,
+        but typing-compatible.
+
+        Raw example: lambda_client.invoke(FunctionName="fn", _SourceArn="...")
+        Injector example: lambda_client.request_metadata(source_arn="...").invoke(FunctionName="fn")
+        Cannot be called on objects where the parameters are already set.
+
+        :param source_arn: ARN on whose behalf the calls of this client shall be made
+        :param service_principal: Service principal on whose behalf the calls of this client shall be made
+        :return: A new version of the MetadataRequestInjector
+        """
+        if self._params is not None:
+            raise TypeError("request_metadata cannot be called on its own return value")
+        params = {}
+        if source_arn:
+            params["_SourceArn"] = source_arn
+        if service_principal:
+            params["_ServicePrincipal"] = service_principal
+        return MetadataRequestInjector(client=self._client, params=params)
+
+
+#
+# Factory
+#
+class ServiceLevelClientFactory(TypedServiceClientFactory):
+    """
+    A service level client factory, pre-seeded with parameters for the boto3 client creation.
+    Will create any service client with parameters already provided by the ClientFactory.
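`MetadataRequestInjector` is essentially a proxy that forwards attribute access to the wrapped client and, once metadata is set, binds the `_SourceArn`/`_ServicePrincipal` kwargs into every method call via `functools.partial`. The same pattern in miniature, with invented names:

```python
from functools import partial


class KwargInjector:
    """Proxy that injects fixed keyword arguments into every method call (illustration)."""

    def __init__(self, target, **fixed_kwargs):
        self._target = target
        self._fixed = fixed_kwargs

    def __getattr__(self, name):
        attr = getattr(self._target, name)
        if callable(attr) and self._fixed:
            return partial(attr, **self._fixed)
        return attr


class Demo:
    def call(self, **kwargs):
        return kwargs


arn = "arn:aws:lambda:us-east-1:000000000000:function:fn"  # placeholder ARN
injected = KwargInjector(Demo(), _SourceArn=arn)
assert injected.call(x=1) == {"x": 1, "_SourceArn": arn}
```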
+ """ + + def __init__( + self, + *, + factory: "ClientFactory", + client_creation_params: dict[str, str | Config | None], + request_wrapper_clazz: type, + ): + self._factory = factory + self._client_creation_params = client_creation_params + self._request_wrapper_clazz = request_wrapper_clazz + + def get_client(self, service: str): + return self._request_wrapper_clazz( + client=self._factory.get_client(service_name=service, **self._client_creation_params) + ) + + def __getattr__(self, service: str): + service = attribute_name_to_service_name(service) + return self._request_wrapper_clazz( + client=self._factory.get_client(service_name=service, **self._client_creation_params) + ) + + +class ClientFactory(ABC): + """ + Factory to build the AWS client. + + Boto client creation is resource intensive. This class caches all Boto + clients it creates and must be used instead of directly using boto lib. + """ + + def __init__( + self, + use_ssl: bool = False, + verify: bool = False, + session: Session = None, + config: Config = None, + ): + """ + :param use_ssl: Whether to use SSL + :param verify: Whether to verify SSL certificates + :param session: Session to be used for client creation. Will create a new session if not provided. + Please note that sessions are not generally thread safe. + Either create a new session for each factory or make sure the session is not shared with another thread. + The factory itself has a lock for the session, so as long as you only use the session in one factory, + it should be fine using the factory in a multithreaded context. + :param config: Config used as default for client creation. + """ + self._use_ssl = use_ssl + self._verify = verify + self._config: Config = config or Config(max_pool_connections=MAX_POOL_CONNECTIONS) + self._session: Session = session or Session() + + # make sure we consider our custom data paths for legacy specs (like SQS query protocol) + if LOCALSTACK_BUILTIN_DATA_PATH not in self._session._loader.search_paths: + self._session._loader.search_paths.insert(0, LOCALSTACK_BUILTIN_DATA_PATH) + + self._create_client_lock = threading.RLock() + + def __call__( + self, + *, + region_name: Optional[str] = None, + aws_access_key_id: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_session_token: Optional[str] = None, + endpoint_url: str = None, + config: Config = None, + ) -> ServiceLevelClientFactory: + """ + Get back an object which lets you select the typed service you want to access with the given attributes + + :param region_name: Name of the AWS region to be associated with the client + If set to None, loads from botocore session. + :param aws_access_key_id: Access key to use for the client. + If set to None, loads from botocore session. + :param aws_secret_access_key: Secret key to use for the client. + If set to None, loads from botocore session. + :param aws_session_token: Session token to use for the client. + Not being used if not set. + :param endpoint_url: Full endpoint URL to be used by the client. + Defaults to appropriate LocalStack endpoint. + :param config: Boto config for advanced use. 
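Putting the two layers together, typical usage of a client factory looks roughly like this (a sketch; `connect_to` is the module-level `InternalClientFactory` instance defined near the end of this file, and the calls assume a running LocalStack):

```python
from localstack.aws.connect import connect_to

clients = connect_to(region_name="us-east-1")  # a ServiceLevelClientFactory

# attribute access builds (and caches) the underlying boto3 client
queues = clients.sqs.list_queues()

# internal calls can attach request metadata for policy enforcement
clients.sqs.request_metadata(service_principal="lambda").list_queues()
```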
+        :return: Service Region Client Creator
+        """
+        params = {
+            "region_name": region_name,
+            "aws_access_key_id": aws_access_key_id,
+            "aws_secret_access_key": aws_secret_access_key,
+            "aws_session_token": aws_session_token,
+            "endpoint_url": endpoint_url,
+            "config": config,
+        }
+        return ServiceLevelClientFactory(
+            factory=self,
+            client_creation_params=params,
+            request_wrapper_clazz=MetadataRequestInjector,
+        )
+
+    def with_assumed_role(
+        self,
+        *,
+        role_arn: str,
+        service_principal: Optional[ServicePrincipal] = None,
+        session_name: Optional[str] = None,
+        region_name: Optional[str] = None,
+        endpoint_url: Optional[str] = None,
+        config: Optional[Config] = None,
+    ) -> ServiceLevelClientFactory:
+        """
+        Create a service level client factory with credentials from assuming the given role ARN.
+        The service_principal is only used for the assume_role call; for all subsequent calls it
+        has to be provided separately, either as a call attribute or using request_metadata().
+
+        :param role_arn: Role to assume
+        :param service_principal: Service the role should be assumed as; must not be set for test clients
+        :param session_name: Session name for the role session
+        :param region_name: Region for the returned client
+        :param endpoint_url: Endpoint for both the assume_role call and the returned client
+        :param config: Config for both the assume_role call and the returned client
+        :return: Service Level Client Factory
+        """
+        session_name = session_name or f"session-{short_uid()}"
+        sts_client = self(endpoint_url=endpoint_url, config=config, region_name=region_name).sts
+
+        metadata = {}
+        if service_principal:
+            metadata["service_principal"] = service_principal
+
+        sts_client = sts_client.request_metadata(**metadata)
+        credentials = sts_client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)[
+            "Credentials"
+        ]
+
+        return self(
+            region_name=region_name,
+            aws_access_key_id=credentials["AccessKeyId"],
+            aws_secret_access_key=credentials["SecretAccessKey"],
+            aws_session_token=credentials["SessionToken"],
+            endpoint_url=endpoint_url,
+            config=config,
+        )
+
+    @abstractmethod
+    def get_client(
+        self,
+        service_name: str,
+        region_name: Optional[str] = None,
+        aws_access_key_id: Optional[str] = None,
+        aws_secret_access_key: Optional[str] = None,
+        aws_session_token: Optional[str] = None,
+        endpoint_url: Optional[str] = None,
+        config: Optional[Config] = None,
+    ):
+        raise NotImplementedError()
+
+    def _get_client_post_hook(self, client: BaseClient) -> BaseClient:
+        """
+        This is called after the client is created by Boto.
+
+        Any modifications to the client can be implemented here in subclasses
+        without affecting the caching mechanism.
+        """
+        return client
+
+    # TODO: @lru_cache here might result in a memory leak, as it keeps a reference to `self`.
+    #  We might need an alternative caching decorator with a weak ref to `self`,
+    #  otherwise factories might never be garbage collected.
+    @lru_cache(maxsize=256)
+    def _get_client(
+        self,
+        service_name: str,
+        region_name: str,
+        use_ssl: bool,
+        verify: Optional[bool],
+        endpoint_url: Optional[str],
+        aws_access_key_id: Optional[str],
+        aws_secret_access_key: Optional[str],
+        aws_session_token: Optional[str],
+        config: Config,
+    ) -> BaseClient:
+        """
+        Returns a boto3 client with the given configuration, and the hooks added by `_get_client_post_hook`.
+        This is a cached call, so modifications to the used client will affect others.
+        Please use another instance of the factory, should you want to modify clients.
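A short usage sketch for `with_assumed_role` (the role ARN is a placeholder, and a running LocalStack with STS is assumed):

```python
role_clients = connect_to.with_assumed_role(
    role_arn="arn:aws:iam::000000000000:role/example-role",  # placeholder
    service_principal="lambda",
    region_name="us-east-1",
)
# every call below is made with the temporary credentials from sts.assume_role
role_clients.s3.list_buckets()
```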
+        Client creation is behind a lock as it is not generally thread safe.
+
+        :param service_name: Service to build the client for, e.g. `s3`
+        :param region_name: Name of the AWS region to be associated with the client
+            If set to None, loads from botocore session.
+        :param aws_access_key_id: Access key to use for the client.
+            If set to None, loads from botocore session.
+        :param aws_secret_access_key: Secret key to use for the client.
+            If set to None, loads from botocore session.
+        :param aws_session_token: Session token to use for the client.
+            Not used if not set.
+        :param endpoint_url: Full endpoint URL to be used by the client.
+            Defaults to the appropriate LocalStack endpoint.
+        :param config: Boto config for advanced use.
+        :return: Boto3 client.
+        """
+        with self._create_client_lock:
+            default_config = (
+                Config(retries={"max_attempts": 0})
+                if localstack_config.DISABLE_BOTO_RETRIES
+                else Config()
+            )
+
+            client = self._session.client(
+                service_name=service_name,
+                region_name=region_name,
+                use_ssl=use_ssl,
+                verify=verify,
+                endpoint_url=endpoint_url,
+                aws_access_key_id=aws_access_key_id,
+                aws_secret_access_key=aws_secret_access_key,
+                aws_session_token=aws_session_token,
+                config=config.merge(default_config),
+            )
+
+        return self._get_client_post_hook(client)
+
+    #
+    # Boto session utilities
+    #
+    def _get_session_region(self) -> str:
+        """
+        Return the AWS region as set in the Boto session.
+        """
+        return self._session.region_name
+
+    def _get_region(self) -> str:
+        """
+        Return the AWS region name from the following sources, in order of availability:
+        - LocalStack request context
+        - Boto session
+        - us-east-1
+        """
+        return self._get_session_region() or AWS_REGION_US_EAST_1
+
+
+class InternalClientFactory(ClientFactory):
+    def _get_client_post_hook(self, client: BaseClient) -> BaseClient:
+        """
+        Register handlers that enable the internal data object transfer mechanism
+        for internal clients.
+        """
+        client.meta.events.register(
+            "provide-client-params.*.*", handler=_handler_create_request_parameters
+        )
+
+        client.meta.events.register("before-call.*.*", handler=_handler_inject_dto_header)
+
+        if localstack_config.IN_MEMORY_CLIENT:
+            # this makes the client call the gateway directly
+            from localstack.aws.client import GatewayShortCircuit
+            from localstack.runtime import get_current_runtime
+
+            GatewayShortCircuit.modify_client(client, get_current_runtime().components.gateway)
+
+        return client
+
+    def get_client(
+        self,
+        service_name: str,
+        region_name: Optional[str] = None,
+        aws_access_key_id: Optional[str] = None,
+        aws_secret_access_key: Optional[str] = None,
+        aws_session_token: Optional[str] = None,
+        endpoint_url: Optional[str] = None,
+        config: Optional[Config] = None,
+    ) -> BaseClient:
+        """
+        Build and return a client for connections originating within LocalStack.
+
+        All API operation methods (such as `.list_buckets()` or `.run_instances()`)
+        take additional args that start with an `_` prefix. These are used to pass
+        additional information to the LocalStack server during internal calls.
+
+        :param service_name: Service to build the client for, e.g. `s3`
+        :param region_name: Region name. See note above.
+            If set to None, loads from botocore session.
+        :param aws_access_key_id: Access key to use for the client.
+            Defaults to LocalStack internal credentials.
+        :param aws_secret_access_key: Secret key to use for the client.
+            Defaults to LocalStack internal credentials.
+        :param aws_session_token: Session token to use for the client.
+            Not used if not set.
+        :param endpoint_url: Full endpoint URL to be used by the client.
+            Defaults to the appropriate LocalStack endpoint.
+        :param config: Boto config for advanced use.
+        """
+
+        if config is None:
+            config = self._config
+        else:
+            config = self._config.merge(config)
+
+        endpoint_url = endpoint_url or get_service_endpoint()
+        if service_name == "s3" and endpoint_url:
+            if re.match(r"https?://localhost(:[0-9]+)?", endpoint_url):
+                endpoint_url = endpoint_url.replace("://localhost", f"://{get_s3_hostname()}")
+
+        return self._get_client(
+            service_name=service_name,
+            region_name=region_name or self._get_region(),
+            use_ssl=self._use_ssl,
+            verify=self._verify,
+            endpoint_url=endpoint_url,
+            aws_access_key_id=aws_access_key_id or INTERNAL_AWS_ACCESS_KEY_ID,
+            aws_secret_access_key=aws_secret_access_key or INTERNAL_AWS_SECRET_ACCESS_KEY,
+            aws_session_token=aws_session_token,
+            config=config,
+        )
+
+
+class ExternalClientFactory(ClientFactory):
+    def get_client(
+        self,
+        service_name: str,
+        region_name: Optional[str] = None,
+        aws_access_key_id: Optional[str] = None,
+        aws_secret_access_key: Optional[str] = None,
+        aws_session_token: Optional[str] = None,
+        endpoint_url: Optional[str] = None,
+        config: Optional[Config] = None,
+    ) -> BaseClient:
+        """
+        Build and return a client for connections originating outside LocalStack and targeting LocalStack.
+
+        If the region is set to None, it is loaded from the following
+        locations:
+        - AWS environment variables
+        - Credentials file `~/.aws/credentials`
+        - Config file `~/.aws/config`
+
+        :param service_name: Service to build the client for, e.g. `s3`
+        :param region_name: Name of the AWS region to be associated with the client
+            If set to None, loads from botocore session.
+        :param aws_access_key_id: Access key to use for the client.
+            If set to None, loads from botocore session.
+        :param aws_secret_access_key: Secret key to use for the client.
+            If set to None, uses a placeholder value.
+        :param aws_session_token: Session token to use for the client.
+            Not used if not set.
+        :param endpoint_url: Full endpoint URL to be used by the client.
+            Defaults to the appropriate LocalStack endpoint.
+        :param config: Boto config for advanced use.
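The S3 special case above rewrites `localhost` endpoints so that virtual-host style bucket addressing keeps working. Assuming `get_s3_hostname()` returns an S3-specific hostname such as `s3.localhost.localstack.cloud` (an assumption for this sketch), the rewrite behaves as follows:

```python
import re

endpoint_url = "http://localhost:4566"
s3_hostname = "s3.localhost.localstack.cloud"  # assumed return value of get_s3_hostname()

if re.match(r"https?://localhost(:[0-9]+)?", endpoint_url):
    endpoint_url = endpoint_url.replace("://localhost", f"://{s3_hostname}")

assert endpoint_url == "http://s3.localhost.localstack.cloud:4566"
```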
+        """
+        if config is None:
+            config = self._config
+        else:
+            config = self._config.merge(config)
+
+        # Boto has an odd behaviour when using a non-default region (any region other than us-east-1)
+        # in the config: if the region in the arg is non-default, it gives the arg precedence,
+        # but if the region in the arg is the default (us-east-1), it gives precedence to the one
+        # in the config. Below: always give precedence to the arg region.
+        if config and config.region_name != AWS_REGION_US_EAST_1:
+            if region_name == AWS_REGION_US_EAST_1:
+                config = config.merge(Config(region_name=region_name))
+
+        endpoint_url = endpoint_url or get_service_endpoint()
+        if service_name == "s3" and endpoint_url:
+            if re.match(r"https?://localhost(:[0-9]+)?", endpoint_url):
+                endpoint_url = endpoint_url.replace("://localhost", f"://{get_s3_hostname()}")
+
+        # Prevent `PartialCredentialsError` when only the access key ID is provided.
+        # The value of the secret access key is insignificant and can be set to anything.
+        if aws_access_key_id:
+            aws_secret_access_key = aws_secret_access_key or INTERNAL_AWS_SECRET_ACCESS_KEY
+
+        return self._get_client(
+            service_name=service_name,
+            region_name=region_name or config.region_name or self._get_region(),
+            use_ssl=self._use_ssl,
+            verify=self._verify,
+            endpoint_url=endpoint_url,
+            aws_access_key_id=aws_access_key_id,
+            aws_secret_access_key=aws_secret_access_key,
+            aws_session_token=aws_session_token,
+            config=config,
+        )
+
+
+class ExternalAwsClientFactory(ClientFactory):
+    def get_client(
+        self,
+        service_name: str,
+        region_name: Optional[str] = None,
+        aws_access_key_id: Optional[str] = None,
+        aws_secret_access_key: Optional[str] = None,
+        aws_session_token: Optional[str] = None,
+        endpoint_url: Optional[str] = None,
+        config: Optional[Config] = None,
+    ) -> BaseClient:
+        """
+        Build and return a client for connections originating outside LocalStack and targeting AWS.
+
+        If either of the access keys or the region is set to None, they are loaded from the following
+        locations:
+        - AWS environment variables
+        - Credentials file `~/.aws/credentials`
+        - Config file `~/.aws/config`
+
+        :param service_name: Service to build the client for, e.g. `s3`
+        :param region_name: Name of the AWS region to be associated with the client
+            If set to None, loads from botocore session.
+        :param aws_access_key_id: Access key to use for the client.
+            If set to None, loads from botocore session.
+        :param aws_secret_access_key: Secret key to use for the client.
+            If set to None, loads from botocore session.
+        :param aws_session_token: Session token to use for the client.
+            Not used if not set.
+        :param endpoint_url: Full endpoint URL to be used by the client.
+            Defaults to the appropriate AWS endpoint.
+        :param config: Boto config for advanced use.
+ """ + if config is None: + config = self._config + else: + config = self._config.merge(config) + + return self._get_client( + config=config, + service_name=service_name, + region_name=region_name or self._get_session_region(), + endpoint_url=endpoint_url, + use_ssl=True, + verify=True, + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_session_token=aws_session_token, + ) + + +def resolve_dns_from_upstream(hostname: str) -> str: + from localstack.dns.server import get_fallback_dns_server + + upstream_dns = get_fallback_dns_server() + request = dns.message.make_query(hostname, "A") + response = dns.query.udp(request, upstream_dns, port=53, timeout=5) + if len(response.answer) == 0: + raise ValueError(f"No DNS response found for hostname '{hostname}'") + + ip_addresses = [] + for answer in response.answer: + if answer.match(dns.rdataclass.IN, dns.rdatatype.A, dns.rdatatype.NONE): + ip_addresses.extend(answer.items.keys()) + + if not ip_addresses: + raise ValueError(f"No DNS records of type 'A' found for hostname '{hostname}'") + + return choice(ip_addresses).address + + +class ExternalBypassDnsClientFactory(ExternalAwsClientFactory): + """ + Client factory that makes requests against AWS ensuring that DNS resolution is not affected by the LocalStack DNS + server. + """ + + def __init__( + self, + session: Session = None, + config: Config = None, + ): + super().__init__(use_ssl=True, verify=True, session=session, config=config) + + def _get_client_post_hook(self, client: BaseClient) -> BaseClient: + client = super()._get_client_post_hook(client) + client._endpoint.http_session = ExternalBypassDnsSession() + return client + + +class ExternalBypassDnsHTTPConnection(AWSHTTPConnection): + """ + Connection class that bypasses the LocalStack DNS server for HTTP connections + """ + + def _new_conn(self) -> socket: + orig_host = self._dns_host + try: + self._dns_host = resolve_dns_from_upstream(self._dns_host) + return super()._new_conn() + finally: + self._dns_host = orig_host + + +class ExternalBypassDnsHTTPSConnection(AWSHTTPSConnection): + """ + Connection class that bypasses the LocalStack DNS server for HTTPS connections + """ + + def _new_conn(self) -> socket: + orig_host = self._dns_host + try: + self._dns_host = resolve_dns_from_upstream(self._dns_host) + return super()._new_conn() + finally: + self._dns_host = orig_host + + +class ExternalBypassDnsHTTPConnectionPool(AWSHTTPConnectionPool): + ConnectionCls = ExternalBypassDnsHTTPConnection + + +class ExternalBypassDnsHTTPSConnectionPool(AWSHTTPSConnectionPool): + ConnectionCls = ExternalBypassDnsHTTPSConnection + + +class ExternalBypassDnsSession(URLLib3Session): + """ + urllib3 session wrapper that uses our custom connection pool. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._pool_classes_by_scheme["https"] = ExternalBypassDnsHTTPSConnectionPool + self._pool_classes_by_scheme["http"] = ExternalBypassDnsHTTPConnectionPool + + +connect_to = InternalClientFactory(use_ssl=localstack_config.DISTRIBUTED_MODE) +connect_externally_to = ExternalClientFactory() + + +# +# Handlers +# + + +def _handler_create_request_parameters(params: dict[str, Any], context: dict[str, Any], **kwargs): + """ + Construct the data transfer object at the time of parsing the client + parameters and proxy it via the Boto context dict. + + This handler enables the use of additional keyword parameters in Boto API + operation functions. 
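The conversion this handler performs (described in the remainder of the docstring below) maps snake-case DTO keys to underscore-prefixed CamelCase operation parameters; concretely:

```python
def to_operation_parameter(dto_member: str) -> str:
    """Mirrors the conversion inside _handler_create_request_parameters."""
    return f"_{''.join(part.title() for part in dto_member.split('_'))}"


assert to_operation_parameter("service_principal") == "_ServicePrincipal"
assert to_operation_parameter("source_arn") == "_SourceArn"
```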
+
+    It uses the `InternalRequestParameters` type annotations to handle supported parameters.
+    The keys supported by this type are converted to method parameters by prefixing them with an
+    underscore `_` and converting snake case to camel case.
+    Example:
+        service_principal -> _ServicePrincipal
+    """
+
+    # Names of arguments that can be passed to Boto API operation functions.
+    # These must correspond to entries on the data transfer object.
+    dto = InternalRequestParameters()
+    for member in InternalRequestParameters.__annotations__.keys():
+        parameter = f"_{''.join([part.title() for part in member.split('_')])}"
+        if parameter in params:
+            dto[member] = params.pop(parameter)
+
+    context["_localstack"] = dto
+
+
+def _handler_inject_dto_header(params: dict[str, Any], context: dict[str, Any], **kwargs):
+    """
+    Retrieve the data transfer object from the Boto context dict and serialise
+    it as part of the request headers.
+    """
+    if (dto := context.pop("_localstack", None)) is not None:
+        params["headers"][INTERNAL_REQUEST_PARAMS_HEADER] = dump_dto(dto)
diff --git a/localstack-core/localstack/aws/data/sqs-query/2012-11-05/README.md b/localstack-core/localstack/aws/data/sqs-query/2012-11-05/README.md
new file mode 100644
index 0000000000000..7ca0522be5242
--- /dev/null
+++ b/localstack-core/localstack/aws/data/sqs-query/2012-11-05/README.md
@@ -0,0 +1,10 @@
+This spec preserves the SQS query protocol spec, which was part of botocore until the protocol was switched to json with `botocore==1.31.81`.
+This switch removed a lot of spec data that is necessary for proper parsing and serialization, which is why we have to preserve it on our own.
+
+- The spec content was preserved from this state: https://github.com/boto/botocore/blob/79c92132e266b15f62bc743ae0816c27d598c36e/botocore/data/sqs/2012-11-05/service-2.json
+- This was the last commit before the protocol switched back (again) to json (with https://github.com/boto/botocore/commit/47a515f6727a7585487d58c069c7c0063c28899e).
+- The file is licensed under the Apache License 2.0.
+- Modifications:
+  - Removal of documentation strings with the following regex: `(,)?\n\s+"documentation":".*"`
+  - Added `MessageSystemAttributeNames` to `ReceiveMessageRequest.members`, since AWS deprecated `AttributeNames`.
+    The patches in `spec-patches.json` are not applied to the boto client used in our sqs-query tests right now, because the custom loading is not fully integrated yet; therefore this change was made directly in the spec.
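For reference, a preserved spec like this one can be consumed through botocore's regular data loader once its parent directory is on the loader's search path, mirroring what `connect.py` does with `LOCALSTACK_BUILTIN_DATA_PATH`. A conceptual sketch with a placeholder path:

```python
from botocore.loaders import Loader

loader = Loader()
# placeholder path; it must contain sqs-query/2012-11-05/service-2.json
loader.search_paths.insert(0, "/path/to/localstack/aws/data")

sqs_query = loader.load_service_model("sqs-query", "service-2")
assert sqs_query["metadata"]["protocol"] == "query"
```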
diff --git a/localstack-core/localstack/aws/data/sqs-query/2012-11-05/service-2.json b/localstack-core/localstack/aws/data/sqs-query/2012-11-05/service-2.json new file mode 100644 index 0000000000000..37168390cc218 --- /dev/null +++ b/localstack-core/localstack/aws/data/sqs-query/2012-11-05/service-2.json @@ -0,0 +1,1505 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-11-05", + "endpointPrefix":"sqs", + "protocol":"query", + "serviceAbbreviation":"Amazon SQS", + "serviceFullName":"Amazon Simple Queue Service", + "serviceId":"SQS", + "signatureVersion":"v4", + "uid":"sqs-2012-11-05", + "xmlNamespace":"http://queue.amazonaws.com/doc/2012-11-05/" + }, + "operations":{ + "AddPermission":{ + "name":"AddPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddPermissionRequest"}, + "errors":[ + {"shape":"OverLimit"} + ] + }, + "CancelMessageMoveTask":{ + "name":"CancelMessageMoveTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelMessageMoveTaskRequest"}, + "output":{ + "shape":"CancelMessageMoveTaskResult", + "resultWrapper":"CancelMessageMoveTaskResult" + }, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperation"} + ] + }, + "ChangeMessageVisibility":{ + "name":"ChangeMessageVisibility", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangeMessageVisibilityRequest"}, + "errors":[ + {"shape":"MessageNotInflight"}, + {"shape":"ReceiptHandleIsInvalid"} + ] + }, + "ChangeMessageVisibilityBatch":{ + "name":"ChangeMessageVisibilityBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangeMessageVisibilityBatchRequest"}, + "output":{ + "shape":"ChangeMessageVisibilityBatchResult", + "resultWrapper":"ChangeMessageVisibilityBatchResult" + }, + "errors":[ + {"shape":"TooManyEntriesInBatchRequest"}, + {"shape":"EmptyBatchRequest"}, + {"shape":"BatchEntryIdsNotDistinct"}, + {"shape":"InvalidBatchEntryId"} + ] + }, + "CreateQueue":{ + "name":"CreateQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateQueueRequest"}, + "output":{ + "shape":"CreateQueueResult", + "resultWrapper":"CreateQueueResult" + }, + "errors":[ + {"shape":"QueueDeletedRecently"}, + {"shape":"QueueNameExists"} + ] + }, + "DeleteMessage":{ + "name":"DeleteMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMessageRequest"}, + "errors":[ + {"shape":"InvalidIdFormat"}, + {"shape":"ReceiptHandleIsInvalid"} + ] + }, + "DeleteMessageBatch":{ + "name":"DeleteMessageBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMessageBatchRequest"}, + "output":{ + "shape":"DeleteMessageBatchResult", + "resultWrapper":"DeleteMessageBatchResult" + }, + "errors":[ + {"shape":"TooManyEntriesInBatchRequest"}, + {"shape":"EmptyBatchRequest"}, + {"shape":"BatchEntryIdsNotDistinct"}, + {"shape":"InvalidBatchEntryId"} + ] + }, + "DeleteQueue":{ + "name":"DeleteQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteQueueRequest"} + }, + "GetQueueAttributes":{ + "name":"GetQueueAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetQueueAttributesRequest"}, + "output":{ + "shape":"GetQueueAttributesResult", + "resultWrapper":"GetQueueAttributesResult" + }, + "errors":[ + {"shape":"InvalidAttributeName"} + ] + }, + "GetQueueUrl":{ + "name":"GetQueueUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"GetQueueUrlRequest"}, + "output":{ + "shape":"GetQueueUrlResult", + "resultWrapper":"GetQueueUrlResult" + }, + "errors":[ + {"shape":"QueueDoesNotExist"} + ] + }, + "ListDeadLetterSourceQueues":{ + "name":"ListDeadLetterSourceQueues", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeadLetterSourceQueuesRequest"}, + "output":{ + "shape":"ListDeadLetterSourceQueuesResult", + "resultWrapper":"ListDeadLetterSourceQueuesResult" + }, + "errors":[ + {"shape":"QueueDoesNotExist"} + ] + }, + "ListMessageMoveTasks":{ + "name":"ListMessageMoveTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMessageMoveTasksRequest"}, + "output":{ + "shape":"ListMessageMoveTasksResult", + "resultWrapper":"ListMessageMoveTasksResult" + }, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperation"} + ] + }, + "ListQueueTags":{ + "name":"ListQueueTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListQueueTagsRequest"}, + "output":{ + "shape":"ListQueueTagsResult", + "resultWrapper":"ListQueueTagsResult" + } + }, + "ListQueues":{ + "name":"ListQueues", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListQueuesRequest"}, + "output":{ + "shape":"ListQueuesResult", + "resultWrapper":"ListQueuesResult" + } + }, + "PurgeQueue":{ + "name":"PurgeQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurgeQueueRequest"}, + "errors":[ + {"shape":"QueueDoesNotExist"}, + {"shape":"PurgeQueueInProgress"} + ] + }, + "ReceiveMessage":{ + "name":"ReceiveMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReceiveMessageRequest"}, + "output":{ + "shape":"ReceiveMessageResult", + "resultWrapper":"ReceiveMessageResult" + }, + "errors":[ + {"shape":"OverLimit"} + ] + }, + "RemovePermission":{ + "name":"RemovePermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemovePermissionRequest"} + }, + "SendMessage":{ + "name":"SendMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendMessageRequest"}, + "output":{ + "shape":"SendMessageResult", + "resultWrapper":"SendMessageResult" + }, + "errors":[ + {"shape":"InvalidMessageContents"}, + {"shape":"UnsupportedOperation"} + ] + }, + "SendMessageBatch":{ + "name":"SendMessageBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendMessageBatchRequest"}, + "output":{ + "shape":"SendMessageBatchResult", + "resultWrapper":"SendMessageBatchResult" + }, + "errors":[ + {"shape":"TooManyEntriesInBatchRequest"}, + {"shape":"EmptyBatchRequest"}, + {"shape":"BatchEntryIdsNotDistinct"}, + {"shape":"BatchRequestTooLong"}, + {"shape":"InvalidBatchEntryId"}, + {"shape":"UnsupportedOperation"} + ] + }, + "SetQueueAttributes":{ + "name":"SetQueueAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetQueueAttributesRequest"}, + "errors":[ + {"shape":"InvalidAttributeName"} + ] + }, + "StartMessageMoveTask":{ + "name":"StartMessageMoveTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMessageMoveTaskRequest"}, + "output":{ + "shape":"StartMessageMoveTaskResult", + "resultWrapper":"StartMessageMoveTaskResult" + }, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperation"} + ] + }, + "TagQueue":{ + "name":"TagQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"TagQueueRequest"} + }, + "UntagQueue":{ + "name":"UntagQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagQueueRequest"} + } + }, + "shapes":{ + "AWSAccountIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AWSAccountId" + }, + "flattened":true + }, + "ActionNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ActionName" + }, + "flattened":true + }, + "AddPermissionRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Label", + "AWSAccountIds", + "Actions" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "Label":{ + "shape":"String" + }, + "AWSAccountIds":{ + "shape":"AWSAccountIdList" + }, + "Actions":{ + "shape":"ActionNameList" + } + } + }, + "AttributeNameList":{ + "type":"list", + "member":{ + "shape":"QueueAttributeName", + "locationName":"AttributeName" + }, + "flattened":true + }, + "BatchEntryIdsNotDistinct":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.BatchEntryIdsNotDistinct", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "BatchRequestTooLong":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.BatchRequestTooLong", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "BatchResultErrorEntry":{ + "type":"structure", + "required":[ + "Id", + "SenderFault", + "Code" + ], + "members":{ + "Id":{ + "shape":"String" + }, + "SenderFault":{ + "shape":"Boolean" + }, + "Code":{ + "shape":"String" + }, + "Message":{ + "shape":"String" + } + } + }, + "BatchResultErrorEntryList":{ + "type":"list", + "member":{ + "shape":"BatchResultErrorEntry", + "locationName":"BatchResultErrorEntry" + }, + "flattened":true + }, + "Binary":{"type":"blob"}, + "BinaryList":{ + "type":"list", + "member":{ + "shape":"Binary", + "locationName":"BinaryListValue" + } + }, + "Boolean":{"type":"boolean"}, + "BoxedInteger":{ + "type":"integer", + "box":true + }, + "CancelMessageMoveTaskRequest":{ + "type":"structure", + "required":["TaskHandle"], + "members":{ + "TaskHandle":{ + "shape":"String" + } + } + }, + "CancelMessageMoveTaskResult":{ + "type":"structure", + "members":{ + "ApproximateNumberOfMessagesMoved":{ + "shape":"Long" + } + } + }, + "ChangeMessageVisibilityBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "Entries":{ + "shape":"ChangeMessageVisibilityBatchRequestEntryList" + } + } + }, + "ChangeMessageVisibilityBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "ReceiptHandle" + ], + "members":{ + "Id":{ + "shape":"String" + }, + "ReceiptHandle":{ + "shape":"String" + }, + "VisibilityTimeout":{ + "shape":"Integer" + } + } + }, + "ChangeMessageVisibilityBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"ChangeMessageVisibilityBatchRequestEntry", + "locationName":"ChangeMessageVisibilityBatchRequestEntry" + }, + "flattened":true + }, + "ChangeMessageVisibilityBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{ + "shape":"ChangeMessageVisibilityBatchResultEntryList" + }, + "Failed":{ + "shape":"BatchResultErrorEntryList" + } + } + }, + "ChangeMessageVisibilityBatchResultEntry":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"String" + } + } + }, + "ChangeMessageVisibilityBatchResultEntryList":{ + "type":"list", + "member":{ + 
"shape":"ChangeMessageVisibilityBatchResultEntry", + "locationName":"ChangeMessageVisibilityBatchResultEntry" + }, + "flattened":true + }, + "ChangeMessageVisibilityRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "ReceiptHandle", + "VisibilityTimeout" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "ReceiptHandle":{ + "shape":"String" + }, + "VisibilityTimeout":{ + "shape":"Integer" + } + } + }, + "CreateQueueRequest":{ + "type":"structure", + "required":["QueueName"], + "members":{ + "QueueName":{ + "shape":"String" + }, + "Attributes":{ + "shape":"QueueAttributeMap", + "locationName":"Attribute" + }, + "tags":{ + "shape":"TagMap", + "locationName":"Tag" + } + } + }, + "CreateQueueResult":{ + "type":"structure", + "members":{ + "QueueUrl":{ + "shape":"String" + } + } + }, + "DeleteMessageBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "Entries":{ + "shape":"DeleteMessageBatchRequestEntryList" + } + } + }, + "DeleteMessageBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "ReceiptHandle" + ], + "members":{ + "Id":{ + "shape":"String" + }, + "ReceiptHandle":{ + "shape":"String" + } + } + }, + "DeleteMessageBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"DeleteMessageBatchRequestEntry", + "locationName":"DeleteMessageBatchRequestEntry" + }, + "flattened":true + }, + "DeleteMessageBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{ + "shape":"DeleteMessageBatchResultEntryList" + }, + "Failed":{ + "shape":"BatchResultErrorEntryList" + } + } + }, + "DeleteMessageBatchResultEntry":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"String" + } + } + }, + "DeleteMessageBatchResultEntryList":{ + "type":"list", + "member":{ + "shape":"DeleteMessageBatchResultEntry", + "locationName":"DeleteMessageBatchResultEntry" + }, + "flattened":true + }, + "DeleteMessageRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "ReceiptHandle" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "ReceiptHandle":{ + "shape":"String" + } + } + }, + "DeleteQueueRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{ + "shape":"String" + } + } + }, + "EmptyBatchRequest":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.EmptyBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "GetQueueAttributesRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "AttributeNames":{ + "shape":"AttributeNameList" + } + } + }, + "GetQueueAttributesResult":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"QueueAttributeMap", + "locationName":"Attribute" + } + } + }, + "GetQueueUrlRequest":{ + "type":"structure", + "required":["QueueName"], + "members":{ + "QueueName":{ + "shape":"String" + }, + "QueueOwnerAWSAccountId":{ + "shape":"String" + } + } + }, + "GetQueueUrlResult":{ + "type":"structure", + "members":{ + "QueueUrl":{ + "shape":"String" + } + } + }, + "Integer":{"type":"integer"}, + "InvalidAttributeName":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidBatchEntryId":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.InvalidBatchEntryId", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
"InvalidIdFormat":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidMessageContents":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListDeadLetterSourceQueuesRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "NextToken":{ + "shape":"Token" + }, + "MaxResults":{ + "shape":"BoxedInteger" + } + } + }, + "ListDeadLetterSourceQueuesResult":{ + "type":"structure", + "required":["queueUrls"], + "members":{ + "queueUrls":{ + "shape":"QueueUrlList" + }, + "NextToken":{ + "shape":"Token" + } + } + }, + "ListMessageMoveTasksRequest":{ + "type":"structure", + "required":["SourceArn"], + "members":{ + "SourceArn":{ + "shape":"String" + }, + "MaxResults":{ + "shape":"Integer" + } + } + }, + "ListMessageMoveTasksResult":{ + "type":"structure", + "members":{ + "Results":{ + "shape":"ListMessageMoveTasksResultEntryList" + } + } + }, + "ListMessageMoveTasksResultEntry":{ + "type":"structure", + "members":{ + "TaskHandle":{ + "shape":"String" + }, + "Status":{ + "shape":"String" + }, + "SourceArn":{ + "shape":"String" + }, + "DestinationArn":{ + "shape":"String" + }, + "MaxNumberOfMessagesPerSecond":{ + "shape":"Integer" + }, + "ApproximateNumberOfMessagesMoved":{ + "shape":"Long" + }, + "ApproximateNumberOfMessagesToMove":{ + "shape":"Long" + }, + "FailureReason":{ + "shape":"String" + }, + "StartedTimestamp":{ + "shape":"Long" + } + } + }, + "ListMessageMoveTasksResultEntryList":{ + "type":"list", + "member":{ + "shape":"ListMessageMoveTasksResultEntry", + "locationName":"ListMessageMoveTasksResultEntry" + }, + "flattened":true + }, + "ListQueueTagsRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{ + "shape":"String" + } + } + }, + "ListQueueTagsResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "locationName":"Tag" + } + } + }, + "ListQueuesRequest":{ + "type":"structure", + "members":{ + "QueueNamePrefix":{ + "shape":"String" + }, + "NextToken":{ + "shape":"Token" + }, + "MaxResults":{ + "shape":"BoxedInteger" + } + } + }, + "ListQueuesResult":{ + "type":"structure", + "members":{ + "QueueUrls":{ + "shape":"QueueUrlList" + }, + "NextToken":{ + "shape":"Token" + } + } + }, + "Long":{"type":"long"}, + "Message":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"String" + }, + "ReceiptHandle":{ + "shape":"String" + }, + "MD5OfBody":{ + "shape":"String" + }, + "Body":{ + "shape":"String" + }, + "Attributes":{ + "shape":"MessageSystemAttributeMap", + "locationName":"Attribute" + }, + "MD5OfMessageAttributes":{ + "shape":"String" + }, + "MessageAttributes":{ + "shape":"MessageBodyAttributeMap", + "locationName":"MessageAttribute" + } + } + }, + "MessageAttributeName":{"type":"string"}, + "MessageAttributeNameList":{ + "type":"list", + "member":{ + "shape":"MessageAttributeName", + "locationName":"MessageAttributeName" + }, + "flattened":true + }, + "MessageAttributeValue":{ + "type":"structure", + "required":["DataType"], + "members":{ + "StringValue":{ + "shape":"String" + }, + "BinaryValue":{ + "shape":"Binary" + }, + "StringListValues":{ + "shape":"StringList", + "flattened":true, + "locationName":"StringListValue" + }, + "BinaryListValues":{ + "shape":"BinaryList", + "flattened":true, + "locationName":"BinaryListValue" + }, + "DataType":{ + "shape":"String" + } + } + }, + "MessageBodyAttributeMap":{ + "type":"map", + "key":{ + "shape":"String", + "locationName":"Name" + }, + "value":{ + 
"shape":"MessageAttributeValue", + "locationName":"Value" + }, + "flattened":true + }, + "MessageBodySystemAttributeMap":{ + "type":"map", + "key":{ + "shape":"MessageSystemAttributeNameForSends", + "locationName":"Name" + }, + "value":{ + "shape":"MessageSystemAttributeValue", + "locationName":"Value" + }, + "flattened":true + }, + "MessageList":{ + "type":"list", + "member":{ + "shape":"Message", + "locationName":"Message" + }, + "flattened":true + }, + "MessageNotInflight":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.MessageNotInflight", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "MessageSystemAttributeMap":{ + "type":"map", + "key":{ + "shape":"MessageSystemAttributeName", + "locationName":"Name" + }, + "value":{ + "shape":"String", + "locationName":"Value" + }, + "flattened":true, + "locationName":"Attribute" + }, + "MessageSystemAttributeName":{ + "type":"string", + "enum":[ + "SenderId", + "SentTimestamp", + "ApproximateReceiveCount", + "ApproximateFirstReceiveTimestamp", + "SequenceNumber", + "MessageDeduplicationId", + "MessageGroupId", + "AWSTraceHeader", + "DeadLetterQueueSourceArn" + ] + }, + "MessageSystemAttributeNameForSends":{ + "type":"string", + "enum":["AWSTraceHeader"] + }, + "MessageSystemAttributeValue":{ + "type":"structure", + "required":["DataType"], + "members":{ + "StringValue":{ + "shape":"String" + }, + "BinaryValue":{ + "shape":"Binary" + }, + "StringListValues":{ + "shape":"StringList", + "flattened":true, + "locationName":"StringListValue" + }, + "BinaryListValues":{ + "shape":"BinaryList", + "flattened":true, + "locationName":"BinaryListValue" + }, + "DataType":{ + "shape":"String" + } + } + }, + "OverLimit":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OverLimit", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "PurgeQueueInProgress":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.PurgeQueueInProgress", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "PurgeQueueRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{ + "shape":"String" + } + } + }, + "QueueAttributeMap":{ + "type":"map", + "key":{ + "shape":"QueueAttributeName", + "locationName":"Name" + }, + "value":{ + "shape":"String", + "locationName":"Value" + }, + "flattened":true, + "locationName":"Attribute" + }, + "QueueAttributeName":{ + "type":"string", + "enum":[ + "All", + "Policy", + "VisibilityTimeout", + "MaximumMessageSize", + "MessageRetentionPeriod", + "ApproximateNumberOfMessages", + "ApproximateNumberOfMessagesNotVisible", + "CreatedTimestamp", + "LastModifiedTimestamp", + "QueueArn", + "ApproximateNumberOfMessagesDelayed", + "DelaySeconds", + "ReceiveMessageWaitTimeSeconds", + "RedrivePolicy", + "FifoQueue", + "ContentBasedDeduplication", + "KmsMasterKeyId", + "KmsDataKeyReusePeriodSeconds", + "DeduplicationScope", + "FifoThroughputLimit", + "RedriveAllowPolicy", + "SqsManagedSseEnabled" + ] + }, + "QueueDeletedRecently":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.QueueDeletedRecently", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueDoesNotExist":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.NonExistentQueue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueNameExists":{ + "type":"structure", + "members":{ + }, + 
"error":{ + "code":"QueueAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueUrlList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"QueueUrl" + }, + "flattened":true + }, + "ReceiptHandleIsInvalid":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ReceiveMessageRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "AttributeNames":{ + "shape":"AttributeNameList" + }, + "MessageSystemAttributeNames":{ + "shape":"AttributeNameList" + }, + "MessageAttributeNames":{ + "shape":"MessageAttributeNameList" + }, + "MaxNumberOfMessages":{ + "shape":"Integer" + }, + "VisibilityTimeout":{ + "shape":"Integer" + }, + "WaitTimeSeconds":{ + "shape":"Integer" + }, + "ReceiveRequestAttemptId":{ + "shape":"String" + } + } + }, + "ReceiveMessageResult":{ + "type":"structure", + "members":{ + "Messages":{ + "shape":"MessageList" + } + } + }, + "RemovePermissionRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Label" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "Label":{ + "shape":"String" + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResourceNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SendMessageBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "Entries":{ + "shape":"SendMessageBatchRequestEntryList" + } + } + }, + "SendMessageBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "MessageBody" + ], + "members":{ + "Id":{ + "shape":"String" + }, + "MessageBody":{ + "shape":"String" + }, + "DelaySeconds":{ + "shape":"Integer" + }, + "MessageAttributes":{ + "shape":"MessageBodyAttributeMap", + "locationName":"MessageAttribute" + }, + "MessageSystemAttributes":{ + "shape":"MessageBodySystemAttributeMap", + "locationName":"MessageSystemAttribute" + }, + "MessageDeduplicationId":{ + "shape":"String" + }, + "MessageGroupId":{ + "shape":"String" + } + } + }, + "SendMessageBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"SendMessageBatchRequestEntry", + "locationName":"SendMessageBatchRequestEntry" + }, + "flattened":true + }, + "SendMessageBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{ + "shape":"SendMessageBatchResultEntryList" + }, + "Failed":{ + "shape":"BatchResultErrorEntryList" + } + } + }, + "SendMessageBatchResultEntry":{ + "type":"structure", + "required":[ + "Id", + "MessageId", + "MD5OfMessageBody" + ], + "members":{ + "Id":{ + "shape":"String" + }, + "MessageId":{ + "shape":"String" + }, + "MD5OfMessageBody":{ + "shape":"String" + }, + "MD5OfMessageAttributes":{ + "shape":"String" + }, + "MD5OfMessageSystemAttributes":{ + "shape":"String" + }, + "SequenceNumber":{ + "shape":"String" + } + } + }, + "SendMessageBatchResultEntryList":{ + "type":"list", + "member":{ + "shape":"SendMessageBatchResultEntry", + "locationName":"SendMessageBatchResultEntry" + }, + "flattened":true + }, + "SendMessageRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "MessageBody" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "MessageBody":{ + "shape":"String" + }, + "DelaySeconds":{ + "shape":"Integer" + }, + "MessageAttributes":{ + "shape":"MessageBodyAttributeMap", + "locationName":"MessageAttribute" + }, + 
"MessageSystemAttributes":{ + "shape":"MessageBodySystemAttributeMap", + "locationName":"MessageSystemAttribute" + }, + "MessageDeduplicationId":{ + "shape":"String" + }, + "MessageGroupId":{ + "shape":"String" + } + } + }, + "SendMessageResult":{ + "type":"structure", + "members":{ + "MD5OfMessageBody":{ + "shape":"String" + }, + "MD5OfMessageAttributes":{ + "shape":"String" + }, + "MD5OfMessageSystemAttributes":{ + "shape":"String" + }, + "MessageId":{ + "shape":"String" + }, + "SequenceNumber":{ + "shape":"String" + } + } + }, + "SetQueueAttributesRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Attributes" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "Attributes":{ + "shape":"QueueAttributeMap", + "locationName":"Attribute" + } + } + }, + "StartMessageMoveTaskRequest":{ + "type":"structure", + "required":["SourceArn"], + "members":{ + "SourceArn":{ + "shape":"String" + }, + "DestinationArn":{ + "shape":"String" + }, + "MaxNumberOfMessagesPerSecond":{ + "shape":"Integer" + } + } + }, + "StartMessageMoveTaskResult":{ + "type":"structure", + "members":{ + "TaskHandle":{ + "shape":"String" + } + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"StringListValue" + } + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{ + "shape":"TagKey", + "locationName":"TagKey" + }, + "flattened":true + }, + "TagMap":{ + "type":"map", + "key":{ + "shape":"TagKey", + "locationName":"Key" + }, + "value":{ + "shape":"TagValue", + "locationName":"Value" + }, + "flattened":true, + "locationName":"Tag" + }, + "TagQueueRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Tags" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "Tags":{ + "shape":"TagMap" + } + } + }, + "TagValue":{"type":"string"}, + "Token":{"type":"string"}, + "TooManyEntriesInBatchRequest":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.TooManyEntriesInBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnsupportedOperation":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UntagQueueRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "TagKeys" + ], + "members":{ + "QueueUrl":{ + "shape":"String" + }, + "TagKeys":{ + "shape":"TagKeyList" + } + } + } + } +} diff --git a/localstack-core/localstack/aws/forwarder.py b/localstack-core/localstack/aws/forwarder.py new file mode 100644 index 0000000000000..c25d4b90f6c09 --- /dev/null +++ b/localstack-core/localstack/aws/forwarder.py @@ -0,0 +1,271 @@ +""" +This module contains utilities to call a backend (e.g., an external service process like +DynamoDBLocal) from a service provider. 
+""" + +from typing import Any, Callable, Mapping, Optional, Union + +from botocore.awsrequest import AWSPreparedRequest, prepare_request_dict +from botocore.config import Config as BotoConfig +from werkzeug.datastructures import Headers + +from localstack.aws.api.core import ( + RequestContext, + ServiceRequest, + ServiceRequestHandler, + ServiceResponse, +) +from localstack.aws.client import create_http_request, parse_response, raise_service_exception +from localstack.aws.connect import connect_to +from localstack.aws.skeleton import DispatchTable, create_dispatch_table +from localstack.aws.spec import load_service +from localstack.constants import AWS_REGION_US_EAST_1 +from localstack.http import Response +from localstack.http.proxy import Proxy + + +class AwsRequestProxy: + """ + Implements the ``ServiceRequestHandler`` protocol to forward AWS requests to a backend. It is stateful and uses a + ``Proxy`` instance for re-using client connections to the backend. + """ + + def __init__( + self, + endpoint_url: str, + parse_response: bool = True, + include_response_metadata: bool = False, + ): + """ + Create a new AwsRequestProxy. ``parse_response`` control the return behavior of ``forward``. If + ``parse_response`` is set, then ``forward`` parses the HTTP response from the backend and returns a + ``ServiceResponse``, otherwise it returns the raw HTTP ``Response`` object. + + :param endpoint_url: the backend to proxy the requests to, used as ``forward_base_url`` for the ``Proxy``. + :param parse_response: whether to parse the response before returning it + :param include_response_metadata: include AWS response metadata, only used with ``parse_response=True`` + """ + self.endpoint_url = endpoint_url + self.parse_response = parse_response + self.include_response_metadata = include_response_metadata + self.proxy = Proxy(forward_base_url=endpoint_url) + + def __call__( + self, + context: RequestContext, + service_request: ServiceRequest = None, + ) -> Optional[Union[ServiceResponse, Response]]: + """Method to satisfy the ``ServiceRequestHandler`` protocol.""" + return self.forward(context, service_request) + + def forward( + self, + context: RequestContext, + service_request: ServiceRequest = None, + ) -> Optional[Union[ServiceResponse, Response]]: + """ + Forwards the given request to the backend configured by ``endpoint_url``. + + :param context: the original request context of the incoming request + :param service_request: optionally a new service + :return: + """ + if service_request is not None: + # if a service request is passed then we need to create a new request context + context = self.new_request_context(context, service_request) + + http_response = self.proxy.forward(context.request, forward_path=context.request.path) + if not self.parse_response: + return http_response + parsed_response = parse_response( + context.operation, http_response, self.include_response_metadata + ) + raise_service_exception(http_response, parsed_response) + return parsed_response + + def new_request_context(self, original: RequestContext, service_request: ServiceRequest): + context = create_aws_request_context( + service_name=original.service.service_name, + action=original.operation.name, + parameters=service_request, + region=original.region, + ) + # update the newly created context with non-payload specific request headers (the payload can differ from + # the original request, f.e. 
+        headers = Headers(original.request.headers)
+        headers.pop("Content-Type", None)
+        headers.pop("Content-Length", None)
+        context.request.headers.update(headers)
+        return context
+
+
+def ForwardingFallbackDispatcher(
+    provider: object, request_forwarder: ServiceRequestHandler
+) -> DispatchTable:
+    """
+    Wraps a provider with a request forwarder. It does so by creating a new DispatchTable from the original
+    provider, and wrapping each method with a fallthrough method that calls ``request_forwarder`` if the
+    original provider raises a ``NotImplementedError``.
+
+    :param provider: the ASF provider
+    :param request_forwarder: callable that forwards the request (e.g., to a backend server)
+    :return: a modified DispatchTable
+    """
+    table = create_dispatch_table(provider)
+
+    for op, fn in table.items():
+        table[op] = _wrap_with_fallthrough(fn, request_forwarder)
+
+    return table
+
+
+class NotImplementedAvoidFallbackError(NotImplementedError):
+    pass
+
+
+def _wrap_with_fallthrough(
+    handler: ServiceRequestHandler, fallthrough_handler: ServiceRequestHandler
+) -> ServiceRequestHandler:
+    def _call(context, req) -> ServiceResponse:
+        try:
+            # handler will typically be an ASF provider method, and in case it hasn't been
+            # implemented, we try to fall back to forwarding the request to the backend
+            return handler(context, req)
+        except NotImplementedAvoidFallbackError as e:
+            # if the fallback has been explicitly disabled, don't pass on to the fallback
+            raise e
+        except NotImplementedError:
+            pass
+
+        return fallthrough_handler(context, req)
+
+    return _call
+
+
+def HttpFallbackDispatcher(provider: object, forward_url_getter: Callable[[str, str], str]):
+    return ForwardingFallbackDispatcher(provider, get_request_forwarder_http(forward_url_getter))
+
+
+def get_request_forwarder_http(
+    forward_url_getter: Callable[[str, str], str],
+) -> ServiceRequestHandler:
+    """
+    Returns a ServiceRequestHandler that creates, for each invocation, a new AwsRequestProxy from the result of
+    forward_url_getter. Note that this is an inefficient method of proxying, since for every call a new client
+    connection has to be established. Try to instead use static forward URL values and use ``AwsRequestProxy`` directly.
+
+    :param forward_url_getter: a factory method for returning forward base urls for the proxy
+    :return: a ServiceRequestHandler acting as a proxy
+    """
+
+    def _forward_request(
+        context: RequestContext, service_request: ServiceRequest = None
+    ) -> ServiceResponse:
+        return AwsRequestProxy(forward_url_getter(context.account_id, context.region)).forward(
+            context, service_request
+        )
+
+    return _forward_request
+
+
+def dispatch_to_backend(
+    context: RequestContext,
+    http_request_dispatcher: Callable[[RequestContext], Response],
+    include_response_metadata=False,
+) -> ServiceResponse:
+    """
+    Dispatch the given request to a backend by using the `http_request_dispatcher` function to
+    fetch an HTTP response, converting it to a ServiceResponse.
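+
+    A sketch of a dispatcher, assuming an ``AwsRequestProxy`` in raw-HTTP mode against a
+    hypothetical backend URL:
+
+        proxy = AwsRequestProxy("http://localhost:8000", parse_response=False)
+        response = dispatch_to_backend(context, lambda ctx: proxy.forward(ctx))
+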
+ :param context: the request context + :param http_request_dispatcher: dispatcher that performs the request and returns an HTTP response + :param include_response_metadata: whether to include boto3 response metadata in the response + :return: parsed service response + :raises ServiceException: if the dispatcher returned an error response + """ + http_response = http_request_dispatcher(context) + parsed_response = parse_response(context.operation, http_response, include_response_metadata) + raise_service_exception(http_response, parsed_response) + return parsed_response + + +# boto config deactivating param validation to forward to backends (backends are responsible for validating params) +_non_validating_boto_config = BotoConfig(parameter_validation=False) + + +def create_aws_request_context( + service_name: str, + action: str, + parameters: Mapping[str, Any] = None, + region: str = None, + endpoint_url: Optional[str] = None, +) -> RequestContext: + """ + This is a stripped-down version of what the botocore client does to perform an HTTP request from a client call. A + client call looks something like this: boto3.client("sqs").create_queue(QueueName="myqueue"), which will be + serialized into an HTTP request. This method does the same, without performing the actual request, and with a + more low-level interface. An equivalent call would be + + create_aws_request_context("sqs", "CreateQueue", {"QueueName": "myqueue"}) + + :param service_name: the AWS service + :param action: the action to invoke + :param parameters: the invocation parameters + :param region: the region name (default is us-east-1) + :param endpoint_url: the endpoint to call (defaults to localstack) + :return: a RequestContext object that describes this request + """ + if parameters is None: + parameters = {} + if region is None: + region = AWS_REGION_US_EAST_1 + + service = load_service(service_name) + operation = service.operation_model(action) + + # we re-use botocore internals here to serialize the HTTP request, + # but deactivate validation (validation errors should be handled by the backend) + # and don't send it yet + client = connect_to.get_client( + service_name, + endpoint_url=endpoint_url, + region_name=region, + config=_non_validating_boto_config, + ) + request_context = { + "client_region": region, + "has_streaming_input": operation.has_streaming_input, + "auth_type": operation.auth_type, + } + + # The endpoint URL is mandatory here, set a dummy if not given (doesn't _need_ to be localstack specific) + if not endpoint_url: + endpoint_url = "http://localhost.localstack.cloud" + # pre-process the request args (some params are modified using botocore event handlers) + parameters = client._emit_api_params(parameters, operation, request_context) + request_dict = client._convert_to_request_dict( + parameters, operation, endpoint_url, context=request_context + ) + + if auth_path := request_dict.get("auth_path"): + # botocore >= 1.28 might modify the url path of the request dict (specifically for S3). + # It will then set the original url path as "auth_path". If the auth_path is set, we reset the url_path. + # Since botocore 1.31.2, botocore will strip the query from the `authPart` + # We need to add it back from `requestUri` field + # Afterwards the request needs to be prepared again. 
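+        # Illustrative example (values made up): with url_path "/bucket/encoded%2Bkey?uploads" and
+        # auth_path "/bucket/encoded+key", the url_path is reset to "/bucket/encoded+key?uploads".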
+ path, sep, query = request_dict["url_path"].partition("?") + request_dict["url_path"] = f"{auth_path}{sep}{query}" + prepare_request_dict( + request_dict, + endpoint_url=endpoint_url, + user_agent=client._client_config.user_agent, + context=request_context, + ) + + aws_request: AWSPreparedRequest = client._endpoint.create_request(request_dict, operation) + context = RequestContext(request=create_http_request(aws_request)) + context.service = service + context.operation = operation + context.region = region + context.service_request = parameters + + return context diff --git a/localstack-core/localstack/aws/gateway.py b/localstack-core/localstack/aws/gateway.py new file mode 100644 index 0000000000000..6fd526b6014fc --- /dev/null +++ b/localstack-core/localstack/aws/gateway.py @@ -0,0 +1,32 @@ +import typing as t + +from rolo.gateway import Gateway as RoloGateway +from rolo.response import Response + +from .chain import ExceptionHandler, Handler, RequestContext + +__all__ = [ + "Gateway", +] + + +class Gateway(RoloGateway): + def __init__( + self, + request_handlers: list[Handler] = None, + response_handlers: list[Handler] = None, + finalizers: list[Handler] = None, + exception_handlers: list[ExceptionHandler] = None, + context_class: t.Type[RequestContext] = None, + ) -> None: + super().__init__( + request_handlers, + response_handlers, + finalizers, + exception_handlers, + context_class or RequestContext, + ) + + def handle(self, context: RequestContext, response: Response) -> None: + """Exposes the same interface as ``HandlerChain.handle``.""" + return self.new_chain().handle(context, response) diff --git a/localstack-core/localstack/aws/handlers/__init__.py b/localstack-core/localstack/aws/handlers/__init__.py new file mode 100644 index 0000000000000..a7aea2c69b03d --- /dev/null +++ b/localstack-core/localstack/aws/handlers/__init__.py @@ -0,0 +1,51 @@ +"""A set of common handlers to build an AWS server application.""" + +from .. import chain +from . 
import (
+    analytics,
+    auth,
+    codec,
+    cors,
+    fallback,
+    internal,
+    internal_requests,
+    legacy,
+    logging,
+    presigned_url,
+    region,
+    service,
+    tracing,
+    validation,
+)
+
+handle_runtime_shutdown = internal.RuntimeShutdownHandler()
+enforce_cors = cors.CorsEnforcer()
+preprocess_request = chain.CompositeHandler()
+add_cors_response_headers = cors.CorsResponseEnricher()
+content_decoder = codec.ContentDecoder()
+parse_service_name = service.ServiceNameParser()
+parse_service_request = service.ServiceRequestParser()
+add_account_id = auth.AccountIdEnricher()
+inject_auth_header_if_missing = auth.MissingAuthHeaderInjector()
+add_region_from_header = region.RegionContextEnricher()
+rewrite_region = region.RegionRewriter()
+add_internal_request_params = internal_requests.InternalRequestParamsEnricher()
+validate_request_schema = validation.OpenAPIRequestValidator()
+validate_response_schema = validation.OpenAPIResponseValidator()
+log_exception = logging.ExceptionLogger()
+log_response = logging.ResponseLogger()
+count_service_request = analytics.ServiceRequestCounter()
+handle_service_exception = service.ServiceExceptionSerializer()
+handle_internal_failure = fallback.InternalFailureHandler()
+serve_custom_service_request_handlers = chain.CompositeHandler()
+serve_localstack_resources = internal.LocalstackResourceHandler()
+run_custom_response_handlers = chain.CompositeResponseHandler()
+modify_service_response = service.ServiceResponseHandlers()
+parse_service_response = service.ServiceResponseParser()
+parse_trace_context = tracing.TraceContextParser()
+parse_pre_signed_url_request = presigned_url.ParsePreSignedUrlRequest()
+run_custom_finalizers = chain.CompositeFinalizer()
+serve_custom_exception_handlers = chain.CompositeExceptionHandler()
+# legacy compatibility handlers
+serve_edge_router_rules = legacy.EdgeRouterHandler()
+set_close_connection_header = legacy.set_close_connection_header
diff --git a/localstack-core/localstack/aws/handlers/analytics.py b/localstack-core/localstack/aws/handlers/analytics.py
new file mode 100644
index 0000000000000..4e5bbfa8aa085
--- /dev/null
+++ b/localstack-core/localstack/aws/handlers/analytics.py
@@ -0,0 +1,69 @@
+import logging
+import threading
+from typing import Optional
+
+from localstack import config
+from localstack.aws.api import RequestContext
+from localstack.aws.chain import HandlerChain
+from localstack.aws.client import parse_response
+from localstack.http import Response
+from localstack.utils.analytics.service_request_aggregator import (
+    ServiceRequestAggregator,
+    ServiceRequestInfo,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class ServiceRequestCounter:
+    aggregator: ServiceRequestAggregator
+
+    def __init__(self, service_request_aggregator: ServiceRequestAggregator = None):
+        self.aggregator = service_request_aggregator or ServiceRequestAggregator()
+        self._mutex = threading.Lock()
+        self._started = False
+
+    def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):
+        if response is None or context.operation is None:
+            return
+        if config.DISABLE_EVENTS:
+            return
+        if context.is_internal_call:
+            # don't count internal requests
+            return
+
+        # this condition will only be true for the first call, so it makes sense to not acquire the lock every time
+        if not self._started:
+            with self._mutex:
+                if not self._started:
+                    self._started = True
+                    self.aggregator.start()
+
+        err_type = self._get_err_type(context, response) if response.status_code >= 400 else None
+        service_name =
context.operation.service_model.service_name + operation_name = context.operation.name + + self.aggregator.add_request( + ServiceRequestInfo( + service_name, + operation_name, + response.status_code, + err_type=err_type, + ) + ) + + def _get_err_type(self, context: RequestContext, response: Response) -> Optional[str]: + """ + Attempts to re-use the existing service_response, or parse and return the error type from the response body, + e.g. ``ResourceInUseException``. + """ + try: + if context.service_exception: + return context.service_exception.code + + response = parse_response(context.operation, response) + return response["Error"]["Code"] + except Exception: + if config.DEBUG_ANALYTICS: + LOG.exception("error parsing error response") + return None diff --git a/localstack-core/localstack/aws/handlers/auth.py b/localstack-core/localstack/aws/handlers/auth.py new file mode 100644 index 0000000000000..789734bbb28f9 --- /dev/null +++ b/localstack-core/localstack/aws/handlers/auth.py @@ -0,0 +1,55 @@ +import logging + +from localstack.aws.accounts import ( + get_account_id_from_access_key_id, +) +from localstack.constants import ( + AWS_REGION_US_EAST_1, + DEFAULT_AWS_ACCOUNT_ID, +) +from localstack.http import Response +from localstack.utils.aws.request_context import ( + extract_access_key_id_from_auth_header, + mock_aws_request_headers, +) + +from ..api import RequestContext +from ..chain import Handler, HandlerChain + +LOG = logging.getLogger(__name__) + + +class MissingAuthHeaderInjector(Handler): + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + # FIXME: this is needed for allowing access to resources via plain URLs where access is typically restricted ( + # e.g., GET requests on S3 URLs or apigateway routes). this should probably be part of a general IAM middleware + # (that allows access to restricted resources by default) + if not context.service: + return + + api = context.service.service_name + headers = context.request.headers + + if not headers.get("Authorization"): + headers["Authorization"] = mock_aws_request_headers( + api, aws_access_key_id="injectedaccesskey", region_name=AWS_REGION_US_EAST_1 + )["Authorization"] + + +class AccountIdEnricher(Handler): + """ + A handler that sets the AWS account of the request in the RequestContext. + """ + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + # Obtain the access key ID + access_key_id = ( + extract_access_key_id_from_auth_header(context.request.headers) + or DEFAULT_AWS_ACCOUNT_ID + ) + + # Obtain the account ID from access key ID + context.account_id = get_account_id_from_access_key_id(access_key_id) + + # Make Moto use the same Account ID as LocalStack + context.request.headers.add("x-moto-account-id", context.account_id) diff --git a/localstack/aws/handlers/codec.py b/localstack-core/localstack/aws/handlers/codec.py similarity index 100% rename from localstack/aws/handlers/codec.py rename to localstack-core/localstack/aws/handlers/codec.py diff --git a/localstack-core/localstack/aws/handlers/cors.py b/localstack-core/localstack/aws/handlers/cors.py new file mode 100644 index 0000000000000..13540e0165710 --- /dev/null +++ b/localstack-core/localstack/aws/handlers/cors.py @@ -0,0 +1,283 @@ +""" +A set of handlers which handle Cross Origin Resource Sharing (CORS). 
+""" + +import logging +import re +from typing import List, Set +from urllib.parse import urlparse + +from werkzeug.datastructures import Headers + +from localstack import config +from localstack.aws.api import RequestContext +from localstack.aws.chain import Handler, HandlerChain +from localstack.config import EXTRA_CORS_ALLOWED_HEADERS, EXTRA_CORS_EXPOSE_HEADERS +from localstack.constants import LOCALHOST, LOCALHOST_HOSTNAME, PATH_USER_REQUEST +from localstack.http import Response +from localstack.utils.urls import localstack_host + +LOG = logging.getLogger(__name__) + +# CORS headers +ACL_ALLOW_HEADERS = "Access-Control-Allow-Headers" +ACL_CREDENTIALS = "Access-Control-Allow-Credentials" +ACL_EXPOSE_HEADERS = "Access-Control-Expose-Headers" +ACL_METHODS = "Access-Control-Allow-Methods" +ACL_ORIGIN = "Access-Control-Allow-Origin" +ACL_REQUEST_HEADERS = "Access-Control-Request-Headers" + +# header name constants +ACL_REQUEST_PRIVATE_NETWORK = "Access-Control-Request-Private-Network" +ACL_ALLOW_PRIVATE_NETWORK = "Access-Control-Allow-Private-Network" + +# CORS constants below +CORS_ALLOWED_HEADERS = [ + "authorization", + "cache-control", + "content-length", + "content-md5", + "content-type", + "etag", + "location", + "x-amz-acl", + "x-amz-content-sha256", + "x-amz-date", + "x-amz-request-id", + "x-amz-security-token", + "x-amz-tagging", + "x-amz-target", + "x-amz-user-agent", + "x-amz-version-id", + "x-amzn-requestid", + "x-localstack-target", + # for AWS SDK v3 + "amz-sdk-invocation-id", + "amz-sdk-request", + # for lambda + "x-amz-log-type", +] +if EXTRA_CORS_ALLOWED_HEADERS: + CORS_ALLOWED_HEADERS += EXTRA_CORS_ALLOWED_HEADERS.split(",") + +CORS_ALLOWED_METHODS = ("HEAD", "GET", "PUT", "POST", "DELETE", "OPTIONS", "PATCH") + +CORS_EXPOSE_HEADERS = ( + "etag", + "x-amz-version-id", + # for lambda + "x-amz-log-result", + "x-amz-executed-version", + "x-amz-function-error", +) +if EXTRA_CORS_EXPOSE_HEADERS: + CORS_EXPOSE_HEADERS += tuple(EXTRA_CORS_EXPOSE_HEADERS.split(",")) + +ALLOWED_CORS_RESPONSE_HEADERS = [ + "Access-Control-Allow-Origin", + "Access-Control-Allow-Methods", + "Access-Control-Allow-Headers", + "Access-Control-Max-Age", + "Access-Control-Allow-Credentials", + "Access-Control-Expose-Headers", +] + + +def _get_allowed_cors_internal_domains() -> Set[str]: + """ + Construct the list of allowed internal domains for CORS enforcement purposes + Defined as function to allow easier testing with monkeypatch of config values + """ + return {LOCALHOST, LOCALHOST_HOSTNAME, localstack_host().host} + + +_ALLOWED_INTERNAL_DOMAINS = _get_allowed_cors_internal_domains() + + +def _get_allowed_cors_ports() -> Set[int]: + """ + Construct the list of allowed ports for CORS enforcement purposes + Defined as function to allow easier testing with monkeypatch of config values + """ + return {host_and_port.port for host_and_port in config.GATEWAY_LISTEN} + + +_ALLOWED_INTERNAL_PORTS = _get_allowed_cors_ports() + + +def _get_allowed_cors_origins() -> List[str]: + """Construct the list of allowed origins for CORS enforcement purposes""" + result = [ + # allow access from Web app and localhost domains + "https://app.localstack.cloud", + "http://app.localstack.cloud", + "https://localhost", + "https://localhost.localstack.cloud", + # for requests from Electron apps, e.g., DynamoDB NoSQL Workbench + "file://", + ] + # Add allowed origins for localhost domains, using different protocol/port combinations. 
+    for protocol in {"http", "https"}:
+        for port in _get_allowed_cors_ports():
+            result.append(f"{protocol}://{LOCALHOST}:{port}")
+            result.append(f"{protocol}://{LOCALHOST_HOSTNAME}:{port}")
+
+    if config.EXTRA_CORS_ALLOWED_ORIGINS:
+        origins = config.EXTRA_CORS_ALLOWED_ORIGINS.split(",")
+        origins = [origin.strip() for origin in origins]
+        origins = [origin for origin in origins if origin != ""]
+        result += origins
+
+    return result
+
+
+# allowed origins used for CORS / CSRF checks
+ALLOWED_CORS_ORIGINS = _get_allowed_cors_origins()
+
+# allowed dynamic internal origin
+# must follow the same pattern with 3 matching groups, group 2 being the domain and group 3 the port
+# TODO: might need to match/group the scheme also?
+DYNAMIC_INTERNAL_ORIGINS = (
+    re.compile("(.*)\\.s3-website\\.(.[^:]*)(:[0-9]{2,5})?"),
+    re.compile("(.*)\\.cloudfront\\.(.[^:]*)(:[0-9]{2,5})?"),
+)
+
+
+def is_execute_api_call(context: RequestContext) -> bool:
+    path = context.request.path
+    return (
+        ".execute-api." in context.request.host
+        or (path.startswith("/restapis/") and f"/{PATH_USER_REQUEST}" in context.request.path)
+        or (path.startswith("/_aws/execute-api"))
+    )
+
+
+def should_enforce_self_managed_service(context: RequestContext) -> bool:
+    """
+    Some services are handling their CORS checks on their own (depending on config vars).
+
+    :param context: context of the request for which to check if the CORS checks should be executed here or in
+        the targeting service
+    :return: True if the CORS rules should be enforced here.
+    """
+    # allow only certain api calls without checking origin as those services self-manage CORS
+    if not config.DISABLE_CUSTOM_CORS_S3:
+        if context.service and context.service.service_name == "s3":
+            return False
+
+    if not config.DISABLE_CUSTOM_CORS_APIGATEWAY:
+        if is_execute_api_call(context):
+            return False
+
+    return True
+
+
+class CorsEnforcer(Handler):
+    """
+    Handler which enforces Cross-Origin-Resource-Sharing (CORS) rules.
+    This handler needs to be at the top of the handler chain to ensure that these security rules are enforced before
+    any commands are executed.
+    """
+
+    def __call__(self, chain: HandlerChain, context: RequestContext, response: Response) -> None:
+        if not should_enforce_self_managed_service(context):
+            return
+        if not config.DISABLE_CORS_CHECKS and not self.is_cors_origin_allowed(
+            context.request.headers
+        ):
+            LOG.info(
+                "Blocked CORS request from forbidden origin %s",
+                context.request.headers.get("origin") or context.request.headers.get("referer"),
+            )
+            response.status_code = 403
+            chain.terminate()
+        elif context.request.method == "OPTIONS" and not config.DISABLE_PREFLIGHT_PROCESSING:
+            # we want to return immediately here, but we do not want to skip the response handlers that add the
+            # CORS headers
+            response.status_code = 204
+            chain.stop()
+
+    @staticmethod
+    def is_cors_origin_allowed(headers: Headers) -> bool:
+        """Returns true if the origin is allowed to perform CORS requests, false otherwise."""
+        origin = headers.get("origin")
+        referer = headers.get("referer")
+        if origin:
+            return CorsEnforcer._is_in_allowed_origins(ALLOWED_CORS_ORIGINS, origin)
+        elif referer:
+            referer_uri = "{uri.scheme}://{uri.netloc}".format(uri=urlparse(referer))
+            return CorsEnforcer._is_in_allowed_origins(ALLOWED_CORS_ORIGINS, referer_uri)
+        # If both headers are not set, let it through (awscli etc.
do not send these headers) + return True + + @staticmethod + def _is_in_allowed_origins(allowed_origins: List[str], origin: str) -> bool: + """Returns true if the `origin` is in the `allowed_origins`.""" + for allowed_origin in allowed_origins: + if allowed_origin == "*" or origin == allowed_origin: + return True + + # performance wise, this is not very heavy because most of the regular requests will match above + # this would be executed mostly when rejecting or actually using content served by CloudFront or S3 website + for dynamic_origin in DYNAMIC_INTERNAL_ORIGINS: + match = dynamic_origin.match(origin) + if ( + match + and (match.group(2) in _ALLOWED_INTERNAL_DOMAINS) + and (not (port := match.group(3)) or int(port[1:]) in _ALLOWED_INTERNAL_PORTS) + ): + return True + + return False + + +class CorsResponseEnricher(Handler): + """ + ResponseHandler which adds Cross-Origin-Request-Sharing (CORS) headers (Access-Control-*) to the response. + """ + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + headers = response.headers + # Remove empty CORS headers + for header in ALLOWED_CORS_RESPONSE_HEADERS: + if headers.get(header) == "": + del headers[header] + + request_headers = context.request.headers + # CORS headers should only be returned when an Origin header is set. + # use DISABLE_CORS_HEADERS to disable returning CORS headers entirely (more restrictive security setting) + # also don't add CORS response headers if the service manages the CORS handling + if ( + "Origin" not in request_headers + or config.DISABLE_CORS_HEADERS + or not should_enforce_self_managed_service(context) + ): + return + + self.add_cors_headers(request_headers, response_headers=headers) + + @staticmethod + def add_cors_headers(request_headers: Headers, response_headers: Headers): + if ACL_ORIGIN not in response_headers: + response_headers[ACL_ORIGIN] = ( + request_headers["Origin"] + if request_headers.get("Origin") and not config.DISABLE_CORS_CHECKS + else "*" + ) + if "*" not in response_headers.get(ACL_ORIGIN, ""): + response_headers[ACL_CREDENTIALS] = "true" + if ACL_METHODS not in response_headers: + response_headers[ACL_METHODS] = ",".join(CORS_ALLOWED_METHODS) + if ACL_ALLOW_HEADERS not in response_headers: + requested_headers = response_headers.get(ACL_REQUEST_HEADERS, "") + requested_headers = re.split(r"[,\s]+", requested_headers) + CORS_ALLOWED_HEADERS + response_headers[ACL_ALLOW_HEADERS] = ",".join([h for h in requested_headers if h]) + if ACL_EXPOSE_HEADERS not in response_headers: + response_headers[ACL_EXPOSE_HEADERS] = ",".join(CORS_EXPOSE_HEADERS) + if ( + request_headers.get(ACL_REQUEST_PRIVATE_NETWORK) == "true" + and ACL_ALLOW_PRIVATE_NETWORK not in response_headers + ): + response_headers[ACL_ALLOW_PRIVATE_NETWORK] = "true" + + # we conditionally apply CORS headers depending on the Origin, so add it to `Vary` + response_headers["Vary"] = "Origin" diff --git a/localstack-core/localstack/aws/handlers/fallback.py b/localstack-core/localstack/aws/handlers/fallback.py new file mode 100644 index 0000000000000..17c30e1a2bbb7 --- /dev/null +++ b/localstack-core/localstack/aws/handlers/fallback.py @@ -0,0 +1,48 @@ +"""Handlers for fallback logic, e.g., populating empty requests or defaulting with default exceptions.""" + +import logging + +from rolo.gateway.handlers import EmptyResponseHandler +from werkzeug.exceptions import HTTPException + +from localstack.http import Response + +from ..api import RequestContext +from ..chain import ExceptionHandler, 
HandlerChain + +__all__ = ["EmptyResponseHandler", "InternalFailureHandler"] + +LOG = logging.getLogger(__name__) + + +class InternalFailureHandler(ExceptionHandler): + """ + Exception handler that returns a generic error message if there was an exception and there is no response set yet. + """ + + def __call__( + self, + chain: HandlerChain, + exception: Exception, + context: RequestContext, + response: Response, + ): + if response.data: + # response already set + return + + if isinstance(exception, HTTPException): + response.status_code = exception.code + response.headers.update(exception.get_headers()) + response.set_json({"error": exception.name, "message": exception.description}) + return + + LOG.debug("setting internal failure response for %s", exception) + response.status_code = 500 + response.set_json( + { + "error": "Unexpected exception", + "message": str(exception), + "type": str(exception.__class__.__name__), + } + ) diff --git a/localstack-core/localstack/aws/handlers/internal.py b/localstack-core/localstack/aws/handlers/internal.py new file mode 100644 index 0000000000000..ac89d0af1748e --- /dev/null +++ b/localstack-core/localstack/aws/handlers/internal.py @@ -0,0 +1,54 @@ +"""Handler for routing internal localstack resources under /_localstack.""" + +import logging + +from werkzeug.exceptions import NotFound + +from localstack import constants +from localstack.http import Response +from localstack.runtime import events +from localstack.services.internal import LocalstackResources + +from ..api import RequestContext +from ..chain import Handler, HandlerChain + +LOG = logging.getLogger(__name__) + + +class LocalstackResourceHandler(Handler): + """ + Adapter to serve LocalstackResources as a Handler. + """ + + resources: LocalstackResources + + def __init__(self, resources: LocalstackResources = None) -> None: + from localstack.services.internal import get_internal_apis + + self.resources = resources or get_internal_apis() + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + try: + # serve + response.update_from(self.resources.dispatch(context.request)) + chain.stop() + except NotFound: + path = context.request.path + if path.startswith(constants.INTERNAL_RESOURCE_PATH + "/"): + # only return 404 if we're accessing an internal resource, otherwise fall back to the other handlers + LOG.warning("Unable to find resource handler for path: %s", path) + chain.respond(404) + + +class RuntimeShutdownHandler(Handler): + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + if events.infra_stopped.is_set(): + chain.respond(503) + elif events.infra_stopping.is_set(): + # if we're in the process of shutting down the infrastructure, only accept internal calls, or calls to + # internal APIs + if context.is_internal_call: + return + if context.request.path.startswith("/_localstack"): + return + chain.respond(503) diff --git a/localstack-core/localstack/aws/handlers/internal_requests.py b/localstack-core/localstack/aws/handlers/internal_requests.py new file mode 100644 index 0000000000000..9e4b0c35fe77b --- /dev/null +++ b/localstack-core/localstack/aws/handlers/internal_requests.py @@ -0,0 +1,26 @@ +import logging +from types import MappingProxyType + +from localstack.http import Response + +from ..api import RequestContext +from ..chain import Handler, HandlerChain +from ..connect import INTERNAL_REQUEST_PARAMS_HEADER, load_dto + +LOG = logging.getLogger(__name__) + + +class InternalRequestParamsEnricher(Handler): + """ 
+ This handler sets the internal call DTO in the request context. + """ + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + if header := context.request.headers.get(INTERNAL_REQUEST_PARAMS_HEADER): + try: + dto = MappingProxyType(load_dto(header)) + except Exception as e: + LOG.exception("Error loading request parameters '%s', Error: %s", header, e) + return + + context.internal_request_params = dto diff --git a/localstack-core/localstack/aws/handlers/legacy.py b/localstack-core/localstack/aws/handlers/legacy.py new file mode 100644 index 0000000000000..fe8374c1e4b5b --- /dev/null +++ b/localstack-core/localstack/aws/handlers/legacy.py @@ -0,0 +1,33 @@ +"""Handlers for compatibility with legacy thread local storages.""" + +import logging + +from localstack import config +from localstack.http import Response + +from ..api import RequestContext +from ..chain import HandlerChain +from .routes import RouterHandler + +LOG = logging.getLogger(__name__) + + +def set_close_connection_header(_chain: HandlerChain, context: RequestContext, response: Response): + """This is a hack to work around performance issues with h11 and boto. See + https://github.com/localstack/localstack/issues/6557""" + if config.GATEWAY_SERVER != "hypercorn": + return + if conn := context.request.headers.get("Connection"): + if conn.lower() == "keep-alive": + # don't set Connection: close header if keep-alive is explicitly asked for + return + + if "Connection" not in response.headers: + response.headers["Connection"] = "close" + + +class EdgeRouterHandler(RouterHandler): + def __init__(self, respond_not_found=False) -> None: + from localstack.services.edge import ROUTER + + super().__init__(ROUTER, respond_not_found) diff --git a/localstack-core/localstack/aws/handlers/logging.py b/localstack-core/localstack/aws/handlers/logging.py new file mode 100644 index 0000000000000..2113b67fa5176 --- /dev/null +++ b/localstack-core/localstack/aws/handlers/logging.py @@ -0,0 +1,154 @@ +"""Handlers for logging.""" + +import logging +from functools import cached_property +from typing import Type + +from localstack.aws.api import RequestContext, ServiceException +from localstack.aws.chain import ExceptionHandler, HandlerChain +from localstack.http import Response +from localstack.http.request import restore_payload +from localstack.logging.format import AwsTraceLoggingFormatter, TraceLoggingFormatter +from localstack.logging.setup import create_default_handler + +LOG = logging.getLogger(__name__) + + +class ExceptionLogger(ExceptionHandler): + """ + Logs exceptions into a logger. 
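+
+    A minimal wiring sketch (assuming the ``Gateway`` from ``localstack.aws.gateway``):
+
+        gateway = Gateway(exception_handlers=[ExceptionLogger()])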
+ """ + + def __init__(self, logger=None): + self.logger = logger or LOG + + def __call__( + self, + chain: HandlerChain, + exception: Exception, + context: RequestContext, + response: Response, + ): + if isinstance(exception, ServiceException): + # We do not want to log an error/stacktrace if the handler is working as expected, but chooses to throw + # a service exception + return + if self.logger.isEnabledFor(level=logging.DEBUG): + self.logger.exception("exception during call chain", exc_info=exception) + else: + self.logger.error("exception during call chain: %s", exception) + + +class ResponseLogger: + def __call__(self, _: HandlerChain, context: RequestContext, response: Response): + if context.request.path == "/health" or context.request.path == "/_localstack/health": + # special case so the health check doesn't spam the logs + return + self._log(context, response) + + @cached_property + def aws_logger(self): + return self._prepare_logger( + logging.getLogger("localstack.request.aws"), formatter=AwsTraceLoggingFormatter + ) + + @cached_property + def http_logger(self): + return self._prepare_logger( + logging.getLogger("localstack.request.http"), formatter=TraceLoggingFormatter + ) + + @cached_property + def internal_aws_logger(self): + return self._prepare_logger( + logging.getLogger("localstack.request.internal.aws"), formatter=AwsTraceLoggingFormatter + ) + + @cached_property + def internal_http_logger(self): + return self._prepare_logger( + logging.getLogger("localstack.request.internal.http"), formatter=TraceLoggingFormatter + ) + + # make sure loggers are loaded after logging config is loaded + def _prepare_logger(self, logger: logging.Logger, formatter: Type): + if logger.isEnabledFor(logging.DEBUG): + logger.propagate = False + handler = create_default_handler(logger.level) + handler.setFormatter(formatter()) + logger.addHandler(handler) + return logger + + def _log(self, context: RequestContext, response: Response): + aws_logger = self.aws_logger + http_logger = self.http_logger + if context.is_internal_call: + aws_logger = self.internal_aws_logger + http_logger = self.internal_http_logger + if context.operation: + # log an AWS response + if context.service_exception: + aws_logger.info( + "AWS %s.%s => %d (%s)", + context.service.service_name, + context.operation.name, + response.status_code, + context.service_exception.code, + extra={ + # context + "account_id": context.account_id, + "region": context.region, + # request + "input_type": context.operation.input_shape.name + if context.operation.input_shape + else "Request", + "input": context.service_request, + "request_headers": dict(context.request.headers), + # response + "output_type": context.service_exception.code, + "output": context.service_exception.message, + "response_headers": dict(response.headers), + }, + ) + else: + aws_logger.info( + "AWS %s.%s => %s", + context.service.service_name, + context.operation.name, + response.status_code, + extra={ + # context + "account_id": context.account_id, + "region": context.region, + # request + "input_type": context.operation.input_shape.name + if context.operation.input_shape + else "Request", + "input": context.service_request, + "request_headers": dict(context.request.headers), + # response + "output_type": context.operation.output_shape.name + if context.operation.output_shape + else "Response", + "output": context.service_response, + "response_headers": dict(response.headers), + }, + ) + else: + # log any other HTTP response + http_logger.info( + "%s %s => %d", + 
context.request.method, + context.request.path, + response.status_code, + extra={ + # request + "input_type": "Request", + "input": restore_payload(context.request), + "request_headers": dict(context.request.headers), + # response + "output_type": "Response", + "output": "StreamingBody(unknown)" if response.is_streamed else response.data, + "response_headers": dict(response.headers), + }, + ) diff --git a/localstack/aws/handlers/metric_handler.py b/localstack-core/localstack/aws/handlers/metric_handler.py similarity index 96% rename from localstack/aws/handlers/metric_handler.py rename to localstack-core/localstack/aws/handlers/metric_handler.py index 6359a21bfd9d4..6a1ad8f16b982 100644 --- a/localstack/aws/handlers/metric_handler.py +++ b/localstack-core/localstack/aws/handlers/metric_handler.py @@ -5,7 +5,6 @@ from localstack.aws.api import RequestContext from localstack.aws.chain import HandlerChain from localstack.http import Response -from localstack.utils.aws.aws_stack import is_internal_call_context LOG = logging.getLogger(__name__) @@ -175,7 +174,6 @@ def update_metric_collection( if not config.is_collect_metrics_mode() or not context.service_operation: return - is_internal = is_internal_call_context(context.request.headers) item = self._get_metric_handler_item_for_context(context) # parameters might get changed when dispatched to the service - we use the params stored in @@ -193,7 +191,7 @@ def update_metric_collection( exception=context.service_exception.__class__.__name__ if context.service_exception else "", - origin="internal" if is_internal else "external", + origin="internal" if context.is_internal_call else "external", ) # refrain from adding duplicates if metric not in MetricHandler.metric_data: diff --git a/localstack-core/localstack/aws/handlers/presigned_url.py b/localstack-core/localstack/aws/handlers/presigned_url.py new file mode 100644 index 0000000000000..153aef1521bd9 --- /dev/null +++ b/localstack-core/localstack/aws/handlers/presigned_url.py @@ -0,0 +1,23 @@ +from localstack.http import Response +from localstack.services.s3.presigned_url import S3PreSignedURLRequestHandler + +from ..api import RequestContext +from ..chain import Handler, HandlerChain + + +class ParsePreSignedUrlRequest(Handler): + def __init__(self): + self.pre_signed_handlers: dict[str, Handler] = { + "s3": S3PreSignedURLRequestHandler(), + } + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + # TODO: handle other services pre-signed URL (CloudFront) + if not context.service: + return + + # we are handling the pre-signed URL before parsing, because S3 will append typical headers parameters to + # the querystring when generating a pre-signed URL. 
This handler will move them back into the headers before + # the parsing of the request happens + if handler := self.pre_signed_handlers.get(context.service.service_name): + handler(chain, context, response) diff --git a/localstack/aws/handlers/proxy.py b/localstack-core/localstack/aws/handlers/proxy.py similarity index 100% rename from localstack/aws/handlers/proxy.py rename to localstack-core/localstack/aws/handlers/proxy.py diff --git a/localstack-core/localstack/aws/handlers/region.py b/localstack-core/localstack/aws/handlers/region.py new file mode 100644 index 0000000000000..492539e0c6e9c --- /dev/null +++ b/localstack-core/localstack/aws/handlers/region.py @@ -0,0 +1,103 @@ +import abc +import logging +import re +from functools import cached_property + +from boto3.session import Session + +from localstack.http import Request, Response +from localstack.utils.aws.arns import get_partition + +from ..api import RequestContext +from ..chain import Handler, HandlerChain + +LOG = logging.getLogger(__name__) + + +class RegionContextEnricher(Handler): + """ + A handler that sets the AWS region of the request in the RequestContext. + """ + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + context.region = self.get_region(context.request) + context.partition = get_partition(context.region) + + @staticmethod + def get_region(request: Request) -> str: + from localstack.utils.aws.request_context import extract_region_from_headers + + return extract_region_from_headers(request.headers) + + +class RegionRewriterStrategy(abc.ABC): + @abc.abstractmethod + def apply(self, context: RequestContext): + """ + Apply the region rewriter to the request context + :param context: Request Context + """ + pass + + +class DefaultRegionRewriterStrategy(RegionRewriterStrategy): + """ + If a region is not known, override it to "us-east-1" + """ + + default_region = "us-east-1" + + def apply(self, context: RequestContext): + if not context.region: + return + + if context.region not in self.available_regions: + LOG.warning( + "Region '%s' is not available. Resetting the region to 'us-east-1'. " + "Please consider using a region in the 'aws' partition to avoid any unexpected behavior. " + "Available regions: %s", + context.region, + self.available_regions, + ) + context.region = self.default_region + context.partition = "aws" + self.rewrite_auth_header(context, self.default_region) + + def rewrite_auth_header(self, context: RequestContext, region: str): + """ + Rewrites the `Authorization` header to reflect the specified region. + :param context: Request context + :param region: Region to rewrite the `Authorization` header to. + """ + auth_header = context.request.headers.get("Authorization") + + if auth_header: + regex = r"Credential=([^/]+)/([^/]+)/([^/]+)/" + auth_header = re.sub(regex, rf"Credential=\1/\2/{region}/", auth_header) + context.request.headers["Authorization"] = auth_header + + @cached_property + def available_regions(self) -> list[str]: + """ + Returns a list of supported regions. + :return: List of regions in the `aws` partition. + """ + # We cannot cache the session here, as it is not thread safe. As the entire method is cached, this should not + # have a significant impact. 
+        # using s3 as "everywhere available" service, as it is usually supported in all regions
+        # the S3 image also removes other botocore specifications, so it is the easiest choice
+        return Session().get_available_regions("s3", "aws")
+
+
+class RegionRewriter(Handler):
+    """
+    A handler that ensures the region is in the list of allowed regions
+    """
+
+    region_rewriter_strategy: RegionRewriterStrategy
+
+    def __init__(self):
+        self.region_rewriter_strategy = DefaultRegionRewriterStrategy()
+
+    def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):
+        self.region_rewriter_strategy.apply(context)
diff --git a/localstack-core/localstack/aws/handlers/routes.py b/localstack-core/localstack/aws/handlers/routes.py
new file mode 100644
index 0000000000000..57114ff569d33
--- /dev/null
+++ b/localstack-core/localstack/aws/handlers/routes.py
@@ -0,0 +1,5 @@
+from rolo.gateway.handlers import RouterHandler
+
+__all__ = [
+    "RouterHandler",
+]
diff --git a/localstack/aws/handlers/service.py b/localstack-core/localstack/aws/handlers/service.py
similarity index 86%
rename from localstack/aws/handlers/service.py
rename to localstack-core/localstack/aws/handlers/service.py
index 4c5f09d935e9a..edef0699c3539 100644
--- a/localstack/aws/handlers/service.py
+++ b/localstack-core/localstack/aws/handlers/service.py
@@ -1,14 +1,15 @@
 """A set of common handlers to parse and route AWS service requests."""
+
 import logging
 import traceback
 from collections import defaultdict
-from functools import lru_cache
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Union
 
 from botocore.model import OperationModel, ServiceModel
 
 from localstack import config
 from localstack.http import Response
+from localstack.utils.coverage_docs import get_coverage_link_for_service
 
 from ..api import CommonServiceException, RequestContext, ServiceException
 from ..api.core import ServiceOperation
@@ -16,9 +17,8 @@
 from ..client import parse_response, parse_service_exception
 from ..protocol.parser import RequestParser, create_parser
 from ..protocol.serializer import create_serializer
-from ..protocol.service_router import determine_aws_service_name
+from ..protocol.service_router import determine_aws_service_model
 from ..skeleton import Skeleton, create_skeleton
-from ..spec import load_service
 
 LOG = logging.getLogger(__name__)
 
@@ -29,18 +29,18 @@ class ServiceNameParser(Handler):
     """
 
     def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):
-        service = determine_aws_service_name(context.request)
-
-        if not service:
+        # Some early handlers can already determine the AWS service the request is directed to (the S3 CORS handler for
+        # example). If it is already set, we can skip the parsing of the request. It is very important for S3, because
+        # parsing the request will consume the data stream and prevent streaming.
+ if context.service: return - context.service = self.get_service_model(service) - headers = context.request.headers - headers["x-localstack-tgt-api"] = service # TODO: probably no longer needed + service_model = determine_aws_service_model(context.request) + + if not service_model: + return - @lru_cache() - def get_service_model(self, service: str) -> ServiceModel: - return load_service(service) + context.service = service_model class ServiceRequestParser(Handler): @@ -62,17 +62,8 @@ def __call__(self, chain: HandlerChain, context: RequestContext, response: Respo return self.parse_and_enrich(context) - def get_parser(self, service: ServiceModel): - name = service.service_name - - if name in self.parsers: - return self.parsers[name] - - self.parsers[name] = create_parser(service) - return self.parsers[name] - def parse_and_enrich(self, context: RequestContext): - parser = self.get_parser(context.service) + parser = create_parser(context.service) operation, instance = parser.parse(context.request) # enrich context @@ -127,10 +118,7 @@ def add_handler(self, key: ServiceOperation, handler: Handler): self.handlers[key] = handler - def add_provider(self, provider: Any, service: Optional[Union[str, ServiceModel]] = None): - if not service: - service = provider.service - + def add_provider(self, provider: Any, service: Union[str, ServiceModel]): self.add_skeleton(create_skeleton(service, provider)) def add_skeleton(self, skeleton: Skeleton): @@ -150,7 +138,9 @@ def create_not_implemented_response(self, context): message = f"no handler for operation '{operation_name}' on service '{service_name}'" error = CommonServiceException("InternalFailure", message, status_code=501) serializer = create_serializer(context.service) - return serializer.serialize_error_to_response(error, operation, context.request.headers) + return serializer.serialize_error_to_response( + error, operation, context.request.headers, context.request_id + ) class ServiceExceptionSerializer(ExceptionHandler): @@ -184,10 +174,8 @@ def create_exception_response(self, exception: Exception, context: RequestContex if operation and isinstance(exception, NotImplementedError): action_name = operation.name - message = ( - f"API action '{action_name}' for service '{service_name}' not yet implemented or pro feature" - f" - check https://docs.localstack.cloud/user-guide/aws/feature-coverage for further information" - ) + exception_message: str | None = exception.args[0] if exception.args else None + message = exception_message or get_coverage_link_for_service(service_name, action_name) LOG.info(message) error = CommonServiceException("InternalFailure", message, status_code=501) context.service_exception = error @@ -225,7 +213,9 @@ def create_exception_response(self, exception: Exception, context: RequestContex context.service_exception = error serializer = create_serializer(context.service) # TODO: serializer cache - return serializer.serialize_error_to_response(error, operation, context.request.headers) + return serializer.serialize_error_to_response( + error, operation, context.request.headers, context.request_id + ) class ServiceResponseParser(Handler): @@ -249,9 +239,7 @@ def __call__(self, chain: HandlerChain, context: RequestContext, response: Respo if exception := context.service_exception: if isinstance(exception, ServiceException): - try: - exception.code - except AttributeError: + if not hasattr(exception, "code"): # FIXME: we should set the exception attributes in the scaffold when we generate the exceptions. 
# this is a workaround for now, since we are not doing that yet, and the attributes may be unset. self._set_exception_attributes(context.operation, exception) diff --git a/localstack-core/localstack/aws/handlers/service_plugin.py b/localstack-core/localstack/aws/handlers/service_plugin.py new file mode 100644 index 0000000000000..c28bbb7e341c1 --- /dev/null +++ b/localstack-core/localstack/aws/handlers/service_plugin.py @@ -0,0 +1,89 @@ +"""Handlers extending the base logic of service handlers with lazy-loading and plugin mechanisms.""" + +import logging +import threading + +from localstack.http import Response +from localstack.services.plugins import Service, ServiceManager +from localstack.utils.sync import SynchronizedDefaultDict + +from ...utils.bootstrap import is_api_enabled +from ..api import RequestContext +from ..chain import Handler, HandlerChain +from ..protocol.service_router import determine_aws_service_model_for_data_plane +from .service import ServiceRequestRouter + +LOG = logging.getLogger(__name__) + + +class ServiceLoader(Handler): + def __init__( + self, service_manager: ServiceManager, service_request_router: ServiceRequestRouter + ): + """ + This handler encapsulates service lazy-loading. It loads services from the given ServiceManager and uses them + to populate the given ServiceRequestRouter. + + :param service_manager: the service manager used to load services + :param service_request_router: the service request router to populate + """ + self.service_manager = service_manager + self.service_request_router = service_request_router + self.service_locks = SynchronizedDefaultDict(threading.RLock) + self.loaded_services = set() + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + return self.require_service(chain, context, response) + + def require_service(self, _: HandlerChain, context: RequestContext, response: Response): + if not context.service: + return + + service_name: str = context.service.service_name + if service_name in self.loaded_services: + return + + if not self.service_manager.exists(service_name): + raise NotImplementedError + elif not is_api_enabled(service_name): + raise NotImplementedError( + f"Service '{service_name}' is not enabled. Please check your 'SERVICES' configuration variable." + ) + + request_router = self.service_request_router + + # Ensure the Service is loaded and set to ServiceState.RUNNING if not in an erroneous state. + service_plugin: Service = self.service_manager.require(service_name) + + with self.service_locks[context.service.service_name]: + # try again to avoid race conditions + if service_name in self.loaded_services: + return + self.loaded_services.add(service_name) + if isinstance(service_plugin, Service): + request_router.add_skeleton(service_plugin.skeleton) + else: + LOG.warning( + "found plugin for '%s', but cannot attach service plugin of type '%s'", + service_name, + type(service_plugin), + ) + + +class ServiceLoaderForDataPlane(Handler): + """ + Specific lightweight service loader that loads services based only on hostname indicators. This allows + us to correctly load services when things like lambda function URLs or APIGW REST APIs are called + before the services were actually loaded. 
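+
+    Illustrative flow (hostname is hypothetical):
+
+        # request.host == "abc123.lambda-url.us-east-1.localhost.localstack.cloud"
+        service = determine_aws_service_model_for_data_plane(request)
+        service.service_name  # -> "lambda"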
+ """ + + def __init__(self, service_loader: ServiceLoader): + self.service_loader = service_loader + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + if context.service: + return + + if service := determine_aws_service_model_for_data_plane(context.request): + context.service = service + self.service_loader.require_service(chain, context, response) diff --git a/localstack-core/localstack/aws/handlers/tracing.py b/localstack-core/localstack/aws/handlers/tracing.py new file mode 100644 index 0000000000000..eaea78b8c15d3 --- /dev/null +++ b/localstack-core/localstack/aws/handlers/tracing.py @@ -0,0 +1,23 @@ +from localstack.aws.api import RequestContext +from localstack.aws.chain import Handler, HandlerChain +from localstack.http import Response +from localstack.utils.xray.trace_header import TraceHeader + + +class TraceContextParser(Handler): + """ + A handler that parses trace context headers, including: + * AWS X-Ray trace header: https://docs.aws.amazon.com/xray/latest/devguide/xray-concepts.html#xray-concepts-tracingheader + X-Amzn-Trace-Id: Root=1-5759e988-bd862e3fe1be46a994272793;Sampled=1;Lineage=a87bd80c:1|68fd508a:5|c512fbe3:2 + """ + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + # The Werkzeug headers data structure handles case-insensitive HTTP header matching (verified manually) + trace_header_str = context.request.headers.get("X-Amzn-Trace-Id") + # The minimum X-Ray header only contains a Root trace id, missing Sampled and Parent + aws_trace_header = TraceHeader.from_header_str(trace_header_str).ensure_root_exists() + # Naming aws_trace_header inspired by AWSTraceHeader convention for SQS: + # https://docs.aws.amazon.com/xray/latest/devguide/xray-services-sqs.html + context.trace_context["aws_trace_header"] = aws_trace_header + # NOTE: X-Ray sampling might require service-specific decisions: + # https://docs.aws.amazon.com/xray/latest/devguide/xray-console-sampling.html diff --git a/localstack-core/localstack/aws/handlers/validation.py b/localstack-core/localstack/aws/handlers/validation.py new file mode 100644 index 0000000000000..ebfe1da064358 --- /dev/null +++ b/localstack-core/localstack/aws/handlers/validation.py @@ -0,0 +1,100 @@ +""" +Handlers for validating request and response schema against OpenAPI specs. 
+""" + +import logging + +from openapi_core import OpenAPI +from openapi_core.contrib.werkzeug import WerkzeugOpenAPIRequest, WerkzeugOpenAPIResponse +from openapi_core.exceptions import OpenAPIError +from openapi_core.validation.request.exceptions import ( + RequestValidationError, +) +from openapi_core.validation.response.exceptions import ResponseValidationError +from plux import PluginManager + +from localstack import config +from localstack.aws.api import RequestContext +from localstack.aws.chain import Handler, HandlerChain +from localstack.constants import INTERNAL_RESOURCE_PATH +from localstack.http import Response + +LOG = logging.getLogger(__name__) + + +class OpenAPIValidator(Handler): + open_apis: list["OpenAPI"] + + def __init__(self) -> None: + self._load_specs() + + def _load_specs(self) -> None: + """Load the openapi spec plugins iff at least one between request and response validation is set.""" + if not (config.OPENAPI_VALIDATE_REQUEST or config.OPENAPI_VALIDATE_RESPONSE): + return + specs = PluginManager("localstack.openapi.spec").load_all() + self.open_apis = [] + for spec in specs: + self.open_apis.append(OpenAPI.from_path(spec.spec_path)) + + +class OpenAPIRequestValidator(OpenAPIValidator): + """ + Validates the requests to the LocalStack public endpoints (the ones with a _localstack or _aws prefix) against + a OpenAPI specification. + """ + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + if not config.OPENAPI_VALIDATE_REQUEST: + return + + hasattr(self, "open_apis") or self._load_specs() + path = context.request.path + + if path.startswith(f"{INTERNAL_RESOURCE_PATH}/") or path.startswith("/_aws/"): + for openapi in self.open_apis: + try: + openapi.validate_request(WerkzeugOpenAPIRequest(context.request)) + # We stop the handler at the first succeeded validation, as the other spec might not even specify + # this path. + break + except RequestValidationError as e: + # Note: in this handler we only check validation errors, e.g., wrong body, missing required. + response.status_code = 400 + response.set_json({"error": "Bad Request", "message": str(e)}) + chain.stop() + except OpenAPIError: + # Other errors can be raised when validating a request against the OpenAPI specification. + # The most common are: ServerNotFound, OperationNotFound, or PathNotFound. + # We explicitly do not check any other error but RequestValidationError ones. + # We shallow the exception to avoid excessive logging (e.g., a lot of ServerNotFound), as the only + # purpose of this handler is to check for request validation errors. + pass + + +class OpenAPIResponseValidator(OpenAPIValidator): + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + # The use of this flag is intended for test only. Eventual errors are due to LocalStack implementation and not + # to improper user usage of the endpoints. 
+        if not config.OPENAPI_VALIDATE_RESPONSE:
+            return
+
+        hasattr(self, "open_apis") or self._load_specs()
+        path = context.request.path
+
+        if path.startswith(f"{INTERNAL_RESOURCE_PATH}/") or path.startswith("/_aws/"):
+            for openapi in self.open_apis:
+                try:
+                    openapi.validate_response(
+                        WerkzeugOpenAPIRequest(context.request),
+                        WerkzeugOpenAPIResponse(response),
+                    )
+                    break
+                except ResponseValidationError as exc:
+                    LOG.error("Response validation failed for %s: %s", path, exc)
+                    response.status_code = 500
+                    response.set_json({"error": exc.__class__.__name__, "message": str(exc)})
+                    chain.terminate()
+                except OpenAPIError:
+                    # Same logic from the request validator applies here.
+                    pass
diff --git a/localstack/aws/mocking.py b/localstack-core/localstack/aws/mocking.py
similarity index 99%
rename from localstack/aws/mocking.py
rename to localstack-core/localstack/aws/mocking.py
index 3436bfb1a95af..2231b76eddbfb
--- a/localstack/aws/mocking.py
+++ b/localstack-core/localstack/aws/mocking.py
@@ -162,6 +162,7 @@ def sanitize_arn_pattern(pattern: str) -> str:
         "arn:aws.*:*",
         "^arn:aws.*",
         "^arn:.*",
+        "arn:\\S+",
         ".*\\S.*",
         "^[A-Za-z0-9:\\/_-]*$",
         "^arn[\\/\\:\\-\\_\\.a-zA-Z0-9]+$",
diff --git a/localstack-core/localstack/aws/patches.py b/localstack-core/localstack/aws/patches.py
new file mode 100644
index 0000000000000..f73067ad7f878
--- /dev/null
+++ b/localstack-core/localstack/aws/patches.py
@@ -0,0 +1,55 @@
+from importlib.util import find_spec
+
+from localstack.runtime import hooks
+from localstack.utils.patch import patch
+
+
+def patch_moto_instance_tracker_meta():
+    """
+    Avoid instance collection for moto dashboard. Introduced in
+    https://github.com/localstack/localstack/pull/3250.
+    """
+    from moto.core.base_backend import InstanceTrackerMeta
+    from moto.core.common_models import BaseModel
+
+    if hasattr(InstanceTrackerMeta, "_ls_patch_applied"):
+        return  # ensure we're not applying the patch multiple times
+
+    @patch(InstanceTrackerMeta.__new__, pass_target=False)
+    def new_instance(meta, name, bases, dct):
+        cls = super(InstanceTrackerMeta, meta).__new__(meta, name, bases, dct)
+        if name == "BaseModel":
+            return cls
+        cls.instances = []
+        return cls
+
+    @patch(BaseModel.__new__, pass_target=False)
+    def new_basemodel(cls, *args, **kwargs):
+        # skip cls.instances.append(..) which is done by the original/upstream constructor
+        instance = super(BaseModel, cls).__new__(cls)
+        return instance
+
+    InstanceTrackerMeta._ls_patch_applied = True
+
+
+def patch_moto_iam_config():
+    """
+    Enable loading AWS IAM managed policies in moto by default. Introduced in
+    https://github.com/localstack/localstack/pull/10112.
+    """
+    from moto.core.config import default_user_config
+
+    default_user_config["iam"]["load_aws_managed_policies"] = True
+
+
+# TODO: this could be improved by introducing a hook specifically for applying global patches that is run
+#  before any other code is imported.
+@hooks.on_infra_start(priority=100)
+def apply_aws_runtime_patches():
+    """
+    Runtime patches specific to the AWS emulator.
+    """
+    if find_spec("moto"):
+        # only load patches when moto is importable
+        patch_moto_iam_config()
+        patch_moto_instance_tracker_meta()
diff --git a/localstack/aws/protocol/__init__.py b/localstack-core/localstack/aws/protocol/__init__.py
similarity index 100%
rename from localstack/aws/protocol/__init__.py
rename to localstack-core/localstack/aws/protocol/__init__.py
diff --git a/localstack-core/localstack/aws/protocol/op_router.py b/localstack-core/localstack/aws/protocol/op_router.py
new file mode 100644
index 0000000000000..f4c5f1019aa02
--- /dev/null
+++ b/localstack-core/localstack/aws/protocol/op_router.py
@@ -0,0 +1,264 @@
+from collections import defaultdict
+from typing import Any, Dict, List, Mapping, NamedTuple, Optional, Tuple
+from urllib.parse import parse_qs, unquote
+
+from botocore.model import OperationModel, ServiceModel, StructureShape
+from werkzeug.datastructures import Headers, MultiDict
+from werkzeug.exceptions import MethodNotAllowed, NotFound
+from werkzeug.routing import Map, MapAdapter
+
+from localstack.aws.protocol.routing import (
+    StrictMethodRule,
+    path_param_regex,
+    post_process_arg_name,
+    transform_path_params_to_rule_vars,
+)
+from localstack.http import Request
+from localstack.http.request import get_raw_path
+from localstack.http.router import GreedyPathConverter
+
+
+class _HttpOperation(NamedTuple):
+    """Useful intermediary representation of the 'http' block of an operation to make code cleaner"""
+
+    operation: OperationModel
+    path: str
+    method: str
+    query_args: Mapping[str, List[str]]
+    header_args: List[str]
+    deprecated: bool
+
+    @staticmethod
+    def from_operation(op: OperationModel) -> "_HttpOperation":
+        # botocore >= 1.28 might modify the internal model (specifically for S3).
+        # It will modify the request URI to strip the bucket name from the path and set the original value at
+        # "authPath".
+        # Since botocore 1.31.2, botocore will strip the query from the `authPath`;
+        # we need to add it back from the `requestUri` field.
+        # Use authPath if set, otherwise use the regular requestUri.
+        if auth_path := op.http.get("authPath"):
+            path, sep, query = op.http.get("requestUri", "").partition("?")
+            uri = f"{auth_path.rstrip('/')}{sep}{query}"
+        else:
+            uri = op.http.get("requestUri")
+
+        method = op.http.get("method")
+        deprecated = op.deprecated
+
+        # requestUris can contain mandatory query args (f.e. /apikeys?mode=import)
+        path_query = uri.split("?")
+        path = path_query[0]
+        header_args = []
+        query_args: Dict[str, List[str]] = {}
+
+        if len(path_query) > 1:
+            # parse the query args of the request URI (they are mandatory)
+            query_args: Dict[str, List[str]] = parse_qs(path_query[1], keep_blank_values=True)
+            # for mandatory keys without values, keep an empty list (instead of [''] - the result of parse_qs)
+            query_args = {k: list(filter(None, v)) for k, v in query_args.items()}
+
+        # find the required header and query parameters of the input shape
+        input_shape = op.input_shape
+        if isinstance(input_shape, StructureShape):
+            for required_member in input_shape.required_members:
+                member_shape = input_shape.members[required_member]
+                location = member_shape.serialization.get("location")
+                if location is not None:
+                    if location == "header":
+                        header_name = member_shape.serialization.get("name")
+                        header_args.append(header_name)
+                    elif location == "querystring":
+                        query_name = member_shape.serialization.get("name")
+                        # do not overwrite potentially already existing query params with specific values
+                        if query_name not in query_args:
+                            # an empty list defines a required query param only needs to be present
+                            # (no specific value will be enforced when matching)
+                            query_args[query_name] = []
+
+        return _HttpOperation(op, path, method, query_args, header_args, deprecated)
+
+
+class _RequiredArgsRule:
+    """
+    Specific Rule implementation which checks whether certain required header and query parameters are matched by
+    a specific request.
+    """
+
+    endpoint: Any
+    required_query_args: Optional[Mapping[str, List[Any]]]
+    required_header_args: List[str]
+    match_score: int
+
+    def __init__(self, operation: _HttpOperation) -> None:
+        super().__init__()
+        self.endpoint = operation.operation
+        self.required_query_args = operation.query_args or {}
+        self.required_header_args = operation.header_args or []
+        self.match_score = (
+            10 + 10 * len(self.required_query_args) + 10 * len(self.required_header_args)
+        )
+        # If this operation is deprecated, the score is a bit lower (but not as much as a matching required arg)
+        if operation.deprecated:
+            self.match_score -= 5
+
+    def matches(self, query_args: MultiDict, headers: Headers) -> bool:
+        """
+        Returns true if the given query args and the given headers of a request match the required query args and
+        headers of this rule.
+        :param query_args: query arguments of the incoming request
+        :param headers: headers of the incoming request
+        :return: True if the query args and headers match the required args of this rule
+        """
+        if self.required_query_args:
+            for key, values in self.required_query_args.items():
+                if key not in query_args:
+                    return False
+                # if a required query arg also has a list of required values set, the values need to match as well
+                if values:
+                    query_arg_values = query_args.getlist(key)
+                    for value in values:
+                        if value not in query_arg_values:
+                            return False
+
+        if self.required_header_args:
+            for key in self.required_header_args:
+                if key not in headers:
+                    return False
+
+        return True
+
+
+class _RequestMatchingRule(StrictMethodRule):
+    """
+    A Werkzeug Rule extension which initially acts as a normal rule (i.e. matches a path and method).
+
+    This rule matches if one of its sub-rules _might_ match.
+    It cannot be assumed that one of the fine-grained rules matches, just because this rule initially matches.
+    If this rule matches, the caller _must_ call `match_request` in order to find the actual fine-grained matching rule.
+    The result of `match_request` is only meaningful if this wrapping rule also matches.
+    """
+
+    def __init__(
+        self, string: str, operations: List[_HttpOperation], method: str, **kwargs
+    ) -> None:
+        super().__init__(string=string, method=method, **kwargs)
+        # Create a rule which checks all required arguments (not only the path and method)
+        rules = [_RequiredArgsRule(op) for op in operations]
+        # Sort the rules descending based on their rule score
+        # (i.e. the first matching rule will have the highest score)
+        self.rules = sorted(rules, key=lambda rule: rule.match_score, reverse=True)
+
+    def match_request(self, request: Request) -> _RequiredArgsRule:
+        """
+        Function which needs to be called by a caller if the _RequestMatchingRule already matched using Werkzeug's
+        default matching mechanism.
+
+        :param request: to perform the fine-grained matching on
+        :return: matching fine-grained rule
+        :raises: NotFound if none of the fine-grained rules matches
+        """
+        for rule in self.rules:
+            if rule.matches(request.args, request.headers):
+                return rule
+        raise NotFound()
+
+
+def _create_service_map(service: ServiceModel) -> Map:
+    """
+    Creates a Werkzeug Map object with all rules necessary for the specific service.
+    :param service: botocore service model to create the rules for
+    :return: a Map instance which is used to perform the in-service operation routing
+    """
+    ops = [service.operation_model(op_name) for op_name in service.operation_names]
+
+    rules = []
+
+    # group all operations by their path and method
+    path_index: Dict[Tuple[str, str], List[_HttpOperation]] = defaultdict(list)
+    for op in ops:
+        http_op = _HttpOperation.from_operation(op)
+        path_index[(http_op.path, http_op.method)].append(http_op)
+
+    # create a matching rule for each (path, method) combination
+    for (path, method), ops in path_index.items():
+        # translate the requestUri to a Werkzeug rule string
+        rule_string = path_param_regex.sub(transform_path_params_to_rule_vars, path)
+
+        if len(ops) == 1:
+            # if there is only a single operation for a (path, method) combination,
+            # the default Werkzeug rule can be used directly (this is the case for most rules)
+            op = ops[0]
+            rules.append(StrictMethodRule(string=rule_string, method=method, endpoint=op.operation))  # type: ignore
+        else:
+            # if there is an ambiguity with only the (path, method) combination,
+            # a custom rule - which can use additional request metadata - needs to be used
+            rules.append(_RequestMatchingRule(string=rule_string, method=method, operations=ops))
+
+    return Map(
+        rules=rules,
+        # don't be strict about trailing slashes when matching
+        strict_slashes=False,
+        # we can't really use werkzeug's merge-slashes since it uses HTTP redirects to solve it
+        merge_slashes=False,
+        # get service-specific converters
+        converters={"path": GreedyPathConverter},
+    )
+
+
+class RestServiceOperationRouter:
+    """
+    A router implementation which abstracts the (quite complex) routing of incoming HTTP requests to a specific
+    operation within a "REST" service (rest-xml, rest-json).
+    """
+
+    _map: Map
+
+    def __init__(self, service: ServiceModel):
+        self._map = _create_service_map(service)
+
+    def match(self, request: Request) -> Tuple[OperationModel, Mapping[str, Any]]:
+        """
+        Matches the given request to the operation it targets (or raises an exception if no operation matches).
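+
+        Illustrative usage (service and operation names are examples):
+
+            router = RestServiceOperationRouter(load_service("s3"))
+            operation, uri_params = router.match(request)
+            # operation.name == "GetObject", uri_params == {"Bucket": "mybucket", "Key": "my/key"}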
+
+        :param request: The request of which the targeting operation needs to be found
+        :return: A tuple with the matched operation and the (already parsed) path params
+        :raises: Werkzeug's NotFound exception in case the given request does not match any operation
+        """
+
+        # bind the map to get the actual matcher
+        matcher: MapAdapter = self._map.bind(request.host)
+
+        # perform the matching
+        try:
+            # some services (at least S3) allow OPTIONS requests (f.e. for CORS preflight requests) without them
+            # being specified. The specs do _not_ contain any operations on OPTIONS methods at all.
+            # avoid matching issues for preflight requests by matching against a similar GET request instead.
+            method = request.method if request.method != "OPTIONS" else "GET"
+
+            path = get_raw_path(request)
+            # trailing slashes are ignored in smithy matching,
+            # see https://smithy.io/1.0/spec/core/http-traits.html#literal-character-sequences and this
+            # makes sure that, e.g., in s3, `GET /mybucket/` is matched to `GetBucket` (and its associated rule)
+            # and not to `GetObject`.
+            path = path.rstrip("/")
+
+            rule, args = matcher.match(path, method=method, return_rule=True)
+        except MethodNotAllowed as e:
+            # MethodNotAllowed (405) exception is raised if a path is matching, but the method does not.
+            # Our router handles this as a 404.
+            raise NotFound() from e
+
+        # if the found rule is a _RequestMatchingRule, the multi rule matching needs to be invoked to perform the
+        # fine-grained matching based on the whole request
+        if isinstance(rule, _RequestMatchingRule):
+            rule = rule.match_request(request)
+
+        # post process the arg keys and values
+        # - the path param keys need to be "un-sanitized", i.e. sanitized rule variable names need to be reverted
+        # - the path param values might still be url-encoded
+        args = {post_process_arg_name(k): unquote(v) for k, v in args.items()}
+
+        # extract the operation model from the rule
+        operation: OperationModel = rule.endpoint
+
+        return operation, args
diff --git a/localstack/aws/protocol/parser.py b/localstack-core/localstack/aws/protocol/parser.py
similarity index 86%
rename from localstack/aws/protocol/parser.py
rename to localstack-core/localstack/aws/protocol/parser.py
index 5aef7ce6d48de..96fd3d16cf0aa
--- a/localstack/aws/protocol/parser.py
+++ b/localstack-core/localstack/aws/protocol/parser.py
@@ -61,6 +61,7 @@
 service's action which the request was aiming for, as well as the parsed
 parameters for the service's function invocation.
""" + import abc import base64 import datetime @@ -68,11 +69,9 @@ import re from abc import ABC from email.utils import parsedate_to_datetime -from typing import Any, Dict, List, Mapping, Optional, Tuple, Union -from typing.io import IO +from typing import IO, Any, Dict, List, Mapping, Optional, Tuple, Union from xml.etree import ElementTree as ETree -import cbor2 import dateutil.parser from botocore.model import ( ListShape, @@ -83,11 +82,13 @@ Shape, StructureShape, ) + +# cbor2: explicitly load from private _decoder module to avoid using the (non-patched) C-version +from cbor2._decoder import loads as cbor2_loads from werkzeug.exceptions import BadRequest, NotFound -from localstack.aws.api import HttpRequest from localstack.aws.protocol.op_router import RestServiceOperationRouter -from localstack.config import LEGACY_S3_PROVIDER +from localstack.http import Request def _text_content(func): @@ -104,7 +105,7 @@ def _text_content(func): def _get_text_content( self, - request: HttpRequest, + request: Request, shape: Shape, node_or_string: Union[ETree.Element, str], uri_params: Mapping[str, Any] = None, @@ -202,7 +203,7 @@ def __init__(self, service: ServiceModel) -> None: self.service = service @_handle_exceptions - def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]: + def parse(self, request: Request) -> Tuple[OperationModel, Any]: """ Determines which operation the request was aiming for and parses the incoming request such that the resulting dictionary can be used to invoke the service's function implementation. @@ -215,12 +216,12 @@ def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]: raise NotImplementedError def _parse_shape( - self, request: HttpRequest, shape: Shape, node: Any, uri_params: Mapping[str, Any] = None + self, request: Request, shape: Shape, node: Any, uri_params: Mapping[str, Any] = None ) -> Any: """ Main parsing method which dynamically calls the parsing function for the specific shape. - :param request: the complete HttpRequest + :param request: the complete Request :param shape: of the node :param node: the single part of the HTTP request to parse :param uri_params: the extracted URI path params @@ -233,10 +234,12 @@ def _parse_shape( if location == "header": header_name = shape.serialization.get("name") payload = request.headers.get(header_name) - if shape.type_name == "list": + if payload and shape.type_name == "list": # headers may contain a comma separated list of values (e.g., the ObjectAttributes member in # s3.GetObjectAttributes), so we prepare it here for the handler, which will be `_parse_list`. 
- payload = payload.split(",") + # Header lists can contain optional whitespace, so we strip it + # https://www.rfc-editor.org/rfc/rfc9110.html#name-lists-rule-abnf-extension + payload = [value.strip() for value in payload.split(",")] elif location == "headers": payload = self._parse_header_map(shape, request.headers) # shapes with the location trait "headers" only contain strings and are not further processed @@ -271,7 +274,7 @@ def _parse_shape( def _parse_list( self, - request: HttpRequest, + request: Request, shape: ListShape, node: list, uri_params: Mapping[str, Any] = None, @@ -336,6 +339,10 @@ def _timestamp_iso8601(date_string: str) -> datetime.datetime: def _timestamp_unixtimestamp(timestamp_string: str) -> datetime.datetime: return datetime.datetime.utcfromtimestamp(int(timestamp_string)) + @staticmethod + def _timestamp_unixtimestampmillis(timestamp_string: str) -> datetime.datetime: + return datetime.datetime.utcfromtimestamp(float(timestamp_string) / 1000) + @staticmethod def _timestamp_rfc822(datetime_string: str) -> datetime.datetime: return parsedate_to_datetime(datetime_string) @@ -360,7 +367,7 @@ class QueryRequestParser(RequestParser): """ @_handle_exceptions - def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]: + def parse(self, request: Request) -> Tuple[OperationModel, Any]: instance = request.values if "Action" not in instance: raise ProtocolParserError( @@ -385,7 +392,7 @@ def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]: def _process_member( self, - request: HttpRequest, + request: Request, member_name: str, member_shape: Shape, node: dict, @@ -407,7 +414,7 @@ def _process_member( def _parse_structure( self, - request: HttpRequest, + request: Request, shape: StructureShape, node: dict, uri_params: Mapping[str, Any] = None, @@ -429,7 +436,7 @@ def _parse_structure( return result if len(result) > 0 else None def _parse_map( - self, request: HttpRequest, shape: MapShape, node: dict, uri_params: Mapping[str, Any] + self, request: Request, shape: MapShape, node: dict, uri_params: Mapping[str, Any] ) -> dict: """ This is what the node looks like for a flattened map:: @@ -477,7 +484,7 @@ def _parse_map( def _parse_list( self, - request: HttpRequest, + request: Request, shape: ListShape, node: dict, uri_params: Mapping[str, Any] = None, @@ -552,7 +559,7 @@ def __init__(self, service: ServiceModel) -> None: self._operation_router = RestServiceOperationRouter(service) @_handle_exceptions - def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]: + def parse(self, request: Request) -> Tuple[OperationModel, Any]: try: operation, uri_params = self._operation_router.match(request) except NotFound as e: @@ -569,7 +576,7 @@ def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]: def _parse_payload( self, - request: HttpRequest, + request: Request, shape: Shape, member_shapes: Dict[str, Shape], uri_params: Mapping[str, Any], @@ -620,7 +627,7 @@ def _parse_payload( final_parsed.update(non_payload_parsed) final_parsed.update(payload_parsed) - def _initial_body_parse(self, request: HttpRequest) -> Any: + def _initial_body_parse(self, request: Request) -> Any: """ This method executes the initial parsing of the body (XML, JSON, or CBOR). 
The parsed body will afterwards still be walked through and the nodes will be converted to the appropriate @@ -631,13 +638,13 @@ def _initial_body_parse(self, request: HttpRequest) -> Any: """ raise NotImplementedError("_initial_body_parse") - def _create_event_stream(self, request: HttpRequest, shape: Shape) -> Any: + def _create_event_stream(self, request: Request, shape: Shape) -> Any: # TODO handle event streams raise NotImplementedError("_create_event_stream") - def create_input_stream(self, request: HttpRequest) -> IO[bytes]: + def create_input_stream(self, request: Request) -> IO[bytes]: """ - Returns an IO object that makes the payload of the HttpRequest available for streaming. + Returns an IO object that makes the payload of the Request available for streaming. :param request: the http request :return: the input stream that allows services to consume the request payload @@ -659,7 +666,7 @@ def __init__(self, service_model: ServiceModel): self.ignore_get_body_errors = True self._namespace_re = re.compile("{.*}") - def _initial_body_parse(self, request: HttpRequest) -> ETree.Element: + def _initial_body_parse(self, request: Request) -> ETree.Element: body = request.data if not body: return ETree.Element("") @@ -667,7 +674,7 @@ def _initial_body_parse(self, request: HttpRequest) -> ETree.Element: def _parse_structure( self, - request: HttpRequest, + request: Request, shape: StructureShape, node: ETree.Element, uri_params: Mapping[str, Any] = None, @@ -702,7 +709,7 @@ def _parse_structure( def _parse_map( self, - request: HttpRequest, + request: Request, shape: MapShape, node: dict, uri_params: Mapping[str, Any] = None, @@ -730,7 +737,7 @@ def _parse_map( def _parse_list( self, - request: HttpRequest, + request: Request, shape: ListShape, node: dict, uri_params: Mapping[str, Any] = None, @@ -797,7 +804,7 @@ def _build_name_to_xml_node(self, parent_node: Union[list, ETree.Element]) -> di xml_dict[key] = item return xml_dict - def _create_event_stream(self, request: HttpRequest, shape: Shape) -> Any: + def _create_event_stream(self, request: Request, shape: Shape) -> Any: # TODO handle event streams raise NotImplementedError("_create_event_stream") @@ -808,11 +815,14 @@ class BaseJSONRequestParser(RequestParser, ABC): This base-class handles parsing the payload / body as JSON. 
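+
+    Illustrative content-type dispatch (header values as the AWS SDKs send them):
+
+        Content-Type: application/x-amz-json-1.1  ->  body parsed as JSON
+        Content-Type: application/x-amz-cbor-1.1  ->  body parsed as CBOR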
""" + # default timestamp format for JSON requests TIMESTAMP_FORMAT = "unixtimestamp" + # timestamp format for requests with CBOR content type + CBOR_TIMESTAMP_FORMAT = "unixtimestampmillis" def _parse_structure( self, - request: HttpRequest, + request: Request, shape: StructureShape, value: Optional[dict], uri_params: Mapping[str, Any] = None, @@ -837,7 +847,7 @@ def _parse_structure( def _parse_map( self, - request: HttpRequest, + request: Request, shape: MapShape, value: Optional[dict], uri_params: Mapping[str, Any] = None, @@ -847,19 +857,19 @@ def _parse_map( parsed = {} key_shape = shape.key value_shape = shape.value - for key, value in value.items(): + for key, val in value.items(): actual_key = self._parse_shape(request, key_shape, key, uri_params) - actual_value = self._parse_shape(request, value_shape, value, uri_params) + actual_value = self._parse_shape(request, value_shape, val, uri_params) parsed[actual_key] = actual_value return parsed - def _parse_body_as_json(self, request: HttpRequest) -> dict: + def _parse_body_as_json(self, request: Request) -> dict: body_contents = request.data if not body_contents: return {} if request.mimetype.startswith("application/x-amz-cbor"): try: - return cbor2.loads(body_contents) + return cbor2_loads(body_contents) except ValueError as e: raise ProtocolParserError("HTTP body could not be parsed as CBOR.") from e else: @@ -869,12 +879,26 @@ def _parse_body_as_json(self, request: HttpRequest) -> dict: raise ProtocolParserError("HTTP body could not be parsed as JSON.") from e def _parse_boolean( - self, request: HttpRequest, shape: Shape, node: bool, uri_params: Mapping[str, Any] = None + self, request: Request, shape: Shape, node: bool, uri_params: Mapping[str, Any] = None ) -> bool: return super()._noop_parser(request, shape, node, uri_params) + def _parse_timestamp( + self, request: Request, shape: Shape, node: str, uri_params: Mapping[str, Any] = None + ) -> datetime.datetime: + if not shape.serialization.get("timestampFormat") and request.mimetype.startswith( + "application/x-amz-cbor" + ): + # cbor2 has native support for timestamp decoding, so this node could already have the right type + if isinstance(node, datetime.datetime): + return node + # otherwise parse the timestamp using the AWS CBOR timestamp format + # (non-CBOR-standard conform, uses millis instead of floating-point-millis) + return self._convert_str_to_timestamp(node, self.CBOR_TIMESTAMP_FORMAT) + return super()._parse_timestamp(request, shape, node, uri_params) + def _parse_blob( - self, request: HttpRequest, shape: Shape, node: bool, uri_params: Mapping[str, Any] = None + self, request: Request, shape: Shape, node: bool, uri_params: Mapping[str, Any] = None ) -> bytes: if isinstance(node, bytes) and request.mimetype.startswith("application/x-amz-cbor"): # CBOR does not base64 encode binary data @@ -892,7 +916,7 @@ class JSONRequestParser(BaseJSONRequestParser): """ @_handle_exceptions - def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]: + def parse(self, request: Request) -> Tuple[OperationModel, Any]: target = request.headers["X-Amz-Target"] # assuming that the last part of the target string (e.g., "x.y.z.MyAction") contains the operation name operation_name = target.rpartition(".")[2] @@ -904,7 +928,7 @@ def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]: return operation, final_parsed def _do_parse( - self, request: HttpRequest, shape: Shape, uri_params: Mapping[str, Any] = None + self, request: Request, shape: Shape, uri_params: 
Mapping[str, Any] = None ) -> dict: parsed = {} if shape is not None: @@ -915,12 +939,12 @@ def _do_parse( parsed = self._handle_json_body(request, shape, uri_params) return parsed - def _handle_event_stream(self, request: HttpRequest, shape: Shape, event_name: str): + def _handle_event_stream(self, request: Request, shape: Shape, event_name: str): # TODO handle event streams raise NotImplementedError def _handle_json_body( - self, request: HttpRequest, shape: Shape, uri_params: Mapping[str, Any] = None + self, request: Request, shape: Shape, uri_params: Mapping[str, Any] = None ) -> Any: # The json.loads() gives us the primitive JSON types, but we need to traverse the parsed JSON data to convert # to richer types (blobs, timestamps, etc.) @@ -936,10 +960,10 @@ class RestJSONRequestParser(BaseRestRequestParser, BaseJSONRequestParser): The operation is defined by the HTTP method and the path suffix. """ - def _initial_body_parse(self, request: HttpRequest) -> dict: + def _initial_body_parse(self, request: Request) -> dict: return self._parse_body_as_json(request) - def _create_event_stream(self, request: HttpRequest, shape: Shape) -> Any: + def _create_event_stream(self, request: Request, shape: Shape) -> Any: raise NotImplementedError @@ -971,36 +995,32 @@ class VirtualHostRewriter: """ Context Manager which rewrites the request object parameters such that - within the context - it looks like a normal S3 request. + FIXME: this is not optimal because it mutates the Request object. Once we have better utility to create/copy + a request instead of EnvironBuilder, we should copy it before parsing (except the stream). """ - def __init__(self, request: HttpRequest): + def __init__(self, request: Request): self.request = request self.old_host = None self.old_path = None def __enter__(self): # only modify the request if it uses the virtual host addressing - if self._is_vhost_address(self.request): + if bucket_name := self._is_vhost_address_get_bucket(self.request): # save the original path and host for restoring on context exit self.old_path = self.request.path self.old_host = self.request.host self.old_raw_uri = self.request.environ.get("RAW_URI") - # extract the bucket name from the host part of the request - bucket_name, new_host = self.old_host.split(".", maxsplit=1) + # remove the bucket name from the host part of the request + new_host = self.old_host.removeprefix(f"{bucket_name}.") - # split the url and put the bucket name at the front - path_parts = self.old_path.split("/") - path_parts = [bucket_name] + path_parts - path_parts = [part for part in path_parts if part] - new_path = "/" + "/".join(path_parts) or "/" + # put the bucket name at the front + new_path = "/" + bucket_name + self.old_path or "/" # create a new RAW_URI for the WSGI environment, this is necessary because of our `get_raw_path` utility if self.old_raw_uri: - path_parts = self.old_raw_uri.split("/") - path_parts = [bucket_name] + path_parts - path_parts = [part for part in path_parts if part] - new_raw_uri = "/" + "/".join(path_parts) or "/" + new_raw_uri = "/" + bucket_name + self.old_raw_uri or "/" if qs := self.request.query_string: new_raw_uri += "?" 
+ qs.decode("utf-8")
+            else:
@@ -1019,7 +1039,7 @@ def __exit__(self, exc_type, exc_value, exc_traceback):
 
         @staticmethod
         def _set_request_props(
-            request: HttpRequest, path: str, host: str, raw_uri: Optional[str] = None
+            request: Request, path: str, host: str, raw_uri: Optional[str] = None
         ):
             """Sets the HTTP request's path and host and clears the cache in the request object."""
             request.path = path
@@ -1046,47 +1066,57 @@
                 pass
 
         @staticmethod
-        def _is_vhost_address(request: HttpRequest) -> bool:
-            from localstack.services.s3.s3_utils import uses_host_addressing
+        def _is_vhost_address_get_bucket(request: Request) -> str | None:
+            from localstack.services.s3.utils import uses_host_addressing
 
             return uses_host_addressing(request.headers)
 
    @_handle_exceptions
-    def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]:
-        if LEGACY_S3_PROVIDER:
-            """Handle virtual-host-addressing for S3."""
-            with self.VirtualHostRewriter(request):
-                return super().parse(request)
-        else:
+    def parse(self, request: Request) -> Tuple[OperationModel, Any]:
+        """Handle virtual-host-addressing for S3."""
+        with self.VirtualHostRewriter(request):
            return super().parse(request)
 
    def _parse_shape(
-        self, request: HttpRequest, shape: Shape, node: Any, uri_params: Mapping[str, Any] = None
+        self, request: Request, shape: Shape, node: Any, uri_params: Mapping[str, Any] = None
    ) -> Any:
        """
        Special handling of parsing the shape for s3 object-names (=key):
-        trailing '/' are valid and need to be preserved, however, the url-matcher removes it from the key
-        we check the request.url to verify the name
+        Trailing '/' characters are valid and need to be preserved; however, the URL matcher removes them from the
+        key. We need special logic to compare the parsed Key parameter against the path and add back the missing
+        slashes.
        """
        if (
            shape is not None
            and uri_params is not None
            and shape.serialization.get("location") == "uri"
            and shape.serialization.get("name") == "Key"
-            and request.base_url.endswith(f"{uri_params['Key']}/")
+            and (
+                (trailing_slashes := request.path.rpartition(uri_params["Key"])[2])
+                and all(char == "/" for char in trailing_slashes)
+            )
        ):
            uri_params = dict(uri_params)
-            uri_params["Key"] = uri_params["Key"] + "/"
+            uri_params["Key"] = uri_params["Key"] + trailing_slashes
        return super()._parse_shape(request, shape, node, uri_params)

+    @_text_content
+    def _parse_integer(self, _, shape, node: str, ___) -> int | None:
+        # S3 accepts empty query string parameters for members that should be integers.
+        # To avoid breaking other cases, validate that the shape is located in the querystring.
+        if node == "" and shape.serialization.get("location") == "querystring":
+            return None
+        return int(node)

-class SQSRequestParser(QueryRequestParser):
+
+class SQSQueryRequestParser(QueryRequestParser):
    def _get_serialized_name(self, shape: Shape, default_name: str, node: dict) -> str:
        """
        SQS allows using both the proper serialized name of a map and the member name as the name for maps.
        For example, both work for the TagQueue operation:
        - Using the proper serialized name "Tag": Tag.1.Key=key&Tag.1.Value=value
        - Using the member name "Tag" in the parent structure: Tags.1.Key=key&Tags.1.Value=value
+        - Using "Name" to represent the Key for a nested dict: MessageAttributes.1.Name=key&MessageAttributes.1.Value.StringValue=value
+          resulting in {MessageAttributes: {key: {StringValue: value}}}
        The Java SDK implements the second variant: https://github.com/aws/aws-sdk-java-v2/issues/2524
        This has been confirmed to be a bug that goes against the spec, but since the client has a lot of users, and
        AWS SQS supports both, we need to handle it here.
@@ -1094,26 +1124,29 @@
        # ask the super implementation for the proper serialized name
        primary_name = super()._get_serialized_name(shape, default_name, node)
 
-        # determine a potential suffix for the name of the member in the node
-        suffix = ""
+        # determine potential suffixes for the name of the member in the node
+        suffixes = []
        if shape.type_name == "map":
            if not shape.serialization.get("flattened"):
-                suffix = ".entry.1.Key"
+                suffixes = [".entry.1.Key", ".entry.1.Name"]
            else:
-                suffix = ".1.Key"
+                suffixes = [".1.Key", ".1.Name"]
 
        if shape.type_name == "list":
            if not shape.serialization.get("flattened"):
-                suffix = ".member.1"
+                suffixes = [".member.1"]
            else:
-                suffix = ".1"
+                suffixes = [".1"]
 
        # if the primary name is _not_ available in the node, but the default name is, we use the default name
-        if f"{primary_name}{suffix}" not in node and f"{default_name}{suffix}" in node:
+        if not any(f"{primary_name}{suffix}" in node for suffix in suffixes) and any(
+            f"{default_name}{suffix}" in node for suffix in suffixes
+        ):
            return default_name
        # otherwise we use the primary name
        return primary_name
 
 
+@functools.cache
 def create_parser(service: ServiceModel) -> RequestParser:
    """
    Creates the right parser for the given service model.
@@ -1127,8 +1160,8 @@
    # within the parser implementations, the service-specific parser implementations (basically the implicit /
    # informally more specific protocol implementations) have precedence over the more general protocol-specific
    # parsers.
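    # Illustrative results of this dispatch (assuming the loaded specs still use their classic protocols):
    #   create_parser(load_service("sqs"))        -> SQSQueryRequestParser (from the first mapping below)
    #   create_parser(load_service("cloudwatch")) -> QueryRequestParser (protocol fallback)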
    service_specific_parsers = {
-        "s3": S3RequestParser,
-        "sqs": SQSRequestParser,
+        "s3": {"rest-xml": S3RequestParser},
+        "sqs": {"query": SQSQueryRequestParser},
    }
    protocol_specific_parsers = {
        "query": QueryRequestParser,
@@ -1138,9 +1171,12 @@
        "ec2": EC2RequestParser,
    }
 
-    # Try to select a service-specific parser implementation
-    if service.service_name in service_specific_parsers:
-        return service_specific_parsers[service.service_name](service)
+    # Try to select a service- and protocol-specific parser implementation
+    if (
+        service.service_name in service_specific_parsers
+        and service.protocol in service_specific_parsers[service.service_name]
+    ):
+        return service_specific_parsers[service.service_name][service.protocol](service)
    else:
        # Otherwise, pick the protocol-specific parser for the protocol of the service
        return protocol_specific_parsers[service.protocol](service)
diff --git a/localstack-core/localstack/aws/protocol/routing.py b/localstack-core/localstack/aws/protocol/routing.py
new file mode 100644
index 0000000000000..f793bd051ec27
--- /dev/null
+++ b/localstack-core/localstack/aws/protocol/routing.py
@@ -0,0 +1,69 @@
+import re
+from typing import AnyStr
+
+from werkzeug.routing import Rule
+
+# Regex to find path parameters in requestUris of AWS service specs (f.e. /{param1}/{param2+})
+path_param_regex = re.compile(r"({.+?})")
+# Translation table which replaces characters forbidden in Werkzeug rule names with temporary replacements
+# Note: The temporary replacements must not occur in any requestUri of any operation in any service!
+_rule_replacements = {"-": "_0_"}
+# String translation table for #_rule_replacements for str#translate
+_rule_replacement_table = str.maketrans(_rule_replacements)
+
+
+class StrictMethodRule(Rule):
+    """
+    Small extension to Werkzeug's Rule class which reverts unwanted assumptions made by Werkzeug.
+    Reverted assumptions:
+    - Werkzeug automatically matches HEAD requests to the corresponding GET request (i.e. Werkzeug's rule automatically
+      adds the HEAD HTTP method to a rule which should only match GET requests). Werkzeug implements this to simplify
+      building an app compliant with HTTP (where a HEAD request needs to return the headers of a corresponding GET
+      request), but it is unwanted for our strict rule matching here.
+    """
+
+    def __init__(self, string: str, method: str, **kwargs) -> None:
+        super().__init__(string=string, methods=[method], **kwargs)
+
+        # Make sure Werkzeug's Rule does not add any other methods
+        # (f.e. the HEAD method even though the rule should only match GET)
+        self.methods = {method.upper()}
+
+
+def transform_path_params_to_rule_vars(match: re.Match[AnyStr]) -> str:
+    """
+    Transforms a request URI path param to a valid Werkzeug Rule string variable placeholder.
+    This transformation function should be used in combination with path_param_regex on the request URIs (without any
+    query params).
+
+    :param match: Regex match which contains a single group. The match group is a request URI path param, including the
+                  surrounding curly braces.
+    :return: Werkzeug rule string variable placeholder which is semantically equal to the given request URI path param
+
+    """
+    # get the group match and strip the curly braces
+    request_uri_variable: str = match.group(0)[1:-1]
+
+    # if the request URI param is greedy (f.e. 
/foo/{Bar+}), add Werkzeug's "path" prefix (/foo/{path:Bar}) + greedy_prefix = "" + if request_uri_variable.endswith("+"): + greedy_prefix = "path:" + request_uri_variable = request_uri_variable.strip("+") + + # replace forbidden chars (not allowed in Werkzeug rule variable names) with their placeholder + escaped_request_uri_variable = request_uri_variable.translate(_rule_replacement_table) + + return f"<{greedy_prefix}{escaped_request_uri_variable}>" + + +def post_process_arg_name(arg_key: str) -> str: + """ + Reverses previous manipulations to the path parameters names (like replacing forbidden characters with + placeholders). + :param arg_key: Path param key name extracted using Werkzeug rules + :return: Post-processed ("un-sanitized") path param key + """ + result = arg_key + for original, substitution in _rule_replacements.items(): + result = result.replace(substitution, original) + return result diff --git a/localstack/aws/protocol/serializer.py b/localstack-core/localstack/aws/protocol/serializer.py similarity index 77% rename from localstack/aws/protocol/serializer.py rename to localstack-core/localstack/aws/protocol/serializer.py index 7fcb87985beec..86cabdd3487b6 100644 --- a/localstack/aws/protocol/serializer.py +++ b/localstack-core/localstack/aws/protocol/serializer.py @@ -70,12 +70,12 @@ The result of the serialization methods is the HTTP response which can be sent back to the calling client. """ + import abc import base64 import functools import json import logging -import random import string from abc import ABC from binascii import crc32 @@ -85,19 +85,20 @@ from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple, Union from xml.etree import ElementTree as ETree -import cbor2 import xmltodict -from boto.utils import ISO8601 from botocore.model import ListShape, MapShape, OperationModel, ServiceModel, Shape, StructureShape -from botocore.serialize import ISO8601_MICRO +from botocore.serialize import ISO8601, ISO8601_MICRO from botocore.utils import calculate_md5, is_json_value_header, parse_to_aware_datetime + +# cbor2: explicitly load from private _encoder module to avoid using the (non-patched) C-version +from cbor2._encoder import dumps as cbor2_dumps from werkzeug import Request as WerkzeugRequest from werkzeug import Response as WerkzeugResponse from werkzeug.datastructures import Headers, MIMEAccept from werkzeug.http import parse_accept_header -from localstack.aws.api import CommonServiceException, HttpResponse, ServiceException -from localstack.aws.spec import load_service +from localstack.aws.api import CommonServiceException, ServiceException +from localstack.aws.spec import ProtocolName, load_service from localstack.constants import ( APPLICATION_AMZ_CBOR_1_1, APPLICATION_AMZ_JSON_1_0, @@ -107,7 +108,9 @@ APPLICATION_XML, TEXT_XML, ) +from localstack.http import Response from localstack.utils.common import to_bytes, to_str +from localstack.utils.strings import long_uid from localstack.utils.xml import strip_xmlns LOG = logging.getLogger(__name__) @@ -182,8 +185,12 @@ class ResponseSerializer(abc.ABC): @_handle_exceptions def serialize_to_response( - self, response: dict, operation_model: OperationModel, headers: Optional[Dict | Headers] - ) -> HttpResponse: + self, + response: dict, + operation_model: OperationModel, + headers: Optional[Dict | Headers], + request_id: str, + ) -> Response: """ Takes a response dict and serializes it to an actual HttpResponse. 
@@ -192,7 +199,8 @@ def serialize_to_response( service's output / response :param headers: the headers of the incoming request this response should be serialized for. This is necessary for features like Content-Negotiation (define response content type based on request headers). - :return: HttpResponse which can be sent to the calling client + :param request_id: autogenerated AWS request ID identifying the original request + :return: Response which can be sent to the calling client :raises: ResponseSerializerError (either a ProtocolSerializerError or an UnknownSerializerError) """ @@ -201,17 +209,23 @@ def serialize_to_response( # if the operation has a streaming output, handle the serialization differently if operation_model.has_event_stream_output: - return self._serialize_event_stream(response, operation_model, mime_type) + return self._serialize_event_stream(response, operation_model, mime_type, request_id) serialized_response = self._create_default_response(operation_model, mime_type) shape = operation_model.output_shape # The shape can also be none (for empty responses), but it still needs to be serialized (to add some metadata) shape_members = shape.members if shape is not None else None self._serialize_response( - response, serialized_response, shape, shape_members, operation_model, mime_type + response, + serialized_response, + shape, + shape_members, + operation_model, + mime_type, + request_id, ) serialized_response = self._prepare_additional_traits_in_response( - serialized_response, operation_model + serialized_response, operation_model, request_id ) return serialized_response @@ -221,7 +235,8 @@ def serialize_error_to_response( error: ServiceException, operation_model: OperationModel, headers: Optional[Dict | Headers], - ) -> HttpResponse: + request_id: str, + ) -> Response: """ Takes an error instance and serializes it to an actual HttpResponse. Therefore, this method is used for errors which should be serialized and transmitted to the calling client. @@ -231,6 +246,7 @@ def serialize_error_to_response( service's output / response :param headers: the headers of the incoming request this response should be serialized for. This is necessary for features like Content-Negotiation (define response content type based on request headers). 
+ :param request_id: autogenerated AWS request ID identifying the original request :return: HttpResponse which can be sent to the calling client :raises: ResponseSerializerError (either a ProtocolSerializerError or an UnknownSerializerError) """ @@ -246,20 +262,23 @@ def serialize_error_to_response( shape = operation_model.service_model.shape_for_error_code(error.code) serialized_response.status_code = error.status_code - self._serialize_error(error, serialized_response, shape, operation_model, mime_type) + self._serialize_error( + error, serialized_response, shape, operation_model, mime_type, request_id + ) serialized_response = self._prepare_additional_traits_in_response( - serialized_response, operation_model + serialized_response, operation_model, request_id ) return serialized_response def _serialize_response( self, parameters: dict, - response: HttpResponse, + response: Response, shape: Optional[Shape], shape_members: dict, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: raise NotImplementedError @@ -269,6 +288,7 @@ def _serialize_body_params( shape: Shape, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> Optional[str]: """ Actually serializes the given params for the given shape to a string for the transmission in the body of the @@ -277,6 +297,7 @@ def _serialize_body_params( :param shape: to know how to serialize the params :param operation_model: for additional metadata :param mime_type: Mime type which should be used to encode the payload + :param request_id: autogenerated AWS request ID identifying the original request :return: string containing the serialized body """ raise NotImplementedError @@ -284,16 +305,21 @@ def _serialize_body_params( def _serialize_error( self, error: ServiceException, - response: HttpResponse, + response: Response, shape: StructureShape, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: raise NotImplementedError def _serialize_event_stream( - self, response: dict, operation_model: OperationModel, mime_type: str - ) -> HttpResponse: + self, + response: dict, + operation_model: OperationModel, + mime_type: str, + request_id: str, + ) -> Response: """ Serializes a given response dict (the return payload of a service implementation) to an _event stream_ using the given operation model. 
@@ -301,7 +327,8 @@ def _serialize_event_stream( :param response: dictionary containing the payload for the response :param operation_model: describing the operation the response dict is being returned by :param mime_type: Mime type which should be used to encode the payload - :return: HttpResponse which can directly be sent to the client (in chunks) + :param request_id: autogenerated AWS request ID identifying the original request + :return: Response which can directly be sent to the client (in chunks) """ event_stream_shape = operation_model.get_event_stream_output() event_stream_member_name = operation_model.output_shape.event_stream_name @@ -309,7 +336,6 @@ def _serialize_event_stream( # wrap the generator in operation specific serialization def event_stream_serializer() -> Iterable[bytes]: yield self._encode_event_payload("initial-response") - # yield convert_to_binary_event_payload("", event_type="initial-response") # create a default response serialized_event_response = self._create_default_response(operation_model, mime_type) @@ -345,17 +371,18 @@ def event_stream_serializer() -> Iterable[bytes]: event_member_shape.members if event_member_shape is not None else None, operation_model, mime_type, + request_id, ) # execute additional response traits (might be modifying the response) serialized_event_response = self._prepare_additional_traits_in_response( - serialized_event_response, operation_model + serialized_event_response, operation_model, request_id ) # encode the event and yield it yield self._encode_event_payload( event_type=event_member_name, content=serialized_event_response.data ) - return HttpResponse( + return Response( response=event_stream_serializer(), status=operation_model.http.get("responseCode", 200), ) @@ -431,9 +458,7 @@ def _encode_event_payload( return result - def _create_default_response( - self, operation_model: OperationModel, mime_type: str - ) -> HttpResponse: + def _create_default_response(self, operation_model: OperationModel, mime_type: str) -> Response: """ Creates a boilerplate default response to be used by subclasses as starting points. Uses the default HTTP response status code defined in the operation model (if defined), otherwise 200. 
@@ -442,7 +467,7 @@ def _create_default_response( :param mime_type: Mime type which should be used to encode the payload :return: boilerplate HTTP response """ - return HttpResponse(status=operation_model.http.get("responseCode", 200)) + return Response(status=operation_model.http.get("responseCode", 200)) def _get_mime_type(self, headers: Optional[Dict | Headers]) -> str: """ @@ -491,10 +516,6 @@ def _timestamp_iso8601(value: datetime) -> str: def _timestamp_unixtimestamp(value: datetime) -> float: return value.timestamp() - @staticmethod - def _timestamp_unixtimestampmillis(value: datetime) -> int: - return int(value.timestamp() * 1000) - def _timestamp_rfc822(self, value: datetime) -> str: if isinstance(value, datetime): value = self._timestamp_unixtimestamp(value) @@ -535,7 +556,7 @@ def _encode_payload(self, body: Union[bytes, str]) -> bytes: return body def _prepare_additional_traits_in_response( - self, response: HttpResponse, operation_model: OperationModel + self, response: Response, operation_model: OperationModel, request_id: str ): """Applies additional traits on the raw response for a given model or protocol.""" if operation_model.http_checksum_required: @@ -549,7 +570,7 @@ def _has_header(self, header_name: str, headers: dict): else: return header_name.lower() in [key.lower() for key in headers.keys()] - def _add_md5_header(self, response: HttpResponse): + def _add_md5_header(self, response: Response): """Add a Content-MD5 header if not yet there. Adapted from botocore.utils""" headers = response.headers body = response.data @@ -575,10 +596,11 @@ class BaseXMLResponseSerializer(ResponseSerializer): def _serialize_error( self, error: ServiceException, - response: HttpResponse, + response: Response, shape: StructureShape, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: # Check if we need to add a namespace attr = ( @@ -590,10 +612,10 @@ def _serialize_error( error_tag = ETree.SubElement(root, "Error") self._add_error_tags(error, error_tag, mime_type) - request_id = ETree.SubElement(root, "RequestId") - request_id.text = gen_amzn_requestid_long() + request_id_element = ETree.SubElement(root, "RequestId") + request_id_element.text = request_id - self._add_additional_error_tags(error, root, shape, mime_type) + self._add_additional_error_tags(vars(error), root, shape, mime_type) response.set_response(self._encode_payload(self._node_to_string(root, mime_type))) @@ -610,7 +632,7 @@ def _add_error_tags( self._default_serialize(error_tag, "Sender", None, "Type", mime_type) def _add_additional_error_tags( - self, error: ServiceException, node: ETree, shape: StructureShape, mime_type: str + self, parameters: dict, node: ETree, shape: StructureShape, mime_type: str ): if shape: params = {} @@ -618,8 +640,8 @@ def _add_additional_error_tags( for member in shape.members: # XML protocols do not add modeled default fields to the root node # (tested for cloudfront, route53, cloudwatch, iam) - if member.lower() not in ["code", "message"] and hasattr(error, member): - params[member] = getattr(error, member) + if member.lower() not in ["code", "message"] and member in parameters: + params[member] = parameters[member] # If there is an error shape with members which should be set, they need to be added to the node if params: @@ -638,9 +660,10 @@ def _serialize_body_params( shape: Shape, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> Optional[str]: root = self._serialize_body_params_to_xml(params, shape, operation_model, mime_type) - 
self._prepare_additional_traits_in_xml(root) + self._prepare_additional_traits_in_xml(root, request_id) return self._node_to_string(root, mime_type) def _serialize_body_params_to_xml( @@ -815,7 +838,7 @@ def _default_serialize(self, xmlnode: ETree.Element, params: str, _, name: str, node = ETree.SubElement(xmlnode, name) node.text = str(params) - def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element]): + def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element], request_id: str): """ Prepares the XML root node before being serialized with additional traits (like the Response ID in the Query protocol). @@ -823,9 +846,7 @@ def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element]): """ pass - def _create_default_response( - self, operation_model: OperationModel, mime_type: str - ) -> HttpResponse: + def _create_default_response(self, operation_model: OperationModel, mime_type: str) -> Response: response = super()._create_default_response(operation_model, mime_type) response.headers["Content-Type"] = mime_type return response @@ -855,11 +876,12 @@ class BaseRestResponseSerializer(ResponseSerializer, ABC): def _serialize_response( self, parameters: dict, - response: HttpResponse, + response: Response, shape: Optional[Shape], shape_members: dict, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: header_params, payload_params = self._partition_members(parameters, shape) self._process_header_members(header_params, response, shape) @@ -867,29 +889,37 @@ def _serialize_response( # Do not process the body payload in this case (setting a body could also manipulate the headers) if operation_model.http.get("method") != "HEAD": self._serialize_payload( - payload_params, response, shape, shape_members, operation_model, mime_type + payload_params, + response, + shape, + shape_members, + operation_model, + mime_type, + request_id, ) self._serialize_content_type(response, shape, shape_members, mime_type) - self._prepare_additional_traits_in_response(response, operation_model) + self._prepare_additional_traits_in_response(response, operation_model, request_id) def _serialize_payload( self, parameters: dict, - response: HttpResponse, + response: Response, shape: Optional[Shape], shape_members: dict, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: """ Serializes the given payload. :param parameters: The user input params - :param response: The final serialized HttpResponse + :param response: The final serialized Response :param shape: Describes the expected output shape (can be None in case of an "empty" response) :param shape_members: The members of the output struct shape :param operation_model: The specification of the operation of which the response is serialized here :param mime_type: Mime type which should be used to encode the payload + :param request_id: autogenerated AWS request ID identifying the original request :return: None - the given `serialized` dict is modified """ if shape is None: @@ -918,7 +948,11 @@ def _serialize_payload( response.set_response( self._encode_payload( self._serialize_body_params( - body_params, shape_members[payload_member], operation_model, mime_type + body_params, + shape_members[payload_member], + operation_model, + mime_type, + request_id, ) ) ) @@ -926,12 +960,14 @@ def _serialize_payload( # Otherwise, we use the "traditional" way of serializing the whole parameters dict recursively. 
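To make the branch below concrete: a shape that declares a "payload" member in its serialization trait (for example S3's GetObjectOutput, whose payload member is Body) has only that member written to the HTTP body, while shapes without the trait fall through to the recursive whole-struct serialization that follows. A simplified sketch of the distinction (illustrative only, not the library's real types; it relies on the same `shape.serialization` dict used in the hunks above):

    def pick_body_strategy(shape) -> str:
        # "payload" trait present -> only that member becomes the HTTP body;
        # otherwise the full parameter dict is serialized recursively.
        if shape is not None and shape.serialization.get("payload"):
            return "payload-member"
        return "whole-struct"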
response.set_response( self._encode_payload( - self._serialize_body_params(parameters, shape, operation_model, mime_type) + self._serialize_body_params( + parameters, shape, operation_model, mime_type, request_id + ) ) ) def _serialize_content_type( - self, serialized: HttpResponse, shape: Shape, shape_members: dict, mime_type: str + self, serialized: Response, shape: Shape, shape_members: dict, mime_type: str ): """ Some protocols require varied Content-Type headers depending on user input. @@ -944,14 +980,16 @@ def _has_streaming_payload(self, payload: Optional[str], shape_members): return payload is not None and shape_members[payload].type_name in ["blob", "string"] def _prepare_additional_traits_in_response( - self, response: HttpResponse, operation_model: OperationModel + self, response: Response, operation_model: OperationModel, request_id: str ): """Adds the request ID to the headers (in contrast to the body - as in the Query protocol).""" - response = super()._prepare_additional_traits_in_response(response, operation_model) - response.headers["x-amz-request-id"] = gen_amzn_requestid_long() + response = super()._prepare_additional_traits_in_response( + response, operation_model, request_id + ) + response.headers["x-amz-request-id"] = request_id return response - def _process_header_members(self, parameters: dict, response: HttpResponse, shape: Shape): + def _process_header_members(self, parameters: dict, response: Response, shape: Shape): shape_members = shape.members if isinstance(shape, StructureShape) else [] for name in shape_members: member_shape = shape_members[name] @@ -973,7 +1011,7 @@ def _process_header_members(self, parameters: dict, response: HttpResponse, shap elif location == "statusCode": response.status_code = int(value) - def _serialize_header_map(self, prefix: str, response: HttpResponse, params: dict) -> None: + def _serialize_header_map(self, prefix: str, response: Response, params: dict) -> None: """Serializes the header map for the location trait "headers".""" for key, val in params.items(): actual_key = prefix + key @@ -1044,26 +1082,30 @@ class QueryResponseSerializer(BaseXMLResponseSerializer): def _serialize_response( self, parameters: dict, - response: HttpResponse, + response: Response, shape: Optional[Shape], shape_members: dict, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: """ Serializes the given parameters as XML for the query protocol. 
:param parameters: The user input params - :param response: The final serialized HttpResponse + :param response: The final serialized Response :param shape: Describes the expected output shape (can be None in case of an "empty" response) :param shape_members: The members of the output struct shape :param operation_model: The specification of the operation of which the response is serialized here :param mime_type: Mime type which should be used to encode the payload + :param request_id: autogenerated AWS request ID identifying the original request :return: None - the given `serialized` dict is modified """ response.set_response( self._encode_payload( - self._serialize_body_params(parameters, shape, operation_model, mime_type) + self._serialize_body_params( + parameters, shape, operation_model, mime_type, request_id + ) ) ) @@ -1088,12 +1130,12 @@ def _serialize_body_params_to_xml( root.append(node) return root - def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element]): + def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element], request_id: str): # Add the response metadata here (it's not defined in the specs) # For the ec2 and the query protocol, the root cannot be None at this time. response_metadata = ETree.SubElement(root, "ResponseMetadata") - request_id = ETree.SubElement(response_metadata, "RequestId") - request_id.text = gen_amzn_requestid_long() + request_id_element = ETree.SubElement(response_metadata, "RequestId") + request_id_element.text = request_id class EC2ResponseSerializer(QueryResponseSerializer): @@ -1106,10 +1148,11 @@ class EC2ResponseSerializer(QueryResponseSerializer): def _serialize_error( self, error: ServiceException, - response: HttpResponse, + response: Response, shape: StructureShape, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: # EC2 errors look like: # <Response> @@ -1132,11 +1175,11 @@ def _serialize_error( errors_tag = ETree.SubElement(root, "Errors") error_tag = ETree.SubElement(errors_tag, "Error") self._add_error_tags(error, error_tag, mime_type) - request_id = ETree.SubElement(root, "RequestID") - request_id.text = gen_amzn_requestid_long() + request_id_element = ETree.SubElement(root, "RequestID") + request_id_element.text = request_id response.set_response(self._encode_payload(self._node_to_string(root, mime_type))) - def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element]): + def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element], request_id: str): # The EC2 protocol does not use the root output shape, therefore we need to remove the hierarchy level # below the root level if len(root) > 0: @@ -1147,8 +1190,8 @@ def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element]): # Add the requestId here (it's not defined in the specs) # For the ec2 and the query protocol, the root cannot be None at this time. 
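The effect of this EC2-specific hook is easiest to see as plain ElementTree code: the intermediate node that the generic XML serializer produced under the root is dissolved, its children are hoisted up one level, and the requestId element is appended. A simplified sketch of that transformation (names mirror the method below; error handling omitted):

    import xml.etree.ElementTree as ETree

    def flatten_ec2_root(root: ETree.Element, request_id: str) -> None:
        if len(root) > 0:
            output_node = root[0]
            for child in list(output_node):
                root.append(child)    # hoist the grandchildren to the root
            root.remove(output_node)  # drop the hierarchy level below the root
        request_id_element = ETree.SubElement(root, "requestId")
        request_id_element.text = request_id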
- request_id = ETree.SubElement(root, "requestId") - request_id.text = gen_amzn_requestid_long() + request_id_element = ETree.SubElement(root, "requestId") + request_id_element.text = request_id class JSONResponseSerializer(ResponseSerializer): @@ -1167,10 +1210,11 @@ class JSONResponseSerializer(ResponseSerializer): def _serialize_error( self, error: ServiceException, - response: HttpResponse, + response: Response, shape: StructureShape, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: body = dict() @@ -1198,7 +1242,7 @@ def _serialize_error( body["message"] = message if mime_type in self.CBOR_TYPES: - response.set_response(cbor2.dumps(body)) + response.set_response(cbor2_dumps(body, datetime_as_timestamp=True)) response.content_type = mime_type else: response.set_json(body) @@ -1206,11 +1250,12 @@ def _serialize_error( def _serialize_response( self, parameters: dict, - response: HttpResponse, + response: Response, shape: Optional[Shape], shape_members: dict, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: if mime_type in self.CBOR_TYPES: response.content_type = mime_type @@ -1219,18 +1264,23 @@ def _serialize_response( if json_version is not None: response.headers["Content-Type"] = "application/x-amz-json-%s" % json_version response.set_response( - self._serialize_body_params(parameters, shape, operation_model, mime_type) + self._serialize_body_params(parameters, shape, operation_model, mime_type, request_id) ) def _serialize_body_params( - self, params: dict, shape: Shape, operation_model: OperationModel, mime_type: str + self, + params: dict, + shape: Shape, + operation_model: OperationModel, + mime_type: str, + request_id: str, ) -> Optional[str]: body = {} if shape is not None: self._serialize(body, params, shape, None, mime_type) if mime_type in self.CBOR_TYPES: - return cbor2.dumps(body) + return cbor2_dumps(body, datetime_as_timestamp=True) else: return json.dumps(body) @@ -1312,13 +1362,12 @@ def _default_serialize(self, body: dict, value: Any, _, key: str, __): def _serialize_type_timestamp( self, body: dict, value: Any, shape: Shape, key: str, mime_type: str ): - timestamp_format = ( - shape.serialization.get("timestampFormat") - # CBOR always uses unix timestamp milliseconds - if mime_type not in self.CBOR_TYPES - else "unixtimestampmillis" - ) - body[key] = self._convert_timestamp_to_str(value, timestamp_format) + if mime_type in self.CBOR_TYPES: + # CBOR has native support for timestamps + body[key] = value + else: + timestamp_format = shape.serialization.get("timestampFormat") + body[key] = self._convert_timestamp_to_str(value, timestamp_format) def _serialize_type_blob( self, body: dict, value: Union[str, bytes], _, key: str, mime_type: str @@ -1329,10 +1378,12 @@ def _serialize_type_blob( body[key] = self._get_base64(value) def _prepare_additional_traits_in_response( - self, response: HttpResponse, operation_model: OperationModel + self, response: Response, operation_model: OperationModel, request_id: str ): - response.headers["x-amzn-requestid"] = gen_amzn_requestid_long() - response = super()._prepare_additional_traits_in_response(response, operation_model) + response.headers["x-amzn-requestid"] = request_id + response = super()._prepare_additional_traits_in_response( + response, operation_model, request_id + ) return response @@ -1345,7 +1396,7 @@ class RestJSONResponseSerializer(BaseRestResponseSerializer, JSONResponseSeriali """ def _serialize_content_type( - self, serialized: HttpResponse, shape: Shape, 
shape_members: dict, mime_type: str + self, serialized: Response, shape: Shape, shape_members: dict, mime_type: str ): """Set Content-Type to application/json for all structured bodies.""" payload = shape.serialization.get("payload") if shape is not None else None @@ -1362,39 +1413,97 @@ def _serialize_content_type( class S3ResponseSerializer(RestXMLResponseSerializer): """ The ``S3ResponseSerializer`` adds some minor logic to handle S3 specific peculiarities with the error response - serialization. + serialization and the root node tag. """ SUPPORTED_MIME_TYPES = [APPLICATION_XML, TEXT_XML] + _RESPONSE_ROOT_TAGS = { + "CompleteMultipartUploadOutput": "CompleteMultipartUploadResult", + "CopyObjectOutput": "CopyObjectResult", + "CreateMultipartUploadOutput": "InitiateMultipartUploadResult", + "DeleteObjectsOutput": "DeleteResult", + "GetBucketAccelerateConfigurationOutput": "AccelerateConfiguration", + "GetBucketAclOutput": "AccessControlPolicy", + "GetBucketAnalyticsConfigurationOutput": "AnalyticsConfiguration", + "GetBucketCorsOutput": "CORSConfiguration", + "GetBucketEncryptionOutput": "ServerSideEncryptionConfiguration", + "GetBucketIntelligentTieringConfigurationOutput": "IntelligentTieringConfiguration", + "GetBucketInventoryConfigurationOutput": "InventoryConfiguration", + "GetBucketLifecycleOutput": "LifecycleConfiguration", + "GetBucketLifecycleConfigurationOutput": "LifecycleConfiguration", + "GetBucketLoggingOutput": "BucketLoggingStatus", + "GetBucketMetricsConfigurationOutput": "MetricsConfiguration", + "NotificationConfigurationDeprecated": "NotificationConfiguration", + "GetBucketOwnershipControlsOutput": "OwnershipControls", + "GetBucketPolicyStatusOutput": "PolicyStatus", + "GetBucketReplicationOutput": "ReplicationConfiguration", + "GetBucketRequestPaymentOutput": "RequestPaymentConfiguration", + "GetBucketTaggingOutput": "Tagging", + "GetBucketVersioningOutput": "VersioningConfiguration", + "GetBucketWebsiteOutput": "WebsiteConfiguration", + "GetObjectAclOutput": "AccessControlPolicy", + "GetObjectLegalHoldOutput": "LegalHold", + "GetObjectLockConfigurationOutput": "ObjectLockConfiguration", + "GetObjectRetentionOutput": "Retention", + "GetObjectTaggingOutput": "Tagging", + "GetObjectAttributesOutput": "GetObjectAttributesResponse", + "GetPublicAccessBlockOutput": "PublicAccessBlockConfiguration", + "ListBucketAnalyticsConfigurationsOutput": "ListBucketAnalyticsConfigurationResult", + "ListBucketInventoryConfigurationsOutput": "ListInventoryConfigurationsResult", + "ListBucketMetricsConfigurationsOutput": "ListMetricsConfigurationsResult", + "ListBucketsOutput": "ListAllMyBucketsResult", + "ListMultipartUploadsOutput": "ListMultipartUploadsResult", + "ListObjectsOutput": "ListBucketResult", + "ListObjectsV2Output": "ListBucketResult", + "ListObjectVersionsOutput": "ListVersionsResult", + "ListPartsOutput": "ListPartsResult", + "UploadPartCopyOutput": "CopyPartResult", + } + + XML_NAMESPACE = "http://s3.amazonaws.com/doc/2006-03-01/" def _serialize_response( self, parameters: dict, - response: HttpResponse, + response: Response, shape: Optional[Shape], shape_members: dict, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: header_params, payload_params = self._partition_members(parameters, shape) self._process_header_members(header_params, response, shape) # "HEAD" responses are basically "GET" responses without the actual body. 
# Do not process the body payload in this case (setting a body could also manipulate the headers) - # If the response is a redirection, the body should be empty as well - if operation_model.http.get("method") != "HEAD" and not 300 <= response.status_code < 400: + # - If the response is a redirection, the body should be empty as well + # - If the response is from a "PUT" request, the body should be empty except if there's a specific "payload" + # field in the serialization (CopyObject and CopyObjectPart) + http_method = operation_model.http.get("method") + if ( + http_method != "HEAD" + and not 300 <= response.status_code < 400 + and not (http_method == "PUT" and shape and not shape.serialization.get("payload")) + ): self._serialize_payload( - payload_params, response, shape, shape_members, operation_model, mime_type + payload_params, + response, + shape, + shape_members, + operation_model, + mime_type, + request_id, ) self._serialize_content_type(response, shape, shape_members, mime_type) - self._prepare_additional_traits_in_response(response, operation_model) def _serialize_error( self, error: ServiceException, - response: HttpResponse, + response: Response, shape: StructureShape, operation_model: OperationModel, mime_type: str, + request_id: str, ) -> None: attr = ( {"xmlns": operation_model.metadata.get("xmlNamespace")} @@ -1403,20 +1512,41 @@ def _serialize_error( ) root = ETree.Element("Error", attr) self._add_error_tags(error, root, mime_type) - request_id = ETree.SubElement(root, "RequestId") - request_id.text = gen_amzn_requestid_long() - self._add_additional_error_tags(error, root, shape, mime_type) + request_id_element = ETree.SubElement(root, "RequestId") + request_id_element.text = request_id + + header_params, payload_params = self._partition_members(vars(error), shape) + self._add_additional_error_tags(payload_params, root, shape, mime_type) + self._process_header_members(header_params, response, shape) response.set_response(self._encode_payload(self._node_to_string(root, mime_type))) + def _serialize_body_params( + self, + params: dict, + shape: Shape, + operation_model: OperationModel, + mime_type: str, + request_id: str, + ) -> Optional[str]: + root = self._serialize_body_params_to_xml(params, shape, operation_model, mime_type) + # S3 does not follow the specs on the root tag name for 41 of 44 operations + root.tag = self._RESPONSE_ROOT_TAGS.get(root.tag, root.tag) + self._prepare_additional_traits_in_xml(root, request_id) + return self._node_to_string(root, mime_type) + def _prepare_additional_traits_in_response( - self, response: HttpResponse, operation_model: OperationModel + self, response: Response, operation_model: OperationModel, request_id: str ): """Adds the request ID to the headers (in contrast to the body - as in the Query protocol).""" - response = super()._prepare_additional_traits_in_response(response, operation_model) - response.headers[ - "x-amz-id-2" - ] = f"MzRISOwyjmnup{response.headers['x-amz-request-id']}7/JypPGXLh0OVFGcJaaO3KW/hRAqKOpIEEp" + response = super()._prepare_additional_traits_in_response( + response, operation_model, request_id + ) + # s3 extended Request ID + # mostly used internally on AWS and corresponds to a HostId + response.headers["x-amz-id-2"] = ( + "s9lzHYrFp76ZVxRcpX9+5cjAnEH2ROuNkd2BHfIa6UkFVdtjf5mKR3/eTPFvsiP/XV/VLi31234=" + ) return response def _add_error_tags( @@ -1438,13 +1568,23 @@ def _add_error_tags( def _create_empty_node(xmlnode: ETree.Element, name: str) -> None: ETree.SubElement(xmlnode, name) - def 
_prepare_additional_traits_in_xml(self, root: Optional[ETree.Element]):
+    def _prepare_additional_traits_in_xml(self, root: Optional[ETree.Element], request_id: str):
         # some tools (Serverless) require a newline after the "<?xml ...>\n" preamble line, e.g., for LocationConstraint
         if root and not root.tail:
             root.tail = "\n"
+        root.attrib["xmlns"] = self.XML_NAMESPACE
 
-class SqsResponseSerializer(QueryResponseSerializer):
+    @staticmethod
+    def _timestamp_iso8601(value: datetime) -> str:
+        """
+        This is very specific to S3: S3 returns an ISO8601 timestamp but with milliseconds always set to 000.
+        Some SDKs are very picky about the length.
+        """
+        return value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
+
+
+class SqsQueryResponseSerializer(QueryResponseSerializer):
     """
     Unfortunately, SQS uses a rare interpretation of the XML protocol: It uses HTML entities within XML tag text nodes.
     For example:
@@ -1458,34 +1598,155 @@ class SqsResponseSerializer(QueryResponseSerializer):
     - These double-escapes are corrected by replacing such strings with their original.
     """
 
+    # these are deleted from the JSON specs, but need to be kept for legacy reasons (sent in 'x-amzn-query-error')
+    QUERY_PREFIXED_ERRORS = {
+        "BatchEntryIdsNotDistinct",
+        "BatchRequestTooLong",
+        "EmptyBatchRequest",
+        "InvalidBatchEntryId",
+        "MessageNotInflight",
+        "PurgeQueueInProgress",
+        "QueueDeletedRecently",
+        "TooManyEntriesInBatchRequest",
+        "UnsupportedOperation",
+    }
+
+    # Some error codes changed between JSON and query, and we need a way to map them for legacy reasons
+    JSON_TO_QUERY_ERROR_CODES = {
+        "InvalidParameterValueException": "InvalidParameterValue",
+        "MissingRequiredParameterException": "MissingParameter",
+        "AccessDeniedException": "AccessDenied",
+        "QueueDoesNotExist": "AWS.SimpleQueueService.NonExistentQueue",
+        "QueueNameExists": "QueueAlreadyExists",
+    }
+
+    SENDER_FAULT_ERRORS = (
+        QUERY_PREFIXED_ERRORS
+        | JSON_TO_QUERY_ERROR_CODES.keys()
+        | {"OverLimit", "ResourceNotFoundException"}
+    )
+
     def _default_serialize(self, xmlnode: ETree.Element, params: str, _, name: str, __) -> None:
-        """Ensures that XML text nodes use HTML entities instead of " or \r"""
+        """
+        Ensures that we "mark" characters in the node's text which need to be specifically encoded.
+        This is necessary to easily identify these specific characters later, after the standard XML serialization is
+        done, while not replacing any other occurrences of these characters which might appear in the serialized string.
+        """
         node = ETree.SubElement(xmlnode, name)
-        node.text = str(params).replace('"', "&quot;").replace("\r", "&#xD;")
+        node.text = (
+            str(params)
+            .replace('"', '__marker__"__marker__')
+            .replace("\r", "__marker__-r__marker__")
+        )
 
     def _node_to_string(self, root: Optional[ETree.ElementTree], mime_type: str) -> Optional[str]:
-        """
-        Replaces the double-escaped HTML entities with their correct HTML entity (basically reverts the escaping in
-        the serialization of the used XML framework).
- """ + """Replaces the previously "marked" characters with their encoded value.""" generated_string = super()._node_to_string(root, mime_type) - return ( - to_bytes( - to_str(generated_string) - # Undo the second escaping of the & - .replace("&quot;", """) - # Undo the second escaping of the carriage return (\r) - .replace("&#xD;", " ") + if generated_string is None: + return None + generated_string = to_str(generated_string) + # Undo the second escaping of the & + # Undo the second escaping of the carriage return (\r) + if mime_type == APPLICATION_JSON: + # At this point the json was already dumped and escaped, so we replace directly. + generated_string = generated_string.replace(r"__marker__\"__marker__", r"\"").replace( + "__marker__-r__marker__", r"\r" ) - if generated_string is not None - else None - ) + else: + generated_string = generated_string.replace('__marker__"__marker__', """).replace( + "__marker__-r__marker__", " " + ) + + return to_bytes(generated_string) + + def _add_error_tags( + self, error: ServiceException, error_tag: ETree.Element, mime_type: str + ) -> None: + """The SQS API stubs is now generated from JSON specs, and some fields have been modified""" + code_tag = ETree.SubElement(error_tag, "Code") + + if error.code in self.JSON_TO_QUERY_ERROR_CODES: + error_code = self.JSON_TO_QUERY_ERROR_CODES[error.code] + elif error.code in self.QUERY_PREFIXED_ERRORS: + error_code = f"AWS.SimpleQueueService.{error.code}" + else: + error_code = error.code + code_tag.text = error_code + message = self._get_error_message(error) + if message: + self._default_serialize(error_tag, message, None, "Message", mime_type) + if error.code in self.SENDER_FAULT_ERRORS or error.sender_fault: + # The sender fault is either not set or "Sender" + self._default_serialize(error_tag, "Sender", None, "Type", mime_type) + + +class SqsJsonResponseSerializer(JSONResponseSerializer): + # those are deleted from the JSON specs, but need to be kept for legacy reason (sent in 'x-amzn-query-error') + QUERY_PREFIXED_ERRORS = { + "BatchEntryIdsNotDistinct", + "BatchRequestTooLong", + "EmptyBatchRequest", + "InvalidBatchEntryId", + "MessageNotInflight", + "PurgeQueueInProgress", + "QueueDeletedRecently", + "TooManyEntriesInBatchRequest", + "UnsupportedOperation", + } + + # Some error code changed between JSON and query, and we need to have a way to map it for legacy reason + JSON_TO_QUERY_ERROR_CODES = { + "InvalidParameterValueException": "InvalidParameterValue", + "MissingRequiredParameterException": "MissingParameter", + "AccessDeniedException": "AccessDenied", + "QueueDoesNotExist": "AWS.SimpleQueueService.NonExistentQueue", + "QueueNameExists": "QueueAlreadyExists", + } + + def _serialize_error( + self, + error: ServiceException, + response: Response, + shape: StructureShape, + operation_model: OperationModel, + mime_type: str, + request_id: str, + ) -> None: + """ + Overrides _serialize_error as SQS has a special header for query API legacy reason: 'x-amzn-query-error', + which contained the exception code as well as a Sender field. 
+ Ex: 'x-amzn-query-error': 'InvalidParameterValue;Sender' + """ + # TODO: for body["__type"] = error.code, it seems AWS differs from what we send for SQS + # AWS: "com.amazon.coral.service#InvalidParameterValueException" + # or AWS: "com.amazonaws.sqs#BatchRequestTooLong" + # LocalStack: "InvalidParameterValue" + super()._serialize_error(error, response, shape, operation_model, mime_type, request_id) + # We need to add a prefix to certain errors, as they have been deleted in the specs. These will not change + if error.code in self.JSON_TO_QUERY_ERROR_CODES: + code = self.JSON_TO_QUERY_ERROR_CODES[error.code] + elif error.code in self.QUERY_PREFIXED_ERRORS: + code = f"AWS.SimpleQueueService.{error.code}" + else: + code = error.code + + response.headers["x-amzn-query-error"] = f"{code};Sender" -def gen_amzn_requestid_long(): - return "".join([random.choice(REQUEST_ID_CHARACTERS) for _ in range(0, 52)]) +def gen_amzn_requestid(): + """ + Generate generic AWS request ID. + 3 uses a different format and set of request Ids. + Examples: + 996d38a0-a4e9-45de-bad4-480cd962d208 + b9260553-df1b-4db6-ae41-97b89a5f85ea + """ + return long_uid() + + +@functools.cache def create_serializer(service: ServiceModel) -> ResponseSerializer: """ Creates the right serializer for the given service model. @@ -1500,7 +1761,10 @@ def create_serializer(service: ServiceModel) -> ResponseSerializer: # specific services as close as possible. # Therefore, the service-specific serializer implementations (basically the implicit / informally more specific # protocol implementation) has precedence over the more general protocol-specific serializers. - service_specific_serializers = {"sqs": SqsResponseSerializer, "s3": S3ResponseSerializer} + service_specific_serializers = { + "sqs": {"json": SqsJsonResponseSerializer, "query": SqsQueryResponseSerializer}, + "s3": {"rest-xml": S3ResponseSerializer}, + } protocol_specific_serializers = { "query": QueryResponseSerializer, "json": JSONResponseSerializer, @@ -1509,15 +1773,20 @@ def create_serializer(service: ServiceModel) -> ResponseSerializer: "ec2": EC2ResponseSerializer, } - # Try to select a service-specific serializer implementation - if service.service_name in service_specific_serializers: - return service_specific_serializers[service.service_name]() + # Try to select a service- and protocol-specific serializer implementation + if ( + service.service_name in service_specific_serializers + and service.protocol in service_specific_serializers[service.service_name] + ): + return service_specific_serializers[service.service_name][service.protocol]() else: # Otherwise, pick the protocol-specific serializer for the protocol of the service return protocol_specific_serializers[service.protocol]() -def aws_response_serializer(service: str, operation: str): +def aws_response_serializer( + service_name: str, operation: str, protocol: Optional[ProtocolName] = None +): """ A decorator for an HTTP route that can serialize return values or exceptions into AWS responses. This can be used to create AWS request handlers in a convenient way. Example usage:: @@ -1533,13 +1802,15 @@ def my_route(request: Request): return ListQueuesResult(QueueUrls=...) # <- object from the SQS API will be serialized - :param service: the AWS service (e.g., "sqs", "lambda") + :param service_name: the AWS service (e.g., "sqs", "lambda") + :param protocol: the protocol of the AWS service to serialize to. If not set (by default) the default protocol + of the service in botocore is used. 
:param operation: the operation name (e.g., "ReceiveMessage", "ListFunctions") :returns: a decorator """ def _decorate(fn): - service_model = load_service(service) + service_model = load_service(service_name, protocol=protocol) operation_model = service_model.operation_model(operation) serializer = create_serializer(service_model) @@ -1556,6 +1827,10 @@ def _proxy(*args, **kwargs) -> WerkzeugResponse: else: raise ValueError(f"could not find Request in signature of function {fn}") + # TODO: we have no context here + # TODO: maybe try to get the request ID from the headers first before generating a new one + request_id = gen_amzn_requestid() + try: response = fn(*args, **kwargs) @@ -1563,13 +1838,13 @@ def _proxy(*args, **kwargs) -> WerkzeugResponse: return response return serializer.serialize_to_response( - response, - operation_model, - request.headers, + response, operation_model, request.headers, request_id ) except ServiceException as e: - return serializer.serialize_error_to_response(e, operation_model, request.headers) + return serializer.serialize_error_to_response( + e, operation_model, request.headers, request_id + ) except Exception as e: return serializer.serialize_error_to_response( CommonServiceException( @@ -1577,6 +1852,7 @@ def _proxy(*args, **kwargs) -> WerkzeugResponse: ), operation_model, request.headers, + request_id, ) return _proxy diff --git a/localstack-core/localstack/aws/protocol/service_router.py b/localstack-core/localstack/aws/protocol/service_router.py new file mode 100644 index 0000000000000..9ff78708cf27e --- /dev/null +++ b/localstack-core/localstack/aws/protocol/service_router.py @@ -0,0 +1,401 @@ +import logging +from typing import NamedTuple, Optional, Set + +from botocore.model import ServiceModel +from werkzeug.exceptions import RequestEntityTooLarge +from werkzeug.http import parse_dict_header + +from localstack.aws.spec import ( + ServiceCatalog, + ServiceModelIdentifier, + get_service_catalog, +) +from localstack.http import Request +from localstack.services.s3.utils import uses_host_addressing +from localstack.services.sqs.utils import is_sqs_queue_url +from localstack.utils.strings import to_bytes + +LOG = logging.getLogger(__name__) + + +class _ServiceIndicators(NamedTuple): + """ + Encapsulates the different fields that might indicate which service a request is targeting. + + This class does _not_ contain any data which is parsed from the body of the request in order to defer or even avoid + processing the body. 
+ """ + + # AWS service's "signing name" - Contained in the Authorization header + # (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) + signing_name: Optional[str] = None + # Target prefix as defined in the service specs for non-rest protocols - Contained in the X-Amz-Target header + target_prefix: Optional[str] = None + # Targeted operation as defined in the service specs for non-rest protocols - Contained in the X-Amz-Target header + operation: Optional[str] = None + # Host field of the HTTP request + host: Optional[str] = None + # Path of the HTTP request + path: Optional[str] = None + + +def _extract_service_indicators(request: Request) -> _ServiceIndicators: + """Extracts all different fields that might indicate which service a request is targeting.""" + x_amz_target = request.headers.get("x-amz-target") + authorization = request.headers.get("authorization") + + signing_name = None + if authorization: + try: + auth_type, auth_info = authorization.split(None, 1) + auth_type = auth_type.lower().strip() + if auth_type == "aws4-hmac-sha256": + values = parse_dict_header(auth_info) + _, _, _, signing_name, _ = values["Credential"].split("/") + except (ValueError, KeyError): + LOG.debug("auth header could not be parsed for service routing: %s", authorization) + pass + if x_amz_target: + if "." in x_amz_target: + target_prefix, operation = x_amz_target.split(".", 1) + else: + target_prefix = None + operation = x_amz_target + else: + target_prefix, operation = None, None + + return _ServiceIndicators(signing_name, target_prefix, operation, request.host, request.path) + + +signing_name_path_prefix_rules = { + # custom rules based on URI path prefixes that are not easily generalizable + "apigateway": { + "/v2": ServiceModelIdentifier("apigatewayv2"), + }, + "appconfig": { + "/configuration": ServiceModelIdentifier("appconfigdata"), + }, + "bedrock": { + "/guardrail/": ServiceModelIdentifier("bedrock-runtime"), + "/model/": ServiceModelIdentifier("bedrock-runtime"), + "/async-invoke": ServiceModelIdentifier("bedrock-runtime"), + }, + "execute-api": { + "/@connections": ServiceModelIdentifier("apigatewaymanagementapi"), + "/participant": ServiceModelIdentifier("connectparticipant"), + "*": ServiceModelIdentifier("iot"), + }, + "ses": { + "/v2": ServiceModelIdentifier("sesv2"), + "/v1": ServiceModelIdentifier("pinpoint-email"), + }, + "greengrass": { + "/greengrass/v2/": ServiceModelIdentifier("greengrassv2"), + }, + "cloudsearch": { + "/2013-01-01": ServiceModelIdentifier("cloudsearchdomain"), + }, + "s3": {"/v20180820": ServiceModelIdentifier("s3control")}, + "iot1click": { + "/projects": ServiceModelIdentifier("iot1click-projects"), + "/devices": ServiceModelIdentifier("iot1click-devices"), + }, + "es": { + "/2015-01-01": ServiceModelIdentifier("es"), + "/2021-01-01": ServiceModelIdentifier("opensearch"), + }, + "sagemaker": { + "/endpoints": ServiceModelIdentifier("sagemaker-runtime"), + "/human-loops": ServiceModelIdentifier("sagemaker-a2i-runtime"), + }, +} + + +def custom_signing_name_rules(signing_name: str, path: str) -> Optional[ServiceModelIdentifier]: + """ + Rules which are based on the signing name (in the auth header) and the request path. + """ + rules = signing_name_path_prefix_rules.get(signing_name) + + if not rules: + if signing_name == "servicecatalog": + if path == "/": + # servicecatalog uses the protocol json (only uses root-path URIs, i.e. 
only /)
+                return ServiceModelIdentifier("servicecatalog")
+            else:
+                # servicecatalog-appregistry uses rest-json (only uses non-root-path request URIs)
+                return ServiceModelIdentifier("servicecatalog-appregistry")
+        return
+
+    for prefix, service_model_identifier in rules.items():
+        if path.startswith(prefix):
+            return service_model_identifier
+
+    return rules.get("*", ServiceModelIdentifier(signing_name))
+
+
+def custom_host_addressing_rules(host: str) -> Optional[ServiceModelIdentifier]:
+    """
+    Rules based on the host header of the request, which is typically the data plane of a service.
+
+    Some services are added through a patch in ext.
+    """
+    if ".lambda-url." in host:
+        return ServiceModelIdentifier("lambda")
+
+    if ".s3-website." in host:
+        return ServiceModelIdentifier("s3")
+
+
+def custom_path_addressing_rules(path: str) -> Optional[ServiceModelIdentifier]:
+    """
+    Rules which are only based on the request path.
+    """
+
+    if is_sqs_queue_url(path):
+        return ServiceModelIdentifier("sqs", protocol="query")
+
+    if path.startswith("/2015-03-31/functions/"):
+        return ServiceModelIdentifier("lambda")
+
+
+def legacy_s3_rules(request: Request) -> Optional[ServiceModelIdentifier]:
+    """
+    *Legacy* rules which allow us to fall back to S3 if no other service was matched.
+    All rules which are implemented here should be removed once we make sure they would not break any use cases.
+    """
+
+    path = request.path
+    method = request.method
+
+    # TODO The remaining rules here are special S3 rules - it needs to be discussed how these should be handled.
+    #  Some are similar to other rules and not that greedy, others are nearly general fallbacks.
+    stripped = path.strip("/")
+    if method in ["GET", "HEAD"] and stripped:
+        # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`
+        return ServiceModelIdentifier("s3")
+
+    # detect S3 URLs
+    if stripped and "/" not in stripped:
+        if method == "PUT":
+            # assume that this is an S3 PUT bucket request with URL path `/<bucket>`
+            return ServiceModelIdentifier("s3")
+        if method == "POST" and "key" in request.values:
+            # assume that this is an S3 POST request with form parameters or multipart form in the body
+            return ServiceModelIdentifier("s3")
+
+    # detect S3 requests sent from aws-cli using --no-sign-request option
+    if "aws-cli/" in str(request.user_agent):
+        return ServiceModelIdentifier("s3")
+
+    # detect S3 pre-signed URLs (v2 and v4)
+    values = request.values
+    if any(
+        value in values
+        for value in [
+            "AWSAccessKeyId",
+            "Signature",
+            "X-Amz-Algorithm",
+            "X-Amz-Credential",
+            "X-Amz-Date",
+            "X-Amz-Expires",
+            "X-Amz-SignedHeaders",
+            "X-Amz-Signature",
+        ]
+    ):
+        return ServiceModelIdentifier("s3")
+
+    # S3 delete object requests
+    if method == "POST" and "delete" in values:
+        data_bytes = to_bytes(request.data)
+        if b"<Delete" in data_bytes and b"<Key>" in data_bytes:
+            return ServiceModelIdentifier("s3")
+
+    # Put Object API can have multiple keys
+    if stripped.count("/") >= 1 and method == "PUT":
+        # assume that this is an S3 PUT bucket object request with URL path `/<bucket>/object`
+        # or `/<bucket>/object/object1/+`
+        return ServiceModelIdentifier("s3")
+
+    # detect S3 requests with "AWS id:key" Auth headers
+    auth_header = request.headers.get("Authorization") or ""
+    if auth_header.startswith("AWS "):
+        return ServiceModelIdentifier("s3")
+
+    if uses_host_addressing(request.headers):
+        # Note: This needs to be the last rule (and therefore is not in the host rules), since it is incredibly greedy
+        return ServiceModelIdentifier("s3")
+
+
+def resolve_conflicts(
+    candidates: Set[ServiceModelIdentifier], request: Request
+) -> ServiceModelIdentifier:
+    """
+    Some service definitions are overlapping to a point where they are _not_ distinguishable at all
+    (e.g. ``DescribeEndpoints`` in timestream-query and timestream-write).
+    These conflicts need to be resolved manually.
+    """
+    service_name_candidates = {service.name for service in candidates}
+    if service_name_candidates == {"timestream-query", "timestream-write"}:
+        return ServiceModelIdentifier("timestream-query")
+    if service_name_candidates == {"docdb", "neptune", "rds"}:
+        return ServiceModelIdentifier("rds")
+    if service_name_candidates == {"sqs"}:
+        # SQS now has 2 different specs for the `query` and `json` protocol. With our current implementation of the
+        # parser and serializer, we need to have 2 different service names for them, but they share one provider
+        # implementation. `sqs` represents the `json` protocol spec, and `sqs-query` the `query` protocol
+        # (default again in botocore starting with 1.32.6).
+        # The `application/x-amz-json-1.0` header is mandatory for requests targeting SQS with the `json` protocol. We
+        # can safely route them to the `sqs` JSON parser/serializer. If not present, route the request to the
+        # sqs-query protocol.
+        content_type = request.headers.get("Content-Type")
+        return (
+            ServiceModelIdentifier("sqs")
+            if content_type == "application/x-amz-json-1.0"
+            else ServiceModelIdentifier("sqs", "query")
+        )
+
+
+def determine_aws_service_model_for_data_plane(
+    request: Request, services: ServiceCatalog = None
+) -> Optional[ServiceModel]:
+    """
+    A stripped-down version of ``determine_aws_service_model`` which only checks hostname indicators for
+    the AWS data plane, such as s3 websites, lambda function URLs, or API gateway routes.
+    """
+    custom_host_match = custom_host_addressing_rules(request.host)
+    if custom_host_match:
+        services = services or get_service_catalog()
+        return services.get(*custom_host_match)
+
+
+def determine_aws_service_model(
+    request: Request, services: ServiceCatalog = None
+) -> Optional[ServiceModel]:
+    """
+    Tries to determine the name of the AWS service an incoming request is targeting.
+    :param request: to determine the target service name of
+    :param services: service catalog (can be handed in for caching purposes)
+    :return: service name string (or None if the target service could not be determined exactly)
+    """
+    services = services or get_service_catalog()
+    signing_name, target_prefix, operation, host, path = _extract_service_indicators(request)
+    candidates = set()
+
+    # 1. check the signing names
+    if signing_name:
+        signing_name_candidates = services.by_signing_name(signing_name)
+        if len(signing_name_candidates) == 1:
+            # a unique signing-name -> service name mapping is the case for ~75% of service operations
+            return services.get(*signing_name_candidates[0])
+
+        # try to find a match with the custom signing name rules
+        custom_match = custom_signing_name_rules(signing_name, path)
+        if custom_match:
+            return services.get(*custom_match)
+
+        # still ambiguous - add the services to the list of candidates
+        candidates.update(signing_name_candidates)
+
+    # 2.
check the target prefix + if target_prefix and operation: + target_candidates = services.by_target_prefix(target_prefix) + if len(target_candidates) == 1: + # a unique target prefix + return services.get(*target_candidates[0]) + + # still ambiguous - add the services to the list of candidates + candidates.update(target_candidates) + + # exclude services where the operation is not contained in the service spec + for service_identifier in list(candidates): + service = services.get(*service_identifier) + if operation not in service.operation_names: + candidates.remove(service_identifier) + else: + # exclude services which have a target prefix (the current request does not have one) + for service_identifier in list(candidates): + service = services.get(*service_identifier) + if service.metadata.get("targetPrefix") is not None: + candidates.remove(service_identifier) + + if len(candidates) == 1: + service_identifier = candidates.pop() + return services.get(*service_identifier) + + # 3. check the path if it is set and not a trivial root path + if path and path != "/": + # try to find a match with the custom path rules + custom_path_match = custom_path_addressing_rules(path) + if custom_path_match: + return services.get(*custom_path_match) + + # 4. check the host (custom host addressing rules) + if host: + # iterate over the service spec's endpoint prefix + for prefix, services_per_prefix in services.endpoint_prefix_index.items(): + # this prevents a virtual host addressed bucket to be wrongly recognized + if host.startswith(f"{prefix}.") and ".s3." not in host: + if len(services_per_prefix) == 1: + return services.get(*services_per_prefix[0]) + candidates.update(services_per_prefix) + + custom_host_match = custom_host_addressing_rules(host) + if custom_host_match: + return services.get(*custom_host_match) + + if request.shallow: + # from here on we would need access to the request body, which doesn't exist for shallow requests like + # WebsocketRequests. + return None + + # 5. check the query / form-data + try: + values = request.values + if "Action" in values: + # query / ec2 protocol requests always have an action and a version (the action is more significant) + query_candidates = [ + service + for service in services.by_operation(values["Action"]) + if service.protocol in ("ec2", "query") + ] + + if len(query_candidates) == 1: + return services.get(*query_candidates[0]) + + if "Version" in values: + for service_identifier in list(query_candidates): + service_model = services.get(*service_identifier) + if values["Version"] != service_model.api_version: + # the combination of Version and Action is not unique, add matches to the candidates + query_candidates.remove(service_identifier) + + if len(query_candidates) == 1: + return services.get(*query_candidates[0]) + + candidates.update(query_candidates) + + except RequestEntityTooLarge: + # Some requests can be form-urlencoded but also contain binary data, which will fail the form parsing (S3 can + # do this). In that case, skip this step and continue to try to determine the service name. The exception is + # RequestEntityTooLarge even if the error is due to failed decoding. + LOG.debug( + "Failed to determine AWS service from request body because the form could not be parsed", + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + # 6. resolve service spec conflicts + resolved_conflict = resolve_conflicts(candidates, request) + if resolved_conflict: + return services.get(*resolved_conflict) + + # 7. 
check the legacy S3 rules in the end + legacy_match = legacy_s3_rules(request) + if legacy_match: + return services.get(*legacy_match) + + if signing_name: + return services.get(name=signing_name) + if candidates: + return services.get(*candidates.pop()) + return None diff --git a/localstack/aws/protocol/validate.py b/localstack-core/localstack/aws/protocol/validate.py similarity index 98% rename from localstack/aws/protocol/validate.py rename to localstack-core/localstack/aws/protocol/validate.py index c462833aa065b..30d1be4355fb0 100644 --- a/localstack/aws/protocol/validate.py +++ b/localstack-core/localstack/aws/protocol/validate.py @@ -1,4 +1,5 @@ """Slightly extends the ``botocore.validate`` package to provide better integration with our parser/serializer.""" + from typing import Any, Dict, List, NamedTuple from botocore.model import OperationModel, Shape @@ -15,7 +16,7 @@ class Error(NamedTuple): Attributes: reason The error type - name The name of the parameter the error occured at + name The name of the parameter the error occurred at attributes Error type-specific attributes """ @@ -25,7 +26,6 @@ class Error(NamedTuple): class ParameterValidationError(Exception): - error: Error def __init__(self, error: Error) -> None: diff --git a/localstack/aws/scaffold.py b/localstack-core/localstack/aws/scaffold.py similarity index 91% rename from localstack/aws/scaffold.py rename to localstack-core/localstack/aws/scaffold.py index 6a9511b2161a2..3d9c0e3e55db4 100644 --- a/localstack/aws/scaffold.py +++ b/localstack-core/localstack/aws/scaffold.py @@ -60,6 +60,7 @@ def html_to_rst(html: str): doc = doc.replace("\_", "_") # noqa: W605 doc = doc.replace("\|", "|") # noqa: W605 doc = doc.replace("\ ", " ") # noqa: W605 + doc = doc.replace("\\", "\\\\") # noqa: W605 rst = doc.strip() return rst @@ -141,11 +142,11 @@ def dependencies(self) -> List[str]: def _print_structure_declaration(self, output, doc=True, quote_types=False): if self.is_exception: - self._print_as_class(output, "ServiceException", doc) + self._print_as_class(output, "ServiceException", doc, quote_types) return if any(map(is_keyword, self.shape.members.keys())): - self._print_as_typed_dict(output) + self._print_as_typed_dict(output, doc, quote_types) return if self.is_request: @@ -166,8 +167,8 @@ def _print_as_class(self, output, base: str, doc=True, quote_types=False): if self.is_exception: error_spec = self.shape.metadata.get("error", {}) output.write(f' code: str = "{error_spec.get("code", self.shape.name)}"\n') - output.write(f' sender_fault: bool = {error_spec.get("senderFault", False)}\n') - output.write(f' status_code: int = {error_spec.get("httpStatusCode", 400)}\n') + output.write(f" sender_fault: bool = {error_spec.get('senderFault', False)}\n") + output.write(f" status_code: int = {error_spec.get('httpStatusCode', 400)}\n") elif not self.shape.members: output.write(" pass\n") @@ -221,19 +222,22 @@ def _print_as_class(self, output, base: str, doc=True, quote_types=False): def _print_as_typed_dict(self, output, doc=True, quote_types=False): name = to_valid_python_name(self.shape.name) - q = '"' if quote_types else "" output.write('%s = TypedDict("%s", {\n' % (name, name)) for k, v in self.shape.members.items(): + member_name = to_valid_python_name(v.name) + # check if the member name is the same as the type name (recursive types need to use forward references) + recursive_type = name == member_name + q = '"' if quote_types or recursive_type else "" if k in self.shape.required_members: if 
v.serialization.get("eventstream"): - output.write(f' "{k}": Iterator[{q}{to_valid_python_name(v.name)}{q}],\n') + output.write(f' "{k}": Iterator[{q}{member_name}{q}],\n') else: - output.write(f' "{k}": {q}{to_valid_python_name(v.name)}{q},\n') + output.write(f' "{k}": {q}{member_name}{q},\n') else: if v.serialization.get("eventstream"): - output.write(f' "{k}": Iterator[{q}{to_valid_python_name(v.name)}{q}],\n') + output.write(f' "{k}": Iterator[{q}{member_name}{q}],\n') else: - output.write(f' "{k}": Optional[{q}{to_valid_python_name(v.name)}{q}],\n') + output.write(f' "{k}": Optional[{q}{member_name}{q}],\n') output.write("}, total=False)") def print_shape_doc(self, output, shape): @@ -261,7 +265,7 @@ def print_declaration(self, output, doc=True, quote_types=False): ) elif isinstance(shape, StringShape): if shape.enum: - output.write(f"class {to_valid_python_name(shape.name)}(str):\n") + output.write(f"class {to_valid_python_name(shape.name)}(StrEnum):\n") for value in shape.enum: name = to_valid_python_name(value) output.write(f' {name} = "{value}"\n') @@ -311,13 +315,11 @@ def get_order(self): def generate_service_types(output, service: ServiceModel, doc=True): - output.write("import sys\n") - output.write("from typing import Dict, List, Optional, Iterator, Iterable, IO, Union\n") output.write("from datetime import datetime\n") - output.write("if sys.version_info >= (3, 8):\n") - output.write(" from typing import TypedDict\n") - output.write("else:\n") - output.write(" from typing_extensions import TypedDict\n") + output.write("from enum import StrEnum\n") + output.write( + "from typing import Dict, List, Optional, Iterator, Iterable, IO, Union, TypedDict\n" + ) output.write("\n") output.write( "from localstack.aws.api import handler, RequestContext, ServiceException, ServiceRequest" @@ -409,7 +411,7 @@ def generate_service_api(output, service: ServiceModel, doc=True): type_name = to_valid_python_name(m_shape.name) if m == streaming_payload_member: type_name = f"IO[{type_name}]" - parameters[xform_name(m)] = f"{type_name} = None" + parameters[xform_name(m)] = f"{type_name} | None = None" if any(map(is_bad_param_name, parameters.keys())): # if we cannot render the parameter name, don't expand the parameters in the handler @@ -419,6 +421,12 @@ def generate_service_api(output, service: ServiceModel, doc=True): param_list = ", ".join([f"{k}: {v}" for k, v in parameters.items()]) output.write(f' @handler("{operation.name}")\n') + # add the **kwargs in the end + if param_list: + param_list += ", **kwargs" + else: + param_list = "**kwargs" + output.write( f" def {fn_name}(self, context: RequestContext, {param_list}) -> {output_shape}:\n" ) @@ -465,7 +473,9 @@ def scaffold(): help="whether or not to save the result into the api directory", ) @click.option( - "--path", default="./localstack/aws/api", help="the path where the api should be saved" + "--path", + default="./localstack-core/localstack/aws/api", + help="the path where the api should be saved", ) def generate(service: str, doc: bool, save: bool, path: str): """ @@ -495,26 +505,7 @@ def generate_code(service_name: str, doc: bool = False) -> str: output = io.StringIO() generate_service_types(output, model, doc=doc) generate_service_api(output, model, doc=doc) - - code = output.getvalue() - - try: - import autoflake - import isort - from black import FileMode, format_str - - # try to remove unused imports - code = autoflake.fix_code(code, remove_all_unused_imports=True) - - # try to format with black - code = format_str(code, 
mode=FileMode(line_length=100)) - - # try to sort imports - code = isort.code(code, config=isort.Config(profile="black", line_length=100)) - except Exception: - pass - - return code + return output.getvalue() def create_code_directory(service_name: str, code: str, base_path: str): @@ -537,7 +528,7 @@ def create_code_directory(service_name: str, code: str, base_path: str): @click.option("--doc/--no-doc", default=False, help="whether or not to generate docstrings") @click.option( "--path", - default="./localstack/aws/api", + default="./localstack-core/localstack/aws/api", help="the path in which to upgrade ASF APIs", ) def upgrade(path: str, doc: bool = False): diff --git a/localstack/aws/serving/__init__.py b/localstack-core/localstack/aws/serving/__init__.py similarity index 100% rename from localstack/aws/serving/__init__.py rename to localstack-core/localstack/aws/serving/__init__.py diff --git a/localstack-core/localstack/aws/serving/asgi.py b/localstack-core/localstack/aws/serving/asgi.py new file mode 100644 index 0000000000000..3bbeefd49944f --- /dev/null +++ b/localstack-core/localstack/aws/serving/asgi.py @@ -0,0 +1,5 @@ +from rolo.gateway.asgi import AsgiGateway + +__all__ = [ + "AsgiGateway", +] diff --git a/localstack-core/localstack/aws/serving/edge.py b/localstack-core/localstack/aws/serving/edge.py new file mode 100644 index 0000000000000..0e204a4d96f88 --- /dev/null +++ b/localstack-core/localstack/aws/serving/edge.py @@ -0,0 +1,119 @@ +import logging +import threading +from typing import List + +from rolo.gateway.wsgi import WsgiGateway + +from localstack import config +from localstack.aws.app import LocalstackAwsGateway +from localstack.config import HostAndPort +from localstack.runtime import get_current_runtime +from localstack.runtime.shutdown import ON_AFTER_SERVICE_SHUTDOWN_HANDLERS +from localstack.utils.collections import ensure_list + +LOG = logging.getLogger(__name__) + + +def serve_gateway( + listen: HostAndPort | List[HostAndPort], use_ssl: bool, asynchronous: bool = False +): + """ + Implementation of the edge.do_start_edge_proxy interface to start a Hypercorn server instance serving the + LocalstackAwsGateway. 
+ """ + + gateway = get_current_runtime().components.gateway + + listens = ensure_list(listen) + + if config.GATEWAY_SERVER == "hypercorn": + return _serve_hypercorn(gateway, listens, use_ssl, asynchronous) + elif config.GATEWAY_SERVER == "werkzeug": + return _serve_werkzeug(gateway, listens, use_ssl, asynchronous) + elif config.GATEWAY_SERVER == "twisted": + return _serve_twisted(gateway, listens, use_ssl, asynchronous) + else: + raise ValueError(f"Unknown gateway server type {config.GATEWAY_SERVER}") + + +def _serve_werkzeug( + gateway: LocalstackAwsGateway, listen: List[HostAndPort], use_ssl: bool, asynchronous: bool +): + from werkzeug.serving import ThreadedWSGIServer + + from .werkzeug import CustomWSGIRequestHandler + + params = { + "app": WsgiGateway(gateway), + "handler": CustomWSGIRequestHandler, + } + + if use_ssl: + from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available + + install_predefined_cert_if_available() + serial_number = listen[0].port + _, cert_file_name, key_file_name = create_ssl_cert(serial_number=serial_number) + params["ssl_context"] = (cert_file_name, key_file_name) + + threads = [] + servers: List[ThreadedWSGIServer] = [] + + for host_port in listen: + kwargs = dict(params) + kwargs["host"] = host_port.host + kwargs["port"] = host_port.port + server = ThreadedWSGIServer(**kwargs) + servers.append(server) + threads.append( + threading.Thread( + target=server.serve_forever, name=f"werkzeug-server-{host_port.port}", daemon=True + ) + ) + + def _shutdown_servers(): + LOG.debug("[shutdown] Shutting down gateway servers") + for _srv in servers: + _srv.shutdown() + + ON_AFTER_SERVICE_SHUTDOWN_HANDLERS.register(_shutdown_servers) + + for thread in threads: + thread.start() + + if not asynchronous: + for thread in threads: + return thread.join() + + # FIXME: thread handling is a bit wonky + return threads[0] + + +def _serve_hypercorn( + gateway: LocalstackAwsGateway, listen: List[HostAndPort], use_ssl: bool, asynchronous: bool +): + from localstack.http.hypercorn import GatewayServer + + # start serving gateway + server = GatewayServer(gateway, listen, use_ssl, config.GATEWAY_WORKER_COUNT) + server.start() + + # with the current way the infrastructure is started, this is the easiest way to shut down the server correctly + # FIXME: but the infrastructure shutdown should be much cleaner, core components like the gateway should be handled + # explicitly by the thing starting the components, not implicitly by the components. 
+ def _shutdown_gateway():
+ LOG.debug("[shutdown] Shutting down gateway server")
+ server.shutdown()
+
+ ON_AFTER_SERVICE_SHUTDOWN_HANDLERS.register(_shutdown_gateway)
+ if not asynchronous:
+ server.join()
+ return server._thread
+
+
+def _serve_twisted(
+ gateway: LocalstackAwsGateway, listen: List[HostAndPort], use_ssl: bool, asynchronous: bool
+):
+ from .twisted import serve_gateway
+
+ return serve_gateway(gateway, listen, use_ssl, asynchronous)
diff --git a/localstack-core/localstack/aws/serving/hypercorn.py b/localstack-core/localstack/aws/serving/hypercorn.py
new file mode 100644
index 0000000000000..450d2664badc9
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/hypercorn.py
@@ -0,0 +1,47 @@
+import asyncio
+from typing import Any, Optional, Tuple
+
+from hypercorn import Config
+from hypercorn.asyncio import serve as serve_hypercorn
+
+from localstack import constants
+
+from ..gateway import Gateway
+from .asgi import AsgiGateway
+
+
+def serve(
+ gateway: Gateway,
+ host: str = "localhost",
+ port: int = constants.DEFAULT_PORT_EDGE,
+ use_reloader: bool = True,
+ ssl_creds: Optional[Tuple[Any, Any]] = None,
+ **kwargs,
+) -> None:
+ """
+ Serve the given Gateway through a hypercorn server and block until it is completed.
+
+ :param gateway: the Gateway instance to serve
+ :param host: the host to expose the server on
+ :param port: the port to expose the server on
+ :param use_reloader: whether to use the reloader
+ :param ssl_creds: the ssl credentials (tuple of certfile and keyfile)
+ :param kwargs: any other parameters that can be passed to the hypercorn.Config object
+ """
+ config = Config()
+ config.h11_pass_raw_headers = True
+ config.bind = f"{host}:{port}"
+ config.use_reloader = use_reloader
+
+ if ssl_creds:
+ cert_file_name, key_file_name = ssl_creds
+ if cert_file_name:
+ kwargs["certfile"] = cert_file_name
+ if key_file_name:
+ kwargs["keyfile"] = key_file_name
+
+ for k, v in kwargs.items():
+ setattr(config, k, v)
+
+ loop = asyncio.new_event_loop()
+ loop.run_until_complete(serve_hypercorn(AsgiGateway(gateway, event_loop=loop), config))
diff --git a/localstack-core/localstack/aws/serving/twisted.py b/localstack-core/localstack/aws/serving/twisted.py
new file mode 100644
index 0000000000000..549150a73ae61
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/twisted.py
@@ -0,0 +1,173 @@
+"""
+Bindings to serve LocalStack using twisted.
+"""
+
+import logging
+import time
+from typing import List
+
+from rolo.gateway import Gateway
+from rolo.serving.twisted import TwistedGateway
+from twisted.internet import endpoints, interfaces, reactor, ssl
+from twisted.protocols.policies import ProtocolWrapper, WrappingFactory
+from twisted.protocols.tls import BufferingTLSTransport, TLSMemoryBIOFactory
+from twisted.python.threadpool import ThreadPool
+
+from localstack import config
+from localstack.config import HostAndPort
+from localstack.runtime.shutdown import ON_AFTER_SERVICE_SHUTDOWN_HANDLERS
+from localstack.utils.patch import patch
+from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available
+from localstack.utils.threads import start_worker_thread
+
+LOG = logging.getLogger(__name__)
+
+
+class TLSMultiplexer(ProtocolWrapper):
+ """
+ Custom protocol to multiplex HTTPS and HTTP connections over the same port. This is the equivalent of
+ ``DuplexSocket``, but since twisted uses its own SSL layer and doesn't use ``ssl.SSLSocket``, we need to implement
+ the multiplexing behavior in the Twisted layer.
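+
+ The same detection idea, as a standalone hedged sketch over a plain socket (sniff_is_tls is a
+ hypothetical helper, not part of this module):
+
+     import socket
+
+     def sniff_is_tls(sock: socket.socket) -> bool:
+         # peek at the first byte without consuming it; 0x16 marks a TLS handshake record
+         first = sock.recv(1, socket.MSG_PEEK)
+         return bool(first) and first[0] == 0x16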
+
+ The basic idea is to defer the ``makeConnection`` call until the first data is received, and then re-configure
+ the underlying ``wrappedProtocol`` if needed with a TLS wrapper.
+ """
+
+ tlsProtocol = BufferingTLSTransport
+
+ def __init__(
+ self,
+ factory: "WrappingFactory",
+ wrappedProtocol: interfaces.IProtocol,
+ ):
+ super().__init__(factory, wrappedProtocol)
+ self._isInitialized = False
+ self._isTLS = None
+ self._negotiatedProtocol = None
+
+ def makeConnection(self, transport):
+ self.connected = 1
+ self.transport = transport
+ self.factory.registerProtocol(self) # this is idempotent
+ # we defer the actual makeConnection call to the first invocation of dataReceived
+
+ def dataReceived(self, data: bytes) -> None:
+ if self._isInitialized:
+ super().dataReceived(data)
+ return
+
+ # once the first data has been received, we can check whether it's a TLS handshake and then run the
+ # actual makeConnection procedure.
+ self._isInitialized = True
+ self._isTLS = data[0] == 22 # 0x16 is the marker byte identifying a TLS handshake
+
+ if self._isTLS:
+ # wrap the protocol again in the tls protocol
+ self.wrappedProtocol = self.tlsProtocol(self.factory, self.wrappedProtocol)
+ else:
+ if data.startswith(b"PRI * HTTP/2"):
+ # TODO: can we do proper protocol negotiation like in ALPN?
+ # in the TLS case, this is determined by the ALPN procedure by OpenSSL.
+ self._negotiatedProtocol = b"h2"
+
+ # now that we've set the real wrapped protocol, run the make connection procedure
+ super().makeConnection(self.transport)
+ super().dataReceived(data)
+
+ @property
+ def negotiatedProtocol(self) -> str | None:
+ if self._negotiatedProtocol:
+ return self._negotiatedProtocol
+ return self.wrappedProtocol.negotiatedProtocol
+
+
+class TLSMultiplexerFactory(TLSMemoryBIOFactory):
+ protocol = TLSMultiplexer
+
+
+def stop_thread_pool(self: ThreadPool, stop, timeout: float = None):
+ """
+ Patch for a custom shutdown procedure for a ThreadPool that waits a given amount of time for all threads.
+
+ :param self: the pool to shut down
+ :param stop: the original function
+ :param timeout: the maximum amount of time to wait
+ """
+ # copied from ThreadPool.stop()
+ if self.joined:
+ return
+ if not timeout:
+ stop()
+ return
+
+ self.joined = True
+ self.started = False
+ self._team.quit()
+
+ # our own joining logic with timeout
+ remaining = timeout
+ total_waited = 0
+
+ for thread in self.threads:
+ then = time.time()
+
+ # LOG.info("[shutdown] Joining thread %s", thread)
+ thread.join(remaining)
+
+ waited = time.time() - then
+ total_waited += waited
+ remaining -= waited
+
+ if thread.is_alive():
+ LOG.warning(
+ "[shutdown] Request thread %s still alive after %.2f seconds",
+ thread,
+ total_waited,
+ )
+
+ if remaining <= 0:
+ remaining = 0
+
+
+def serve_gateway(
+ gateway: Gateway, listen: List[HostAndPort], use_ssl: bool, asynchronous: bool = False
+):
+ """
+ Serve a Gateway instance using twisted.
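+
+ The ``stop_thread_pool`` patch above shares one timeout budget across all joins. The core
+ pattern, as a standalone hedged sketch (join_all is a hypothetical helper):
+
+     import threading
+     import time
+
+     def join_all(threads: list[threading.Thread], timeout: float) -> None:
+         # one shared deadline instead of a full timeout per thread
+         deadline = time.time() + timeout
+         for t in threads:
+             t.join(max(0.0, deadline - time.time()))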
+ """ + # setup reactor + reactor.suggestThreadPoolSize(config.GATEWAY_WORKER_COUNT) + thread_pool = reactor.getThreadPool() + patch(thread_pool.stop)(stop_thread_pool) + + def _shutdown_reactor(): + LOG.debug("[shutdown] Shutting down twisted reactor serving the gateway") + thread_pool.stop(timeout=10) + reactor.stop() + + ON_AFTER_SERVICE_SHUTDOWN_HANDLERS.register(_shutdown_reactor) + + # setup twisted webserver Site + site = TwistedGateway(gateway) + + # configure ssl + if use_ssl: + install_predefined_cert_if_available() + serial_number = listen[0].port + _, cert_file_name, key_file_name = create_ssl_cert(serial_number=serial_number) + context_factory = ssl.DefaultOpenSSLContextFactory(key_file_name, cert_file_name) + context_factory.getContext().use_certificate_chain_file(cert_file_name) + protocol_factory = TLSMultiplexerFactory(context_factory, False, site) + else: + protocol_factory = site + + # add endpoint for each host/port combination + for host_and_port in listen: + # TODO: interface = host? + endpoint = endpoints.TCP4ServerEndpoint(reactor, host_and_port.port) + endpoint.listen(protocol_factory) + + if asynchronous: + return start_worker_thread(reactor.run) + else: + return reactor.run() diff --git a/localstack-core/localstack/aws/serving/werkzeug.py b/localstack-core/localstack/aws/serving/werkzeug.py new file mode 100644 index 0000000000000..22e351adc4842 --- /dev/null +++ b/localstack-core/localstack/aws/serving/werkzeug.py @@ -0,0 +1,58 @@ +import ssl +from typing import TYPE_CHECKING, Any, Optional, Tuple + +from rolo.gateway import Gateway +from rolo.gateway.wsgi import WsgiGateway +from werkzeug import run_simple +from werkzeug.serving import WSGIRequestHandler + +if TYPE_CHECKING: + from _typeshed.wsgi import WSGIEnvironment + +from localstack import constants + + +def serve( + gateway: Gateway, + host: str = "localhost", + port: int = constants.DEFAULT_PORT_EDGE, + use_reloader: bool = True, + ssl_creds: Optional[Tuple[Any, Any]] = None, + **kwargs, +) -> None: + """ + Serve a Gateway as a WSGI application through werkzeug. This is mostly for development purposes. 
+
+ :param gateway: the Gateway to serve
+ :param host: the host to expose the server to
+ :param port: the port to expose the server to
+ :param use_reloader: whether to autoreload the server on changes
+ :param ssl_creds: the ssl credentials (tuple of certfile and keyfile)
+ :param kwargs: any other arguments that can be passed to `werkzeug.run_simple`
+ """
+ kwargs["threaded"] = kwargs.get("threaded", True) # make sure requests don't block
+ kwargs["ssl_context"] = ssl_creds
+ kwargs.setdefault("request_handler", CustomWSGIRequestHandler)
+ run_simple(host, port, WsgiGateway(gateway), use_reloader=use_reloader, **kwargs)
+
+
+class CustomWSGIRequestHandler(WSGIRequestHandler):
+ def make_environ(self) -> "WSGIEnvironment":
+ environ = super().make_environ()
+
+ # restore RAW_URI from the requestline, which will be something like ``GET //foo/?foo=bar%20ed HTTP/1.1``
+ environ["RAW_URI"] = " ".join(self.requestline.split(" ")[1:-1])
+
+ # restore raw headers for rolo
+ environ["asgi.headers"] = [
+ (k.encode("latin-1"), v.encode("latin-1")) for k, v in self.headers.raw_items()
+ ]
+
+ # the default WSGIRequestHandler does not understand our DuplexSocket, so it will always set https, which we
+ # correct here
+ try:
+ is_ssl = isinstance(self.request, ssl.SSLSocket)
+ except AttributeError:
+ is_ssl = False
+ environ["wsgi.url_scheme"] = "https" if is_ssl else "http"
+
+ return environ
diff --git a/localstack-core/localstack/aws/serving/wsgi.py b/localstack-core/localstack/aws/serving/wsgi.py
new file mode 100644
index 0000000000000..8ae26b3d8c9df
--- /dev/null
+++ b/localstack-core/localstack/aws/serving/wsgi.py
@@ -0,0 +1,5 @@
+from rolo.gateway.wsgi import WsgiGateway
+
+__all__ = [
+ "WsgiGateway",
+]
diff --git a/localstack/aws/skeleton.py b/localstack-core/localstack/aws/skeleton.py
similarity index 75%
rename from localstack/aws/skeleton.py
rename to localstack-core/localstack/aws/skeleton.py
index 33dcfade62124..9d66fa4b375c1 100644
--- a/localstack/aws/skeleton.py
+++ b/localstack-core/localstack/aws/skeleton.py
@@ -7,15 +7,16 @@
from localstack.aws.api import (
CommonServiceException,
- HttpResponse,
RequestContext,
ServiceException,
)
from localstack.aws.api.core import ServiceRequest, ServiceRequestHandler, ServiceResponse
from localstack.aws.protocol.parser import create_parser
-from localstack.aws.protocol.serializer import create_serializer
+from localstack.aws.protocol.serializer import ResponseSerializer, create_serializer
from localstack.aws.spec import load_service
+from localstack.http import Response
from localstack.utils import analytics
+from localstack.utils.coverage_docs import get_coverage_link_for_service

LOG = logging.getLogger(__name__)
@@ -123,21 +124,21 @@ class Skeleton:
def __init__(self, service: ServiceModel, implementation: Union[Any, DispatchTable]):
self.service = service
- self.parser = create_parser(service)
- self.serializer = create_serializer(service)
if isinstance(implementation, dict):
self.dispatch_table = implementation
else:
self.dispatch_table = create_dispatch_table(implementation)

- def invoke(self, context: RequestContext) -> HttpResponse:
+ def invoke(self, context: RequestContext) -> Response:
+ serializer = create_serializer(context.service)
+
if context.operation and context.service_request:
# if the parsed request is already set in the context, re-use them
operation, instance = context.operation, context.service_request
else:
# otherwise, parse the incoming HTTPRequest
- operation, instance = self.parser.parse(context.request)
+ operation, instance = create_parser(context.service).parse(context.request)
context.operation = operation

try:
@@ -150,13 +151,15 @@ def invoke(self, context: RequestContext) -> HttpResponse:
)
raise NotImplementedError

- return self.dispatch_request(context, instance)
+ return self.dispatch_request(serializer, context, instance)
except ServiceException as e:
- return self.on_service_exception(context, e)
- except NotImplementedError:
- return self.on_not_implemented_error(context)
+ return self.on_service_exception(serializer, context, e)
+ except NotImplementedError as e:
+ return self.on_not_implemented_error(serializer, context, e)

- def dispatch_request(self, context: RequestContext, instance: ServiceRequest) -> HttpResponse:
+ def dispatch_request(
+ self, serializer: ResponseSerializer, context: RequestContext, instance: ServiceRequest
+ ) -> Response:
operation = context.operation
handler = self.dispatch_table[operation.name]
@@ -165,46 +168,53 @@ def dispatch_request(self, context: RequestContext, instance: ServiceRequest) ->
result = handler(context, instance) or {}

# if the service handler returned an HTTP response, forego serialization and return immediately
- if isinstance(result, HttpResponse):
+ if isinstance(result, Response):
return result

context.service_response = result

- # Serialize result dict to an HTTPResponse and return it
- return self.serializer.serialize_to_response(result, operation, context.request.headers)
+ # Serialize result dict to a Response and return it
+ return serializer.serialize_to_response(
+ result, operation, context.request.headers, context.request_id
+ )

def on_service_exception(
- self, context: RequestContext, exception: ServiceException
- ) -> HttpResponse:
+ self, serializer: ResponseSerializer, context: RequestContext, exception: ServiceException
+ ) -> Response:
"""
Called by invoke if the handler of the operation raised a ServiceException.
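+
+ A hedged sketch of the handler side that triggers this path (error code and message are
+ illustrative only; CommonServiceException is imported at the top of this module):
+
+     raise CommonServiceException(
+         "NoSuchBucket", "The specified bucket does not exist", status_code=404
+     )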
+ :param serializer: the serializer which should be used to serialize the exception
:param context: the request context
:param exception: the exception that was raised
- :return: an HttpResponse object
+ :return: a Response object
"""
context.service_exception = exception

- return self.serializer.serialize_error_to_response(
- exception, context.operation, context.request.headers
+ return serializer.serialize_error_to_response(
+ exception, context.operation, context.request.headers, context.request_id
)

- def on_not_implemented_error(self, context: RequestContext) -> HttpResponse:
+ def on_not_implemented_error(
+ self,
+ serializer: ResponseSerializer,
+ context: RequestContext,
+ exception: NotImplementedError,
+ ) -> Response:
"""
Called by invoke if either the dispatch table did not contain an entry for the operation, or the service
provider raised a NotImplementedError
+ :param serializer: the serializer which should be used to serialize the NotImplementedError
:param context: the request context
- :return: an HttpResponse object
+ :param exception: the NotImplementedError that was raised
+ :return: a Response object
"""
operation = context.operation
- serializer = self.serializer
action_name = operation.name
service_name = operation.service_model.service_name
- message = (
- f"API action '{action_name}' for service '{service_name}' not yet implemented or pro feature"
- f" - check https://docs.localstack.cloud/user-guide/aws/feature-coverage for further information"
- )
+ exception_message: str | None = exception.args[0] if exception.args else None
+ message = exception_message or get_coverage_link_for_service(service_name, action_name)
LOG.info(message)
error = CommonServiceException("InternalFailure", message, status_code=501)
# record event
@@ -213,4 +223,6 @@ def on_not_implemented_error(self, context: RequestContext) -> HttpResponse:
)
context.service_exception = error

- return serializer.serialize_error_to_response(error, operation, context.request.headers)
+ return serializer.serialize_error_to_response(
+ error, operation, context.request.headers, context.request_id
+ )
diff --git a/localstack-core/localstack/aws/spec-patches.json b/localstack-core/localstack/aws/spec-patches.json
new file mode 100644
index 0000000000000..37cc8a5c27001
--- /dev/null
+++ b/localstack-core/localstack/aws/spec-patches.json
@@ -0,0 +1,1356 @@
+{
+ "s3/2006-03-01/service-2": [
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchBucket/members/BucketName",
+ "value": {
+ "shape": "BucketName"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchBucket/error",
+ "value": {
+ "httpStatusCode": 404
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchLifecycleConfiguration",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "<p>The lifecycle configuration does not exist</p>",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidBucketName",
+ "value": {
+ "type": "structure",
+ "members": {
+ "BucketName": {
+ "shape": "BucketName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 400
+ },
+ "documentation": "<p>The specified bucket is not valid.</p>",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BucketRegion",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/BucketContentType",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/HeadBucketOutput",
+ "value": {
+ "type": "structure",
+ "members": {
+
"BucketRegion": { + "shape": "BucketRegion", + "location": "header", + "locationName": "x-amz-bucket-region" + }, + "BucketContentType": { + "shape": "BucketContentType", + "location": "header", + "locationName": "content-type" + } + } + } + }, + { + "op": "add", + "path": "/operations/HeadBucket/output", + "value": { + "shape": "HeadBucketOutput" + } + }, + { + "op": "add", + "path": "/operations/PutBucketPolicy/http/responseCode", + "value": 204 + }, + { + "op": "add", + "path": "/shapes/GetBucketLocationOutput/payload", + "value": "LocationConstraint" + }, + { + "op": "add", + "path": "/shapes/BucketAlreadyOwnedByYou/members/BucketName", + "value": { + "shape": "BucketName" + } + }, + { + "op": "add", + "path": "/shapes/BucketAlreadyOwnedByYou/error", + "value": { + "httpStatusCode": 409 + } + }, + { + "op": "add", + "path": "/shapes/GetObjectOutput/members/StatusCode", + "value": { + "shape": "GetObjectResponseStatusCode", + "location": "statusCode" + } + }, + { + "op": "add", + "path": "/shapes/HeadObjectOutput/members/StatusCode", + "value": { + "shape": "GetObjectResponseStatusCode", + "location": "statusCode" + } + }, + { + "op": "add", + "path": "/shapes/NoSuchKey/members/Key", + "value": { + "shape": "ObjectKey" + } + }, + { + "op": "add", + "path": "/shapes/NoSuchKey/error", + "value": { + "httpStatusCode": 404 + } + }, + { + "op": "add", + "path": "/shapes/NoSuchKey/members/DeleteMarker", + "value": { + "shape": "DeleteMarker", + "location": "header", + "locationName": "x-amz-delete-marker" + } + }, + { + "op": "add", + "path": "/shapes/NoSuchKey/members/VersionId", + "value": { + "shape": "ObjectVersionId", + "location": "header", + "locationName": "x-amz-version-id" + } + }, + { + "op": "add", + "path": "/shapes/NoSuchVersion", + "value": { + "type": "structure", + "members": { + "VersionId": { + "shape": "ObjectVersionId" + }, + "Key": { + "shape": "ObjectKey" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p></p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/PreconditionFailed", + "value": { + "type": "structure", + "members": { + "Condition": { + "shape": "IfCondition" + } + }, + "error": { + "httpStatusCode": 412 + }, + "documentation": "<p>At least one of the pre-conditions you specified did not hold</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/IfCondition", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/InvalidRange", + "value": { + "type": "structure", + "members": { + "ActualObjectSize": { + "shape": "ObjectSize" + }, + "RangeRequested": { + "shape": "ContentRange" + } + }, + "error": { + "httpStatusCode": 416 + }, + "documentation": "<p>The requested range is not satisfiable</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/HeadObjectOutput/members/Expires", + "value": { + "shape": "Expires", + "documentation": "<p>The date and time at which the object is no longer cacheable.</p>", + "location": "header", + "locationName": "expires" + } + }, + { + "op": "add", + "path": "/shapes/GetObjectOutput/members/Expires", + "value": { + "shape": "Expires", + "documentation": "<p>The date and time at which the object is no longer cacheable.</p>", + "location": "header", + "locationName": "expires" + } + }, + { + "op": "add", + "path": "/shapes/RestoreObjectOutputStatusCode", + "value": { + "type": "integer" + } + }, + { + "op": "add", + "path": "/shapes/RestoreObjectOutput/members/StatusCode", + "value": { + "shape": "RestoreObjectOutputStatusCode", 
+ "location": "statusCode" + } + }, + { + "op": "add", + "path": "/shapes/InvalidArgument", + "value": { + "type": "structure", + "members": { + "ArgumentName": { + "shape": "ArgumentName" + }, + "ArgumentValue": { + "shape": "ArgumentValue" + }, + "HostId": { + "shape": "HostId" + } + }, + "error": { + "httpStatusCode": 400 + }, + "documentation": "<p>Invalid Argument</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/ArgumentName", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/ArgumentValue", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/SignatureDoesNotMatch", + "value": { + "type": "structure", + "members": { + "AWSAccessKeyId": { + "shape": "AWSAccessKeyId" + }, + "CanonicalRequest": { + "shape": "CanonicalRequest" + }, + "CanonicalRequestBytes": { + "shape": "CanonicalRequestBytes" + }, + "HostId": { + "shape": "HostId" + }, + "SignatureProvided": { + "shape": "SignatureProvided" + }, + "StringToSign": { + "shape": "StringToSign" + }, + "StringToSignBytes": { + "shape": "StringToSignBytes" + } + }, + "error": { + "httpStatusCode": 403 + }, + "documentation": "<p>The request signature we calculated does not match the signature you provided. Check your key and signing method.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/AccessDenied", + "value": { + "type": "structure", + "members": { + "Expires": { + "shape": "Expires" + }, + "ServerTime": { + "shape": "ServerTime" + }, + "X_Amz_Expires": { + "shape": "X-Amz-Expires", + "locationName":"X-Amz-Expires" + }, + "HostId": { + "shape": "HostId" + }, + "HeadersNotSigned": { + "shape": "HeadersNotSigned" + } + }, + "error": { + "httpStatusCode": 403 + }, + "documentation": "<p>Request has expired</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/AWSAccessKeyId", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/HostId", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/HeadersNotSigned", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/SignatureProvided", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/StringToSign", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/StringToSignBytes", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/CanonicalRequest", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/CanonicalRequestBytes", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/ServerTime", + "value": { + "type": "timestamp" + } + }, + { + "op": "add", + "path": "/shapes/X-Amz-Expires", + "value": { + "type": "integer" + } + }, + { + "op": "add", + "path": "/shapes/AuthorizationQueryParametersError", + "value": { + "type": "structure", + "members": { + "HostId": { + "shape": "HostId" + } + }, + "documentation": "<p>Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/operations/PostObject", + "value": { + "name":"PostObject", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"PostObjectRequest"}, + "output":{"shape":"PostResponse"}, + "documentationUrl":"http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html", + "documentation":"<p>The POST operation adds an 
object to a specified bucket by using HTML forms. POST is an alternate form of PUT that enables browser-based uploads as a way of putting objects in buckets. Parameters that are passed to PUT through HTTP Headers are instead passed as form fields to POST in the multipart/form-data encoded message body. To add an object to a bucket, you must have WRITE access on the bucket. Amazon S3 never stores partial objects. If you receive a successful response, you can be confident that the entire object was stored.<p>" + } + }, + { + "op": "add", + "path": "/shapes/PostObjectRequest", + "value": { + "type":"structure", + "required":[ + "Bucket" + ], + "members":{ + "Body":{ + "shape":"Body", + "documentation":"<p>Object data.</p>", + "streaming":true + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"<p>The bucket name to which the PUT action was initiated. </p> <p>When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html\">Using access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form <code> <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html\">Using Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>", + "location":"uri", + "locationName":"Bucket" + } + }, + "payload":"Body" + } + }, + { + "op": "add", + "path": "/shapes/PostResponse", + "value": { + "type":"structure", + "members":{ + "StatusCode": { + "shape": "GetObjectResponseStatusCode", + "location": "statusCode" + }, + "Location":{ + "shape":"Location", + "documentation":"<p>The URI that identifies the newly created object.</p>" + }, + "LocationHeader":{ + "shape":"Location", + "documentation":"<p>The URI that identifies the newly created object.</p>", + "location": "header", + "locationName": "Location" + }, + "Bucket":{ + "shape":"BucketName", + "documentation":"<p>The name of the bucket that contains the newly created object. Does not return the access point ARN or access point alias if used.</p> <p>When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html\">Using access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form <code> <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html\">Using Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>" + }, + "Key":{ + "shape":"ObjectKey", + "documentation":"<p>The object key of the newly created object.</p>" + }, + "Expiration": { + "shape": "Expiration", + "documentation": "<p>If the expiration is configured for the object (see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html\">PutBucketLifecycleConfiguration</a>), the response includes this header. It includes the <code>expiry-date</code> and <code>rule-id</code> key-value pairs that provide information about object expiration. The value of the <code>rule-id</code> is URL-encoded.</p>", + "location": "header", + "locationName": "x-amz-expiration" + }, + "ETag":{ + "shape":"ETag", + "documentation":"<p>Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits. For more information about how the entity tag is calculated, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html\">Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>" + }, + "ETagHeader":{ + "shape":"ETag", + "documentation":"<p>Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits. For more information about how the entity tag is calculated, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html\">Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>", + "location": "header", + "locationName": "ETag" + }, + "ChecksumCRC32": { + "shape": "ChecksumCRC32", + "documentation": "<p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums\"> Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>", + "location": "header", + "locationName": "x-amz-checksum-crc32" + }, + "ChecksumCRC32C": { + "shape": "ChecksumCRC32C", + "documentation": "<p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. 
For more information about how checksums are calculated with multipart uploads, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums\"> Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>", + "location": "header", + "locationName": "x-amz-checksum-crc32c" + }, + "ChecksumCRC64NVME":{ + "shape":"ChecksumCRC64NVME", + "documentation":"<p>This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit <code>CRC64NVME</code> checksum of the object. The <code>CRC64NVME</code> checksum is always a full object checksum. For more information, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html\">Checking object integrity in the Amazon S3 User Guide</a>.</p>", + "location":"header", + "locationName":"x-amz-checksum-crc64nvme" + }, + "ChecksumSHA1": { + "shape": "ChecksumSHA1", + "documentation": "<p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums\"> Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>", + "location": "header", + "locationName": "x-amz-checksum-sha1" + }, + "ChecksumSHA256": { + "shape": "ChecksumSHA256", + "documentation": "<p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums\"> Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>", + "location": "header", + "locationName": "x-amz-checksum-sha256" + }, + "ChecksumType":{ + "shape":"ChecksumType", + "documentation":"<p>This header specifies the checksum type of the object, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum that was specified. If the checksum type doesn’t match the checksum type that was specified for the object during the <code>CreateMultipartUpload</code> request, it’ll result in a <code>BadDigest</code> error. For more information, see Checking object integrity in the Amazon S3 User Guide. </p>", + "location":"header", + "locationName":"x-amz-checksum-type" + }, + "ServerSideEncryption": { + "shape": "ServerSideEncryption", + "documentation": "<p>If you specified server-side encryption either with an Amazon Web Services KMS key or Amazon S3-managed encryption key in your PUT request, the response includes this header. 
It confirms the encryption algorithm that Amazon S3 used to encrypt the object.</p>", + "location": "header", + "locationName": "x-amz-server-side-encryption" + }, + "VersionId": { + "shape": "ObjectVersionId", + "documentation": "<p>Version of the object.</p>", + "location": "header", + "locationName": "x-amz-version-id" + }, + "SSECustomerAlgorithm": { + "shape": "SSECustomerAlgorithm", + "documentation": "<p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p>", + "location": "header", + "locationName": "x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5": { + "shape": "SSECustomerKeyMD5", + "documentation": "<p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.</p>", + "location": "header", + "locationName": "x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId": { + "shape": "SSEKMSKeyId", + "documentation": "<p>If <code>x-amz-server-side-encryption</code> is present and has the value of <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key that was used for the object. </p>", + "location": "header", + "locationName": "x-amz-server-side-encryption-aws-kms-key-id" + }, + "SSEKMSEncryptionContext": { + "shape": "SSEKMSEncryptionContext", + "documentation": "<p>If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.</p>", + "location": "header", + "locationName": "x-amz-server-side-encryption-context" + }, + "BucketKeyEnabled": { + "shape": "BucketKeyEnabled", + "documentation": "<p>Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).</p>", + "location": "header", + "locationName": "x-amz-server-side-encryption-bucket-key-enabled" + }, + "RequestCharged": { + "shape": "RequestCharged", + "location": "header", + "locationName": "x-amz-request-charged" + } + } + } + }, + { + "op": "add", + "path": "/shapes/NoSuchWebsiteConfiguration", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p>The specified bucket does not have a website configuration</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/NoSuchUpload/members/UploadId", + "value": { + "shape": "MultipartUploadId" + } + }, + { + "op": "add", + "path": "/shapes/NoSuchUpload/error", + "value": { + "httpStatusCode": 404 + } + }, + { + "op": "add", + "path": "/shapes/ReplicationConfigurationNotFoundError", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p>The replication configuration was not found.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/BucketCannedACL/enum/4", + "value": "log-delivery-write", + "documentation": "<p>Not included in the specs, but valid value according to the docs: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl</p>" + }, + { + "op": "add", + "path": 
"/shapes/BadRequest", + "value": { + "type": "structure", + "members": { + "HostId": { + "shape": "HostId" + } + }, + "documentation": "<p>Insufficient information. Origin request header needed.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/AccessForbidden", + "value": { + "type": "structure", + "members": { + "HostId": { + "shape": "HostId" + }, + "Method": { + "shape": "HttpMethod" + }, + "ResourceType": { + "shape": "ResourceType" + } + }, + "error": { + "httpStatusCode": 403 + }, + "documentation": "<p>CORSResponse</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/HttpMethod", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/ResourceType", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/NoSuchCORSConfiguration", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p>The CORS configuration does not exist</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/MissingSecurityHeader", + "value": { + "type": "structure", + "members": { + "MissingHeaderName": { + "shape": "MissingHeaderName" + } + }, + "error": { + "httpStatusCode": 400 + }, + "documentation": "<p>Your request was missing a required header</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/MissingHeaderName", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/InvalidPartOrder", + "value": { + "type": "structure", + "members": { + "UploadId": { + "shape": "MultipartUploadId" + } + }, + "error": { + "httpStatusCode": 400 + }, + "documentation": "<p>The list of parts was not in ascending order. Parts must be ordered by part number.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/InvalidStorageClass", + "value": { + "type": "structure", + "members": { + "StorageClassRequested": { + "shape": "StorageClass" + } + }, + "error": { + "httpStatusCode": 400 + }, + "documentation": "<p>The storage class you specified is not valid</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/ListObjectsOutput/members/BucketRegion", + "value": { + "shape": "BucketRegion", + "location": "header", + "locationName": "x-amz-bucket-region" + } + }, + { + "op": "add", + "path": "/shapes/ListObjectsV2Output/members/BucketRegion", + "value": { + "shape": "BucketRegion", + "location": "header", + "locationName": "x-amz-bucket-region" + } + }, + { + "op": "add", + "path": "/shapes/ResourceType", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/MethodNotAllowed", + "value": { + "type": "structure", + "members": { + "Method": { + "shape": "HttpMethod" + }, + "ResourceType": { + "shape": "ResourceType" + }, + "DeleteMarker": { + "shape": "DeleteMarker", + "location": "header", + "locationName": "x-amz-delete-marker" + }, + "VersionId": { + "shape": "ObjectVersionId", + "location": "header", + "locationName": "x-amz-version-id" + }, + "Allow": { + "shape": "HttpMethod", + "location": "header", + "locationName": "allow" + } + }, + "error": { + "httpStatusCode": 405 + }, + "documentation": "<p>The specified method is not allowed against this resource.</p>", + "exception": true + } + }, + { + "op": "remove", + "path": "/shapes/ListBucketsOutput/members/Buckets" + }, + { + "op": "add", + "path": "/shapes/ListBucketsOutput/members/Buckets", + "value": { + "shape":"Buckets", + "documentation":"<p>The list of 
buckets owned by the requester.</p>" + } + }, + { + "op": "remove", + "path": "/shapes/ListObjectsOutput/members/Contents" + }, + { + "op": "add", + "path": "/shapes/ListObjectsOutput/members/Contents", + "value": { + "shape":"ObjectList", + "documentation":"<p>Metadata about each object returned.</p>" + } + }, + { + "op": "remove", + "path": "/shapes/ListObjectsV2Output/members/Contents" + }, + { + "op": "add", + "path": "/shapes/ListObjectsV2Output/members/Contents", + "value": { + "shape":"ObjectList", + "documentation":"<p>Metadata about each object returned.</p>" + } + }, + { + "op": "add", + "path": "/shapes/CrossLocationLoggingProhibitted", + "value": { + "type": "structure", + "members": { + "TargetBucketLocation": { + "shape": "BucketRegion" + }, + "SourceBucketLocation": { + "shape": "BucketRegion" + } + }, + "error": { + "httpStatusCode": 403 + }, + "documentation": "<p>Cross S3 location logging not allowed. </p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/InvalidTargetBucketForLogging", + "value": { + "type": "structure", + "members": { + "TargetBucket": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 400 + }, + "documentation": "<p>The target bucket for logging does not exist</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/operations/PutBucketInventoryConfiguration/http/responseCode", + "value": 204 + }, + { + "op": "add", + "path": "/operations/PutBucketAnalyticsConfiguration/http/responseCode", + "value": 204 + }, + { + "op": "add", + "path": "/operations/PutBucketIntelligentTieringConfiguration/http/responseCode", + "value": 204 + }, + { + "op": "add", + "path": "/shapes/BucketNotEmpty", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 409 + }, + "documentation": "<p>The bucket you tried to delete is not empty</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/MinSizeAllowed", + "value": { + "type": "long" + } + }, + { + "op": "add", + "path": "/shapes/ProposedSize", + "value": { + "type": "long" + } + }, + { + "op": "add", + "path": "/shapes/EntityTooSmall", + "value": { + "type": "structure", + "members": { + "ETag": { + "shape": "ETag" + }, + "MinSizeAllowed": { + "shape": "MinSizeAllowed" + }, + "PartNumber": { + "shape": "PartNumber" + }, + "ProposedSize": { + "shape": "ProposedSize" + } + }, + "documentation": "<p>Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/InvalidPart", + "value": { + "type": "structure", + "members": { + "ETag": { + "shape": "ETag" + }, + "UploadId": { + "shape": "MultipartUploadId" + }, + "PartNumber": { + "shape": "PartNumber" + } + }, + "documentation": "<p>One or more of the specified parts could not be found. 
The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/NoSuchTagSet", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p>There is no tag set associated with the bucket.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/operations/PutBucketTagging/http/responseCode", + "value": 204 + }, + { + "op": "add", + "path": "/shapes/InvalidTag", + "value": { + "type": "structure", + "members": { + "TagKey": { + "shape": "ObjectKey" + }, + "TagValue": { + "shape": "Value" + } + }, + "documentation": "<p>The tag provided was not a valid tag. This error can occur if the tag did not pass input validation.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/ObjectLockConfigurationNotFoundError", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p>Object Lock configuration does not exist for this bucket</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/InvalidPartNumber", + "value": { + "type": "structure", + "members": { + "PartNumberRequested": { + "shape": "PartNumber" + }, + "ActualPartCount": { + "shape": "PartNumber" + } + }, + "error": { + "httpStatusCode": 416 + }, + "documentation": "<p>The requested partnumber is not satisfiable</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/OwnershipControlsNotFoundError", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p>The bucket ownership controls were not found</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/NoSuchPublicAccessBlockConfiguration", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p>The public access block configuration was not found</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/NoSuchBucketPolicy", + "value": { + "type": "structure", + "members": { + "BucketName": { + "shape": "BucketName" + } + }, + "error": { + "httpStatusCode": 404 + }, + "documentation": "<p>The bucket policy does not exist</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/InvalidObjectState/error", + "value": { + "httpStatusCode": 403 + } + }, + { + "op": "add", + "path": "/shapes/InvalidDigest", + "value": { + "type": "structure", + "members": { + "Content_MD5": { + "shape": "ContentMD5", + "locationName":"Content-MD5" + } + }, + "documentation": "<p>The Content-MD5 you specified was invalid.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/KeyLength", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/KeyTooLongError", + "value": { + "type": "structure", + "members": { + "MaxSizeAllowed": { + "shape": "KeyLength" + }, + "Size": { + "shape": "KeyLength" + } + }, + "documentation": "<p>Your key is too long</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/InvalidLocationConstraint", + "value": { + "type": "structure", + "members": { + "LocationConstraint": { + "shape": "BucketRegion" + } + }, + "documentation": "<p>The specified location-constraint is not 
valid</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/EntityTooLarge", + "value": { + "type": "structure", + "members": { + "MaxSizeAllowed": { + "shape": "KeyLength" + }, + "HostId": { + "shape": "HostId" + }, + "ProposedSize": { + "shape": "ProposedSize" + } + }, + "documentation": "<p>Your proposed upload exceeds the maximum allowed size</p>", + "exception": true + } + }, + { + "op": "remove", + "path": "/shapes/ListObjectVersionsOutput/members/Versions" + }, + { + "op": "add", + "path": "/shapes/ListObjectVersionsOutput/members/Versions", + "value": { + "shape":"ObjectVersionList", + "documentation":"<p>Container for version information.</p>", + "locationName":"Version" + } + }, + { + "op": "add", + "path": "/shapes/InvalidEncryptionAlgorithmError", + "value": { + "type": "structure", + "members": { + "ArgumentName": { + "shape": "ArgumentName" + }, + "ArgumentValue": { + "shape": "ArgumentValue" + } + }, + "error": { + "httpStatusCode": 400 + }, + "documentation": "<p>The Encryption request you specified is not valid.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/Header", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/additionalMessage", + "value": { + "type": "string" + } + }, + { + "op": "add", + "path": "/shapes/NotImplemented", + "value": { + "type": "structure", + "members": { + "Header": { + "shape": "Header" + }, + "additionalMessage": { + "shape": "additionalMessage" + } + }, + "error": { + "httpStatusCode": 501 + }, + "documentation": "<p>A header you provided implies functionality that is not implemented.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/ConditionalRequestConflict", + "value": { + "type": "structure", + "members": { + "Condition": { + "shape": "IfCondition" + }, + "Key": { + "shape": "ObjectKey" + } + }, + "error": { + "httpStatusCode": 409 + }, + "documentation": "<p>The conditional request cannot succeed due to a conflicting operation against this resource.</p>", + "exception": true + } + }, + { + "op": "add", + "path": "/shapes/BadDigest", + "value": { + "type": "structure", + "members": { + "ExpectedDigest": { + "shape": "ContentMD5" + }, + "CalculatedDigest": { + "shape": "ContentMD5" + } + }, + "error": { + "httpStatusCode": 400 + }, + "documentation": "<p>The Content-MD5 you specified did not match what we received.</p>", + "exception": true + } + } + ], + "apigatewayv2/2018-11-29/service-2": [ + { + "op": "add", + "path": "/operations/UpdateDeployment/http/responseCode", + "value": 201 + }, + { + "op": "add", + "path": "/operations/UpdateApi/http/responseCode", + "value": 201 + }, + { + "op": "add", + "path": "/operations/UpdateRoute/http/responseCode", + "value": 201 + }, + { + "op": "add", + "path": "/operations/CreateApiMapping/http/responseCode", + "value": 200 + } + ] +} diff --git a/localstack-core/localstack/aws/spec.py b/localstack-core/localstack/aws/spec.py new file mode 100644 index 0000000000000..1410ddde3e246 --- /dev/null +++ b/localstack-core/localstack/aws/spec.py @@ -0,0 +1,369 @@ +import dataclasses +import json +import logging +import os +import sys +from collections import defaultdict +from functools import cached_property, lru_cache +from typing import Dict, Generator, List, Literal, NamedTuple, Optional, Tuple + +import botocore +import jsonpatch +from botocore.exceptions import UnknownServiceError +from botocore.loaders import Loader, instance_cache +from botocore.model import OperationModel, ServiceModel + +from 
localstack import config
+from localstack.constants import VERSION
+from localstack.utils.objects import singleton_factory
+
+LOG = logging.getLogger(__name__)
+
+ServiceName = str
+ProtocolName = Literal["query", "json", "rest-json", "rest-xml", "ec2"]
+
+
+class ServiceModelIdentifier(NamedTuple):
+ """
+ Identifies a specific service model.
+ If the protocol is not given, the default protocol of the service with the specific name is assumed.
+ Maybe also add versions here in the future (if we can support multiple different versions for one service).
+ """
+
+ name: ServiceName
+ protocol: Optional[ProtocolName] = None
+
+
+spec_patches_json = os.path.join(os.path.dirname(__file__), "spec-patches.json")
+
+
+def load_spec_patches() -> Dict[str, list]:
+ if not os.path.exists(spec_patches_json):
+ return {}
+ with open(spec_patches_json) as fd:
+ return json.load(fd)
+
+
+# Path for custom specs which are not (anymore) provided by botocore
+LOCALSTACK_BUILTIN_DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
+
+
+class LocalStackBuiltInDataLoaderMixin(Loader):
+ def __init__(self, *args, **kwargs):
+ # add the builtin data path to the extra_search_paths to ensure they are discovered by the loader
+ super().__init__(*args, extra_search_paths=[LOCALSTACK_BUILTIN_DATA_PATH], **kwargs)
+
+
+class PatchingLoader(Loader):
+ """
+ A custom botocore Loader that applies JSON patches from the given json patch file to the specs as they are loaded.
+ """
+
+ patches: Dict[str, list]
+
+ def __init__(self, patches: Dict[str, list], *args, **kwargs):
+ # note: the builtin data path is added by LocalStackBuiltInDataLoaderMixin, not here
+ super().__init__(*args, **kwargs)
+ self.patches = patches
+
+ @instance_cache
+ def load_data(self, name: str):
+ result = super(PatchingLoader, self).load_data(name)
+
+ if patches := self.patches.get(name):
+ return jsonpatch.apply_patch(result, patches)
+
+ return result
+
+
+class CustomLoader(PatchingLoader, LocalStackBuiltInDataLoaderMixin):
+ # Class mixing the different loader features (patching, localstack specific data)
+ pass
+
+
+loader = CustomLoader(load_spec_patches())
+
+
+class UnknownServiceProtocolError(UnknownServiceError):
+ """Raised when trying to load a service with an unknown protocol.
+
+ :ivar service_name: The name of the service.
+ :ivar protocol: The name of the unknown protocol.
+ """
+
+ fmt = "Unknown service protocol: '{service_name}-{protocol}'."
+
+
+def list_services() -> List[ServiceModel]:
+ return [load_service(service) for service in loader.list_available_services("service-2")]
+
+
+def load_service(
+ service: ServiceName, version: Optional[str] = None, protocol: Optional[ProtocolName] = None
+) -> ServiceModel:
+ """
+ Loads a service
+ :param service: to load, f.e. "sqs". For custom, internalized, service protocol specs (f.e. sqs-query) it's also
+ possible to directly define the protocol in the service name (f.e. use sqs-query)
+ :param version: of the service to load, f.e. "2012-11-05", by default the latest version will be used
+ :param protocol: specific protocol to load for the specific service, f.e. "json" for the "sqs" service
"json" for the "sqs" service + if the service cannot be found + :return: Loaded service model of the service + :raises: UnknownServiceError if the service cannot be found + :raises: UnknownServiceProtocolError if the specific protocol of the service cannot be found + """ + service_description = loader.load_service_model(service, "service-2", version) + + # check if the protocol is defined, and if so, if the loaded service defines this protocol + if protocol is not None and protocol != service_description.get("metadata", {}).get("protocol"): + # if the protocol is defined, but not the one of the currently loaded service, + # check if we already loaded the custom spec based on the naming convention (<service>-<protocol>), + # f.e. "sqs-query" + if service.endswith(f"-{protocol}"): + # if so, we raise an exception + raise UnknownServiceProtocolError(service_name=service, protocol=protocol) + # otherwise we try to load it (recursively) + try: + return load_service(f"{service}-{protocol}", version, protocol=protocol) + except UnknownServiceError: + # raise an unknown protocol error in case the service also can't be loaded with the naming convention + raise UnknownServiceProtocolError(service_name=service, protocol=protocol) + + # remove potential protocol names from the service name + # FIXME add more protocols here if we have to internalize more than just sqs-query + # TODO this should not contain specific internalized serivce names + service = {"sqs-query": "sqs"}.get(service, service) + return ServiceModel(service_description, service) + + +def iterate_service_operations() -> Generator[Tuple[ServiceModel, OperationModel], None, None]: + """ + Returns one record per operation in the AWS service spec, where the first item is the service model the operation + belongs to, and the second is the operation model. + + :return: an iterable + """ + for service in list_services(): + for op_name in service.operation_names: + yield service, service.operation_model(op_name) + + +@dataclasses.dataclass +class ServiceCatalogIndex: + """ + The ServiceCatalogIndex enables fast lookups for common operations to determine a service from service indicators. + """ + + service_names: List[ServiceName] + target_prefix_index: Dict[str, List[ServiceModelIdentifier]] + signing_name_index: Dict[str, List[ServiceModelIdentifier]] + operations_index: Dict[str, List[ServiceModelIdentifier]] + endpoint_prefix_index: Dict[str, List[ServiceModelIdentifier]] + + +class LazyServiceCatalogIndex: + """ + A ServiceCatalogIndex that builds indexes in-memory from the spec. 
+ """ + + @cached_property + def service_names(self) -> List[ServiceName]: + return list(self._services.keys()) + + @cached_property + def target_prefix_index(self) -> Dict[str, List[ServiceModelIdentifier]]: + result = defaultdict(list) + for service_models in self._services.values(): + for service_model in service_models: + target_prefix = service_model.metadata.get("targetPrefix") + if target_prefix: + result[target_prefix].append( + ServiceModelIdentifier(service_model.service_name, service_model.protocol) + ) + return dict(result) + + @cached_property + def signing_name_index(self) -> Dict[str, List[ServiceModelIdentifier]]: + result = defaultdict(list) + for service_models in self._services.values(): + for service_model in service_models: + result[service_model.signing_name].append( + ServiceModelIdentifier(service_model.service_name, service_model.protocol) + ) + return dict(result) + + @cached_property + def operations_index(self) -> Dict[str, List[ServiceModelIdentifier]]: + result = defaultdict(list) + for service_models in self._services.values(): + for service_model in service_models: + operations = service_model.operation_names + if operations: + for operation in operations: + result[operation].append( + ServiceModelIdentifier( + service_model.service_name, service_model.protocol + ) + ) + return dict(result) + + @cached_property + def endpoint_prefix_index(self) -> Dict[str, List[ServiceModelIdentifier]]: + result = defaultdict(list) + for service_models in self._services.values(): + for service_model in service_models: + result[service_model.endpoint_prefix].append( + ServiceModelIdentifier(service_model.service_name, service_model.protocol) + ) + return dict(result) + + @cached_property + def _services(self) -> Dict[ServiceName, List[ServiceModel]]: + services = defaultdict(list) + for service in list_services(): + services[service.service_name].append(service) + return services + + +class ServiceCatalog: + index: ServiceCatalogIndex + + def __init__(self, index: ServiceCatalogIndex = None): + self.index = index or LazyServiceCatalogIndex() + + @lru_cache(maxsize=512) + def get( + self, name: ServiceName, protocol: Optional[ProtocolName] = None + ) -> Optional[ServiceModel]: + return load_service(name, protocol=protocol) + + @property + def service_names(self) -> List[ServiceName]: + return self.index.service_names + + @property + def target_prefix_index(self) -> Dict[str, List[ServiceModelIdentifier]]: + return self.index.target_prefix_index + + @property + def signing_name_index(self) -> Dict[str, List[ServiceModelIdentifier]]: + return self.index.signing_name_index + + @property + def operations_index(self) -> Dict[str, List[ServiceModelIdentifier]]: + return self.index.operations_index + + @property + def endpoint_prefix_index(self) -> Dict[str, List[ServiceModelIdentifier]]: + return self.index.endpoint_prefix_index + + def by_target_prefix(self, target_prefix: str) -> List[ServiceModelIdentifier]: + return self.target_prefix_index.get(target_prefix, []) + + def by_signing_name(self, signing_name: str) -> List[ServiceModelIdentifier]: + return self.signing_name_index.get(signing_name, []) + + def by_operation(self, operation_name: str) -> List[ServiceModelIdentifier]: + return self.operations_index.get(operation_name, []) + + +def build_service_index_cache(file_path: str) -> ServiceCatalogIndex: + """ + Creates a new ServiceCatalogIndex and stores it into the given file_path. 
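+
+    Build-time usage sketch (the target path is illustrative)::
+
+        index = build_service_index_cache("target/service-catalog.dill")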
+ + :param file_path: the path to store the file to + :return: the created ServiceCatalogIndex + """ + return save_service_index_cache(LazyServiceCatalogIndex(), file_path) + + +def load_service_index_cache(file: str) -> ServiceCatalogIndex: + """ + Loads from the given file the stored ServiceCatalogIndex. + + :param file: the file to load from + :return: the loaded ServiceCatalogIndex + """ + import dill + + with open(file, "rb") as fd: + return dill.load(fd) + + +def save_service_index_cache(index: LazyServiceCatalogIndex, file_path: str) -> ServiceCatalogIndex: + """ + Creates from the given LazyServiceCatalogIndex a ``ServiceCatalogIndex`, stores its contents into the given file, + and then returns the newly created index. + + :param index: the LazyServiceCatalogIndex to store the index from. + :param file_path: the path to store the binary index cache file to + :return: the created ServiceCatalogIndex + """ + import dill + + cache = ServiceCatalogIndex( + service_names=index.service_names, + endpoint_prefix_index=index.endpoint_prefix_index, + operations_index=index.operations_index, + signing_name_index=index.signing_name_index, + target_prefix_index=index.target_prefix_index, + ) + with open(file_path, "wb") as fd: + # use dill (instead of plain pickle) to avoid issues when serializing the pickle from __main__ + dill.dump(cache, fd) + return cache + + +def _get_catalog_filename(): + ls_ver = VERSION.replace(".", "_") + botocore_ver = botocore.__version__.replace(".", "_") + return f"service-catalog-{ls_ver}-{botocore_ver}.dill" + + +@singleton_factory +def get_service_catalog() -> ServiceCatalog: + """Loads the ServiceCatalog (which contains all the service specs), and potentially re-uses a cached index.""" + + try: + catalog_file_name = _get_catalog_filename() + static_catalog_file = os.path.join(config.dirs.static_libs, catalog_file_name) + + # try to load or load/build/save the service catalog index from the static libs + index = None + if os.path.exists(static_catalog_file): + # load the service catalog from the static libs dir / built at build time + LOG.debug("loading service catalog index cache file %s", static_catalog_file) + index = load_service_index_cache(static_catalog_file) + elif os.path.isdir(config.dirs.cache): + cache_catalog_file = os.path.join(config.dirs.cache, catalog_file_name) + if os.path.exists(cache_catalog_file): + LOG.debug("loading service catalog index cache file %s", cache_catalog_file) + index = load_service_index_cache(cache_catalog_file) + else: + LOG.debug("building service catalog index cache file %s", cache_catalog_file) + index = build_service_index_cache(cache_catalog_file) + return ServiceCatalog(index) + except Exception: + LOG.exception( + "error while processing service catalog index cache, falling back to lazy-loaded index" + ) + return ServiceCatalog() + + +def main(): + catalog_file_name = _get_catalog_filename() + static_catalog_file = os.path.join(config.dirs.static_libs, catalog_file_name) + + if os.path.exists(static_catalog_file): + LOG.error( + "service catalog index cache file (%s) already there. 
aborting!", static_catalog_file + ) + return 1 + + # load the service catalog from the static libs dir / built at build time + LOG.debug("building service catalog index cache file %s", static_catalog_file) + build_service_index_cache(static_catalog_file) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/localstack/cli/__init__.py b/localstack-core/localstack/cli/__init__.py similarity index 100% rename from localstack/cli/__init__.py rename to localstack-core/localstack/cli/__init__.py diff --git a/localstack/cli/console.py b/localstack-core/localstack/cli/console.py similarity index 100% rename from localstack/cli/console.py rename to localstack-core/localstack/cli/console.py diff --git a/localstack-core/localstack/cli/exceptions.py b/localstack-core/localstack/cli/exceptions.py new file mode 100644 index 0000000000000..cd65d2ee13d26 --- /dev/null +++ b/localstack-core/localstack/cli/exceptions.py @@ -0,0 +1,19 @@ +import typing as t +from gettext import gettext + +import click +from click import ClickException, echo +from click._compat import get_text_stderr + + +class CLIError(ClickException): + """A ClickException with a red error message""" + + def format_message(self) -> str: + return click.style(f"❌ Error: {self.message}", fg="red") + + def show(self, file: t.Optional[t.IO[t.Any]] = None) -> None: + if file is None: + file = get_text_stderr() + + echo(gettext(self.format_message()), file=file) diff --git a/localstack-core/localstack/cli/localstack.py b/localstack-core/localstack/cli/localstack.py new file mode 100644 index 0000000000000..016834b3e21b3 --- /dev/null +++ b/localstack-core/localstack/cli/localstack.py @@ -0,0 +1,946 @@ +import json +import logging +import os +import sys +import traceback +from typing import Dict, List, Optional, Tuple, TypedDict + +import click +import requests + +from localstack import config +from localstack.cli.exceptions import CLIError +from localstack.constants import VERSION +from localstack.utils.analytics.cli import publish_invocation +from localstack.utils.bootstrap import get_container_default_logfile_location +from localstack.utils.json import CustomEncoder + +from .console import BANNER, console +from .plugin import LocalstackCli, load_cli_plugins + + +class LocalStackCliGroup(click.Group): + """ + A Click group used for the top-level ``localstack`` command group. It implements global exception handling + by: + + - Ignoring click exceptions (already handled) + - Handling common exceptions (like DockerNotAvailable) + - Wrapping all unexpected exceptions in a ClickException (for a unified error message) + + It also implements a custom help formatter to build more fine-grained groups. 
+ """ + + # FIXME: find a way to communicate this from the actual command + advanced_commands = [ + "aws", + "dns", + "extensions", + "license", + "login", + "logout", + "pod", + "state", + "ephemeral", + "replicator", + ] + + def invoke(self, ctx: click.Context): + try: + return super(LocalStackCliGroup, self).invoke(ctx) + except click.exceptions.Exit: + # raise Exit exceptions unmodified (e.g., raised on --help) + raise + except click.ClickException: + # don't handle ClickExceptions, just reraise + if ctx and ctx.params.get("debug"): + click.echo(traceback.format_exc()) + raise + except Exception as e: + if ctx and ctx.params.get("debug"): + click.echo(traceback.format_exc()) + from localstack.utils.container_utils.container_client import ( + ContainerException, + DockerNotAvailable, + ) + + if isinstance(e, DockerNotAvailable): + raise CLIError( + "Docker could not be found on the system.\n" + "Please make sure that you have a working docker environment on your machine." + ) + elif isinstance(e, ContainerException): + raise CLIError(e.message) + else: + # If we have a generic exception, we wrap it in a ClickException + raise CLIError(str(e)) from e + + def format_commands(self, ctx: click.Context, formatter: click.HelpFormatter) -> None: + """Extra format methods for multi methods that adds all the commands after the options. It also + groups commands into command categories.""" + categories = {"Commands": [], "Advanced": [], "Deprecated": []} + + commands = [] + for subcommand in self.list_commands(ctx): + cmd = self.get_command(ctx, subcommand) + # What is this, the tool lied about a command. Ignore it + if cmd is None: + continue + if cmd.hidden: + continue + + commands.append((subcommand, cmd)) + + # allow for 3 times the default spacing + if len(commands): + limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) + + for subcommand, cmd in commands: + help = cmd.get_short_help_str(limit) + categories[self._get_category(cmd)].append((subcommand, help)) + + for category, rows in categories.items(): + if rows: + with formatter.section(category): + formatter.write_dl(rows) + + def _get_category(self, cmd) -> str: + if cmd.deprecated: + return "Deprecated" + + if cmd.name in self.advanced_commands: + return "Advanced" + + return "Commands" + + +def create_with_plugins() -> LocalstackCli: + """ + Creates a LocalstackCli instance with all cli plugins loaded. 
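+
+    This mirrors what the CLI entrypoint does (minimal sketch)::
+
+        cli = create_with_plugins()
+        cli()
+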
+ :return: a LocalstackCli instance + """ + cli = LocalstackCli() + cli.group = localstack + load_cli_plugins(cli) + return cli + + +def _setup_cli_debug() -> None: + from localstack.logging.setup import setup_logging_for_cli + + config.DEBUG = True + os.environ["DEBUG"] = "1" + + setup_logging_for_cli(logging.DEBUG if config.DEBUG else logging.INFO) + + +# Re-usable format option decorator which can be used across multiple commands +_click_format_option = click.option( + "-f", + "--format", + "format_", + type=click.Choice(["table", "plain", "dict", "json"]), + default="table", + help="The formatting style for the command output.", +) + + +@click.group( + name="localstack", + help="The LocalStack Command Line Interface (CLI)", + cls=LocalStackCliGroup, + context_settings={ + # add "-h" as a synonym for "--help" + # https://click.palletsprojects.com/en/8.1.x/documentation/#help-parameter-customization + "help_option_names": ["-h", "--help"], + # show default values for options by default - https://github.com/pallets/click/pull/1225 + "show_default": True, + }, +) +@click.version_option( + VERSION, + "--version", + "-v", + message="LocalStack CLI %(version)s", + help="Show the version of the LocalStack CLI and exit", +) +@click.option("-d", "--debug", is_flag=True, help="Enable CLI debugging mode") +@click.option("-p", "--profile", type=str, help="Set the configuration profile") +def localstack(debug, profile) -> None: + # --profile is read manually in localstack.cli.main because it needs to be read before localstack.config is read + + if debug: + _setup_cli_debug() + + from localstack.utils.files import cache_dir + + # overwrite the config variable here to defer import of cache_dir + if not os.environ.get("LOCALSTACK_VOLUME_DIR", "").strip(): + config.VOLUME_DIR = str(cache_dir() / "volume") + + # FIXME: at some point we should remove the use of `config.dirs` for the CLI, + # see https://github.com/localstack/localstack/pull/7906 + config.dirs.for_cli().mkdirs() + + +@localstack.group( + name="config", + short_help="Manage your LocalStack config", +) +def localstack_config() -> None: + """ + Inspect and validate your LocalStack configuration. + """ + pass + + +@localstack_config.command(name="show", short_help="Show your config") +@_click_format_option +@publish_invocation +def cmd_config_show(format_: str) -> None: + """ + Print the current LocalStack config values. + + This command prints the LocalStack configuration values from your environment. + It analyzes the environment variables as well as the LocalStack CLI profile. + It does _not_ analyze a specific file (like a docker-compose-yml). + """ + # TODO: parse values from potential docker-compose file? 
+ assert config + + try: + # only load the ext config if it's available + from localstack.pro.core import config as ext_config + + assert ext_config + except ImportError: + # the ext package is not available + return None + + if format_ == "table": + _print_config_table() + elif format_ == "plain": + _print_config_pairs() + elif format_ == "dict": + _print_config_dict() + elif format_ == "json": + _print_config_json() + else: + _print_config_pairs() # fall back to plain + + +@localstack_config.command(name="validate", short_help="Validate your config") +@click.option( + "-f", + "--file", + help="Path to compose file", + default="docker-compose.yml", + type=click.Path(exists=True, file_okay=True, readable=True), +) +@publish_invocation +def cmd_config_validate(file: str) -> None: + """ + Validate your LocalStack configuration (docker compose). + + This command inspects the given docker-compose file (by default docker-compose.yml in the current working + directory) and validates if the configuration is valid. + + \b + It will show an error and return a non-zero exit code if: + - The docker-compose file is syntactically incorrect. + - If the file contains common issues when configuring LocalStack. + """ + + from localstack.utils import bootstrap + + if bootstrap.validate_localstack_config(file): + console.print("[green]:heavy_check_mark:[/green] config valid") + sys.exit(0) + else: + console.print("[red]:heavy_multiplication_x:[/red] validation error") + sys.exit(1) + + +def _print_config_json() -> None: + import json + + console.print(json.dumps(dict(config.collect_config_items()), cls=CustomEncoder)) + + +def _print_config_pairs() -> None: + for key, value in config.collect_config_items(): + console.print(f"{key}={value}") + + +def _print_config_dict() -> None: + console.print(dict(config.collect_config_items())) + + +def _print_config_table() -> None: + from rich.table import Table + + grid = Table(show_header=True) + grid.add_column("Key") + grid.add_column("Value") + + for key, value in config.collect_config_items(): + grid.add_row(key, str(value)) + + console.print(grid) + + +@localstack.group( + name="status", + short_help="Query status info", + invoke_without_command=True, +) +@click.pass_context +def localstack_status(ctx: click.Context) -> None: + """ + Query status information about the currently running LocalStack instance. + """ + if ctx.invoked_subcommand is None: + ctx.invoke(localstack_status.get_command(ctx, "docker")) + + +@localstack_status.command(name="docker", short_help="Query LocalStack Docker status") +@_click_format_option +def cmd_status_docker(format_: str) -> None: + """ + Query information about the currently running LocalStack Docker image, its container, + and the LocalStack runtime. 
+ """ + with console.status("Querying Docker status"): + _print_docker_status(format_) + + +class DockerStatus(TypedDict, total=False): + running: bool + runtime_version: str + image_tag: str + image_id: str + image_created: str + container_name: Optional[str] + container_ip: Optional[str] + + +def _print_docker_status(format_: str) -> None: + from localstack.utils import docker_utils + from localstack.utils.bootstrap import get_docker_image_details, get_server_version + from localstack.utils.container_networking import get_main_container_ip, get_main_container_name + + img = get_docker_image_details() + cont_name = config.MAIN_CONTAINER_NAME + running = docker_utils.DOCKER_CLIENT.is_container_running(cont_name) + status = DockerStatus( + runtime_version=get_server_version(), + image_tag=img["tag"], + image_id=img["id"], + image_created=img["created"], + running=running, + ) + if running: + status["container_name"] = get_main_container_name() + status["container_ip"] = get_main_container_ip() + + if format_ == "dict": + console.print(status) + if format_ == "table": + _print_docker_status_table(status) + if format_ == "json": + console.print(json.dumps(status)) + if format_ == "plain": + for key, value in status.items(): + console.print(f"{key}={value}") + + +def _print_docker_status_table(status: DockerStatus) -> None: + from rich.table import Table + + grid = Table(show_header=False) + grid.add_column() + grid.add_column() + + grid.add_row("Runtime version", f"[bold]{status['runtime_version']}[/bold]") + grid.add_row( + "Docker image", + f"tag: {status['image_tag']}, " + f"id: {status['image_id']}, " + f":calendar: {status['image_created']}", + ) + cont_status = "[bold][red]:heavy_multiplication_x: stopped" + if status["running"]: + cont_status = ( + f"[bold][green]:heavy_check_mark: running[/green][/bold] " + f'(name: "[italic]{status["container_name"]}[/italic]", IP: {status["container_ip"]})' + ) + grid.add_row("Runtime status", cont_status) + console.print(grid) + + +@localstack_status.command(name="services", short_help="Query LocalStack services status") +@_click_format_option +def cmd_status_services(format_: str) -> None: + """ + Query information about the services of the currently running LocalStack instance. 
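+
+    \b
+    Example invocation with (illustrative) plain output:
+    localstack status services --format plain
+      sqs=running
+      s3=available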
+ """ + url = config.external_service_url() + + try: + health = requests.get(f"{url}/_localstack/health", timeout=2) + doc = health.json() + services = doc.get("services", []) + if format_ == "table": + _print_service_table(services) + if format_ == "plain": + for service, status in services.items(): + console.print(f"{service}={status}") + if format_ == "dict": + console.print(services) + if format_ == "json": + console.print(json.dumps(services)) + except requests.ConnectionError: + if config.DEBUG: + console.print_exception() + raise CLIError(f"could not connect to LocalStack health endpoint at {url}") + + +def _print_service_table(services: Dict[str, str]) -> None: + from rich.table import Table + + status_display = { + "running": "[green]:heavy_check_mark:[/green] running", + "starting": ":hourglass_flowing_sand: starting", + "available": "[grey]:heavy_check_mark:[/grey] available", + "error": "[red]:heavy_multiplication_x:[/red] error", + } + + table = Table() + table.add_column("Service") + table.add_column("Status") + + services = list(services.items()) + services.sort(key=lambda item: item[0]) + + for service, status in services: + if status in status_display: + status = status_display[status] + + table.add_row(service, status) + + console.print(table) + + +@localstack.command(name="start", short_help="Start LocalStack") +@click.option("--docker", is_flag=True, help="Start LocalStack in a docker container [default]") +@click.option("--host", is_flag=True, help="Start LocalStack directly on the host") +@click.option("--no-banner", is_flag=True, help="Disable LocalStack banner", default=False) +@click.option( + "-d", "--detached", is_flag=True, help="Start LocalStack in the background", default=False +) +@click.option( + "--network", + type=str, + help="The container network the LocalStack container should be started in. By default, the default docker bridge network is used.", + required=False, +) +@click.option( + "--env", + "-e", + help="Additional environment variables that are passed to the LocalStack container", + multiple=True, + required=False, +) +@click.option( + "--publish", + "-p", + help="Additional port mappings that are passed to the LocalStack container", + multiple=True, + required=False, +) +@click.option( + "--volume", + "-v", + help="Additional volume mounts that are passed to the LocalStack container", + multiple=True, + required=False, +) +@click.option( + "--host-dns", + help="Expose the LocalStack DNS server to the host using port bindings.", + required=False, + is_flag=True, + default=False, +) +@click.option( + "--stack", + "-s", + type=str, + help="Use a specific stack with optional version. Examples: [localstack:4.5, snowflake]", + required=False, +) +@publish_invocation +def cmd_start( + docker: bool, + host: bool, + no_banner: bool, + detached: bool, + network: str = None, + env: Tuple = (), + publish: Tuple = (), + volume: Tuple = (), + host_dns: bool = False, + stack: str = None, +) -> None: + """ + Start the LocalStack runtime. + + This command starts the LocalStack runtime with your current configuration. + By default, it will start a new Docker container from the latest LocalStack(-Pro) Docker image + with best-practice volume mounts and port mappings. 
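+
+    \b
+    Example with illustrative option values:
+    localstack start -d --network my-network -e DEBUG=1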
+ """ + if docker and host: + raise CLIError("Please specify either --docker or --host") + if host and detached: + raise CLIError("Cannot start detached in host mode") + + if stack: + # Validate allowed stacks + stack_name = stack.split(":")[0] + allowed_stacks = ("localstack", "localstack-pro", "snowflake") + if stack_name.lower() not in allowed_stacks: + raise CLIError(f"Invalid stack '{stack_name}'. Allowed stacks: {allowed_stacks}.") + + # Set IMAGE_NAME, defaulting to :latest if no version specified + if ":" not in stack: + stack = f"{stack}:latest" + os.environ["IMAGE_NAME"] = f"localstack/{stack}" + + if not no_banner: + print_banner() + print_version() + print_profile() + print_app() + console.line() + + from localstack.utils import bootstrap + + if not no_banner: + if host: + console.log("starting LocalStack in host mode :laptop_computer:") + else: + console.log("starting LocalStack in Docker mode :whale:") + + if host: + # call hooks to prepare host + bootstrap.prepare_host(console) + + # from here we abandon the regular CLI control path and start treating the process like a localstack + # runtime process + os.environ["LOCALSTACK_CLI"] = "0" + config.dirs = config.init_directories() + + try: + bootstrap.start_infra_locally() + except ImportError: + if config.DEBUG: + console.print_exception() + raise CLIError( + "It appears you have a light install of localstack which only supports running in docker.\n" + "If you would like to use --host, please install localstack with Python using " + "`pip install localstack[runtime]` instead." + ) + else: + # make sure to initialize the bootstrap environment and directories for the host (even if we're executing + # in Docker), to allow starting the container from within other containers (e.g., Github Codespaces). + config.OVERRIDE_IN_DOCKER = False + config.is_in_docker = False + config.dirs = config.init_directories() + + # call hooks to prepare host (note that this call should stay below the config overrides above) + bootstrap.prepare_host(console) + + # pass the parsed cli params to the start infra command + params = click.get_current_context().params + + if network: + # reconciles the network config and makes sure that MAIN_DOCKER_NETWORK is set automatically if + # `--network` is set. + if config.MAIN_DOCKER_NETWORK: + if config.MAIN_DOCKER_NETWORK != network: + raise CLIError( + f"Values of MAIN_DOCKER_NETWORK={config.MAIN_DOCKER_NETWORK} and --network={network} " + f"do not match" + ) + else: + config.MAIN_DOCKER_NETWORK = network + os.environ["MAIN_DOCKER_NETWORK"] = network + + if detached: + bootstrap.start_infra_in_docker_detached(console, params) + else: + bootstrap.start_infra_in_docker(console, params) + + +@localstack.command(name="stop", short_help="Stop LocalStack") +@publish_invocation +def cmd_stop() -> None: + """ + Stops the current LocalStack runtime. + + This command stops the currently running LocalStack docker container. + By default, this command looks for a container named `localstack-main` (which is the default + container name used by the `localstack start` command). + If your LocalStack container has a different name, set the config variable + `MAIN_CONTAINER_NAME`. 
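+
+    \b
+    Example with a custom container name (illustrative):
+    MAIN_CONTAINER_NAME=my-localstack localstack stop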
+    """
+    from localstack.utils.docker_utils import DOCKER_CLIENT
+
+    from ..utils.container_utils.container_client import NoSuchContainer
+
+    container_name = config.MAIN_CONTAINER_NAME
+
+    try:
+        DOCKER_CLIENT.stop_container(container_name)
+        console.print("container stopped: %s" % container_name)
+    except NoSuchContainer:
+        raise CLIError(
+            f'Expected a running LocalStack container named "{container_name}", but found none'
+        )
+
+
+@localstack.command(name="restart", short_help="Restart LocalStack")
+@publish_invocation
+def cmd_restart() -> None:
+    """
+    Restarts the current LocalStack runtime.
+    """
+    url = config.external_service_url()
+
+    try:
+        response = requests.post(
+            f"{url}/_localstack/health",
+            json={"action": "restart"},
+        )
+        response.raise_for_status()
+        console.print("LocalStack restarted within the container.")
+    except requests.ConnectionError:
+        if config.DEBUG:
+            console.print_exception()
+        raise CLIError("could not restart the LocalStack container")
+
+
+@localstack.command(
+    name="logs",
+    short_help="Show LocalStack logs",
+)
+@click.option(
+    "-f",
+    "--follow",
+    is_flag=True,
+    help="Block the terminal and follow the log output",
+    default=False,
+)
+@click.option(
+    "-n",
+    "--tail",
+    type=int,
+    help="Print only the last <N> lines of the log output",
+    default=None,
+    metavar="N",
+)
+@publish_invocation
+def cmd_logs(follow: bool, tail: int) -> None:
+    """
+    Show the logs of the current LocalStack runtime.
+
+    This command shows the logs of the currently running LocalStack docker container.
+    By default, this command looks for a container named `localstack-main` (which is the default
+    container name used by the `localstack start` command).
+    If your LocalStack container has a different name, set the config variable
+    `MAIN_CONTAINER_NAME`.
+    """
+    from localstack.utils.docker_utils import DOCKER_CLIENT
+
+    container_name = config.MAIN_CONTAINER_NAME
+    logfile = get_container_default_logfile_location(container_name)
+
+    if not DOCKER_CLIENT.is_container_running(container_name):
+        console.print("localstack container not running")
+        if os.path.exists(logfile):
+            console.print("printing logs from previous run")
+            with open(logfile) as fd:
+                for line in fd:
+                    click.echo(line, nl=False)
+        sys.exit(1)
+
+    if follow:
+        num_lines = 0
+        for line in DOCKER_CLIENT.stream_container_logs(container_name):
+            print(line.decode("utf-8").rstrip("\r\n"))
+            num_lines += 1
+            if tail is not None and num_lines >= tail:
+                break
+
+    else:
+        logs = DOCKER_CLIENT.get_container_logs(container_name)
+        if tail is not None:
+            logs = "\n".join(logs.split("\n")[-tail:])
+        print(logs)
+
+
+@localstack.command(name="wait", short_help="Wait for LocalStack")
+@click.option(
+    "-t",
+    "--timeout",
+    type=float,
+    help="Only wait for <N> seconds before raising a timeout error",
+    default=None,
+    metavar="N",
+)
+@publish_invocation
+def cmd_wait(timeout: Optional[float] = None) -> None:
+    """
+    Wait for the LocalStack runtime to be up and running.
+
+    This command waits for a started LocalStack runtime to be up and running, ready to serve
+    requests.
+    By default, this command looks for a container named `localstack-main` (which is the default
+    container name used by the `localstack start` command).
+    If your LocalStack container has a different name, set the config variable
+    `MAIN_CONTAINER_NAME`.
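+
+    \b
+    Example, waiting for at most 60 seconds:
+    localstack wait -t 60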
+    """
+    from localstack.utils.bootstrap import wait_container_is_ready
+
+    if not wait_container_is_ready(timeout=timeout):
+        raise CLIError("timeout")
+
+
+@localstack.command(name="ssh", short_help="Obtain a shell in LocalStack")
+@publish_invocation
+def cmd_ssh() -> None:
+    """
+    Obtain a shell in the current LocalStack runtime.
+
+    This command starts a new interactive shell in the currently running LocalStack container.
+    By default, this command looks for a container named `localstack-main` (which is the default
+    container name used by the `localstack start` command).
+    If your LocalStack container has a different name, set the config variable
+    `MAIN_CONTAINER_NAME`.
+    """
+    from localstack.utils.docker_utils import DOCKER_CLIENT
+
+    if not DOCKER_CLIENT.is_container_running(config.MAIN_CONTAINER_NAME):
+        raise CLIError(
+            f'Expected a running LocalStack container named "{config.MAIN_CONTAINER_NAME}", but found none'
+        )
+    os.execlp("docker", "docker", "exec", "-it", config.MAIN_CONTAINER_NAME, "bash")
+
+
+@localstack.group(name="update", short_help="Update LocalStack")
+def localstack_update() -> None:
+    """
+    Update different LocalStack components.
+    """
+    pass
+
+
+@localstack_update.command(name="all", short_help="Update all LocalStack components")
+@click.pass_context
+@publish_invocation
+def cmd_update_all(ctx: click.Context) -> None:
+    """
+    Update all LocalStack components.
+
+    This is the same as executing `localstack update localstack-cli` and
+    `localstack update docker-images`.
+    Updating the LocalStack CLI is currently only supported if the CLI
+    is installed and run via Python / PIP. If you used a different installation method,
+    please follow the instructions on https://docs.localstack.cloud/.
+    """
+    ctx.invoke(localstack_update.get_command(ctx, "localstack-cli"))
+    ctx.invoke(localstack_update.get_command(ctx, "docker-images"))
+
+
+@localstack_update.command(name="localstack-cli", short_help="Update LocalStack CLI")
+@publish_invocation
+def cmd_update_localstack_cli() -> None:
+    """
+    Update the LocalStack CLI.
+
+    This command updates the LocalStack CLI. This is currently only supported if the CLI
+    is installed and run via Python / PIP. If you used a different installation method,
+    please follow the instructions on https://docs.localstack.cloud/.
+    """
+    if is_frozen_bundle():
+        # "update" can only be performed if running from source / in a non-frozen interpreter
+        raise CLIError(
+            "The LocalStack CLI can only update itself if installed via PIP. "
+            "Please follow the instructions on https://docs.localstack.cloud/ to update your CLI."
+        )
+
+    import subprocess
+    from subprocess import CalledProcessError
+
+    console.rule("Updating LocalStack CLI")
+    with console.status("Updating LocalStack CLI..."):
+        try:
+            subprocess.check_output(
+                [sys.executable, "-m", "pip", "install", "--upgrade", "localstack"]
+            )
+            console.print(":heavy_check_mark: LocalStack CLI updated")
+        except CalledProcessError:
+            console.print(":heavy_multiplication_x: LocalStack CLI update failed", style="bold red")
+
+
+@localstack_update.command(
+    name="docker-images", short_help="Update docker images LocalStack depends on"
+)
+@publish_invocation
+def cmd_update_docker_images() -> None:
+    """
+    Update all Docker images LocalStack depends on.
+
+    This command updates all LocalStack Docker images, as well as other Docker images
+    LocalStack depends on (and which have been used before / are present on the machine).
+ """ + from localstack.utils.docker_utils import DOCKER_CLIENT + + console.rule("Updating docker images") + + all_images = DOCKER_CLIENT.get_docker_image_names(strip_latest=False) + image_prefixes = [ + "localstack/", + "public.ecr.aws/lambda", + ] + localstack_images = [ + image + for image in all_images + if any( + image.startswith(image_prefix) or image.startswith(f"docker.io/{image_prefix}") + for image_prefix in image_prefixes + ) + and not image.endswith(":<none>") # ignore dangling images + ] + update_images(localstack_images) + + +def update_images(image_list: List[str]) -> None: + from rich.markup import escape + from rich.progress import MofNCompleteColumn, Progress + + from localstack.utils.container_utils.container_client import ContainerException + from localstack.utils.docker_utils import DOCKER_CLIENT + + updated_count = 0 + failed_count = 0 + progress = Progress( + *Progress.get_default_columns(), MofNCompleteColumn(), transient=True, console=console + ) + with progress: + for image in progress.track(image_list, description="Processing image..."): + try: + updated = False + hash_before_pull = DOCKER_CLIENT.inspect_image(image_name=image, pull=False)["Id"] + DOCKER_CLIENT.pull_image(image) + if ( + hash_before_pull + != DOCKER_CLIENT.inspect_image(image_name=image, pull=False)["Id"] + ): + updated = True + updated_count += 1 + console.print( + f":heavy_check_mark: Image {escape(image)} {'updated' if updated else 'up-to-date'}.", + style="bold" if updated else None, + highlight=False, + ) + except ContainerException as e: + console.print( + f":heavy_multiplication_x: Image {escape(image)} pull failed: {e.message}", + style="bold red", + highlight=False, + ) + failed_count += 1 + console.rule() + console.print( + f"Images updated: {updated_count}, Images failed: {failed_count}, total images processed: {len(image_list)}." + ) + + +@localstack.command(name="completion", short_help="CLI shell completion") +@click.pass_context +@click.argument( + "shell", required=True, type=click.Choice(["bash", "zsh", "fish"], case_sensitive=False) +) +@publish_invocation +def localstack_completion(ctx: click.Context, shell: str) -> None: + """ + Print shell completion code for the specified shell (bash, zsh, or fish). + The shell code must be evaluated to enable the interactive shell completion of LocalStack CLI commands. + This is usually done by sourcing it from the .bash_profile. + + \b + Examples: + # Bash + ## Bash completion on Linux depends on the 'bash-completion' package. 
+ ## Write the LocalStack CLI completion code for bash to a file and source it from .bash_profile + localstack completion bash > ~/.localstack/completion.bash.inc + printf " + # LocalStack CLI bash completion + source '$HOME/.localstack/completion.bash.inc' + " >> $HOME/.bash_profile + source $HOME/.bash_profile + \b + # zsh + ## Set the LocalStack completion code for zsh to autoload on startup: + localstack completion zsh > "${fpath[1]}/_localstack" + \b + # fish + ## Set the LocalStack completion code for fish to autoload on startup: + localstack completion fish > ~/.config/fish/completions/localstack.fish + """ + + # lookup the completion, raise an error if the given completion is not found + import click.shell_completion + + comp_cls = click.shell_completion.get_completion_class(shell) + if comp_cls is None: + raise CLIError("Completion for given shell could not be found.") + + # Click's program name is the base path of sys.argv[0] + path = sys.argv[0] + prog_name = os.path.basename(path) + + # create the completion variable according to the docs + # https://click.palletsprojects.com/en/8.1.x/shell-completion/#enabling-completion + complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper() + + # instantiate the completion class and print the completion source + comp = comp_cls(ctx.command, {}, prog_name, complete_var) + click.echo(comp.source()) + + +def print_version() -> None: + console.print(f"- [bold]LocalStack CLI:[/bold] [blue]{VERSION}[/blue]") + + +def print_profile() -> None: + if config.LOADED_PROFILES: + console.print(f"- [bold]Profile:[/bold] [blue]{', '.join(config.LOADED_PROFILES)}[/blue]") + + +def print_app() -> None: + console.print("- [bold]App:[/bold] https://app.localstack.cloud") + + +def print_banner() -> None: + print(BANNER) + + +def is_frozen_bundle() -> bool: + """ + :return: true if we are currently running in a frozen bundle / a pyinstaller binary. 
+ """ + # check if we are in a PyInstaller binary + # https://pyinstaller.org/en/stable/runtime-information.html + return getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS") diff --git a/localstack/cli/lpm.py b/localstack-core/localstack/cli/lpm.py similarity index 96% rename from localstack/cli/lpm.py rename to localstack-core/localstack/cli/lpm.py index f316595e8ed43..ad4a6f5489d5c 100644 --- a/localstack/cli/lpm.py +++ b/localstack-core/localstack/cli/lpm.py @@ -4,10 +4,10 @@ from typing import List, Optional import click -from click import ClickException from rich.console import Console from localstack import config +from localstack.cli.exceptions import CLIError from localstack.packages import InstallTarget, Package from localstack.packages.api import NoSuchPackageException, PackagesPluginManager from localstack.utils.bootstrap import setup_logging @@ -105,10 +105,10 @@ def install( ) except NoSuchPackageException as e: LOG.debug(str(e), exc_info=e) - raise ClickException(str(e)) + raise CLIError(str(e)) except Exception as e: LOG.debug("one or more package installations failed.", exc_info=e) - raise ClickException("one or more package installations failed.") + raise CLIError("one or more package installations failed.") @cli.command(name="list") diff --git a/localstack-core/localstack/cli/main.py b/localstack-core/localstack/cli/main.py new file mode 100644 index 0000000000000..de1f04e38cac5 --- /dev/null +++ b/localstack-core/localstack/cli/main.py @@ -0,0 +1,22 @@ +import os + + +def main(): + # indicate to the environment we are starting from the CLI + os.environ["LOCALSTACK_CLI"] = "1" + + # config profiles are the first thing that need to be loaded (especially before localstack.config!) + from .profiles import set_and_remove_profile_from_sys_argv + + # WARNING: This function modifies sys.argv to remove the profile argument. + set_and_remove_profile_from_sys_argv() + + # initialize CLI plugins + from .localstack import create_with_plugins + + cli = create_with_plugins() + cli() + + +if __name__ == "__main__": + main() diff --git a/localstack/cli/plugin.py b/localstack-core/localstack/cli/plugin.py similarity index 95% rename from localstack/cli/plugin.py rename to localstack-core/localstack/cli/plugin.py index eb9c425f71965..f9af88474a6d5 100644 --- a/localstack/cli/plugin.py +++ b/localstack-core/localstack/cli/plugin.py @@ -3,7 +3,7 @@ import os import click -from plugin import Plugin, PluginManager +from plux import Plugin, PluginManager LOG = logging.getLogger(__name__) diff --git a/localstack-core/localstack/cli/plugins.py b/localstack-core/localstack/cli/plugins.py new file mode 100644 index 0000000000000..c63588161d304 --- /dev/null +++ b/localstack-core/localstack/cli/plugins.py @@ -0,0 +1,134 @@ +import os +import time + +import click +from plux import PluginManager +from plux.build.setuptools import find_plugins +from plux.core.entrypoint import spec_to_entry_point +from rich import print as rprint +from rich.console import Console +from rich.table import Table +from rich.tree import Tree + +from localstack.cli.exceptions import CLIError + +console = Console() + + +@click.group() +def cli(): + """ + The plugins CLI is a set of commands to help troubleshoot LocalStack's plugin mechanism. 
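+
+    \b
+    Example (the namespace value is illustrative):
+    python -m localstack.cli.plugins list --namespace localstack.plugins.cli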
+ """ + pass + + +@cli.command() +@click.option("--where", type=str, default=os.path.abspath(os.curdir)) +@click.option("--exclude", multiple=True, default=()) +@click.option("--include", multiple=True, default=("*",)) +@click.option("--output", type=str, default="tree") +def find(where, exclude, include, output): + """ + Find plugins by scanning the given path for PluginSpecs. + It starts from the current directory if --where is not specified. + This is what a setup.py method would run as a build step, i.e., discovering entry points. + """ + with console.status(f"Scanning path {where}"): + plugins = find_plugins(where, exclude, include) + + if output == "tree": + tree = Tree("Entrypoints") + for namespace, entry_points in plugins.items(): + node = tree.add(f"[bold]{namespace}") + + t = Table() + t.add_column("Name") + t.add_column("Location") + + for ep in entry_points: + key, value = ep.split("=") + t.add_row(key, value) + + node.add(t) + + rprint(tree) + elif output == "dict": + rprint(dict(plugins)) + else: + raise CLIError("unknown output format %s" % output) + + +@cli.command("list") +@click.option("--namespace", type=str, required=True) +def cmd_list(namespace): + """ + List all available plugins using a PluginManager from available endpoints. + """ + manager = PluginManager(namespace) + + t = Table() + t.add_column("Name") + t.add_column("Factory") + + for spec in manager.list_plugin_specs(): + ep = spec_to_entry_point(spec) + t.add_row(spec.name, ep.value) + + rprint(t) + + +@cli.command() +@click.option("--namespace", type=str, required=True) +@click.option("--name", type=str, required=True) +def load(namespace, name): + """ + Attempts to load a plugin using a PluginManager. + """ + manager = PluginManager(namespace) + + with console.status(f"Loading {namespace}:{name}"): + then = time.time() + plugin = manager.load(name) + took = time.time() - then + + rprint( + f":tada: successfully loaded [bold][green]{namespace}[/green][/bold]:[bold][cyan]{name}[/cyan][/bold] ({type(plugin)}" + ) + rprint(f":stopwatch: loading took {took:.4f} s") + + +@cli.command() +@click.option("--namespace", type=str) +def cache(namespace): + """ + Outputs the stevedore entrypoints cache from which plugins are loaded. + """ + from stevedore._cache import _c + + data = _c._get_data_for_path(None) + + tree = Tree("Entrypoints") + for group, entry_points in data.get("groups").items(): + if namespace and group != namespace: + continue + node = tree.add(f"[bold]{group}") + + t = Table() + t.add_column("Name") + t.add_column("Value") + + for key, value, _ in entry_points: + t.add_row(key, value) + + node.add(t) + + if namespace: + rprint(t) + return + + rprint(tree) + + +if __name__ == "__main__": + cli() diff --git a/localstack-core/localstack/cli/profiles.py b/localstack-core/localstack/cli/profiles.py new file mode 100644 index 0000000000000..5af5e089658a4 --- /dev/null +++ b/localstack-core/localstack/cli/profiles.py @@ -0,0 +1,66 @@ +import argparse +import os +import sys +from typing import Optional + +# important: this needs to be free of localstack imports + + +def set_and_remove_profile_from_sys_argv(): + """ + Performs the following steps: + + 1. Use argparse to parse the command line arguments for the --profile flag. + All occurrences are removed from the sys.argv list, and the value from + the last occurrence is used. This allows the user to specify a profile + at any point on the command line. + + 2. If a --profile flag is not found, check for the -p flag. 
The first + occurrence of the -p flag is used and it is not removed from sys.argv. + The reasoning for this is that at least one of the CLI subcommands has + a -p flag, and we want to keep it in sys.argv for that command to + pick up. An existing bug means that if a -p flag is used with a + subcommand, it could erroneously be used as the profile value as well. + This behaviour is undesired, but we must maintain back-compatibility of + allowing the profile to be specified using -p. + + 3. If a profile is found, the 'CONFIG_PROFILE' os variable is set + accordingly. This is later picked up by ``localstack.config``. + + WARNING: Any --profile options are REMOVED from sys.argv, so that they are + not passed to the localstack CLI. This allows the profile option + to be set at any point on the command line. + """ + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--profile") + namespace, sys.argv = parser.parse_known_args(sys.argv) + profile = namespace.profile + + if not profile: + # if no profile is given, check for the -p argument + profile = parse_p_argument(sys.argv) + + if profile: + os.environ["CONFIG_PROFILE"] = profile.strip() + + +def parse_p_argument(args) -> Optional[str]: + """ + Lightweight arg parsing to find the first occurrence of ``-p <config>``, or ``-p=<config>`` and return the value of + ``<config>`` from the given arguments. + + :param args: list of CLI arguments + :returns: the value of ``-p``. + """ + for i, current_arg in enumerate(args): + if current_arg.startswith("-p="): + # if using the "<arg>=<value>" notation, we remove the "-p=" prefix to get the value + return current_arg[3:] + if current_arg == "-p": + # otherwise use the next arg in the args list as value + try: + return args[i + 1] + except IndexError: + return None + + return None diff --git a/localstack-core/localstack/config.py b/localstack-core/localstack/config.py new file mode 100644 index 0000000000000..5c2af11762fb4 --- /dev/null +++ b/localstack-core/localstack/config.py @@ -0,0 +1,1644 @@ +import ipaddress +import logging +import os +import platform +import re +import socket +import subprocess +import tempfile +import time +import warnings +from collections import defaultdict +from typing import Any, Dict, List, Mapping, Optional, Tuple, TypeVar, Union + +from localstack import constants +from localstack.constants import ( + DEFAULT_BUCKET_MARKER_LOCAL, + DEFAULT_DEVELOP_PORT, + DEFAULT_VOLUME_DIR, + ENV_INTERNAL_TEST_COLLECT_METRIC, + ENV_INTERNAL_TEST_RUN, + FALSE_STRINGS, + LOCALHOST, + LOCALHOST_IP, + LOCALSTACK_ROOT_FOLDER, + LOG_LEVELS, + TRACE_LOG_LEVELS, + TRUE_STRINGS, +) + +T = TypeVar("T", str, int) + +# keep track of start time, for performance debugging +load_start_time = time.time() + + +class Directories: + """ + Holds different directories available to localstack. Some directories are shared between the host and the + localstack container, some live only on the host and others in the container. + + Attributes: + static_libs: container only; binaries and libraries statically packaged with the image + var_libs: shared; binaries and libraries+data computed at runtime: lazy-loaded binaries, ssl cert, ... + cache: shared; ephemeral data that has to persist across localstack runs and reboots + tmp: container only; ephemeral data that has to persist across localstack runs but not reboots + mounted_tmp: shared; same as above, but shared for persistence across different containers, tests, ... 
+ functions: shared; volume to communicate between host<->lambda containers + data: shared; holds localstack state, pods, ... + config: host only; pre-defined configuration values, cached credentials, machine id, ... + init: shared; user-defined provisioning scripts executed in the container when it starts + logs: shared; log files produced by localstack + """ + + static_libs: str + var_libs: str + cache: str + tmp: str + mounted_tmp: str + functions: str + data: str + config: str + init: str + logs: str + + def __init__( + self, + static_libs: str, + var_libs: str, + cache: str, + tmp: str, + mounted_tmp: str, + functions: str, + data: str, + config: str, + init: str, + logs: str, + ) -> None: + super().__init__() + self.static_libs = static_libs + self.var_libs = var_libs + self.cache = cache + self.tmp = tmp + self.mounted_tmp = mounted_tmp + self.functions = functions + self.data = data + self.config = config + self.init = init + self.logs = logs + + @staticmethod + def defaults() -> "Directories": + """Returns Localstack directory paths based on the localstack filesystem hierarchy.""" + return Directories( + static_libs="/usr/lib/localstack", + var_libs=f"{DEFAULT_VOLUME_DIR}/lib", + cache=f"{DEFAULT_VOLUME_DIR}/cache", + tmp=os.path.join(tempfile.gettempdir(), "localstack"), + mounted_tmp=f"{DEFAULT_VOLUME_DIR}/tmp", + functions=f"{DEFAULT_VOLUME_DIR}/tmp", # FIXME: remove - this was misconceived + data=f"{DEFAULT_VOLUME_DIR}/state", + logs=f"{DEFAULT_VOLUME_DIR}/logs", + config="/etc/localstack/conf.d", # for future use + init="/etc/localstack/init", + ) + + @staticmethod + def for_container() -> "Directories": + """ + Returns Localstack directory paths as they are defined within the container. Everything shared and writable + lives in /var/lib/localstack or {tempfile.gettempdir()}/localstack. + + :returns: Directories object + """ + defaults = Directories.defaults() + + return Directories( + static_libs=defaults.static_libs, + var_libs=defaults.var_libs, + cache=defaults.cache, + tmp=defaults.tmp, + mounted_tmp=defaults.mounted_tmp, + functions=defaults.functions, + data=defaults.data if PERSISTENCE else os.path.join(defaults.tmp, "state"), + config=defaults.config, + logs=defaults.logs, + init=defaults.init, + ) + + @staticmethod + def for_host() -> "Directories": + """Return directories used for running localstack in host mode. Note that these are *not* the directories + that are mounted into the container when the user starts localstack.""" + root = os.environ.get("FILESYSTEM_ROOT") or os.path.join( + LOCALSTACK_ROOT_FOLDER, ".filesystem" + ) + root = os.path.abspath(root) + + defaults = Directories.for_container() + + tmp = os.path.join(root, defaults.tmp.lstrip("/")) + data = os.path.join(root, defaults.data.lstrip("/")) + + return Directories( + static_libs=os.path.join(root, defaults.static_libs.lstrip("/")), + var_libs=os.path.join(root, defaults.var_libs.lstrip("/")), + cache=os.path.join(root, defaults.cache.lstrip("/")), + tmp=tmp, + mounted_tmp=os.path.join(root, defaults.mounted_tmp.lstrip("/")), + functions=os.path.join(root, defaults.functions.lstrip("/")), + data=data if PERSISTENCE else os.path.join(tmp, "state"), + config=os.path.join(root, defaults.config.lstrip("/")), + init=os.path.join(root, defaults.init.lstrip("/")), + logs=os.path.join(root, defaults.logs.lstrip("/")), + ) + + @staticmethod + def for_cli() -> "Directories": + """Returns directories used for when running localstack CLI commands from the host system. 
Unlike + ``for_container``, these needs to be cross-platform. Ideally, this should not be needed at all, + because the localstack runtime and CLI do not share any control paths. There are a handful of + situations where directories or files may be created lazily for CLI commands. Some paths are + intentionally set to None to provoke errors if these paths are used from the CLI - which they + shouldn't. This is a symptom of not having a clear separation between CLI/runtime code, which will + be a future project.""" + import tempfile + + from localstack.utils import files + + tmp_dir = os.path.join(tempfile.gettempdir(), "localstack-cli") + cache_dir = (files.get_user_cache_dir()).absolute() / "localstack-cli" + + return Directories( + static_libs=None, + var_libs=None, + cache=str(cache_dir), # used by analytics metadata + tmp=tmp_dir, + mounted_tmp=tmp_dir, + functions=None, + data=os.path.join(tmp_dir, "state"), # used by localstack-pro config TODO: remove + logs=os.path.join(tmp_dir, "logs"), # used for container logs + config=None, # in the context of the CLI, config.CONFIG_DIR should be used + init=None, + ) + + def mkdirs(self): + for folder in [ + self.static_libs, + self.var_libs, + self.cache, + self.tmp, + self.mounted_tmp, + self.functions, + self.data, + self.config, + self.init, + self.logs, + ]: + if folder and not os.path.exists(folder): + try: + os.makedirs(folder) + except Exception: + # this can happen due to a race condition when starting + # multiple processes in parallel. Should be safe to ignore + pass + + def __str__(self): + return str(self.__dict__) + + +def eval_log_type(env_var_name: str) -> Union[str, bool]: + """Get the log type from environment variable""" + ls_log = os.environ.get(env_var_name, "").lower().strip() + return ls_log if ls_log in LOG_LEVELS else False + + +def parse_boolean_env(env_var_name: str) -> Optional[bool]: + """Parse the value of the given env variable and return True/False, or None if it is not a boolean value.""" + value = os.environ.get(env_var_name, "").lower().strip() + if value in TRUE_STRINGS: + return True + if value in FALSE_STRINGS: + return False + return None + + +def is_env_true(env_var_name: str) -> bool: + """Whether the given environment variable has a truthy value.""" + return os.environ.get(env_var_name, "").lower().strip() in TRUE_STRINGS + + +def is_env_not_false(env_var_name: str) -> bool: + """Whether the given environment variable is empty or has a truthy value.""" + return os.environ.get(env_var_name, "").lower().strip() not in FALSE_STRINGS + + +def load_environment(profiles: str = None, env=os.environ) -> List[str]: + """Loads the environment variables from ~/.localstack/{profile}.env, for each profile listed in the profiles. + :param env: environment to load profile to. 
Defaults to `os.environ` + :param profiles: a comma separated list of profiles to load (defaults to "default") + :returns str: the list of the actually loaded profiles (might be the fallback) + """ + if not profiles: + profiles = "default" + + profiles = profiles.split(",") + environment = {} + import dotenv + + for profile in profiles: + profile = profile.strip() + path = os.path.join(CONFIG_DIR, f"{profile}.env") + if not os.path.exists(path): + continue + environment.update(dotenv.dotenv_values(path)) + + for k, v in environment.items(): + # we do not want to override the environment + if k not in env and v is not None: + env[k] = v + + return profiles + + +def is_persistence_enabled() -> bool: + return PERSISTENCE and dirs.data + + +def is_linux() -> bool: + return platform.system() == "Linux" + + +def is_macos() -> bool: + return platform.system() == "Darwin" + + +def is_windows() -> bool: + return platform.system().lower() == "windows" + + +def ping(host): + """Returns True if the host responds to a ping request""" + is_in_windows = is_windows() + ping_opts = "-n 1 -w 2000" if is_in_windows else "-c 1 -W 2" + args = "ping %s %s" % (ping_opts, host) + return ( + subprocess.call( + args, shell=not is_in_windows, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + == 0 + ) + + +def in_docker(): + """ + Returns True if running in a docker container, else False + Ref. https://docs.docker.com/config/containers/runmetrics/#control-groups + """ + if OVERRIDE_IN_DOCKER is not None: + return OVERRIDE_IN_DOCKER + + # check some marker files that we create in our Dockerfiles + for path in [ + "/usr/lib/localstack/.community-version", + "/usr/lib/localstack/.pro-version", + "/tmp/localstack/.marker", + ]: + if os.path.isfile(path): + return True + + # details: https://github.com/localstack/localstack/pull/4352 + if os.path.exists("/.dockerenv"): + return True + if os.path.exists("/run/.containerenv"): + return True + + if not os.path.exists("/proc/1/cgroup"): + return False + try: + if any( + [ + os.path.exists("/sys/fs/cgroup/memory/docker/"), + any( + "docker-" in file_names + for file_names in os.listdir("/sys/fs/cgroup/memory/system.slice") + ), + os.path.exists("/sys/fs/cgroup/docker/"), + any( + "docker-" in file_names + for file_names in os.listdir("/sys/fs/cgroup/system.slice/") + ), + ] + ): + return False + except Exception: + pass + with open("/proc/1/cgroup", "rt") as ifh: + content = ifh.read() + if "docker" in content or "buildkit" in content: + return True + os_hostname = socket.gethostname() + if os_hostname and os_hostname in content: + return True + + # containerd does not set any specific file or config, but it does use + # io.containerd.snapshotter.v1.overlayfs as the overlay filesystem for `/`. + try: + with open("/proc/mounts", "rt") as infile: + for line in infile: + line = line.strip() + + if not line: + continue + + # skip comments + if line[0] == "#": + continue + + # format (man 5 fstab) + # <spec> <mount point> <type> <options> <rest>... 
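+                # illustrative /proc/mounts line matched by the check below (options are in the 4th field):
+                #   overlay / overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/... 0 0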
+                parts = line.split()
+                if len(parts) < 4:
+                    # badly formatted line
+                    continue
+
+                mount_point = parts[1]
+                options = parts[3]
+
+                # only consider the root filesystem
+                if mount_point != "/":
+                    continue
+
+                if "io.containerd" in options:
+                    return True
+
+    except FileNotFoundError:
+        pass
+
+    return False
+
+
+# whether the `in_docker` check should always return True or False
+OVERRIDE_IN_DOCKER = parse_boolean_env("OVERRIDE_IN_DOCKER")
+
+is_in_docker = in_docker()
+is_in_linux = is_linux()
+is_in_macos = is_macos()
+default_ip = "0.0.0.0" if is_in_docker else "127.0.0.1"
+
+# CLI specific: the configuration profile to load
+CONFIG_PROFILE = os.environ.get("CONFIG_PROFILE", "").strip()
+
+# CLI specific: host configuration directory
+CONFIG_DIR = os.environ.get("CONFIG_DIR", os.path.expanduser("~/.localstack"))
+
+# keep this on top to populate the environment
+try:
+    # CLI specific: the actually loaded configuration profile
+    LOADED_PROFILES = load_environment(CONFIG_PROFILE)
+except ImportError:
+    # dotenv may not be available in lambdas or other environments where config is loaded
+    LOADED_PROFILES = None
+
+# loaded components name - default: all components are loaded and the first one is chosen
+RUNTIME_COMPONENTS = os.environ.get("RUNTIME_COMPONENTS", "").strip()
+
+# directory for persisting data (TODO: deprecated, simply use PERSISTENCE=1)
+DATA_DIR = os.environ.get("DATA_DIR", "").strip()
+
+# whether localstack should persist service state across localstack runs
+PERSISTENCE = is_env_true("PERSISTENCE")
+
+# the strategy for loading snapshots from disk when `PERSISTENCE=1` is used (on_startup, on_request, manual)
+SNAPSHOT_LOAD_STRATEGY = os.environ.get("SNAPSHOT_LOAD_STRATEGY", "").upper()
+
+# the strategy for saving snapshots to disk when `PERSISTENCE=1` is used (on_shutdown, on_request, scheduled, manual)
+SNAPSHOT_SAVE_STRATEGY = os.environ.get("SNAPSHOT_SAVE_STRATEGY", "").upper()
+
+# the flush interval (in seconds) for persistence when the snapshot save strategy is set to "scheduled"
+SNAPSHOT_FLUSH_INTERVAL = int(os.environ.get("SNAPSHOT_FLUSH_INTERVAL") or 15)
+
+# whether to clear config.dirs.tmp on startup and shutdown
+CLEAR_TMP_FOLDER = is_env_not_false("CLEAR_TMP_FOLDER")
+
+# folder for temporary files and data
+TMP_FOLDER = os.path.join(tempfile.gettempdir(), "localstack")
+
+# this is exclusively for the CLI to configure the container mount into /var/lib/localstack
+VOLUME_DIR = os.environ.get("LOCALSTACK_VOLUME_DIR", "").strip() or TMP_FOLDER
+
+# fix for Mac OS, to be able to mount /var/folders in Docker
+if TMP_FOLDER.startswith("/var/folders/") and os.path.exists("/private%s" % TMP_FOLDER):
+    TMP_FOLDER = "/private%s" % TMP_FOLDER
+
+# whether to enable verbose debug logging ("LOG" is used when using the CLI with LOCALSTACK_LOG instead of LS_LOG)
+LS_LOG = eval_log_type("LS_LOG") or eval_log_type("LOG")
+DEBUG = is_env_true("DEBUG") or LS_LOG in TRACE_LOG_LEVELS
+
+# PUBLIC PREVIEW: 0 (default), 1 (preview)
+# When enabled, it triggers specialised workflows for debugging.
+LAMBDA_DEBUG_MODE = is_env_true("LAMBDA_DEBUG_MODE")
+
+# path to the lambda debug mode configuration file.
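+# (illustrative example only, assuming a YAML config file: LAMBDA_DEBUG_MODE_CONFIG_PATH=~/.localstack/lambda_debug_mode.yaml)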
+LAMBDA_DEBUG_MODE_CONFIG_PATH = os.environ.get("LAMBDA_DEBUG_MODE_CONFIG_PATH")
+
+# EXPERIMENTAL: allow setting custom log levels for individual loggers
+LOG_LEVEL_OVERRIDES = os.environ.get("LOG_LEVEL_OVERRIDES", "")
+
+# whether to enable debugpy
+DEVELOP = is_env_true("DEVELOP")
+
+# port for the debugger
+DEVELOP_PORT = int(os.environ.get("DEVELOP_PORT", "").strip() or DEFAULT_DEVELOP_PORT)
+
+# whether to make debugpy wait for a debugger client
+WAIT_FOR_DEBUGGER = is_env_true("WAIT_FOR_DEBUGGER")
+
+# whether to assume http or https for `get_protocol`
+USE_SSL = is_env_true("USE_SSL")
+
+# Whether to report internal failures as 500 or 501 errors.
+FAIL_FAST = is_env_true("FAIL_FAST")
+
+# whether to run in TF compatibility mode for TF integration tests
+# (e.g., returning verbatim ports for ELB resources, rather than edge port 4566, etc.)
+TF_COMPAT_MODE = is_env_true("TF_COMPAT_MODE")
+
+# default encoding used to convert strings to byte arrays (mainly for Python 3 compatibility)
+DEFAULT_ENCODING = "utf-8"
+
+# path to local Docker UNIX domain socket
+DOCKER_SOCK = os.environ.get("DOCKER_SOCK", "").strip() or "/var/run/docker.sock"
+
+# additional flags to pass to "docker run" when starting the stack in Docker
+DOCKER_FLAGS = os.environ.get("DOCKER_FLAGS", "").strip()
+
+# command used to run Docker containers (e.g., set to "sudo docker" to run as sudo)
+DOCKER_CMD = os.environ.get("DOCKER_CMD", "").strip() or "docker"
+
+# use the command line docker client instead of the new sdk version, might get removed in the future
+LEGACY_DOCKER_CLIENT = is_env_true("LEGACY_DOCKER_CLIENT")
+
+# Docker image to use when starting up containers for port checks
+PORTS_CHECK_DOCKER_IMAGE = os.environ.get("PORTS_CHECK_DOCKER_IMAGE", "").strip()
+
+
+def is_trace_logging_enabled():
+    if LS_LOG:
+        return str(LS_LOG).lower() in TRACE_LOG_LEVELS
+    return False
+
+
+# set log levels immediately, but will be overwritten later by setup_logging
+if DEBUG:
+    logging.getLogger("").setLevel(logging.DEBUG)
+    logging.getLogger("localstack").setLevel(logging.DEBUG)
+
+LOG = logging.getLogger(__name__)
+if is_trace_logging_enabled():
+    load_end_time = time.time()
+    LOG.debug(
+        "Initializing the configuration took %s ms", int((load_end_time - load_start_time) * 1000)
+    )
+
+
+def is_ipv6_address(host: str) -> bool:
+    """
+    Returns True if the given host is an IPv6 address.
+    """
+
+    if not host:
+        return False
+
+    try:
+        ipaddress.IPv6Address(host)
+        return True
+    except ipaddress.AddressValueError:
+        return False
+
+
+class HostAndPort:
+    """
+    Definition of an address for a server to listen to.
+
+    Includes a `parse` method to convert from `str`, allowing for default fallbacks, as well as
+    some helper methods to help tests - particularly testing for equality and a hash function
+    so that `HostAndPort` instances can be used as keys to dictionaries.
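+
+    A minimal usage sketch (illustrative only; behavior as documented in `parse` below):
+
+        addr = HostAndPort.parse(":4566", default_host="127.0.0.1", default_port=4566)
+        assert addr == "127.0.0.1:4566"  # equality against plain strings is supported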
+ """ + + host: str + port: int + + def __init__(self, host: str, port: int): + self.host = host + self.port = port + + @classmethod + def parse( + cls, + input: str, + default_host: str, + default_port: int, + ) -> "HostAndPort": + """ + Parse a `HostAndPort` from strings like: + - 0.0.0.0:4566 -> host=0.0.0.0, port=4566 + - 0.0.0.0 -> host=0.0.0.0, port=`default_port` + - :4566 -> host=`default_host`, port=4566 + - [::]:4566 -> host=[::], port=4566 + - [::1] -> host=[::1], port=`default_port` + """ + host, port = default_host, default_port + + # recognize IPv6 addresses (+ port) + if input.startswith("["): + ipv6_pattern = re.compile(r"^\[(?P<host>[^]]+)\](:(?P<port>\d+))?$") + match = ipv6_pattern.match(input) + + if match: + host = match.group("host") + if not is_ipv6_address(host): + raise ValueError( + f"input looks like an IPv6 address (is enclosed in square brackets), but is not valid: {host}" + ) + port_s = match.group("port") + if port_s: + port = cls._validate_port(port_s) + else: + raise ValueError( + f'input looks like an IPv6 address, but is invalid. Should be formatted "[ip]:port": {input}' + ) + + # recognize IPv4 address + port + elif ":" in input: + hostname, port_s = input.split(":", 1) + if hostname.strip(): + host = hostname.strip() + port = cls._validate_port(port_s) + else: + if input.strip(): + host = input.strip() + + # validation + if port < 0 or port >= 2**16: + raise ValueError("port out of range") + + return cls(host=host, port=port) + + @classmethod + def _validate_port(cls, port_s: str) -> int: + try: + port = int(port_s) + except ValueError as e: + raise ValueError(f"specified port {port_s} not a number") from e + + return port + + def _get_unprivileged_port_range_start(self) -> int: + try: + with open( + "/proc/sys/net/ipv4/ip_unprivileged_port_start", "rt" + ) as unprivileged_port_start: + port = unprivileged_port_start.read() + return int(port.strip()) + except Exception: + return 1024 + + def is_unprivileged(self) -> bool: + return self.port >= self._get_unprivileged_port_range_start() + + def host_and_port(self) -> str: + formatted_host = f"[{self.host}]" if is_ipv6_address(self.host) else self.host + return f"{formatted_host}:{self.port}" if self.port is not None else formatted_host + + def __hash__(self) -> int: + return hash((self.host, self.port)) + + # easier tests + def __eq__(self, other: "str | HostAndPort") -> bool: + if isinstance(other, self.__class__): + return self.host == other.host and self.port == other.port + elif isinstance(other, str): + return str(self) == other + else: + raise TypeError(f"cannot compare {self.__class__} to {other.__class__}") + + def __str__(self) -> str: + return self.host_and_port() + + def __repr__(self) -> str: + return f"HostAndPort(host={self.host}, port={self.port})" + + +class UniqueHostAndPortList(List[HostAndPort]): + """ + Container type that ensures that ports added to the list are unique based + on these rules: + - :: "trumps" any other binding on the same port, including both IPv6 and IPv4 + addresses. All other bindings for this port are removed, since :: already + covers all interfaces. For example, adding 127.0.0.1:4566, [::1]:4566, + and [::]:4566 would result in only [::]:4566 being preserved. + - 0.0.0.0 "trumps" any other binding on IPv4 addresses only. IPv6 addresses + are not removed. 
+    - Identical hosts and ports are de-duped
+    """
+
+    def __init__(self, iterable: Union[List[HostAndPort], None] = None):
+        super().__init__(iterable or [])
+        self._ensure_unique()
+
+    def _ensure_unique(self):
+        """
+        Ensure that all bindings on the same port are de-duped.
+        """
+        if len(self) <= 1:
+            return
+
+        unique: List[HostAndPort] = list()
+
+        # Build a dictionary of hosts by port
+        hosts_by_port: Dict[int, List[str]] = defaultdict(list)
+        for item in self:
+            hosts_by_port[item.port].append(item.host)
+
+        # For any given port, dedupe the hosts
+        for port, hosts in hosts_by_port.items():
+            deduped_hosts = set(hosts)
+
+            # IPv6 all interfaces: this is the most general binding.
+            # Any others should be removed.
+            if "::" in deduped_hosts:
+                unique.append(HostAndPort(host="::", port=port))
+                continue
+            # IPv4 all interfaces: this is the next most general binding.
+            # Any others should be removed.
+            if "0.0.0.0" in deduped_hosts:
+                unique.append(HostAndPort(host="0.0.0.0", port=port))
+                continue
+
+            # All other bindings just need to be unique
+            unique.extend([HostAndPort(host=host, port=port) for host in deduped_hosts])
+
+        self.clear()
+        self.extend(unique)
+
+    def append(self, value: HostAndPort):
+        super().append(value)
+        self._ensure_unique()
+
+
+def populate_edge_configuration(
+    environment: Mapping[str, str],
+) -> Tuple[HostAndPort, UniqueHostAndPortList]:
+    """Populate the LocalStack edge configuration from environment variables."""
+    localstack_host_raw = environment.get("LOCALSTACK_HOST")
+    gateway_listen_raw = environment.get("GATEWAY_LISTEN")
+
+    # parse gateway listen from multiple components
+    if gateway_listen_raw is not None:
+        gateway_listen = []
+        for address in gateway_listen_raw.split(","):
+            gateway_listen.append(
+                HostAndPort.parse(
+                    address.strip(),
+                    default_host=default_ip,
+                    default_port=constants.DEFAULT_PORT_EDGE,
+                )
+            )
+    else:
+        # use default if gateway listen is not defined
+        gateway_listen = [HostAndPort(host=default_ip, port=constants.DEFAULT_PORT_EDGE)]
+
+    # the actual value of the LOCALSTACK_HOST port now depends on what gateway listen actually listens to.
+    if localstack_host_raw is None:
+        localstack_host = HostAndPort(
+            host=constants.LOCALHOST_HOSTNAME, port=gateway_listen[0].port
+        )
+    else:
+        localstack_host = HostAndPort.parse(
+            localstack_host_raw,
+            default_host=constants.LOCALHOST_HOSTNAME,
+            default_port=gateway_listen[0].port,
+        )
+
+    assert gateway_listen is not None
+    assert localstack_host is not None
+
+    return (
+        localstack_host,
+        UniqueHostAndPortList(gateway_listen),
+    )
+
+
+# How to access LocalStack
+(
+    # -- Cosmetic
+    LOCALSTACK_HOST,
+    # -- Edge configuration
+    # Main configuration of the listen address of the hypercorn proxy. Of the form
+    # <ip_address>:<port>(,<ip_address>:<port>)*
+    GATEWAY_LISTEN,
+) = populate_edge_configuration(os.environ)
+
+GATEWAY_WORKER_COUNT = int(os.environ.get("GATEWAY_WORKER_COUNT") or 1000)
+
+# the gateway server that should be used (supported: hypercorn, twisted; dev: werkzeug)
+GATEWAY_SERVER = os.environ.get("GATEWAY_SERVER", "").strip() or "twisted"
+
+# IP of the docker bridge used to enable access between containers
+DOCKER_BRIDGE_IP = os.environ.get("DOCKER_BRIDGE_IP", "").strip()
+
+# Default timeout for Docker API calls sent by the Docker SDK client, in seconds.
+DOCKER_SDK_DEFAULT_TIMEOUT_SECONDS = int(os.environ.get("DOCKER_SDK_DEFAULT_TIMEOUT_SECONDS") or 60)
+
+# Default number of retries to connect to the Docker API by the Docker SDK client.
+DOCKER_SDK_DEFAULT_RETRIES = int(os.environ.get("DOCKER_SDK_DEFAULT_RETRIES") or 0)
+
+# whether to enable API-based updates of configuration variables at runtime
+ENABLE_CONFIG_UPDATES = is_env_true("ENABLE_CONFIG_UPDATES")
+
+# CORS settings
+DISABLE_CORS_HEADERS = is_env_true("DISABLE_CORS_HEADERS")
+DISABLE_CORS_CHECKS = is_env_true("DISABLE_CORS_CHECKS")
+DISABLE_CUSTOM_CORS_S3 = is_env_true("DISABLE_CUSTOM_CORS_S3")
+DISABLE_CUSTOM_CORS_APIGATEWAY = is_env_true("DISABLE_CUSTOM_CORS_APIGATEWAY")
+EXTRA_CORS_ALLOWED_HEADERS = os.environ.get("EXTRA_CORS_ALLOWED_HEADERS", "").strip()
+EXTRA_CORS_EXPOSE_HEADERS = os.environ.get("EXTRA_CORS_EXPOSE_HEADERS", "").strip()
+EXTRA_CORS_ALLOWED_ORIGINS = os.environ.get("EXTRA_CORS_ALLOWED_ORIGINS", "").strip()
+DISABLE_PREFLIGHT_PROCESSING = is_env_true("DISABLE_PREFLIGHT_PROCESSING")
+
+# whether to disable publishing events to the API
+DISABLE_EVENTS = is_env_true("DISABLE_EVENTS")
+DEBUG_ANALYTICS = is_env_true("DEBUG_ANALYTICS")
+
+# whether to log fine-grained debugging information for the handler chain
+DEBUG_HANDLER_CHAIN = is_env_true("DEBUG_HANDLER_CHAIN")
+
+# whether to eagerly start services
+EAGER_SERVICE_LOADING = is_env_true("EAGER_SERVICE_LOADING")
+
+# whether to selectively load services in SERVICES
+STRICT_SERVICE_LOADING = is_env_not_false("STRICT_SERVICE_LOADING")
+
+# Whether to skip downloading additional infrastructure components (e.g., custom Elasticsearch versions)
+SKIP_INFRA_DOWNLOADS = os.environ.get("SKIP_INFRA_DOWNLOADS", "").strip()
+
+# Whether to skip downloading our signed SSL cert.
+SKIP_SSL_CERT_DOWNLOAD = is_env_true("SKIP_SSL_CERT_DOWNLOAD")
+
+# Absolute path to a custom certificate (pem file)
+CUSTOM_SSL_CERT_PATH = os.environ.get("CUSTOM_SSL_CERT_PATH", "").strip()
+
+# Whether to delete the cached signed SSL certificate at startup
+REMOVE_SSL_CERT = is_env_true("REMOVE_SSL_CERT")
+
+# Allow non-standard AWS regions
+ALLOW_NONSTANDARD_REGIONS = is_env_true("ALLOW_NONSTANDARD_REGIONS")
+if ALLOW_NONSTANDARD_REGIONS:
+    os.environ["MOTO_ALLOW_NONEXISTENT_REGION"] = "true"
+
+# name of the main Docker container
+MAIN_CONTAINER_NAME = os.environ.get("MAIN_CONTAINER_NAME", "").strip() or "localstack-main"
+
+# the latest commit id of the repository when the docker image was created
+LOCALSTACK_BUILD_GIT_HASH = os.environ.get("LOCALSTACK_BUILD_GIT_HASH", "").strip() or None
+
+# the date on which the docker image was created
+LOCALSTACK_BUILD_DATE = os.environ.get("LOCALSTACK_BUILD_DATE", "").strip() or None
+
+# Equivalent to HTTP_PROXY, but only applicable for external connections
+OUTBOUND_HTTP_PROXY = os.environ.get("OUTBOUND_HTTP_PROXY", "")
+
+# Equivalent to HTTPS_PROXY, but only applicable for external connections
+OUTBOUND_HTTPS_PROXY = os.environ.get("OUTBOUND_HTTPS_PROXY", "")
+
+# Feature flag to enable validation of internal endpoint responses in the handler chain. For test use only.
+OPENAPI_VALIDATE_RESPONSE = is_env_true("OPENAPI_VALIDATE_RESPONSE")
+# Flag to enable the validation of the requests made to the LocalStack internal endpoints. Disabled by default.
+OPENAPI_VALIDATE_REQUEST = is_env_true("OPENAPI_VALIDATE_REQUEST")
+
+# whether to skip waiting for the infrastructure to shut down, or exit immediately
+FORCE_SHUTDOWN = is_env_not_false("FORCE_SHUTDOWN")
+
+# set variables no_proxy, i.e., run internal service calls directly
+no_proxy = ",".join([constants.LOCALHOST_HOSTNAME, LOCALHOST, LOCALHOST_IP, "[::1]"])
+if os.environ.get("no_proxy"):
+    os.environ["no_proxy"] += "," + no_proxy
+elif os.environ.get("NO_PROXY"):
+    os.environ["NO_PROXY"] += "," + no_proxy
+else:
+    os.environ["no_proxy"] = no_proxy
+
+# additional CLI commands, can be set by plugins
+CLI_COMMANDS = {}
+
+# determine IP of Docker bridge
+if not DOCKER_BRIDGE_IP:
+    DOCKER_BRIDGE_IP = "172.17.0.1"
+    if is_in_docker:
+        candidates = (DOCKER_BRIDGE_IP, "172.18.0.1")
+        for ip in candidates:
+            # TODO: remove from here - should not perform I/O operations in top-level config.py
+            if ping(ip):
+                DOCKER_BRIDGE_IP = ip
+                break
+
+# AWS account used to store internal resources such as Lambda archives or internal SQS queues.
+# It should not be modified by users, or be visible to them, except through a presigned URL in the
+# get-function call.
+INTERNAL_RESOURCE_ACCOUNT = os.environ.get("INTERNAL_RESOURCE_ACCOUNT") or "949334387222"
+
+# TODO: remove with 4.1.0
+# Determine which implementation to use for the event rule / event filtering engine used by multiple services:
+# EventBridge, EventBridge Pipes, Lambda Event Source Mapping
+# Options: python (default) | java (deprecated since 4.0.3)
+EVENT_RULE_ENGINE = os.environ.get("EVENT_RULE_ENGINE", "python").strip()
+
+# -----
+# SERVICE-SPECIFIC CONFIGS BELOW
+# -----
+
+# port ranges for external service instances (e.g., Elasticsearch clusters, OpenSearch clusters, ...)
+EXTERNAL_SERVICE_PORTS_START = int(
+    os.environ.get("EXTERNAL_SERVICE_PORTS_START")
+    or os.environ.get("SERVICE_INSTANCES_PORTS_START")
+    or 4510
+)
+EXTERNAL_SERVICE_PORTS_END = int(
+    os.environ.get("EXTERNAL_SERVICE_PORTS_END")
+    or os.environ.get("SERVICE_INSTANCES_PORTS_END")
+    or (EXTERNAL_SERVICE_PORTS_START + 50)
+)
+
+# The default container runtime to use
+CONTAINER_RUNTIME = os.environ.get("CONTAINER_RUNTIME", "").strip() or "docker"
+
+# PUBLIC v1: -Xmx512M (example) Currently not supported in new provider but possible via custom entrypoint.
+# Allow passing custom JVM options to Java Lambdas executed in Docker.
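+# e.g. (illustrative, reusing the -Xmx512M example above): LAMBDA_JAVA_OPTS="-Xmx512M"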
+LAMBDA_JAVA_OPTS = os.environ.get("LAMBDA_JAVA_OPTS", "").strip()
+
+# limit at which kinesis-mock will start throwing exceptions
+KINESIS_SHARD_LIMIT = os.environ.get("KINESIS_SHARD_LIMIT", "").strip() or "100"
+KINESIS_PERSISTENCE = is_env_not_false("KINESIS_PERSISTENCE")
+
+# limit at which kinesis-mock will start throwing exceptions
+KINESIS_ON_DEMAND_STREAM_COUNT_LIMIT = (
+    os.environ.get("KINESIS_ON_DEMAND_STREAM_COUNT_LIMIT", "").strip() or "10"
+)
+
+# delay in kinesis-mock response when making changes to streams
+KINESIS_LATENCY = os.environ.get("KINESIS_LATENCY", "").strip() or "500"
+
+# Delay between data persistence (in seconds)
+KINESIS_MOCK_PERSIST_INTERVAL = os.environ.get("KINESIS_MOCK_PERSIST_INTERVAL", "").strip() or "5s"
+
+# Kinesis mock log level override when inconsistent with LS_LOG (e.g., when LS_LOG=debug)
+KINESIS_MOCK_LOG_LEVEL = os.environ.get("KINESIS_MOCK_LOG_LEVEL", "").strip()
+
+# randomly inject faults to Kinesis
+KINESIS_ERROR_PROBABILITY = float(os.environ.get("KINESIS_ERROR_PROBABILITY", "").strip() or 0.0)
+
+# SEMI-PUBLIC: "node" (default); not actively communicated
+# Select whether to use the node or scala build when running Kinesis Mock
+KINESIS_MOCK_PROVIDER_ENGINE = os.environ.get("KINESIS_MOCK_PROVIDER_ENGINE", "").strip() or "node"
+
+# set the maximum Java heap size corresponding to the '-Xmx<size>' flag
+KINESIS_MOCK_MAXIMUM_HEAP_SIZE = (
+    os.environ.get("KINESIS_MOCK_MAXIMUM_HEAP_SIZE", "").strip() or "512m"
+)
+
+# set the initial Java heap size corresponding to the '-Xms<size>' flag
+KINESIS_MOCK_INITIAL_HEAP_SIZE = (
+    os.environ.get("KINESIS_MOCK_INITIAL_HEAP_SIZE", "").strip() or "256m"
+)
+
+# randomly inject faults to DynamoDB
+DYNAMODB_ERROR_PROBABILITY = float(os.environ.get("DYNAMODB_ERROR_PROBABILITY", "").strip() or 0.0)
+DYNAMODB_READ_ERROR_PROBABILITY = float(
+    os.environ.get("DYNAMODB_READ_ERROR_PROBABILITY", "").strip() or 0.0
+)
+DYNAMODB_WRITE_ERROR_PROBABILITY = float(
+    os.environ.get("DYNAMODB_WRITE_ERROR_PROBABILITY", "").strip() or 0.0
+)
+
+# Java heap size for DynamoDB Local
+DYNAMODB_HEAP_SIZE = os.environ.get("DYNAMODB_HEAP_SIZE", "").strip() or "256m"
+
+# single DB instance across multiple credentials and regions
+DYNAMODB_SHARE_DB = int(os.environ.get("DYNAMODB_SHARE_DB") or 0)
+
+# the port on which to expose dynamodblocal
+DYNAMODB_LOCAL_PORT = int(os.environ.get("DYNAMODB_LOCAL_PORT") or 0)
+
+# Enables the automatic removal of stale key-value pairs based on TTL
+DYNAMODB_REMOVE_EXPIRED_ITEMS = is_env_true("DYNAMODB_REMOVE_EXPIRED_ITEMS")
+
+# Used to toggle PurgeInProgress exceptions when calling purge within 60 seconds
+SQS_DELAY_PURGE_RETRY = is_env_true("SQS_DELAY_PURGE_RETRY")
+
+# Used to toggle QueueDeletedRecently errors when re-creating a queue within 60 seconds of deleting it
+SQS_DELAY_RECENTLY_DELETED = is_env_true("SQS_DELAY_RECENTLY_DELETED")
+
+# Used to toggle MessageRetentionPeriod functionality in SQS queues
+SQS_ENABLE_MESSAGE_RETENTION_PERIOD = is_env_true("SQS_ENABLE_MESSAGE_RETENTION_PERIOD")
+
+# Strategy used when creating SQS queue URLs; can be "off", "standard" (default), "domain", "path", or "dynamic"
can be "off", "standard" (default), "domain", "path", or "dynamic" +SQS_ENDPOINT_STRATEGY = os.environ.get("SQS_ENDPOINT_STRATEGY", "") or "standard" + +# Disable the check for MaxNumberOfMessage in SQS ReceiveMessage +SQS_DISABLE_MAX_NUMBER_OF_MESSAGE_LIMIT = is_env_true("SQS_DISABLE_MAX_NUMBER_OF_MESSAGE_LIMIT") + +# Disable cloudwatch metrics for SQS +SQS_DISABLE_CLOUDWATCH_METRICS = is_env_true("SQS_DISABLE_CLOUDWATCH_METRICS") + +# Interval for reporting "approximate" metrics to cloudwatch, default is 60 seconds +SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL = int( + os.environ.get("SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL") or 60 +) + +# PUBLIC: Endpoint host under which LocalStack APIs are accessible from Lambda Docker containers. +HOSTNAME_FROM_LAMBDA = os.environ.get("HOSTNAME_FROM_LAMBDA", "").strip() + +# PUBLIC: hot-reload (default v2), __local__ (default v1) +# Magic S3 bucket name for Hot Reloading. The S3Key points to the source code on the local file system. +BUCKET_MARKER_LOCAL = ( + os.environ.get("BUCKET_MARKER_LOCAL", "").strip() or DEFAULT_BUCKET_MARKER_LOCAL +) + +# PUBLIC: Opt-out to inject the environment variable AWS_ENDPOINT_URL for automatic configuration of AWS SDKs: +# https://docs.aws.amazon.com/sdkref/latest/guide/feature-ss-endpoints.html +LAMBDA_DISABLE_AWS_ENDPOINT_URL = is_env_true("LAMBDA_DISABLE_AWS_ENDPOINT_URL") + +# PUBLIC: bridge (Docker default) +# Docker network driver for the Lambda and ECS containers. https://docs.docker.com/network/ +LAMBDA_DOCKER_NETWORK = os.environ.get("LAMBDA_DOCKER_NETWORK", "").strip() + +# PUBLIC v1: LocalStack DNS (default) +# Custom DNS server for the container running your lambda function. +LAMBDA_DOCKER_DNS = os.environ.get("LAMBDA_DOCKER_DNS", "").strip() + +# PUBLIC: -e KEY=VALUE -v host:container +# Additional flags passed to Docker run|create commands. +LAMBDA_DOCKER_FLAGS = os.environ.get("LAMBDA_DOCKER_FLAGS", "").strip() + +# PUBLIC: 0 (default) +# Enable this flag to run cross-platform compatible lambda functions natively (i.e., Docker selects architecture) and +# ignore the AWS architectures (i.e., x86_64, arm64) configured for the lambda function. +LAMBDA_IGNORE_ARCHITECTURE = is_env_true("LAMBDA_IGNORE_ARCHITECTURE") + +# TODO: test and add to docs +# EXPERIMENTAL: 0 (default) +# prebuild images before execution? Increased cold start time on the tradeoff of increased time until lambda is ACTIVE +LAMBDA_PREBUILD_IMAGES = is_env_true("LAMBDA_PREBUILD_IMAGES") + +# PUBLIC: docker (default), kubernetes (pro) +# Where Lambdas will be executed. +LAMBDA_RUNTIME_EXECUTOR = os.environ.get("LAMBDA_RUNTIME_EXECUTOR", CONTAINER_RUNTIME).strip() + +# PUBLIC: 20 (default) +# How many seconds Lambda will wait for the runtime environment to start up. +LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT = int(os.environ.get("LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT") or 20) + +# PUBLIC: base images for Lambda (default) https://docs.aws.amazon.com/lambda/latest/dg/runtimes-images.html +# localstack/services/lambda_/invocation/lambda_models.py:IMAGE_MAPPING +# Customize the Docker image of Lambda runtimes, either by: +# a) pattern with <runtime> placeholder, e.g. custom-repo/lambda-<runtime>:2022 +# b) json dict mapping the <runtime> to an image, e.g. 
{"python3.9": "custom-repo/lambda-py:thon3.9"} +LAMBDA_RUNTIME_IMAGE_MAPPING = os.environ.get("LAMBDA_RUNTIME_IMAGE_MAPPING", "").strip() + +# PUBLIC: 0 (default) +# Whether to disable usage of deprecated runtimes +LAMBDA_RUNTIME_VALIDATION = int(os.environ.get("LAMBDA_RUNTIME_VALIDATION") or 0) + +# PUBLIC: 1 (default) +# Whether to remove any Lambda Docker containers. +LAMBDA_REMOVE_CONTAINERS = ( + os.environ.get("LAMBDA_REMOVE_CONTAINERS", "").lower().strip() not in FALSE_STRINGS +) + +# PUBLIC: 600000 (default 10min) +# Time in milliseconds until lambda shuts down the execution environment after the last invocation has been processed. +# Set to 0 to immediately shut down the execution environment after an invocation. +LAMBDA_KEEPALIVE_MS = int(os.environ.get("LAMBDA_KEEPALIVE_MS", 600_000)) + +# PUBLIC: 1000 (default) +# The maximum number of events that functions can process simultaneously in the current Region. +# See AWS service quotas: https://docs.aws.amazon.com/general/latest/gr/lambda-service.html +# Concurrency limits. Like on AWS these apply per account and region. +LAMBDA_LIMITS_CONCURRENT_EXECUTIONS = int( + os.environ.get("LAMBDA_LIMITS_CONCURRENT_EXECUTIONS", 1_000) +) +# SEMI-PUBLIC: not actively communicated +# per account/region: there must be at least <LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY> unreserved concurrency. +LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY = int( + os.environ.get("LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY", 100) +) +# SEMI-PUBLIC: not actively communicated +LAMBDA_LIMITS_TOTAL_CODE_SIZE = int(os.environ.get("LAMBDA_LIMITS_TOTAL_CODE_SIZE", 80_530_636_800)) +# PUBLIC: documented after AWS changed validation around 2023-11 +LAMBDA_LIMITS_CODE_SIZE_ZIPPED = int(os.environ.get("LAMBDA_LIMITS_CODE_SIZE_ZIPPED", 52_428_800)) +# SEMI-PUBLIC: not actively communicated +LAMBDA_LIMITS_CODE_SIZE_UNZIPPED = int( + os.environ.get("LAMBDA_LIMITS_CODE_SIZE_UNZIPPED", 262_144_000) +) +# PUBLIC: documented upon customer request +LAMBDA_LIMITS_CREATE_FUNCTION_REQUEST_SIZE = int( + os.environ.get("LAMBDA_LIMITS_CREATE_FUNCTION_REQUEST_SIZE", 70_167_211) +) +# SEMI-PUBLIC: not actively communicated +LAMBDA_LIMITS_MAX_FUNCTION_ENVVAR_SIZE_BYTES = int( + os.environ.get("LAMBDA_LIMITS_MAX_FUNCTION_ENVVAR_SIZE_BYTES", 4 * 1024) +) +# SEMI-PUBLIC: not actively communicated +LAMBDA_LIMITS_MAX_FUNCTION_PAYLOAD_SIZE_BYTES = int( + os.environ.get( + "LAMBDA_LIMITS_MAX_FUNCTION_PAYLOAD_SIZE_BYTES", 6 * 1024 * 1024 + 100 + ) # the 100 comes from the init defaults +) + +# DEV: 0 (default unless in host mode on macOS) For LS developers only. Only applies to Docker mode. +# Whether to explicitly expose a free TCP port in lambda containers when invoking functions in host mode for +# systems that cannot reach the container via its IPv4. For example, macOS cannot reach Docker containers: +# https://docs.docker.com/desktop/networking/#i-cannot-ping-my-containers +LAMBDA_DEV_PORT_EXPOSE = ( + # Enable this dev flag by default on macOS in host mode (i.e., non-Docker environment) + is_env_not_false("LAMBDA_DEV_PORT_EXPOSE") + if not is_in_docker and is_in_macos + else is_env_true("LAMBDA_DEV_PORT_EXPOSE") +) + +# DEV: only applies to new lambda provider. All LAMBDA_INIT_* configuration are for LS developers only. +# There are NO stability guarantees, and they may break at any time. 
+ +# DEV: Release version of https://github.com/localstack/lambda-runtime-init overriding the current default +LAMBDA_INIT_RELEASE_VERSION = os.environ.get("LAMBDA_INIT_RELEASE_VERSION") +# DEV: 0 (default) Enable for mounting of RIE init binary and delve debugger +LAMBDA_INIT_DEBUG = is_env_true("LAMBDA_INIT_DEBUG") +# DEV: path to RIE init binary (e.g., var/rapid/init) +LAMBDA_INIT_BIN_PATH = os.environ.get("LAMBDA_INIT_BIN_PATH") +# DEV: path to entrypoint script (e.g., var/rapid/entrypoint.sh) +LAMBDA_INIT_BOOTSTRAP_PATH = os.environ.get("LAMBDA_INIT_BOOTSTRAP_PATH") +# DEV: path to delve debugger (e.g., var/rapid/dlv) +LAMBDA_INIT_DELVE_PATH = os.environ.get("LAMBDA_INIT_DELVE_PATH") +# DEV: Go Delve debug port +LAMBDA_INIT_DELVE_PORT = int(os.environ.get("LAMBDA_INIT_DELVE_PORT") or 40000) +# DEV: Time to wait after every invoke as a workaround to fix a race condition in persistence tests +LAMBDA_INIT_POST_INVOKE_WAIT_MS = os.environ.get("LAMBDA_INIT_POST_INVOKE_WAIT_MS") +# DEV: sbx_user1051 (default when not provided) Alternative system user or empty string to skip dropping privileges. +LAMBDA_INIT_USER = os.environ.get("LAMBDA_INIT_USER") + +# INTERNAL: 1 (default) +# The duration (in seconds) to wait between each poll call to an event source. +LAMBDA_EVENT_SOURCE_MAPPING_POLL_INTERVAL_SEC = float( + os.environ.get("LAMBDA_EVENT_SOURCE_MAPPING_POLL_INTERVAL_SEC") or 1 +) + +# INTERNAL: 60 (default) +# Maximum duration (in seconds) to wait between retries when an event source poll fails. +LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_ERROR_SEC = float( + os.environ.get("LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_ERROR_SEC") or 60 +) + +# INTERNAL: 10 (default) +# Maximum duration (in seconds) to wait between polls when an event source returns empty results. +LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_EMPTY_POLL_SEC = float( + os.environ.get("LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_EMPTY_POLL_SEC") or 10 +) + +# Specifies the path to the mock configuration file for Step Functions, commonly named MockConfigFile.json. +SFN_MOCK_CONFIG = os.environ.get("SFN_MOCK_CONFIG", "").strip() + +# path prefix for windows volume mounting +WINDOWS_DOCKER_MOUNT_PREFIX = os.environ.get("WINDOWS_DOCKER_MOUNT_PREFIX", "/host_mnt") + +# whether to skip S3 presign URL signature validation (TODO: currently enabled, until all issues are resolved) +S3_SKIP_SIGNATURE_VALIDATION = is_env_not_false("S3_SKIP_SIGNATURE_VALIDATION") +# whether to skip S3 validation of provided KMS key +S3_SKIP_KMS_KEY_VALIDATION = is_env_not_false("S3_SKIP_KMS_KEY_VALIDATION") + +# PUBLIC: 2000 (default) +# Allows increasing the default char limit for truncation of lambda log lines when printed in the console. +# This does not affect the logs processing in CloudWatch. +LAMBDA_TRUNCATE_STDOUT = int(os.getenv("LAMBDA_TRUNCATE_STDOUT") or 2000) + +# INTERNAL: 60 (default matching AWS) only applies to new lambda provider +# Base delay in seconds for async retries. Further retries use: NUM_ATTEMPTS * LAMBDA_RETRY_BASE_DELAY_SECONDS +# 300 (5min) is the maximum because NUM_ATTEMPTS can be at most 3 and SQS has a message timer limit of 15 min. 
+# For example:
+# 1x LAMBDA_RETRY_BASE_DELAY_SECONDS: delay between initial invocation and first retry
+# 2x LAMBDA_RETRY_BASE_DELAY_SECONDS: delay between the first retry and the second retry
+# 3x LAMBDA_RETRY_BASE_DELAY_SECONDS: delay between the second retry and the third retry
+LAMBDA_RETRY_BASE_DELAY_SECONDS = int(os.getenv("LAMBDA_RETRY_BASE_DELAY") or 60)
+
+# PUBLIC: 0 (default)
+# Set to 1 to create lambda functions synchronously (not recommended).
+# Whether Lambda.CreateFunction will block until the function is in a terminal state (Active or Failed).
+# This technically breaks behavior parity but is provided as a simplification over the default AWS behavior and
+# to match the behavior of the old lambda provider.
+LAMBDA_SYNCHRONOUS_CREATE = is_env_true("LAMBDA_SYNCHRONOUS_CREATE")
+
+# URL to a custom OpenSearch/Elasticsearch backend cluster. If this is set to a valid URL, then localstack will not
+# create OpenSearch/Elasticsearch cluster instances, but instead forward all domains to the given backend.
+OPENSEARCH_CUSTOM_BACKEND = os.environ.get("OPENSEARCH_CUSTOM_BACKEND", "").strip()
+
+# Strategy used when creating OpenSearch/Elasticsearch domain endpoints routed through the edge proxy
+# valid values: domain | path | port (off)
+OPENSEARCH_ENDPOINT_STRATEGY = (
+    os.environ.get("OPENSEARCH_ENDPOINT_STRATEGY", "").strip() or "domain"
+)
+if OPENSEARCH_ENDPOINT_STRATEGY == "off":
+    OPENSEARCH_ENDPOINT_STRATEGY = "port"
+
+# Whether to start one cluster per domain (default), or multiplex opensearch domains to a single cluster
+OPENSEARCH_MULTI_CLUSTER = is_env_not_false("OPENSEARCH_MULTI_CLUSTER")
+
+# Whether to really publish to GCM while using SNS Platform Application (needs credentials)
+LEGACY_SNS_GCM_PUBLISHING = is_env_true("LEGACY_SNS_GCM_PUBLISHING")
+
+SNS_SES_SENDER_ADDRESS = os.environ.get("SNS_SES_SENDER_ADDRESS", "").strip()
+
+SNS_CERT_URL_HOST = os.environ.get("SNS_CERT_URL_HOST", "").strip()
+
+# Whether the Next Gen APIGW invocation logic is enabled (on by default)
+APIGW_NEXT_GEN_PROVIDER = os.environ.get("PROVIDER_OVERRIDE_APIGATEWAY", "") in ("next_gen", "")
+
+# Whether the DynamoDBStreams native provider is enabled
+DDB_STREAMS_PROVIDER_V2 = os.environ.get("PROVIDER_OVERRIDE_DYNAMODBSTREAMS", "") == "v2"
+_override_dynamodb_v2 = os.environ.get("PROVIDER_OVERRIDE_DYNAMODB", "")
+if DDB_STREAMS_PROVIDER_V2:
+    # in order to not have conflicts between the 2 implementations, as they are tightly coupled, we need to set DDB
+    # to be v2 as well
+    if not _override_dynamodb_v2:
+        os.environ["PROVIDER_OVERRIDE_DYNAMODB"] = "v2"
+elif _override_dynamodb_v2 == "v2":
+    os.environ["PROVIDER_OVERRIDE_DYNAMODBSTREAMS"] = "v2"
+    DDB_STREAMS_PROVIDER_V2 = True
+
+# TODO remove fallback to LAMBDA_DOCKER_NETWORK with next minor version
+MAIN_DOCKER_NETWORK = os.environ.get("MAIN_DOCKER_NETWORK", "") or LAMBDA_DOCKER_NETWORK
+
+# Whether to return and parse access key ids starting with an "A", like on AWS
+PARITY_AWS_ACCESS_KEY_ID = is_env_true("PARITY_AWS_ACCESS_KEY_ID")
+
+# Show exceptions for CloudFormation deploy errors
+CFN_VERBOSE_ERRORS = is_env_true("CFN_VERBOSE_ERRORS")
+
+# The CFN_STRING_REPLACEMENT_DENY_LIST env variable is a comma-separated list of strings that are not allowed to be
+# replaced in CloudFormation templates (e.g. AWS URLs that are usually edited by LocalStack to point to itself if found
+# in a CFN template). They are extracted to a list of strings if the env variable is set.
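+# e.g. (illustrative values only): CFN_STRING_REPLACEMENT_DENY_LIST="signin.aws.amazon.com,docs.aws.amazon.com"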
+CFN_STRING_REPLACEMENT_DENY_LIST = [
+    x for x in os.environ.get("CFN_STRING_REPLACEMENT_DENY_LIST", "").split(",") if x
+]
+
+# Set the timeout to deploy each individual CloudFormation resource
+CFN_PER_RESOURCE_TIMEOUT = int(os.environ.get("CFN_PER_RESOURCE_TIMEOUT") or 300)
+
+# How localstack will react to encountering unsupported resource types.
+# By default, unsupported resource types will be ignored.
+# EXPERIMENTAL
+CFN_IGNORE_UNSUPPORTED_RESOURCE_TYPES = is_env_not_false("CFN_IGNORE_UNSUPPORTED_RESOURCE_TYPES")
+
+# bind address of local DNS server
+DNS_ADDRESS = os.environ.get("DNS_ADDRESS") or "0.0.0.0"
+# port of the local DNS server
+DNS_PORT = int(os.environ.get("DNS_PORT", "53"))
+
+# Comma-separated list of regex patterns for DNS names to resolve locally.
+# Any DNS name not matched against any of the patterns on this whitelist
+# will resolve to the real DNS entry, rather than the local one.
+DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM = (
+    os.environ.get("DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM") or ""
+).strip()
+DNS_LOCAL_NAME_PATTERNS = (os.environ.get("DNS_LOCAL_NAME_PATTERNS") or "").strip()  # deprecated
+
+# IP address that AWS endpoints should resolve to in our local DNS server. By default,
+# hostnames resolve to 127.0.0.1, which allows using the LocalStack APIs transparently
+# from the host machine. If your code is running in Docker, this should be configured
+# to resolve to the Docker bridge network address, e.g., DNS_RESOLVE_IP=172.17.0.1
+DNS_RESOLVE_IP = os.environ.get("DNS_RESOLVE_IP") or LOCALHOST_IP
+
+# fallback DNS server to send upstream requests to
+DNS_SERVER = os.environ.get("DNS_SERVER")
+DNS_VERIFICATION_DOMAIN = os.environ.get("DNS_VERIFICATION_DOMAIN") or "localstack.cloud"
+
+
+def use_custom_dns():
+    return str(DNS_ADDRESS) not in FALSE_STRINGS
+
+
+# s3 virtual host name
+S3_VIRTUAL_HOSTNAME = "s3.%s" % LOCALSTACK_HOST.host
+S3_STATIC_WEBSITE_HOSTNAME = "s3-website.%s" % LOCALSTACK_HOST.host
+
+BOTO_WAITER_DELAY = int(os.environ.get("BOTO_WAITER_DELAY") or "1")
+BOTO_WAITER_MAX_ATTEMPTS = int(os.environ.get("BOTO_WAITER_MAX_ATTEMPTS") or "120")
+DISABLE_CUSTOM_BOTO_WAITER_CONFIG = is_env_true("DISABLE_CUSTOM_BOTO_WAITER_CONFIG")
+
+# defaults to false
+# if `DISABLE_BOTO_RETRIES=1` is set, all our created boto clients will have retries disabled
+DISABLE_BOTO_RETRIES = is_env_true("DISABLE_BOTO_RETRIES")
+
+DISTRIBUTED_MODE = is_env_true("DISTRIBUTED_MODE")
+
+# This flag enables `connect_to` to be in-memory only and not do networking calls
+IN_MEMORY_CLIENT = is_env_true("IN_MEMORY_CLIENT")
+
+# List of environment variable names used for configuration that are passed from the host into the LocalStack container.
+# => Synchronize this list with the above and the configuration docs:
+# https://docs.localstack.cloud/references/configuration/
+# => Sort this list alphabetically
+# => Add deprecated environment variables to deprecations.py and add a comment in this list
+# => Move removed legacy variables to the section grouped by release (still relevant for deprecation warnings)
+# => Do *not* include any internal developer configurations that apply to host-mode only in this list.
+CONFIG_ENV_VARS = [ + "ALLOW_NONSTANDARD_REGIONS", + "BOTO_WAITER_DELAY", + "BOTO_WAITER_MAX_ATTEMPTS", + "BUCKET_MARKER_LOCAL", + "CFN_IGNORE_UNSUPPORTED_RESOURCE_TYPES", + "CFN_PER_RESOURCE_TIMEOUT", + "CFN_STRING_REPLACEMENT_DENY_LIST", + "CFN_VERBOSE_ERRORS", + "CI", + "CONTAINER_RUNTIME", + "CUSTOM_SSL_CERT_PATH", + "DEBUG", + "DEBUG_HANDLER_CHAIN", + "DEVELOP", + "DEVELOP_PORT", + "DISABLE_BOTO_RETRIES", + "DISABLE_CORS_CHECKS", + "DISABLE_CORS_HEADERS", + "DISABLE_CUSTOM_BOTO_WAITER_CONFIG", + "DISABLE_CUSTOM_CORS_APIGATEWAY", + "DISABLE_CUSTOM_CORS_S3", + "DISABLE_EVENTS", + "DISTRIBUTED_MODE", + "DNS_ADDRESS", + "DNS_PORT", + "DNS_LOCAL_NAME_PATTERNS", + "DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM", + "DNS_RESOLVE_IP", + "DNS_SERVER", + "DNS_VERIFICATION_DOMAIN", + "DOCKER_BRIDGE_IP", + "DOCKER_SDK_DEFAULT_TIMEOUT_SECONDS", + "DYNAMODB_ERROR_PROBABILITY", + "DYNAMODB_HEAP_SIZE", + "DYNAMODB_IN_MEMORY", + "DYNAMODB_LOCAL_PORT", + "DYNAMODB_SHARE_DB", + "DYNAMODB_READ_ERROR_PROBABILITY", + "DYNAMODB_REMOVE_EXPIRED_ITEMS", + "DYNAMODB_WRITE_ERROR_PROBABILITY", + "EAGER_SERVICE_LOADING", + "ENABLE_CONFIG_UPDATES", + "EVENT_RULE_ENGINE", + "EXTRA_CORS_ALLOWED_HEADERS", + "EXTRA_CORS_ALLOWED_ORIGINS", + "EXTRA_CORS_EXPOSE_HEADERS", + "GATEWAY_LISTEN", + "GATEWAY_SERVER", + "GATEWAY_WORKER_THREAD_COUNT", + "HOSTNAME", + "HOSTNAME_FROM_LAMBDA", + "IN_MEMORY_CLIENT", + "KINESIS_ERROR_PROBABILITY", + "KINESIS_MOCK_PERSIST_INTERVAL", + "KINESIS_MOCK_LOG_LEVEL", + "KINESIS_ON_DEMAND_STREAM_COUNT_LIMIT", + "KINESIS_PERSISTENCE", + "LAMBDA_DEBUG_MODE", + "LAMBDA_DEBUG_MODE_CONFIG", + "LAMBDA_DISABLE_AWS_ENDPOINT_URL", + "LAMBDA_DOCKER_DNS", + "LAMBDA_DOCKER_FLAGS", + "LAMBDA_DOCKER_NETWORK", + "LAMBDA_EVENTS_INTERNAL_SQS", + "LAMBDA_EVENT_SOURCE_MAPPING", + "LAMBDA_IGNORE_ARCHITECTURE", + "LAMBDA_INIT_DEBUG", + "LAMBDA_INIT_BIN_PATH", + "LAMBDA_INIT_BOOTSTRAP_PATH", + "LAMBDA_INIT_DELVE_PATH", + "LAMBDA_INIT_DELVE_PORT", + "LAMBDA_INIT_POST_INVOKE_WAIT_MS", + "LAMBDA_INIT_USER", + "LAMBDA_INIT_RELEASE_VERSION", + "LAMBDA_KEEPALIVE_MS", + "LAMBDA_LIMITS_CONCURRENT_EXECUTIONS", + "LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY", + "LAMBDA_LIMITS_TOTAL_CODE_SIZE", + "LAMBDA_LIMITS_CODE_SIZE_ZIPPED", + "LAMBDA_LIMITS_CODE_SIZE_UNZIPPED", + "LAMBDA_LIMITS_CREATE_FUNCTION_REQUEST_SIZE", + "LAMBDA_LIMITS_MAX_FUNCTION_ENVVAR_SIZE_BYTES", + "LAMBDA_LIMITS_MAX_FUNCTION_PAYLOAD_SIZE_BYTES", + "LAMBDA_PREBUILD_IMAGES", + "LAMBDA_RUNTIME_IMAGE_MAPPING", + "LAMBDA_REMOVE_CONTAINERS", + "LAMBDA_RETRY_BASE_DELAY_SECONDS", + "LAMBDA_RUNTIME_EXECUTOR", + "LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT", + "LAMBDA_RUNTIME_VALIDATION", + "LAMBDA_SYNCHRONOUS_CREATE", + "LAMBDA_SQS_EVENT_SOURCE_MAPPING_INTERVAL", + "LAMBDA_TRUNCATE_STDOUT", + "LEGACY_DOCKER_CLIENT", + "LEGACY_SNS_GCM_PUBLISHING", + "LOCALSTACK_API_KEY", + "LOCALSTACK_AUTH_TOKEN", + "LOCALSTACK_HOST", + "LOG_LICENSE_ISSUES", + "LS_LOG", + "MAIN_CONTAINER_NAME", + "MAIN_DOCKER_NETWORK", + "OPENAPI_VALIDATE_REQUEST", + "OPENAPI_VALIDATE_RESPONSE", + "OPENSEARCH_ENDPOINT_STRATEGY", + "OUTBOUND_HTTP_PROXY", + "OUTBOUND_HTTPS_PROXY", + "PARITY_AWS_ACCESS_KEY_ID", + "PERSISTENCE", + "PORTS_CHECK_DOCKER_IMAGE", + "REQUESTS_CA_BUNDLE", + "REMOVE_SSL_CERT", + "S3_SKIP_SIGNATURE_VALIDATION", + "S3_SKIP_KMS_KEY_VALIDATION", + "SERVICES", + "SKIP_INFRA_DOWNLOADS", + "SKIP_SSL_CERT_DOWNLOAD", + "SNAPSHOT_LOAD_STRATEGY", + "SNAPSHOT_SAVE_STRATEGY", + "SNAPSHOT_FLUSH_INTERVAL", + "SNS_SES_SENDER_ADDRESS", + "SQS_DELAY_PURGE_RETRY", + "SQS_DELAY_RECENTLY_DELETED", + 
"SQS_ENABLE_MESSAGE_RETENTION_PERIOD", + "SQS_ENDPOINT_STRATEGY", + "SQS_DISABLE_CLOUDWATCH_METRICS", + "SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL", + "STRICT_SERVICE_LOADING", + "TF_COMPAT_MODE", + "USE_SSL", + "WAIT_FOR_DEBUGGER", + "WINDOWS_DOCKER_MOUNT_PREFIX", + # Removed legacy variables in 2.0.0 + # DATA_DIR => do *not* include in this list, as it is treated separately. # deprecated since 1.0.0 + "LEGACY_DIRECTORIES", # deprecated since 1.0.0 + "SYNCHRONOUS_API_GATEWAY_EVENTS", # deprecated since 1.3.0 + "SYNCHRONOUS_DYNAMODB_EVENTS", # deprecated since 1.3.0 + "SYNCHRONOUS_SNS_EVENTS", # deprecated since 1.3.0 + "SYNCHRONOUS_SQS_EVENTS", # deprecated since 1.3.0 + # Removed legacy variables in 3.0.0 + "DEFAULT_REGION", # deprecated since 0.12.7 + "EDGE_BIND_HOST", # deprecated since 2.0.0 + "EDGE_FORWARD_URL", # deprecated since 1.4.0 + "EDGE_PORT", # deprecated since 2.0.0 + "EDGE_PORT_HTTP", # deprecated since 2.0.0 + "ES_CUSTOM_BACKEND", # deprecated since 0.14.0 + "ES_ENDPOINT_STRATEGY", # deprecated since 0.14.0 + "ES_MULTI_CLUSTER", # deprecated since 0.14.0 + "HOSTNAME_EXTERNAL", # deprecated since 2.0.0 + "KINESIS_INITIALIZE_STREAMS", # deprecated since 1.4.0 + "KINESIS_PROVIDER", # deprecated since 1.3.0 + "KMS_PROVIDER", # deprecated since 1.4.0 + "LAMBDA_XRAY_INIT", # deprecated since 2.0.0 + "LAMBDA_CODE_EXTRACT_TIME", # deprecated since 2.0.0 + "LAMBDA_CONTAINER_REGISTRY", # deprecated since 2.0.0 + "LAMBDA_EXECUTOR", # deprecated since 2.0.0 + "LAMBDA_FALLBACK_URL", # deprecated since 2.0.0 + "LAMBDA_FORWARD_URL", # deprecated since 2.0.0 + "LAMBDA_JAVA_OPTS", # currently only supported in old Lambda provider but not officially deprecated + "LAMBDA_REMOTE_DOCKER", # deprecated since 2.0.0 + "LAMBDA_STAY_OPEN_MODE", # deprecated since 2.0.0 + "LEGACY_EDGE_PROXY", # deprecated since 1.0.0 + "LOCALSTACK_HOSTNAME", # deprecated since 2.0.0 + "SQS_PORT_EXTERNAL", # deprecated only in docs since 2022-07-13 + "SYNCHRONOUS_KINESIS_EVENTS", # deprecated since 1.3.0 + "USE_SINGLE_REGION", # deprecated since 0.12.7 + "MOCK_UNIMPLEMENTED", # deprecated since 1.3.0 +] + + +def is_local_test_mode() -> bool: + """Returns True if we are running in the context of our local integration tests.""" + return is_env_true(ENV_INTERNAL_TEST_RUN) + + +def is_collect_metrics_mode() -> bool: + """Returns True if metric collection is enabled.""" + return is_env_true(ENV_INTERNAL_TEST_COLLECT_METRIC) + + +def collect_config_items() -> List[Tuple[str, Any]]: + """Returns a list of key-value tuples of LocalStack configuration values.""" + none = object() # sentinel object + + # collect which keys to print + keys = [] + keys.extend(CONFIG_ENV_VARS) + keys.append("DATA_DIR") + keys.sort() + + values = globals() + + result = [] + for k in keys: + v = values.get(k, none) + if v is none: + continue + result.append((k, v)) + result.sort() + return result + + +def populate_config_env_var_names(): + global CONFIG_ENV_VARS + + CONFIG_ENV_VARS += [ + key + for key in [key.upper() for key in os.environ] + if (key.startswith("LOCALSTACK_") or key.startswith("PROVIDER_OVERRIDE_")) + # explicitly exclude LOCALSTACK_CLI (it's prefixed with "LOCALSTACK_", + # but is only used in the CLI (should not be forwarded to the container) + and key != "LOCALSTACK_CLI" + ] + + # create variable aliases prefixed with LOCALSTACK_ (except LOCALSTACK_HOST) + CONFIG_ENV_VARS += [ + "LOCALSTACK_" + v for v in CONFIG_ENV_VARS if not v.startswith("LOCALSTACK_") + ] + + CONFIG_ENV_VARS = list(set(CONFIG_ENV_VARS)) + + +# populate env var 
names to be passed to the container
+populate_config_env_var_names()
+
+
+# helpers to build urls
+def get_protocol() -> str:
+    return "https" if USE_SSL else "http"
+
+
+def external_service_url(
+    host: Optional[str] = None,
+    port: Optional[int] = None,
+    protocol: Optional[str] = None,
+    subdomains: Optional[str] = None,
+) -> str:
+    """Returns a service URL (e.g., SQS queue URL) to an external client (e.g., boto3) potentially running on another
+    machine than LocalStack. The configurations LOCALSTACK_HOST and USE_SSL can customize these returned URLs.
+    The optional parameters can be used to customize the defaults.
+    Examples with default configuration:
+    * external_service_url() == http://localhost.localstack.cloud:4566
+    * external_service_url(subdomains="s3") == http://s3.localhost.localstack.cloud:4566
+    """
+    protocol = protocol or get_protocol()
+    subdomains = f"{subdomains}." if subdomains else ""
+    host = host or LOCALSTACK_HOST.host
+    port = port or LOCALSTACK_HOST.port
+    return f"{protocol}://{subdomains}{host}:{port}"
+
+
+def internal_service_url(
+    host: Optional[str] = None,
+    port: Optional[int] = None,
+    protocol: Optional[str] = None,
+    subdomains: Optional[str] = None,
+) -> str:
+    """Returns a service URL for internal use within LocalStack (i.e., same host).
+    The configuration USE_SSL can customize these returned URLs but LOCALSTACK_HOST has no effect.
+    The optional parameters can be used to customize the defaults.
+    Examples with default configuration:
+    * internal_service_url() == http://localhost:4566
+    * internal_service_url(port=8080) == http://localhost:8080
+    """
+    protocol = protocol or get_protocol()
+    subdomains = f"{subdomains}." if subdomains else ""
+    host = host or LOCALHOST
+    port = port or GATEWAY_LISTEN[0].port
+    return f"{protocol}://{subdomains}{host}:{port}"
+
+
+# DEPRECATED: old helpers for building URLs
+
+
+def service_url(service_key, host=None, port=None):
+    """@deprecated: Use `internal_service_url()` instead. We assume that most usages are internal
+    but really need to check and update each usage accordingly.
+    """
+    warnings.warn(
+        """@deprecated: Use `internal_service_url()` instead. We assume that most usages are
+        internal but really need to check and update each usage accordingly.""",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return internal_service_url(host=host, port=port)
+
+
+def service_port(service_key: str, external: bool = False) -> int:
+    """@deprecated: Use `localstack_host().port` for external and `GATEWAY_LISTEN[0].port` for
+    internal use."""
+    warnings.warn(
+        "Deprecated: use `localstack_host().port` for external and `GATEWAY_LISTEN[0].port` for "
+        "internal use.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    if external:
+        return LOCALSTACK_HOST.port
+    return GATEWAY_LISTEN[0].port
+
+
+def get_edge_port_http():
+    """@deprecated: Use `localstack_host().port` for external and `GATEWAY_LISTEN[0].port` for
+    internal use. This function is not needed anymore because we don't separate between HTTP
+    and HTTPS ports anymore since LocalStack listens to both ports."""
+    warnings.warn(
+        """@deprecated: Use `localstack_host().port` for external and `GATEWAY_LISTEN[0].port`
+        for internal use. This function is also not needed anymore because we don't separate
+        between HTTP and HTTPS ports anymore since LocalStack listens to both.""",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return GATEWAY_LISTEN[0].port
+
+
+def get_edge_url(localstack_hostname=None, protocol=None):
+    """@deprecated: Use `internal_service_url()` instead.
+    We assume that most usages are internal but really need to check and update each usage accordingly.
+    """
+    warnings.warn(
+        """@deprecated: Use `internal_service_url()` instead.
+        We assume that most usages are internal but really need to check and update each usage accordingly.
+        """,
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return internal_service_url(host=localstack_hostname, protocol=protocol)
+
+
+class ServiceProviderConfig(Mapping[str, str]):
+    _provider_config: Dict[str, str]
+    default_value: str
+    override_prefix: str = "PROVIDER_OVERRIDE_"
+
+    def __init__(self, default_value: str):
+        self._provider_config = {}
+        self.default_value = default_value
+
+    def load_from_environment(self, env: Mapping[str, str] = None):
+        if env is None:
+            env = os.environ
+        for key, value in env.items():
+            if key.startswith(self.override_prefix) and value:
+                self.set_provider(key[len(self.override_prefix) :].lower().replace("_", "-"), value)
+
+    def get_provider(self, service: str) -> str:
+        return self._provider_config.get(service, self.default_value)
+
+    def set_provider_if_not_exists(self, service: str, provider: str) -> None:
+        if service not in self._provider_config:
+            self._provider_config[service] = provider
+
+    def set_provider(self, service: str, provider: str):
+        self._provider_config[service] = provider
+
+    def bulk_set_provider_if_not_exists(self, services: List[str], provider: str):
+        for service in services:
+            self.set_provider_if_not_exists(service, provider)
+
+    def __getitem__(self, item):
+        return self.get_provider(item)
+
+    def __setitem__(self, key, value):
+        self.set_provider(key, value)
+
+    def __len__(self):
+        return len(self._provider_config)
+
+    def __iter__(self):
+        return self._provider_config.__iter__()
+
+
+SERVICE_PROVIDER_CONFIG = ServiceProviderConfig("default")
+
+SERVICE_PROVIDER_CONFIG.load_from_environment()
+
+
+def init_directories() -> Directories:
+    if is_in_docker:
+        return Directories.for_container()
+    else:
+        if is_env_true("LOCALSTACK_CLI"):
+            return Directories.for_cli()
+
+        return Directories.for_host()
+
+
+# initialize directories
+dirs: Directories
+dirs = init_directories()
diff --git a/localstack-core/localstack/constants.py b/localstack-core/localstack/constants.py
new file mode 100644
index 0000000000000..f5d43d2bab1e9
--- /dev/null
+++ b/localstack-core/localstack/constants.py
@@ -0,0 +1,186 @@
+import os
+
+from localstack.version import __version__
+
+VERSION = __version__
+
+# HTTP headers used to forward proxy request URLs
+HEADER_LOCALSTACK_EDGE_URL = "x-localstack-edge"
+HEADER_LOCALSTACK_REQUEST_URL = "x-localstack-request-url"
+# custom localstack authorization header, only used in ext
+HEADER_LOCALSTACK_AUTHORIZATION = "x-localstack-authorization"
+HEADER_LOCALSTACK_TARGET = "x-localstack-target"
+HEADER_AMZN_ERROR_TYPE = "X-Amzn-Errortype"
+
+# backend service ports, for services that are behind a proxy (counting down from 4566)
+DEFAULT_PORT_EDGE = 4566
+
+# host name for localhost
+LOCALHOST = "localhost"
+LOCALHOST_IP = "127.0.0.1"
+LOCALHOST_HOSTNAME = "localhost.localstack.cloud"
+
+# User-agent string used in outgoing HTTP requests made by LocalStack
+USER_AGENT_STRING = f"localstack/{VERSION}"
+
+# version of the Maven dependency with Java utility code
+LOCALSTACK_MAVEN_VERSION = "0.2.21"
+MAVEN_REPO_URL = "https://repo1.maven.org/maven2"
+
+# URL of localstack's artifacts repository on GitHub
+ARTIFACTS_REPO = "https://github.com/localstack/localstack-artifacts"
+
+# Artifacts endpoint
+ASSETS_ENDPOINT = "https://assets.localstack.cloud"
"https://assets.localstack.cloud" + +# Hugging Face endpoint for localstack +HUGGING_FACE_ENDPOINT = "https://huggingface.co/localstack" + +# host to bind to when starting the services +BIND_HOST = "0.0.0.0" + +# root code folder +MODULE_MAIN_PATH = os.path.dirname(os.path.realpath(__file__)) +# TODO rename to "ROOT_FOLDER"! +LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(MODULE_MAIN_PATH, "..")) + +# virtualenv folder +LOCALSTACK_VENV_FOLDER: str = os.environ.get("VIRTUAL_ENV") +if not LOCALSTACK_VENV_FOLDER: + # fallback to the previous logic + LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, ".venv") + if not os.path.isdir(LOCALSTACK_VENV_FOLDER): + # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/ + LOCALSTACK_VENV_FOLDER = os.path.realpath( + os.path.join(LOCALSTACK_ROOT_FOLDER, "..", "..", "..") + ) + +# default volume directory containing shared data +DEFAULT_VOLUME_DIR = "/var/lib/localstack" + +# API Gateway path to indicate a user request sent to the gateway +PATH_USER_REQUEST = "_user_request_" + +# name of LocalStack Docker image +DOCKER_IMAGE_NAME = "localstack/localstack" +DOCKER_IMAGE_NAME_PRO = "localstack/localstack-pro" +DOCKER_IMAGE_NAME_FULL = "localstack/localstack-full" + +# backdoor API path used to retrieve or update config variables +CONFIG_UPDATE_PATH = "/?_config_" + +# API path for localstack internal resources +INTERNAL_RESOURCE_PATH = "/_localstack" + +# environment variable name to tag local test runs +ENV_INTERNAL_TEST_RUN = "LOCALSTACK_INTERNAL_TEST_RUN" + +# environment variable name to tag collect metrics during a test run +ENV_INTERNAL_TEST_COLLECT_METRIC = "LOCALSTACK_INTERNAL_TEST_COLLECT_METRIC" + +# environment variable that flags whether pro was activated. do not use it for security purposes! 
+ENV_PRO_ACTIVATED = "PRO_ACTIVATED"
+
+# content types / encodings
+HEADER_CONTENT_TYPE = "Content-Type"
+TEXT_XML = "text/xml"
+APPLICATION_AMZ_JSON_1_0 = "application/x-amz-json-1.0"
+APPLICATION_AMZ_JSON_1_1 = "application/x-amz-json-1.1"
+APPLICATION_AMZ_CBOR_1_1 = "application/x-amz-cbor-1.1"
+APPLICATION_CBOR = "application/cbor"
+APPLICATION_JSON = "application/json"
+APPLICATION_XML = "application/xml"
+APPLICATION_OCTET_STREAM = "application/octet-stream"
+APPLICATION_X_WWW_FORM_URLENCODED = "application/x-www-form-urlencoded"
+HEADER_ACCEPT_ENCODING = "Accept-Encoding"
+
+# strings to indicate truthy/falsy values
+TRUE_STRINGS = ("1", "true", "True")
+FALSE_STRINGS = ("0", "false", "False")
+# strings with valid log levels for LS_LOG
+LOG_LEVELS = ("trace-internal", "trace", "debug", "info", "warn", "error", "warning")
+
+# the version of elasticsearch that is pre-seeded into the base image (sync with Dockerfile.base)
+ELASTICSEARCH_DEFAULT_VERSION = "Elasticsearch_7.10"
+# See https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html
+ELASTICSEARCH_PLUGIN_LIST = [
+    "analysis-icu",
+    "ingest-attachment",
+    "analysis-kuromoji",
+    "mapper-murmur3",
+    "mapper-size",
+    "analysis-phonetic",
+    "analysis-smartcn",
+    "analysis-stempel",
+    "analysis-ukrainian",
+]
+# Default ES modules to exclude (saves approx. 66 MB in the final image)
+ELASTICSEARCH_DELETE_MODULES = ["ingest-geoip"]
+
+# the version of opensearch which is used by default
+OPENSEARCH_DEFAULT_VERSION = "OpenSearch_2.11"
+
+# See https://docs.aws.amazon.com/opensearch-service/latest/developerguide/supported-plugins.html
+OPENSEARCH_PLUGIN_LIST = [
+    "ingest-attachment",
+    "analysis-kuromoji",
+]
+
+# API endpoint for analytics events
+API_ENDPOINT = os.environ.get("API_ENDPOINT") or "https://api.localstack.cloud/v1"
+# new analytics API endpoint
+ANALYTICS_API = os.environ.get("ANALYTICS_API") or "https://analytics.localstack.cloud/v1"
+
+# environment variable to indicate this process should run the localstack infrastructure
+LOCALSTACK_INFRA_PROCESS = "LOCALSTACK_INFRA_PROCESS"
+
+# AWS region us-east-1
+AWS_REGION_US_EAST_1 = "us-east-1"
+
+# environment variable to override max pool connections
+try:
+    MAX_POOL_CONNECTIONS = int(os.environ["MAX_POOL_CONNECTIONS"])
+except Exception:
+    MAX_POOL_CONNECTIONS = 150
+
+# Fallback Account ID if not available in the client request
+DEFAULT_AWS_ACCOUNT_ID = "000000000000"
+
+# Credentials used for internal calls
+INTERNAL_AWS_ACCESS_KEY_ID = "__internal_call__"
+INTERNAL_AWS_SECRET_ACCESS_KEY = "__internal_call__"
+
+# trace log levels (excluding/including internal API calls), configurable via $LS_LOG
+LS_LOG_TRACE = "trace"
+LS_LOG_TRACE_INTERNAL = "trace-internal"
+TRACE_LOG_LEVELS = [LS_LOG_TRACE, LS_LOG_TRACE_INTERNAL]
+
+# list of official docker images
+OFFICIAL_IMAGES = [
+    "localstack/localstack",
+    "localstack/localstack-pro",
+]
+
+# port for debugpy
+DEFAULT_DEVELOP_PORT = 5678
+
+# Default bucket name of the s3 bucket used for local lambda development
+# This name should be accepted by all IaC tools, so it should respect s3 bucket naming conventions
+DEFAULT_BUCKET_MARKER_LOCAL = "hot-reload"
+LEGACY_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"
+
+# user that starts the opensearch process if the current user is root
+OS_USER_OPENSEARCH = "localstack"
+
+# output string that indicates that the stack is ready
+READY_MARKER_OUTPUT = "Ready."
+
+# Regex for the `Credential` field in the Authorization header for AWS signature version 4.
+# The format is as follows:
+#   Credential=<access-key-id>/<date>/<region-name>/<service-name>/aws4_request
+# e.g.
+#   Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request
+AUTH_CREDENTIAL_REGEX = r"Credential=(?P<access_key_id>[a-zA-Z0-9-_.]{1,})/(?P<date>\d{8})/(?P<region_name>[a-z0-9-]{1,})/(?P<service_name>[a-z0-9]{1,})/"
+
+# Custom resource tag to override the generated resource ID.
+TAG_KEY_CUSTOM_ID = "_custom_id_"
diff --git a/localstack-core/localstack/deprecations.py b/localstack-core/localstack/deprecations.py
new file mode 100644
index 0000000000000..1690ca227d878
--- /dev/null
+++ b/localstack-core/localstack/deprecations.py
@@ -0,0 +1,410 @@
+# A simple module to track deprecations over time / versions, with some simple functions guiding the affected users.
+import logging
+import os
+from dataclasses import dataclass
+from typing import Callable, List, Optional
+
+from localstack.utils.analytics import log
+
+LOG = logging.getLogger(__name__)
+
+
+@dataclass
+class EnvVarDeprecation:
+    """
+    Simple class defining a deprecation of an environment variable config.
+    It helps keep track of deprecations over time.
+    """
+
+    env_var: str
+    deprecation_version: str
+    deprecation_path: Optional[str] = None
+
+    @property
+    def is_affected(self) -> bool:
+        """
+        Checks whether an environment is affected.
+        :return: True if the environment is affected, i.e., is using a deprecated config
+        """
+        return os.environ.get(self.env_var) is not None
+
+
+#
+# List of deprecations
+#
+# Please make sure this is in sync with https://docs.localstack.cloud/references/configuration/
+#
+DEPRECATIONS = [
+    # Since 0.11.3 - HTTP / HTTPS multiplexing
+    EnvVarDeprecation(
+        "USE_SSL",
+        "0.11.3",
+        "Each endpoint now supports multiplexing HTTP/HTTPS traffic over the same port. Please remove this environment variable.",  # noqa
+    ),
+    # Since 0.12.8 - PORT_WEB_UI was removed
+    EnvVarDeprecation(
+        "PORT_WEB_UI",
+        "0.12.8",
+        "PORT_WEB_UI has been removed and is no longer available. Please remove this environment variable.",
+    ),
+    # Deprecated in 0.12.7, removed in 3.0.0
+    EnvVarDeprecation(
+        "USE_SINGLE_REGION",
+        "0.12.7",
+        "LocalStack now has full multi-region support. This option has no effect. Please remove it from your configuration.",  # noqa
+    ),
+    # Deprecated in 0.12.7, removed in 3.0.0
+    EnvVarDeprecation(
+        "DEFAULT_REGION",
+        "0.12.7",
+        "LocalStack now has full multi-region support. This option has no effect. Please remove it from your configuration.",  # noqa
+    ),
+    # Since 1.0.0 - new persistence and file system
+    EnvVarDeprecation(
+        "DATA_DIR",
+        "1.0.0",
+        "Please use PERSISTENCE instead. The state will be stored in your LocalStack volume in the state/ directory.",
+    ),
+    EnvVarDeprecation(
+        "HOST_TMP_FOLDER",
+        "1.0.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    EnvVarDeprecation(
+        "LEGACY_DIRECTORIES",
+        "1.0.0",
+        "This option has no effect anymore. Please migrate to the new filesystem layout (introduced with v1.0).",
+    ),
+    EnvVarDeprecation(
+        "TMPDIR", "1.0.0", "Please migrate to the new filesystem layout (introduced with v1.0)."
+    ),
+    EnvVarDeprecation(
+        "PERSISTENCE_SINGLE_FILE",
+        "1.0.0",
+        "The legacy persistence mechanism is not supported anymore, please migrate to the advanced persistence mechanism of LocalStack Pro.",  # noqa
+    ),
+    # Since 1.0.0 - new ASF gateway
+    EnvVarDeprecation(
+        "LEGACY_EDGE_PROXY",
+        "1.0.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    # Since 1.1.0 - Kinesalite was removed with 1.3; kinesis-mock is the only Kinesis provider/backend
+    EnvVarDeprecation(
+        "KINESIS_PROVIDER",
+        "1.1.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    # Since 1.1.0 - the init dir has been deprecated in favor of pluggable init hooks
+    EnvVarDeprecation(
+        "LEGACY_INIT_DIR",
+        "1.1.0",
+        "This option has no effect anymore. "
+        "Please use the pluggable initialization hooks in /etc/localstack/init/<stage>.d instead.",
+    ),
+    EnvVarDeprecation(
+        "INIT_SCRIPTS_PATH",
+        "1.1.0",
+        "This option has no effect anymore. "
+        "Please use the pluggable initialization hooks in /etc/localstack/init/<stage>.d instead.",
+    ),
+    # Since 1.3.0 - synchronous events break AWS parity
+    EnvVarDeprecation(
+        "SYNCHRONOUS_SNS_EVENTS",
+        "1.3.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    EnvVarDeprecation(
+        "SYNCHRONOUS_SQS_EVENTS",
+        "1.3.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    EnvVarDeprecation(
+        "SYNCHRONOUS_API_GATEWAY_EVENTS",
+        "1.3.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    EnvVarDeprecation(
+        "SYNCHRONOUS_KINESIS_EVENTS",
+        "1.3.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    EnvVarDeprecation(
+        "SYNCHRONOUS_DYNAMODB_EVENTS",
+        "1.3.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    # Since 1.3.0 - all non-pre-seeded infra is downloaded asynchronously
+    EnvVarDeprecation(
+        "SKIP_INFRA_DOWNLOADS",
+        "1.3.0",
+        "Infra downloads are triggered on-demand now. Please remove this environment variable.",
+    ),
+    # Since 1.3.0 - mocking for unimplemented operations will be removed
+    EnvVarDeprecation(
+        "MOCK_UNIMPLEMENTED",
+        "1.3.0",
+        "This feature is not supported anymore. Please remove this environment variable.",
+    ),
+    # Since 1.4.0 - edge forwarding is only used for legacy HTTPS proxying and will be removed
+    EnvVarDeprecation(
+        "EDGE_FORWARD_URL",
+        "1.4.0",
+        "This option has no effect anymore. Please remove this environment variable.",
+    ),
+    # Deprecated in 1.4.0, removed in 3.0.0
+    EnvVarDeprecation(
+        "KMS_PROVIDER",
+        "1.4.0",
+        "This option has no effect. Please remove it from your configuration.",
+    ),
Please remove it from your configuration.", + ), + # Since 2.0.0 - HOSTNAME_EXTERNAL will be replaced with LOCALSTACK_HOST + EnvVarDeprecation( + "HOSTNAME_EXTERNAL", + "2.0.0", + "This configuration will be migrated to LOCALSTACK_HOST", + ), + # Since 2.0.0 - LOCALSTACK_HOST will be replaced with LOCALSTACK_HOST + EnvVarDeprecation( + "LOCALSTACK_HOSTNAME", + "2.0.0", + "This configuration will be migrated to LOCALSTACK_HOST", + ), + # Since 2.0.0 - redefined as GATEWAY_LISTEN + EnvVarDeprecation( + "EDGE_BIND_HOST", + "2.0.0", + "This configuration will be migrated to GATEWAY_LISTEN", + ), + # Since 2.0.0 - redefined as GATEWAY_LISTEN + EnvVarDeprecation( + "EDGE_PORT", + "2.0.0", + "This configuration will be migrated to GATEWAY_LISTEN", + ), + # Since 2.0.0 - redefined as GATEWAY_LISTEN + EnvVarDeprecation( + "EDGE_PORT_HTTP", + "2.0.0", + "This configuration will be migrated to GATEWAY_LISTEN", + ), + EnvVarDeprecation( + "LAMBDA_EXECUTOR", + "2.0.0", + "This configuration is obsolete with the new lambda provider " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2\n" + "Please mount the Docker socket /var/run/docker.sock as a volume when starting LocalStack.", + ), + EnvVarDeprecation( + "LAMBDA_STAY_OPEN_MODE", + "2.0.0", + "Stay open mode is the default behavior in the new lambda provider " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2", + ), + EnvVarDeprecation( + "LAMBDA_REMOTE_DOCKER", + "2.0.0", + "The new lambda provider copies zip files by default and automatically configures hot reloading " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2", + ), + EnvVarDeprecation( + "LAMBDA_CODE_EXTRACT_TIME", + "2.0.0", + "Function creation now happens asynchronously in the new lambda provider " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2", + ), + EnvVarDeprecation( + "LAMBDA_CONTAINER_REGISTRY", + "2.0.0", + "The new lambda provider uses LAMBDA_RUNTIME_IMAGE_MAPPING instead " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2", + ), + EnvVarDeprecation( + "LAMBDA_FALLBACK_URL", + "2.0.0", + "This feature is not supported in the new lambda provider " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2", + ), + EnvVarDeprecation( + "LAMBDA_FORWARD_URL", + "2.0.0", + "This feature is not supported in the new lambda provider " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2", + ), + EnvVarDeprecation( + "LAMBDA_XRAY_INIT", + "2.0.0", + "The X-Ray daemon is always initialized in the new lambda provider " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2", + ), + EnvVarDeprecation( + "KINESIS_INITIALIZE_STREAMS", + "1.4.0", + "This option has no effect anymore. Please use the AWS client and init hooks instead.", + ), + EnvVarDeprecation( + "SQS_PORT_EXTERNAL", + "1.0.0", + "This option has no effect anymore. Please use LOCALSTACK_HOST instead.", + ), + EnvVarDeprecation( + "PROVIDER_OVERRIDE_LAMBDA", + "3.0.0", + "This option is ignored because the legacy Lambda provider (v1) has been removed since 3.0.0. " + "Please remove PROVIDER_OVERRIDE_LAMBDA and migrate to our new Lambda provider (v2): " + "https://docs.localstack.cloud/user-guide/aws/lambda/#migrating-to-lambda-v2", + ), + EnvVarDeprecation( + "ES_CUSTOM_BACKEND", + "0.14.0", + "This option has no effect anymore. 
Please use OPENSEARCH_CUSTOM_BACKEND instead.", + ), + EnvVarDeprecation( + "ES_MULTI_CLUSTER", + "0.14.0", + "This option has no effect anymore. Please use OPENSEARCH_MULTI_CLUSTER instead.", + ), + EnvVarDeprecation( + "ES_ENDPOINT_STRATEGY", + "0.14.0", + "This option has no effect anymore. Please use OPENSEARCH_ENDPOINT_STRATEGY instead.", + ), + EnvVarDeprecation( + "PERSIST_ALL", + "2.3.2", + "LocalStack treats backends and assets the same with respect to persistence. Please remove PERSIST_ALL.", + ), + EnvVarDeprecation( + "DNS_LOCAL_NAME_PATTERNS", + "3.0.0", + "This option was confusingly named. Please use DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM " + "instead.", + ), + EnvVarDeprecation( + "LAMBDA_EVENTS_INTERNAL_SQS", + "4.0.0", + "This option is ignored because the LocalStack SQS dependency for event invokes has been removed since 4.0.0" + " in favor of a lightweight Lambda-internal SQS implementation.", + ), + EnvVarDeprecation( + "LAMBDA_EVENT_SOURCE_MAPPING", + "4.0.0", + "This option has no effect anymore. Please remove this environment variable.", + ), + EnvVarDeprecation( + "LAMBDA_SQS_EVENT_SOURCE_MAPPING_INTERVAL_SEC", + "4.0.0", + "This option is not supported by the new Lambda Event Source Mapping v2 implementation." + " Please create a GitHub issue if you experience any performance challenges.", + ), + EnvVarDeprecation( + "PROVIDER_OVERRIDE_STEPFUNCTIONS", + "4.0.0", + "This option is ignored because the legacy StepFunctions provider (v1) has been removed since 4.0.0." + " Please remove PROVIDER_OVERRIDE_STEPFUNCTIONS.", + ), + EnvVarDeprecation( + "EVENT_RULE_ENGINE", + "4.0.3", + "This option is ignored because the Java-based event ruler has been removed since 4.1.0." + " Our latest Python-native implementation introduced in 4.0.3" + " is faster, achieves great AWS parity, and fixes compatibility issues with the StepFunctions JSONata feature." + " Please remove EVENT_RULE_ENGINE.", + ), + EnvVarDeprecation( + "STEPFUNCTIONS_LAMBDA_ENDPOINT", + "4.0.0", + "This is only supported for the legacy provider. URL to use as the Lambda service endpoint in Step Functions. " + "By default this is the LocalStack Lambda endpoint. Use default to select the original AWS Lambda endpoint.", + ), + EnvVarDeprecation( + "LOCAL_PORT_STEPFUNCTIONS", + "4.0.0", + "This is only supported for the legacy provider." + "It defines the local port to which Step Functions traffic is redirected." + "By default, LocalStack routes Step Functions traffic to its internal runtime. " + "Use this variable only if you need to redirect traffic to a different local Step Functions runtime.", + ), +] + + +def collect_affected_deprecations( + deprecations: Optional[List[EnvVarDeprecation]] = None, +) -> List[EnvVarDeprecation]: + """ + Collects all deprecations which are used in the OS environ. + :param deprecations: List of deprecations to check. Uses DEPRECATIONS list by default. + :return: List of deprecations which are used in the current environment + """ + if deprecations is None: + deprecations = DEPRECATIONS + return [deprecation for deprecation in deprecations if deprecation.is_affected] + + +def log_env_warning(deprecations: List[EnvVarDeprecation]) -> None: + """ + Logs warnings for the given deprecations. + :param deprecations: list of affected deprecations to show a warning for + """ + """ + Logs a warning if a given environment variable is set (no matter what the value is). 
+ :param env_var: to check + :param deprecation_version: version with which the env variable has been deprecated + """ + if deprecations: + env_vars = [] + + # Print warnings for the env vars and collect them (for the analytics event) + for deprecation in deprecations: + LOG.warning( + "%s is deprecated (since %s) and will be removed in upcoming releases of LocalStack! %s", + deprecation.env_var, + deprecation.deprecation_version, + deprecation.deprecation_path, + ) + env_vars.append(deprecation.env_var) + + # Log an event if deprecated env vars are used + log.event(event="deprecated_env_usage", payload={"deprecated_env_vars": env_vars}) + + +def log_deprecation_warnings(deprecations: Optional[List[EnvVarDeprecation]] = None) -> None: + affected_deprecations = collect_affected_deprecations(deprecations) + log_env_warning(affected_deprecations) + + provider_override_events = os.environ.get("PROVIDER_OVERRIDE_EVENTS") + if provider_override_events and provider_override_events in ["v1", "legacy"]: + env_var_value = f"PROVIDER_OVERRIDE_EVENTS={provider_override_events}" + deprecation_version = "4.0.0" + deprecation_path = f"Remove {env_var_value} to use the new EventBridge implementation." + LOG.warning( + "%s is deprecated (since %s) and will be removed in upcoming releases of LocalStack! %s", + env_var_value, + deprecation_version, + deprecation_path, + ) + + +def deprecated_endpoint( + endpoint: Callable, previous_path: str, deprecation_version: str, new_path: str +) -> Callable: + """ + Wrapper function which logs a warning (and a deprecation path) whenever a deprecated URL is invoked by the router. + + :param endpoint: to wrap (log a warning whenever it is invoked) + :param previous_path: route path it is triggered by + :param deprecation_version: version of LocalStack with which this endpoint is deprecated + :param new_path: new route path which should be used instead + :return: wrapped function which can be registered for a route + """ + + def deprecated_wrapper(*args, **kwargs): + LOG.warning( + "%s is deprecated (since %s) and will be removed in upcoming releases of LocalStack! 
Use %s instead.", + previous_path, + deprecation_version, + new_path, + ) + return endpoint(*args, **kwargs) + + return deprecated_wrapper diff --git a/localstack/logging/__init__.py b/localstack-core/localstack/dev/__init__.py similarity index 100% rename from localstack/logging/__init__.py rename to localstack-core/localstack/dev/__init__.py diff --git a/localstack/runtime/__init__.py b/localstack-core/localstack/dev/debugger/__init__.py similarity index 100% rename from localstack/runtime/__init__.py rename to localstack-core/localstack/dev/debugger/__init__.py diff --git a/localstack-core/localstack/dev/debugger/plugins.py b/localstack-core/localstack/dev/debugger/plugins.py new file mode 100644 index 0000000000000..aa1d163f57b85 --- /dev/null +++ b/localstack-core/localstack/dev/debugger/plugins.py @@ -0,0 +1,25 @@ +import logging + +from localstack import config, constants +from localstack.runtime import hooks + +LOG = logging.getLogger(__name__) + + +def enable_debugger(): + from localstack.packages.debugpy import debugpy_package + + debugpy_package.install() + import debugpy # noqa: T100 + + LOG.info("Starting debug server at: %s:%s", constants.BIND_HOST, config.DEVELOP_PORT) + debugpy.listen((constants.BIND_HOST, config.DEVELOP_PORT)) # noqa: T100 + + if config.WAIT_FOR_DEBUGGER: + debugpy.wait_for_client() # noqa: T100 + + +@hooks.on_infra_start() +def conditionally_enable_debugger(): + if config.DEVELOP: + enable_debugger() diff --git a/localstack/services/__init__.py b/localstack-core/localstack/dev/kubernetes/__init__.py similarity index 100% rename from localstack/services/__init__.py rename to localstack-core/localstack/dev/kubernetes/__init__.py diff --git a/localstack-core/localstack/dev/kubernetes/__main__.py b/localstack-core/localstack/dev/kubernetes/__main__.py new file mode 100644 index 0000000000000..8935027298ef0 --- /dev/null +++ b/localstack-core/localstack/dev/kubernetes/__main__.py @@ -0,0 +1,330 @@ +import dataclasses +import os +from typing import Literal + +import click +import yaml + + +@dataclasses.dataclass +class MountPoint: + name: str + host_path: str + container_path: str + node_path: str + read_only: bool = True + volume_type: Literal["Directory", "File"] = "Directory" + + +def generate_mount_points( + pro: bool = False, mount_moto: bool = False, mount_entrypoints: bool = False +) -> list[MountPoint]: + mount_points = [] + # host paths + root_path = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..") + localstack_code_path = os.path.join(root_path, "localstack-core", "localstack") + pro_path = os.path.join(root_path, "..", "localstack-ext") + + # container paths + target_path = "/opt/code/localstack/" + venv_path = os.path.join(target_path, ".venv", "lib", "python3.11", "site-packages") + + # Community code + if pro: + # Pro installs community code as a package, so it lives in the venv site-packages + mount_points.append( + MountPoint( + name="localstack", + host_path=os.path.normpath(localstack_code_path), + node_path="/code/localstack", + container_path=os.path.join(venv_path, "localstack"), + # Read only has to be false here, as we mount the pro code into this mount, as it is the entire namespace package + read_only=False, + ) + ) + else: + # Community does not install the localstack package in the venv, but has the code directly in `/opt/code/localstack` + mount_points.append( + MountPoint( + name="localstack", + host_path=os.path.normpath(localstack_code_path), + node_path="/code/localstack", + container_path=os.path.join(target_path, 
"localstack-core", "localstack"), + ) + ) + + # Pro code + if pro: + pro_code_path = os.path.join(pro_path, "localstack-pro-core", "localstack", "pro", "core") + mount_points.append( + MountPoint( + name="localstack-pro", + host_path=os.path.normpath(pro_code_path), + node_path="/code/localstack-pro", + container_path=os.path.join(venv_path, "localstack", "pro", "core"), + ) + ) + + # entrypoints + if mount_entrypoints: + if pro: + # Community entrypoints in pro image + # TODO actual package version detection + print( + "WARNING: Package version detection is not implemented." + "You need to adapt the version in the .egg-info paths to match the package version installed in the used localstack-pro image." + ) + community_version = "4.1.1.dev14" + pro_version = "4.1.1.dev16" + egg_path = os.path.join( + root_path, "localstack-core", "localstack_core.egg-info/entry_points.txt" + ) + mount_points.append( + MountPoint( + name="entry-points-community", + host_path=os.path.normpath(egg_path), + node_path="/code/entry-points-community", + container_path=os.path.join( + venv_path, f"localstack-{community_version}.egg-info", "entry_points.txt" + ), + volume_type="File", + ) + ) + # Pro entrypoints in pro image + egg_path = os.path.join( + pro_path, "localstack-pro-core", "localstack_ext.egg-info/entry_points.txt" + ) + mount_points.append( + MountPoint( + name="entry-points-pro", + host_path=os.path.normpath(egg_path), + node_path="/code/entry-points-pro", + container_path=os.path.join( + venv_path, f"localstack_ext-{pro_version}.egg-info", "entry_points.txt" + ), + volume_type="File", + ) + ) + else: + # Community entrypoints in community repo + # In the community image, the code is not installed as package, so the paths are predictable + egg_path = os.path.join( + root_path, "localstack-core", "localstack_core.egg-info/entry_points.txt" + ) + mount_points.append( + MountPoint( + name="entry-points-community", + host_path=os.path.normpath(egg_path), + node_path="/code/entry-points-community", + container_path=os.path.join( + target_path, + "localstack-core", + "localstack_core.egg-info", + "entry_points.txt", + ), + volume_type="File", + ) + ) + + if mount_moto: + moto_path = os.path.join(root_path, "..", "moto", "moto") + mount_points.append( + MountPoint( + name="moto", + host_path=os.path.normpath(moto_path), + node_path="/code/moto", + container_path=os.path.join(venv_path, "moto"), + ) + ) + return mount_points + + +def generate_k8s_cluster_config(mount_points: list[MountPoint], port: int = 4566): + volumes = [ + { + "volume": f"{mount_point.host_path}:{mount_point.node_path}", + "nodeFilters": ["server:*", "agent:*"], + } + for mount_point in mount_points + ] + + ports = [{"port": f"{port}:31566", "nodeFilters": ["server:0"]}] + + config = {"apiVersion": "k3d.io/v1alpha5", "kind": "Simple", "volumes": volumes, "ports": ports} + + return config + + +def snake_to_kebab_case(string: str): + return string.lower().replace("_", "-") + + +def generate_k8s_cluster_overrides( + mount_points: list[MountPoint], pro: bool = False, env: list[str] | None = None +): + volumes = [ + { + "name": mount_point.name, + "hostPath": {"path": mount_point.node_path, "type": mount_point.volume_type}, + } + for mount_point in mount_points + ] + + volume_mounts = [ + { + "name": mount_point.name, + "readOnly": mount_point.read_only, + "mountPath": mount_point.container_path, + } + for mount_point in mount_points + ] + + extra_env_vars = [] + if env: + for env_variable in env: + lhs, _, rhs = env_variable.partition("=") + 
extra_env_vars.append( + { + "name": lhs, + "value": rhs, + } + ) + + if pro: + extra_env_vars += [ + { + "name": "LOCALSTACK_AUTH_TOKEN", + "value": "test", + }, + { + "name": "CONTAINER_RUNTIME", + "value": "kubernetes", + }, + ] + + image_repository = "localstack/localstack-pro" if pro else "localstack/localstack" + + overrides = { + "debug": True, + "volumes": volumes, + "volumeMounts": volume_mounts, + "extraEnvVars": extra_env_vars, + "image": {"repository": image_repository}, + "lambda": {"executor": "kubernetes"}, + } + + return overrides + + +def write_file(content: dict, output_path: str, file_name: str): + path = os.path.join(output_path, file_name) + with open(path, "w") as f: + f.write(yaml.dump(content)) + f.close() + print(f"Generated file at {path}") + + +def print_file(content: dict, file_name: str): + print(f"Generated file:\t{file_name}") + print("=====================================") + print(yaml.dump(content)) + print("=====================================") + + +@click.command("run") +@click.option( + "--pro", is_flag=True, default=None, help="Mount the localstack-pro code into the cluster." +) +@click.option( + "--mount-moto", is_flag=True, default=None, help="Mount the moto code into the cluster." +) +@click.option( + "--mount-entrypoints", is_flag=True, default=None, help="Mount the entrypoints into the pod." +) +@click.option( + "--write", + is_flag=True, + default=None, + help="Write the configuration and overrides to files.", +) +@click.option( + "--output-dir", + "-o", + type=click.Path(exists=True, file_okay=False, resolve_path=True), + help="Output directory for generated files.", +) +@click.option( + "--overrides-file", + "-of", + default=None, + help="Name of the overrides file (default: overrides.yml).", +) +@click.option( + "--config-file", + "-cf", + default=None, + help="Name of the configuration file (default: configuration.yml).", +) +@click.option( + "--env", "-e", default=None, help="Environment variable to set in the pod", multiple=True +) +@click.option( + "--port", + "-p", + default=4566, + help="Port to expose from the kubernetes node", + type=click.IntRange(0, 65535), +) +@click.argument("command", nargs=-1, required=False) +def run( + pro: bool = None, + mount_moto: bool = False, + mount_entrypoints: bool = False, + write: bool = False, + output_dir=None, + overrides_file: str = None, + config_file: str = None, + command: str = None, + env: list[str] = None, + port: int = None, +): + """ + A tool for localstack developers to generate the kubernetes cluster configuration file and the overrides to mount the localstack code into the cluster. + """ + mount_points = generate_mount_points(pro, mount_moto, mount_entrypoints) + + config = generate_k8s_cluster_config(mount_points, port=port) + + overrides = generate_k8s_cluster_overrides(mount_points, pro=pro, env=env) + + output_dir = output_dir or os.getcwd() + overrides_file = overrides_file or "overrides.yml" + config_file = config_file or "configuration.yml" + + if write: + write_file(config, output_dir, config_file) + write_file(overrides, output_dir, overrides_file) + else: + print_file(config, config_file) + print_file(overrides, overrides_file) + + overrides_file_path = os.path.join(output_dir, overrides_file) + config_file_path = os.path.join(output_dir, config_file) + + print("\nTo create a k3d cluster with the generated configuration, follow these steps:") + print("1. 
Run the following command to create the cluster:") + print(f"\n k3d cluster create --config {config_file_path}\n") + + print("2. Once the cluster is created, start LocalStack with the generated overrides:") + print("\n helm repo add localstack https://localstack.github.io/helm-charts # (if required)") + print( + f"\n helm upgrade --install localstack localstack/localstack -f {overrides_file_path}\n" + ) + + +def main(): + run() + + +if __name__ == "__main__": + main() diff --git a/localstack/services/acm/__init__.py b/localstack-core/localstack/dev/run/__init__.py similarity index 100% rename from localstack/services/acm/__init__.py rename to localstack-core/localstack/dev/run/__init__.py diff --git a/localstack-core/localstack/dev/run/__main__.py b/localstack-core/localstack/dev/run/__main__.py new file mode 100644 index 0000000000000..39ab236c9e3c2 --- /dev/null +++ b/localstack-core/localstack/dev/run/__main__.py @@ -0,0 +1,408 @@ +import dataclasses +import os +from typing import Iterable, Tuple + +import click +from rich.rule import Rule + +from localstack import config +from localstack.cli import console +from localstack.runtime import hooks +from localstack.utils.bootstrap import Container, ContainerConfigurators +from localstack.utils.container_utils.container_client import ( + ContainerConfiguration, + PortMappings, + VolumeMappings, +) +from localstack.utils.container_utils.docker_cmd_client import CmdDockerClient +from localstack.utils.files import cache_dir +from localstack.utils.run import run_interactive +from localstack.utils.strings import short_uid + +from .configurators import ( + ConfigEnvironmentConfigurator, + DependencyMountConfigurator, + EntryPointMountConfigurator, + ImageConfigurator, + PortConfigurator, + SourceVolumeMountConfigurator, +) +from .paths import HOST_PATH_MAPPINGS, HostPaths + + +@click.command("run") +@click.option( + "--image", + type=str, + required=False, + help="Overwrite the container image to be used (defaults to localstack/localstack or " + "localstack/localstack-pro).", +) +@click.option( + "--volume-dir", + type=click.Path(file_okay=False, dir_okay=True), + required=False, + help="The localstack volume on the host, default: ~/.cache/localstack/volume", +) +@click.option( + "--pro/--community", + is_flag=True, + default=None, + help="Whether to start localstack pro or community. If not set, it will guess from the current directory", +) +@click.option( + "--develop/--no-develop", + is_flag=True, + default=False, + help="Install debugpy and expose port 5678", +) +@click.option( + "--randomize", + is_flag=True, + default=False, + help="Randomize container name and ports to start multiple instances", +) +@click.option( + "--mount-source/--no-mount-source", + is_flag=True, + default=True, + help="Mount source files from localstack and localstack-ext. Use --local-packages for optional dependencies such as moto.", +) +@click.option( + "--mount-dependencies/--no-mount-dependencies", + is_flag=True, + default=False, + help="Whether to mount the dependencies of the current .venv directory into the container. 
Note that this only works if the dependencies are compatible with the python and platform version of the venv and the container.",
+)
+@click.option(
+    "--mount-entrypoints/--no-mount-entrypoints",
+    is_flag=True,
+    default=False,
+    help="Mount entrypoints",
+)
+@click.option("--mount-docker-socket/--no-docker-socket", is_flag=True, default=True)
+@click.option(
+    "--env",
+    "-e",
+    help="Additional environment variables that are passed to the LocalStack container",
+    multiple=True,
+    required=False,
+)
+@click.option(
+    "--volume",
+    "-v",
+    help="Additional volume mounts that are passed to the LocalStack container",
+    multiple=True,
+    required=False,
+)
+@click.option(
+    "--publish",
+    "-p",
+    help="Additional ports that are published to the host",
+    multiple=True,
+    required=False,
+)
+@click.option(
+    "--entrypoint",
+    type=str,
+    required=False,
+    help="Additional entrypoint flag passed to docker",
+)
+@click.option(
+    "--network",
+    type=str,
+    required=False,
+    help="Docker network to start the container in",
+)
+@click.option(
+    "--local-packages",
+    "-l",
+    multiple=True,
+    required=False,
+    type=click.Choice(HOST_PATH_MAPPINGS.keys(), case_sensitive=False),
+    help="Mount specified packages into the container",
+)
+@click.argument("command", nargs=-1, required=False)
+def run(
+    image: str = None,
+    volume_dir: str = None,
+    pro: bool = None,
+    develop: bool = False,
+    randomize: bool = False,
+    mount_source: bool = True,
+    mount_dependencies: bool = False,
+    mount_entrypoints: bool = False,
+    mount_docker_socket: bool = True,
+    env: Tuple = (),
+    volume: Tuple = (),
+    publish: Tuple = (),
+    entrypoint: str = None,
+    network: str = None,
+    local_packages: list[str] | None = None,
+    command: str = None,
+):
+    """
+    A tool for localstack developers to start localstack containers. Run this in your localstack or
+    localstack-ext source tree to mount local source files or dependencies into the container.
+    Here are some examples::
+
+        \b
+        python -m localstack.dev.run
+        python -m localstack.dev.run -e DEBUG=1 -e LOCALSTACK_AUTH_TOKEN=test
+        python -m localstack.dev.run -- bash -c 'echo "hello"'
+
+    Explanations and more examples:
+
+    Start a normal localstack container. If you run this from the localstack-ext repo,
+    it will start localstack-pro::
+
+        python -m localstack.dev.run
+
+    If you start localstack-pro, you might also want to add the auth token as an environment variable::
+
+        python -m localstack.dev.run -e DEBUG=1 -e LOCALSTACK_AUTH_TOKEN=test
+
+    If your local changes modify plux plugins (e.g., by adding new providers or hooks),
+    then you also want to mount the newly generated entry_points.txt files into the container::
+
+        python -m localstack.dev.run --mount-entrypoints
+
+    Start a new container with randomized gateway and service ports, and a randomized container name::
+
+        python -m localstack.dev.run --randomize
+
+    You can also run custom commands::
+
+        python -m localstack.dev.run bash -c 'echo "hello"'
+
+    Or use custom entrypoints::
+
+        python -m localstack.dev.run --entrypoint /bin/bash -- echo "hello"
+
+    You can install and expose debugpy::
+
+        python -m localstack.dev.run --develop
+
+    You can also mount local dependencies (e.g., pytest and other test dependencies) and then use
+    them in the container::
+
+        \b
+        python -m localstack.dev.run --mount-dependencies \\
+            -v $PWD/tests:/opt/code/localstack/tests \\
+            -- .venv/bin/python -m pytest tests/unit/http_/
+
+    The script generally assumes that you are executing it in either the localstack or the localstack-ext
+    source repository, organized like this::
+
+        \b
+        somedir <- your workspace directory
+        ├── localstack <- execute script in here
+        │   ├── ...
+        │   ├── localstack-core
+        │   │   ├── localstack <- will be mounted into the container
+        │   │   └── localstack_core.egg-info
+        │   ├── pyproject.toml
+        │   ├── tests
+        │   └── ...
+        ├── localstack-ext <- or execute script in here
+        │   ├── ...
+        │   ├── localstack-pro-core
+        │   │   ├── localstack
+        │   │   │   └── pro
+        │   │   │       └── core <- will be mounted into the container
+        │   │   ├── localstack_ext.egg-info
+        │   │   ├── pyproject.toml
+        │   │   └── tests
+        │   └── ...
+        ├── moto
+        │   ├── AUTHORS.md
+        │   ├── ...
+        │   ├── moto <- will be mounted into the container
+        │   ├── moto_ext.egg-info
+        │   ├── pyproject.toml
+        │   ├── tests
+        │   └── ...
+
+    You can choose which local source repositories are mounted in. For example, if `moto` and `rolo` are
+    both present, only mount `rolo` into the container::
+
+        \b
+        python -m localstack.dev.run --local-packages rolo
+
+    If both `rolo` and `moto` are available and both should be mounted, use the flag twice.
+ + \b + python -m localstack.dev.run --local-packages rolo --local-packages moto + """ + with console.status("Configuring") as status: + env_vars = parse_env_vars(env) + configure_licensing_credentials_environment(env_vars) + + # run all prepare_host hooks + hooks.prepare_host.run() + + # set the VOLUME_DIR config variable like in the CLI + if not os.environ.get("LOCALSTACK_VOLUME_DIR", "").strip(): + config.VOLUME_DIR = str(cache_dir() / "volume") + + # setup important paths on the host + host_paths = HostPaths( + # we assume that python -m localstack.dev.run is always executed in the repo source + workspace_dir=os.path.abspath(os.path.join(os.getcwd(), "..")), + volume_dir=volume_dir or config.VOLUME_DIR, + ) + + # auto-set pro flag + if pro is None: + if os.getcwd().endswith("localstack-ext"): + pro = True + else: + pro = False + + # setup base configuration + container_config = ContainerConfiguration( + image_name=image, + name=config.MAIN_CONTAINER_NAME if not randomize else f"localstack-{short_uid()}", + remove=True, + interactive=True, + tty=True, + env_vars=dict(), + volumes=VolumeMappings(), + ports=PortMappings(), + network=network, + ) + + # replicate pro startup + if pro: + try: + from localstack.pro.core.plugins import modify_gateway_listen_config + + modify_gateway_listen_config(config) + except ImportError: + pass + + # setup configurators + configurators = [ + ImageConfigurator(pro, image), + PortConfigurator(randomize), + ConfigEnvironmentConfigurator(pro), + ContainerConfigurators.mount_localstack_volume(host_paths.volume_dir), + ContainerConfigurators.config_env_vars, + ] + + # create stub container with configuration to apply + c = Container(container_config=container_config) + + # apply existing hooks first that can later be overwritten + hooks.configure_localstack_container.run(c) + + if command: + configurators.append(ContainerConfigurators.custom_command(list(command))) + if entrypoint: + container_config.entrypoint = entrypoint + if mount_docker_socket: + configurators.append(ContainerConfigurators.mount_docker_socket) + if mount_source: + configurators.append( + SourceVolumeMountConfigurator( + host_paths=host_paths, + pro=pro, + chosen_packages=local_packages, + ) + ) + if mount_entrypoints: + configurators.append(EntryPointMountConfigurator(host_paths=host_paths, pro=pro)) + if mount_dependencies: + configurators.append(DependencyMountConfigurator(host_paths=host_paths)) + if develop: + configurators.append(ContainerConfigurators.develop) + + # make sure anything coming from CLI arguments has priority + configurators.extend( + [ + ContainerConfigurators.volume_cli_params(volume), + ContainerConfigurators.port_cli_params(publish), + ContainerConfigurators.env_cli_params(env), + ] + ) + + # run configurators + for configurator in configurators: + configurator(container_config) + # print the config + print_config(container_config) + + # run the container + docker = CmdDockerClient() + status.update("Creating container") + container_id = docker.create_container_from_config(container_config) + + rule = Rule(f"Interactive session with {container_id[:12]} 💻") + console.print(rule) + try: + cmd = [*docker._docker_cmd(), "start", "--interactive", "--attach", container_id] + run_interactive(cmd) + finally: + if container_config.remove: + try: + if docker.is_container_running(container_id): + docker.stop_container(container_id) + docker.remove_container(container_id) + except Exception: + pass + + +def print_config(cfg: ContainerConfiguration): + d = 
dataclasses.asdict(cfg) + + d["volumes"] = [v.to_str() for v in d["volumes"].mappings] + d["ports"] = [p for p in d["ports"].to_list() if p != "-p"] + + for k in list(d.keys()): + if d[k] is None: + d.pop(k) + + console.print(d) + + +def parse_env_vars(params: Iterable[str] = None) -> dict[str, str]: + env = {} + + if not params: + return env + + for e in params: + if "=" in e: + k, v = e.split("=", maxsplit=1) + env[k] = v + else: + # there's currently no way in our abstraction to only pass the variable name (as + # you can do in docker) so we resolve the value here. + env[e] = os.getenv(e) + + return env + + +def configure_licensing_credentials_environment(env_vars: dict[str, str]): + """ + If an api key or auth token is set in the parsed CLI parameters, then we also set them into the OS environment + unless they are already set. This is just convenience so you don't have to set them twice. + + :param env_vars: the environment variables parsed from the CLI parameters + """ + if os.environ.get("LOCALSTACK_API_KEY"): + return + if os.environ.get("LOCALSTACK_AUTH_TOKEN"): + return + if api_key := env_vars.get("LOCALSTACK_API_KEY"): + os.environ["LOCALSTACK_API_KEY"] = api_key + if api_key := env_vars.get("LOCALSTACK_AUTH_TOKEN"): + os.environ["LOCALSTACK_AUTH_TOKEN"] = api_key + + +def main(): + run() + + +if __name__ == "__main__": + main() diff --git a/localstack-core/localstack/dev/run/configurators.py b/localstack-core/localstack/dev/run/configurators.py new file mode 100644 index 0000000000000..4f1b9e3e29cde --- /dev/null +++ b/localstack-core/localstack/dev/run/configurators.py @@ -0,0 +1,375 @@ +""" +Several ContainerConfigurator implementations to set up a development version of a localstack container. +""" + +import gzip +import os +from pathlib import Path, PurePosixPath +from tempfile import gettempdir + +from localstack import config, constants +from localstack.utils.bootstrap import ContainerConfigurators +from localstack.utils.container_utils.container_client import ( + BindMount, + ContainerClient, + ContainerConfiguration, + VolumeMappings, +) +from localstack.utils.docker_utils import DOCKER_CLIENT +from localstack.utils.files import get_user_cache_dir +from localstack.utils.run import run +from localstack.utils.strings import md5 + +from .paths import ( + HOST_PATH_MAPPINGS, + CommunityContainerPaths, + ContainerPaths, + HostPaths, + ProContainerPaths, +) + + +class ConfigEnvironmentConfigurator: + """Configures the environment variables from the localstack and localstack-pro config.""" + + def __init__(self, pro: bool): + self.pro = pro + + def __call__(self, cfg: ContainerConfiguration): + if cfg.env_vars is None: + cfg.env_vars = {} + + if self.pro: + # import localstack.pro.core.config extends the list of config vars + from localstack.pro.core import config as config_pro # noqa + + ContainerConfigurators.config_env_vars(cfg) + + +class PortConfigurator: + """ + Configures the port mappings. Can be randomized to run multiple localstack instances. 
+ """ + + def __init__(self, randomize: bool = True): + self.randomize = randomize + + def __call__(self, cfg: ContainerConfiguration): + cfg.ports.bind_host = config.GATEWAY_LISTEN[0].host + + if self.randomize: + ContainerConfigurators.random_gateway_port(cfg) + ContainerConfigurators.random_service_port_range()(cfg) + else: + ContainerConfigurators.gateway_listen(config.GATEWAY_LISTEN)(cfg) + ContainerConfigurators.service_port_range(cfg) + + +class ImageConfigurator: + """ + Sets the container image to use for the container (by default either localstack/localstack or + localstack/localstack-pro) + """ + + def __init__(self, pro: bool, image_name: str | None): + self.pro = pro + self.image_name = image_name + + def __call__(self, cfg: ContainerConfiguration): + if self.image_name: + cfg.image_name = self.image_name + else: + if self.pro: + cfg.image_name = constants.DOCKER_IMAGE_NAME_PRO + else: + cfg.image_name = constants.DOCKER_IMAGE_NAME + + +class CustomEntryPointConfigurator: + """ + Creates a ``docker-entrypoint-<hash>.sh`` script from the given source and mounts it into the container. + It also configures the container to then use that entrypoint. + """ + + def __init__(self, script: str, tmp_dir: str = None): + self.script = script.lstrip(os.linesep) + self.container_paths = ProContainerPaths() + self.tmp_dir = tmp_dir + + def __call__(self, cfg: ContainerConfiguration): + h = md5(self.script) + tempdir = gettempdir() if not self.tmp_dir else self.tmp_dir + file_name = f"docker-entrypoint-{h}.sh" + + file = Path(tempdir, file_name) + if not file.exists(): + # newline separator should be '\n' independent of the os, since the entrypoint is executed in the container + # encoding needs to be "utf-8" since scripts could include emojis + file.write_text(self.script, newline="\n", encoding="utf-8") + file.chmod(0o777) + cfg.volumes.add(BindMount(str(file), f"/tmp/{file.name}")) + cfg.entrypoint = f"/tmp/{file.name}" + + +class SourceVolumeMountConfigurator: + """ + Mounts source code of localstack, localstack_ext, and moto into the container. It does this by assuming + that there is a "workspace" directory in which the source repositories are checked out into. + Depending on whether we want to start the pro container, the source paths for localstack are different. 
+ """ + + def __init__( + self, + *, + host_paths: HostPaths = None, + pro: bool = False, + chosen_packages: list[str] | None = None, + ): + self.host_paths = host_paths or HostPaths() + self.container_paths = ProContainerPaths() if pro else CommunityContainerPaths() + self.pro = pro + self.chosen_packages = chosen_packages or [] + + def __call__(self, cfg: ContainerConfiguration): + # localstack source code if available + source = self.host_paths.aws_community_package_dir + if source.exists(): + cfg.volumes.add( + # read_only=False is a temporary workaround to make the mounting of the pro source work + # this can be reverted once we don't need the nested mounting anymore + BindMount(str(source), self.container_paths.localstack_source_dir, read_only=False) + ) + + # ext source code if available + if self.pro: + source = self.host_paths.aws_pro_package_dir + if source.exists(): + cfg.volumes.add( + BindMount( + str(source), self.container_paths.localstack_pro_source_dir, read_only=True + ) + ) + + # mount local code checkouts if possible + for package_name in self.chosen_packages: + # Unconditional lookup because the CLI rejects incorect items + extractor = HOST_PATH_MAPPINGS[package_name] + self.try_mount_to_site_packages(cfg, extractor(self.host_paths)) + + # docker entrypoint + if self.pro: + source = self.host_paths.localstack_pro_project_dir / "bin" / "docker-entrypoint.sh" + else: + source = self.host_paths.localstack_project_dir / "bin" / "docker-entrypoint.sh" + if source.exists(): + cfg.volumes.add( + BindMount(str(source), self.container_paths.docker_entrypoint, read_only=True) + ) + + def try_mount_to_site_packages(self, cfg: ContainerConfiguration, sources_path: Path): + """ + Attempts to mount something like `~/workspace/plux/plugin` on the host into + ``.venv/.../site-packages/plugin``. + + :param cfg: + :param sources_path: + :return: + """ + if sources_path.exists(): + cfg.volumes.add( + BindMount( + str(sources_path), + self.container_paths.dependency_source(sources_path.name), + read_only=True, + ) + ) + + +class EntryPointMountConfigurator: + """ + Mounts ``entry_points.txt`` files of localstack and dependencies into the venv in the container. + + For example, when starting the pro container, the entrypoints of localstack-ext on the host would be in + ``~/workspace/localstack-ext/localstack-pro-core/localstack_ext.egg-info/entry_points.txt`` + which needs to be mounted into the distribution info of the installed dependency within the container: + ``/opt/code/localstack/.venv/.../site-packages/localstack_ext-2.1.0.dev0.dist-info/entry_points.txt``. 
+ """ + + entry_point_glob = ( + "/opt/code/localstack/.venv/lib/python3.*/site-packages/*.dist-info/entry_points.txt" + ) + localstack_community_entry_points = ( + "/opt/code/localstack/localstack_core.egg-info/entry_points.txt" + ) + + def __init__( + self, + *, + host_paths: HostPaths = None, + container_paths: ContainerPaths = None, + pro: bool = False, + ): + self.host_paths = host_paths or HostPaths() + self.pro = pro + self.container_paths = container_paths or None + + def __call__(self, cfg: ContainerConfiguration): + # special case for community code + if not self.pro: + host_path = self.host_paths.aws_community_package_dir + if host_path.exists(): + cfg.volumes.append( + BindMount( + str(host_path), self.localstack_community_entry_points, read_only=True + ) + ) + + # locate all relevant entry_point.txt files within the container + pattern = self.entry_point_glob + files = _list_files_in_container_image(DOCKER_CLIENT, cfg.image_name) + paths = [PurePosixPath(f) for f in files] + paths = [p for p in paths if p.match(pattern)] + + # then, check whether they exist in some form on the host within the workspace directory + for container_path in paths: + dep_path = container_path.parent.name.removesuffix(".dist-info") + dep, ver = dep_path.split("-") + + if dep == "localstack_core": + host_path = ( + self.host_paths.localstack_project_dir + / "localstack-core" + / "localstack_core.egg-info" + / "entry_points.txt" + ) + if host_path.is_file(): + cfg.volumes.add( + BindMount( + str(host_path), + str(container_path), + read_only=True, + ) + ) + continue + elif dep == "localstack_ext": + host_path = ( + self.host_paths.localstack_pro_project_dir + / "localstack-pro-core" + / "localstack_ext.egg-info" + / "entry_points.txt" + ) + if host_path.is_file(): + cfg.volumes.add( + BindMount( + str(host_path), + str(container_path), + read_only=True, + ) + ) + continue + for host_path in self.host_paths.workspace_dir.glob( + f"*/{dep}.egg-info/entry_points.txt" + ): + cfg.volumes.add(BindMount(str(host_path), str(container_path), read_only=True)) + break + + +class DependencyMountConfigurator: + """ + Mounts source folders from your host's .venv directory into the container's .venv. 
+ """ + + dependency_glob = "/opt/code/localstack/.venv/lib/python3.*/site-packages/*" + + # skip mounting dependencies with incompatible binaries (e.g., on macOS) + skipped_dependencies = ["cryptography", "psutil", "rpds"] + + def __init__( + self, + *, + host_paths: HostPaths = None, + container_paths: ContainerPaths = None, + pro: bool = False, + ): + self.host_paths = host_paths or HostPaths() + self.pro = pro + self.container_paths = container_paths or ( + ProContainerPaths() if pro else CommunityContainerPaths() + ) + + def __call__(self, cfg: ContainerConfiguration): + # locate all relevant dependency directories + pattern = self.dependency_glob + files = _list_files_in_container_image(DOCKER_CLIENT, cfg.image_name) + paths = [PurePosixPath(f) for f in files] + # builds an index of "jinja2: /opt/code/.../site-packages/jinja2" + container_path_index = {p.name: p for p in paths if p.match(pattern)} + + # find dependencies from the host + for dep_path in self.host_paths.venv_dir.glob("lib/python3.*/site-packages/*"): + # filter out everything that heuristically cannot be a source path + if not self._can_be_source_path(dep_path): + continue + if dep_path.name.endswith(".dist-info"): + continue + if dep_path.name == "__pycache__": + continue + + if dep_path.name in self.skipped_dependencies: + continue + + if dep_path.name in container_path_index: + # find the target path in the index if it exists + target_path = str(container_path_index[dep_path.name]) + else: + # if the given dependency is not in the container, then we mount it anyway + # FIXME: we should also mount the dist-info directory. perhaps this method should be + # re-written completely + target_path = self.container_paths.dependency_source(dep_path.name) + + if self._has_mount(cfg.volumes, target_path): + continue + + cfg.volumes.append(BindMount(str(dep_path), target_path)) + + def _can_be_source_path(self, path: Path) -> bool: + return path.is_dir() or (path.name.endswith(".py") and not path.name.startswith("__")) + + def _has_mount(self, volumes: VolumeMappings, target_path: str) -> bool: + return True if volumes.find_target_mapping(target_path) else False + + +def _list_files_in_container_image(container_client: ContainerClient, image_name: str) -> list[str]: + """ + Uses ``docker export | tar -t`` to list all files in a given docker image. It caches the result based on + the image ID into a gziped file into ``~/.cache/localstack-dev-cli`` to (significantly) speed up + subsequent calls. 
+ + :param container_client: the container client to use + :param image_name: the container image to analyze + :return: a list of file paths + """ + if not image_name: + raise ValueError("missing image name") + + image_id = container_client.inspect_image(image_name)["Id"] + + cache_dir = get_user_cache_dir() / "localstack-dev-cli" + cache_dir.mkdir(exist_ok=True, parents=True) + cache_file = cache_dir / f"{image_id}.files.txt.gz" + + if not cache_file.exists(): + container_id = container_client.create_container(image_name=image_name) + try: + # docker export yields paths without prefixed slashes, so we add them here + # since the file is pretty big (~4MB for community, ~7MB for pro) we gzip it + cmd = "docker export %s | tar -t | awk '{ print \"/\" $0 }' | gzip > %s" % ( + container_id, + cache_file, + ) + run(cmd, shell=True) + finally: + container_client.remove_container(container_id) + + with gzip.open(cache_file, mode="rt") as fd: + return fd.read().splitlines(keepends=False) diff --git a/localstack-core/localstack/dev/run/paths.py b/localstack-core/localstack/dev/run/paths.py new file mode 100644 index 0000000000000..b1fe9a95f24fd --- /dev/null +++ b/localstack-core/localstack/dev/run/paths.py @@ -0,0 +1,94 @@ +"""Utilities to resolve important paths on the host and in the container.""" + +import os +from pathlib import Path +from typing import Callable, Optional, Union + + +class HostPaths: + workspace_dir: Path + """We assume all repositories live in a workspace directory, e.g., ``~/workspace/ls/localstack``, + ``~/workspace/ls/localstack-ext``, ...""" + + localstack_project_dir: Path + localstack_pro_project_dir: Path + moto_project_dir: Path + postgresql_proxy: Path + rolo_dir: Path + volume_dir: Path + venv_dir: Path + + def __init__( + self, + workspace_dir: Union[os.PathLike, str] = None, + volume_dir: Union[os.PathLike, str] = None, + venv_dir: Union[os.PathLike, str] = None, + ): + self.workspace_dir = Path(workspace_dir or os.path.abspath(os.path.join(os.getcwd(), ".."))) + self.localstack_project_dir = self.workspace_dir / "localstack" + self.localstack_pro_project_dir = self.workspace_dir / "localstack-ext" + self.moto_project_dir = self.workspace_dir / "moto" + self.postgresql_proxy = self.workspace_dir / "postgresql-proxy" + self.rolo_dir = self.workspace_dir / "rolo" + self.volume_dir = Path(volume_dir or "/tmp/localstack") + self.venv_dir = Path( + venv_dir + or os.getenv("VIRTUAL_ENV") + or os.getenv("VENV_DIR") + or os.path.join(os.getcwd(), ".venv") + ) + + @property + def aws_community_package_dir(self) -> Path: + return self.localstack_project_dir / "localstack-core" / "localstack" + + @property + def aws_pro_package_dir(self) -> Path: + return ( + self.localstack_pro_project_dir / "localstack-pro-core" / "localstack" / "pro" / "core" + ) + + +# Type representing how to extract a specific path from a common root path, typically a lambda function +PathMappingExtractor = Callable[[HostPaths], Path] + +# Declaration of which local packages can be mounted into the container, and their locations on the host +HOST_PATH_MAPPINGS: dict[ + str, + PathMappingExtractor, +] = { + "moto": lambda paths: paths.moto_project_dir / "moto", + "postgresql_proxy": lambda paths: paths.postgresql_proxy / "postgresql_proxy", + "rolo": lambda paths: paths.rolo_dir / "rolo", + "plux": lambda paths: paths.workspace_dir / "plux" / "plugin", +} + + +class ContainerPaths: + """Important paths in the container""" + + project_dir: str = "/opt/code/localstack" + site_packages_target_dir: str = 
"/opt/code/localstack/.venv/lib/python3.11/site-packages" + docker_entrypoint: str = "/usr/local/bin/docker-entrypoint.sh" + localstack_supervisor: str = "/usr/local/bin/localstack-supervisor" + localstack_source_dir: str + localstack_pro_source_dir: Optional[str] + + def dependency_source(self, name: str) -> str: + """Returns path of the given source dependency in the site-packages directory.""" + return self.site_packages_target_dir + f"/{name}" + + +class CommunityContainerPaths(ContainerPaths): + """In the community image, code is copied into /opt/code/localstack/localstack-core/localstack""" + + def __init__(self): + self.localstack_source_dir = f"{self.project_dir}/localstack-core/localstack" + + +class ProContainerPaths(ContainerPaths): + """In the pro image, localstack and ext are installed into the venv as dependency""" + + def __init__(self): + self.localstack_source_dir = self.dependency_source("localstack") + self.localstack_pro_source_dir = self.dependency_source("localstack") + "/pro/core" diff --git a/localstack/services/apigateway/__init__.py b/localstack-core/localstack/dns/__init__.py similarity index 100% rename from localstack/services/apigateway/__init__.py rename to localstack-core/localstack/dns/__init__.py diff --git a/localstack-core/localstack/dns/models.py b/localstack-core/localstack/dns/models.py new file mode 100644 index 0000000000000..6df70bf6e0d86 --- /dev/null +++ b/localstack-core/localstack/dns/models.py @@ -0,0 +1,175 @@ +import dataclasses +from enum import Enum, auto +from typing import Callable, Protocol + + +class RecordType(Enum): + A = auto() + AAAA = auto() + CNAME = auto() + TXT = auto() + MX = auto() + SOA = auto() + NS = auto() + SRV = auto() + + +@dataclasses.dataclass(frozen=True) +class NameRecord: + """ + Dataclass of a stored record + """ + + record_type: RecordType + record_id: str | None = None + + +@dataclasses.dataclass(frozen=True) +class _TargetRecordBase: + """ + Dataclass of a stored record + """ + + target: str + + +@dataclasses.dataclass(frozen=True) +class TargetRecord(NameRecord, _TargetRecordBase): + pass + + +@dataclasses.dataclass(frozen=True) +class _SOARecordBase: + m_name: str + r_name: str + + +@dataclasses.dataclass(frozen=True) +class SOARecord(NameRecord, _SOARecordBase): + pass + + +@dataclasses.dataclass(frozen=True) +class AliasTarget: + target: str + alias_id: str | None = None + health_check: Callable[[], bool] | None = None + + +@dataclasses.dataclass(frozen=True) +class _DynamicRecordBase: + """ + Dataclass of a record that is dynamically determined at query time to return the IP address + of the LocalStack container + """ + + record_type: RecordType + + +@dataclasses.dataclass(frozen=True) +class DynamicRecord(NameRecord, _DynamicRecordBase): + pass + + +# TODO decide if we need the whole concept of multiple zones in our DNS implementation +class DnsServerProtocol(Protocol): + def add_host(self, name: str, record: NameRecord) -> None: + """ + Add a host resolution to the DNS server. + This will resolve the given host to the record provided, if it matches. + + :param name: Name pattern to add resolution for. Can be arbitrary regex. + :param record: Record, consisting of a record type, an optional record id, and the attached data. + Has to be a subclass of a NameRecord, not a NameRecord itself to contain some data. + """ + pass + + def delete_host(self, name: str, record: NameRecord) -> None: + """ + Deletes a host resolution from the DNS server. 
+        Only the name, the record type, and optionally the given record id will be used to find entries to delete.
+        All matching entries will be deleted.
+
+        :param name: Name pattern, identical to the one registered with `add_host`
+        :param record: Record, ideally identical to the one registered with `add_host`, but only record_type and
+            record_id have to match to find the record.
+
+        :raises ValueError: If no record that was previously registered with `add_host` was found which matches the provided record
+        """
+        pass
+
+    def add_host_pointing_to_localstack(self, name: str) -> None:
+        """
+        Adds a DNS name which should point to LocalStack when resolved.
+
+        :param name: Name which should point to LocalStack when resolved
+        """
+        pass
+
+    def delete_host_pointing_to_localstack(self, name: str) -> None:
+        """
+        Removes a DNS name from pointing to LocalStack.
+
+        :param name: Name to be removed
+        :raises ValueError: If the host pointing to LocalStack was not previously registered using `add_host_pointing_to_localstack`
+        """
+        pass
+
+    def add_alias(self, source_name: str, record_type: RecordType, target: AliasTarget) -> None:
+        """
+        Adds an alias to the DNS, with an optional health check callback.
+        When a request which matches `source_name` comes in, the DNS will check the aliases, and if the health check
+        (if provided) succeeds, the resolution result for the target will be returned instead.
+        If multiple aliases are registered for the same (source_name, record_type) tuple, and no health checks interfere,
+        the server will process requests with the first added alias.
+
+        :param source_name: Alias name
+        :param record_type: Record type of the alias
+        :param target: Target of the alias
+        """
+        pass
+
+    def delete_alias(self, source_name: str, record_type: RecordType, target: AliasTarget) -> None:
+        """
+        Removes an alias from the DNS.
+        Only the name, the record type, and optionally the given alias id will be used to find entries to delete.
+        All matching entries will be deleted.
+
+        :param source_name: Alias name
+        :param record_type: Record type of the alias to remove
+        :param target: Target of the alias. The only data relevant for deletion is its id.
+        :raises ValueError: If the alias was not previously registered using `add_alias`
+        """
+        pass
+
+    # TODO: support regex or wildcard?
+    # need to update when a custom cloudpod destination is enabled
+    # has a standard list of skips: localstack.services.dns_server.SKIP_PATTERNS
+    def add_skip(self, skip_pattern: str) -> None:
+        """
+        Adds a skip pattern to the DNS server.
+
+        A skip pattern will prevent the DNS server from resolving a matching request against its internal zones or
+        aliases, and will directly contact an upstream DNS for resolution.
+
+        This is usually helpful if AWS endpoints are overwritten by internal entries, but we have to reach AWS for
+        some reason (often used for cloudpods or installers).
+
+        :param skip_pattern: Skip pattern to add. Can be a valid regex.
+        """
+        pass
+
+    def delete_skip(self, skip_pattern: str) -> None:
+        """
+        Removes a skip pattern from the DNS server.
+
+        :param skip_pattern: Skip pattern to remove
+        :raises ValueError: If the skip pattern was not previously registered using `add_skip`
+        """
+        pass
+
+    def clear(self):
+        """
+        Removes all runtime configurations.
+ """ + pass diff --git a/localstack-core/localstack/dns/plugins.py b/localstack-core/localstack/dns/plugins.py new file mode 100644 index 0000000000000..05566573cfec8 --- /dev/null +++ b/localstack-core/localstack/dns/plugins.py @@ -0,0 +1,45 @@ +import logging + +from localstack import config +from localstack.runtime import hooks + +LOG = logging.getLogger(__name__) + +# Note: Don't want to introduce a possible import order conflict by importing SERVICE_SHUTDOWN_PRIORITY +# TODO: consider extracting these priorities into some static configuration +DNS_SHUTDOWN_PRIORITY = -30 +"""Make sure the DNS server is shut down after the ON_AFTER_SERVICE_SHUTDOWN_HANDLERS, which in turn is after +SERVICE_SHUTDOWN_PRIORITY. Currently this value needs to be less than -20""" + + +@hooks.on_infra_start(priority=10) +def start_dns_server(): + try: + from localstack.dns import server + + server.start_dns_server(port=config.DNS_PORT, asynchronous=True) + except Exception as e: + LOG.warning("Unable to start DNS: %s", e) + + +@hooks.on_infra_start() +def setup_dns_configuration_on_host(): + try: + from localstack.dns import server + + if server.is_server_running(): + # Prepare network interfaces for DNS server for the infra. + server.setup_network_configuration() + except Exception as e: + LOG.warning("error setting up dns server: %s", e) + + +@hooks.on_infra_shutdown(priority=DNS_SHUTDOWN_PRIORITY) +def stop_server(): + try: + from localstack.dns import server + + server.revert_network_configuration() + server.stop_servers() + except Exception as e: + LOG.warning("Unable to stop DNS servers: %s", e) diff --git a/localstack-core/localstack/dns/server.py b/localstack-core/localstack/dns/server.py new file mode 100644 index 0000000000000..f32d81292c75e --- /dev/null +++ b/localstack-core/localstack/dns/server.py @@ -0,0 +1,1003 @@ +import argparse +import copy +import logging +import os +import re +import textwrap +import threading +from datetime import datetime +from functools import cache +from ipaddress import IPv4Address, IPv4Interface +from pathlib import Path +from socket import AddressFamily +from typing import Iterable, Literal, Tuple + +import psutil +from cachetools import TTLCache, cached +from dnslib import ( + AAAA, + CNAME, + MX, + NS, + QTYPE, + RCODE, + RD, + RDMAP, + RR, + SOA, + TXT, + A, + DNSHeader, + DNSLabel, + DNSQuestion, + DNSRecord, +) +from dnslib.server import DNSHandler, DNSServer +from psutil._common import snicaddr + +import dns.flags +import dns.message +import dns.query +from dns.exception import Timeout + +# Note: avoid adding additional imports here, to avoid import issues when running the CLI +from localstack import config +from localstack.constants import LOCALHOST_HOSTNAME, LOCALHOST_IP +from localstack.dns.models import ( + AliasTarget, + DnsServerProtocol, + DynamicRecord, + NameRecord, + RecordType, + SOARecord, + TargetRecord, +) +from localstack.services.edge import run_module_as_sudo +from localstack.utils import iputils +from localstack.utils.net import Port, port_can_be_bound +from localstack.utils.platform import in_docker +from localstack.utils.serving import Server +from localstack.utils.strings import to_bytes, to_str +from localstack.utils.sync import sleep_forever + +EPOCH = datetime(1970, 1, 1) +SERIAL = int((datetime.utcnow() - EPOCH).total_seconds()) + +DEFAULT_FALLBACK_DNS_SERVER = "8.8.8.8" +FALLBACK_DNS_LOCK = threading.RLock() +VERIFICATION_DOMAIN = config.DNS_VERIFICATION_DOMAIN + +RCODE_REFUSED = 5 + +DNS_SERVER: "DnsServerProtocol" = None 
+PREVIOUS_RESOLV_CONF_FILE: str | None = None + +REQUEST_TIMEOUT_SECS = 7 + +TYPE_LOOKUP = { + A: QTYPE.A, + AAAA: QTYPE.AAAA, + CNAME: QTYPE.CNAME, + MX: QTYPE.MX, + NS: QTYPE.NS, + SOA: QTYPE.SOA, + TXT: QTYPE.TXT, +} + +LOG = logging.getLogger(__name__) + +THREAD_LOCAL = threading.local() + +# Type of the value given by DNSHandler.client_address +# in the form (ip, port) e.g. ("127.0.0.1", 58291) +ClientAddress = Tuple[str, int] + +psutil_cache = TTLCache(maxsize=100, ttl=10) + + +# TODO: update route53 provider to use this util +def normalise_dns_name(name: DNSLabel | str) -> str: + name = str(name) + if not name.endswith("."): + return f"{name}." + + return name + + +@cached(cache=psutil_cache) +def list_network_interface_details() -> dict[str, list[snicaddr]]: + return psutil.net_if_addrs() + + +class Record: + def __init__(self, rdata_type, *args, **kwargs): + rtype = kwargs.get("rtype") + rname = kwargs.get("rname") + ttl = kwargs.get("ttl") + + if isinstance(rdata_type, RD): + # actually an instance, not a type + self._rtype = TYPE_LOOKUP[rdata_type.__class__] + rdata = rdata_type + else: + self._rtype = TYPE_LOOKUP[rdata_type] + if rdata_type == SOA and len(args) == 2: + # add sensible times to SOA + args += ( + ( + SERIAL, # serial number + 60 * 60 * 1, # refresh + 60 * 60 * 3, # retry + 60 * 60 * 24, # expire + 60 * 60 * 1, # minimum + ), + ) + rdata = rdata_type(*args) + + if rtype: + self._rtype = rtype + self._rname = rname + self.kwargs = dict(rdata=rdata, ttl=self.sensible_ttl() if ttl is None else ttl, **kwargs) + + def try_rr(self, q): + if q.qtype == QTYPE.ANY or q.qtype == self._rtype: + return self.as_rr(q.qname) + + def as_rr(self, alt_rname): + return RR(rname=self._rname or alt_rname, rtype=self._rtype, **self.kwargs) + + def sensible_ttl(self): + if self._rtype in (QTYPE.NS, QTYPE.SOA): + return 60 * 60 * 24 + else: + return 300 + + @property + def is_soa(self): + return self._rtype == QTYPE.SOA + + def __str__(self): + return f"{QTYPE[self._rtype]}({self.kwargs})" + + def __repr__(self): + return self.__str__() + + +class RecordConverter: + """ + Handles returning the correct DNS record for the stored name_record. + + Particularly, if the record is a DynamicRecord, then perform dynamic IP address lookup. 
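+
+    For example (illustrative): a ``TargetRecord(target="1.2.3.4", record_type=RecordType.A)`` is converted
+    into a static A record, while a ``DynamicRecord(record_type=RecordType.A)`` is resolved at query time to
+    the LocalStack container IP address that best matches the querying client's subnet.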
+ """ + + def __init__(self, request: DNSRecord, client_address: ClientAddress): + self.request = request + self.client_address = client_address + + def to_record(self, name_record: NameRecord) -> Record: + """ + :param name_record: Internal representation of the name entry + :return: Record type for the associated name record + """ + match name_record: + case TargetRecord(target=target, record_type=record_type): + return Record(RDMAP.get(record_type.name), target) + case SOARecord(m_name=m_name, r_name=r_name, record_type=_): + return Record(SOA, m_name, r_name) + case DynamicRecord(record_type=record_type): + # Marker indicating that the target of the domain name lookup should be resolved + # dynamically at query time to the most suitable LocalStack container IP address + ip = self._determine_best_ip() + # TODO: be more dynamic with IPv6 + if record_type == RecordType.AAAA: + ip = "::1" + return Record(RDMAP.get(record_type.name), ip) + case _: + raise NotImplementedError(f"Record type '{type(name_record)}' not implemented") + + def _determine_best_ip(self) -> str: + client_ip, _ = self.client_address + # allow for overriding if required + if config.DNS_RESOLVE_IP != LOCALHOST_IP: + return config.DNS_RESOLVE_IP + + # Look up best matching ip address for the client + interfaces = self._fetch_interfaces() + for interface in interfaces: + subnet = interface.network + ip_address = IPv4Address(client_ip) + if ip_address in subnet: + # check if the request has come from the gateway or not. If so + # assume the request has come from the host, and return + # 127.0.0.1 + if config.is_in_docker and self._is_gateway(ip_address): + return LOCALHOST_IP + + return str(interface.ip) + + # no best solution found + LOG.warning( + "could not determine subnet-matched IP address for %s, falling back to %s", + self.request.q.qname, + LOCALHOST_IP, + ) + return LOCALHOST_IP + + @staticmethod + def _is_gateway(ip: IPv4Address) -> bool: + """ + Look up the gateways that this contianer has, and return True if the + supplied ip address is in that list. + """ + return ip == iputils.get_default_gateway() + + @staticmethod + def _fetch_interfaces() -> Iterable[IPv4Interface]: + interfaces = list_network_interface_details() + for _, addresses in interfaces.items(): + for address in addresses: + if address.family != AddressFamily.AF_INET: + # TODO: IPv6 + continue + + # argument is of the form e.g. 127.0.0.1/255.0.0.0 + net = IPv4Interface(f"{address.address}/{address.netmask}") + yield net + + +class NonLoggingHandler(DNSHandler): + """Subclass of DNSHandler that avoids logging to stdout on error""" + + def handle(self, *args, **kwargs): + try: + THREAD_LOCAL.client_address = self.client_address + THREAD_LOCAL.server = self.server + THREAD_LOCAL.request = self.request + return super(NonLoggingHandler, self).handle(*args, **kwargs) + except Exception: + pass + + +# List of unique non-subdomain prefixes (e.g., data-) from endpoint.hostPrefix in the botocore specs. +# Subdomain-prefixes (e.g., api.) work properly unless DNS rebind protection blocks DNS resolution, but +# these `-` dash-prefixes require special consideration. 
+# IMPORTANT: Adding a new host prefix here requires deploying a public DNS entry to ensure proper DNS resolution for +# such non-dot prefixed domains (e.g., data-localhost.localstack.cloud) +# LIMITATION: As of 2025-05-26, only used prefixes are deployed to our public DNS, including `sync-` and `data-` +HOST_PREFIXES_NO_SUBDOMAIN = [ + "analytics-", + "control-storage-", + "data-", + "query-", + "runtime-", + "storage-", + "streaming-", + "sync-", + "tags-", + "workflows-", +] +HOST_PREFIX_NAME_PATTERNS = [ + f"{host_prefix}{LOCALHOST_HOSTNAME}" for host_prefix in HOST_PREFIXES_NO_SUBDOMAIN +] + +NAME_PATTERNS_POINTING_TO_LOCALSTACK = [ + f".*{LOCALHOST_HOSTNAME}", + *HOST_PREFIX_NAME_PATTERNS, +] + + +def exclude_from_resolution(domain_regex: str): + """ + Excludes the given domain pattern from being resolved to LocalStack. + Currently only works in docker, since in host mode dns is started as separate process + :param domain_regex: Domain regex string + """ + if DNS_SERVER: + DNS_SERVER.add_skip(domain_regex) + + +def revert_exclude_from_resolution(domain_regex: str): + """ + Reverts the exclusion of the given domain pattern + :param domain_regex: Domain regex string + """ + try: + if DNS_SERVER: + DNS_SERVER.delete_skip(domain_regex) + except ValueError: + pass + + +def _should_delete_zone(record_to_delete: NameRecord, record_to_check: NameRecord): + """ + Helper function to check if we should delete the record_to_check from the list we are iterating over + :param record_to_delete: Record which we got from the delete request + :param record_to_check: Record to be checked if it should be included in the records after delete + :return: + """ + if record_to_delete == record_to_check: + return True + return ( + record_to_delete.record_type == record_to_check.record_type + and record_to_delete.record_id == record_to_check.record_id + ) + + +def _should_delete_alias(alias_to_delete: AliasTarget, alias_to_check: AliasTarget): + """ + Helper function to check if we should delete the alias_to_check from the list we are iterating over + :param alias_to_delete: Alias which we got from the delete request + :param alias_to_check: Alias to be checked if it should be included in the records after delete + :return: + """ + return alias_to_delete.alias_id == alias_to_check.alias_id + + +class NoopLogger: + """ + Necessary helper class to avoid logging of any dns records by dnslib + """ + + def __init__(self, *args, **kwargs): + pass + + def log_pass(self, *args, **kwargs): + pass + + def log_prefix(self, *args, **kwargs): + pass + + def log_recv(self, *args, **kwargs): + pass + + def log_send(self, *args, **kwargs): + pass + + def log_request(self, *args, **kwargs): + pass + + def log_reply(self, *args, **kwargs): + pass + + def log_truncated(self, *args, **kwargs): + pass + + def log_error(self, *args, **kwargs): + pass + + def log_data(self, *args, **kwargs): + pass + + +class Resolver(DnsServerProtocol): + # Upstream DNS server + upstream_dns: str + # List of patterns which will be skipped for local resolution and always forwarded to upstream + skip_patterns: list[str] + # Dict of zones: (domain name or pattern) -> list[dns records] + zones: dict[str, list[NameRecord]] + # Alias map (source_name, record_type) => target_name (target name then still has to be resolved!) 
+    aliases: dict[tuple[DNSLabel, RecordType], list[AliasTarget]]
+    # Lock to prevent issues due to concurrent modifications
+    lock: threading.RLock
+
+    def __init__(self, upstream_dns: str):
+        self.upstream_dns = upstream_dns
+        self.skip_patterns = []
+        self.zones = {}
+        self.aliases = {}
+        self.lock = threading.RLock()
+
+    def resolve(self, request: DNSRecord, handler: DNSHandler) -> DNSRecord | None:
+        """
+        Resolve a given request, by either checking locally registered records, or forwarding to the defined
+        upstream DNS server.
+
+        :param request: DNS Request
+        :param handler: Request handler, used to determine the address of the requesting client
+        :return: DNS Reply
+        """
+        reply = request.reply()
+        found = False
+
+        try:
+            if not self._skip_local_resolution(request):
+                found = self._resolve_name(request, reply, handler.client_address)
+        except Exception as e:
+            LOG.info("Unable to get DNS result: %s", e)
+
+        if found:
+            return reply
+
+        # If we did not find a matching record in our local zones, we forward to our upstream dns
+        try:
+            req_parsed = dns.message.from_wire(bytes(request.pack()))
+            r = dns.query.udp(req_parsed, self.upstream_dns, timeout=REQUEST_TIMEOUT_SECS)
+            result = self._map_response_dnspython_to_dnslib(r)
+            return result
+        except Exception as e:
+            LOG.info(
+                "Unable to get DNS result from upstream server %s for domain %s: %s",
+                self.upstream_dns,
+                str(request.q.qname),
+                e,
+            )
+
+        # if we cannot reach upstream dns, return SERVFAIL
+        if not reply.rr and reply.header.get_rcode() == RCODE.NOERROR:
+            # setting this return code will cause commands like 'host' to try the next nameserver
+            reply.header.set_rcode(RCODE.SERVFAIL)
+            return None
+
+        return reply
+
+    def _skip_local_resolution(self, request) -> bool:
+        """
+        Check whether we should skip local resolution for the given request, and directly contact upstream
+
+        :param request: DNS Request
+        :return: Whether local resolution of the request should be skipped
+        """
+        request_name = to_str(str(request.q.qname))
+        for p in self.skip_patterns:
+            if re.match(p, request_name):
+                return True
+        return False
+
+    def _resolve_alias(
+        self, request: DNSRecord, reply: DNSRecord, client_address: ClientAddress
+    ) -> bool:
+        if request.q.qtype in (QTYPE.A, QTYPE.AAAA, QTYPE.CNAME):
+            key = (DNSLabel(to_bytes(request.q.qname)), RecordType[QTYPE[request.q.qtype]])
+            # check if we have aliases defined for our given qname/qtype pair
+            if aliases := self.aliases.get(key):
+                for alias in aliases:
+                    # if there is no health check, or the healthcheck is successful, we will consider this alias
+                    # take the first alias passing this check
+                    if not alias.health_check or alias.health_check():
+                        request_copy: DNSRecord = copy.deepcopy(request)
+                        request_copy.q.qname = alias.target
+                        # check if we can resolve the alias
+                        found = self._resolve_name_from_zones(request_copy, reply, client_address)
+                        if found:
+                            LOG.debug(
+                                "Found entry for AliasTarget '%s' ('%s')", request.q.qname, alias
+                            )
+                            # change the replaced rr-DNS names back to the original request
+                            for rr in reply.rr:
+                                rr.set_rname(request.q.qname)
+                        else:
+                            reply.header.set_rcode(RCODE.REFUSED)
+                        return True
+        return False
+
+    def _resolve_name(
+        self, request: DNSRecord, reply: DNSRecord, client_address: ClientAddress
+    ) -> bool:
+        if alias_found := self._resolve_alias(request, reply, client_address):
+            LOG.debug("Alias found: %s", request.q.qname)
+            return alias_found
+        return self._resolve_name_from_zones(request, reply, client_address)
+
+    def _resolve_name_from_zones(
+        self, request: DNSRecord, reply: DNSRecord, client_address: ClientAddress
+    ) -> bool:
+        found = False
+
+        converter = RecordConverter(request, client_address)
+
+        # check for direct (not regex based) response
+        zone = self.zones.get(normalise_dns_name(request.q.qname))
+        if zone is not None:
+            for zone_records in zone:
+                rr = converter.to_record(zone_records).try_rr(request.q)
+                if rr:
+                    found = True
+                    reply.add_answer(rr)
+        else:
+            # no direct zone so look for an SOA record for a higher level zone
+            for zone_label, zone_records in self.zones.items():
+                # try regex match
+                pattern = re.sub(r"(^|[^.])\*", ".*", str(zone_label))
+                if re.match(pattern, str(request.q.qname)):
+                    for record in zone_records:
+                        rr = converter.to_record(record).try_rr(request.q)
+                        if rr:
+                            found = True
+                            reply.add_answer(rr)
+                # try suffix match
+                elif request.q.qname.matchSuffix(to_bytes(zone_label)):
+                    try:
+                        soa_record = next(r for r in zone_records if converter.to_record(r).is_soa)
+                    except StopIteration:
+                        continue
+                    else:
+                        found = True
+                        reply.add_answer(converter.to_record(soa_record).as_rr(zone_label))
+                        break
+        return found
+
+    def _parse_section(self, section: str) -> list[RR]:
+        result = []
+        for line in section.split("\n"):
+            line = line.strip()
+            if line:
+                if line.startswith(";"):
+                    # section ended, stop parsing
+                    break
+                else:
+                    result += RR.fromZone(line)
+        return result
+
+    def _map_response_dnspython_to_dnslib(self, response):
+        """Map response object from dnspython to dnslib (looks like we cannot
+        simply export/import the raw messages from the wire)"""
+        flags = dns.flags.to_text(response.flags)
+
+        def flag(f):
+            return 1 if f.upper() in flags else 0
+
+        questions = []
+        for q in response.question:
+            questions.append(DNSQuestion(qname=str(q.name), qtype=q.rdtype, qclass=q.rdclass))
+
+        result = DNSRecord(
+            DNSHeader(
+                qr=flag("qr"), aa=flag("aa"), ra=flag("ra"), id=response.id, rcode=response.rcode()
+            ),
+            q=questions[0],
+        )
+
+        # extract answers
+        answer_parts = str(response).partition(";ANSWER")
+        result.add_answer(*self._parse_section(answer_parts[2]))
+        # extract authority information
+        authority_parts = str(response).partition(";AUTHORITY")
+        result.add_auth(*self._parse_section(authority_parts[2]))
+        return result
+
+    def add_host(self, name: str, record: NameRecord):
+        LOG.debug("Adding host %s with record %s", name, record)
+        name = normalise_dns_name(name)
+        with self.lock:
+            self.zones.setdefault(name, [])
+            self.zones[name].append(record)
+
+    def delete_host(self, name: str, record: NameRecord):
+        LOG.debug("Deleting host %s with record %s", name, record)
+        name = normalise_dns_name(name)
+        with self.lock:
+            if not self.zones.get(name):
+                raise ValueError(f"Could not find entry {record} for name {name} in zones")
+            self.zones.setdefault(name, [])
+            current_zones = self.zones[name]
+            self.zones[name] = [
+                zone for zone in self.zones[name] if not _should_delete_zone(record, zone)
+            ]
+            if self.zones[name] == current_zones:
+                raise ValueError(f"Could not find entry {record} for name {name} in zones")
+            # if we deleted the last entry, clean up
+            if not self.zones[name]:
+                del self.zones[name]
+
+    def add_alias(self, source_name: str, record_type: RecordType, target: AliasTarget):
+        LOG.debug("Adding alias %s with record type %s target %s", source_name, record_type, target)
+        label = (DNSLabel(to_bytes(source_name)), record_type)
+        with self.lock:
+            self.aliases.setdefault(label, [])
+            self.aliases[label].append(target)
+
+    def delete_alias(self, source_name: str, record_type: RecordType, target: AliasTarget):
+        LOG.debug(
+            "Deleting alias %s with record type %s",
+            source_name,
+            record_type,
+        )
+        label = (DNSLabel(to_bytes(source_name)), record_type)
+        with self.lock:
+            if not self.aliases.get(label):
+                raise ValueError(
+                    f"Could not find entry {target} for name {source_name}, record type {record_type} in aliases"
+                )
+            self.aliases.setdefault(label, [])
+            current_aliases = self.aliases[label]
+            self.aliases[label] = [
+                alias for alias in self.aliases[label] if not _should_delete_alias(target, alias)
+            ]
+            if self.aliases[label] == current_aliases:
+                raise ValueError(
+                    f"Could not find entry {target} for name {source_name}, record type {record_type} in aliases"
+                )
+            # if we deleted the last entry, clean up
+            if not self.aliases[label]:
+                del self.aliases[label]
+
+    def add_host_pointing_to_localstack(self, name: str):
+        LOG.debug("Adding host %s pointing to LocalStack", name)
+        self.add_host(name, DynamicRecord(record_type=RecordType.A))
+        if config.DNS_RESOLVE_IP == config.LOCALHOST_IP:
+            self.add_host(name, DynamicRecord(record_type=RecordType.AAAA))
+
+    def delete_host_pointing_to_localstack(self, name: str):
+        LOG.debug("Deleting host %s pointing to LocalStack", name)
+        self.delete_host(name, DynamicRecord(record_type=RecordType.A))
+        if config.DNS_RESOLVE_IP == config.LOCALHOST_IP:
+            self.delete_host(name, DynamicRecord(record_type=RecordType.AAAA))
+
+    def add_skip(self, skip_pattern: str):
+        LOG.debug("Adding skip pattern %s", skip_pattern)
+        self.skip_patterns.append(skip_pattern)
+
+    def delete_skip(self, skip_pattern: str):
+        LOG.debug("Deleting skip pattern %s", skip_pattern)
+        self.skip_patterns.remove(skip_pattern)
+
+    def clear(self):
+        LOG.debug("Clearing DNS zones")
+        self.skip_patterns.clear()
+        self.zones.clear()
+        self.aliases.clear()
+
+
+class DnsServer(Server, DnsServerProtocol):
+    servers: list[DNSServer]
+    resolver: Resolver | None
+
+    def __init__(
+        self,
+        port: int,
+        protocols: list[Literal["udp", "tcp"]],
+        upstream_dns: str,
+        host: str = "0.0.0.0",
+    ) -> None:
+        super().__init__(port, host)
+        self.resolver = Resolver(upstream_dns=upstream_dns)
+        self.protocols = protocols
+        self.servers = []
+        self.handler_class = NonLoggingHandler
+
+    def _get_servers(self) -> list[DNSServer]:
+        servers = []
+        for protocol in self.protocols:
+            # TODO add option to use normal logger instead of NoopLogger for verbose debug mode
+            servers.append(
+                DNSServer(
+                    self.resolver,
+                    handler=self.handler_class,
+                    logger=NoopLogger(),
+                    port=self.port,
+                    address=self.host,
+                    tcp=protocol == "tcp",
+                )
+            )
+        return servers
+
+    @property
+    def protocol(self):
+        return "udp"
+
+    def health(self):
+        """
+        Runs a health check on the server, by issuing a test DNS query and verifying that it yields an answer.
+        """
+        try:
+            request = dns.message.make_query("localhost.localstack.cloud", "A")
+            answers = dns.query.udp(request, "127.0.0.1", port=self.port, timeout=0.5).answer
+            return len(answers) > 0
+        except Exception:
+            return False
+
+    def do_run(self):
+        self.servers = self._get_servers()
+        for server in self.servers:
+            server.start_thread()
+        LOG.debug("DNS Server started")
+        for server in self.servers:
+            server.thread.join()
+
+    def do_shutdown(self):
+        for server in self.servers:
+            server.stop()
+
+    def add_host(self, name: str, record: NameRecord):
+        self.resolver.add_host(name, record)
+
+    def delete_host(self, name: str, record: NameRecord):
+        self.resolver.delete_host(name, record)
+
+    def add_alias(self, source_name: str, record_type: RecordType, target: AliasTarget):
+        self.resolver.add_alias(source_name, record_type, target)
+
+    def delete_alias(self, source_name: str, record_type: RecordType, target: AliasTarget):
+        self.resolver.delete_alias(source_name, record_type, target)
+
+    def add_host_pointing_to_localstack(self, name: str):
+        self.resolver.add_host_pointing_to_localstack(name)
+
+    def delete_host_pointing_to_localstack(self, name: str):
+        self.resolver.delete_host_pointing_to_localstack(name)
+
+    def add_skip(self, skip_pattern: str):
+        self.resolver.add_skip(skip_pattern)
+
+    def delete_skip(self, skip_pattern: str):
+        self.resolver.delete_skip(skip_pattern)
+
+    def clear(self):
+        self.resolver.clear()
+
+
+class SeparateProcessDNSServer(Server, DnsServerProtocol):
+    def __init__(
+        self,
+        port: int = 53,
+        host: str = "0.0.0.0",
+    ) -> None:
+        super().__init__(port, host)
+
+    @property
+    def protocol(self):
+        return "udp"
+
+    def health(self):
+        """
+        Runs a health check on the server, by issuing a test DNS query and verifying that it yields an answer.
+        """
+        try:
+            request = dns.message.make_query("localhost.localstack.cloud", "A")
+            answers = dns.query.udp(request, "127.0.0.1", port=self.port, timeout=0.5).answer
+            return len(answers) > 0
+        except Exception:
+            return False
+
+    def do_start_thread(self):
+        # For host mode
+        env_vars = {}
+        for env_var in config.CONFIG_ENV_VARS:
+            if env_var.startswith("DNS_"):
+                value = os.environ.get(env_var, None)
+                if value is not None:
+                    env_vars[env_var] = value
+
+        # note: running in a separate process breaks integration with Route53 (to be fixed for local dev mode!)
+        thread = run_module_as_sudo(
+            "localstack.dns.server",
+            asynchronous=True,
+            env_vars=env_vars,
+            arguments=["-p", str(self.port)],
+        )
+        return thread
+
+
+def get_fallback_dns_server():
+    return config.DNS_SERVER or get_available_dns_server()
+
+
+@cache
+def get_available_dns_server():
+    # TODO check if more loop-checks are necessary than just not using our own DNS server
+    with FALLBACK_DNS_LOCK:
+        resolver = dns.resolver.Resolver()
+        # we do not want to include localhost here, or a loop might happen
+        candidates = [r for r in resolver.nameservers if r != "127.0.0.1"]
+        result = None
+        candidates.append(DEFAULT_FALLBACK_DNS_SERVER)
+        for ns in candidates:
+            resolver.nameservers = [ns]
+            try:
+                try:
+                    answer = resolver.resolve(VERIFICATION_DOMAIN, "a", lifetime=3)
+                    answer = [
+                        res.to_text() for answers in answer.response.answer for res in answers.items
+                    ]
+                except Timeout:
+                    answer = None
+                if not answer:
+                    continue
+                result = ns
+                break
+            except Exception:
+                pass
+
+        if result:
+            LOG.debug("Determined fallback dns: %s", result)
+        else:
+            LOG.info(
+                "Unable to determine fallback DNS. Please check if '%s' is reachable by your configured DNS servers. "
+                "DNS fallback will be disabled.",
+                VERIFICATION_DOMAIN,
+            )
+        return result
+
+
+# ###### LEGACY METHODS ######
+def add_resolv_entry(file_path: Path | str = Path("/etc/resolv.conf")):
+    global PREVIOUS_RESOLV_CONF_FILE
+    # never overwrite the host configuration without the user's permission
+    if not in_docker():
+        LOG.warning("Incorrectly attempted to alter host networking config")
+        return
+
+    LOG.debug("Overwriting container DNS server to point to localhost")
+    content = textwrap.dedent(
+        """
+        # The following line is required by LocalStack
+        nameserver 127.0.0.1
+        """
+    )
+    file_path = Path(file_path)
+    try:
+        with file_path.open("r+") as outfile:
+            PREVIOUS_RESOLV_CONF_FILE = outfile.read()
+            previous_resolv_conf_without_nameservers = [
+                line
+                for line in PREVIOUS_RESOLV_CONF_FILE.splitlines()
+                if not line.startswith("nameserver")
+            ]
+            outfile.seek(0)
+            outfile.write(content)
+            outfile.write("\n".join(previous_resolv_conf_without_nameservers))
+            outfile.truncate()
+    except Exception:
+        LOG.warning(
+            "Could not update container DNS settings", exc_info=LOG.isEnabledFor(logging.DEBUG)
+        )
+
+
+def revert_resolv_entry(file_path: Path | str = Path("/etc/resolv.conf")):
+    # never overwrite the host configuration without the user's permission
+    if not in_docker():
+        LOG.warning("Incorrectly attempted to alter host networking config")
+        return
+
+    if not PREVIOUS_RESOLV_CONF_FILE:
+        LOG.warning("resolv.conf file to restore not found.")
+        return
+
+    LOG.debug("Reverting container DNS config")
+    file_path = Path(file_path)
+    try:
+        with file_path.open("w") as outfile:
+            outfile.write(PREVIOUS_RESOLV_CONF_FILE)
+    except Exception:
+        LOG.warning(
+            "Could not revert container DNS settings", exc_info=LOG.isEnabledFor(logging.DEBUG)
+        )
+
+
+def setup_network_configuration():
+    # check if DNS is disabled
+    if not config.use_custom_dns():
+        return
+
+    # add entry to /etc/resolv.conf
+    if in_docker():
+        add_resolv_entry()
+
+
+def revert_network_configuration():
+    # check if DNS is disabled
+    if not config.use_custom_dns():
+        return
+
+    # revert the entry in /etc/resolv.conf
+    if in_docker():
+        revert_resolv_entry()
+
+
+def start_server(upstream_dns: str, host: str, port: int = config.DNS_PORT):
+    global DNS_SERVER
+
+    if DNS_SERVER:
+        # already started - bail
+        LOG.debug("DNS servers are already started. Avoid starting again.")
+        return
+
+    LOG.debug("Starting DNS servers (tcp/udp port %s on %s)...", port, host)
+    dns_server = DnsServer(port, protocols=["tcp", "udp"], host=host, upstream_dns=upstream_dns)
+
+    for name in NAME_PATTERNS_POINTING_TO_LOCALSTACK:
+        dns_server.add_host_pointing_to_localstack(name)
+    if config.LOCALSTACK_HOST.host != LOCALHOST_HOSTNAME:
+        dns_server.add_host_pointing_to_localstack(f".*{config.LOCALSTACK_HOST.host}")
+
+    # support both DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM and DNS_LOCAL_NAME_PATTERNS
+    # until the next major version change
+    # TODO(srw): remove the usage of DNS_LOCAL_NAME_PATTERNS
+    skip_local_resolution = " ".join(
+        [
+            config.DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM,
+            config.DNS_LOCAL_NAME_PATTERNS,
+        ]
+    ).strip()
+    if skip_local_resolution:
+        for skip_pattern in re.split(r"[,;\s]+", skip_local_resolution):
+            dns_server.add_skip(skip_pattern.strip(" \"'"))
+
+    dns_server.start()
+    if not dns_server.wait_is_up(timeout=5):
+        LOG.warning("DNS server did not come up within 5 seconds.")
+        dns_server.shutdown()
+        return
+    DNS_SERVER = dns_server
+    LOG.debug("DNS server startup finished.")
+
+
+def stop_servers():
+    if DNS_SERVER:
+        DNS_SERVER.shutdown()
+
+
+def start_dns_server_as_sudo(port: int):
+    global DNS_SERVER
+    LOG.debug(
+        "Starting the DNS on its privileged port (%s) needs root permissions. Trying to start DNS with sudo.",
+        port,
+    )
+
+    dns_server = SeparateProcessDNSServer(port)
+    dns_server.start()
+
+    if not dns_server.wait_is_up(timeout=5):
+        LOG.warning("DNS server did not come up within 5 seconds.")
+        dns_server.shutdown()
+        return
+
+    DNS_SERVER = dns_server
+    LOG.debug("DNS server startup finished (as sudo).")
+
+
+def start_dns_server(port: int, asynchronous: bool = False, standalone: bool = False):
+    if DNS_SERVER:
+        # already started - bail
+        LOG.error("DNS servers are already started. Avoid starting again.")
+        return
+
+    # check if DNS server is disabled
+    if not config.use_custom_dns():
+        LOG.debug("Not starting DNS. DNS_ADDRESS=%s", config.DNS_ADDRESS)
+        return
+
+    upstream_dns = get_fallback_dns_server()
+    if not upstream_dns:
+        LOG.warning("Error starting the DNS server: No upstream dns server found.")
+        return
+
+    # host to bind the DNS server to.
In docker we always want to bind to "0.0.0.0" + host = config.DNS_ADDRESS + if in_docker(): + host = "0.0.0.0" + + if port_can_be_bound(Port(port, "udp"), address=host): + start_server(port=port, host=host, upstream_dns=upstream_dns) + if not asynchronous: + sleep_forever() + return + + if standalone: + LOG.debug("Already in standalone mode and port binding still fails.") + return + + start_dns_server_as_sudo(port) + + +def get_dns_server() -> DnsServerProtocol: + return DNS_SERVER + + +def is_server_running() -> bool: + return DNS_SERVER is not None + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("-p", "--port", required=False, default=53, type=int) + args = parser.parse_args() + + start_dns_server(asynchronous=False, port=args.port, standalone=True) diff --git a/localstack-core/localstack/extensions/__init__.py b/localstack-core/localstack/extensions/__init__.py new file mode 100644 index 0000000000000..3b52add044d38 --- /dev/null +++ b/localstack-core/localstack/extensions/__init__.py @@ -0,0 +1,3 @@ +"""Extensions are third-party software modules to customize localstack.""" + +name = "extensions" diff --git a/localstack-core/localstack/extensions/api/__init__.py b/localstack-core/localstack/extensions/api/__init__.py new file mode 100644 index 0000000000000..9335bae5fe7c2 --- /dev/null +++ b/localstack-core/localstack/extensions/api/__init__.py @@ -0,0 +1,7 @@ +"""Public facing API for users to build LocalStack extensions.""" + +from .extension import Extension + +name = "api" + +__all__ = ["Extension"] diff --git a/localstack-core/localstack/extensions/api/aws.py b/localstack-core/localstack/extensions/api/aws.py new file mode 100644 index 0000000000000..120bf4958e72b --- /dev/null +++ b/localstack-core/localstack/extensions/api/aws.py @@ -0,0 +1,31 @@ +from localstack.aws.api import ( + CommonServiceException, + RequestContext, + ServiceException, + ServiceRequest, + ServiceResponse, +) +from localstack.aws.chain import ( + CompositeExceptionHandler, + CompositeHandler, + CompositeResponseHandler, + ExceptionHandler, + HandlerChain, +) +from localstack.aws.chain import Handler as RequestHandler +from localstack.aws.chain import Handler as ResponseHandler + +__all__ = [ + "RequestContext", + "ServiceRequest", + "ServiceResponse", + "ServiceException", + "CommonServiceException", + "RequestHandler", + "ResponseHandler", + "HandlerChain", + "CompositeHandler", + "ExceptionHandler", + "CompositeResponseHandler", + "CompositeExceptionHandler", +] diff --git a/localstack/extensions/api/extension.py b/localstack-core/localstack/extensions/api/extension.py similarity index 85% rename from localstack/extensions/api/extension.py rename to localstack-core/localstack/extensions/api/extension.py index 9f25eaca98f6d..080735c4ae3a3 100644 --- a/localstack/extensions/api/extension.py +++ b/localstack-core/localstack/extensions/api/extension.py @@ -1,6 +1,6 @@ -from plugin import Plugin +from plux import Plugin -from .aws import CompositeHandler, CompositeResponseHandler +from .aws import CompositeExceptionHandler, CompositeHandler, CompositeResponseHandler from .http import RouteHandler, Router @@ -17,7 +17,6 @@ def load(self, *args, **kwargs): :param kwargs: load keyword arguments :return: this extension object """ - self.on_extension_load(*args, **kwargs) return self def on_extension_load(self, *args, **kwargs): @@ -79,6 +78,14 @@ def update_response_handlers(self, handlers: CompositeResponseHandler): """ pass + def update_exception_handlers(self, 
handlers: CompositeExceptionHandler): + """ + Called with the custom exception handlers of the LocalStack gateway. Overwrite this to add or update handlers. + + :param handlers: custom exception handlers of the gateway + """ + pass + def on_platform_ready(self): """ Called when LocalStack is ready and the Ready marker has been printed. diff --git a/localstack/extensions/api/http.py b/localstack-core/localstack/extensions/api/http.py similarity index 100% rename from localstack/extensions/api/http.py rename to localstack-core/localstack/extensions/api/http.py diff --git a/localstack/extensions/api/runtime.py b/localstack-core/localstack/extensions/api/runtime.py similarity index 100% rename from localstack/extensions/api/runtime.py rename to localstack-core/localstack/extensions/api/runtime.py diff --git a/localstack/extensions/api/services.py b/localstack-core/localstack/extensions/api/services.py similarity index 100% rename from localstack/extensions/api/services.py rename to localstack-core/localstack/extensions/api/services.py diff --git a/localstack/services/awslambda/__init__.py b/localstack-core/localstack/extensions/patterns/__init__.py similarity index 100% rename from localstack/services/awslambda/__init__.py rename to localstack-core/localstack/extensions/patterns/__init__.py diff --git a/localstack-core/localstack/extensions/patterns/webapp.py b/localstack-core/localstack/extensions/patterns/webapp.py new file mode 100644 index 0000000000000..ab69d935d729c --- /dev/null +++ b/localstack-core/localstack/extensions/patterns/webapp.py @@ -0,0 +1,333 @@ +import importlib +import logging +import mimetypes +import typing as t +from functools import cached_property + +from rolo.gateway import HandlerChain +from rolo.router import RuleAdapter, WithHost +from werkzeug.routing import Submount + +from localstack import config +from localstack.aws.api import RequestContext +from localstack.extensions.api import Extension, http + +if t.TYPE_CHECKING: + # although jinja2 is included transitively via moto, let's make sure jinja2 stays optional + import jinja2 + +LOG = logging.getLogger(__name__) + +_default = object() + + +class WebAppExtension(Extension): + """ + EXPERIMENTAL! This class is experimental and the API may change without notice. + + A webapp extension serves routes, templates, and static files via a submount and a subdomain through + localstack. + + It assumes you have the following directory layout:: + + my_extension + ├── extension.py + ├── __init__.py + ├── static <-- make sure static resources get packaged! + │ ├── __init__.py + │ ├── favicon.ico + │ └── style.css + └── templates <-- jinja2 templates + └── index.html + + Given this layout, you can define your extensions in ``my_extension.extension`` like this. 
Routes defined in the
+    extension itself are automatically registered::
+
+        class MyExtension(WebAppExtension):
+            name = "my-extension"
+
+            @route("/")
+            def index(self, request: Request) -> Response:
+                # reference `static/style.css` to serve the static file from your package
+                return self.render_template_response("index.html")
+
+            @route("/hello")
+            def hello(self, request: Request):
+                return {"message": "Hello World!"}
+
+    This will create an extension that localstack serves via:
+
+    * Submount: https://localhost.localstack.cloud:4566/_extension/my-extension
+    * Subdomain: https://my-extension.localhost.localstack.cloud:4566/
+
+    Both are created for full flexibility:
+
+    * Subdomains: create a domain namespace that can be helpful for some extensions, especially when
+      running on the local machine
+    * Submounts: for some environments, like in ephemeral instances where subdomains are harder to control,
+      submounts are more convenient
+
+    Any routes added by the extension will be served relative to these URLs.
+    """
+
+    def __init__(
+        self,
+        mount: str = None,
+        submount: str | None = _default,
+        subdomain: str | None = _default,
+        template_package_path: str | None = _default,
+        static_package_path: str | None = _default,
+        static_url_path: str = None,
+    ):
+        """
+        Overwrite to customize your extension. For example, you can disable certain behavior by calling
+        ``super().__init__(subdomain=None, static_package_path=None)``, which will disable serving through
+        a subdomain, and disable static file serving.
+
+        :param mount: the "mount point" which will be used as default value for the submount and
+            subdomain, i.e., ``<mount>.localhost.localstack.cloud`` and
+            ``localhost.localstack.cloud/_extension/<mount>``. Defaults to the extension name. Note that,
+            in case the mount name clashes with another extension, extensions may overwrite each other's
+            routes.
+        :param submount: the submount path, needs to start with a leading slash (default
+            ``/_extension/<mount>``)
+        :param subdomain: the subdomain (defaults to the value of ``mount``)
+        :param template_package_path: the path to the templates within the module. defaults to
+            ``templates``, which expands to ``<extension-module>.templates``
+        :param static_package_path: the package serving static files. defaults to ``static``, which expands to
+            ``<extension-module>.static``.
+        :param static_url_path: the URL path to serve static files from (defaults to ``/static``)
+        """
+        mount = mount or self.name
+
+        self.submount = f"/_extension/{mount}" if submount is _default else submount
+        self.subdomain = mount if subdomain is _default else subdomain
+
+        self.template_package_path = (
+            "templates" if template_package_path is _default else template_package_path
+        )
+        self.static_package_path = (
+            "static" if static_package_path is _default else static_package_path
+        )
+        self.static_url_path = static_url_path or "/static"
+
+        self.static_resource_module = None
+
+    def collect_routes(self, routes: list[t.Any]):
+        """
+        This method can be overwritten to add more routes to the controller. Everything in ``routes`` will
+        be added to a ``RuleAdapter`` and subsequently mounted into the gateway router.
+
+        Here are some examples::
+
+            class MyRoutes:
+                @route("/hello")
+                def hello(self, request):
+                    return "Hello World!"
+ + class MyExtension(WebAppExtension): + name = "my-extension" + + def collect_routes(self, routes: list[t.Any]): + + # scans all routes of MyRoutes + routes.append(MyRoutes()) + # use rule adapters to add routes without decorators + routes.append(RuleAdapter("/say-hello", self.say_hello)) + + # no idea why you would want to do this, but you can :-) + @route("/empty-dict") + def _inline_handler(request: Request) -> Response: + return Response.for_json({}) + routes.append(_inline_handler) + + def say_hello(request: Request): + return {"message": "Hello World!"} + + This creates the following routes available through both subdomain and submount. + + With subdomain: + + * ``my-extension.localhost.localstack.cloud:4566/hello`` + * ``my-extension.localhost.localstack.cloud:4566/say-hello`` + * ``my-extension.localhost.localstack.cloud:4566/empty-dict`` + * ``my-extension.localhost.localstack.cloud:4566/static`` <- automatically added static file endpoint + + With submount: + + * ``localhost.localstack.cloud:4566/_extension/my-extension/hello`` + * ``localhost.localstack.cloud:4566/_extension/my-extension/say-hello`` + * ``localhost.localstack.cloud:4566/_extension/my-extension/empty-dict`` + * ``localhost.localstack.cloud:4566/_extension/my-extension/static`` <- auto-added static file serving + + :param routes: the routes being collected + """ + pass + + @cached_property + def template_env(self) -> t.Optional["jinja2.Environment"]: + """ + Returns the singleton jinja2 template environment. By default, the environment uses a + ``PackageLoader`` that loads from ``my_extension.templates`` (where ``my_extension`` is the root + module of the extension, and ``templates`` refers to ``self.template_package_path``, + which is ``templates`` by default). + + :return: a template environment + """ + if self.template_package_path: + return self._create_template_env() + return None + + def _create_template_env(self) -> "jinja2.Environment": + """ + Factory method to create the jinja2 template environment. + :return: a new jinja2 environment + """ + import jinja2 + + return jinja2.Environment( + loader=jinja2.PackageLoader( + self.get_extension_module_root(), self.template_package_path + ), + autoescape=jinja2.select_autoescape(), + ) + + def render_template(self, template_name, **context) -> str: + """ + Uses the ``template_env`` to render a template and return the string value. + + :param template_name: the template name + :param context: template context + :return: the rendered result + """ + template = self.template_env.get_template(template_name) + return template.render(**context) + + def render_template_response(self, template_name, **context) -> http.Response: + """ + Uses the ``template_env`` to render a template into an HTTP response. It guesses the mimetype from the + template's file name. + + :param template_name: the template name + :param context: template context + :return: the rendered result as response + """ + template = self.template_env.get_template(template_name) + + mimetype = mimetypes.guess_type(template.filename) + mimetype = mimetype[0] if mimetype and mimetype[0] else "text/plain" + + return http.Response(response=template.render(**context), mimetype=mimetype) + + def on_extension_load(self): + logging.getLogger(self.get_extension_module_root()).setLevel( + logging.DEBUG if config.DEBUG else logging.INFO + ) + + if self.static_package_path and not self.static_resource_module: + try: + self.static_resource_module = importlib.import_module( + self.get_extension_module_root() + "." 
+ self.static_package_path
+                )
+            except ModuleNotFoundError:
+                LOG.warning("disabling static resources for extension %s", self.name)
+
+    def _preprocess_request(
+        self, chain: HandlerChain, context: RequestContext, _response: http.Response
+    ):
+        """
+        Default request pre-processor, which redirects requests hitting the bare submount to the same URL
+        with a trailing slash. For instance, ``/_extension/my-extension`` is redirected (301) to
+        ``/_extension/my-extension/``. This is so you can reference relative paths like ``<link
+        href="static/style.css">`` in your HTML safely, and it will work with both subdomain and submount.
+        """
+        path = context.request.path
+
+        if path == self.submount.rstrip("/"):
+            chain.respond(301, headers={"Location": context.request.url + "/"})
+
+    def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
+        from localstack.aws.handlers import preprocess_request
+
+        if self.submount:
+            preprocess_request.append(self._preprocess_request)
+
+        # adding self here makes sure that any ``@route`` decorators on the extension are mapped automatically
+        routes = [self]
+
+        if self.static_resource_module:
+            routes.append(
+                RuleAdapter(f"{self.static_url_path}/<path:path>", self._serve_static_file)
+            )
+
+        self.collect_routes(routes)
+
+        app = RuleAdapter(routes)
+
+        if self.submount:
+            router.add(Submount(self.submount, [app]))
+            LOG.info(
+                "%s extension available at %s%s",
+                self.name,
+                config.external_service_url(),
+                self.submount,
+            )
+
+        if self.subdomain:
+            router.add(WithHost(f"{self.subdomain}.<__host__>", [app]))
+            self._configure_cors_for_subdomain()
+            LOG.info(
+                "%s extension available at %s",
+                self.name,
+                config.external_service_url(subdomains=self.subdomain),
+            )
+
+    def _serve_static_file(self, _request: http.Request, path: str):
+        """Route for serving static files, for ``/_extension/my-extension/static/<path:path>``."""
+        return http.Response.for_resource(self.static_resource_module, path)
+
+    def _configure_cors_for_subdomain(self):
+        """
+        Automatically configures CORS for the subdomain, for both HTTP and HTTPS.
+        """
+        from localstack.aws.handlers.cors import ALLOWED_CORS_ORIGINS
+
+        for protocol in ("http", "https"):
+            url = self.get_subdomain_url(protocol)
+            LOG.debug("adding %s to ALLOWED_CORS_ORIGINS", url)
+            ALLOWED_CORS_ORIGINS.append(url)
+
+    def get_subdomain_url(self, protocol: str = "https") -> str:
+        """
+        Returns the URL that serves the extension under its subdomain, e.g.,
+        ``https://my-extension.localhost.localstack.cloud:4566/``.
+
+        :return: a URL this extension is served at
+        """
+        if not self.subdomain:
+            raise ValueError(f"Subdomain for extension {self.name} is not set")
+        return config.external_service_url(subdomains=self.subdomain, protocol=protocol)
+
+    def get_submount_url(self, protocol: str = "https") -> str:
+        """
+        Returns the URL that serves the extension under its submount, e.g.,
+        ``https://localhost.localstack.cloud:4566/_extension/my-extension``.
+
+        :return: a URL this extension is served at
+        """
+
+        if not self.submount:
+            raise ValueError(f"Submount for extension {self.name} is not set")
+
+        return f"{config.external_service_url(protocol=protocol)}{self.submount}"
+
+    @classmethod
+    def get_extension_module_root(cls) -> str:
+        """
+        Returns the root of the extension module. For instance, if the extension lives in
+        ``my_extension/plugins/extension.py``, then this will return ``my_extension``. Used to set up the
+        logger as well as the template environment and the static file module.
+ + :return: the root module the extension lives in + """ + return cls.__module__.split(".")[0] diff --git a/localstack/http/__init__.py b/localstack-core/localstack/http/__init__.py similarity index 100% rename from localstack/http/__init__.py rename to localstack-core/localstack/http/__init__.py diff --git a/localstack-core/localstack/http/asgi.py b/localstack-core/localstack/http/asgi.py new file mode 100644 index 0000000000000..8ba3dd3454bd3 --- /dev/null +++ b/localstack-core/localstack/http/asgi.py @@ -0,0 +1,21 @@ +from rolo.asgi import ( + ASGIAdapter, + ASGILifespanListener, + RawHTTPRequestEventStreamAdapter, + WebSocketEnvironment, + WebSocketListener, + WsgiStartResponse, + create_wsgi_input, + populate_wsgi_environment, +) + +__all__ = [ + "WebSocketEnvironment", + "populate_wsgi_environment", + "create_wsgi_input", + "RawHTTPRequestEventStreamAdapter", + "WsgiStartResponse", + "ASGILifespanListener", + "WebSocketListener", + "ASGIAdapter", +] diff --git a/localstack-core/localstack/http/client.py b/localstack-core/localstack/http/client.py new file mode 100644 index 0000000000000..cb8f4b33aee31 --- /dev/null +++ b/localstack-core/localstack/http/client.py @@ -0,0 +1,7 @@ +from rolo.client import HttpClient, SimpleRequestsClient, make_request + +__all__ = [ + "HttpClient", + "SimpleRequestsClient", + "make_request", +] diff --git a/localstack-core/localstack/http/dispatcher.py b/localstack-core/localstack/http/dispatcher.py new file mode 100644 index 0000000000000..308450fbd3296 --- /dev/null +++ b/localstack-core/localstack/http/dispatcher.py @@ -0,0 +1,25 @@ +from json import JSONEncoder +from typing import Type + +from rolo.routing.handler import Handler, ResultValue +from rolo.routing.handler import handler_dispatcher as _handler_dispatcher +from rolo.routing.router import Dispatcher + +from localstack.utils.json import CustomEncoder + +__all__ = [ + "ResultValue", + "Handler", + "handler_dispatcher", +] + + +def handler_dispatcher(json_encoder: Type[JSONEncoder] = None) -> Dispatcher[Handler]: + """ + Replacement for ``rolo.dispatcher.handler_dispatcher`` that uses by default LocalStack's CustomEncoder for + serializing JSON documents. 
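+
+    Example (illustrative sketch; ``Router`` is re-exported from ``localstack.http.router``)::
+
+        router = Router(dispatcher=handler_dispatcher())
+
+        def ping(request, **kwargs):
+            # dict results are serialized to JSON using CustomEncoder
+            return {"status": "ok"}
+
+        router.add("/ping", ping)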
+
+    :param json_encoder: the encoder to use
+    :return: a Dispatcher that dispatches to instances of a Handler
+    """
+    return _handler_dispatcher(json_encoder or CustomEncoder)
diff --git a/localstack-core/localstack/http/duplex_socket.py b/localstack-core/localstack/http/duplex_socket.py
new file mode 100644
index 0000000000000..8006f398668e5
--- /dev/null
+++ b/localstack-core/localstack/http/duplex_socket.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import logging
+import socket
+import ssl
+from asyncio.selector_events import BaseSelectorEventLoop
+
+from localstack.utils.asyncio import run_sync
+from localstack.utils.objects import singleton_factory
+from localstack.utils.patch import Patch, patch
+
+# set up logger
+LOG = logging.getLogger(__name__)
+
+
+class DuplexSocket(ssl.SSLSocket):
+    """Simple duplex socket wrapper that allows serving HTTP/HTTPS over the same port."""
+
+    def accept(self):
+        newsock, addr = socket.socket.accept(self)
+        if DuplexSocket.is_ssl_socket(newsock) is not False:
+            newsock = self.context.wrap_socket(
+                newsock,
+                do_handshake_on_connect=self.do_handshake_on_connect,
+                suppress_ragged_eofs=self.suppress_ragged_eofs,
+                server_side=True,
+            )
+
+        return newsock, addr
+
+    @staticmethod
+    def is_ssl_socket(newsock):
+        """Returns True/False depending on whether the socket uses SSL, or None if the status cannot be
+        determined"""
+
+        def peek_ssl_header():
+            peek_bytes = 5
+            first_bytes = newsock.recv(peek_bytes, socket.MSG_PEEK)
+            if len(first_bytes or "") != peek_bytes:
+                return
+            # a TLS record starts with a non-printable content-type byte (e.g., 0x16 for handshake),
+            # whereas plain HTTP requests start with a printable ASCII method name
+            first_byte = first_bytes[0]
+            return first_byte < 32 or first_byte >= 127
+
+        try:
+            return peek_ssl_header()
+        except Exception:
+            # Fix for "[Errno 11] Resource temporarily unavailable" - This can
+            # happen if we're using a non-blocking socket in a blocking thread.
+            newsock.setblocking(1)
+            newsock.settimeout(1)
+            try:
+                return peek_ssl_header()
+            except Exception:
+                return False
+
+
+@singleton_factory
+def enable_duplex_socket():
+    """
+    Function which replaces the ssl.SSLContext.sslsocket_class with the DuplexSocket, enabling serving both
+    HTTP and HTTPS connections on a single port.
+ """ + + # set globally defined SSL socket implementation class + Patch(ssl.SSLContext, "sslsocket_class", DuplexSocket).apply() + + if hasattr(BaseSelectorEventLoop, "_accept_connection2"): + + @patch(BaseSelectorEventLoop._accept_connection2) + async def _accept_connection2( + fn, self, protocol_factory, conn, extra, sslcontext, *args, **kwargs + ): + is_ssl_socket = await run_sync(DuplexSocket.is_ssl_socket, conn) + if is_ssl_socket is False: + sslcontext = None + result = await fn(self, protocol_factory, conn, extra, sslcontext, *args, **kwargs) + return result diff --git a/localstack-core/localstack/http/hypercorn.py b/localstack-core/localstack/http/hypercorn.py new file mode 100644 index 0000000000000..e14f2e167c797 --- /dev/null +++ b/localstack-core/localstack/http/hypercorn.py @@ -0,0 +1,146 @@ +import asyncio +import threading +from asyncio import AbstractEventLoop + +from hypercorn import Config +from hypercorn.asyncio import serve +from hypercorn.typing import ASGIFramework + +from localstack.aws.gateway import Gateway +from localstack.aws.handlers.proxy import ProxyHandler +from localstack.aws.serving.asgi import AsgiGateway +from localstack.config import HostAndPort +from localstack.logging.setup import setup_hypercorn_logger +from localstack.utils.collections import ensure_list +from localstack.utils.functions import call_safe +from localstack.utils.serving import Server +from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available + + +class HypercornServer(Server): + """ + A sync wrapper around Hypercorn that implements the ``Server`` interface. + """ + + def __init__(self, app: ASGIFramework, config: Config, loop: AbstractEventLoop = None): + """ + Create a new Hypercorn server instance. Note that, if you pass an event loop to the constructor, + you are yielding control of that event loop to the server, as it will invoke `run_until_complete` and + shutdown the loop. + + :param app: the ASGI3 app + :param config: the hypercorn config + :param loop: optionally the event loop, otherwise ``asyncio.new_event_loop`` will be called + """ + self.app = app + self.config = config + self.loop = loop or asyncio.new_event_loop() + + self._close = asyncio.Event() + self._closed = threading.Event() + + parts = config.bind[0].split(":") + if len(parts) == 1: + # check ssl + host = parts[0] + port = 443 if config.ssl_enabled else 80 + else: + host, port = parts[0], int(parts[1]) + + super().__init__(port, host) + + @property + def protocol(self): + return "https" if self.config.ssl_enabled else "http" + + def do_run(self): + self.loop.run_until_complete( + serve(self.app, self.config, shutdown_trigger=self._shutdown_trigger) + ) + self._closed.set() + + def do_shutdown(self): + asyncio.run_coroutine_threadsafe(self._set_closed(), self.loop) + self._closed.wait(timeout=10) + asyncio.run_coroutine_threadsafe(self.loop.shutdown_asyncgens(), self.loop) + self.loop.shutdown_default_executor() + self.loop.stop() + call_safe(self.loop.close) + + async def _set_closed(self): + self._close.set() + + async def _shutdown_trigger(self): + await self._close.wait() + + +class GatewayServer(HypercornServer): + """ + A Hypercorn-based server implementation which serves a given Gateway. + It can be used to easily spawn new gateway servers, defining their individual request-, response-, and + exception-handlers. 
+ """ + + def __init__( + self, + gateway: Gateway, + listen: HostAndPort | list[HostAndPort], + use_ssl: bool = False, + threads: int | None = None, + ): + """ + Creates a new GatewayServer instance. + + :param gateway: which will be served by this server + :param listen: defining the address and port pairs this server binds to. Can be a list of host and port pairs. + :param use_ssl: True if the LocalStack cert should be loaded and HTTP/HTTPS multiplexing should be enabled. + :param threads: Number of worker threads the gateway will use. + """ + # build server config + config = Config() + config.h11_pass_raw_headers = True + setup_hypercorn_logger(config) + + listens = ensure_list(listen) + config.bind = [str(host_and_port) for host_and_port in listens] + + if use_ssl: + install_predefined_cert_if_available() + serial_number = listens[0].port + _, cert_file_name, key_file_name = create_ssl_cert(serial_number=serial_number) + config.certfile = cert_file_name + config.keyfile = key_file_name + + # build gateway + loop = asyncio.new_event_loop() + app = AsgiGateway(gateway, event_loop=loop, threads=threads) + + # start serving gateway + super().__init__(app, config, loop) + + def do_shutdown(self): + super().do_shutdown() + self.app.close() # noqa (app will be of type AsgiGateway) + + +class ProxyServer(GatewayServer): + """ + Proxy server implementation which uses the localstack.http.proxy module. + These server instances can be spawned easily, while implementing HTTP/HTTPS multiplexing (if enabled), + and just forward all incoming requests to a backend. + """ + + def __init__( + self, forward_base_url: str, listen: HostAndPort | list[HostAndPort], use_ssl: bool = False + ): + """ + Creates a new ProxyServer instance. + + :param forward_base_url: URL of the backend system all requests this server receives should be forwarded to + :param port: defining the port of this server instance + :param bind_address: to bind this server instance to. Can be a host string or a list of host strings. + :param use_ssl: True if the LocalStack cert should be loaded and HTTP/HTTPS multiplexing should be enabled. 
+ """ + gateway = Gateway() + gateway.request_handlers.append(ProxyHandler(forward_base_url=forward_base_url)) + super().__init__(gateway, listen, use_ssl) diff --git a/localstack-core/localstack/http/proxy.py b/localstack-core/localstack/http/proxy.py new file mode 100644 index 0000000000000..35cf74719277a --- /dev/null +++ b/localstack-core/localstack/http/proxy.py @@ -0,0 +1,7 @@ +from rolo.proxy import Proxy, ProxyHandler, forward + +__all__ = [ + "forward", + "Proxy", + "ProxyHandler", +] diff --git a/localstack-core/localstack/http/request.py b/localstack-core/localstack/http/request.py new file mode 100644 index 0000000000000..411ead4ab6bde --- /dev/null +++ b/localstack-core/localstack/http/request.py @@ -0,0 +1,21 @@ +from rolo.request import ( + Request, + dummy_wsgi_environment, + get_full_raw_path, + get_raw_base_url, + get_raw_current_url, + get_raw_path, + restore_payload, + set_environment_headers, +) + +__all__ = [ + "dummy_wsgi_environment", + "set_environment_headers", + "Request", + "get_raw_path", + "get_full_raw_path", + "get_raw_base_url", + "get_raw_current_url", + "restore_payload", +] diff --git a/localstack-core/localstack/http/resource.py b/localstack-core/localstack/http/resource.py new file mode 100644 index 0000000000000..40db6d941b0aa --- /dev/null +++ b/localstack-core/localstack/http/resource.py @@ -0,0 +1,6 @@ +from rolo.resource import Resource, resource + +__all__ = [ + "resource", + "Resource", +] diff --git a/localstack/services/awslambda/event_source_listeners/__init__.py b/localstack-core/localstack/http/resources/__init__.py similarity index 100% rename from localstack/services/awslambda/event_source_listeners/__init__.py rename to localstack-core/localstack/http/resources/__init__.py diff --git a/localstack/services/awslambda/layerfetcher/__init__.py b/localstack-core/localstack/http/resources/swagger/__init__.py similarity index 100% rename from localstack/services/awslambda/layerfetcher/__init__.py rename to localstack-core/localstack/http/resources/swagger/__init__.py diff --git a/localstack-core/localstack/http/resources/swagger/endpoints.py b/localstack-core/localstack/http/resources/swagger/endpoints.py new file mode 100644 index 0000000000000..f6cef4c9a33f8 --- /dev/null +++ b/localstack-core/localstack/http/resources/swagger/endpoints.py @@ -0,0 +1,25 @@ +import os + +from jinja2 import Environment, FileSystemLoader +from rolo import Request, route + +from localstack.config import external_service_url +from localstack.http import Response + + +def _get_service_url(request: Request) -> str: + # special case for ephemeral instances + if "sandbox.localstack.cloud" in request.host: + return external_service_url(protocol="https", port=443) + return external_service_url(protocol=request.scheme) + + +class SwaggerUIApi: + @route("/_localstack/swagger", methods=["GET"]) + def server_swagger_ui(self, request: Request) -> Response: + init_path = f"{_get_service_url(request)}/openapi.yaml" + oas_path = os.path.join(os.path.dirname(__file__), "templates") + env = Environment(loader=FileSystemLoader(oas_path)) + template = env.get_template("index.html") + rendered_template = template.render(swagger_url=init_path) + return Response(rendered_template, content_type="text/html") diff --git a/localstack-core/localstack/http/resources/swagger/plugins.py b/localstack-core/localstack/http/resources/swagger/plugins.py new file mode 100644 index 0000000000000..2e464f50deacd --- /dev/null +++ b/localstack-core/localstack/http/resources/swagger/plugins.py @@ -0,0 
diff --git a/localstack-core/localstack/http/proxy.py b/localstack-core/localstack/http/proxy.py new file mode 100644 index 0000000000000..35cf74719277a --- /dev/null +++ b/localstack-core/localstack/http/proxy.py @@ -0,0 +1,7 @@ +from rolo.proxy import Proxy, ProxyHandler, forward + +__all__ = [ + "forward", + "Proxy", + "ProxyHandler", +] diff --git a/localstack-core/localstack/http/request.py b/localstack-core/localstack/http/request.py new file mode 100644 index 0000000000000..411ead4ab6bde --- /dev/null +++ b/localstack-core/localstack/http/request.py @@ -0,0 +1,21 @@ +from rolo.request import ( + Request, + dummy_wsgi_environment, + get_full_raw_path, + get_raw_base_url, + get_raw_current_url, + get_raw_path, + restore_payload, + set_environment_headers, +) + +__all__ = [ + "dummy_wsgi_environment", + "set_environment_headers", + "Request", + "get_raw_path", + "get_full_raw_path", + "get_raw_base_url", + "get_raw_current_url", + "restore_payload", +] diff --git a/localstack-core/localstack/http/resource.py b/localstack-core/localstack/http/resource.py new file mode 100644 index 0000000000000..40db6d941b0aa --- /dev/null +++ b/localstack-core/localstack/http/resource.py @@ -0,0 +1,6 @@ +from rolo.resource import Resource, resource + +__all__ = [ + "resource", + "Resource", +] diff --git a/localstack/services/awslambda/event_source_listeners/__init__.py b/localstack-core/localstack/http/resources/__init__.py similarity index 100% rename from localstack/services/awslambda/event_source_listeners/__init__.py rename to localstack-core/localstack/http/resources/__init__.py diff --git a/localstack/services/awslambda/layerfetcher/__init__.py b/localstack-core/localstack/http/resources/swagger/__init__.py similarity index 100% rename from localstack/services/awslambda/layerfetcher/__init__.py rename to localstack-core/localstack/http/resources/swagger/__init__.py diff --git a/localstack-core/localstack/http/resources/swagger/endpoints.py b/localstack-core/localstack/http/resources/swagger/endpoints.py new file mode 100644 index 0000000000000..f6cef4c9a33f8 --- /dev/null +++ b/localstack-core/localstack/http/resources/swagger/endpoints.py @@ -0,0 +1,25 @@ +import os + +from jinja2 import Environment, FileSystemLoader +from rolo import Request, route + +from localstack.config import external_service_url +from localstack.http import Response + + +def _get_service_url(request: Request) -> str: + # special case for ephemeral instances + if "sandbox.localstack.cloud" in request.host: + return external_service_url(protocol="https", port=443) + return external_service_url(protocol=request.scheme) + + +class SwaggerUIApi: + @route("/_localstack/swagger", methods=["GET"]) + def server_swagger_ui(self, request: Request) -> Response: + init_path = f"{_get_service_url(request)}/openapi.yaml" + oas_path = os.path.join(os.path.dirname(__file__), "templates") + env = Environment(loader=FileSystemLoader(oas_path)) + template = env.get_template("index.html") + rendered_template = template.render(swagger_url=init_path) + return Response(rendered_template, content_type="text/html") diff --git a/localstack-core/localstack/http/resources/swagger/plugins.py b/localstack-core/localstack/http/resources/swagger/plugins.py new file mode 100644 index 0000000000000..2e464f50deacd --- /dev/null +++ b/localstack-core/localstack/http/resources/swagger/plugins.py @@ -0,0 +1,23 @@ +import werkzeug +import yaml +from rolo.routing import RuleAdapter + +from localstack.http.resources.swagger.endpoints import SwaggerUIApi +from localstack.runtime import hooks +from localstack.services.edge import ROUTER +from localstack.services.internal import get_internal_apis +from localstack.utils.openapi import get_localstack_openapi_spec + + +@hooks.on_infra_start() +def register_swagger_endpoints(): + get_internal_apis().add(SwaggerUIApi()) + + def _serve_openapi_spec(_request): + spec = get_localstack_openapi_spec() + response_body = yaml.dump(spec) + return werkzeug.Response( + response_body, content_type="application/yaml", direct_passthrough=True + ) + + ROUTER.add(RuleAdapter("/openapi.yaml", _serve_openapi_spec)) diff --git a/localstack-core/localstack/http/resources/swagger/templates/index.html b/localstack-core/localstack/http/resources/swagger/templates/index.html new file mode 100644 index 0000000000000..a852b132deb56 --- /dev/null +++ b/localstack-core/localstack/http/resources/swagger/templates/index.html @@ -0,0 +1,22 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <meta name="viewport" content="width=device-width, initial-scale=1" /> + <meta name="description" content="SwaggerUI" /> + <title>SwaggerUI</title> + <link rel="stylesheet" href="https://unpkg.com/swagger-ui-dist/swagger-ui.css" /> +</head> +<body> +<div id="swagger-ui"></div> +<script src="https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js" crossorigin></script> +<script> + window.onload = () => { + window.ui = SwaggerUIBundle({ + url: '{{ swagger_url }}', + dom_id: '#swagger-ui', + }); + }; +</script> +</body> +</html>
diff --git a/localstack-core/localstack/http/response.py b/localstack-core/localstack/http/response.py new file mode 100644 index 0000000000000..66863c147d370 --- /dev/null +++ b/localstack-core/localstack/http/response.py @@ -0,0 +1,22 @@ +from json import JSONEncoder +from typing import Any, Type + +from rolo import Response as RoloResponse + +from localstack.utils.common import CustomEncoder + + +class Response(RoloResponse): + """ + An HTTP Response object, which simply extends werkzeug's Response object with a few convenience methods. + """ + + def set_json(self, doc: Any, cls: Type[JSONEncoder] = CustomEncoder): + """ + Serializes the given dictionary using localstack's ``CustomEncoder`` into a json response, and sets the + mimetype automatically to ``application/json``. + + :param doc: the response dictionary to be serialized as JSON + :param cls: the json encoder used + """ + return super().set_json(doc, cls or CustomEncoder) diff --git a/localstack-core/localstack/http/router.py b/localstack-core/localstack/http/router.py new file mode 100644 index 0000000000000..da3bcdfe043c0 --- /dev/null +++ b/localstack-core/localstack/http/router.py @@ -0,0 +1,52 @@ +from typing import ( + Any, + Mapping, + TypeVar, +) + +from rolo.routing import ( + PortConverter, + RegexConverter, + Router, + RuleAdapter, + RuleGroup, + WithHost, + route, +) +from rolo.routing.router import Dispatcher, call_endpoint +from werkzeug.routing import PathConverter + +HTTP_METHODS = ("GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS", "TRACE") + +E = TypeVar("E") +RequestArguments = Mapping[str, Any] + + +class GreedyPathConverter(PathConverter): + """ + This converter makes sure that the path ``/mybucket//mykey`` can be matched to the pattern + ``/<Bucket>/<path:Key>`` and will result in `Key` being `/mykey`. + """ + + regex = ".*?" + + part_isolating = False + """From the werkzeug docs: If a custom converter can match a forward slash, /, it should have the + attribute part_isolating set to False. This will ensure that rules using the custom converter are + correctly matched.""" + + +__all__ = [ + "RequestArguments", + "HTTP_METHODS", + "RegexConverter", + "PortConverter", + "Dispatcher", + "route", + "call_endpoint", + "Router", + "RuleAdapter", + "WithHost", + "RuleGroup", + "GreedyPathConverter", +]
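
To make the converter's docstring concrete, here is a small self-contained check using werkzeug's `Map` directly; the rule, endpoint name, and the `merge_slashes=False` choice are illustrative assumptions, not part of the patch:

```python
from werkzeug.routing import Map, Rule

from localstack.http.router import GreedyPathConverter

url_map = Map(
    [Rule("/<bucket>/<greedy_path:key>", endpoint="get-object")],
    converters={"greedy_path": GreedyPathConverter},
    merge_slashes=False,  # keep the empty path segment so the converter sees it
)
endpoint, args = url_map.bind("localhost").match("/mybucket//mykey")
assert endpoint == "get-object"
assert args["key"] == "/mykey"  # the double slash survives in the greedy part
```
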
+ """ + + key: str + value: Any | None + + def __init__(self, key: str, value: Any | None = None): + super().__init__("set") + self.key = key + self.value = value + + def __repr__(self): + if self.value is None: + return f"set {self.key}" + return f"set {self.key} = {self.value!r}" + + +class ModifyHeadersAction(Action): + """ + The handler modified headers in some way, either adding, updating, or removing headers. + """ + + def __init__(self, name: str, before: Headers, after: Headers): + super().__init__(name) + self.before = before + self.after = after + + @property + def header_actions(self) -> list[Action]: + after = self.after + before = self.before + + actions = [] + + headers_set = dict(set(after.items()) - set(before.items())) + headers_removed = {k: v for k, v in before.items() if k not in after} + + for k, v in headers_set.items(): + actions.append(Action(f"set '{k}: {v}'")) + for k, v in headers_removed.items(): + actions.append(Action(f"del '{k}: {v}'")) + + return actions + + +@dataclasses.dataclass +class HandlerTrace: + handler: Handler + """The handler""" + duration_ms: float + """The runtime duration of the handler in milliseconds""" + actions: list[Action] + """The actions the handler chain performed""" + + @property + def handler_module(self): + return self.handler.__module__ + + @property + def handler_name(self): + if inspect.isfunction(self.handler): + return self.handler.__name__ + else: + return self.handler.__class__.__name__ + + +def _log_method_call(name: str, actions: list[Action]): + """Creates a wrapper around the original method `_fn`. It appends an action to the `actions` + list indicating that the function was called and then returns the original function.""" + + def _proxy(self, _fn, *args, **kwargs): + actions.append(Action(f"call {name}")) + return _fn(*args, **kwargs) + + return _proxy + + +class TracingHandlerBase: + """ + This class is a Handler that records a trace of the execution of another request handler. It has two + attributes: `trace`, which stores the tracing information, and `delegate`, which is the handler or + exception handler that will be traced. + """ + + trace: HandlerTrace | None + delegate: Handler | ExceptionHandler + + def __init__(self, delegate: Handler | ExceptionHandler): + self.trace = None + self.delegate = delegate + + def do_trace_call( + self, fn: Callable, chain: HandlerChain, context: RequestContext, response: Response + ): + """ + Wraps the function call with the tracing functionality and records a HandlerTrace. + + The method determines changes made by the request handler to specific aspects of the request. + Changes made to the request context and the response headers/status by the request handler are then + examined, and appropriate actions are added to the `actions` list of the trace. 
+ + :param fn: the function to be traced, i.e. the request/response/exception handler + :param chain: the handler chain + :param context: the request context + :param response: the response object + """ + then = time.perf_counter() + + actions = [] + + prev_context = dict(context.__dict__) + prev_stopped = chain.stopped + prev_request_identity = id(context.request) + prev_terminated = chain.terminated + prev_request_headers = context.request.headers.copy() + prev_response_headers = response.headers.copy() + prev_response_status = response.status_code + + # add patches to log invocations of certain functions + patches = Patches( + [ + Patch.function( + context.request.get_data, + _log_method_call("request.get_data", actions), + ), + Patch.function( + context.request._load_form_data, + _log_method_call("request._load_form_data", actions), + ), + Patch.function( + response.get_data, + _log_method_call("response.get_data", actions), + ), + ] + ) + patches.apply() + + try: + return fn() + finally: + now = time.perf_counter() + # determine some basic things the handler changed in the context + patches.undo() + + # chain + if chain.stopped and not prev_stopped: + actions.append(Action("stop chain")) + if chain.terminated and not prev_terminated: + actions.append(Action("terminate chain")) + + # detect when attributes are set in the request context + context_args = dict(context.__dict__) + context_args.pop("request", None) # request is handled separately + + for k, v in context_args.items(): + if not v: + continue + if prev_context.get(k): + # TODO: we could introduce "ModifyAttributeAction(k,v)" with an additional check + # ``if v != prev_context.get(k)`` + continue + actions.append(SetAttributeAction(k, v)) + + # request + if id(context.request) != prev_request_identity: + actions.append(Action("replaced request object")) + + # response + if response.status_code != prev_response_status: + actions.append(SetAttributeAction("response status_code", response.status_code)) + if context.request.headers != prev_request_headers: + actions.append( + ModifyHeadersAction( + "modify request headers", + prev_request_headers, + context.request.headers.copy(), + ) + ) + if response.headers != prev_response_headers: + actions.append( + ModifyHeadersAction( + "modify response headers", prev_response_headers, response.headers.copy() + ) + ) + + self.trace = HandlerTrace( + handler=self.delegate, duration_ms=(now - then) * 1000, actions=actions + ) + + +class TracingHandler(TracingHandlerBase): + delegate: Handler + + def __init__(self, delegate: Handler): + super().__init__(delegate) + + def __call__(self, chain: HandlerChain, context: RequestContext, response: Response): + def _call(): + return self.delegate(chain, context, response) + + return self.do_trace_call(_call, chain, context, response) + + +class TracingExceptionHandler(TracingHandlerBase): + delegate: ExceptionHandler + + def __init__(self, delegate: ExceptionHandler): + super().__init__(delegate) + + def __call__( + self, chain: HandlerChain, exception: Exception, context: RequestContext, response: Response + ): + def _call(): + return self.delegate(chain, exception, context, response) + + return self.do_trace_call(_call, chain, context, response) + + +class TracingHandlerChain(HandlerChain): + """ + TracingHandlerChain - A subclass of HandlerChain for logging and tracing handlers. + + Attributes: + - duration (float): Total time taken for handling the request in milliseconds.
+ - request_handler_traces (list[HandlerTrace]): List of request handler traces. + - response_handler_traces (list[HandlerTrace]): List of response handler traces. + - finalizer_traces (list[HandlerTrace]): List of finalizer traces. + - exception_handler_traces (list[HandlerTrace]): List of exception handler traces. + """ + + duration: float + request_handler_traces: list[HandlerTrace] + response_handler_traces: list[HandlerTrace] + finalizer_traces: list[HandlerTrace] + exception_handler_traces: list[HandlerTrace] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.request_handler_traces = [] + self.response_handler_traces = [] + self.finalizer_traces = [] + self.exception_handler_traces = [] + + def handle(self, context: RequestContext, response: Response): + """Overrides HandlerChain's handle method and adds tracing handlers to the request handlers. Logs the trace + report with request and response details.""" + then = time.perf_counter() + try: + self.request_handlers = [TracingHandler(handler) for handler in self.request_handlers] + return super().handle(context, response) + finally: + self.duration = (time.perf_counter() - then) * 1000 + self.request_handler_traces = [handler.trace for handler in self.request_handlers] + self._log_report() + + def _call_response_handlers(self, response): + self.response_handlers = [TracingHandler(handler) for handler in self.response_handlers] + try: + return super()._call_response_handlers(response) + finally: + self.response_handler_traces = [handler.trace for handler in self.response_handlers] + + def _call_finalizers(self, response): + self.finalizers = [TracingHandler(handler) for handler in self.finalizers] + try: + return super()._call_finalizers(response) + finally: + self.finalizer_traces = [handler.trace for handler in self.finalizers] + + def _call_exception_handlers(self, e, response): + self.exception_handlers = [ + TracingExceptionHandler(handler) for handler in self.exception_handlers + ] + try: + return super()._call_exception_handlers(e, response) + finally: + self.exception_handler_traces = [handler.trace for handler in self.exception_handlers] + + def _log_report(self): + report = [] + request = self.context.request + response = self.response + + def _append_traces(traces: list[HandlerTrace]): + """Formats and appends a list of traces to the report, and recursively appends the trace's + actions (if any).""" + + for trace in traces: + if trace is None: + continue + + report.append( + f"{trace.handler_module:43s} {trace.handler_name:30s} {trace.duration_ms:8.2f}ms" + ) + _append_actions(trace.actions, 46) + + def _append_actions(actions: list[Action], indent: int): + for action in actions: + report.append((" " * indent) + f"- {action!r}") + + if isinstance(action, ModifyHeadersAction): + _append_actions(action.header_actions, indent + 2) + + report.append(f"request: {request.method} {request.url}") + report.append(f"response: {response.status_code}") + report.append("---- request handlers " + ("-" * 63)) + _append_traces(self.request_handler_traces) + report.append("---- response handlers " + ("-" * 63)) + _append_traces(self.response_handler_traces) + report.append("---- finalizers " + ("-" * 63)) + _append_traces(self.finalizer_traces) + report.append("---- exception handlers " + ("-" * 63)) + _append_traces(self.exception_handler_traces) + # Add a separator and total duration value to the end of the report + report.append(f"{'=' * 68} total {self.duration:8.2f}ms") + + LOG.info("handler chain trace
report:\n%s\n%s", "=" * 85, "\n".join(report)) diff --git a/localstack-core/localstack/http/websocket.py b/localstack-core/localstack/http/websocket.py new file mode 100644 index 0000000000000..9bd92a927a998 --- /dev/null +++ b/localstack-core/localstack/http/websocket.py @@ -0,0 +1,15 @@ +from rolo.websocket.websocket import ( + WebSocket, + WebSocketDisconnectedError, + WebSocketError, + WebSocketProtocolError, + WebSocketRequest, +) + +__all__ = [ + "WebSocketError", + "WebSocketDisconnectedError", + "WebSocketProtocolError", + "WebSocket", + "WebSocketRequest", +] diff --git a/localstack/services/cloudformation/__init__.py b/localstack-core/localstack/logging/__init__.py similarity index 100% rename from localstack/services/cloudformation/__init__.py rename to localstack-core/localstack/logging/__init__.py diff --git a/localstack-core/localstack/logging/format.py b/localstack-core/localstack/logging/format.py new file mode 100644 index 0000000000000..5f308e34d9ecf --- /dev/null +++ b/localstack-core/localstack/logging/format.py @@ -0,0 +1,194 @@ +"""Tools for formatting localstack logs.""" + +import logging +import re +from functools import lru_cache +from typing import Any, Dict + +from localstack.utils.numbers import format_bytes +from localstack.utils.strings import to_bytes + +MAX_THREAD_NAME_LEN = 12 +MAX_NAME_LEN = 26 + +LOG_FORMAT = f"%(asctime)s.%(msecs)03d %(ls_level)5s --- [%(ls_thread){MAX_THREAD_NAME_LEN}s] %(ls_name)-{MAX_NAME_LEN}s : %(message)s" +LOG_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S" +LOG_INPUT_FORMAT = "%(input_type)s(%(input)s, headers=%(request_headers)s)" +LOG_OUTPUT_FORMAT = "%(output_type)s(%(output)s, headers=%(response_headers)s)" +LOG_CONTEXT_FORMAT = "%(account_id)s/%(region)s" + +CUSTOM_LEVEL_NAMES = { + 50: "FATAL", + 40: "ERROR", + 30: "WARN", + 20: "INFO", + 10: "DEBUG", +} + + +class DefaultFormatter(logging.Formatter): + """ + A formatter that uses ``LOG_FORMAT`` and ``LOG_DATE_FORMAT``. + """ + + def __init__(self, fmt=LOG_FORMAT, datefmt=LOG_DATE_FORMAT): + super(DefaultFormatter, self).__init__(fmt=fmt, datefmt=datefmt) + + +class AddFormattedAttributes(logging.Filter): + """ + Filter that adds three attributes to a log record: + + - ls_level: the abbreviated loglevel that's max 5 characters long + - ls_name: the abbreviated name of the logger (e.g., `l.bootstrap.install`), trimmed to ``MAX_NAME_LEN`` + - ls_thread: the abbreviated thread name (prefix trimmed, .e.g, ``omeThread-108``) + """ + + max_name_len: int + max_thread_len: int + + def __init__(self, max_name_len: int = None, max_thread_len: int = None): + super(AddFormattedAttributes, self).__init__() + self.max_name_len = max_name_len if max_name_len else MAX_NAME_LEN + self.max_thread_len = max_thread_len if max_thread_len else MAX_THREAD_NAME_LEN + + def filter(self, record): + record.ls_level = CUSTOM_LEVEL_NAMES.get(record.levelno, record.levelname) + record.ls_name = self._get_compressed_logger_name(record.name) + record.ls_thread = record.threadName[-self.max_thread_len :] + return True + + @lru_cache(maxsize=256) + def _get_compressed_logger_name(self, name): + return compress_logger_name(name, self.max_name_len) + + +class MaskSensitiveInputFilter(logging.Filter): + """ + Filter that hides sensitive from a binary json string in a record input. + It will find the mathing keys and replace their values with "******" + + For example, if initialized with `sensitive_keys=["my_key"]`, the input + b'{"my_key": "sensitive_value"}' would become b'{"my_key": "******"}'. 
+ """ + + patterns: list[tuple[re.Pattern[bytes], bytes]] + + def __init__(self, sensitive_keys: list[str]): + super(MaskSensitiveInputFilter, self).__init__() + + self.patterns = [ + (re.compile(to_bytes(rf'"{key}":\s*"[^"]+"')), to_bytes(f'"{key}": "******"')) + for key in sensitive_keys + ] + + def filter(self, record): + if record.input and isinstance(record.input, bytes): + record.input = self.mask_sensitive_msg(record.input) + return True + + def mask_sensitive_msg(self, message: bytes) -> bytes: + for pattern, replacement in self.patterns: + message = re.sub(pattern, replacement, message) + return message + + +def compress_logger_name(name: str, length: int) -> str: + """ + Creates a short version of a logger name. For example ``my.very.long.logger.name`` with length=17 turns into + ``m.v.l.logger.name``. + + :param name: the logger name + :param length: the max length of the logger name + :return: the compressed name + """ + if len(name) <= length: + return name + + parts = name.split(".") + parts.reverse() + + new_parts = [] + + # we start by assuming that all parts are collapsed + # x.x.x requires 5 = 2n - 1 characters + cur_length = (len(parts) * 2) - 1 + + for i in range(len(parts)): + # try to expand the current part and calculate the resulting length + part = parts[i] + next_len = cur_length + (len(part) - 1) + + if next_len > length: + # if the resulting length would exceed the limit, add only the first letter of the parts of all remaining + # parts + new_parts += [p[0] for p in parts[i:]] + + # but if this is the first item, that means we would display nothing, so at least display as much of the + # max length as possible + if i == 0: + remaining = length - cur_length + if remaining > 0: + new_parts[0] = part[: (remaining + 1)] + + break + + # expanding the current part, i.e., instead of using just the one character, we add the entire part + new_parts.append(part) + cur_length = next_len + + new_parts.reverse() + return ".".join(new_parts) + + +class TraceLoggingFormatter(logging.Formatter): + aws_trace_log_format = "; ".join([LOG_FORMAT, LOG_INPUT_FORMAT, LOG_OUTPUT_FORMAT]) + bytes_length_display_threshold = 512 + + def __init__(self): + super().__init__(fmt=self.aws_trace_log_format, datefmt=LOG_DATE_FORMAT) + + def _replace_large_payloads(self, input: Any) -> Any: + """ + Replaces large payloads in the logs with placeholders to avoid cluttering the logs with huge bytes payloads. + :param input: Input/output extra passed when logging. 
+ + +class TraceLoggingFormatter(logging.Formatter): + aws_trace_log_format = "; ".join([LOG_FORMAT, LOG_INPUT_FORMAT, LOG_OUTPUT_FORMAT]) + bytes_length_display_threshold = 512 + + def __init__(self): + super().__init__(fmt=self.aws_trace_log_format, datefmt=LOG_DATE_FORMAT) + + def _replace_large_payloads(self, input: Any) -> Any: + """ + Replaces large payloads in the logs with placeholders to avoid cluttering the logs with huge bytes payloads. + :param input: Input/output extra passed when logging. If it is bytes, it will be replaced if larger than + bytes_length_display_threshold + :return: Input, unless it is bytes and longer than bytes_length_display_threshold, then `Bytes(length_of_input)` + """ + if isinstance(input, bytes) and len(input) > self.bytes_length_display_threshold: + return f"Bytes({format_bytes(len(input))})" + return input + + def format(self, record: logging.LogRecord) -> str: + record.input = self._replace_large_payloads(record.input) + record.output = self._replace_large_payloads(record.output) + return super().format(record=record) + + +class AwsTraceLoggingFormatter(TraceLoggingFormatter): + aws_trace_log_format = "; ".join( + [LOG_FORMAT, LOG_CONTEXT_FORMAT, LOG_INPUT_FORMAT, LOG_OUTPUT_FORMAT] + ) + + def __init__(self): + super().__init__() + + def _copy_service_dict(self, service_dict: Dict) -> Dict: + if not isinstance(service_dict, Dict): + return service_dict + result = {} + for key, value in service_dict.items(): + if isinstance(value, dict): + result[key] = self._copy_service_dict(value) + elif isinstance(value, bytes) and len(value) > self.bytes_length_display_threshold: + result[key] = f"Bytes({format_bytes(len(value))})" + elif isinstance(value, list): + result[key] = [self._copy_service_dict(item) for item in value] + else: + result[key] = value + return result + + def format(self, record: logging.LogRecord) -> str: + record.input = self._copy_service_dict(record.input) + record.output = self._copy_service_dict(record.output) + return super().format(record=record) diff --git a/localstack-core/localstack/logging/setup.py b/localstack-core/localstack/logging/setup.py new file mode 100644 index 0000000000000..4a10d7cb7452d --- /dev/null +++ b/localstack-core/localstack/logging/setup.py @@ -0,0 +1,142 @@ +import logging +import sys +import warnings + +from localstack import config, constants + +from ..utils.strings import key_value_pairs_to_dict +from .format import AddFormattedAttributes, DefaultFormatter + +# The log levels for modules are evaluated incrementally for logging granularity, +# from highest (DEBUG) to lowest (TRACE_INTERNAL). Hence, each module below should have +# a higher level which serves as the default.
+ +default_log_levels = { + "asyncio": logging.INFO, + "boto3": logging.INFO, + "botocore": logging.ERROR, + "docker": logging.WARNING, + "elasticsearch": logging.ERROR, + "hpack": logging.ERROR, + "moto": logging.WARNING, + "requests": logging.WARNING, + "s3transfer": logging.INFO, + "urllib3": logging.WARNING, + "werkzeug": logging.WARNING, + "rolo": logging.WARNING, + "parse": logging.WARNING, + "localstack.aws.accounts": logging.INFO, + "localstack.aws.protocol.serializer": logging.INFO, + "localstack.aws.serving.wsgi": logging.WARNING, + "localstack.request": logging.INFO, + "localstack.request.internal": logging.WARNING, + "localstack.state.inspect": logging.INFO, + "localstack_persistence": logging.INFO, +} + +trace_log_levels = { + "rolo": logging.DEBUG, + "localstack.aws.protocol.serializer": logging.DEBUG, + "localstack.aws.serving.wsgi": logging.DEBUG, + "localstack.request": logging.DEBUG, + "localstack.request.internal": logging.INFO, + "localstack.state.inspect": logging.DEBUG, +} + +trace_internal_log_levels = { + "localstack.aws.accounts": logging.DEBUG, + "localstack.request.internal": logging.DEBUG, +} + + +def setup_logging_for_cli(log_level=logging.INFO): + logging.basicConfig(level=log_level) + + # set log levels of loggers + logging.root.setLevel(log_level) + logging.getLogger("localstack").setLevel(log_level) + for logger, level in default_log_levels.items(): + logging.getLogger(logger).setLevel(level) + + +def get_log_level_from_config(): + # overriding the log level if LS_LOG has been set + if config.LS_LOG: + log_level = str(config.LS_LOG).upper() + if log_level.lower() in constants.TRACE_LOG_LEVELS: + log_level = "DEBUG" + log_level = logging._nameToLevel[log_level] + return log_level + + return logging.DEBUG if config.DEBUG else logging.INFO + + +def setup_logging_from_config(): + log_level = get_log_level_from_config() + setup_logging(log_level) + + if config.is_trace_logging_enabled(): + for name, level in trace_log_levels.items(): + logging.getLogger(name).setLevel(level) + if config.LS_LOG == constants.LS_LOG_TRACE_INTERNAL: + for name, level in trace_internal_log_levels.items(): + logging.getLogger(name).setLevel(level) + + raw_logging_override = config.LOG_LEVEL_OVERRIDES + if raw_logging_override: + logging_overrides = key_value_pairs_to_dict(raw_logging_override) + for logger, level_name in logging_overrides.items(): + level = getattr(logging, level_name, None) + if not level: + raise ValueError( + f"Failed to configure logging overrides ({raw_logging_override}): '{level_name}' is not a valid log level" + ) + logging.getLogger(logger).setLevel(level) + + +def create_default_handler(log_level: int): + log_handler = logging.StreamHandler(stream=sys.stderr) + log_handler.setLevel(log_level) + log_handler.setFormatter(DefaultFormatter()) + log_handler.addFilter(AddFormattedAttributes()) + return log_handler + + +def setup_logging(log_level=logging.INFO) -> None: + """ + Configures the python logging environment for LocalStack. + + :param log_level: the optional log level. 
+ """ + # set create a default handler for the root logger (basically logging.basicConfig but explicit) + log_handler = create_default_handler(log_level) + + # replace any existing handlers + logging.basicConfig(level=log_level, handlers=[log_handler]) + + # disable some logs and warnings + warnings.filterwarnings("ignore") + logging.captureWarnings(True) + + # set log levels of loggers + logging.root.setLevel(log_level) + logging.getLogger("localstack").setLevel(log_level) + for logger, level in default_log_levels.items(): + logging.getLogger(logger).setLevel(level) + + +def setup_hypercorn_logger(hypercorn_config) -> None: + """ + Sets the hypercorn loggers, which are created in a peculiar way, to the localstack settings. + + :param hypercorn_config: a hypercorn.Config object + """ + logger = hypercorn_config.log.access_logger + if logger: + logger.handlers[0].addFilter(AddFormattedAttributes()) + logger.handlers[0].setFormatter(DefaultFormatter()) + + logger = hypercorn_config.log.error_logger + if logger: + logger.handlers[0].addFilter(AddFormattedAttributes()) + logger.handlers[0].setFormatter(DefaultFormatter()) diff --git a/localstack-core/localstack/openapi.yaml b/localstack-core/localstack/openapi.yaml new file mode 100644 index 0000000000000..b3656c3f6f1af --- /dev/null +++ b/localstack-core/localstack/openapi.yaml @@ -0,0 +1,1070 @@ +openapi: 3.1.0 +info: + contact: + email: info@localstack.cloud + name: LocalStack Support + url: https://www.localstack.cloud/contact + summary: The LocalStack REST API exposes functionality related to diagnostics, health + checks, plugins, initialisation hooks, service introspection, and more. + termsOfService: https://www.localstack.cloud/legal/tos + title: LocalStack REST API for Community + version: latest +externalDocs: + description: LocalStack Documentation + url: https://docs.localstack.cloud +servers: + - url: http://{host}:{port} + variables: + port: + default: '4566' + host: + default: 'localhost.localstack.cloud' +components: + parameters: + SesIdFilter: + description: Filter for the `id` field in SES message + in: query + name: id + required: false + schema: + type: string + SesEmailFilter: + description: Filter for the `source` field in SES message + in: query + name: email + required: false + schema: + type: string + SnsAccountId: + description: '`accountId` field of the resource' + in: query + name: accountId + required: false + schema: + default: '000000000000' + type: string + SnsEndpointArn: + description: '`endpointArn` field of the resource' + in: query + name: endpointArn + required: false + schema: + type: string + SnsPhoneNumber: + description: '`phoneNumber` field of the resource' + in: query + name: phoneNumber + required: false + schema: + type: string + SnsRegion: + description: '`region` field of the resource' + in: query + name: region + required: false + schema: + default: us-east-1 + type: string + schemas: + InitScripts: + additionalProperties: false + properties: + completed: + additionalProperties: false + properties: + BOOT: + type: boolean + READY: + type: boolean + SHUTDOWN: + type: boolean + START: + type: boolean + required: + - BOOT + - START + - READY + - SHUTDOWN + type: object + scripts: + items: + additionalProperties: false + properties: + name: + type: string + stage: + type: string + state: + type: string + required: + - stage + - name + - state + type: object + type: array + required: + - completed + - scripts + type: object + InitScriptsStage: + additionalProperties: false + properties: + completed: 
+ type: boolean + scripts: + items: + additionalProperties: false + properties: + name: + type: string + stage: + type: string + state: + type: string + required: + - stage + - name + - state + type: object + type: array + required: + - completed + - scripts + type: object + SESDestination: + type: object + description: Possible destination of a SES message + properties: + ToAddresses: + type: array + items: + type: string + format: email + CcAddresses: + type: array + items: + type: string + format: email + BccAddresses: + type: array + items: + type: string + format: email + additionalProperties: false + SesSentEmail: + additionalProperties: false + properties: + Body: + additionalProperties: false + properties: + html_part: + type: string + text_part: + type: string + required: + - text_part + type: object + Destination: + $ref: '#/components/schemas/SESDestination' + Id: + type: string + RawData: + type: string + Region: + type: string + Source: + type: string + Subject: + type: string + Template: + type: string + TemplateData: + type: string + Timestamp: + type: string + required: + - Id + - Region + - Timestamp + - Source + type: object + SessionInfo: + additionalProperties: false + properties: + edition: + type: string + is_docker: + type: boolean + is_license_activated: + type: boolean + machine_id: + type: string + server_time_utc: + type: string + session_id: + type: string + system: + type: string + uptime: + type: integer + version: + type: string + required: + - version + - edition + - is_license_activated + - session_id + - machine_id + - system + - is_docker + - server_time_utc + - uptime + type: object + SnsSubscriptionTokenError: + additionalProperties: false + properties: + error: + type: string + subscription_arn: + type: string + required: + - error + - subscription_arn + type: object + SNSPlatformEndpointMessage: + type: object + description: Message sent to a platform endpoint via SNS + additionalProperties: false + properties: + TargetArn: + type: string + TopicArn: + type: string + Message: + type: string + MessageAttributes: + type: object + MessageStructure: + type: string + Subject: + type: [string, 'null'] + MessageId: + type: string + SNSMessage: + type: object + description: Message sent via SNS + properties: + PhoneNumber: + type: string + TopicArn: + type: [string, 'null'] + SubscriptionArn: + type: [string, 'null'] + MessageId: + type: string + Message: + type: string + MessageAttributes: + type: object + MessageStructure: + type: [string, 'null'] + Subject: + type: [string, 'null'] + SNSPlatformEndpointMessages: + type: object + description: | + Messages sent to the platform endpoint retrieved via the retrospective endpoint. + The endpoint ARN is the key with a list of messages as value. + additionalProperties: + type: array + items: + $ref: '#/components/schemas/SNSPlatformEndpointMessage' + SMSMessages: + type: object + description: | + SMS messages retrieved via the retrospective endpoint. + The phone number is the key with a list of messages as value. 
+ additionalProperties: + type: array + items: + $ref: '#/components/schemas/SNSMessage' + SNSPlatformEndpointResponse: + type: object + additionalProperties: false + description: Response payload for the /_aws/sns/platform-endpoint-messages endpoint + properties: + region: + type: string + description: "The AWS region, e.g., us-east-1" + platform_endpoint_messages: + $ref: '#/components/schemas/SNSPlatformEndpointMessages' + required: + - region + - platform_endpoint_messages + SNSSMSMessagesResponse: + type: object + additionalProperties: false + description: Response payload for the /_aws/sns/sms-messages endpoint + properties: + region: + type: string + description: "The AWS region, e.g., us-east-1" + sms_messages: + $ref: '#/components/schemas/SMSMessages' + required: + - region + - sms_messages + ReceiveMessageRequest: + type: object + description: https://github.com/boto/botocore/blob/develop/botocore/data/sqs/2012-11-05/service-2.json + required: + - QueueUrl + properties: + QueueUrl: + type: string + format: uri + AttributeNames: + type: array + items: + type: string + MessageSystemAttributeNames: + type: array + items: + type: string + MessageAttributeNames: + type: array + items: + type: string + MaxNumberOfMessages: + type: integer + VisibilityTimeout: + type: integer + WaitTimeSeconds: + type: integer + ReceiveRequestAttemptId: + type: string + ReceiveMessageResult: + type: object + description: https://github.com/boto/botocore/blob/develop/botocore/data/sqs/2012-11-05/service-2.json + properties: + Messages: + type: array + items: + $ref: '#/components/schemas/Message' + Message: + type: object + properties: + MessageId: + type: [string, 'null'] + ReceiptHandle: + type: [string, 'null'] + MD5OfBody: + type: [string, 'null'] + Body: + type: [string, 'null'] + Attributes: + type: object + MessageAttributes: + type: object + CloudWatchMetrics: + additionalProperties: false + properties: + metrics: + items: + additionalProperties: false + properties: + account: + description: Account ID + type: string + d: + description: Dimensions + items: + additionalProperties: false + properties: + n: + description: Dimension name + type: string + v: + description: Dimension value + oneOf: + - type: string + - type: integer + required: + - n + - v + type: object + type: array + n: + description: Metric name + type: string + ns: + description: Namespace + type: string + region: + description: Region name + type: string + t: + description: Timestamp + oneOf: + - type: string + format: date-time + - type: number + v: + description: Metric value + oneOf: + - type: string + - type: integer + required: + - ns + - n + - v + - t + - d + - account + - region + type: object + type: array + required: + - metrics + type: object +paths: + /_aws/cloudwatch/metrics/raw: + get: + description: Retrieve CloudWatch metrics + operationId: get_cloudwatch_metrics + tags: [aws] + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/CloudWatchMetrics' + description: CloudWatch metrics + /_aws/dynamodb/expired: + delete: + description: Delete expired items from TTL-enabled DynamoDB tables + operationId: delete_ddb_expired_items + tags: [aws] + responses: + '200': + content: + application/json: + schema: + additionalProperties: false + properties: + ExpiredItems: + description: Number of expired items that were deleted + type: integer + required: + - ExpiredItems + type: object + description: Operation was successful + /_aws/events/rules/{rule_arn}/trigger: + get: + description: 
Trigger a scheduled EventBridge rule + operationId: trigger_event_bridge_rule + tags: [aws] + parameters: + - description: EventBridge rule ARN + in: path + name: rule_arn + required: true + schema: + type: string + responses: + '200': + description: EventBridge rule was triggered + '404': + description: Not found + /_aws/lambda/init: + get: + description: Retrieve Lambda runtime init binary + operationId: get_lambda_init + tags: [aws] + responses: + '200': + content: + application/octet-stream: {} + description: Lambda runtime init binary + /_aws/lambda/runtimes: + get: + description: List available Lambda runtimes + operationId: get_lambda_runtimes + tags: [aws] + parameters: + - in: query + name: filter + required: false + schema: + default: supported + enum: + - all + - deprecated + - supported + type: string + responses: + '200': + content: + application/json: + schema: + additionalProperties: false + properties: + Runtimes: + items: + type: string + type: array + required: + - Runtimes + type: object + description: Available Lambda runtimes + /_aws/ses: + delete: + description: Discard sent SES messages + operationId: discard_ses_messages + tags: [aws] + parameters: + - $ref: '#/components/parameters/SesIdFilter' + responses: + '204': + description: Message was successfully discarded + get: + description: Retrieve sent SES messages + operationId: get_ses_messages + tags: [aws] + parameters: + - $ref: '#/components/parameters/SesIdFilter' + - $ref: '#/components/parameters/SesEmailFilter' + responses: + '200': + content: + application/json: + schema: + additionalProperties: false + properties: + messages: + items: + $ref: '#/components/schemas/SesSentEmail' + type: array + required: + - messages + type: object + description: List of sent messages + /_aws/sns/platform-endpoint-messages: + delete: + description: Discard the messages published to a platform endpoint via SNS + operationId: discard_sns_endpoint_messages + tags: [aws] + parameters: + - $ref: '#/components/parameters/SnsAccountId' + - $ref: '#/components/parameters/SnsRegion' + - $ref: '#/components/parameters/SnsEndpointArn' + responses: + '204': + description: Platform endpoint message was discarded + get: + description: Retrieve the messages sent to a platform endpoint via SNS + operationId: get_sns_endpoint_messages + tags: [aws] + parameters: + - $ref: '#/components/parameters/SnsAccountId' + - $ref: '#/components/parameters/SnsRegion' + - $ref: '#/components/parameters/SnsEndpointArn' + responses: + '200': + content: + application/json: + schema: + $ref: "#/components/schemas/SNSPlatformEndpointResponse" + description: SNS messages via retrospective access + /_aws/sns/sms-messages: + delete: + description: Discard SNS SMS messages + operationId: discard_sns_sms_messages + tags: [aws] + parameters: + - $ref: '#/components/parameters/SnsAccountId' + - $ref: '#/components/parameters/SnsRegion' + - $ref: '#/components/parameters/SnsPhoneNumber' + responses: + '204': + description: SMS message was discarded + get: + description: Retrieve SNS SMS messages + operationId: get_sns_sms_messages + tags: [aws] + parameters: + - $ref: '#/components/parameters/SnsAccountId' + - $ref: '#/components/parameters/SnsRegion' + - $ref: '#/components/parameters/SnsPhoneNumber' + responses: + '200': + content: + application/json: + schema: + $ref: "#/components/schemas/SNSSMSMessagesResponse" + description: SNS messages via retrospective access + /_aws/sns/subscription-tokens/{subscription_arn}: + get: + description: Retrieve SNS 
subscription token for confirmation + operationId: get_sns_subscription_token + tags: [aws] + parameters: + - description: '`subscriptionArn` resource of subscription token' + in: path + name: subscription_arn + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + additionalProperties: false + properties: + subscription_arn: + type: string + subscription_token: + type: string + required: + - subscription_token + - subscription_arn + type: object + description: Subscription token + '400': + content: + application/json: + schema: + $ref: '#/components/schemas/SnsSubscriptionTokenError' + description: Bad request + '404': + content: + application/json: + schema: + $ref: '#/components/schemas/SnsSubscriptionTokenError' + description: Not found + /_aws/sqs/messages: + get: + description: List SQS queue messages without side effects + operationId: list_all_sqs_messages + tags: [aws] + parameters: + - description: SQS queue URL + in: query + name: QueueUrl + required: false + schema: + type: string + responses: + '200': + content: + text/xml: + schema: + $ref: '#/components/schemas/ReceiveMessageResult' + application/json: + schema: + $ref: '#/components/schemas/ReceiveMessageResult' + description: SQS queue messages + '400': + content: + text/xml: {} + application/json: {} + description: Bad request + '404': + content: + text/xml: {} + application/json: {} + description: Not found + post: + summary: Retrieves one or more messages from the specified queue. + description: | + This API receives messages from an SQS queue. + https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html#API_ReceiveMessage_ResponseSyntax + operationId: receive_message + requestBody: + required: true + content: + application/x-www-form-urlencoded: + schema: + $ref: '#/components/schemas/ReceiveMessageRequest' + application/json: + schema: + $ref: '#/components/schemas/ReceiveMessageRequest' + responses: + '200': + content: + text/xml: {} + application/json: + schema: + $ref: '#/components/schemas/ReceiveMessageResult' + description: SQS queue messages + '400': + content: + text/xml: {} + application/json: {} + description: Bad request + '404': + content: + text/xml: {} + application/json: {} + description: Not found + /_aws/sqs/messages/{region}/{account_id}/{queue_name}: + get: + description: List SQS messages without side effects + operationId: list_sqs_messages + tags: [aws] + parameters: + - description: SQS queue region + in: path + name: region + required: true + schema: + type: string + - description: SQS queue account ID + in: path + name: account_id + required: true + schema: + type: string + - description: SQS queue name + in: path + name: queue_name + required: true + schema: + type: string + responses: + '200': + content: + text/xml: {} + application/json: + schema: + $ref: '#/components/schemas/ReceiveMessageResult' + description: SQS queue messages + '400': + content: + text/xml: {} + application/json: {} + description: Bad request + '404': + content: + text/xml: {} + application/json: {} + description: Not found + /_localstack/config: + get: + description: Get current LocalStack configuration + operationId: get_config + tags: [localstack] + responses: + '200': + content: + application/json: + schema: + type: object + description: Current LocalStack configuration + post: + description: Configuration option to update with new value + operationId: update_config_option + tags: [localstack] + requestBody: + content: + 
application/json: + schema: + additionalProperties: false + properties: + value: + type: + - number + - string + variable: + pattern: ^[_a-zA-Z0-9]+$ + type: string + required: + - variable + - value + type: object + required: true + responses: + '200': + content: + application/json: + schema: + additionalProperties: false + properties: + value: + type: + - number + - string + variable: + type: string + required: + - variable + - value + type: object + description: Configuration option is updated + '400': + content: + application/json: {} + description: Bad request + /_localstack/diagnose: + get: + description: Get diagnostics report + operationId: get_diagnostics + tags: [localstack] + responses: + '200': + content: + application/json: + schema: + additionalProperties: false + properties: + config: + type: object + docker-dependent-image-hosts: + type: object + docker-inspect: + type: object + file-tree: + type: object + important-endpoints: + type: object + info: + $ref: '#/components/schemas/SessionInfo' + logs: + additionalProperties: false + properties: + docker: + type: string + required: + - docker + type: object + services: + type: object + usage: + type: object + version: + additionalProperties: false + properties: + host: + additionalProperties: false + properties: + kernel: + type: string + required: + - kernel + type: object + image-version: + additionalProperties: false + properties: + created: + type: string + id: + type: string + sha256: + type: string + tag: + type: string + required: + - id + - sha256 + - tag + - created + type: object + localstack-version: + additionalProperties: false + properties: + build-date: + type: + - string + - 'null' + build-git-hash: + type: + - string + - 'null' + build-version: + type: + - string + - 'null' + required: + - build-date + - build-git-hash + - build-version + type: object + required: + - image-version + - localstack-version + - host + type: object + required: + - version + - info + - services + - config + - docker-inspect + - docker-dependent-image-hosts + - file-tree + - important-endpoints + - logs + - usage + type: object + description: Diagnostics report + /_localstack/health: + get: + description: Get available LocalStack features and AWS services + operationId: get_features_and_services + tags: [localstack] + parameters: + - allowEmptyValue: true + in: query + name: reload + required: false + schema: + type: string + responses: + '200': + content: + application/json: + schema: + additionalProperties: false + properties: + edition: + enum: + - community + - pro + - enterprise + - unknown + type: string + features: + type: object + services: + type: object + version: + type: string + required: + - edition + - services + - version + type: object + description: Available LocalStack features and AWS services + head: + tags: [localstack] + operationId: health + responses: + '200': + content: + text/plain: {} + description: '' + post: + description: Restart or terminate LocalStack session + operationId: manage_session + tags: [localstack] + requestBody: + content: + application/json: + schema: + additionalProperties: false + properties: + action: + enum: + - restart + - kill + type: string + required: + - action + type: object + description: Action to perform + required: true + responses: + '200': + content: + text/plain: {} + description: Action was successful + '400': + content: + text/plain: {} + description: Bad request + put: + description: Store arbitrary data to in-memory state + operationId: store_data + tags: [localstack] 
+ requestBody: + content: + application/json: + schema: + type: object + description: Data to save + responses: + '200': + content: + application/json: + schema: + additionalProperties: false + properties: + status: + type: string + required: + - status + type: object + description: Data was saved + /_localstack/info: + get: + description: Get information about the current LocalStack session + operationId: get_session_info + tags: [localstack] + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/SessionInfo' + description: Information about the current LocalStack session + /_localstack/init: + get: + description: Get information about init scripts + operationId: get_init_script_info + tags: [localstack] + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/InitScripts' + description: Information about init scripts + /_localstack/init/{stage}: + get: + description: Get information about init scripts in a specific stage + operationId: get_init_script_info_stage + tags: [localstack] + parameters: + - in: path + name: stage + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/InitScriptsStage' + description: Information about init scripts in a specific stage + /_localstack/plugins: + get: + description: '' + operationId: get_plugins + tags: [localstack] + responses: + '200': + content: + application/json: {} + description: '' + /_localstack/usage: + get: + description: '' + operationId: get_usage + tags: [localstack] + responses: + '200': + content: + application/json: {} + description: ''
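
As an aside, a short sketch of how two of the endpoints documented above can be exercised from Python, assuming a running instance on the default port (the use of `requests` is illustrative):

```python
import requests

base = "http://localhost:4566"

# the raw spec, served by the swagger plugin registered earlier in this patch
spec = requests.get(f"{base}/openapi.yaml").text

# one of the documented endpoints: /_localstack/health
health = requests.get(f"{base}/_localstack/health").json()
print(health["edition"], health["version"], len(health["services"]))
```
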
diff --git a/localstack/packages/__init__.py b/localstack-core/localstack/packages/__init__.py similarity index 100% rename from localstack/packages/__init__.py rename to localstack-core/localstack/packages/__init__.py diff --git a/localstack-core/localstack/packages/api.py b/localstack-core/localstack/packages/api.py new file mode 100644 index 0000000000000..bcc8add9577c5 --- /dev/null +++ b/localstack-core/localstack/packages/api.py @@ -0,0 +1,415 @@ +import abc +import functools +import logging +import os +from collections import defaultdict +from enum import Enum +from inspect import getmodule +from threading import RLock +from typing import Any, Callable, Generic, List, Optional, ParamSpec, TypeVar + +from plux import Plugin, PluginManager, PluginSpec # type: ignore + +from localstack import config + +LOG = logging.getLogger(__name__) + + +class PackageException(Exception): + """Basic exception indicating that a package-specific exception occurred.""" + + pass + + +class NoSuchVersionException(PackageException): + """Exception indicating that a requested installer version is not available / supported.""" + + def __init__(self, package: str | None = None, version: str | None = None): + message = "Unable to find requested version" + if package and version: + message = f"Unable to find requested version '{version}' for package '{package}'" + super().__init__(message) + + +class InstallTarget(Enum): + """ + Different installation targets. + Attention: + - These targets are directly used in the LPM API and are therefore part of a public API! + - The order of the entries in the enum defines the default lookup order when looking for package installations. + + These targets refer to the directories in config#Directories. + - VAR_LIBS: Used for packages installed at runtime. They are installed in a host-mounted volume. + This directory / these installations persist across multiple containers. + - STATIC_LIBS: Used for packages installed at build time. They are installed in a non-host-mounted volume. + This directory is re-created whenever a container is recreated. + """ + + VAR_LIBS = config.dirs.var_libs + STATIC_LIBS = config.dirs.static_libs + + +class PackageInstaller(abc.ABC): + """ + Base class for a specific installer. + An instance of an installer manages the installation of a specific Package (in a specific version, if there are + multiple versions). + """ + + def __init__(self, name: str, version: str, install_lock: Optional[RLock] = None): + """ + :param name: technical package name, e.g. "opensearch" + :param version: version of the package to install + :param install_lock: custom lock which should be used for this package installer instance for the + complete #install call. Defaults to a per-instance reentrant lock (RLock). + Package instances create one installer per version. Therefore, by default, the lock + ensures that package installations of the same package and version are mutually exclusive. + """ + self.name = name + self.version = version + self.install_lock = install_lock or RLock() + self._setup_for_target: dict[InstallTarget, bool] = defaultdict(lambda: False) + + def install(self, target: Optional[InstallTarget] = None) -> None: + """ + Performs the package installation. + + :param target: preferred installation target. Default is VAR_LIBS. + :return: None + :raises PackageException: if the installation fails + """ + try: + if not target: + target = InstallTarget.VAR_LIBS + # We have to acquire the lock before checking if the package is installed, as the is_installed check + # is _only_ reliable if no other thread is currently actually installing + with self.install_lock: + # Skip the installation if it's already installed + if not self.is_installed(): + LOG.debug("Starting installation of %s %s...", self.name, self.version) + self._prepare_installation(target) + self._install(target) + self._post_process(target) + LOG.debug("Installation of %s %s finished.", self.name, self.version) + else: + LOG.debug( + "Installation of %s %s skipped (already installed).", + self.name, + self.version, + ) + if not self._setup_for_target[target]: + LOG.debug("Performing runtime setup for already installed package.") + self._setup_existing_installation(target) + self._setup_for_target[target] = True + except PackageException as e: + raise e + except Exception as e: + raise PackageException(f"Installation of {self.name} {self.version} failed.") from e + + def is_installed(self) -> bool: + """ + Checks if the package is already installed. + + :return: True if the package is already installed (i.e. an installation is not necessary). + """ + return self.get_installed_dir() is not None + + def get_installed_dir(self) -> str | None: + """ + Returns the directory of an existing installation. The directory can differ based on the installation target + and version. + :return: str representation of the installation directory path or None if the package is not installed anywhere + """ + for target in InstallTarget: + directory = self._get_install_dir(target) + if directory and os.path.exists(self._get_install_marker_path(directory)): + return directory + return None + + def _get_install_dir(self, target: InstallTarget) -> str: + """ + Builds the installation directory for a specific target.
+ :param target: to create the installation directory path for + :return: str representation of the installation directory for the given target + """ + return os.path.join(target.value, self.name, self.version) + + def _get_install_marker_path(self, install_dir: str) -> str: + """ + Builds the path for a specific "marker" whose presence indicates that the package has been installed + successfully in the given directory. + + :param install_dir: base path for the check (e.g. /var/lib/localstack/lib/dynamodblocal/latest/) + :return: path which should be checked to indicate if the package has been installed successfully + (e.g. /var/lib/localstack/lib/dynamodblocal/latest/DynamoDBLocal.jar) + """ + raise NotImplementedError() + + def _setup_existing_installation(self, target: InstallTarget) -> None: + """ + Internal function to perform the setup for an existing installation, e.g. adding a path to an environment. + This is only necessary for certain installers (like the PythonPackageInstaller). + This function will _always_ be executed _exactly_ once within a Python session for a specific installer + instance and target, if #install is called for the respective target. + :param target: of the installation + :return: None + """ + pass + + def _prepare_installation(self, target: InstallTarget) -> None: + """ + Internal function to prepare an installation, e.g. by downloading some data or installing an OS package repo. + Can be implemented by specific installers. + :param target: of the installation + :return: None + """ + pass + + def _install(self, target: InstallTarget) -> None: + """ + Internal function to perform the actual installation. + Must be implemented by specific installers. + :param target: of the installation + :return: None + """ + raise NotImplementedError() + + def _post_process(self, target: InstallTarget) -> None: + """ + Internal function to perform some post-processing, e.g. patching an installation or creating symlinks. + :param target: of the installation + :return: None + """ + pass + + +# With Python 3.13 we should be able to set PackageInstaller as the default +# https://typing.python.org/en/latest/spec/generics.html#type-parameter-defaults +T = TypeVar("T", bound=PackageInstaller) + + +class Package(abc.ABC, Generic[T]): + """ + A Package defines a specific kind of software, mostly used as backends or supporting system for service + implementations. + """ + + def __init__(self, name: str, default_version: str): + """ + :param name: Human readable name of the package, e.g. "PostgreSQL" + :param default_version: Default version of the package which is used for installations if no version is defined + """ + self.name = name + self.default_version = default_version + + def get_installed_dir(self, version: str | None = None) -> str | None: + """ + Finds a directory where the package (in the specific version) is installed. + :param version: of the package to look for. If None, the default version of the package is used. + :return: str representation of the path to the existing installation directory or None if the package in this + version is not yet installed. + """ + return self.get_installer(version).get_installed_dir() + + def install(self, version: str | None = None, target: Optional[InstallTarget] = None) -> None: + """ + Installs the package in the given version in the preferred target location. + :param version: version of the package to install. If None, the default version of the package will be used. + :param target: preferred installation target.
If None, the var_libs directory is used.
+        :raises NoSuchVersionException: If the given version is not supported.
+        """
+        self.get_installer(version).install(target)
+
+    @functools.lru_cache()
+    def get_installer(self, version: str | None = None) -> T:
+        """
+        Returns the installer instance for a specific version of the package.
+
+        It is important that this be LRU cached. Installers have a mutex lock to prevent races, and it is necessary
+        that this method returns the same installer instance for a given version.
+
+        :param version: version of the package to install. If None, the default version of the package will be used.
+        :return: PackageInstaller instance for the given version.
+        :raises NoSuchVersionException: If the given version is not supported.
+        """
+        if not version:
+            return self.get_installer(self.default_version)
+        if version not in self.get_versions():
+            raise NoSuchVersionException(package=self.name, version=version)
+        return self._get_installer(version)
+
+    def get_versions(self) -> List[str]:
+        """
+        :return: List of all versions available for this package.
+        """
+        raise NotImplementedError()
+
+    def _get_installer(self, version: str) -> T:
+        """
+        Internal lookup function which needs to be implemented by specific packages.
+        It creates PackageInstaller instances for the specific version.
+
+        :param version: to find the installer for
+        :return: PackageInstaller instance responsible for installing the given version of the package.
+        """
+        raise NotImplementedError()
+
+    def __str__(self) -> str:
+        return self.name
+
+
+class MultiPackageInstaller(PackageInstaller):
+    """
+    PackageInstaller implementation which is composed of multiple package installers.
+    """
+
+    def __init__(self, name: str, version: str, package_installer: List[PackageInstaller]):
+        """
+        :param name: of the (multi-)package installer
+        :param version: of this (multi-)package installer
+        :param package_installer: List of installers this multi-package installer consists of
+        """
+        super().__init__(name=name, version=version)
+
+        assert isinstance(package_installer, list)
+        assert len(package_installer) > 0
+        self.package_installer = package_installer
+
+    def install(self, target: Optional[InstallTarget] = None) -> None:
+        """
+        Installs the different packages this installer is composed of.
+
+        :param target: which defines where to install the packages.
+        :return: None
+        """
+        for package_installer in self.package_installer:
+            package_installer.install(target=target)
+
+    def get_installed_dir(self) -> str | None:
+        # By default, use the installed-dir of the first package
+        return self.package_installer[0].get_installed_dir()
+
+    def _install(self, target: InstallTarget) -> None:
+        # This package installer actually only calls other installers, we pass here
+        pass
+
+    def _get_install_dir(self, target: InstallTarget) -> str:
+        # By default, use the install-dir of the first package
+        return self.package_installer[0]._get_install_dir(target)
+
+    def _get_install_marker_path(self, install_dir: str) -> str:
+        # By default, use the install-marker-path of the first package
+        return self.package_installer[0]._get_install_marker_path(install_dir)
+
+
+PLUGIN_NAMESPACE = "localstack.packages"
+
+
+class PackagesPlugin(Plugin):  # type: ignore[misc]
+    """
+    Plugin implementation for Package plugins.
+    A package plugin exposes a specific package instance.
+ """ + + api: str + name: str + + def __init__( + self, + name: str, + scope: str, + get_package: Callable[[], Package[PackageInstaller] | List[Package[PackageInstaller]]], + should_load: Callable[[], bool] | None = None, + ) -> None: + super().__init__() + self.name = name + self.scope = scope + self._get_package = get_package + self._should_load = should_load + + def should_load(self) -> bool: + if self._should_load: + return self._should_load() + return True + + def get_package(self) -> Package[PackageInstaller]: + """ + :return: returns the package instance of this package plugin + """ + return self._get_package() # type: ignore[return-value] + + +class NoSuchPackageException(PackageException): + """Exception raised by the PackagesPluginManager to indicate that a package / version is not available.""" + + pass + + +class PackagesPluginManager(PluginManager[PackagesPlugin]): # type: ignore[misc] + """PluginManager which simplifies the loading / access of PackagesPlugins and their exposed package instances.""" + + def __init__(self) -> None: + super().__init__(PLUGIN_NAMESPACE) + + def get_all_packages(self) -> list[tuple[str, str, Package[PackageInstaller]]]: + return sorted( + [(plugin.name, plugin.scope, plugin.get_package()) for plugin in self.load_all()] + ) + + def get_packages( + self, package_names: list[str], version: Optional[str] = None + ) -> list[Package[PackageInstaller]]: + # Plugin names are unique, but there could be multiple packages with the same name in different scopes + plugin_specs_per_name = defaultdict(list) + # Plugin names have the format "/", build a dict of specs per package name for the lookup + for plugin_spec in self.list_plugin_specs(): + (package_name, _, _) = plugin_spec.name.rpartition("/") + plugin_specs_per_name[package_name].append(plugin_spec) + + package_instances: list[Package[PackageInstaller]] = [] + for package_name in package_names: + plugin_specs = plugin_specs_per_name.get(package_name) + if not plugin_specs: + raise NoSuchPackageException( + f"unable to locate installer for package {package_name}" + ) + for plugin_spec in plugin_specs: + package_instance = self.load(plugin_spec.name).get_package() + package_instances.append(package_instance) + if version and version not in package_instance.get_versions(): + raise NoSuchPackageException( + f"unable to locate installer for package {package_name} and version {version}" + ) + + return package_instances + + +P = ParamSpec("P") +T2 = TypeVar("T2") + + +def package( + name: str | None = None, + scope: str = "community", + should_load: Optional[Callable[[], bool]] = None, +) -> Callable[[Callable[[], Package[Any] | list[Package[Any]]]], PluginSpec]: + """ + Decorator for marking methods that create Package instances as a PackagePlugin. + Methods marked with this decorator are discoverable as a PluginSpec within the namespace "localstack.packages", + with the name ":". If api is not explicitly specified, then the parent module name is used as + service name. 
+ """ + + def wrapper(fn: Callable[[], Package[Any] | list[Package[Any]]]) -> PluginSpec: + _name = name or getmodule(fn).__name__.split(".")[-2] # type: ignore[union-attr] + + @functools.wraps(fn) + def factory() -> PackagesPlugin: + return PackagesPlugin(name=_name, scope=scope, get_package=fn, should_load=should_load) + + return PluginSpec(PLUGIN_NAMESPACE, f"{_name}/{scope}", factory=factory) + + return wrapper + + +# TODO remove (only used for migrating to new #package decorator) +packages = package diff --git a/localstack-core/localstack/packages/core.py b/localstack-core/localstack/packages/core.py new file mode 100644 index 0000000000000..5b8996deaa844 --- /dev/null +++ b/localstack-core/localstack/packages/core.py @@ -0,0 +1,375 @@ +import logging +import os +import re +from abc import ABC +from functools import lru_cache +from sys import version_info +from typing import Any, Optional, Tuple + +import requests + +from localstack import config + +from ..constants import LOCALSTACK_VENV_FOLDER, MAVEN_REPO_URL +from ..utils.archives import download_and_extract +from ..utils.files import chmod_r, chown_r, mkdir, rm_rf +from ..utils.http import download +from ..utils.run import is_root, run +from ..utils.venv import VirtualEnvironment +from .api import InstallTarget, PackageException, PackageInstaller + +LOG = logging.getLogger(__name__) + + +class SystemNotSupportedException(PackageException): + """Exception indicating that the current system is not allowed.""" + + pass + + +class ExecutableInstaller(PackageInstaller, ABC): + """ + This installer simply adds a clean interface for accessing a downloaded executable directly + """ + + def get_executable_path(self) -> str | None: + """ + :return: the path to the downloaded binary or None if it's not yet downloaded / installed. + """ + install_dir = self.get_installed_dir() + if install_dir: + return self._get_install_marker_path(install_dir) + return None + + +class DownloadInstaller(ExecutableInstaller): + def __init__(self, name: str, version: str): + super().__init__(name, version) + + def _get_download_url(self) -> str: + raise NotImplementedError() + + def _get_install_marker_path(self, install_dir: str) -> str: + url = self._get_download_url() + binary_name = os.path.basename(url) + return os.path.join(install_dir, binary_name) + + def _install(self, target: InstallTarget) -> None: + target_directory = self._get_install_dir(target) + mkdir(target_directory) + download_url = self._get_download_url() + target_path = self._get_install_marker_path(target_directory) + download(download_url, target_path) + + +class ArchiveDownloadAndExtractInstaller(ExecutableInstaller): + def __init__(self, name: str, version: str, extract_single_directory: bool = False): + """ + :param name: technical package name, f.e. "opensearch" + :param version: version of the package to install + :param extract_single_directory: whether to extract files from single root folder in the archive + """ + super().__init__(name, version) + self.extract_single_directory = extract_single_directory + + def _get_install_marker_path(self, install_dir: str) -> str: + raise NotImplementedError() + + def _get_download_url(self) -> str: + raise NotImplementedError() + + def get_installed_dir(self) -> str | None: + installed_dir = super().get_installed_dir() + subdir = self._get_archive_subdir() + + # If the specific installer defines a subdirectory, we return the subdirectory. + # f.e. 
/var/lib/localstack/lib/amazon-mq/5.16.5/apache-activemq-5.16.5/ + if installed_dir and subdir: + return os.path.join(installed_dir, subdir) + + return installed_dir + + def _get_archive_subdir(self) -> str | None: + """ + :return: name of the subdirectory contained in the archive or none if the package content is at the root level + of the archive + """ + return None + + def get_executable_path(self) -> str | None: + subdir = self._get_archive_subdir() + if subdir is None: + return super().get_executable_path() + else: + install_dir = self.get_installed_dir() + if install_dir: + install_dir = install_dir[: -len(subdir)] + return self._get_install_marker_path(install_dir) + return None + + def _install(self, target: InstallTarget) -> None: + target_directory = self._get_install_dir(target) + mkdir(target_directory) + download_url = self._get_download_url() + archive_name = os.path.basename(download_url) + archive_path = os.path.join(config.dirs.tmp, archive_name) + download_and_extract( + download_url, + retries=3, + tmp_archive=archive_path, + target_dir=target_directory, + ) + rm_rf(archive_path) + if self.extract_single_directory: + dir_contents = os.listdir(target_directory) + if len(dir_contents) != 1: + return + target_subdir = os.path.join(target_directory, dir_contents[0]) + if not os.path.isdir(target_subdir): + return + os.rename(target_subdir, f"{target_directory}.backup") + rm_rf(target_directory) + os.rename(f"{target_directory}.backup", target_directory) + + +class PermissionDownloadInstaller(DownloadInstaller, ABC): + def _install(self, target: InstallTarget) -> None: + super()._install(target) + chmod_r(self.get_executable_path(), 0o777) # type: ignore[arg-type] + + +class GitHubReleaseInstaller(PermissionDownloadInstaller): + """ + Installer which downloads an asset from a GitHub project's tag. + """ + + def __init__(self, name: str, tag: str, github_slug: str): + super().__init__(name, tag) + self.github_tag_url = ( + f"https://api.github.com/repos/{github_slug}/releases/tags/{self.version}" + ) + + @lru_cache() + def _get_download_url(self) -> str: + asset_name = self._get_github_asset_name() + # try to use a token when calling the GH API for increased API rate limits + headers = None + gh_token = os.environ.get("GITHUB_API_TOKEN") + if gh_token: + headers = {"authorization": f"Bearer {gh_token}"} + response = requests.get(self.github_tag_url, headers=headers) + if not response.ok: + raise PackageException( + f"Could not get list of releases from {self.github_tag_url}: {response.text}" + ) + github_release = response.json() + download_url = None + for asset in github_release.get("assets", []): + # find the correct binary in the release + if asset["name"] == asset_name: + download_url = asset["browser_download_url"] + break + if download_url is None: + raise PackageException( + f"Could not find required binary {asset_name} in release {self.github_tag_url}" + ) + return download_url + + def _get_install_marker_path(self, install_dir: str) -> str: + # Use the GitHub asset name instead of the download URL (since the download URL needs to be fetched online). + return os.path.join(install_dir, self._get_github_asset_name()) + + def _get_github_asset_name(self) -> str: + """ + Determines the name of the asset to download. + The asset name must be determinable without having any online data (because it is used in offline scenarios to + determine if the package is already installed). 
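+        For example (illustrative), an installer for a hypothetical "mytool" binary could return a
+        statically derived name such as f"mytool-{self.version}-linux-amd64".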
+
+        :return: name of the asset to download from the GitHub project's tag / version
+        """
+        raise NotImplementedError()
+
+
+class NodePackageInstaller(ExecutableInstaller):
+    """Package installer for Node / NPM packages."""
+
+    def __init__(
+        self,
+        package_name: str,
+        version: str,
+        package_spec: Optional[str] = None,
+        main_module: str = "main.js",
+    ):
+        """
+        Initializes the Node / NPM package installer.
+        :param package_name: npm package name
+        :param version: version of the package which should be installed
+        :param package_spec: optional package spec for the installation.
+                             If not set, the package name and version will be used for the installation.
+        :param main_module: main module file of the package
+        """
+        super().__init__(package_name, version)
+        self.package_name = package_name
+        # If the package spec is not explicitly set (f.e. to a repo), we build it and pin the version
+        self.package_spec = package_spec or f"{self.package_name}@{version}"
+        self.main_module = main_module
+
+    def _get_install_marker_path(self, install_dir: str) -> str:
+        return os.path.join(install_dir, "node_modules", self.package_name, self.main_module)
+
+    def _install(self, target: InstallTarget) -> None:
+        target_dir = self._get_install_dir(target)
+
+        run(
+            [
+                "npm",
+                "install",
+                "--prefix",
+                target_dir,
+                self.package_spec,
+            ]
+        )
+        # npm 9+ does _not_ set the ownership of files anymore if run as root
+        # - https://github.blog/changelog/2022-10-24-npm-v9-0-0-released/
+        # - https://github.com/npm/cli/pull/5704
+        # - https://github.com/localstack/localstack/issues/7620
+        if is_root():
+            # if the package was installed as root, set the ownership manually
+            LOG.debug("Setting ownership root:root on %s", target_dir)
+            chown_r(target_dir, "root")
+
+
+LOCALSTACK_VENV = VirtualEnvironment(LOCALSTACK_VENV_FOLDER)
+
+
+class PythonPackageInstaller(PackageInstaller):
+    """
+    Package installer which allows the runtime installation of additional Python packages used by certain services.
+    f.e. vosk as offline speech recognition toolkit (which is ~7MB in size compressed and ~26MB uncompressed).
+    """
+
+    normalized_name: str
+    """Normalized package name according to PEP 503."""
+
+    def __init__(self, name: str, version: str, *args: Any, **kwargs: Any):
+        super().__init__(name, version, *args, **kwargs)
+        self.normalized_name = self._normalize_package_name(name)
+
+    def _normalize_package_name(self, name: str) -> str:
+        """
+        Normalizes the Python package name according to PEP 503.
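+        For example, "Foo.Bar_baz" normalizes to "foo-bar-baz".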
+        https://packaging.python.org/en/latest/specifications/name-normalization/#name-normalization
+        """
+        return re.sub(r"[-_.]+", "-", name).lower()
+
+    def _get_install_dir(self, target: InstallTarget) -> str:
+        # all python installers share a venv
+        return os.path.join(target.value, "python-packages")
+
+    def _get_install_marker_path(self, install_dir: str) -> str:
+        python_subdir = f"python{version_info[0]}.{version_info[1]}"
+        dist_info_dir = f"{self.normalized_name}-{self.version}.dist-info"
+        # the METADATA file is mandatory, use it as install marker
+        return os.path.join(
+            install_dir, "lib", python_subdir, "site-packages", dist_info_dir, "METADATA"
+        )
+
+    def _get_venv(self, target: InstallTarget) -> VirtualEnvironment:
+        venv_dir = self._get_install_dir(target)
+        return VirtualEnvironment(venv_dir)
+
+    def _prepare_installation(self, target: InstallTarget) -> None:
+        # make sure the venv is properly set up before installing the package
+        venv = self._get_venv(target)
+        if not venv.exists:
+            LOG.info("creating virtual environment at %s", venv.venv_dir)
+            venv.create()
+            LOG.info("adding localstack venv path %s", venv.venv_dir)
+            venv.add_pth("localstack-venv", LOCALSTACK_VENV)
+        LOG.debug("injecting venv into path %s", venv.venv_dir)
+        venv.inject_to_sys_path()
+
+    def _install(self, target: InstallTarget) -> None:
+        venv = self._get_venv(target)
+        python_bin = os.path.join(venv.venv_dir, "bin/python")
+
+        # run pip via the python binary of the venv
+        run([python_bin, "-m", "pip", "install", f"{self.name}=={self.version}"], print_error=False)
+
+    def _setup_existing_installation(self, target: InstallTarget) -> None:
+        """If the venv is already present, it just needs to be initialized once."""
+        self._prepare_installation(target)
+
+
+class MavenDownloadInstaller(DownloadInstaller):
+    """The package URL can be copied directly from the Maven Central repository, and the first package URL
+    defines the package name and version.
+    Example package_url: pkg:maven/software.amazon.event.ruler/event-ruler@1.7.3
+    => name: event-ruler
+    => version: 1.7.3
+    """
+
+    # Example: software.amazon.event.ruler
+    group_id: str
+    # Example: event-ruler
+    artifact_id: str
+
+    # Custom installation directory
+    install_dir_suffix: str | None
+
+    def __init__(self, package_url: str, install_dir_suffix: str | None = None):
+        self.group_id, self.artifact_id, version = parse_maven_package_url(package_url)
+        super().__init__(self.artifact_id, version)
+        self.install_dir_suffix = install_dir_suffix
+
+    def _get_download_url(self) -> str:
+        group_id_path = self.group_id.replace(".", "/")
+        return f"{MAVEN_REPO_URL}/{group_id_path}/{self.artifact_id}/{self.version}/{self.artifact_id}-{self.version}.jar"
+
+    def _get_install_dir(self, target: InstallTarget) -> str:
+        """Allows overwriting the default installation directory.
+        This enables downloading transitive dependencies into the same directory.
+        """
+        if self.install_dir_suffix:
+            return os.path.join(target.value, self.install_dir_suffix)
+        else:
+            return super()._get_install_dir(target)
+
+
+class MavenPackageInstaller(MavenDownloadInstaller):
+    """Package installer for downloading Maven JARs, including optional dependencies.
+    The first Maven package is used as the main LPM package, and the other dependencies are installed additionally.
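+    For example (illustrative; the dependency shown is hypothetical), the main package plus one
+    transitive dependency:
+
+        MavenPackageInstaller(
+            "pkg:maven/software.amazon.event.ruler/event-ruler@1.7.3",
+            "pkg:maven/com.fasterxml.jackson.core/jackson-core@2.16.2",
+        )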
+ Follows the Maven naming conventions: https://maven.apache.org/guides/mini/guide-naming-conventions.html + """ + + # Installers for Maven dependencies + dependencies: list[MavenDownloadInstaller] + + def __init__(self, *package_urls: str): + super().__init__(package_urls[0]) + self.dependencies = [] + + # Create installers for dependencies + for package_url in package_urls[1:]: + install_dir_suffix = os.path.join(self.name, self.version) + self.dependencies.append(MavenDownloadInstaller(package_url, install_dir_suffix)) + + def _install(self, target: InstallTarget) -> None: + # Install all dependencies first + for dependency in self.dependencies: + dependency._install(target) + # Install the main Maven package once all dependencies are installed. + # This main package indicates whether all dependencies are installed. + super()._install(target) + + +def parse_maven_package_url(package_url: str) -> Tuple[str, str, str]: + """Example: parse_maven_package_url("pkg:maven/software.amazon.event.ruler/event-ruler@1.7.3") + -> software.amazon.event.ruler, event-ruler, 1.7.3 + """ + parts = package_url.split("/") + group_id = parts[1] + sub_parts = parts[2].split("@") + artifact_id = sub_parts[0] + version = sub_parts[1] + return group_id, artifact_id, version diff --git a/localstack/packages/debugpy.py b/localstack-core/localstack/packages/debugpy.py similarity index 82% rename from localstack/packages/debugpy.py rename to localstack-core/localstack/packages/debugpy.py index e27b1b79d9938..2731236f747a1 100644 --- a/localstack/packages/debugpy.py +++ b/localstack-core/localstack/packages/debugpy.py @@ -4,14 +4,14 @@ from localstack.utils.run import run -class DebugPyPackage(Package): - def __init__(self): +class DebugPyPackage(Package["DebugPyPackageInstaller"]): + def __init__(self) -> None: super().__init__("DebugPy", "latest") def get_versions(self) -> List[str]: return ["latest"] - def _get_installer(self, version: str) -> PackageInstaller: + def _get_installer(self, version: str) -> "DebugPyPackageInstaller": return DebugPyPackageInstaller("debugpy", version) @@ -20,7 +20,7 @@ class DebugPyPackageInstaller(PackageInstaller): def is_installed(self) -> bool: try: - import debugpy + import debugpy # type: ignore[import-not-found] # noqa: T100 assert debugpy return True diff --git a/localstack-core/localstack/packages/ffmpeg.py b/localstack-core/localstack/packages/ffmpeg.py new file mode 100644 index 0000000000000..af9a18b544fb5 --- /dev/null +++ b/localstack-core/localstack/packages/ffmpeg.py @@ -0,0 +1,46 @@ +import os +from typing import List + +from localstack.packages import Package +from localstack.packages.core import ArchiveDownloadAndExtractInstaller +from localstack.utils.platform import Arch, get_arch + +# Mapping LocalStack architecture to BtbN's naming convention +ARCH_MAPPING = {Arch.amd64: "linux64", Arch.arm64: "linuxarm64"} + +# Download URL template for ffmpeg 7.1 LGPL builds from BtbN GitHub Releases +FFMPEG_STATIC_BIN_URL = "https://github.com/BtbN/FFmpeg-Builds/releases/download/latest/ffmpeg-n{version}-latest-{arch}-lgpl-{version}.tar.xz" + + +class FfmpegPackage(Package["FfmpegPackageInstaller"]): + def __init__(self) -> None: + super().__init__(name="ffmpeg", default_version="7.1") + + def _get_installer(self, version: str) -> "FfmpegPackageInstaller": + return FfmpegPackageInstaller(version) + + def get_versions(self) -> List[str]: + return ["7.1"] + + +class FfmpegPackageInstaller(ArchiveDownloadAndExtractInstaller): + def __init__(self, version: str): + 
super().__init__("ffmpeg", version) + + def _get_download_url(self) -> str: + return FFMPEG_STATIC_BIN_URL.format(arch=ARCH_MAPPING.get(get_arch()), version=self.version) + + def _get_install_marker_path(self, install_dir: str) -> str: + return os.path.join(install_dir, self._get_archive_subdir()) + + def _get_archive_subdir(self) -> str: + return f"ffmpeg-n{self.version}-latest-{ARCH_MAPPING.get(get_arch())}-lgpl-{self.version}" + + def get_ffmpeg_path(self) -> str: + return os.path.join(self.get_installed_dir(), "bin", "ffmpeg") # type: ignore[arg-type] + + def get_ffprobe_path(self) -> str: + return os.path.join(self.get_installed_dir(), "bin", "ffprobe") # type: ignore[arg-type] + + +ffmpeg_package = FfmpegPackage() diff --git a/localstack-core/localstack/packages/java.py b/localstack-core/localstack/packages/java.py new file mode 100644 index 0000000000000..c8a2e9f7c7f21 --- /dev/null +++ b/localstack-core/localstack/packages/java.py @@ -0,0 +1,205 @@ +import logging +import os +from typing import List + +import requests + +from localstack.constants import USER_AGENT_STRING +from localstack.packages import InstallTarget, Package +from localstack.packages.core import ArchiveDownloadAndExtractInstaller +from localstack.utils.files import rm_rf +from localstack.utils.platform import Arch, get_arch, is_linux, is_mac_os +from localstack.utils.run import run + +LOG = logging.getLogger(__name__) + +# Default version if not specified +DEFAULT_JAVA_VERSION = "11" + +# Supported Java LTS versions mapped with Eclipse Temurin build semvers +JAVA_VERSIONS = { + "8": "8u432-b06", + "11": "11.0.25+9", + "17": "17.0.13+11", + "21": "21.0.5+11", +} + + +class JavaInstallerMixin: + """ + Mixin class for packages that depend on Java. It introduces methods that install Java and help build environment. + """ + + def _prepare_installation(self, target: InstallTarget) -> None: + java_package.install(target=target) + + def get_java_home(self) -> str | None: + """ + Returns path to JRE installation. + """ + return java_package.get_installer().get_java_home() + + def get_java_lib_path(self) -> str | None: + """ + Returns the path to the Java shared library. + """ + if java_home := self.get_java_home(): + if is_mac_os(): + return os.path.join(java_home, "lib", "jli", "libjli.dylib") + return os.path.join(java_home, "lib", "server", "libjvm.so") + return None + + def get_java_env_vars( + self, path: str | None = None, ld_library_path: str | None = None + ) -> dict[str, str]: + """ + Returns environment variables pointing to the Java installation. This is useful to build the environment where + the application will run. + + :param path: If not specified, the value of PATH will be obtained from the environment + :param ld_library_path: If not specified, the value of LD_LIBRARY_PATH will be obtained from the environment + :return: dict consisting of two items: + - JAVA_HOME: path to JRE installation + - PATH: the env path variable updated with JRE bin path + """ + java_home = self.get_java_home() + java_bin = f"{java_home}/bin" + + path = path or os.environ["PATH"] + + library_path = ld_library_path or os.environ.get("LD_LIBRARY_PATH") + # null paths (e.g. 
`:/foo`) have a special meaning according to the manpages + if library_path is None: + full_library_path = f"{java_home}/lib:{java_home}/lib/server" + else: + full_library_path = f"{java_home}/lib:{java_home}/lib/server:{library_path}" + + return { + "JAVA_HOME": java_home, # type: ignore[dict-item] + "LD_LIBRARY_PATH": full_library_path, + "PATH": f"{java_bin}:{path}", + } + + +class JavaPackageInstaller(ArchiveDownloadAndExtractInstaller): + def __init__(self, version: str): + super().__init__("java", version, extract_single_directory=True) + + def _get_install_marker_path(self, install_dir: str) -> str: + if is_mac_os(): + return os.path.join(install_dir, "Contents", "Home", "bin", "java") + return os.path.join(install_dir, "bin", "java") + + def _get_download_url(self) -> str: + # Note: Eclipse Temurin does not provide Mac aarch64 Java 8 builds. + # See https://adoptium.net/en-GB/supported-platforms/ + try: + LOG.debug("Determining the latest Java build version") + return self._download_url_latest_release() + except Exception as exc: # noqa + LOG.debug( + "Unable to determine the latest Java build version. Using pinned versions: %s", exc + ) + return self._download_url_fallback() + + def _post_process(self, target: InstallTarget) -> None: + target_directory = self._get_install_dir(target) + minimal_jre_path = os.path.join(target.value, self.name, f"{self.version}.minimal") + rm_rf(minimal_jre_path) + + # If jlink is not available, use the environment as is + if not os.path.exists(os.path.join(target_directory, "bin", "jlink")): + LOG.warning("Skipping JRE optimisation because jlink is not available") + return + + # Build a custom JRE with only the necessary bits to minimise disk footprint + LOG.debug("Optimising JRE installation") + cmd = ( + "bin/jlink --add-modules " + # Required modules + "java.base,java.desktop,java.instrument,java.management," + "java.naming,java.scripting,java.sql,java.xml,jdk.compiler," + # jdk.unsupported contains sun.misc.Unsafe which is required by some dependencies + "jdk.unsupported," + # Additional cipher suites + "jdk.crypto.cryptoki," + # Archive support + "jdk.zipfs," + # Required by MQ broker + "jdk.httpserver,jdk.management,jdk.management.agent," + # Required by Spark and Hadoop + "java.security.jgss,jdk.security.auth," + # Include required locales + "jdk.localedata --include-locales en " + # Supplementary args + "--compress 2 --strip-debug --no-header-files --no-man-pages " + # Output directory + "--output " + minimal_jre_path + ) + run(cmd, cwd=target_directory) + + rm_rf(target_directory) + os.rename(minimal_jre_path, target_directory) + + def get_java_home(self) -> str | None: + """ + Get JAVA_HOME for this installation of Java. + """ + installed_dir = self.get_installed_dir() + if is_mac_os(): + return os.path.join(installed_dir, "Contents", "Home") # type: ignore[arg-type] + return installed_dir + + @property + def arch(self) -> str | None: + return ( + "x64" if get_arch() == Arch.amd64 else "aarch64" if get_arch() == Arch.arm64 else None + ) + + @property + def os_name(self) -> str | None: + return "linux" if is_linux() else "mac" if is_mac_os() else None + + def _download_url_latest_release(self) -> str: + """ + Return the download URL for latest stable JDK build. + """ + endpoint = ( + f"https://api.adoptium.net/v3/assets/latest/{self.version}/hotspot?" 
+ f"os={self.os_name}&architecture={self.arch}&image_type=jdk" + ) + # Override user-agent because Adoptium API denies service to `requests` library + response = requests.get(endpoint, headers={"user-agent": USER_AGENT_STRING}).json() + return response[0]["binary"]["package"]["link"] + + def _download_url_fallback(self) -> str: + """ + Return the download URL for pinned JDK build. + """ + semver = JAVA_VERSIONS[self.version] + tag_slug = f"jdk-{semver}" + semver_safe = semver.replace("+", "_") + + # v8 uses a different tag and version scheme + if self.version == "8": + semver_safe = semver_safe.replace("-", "") + tag_slug = f"jdk{semver}" + + return ( + f"https://github.com/adoptium/temurin{self.version}-binaries/releases/download/{tag_slug}/" + f"OpenJDK{self.version}U-jdk_{self.arch}_{self.os_name}_hotspot_{semver_safe}.tar.gz" + ) + + +class JavaPackage(Package[JavaPackageInstaller]): + def __init__(self, default_version: str = DEFAULT_JAVA_VERSION): + super().__init__(name="Java", default_version=default_version) + + def get_versions(self) -> List[str]: + return list(JAVA_VERSIONS.keys()) + + def _get_installer(self, version: str) -> JavaPackageInstaller: + return JavaPackageInstaller(version) + + +java_package = JavaPackage() diff --git a/localstack-core/localstack/packages/plugins.py b/localstack-core/localstack/packages/plugins.py new file mode 100644 index 0000000000000..fdeba86a04204 --- /dev/null +++ b/localstack-core/localstack/packages/plugins.py @@ -0,0 +1,29 @@ +from typing import TYPE_CHECKING + +from localstack.packages.api import Package, package + +if TYPE_CHECKING: + from localstack.packages.ffmpeg import FfmpegPackageInstaller + from localstack.packages.java import JavaPackageInstaller + from localstack.packages.terraform import TerraformPackageInstaller + + +@package(name="terraform") +def terraform_package() -> Package["TerraformPackageInstaller"]: + from .terraform import terraform_package + + return terraform_package + + +@package(name="ffmpeg") +def ffmpeg_package() -> Package["FfmpegPackageInstaller"]: + from localstack.packages.ffmpeg import ffmpeg_package + + return ffmpeg_package + + +@package(name="java") +def java_package() -> Package["JavaPackageInstaller"]: + from localstack.packages.java import java_package + + return java_package diff --git a/localstack-core/localstack/packages/terraform.py b/localstack-core/localstack/packages/terraform.py new file mode 100644 index 0000000000000..6ee590f0387b5 --- /dev/null +++ b/localstack-core/localstack/packages/terraform.py @@ -0,0 +1,41 @@ +import os +import platform +from typing import List + +from localstack.packages import InstallTarget, Package +from localstack.packages.core import ArchiveDownloadAndExtractInstaller +from localstack.utils.files import chmod_r +from localstack.utils.platform import get_arch + +TERRAFORM_VERSION = os.getenv("TERRAFORM_VERSION", "1.5.7") +TERRAFORM_URL_TEMPLATE = ( + "https://releases.hashicorp.com/terraform/{version}/terraform_{version}_{os}_{arch}.zip" +) + + +class TerraformPackage(Package["TerraformPackageInstaller"]): + def __init__(self) -> None: + super().__init__("Terraform", TERRAFORM_VERSION) + + def get_versions(self) -> List[str]: + return [TERRAFORM_VERSION] + + def _get_installer(self, version: str) -> "TerraformPackageInstaller": + return TerraformPackageInstaller("terraform", version) + + +class TerraformPackageInstaller(ArchiveDownloadAndExtractInstaller): + def _get_install_marker_path(self, install_dir: str) -> str: + return os.path.join(install_dir, 
"terraform") + + def _get_download_url(self) -> str: + system = platform.system().lower() + arch = get_arch() + return TERRAFORM_URL_TEMPLATE.format(version=TERRAFORM_VERSION, os=system, arch=arch) + + def _install(self, target: InstallTarget) -> None: + super()._install(target) + chmod_r(self.get_executable_path(), 0o777) # type: ignore[arg-type] + + +terraform_package = TerraformPackage() diff --git a/localstack-core/localstack/plugins.py b/localstack-core/localstack/plugins.py new file mode 100644 index 0000000000000..a313032547bba --- /dev/null +++ b/localstack-core/localstack/plugins.py @@ -0,0 +1,76 @@ +import logging +import os +import sys +from pathlib import Path + +import yaml +from plux import Plugin + +from localstack import config +from localstack.runtime import hooks +from localstack.utils.files import rm_rf +from localstack.utils.ssl import get_cert_pem_file_path + +LOG = logging.getLogger(__name__) + + +@hooks.on_infra_start() +def deprecation_warnings() -> None: + LOG.debug("Checking for the usage of deprecated community features and configs...") + from localstack.deprecations import log_deprecation_warnings + + log_deprecation_warnings() + + +@hooks.on_infra_start(should_load=lambda: config.REMOVE_SSL_CERT) +def delete_cached_certificate(): + LOG.debug("Removing the cached local SSL certificate") + target_file = get_cert_pem_file_path() + rm_rf(target_file) + + +class OASPlugin(Plugin): + """ + This plugin allows to register an arbitrary number of OpenAPI specs, e.g., the spec for the public endpoints + of localstack.core. + The OpenAPIValidator handler uses (as opt-in) all the collected specs to validate the requests and the responses + to these public endpoints. + + An OAS plugin assumes the following directory layout. + + my_package + ├── sub_package + │ ├── __init__.py <-- spec file + │ ├── openapi.yaml + │ └── plugins.py <-- plugins + ├── plugins.py <-- plugins + └── openapi.yaml <-- spec file + + Each package can have its own OpenAPI yaml spec which is loaded by the correspondent plugin in plugins.py + You can simply create a plugin like the following: + + class MyPackageOASPlugin(OASPlugin): + name = "my_package" + + The only convention is that plugins.py and openapi.yaml have the same pathname. + """ + + namespace = "localstack.openapi.spec" + + def __init__(self) -> None: + # By convention a plugins.py is at the same level (i.e., same pathname) of the openapi.yaml file. 
+ # importlib.resources would be a better approach but has issues with namespace packages in editable mode + _module = sys.modules[self.__module__] + self.spec_path = Path( + os.path.join(os.path.dirname(os.path.abspath(_module.__file__)), "openapi.yaml") + ) + assert self.spec_path.exists() + self.spec = {} + + def load(self): + with self.spec_path.open("r") as f: + self.spec = yaml.safe_load(f) + + +class CoreOASPlugin(OASPlugin): + name = "localstack" diff --git a/localstack/services/cloudformation/engine/__init__.py b/localstack-core/localstack/py.typed similarity index 100% rename from localstack/services/cloudformation/engine/__init__.py rename to localstack-core/localstack/py.typed diff --git a/localstack-core/localstack/runtime/__init__.py b/localstack-core/localstack/runtime/__init__.py new file mode 100644 index 0000000000000..99044a674080a --- /dev/null +++ b/localstack-core/localstack/runtime/__init__.py @@ -0,0 +1,5 @@ +from .current import get_current_runtime + +__all__ = [ + "get_current_runtime", +] diff --git a/localstack-core/localstack/runtime/analytics.py b/localstack-core/localstack/runtime/analytics.py new file mode 100644 index 0000000000000..2612ee8637bf9 --- /dev/null +++ b/localstack-core/localstack/runtime/analytics.py @@ -0,0 +1,136 @@ +import logging +import os + +from localstack import config +from localstack.runtime import hooks +from localstack.utils.analytics import log + +LOG = logging.getLogger(__name__) + +TRACKED_ENV_VAR = [ + "ALLOW_NONSTANDARD_REGIONS", + "BEDROCK_PREWARM", + "CLOUDFRONT_LAMBDA_EDGE", + "CONTAINER_RUNTIME", + "DEBUG", + "DEFAULT_REGION", # Not functional; deprecated in 0.12.7, removed in 3.0.0 + "DEFAULT_BEDROCK_MODEL", + "DISABLE_CORS_CHECK", + "DISABLE_CORS_HEADERS", + "DMS_SERVERLESS_DEPROVISIONING_DELAY", + "DMS_SERVERLESS_STATUS_CHANGE_WAITING_TIME", + "DNS_ADDRESS", + "DYNAMODB_ERROR_PROBABILITY", + "DYNAMODB_IN_MEMORY", + "DYNAMODB_REMOVE_EXPIRED_ITEMS", + "EAGER_SERVICE_LOADING", + "EC2_VM_MANAGER", + "ECS_TASK_EXECUTOR", + "EDGE_PORT", + "ENABLE_REPLICATOR", + "ENFORCE_IAM", + "ES_CUSTOM_BACKEND", # deprecated in 0.14.0, removed in 3.0.0 + "ES_MULTI_CLUSTER", # deprecated in 0.14.0, removed in 3.0.0 + "ES_ENDPOINT_STRATEGY", # deprecated in 0.14.0, removed in 3.0.0 + "EVENT_RULE_ENGINE", + "IAM_SOFT_MODE", + "KINESIS_PROVIDER", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "KINESIS_ERROR_PROBABILITY", + "KMS_PROVIDER", # defunct since 1.4.0 + "LAMBDA_DEBUG_MODE", + "LAMBDA_DOWNLOAD_AWS_LAYERS", + "LAMBDA_EXECUTOR", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "LAMBDA_STAY_OPEN_MODE", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "LAMBDA_REMOTE_DOCKER", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "LAMBDA_CODE_EXTRACT_TIME", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "LAMBDA_CONTAINER_REGISTRY", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "LAMBDA_FALLBACK_URL", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "LAMBDA_FORWARD_URL", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "LAMBDA_XRAY_INIT", # Not functional; deprecated in 2.0.0, removed in 3.0.0 + "LAMBDA_PREBUILD_IMAGES", + "LAMBDA_RUNTIME_EXECUTOR", + "LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT", + "LEGACY_EDGE_PROXY", # Not functional; deprecated in 1.0.0, removed in 2.0.0 + "LS_LOG", + "MOCK_UNIMPLEMENTED", # Not functional; deprecated in 1.3.0, removed in 3.0.0 + "OPENSEARCH_ENDPOINT_STRATEGY", + "PERSISTENCE", + "PERSISTENCE_SINGLE_FILE", + "PERSIST_ALL", # defunct since 
2.3.2 + "PORT_WEB_UI", + "RDS_MYSQL_DOCKER", + "REQUIRE_PRO", + "SERVICES", + "STRICT_SERVICE_LOADING", + "SKIP_INFRA_DOWNLOADS", + "SQS_ENDPOINT_STRATEGY", + "USE_SINGLE_REGION", # Not functional; deprecated in 0.12.7, removed in 3.0.0 + "USE_SSL", +] + +PRESENCE_ENV_VAR = [ + "DATA_DIR", + "EDGE_FORWARD_URL", # Not functional; deprecated in 1.4.0, removed in 3.0.0 + "GATEWAY_LISTEN", + "HOSTNAME", + "HOSTNAME_EXTERNAL", + "HOSTNAME_FROM_LAMBDA", + "HOST_TMP_FOLDER", # Not functional; deprecated in 1.0.0, removed in 2.0.0 + "INIT_SCRIPTS_PATH", # Not functional; deprecated in 1.1.0, removed in 2.0.0 + "LAMBDA_DEBUG_MODE_CONFIG_PATH", + "LEGACY_DIRECTORIES", # Not functional; deprecated in 1.1.0, removed in 2.0.0 + "LEGACY_INIT_DIR", # Not functional; deprecated in 1.1.0, removed in 2.0.0 + "LOCALSTACK_HOST", + "LOCALSTACK_HOSTNAME", + "OUTBOUND_HTTP_PROXY", + "OUTBOUND_HTTPS_PROXY", + "S3_DIR", + "SFN_MOCK_CONFIG", + "TMPDIR", +] + + +@hooks.on_infra_start() +def _publish_config_as_analytics_event(): + env_vars = list(TRACKED_ENV_VAR) + + for key, value in os.environ.items(): + if key.startswith("PROVIDER_OVERRIDE_"): + env_vars.append(key) + elif key.startswith("SYNCHRONOUS_") and key.endswith("_EVENTS"): + # these config variables have been removed with 3.0.0 + env_vars.append(key) + + env_vars = {key: os.getenv(key) for key in env_vars} + present_env_vars = {env_var: 1 for env_var in PRESENCE_ENV_VAR if os.getenv(env_var)} + + log.event("config", env_vars=env_vars, set_vars=present_env_vars) + + +class LocalstackContainerInfo: + def get_image_variant(self) -> str: + for f in os.listdir("/usr/lib/localstack"): + if f.startswith(".") and f.endswith("-version"): + return f[1:-8] + return "unknown" + + def has_docker_socket(self) -> bool: + return os.path.exists("/run/docker.sock") + + def to_dict(self): + return { + "variant": self.get_image_variant(), + "has_docker_socket": self.has_docker_socket(), + } + + +@hooks.on_infra_start() +def _publish_container_info(): + if not config.is_in_docker: + return + + try: + log.event("container_info", payload=LocalstackContainerInfo().to_dict()) + except Exception as e: + if config.DEBUG_ANALYTICS: + LOG.debug("error gathering container information: %s", e) diff --git a/localstack-core/localstack/runtime/components.py b/localstack-core/localstack/runtime/components.py new file mode 100644 index 0000000000000..db9662b2e030b --- /dev/null +++ b/localstack-core/localstack/runtime/components.py @@ -0,0 +1,56 @@ +""" +This package contains code to define and manage the core components that make up a ``LocalstackRuntime``. +These include: + - A ``Gateway`` + - A ``RuntimeServer`` as the main control loop + - A ``ServiceManager`` to manage service plugins (TODO: once the Service concept has been generalized) + - ... ? + +Components can then be accessed via ``get_current_runtime()``. +""" + +from functools import cached_property + +from plux import Plugin, PluginManager +from rolo.gateway import Gateway + +from .server.core import RuntimeServer, RuntimeServerPlugin + + +class Components(Plugin): + """ + A Plugin that allows a specific localstack runtime implementation (aws, snowflake, ...) to expose its + own component factory. 
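+
+    For example (illustrative), consumers can then resolve the active components through the runtime
+    singleton, e.g. ``get_current_runtime().components.gateway``.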
+ """ + + namespace = "localstack.runtime.components" + + @cached_property + def gateway(self) -> Gateway: + raise NotImplementedError + + @cached_property + def runtime_server(self) -> RuntimeServer: + raise NotImplementedError + + +class BaseComponents(Components): + """ + A component base, which includes a ``RuntimeServer`` created from the config variable, and a default + ServicePluginManager as ServiceManager. + """ + + @cached_property + def runtime_server(self) -> RuntimeServer: + from localstack import config + + # TODO: rename to RUNTIME_SERVER + server_type = config.GATEWAY_SERVER + + plugins = PluginManager(RuntimeServerPlugin.namespace) + + if not plugins.exists(server_type): + raise ValueError(f"Unknown gateway server type {server_type}") + + plugins.load(server_type) + return plugins.get_container(server_type).load_value diff --git a/localstack-core/localstack/runtime/current.py b/localstack-core/localstack/runtime/current.py new file mode 100644 index 0000000000000..fa033c58844fa --- /dev/null +++ b/localstack-core/localstack/runtime/current.py @@ -0,0 +1,40 @@ +"""This package gives access to the singleton ``LocalstackRuntime`` instance. This is the only global state +that should exist within localstack, which contains the singleton ``LocalstackRuntime`` which is currently +running.""" + +import threading +import typing + +if typing.TYPE_CHECKING: + # make sure we don't have any imports here at runtime, so it can be imported anywhere without conflicts + from .runtime import LocalstackRuntime + +_runtime: typing.Optional["LocalstackRuntime"] = None +"""The singleton LocalStack Runtime""" +_runtime_lock = threading.RLock() + + +def get_current_runtime() -> "LocalstackRuntime": + with _runtime_lock: + if not _runtime: + raise ValueError("LocalStack runtime has not yet been set") + return _runtime + + +def set_current_runtime(runtime: "LocalstackRuntime"): + with _runtime_lock: + global _runtime + _runtime = runtime + + +def initialize_runtime() -> "LocalstackRuntime": + from localstack.runtime import runtime + + with _runtime_lock: + try: + return get_current_runtime() + except ValueError: + pass + rt = runtime.create_from_environment() + set_current_runtime(rt) + return rt diff --git a/localstack-core/localstack/runtime/events.py b/localstack-core/localstack/runtime/events.py new file mode 100644 index 0000000000000..2382fab6a47a2 --- /dev/null +++ b/localstack-core/localstack/runtime/events.py @@ -0,0 +1,7 @@ +import threading + +# TODO: deprecate and replace access with ``get_current_runtime().starting``, ... +infra_starting = threading.Event() +infra_ready = threading.Event() +infra_stopping = threading.Event() +infra_stopped = threading.Event() diff --git a/localstack-core/localstack/runtime/exceptions.py b/localstack-core/localstack/runtime/exceptions.py new file mode 100644 index 0000000000000..b4a4f72e65066 --- /dev/null +++ b/localstack-core/localstack/runtime/exceptions.py @@ -0,0 +1,9 @@ +class LocalstackExit(Exception): + """ + This exception can be raised during the startup procedure to terminate localstack with an exit code and + a reason. 
+ """ + + def __init__(self, reason: str = None, code: int = 0): + super().__init__(reason) + self.code = code diff --git a/localstack-core/localstack/runtime/hooks.py b/localstack-core/localstack/runtime/hooks.py new file mode 100644 index 0000000000000..05161679cf54e --- /dev/null +++ b/localstack-core/localstack/runtime/hooks.py @@ -0,0 +1,104 @@ +import functools + +from plux import PluginManager, plugin + +# plugin namespace constants +HOOKS_CONFIGURE_LOCALSTACK_CONTAINER = "localstack.hooks.configure_localstack_container" +HOOKS_ON_RUNTIME_CREATE = "localstack.hooks.on_runtime_create" +HOOKS_ON_INFRA_READY = "localstack.hooks.on_infra_ready" +HOOKS_ON_INFRA_START = "localstack.hooks.on_infra_start" +HOOKS_ON_PRO_INFRA_START = "localstack.hooks.on_pro_infra_start" +HOOKS_ON_INFRA_SHUTDOWN = "localstack.hooks.on_infra_shutdown" +HOOKS_PREPARE_HOST = "localstack.hooks.prepare_host" + + +def hook(namespace: str, priority: int = 0, **kwargs): + """ + Decorator for creating functional plugins that have a hook_priority attribute. Hooks with a higher priority value + will be executed earlier. + """ + + def wrapper(fn): + fn.hook_priority = priority + return plugin(namespace=namespace, **kwargs)(fn) + + return wrapper + + +def hook_spec(namespace: str): + """ + Creates a new hook decorator bound to a namespace. + + on_infra_start = hook_spec("localstack.hooks.on_infra_start") + + @on_infra_start() + def foo(): + pass + + # run all hooks in order + on_infra_start.run() + """ + fn = functools.partial(hook, namespace=namespace) + # attach hook manager and run method to decorator for convenience calls + fn.manager = HookManager(namespace) + fn.run = fn.manager.run_in_order + return fn + + +class HookManager(PluginManager): + def load_all_sorted(self, propagate_exceptions=False): + """ + Loads all hook plugins and sorts them by their hook_priority attribute. + """ + plugins = self.load_all(propagate_exceptions) + # the hook_priority attribute is part of the function wrapped in the FunctionPlugin + plugins.sort( + key=lambda _fn_plugin: getattr(_fn_plugin.fn, "hook_priority", 0), reverse=True + ) + return plugins + + def run_in_order(self, *args, **kwargs): + """ + Loads and runs all plugins in order them with the given arguments. + """ + for fn_plugin in self.load_all_sorted(): + fn_plugin(*args, **kwargs) + + def __str__(self): + return "HookManager(%s)" % self.namespace + + def __repr__(self): + return self.__str__() + + +configure_localstack_container = hook_spec(HOOKS_CONFIGURE_LOCALSTACK_CONTAINER) +"""Hooks to configure the LocalStack container before it starts. Executed on the host when invoking the CLI.""" + +prepare_host = hook_spec(HOOKS_PREPARE_HOST) +"""Hooks to prepare the host that's starting LocalStack. Executed on the host when invoking the CLI.""" + +on_infra_start = hook_spec(HOOKS_ON_INFRA_START) +"""Hooks that are executed right before starting the LocalStack infrastructure.""" + +on_runtime_create = hook_spec(HOOKS_ON_RUNTIME_CREATE) +"""Hooks that are executed right before the LocalstackRuntime is created. These can be used to apply +patches or otherwise configure the interpreter before any other code is imported.""" + +on_runtime_start = on_infra_start +"""Alias for on_infra_start. 
+
+on_pro_infra_start = hook_spec(HOOKS_ON_PRO_INFRA_START)
+"""Hooks that are executed after on_infra_start hooks, and only if LocalStack pro has been activated."""
+
+on_infra_ready = hook_spec(HOOKS_ON_INFRA_READY)
+"""Hooks that are executed after all startup hooks have been executed, and the LocalStack infrastructure has become
+available."""
+
+on_runtime_ready = on_infra_ready
+"""Alias for on_infra_ready. TODO: switch and deprecate the `infra` naming."""
+
+on_infra_shutdown = hook_spec(HOOKS_ON_INFRA_SHUTDOWN)
+"""Hooks that are executed when localstack shuts down."""
+
+on_runtime_shutdown = on_infra_shutdown
+"""Alias for on_infra_shutdown. TODO: switch and deprecate the `infra` naming."""
diff --git a/localstack-core/localstack/runtime/init.py b/localstack-core/localstack/runtime/init.py
new file mode 100644
index 0000000000000..e9b2f97dccf9e
--- /dev/null
+++ b/localstack-core/localstack/runtime/init.py
@@ -0,0 +1,283 @@
+"""Module for initialization hooks https://docs.localstack.cloud/references/init-hooks/"""
+
+import dataclasses
+import logging
+import os.path
+import subprocess
+import time
+from enum import Enum
+from functools import cached_property
+from typing import Dict, List, Optional
+
+from plux import Plugin, PluginManager
+
+from localstack.runtime import hooks
+from localstack.utils.objects import singleton_factory
+
+LOG = logging.getLogger(__name__)
+
+
+class State(Enum):
+    UNKNOWN = "UNKNOWN"
+    RUNNING = "RUNNING"
+    SUCCESSFUL = "SUCCESSFUL"
+    ERROR = "ERROR"
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return self.name
+
+
+class Stage(Enum):
+    BOOT = 0
+    START = 1
+    READY = 2
+    SHUTDOWN = 3
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return self.name
+
+
+@dataclasses.dataclass
+class Script:
+    path: str
+    stage: Stage
+    state: State = State.UNKNOWN
+
+
+class ScriptRunner(Plugin):
+    """
+    Interface for running scripts.
+    """
+
+    namespace = "localstack.init.runner"
+    suffixes = []
+
+    def run(self, path: str) -> None:
+        """
+        Run the given script with the appropriate runtime.
+
+        :param path: the path to the script
+        """
+        raise NotImplementedError
+
+    def should_run(self, script_file: str) -> bool:
+        """
+        Checks whether the given file should be run with this script runner. In case multiple runners
+        evaluate this condition to true on the same file (ideally this doesn't happen), the first one
+        loaded will be used, which is potentially non-deterministic.
+
+        :param script_file: the script file to run
+        :return: True if this runner should be used, False otherwise
+        """
+        for suffix in self.suffixes:
+            if script_file.endswith(suffix):
+                return True
+        return False
+
+
+class ShellScriptRunner(ScriptRunner):
+    """
+    Runner that interprets scripts as shell scripts and calls them directly.
+    """
+
+    name = "sh"
+    suffixes = [".sh"]
+
+    def run(self, path: str) -> None:
+        exit_code = subprocess.call(args=[], executable=path)
+        if exit_code != 0:
+            raise OSError("Script %s returned a non-zero exit code %s" % (path, exit_code))
+
+
+class PythonScriptRunner(ScriptRunner):
+    """
+    Runner that uses ``exec`` to run a python script.
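+    Note that each script is executed in-process with a fresh, empty globals dict, so state does
+    not leak between scripts.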
+ """ + + name = "py" + suffixes = [".py"] + + def run(self, path: str) -> None: + with open(path, "rb") as fd: + exec(fd.read(), {}) + + +class InitScriptManager: + _stage_directories: Dict[Stage, str] = { + Stage.BOOT: "boot.d", + Stage.START: "start.d", + Stage.READY: "ready.d", + Stage.SHUTDOWN: "shutdown.d", + } + + script_root: str + stage_completed: Dict[Stage, bool] + + def __init__(self, script_root: str): + self.script_root = script_root + self.stage_completed = dict.fromkeys(Stage, False) + self.runner_manager: PluginManager[ScriptRunner] = PluginManager(ScriptRunner.namespace) + + @cached_property + def scripts(self) -> Dict[Stage, List[Script]]: + return self._find_scripts() + + def get_script_runner(self, script_file: str) -> Optional[ScriptRunner]: + runners = self.runner_manager.load_all() + for runner in runners: + if runner.should_run(script_file): + return runner + return None + + def has_script_runner(self, script_file: str) -> bool: + return self.get_script_runner(script_file) is not None + + def run_stage(self, stage: Stage) -> List[Script]: + """ + Runs all scripts in the given stage. + + :param stage: the stage to run + :return: the scripts that were in the stage + """ + scripts = self.scripts.get(stage, []) + + if self.stage_completed[stage]: + LOG.debug("Stage %s already completed, skipping", stage) + return scripts + + try: + for script in scripts: + LOG.debug("Running %s script %s", script.stage, script.path) + + env_original = os.environ.copy() + + try: + script.state = State.RUNNING + runner = self.get_script_runner(script.path) + runner.run(script.path) + except Exception as e: + script.state = State.ERROR + if LOG.isEnabledFor(logging.DEBUG): + LOG.exception("Error while running script %s", script) + else: + LOG.error("Error while running script %s: %s", script, e) + else: + script.state = State.SUCCESSFUL + finally: + # Discard env variables overridden in startup script that may cause side-effects + for env_var in ( + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_SESSION_TOKEN", + "AWS_DEFAULT_REGION", + "AWS_PROFILE", + "AWS_REGION", + ): + if env_var in env_original: + os.environ[env_var] = env_original[env_var] + else: + os.environ.pop(env_var, None) + finally: + self.stage_completed[stage] = True + + return scripts + + def _find_scripts(self) -> Dict[Stage, List[Script]]: + scripts = {} + + if self.script_root is None: + LOG.debug("Unable to discover init scripts as script_root is None") + return {} + + for stage in Stage: + scripts[stage] = [] + + stage_dir = self._stage_directories[stage] + if not stage_dir: + continue + + stage_path = os.path.join(self.script_root, stage_dir) + if not os.path.isdir(stage_path): + continue + + for root, dirs, files in os.walk(stage_path, topdown=True): + # from the docs: "When topdown is true, the caller can modify the dirnames list in-place" + dirs.sort() + files.sort() + for file in files: + script_path = os.path.abspath(os.path.join(root, file)) + if not os.path.isfile(script_path): + continue + + # only add the script if there's a runner for it + if not self.has_script_runner(script_path): + LOG.debug("No runner available for script %s", script_path) + continue + + scripts[stage].append(Script(path=script_path, stage=stage)) + LOG.debug("Init scripts discovered: %s", scripts) + + return scripts + + +# runtime integration + + +@singleton_factory +def init_script_manager() -> InitScriptManager: + from localstack import config + + return InitScriptManager(script_root=config.dirs.init) + + 
+@hooks.on_infra_start() +def _run_init_scripts_on_start(): + # this is a hack since we currently cannot know whether boot scripts have been executed or not + init_script_manager().stage_completed[Stage.BOOT] = True + _run_and_log(Stage.START) + + +@hooks.on_infra_ready() +def _run_init_scripts_on_ready(): + _run_and_log(Stage.READY) + + +@hooks.on_infra_shutdown() +def _run_init_scripts_on_shutdown(): + _run_and_log(Stage.SHUTDOWN) + + +def _run_and_log(stage: Stage): + from localstack.utils.analytics import log + + then = time.time() + scripts = init_script_manager().run_stage(stage) + took = (time.time() - then) * 1000 + + if scripts: + log.event("run_init", {"stage": stage.name, "scripts": len(scripts), "duration": took}) + + +def main(): + """ + Run the init scripts for a particular stage. For example, to run all boot scripts run:: + + python -m localstack.runtime.init BOOT + + The __main__ entrypoint is currently mainly used for the docker-entrypoint.sh. Other stages + are executed from runtime hooks. + """ + import sys + + stage = Stage[sys.argv[1]] + init_script_manager().run_stage(stage) + + +if __name__ == "__main__": + main() diff --git a/localstack-core/localstack/runtime/legacy.py b/localstack-core/localstack/runtime/legacy.py new file mode 100644 index 0000000000000..2a2f54c562929 --- /dev/null +++ b/localstack-core/localstack/runtime/legacy.py @@ -0,0 +1,17 @@ +"""Adapter code for the legacy runtime to make sure the new runtime is compatible with the old one, +and at the same time doesn't need ``localstack.services.infra``, which imports AWS-specific modules.""" + +import logging +import os +import signal + +LOG = logging.getLogger(__name__) + + +def signal_supervisor_restart(): + # TODO: we should think about moving the localstack-supervisor into a script in the runtime, + # and make `signal_supervisor_restart` part of the supervisor code. + if pid := os.environ.get("SUPERVISOR_PID"): + os.kill(int(pid), signal.SIGUSR1) + else: + LOG.warning("could not signal supervisor to restart localstack") diff --git a/localstack-core/localstack/runtime/main.py b/localstack-core/localstack/runtime/main.py new file mode 100644 index 0000000000000..3a0357e230ad0 --- /dev/null +++ b/localstack-core/localstack/runtime/main.py @@ -0,0 +1,93 @@ +"""This is the entrypoint used to start the localstack runtime. 
It starts the infrastructure and also +manages the interaction with the operating system - mostly signal handlers for now.""" + +import signal +import sys +import traceback + +from localstack import config, constants +from localstack.runtime.exceptions import LocalstackExit + + +def print_runtime_information(in_docker: bool = False): + # FIXME: this is legacy code from the old CLI, reconcile with new CLI and runtime output + from localstack.utils.container_networking import get_main_container_name + from localstack.utils.container_utils.container_client import ContainerException + from localstack.utils.docker_utils import DOCKER_CLIENT + + print() + print(f"LocalStack version: {constants.VERSION}") + if in_docker: + try: + container_name = get_main_container_name() + print("LocalStack Docker container name: %s" % container_name) + inspect_result = DOCKER_CLIENT.inspect_container(container_name) + container_id = inspect_result["Id"] + print("LocalStack Docker container id: %s" % container_id[:12]) + image_details = DOCKER_CLIENT.inspect_image(inspect_result["Image"]) + digests = image_details.get("RepoDigests") or ["Unavailable"] + print("LocalStack Docker image sha: %s" % digests[0]) + except ContainerException: + print( + "LocalStack Docker container info: Failed to inspect the LocalStack docker container. " + "This is likely because the docker socket was not mounted into the container. " + "Without access to the docker socket, LocalStack will not function properly. Please " + "consult the LocalStack documentation on how to correctly start up LocalStack. ", + end="", + ) + if config.DEBUG: + print("Docker debug information:") + traceback.print_exc() + else: + print( + "You can run LocalStack with `DEBUG=1` to get more information about the error." + ) + + if config.LOCALSTACK_BUILD_DATE: + print("LocalStack build date: %s" % config.LOCALSTACK_BUILD_DATE) + + if config.LOCALSTACK_BUILD_GIT_HASH: + print("LocalStack build git hash: %s" % config.LOCALSTACK_BUILD_GIT_HASH) + + print() + + +def main(): + from localstack.logging.setup import setup_logging_from_config + from localstack.runtime import current + + try: + setup_logging_from_config() + runtime = current.initialize_runtime() + except Exception as e: + sys.stdout.write(f"ERROR: The LocalStack Runtime could not be initialized: {e}\n") + sys.stdout.flush() + raise + + # TODO: where should this go? + print_runtime_information() + + # signal handler to make sure SIGTERM properly shuts down localstack + def _terminate_localstack(sig: int, frame): + sys.stdout.write(f"Localstack runtime received signal {sig}\n") + sys.stdout.flush() + runtime.exit(0) + + signal.signal(signal.SIGINT, _terminate_localstack) + signal.signal(signal.SIGTERM, _terminate_localstack) + + try: + runtime.run() + except LocalstackExit as e: + sys.stdout.write(f"Localstack returning with exit code {e.code}. Reason: {e}") + sys.exit(e.code) + except Exception as e: + sys.stdout.write(f"ERROR: the LocalStack runtime exited unexpectedly: {e}\n") + sys.stdout.flush() + raise + + sys.exit(runtime.exit_code) + + +if __name__ == "__main__": + main() diff --git a/localstack-core/localstack/runtime/patches.py b/localstack-core/localstack/runtime/patches.py new file mode 100644 index 0000000000000..4772a480bfee1 --- /dev/null +++ b/localstack-core/localstack/runtime/patches.py @@ -0,0 +1,70 @@ +""" +System-wide patches that should be applied. 
+""" + +from localstack.runtime import hooks +from localstack.utils.patch import patch + + +def patch_thread_pool(): + """ + This patch to ThreadPoolExecutor makes the executor remove the threads it creates from the global + ``_thread_queues`` of ``concurrent.futures.thread``, which joins all created threads at python exit and + will block interpreter shutdown if any threads are still running, even if they are daemon threads. + """ + + import concurrent.futures.thread + + @patch(concurrent.futures.thread.ThreadPoolExecutor._adjust_thread_count) + def _adjust_thread_count(fn, self) -> None: + fn(self) + + for t in self._threads: + if not t.daemon: + continue + try: + del concurrent.futures.thread._threads_queues[t] + except KeyError: + pass + + +def patch_urllib3_connection_pool(**constructor_kwargs): + """ + Override the default parameters of HTTPConnectionPool, e.g., set the pool size via maxsize=16 + """ + try: + from urllib3 import connectionpool, poolmanager + + class MyHTTPSConnectionPool(connectionpool.HTTPSConnectionPool): + def __init__(self, *args, **kwargs): + kwargs.update(constructor_kwargs) + super(MyHTTPSConnectionPool, self).__init__(*args, **kwargs) + + poolmanager.pool_classes_by_scheme["https"] = MyHTTPSConnectionPool + + class MyHTTPConnectionPool(connectionpool.HTTPConnectionPool): + def __init__(self, *args, **kwargs): + kwargs.update(constructor_kwargs) + super(MyHTTPConnectionPool, self).__init__(*args, **kwargs) + + poolmanager.pool_classes_by_scheme["http"] = MyHTTPConnectionPool + except Exception: + pass + + +_applied = False + + +@hooks.on_runtime_start(priority=100) # apply patches earlier than other hooks +def apply_runtime_patches(): + # FIXME: find a better way to apply system-wide patches + global _applied + if _applied: + return + _applied = True + + from localstack.http.duplex_socket import enable_duplex_socket + + patch_urllib3_connection_pool(maxsize=128) + patch_thread_pool() + enable_duplex_socket() diff --git a/localstack-core/localstack/runtime/runtime.py b/localstack-core/localstack/runtime/runtime.py new file mode 100644 index 0000000000000..1e5d4e6ab5b21 --- /dev/null +++ b/localstack-core/localstack/runtime/runtime.py @@ -0,0 +1,203 @@ +import logging +import os +import threading + +from plux import PluginManager + +from localstack import config, constants +from localstack.runtime import events, hooks +from localstack.utils import files, functions, net, sync, threads + +from .components import Components + +LOG = logging.getLogger(__name__) + + +class LocalstackRuntime: + """ + The localstack runtime. It has the following responsibilities: + + - Manage localstack filesystem directories + - Execute runtime lifecycle hook plugins from ``localstack.runtime.hooks``. + - Manage the localstack SSL certificate + - Serve the gateway (It uses a ``RuntimeServer`` to serve a ``Gateway`` instance coming from the + ``Components`` factory.) + """ + + def __init__(self, components: Components): + self.components = components + + # at some point, far far in the future, we should no longer access a global config object, but rather + # the one from the current runtime. This will allow us to truly instantiate multiple localstack + # runtime instances in one process, which can be useful for many different things. but there is too + # much global state at the moment think about this seriously. however, this assignment here can + # serve as a reminder to avoid global state in general. 
+ self.config = config + + # TODO: move away from `localstack.runtime.events` and instantiate new `threading.Event()` here + # instead + self.starting = events.infra_starting + self.ready = events.infra_ready + self.stopping = events.infra_stopping + self.stopped = events.infra_stopped + self.exit_code = 0 + self._lifecycle_lock = threading.RLock() + + def run(self): + """ + Start the main control loop of the runtime and block the thread. This will initialize the + filesystem, run all lifecycle hooks, initialize the gateway server, and then serve the + ``RuntimeServer`` until ``shutdown()`` is called. + """ + # indicates to the environment that this is an "infra process" (old terminology referring to the + # localstack runtime). this is necessary for disabling certain hooks that may run in the context of + # the CLI host mode. TODO: should not be needed over time. + os.environ[constants.LOCALSTACK_INFRA_PROCESS] = "1" + + self._init_filesystem() + self._on_starting() + self._init_gateway_server() + + # since we are blocking the main thread with the runtime server, we need to run the monitor that + # prints the ready marker asynchronously. this is different from how the runtime was started in the + # past, where the server was running in a thread. + # TODO: ideally we pass down a `shutdown` event that can be waited on so we can cancel the thread + # if the runtime shuts down beforehand + threading.Thread(target=self._run_ready_monitor, daemon=True).start() + + # run the main control loop of the server and block execution + try: + self.components.runtime_server.run() + finally: + self._on_return() + + def exit(self, code: int = 0): + """ + Sets the exit code and runs ``shutdown``. It does not actually call ``sys.exit``, this is for the + caller to do. + + :param code: the exit code to be set + """ + self.exit_code = code + # we don't know yet why, but shutdown does not work on the main thread + threading.Thread(target=self.shutdown, name="Runtime-Shutdown").start() + + def shutdown(self): + """ + Initiates an orderly shutdown of the runtime by stopping the main control loop of the + ``RuntimeServer``. The shutdown hooks are actually called by the main control loop (in the main + thread) after it returns. 
+ """ + with self._lifecycle_lock: + if self.stopping.is_set(): + return + self.stopping.set() + + LOG.debug("[shutdown] Running shutdown hooks ...") + functions.call_safe( + hooks.on_runtime_shutdown.run, + exception_message="[shutdown] error calling shutdown hook", + ) + LOG.debug("[shutdown] Shutting down runtime server ...") + self.components.runtime_server.shutdown() + + def is_ready(self) -> bool: + return self.ready.is_set() + + def _init_filesystem(self): + self._clear_tmp_directory() + self.config.dirs.mkdirs() + + def _init_gateway_server(self): + from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available + + install_predefined_cert_if_available() + serial_number = self.config.GATEWAY_LISTEN[0].port + _, cert_file_name, key_file_name = create_ssl_cert(serial_number=serial_number) + ssl_creds = (cert_file_name, key_file_name) + + self.components.runtime_server.register( + self.components.gateway, self.config.GATEWAY_LISTEN, ssl_creds + ) + + def _on_starting(self): + self.starting.set() + hooks.on_runtime_start.run() + + def _on_ready(self): + hooks.on_runtime_ready.run() + print(constants.READY_MARKER_OUTPUT, flush=True) + self.ready.set() + + def _on_return(self): + LOG.debug("[shutdown] Cleaning up resources ...") + self._cleanup_resources() + self.stopped.set() + LOG.debug("[shutdown] Completed, bye!") + + def _run_ready_monitor(self): + self._wait_for_gateway() + self._on_ready() + + def _wait_for_gateway(self): + host_and_port = self.config.GATEWAY_LISTEN[0] + + if not sync.poll_condition( + lambda: net.is_port_open(host_and_port.port), timeout=15, interval=0.3 + ): + if LOG.isEnabledFor(logging.DEBUG): + # make another call with quiet=False to print detailed error logs + net.is_port_open(host_and_port.port, quiet=False) + raise TimeoutError(f"gave up waiting for gateway server to start on {host_and_port}") + + def _clear_tmp_directory(self): + if self.config.CLEAR_TMP_FOLDER: + # try to clear temp dir on startup + try: + files.rm_rf(self.config.dirs.tmp) + except PermissionError as e: + LOG.error( + "unable to delete temp folder %s: %s, please delete manually or you will " + "keep seeing these errors.", + self.config.dirs.tmp, + e, + ) + + def _cleanup_resources(self): + threads.cleanup_threads_and_processes() + self._clear_tmp_directory() + + +def create_from_environment() -> LocalstackRuntime: + """ + Creates a new runtime instance from the current environment. It uses a plugin manager to resolve the + necessary components from the ``localstack.runtime.components`` plugin namespace to start the runtime. + + :return: a new LocalstackRuntime instance + """ + hooks.on_runtime_create.run() + + plugin_manager = PluginManager(Components.namespace) + if config.RUNTIME_COMPONENTS: + try: + component = plugin_manager.load(config.RUNTIME_COMPONENTS) + return LocalstackRuntime(component) + except Exception as e: + raise ValueError( + f"Could not load runtime components from config RUNTIME_COMPONENTS={config.RUNTIME_COMPONENTS}: {e}." + ) from e + components = plugin_manager.load_all() + + if not components: + raise ValueError( + f"No component plugins found in namespace {Components.namespace}. Are entry points created " + f"correctly?" 
+    )
+
+    if len(components) > 1:
+        LOG.warning(
+            "There is more than one component plugin; using the first one, which is %s",
+            components[0].name,
+        )
+
+    return LocalstackRuntime(components[0])
diff --git a/localstack-core/localstack/runtime/server/__init__.py b/localstack-core/localstack/runtime/server/__init__.py
new file mode 100644
index 0000000000000..808f22795246a
--- /dev/null
+++ b/localstack-core/localstack/runtime/server/__init__.py
@@ -0,0 +1,5 @@
+from localstack.runtime.server.core import RuntimeServer
+
+__all__ = [
+    "RuntimeServer",
+]
diff --git a/localstack-core/localstack/runtime/server/core.py b/localstack-core/localstack/runtime/server/core.py
new file mode 100644
index 0000000000000..137f276f3d496
--- /dev/null
+++ b/localstack-core/localstack/runtime/server/core.py
@@ -0,0 +1,51 @@
+from plux import Plugin
+from rolo.gateway import Gateway
+
+from localstack import config
+
+
+class RuntimeServer:
+    """
+    The main network IO loop of LocalStack. This could be twisted, hypercorn, or any other server
+    implementation.
+    """
+
+    def register(
+        self,
+        gateway: Gateway,
+        listen: list[config.HostAndPort],
+        ssl_creds: tuple[str, str] | None = None,
+    ):
+        """
+        Registers the Gateway and the port configuration into the server. Some servers like ``twisted`` or
+        ``hypercorn`` support multiple calls to ``register``, allowing you to serve several Gateways
+        through a single event loop.
+
+        :param gateway: the gateway to serve
+        :param listen: the host and port configuration
+        :param ssl_creds: ssl credentials (certificate file path, key file path)
+        """
+        raise NotImplementedError
+
+    def run(self):
+        """
+        Run the server and block the thread.
+        """
+        raise NotImplementedError
+
+    def shutdown(self):
+        """
+        Shutdown the running server.
+        """
+        raise NotImplementedError
+
+
+class RuntimeServerPlugin(Plugin):
+    """
+    Plugin that serves as a factory for specific ``RuntimeServer`` implementations.
+ """ + + namespace = "localstack.runtime.server" + + def load(self, *args, **kwargs) -> RuntimeServer: + raise NotImplementedError diff --git a/localstack-core/localstack/runtime/server/hypercorn.py b/localstack-core/localstack/runtime/server/hypercorn.py new file mode 100644 index 0000000000000..ce15ea3d043e0 --- /dev/null +++ b/localstack-core/localstack/runtime/server/hypercorn.py @@ -0,0 +1,68 @@ +import asyncio +import threading + +from hypercorn import Config +from hypercorn.asyncio import serve +from rolo.gateway import Gateway +from rolo.gateway.asgi import AsgiGateway + +from localstack import config +from localstack.logging.setup import setup_hypercorn_logger + +from .core import RuntimeServer + + +class HypercornRuntimeServer(RuntimeServer): + def __init__(self): + self.loop = asyncio.get_event_loop() + + self._close = asyncio.Event() + self._closed = threading.Event() + + self._futures = [] + + def register( + self, + gateway: Gateway, + listen: list[config.HostAndPort], + ssl_creds: tuple[str, str] | None = None, + ): + hypercorn_config = Config() + hypercorn_config.h11_pass_raw_headers = True + hypercorn_config.bind = [str(host_and_port) for host_and_port in listen] + # hypercorn_config.use_reloader = use_reloader + + setup_hypercorn_logger(hypercorn_config) + + if ssl_creds: + cert_file_name, key_file_name = ssl_creds + hypercorn_config.certfile = cert_file_name + hypercorn_config.keyfile = key_file_name + + app = AsgiGateway(gateway, event_loop=self.loop) + + future = asyncio.run_coroutine_threadsafe( + serve(app, hypercorn_config, shutdown_trigger=self._shutdown_trigger), + self.loop, + ) + self._futures.append(future) + + def run(self): + self.loop.run_forever() + + def shutdown(self): + self._close.set() + asyncio.run_coroutine_threadsafe(self._set_closed(), self.loop) + # TODO: correctly wait for all hypercorn serve coroutines to finish + asyncio.run_coroutine_threadsafe(self.loop.shutdown_asyncgens(), self.loop) + self.loop.shutdown_default_executor() + self.loop.stop() + + async def _wait_server_stopped(self): + self._closed.set() + + async def _set_closed(self): + self._close.set() + + async def _shutdown_trigger(self): + await self._close.wait() diff --git a/localstack-core/localstack/runtime/server/plugins.py b/localstack-core/localstack/runtime/server/plugins.py new file mode 100644 index 0000000000000..95746e110375d --- /dev/null +++ b/localstack-core/localstack/runtime/server/plugins.py @@ -0,0 +1,19 @@ +from localstack.runtime.server.core import RuntimeServer, RuntimeServerPlugin + + +class TwistedRuntimeServerPlugin(RuntimeServerPlugin): + name = "twisted" + + def load(self, *args, **kwargs) -> RuntimeServer: + from .twisted import TwistedRuntimeServer + + return TwistedRuntimeServer() + + +class HypercornRuntimeServerPlugin(RuntimeServerPlugin): + name = "hypercorn" + + def load(self, *args, **kwargs) -> RuntimeServer: + from .hypercorn import HypercornRuntimeServer + + return HypercornRuntimeServer() diff --git a/localstack-core/localstack/runtime/server/twisted.py b/localstack-core/localstack/runtime/server/twisted.py new file mode 100644 index 0000000000000..eba02ae16422c --- /dev/null +++ b/localstack-core/localstack/runtime/server/twisted.py @@ -0,0 +1,57 @@ +from rolo.gateway import Gateway +from rolo.serving.twisted import TwistedGateway +from twisted.internet import endpoints, reactor, ssl + +from localstack import config +from localstack.aws.serving.twisted import TLSMultiplexerFactory, stop_thread_pool +from localstack.utils import patch + +from 
.core import RuntimeServer + + +class TwistedRuntimeServer(RuntimeServer): + def __init__(self): + self.thread_pool = None + + def register( + self, + gateway: Gateway, + listen: list[config.HostAndPort], + ssl_creds: tuple[str, str] | None = None, + ): + # setup twisted webserver Site + site = TwistedGateway(gateway) + + # configure ssl + if ssl_creds: + cert_file_name, key_file_name = ssl_creds + context_factory = ssl.DefaultOpenSSLContextFactory(key_file_name, cert_file_name) + context_factory.getContext().use_certificate_chain_file(cert_file_name) + protocol_factory = TLSMultiplexerFactory(context_factory, False, site) + else: + protocol_factory = site + + # add endpoint for each host/port combination + for host_and_port in listen: + if config.is_ipv6_address(host_and_port.host): + endpoint = endpoints.TCP6ServerEndpoint( + reactor, host_and_port.port, interface=host_and_port.host + ) + else: + # TODO: interface = host? + endpoint = endpoints.TCP4ServerEndpoint(reactor, host_and_port.port) + endpoint.listen(protocol_factory) + + def run(self): + reactor.suggestThreadPoolSize(config.GATEWAY_WORKER_COUNT) + self.thread_pool = reactor.getThreadPool() + patch.patch(self.thread_pool.stop)(stop_thread_pool) + + # we don't need signal handlers, since all they do is call ``reactor`` stop, which we expect the + # caller to do via ``shutdown``. + return reactor.run(installSignalHandlers=False) + + def shutdown(self): + if self.thread_pool: + self.thread_pool.stop(timeout=10) + reactor.stop() diff --git a/localstack-core/localstack/runtime/shutdown.py b/localstack-core/localstack/runtime/shutdown.py new file mode 100644 index 0000000000000..a64dab86ef930 --- /dev/null +++ b/localstack-core/localstack/runtime/shutdown.py @@ -0,0 +1,73 @@ +import logging +from typing import Any, Callable + +from localstack.runtime import hooks +from localstack.utils.functions import call_safe + +LOG = logging.getLogger(__name__) + +SERVICE_SHUTDOWN_PRIORITY = -10 +"""Shutdown hook priority for shutting down service plugins.""" + + +class ShutdownHandlers: + """ + Register / unregister shutdown handlers. All registered shutdown handlers should execute as fast as possible. + Blocking shutdown handlers will block infra shutdown. + """ + + def __init__(self): + self._callbacks = [] + + def register(self, shutdown_handler: Callable[[], Any]) -> None: + """ + Register shutdown handler. Handler should not block or take more than a couple seconds. + + :param shutdown_handler: Callable without parameters + """ + self._callbacks.append(shutdown_handler) + + def unregister(self, shutdown_handler: Callable[[], Any]) -> None: + """ + Unregister a handler. Idempotent operation. + + :param shutdown_handler: Shutdown handler which was previously registered + """ + try: + self._callbacks.remove(shutdown_handler) + except ValueError: + pass + + def run(self) -> None: + """ + Execute shutdown handlers in reverse order of registration. + Should only be called once, on shutdown. 
+ """ + for callback in reversed(list(self._callbacks)): + call_safe(callback) + + +SHUTDOWN_HANDLERS = ShutdownHandlers() +"""Shutdown handlers run with default priority in an on_infra_shutdown hook.""" + +ON_AFTER_SERVICE_SHUTDOWN_HANDLERS = ShutdownHandlers() +"""Shutdown handlers that are executed after all services have been shut down.""" + + +@hooks.on_infra_shutdown() +def run_shutdown_handlers(): + SHUTDOWN_HANDLERS.run() + + +@hooks.on_infra_shutdown(priority=SERVICE_SHUTDOWN_PRIORITY) +def shutdown_services(): + # TODO: this belongs into the shutdown procedure of a `Platform` or `RuntimeContainer` class. + from localstack.services.plugins import SERVICE_PLUGINS + + LOG.info("[shutdown] Stopping all services") + SERVICE_PLUGINS.stop_all_services() + + +@hooks.on_infra_shutdown(priority=SERVICE_SHUTDOWN_PRIORITY - 10) +def run_on_after_service_shutdown_handlers(): + ON_AFTER_SERVICE_SHUTDOWN_HANDLERS.run() diff --git a/localstack/services/cloudwatch/__init__.py b/localstack-core/localstack/services/__init__.py similarity index 100% rename from localstack/services/cloudwatch/__init__.py rename to localstack-core/localstack/services/__init__.py diff --git a/localstack/services/configservice/__init__.py b/localstack-core/localstack/services/acm/__init__.py similarity index 100% rename from localstack/services/configservice/__init__.py rename to localstack-core/localstack/services/acm/__init__.py diff --git a/localstack-core/localstack/services/acm/provider.py b/localstack-core/localstack/services/acm/provider.py new file mode 100644 index 0000000000000..7425b88832e6b --- /dev/null +++ b/localstack-core/localstack/services/acm/provider.py @@ -0,0 +1,136 @@ +from moto import settings as moto_settings +from moto.acm import models as acm_models + +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.acm import ( + AcmApi, + ListCertificatesRequest, + ListCertificatesResponse, + RequestCertificateRequest, + RequestCertificateResponse, +) +from localstack.services import moto +from localstack.utils.patch import patch + +# reduce the validation wait time from 60 (default) to 10 seconds +moto_settings.ACM_VALIDATION_WAIT = min(10, moto_settings.ACM_VALIDATION_WAIT) + + +@patch(acm_models.CertBundle.describe) +def describe(describe_orig, self): + # TODO fix! Terrible hack (for parity). Moto adds certain required fields only if status is PENDING_VALIDATION. 
+ cert_status = self.status + self.status = "PENDING_VALIDATION" + try: + result = describe_orig(self) + finally: + self.status = cert_status + + cert = result.get("Certificate", {}) + cert["Status"] = cert_status + sans = cert.setdefault("SubjectAlternativeNames", []) + sans_summaries = cert.setdefault("SubjectAlternativeNameSummaries", sans) + + # add missing attributes in ACM certs that cause Terraform to fail + addenda = { + "RenewalEligibility": "INELIGIBLE", + "KeyUsages": [{"Name": "DIGITAL_SIGNATURE"}, {"Name": "KEY_ENCIPHERMENT"}], + "ExtendedKeyUsages": [], + "Options": {"CertificateTransparencyLoggingPreference": "ENABLED"}, + } + addenda["DomainValidationOptions"] = options = cert.get("DomainValidationOptions") + if not options: + options = addenda["DomainValidationOptions"] = [ + {"ValidationMethod": cert.get("ValidationMethod")} + ] + + for option in options: + option["DomainName"] = domain_name = option.get("DomainName") or cert.get("DomainName") + validation_domain = option.get("ValidationDomain") or f"test.{domain_name.lstrip('*.')}" + option["ValidationDomain"] = validation_domain + option["ValidationMethod"] = option.get("ValidationMethod") or "DNS" + status = option.get("ValidationStatus") + option["ValidationStatus"] = ( + "SUCCESS" if (status is None or cert_status == "ISSUED") else status + ) + if option["ValidationMethod"] == "EMAIL": + option["ValidationEmails"] = option.get("ValidationEmails") or [ + f"admin@{self.common_name}" + ] + test_record = { + "Name": validation_domain, + "Type": "CNAME", + "Value": "test123", + } + option["ResourceRecord"] = option.get("ResourceRecord") or test_record + option["ResourceRecord"]["Name"] = option["ResourceRecord"]["Name"].replace(".*.", ".") + + for key, value in addenda.items(): + if not cert.get(key): + cert[key] = value + cert["Serial"] = str(cert.get("Serial") or "") + + if cert.get("KeyAlgorithm") in ["RSA_1024", "RSA_2048"]: + cert["KeyAlgorithm"] = cert["KeyAlgorithm"].replace("RSA_", "RSA-") + + # add subject alternative names + if cert["DomainName"] not in sans: + sans.append(cert["DomainName"]) + if cert["DomainName"] not in sans_summaries: + sans_summaries.append(cert["DomainName"]) + + if "HasAdditionalSubjectAlternativeNames" not in cert: + cert["HasAdditionalSubjectAlternativeNames"] = False + + if not cert.get("ExtendedKeyUsages"): + cert["ExtendedKeyUsages"] = [ + {"Name": "TLS_WEB_SERVER_AUTHENTICATION", "OID": "1.3.6.1.0.1.2.3.0"}, + {"Name": "TLS_WEB_CLIENT_AUTHENTICATION", "OID": "1.3.6.1.0.1.2.3.4"}, + ] + + # remove attributes prior to validation + if not cert.get("Status") == "ISSUED": + attrs = ["CertificateAuthorityArn", "IssuedAt", "NotAfter", "NotBefore", "Serial"] + for attr in attrs: + cert.pop(attr, None) + cert["KeyUsages"] = [] + cert["ExtendedKeyUsages"] = [] + + return result + + +class AcmProvider(AcmApi): + @handler("RequestCertificate", expand=False) + def request_certificate( + self, + context: RequestContext, + request: RequestCertificateRequest, + ) -> RequestCertificateResponse: + response: RequestCertificateResponse = moto.call_moto(context) + + cert_arn = response["CertificateArn"] + backend = acm_models.acm_backends[context.account_id][context.region] + cert = backend._certificates[cert_arn] + if not hasattr(cert, "domain_validation_options"): + cert.domain_validation_options = request.get("DomainValidationOptions") + + return response + + @handler("ListCertificates", expand=False) + def list_certificates( + self, + context: RequestContext, + request: ListCertificatesRequest, + ) 
-> ListCertificatesResponse: + response = moto.call_moto(context) + summaries = response.get("CertificateSummaryList") or [] + for summary in summaries: + if "KeyUsages" in summary: + summary["KeyUsages"] = [ + k["Name"] if isinstance(k, dict) else k for k in summary["KeyUsages"] + ] + if "ExtendedKeyUsages" in summary: + summary["ExtendedKeyUsages"] = [ + k["Name"] if isinstance(k, dict) else k for k in summary["ExtendedKeyUsages"] + ] + return response diff --git a/localstack/services/dynamodb/__init__.py b/localstack-core/localstack/services/apigateway/__init__.py similarity index 100% rename from localstack/services/dynamodb/__init__.py rename to localstack-core/localstack/services/apigateway/__init__.py diff --git a/localstack-core/localstack/services/apigateway/analytics.py b/localstack-core/localstack/services/apigateway/analytics.py new file mode 100644 index 0000000000000..13bd7109358ce --- /dev/null +++ b/localstack-core/localstack/services/apigateway/analytics.py @@ -0,0 +1,5 @@ +from localstack.utils.analytics.metrics import Counter + +invocation_counter = Counter( + namespace="apigateway", name="rest_api_execute", labels=["invocation_type"] +) diff --git a/localstack-core/localstack/services/apigateway/exporter.py b/localstack-core/localstack/services/apigateway/exporter.py new file mode 100644 index 0000000000000..0706e794c1651 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/exporter.py @@ -0,0 +1,341 @@ +import abc +import json +from typing import Type + +from apispec import APISpec + +from localstack.aws.api.apigateway import ListOfModel +from localstack.aws.connect import connect_to +from localstack.utils.time import TIMESTAMP_FORMAT_TZ, timestamp + +from .helpers import OpenAPIExt + +# TODO: +# - handle more extensions +# see the list in OpenAPIExt +# currently handled: +# - x-amazon-apigateway-integration +# + + +class _BaseOpenApiExporter(abc.ABC): + VERSION = None + + def __init__(self): + self.export_formats = {"application/json": "to_dict", "application/yaml": "to_yaml"} + + def _add_models(self, spec: APISpec, models: ListOfModel, base_path: str): + for model in models: + model_def = json.loads(model["schema"]) + self._resolve_refs(model_def, base_path) + spec.components.schema( + component_id=model["name"], + component=model_def, + ) + + def _resolve_refs(self, schema: dict, base_path: str): + if "$ref" in schema: + schema["$ref"] = f"{base_path}/{schema['$ref'].rsplit('/', maxsplit=1)[-1]}" + for value in schema.values(): + if isinstance(value, dict): + self._resolve_refs(value, base_path) + + @staticmethod + def _get_integration(method_integration: dict) -> dict: + fields = { + "type", + "passthroughBehavior", + "requestParameters", + "requestTemplates", + "httpMethod", + "uri", + } + integration = {k: v for k, v in method_integration.items() if k in fields} + integration["type"] = integration["type"].lower() + integration["passthroughBehavior"] = integration["passthroughBehavior"].lower() + if responses := method_integration.get("integrationResponses"): + integration["responses"] = {"default": responses.get("200")} + return integration + + @abc.abstractmethod + def export( + self, + api_id: str, + stage: str, + export_format: str, + with_extension: bool, + account_id: str, + region_name: str, + ) -> str | dict: ... 
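For illustration, a sketch of what the `_get_integration` helper above returns for a typical `methodIntegration` payload (the payload values here are hypothetical, not taken from the diff):

```python
method_integration = {
    "type": "AWS_PROXY",
    "httpMethod": "POST",
    "uri": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/my-func/invocations",
    "passthroughBehavior": "WHEN_NO_MATCH",
    "integrationResponses": {"200": {"statusCode": "200"}},
}

integration = _BaseOpenApiExporter._get_integration(method_integration)
# only whitelisted fields survive, type and passthroughBehavior are lowercased,
# and integrationResponses collapse into a single "default" response:
# {
#     "type": "aws_proxy",
#     "httpMethod": "POST",
#     "uri": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/my-func/invocations",
#     "passthroughBehavior": "when_no_match",
#     "responses": {"default": {"statusCode": "200"}},
# }
```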
+
+    @abc.abstractmethod
+    def _add_paths(self, spec: APISpec, resources: dict, with_extension: bool):
+        """
+        This method iterates over the different REST resources and their methods to add the APISpec paths using the
+        `apispec` module.
+        The path format is different between Swagger (OpenAPI 2.0) and OpenAPI 3.0
+        :param spec: an APISpec object representing the exported API Gateway REST API
+        :param resources: the API Gateway REST API resources (methods, methods integrations, responses...)
+        :param with_extension: flag to add the custom OpenAPI extension `apigateway`, allowing integrations or
+        authorizers, for example, to be imported properly (all the `x-amazon` fields contained in `OpenAPIExt`).
+        :return: None
+        """
+        ...
+
+
+class _OpenApiSwaggerExporter(_BaseOpenApiExporter):
+    VERSION = "2.0"
+
+    def _add_paths(self, spec, resources, with_extension):
+        for item in resources.get("items"):
+            path = item.get("path")
+            for method, method_config in item.get("resourceMethods", {}).items():
+                method = method.lower()
+
+                method_integration = method_config.get("methodIntegration", {})
+                integration_responses = method_integration.get("integrationResponses", {})
+                method_responses = method_config.get("methodResponses")
+                responses = {}
+                produces = set()
+                for status_code, values in method_responses.items():
+                    response = {"description": f"{status_code} response"}
+                    if response_parameters := values.get("responseParameters"):
+                        headers = {}
+                        for parameter in response_parameters:
+                            in_, name = parameter.removeprefix("method.response.").split(".")
+                            # TODO: other type?
+                            if in_ == "header":
+                                headers[name] = {"type": "string"}
+
+                        if headers:
+                            response["headers"] = headers
+                    if response_models := values.get("responseModels"):
+                        for content_type, model_name in response_models.items():
+                            produces.add(content_type)
+                            response["schema"] = model_name
+                    if integration_response := integration_responses.get(status_code, {}):
+                        produces.update(integration_response.get("responseTemplates", {}).keys())
+
+                    responses[status_code] = response
+
+                request_parameters = method_config.get("requestParameters", {})
+                parameters = []
+                for parameter, required in request_parameters.items():
+                    in_, name = parameter.removeprefix("method.request.").split(".")
+                    in_ = in_ if in_ != "querystring" else "query"
+                    parameters.append(
+                        {"name": name, "in": in_, "required": required, "type": "string"}
+                    )
+
+                request_models = method_config.get("requestModels", {})
+                for model_name in request_models.values():
+                    parameter = {
+                        "in": "body",
+                        "name": model_name,
+                        "required": True,
+                        "schema": {"$ref": f"#/definitions/{model_name}"},
+                    }
+                    parameters.append(parameter)
+
+                method_operations = {"responses": responses}
+                if parameters:
+                    method_operations["parameters"] = parameters
+                if produces:
+                    method_operations["produces"] = list(produces)
+                if content_types := request_models | method_integration.get("requestTemplates", {}):
+                    method_operations["consumes"] = list(content_types.keys())
+                if operation_name := method_config.get("operationName"):
+                    method_operations["operationId"] = operation_name
+                if with_extension and method_integration:
+                    method_operations[OpenAPIExt.INTEGRATION] = self._get_integration(
+                        method_integration
+                    )
+
+                spec.path(path=path, operations={method: method_operations})
+
+    def export(
+        self,
+        api_id: str,
+        stage: str,
+        export_format: str,
+        with_extension: bool,
+        account_id: str,
+        region_name: str,
+    ) -> str:
+        """
+        https://github.com/OAI/OpenAPI-Specification/blob/main/versions/2.0.md
+        """
+        
apigateway_client = connect_to( + aws_access_key_id=account_id, region_name=region_name + ).apigateway + + rest_api = apigateway_client.get_rest_api(restApiId=api_id) + resources = apigateway_client.get_resources(restApiId=api_id) + models = apigateway_client.get_models(restApiId=api_id) + + info = {} + if (description := rest_api.get("description")) is not None: + info["description"] = description + + spec = APISpec( + title=rest_api.get("name"), + version=rest_api.get("version") + or timestamp(rest_api.get("createdDate"), format=TIMESTAMP_FORMAT_TZ), + info=info, + openapi_version=self.VERSION, + basePath=f"/{stage}", + schemes=["https"], + ) + + self._add_paths(spec, resources, with_extension) + self._add_models(spec, models["items"], "#/definitions") + + response = getattr(spec, self.export_formats.get(export_format))() + if ( + with_extension + and isinstance(response, dict) + and (binary_media_types := rest_api.get("binaryMediaTypes")) is not None + ): + response[OpenAPIExt.BINARY_MEDIA_TYPES] = binary_media_types + + return response + + +class _OpenApiOAS30Exporter(_BaseOpenApiExporter): + VERSION = "3.0.1" + + def _add_paths(self, spec, resources, with_extension): + for item in resources.get("items"): + path = item.get("path") + for method, method_config in item.get("resourceMethods", {}).items(): + method = method.lower() + + method_integration = method_config.get("methodIntegration", {}) + integration_responses = method_integration.get("integrationResponses", {}) + method_responses = method_config.get("methodResponses") + responses = {} + produces = set() + for status_code, values in method_responses.items(): + response = {"description": f"{status_code} response"} + content = {} + if response_parameters := values.get("responseParameters"): + headers = {} + for parameter in response_parameters: + in_, name = parameter.removeprefix("method.response.").split(".") + # TODO: other type? query? 
+ if in_ == "header": + headers[name] = {"schema": {"type": "string"}} + + if headers: + response["headers"] = headers + if response_models := values.get("responseModels"): + for content_type, model_name in response_models.items(): + content[content_type] = { + "schema": {"$ref": f"#/components/schemas/{model_name}"} + } + if integration_response := integration_responses.get(status_code, {}): + produces.update(integration_response.get("responseTemplates", {}).keys()) + + response["content"] = content + responses[status_code] = response + + request_parameters = method_config.get("requestParameters", {}) + parameters = [] + for parameter, required in request_parameters.items(): + in_, name = parameter.removeprefix("method.request.").split(".") + in_ = in_ if in_ != "querystring" else "query" + parameters.append({"name": name, "in": in_, "schema": {"type": "string"}}) + + request_body = {"content": {}} + request_models = method_config.get("requestModels", {}) + for content_type, model_name in request_models.items(): + request_body["content"][content_type] = { + "schema": {"$ref": f"#/components/schemas/{model_name}"}, + } + request_body["required"] = True + + method_operations = {"responses": responses} + if parameters: + method_operations["parameters"] = parameters + if request_body["content"]: + method_operations["requestBody"] = request_body + if operation_name := method_config.get("operationName"): + method_operations["operationId"] = operation_name + if with_extension and method_integration: + method_operations[OpenAPIExt.INTEGRATION] = self._get_integration( + method_integration + ) + + spec.path(path=path, operations={method: method_operations}) + + def export( + self, + api_id: str, + stage: str, + export_format: str, + with_extension: bool, + account_id: str, + region_name: str, + ) -> str: + """ + https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md + """ + apigateway_client = connect_to( + aws_access_key_id=account_id, region_name=region_name + ).apigateway + + rest_api = apigateway_client.get_rest_api(restApiId=api_id) + resources = apigateway_client.get_resources(restApiId=api_id) + models = apigateway_client.get_models(restApiId=api_id) + + info = {} + + if (description := rest_api.get("description")) is not None: + info["description"] = description + + spec = APISpec( + title=rest_api.get("name"), + version=rest_api.get("version") + or timestamp(rest_api.get("createdDate"), format=TIMESTAMP_FORMAT_TZ), + info=info, + openapi_version=self.VERSION, + servers=[{"variables": {"basePath": {"default": stage}}}], + ) + + self._add_paths(spec, resources, with_extension) + self._add_models(spec, models["items"], "#/components/schemas") + + response = getattr(spec, self.export_formats.get(export_format))() + if isinstance(response, dict): + if "components" not in response: + response["components"] = {} + + if ( + with_extension + and (binary_media_types := rest_api.get("binaryMediaTypes")) is not None + ): + response[OpenAPIExt.BINARY_MEDIA_TYPES] = binary_media_types + + return response + + +class OpenApiExporter: + exporters: dict[str, Type[_BaseOpenApiExporter]] + + def __init__(self): + self.exporters = {"swagger": _OpenApiSwaggerExporter, "oas30": _OpenApiOAS30Exporter} + + def export_api( + self, + api_id: str, + stage: str, + export_type: str, + account_id: str, + region_name: str, + export_format: str = "application/json", + with_extension=False, + ) -> str: + exporter = self.exporters.get(export_type)() + return exporter.export( + api_id, stage, 
export_format, with_extension, account_id, region_name + ) diff --git a/localstack-core/localstack/services/apigateway/helpers.py b/localstack-core/localstack/services/apigateway/helpers.py new file mode 100644 index 0000000000000..6cb103d50f637 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/helpers.py @@ -0,0 +1,1010 @@ +import contextlib +import copy +import hashlib +import json +import logging +from typing import List, Optional, TypedDict, Union +from urllib import parse as urlparse + +from jsonpatch import apply_patch +from jsonpointer import JsonPointerException +from moto.apigateway import models as apigw_models +from moto.apigateway.models import APIGatewayBackend, Integration, Resource +from moto.apigateway.models import RestAPI as MotoRestAPI +from moto.apigateway.utils import ApigwAuthorizerIdentifier, ApigwResourceIdentifier + +from localstack import config +from localstack.aws.api import RequestContext +from localstack.aws.api.apigateway import ( + Authorizer, + ConnectionType, + DocumentationPart, + DocumentationPartLocation, + IntegrationType, + Model, + NotFoundException, + PutRestApiRequest, + RequestValidator, +) +from localstack.constants import ( + APPLICATION_JSON, + AWS_REGION_US_EAST_1, + DEFAULT_AWS_ACCOUNT_ID, + PATH_USER_REQUEST, +) +from localstack.services.apigateway.legacy.context import ApiInvocationContext +from localstack.services.apigateway.models import ( + ApiGatewayStore, + RestApiContainer, + apigateway_stores, +) +from localstack.utils import common +from localstack.utils.json import parse_json_or_yaml +from localstack.utils.strings import short_uid, to_bytes, to_str +from localstack.utils.urls import localstack_host + +LOG = logging.getLogger(__name__) + +REQUEST_TIME_DATE_FORMAT = "%d/%b/%Y:%H:%M:%S %z" + +INVOKE_TEST_LOG_TEMPLATE = """Execution log for request {request_id} + {formatted_date} : Starting execution for request: {request_id} + {formatted_date} : HTTP Method: {http_method}, Resource Path: {resource_path} + {formatted_date} : Method request path: {request_path} + {formatted_date} : Method request query string: {query_string} + {formatted_date} : Method request headers: {request_headers} + {formatted_date} : Method request body before transformations: {request_body} + {formatted_date} : Method response body after transformations: {response_body} + {formatted_date} : Method response headers: {response_headers} + {formatted_date} : Successfully completed execution + {formatted_date} : Method completed with status: {status_code} + """ + +EMPTY_MODEL = "Empty" +ERROR_MODEL = "Error" + + +# TODO: we could actually parse the schema to get TypedDicts with the proper schema/types for each properties +class OpenAPIExt: + """ + Represents the specific OpenAPI extensions for API Gateway + https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions.html + """ + + ANY_METHOD = "x-amazon-apigateway-any-method" + CORS = "x-amazon-apigateway-cors" + API_KEY_SOURCE = "x-amazon-apigateway-api-key-source" + AUTH = "x-amazon-apigateway-auth" + AUTHORIZER = "x-amazon-apigateway-authorizer" + AUTHTYPE = "x-amazon-apigateway-authtype" + BINARY_MEDIA_TYPES = "x-amazon-apigateway-binary-media-types" + DOCUMENTATION = "x-amazon-apigateway-documentation" + ENDPOINT_CONFIGURATION = "x-amazon-apigateway-endpoint-configuration" + GATEWAY_RESPONSES = "x-amazon-apigateway-gateway-responses" + IMPORTEXPORT_VERSION = "x-amazon-apigateway-importexport-version" + INTEGRATION = "x-amazon-apigateway-integration" + INTEGRATIONS 
= "x-amazon-apigateway-integrations" # used in components + MINIMUM_COMPRESSION_SIZE = "x-amazon-apigateway-minimum-compression-size" + POLICY = "x-amazon-apigateway-policy" + REQUEST_VALIDATOR = "x-amazon-apigateway-request-validator" + REQUEST_VALIDATORS = "x-amazon-apigateway-request-validators" + TAG_VALUE = "x-amazon-apigateway-tag-value" + + +class AuthorizerConfig(TypedDict): + authorizer: Authorizer + authorization_scopes: Optional[list[str]] + + +# TODO: make the CRUD operations in this file generic for the different model types (authorizes, validators, ...) + + +def get_apigateway_store(context: RequestContext) -> ApiGatewayStore: + return apigateway_stores[context.account_id][context.region] + + +def get_apigateway_store_for_invocation(context: ApiInvocationContext) -> ApiGatewayStore: + account_id = context.account_id or DEFAULT_AWS_ACCOUNT_ID + region_name = context.region_name or AWS_REGION_US_EAST_1 + return apigateway_stores[account_id][region_name] + + +def get_moto_backend(account_id: str, region: str) -> APIGatewayBackend: + return apigw_models.apigateway_backends[account_id][region] + + +def get_moto_rest_api(context: RequestContext, rest_api_id: str) -> MotoRestAPI: + moto_backend = apigw_models.apigateway_backends[context.account_id][context.region] + if rest_api := moto_backend.apis.get(rest_api_id): + return rest_api + else: + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + +def get_rest_api_container(context: RequestContext, rest_api_id: str) -> RestApiContainer: + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + return rest_api_container + + +class OpenAPISpecificationResolver: + def __init__(self, document: dict, rest_api_id: str, allow_recursive=True): + self.document = document + self.allow_recursive = allow_recursive + # cache which maps known refs to part of the document + self._cache = {} + self._refpaths = ["#"] + host_definition = localstack_host() + self._base_url = f"{config.get_protocol()}://apigateway.{host_definition.host_and_port()}/restapis/{rest_api_id}/models/" + + def _is_ref(self, item) -> bool: + return isinstance(item, dict) and "$ref" in item + + def _is_internal_ref(self, refpath) -> bool: + return str(refpath).startswith("#/") + + @property + def current_path(self): + return self._refpaths[-1] + + @contextlib.contextmanager + def _pathctx(self, refpath: str): + if not self._is_internal_ref(refpath): + refpath = "/".join((self.current_path, refpath)) + + self._refpaths.append(refpath) + yield + self._refpaths.pop() + + def _resolve_refpath(self, refpath: str) -> dict: + if refpath in self._refpaths and not self.allow_recursive: + raise Exception("recursion detected with allow_recursive=False") + + # We don't resolve the Model definition, we will return a absolute reference to the model like AWS + # When validating the schema, we will need to resolve the $ref there + # Because if we resolved all $ref in schema, it can lead to circular references in complex schemas + if self.current_path.startswith("#/definitions") or self.current_path.startswith( + "#/components/schemas" + ): + return {"$ref": f"{self._base_url}{refpath.rsplit('/', maxsplit=1)[-1]}"} + + # We should not resolve the Model either, because we need its name to set it to the Request/ResponseModels, + # it just makes our job more difficult to retrieve the 
+        # We still need to verify that the ref exists
+        is_schema = self.current_path.endswith("schema")
+
+        if refpath in self._cache and not is_schema:
+            return self._cache.get(refpath)
+
+        with self._pathctx(refpath):
+            if self._is_internal_ref(self.current_path):
+                cur = self.document
+            else:
+                raise NotImplementedError("External references not yet supported.")
+
+            for step in self.current_path.split("/")[1:]:
+                cur = cur.get(step)
+
+            self._cache[self.current_path] = cur
+
+            if is_schema:
+                # If the $ref doesn't exist in our schema, return None, otherwise return the ref
+                return {"$ref": refpath} if cur else None
+
+            return cur
+
+    def _namespaced_resolution(self, namespace: str, data: Union[dict, list]) -> Union[dict, list]:
+        with self._pathctx(namespace):
+            return self._resolve_references(data)
+
+    def _resolve_references(self, data) -> Union[dict, list]:
+        if self._is_ref(data):
+            return self._resolve_refpath(data["$ref"])
+
+        if isinstance(data, dict):
+            for k, v in data.items():
+                data[k] = self._namespaced_resolution(k, v)
+        elif isinstance(data, list):
+            for i, v in enumerate(data):
+                data[i] = self._namespaced_resolution(str(i), v)
+
+        return data
+
+    def resolve_references(self) -> dict:
+        return self._resolve_references(self.document)
+
+
+class ModelResolver:
+    """
+    This class allows a Model to use recursive and circular references to other Models.
+    To be able to JSON dump Models, AWS will not resolve Models but will use their absolute $ref instead.
+    When validating, we need to resolve those references, using JSON schema tricks to allow recursion.
+    See: https://json-schema.org/understanding-json-schema/structuring.html#recursion
+
+    To allow a simpler structure, we're not directly replacing the reference with the schema, but instead creating
+    a map of all used schemas in $defs, as advised on JSON schema:
+    See: https://json-schema.org/understanding-json-schema/structuring.html#defs
+
+    This allows us to not render every sub schema/model, but instead keep a clean map of used schemas.
+ """ + + def __init__(self, rest_api_container: RestApiContainer, model_name: str): + self.rest_api_container = rest_api_container + self.model_name = model_name + self._deps = {} + self._current_resolving_name = None + + @contextlib.contextmanager + def _resolving_ctx(self, current_resolving_name: str): + self._current_resolving_name = current_resolving_name + yield + self._current_resolving_name = None + + def resolve_model(self, model: dict) -> dict | None: + resolved_model = copy.deepcopy(model) + model_names = set() + + def _look_for_ref(sub_model): + for key, value in sub_model.items(): + if key == "$ref": + ref_name = value.rsplit("/", maxsplit=1)[-1] + if ref_name == self.model_name: + # if we reference our main Model, use the # for recursive access + sub_model[key] = "#" + continue + # otherwise, this Model will be available in $defs + sub_model[key] = f"#/$defs/{ref_name}" + + if ref_name != self._current_resolving_name: + # add the ref to the next ref to resolve and to $deps + model_names.add(ref_name) + + elif isinstance(value, dict): + _look_for_ref(value) + elif isinstance(value, list): + for val in value: + if isinstance(val, dict): + _look_for_ref(val) + + if isinstance(resolved_model, dict): + _look_for_ref(resolved_model) + + if model_names: + for ref_model_name in model_names: + if ref_model_name in self._deps: + continue + + def_resolved, was_resolved = self._get_resolved_submodel(model_name=ref_model_name) + + if not def_resolved: + LOG.debug( + "Failed to resolve submodel %s for model %s", + ref_model_name, + self._current_resolving_name, + ) + return + # if the ref was already resolved, we copy the result to not alter the already resolved schema + if was_resolved: + def_resolved = copy.deepcopy(def_resolved) + + self._remove_self_ref(def_resolved) + + if "$deps" in def_resolved: + # this will happen only if the schema was already resolved, otherwise the deps would be in _deps + # remove own definition in case of recursive / circular Models + def_resolved["$defs"].pop(self.model_name, None) + # remove the $defs from the schema, we don't want nested $defs + def_resolved_defs = def_resolved.pop("$defs") + # merge the resolved sub model $defs to the main schema + self._deps.update(def_resolved_defs) + + # add the dependencies to the global $deps + self._deps[ref_model_name] = def_resolved + + return resolved_model + + def _remove_self_ref(self, resolved_schema: dict): + for key, value in resolved_schema.items(): + if key == "$ref": + ref_name = value.rsplit("/", maxsplit=1)[-1] + if ref_name == self.model_name: + resolved_schema[key] = "#" + + elif isinstance(value, dict): + self._remove_self_ref(value) + + def get_resolved_model(self) -> dict | None: + if not (resolved_model := self.rest_api_container.resolved_models.get(self.model_name)): + model = self.rest_api_container.models.get(self.model_name) + if not model: + return None + schema = json.loads(model["schema"]) + resolved_model = self.resolve_model(schema) + if not resolved_model: + return None + # attach the resolved dependencies of the schema + if self._deps: + resolved_model["$defs"] = self._deps + self.rest_api_container.resolved_models[self.model_name] = resolved_model + + return resolved_model + + def _get_resolved_submodel(self, model_name: str) -> tuple[dict | None, bool | None]: + was_resolved = True + if not (resolved_model := self.rest_api_container.resolved_models.get(model_name)): + was_resolved = False + model = self.rest_api_container.models.get(model_name) + if not model: + LOG.warning( + "Error 
+                    "Error while validating the request body, could not find the Model: '%s'",
+                    model_name,
+                )
+                return None, was_resolved
+            schema = json.loads(model["schema"])
+
+            with self._resolving_ctx(model_name):
+                resolved_model = self.resolve_model(schema)
+
+        return resolved_model, was_resolved
+
+
+def resolve_references(data: dict, rest_api_id, allow_recursive=True) -> dict:
+    resolver = OpenAPISpecificationResolver(
+        data, allow_recursive=allow_recursive, rest_api_id=rest_api_id
+    )
+    return resolver.resolve_references()
+
+
+# ---------------
+# UTIL FUNCTIONS
+# ---------------
+
+
+def path_based_url(api_id: str, stage_name: str, path: str) -> str:
+    """Return URL for inbound API gateway for given API ID, stage name, and path"""
+    pattern = "%s/restapis/{api_id}/{stage_name}/%s{path}" % (
+        config.external_service_url(),
+        PATH_USER_REQUEST,
+    )
+    return pattern.format(api_id=api_id, stage_name=stage_name, path=path)
+
+
+def localstack_path_based_url(api_id: str, stage_name: str, path: str) -> str:
+    """Return URL for inbound API gateway for given API ID, stage name, and path on the _aws namespace"""
+    return f"{config.external_service_url()}/_aws/execute-api/{api_id}/{stage_name}{path}"
+
+
+def host_based_url(rest_api_id: str, path: str, stage_name: str = None):
+    """Return URL for inbound API gateway for given API ID, stage name, and path with custom dns
+    format"""
+    pattern = "{endpoint}{stage}{path}"
+    stage = stage_name and f"/{stage_name}" or ""
+    return pattern.format(endpoint=get_execute_api_endpoint(rest_api_id), stage=stage, path=path)
+
+
+def get_execute_api_endpoint(api_id: str, protocol: str | None = None) -> str:
+    host = localstack_host()
+    protocol = protocol or config.get_protocol()
+    return f"{protocol}://{api_id}.execute-api.{host.host_and_port()}"
+
+
+def apply_json_patch_safe(subject, patch_operations, in_place=True, return_list=False):
+    """Apply JSONPatch operations, using some customizations for compatibility with API GW
+    resources."""
+
+    results = []
+    patch_operations = (
+        [patch_operations] if isinstance(patch_operations, dict) else patch_operations
+    )
+    for operation in patch_operations:
+        try:
+            # special case: for "replace" operations, assume "" as the default value
+            if operation["op"] == "replace" and operation.get("value") is None:
+                operation["value"] = ""
+
+            if operation["op"] != "remove" and operation.get("value") is None:
+                LOG.info('Missing "value" in JSONPatch operation for %s: %s', subject, operation)
+                continue
+
+            if operation["op"] == "add":
+                path = operation["path"]
+                target = subject.get(path.strip("/"))
+                target = target or common.extract_from_jsonpointer_path(subject, path)
+                if not isinstance(target, list):
+                    # for `add` operation, if the target does not exist, set it to an empty dict (default behaviour)
+                    # previous behaviour was an empty list. Revisit this if issues arise.
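+                    # e.g. an {"op": "add", "path": "/requestParameters/foo"} operation on a
+                    # subject without "requestParameters" first gets an empty dict assigned at
+                    # that path, so the parent containers exist when apply_patch runs below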
+                    # TODO: we are assigning a value, even if not `in_place=True`
+                    common.assign_to_path(subject, path, value={}, delimiter="/")
+
+                target = common.extract_from_jsonpointer_path(subject, path)
+                if isinstance(target, list) and not path.endswith("/-"):
+                    # if "path" is an attribute name pointing to an array in "subject", and we're running
+                    # an "add" operation, then we should use the standard-compliant notation "/path/-"
+                    operation["path"] = f"{path}/-"
+
+            if operation["op"] == "remove":
+                path = operation["path"]
+                common.assign_to_path(subject, path, value={}, delimiter="/")
+
+            result = apply_patch(subject, [operation], in_place=in_place)
+            if not in_place:
+                subject = result
+            results.append(result)
+        except JsonPointerException:
+            pass  # path cannot be found - ignore
+        except Exception as e:
+            if "non-existent object" in str(e):
+                if operation["op"] == "replace":
+                    # fall back to an ADD operation if the REPLACE fails
+                    operation["op"] = "add"
+                    result = apply_patch(subject, [operation], in_place=in_place)
+                    results.append(result)
+                    continue
+                if operation["op"] == "remove" and isinstance(subject, dict):
+                    result = subject.pop(operation["path"], None)
+                    results.append(result)
+                    continue
+            raise
+    if return_list:
+        return results
+    return (results or [subject])[-1]
+
+
+def add_documentation_parts(rest_api_container, documentation):
+    for doc_part in documentation.get("documentationParts", []):
+        entity_id = short_uid()[:6]
+        location = doc_part["location"]
+        rest_api_container.documentation_parts[entity_id] = DocumentationPart(
+            id=entity_id,
+            location=DocumentationPartLocation(
+                type=location.get("type"),
+                path=location.get("path", "/")
+                if location.get("type") not in ["API", "MODEL"]
+                else None,
+                method=location.get("method"),
+                statusCode=location.get("statusCode"),
+                name=location.get("name"),
+            ),
+            properties=doc_part["properties"],
+        )
+
+
+def import_api_from_openapi_spec(
+    rest_api: MotoRestAPI, context: RequestContext, request: PutRestApiRequest
+) -> tuple[MotoRestAPI, list[str]]:
+    """Import an API from an OpenAPI spec document"""
+    body = parse_json_or_yaml(to_str(request["body"].read()))
+
+    warnings = []
+
+    # TODO: there is an issue with the botocore specs, so the parameters don't get populated as they should.
+    # Once this is fixed, we can uncomment the code below instead of taking the parameters from the context request.
+    # query_params = request.get("parameters") or {}
+    query_params: dict = context.request.values.to_dict()
+
+    resolved_schema = resolve_references(copy.deepcopy(body), rest_api_id=rest_api.id)
+    account_id = context.account_id
+    region_name = context.region
+
+    # TODO:
+    # 1. validate the "mode" property of the spec document, "merge" or "overwrite", and properly apply it
+    #    for now, it only considers it for the binaryMediaTypes
+    # 2. validate the document type, "swagger" or "openapi"
+    mode = request.get("mode", "merge")
+
+    rest_api.version = (
+        str(version) if (version := resolved_schema.get("info", {}).get("version")) else None
+    )
+    # XXX: for some reason this makes cf tests fail, which is why it is commented out.
+ # test_cfn_handle_serverless_api_resource + # rest_api.name = resolved_schema.get("info", {}).get("title") + rest_api.description = resolved_schema.get("info", {}).get("description") + + # authorizers map to avoid duplication + authorizers = {} + + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis[rest_api.id] + + def is_api_key_required(path_payload: dict) -> bool: + # TODO: consolidate and refactor with `create_authorizer`, duplicate logic for now + if not (security_schemes := path_payload.get("security")): + return False + + for security_scheme in security_schemes: + for security_scheme_name in security_scheme.keys(): + # $.securityDefinitions is Swagger 2.0 + # $.components.SecuritySchemes is OpenAPI 3.0 + security_definitions = resolved_schema.get( + "securityDefinitions" + ) or resolved_schema.get("components", {}).get("securitySchemes", {}) + if security_scheme_name in security_definitions: + security_config = security_definitions.get(security_scheme_name) + if ( + OpenAPIExt.AUTHORIZER not in security_config + and security_config.get("type") == "apiKey" + and security_config.get("name", "").lower() == "x-api-key" + ): + return True + return False + + def create_authorizers(security_schemes: dict) -> None: + for security_scheme_name, security_config in security_schemes.items(): + aws_apigateway_authorizer = security_config.get(OpenAPIExt.AUTHORIZER, {}) + if not aws_apigateway_authorizer: + continue + + if security_scheme_name in authorizers: + continue + + authorizer_type = aws_apigateway_authorizer.get("type", "").upper() + # TODO: do we need validation of resources here? + authorizer = Authorizer( + id=ApigwAuthorizerIdentifier( + account_id, region_name, security_scheme_name + ).generate(), + name=security_scheme_name, + type=authorizer_type, + authorizerResultTtlInSeconds=aws_apigateway_authorizer.get( + "authorizerResultTtlInSeconds", None + ), + ) + if provider_arns := aws_apigateway_authorizer.get("providerARNs"): + authorizer["providerARNs"] = provider_arns + if auth_type := security_config.get(OpenAPIExt.AUTHTYPE): + authorizer["authType"] = auth_type + if authorizer_uri := aws_apigateway_authorizer.get("authorizerUri"): + authorizer["authorizerUri"] = authorizer_uri + if authorizer_credentials := aws_apigateway_authorizer.get("authorizerCredentials"): + authorizer["authorizerCredentials"] = authorizer_credentials + if authorizer_type in ("TOKEN", "COGNITO_USER_POOLS"): + header_name = security_config.get("name") + authorizer["identitySource"] = f"method.request.header.{header_name}" + elif identity_source := aws_apigateway_authorizer.get("identitySource"): + # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-authorizer.html + # Applicable for the authorizer of the request and jwt type only + authorizer["identitySource"] = identity_source + if identity_validation_expression := aws_apigateway_authorizer.get( + "identityValidationExpression" + ): + authorizer["identityValidationExpression"] = identity_validation_expression + + rest_api_container.authorizers[authorizer["id"]] = authorizer + + authorizers[security_scheme_name] = authorizer + + def get_authorizer(path_payload: dict) -> Optional[AuthorizerConfig]: + if not (security_schemes := path_payload.get("security")): + return None + + for security_scheme in security_schemes: + for security_scheme_name, scopes in security_scheme.items(): + if authorizer := authorizers.get(security_scheme_name): + return AuthorizerConfig(authorizer=authorizer, 
authorization_scopes=scopes)
+
+    def get_or_create_path(abs_path: str, base_path: str):
+        parts = abs_path.rstrip("/").replace("//", "/").split("/")
+        parent_id = ""
+        if len(parts) > 1:
+            parent_path = "/".join(parts[:-1])
+            parent = get_or_create_path(parent_path, base_path=base_path)
+            parent_id = parent.id
+        if existing := [
+            r
+            for r in rest_api.resources.values()
+            if r.path_part == (parts[-1] or "/") and (r.parent_id or "") == (parent_id or "")
+        ]:
+            return existing[0]
+
+        # construct the relative path (without the base path), then add the resources and methods for this path
+        rel_path = abs_path.removeprefix(base_path)
+        return add_path_methods(rel_path, parts, parent_id=parent_id)
+
+    def add_path_methods(rel_path: str, parts: List[str], parent_id=""):
+        rel_path = rel_path or "/"
+        child_id = ApigwResourceIdentifier(account_id, region_name, parent_id, rel_path).generate()
+
+        # Create a `Resource` for the passed `rel_path`
+        resource = Resource(
+            account_id=rest_api.account_id,
+            resource_id=child_id,
+            region_name=rest_api.region_name,
+            api_id=rest_api.id,
+            path_part=parts[-1] or "/",
+            parent_id=parent_id,
+        )
+
+        paths_dict = resolved_schema["paths"]
+        method_paths = paths_dict.get(rel_path, {})
+        # Iterate over each field of the `path` to try to find the methods defined
+        for field, field_schema in method_paths.items():
+            if field in [
+                "parameters",
+                "servers",
+                "description",
+                "summary",
+                "$ref",
+            ] or not isinstance(field_schema, dict):
+                LOG.warning("Ignoring unsupported field %s in path %s", field, rel_path)
+                # TODO: check if we should skip parameters; those are global parameters applied to every route,
+                # but they can be overridden at the operation level
+                continue
+
+            method_name = field.upper()
+            if method_name == OpenAPIExt.ANY_METHOD.upper():
+                method_name = "ANY"
+
+            # Create the `Method` resource for each method path
+            method_resource = create_method_resource(resource, method_name, field_schema)
+
+            # Get the `Method` requestParameters and requestModels
+            request_parameters_schema = field_schema.get("parameters", [])
+            request_parameters = {}
+            request_models = {}
+            if request_parameters_schema:
+                for req_param_data in request_parameters_schema:
+                    # For Swagger 2.0, possible values for `in` from the specs are "query", "header", "path",
+                    # "formData" or "body".
+                    # For OpenAPI 3.0, values are "query", "header", "path" or "cookie".
+                    # Only "path", "header" and "query" are supported in API Gateway for requestParameters
+                    # "body" is mapped to a requestModel
+                    param_location = req_param_data.get("in")
+                    param_name = req_param_data.get("name")
+                    param_required = req_param_data.get("required", False)
+                    if param_location in ("query", "header", "path"):
+                        if param_location == "query":
+                            param_location = "querystring"
+
+                        request_parameters[f"method.request.{param_location}.{param_name}"] = (
+                            param_required
+                        )
+
+                    elif param_location == "body":
+                        request_models = {APPLICATION_JSON: param_name}
+
+                    else:
+                        LOG.warning(
+                            "Ignoring unsupported requestParameters/requestModels location value for %s: %s",
+                            param_name,
+                            param_location,
+                        )
+                        continue
+
+            # this replaces 'body' in Parameters for OpenAPI 3.0, a requestBody Object
+            # https://swagger.io/specification/v3/#request-body-object
+            if request_models_schema := field_schema.get("requestBody"):
+                model_ref = None
+                for content_type, media_type in request_models_schema.get("content", {}).items():
+                    # we're iterating over the Media Type object:
+                    # https://swagger.io/specification/v3/#media-type-object
+                    if content_type == APPLICATION_JSON:
+                        model_ref = media_type.get("schema", {}).get("$ref")
+                        continue
+                    LOG.warning(
+                        "Found '%s' content-type for the request model for path '%s' and method '%s', not adding the model as currently not supported",
+                        content_type,
+                        rel_path,
+                        method_name,
+                    )
+                if model_ref:
+                    model_schema = model_ref.rsplit("/", maxsplit=1)[-1]
+                    request_models = {APPLICATION_JSON: model_schema}
+
+            method_resource.request_models = request_models or None
+
+            # check if there's a request validator set in the method
+            request_validator_name = field_schema.get(
+                OpenAPIExt.REQUEST_VALIDATOR, default_req_validator_name
+            )
+            if request_validator_name:
+                if not (
+                    req_validator_id := request_validator_name_id_map.get(request_validator_name)
+                ):
+                    # Might raise an exception here if we properly validate the template
+                    LOG.warning(
+                        "A validator ('%s') was referenced for %s.(%s), but is not defined",
+                        request_validator_name,
+                        rel_path,
+                        method_name,
+                    )
+                method_resource.request_validator_id = req_validator_id
+
+            # we check if there's a path parameter; AWS adds the requestParameter automatically
+            resource_path_part = parts[-1].strip("/")
+            if is_variable_path(resource_path_part) and not is_greedy_path(resource_path_part):
+                path_parameter = resource_path_part[1:-1]  # remove the curly braces
+                request_parameters[f"method.request.path.{path_parameter}"] = True
+
+            method_resource.request_parameters = request_parameters or None
+
+            # Create the `MethodResponse` for the previously created `Method`
+            method_responses = field_schema.get("responses", {})
+            for method_status_code, method_response in method_responses.items():
+                method_status_code = str(method_status_code)
+                method_response_model = None
+                model_ref = None
+                # separating the two different versions, Swagger (2.0) and OpenAPI 3.0
+                if "schema" in method_response:  # this is Swagger
+                    model_ref = method_response["schema"].get("$ref")
+                elif "content" in method_response:  # this is OpenAPI 3.0
+                    for content_type, media_type in method_response["content"].items():
+                        # we're iterating over the Media Type object:
+                        # https://swagger.io/specification/v3/#media-type-object
+                        if content_type == APPLICATION_JSON:
+                            model_ref = media_type.get("schema", {}).get("$ref")
+                            continue
+                        LOG.warning(
+                            "Found '%s' content-type for the MethodResponse model for path '%s' and method '%s', not adding the model as
currently not supported", + content_type, + rel_path, + method_name, + ) + + if model_ref: + model_schema = model_ref.rsplit("/", maxsplit=1)[-1] + + method_response_model = {APPLICATION_JSON: model_schema} + + method_response_parameters = {} + if response_param_headers := method_response.get("headers"): + for header, header_info in response_param_headers.items(): + # TODO: make use of `header_info` + method_response_parameters[f"method.response.header.{header}"] = False + + method_resource.create_response( + method_status_code, + method_response_model, + method_response_parameters or None, + ) + + # Create the `Integration` for the previously created `Method` + method_integration = field_schema.get(OpenAPIExt.INTEGRATION, {}) + + integration_type = ( + i_type.upper() if (i_type := method_integration.get("type")) else None + ) + + match integration_type: + case "AWS_PROXY": + # if the integration is AWS_PROXY with lambda, the only accepted integration method is POST + integration_method = "POST" + case _: + integration_method = ( + method_integration.get("httpMethod") or method_name + ).upper() + + connection_type = ( + ConnectionType.INTERNET + if integration_type in (IntegrationType.HTTP, IntegrationType.HTTP_PROXY) + else None + ) + + if integration_request_parameters := method_integration.get("requestParameters"): + validated_parameters = {} + for k, v in integration_request_parameters.items(): + if isinstance(v, str): + validated_parameters[k] = v + else: + # TODO This fixes for boolean serialization. We should validate how other types behave + value = str(v).lower() + warnings.append( + "Invalid format for 'requestParameters'. Expected type string for property " + f"'{k}' of resource '{resource.get_path()}' and method '{method_name}' but got '{value}'" + ) + + integration_request_parameters = validated_parameters + + integration = Integration( + http_method=integration_method, + uri=method_integration.get("uri"), + integration_type=integration_type, + passthrough_behavior=method_integration.get( + "passthroughBehavior", "WHEN_NO_MATCH" + ).upper(), + request_templates=method_integration.get("requestTemplates"), + request_parameters=integration_request_parameters, + cache_namespace=resource.id, + timeout_in_millis=method_integration.get("timeoutInMillis") or "29000", + content_handling=method_integration.get("contentHandling"), + connection_type=connection_type, + ) + + # Create the `IntegrationResponse` for the previously created `Integration` + if method_integration_responses := method_integration.get("responses"): + for pattern, integration_responses in method_integration_responses.items(): + integration_response_templates = integration_responses.get("responseTemplates") + integration_response_parameters = integration_responses.get( + "responseParameters" + ) + + integration_response = integration.create_integration_response( + status_code=str(integration_responses.get("statusCode", 200)), + selection_pattern=pattern if pattern != "default" else None, + response_templates=integration_response_templates, + response_parameters=integration_response_parameters, + content_handling=None, + ) + # moto set the responseTemplates to an empty dict when it should be None if not defined + if integration_response_templates is None: + integration_response.response_templates = None + + resource.resource_methods[method_name].method_integration = integration + + rest_api.resources[child_id] = resource + rest_api_container.resource_children.setdefault(parent_id, []).append(child_id) + return resource 
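+
+    # Illustrative sketch (hypothetical OpenAPI snippet, not taken from any spec above): an operation with
+    #   x-amazon-apigateway-integration:
+    #     type: aws_proxy
+    #     httpMethod: POST
+    #     uri: arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/.../invocations
+    # would be imported by add_path_methods above as an Integration with integration
+    # method "POST", since AWS_PROXY Lambda integrations only accept POST regardless
+    # of the route's own HTTP method.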
+
+    def create_method_resource(child, method, method_schema):
+        authorization_type = "NONE"
+        api_key_required = is_api_key_required(method_schema)
+        kwargs = {}
+
+        if authorizer := get_authorizer(method_schema) or default_authorizer:
+            method_authorizer = authorizer["authorizer"]
+            # override the authorizer_type if it's a TOKEN or REQUEST to CUSTOM
+            if (authorizer_type := method_authorizer["type"]) in ("TOKEN", "REQUEST"):
+                authorization_type = "CUSTOM"
+            else:
+                authorization_type = authorizer_type
+
+            kwargs["authorizer_id"] = method_authorizer["id"]
+
+            if authorization_scopes := authorizer.get("authorization_scopes"):
+                kwargs["authorization_scopes"] = authorization_scopes
+
+        return child.add_method(
+            method,
+            api_key_required=api_key_required,
+            authorization_type=authorization_type,
+            operation_name=method_schema.get("operationId"),
+            **kwargs,
+        )
+
+    models = resolved_schema.get("definitions") or resolved_schema.get("components", {}).get(
+        "schemas", {}
+    )
+    for name, model_data in models.items():
+        model_id = short_uid()[:6]  # length 6 to make TF tests pass
+        model = Model(
+            id=model_id,
+            name=name,
+            contentType=APPLICATION_JSON,
+            description=model_data.get("description"),
+            schema=json.dumps(model_data),
+        )
+        store.rest_apis[rest_api.id].models[name] = model
+
+    # create the RequestValidators defined at the top-level field `x-amazon-apigateway-request-validators`
+    request_validators = resolved_schema.get(OpenAPIExt.REQUEST_VALIDATORS, {})
+    request_validator_name_id_map = {}
+    for validator_name, validator_schema in request_validators.items():
+        validator_id = short_uid()[:6]
+
+        validator = RequestValidator(
+            id=validator_id,
+            name=validator_name,
+            validateRequestBody=validator_schema.get("validateRequestBody") or False,
+            validateRequestParameters=validator_schema.get("validateRequestParameters") or False,
+        )
+
+        store.rest_apis[rest_api.id].validators[validator_id] = validator
+        request_validator_name_id_map[validator_name] = validator_id
+
+    # get the default requestValidator if present
+    default_req_validator_name = resolved_schema.get(OpenAPIExt.REQUEST_VALIDATOR)
+
+    # $.securityDefinitions is Swagger 2.0
+    # $.components.securitySchemes is OpenAPI 3.0
+    security_data = resolved_schema.get("securityDefinitions") or resolved_schema.get(
+        "components", {}
+    ).get("securitySchemes", {})
+    # create the defined authorizers, even if they're not used by any routes
+    if security_data:
+        create_authorizers(security_data)
+
+    # create the default authorizer if present
+    default_authorizer = get_authorizer(resolved_schema)
+
+    # determine the base path
+    # the default basepath mode is "ignore"
+    # see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api-basePath.html
+    basepath_mode = query_params.get("basepath") or "ignore"
+    base_path = ""
+
+    if basepath_mode != "ignore":
+        # in Swagger 2.0, the basePath is a top-level property
+        if "basePath" in resolved_schema:
+            base_path = resolved_schema["basePath"]
+
+        # in OpenAPI 3.0, the basePath is contained in the server object
+        elif "servers" in resolved_schema:
+            servers_property = resolved_schema.get("servers", [])
+            for server in servers_property:
+                # first, we check if there is a basePath variable (1st choice)
+                if "basePath" in server.get("variables", {}):
+                    base_path = server["variables"]["basePath"].get("default", "")
+                    break
+                # TODO: this allows both absolute and relative parts, but AWS might not handle relative ones
+                url_path = urlparse.urlparse(server.get("url", "")).path
+                if url_path:
+
base_path = url_path if url_path != "/" else "" + break + + if basepath_mode == "split": + base_path = base_path.strip("/").partition("/")[-1] + base_path = f"/{base_path}" if base_path else "" + + api_paths = resolved_schema.get("paths", {}) + if api_paths: + # Remove default root, then add paths from API spec + # TODO: the default mode is now `merge`, not `overwrite` if using `PutRestApi` + # TODO: quick hack for now, but do not remove the rootResource if the OpenAPI file is empty + rest_api.resources = {} + + for path in api_paths: + get_or_create_path(base_path + path, base_path=base_path) + + # binary types + if mode == "merge": + existing_binary_media_types = rest_api.binaryMediaTypes or [] + else: + existing_binary_media_types = [] + + rest_api.binaryMediaTypes = existing_binary_media_types + resolved_schema.get( + OpenAPIExt.BINARY_MEDIA_TYPES, [] + ) + + policy = resolved_schema.get(OpenAPIExt.POLICY) + if policy: + policy = json.dumps(policy) if isinstance(policy, dict) else str(policy) + rest_api.policy = policy + minimum_compression_size = resolved_schema.get(OpenAPIExt.MINIMUM_COMPRESSION_SIZE) + if minimum_compression_size is not None: + rest_api.minimum_compression_size = int(minimum_compression_size) + endpoint_config = resolved_schema.get(OpenAPIExt.ENDPOINT_CONFIGURATION) + if endpoint_config: + if endpoint_config.get("vpcEndpointIds"): + endpoint_config.setdefault("types", ["PRIVATE"]) + rest_api.endpoint_configuration = endpoint_config + + api_key_source = resolved_schema.get(OpenAPIExt.API_KEY_SOURCE) + if api_key_source is not None: + rest_api.api_key_source = api_key_source.upper() + + documentation = resolved_schema.get(OpenAPIExt.DOCUMENTATION) + if documentation: + add_documentation_parts(rest_api_container, documentation) + + return rest_api, warnings + + +def is_greedy_path(path_part: str) -> bool: + return path_part.startswith("{") and path_part.endswith("+}") + + +def is_variable_path(path_part: str) -> bool: + return path_part.startswith("{") and path_part.endswith("}") + + +def get_domain_name_hash(domain_name: str) -> str: + """ + Return a hash of the given domain name, which help construct regional domain names for APIs. + TODO: use this in the future to dispatch API Gateway API invocations made to the regional domain name + """ + return hashlib.shake_128(to_bytes(domain_name)).hexdigest(4) + + +def get_regional_domain_name(domain_name: str) -> str: + """ + Return the regional domain name for the given domain name. 
+    In real AWS, this would look something like: "d-oplm2qchq0.execute-api.us-east-1.amazonaws.com"
+    In LocalStack, we're returning this format: "d-<domain_name_hash>.execute-api.localhost.localstack.cloud"
+    """
+    domain_name_hash = get_domain_name_hash(domain_name)
+    host = localstack_host().host
+    return f"d-{domain_name_hash}.execute-api.{host}"
diff --git a/localstack/services/dynamodbstreams/__init__.py b/localstack-core/localstack/services/apigateway/legacy/__init__.py
similarity index 100%
rename from localstack/services/dynamodbstreams/__init__.py
rename to localstack-core/localstack/services/apigateway/legacy/__init__.py
diff --git a/localstack-core/localstack/services/apigateway/legacy/context.py b/localstack-core/localstack/services/apigateway/legacy/context.py
new file mode 100644
index 0000000000000..37b9725f3feb8
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/context.py
@@ -0,0 +1,201 @@
+import base64
+import json
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+from responses import Response
+
+from localstack.constants import HEADER_LOCALSTACK_EDGE_URL
+from localstack.utils.aws.aws_responses import parse_query_string
+from localstack.utils.strings import short_uid, to_str
+
+# type definition for data parameters (i.e., invocation payloads)
+InvocationPayload = Union[Dict, str, bytes]
+
+
+class ApiGatewayVersion(Enum):
+    V1 = "v1"
+    V2 = "v2"
+
+
+class ApiInvocationContext:
+    """Represents the context for an incoming API Gateway invocation."""
+
+    # basic (raw) HTTP invocation details (method, path, data, headers)
+    method: str
+    path: str
+    data: InvocationPayload
+    headers: Dict[str, str]
+
+    # raw URI (including query string) retrieved from the werkzeug "RAW_URI" environment variable
+    raw_uri: str
+
+    # invocation context
+    context: Dict[str, Any]
+    # authentication info for this invocation
+    auth_context: Dict[str, Any]
+
+    # target API/resource details extracted from the invocation
+    apigw_version: ApiGatewayVersion
+    api_id: str
+    stage: str
+    account_id: str
+    region_name: str
+    # resource path, including any path parameter placeholders (e.g., "/my/path/{id}")
+    resource_path: str
+    integration: Dict
+    resource: Dict
+    # Invocation path with query string, e.g., "/my/path?test". Defaults to "path", can be used
+    # to overwrite the actual API path, in case the path format "../_user_request_/.." is used.
+ _path_with_query_string: str + + # response templates to be applied to the invocation result + response_templates: Dict + + route: Dict + connection_id: str + path_params: Dict + + # response object + response: Response + + # dict of stage variables (mapping names to values) + stage_variables: Dict[str, str] + + # websockets route selection + ws_route: str + + def __init__( + self, + method: str, + path: str, + data: Union[str, bytes], + headers: Dict[str, str], + api_id: str = None, + stage: str = None, + context: Dict[str, Any] = None, + auth_context: Dict[str, Any] = None, + ): + self.method = method + self._path = path + self.data = data + self.headers = headers + self.context = {"requestId": short_uid()} if context is None else context + self.auth_context = {} if auth_context is None else auth_context + self.apigw_version = None + self.api_id = api_id + self.stage = stage + self.region_name = None + self.account_id = None + self.integration = None + self.resource = None + self.resource_path = None + self.path_with_query_string = None + self.response_templates = {} + self.stage_variables = {} + self.path_params = {} + self.route = None + self.ws_route = None + self.response = None + + @property + def path(self) -> str: + return self._path + + @path.setter + def path(self, new_path: str): + if isinstance(new_path, str): + new_path = "/" + new_path.lstrip("/") + self._path = new_path + + @property + def resource_id(self) -> Optional[str]: + return (self.resource or {}).get("id") + + @property + def invocation_path(self) -> str: + """Return the plain invocation path, without query parameters.""" + path = self.path_with_query_string or self.path + return path.split("?")[0] + + @property + def path_with_query_string(self) -> str: + """Return invocation path with query string - defaults to the value of 'path', unless customized.""" + return self._path_with_query_string or self.path + + @path_with_query_string.setter + def path_with_query_string(self, new_path: str): + """Set a custom invocation path with query string (used to handle "../_user_request_/.." 
paths).""" + if isinstance(new_path, str): + new_path = "/" + new_path.lstrip("/") + self._path_with_query_string = new_path + + def query_params(self) -> Dict[str, str]: + """Extract the query parameters from the target URL or path in this request context.""" + query_string = self.path_with_query_string.partition("?")[2] + return parse_query_string(query_string) + + @property + def integration_uri(self) -> Optional[str]: + integration = self.integration or {} + return integration.get("uri") or integration.get("integrationUri") + + @property + def auth_identity(self) -> Optional[Dict]: + if isinstance(self.auth_context, dict): + if self.auth_context.get("identity") is None: + self.auth_context["identity"] = {} + return self.auth_context["identity"] + + @property + def authorizer_type(self) -> str: + if isinstance(self.auth_context, dict): + return self.auth_context.get("authorizer_type") if self.auth_context else None + + @property + def authorizer_result(self) -> Dict[str, Any]: + if isinstance(self.auth_context, dict): + return self.auth_context.get("authorizer") if self.auth_context else {} + + def is_websocket_request(self) -> bool: + upgrade_header = str(self.headers.get("upgrade") or "") + return upgrade_header.lower() == "websocket" + + def is_v1(self) -> bool: + """Whether this is an API Gateway v1 request""" + return self.apigw_version == ApiGatewayVersion.V1 + + def cookies(self) -> Optional[List[str]]: + if cookies := self.headers.get("cookie") or "": + return list(cookies.split(";")) + return None + + @property + def is_data_base64_encoded(self) -> bool: + try: + json.dumps(self.data) if isinstance(self.data, (dict, list)) else to_str(self.data) + return False + except UnicodeDecodeError: + return True + + def data_as_string(self) -> str: + try: + return ( + json.dumps(self.data) if isinstance(self.data, (dict, list)) else to_str(self.data) + ) + except UnicodeDecodeError: + # we string encode our base64 as string as well + return to_str(base64.b64encode(self.data)) + + def _extract_host_from_header(self) -> str: + host = self.headers.get(HEADER_LOCALSTACK_EDGE_URL) or self.headers.get("host", "") + return host.split("://")[-1].split("/")[0].split(":")[0] + + @property + def domain_name(self) -> str: + return self._extract_host_from_header() + + @property + def domain_prefix(self) -> str: + host = self._extract_host_from_header() + return host.split(".")[0] diff --git a/localstack-core/localstack/services/apigateway/legacy/helpers.py b/localstack-core/localstack/services/apigateway/legacy/helpers.py new file mode 100644 index 0000000000000..62a91a32e78b0 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/legacy/helpers.py @@ -0,0 +1,711 @@ +import json +import logging +import re +import time +from collections import defaultdict +from datetime import datetime, timezone +from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union +from urllib import parse as urlparse + +from botocore.utils import InvalidArnException +from moto.apigateway.models import apigateway_backends +from requests.models import Response + +from localstack.aws.connect import connect_to +from localstack.constants import ( + APPLICATION_JSON, + DEFAULT_AWS_ACCOUNT_ID, + HEADER_LOCALSTACK_EDGE_URL, + PATH_USER_REQUEST, +) +from localstack.services.apigateway.helpers import REQUEST_TIME_DATE_FORMAT +from localstack.services.apigateway.legacy.context import ApiInvocationContext +from localstack.utils import common +from localstack.utils.aws import resources as resource_utils +from 
localstack.utils.aws.arns import get_partition, parse_arn +from localstack.utils.aws.aws_responses import requests_error_response_json, requests_response +from localstack.utils.json import try_json +from localstack.utils.numbers import is_number +from localstack.utils.strings import canonicalize_bool_to_str, long_uid, to_str + +LOG = logging.getLogger(__name__) + +# regex path patterns +PATH_REGEX_MAIN = r"^/restapis/([A-Za-z0-9_\-]+)/[a-z]+(\?.*)?" +PATH_REGEX_SUB = r"^/restapis/([A-Za-z0-9_\-]+)/[a-z]+/([A-Za-z0-9_\-]+)/.*" +PATH_REGEX_TEST_INVOKE_API = r"^\/restapis\/([A-Za-z0-9_\-]+)\/resources\/([A-Za-z0-9_\-]+)\/methods\/([A-Za-z0-9_\-]+)/?(\?.*)?" + +# regex path pattern for user requests, handles stages like $default +PATH_REGEX_USER_REQUEST = ( + r"^/restapis/([A-Za-z0-9_\\-]+)(?:/([A-Za-z0-9\_($|%%24)\\-]+))?/%s/(.*)$" % PATH_USER_REQUEST +) +# URL pattern for invocations +HOST_REGEX_EXECUTE_API = r"(?:.*://)?([a-zA-Z0-9]+)(?:(-vpce-[^.]+))?\.execute-api\.(.*)" + +# template for SQS inbound data +APIGATEWAY_SQS_DATA_INBOUND_TEMPLATE = ( + "Action=SendMessage&MessageBody=$util.base64Encode($input.json('$'))" +) + + +class ApiGatewayIntegrationError(Exception): + """ + Base class for all ApiGateway Integration errors. + Can be used as is or extended for common error types. + These exceptions should be handled in one place, and bubble up from all others. + """ + + message: str + status_code: int + + def __init__(self, message: str, status_code: int): + super().__init__(message) + self.message = message + self.status_code = status_code + + def to_response(self): + return requests_response({"message": self.message}, status_code=self.status_code) + + +class IntegrationParameters(TypedDict): + path: dict[str, str] + querystring: dict[str, str] + headers: dict[str, str] + + +class RequestParametersResolver: + """ + Integration request data mapping expressions + https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html + + Note: Use on REST APIs only + """ + + def resolve(self, context: ApiInvocationContext) -> IntegrationParameters: + """ + Resolve method request parameters into integration request parameters. + Integration request parameters, in the form of path variables, query strings + or headers, can be mapped from any defined method request parameters + and the payload. 
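+
+        For illustration (hypothetical mapping, assumed request shape): an entry
+            "integration.request.querystring.who": "method.request.querystring.who"
+        copies the resolved value of the "who" query parameter (e.g. "alice" for a
+        request with "?who=alice") into the integration request querystring, while a
+        quoted value like "'application/json'" is passed through as a static string
+        with the quotes stripped.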
+ + :return: IntegrationParameters + """ + method_request_params: Dict[str, Any] = self.method_request_dict(context) + + # requestParameters: { + # "integration.request.path.pathParam": "method.request.header.Content-Type" + # "integration.request.querystring.who": "method.request.querystring.who", + # "integration.request.header.Content-Type": "'application/json'", + # } + request_params = context.integration.get("requestParameters", {}) + + # resolve all integration request parameters with the already resolved method request parameters + integrations_parameters = {} + for k, v in request_params.items(): + if v.lower() in method_request_params: + integrations_parameters[k] = method_request_params[v.lower()] + else: + # static values + integrations_parameters[k] = v.replace("'", "") + + # build the integration parameters + result: IntegrationParameters = IntegrationParameters(path={}, querystring={}, headers={}) + for k, v in integrations_parameters.items(): + # headers + if k.startswith("integration.request.header."): + header_name = k.split(".")[-1] + result["headers"].update({header_name: v}) + + # querystring + if k.startswith("integration.request.querystring."): + param_name = k.split(".")[-1] + result["querystring"].update({param_name: v}) + + # path + if k.startswith("integration.request.path."): + path_name = k.split(".")[-1] + result["path"].update({path_name: v}) + + return result + + def method_request_dict(self, context: ApiInvocationContext) -> Dict[str, Any]: + """ + Build a dict with all method request parameters and their values. + :return: dict with all method request parameters and their values, + and all keys in lowercase + """ + params: Dict[str, str] = {} + + # TODO: add support for multi-values headers and multi-values querystring + + for k, v in context.query_params().items(): + params[f"method.request.querystring.{k}"] = v + + for k, v in context.headers.items(): + params[f"method.request.header.{k}"] = v + + for k, v in context.path_params.items(): + params[f"method.request.path.{k}"] = v + + for k, v in context.stage_variables.items(): + params[f"stagevariables.{k}"] = v + + # TODO: add support for missing context variables, use `context.context` which contains most of the variables + # see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference + # - all `context.identity` fields + # - protocol + # - requestId, extendedRequestId + # - all requestOverride, responseOverride + # - requestTime, requestTimeEpoch + # - resourcePath + # - wafResponseCode, webaclArn + params["context.accountId"] = context.account_id + params["context.apiId"] = context.api_id + params["context.domainName"] = context.domain_name + params["context.httpMethod"] = context.method + params["context.path"] = context.path + params["context.resourceId"] = context.resource_id + params["context.stage"] = context.stage + + auth_context_authorizer = context.auth_context.get("authorizer") or {} + for k, v in auth_context_authorizer.items(): + if isinstance(v, bool): + v = canonicalize_bool_to_str(v) + elif is_number(v): + v = str(v) + + params[f"context.authorizer.{k.lower()}"] = v + + if context.data: + params["method.request.body"] = context.data + + return {key.lower(): val for key, val in params.items()} + + +class ResponseParametersResolver: + def resolve(self, context: ApiInvocationContext) -> Dict[str, str]: + """ + Resolve integration response parameters into method response parameters. 
+        Integration response parameters can map header, body,
+        or static values to the header type of the method response.
+
+        :return: dict with all method response parameters and their values
+        """
+        integration_request_params: Dict[str, Any] = self.integration_request_dict(context)
+
+        # "responseParameters" : {
+        #     "method.response.header.Location" : "integration.response.body.redirect.url",
+        #     "method.response.header.x-user-id" : "integration.response.header.x-userid"
+        # }
+        integration_responses = context.integration.get("integrationResponses", {})
+        # XXX: fix for other status codes: context.response contains a response status code, but response
+        # can be a LambdaResponse or Response object, and the field name is not the same - normalize it or use introspection
+        response_params = integration_responses.get("200", {}).get("responseParameters", {})
+
+        # resolve all method response parameters with the already resolved integration
+        # request parameters
+        method_parameters = {}
+        for k, v in response_params.items():
+            if v.lower() in integration_request_params:
+                method_parameters[k] = integration_request_params[v.lower()]
+            else:
+                # static values
+                method_parameters[k] = v.replace("'", "")
+
+        # build the method response parameters
+        result: Dict[str, str] = {}
+        for k, v in method_parameters.items():
+            # headers
+            if k.startswith("method.response.header."):
+                header_name = k.split(".")[-1]
+                result[header_name] = v
+
+        return result
+
+    def integration_request_dict(self, context: ApiInvocationContext) -> Dict[str, Any]:
+        params: Dict[str, str] = {}
+
+        for k, v in context.headers.items():
+            params[f"integration.request.header.{k}"] = v
+
+        if context.data:
+            params["integration.request.body"] = try_json(context.data)
+
+        return {key.lower(): val for key, val in params.items()}
+
+
+def make_json_response(message):
+    return requests_response(json.dumps(message), headers={"Content-Type": APPLICATION_JSON})
+
+
+def make_error_response(message, code=400, error_type=None):
+    if code == 404 and not error_type:
+        error_type = "NotFoundException"
+    error_type = error_type or "InvalidRequest"
+    return requests_error_response_json(message, code=code, error_type=error_type)
+
+
+def select_integration_response(matched_part: str, invocation_context: ApiInvocationContext):
+    int_responses = invocation_context.integration.get("integrationResponses") or {}
+    if select_by_pattern := [
+        response
+        for response in int_responses.values()
+        if response.get("selectionPattern")
+        and re.match(response.get("selectionPattern"), matched_part)
+    ]:
+        selected_response = select_by_pattern[0]
+        if len(select_by_pattern) > 1:
+            LOG.warning(
+                "Multiple integration responses matching '%s' statuscode. Choosing '%s' (first).",
+                matched_part,
+                selected_response["statusCode"],
+            )
+    else:
+        # choose default return code
+        default_responses = [
+            response for response in int_responses.values() if not response.get("selectionPattern")
+        ]
+        if not default_responses:
+            raise ApiGatewayIntegrationError("Internal server error", 500)
+
+        selected_response = default_responses[0]
+        if len(default_responses) > 1:
+            LOG.warning(
+                "Multiple default integration responses.
Choosing %s (first).", + selected_response["statusCode"], + ) + return selected_response + + +def make_accepted_response(): + response = Response() + response.status_code = 202 + return response + + +def get_api_id_from_path(path): + if match := re.match(PATH_REGEX_SUB, path): + return match.group(1) + return re.match(PATH_REGEX_MAIN, path).group(1) + + +def is_test_invoke_method(method, path): + return method == "POST" and bool(re.match(PATH_REGEX_TEST_INVOKE_API, path)) + + +def get_stage_variables(context: ApiInvocationContext) -> Optional[Dict[str, str]]: + if is_test_invoke_method(context.method, context.path): + return None + + if not context.stage: + return {} + + account_id, region_name = get_api_account_id_and_region(context.api_id) + api_gateway_client = connect_to( + aws_access_key_id=account_id, region_name=region_name + ).apigateway + try: + response = api_gateway_client.get_stage(restApiId=context.api_id, stageName=context.stage) + return response.get("variables", {}) + except Exception: + LOG.info("Failed to get stage %s for API id %s", context.stage, context.api_id) + return {} + + +def tokenize_path(path): + return path.lstrip("/").split("/") + + +def extract_path_params(path: str, extracted_path: str) -> Dict[str, str]: + tokenized_extracted_path = tokenize_path(extracted_path) + # Looks for '{' in the tokenized extracted path + path_params_list = [(i, v) for i, v in enumerate(tokenized_extracted_path) if "{" in v] + tokenized_path = tokenize_path(path) + path_params = {} + for param in path_params_list: + path_param_name = param[1][1:-1] + path_param_position = param[0] + if path_param_name.endswith("+"): + path_params[path_param_name.rstrip("+")] = "/".join( + tokenized_path[path_param_position:] + ) + else: + path_params[path_param_name] = tokenized_path[path_param_position] + path_params = common.json_safe(path_params) + return path_params + + +def extract_query_string_params(path: str) -> Tuple[str, Dict[str, str]]: + parsed_path = urlparse.urlparse(path) + if not path.startswith("//"): + path = parsed_path.path + parsed_query_string_params = urlparse.parse_qs(parsed_path.query) + + query_string_params = {} + for query_param_name, query_param_values in parsed_query_string_params.items(): + if len(query_param_values) == 1: + query_string_params[query_param_name] = query_param_values[0] + else: + query_string_params[query_param_name] = query_param_values + + path = path or "/" + return path, query_string_params + + +def get_cors_response(headers): + # TODO: for now we simply return "allow-all" CORS headers, but in the future + # we should implement custom headers for CORS rules, as supported by API Gateway: + # http://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html + response = Response() + response.status_code = 200 + response.headers["Access-Control-Allow-Origin"] = "*" + response.headers["Access-Control-Allow-Methods"] = "GET, POST, PUT, DELETE, PATCH" + response.headers["Access-Control-Allow-Headers"] = "*" + response._content = "" + return response + + +def get_apigateway_path_for_resource( + api_id, resource_id, path_suffix="", resources=None, region_name=None +): + if resources is None: + apigateway = connect_to(region_name=region_name).apigateway + resources = apigateway.get_resources(restApiId=api_id, limit=100)["items"] + target_resource = list(filter(lambda res: res["id"] == resource_id, resources))[0] + path_part = target_resource.get("pathPart", "") + if path_suffix: + if path_part: + path_suffix = "%s/%s" % (path_part, path_suffix) + 
else:
+            path_suffix = path_part
+    parent_id = target_resource.get("parentId")
+    if not parent_id:
+        return "/%s" % path_suffix
+    return get_apigateway_path_for_resource(
+        api_id,
+        parent_id,
+        path_suffix=path_suffix,
+        resources=resources,
+        region_name=region_name,
+    )
+
+
+def get_rest_api_paths(account_id: str, region_name: str, rest_api_id: str):
+    apigateway = connect_to(aws_access_key_id=account_id, region_name=region_name).apigateway
+    resources = apigateway.get_resources(restApiId=rest_api_id, limit=100)
+    resource_map = {}
+    for resource in resources["items"]:
+        path = resource.get("path")
+        # TODO: check if this is still required in the general case (can we rely on "path" being
+        # present?)
+        path = path or get_apigateway_path_for_resource(
+            rest_api_id, resource["id"], region_name=region_name
+        )
+        resource_map[path] = resource
+    return resource_map
+
+
+# TODO: Extract this to a set of rules that have precedence and are easy to test individually.
+#
+# https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-method-settings
+# -method-request.html
+# https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-routes.html
+def get_resource_for_path(
+    path: str, method: str, path_map: Dict[str, Dict]
+) -> tuple[Optional[str], Optional[dict]]:
+    matches = []
+    # creates a regex from the input path if there are parameters, e.g. /foo/{bar}/baz -> /foo/[^/]+/baz;
+    # otherwise it is a direct match.
+    for api_path, details in path_map.items():
+        api_path_regex = re.sub(r"{[^+]+\+}", r"[^\?#]+", api_path)
+        api_path_regex = re.sub(r"{[^}]+}", r"[^/]+", api_path_regex)
+        if re.match(r"^%s$" % api_path_regex, path):
+            matches.append((api_path, details))
+
+    # if there are no matches, it's not worth proceeding - bail here!
+    if not matches:
+        LOG.debug("No match found for path: '%s' and method: '%s'", path, method)
+        return None, None
+
+    if len(matches) == 1:
+        LOG.debug("Match found for path: '%s' and method: '%s'", path, method)
+        return matches[0]
+
+    # so we have more than one match
+    # /{proxy+} and /api/{proxy+} for inputs like /api/foo/bar
+    # /foo/{param1}/baz and /foo/{param1}/{param2} for inputs like /foo/bar/baz
+    proxy_matches = []
+    param_matches = []
+    for match in matches:
+        match_methods = list(match[1].get("resourceMethods", {}).keys())
+        # only look for path matches if the request method is in the resource
+        if method.upper() in match_methods or "ANY" in match_methods:
+            # check if we have an exact match (exact matches take precedence) if the method is the same
+            if match[0] == path:
+                return match
+
+            elif path_matches_pattern(path, match[0]):
+                # parameters can fit in
+                param_matches.append(match)
+                continue
+
+            proxy_matches.append(match)
+
+    if param_matches:
+        # count the number of parameters and return the one with the fewest, which is the most precise
+        sorted_matches = sorted(param_matches, key=lambda x: x[0].count("{"))
+        LOG.debug("Match found for path: '%s' and method: '%s'", path, method)
+        return sorted_matches[0]
+
+    if proxy_matches:
+        # at this stage, we still have more than one match, but we have a greedy match like
+        # /{proxy+} or /api/{proxy+}, so we pick the best match by sorting by length, only if they have a method
+        # that could match
+        sorted_matches = sorted(proxy_matches, key=lambda x: len(x[0]), reverse=True)
+        LOG.debug("Match found for path: '%s' and method: '%s'", path, method)
+        return sorted_matches[0]
+
+    # if there are no matches with a method that would match, return
+    LOG.debug("No match found for method: '%s' for matched path: %s", method, path)
+    return None, None
+
+
+def path_matches_pattern(path, api_path):
+    api_paths = api_path.split("/")
+    paths = path.split("/")
+    reg_check = re.compile(r"{(.*)}")
+    if len(api_paths) != len(paths):
+        return False
+    results = [
+        part == paths[indx]
+        for indx, part in enumerate(api_paths)
+        if reg_check.match(part) is None and part
+    ]
+
+    return len(results) > 0 and all(results)
+
+
+def connect_api_gateway_to_sqs(gateway_name, stage_name, queue_arn, path, account_id, region_name):
+    resources = {}
+    template = APIGATEWAY_SQS_DATA_INBOUND_TEMPLATE
+    resource_path = path.replace("/", "")
+
+    try:
+        arn = parse_arn(queue_arn)
+        queue_name = arn["resource"]
+        sqs_account = arn["account"]
+        sqs_region = arn["region"]
+    except InvalidArnException:
+        queue_name = queue_arn
+        sqs_account = account_id
+        sqs_region = region_name
+
+    partition = get_partition(region_name)
+    resources[resource_path] = [
+        {
+            "httpMethod": "POST",
+            "authorizationType": "NONE",
+            "integrations": [
+                {
+                    "type": "AWS",
+                    "uri": "arn:%s:apigateway:%s:sqs:path/%s/%s"
+                    % (partition, sqs_region, sqs_account, queue_name),
+                    "requestTemplates": {"application/json": template},
+                    "requestParameters": {
+                        "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'"
+                    },
+                }
+            ],
+        }
+    ]
+    return resource_utils.create_api_gateway(
+        name=gateway_name,
+        resources=resources,
+        stage_name=stage_name,
+        client=connect_to(aws_access_key_id=sqs_account, region_name=sqs_region).apigateway,
+    )
+
+
+def get_target_resource_details(
+    invocation_context: ApiInvocationContext,
+) -> Tuple[Optional[str], Optional[dict]]:
+    """Look up and return the API GW resource (path pattern + resource dict) for the given invocation context."""
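+    # Illustrative example (hypothetical resources): with "/{proxy+}", "/api/{proxy+}"
+    # and "/api/foo/{id}" defined, a GET on "/api/foo/bar" resolves to "/api/foo/{id}" -
+    # parameter matches beat greedy {proxy+} matches, and among proxy matches the
+    # longest path pattern wins (see get_resource_for_path above).
+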
path_map = get_rest_api_paths( + account_id=invocation_context.account_id, + region_name=invocation_context.region_name, + rest_api_id=invocation_context.api_id, + ) + relative_path = invocation_context.invocation_path.rstrip("/") or "/" + try: + extracted_path, resource = get_resource_for_path( + path=relative_path, method=invocation_context.method, path_map=path_map + ) + if not extracted_path: + return None, None + invocation_context.resource = resource + invocation_context.resource_path = extracted_path + try: + invocation_context.path_params = extract_path_params( + path=relative_path, extracted_path=extracted_path + ) + except Exception: + invocation_context.path_params = {} + + return extracted_path, resource + + except Exception: + return None, None + + +def get_target_resource_method(invocation_context: ApiInvocationContext) -> Optional[Dict]: + """Look up and return the API GW resource method for the given invocation context.""" + _, resource = get_target_resource_details(invocation_context) + if not resource: + return None + methods = resource.get("resourceMethods") or {} + return methods.get(invocation_context.method.upper()) or methods.get("ANY") + + +def event_type_from_route_key(invocation_context): + action = invocation_context.route["RouteKey"] + return ( + "CONNECT" + if action == "$connect" + else "DISCONNECT" + if action == "$disconnect" + else "MESSAGE" + ) + + +def get_event_request_context(invocation_context: ApiInvocationContext): + method = invocation_context.method + path = invocation_context.path + headers = invocation_context.headers + integration_uri = invocation_context.integration_uri + resource_path = invocation_context.resource_path + resource_id = invocation_context.resource_id + + set_api_id_stage_invocation_path(invocation_context) + api_id = invocation_context.api_id + stage = invocation_context.stage + + if "_user_request_" in invocation_context.raw_uri: + full_path = invocation_context.raw_uri.partition("_user_request_")[2] + else: + full_path = invocation_context.raw_uri.removeprefix(f"/{stage}") + relative_path, query_string_params = extract_query_string_params(path=full_path) + + source_ip = invocation_context.auth_identity.get("sourceIp") + integration_uri = integration_uri or "" + account_id = integration_uri.split(":lambda:path")[-1].split(":function:")[0].split(":")[-1] + account_id = account_id or DEFAULT_AWS_ACCOUNT_ID + request_context = { + "accountId": account_id, + "apiId": api_id, + "resourcePath": resource_path or relative_path, + "domainPrefix": invocation_context.domain_prefix, + "domainName": invocation_context.domain_name, + "resourceId": resource_id, + "requestId": long_uid(), + "identity": { + "accountId": account_id, + "sourceIp": source_ip, + "userAgent": headers.get("User-Agent"), + }, + "httpMethod": method, + "protocol": "HTTP/1.1", + "requestTime": datetime.now(timezone.utc).strftime(REQUEST_TIME_DATE_FORMAT), + "requestTimeEpoch": int(time.time() * 1000), + "authorizer": {}, + } + + if invocation_context.is_websocket_request(): + request_context["connectionId"] = invocation_context.connection_id + + # set "authorizer" and "identity" event attributes from request context + authorizer_result = invocation_context.authorizer_result + if authorizer_result: + request_context["authorizer"] = authorizer_result + request_context["identity"].update(invocation_context.auth_identity or {}) + + if not is_test_invoke_method(method, path): + request_context["path"] = (f"/{stage}" if stage else "") + relative_path + 
request_context["stage"] = stage
+    return request_context
+
+
+def set_api_id_stage_invocation_path(
+    invocation_context: ApiInvocationContext,
+) -> ApiInvocationContext:
+    # skip if all details are already available
+    values = (
+        invocation_context.api_id,
+        invocation_context.stage,
+        invocation_context.path_with_query_string,
+    )
+    if all(values):
+        return invocation_context
+
+    # skip if this is a websocket request
+    if invocation_context.is_websocket_request():
+        return invocation_context
+
+    path = invocation_context.path
+    headers = invocation_context.headers
+
+    path_match = re.search(PATH_REGEX_USER_REQUEST, path)
+    host_header = headers.get(HEADER_LOCALSTACK_EDGE_URL, "") or headers.get("Host") or ""
+    host_match = re.search(HOST_REGEX_EXECUTE_API, host_header)
+    test_invoke_match = re.search(PATH_REGEX_TEST_INVOKE_API, path)
+    if path_match:
+        api_id = path_match.group(1)
+        stage = path_match.group(2)
+        relative_path_w_query_params = "/%s" % path_match.group(3)
+    elif host_match:
+        api_id = extract_api_id_from_hostname_in_url(host_header)
+        stage = path.strip("/").split("/")[0]
+        relative_path_w_query_params = "/%s" % path.lstrip("/").partition("/")[2]
+    elif test_invoke_match:
+        stage = invocation_context.stage
+        api_id = invocation_context.api_id
+        relative_path_w_query_params = invocation_context.path_with_query_string
+    else:
+        raise Exception(
+            f"Unable to extract API Gateway details from request: {path} {dict(headers)}"
+        )
+
+    # set details in invocation context
+    invocation_context.api_id = api_id
+    invocation_context.stage = stage
+    invocation_context.path_with_query_string = relative_path_w_query_params
+    return invocation_context
+
+
+def get_api_account_id_and_region(api_id: str) -> Tuple[Optional[str], Optional[str]]:
+    """Return the account ID and region name for the given REST API ID"""
+    for account_id, account in apigateway_backends.items():
+        for region_name, region in account.items():
+            # compare lower-case keys to avoid case sensitivity issues
+            for key in region.apis.keys():
+                if key.lower() == api_id.lower():
+                    return account_id, region_name
+    return None, None
+
+
+def extract_api_id_from_hostname_in_url(hostname: str) -> str:
+    """Extract the API ID 'id123' from URLs like https://id123.execute-api.localhost.localstack.cloud:4566"""
+    match = re.match(HOST_REGEX_EXECUTE_API, hostname)
+    return match.group(1)
+
+
+def multi_value_dict_for_list(elements: Union[List, Dict]) -> Dict:
+    temp_mv_dict = defaultdict(list)
+    for key in elements:
+        if isinstance(key, (list, tuple)):
+            key, value = key
+        else:
+            value = elements[key]
+
+        key = to_str(key)
+        temp_mv_dict[key].append(value)
+    return {k: tuple(v) for k, v in temp_mv_dict.items()}
diff --git a/localstack-core/localstack/services/apigateway/legacy/integration.py b/localstack-core/localstack/services/apigateway/legacy/integration.py
new file mode 100644
index 0000000000000..12852fff266af
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/integration.py
@@ -0,0 +1,1119 @@
+import base64
+import json
+import logging
+import re
+from abc import ABC, abstractmethod
+from functools import lru_cache
+from http import HTTPMethod, HTTPStatus
+from typing import Any, Dict
+from urllib.parse import urljoin
+
+import requests
+from botocore.exceptions import ClientError
+from moto.apigatewayv2.exceptions import BadRequestException
+from requests import Response
+
+from localstack import config
+from localstack.aws.connect import (
+    INTERNAL_REQUEST_PARAMS_HEADER,
+    InternalRequestParameters,
+    connect_to,
+
dump_dto, +) +from localstack.constants import APPLICATION_JSON, HEADER_CONTENT_TYPE +from localstack.services.apigateway.legacy.context import ApiInvocationContext +from localstack.services.apigateway.legacy.helpers import ( + ApiGatewayIntegrationError, + IntegrationParameters, + RequestParametersResolver, + ResponseParametersResolver, + extract_path_params, + extract_query_string_params, + get_event_request_context, + get_stage_variables, + make_error_response, + multi_value_dict_for_list, +) +from localstack.services.apigateway.legacy.templates import ( + MappingTemplates, + RequestTemplates, + ResponseTemplates, +) +from localstack.services.stepfunctions.stepfunctions_utils import await_sfn_execution_result +from localstack.utils import common +from localstack.utils.aws.arns import ARN_PARTITION_REGEX, extract_region_from_arn, get_partition +from localstack.utils.aws.aws_responses import ( + LambdaResponse, + request_response_stream, + requests_response, +) +from localstack.utils.aws.client_types import ServicePrincipal +from localstack.utils.aws.request_context import mock_aws_request_headers +from localstack.utils.aws.templating import VtlTemplate +from localstack.utils.collections import dict_multi_values, remove_attributes +from localstack.utils.common import make_http_request, to_str +from localstack.utils.http import add_query_params_to_url, canonicalize_headers, parse_request_data +from localstack.utils.json import json_safe, try_json +from localstack.utils.strings import camel_to_snake_case, to_bytes + +LOG = logging.getLogger(__name__) + + +class IntegrationAccessError(ApiGatewayIntegrationError): + """ + Error message when an integration cannot be accessed. + """ + + def __init__(self): + super().__init__("Internal server error", 500) + + +class BackendIntegration(ABC): + """Abstract base class representing a backend integration""" + + def __init__(self): + self.request_templates = RequestTemplates() + self.response_templates = ResponseTemplates() + self.request_params_resolver = RequestParametersResolver() + self.response_params_resolver = ResponseParametersResolver() + + @abstractmethod + def invoke(self, invocation_context: ApiInvocationContext): + pass + + @classmethod + def _create_response(cls, status_code, headers, data=""): + response = Response() + response.status_code = status_code + response.headers = headers + response._content = data + return response + + @classmethod + def apply_request_parameters( + cls, integration_params: IntegrationParameters, headers: Dict[str, Any] + ): + for k, v in integration_params.get("headers").items(): + headers.update({k: v}) + + @classmethod + def apply_response_parameters( + cls, invocation_context: ApiInvocationContext, response: Response + ): + integration = invocation_context.integration + integration_responses = integration.get("integrationResponses") or {} + if not integration_responses: + return response + entries = list(integration_responses.keys()) + return_code = str(response.status_code) + if return_code not in entries: + if len(entries) > 1: + LOG.info("Found multiple integration response status codes: %s", entries) + return response + return_code = entries[0] + response_params = integration_responses[return_code].get("responseParameters", {}) + for key, value in response_params.items(): + # TODO: add support for method.response.body, etc ... 
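+            # e.g. a hypothetical entry {"method.response.header.Location": "'https://example.com'"}
+            # would set the Location response header to the literal value, with the
+            # surrounding single quotes stripped below.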
+ if str(key).lower().startswith("method.response.header."): + header_name = key[len("method.response.header.") :] + response.headers[header_name] = value.strip("'") + return response + + @classmethod + def render_template_selection_expression(cls, invocation_context: ApiInvocationContext): + integration = invocation_context.integration + template_selection_expression = integration.get("templateSelectionExpression") + + # AWS template selection relies on the content type + # to select an input template or output mapping AND template selection expressions. + # All of them will fall back to the $default template if a matching template is not found. + if not template_selection_expression: + content_type = invocation_context.headers.get(HEADER_CONTENT_TYPE, APPLICATION_JSON) + if integration.get("RequestTemplates", {}).get(content_type): + return content_type + return "$default" + + data = try_json(invocation_context.data) + variables = { + "request": { + "header": invocation_context.headers, + "querystring": invocation_context.query_params(), + "body": data, + "context": invocation_context.context or {}, + "stage_variables": invocation_context.stage_variables or {}, + } + } + return VtlTemplate().render_vtl(template_selection_expression, variables) or "$default" + + +@lru_cache(maxsize=64) +def get_service_factory(region_name: str, role_arn: str): + if role_arn: + return connect_to.with_assumed_role( + role_arn=role_arn, + region_name=region_name, + service_principal=ServicePrincipal.apigateway, + session_name="BackplaneAssumeRoleSession", + ) + else: + return connect_to(region_name=region_name) + + +@lru_cache(maxsize=64) +def get_internal_mocked_headers( + service_name: str, + region_name: str, + source_arn: str, + role_arn: str | None, +) -> dict[str, str]: + if role_arn: + access_key_id = ( + connect_to(region_name=region_name) + .sts.request_metadata(service_principal=ServicePrincipal.apigateway) + .assume_role(RoleArn=role_arn, RoleSessionName="BackplaneAssumeRoleSession")[ + "Credentials" + ]["AccessKeyId"] + ) + else: + access_key_id = None + headers = mock_aws_request_headers( + service=service_name, aws_access_key_id=access_key_id, region_name=region_name + ) + + dto = InternalRequestParameters( + service_principal=ServicePrincipal.apigateway, source_arn=source_arn + ) + headers[INTERNAL_REQUEST_PARAMS_HEADER] = dump_dto(dto) + return headers + + +def get_source_arn(invocation_context: ApiInvocationContext): + return f"arn:{get_partition(invocation_context.region_name)}:execute-api:{invocation_context.region_name}:{invocation_context.account_id}:{invocation_context.api_id}/{invocation_context.stage}/{invocation_context.method}{invocation_context.path}" + + +def call_lambda( + function_arn: str, event: bytes, asynchronous: bool, invocation_context: ApiInvocationContext +) -> str: + clients = get_service_factory( + region_name=extract_region_from_arn(function_arn), + role_arn=invocation_context.integration.get("credentials"), + ) + inv_result = clients.lambda_.request_metadata( + service_principal=ServicePrincipal.apigateway, source_arn=get_source_arn(invocation_context) + ).invoke( + FunctionName=function_arn, + Payload=event, + InvocationType="Event" if asynchronous else "RequestResponse", + ) + if payload := inv_result.get("Payload"): + payload = to_str(payload.read()) + return payload + return "" + + +class LambdaProxyIntegration(BackendIntegration): + @classmethod + def update_content_length(cls, response: Response): + if response and response.content is not None: + 
response.headers["Content-Length"] = str(len(response.content)) + + @classmethod + def lambda_result_to_response(cls, result) -> LambdaResponse: + response = LambdaResponse() + response.headers.update({"content-type": "application/json"}) + parsed_result = result if isinstance(result, dict) else json.loads(str(result or "{}")) + parsed_result = common.json_safe(parsed_result) + parsed_result = {} if parsed_result is None else parsed_result + + if set(parsed_result) - { + "body", + "statusCode", + "headers", + "isBase64Encoded", + "multiValueHeaders", + }: + LOG.warning( + 'Lambda output should follow the next JSON format: { "isBase64Encoded": true|false, "statusCode": httpStatusCode, "headers": { "headerName": "headerValue", ... },"body": "..."}\n Lambda output: %s', + parsed_result, + ) + response.status_code = 502 + response._content = json.dumps({"message": "Internal server error"}) + return response + + response.status_code = int(parsed_result.get("statusCode", 200)) + parsed_headers = parsed_result.get("headers", {}) + if parsed_headers is not None: + response.headers.update(parsed_headers) + try: + result_body = parsed_result.get("body") + if isinstance(result_body, dict): + response._content = json.dumps(result_body) + else: + body_bytes = to_bytes(to_str(result_body or "")) + if parsed_result.get("isBase64Encoded", False): + body_bytes = base64.b64decode(body_bytes) + response._content = body_bytes + except Exception as e: + LOG.warning("Couldn't set Lambda response content: %s", e) + response._content = "{}" + response.multi_value_headers = parsed_result.get("multiValueHeaders") or {} + return response + + @staticmethod + def fix_proxy_path_params(path_params): + proxy_path_param_value = path_params.get("proxy+") + if not proxy_path_param_value: + return + del path_params["proxy+"] + path_params["proxy"] = proxy_path_param_value + + @staticmethod + def validate_integration_method(invocation_context: ApiInvocationContext): + if invocation_context.integration["httpMethod"] != HTTPMethod.POST: + raise ApiGatewayIntegrationError("Internal server error", status_code=500) + + @classmethod + def construct_invocation_event( + cls, method, path, headers, data, query_string_params=None, is_base64_encoded=False + ): + query_string_params = query_string_params or parse_request_data(method, path, "") + + single_value_query_string_params = { + k: v[-1] if isinstance(v, list) else v for k, v in query_string_params.items() + } + # Some headers get capitalized like in CloudFront, see + # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/add-origin-custom-headers.html#add-origin-custom-headers-forward-authorization + # It seems AWS_PROXY lambda integrations are behind cloudfront, as seen by the returned headers in AWS + to_capitalize: list[str] = ["authorization"] # some headers get capitalized + headers = { + k.capitalize() if k.lower() in to_capitalize else k: v for k, v in headers.items() + } + + # AWS canonical header names, converting them to lower-case + headers = canonicalize_headers(headers) + + return { + "path": "/" + path.lstrip("/"), + "headers": headers, + "multiValueHeaders": multi_value_dict_for_list(headers), + "body": data, + "isBase64Encoded": is_base64_encoded, + "httpMethod": method, + "queryStringParameters": single_value_query_string_params or None, + "multiValueQueryStringParameters": dict_multi_values(query_string_params) or None, + } + + @classmethod + def process_apigateway_invocation( + cls, + func_arn, + path, + payload, + invocation_context: 
ApiInvocationContext, + query_string_params=None, + ) -> str: + if (path_params := invocation_context.path_params) is None: + path_params = {} + if (request_context := invocation_context.context) is None: + request_context = {} + try: + resource_path = invocation_context.resource_path or path + event = cls.construct_invocation_event( + invocation_context.method, + path, + invocation_context.headers, + payload, + query_string_params, + invocation_context.is_data_base64_encoded, + ) + path_params = dict(path_params) + cls.fix_proxy_path_params(path_params) + event["pathParameters"] = path_params + event["resource"] = resource_path + event["requestContext"] = request_context + event["stageVariables"] = invocation_context.stage_variables + LOG.debug( + "Running Lambda function %s from API Gateway invocation: %s %s", + func_arn, + invocation_context.method or "GET", + path, + ) + asynchronous = invocation_context.headers.get("X-Amz-Invocation-Type") == "'Event'" + return call_lambda( + function_arn=func_arn, + event=to_bytes(json.dumps(event)), + asynchronous=asynchronous, + invocation_context=invocation_context, + ) + except ClientError as e: + raise IntegrationAccessError() from e + except Exception as e: + LOG.warning( + "Unable to run Lambda function on API Gateway message: %s", + e, + ) + + def invoke(self, invocation_context: ApiInvocationContext): + self.validate_integration_method(invocation_context) + uri = ( + invocation_context.integration.get("uri") + or invocation_context.integration.get("integrationUri") + or "" + ) + invocation_context.context = get_event_request_context(invocation_context) + relative_path, query_string_params = extract_query_string_params( + path=invocation_context.path_with_query_string + ) + try: + path_params = extract_path_params( + path=relative_path, extracted_path=invocation_context.resource_path + ) + invocation_context.path_params = path_params + except Exception: + pass + + func_arn = uri + if ":lambda:path" in uri: + func_arn = uri.split(":lambda:path")[1].split("functions/")[1].split("/invocations")[0] + + if invocation_context.authorizer_type: + invocation_context.context["authorizer"] = invocation_context.authorizer_result + + payload = self.request_templates.render(invocation_context) + + result = self.process_apigateway_invocation( + func_arn=func_arn, + path=relative_path, + payload=payload, + invocation_context=invocation_context, + query_string_params=query_string_params, + ) + + response = LambdaResponse() + response.headers.update({"content-type": "application/json"}) + parsed_result = json.loads(str(result or "{}")) + parsed_result = common.json_safe(parsed_result) + parsed_result = {} if parsed_result is None else parsed_result + + if set(parsed_result) - { + "body", + "statusCode", + "headers", + "isBase64Encoded", + "multiValueHeaders", + }: + LOG.warning( + 'Lambda output should follow the next JSON format: { "isBase64Encoded": true|false, "statusCode": httpStatusCode, "headers": { "headerName": "headerValue", ... 
},"body": "..."}\n Lambda output: %s', + parsed_result, + ) + response.status_code = 502 + response._content = json.dumps({"message": "Internal server error"}) + return response + + response.status_code = int(parsed_result.get("statusCode", 200)) + parsed_headers = parsed_result.get("headers", {}) + if parsed_headers is not None: + response.headers.update(parsed_headers) + try: + result_body = parsed_result.get("body") + if isinstance(result_body, dict): + response._content = json.dumps(result_body) + else: + body_bytes = to_bytes(result_body or "") + if parsed_result.get("isBase64Encoded", False): + body_bytes = base64.b64decode(body_bytes) + response._content = body_bytes + except Exception as e: + LOG.warning("Couldn't set Lambda response content: %s", e) + response._content = "{}" + response.multi_value_headers = parsed_result.get("multiValueHeaders") or {} + + # apply custom response template + self.update_content_length(response) + invocation_context.response = response + + return invocation_context.response + + +class LambdaIntegration(BackendIntegration): + def invoke(self, invocation_context: ApiInvocationContext): + invocation_context.stage_variables = get_stage_variables(invocation_context) + headers = invocation_context.headers + + # resolve integration parameters + integration_parameters = self.request_params_resolver.resolve(context=invocation_context) + headers.update(integration_parameters.get("headers", {})) + + if invocation_context.authorizer_type: + invocation_context.context["authorizer"] = invocation_context.authorizer_result + + func_arn = self._lambda_integration_uri(invocation_context) + # integration type "AWS" is only supported for WebSocket APIs and REST + # API (v1), but the template selection expression is only supported for + # Websockets + if invocation_context.is_websocket_request(): + template_key = self.render_template_selection_expression(invocation_context) + payload = self.request_templates.render(invocation_context, template_key) + else: + payload = self.request_templates.render(invocation_context) + + asynchronous = headers.get("X-Amz-Invocation-Type", "").strip("'") == "Event" + try: + result = call_lambda( + function_arn=func_arn, + event=to_bytes(payload or ""), + asynchronous=asynchronous, + invocation_context=invocation_context, + ) + except ClientError as e: + raise IntegrationAccessError() from e + + # default lambda status code is 200 + response = LambdaResponse() + response.status_code = 200 + response._content = result + + if asynchronous: + response._content = "" + + # response template + invocation_context.response = response + self.response_templates.render(invocation_context) + invocation_context.response.headers["Content-Length"] = str(len(response.content or "")) + + headers = self.response_params_resolver.resolve(invocation_context) + invocation_context.response.headers.update(headers) + + return invocation_context.response + + def _lambda_integration_uri(self, invocation_context: ApiInvocationContext): + """ + https://docs.aws.amazon.com/apigateway/latest/developerguide/aws-api-gateway-stage-variables-reference.html + """ + uri = ( + invocation_context.integration.get("uri") + or invocation_context.integration.get("integrationUri") + or "" + ) + variables = {"stageVariables": invocation_context.stage_variables} + uri = VtlTemplate().render_vtl(uri, variables) + if ":lambda:path" in uri: + uri = uri.split(":lambda:path")[1].split("functions/")[1].split("/invocations")[0] + return uri + + +class KinesisIntegration(BackendIntegration): 
+    def invoke(self, invocation_context: ApiInvocationContext):
+        integration = invocation_context.integration
+        integration_type_orig = integration.get("type") or integration.get("integrationType") or ""
+        integration_type = integration_type_orig.upper()
+        uri = integration.get("uri") or integration.get("integrationUri") or ""
+        integration_subtype = integration.get("integrationSubtype")
+
+        if uri.endswith("kinesis:action/PutRecord") or integration_subtype == "Kinesis-PutRecord":
+            target = "Kinesis_20131202.PutRecord"
+        elif uri.endswith("kinesis:action/PutRecords"):
+            target = "Kinesis_20131202.PutRecords"
+        elif uri.endswith("kinesis:action/ListStreams"):
+            target = "Kinesis_20131202.ListStreams"
+        else:
+            LOG.info(
+                "Unexpected API Gateway integration URI '%s' for integration type %s",
+                uri,
+                integration_type,
+            )
+            target = ""
+
+        try:
+            # TODO: this "event" request context is used in multiple places; we probably
+            # want to refactor it into a model class.
+            # We should arguably not decide on the event_request_context inside the integration,
+            # because it differs between API types (REST, HTTP, WebSocket) and per event version.
+            invocation_context.context = get_event_request_context(invocation_context)
+            invocation_context.stage_variables = get_stage_variables(invocation_context)
+
+            # integration type "AWS" is only supported for WebSocket APIs and REST
+            # API (v1), but the template selection expression is only supported for
+            # Websockets
+            if invocation_context.is_websocket_request():
+                template_key = self.render_template_selection_expression(invocation_context)
+                payload = self.request_templates.render(invocation_context, template_key)
+            else:
+                # For HTTP APIs with a specified integrationSubtype, the request
+                # parameters are a key-value map that is passed to the AWS_PROXY integration
+                if integration_type == "AWS_PROXY" and integration_subtype == "Kinesis-PutRecord":
+                    payload = self._create_request_parameters(invocation_context)
+                else:
+                    payload = self.request_templates.render(invocation_context)
+
+        except Exception as e:
+            LOG.warning("Unable to convert API Gateway payload to str: %s", e)
+            raise
+
+        # forward records to target kinesis stream
+        headers = get_internal_mocked_headers(
+            service_name="kinesis",
+            region_name=invocation_context.region_name,
+            role_arn=invocation_context.integration.get("credentials"),
+            source_arn=get_source_arn(invocation_context),
+        )
+        headers["X-Amz-Target"] = target
+
+        result = common.make_http_request(
+            url=config.internal_service_url(), data=payload, headers=headers, method="POST"
+        )
+
+        # apply response template
+        invocation_context.response = result
+        self.response_templates.render(invocation_context)
+        return invocation_context.response
+
+    @classmethod
+    def _validate_required_params(cls, request_parameters: Dict[str, Any]) -> None:
+        if not request_parameters:
+            raise BadRequestException("Missing required parameters")
+        # https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-aws-services-reference.html#Kinesis-PutRecord
+        stream_name = request_parameters.get("StreamName")
+        partition_key = request_parameters.get("PartitionKey")
+        data = request_parameters.get("Data")
+
+        if not stream_name:
+            raise BadRequestException("StreamName")
+
+        if not partition_key:
+            raise BadRequestException("PartitionKey")
+
+        if not data:
+            raise BadRequestException("Data")
+
+    def _create_request_parameters(
+        self, invocation_context: ApiInvocationContext
+    ) -> Dict[str, Any]:
+        request_parameters = 
invocation_context.integration.get("requestParameters", {}) + self._validate_required_params(request_parameters) + + variables = { + "request": { + "header": invocation_context.headers, + "querystring": invocation_context.query_params(), + "body": invocation_context.data_as_string(), + "context": invocation_context.context or {}, + "stage_variables": invocation_context.stage_variables or {}, + } + } + + if invocation_context.headers.get("Content-Type") == "application/json": + variables["request"]["body"] = json.loads(invocation_context.data_as_string()) + else: + # AWS parity no content type still yields a valid response from Kinesis + variables["request"]["body"] = try_json(invocation_context.data_as_string()) + + # Required parameters + payload = { + "StreamName": VtlTemplate().render_vtl(request_parameters.get("StreamName"), variables), + "Data": VtlTemplate().render_vtl(request_parameters.get("Data"), variables), + "PartitionKey": VtlTemplate().render_vtl( + request_parameters.get("PartitionKey"), variables + ), + } + # Optional Parameters + if "ExplicitHashKey" in request_parameters: + payload["ExplicitHashKey"] = VtlTemplate().render_vtl( + request_parameters.get("ExplicitHashKey"), variables + ) + if "SequenceNumberForOrdering" in request_parameters: + payload["SequenceNumberForOrdering"] = VtlTemplate().render_vtl( + request_parameters.get("SequenceNumberForOrdering"), variables + ) + # TODO: XXX we don't support the Region parameter + # if "Region" in request_parameters: + # payload["Region"] = VtlTemplate().render_vtl( + # request_parameters.get("Region"), variables + # ) + return json.dumps(payload) + + +class DynamoDBIntegration(BackendIntegration): + def invoke(self, invocation_context: ApiInvocationContext): + # TODO we might want to do it plain http instead of using boto here, like kinesis + integration = invocation_context.integration + uri = integration.get("uri") or integration.get("integrationUri") or "" + + # example: arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection + action = uri.split(":dynamodb:action/")[1].split("&")[0] + + # render request template + payload = self.request_templates.render(invocation_context) + payload = json.loads(payload) + + # determine target method via reflection + clients = get_service_factory( + region_name=invocation_context.region_name, + role_arn=invocation_context.integration.get("credentials"), + ) + dynamo_client = clients.dynamodb.request_metadata( + service_principal=ServicePrincipal.apigateway, + source_arn=get_source_arn(invocation_context), + ) + method_name = camel_to_snake_case(action) + client_method = getattr(dynamo_client, method_name, None) + if not client_method: + raise Exception(f"Unsupported action {action} in API Gateway integration URI {uri}") + + # run request against DynamoDB backend + try: + response = client_method(**payload) + except ClientError as e: + response = e.response + # The request body is packed into the "Error" field. 
To make the response match AWS, we will remove that
+            # field and merge it with the response dict
+            error = response.pop("Error", {})
+            error.pop("Code", None)  # the Code is also something not relayed
+            response |= error
+
+        status_code = response.get("ResponseMetadata", {}).get("HTTPStatusCode", 200)
+        # apply response templates
+        response_content = json.dumps(remove_attributes(response, ["ResponseMetadata"]))
+        response_obj = requests_response(content=response_content)
+        response = self.response_templates.render(invocation_context, response=response_obj)
+
+        # construct final response
+        # TODO: set response header based on response templates
+        headers = {HEADER_CONTENT_TYPE: APPLICATION_JSON}
+        response = requests_response(response, headers=headers, status_code=status_code)
+
+        return response
+
+
+class S3Integration(BackendIntegration):
+    # target ARN patterns
+    TARGET_REGEX_PATH_S3_URI = rf"{ARN_PARTITION_REGEX}:apigateway:[a-zA-Z0-9\-]+:s3:path/(?P<bucket>[^/]+)/(?P<object>.+)$"
+    TARGET_REGEX_ACTION_S3_URI = rf"{ARN_PARTITION_REGEX}:apigateway:[a-zA-Z0-9\-]+:s3:action/(?:GetObject&Bucket\=(?P<bucket>[^&]+)&Key\=(?P<object>.+))$"
+
+    def invoke(self, invocation_context: ApiInvocationContext):
+        invocation_path = invocation_context.path_with_query_string
+        integration = invocation_context.integration
+        path_params = invocation_context.path_params
+        relative_path, query_string_params = extract_query_string_params(path=invocation_path)
+        uri = integration.get("uri") or integration.get("integrationUri") or ""
+
+        s3 = connect_to().s3
+        uri = apply_request_parameters(
+            uri,
+            integration=integration,
+            path_params=path_params,
+            query_params=query_string_params,
+        )
+        uri_match = re.match(self.TARGET_REGEX_PATH_S3_URI, uri) or re.match(
+            self.TARGET_REGEX_ACTION_S3_URI, uri
+        )
+        if not uri_match:
+            msg = "Request URI does not match s3 specifications"
+            LOG.warning(msg)
+            return make_error_response(msg, 400)
+
+        bucket, object_key = uri_match.group("bucket", "object")
+        LOG.debug("Getting request for bucket %s object %s", bucket, object_key)
+
+        action = None
+        invoke_args = {"Bucket": bucket, "Key": object_key}
+        match invocation_context.method:
+            case HTTPMethod.GET:
+                action = s3.get_object
+            case HTTPMethod.PUT:
+                invoke_args["Body"] = invocation_context.data
+                action = s3.put_object
+            case HTTPMethod.DELETE:
+                action = s3.delete_object
+            case _:
+                return make_error_response(
+                    "The specified method is not allowed against this resource.", 405
+                )
+
+        try:
+            object = action(**invoke_args)
+        except s3.exceptions.NoSuchKey:
+            msg = f"Object {object_key} not found"
+            LOG.debug(msg)
+            return make_error_response(msg, 404)
+
+        headers = mock_aws_request_headers(
+            service="s3",
+            aws_access_key_id=invocation_context.account_id,
+            region_name=invocation_context.region_name,
+        )
+
+        if object.get("ContentType"):
+            headers["Content-Type"] = object["ContentType"]
+
+        # stream the body so that large files do not fill up memory
+        if body := object.get("Body"):
+            response = request_response_stream(stream=body, headers=headers)
+        else:
+            response = requests_response(content="", headers=headers)
+        return response
+
+
+class HTTPIntegration(BackendIntegration):
+    @staticmethod
+    def _set_http_apigw_headers(headers: Dict[str, Any], invocation_context: ApiInvocationContext):
+        del headers["host"]
+        headers["x-amzn-apigateway-api-id"] = invocation_context.api_id
+        return headers
+
+    def invoke(self, invocation_context: ApiInvocationContext):
+        invocation_path = invocation_context.path_with_query_string
+        integration = invocation_context.integration
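+        # Overall flow: resolve integration parameters, render the request
+        # template, substitute stage variables in the URI, then proxy the call
+        # upstream with `requests` (see the steps below).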
+        path_params = invocation_context.path_params
+        method = invocation_context.method
+        headers = invocation_context.headers
+
+        relative_path, query_string_params = extract_query_string_params(path=invocation_path)
+        uri = integration.get("uri") or integration.get("integrationUri") or ""
+
+        # resolve integration parameters
+        integration_parameters = self.request_params_resolver.resolve(context=invocation_context)
+        headers.update(integration_parameters.get("headers", {}))
+        self._set_http_apigw_headers(headers, invocation_context)
+
+        # check if this is a servicediscovery integration URI
+        if ":servicediscovery:" in uri:
+            client = connect_to().servicediscovery
+            service_id = uri.split("/")[-1]
+            instances = client.list_instances(ServiceId=service_id)["Instances"]
+            instance = (instances or [None])[0]
+            if instance and instance.get("Id"):
+                uri = "http://%s/%s" % (instance["Id"], invocation_path.lstrip("/"))
+
+        # apply custom request template
+        invocation_context.context = get_event_request_context(invocation_context)
+        invocation_context.stage_variables = get_stage_variables(invocation_context)
+        payload = self.request_templates.render(invocation_context)
+
+        if isinstance(payload, dict):
+            payload = json.dumps(payload)
+
+        # https://docs.aws.amazon.com/apigateway/latest/developerguide/aws-api-gateway-stage-variables-reference.html
+        # HTTP integration URIs
+        #
+        # A stage variable can be used as part of an HTTP integration URL, as shown in the following examples:
+        #
+        # A full URI without protocol – http://${stageVariables.<variable_name>}
+        # A full domain – http://${stageVariables.<variable_name>}/resource/operation
+        # A subdomain – http://${stageVariables.<variable_name>}.example.com/resource/operation
+        # A path – http://example.com/${stageVariables.<variable_name>}/bar
+        # A query string – http://example.com/foo?q=${stageVariables.<variable_name>}
+        render_vars = {"stageVariables": invocation_context.stage_variables}
+        rendered_uri = VtlTemplate().render_vtl(uri, render_vars)
+
+        uri = apply_request_parameters(
+            rendered_uri,
+            integration=integration,
+            path_params=path_params,
+            query_params=query_string_params,
+        )
+        result = requests.request(method=method, url=uri, data=payload, headers=headers)
+        if not result.ok:
+            LOG.debug(
+                "Upstream response from <%s> %s returned with status code: %s",
+                method,
+                uri,
+                result.status_code,
+            )
+        # apply custom response template for non-proxy integration
+        invocation_context.response = result
+        if integration["type"] != "HTTP_PROXY":
+            self.response_templates.render(invocation_context)
+        return invocation_context.response
+
+
+class SQSIntegration(BackendIntegration):
+    def invoke(self, invocation_context: ApiInvocationContext):
+        integration = invocation_context.integration
+        uri = integration.get("uri") or integration.get("integrationUri") or ""
+        account_id, queue = uri.split("/")[-2:]
+        region_name = uri.split(":")[3]
+
+        headers = get_internal_mocked_headers(
+            service_name="sqs",
+            region_name=region_name,
+            role_arn=invocation_context.integration.get("credentials"),
+            source_arn=get_source_arn(invocation_context),
+        )
+
+        # integration parameters can override headers
+        integration_parameters = self.request_params_resolver.resolve(context=invocation_context)
+        headers.update(integration_parameters.get("headers", {}))
+        if "Accept" not in headers:
+            headers["Accept"] = "application/json"
+
+        if invocation_context.is_websocket_request():
+            template_key = self.render_template_selection_expression(invocation_context)
+            payload = self.request_templates.render(invocation_context, template_key)
+        else:
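+            # REST APIs have no template selection expression, so the template
+            # is presumably keyed off the request's Content-Type here.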
+            payload = self.request_templates.render(invocation_context)
+
+        # TODO: the purpose of this special-casing is unclear (carried over from
+        # the original code); check if it is still needed
+        if "GetQueueUrl" in payload or "CreateQueue" in payload:
+            new_request = f"{payload}&QueueName={queue}"
+        else:
+            queue_url = f"{config.internal_service_url()}/queue/{region_name}/{account_id}/{queue}"
+            new_request = f"{payload}&QueueUrl={queue_url}"
+
+        url = urljoin(config.internal_service_url(), f"/queue/{region_name}/{account_id}/{queue}")
+        response = common.make_http_request(url, method="POST", headers=headers, data=new_request)
+
+        # apply response template
+        invocation_context.response = response
+        response._content = self.response_templates.render(invocation_context)
+        return response
+
+
+class SNSIntegration(BackendIntegration):
+    def invoke(self, invocation_context: ApiInvocationContext) -> Response:
+        # TODO: check if the logic below is accurate - cover with snapshot tests!
+        invocation_context.context = get_event_request_context(invocation_context)
+        invocation_context.stage_variables = get_stage_variables(invocation_context)
+        integration = invocation_context.integration
+        uri = integration.get("uri") or integration.get("integrationUri") or ""
+
+        try:
+            if invocation_context.is_websocket_request():
+                template_key = self.render_template_selection_expression(invocation_context)
+                payload = self.request_templates.render(invocation_context, template_key)
+            else:
+                payload = self.request_templates.render(invocation_context)
+        except Exception as e:
+            LOG.warning("Failed to apply template for SNS integration: %s", e)
+            raise
+        region_name = uri.split(":")[3]
+        headers = mock_aws_request_headers(
+            service="sns", aws_access_key_id=invocation_context.account_id, region_name=region_name
+        )
+        response = make_http_request(
+            config.internal_service_url(), method="POST", headers=headers, data=payload
+        )
+
+        invocation_context.response = response
+        response._content = self.response_templates.render(invocation_context)
+        return self.apply_response_parameters(invocation_context, response)
+
+
+class StepFunctionIntegration(BackendIntegration):
+    @classmethod
+    def _validate_required_params(cls, request_parameters: Dict[str, Any]) -> None:
+        if not request_parameters:
+            raise BadRequestException("Missing required parameters")
+        # stateMachineArn and input are required
+        state_machine_arn_param = request_parameters.get("StateMachineArn")
+        input_param = request_parameters.get("Input")
+
+        if not state_machine_arn_param:
+            raise BadRequestException("StateMachineArn")
+
+        if not input_param:
+            raise BadRequestException("Input")
+
+    def invoke(self, invocation_context: ApiInvocationContext):
+        uri = (
+            invocation_context.integration.get("uri")
+            or invocation_context.integration.get("integrationUri")
+            or ""
+        )
+        action = uri.split("/")[-1]
+
+        if invocation_context.integration.get("IntegrationType") == "AWS_PROXY":
+            payload = self._create_request_parameters(invocation_context)
+        elif APPLICATION_JSON in invocation_context.integration.get("requestTemplates", {}):
+            payload = self.request_templates.render(invocation_context)
+            payload = json.loads(payload)
+        else:
+            payload = json.loads(invocation_context.data)
+
+        client = get_service_factory(
+            region_name=invocation_context.region_name,
+            role_arn=invocation_context.integration.get("credentials"),
+        ).stepfunctions
+
+        if isinstance(payload.get("input"), dict):
+            payload["input"] = json.dumps(payload["input"])
+
+        # Hot fix to work around the Step Functions local package's error 
responses: Unsupported Operation: 'StartSyncExecution' + method_name = ( + camel_to_snake_case(action) if action != "StartSyncExecution" else "start_execution" + ) + + try: + # call method on step function client + method = getattr(client, method_name) + except AttributeError: + msg = f"Invalid step function action: {method_name}" + LOG.error(msg) + return StepFunctionIntegration._create_response( + HTTPStatus.BAD_REQUEST.value, + headers={"Content-Type": APPLICATION_JSON}, + data=json.dumps({"message": msg}), + ) + + result = method(**payload) + result = json_safe(remove_attributes(result, ["ResponseMetadata"])) + response = StepFunctionIntegration._create_response( + HTTPStatus.OK.value, + mock_aws_request_headers( + "stepfunctions", + aws_access_key_id=invocation_context.account_id, + region_name=invocation_context.region_name, + ), + data=json.dumps(result), + ) + if action == "StartSyncExecution": + # poll for the execution result and return it + result = await_sfn_execution_result(result["executionArn"]) + result_status = result.get("status") + if result_status != "SUCCEEDED": + return StepFunctionIntegration._create_response( + HTTPStatus.INTERNAL_SERVER_ERROR.value, + headers={"Content-Type": APPLICATION_JSON}, + data=json.dumps( + { + "message": "StepFunctions execution %s failed with status '%s'" + % (result["executionArn"], result_status) + } + ), + ) + + result = json_safe(result) + response = requests_response(content=result) + + # apply response templates + invocation_context.response = response + response._content = self.response_templates.render(invocation_context) + return response + + def _create_request_parameters(self, invocation_context): + request_parameters = invocation_context.integration.get("requestParameters", {}) + self._validate_required_params(request_parameters) + + variables = { + "request": { + "header": invocation_context.headers, + "querystring": invocation_context.query_params(), + "body": invocation_context.data_as_string(), + "context": invocation_context.context or {}, + "stage_variables": invocation_context.stage_variables or {}, + } + } + rendered_input = VtlTemplate().render_vtl(request_parameters.get("Input"), variables) + return { + "stateMachineArn": request_parameters.get("StateMachineArn"), + "input": rendered_input, + } + + +class MockIntegration(BackendIntegration): + @classmethod + def check_passthrough_behavior(cls, passthrough_behavior: str, request_template: str): + return MappingTemplates(passthrough_behavior).check_passthrough_behavior(request_template) + + def invoke(self, invocation_context: ApiInvocationContext) -> Response: + passthrough_behavior = invocation_context.integration.get("passthroughBehavior") or "" + request_template = invocation_context.integration.get("requestTemplates", {}).get( + invocation_context.headers.get(HEADER_CONTENT_TYPE, APPLICATION_JSON) + ) + + # based on the configured passthrough behavior and the existence of template or not, + # we proceed calling the integration or raise an exception. 
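+        # Recap of the AWS passthrough semantics enforced via MappingTemplates:
+        # WHEN_NO_MATCH passes unmapped content types through untouched,
+        # WHEN_NO_TEMPLATES passes through only if no template is defined at all,
+        # and NEVER rejects unmapped content types with 415 Unsupported Media Type.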
+ try: + self.check_passthrough_behavior(passthrough_behavior, request_template) + except MappingTemplates.UnsupportedMediaType: + return MockIntegration._create_response( + HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value, + headers={"Content-Type": APPLICATION_JSON}, + data=json.dumps({"message": f"{HTTPStatus.UNSUPPORTED_MEDIA_TYPE.phrase}"}), + ) + + # request template rendering + request_payload = self.request_templates.render(invocation_context) + + # mapping is done based on "statusCode" field, we default to 200 + status_code = 200 + if invocation_context.headers.get(HEADER_CONTENT_TYPE) == APPLICATION_JSON: + try: + mock_response = json.loads(request_payload) + status_code = mock_response.get("statusCode", status_code) + except Exception as e: + LOG.warning("failed to deserialize request payload after transformation: %s", e) + http_status = HTTPStatus(500) + return MockIntegration._create_response( + http_status.value, + headers={"Content-Type": APPLICATION_JSON}, + data=json.dumps({"message": f"{http_status.phrase}"}), + ) + + # response template + response = MockIntegration._create_response( + status_code, invocation_context.headers, data=request_payload + ) + response._content = self.response_templates.render(invocation_context, response=response) + # apply response parameters + response = self.apply_response_parameters(invocation_context, response) + if not invocation_context.headers.get(HEADER_CONTENT_TYPE): + invocation_context.headers.update({HEADER_CONTENT_TYPE: APPLICATION_JSON}) + return response + + +# TODO: remove once we migrate all usages to `apply_request_parameters` on BackendIntegration +def apply_request_parameters( + uri: str, integration: Dict[str, Any], path_params: Dict[str, str], query_params: Dict[str, str] +): + request_parameters = integration.get("requestParameters") + uri = uri or integration.get("uri") or integration.get("integrationUri") or "" + if request_parameters: + for key in path_params: + # check if path_params is present in the integration request parameters + request_param_key = f"integration.request.path.{key}" + request_param_value = f"method.request.path.{key}" + if request_parameters.get(request_param_key) == request_param_value: + uri = uri.replace(f"{{{key}}}", path_params[key]) + + if integration.get("type") != "HTTP_PROXY" and request_parameters: + for key in query_params.copy(): + request_query_key = f"integration.request.querystring.{key}" + request_param_val = f"method.request.querystring.{key}" + if request_parameters.get(request_query_key, None) != request_param_val: + query_params.pop(key) + + return add_query_params_to_url(uri, query_params) + + +class EventBridgeIntegration(BackendIntegration): + def invoke(self, invocation_context: ApiInvocationContext): + invocation_context.context = get_event_request_context(invocation_context) + try: + payload = self.request_templates.render(invocation_context) + except Exception as e: + LOG.warning("Failed to apply template for EventBridge integration: %s", e) + raise + uri = ( + invocation_context.integration.get("uri") + or invocation_context.integration.get("integrationUri") + or "" + ) + region_name = uri.split(":")[3] + headers = get_internal_mocked_headers( + service_name="events", + region_name=region_name, + role_arn=invocation_context.integration.get("credentials"), + source_arn=get_source_arn(invocation_context), + ) + headers.update({"X-Amz-Target": invocation_context.headers.get("X-Amz-Target")}) + response = make_http_request( + config.internal_service_url(), method="POST", 
headers=headers, data=payload + ) + + invocation_context.response = response + + self.response_templates.render(invocation_context) + invocation_context.response.headers["Content-Length"] = str(len(response.content or "")) + return invocation_context.response diff --git a/localstack-core/localstack/services/apigateway/legacy/invocations.py b/localstack-core/localstack/services/apigateway/legacy/invocations.py new file mode 100644 index 0000000000000..18085fc52e22e --- /dev/null +++ b/localstack-core/localstack/services/apigateway/legacy/invocations.py @@ -0,0 +1,400 @@ +import json +import logging +import re + +from jsonschema import ValidationError, validate +from requests.models import Response +from werkzeug.exceptions import NotFound + +from localstack.aws.connect import connect_to +from localstack.constants import APPLICATION_JSON +from localstack.services.apigateway.helpers import ( + EMPTY_MODEL, + ModelResolver, + get_apigateway_store_for_invocation, +) +from localstack.services.apigateway.legacy.context import ApiInvocationContext +from localstack.services.apigateway.legacy.helpers import ( + get_cors_response, + get_event_request_context, + get_target_resource_details, + make_error_response, + set_api_id_stage_invocation_path, +) +from localstack.services.apigateway.legacy.integration import ( + ApiGatewayIntegrationError, + DynamoDBIntegration, + EventBridgeIntegration, + HTTPIntegration, + KinesisIntegration, + LambdaIntegration, + LambdaProxyIntegration, + MockIntegration, + S3Integration, + SNSIntegration, + SQSIntegration, + StepFunctionIntegration, +) +from localstack.services.apigateway.models import ApiGatewayStore +from localstack.utils.aws.arns import ARN_PARTITION_REGEX +from localstack.utils.aws.aws_responses import requests_response + +LOG = logging.getLogger(__name__) + + +class AuthorizationError(Exception): + message: str + status_code: int + + def __init__(self, message: str, status_code: int): + super().__init__(message) + self.message = message + self.status_code = status_code + + def to_response(self): + return requests_response({"message": self.message}, status_code=self.status_code) + + +# we separate those 2 exceptions to allow better GatewayResponse support later on +class BadRequestParameters(Exception): + message: str + + def __init__(self, message: str): + super().__init__(message) + self.message = message + + def to_response(self): + return requests_response({"message": self.message}, status_code=400) + + +class BadRequestBody(Exception): + message: str + + def __init__(self, message: str): + super().__init__(message) + self.message = message + + def to_response(self): + return requests_response({"message": self.message}, status_code=400) + + +class RequestValidator: + __slots__ = ["context", "rest_api_container"] + + def __init__(self, context: ApiInvocationContext, store: ApiGatewayStore = None): + self.context = context + store = store or get_apigateway_store_for_invocation(context=context) + if not (container := store.rest_apis.get(context.api_id)): + # TODO: find the right exception + raise NotFound() + self.rest_api_container = container + + def validate_request(self) -> None: + """ + :raises BadRequestParameters if the request has required parameters which are not present + :raises BadRequestBody if the request has required body validation with a model and it does not respect it + :return: None + """ + # make all the positive checks first + if self.context.resource is None or "resourceMethods" not in self.context.resource: + return + + 
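+        # Sketch of the data consulted below (field names per the API Gateway
+        # API): the resource method carries a "requestValidatorId" pointing at a
+        # validator such as
+        #   {"validateRequestParameters": True, "validateRequestBody": False}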
resource_methods = self.context.resource["resourceMethods"] + if self.context.method not in resource_methods and "ANY" not in resource_methods: + return + + # check if there is validator for the resource + resource = resource_methods.get(self.context.method, resource_methods.get("ANY", {})) + if not (resource.get("requestValidatorId") or "").strip(): + return + + # check if there is a validator for this request + validator = self.rest_api_container.validators.get(resource["requestValidatorId"]) + if not validator: + return + + if self.should_validate_request(validator) and ( + missing_parameters := self._get_missing_required_parameters(resource) + ): + message = f"Missing required request parameters: [{', '.join(missing_parameters)}]" + raise BadRequestParameters(message=message) + + if self.should_validate_body(validator) and not self._is_body_valid(resource): + raise BadRequestBody(message="Invalid request body") + + return + + def _is_body_valid(self, resource) -> bool: + # if there's no model to validate the body, use the Empty model + # https://docs.aws.amazon.com/cdk/api/v1/docs/@aws-cdk_aws-apigateway.EmptyModel.html + if not (request_models := resource.get("requestModels")): + model_name = EMPTY_MODEL + else: + model_name = request_models.get( + APPLICATION_JSON, request_models.get("$default", EMPTY_MODEL) + ) + + model_resolver = ModelResolver( + rest_api_container=self.rest_api_container, + model_name=model_name, + ) + + # try to get the resolved model first + resolved_schema = model_resolver.get_resolved_model() + if not resolved_schema: + LOG.exception( + "An exception occurred while trying to validate the request: could not find the model" + ) + return False + + try: + # if the body is empty, replace it with an empty JSON body + validate( + instance=json.loads(self.context.data or "{}"), + schema=resolved_schema, + ) + return True + except ValidationError as e: + LOG.warning("failed to validate request body %s", e) + return False + except json.JSONDecodeError as e: + LOG.warning("failed to validate request body, request data is not valid JSON %s", e) + return False + + def _get_missing_required_parameters(self, resource) -> list[str]: + missing_params = [] + if not (request_parameters := resource.get("requestParameters")): + return missing_params + + for request_parameter, required in sorted(request_parameters.items()): + if not required: + continue + + param_type, param_value = request_parameter.removeprefix("method.request.").split(".") + match param_type: + case "header": + is_missing = param_value not in self.context.headers + case "path": + is_missing = param_value not in self.context.resource_path + case "querystring": + is_missing = param_value not in self.context.query_params() + case _: + # TODO: method.request.body is not specified in the documentation, and requestModels should do it + # verify this + is_missing = False + + if is_missing: + missing_params.append(param_value) + + return missing_params + + @staticmethod + def should_validate_body(validator): + return validator["validateRequestBody"] + + @staticmethod + def should_validate_request(validator): + return validator.get("validateRequestParameters") + + +# ------------ +# API METHODS +# ------------ + + +def validate_api_key(api_key: str, invocation_context: ApiInvocationContext): + usage_plan_ids = [] + client = connect_to( + aws_access_key_id=invocation_context.account_id, region_name=invocation_context.region_name + ).apigateway + + usage_plans = client.get_usage_plans() + for item in 
usage_plans.get("items", []): + api_stages = item.get("apiStages", []) + usage_plan_ids.extend( + item.get("id") + for api_stage in api_stages + if ( + api_stage.get("stage") == invocation_context.stage + and api_stage.get("apiId") == invocation_context.api_id + ) + ) + for usage_plan_id in usage_plan_ids: + usage_plan_keys = client.get_usage_plan_keys(usagePlanId=usage_plan_id) + for key in usage_plan_keys.get("items", []): + if key.get("value") == api_key: + # check if the key is enabled + api_key = client.get_api_key(apiKey=key.get("id")) + return api_key.get("enabled") in ("true", True) + + return False + + +def is_api_key_valid(invocation_context: ApiInvocationContext) -> bool: + # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-key-source.html + client = connect_to( + aws_access_key_id=invocation_context.account_id, region_name=invocation_context.region_name + ).apigateway + rest_api = client.get_rest_api(restApiId=invocation_context.api_id) + + # The source of the API key for metering requests according to a usage plan. + # Valid values are: + # - HEADER to read the API key from the X-API-Key header of a request. + # - AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer. + + api_key_source = rest_api.get("apiKeySource") + match api_key_source: + case "HEADER": + api_key = invocation_context.headers.get("X-API-Key") + return validate_api_key(api_key, invocation_context) if api_key else False + case "AUTHORIZER": + api_key = invocation_context.auth_identity.get("apiKey") + return validate_api_key(api_key, invocation_context) if api_key else False + + +def update_content_length(response: Response): + if response and response.content is not None: + response.headers["Content-Length"] = str(len(response.content)) + + +def invoke_rest_api_from_request(invocation_context: ApiInvocationContext): + set_api_id_stage_invocation_path(invocation_context) + try: + return invoke_rest_api(invocation_context) + except AuthorizationError as e: + LOG.warning( + "Authorization error while invoking API Gateway ID %s: %s", + invocation_context.api_id, + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + return e.to_response() + + +def invoke_rest_api(invocation_context: ApiInvocationContext): + invocation_path = invocation_context.path_with_query_string + raw_path = invocation_context.path or invocation_path + method = invocation_context.method + headers = invocation_context.headers + + extracted_path, resource = get_target_resource_details(invocation_context) + if not resource: + return make_error_response("Unable to find path %s" % invocation_context.path, 404) + + # validate request + validator = RequestValidator(invocation_context) + try: + validator.validate_request() + except (BadRequestParameters, BadRequestBody) as e: + return e.to_response() + + api_key_required = resource.get("resourceMethods", {}).get(method, {}).get("apiKeyRequired") + if api_key_required and not is_api_key_valid(invocation_context): + raise AuthorizationError("Forbidden", 403) + + resource_methods = resource.get("resourceMethods", {}) + resource_method = resource_methods.get(method, {}) + if not resource_method: + # HttpMethod: '*' + # ResourcePath: '/*' - produces 'X-AMAZON-APIGATEWAY-ANY-METHOD' + resource_method = resource_methods.get("ANY", {}) or resource_methods.get( + "X-AMAZON-APIGATEWAY-ANY-METHOD", {} + ) + method_integration = resource_method.get("methodIntegration") + if not method_integration: + if method == "OPTIONS" and "Origin" in headers: + # 
default to returning CORS headers if this is an OPTIONS request + return get_cors_response(headers) + return make_error_response( + "Unable to find integration for: %s %s (%s)" % (method, invocation_path, raw_path), + 404, + ) + + # update fields in invocation context, then forward request to next handler + invocation_context.resource_path = extracted_path + invocation_context.integration = method_integration + + return invoke_rest_api_integration(invocation_context) + + +def invoke_rest_api_integration(invocation_context: ApiInvocationContext): + try: + response = invoke_rest_api_integration_backend(invocation_context) + # TODO remove this setter once all the integrations are migrated to the new response + # handling + invocation_context.response = response + return response + except ApiGatewayIntegrationError as e: + LOG.warning( + "Error while invoking integration for ApiGateway ID %s: %s", + invocation_context.api_id, + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + return e.to_response() + except Exception as e: + msg = f"Error invoking integration for API Gateway ID '{invocation_context.api_id}': {e}" + LOG.exception(msg) + return make_error_response(msg, 400) + + +# This function is patched downstream for backend integrations that are only available +# in Pro (potentially to be replaced with a runtime hook in the future). +def invoke_rest_api_integration_backend(invocation_context: ApiInvocationContext): + # define local aliases from invocation context + method = invocation_context.method + headers = invocation_context.headers + integration = invocation_context.integration + integration_type_orig = integration.get("type") or integration.get("integrationType") or "" + integration_type = integration_type_orig.upper() + integration_method = integration.get("httpMethod") + uri = integration.get("uri") or integration.get("integrationUri") or "" + + if (re.match(f"{ARN_PARTITION_REGEX}:apigateway:", uri) and ":lambda:path" in uri) or re.match( + f"{ARN_PARTITION_REGEX}:lambda", uri + ): + invocation_context.context = get_event_request_context(invocation_context) + if integration_type == "AWS_PROXY": + return LambdaProxyIntegration().invoke(invocation_context) + elif integration_type == "AWS": + return LambdaIntegration().invoke(invocation_context) + + elif integration_type == "AWS": + if "kinesis:action/" in uri: + return KinesisIntegration().invoke(invocation_context) + + if "states:action/" in uri: + return StepFunctionIntegration().invoke(invocation_context) + + if ":dynamodb:action" in uri: + return DynamoDBIntegration().invoke(invocation_context) + + if "s3:path/" in uri or "s3:action/" in uri: + return S3Integration().invoke(invocation_context) + + if integration_method == "POST" and ":sqs:path" in uri: + return SQSIntegration().invoke(invocation_context) + + if method == "POST" and ":sns:path" in uri: + return SNSIntegration().invoke(invocation_context) + + if ( + method == "POST" + and re.match(f"{ARN_PARTITION_REGEX}:apigateway:", uri) + and "events:action/PutEvents" in uri + ): + return EventBridgeIntegration().invoke(invocation_context) + + elif integration_type in ["HTTP_PROXY", "HTTP"]: + return HTTPIntegration().invoke(invocation_context) + + elif integration_type == "MOCK": + return MockIntegration().invoke(invocation_context) + + if method == "OPTIONS": + # fall back to returning CORS headers if this is an OPTIONS request + return get_cors_response(headers) + + raise Exception( + f'API Gateway integration type "{integration_type}", method "{method}", URI "{uri}" not 
yet implemented' + ) diff --git a/localstack-core/localstack/services/apigateway/legacy/provider.py b/localstack-core/localstack/services/apigateway/legacy/provider.py new file mode 100644 index 0000000000000..084108eaf2e0c --- /dev/null +++ b/localstack-core/localstack/services/apigateway/legacy/provider.py @@ -0,0 +1,3050 @@ +import copy +import io +import json +import logging +import re +from copy import deepcopy +from datetime import datetime +from typing import IO, Any + +from moto.apigateway import models as apigw_models +from moto.apigateway.models import Resource as MotoResource +from moto.apigateway.models import RestAPI as MotoRestAPI +from moto.core.utils import camelcase_to_underscores + +from localstack.aws.api import CommonServiceException, RequestContext, ServiceRequest, handler +from localstack.aws.api.apigateway import ( + Account, + ApigatewayApi, + ApiKey, + ApiKeys, + Authorizer, + Authorizers, + BadRequestException, + BasePathMapping, + BasePathMappings, + Blob, + Boolean, + ClientCertificate, + ClientCertificates, + ConflictException, + ConnectionType, + CreateAuthorizerRequest, + CreateRestApiRequest, + CreateStageRequest, + Deployment, + DocumentationPart, + DocumentationPartIds, + DocumentationPartLocation, + DocumentationParts, + DocumentationVersion, + DocumentationVersions, + DomainName, + DomainNames, + DomainNameStatus, + EndpointConfiguration, + ExportResponse, + GatewayResponse, + GatewayResponses, + GatewayResponseType, + GetDocumentationPartsRequest, + Integration, + IntegrationResponse, + IntegrationType, + ListOfApiStage, + ListOfPatchOperation, + ListOfStageKeys, + ListOfString, + MapOfStringToBoolean, + MapOfStringToString, + Method, + MethodResponse, + Model, + Models, + MutualTlsAuthenticationInput, + NotFoundException, + NullableBoolean, + NullableInteger, + PutIntegrationRequest, + PutIntegrationResponseRequest, + PutMode, + PutRestApiRequest, + QuotaSettings, + RequestValidator, + RequestValidators, + Resource, + ResourceOwner, + RestApi, + RestApis, + SecurityPolicy, + Stage, + Stages, + StatusCode, + String, + Tags, + TestInvokeMethodRequest, + TestInvokeMethodResponse, + ThrottleSettings, + UsagePlan, + UsagePlanKeys, + UsagePlans, + VpcLink, + VpcLinks, +) +from localstack.aws.connect import connect_to +from localstack.aws.forwarder import NotImplementedAvoidFallbackError, create_aws_request_context +from localstack.constants import APPLICATION_JSON +from localstack.services.apigateway.exporter import OpenApiExporter +from localstack.services.apigateway.helpers import ( + EMPTY_MODEL, + ERROR_MODEL, + INVOKE_TEST_LOG_TEMPLATE, + OpenAPIExt, + apply_json_patch_safe, + get_apigateway_store, + get_moto_backend, + get_moto_rest_api, + get_regional_domain_name, + get_rest_api_container, + import_api_from_openapi_spec, + is_greedy_path, + is_variable_path, + resolve_references, +) +from localstack.services.apigateway.legacy.helpers import multi_value_dict_for_list +from localstack.services.apigateway.legacy.invocations import invoke_rest_api_from_request +from localstack.services.apigateway.legacy.router_asf import ApigatewayRouter, to_invocation_context +from localstack.services.apigateway.models import ApiGatewayStore, RestApiContainer +from localstack.services.apigateway.next_gen.execute_api.router import ( + ApiGatewayRouter as ApiGatewayRouterNextGen, +) +from localstack.services.apigateway.patches import apply_patches +from localstack.services.edge import ROUTER +from localstack.services.moto import call_moto, call_moto_with_request +from 
localstack.services.plugins import ServiceLifecycleHook +from localstack.utils.aws.arns import InvalidArnException, get_partition, parse_arn +from localstack.utils.collections import ( + DelSafeDict, + PaginatedList, + ensure_list, + select_from_typed_dict, +) +from localstack.utils.json import parse_json_or_yaml +from localstack.utils.strings import md5, short_uid, str_to_bool, to_bytes, to_str +from localstack.utils.time import TIMESTAMP_FORMAT_TZ, now_utc, timestamp + +LOG = logging.getLogger(__name__) + +# list of valid paths for Stage update patch operations (extracted from AWS responses via snapshot tests) +STAGE_UPDATE_PATHS = [ + "/deploymentId", + "/description", + "/cacheClusterEnabled", + "/cacheClusterSize", + "/clientCertificateId", + "/accessLogSettings", + "/accessLogSettings/destinationArn", + "/accessLogSettings/format", + "/{resourcePath}/{httpMethod}/metrics/enabled", + "/{resourcePath}/{httpMethod}/logging/dataTrace", + "/{resourcePath}/{httpMethod}/logging/loglevel", + "/{resourcePath}/{httpMethod}/throttling/burstLimit", + "/{resourcePath}/{httpMethod}/throttling/rateLimit", + "/{resourcePath}/{httpMethod}/caching/ttlInSeconds", + "/{resourcePath}/{httpMethod}/caching/enabled", + "/{resourcePath}/{httpMethod}/caching/dataEncrypted", + "/{resourcePath}/{httpMethod}/caching/requireAuthorizationForCacheControl", + "/{resourcePath}/{httpMethod}/caching/unauthorizedCacheControlHeaderStrategy", + "/*/*/metrics/enabled", + "/*/*/logging/dataTrace", + "/*/*/logging/loglevel", + "/*/*/throttling/burstLimit", + "/*/*/throttling/rateLimit", + "/*/*/caching/ttlInSeconds", + "/*/*/caching/enabled", + "/*/*/caching/dataEncrypted", + "/*/*/caching/requireAuthorizationForCacheControl", + "/*/*/caching/unauthorizedCacheControlHeaderStrategy", + "/variables/{variable_name}", + "/tracingEnabled", +] + +VALID_INTEGRATION_TYPES = { + IntegrationType.AWS, + IntegrationType.AWS_PROXY, + IntegrationType.HTTP, + IntegrationType.HTTP_PROXY, + IntegrationType.MOCK, +} + + +class ApigatewayProvider(ApigatewayApi, ServiceLifecycleHook): + router: ApigatewayRouter | ApiGatewayRouterNextGen + + def __init__(self, router: ApigatewayRouter | ApiGatewayRouterNextGen = None): + self.router = router or ApigatewayRouter(ROUTER) + + def on_after_init(self): + apply_patches() + self.router.register_routes() + + @handler("TestInvokeMethod", expand=False) + def test_invoke_method( + self, context: RequestContext, request: TestInvokeMethodRequest + ) -> TestInvokeMethodResponse: + invocation_context = to_invocation_context(context.request) + invocation_context.method = request.get("httpMethod") + invocation_context.api_id = request.get("restApiId") + invocation_context.path_with_query_string = request.get("pathWithQueryString") + invocation_context.region_name = context.region + invocation_context.account_id = context.account_id + + moto_rest_api = get_moto_rest_api(context=context, rest_api_id=invocation_context.api_id) + resource = moto_rest_api.resources.get(request["resourceId"]) + if not resource: + raise NotFoundException("Invalid Resource identifier specified") + + invocation_context.resource = {"id": resource.id} + invocation_context.resource_path = resource.path_part + + if data := parse_json_or_yaml(to_str(invocation_context.data or b"")): + invocation_context.data = data.get("body") + invocation_context.headers = data.get("headers", {}) + + req_start_time = datetime.now() + result = invoke_rest_api_from_request(invocation_context) + req_end_time = datetime.now() + + # TODO: add the missing fields 
to the log. Next iteration will add helpers to extract the missing fields + # from the apicontext + formatted_date = req_start_time.strftime("%a %b %d %H:%M:%S %Z %Y") + log = INVOKE_TEST_LOG_TEMPLATE.format( + request_id=invocation_context.context["requestId"], + formatted_date=formatted_date, + http_method=invocation_context.method, + resource_path=invocation_context.invocation_path, + request_path="", + query_string="", + request_headers="", + request_body="", + response_body="", + response_headers=result.headers, + status_code=result.status_code, + ) + + return TestInvokeMethodResponse( + status=result.status_code, + headers=dict(result.headers), + body=to_str(result.content), + log=log, + latency=int((req_end_time - req_start_time).total_seconds()), + multiValueHeaders=multi_value_dict_for_list(result.headers), + ) + + @handler("CreateRestApi", expand=False) + def create_rest_api(self, context: RequestContext, request: CreateRestApiRequest) -> RestApi: + if request.get("description") == "": + raise BadRequestException("Description cannot be an empty string") + + minimum_compression_size = request.get("minimumCompressionSize") + if minimum_compression_size is not None and ( + minimum_compression_size < 0 or minimum_compression_size > 10485760 + ): + raise BadRequestException( + "Invalid minimum compression size, must be between 0 and 10485760" + ) + + result = call_moto(context) + rest_api = get_moto_rest_api(context, rest_api_id=result["id"]) + rest_api.version = request.get("version") + if binary_media_types := request.get("binaryMediaTypes"): + rest_api.binaryMediaTypes = binary_media_types + + response: RestApi = rest_api.to_dict() + remove_empty_attributes_from_rest_api(response) + store = get_apigateway_store(context=context) + rest_api_container = RestApiContainer(rest_api=response) + store.rest_apis[result["id"]] = rest_api_container + # add the 2 default models + rest_api_container.models[EMPTY_MODEL] = DEFAULT_EMPTY_MODEL + rest_api_container.models[ERROR_MODEL] = DEFAULT_ERROR_MODEL + + return response + + def create_api_key( + self, + context: RequestContext, + name: String = None, + description: String = None, + enabled: Boolean = None, + generate_distinct_id: Boolean = None, + value: String = None, + stage_keys: ListOfStageKeys = None, + customer_id: String = None, + tags: MapOfStringToString = None, + **kwargs, + ) -> ApiKey: + api_key = call_moto(context) + + # transform array of stage keys [{'restApiId': '0iscapk09u', 'stageName': 'dev'}] into + # array of strings ['0iscapk09u/dev'] + stage_keys = api_key.get("stageKeys", []) + api_key["stageKeys"] = [f"{sk['restApiId']}/{sk['stageName']}" for sk in stage_keys] + + return api_key + + def get_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> RestApi: + rest_api: RestApi = call_moto(context) + remove_empty_attributes_from_rest_api(rest_api) + return rest_api + + def update_rest_api( + self, + context: RequestContext, + rest_api_id: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> RestApi: + rest_api = get_moto_rest_api(context, rest_api_id) + + fixed_patch_ops = [] + binary_media_types_path = "/binaryMediaTypes" + # TODO: validate a bit more patch operations + for patch_op in patch_operations: + patch_op_path = patch_op.get("path", "") + # binaryMediaTypes has a specific way of being set + # see https://docs.aws.amazon.com/apigateway/latest/api/API_PatchOperation.html + # TODO: maybe implement a more generalized way if this happens anywhere else + if 
patch_op_path.startswith(binary_media_types_path): + if patch_op_path == binary_media_types_path: + raise BadRequestException(f"Invalid patch path {patch_op_path}") + value = patch_op_path.rsplit("/", maxsplit=1)[-1] + path_value = value.replace("~1", "/") + patch_op["path"] = binary_media_types_path + + if patch_op["op"] == "add": + patch_op["value"] = path_value + + elif patch_op["op"] == "remove": + remove_index = rest_api.binaryMediaTypes.index(path_value) + patch_op["path"] = f"{binary_media_types_path}/{remove_index}" + + elif patch_op["op"] == "replace": + # AWS is behaving weirdly, and will actually remove/add instead of replacing in place + # it will put the replaced value last in the array + replace_index = rest_api.binaryMediaTypes.index(path_value) + fixed_patch_ops.append( + {"op": "remove", "path": f"{binary_media_types_path}/{replace_index}"} + ) + patch_op["op"] = "add" + + elif patch_op_path == "/minimumCompressionSize": + if patch_op["op"] != "replace": + raise BadRequestException( + "Invalid patch operation specified. Must be one of: [replace]" + ) + + try: + # try to cast the value to integer if truthy, else reject + value = int(val) if (val := patch_op.get("value")) else None + except ValueError: + raise BadRequestException( + "Invalid minimum compression size, must be between 0 and 10485760" + ) + + if value is not None and (value < 0 or value > 10485760): + raise BadRequestException( + "Invalid minimum compression size, must be between 0 and 10485760" + ) + patch_op["value"] = value + + fixed_patch_ops.append(patch_op) + + patch_api_gateway_entity(rest_api, fixed_patch_ops) + + # fix data types after patches have been applied + endpoint_configs = rest_api.endpoint_configuration or {} + if isinstance(endpoint_configs.get("vpcEndpointIds"), str): + endpoint_configs["vpcEndpointIds"] = [endpoint_configs["vpcEndpointIds"]] + + # minimum_compression_size is a unique path as it's a nullable integer, + # it would throw an error if it stays an empty string + if rest_api.minimum_compression_size == "": + rest_api.minimum_compression_size = None + + response = rest_api.to_dict() + + remove_empty_attributes_from_rest_api(response, remove_tags=False) + store = get_apigateway_store(context=context) + store.rest_apis[rest_api_id].rest_api = response + return response + + @handler("PutRestApi", expand=False) + def put_rest_api(self, context: RequestContext, request: PutRestApiRequest) -> RestApi: + # TODO: take into account the mode: overwrite or merge + # the default is now `merge`, but we are removing everything + rest_api = get_moto_rest_api(context, request["restApiId"]) + rest_api, warnings = import_api_from_openapi_spec( + rest_api, context=context, request=request + ) + + rest_api.root_resource_id = get_moto_rest_api_root_resource(rest_api) + response = rest_api.to_dict() + remove_empty_attributes_from_rest_api(response) + store = get_apigateway_store(context=context) + store.rest_apis[request["restApiId"]].rest_api = response + # TODO: verify this + response = to_rest_api_response_json(response) + response.setdefault("tags", {}) + + # TODO Failing still keeps all applied mutations. 
We need to revert to the previous state instead + if warnings: + response["warnings"] = warnings + + return response + + @handler("CreateDomainName") + def create_domain_name( + self, + context: RequestContext, + domain_name: String, + certificate_name: String = None, + certificate_body: String = None, + certificate_private_key: String = None, + certificate_chain: String = None, + certificate_arn: String = None, + regional_certificate_name: String = None, + regional_certificate_arn: String = None, + endpoint_configuration: EndpointConfiguration = None, + tags: MapOfStringToString = None, + security_policy: SecurityPolicy = None, + mutual_tls_authentication: MutualTlsAuthenticationInput = None, + ownership_verification_certificate_arn: String = None, + policy: String = None, + **kwargs, + ) -> DomainName: + if not domain_name: + raise BadRequestException("No Domain Name specified") + + store: ApiGatewayStore = get_apigateway_store(context=context) + if store.domain_names.get(domain_name): + raise ConflictException(f"Domain name with ID {domain_name} already exists") + + # find matching hosted zone + zone_id = None + # TODO check if this call is IAM enforced + route53 = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).route53 + hosted_zones = route53.list_hosted_zones().get("HostedZones", []) + hosted_zones = [hz for hz in hosted_zones if domain_name.endswith(hz["Name"].strip("."))] + zone_id = hosted_zones[0]["Id"].replace("/hostedzone/", "") if hosted_zones else zone_id + + domain: DomainName = DomainName( + domainName=domain_name, + certificateName=certificate_name, + certificateArn=certificate_arn, + regionalDomainName=get_regional_domain_name(domain_name), + domainNameStatus=DomainNameStatus.AVAILABLE, + regionalHostedZoneId=zone_id, + regionalCertificateName=regional_certificate_name, + regionalCertificateArn=regional_certificate_arn, + securityPolicy=SecurityPolicy.TLS_1_2, + endpointConfiguration=endpoint_configuration, + ) + store.domain_names[domain_name] = domain + return domain + + @handler("GetDomainName") + def get_domain_name( + self, context: RequestContext, domain_name: String, domain_name_id: String = None, **kwargs + ) -> DomainName: + store: ApiGatewayStore = get_apigateway_store(context=context) + if domain := store.domain_names.get(domain_name): + return domain + raise NotFoundException("Invalid domain name identifier specified") + + @handler("GetDomainNames") + def get_domain_names( + self, + context: RequestContext, + position: String = None, + limit: NullableInteger = None, + resource_owner: ResourceOwner = None, + **kwargs, + ) -> DomainNames: + store = get_apigateway_store(context=context) + domain_names = store.domain_names.values() + return DomainNames(items=list(domain_names), position=position) + + @handler("DeleteDomainName") + def delete_domain_name( + self, context: RequestContext, domain_name: String, domain_name_id: String = None, **kwargs + ) -> None: + store: ApiGatewayStore = get_apigateway_store(context=context) + if not store.domain_names.pop(domain_name, None): + raise NotFoundException("Invalid domain name identifier specified") + + def delete_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> None: + try: + store = get_apigateway_store(context=context) + store.rest_apis.pop(rest_api_id, None) + call_moto(context) + except KeyError as e: + # moto raises a key error if we're trying to delete an API that doesn't exist + raise NotFoundException( + f"Invalid API identifier specified 
{context.account_id}:{rest_api_id}" + ) from e + + def get_rest_apis( + self, + context: RequestContext, + position: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> RestApis: + response: RestApis = call_moto(context) + for rest_api in response["items"]: + remove_empty_attributes_from_rest_api(rest_api) + return response + + # resources + + def create_resource( + self, + context: RequestContext, + rest_api_id: String, + parent_id: String, + path_part: String, + **kwargs, + ) -> Resource: + moto_rest_api = get_moto_rest_api(context, rest_api_id) + parent_moto_resource: MotoResource = moto_rest_api.resources.get(parent_id, None) + # validate here if the parent exists. Moto would first create then validate, which would lead to the resource + # being created anyway + if not parent_moto_resource: + raise NotFoundException("Invalid Resource identifier specified") + + parent_path = parent_moto_resource.path_part + if is_greedy_path(parent_path): + raise BadRequestException( + f"Cannot create a child of a resource with a greedy path variable: {parent_path}" + ) + + store = get_apigateway_store(context=context) + rest_api = store.rest_apis.get(rest_api_id) + children = rest_api.resource_children.setdefault(parent_id, []) + + if is_variable_path(path_part): + for sibling in children: + sibling_resource: MotoResource = moto_rest_api.resources.get(sibling, None) + if is_variable_path(sibling_resource.path_part): + raise BadRequestException( + f"A sibling ({sibling_resource.path_part}) of this resource already has a variable path part -- only one is allowed" + ) + + response: Resource = call_moto(context) + + # save children to allow easy deletion of all children if we delete a parent route + children.append(response["id"]) + + return response + + def delete_resource( + self, context: RequestContext, rest_api_id: String, resource_id: String, **kwargs + ) -> None: + moto_rest_api = get_moto_rest_api(context, rest_api_id) + + moto_resource: MotoResource = moto_rest_api.resources.pop(resource_id, None) + if not moto_resource: + raise NotFoundException("Invalid Resource identifier specified") + + store = get_apigateway_store(context=context) + rest_api = store.rest_apis.get(rest_api_id) + api_resources = rest_api.resource_children + # we need to recursively delete all children resources of the resource we're deleting + + def _delete_children(resource_to_delete: str): + children = api_resources.get(resource_to_delete, []) + for child in children: + moto_rest_api.resources.pop(child) + _delete_children(child) + + api_resources.pop(resource_to_delete, None) + + _delete_children(resource_id) + + # remove the resource as a child from its parent + parent_id = moto_resource.parent_id + api_resources[parent_id].remove(resource_id) + + def update_integration_response( + self, + context: RequestContext, + rest_api_id: String, + resource_id: String, + http_method: String, + status_code: StatusCode, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> IntegrationResponse: + # XXX: THIS IS NOT A COMPLETE IMPLEMENTATION, just the minimum required to get tests going + # TODO: validate patch operations + + moto_rest_api = get_moto_rest_api(context, rest_api_id) + moto_resource = moto_rest_api.resources.get(resource_id) + if not moto_resource: + raise NotFoundException("Invalid Resource identifier specified") + + moto_method = moto_resource.resource_methods.get(http_method) + if not moto_method: + raise NotFoundException("Invalid Method identifier specified") + + integration_response = 
moto_method.method_integration.integration_responses.get(status_code) + if not integration_response: + raise NotFoundException("Invalid Integration Response identifier specified") + + for patch_operation in patch_operations: + op = patch_operation.get("op") + path = patch_operation.get("path") + + # for path "/responseTemplates/application~1json" + if "/responseTemplates" in path: + integration_response.response_templates = ( + integration_response.response_templates or {} + ) + value = patch_operation.get("value") + if not isinstance(value, str): + raise BadRequestException( + f"Invalid patch value '{value}' specified for op '{op}'. Must be a string" + ) + param = path.removeprefix("/responseTemplates/") + param = param.replace("~1", "/") + if op == "remove": + integration_response.response_templates.pop(param) + elif op in ("add", "replace"): + integration_response.response_templates[param] = value + + elif "/contentHandling" in path and op == "replace": + integration_response.content_handling = patch_operation.get("value") + + def update_resource( + self, + context: RequestContext, + rest_api_id: String, + resource_id: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> Resource: + moto_rest_api = get_moto_rest_api(context, rest_api_id) + moto_resource = moto_rest_api.resources.get(resource_id) + if not moto_resource: + raise NotFoundException("Invalid Resource identifier specified") + + store = get_apigateway_store(context=context) + + rest_api = store.rest_apis.get(rest_api_id) + api_resources = rest_api.resource_children + + future_path_part = moto_resource.path_part + current_parent_id = moto_resource.parent_id + + for patch_operation in patch_operations: + op = patch_operation.get("op") + if (path := patch_operation.get("path")) not in ("/pathPart", "/parentId"): + raise BadRequestException( + f"Invalid patch path '{path}' specified for op '{op}'. Must be one of: [/parentId, /pathPart]" + ) + if op != "replace": + raise BadRequestException( + f"Invalid patch path '{path}' specified for op '{op}'. 
Please choose supported operations" + ) + + if path == "/parentId": + value = patch_operation.get("value") + future_parent_resource = moto_rest_api.resources.get(value) + if not future_parent_resource: + raise NotFoundException("Invalid Resource identifier specified") + + children_resources = api_resources.get(resource_id, []) + if value in children_resources: + raise BadRequestException("Resources cannot be cyclical.") + + new_sibling_resources = api_resources.get(value, []) + + else: # path == "/pathPart" + future_path_part = patch_operation.get("value") + new_sibling_resources = api_resources.get(moto_resource.parent_id, []) + + for sibling in new_sibling_resources: + sibling_resource = moto_rest_api.resources[sibling] + if sibling_resource.path_part == future_path_part: + raise ConflictException( + f"Another resource with the same parent already has this name: {future_path_part}" + ) + + # TODO: test with multiple patch operations which would not be compatible between each other + patch_api_gateway_entity(moto_resource, patch_operations) + + # after setting it, mutate the store + if moto_resource.parent_id != current_parent_id: + current_sibling_resources = api_resources.get(current_parent_id) + if current_sibling_resources: + current_sibling_resources.remove(resource_id) + # if the parent does not have children anymore, remove from the list + if not current_sibling_resources: + api_resources.pop(current_parent_id) + + # add it to the new parent children + future_sibling_resources = api_resources[moto_resource.parent_id] + future_sibling_resources.append(resource_id) + + response = moto_resource.to_dict() + return response + + # resource method + + def get_method( + self, + context: RequestContext, + rest_api_id: String, + resource_id: String, + http_method: String, + **kwargs, + ) -> Method: + response: Method = call_moto(context) + remove_empty_attributes_from_method(response) + if method_integration := response.get("methodIntegration"): + remove_empty_attributes_from_integration(method_integration) + # moto will not return `responseParameters` field if it's not truthy, but AWS will return an empty dict + # if it was set to an empty dict + if "responseParameters" not in method_integration: + moto_rest_api = get_moto_rest_api(context, rest_api_id) + moto_resource = moto_rest_api.resources[resource_id] + moto_method_integration = moto_resource.resource_methods[ + http_method + ].method_integration + if moto_method_integration.integration_responses: + for ( + status_code, + integration_response, + ) in moto_method_integration.integration_responses.items(): + if integration_response.response_parameters == {}: + method_integration["integrationResponses"][str(status_code)][ + "responseParameters" + ] = {} + + return response + + def put_method( + self, + context: RequestContext, + rest_api_id: String, + resource_id: String, + http_method: String, + authorization_type: String, + authorizer_id: String = None, + api_key_required: Boolean = None, + operation_name: String = None, + request_parameters: MapOfStringToBoolean = None, + request_models: MapOfStringToString = None, + request_validator_id: String = None, + authorization_scopes: ListOfString = None, + **kwargs, + ) -> Method: + # TODO: add missing validation? 
check order of validation as well + moto_backend = get_moto_backend(context.account_id, context.region) + moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id) + if not moto_rest_api or not (moto_resource := moto_rest_api.resources.get(resource_id)): + raise NotFoundException("Invalid Resource identifier specified") + + if http_method not in ("GET", "PUT", "POST", "DELETE", "PATCH", "OPTIONS", "HEAD", "ANY"): + raise BadRequestException( + "Invalid HttpMethod specified. " + "Valid options are GET,PUT,POST,DELETE,PATCH,OPTIONS,HEAD,ANY" + ) + + if request_parameters: + request_parameters_names = { + name.rsplit(".", maxsplit=1)[-1] for name in request_parameters.keys() + } + if len(request_parameters_names) != len(request_parameters): + raise BadRequestException( + "Parameter names must be unique across querystring, header and path" + ) + need_authorizer_id = authorization_type in ("CUSTOM", "COGNITO_USER_POOLS") + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis[rest_api_id] + if need_authorizer_id and ( + not authorizer_id or authorizer_id not in rest_api_container.authorizers + ): + # TODO: will be cleaner with https://github.com/localstack/localstack/pull/7750 + raise BadRequestException( + "Invalid authorizer ID specified. " + "Setting the authorization type to CUSTOM or COGNITO_USER_POOLS requires a valid authorizer." + ) + + if request_validator_id and request_validator_id not in rest_api_container.validators: + raise BadRequestException("Invalid Request Validator identifier specified") + + if request_models: + for content_type, model_name in request_models.items(): + # FIXME: add Empty model to rest api at creation + if model_name == EMPTY_MODEL: + continue + if model_name not in rest_api_container.models: + raise BadRequestException(f"Invalid model identifier specified: {model_name}") + + response: Method = call_moto(context) + remove_empty_attributes_from_method(response) + moto_http_method = moto_resource.resource_methods[http_method] + moto_http_method.authorization_type = moto_http_method.authorization_type.upper() + + # this is straight from the moto patch, did not test it yet but has the same functionality + # FIXME: check if still necessary after testing Authorizers + if need_authorizer_id and "authorizerId" not in response: + response["authorizerId"] = authorizer_id + + response["authorizationType"] = response["authorizationType"].upper() + + return response + + def update_method( + self, + context: RequestContext, + rest_api_id: String, + resource_id: String, + http_method: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> Method: + # see https://www.linkedin.com/pulse/updating-aws-cli-patch-operations-rest-api-yitzchak-meirovich/ + # for path construction + moto_backend = get_moto_backend(context.account_id, context.region) + moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id) + if not moto_rest_api or not (moto_resource := moto_rest_api.resources.get(resource_id)): + raise NotFoundException("Invalid Resource identifier specified") + + if not (moto_method := moto_resource.resource_methods.get(http_method)): + raise NotFoundException("Invalid Method identifier specified") + store = get_apigateway_store(context=context) + rest_api = store.rest_apis[rest_api_id] + applicable_patch_operations = [] + modifying_auth_type = False + modified_authorizer_id = False + had_req_params = bool(moto_method.request_parameters) + had_req_models = bool(moto_method.request_models) + + for patch_operation in 
patch_operations: + op = patch_operation.get("op") + path = patch_operation.get("path") + # if the path is not supported at all, raise an Exception + if len(path.split("/")) > 3 or not any( + path.startswith(s_path) for s_path in UPDATE_METHOD_PATCH_PATHS["supported_paths"] + ): + raise BadRequestException(f"Invalid patch path {path}") + + # if the path is not supported by the operation, ignore it and skip + op_supported_path = UPDATE_METHOD_PATCH_PATHS.get(op, []) + if not any(path.startswith(s_path) for s_path in op_supported_path): + available_ops = [ + available_op + for available_op in ("add", "replace", "delete") + if available_op != op + ] + supported_ops = ", ".join( + [ + supported_op + for supported_op in available_ops + if any( + path.startswith(s_path) + for s_path in UPDATE_METHOD_PATCH_PATHS.get(supported_op, []) + ) + ] + ) + raise BadRequestException( + f"Invalid patch operation specified. Must be one of: [{supported_ops}]" + ) + + value = patch_operation.get("value") + if op not in ("add", "replace"): + # skip + applicable_patch_operations.append(patch_operation) + continue + + if path == "/authorizationType" and value in ("CUSTOM", "COGNITO_USER_POOLS"): + modifying_auth_type = True + + elif path == "/authorizerId": + modified_authorizer_id = value + + if any( + path.startswith(s_path) for s_path in ("/apiKeyRequired", "/requestParameters/") + ): + patch_op = {"op": op, "path": path, "value": str_to_bool(value)} + applicable_patch_operations.append(patch_op) + continue + + elif path == "/requestValidatorId" and value not in rest_api.validators: + if not value: + # you can remove a requestValidator by passing an empty string as a value + patch_op = {"op": "remove", "path": path, "value": value} + applicable_patch_operations.append(patch_op) + continue + raise BadRequestException("Invalid Request Validator identifier specified") + + elif path.startswith("/requestModels/"): + if value != EMPTY_MODEL and value not in rest_api.models: + raise BadRequestException(f"Invalid model identifier specified: {value}") + + applicable_patch_operations.append(patch_operation) + + if modifying_auth_type: + if not modified_authorizer_id or modified_authorizer_id not in rest_api.authorizers: + raise BadRequestException( + "Invalid authorizer ID specified. " + "Setting the authorization type to CUSTOM or COGNITO_USER_POOLS requires a valid authorizer." 
+ ) + elif modified_authorizer_id: + if moto_method.authorization_type not in ("CUSTOM", "COGNITO_USER_POOLS"): + # AWS will ignore this patch if the method does not have a proper authorization type + # filter the patches to remove the modified authorizerId + applicable_patch_operations = [ + op for op in applicable_patch_operations if op.get("path") != "/authorizerId" + ] + + # TODO: test with multiple patch operations which would not be compatible between each other + patch_api_gateway_entity(moto_method, applicable_patch_operations) + + # if we removed all values of those fields, set them to None so that they're not returned anymore + if had_req_params and len(moto_method.request_parameters) == 0: + moto_method.request_parameters = None + if had_req_models and len(moto_method.request_models) == 0: + moto_method.request_models = None + + response = moto_method.to_json() + remove_empty_attributes_from_method(response) + remove_empty_attributes_from_integration(response.get("methodIntegration")) + return response + + def delete_method( + self, + context: RequestContext, + rest_api_id: String, + resource_id: String, + http_method: String, + **kwargs, + ) -> None: + moto_backend = get_moto_backend(context.account_id, context.region) + moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id) + if not moto_rest_api or not (moto_resource := moto_rest_api.resources.get(resource_id)): + raise NotFoundException("Invalid Resource identifier specified") + + if not (moto_resource.resource_methods.get(http_method)): + raise NotFoundException("Invalid Method identifier specified") + + call_moto(context) + + # method responses + + def get_method_response( + self, + context: RequestContext, + rest_api_id: String, + resource_id: String, + http_method: String, + status_code: StatusCode, + **kwargs, + ) -> MethodResponse: + # this could probably be easier in a patch? + moto_backend = get_moto_backend(context.account_id, context.region) + moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id) + # TODO: snapshot test different possibilities + if not moto_rest_api or not (moto_resource := moto_rest_api.resources.get(resource_id)): + raise NotFoundException("Invalid Resource identifier specified") + + if not (moto_method := moto_resource.resource_methods.get(http_method)): + raise NotFoundException("Invalid Method identifier specified") + + if not (moto_method_response := moto_method.get_response(status_code)): + raise NotFoundException("Invalid Response status code specified") + + method_response = moto_method_response.to_json() + return method_response + + @handler("UpdateMethodResponse", expand=False) + def update_method_response( + self, context: RequestContext, request: TestInvokeMethodRequest + ) -> MethodResponse: + # this operation is not implemented by moto, but raises a 500 error (instead of a 501). + # avoid a fallback to moto and return the 501 to the client directly instead. + raise NotImplementedAvoidFallbackError + + # stages + + # TODO: add createdDate / lastUpdatedDate in Stage operations below! 
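+    # Illustrative sketch (editor's note; hypothetical values, not part of this
+    # change): `update_stage` below validates every patch path against
+    # STAGE_UPDATE_PATHS, so a typical UpdateStage payload looks like:
+    #
+    #   patch_operations = [
+    #       # method-setting paths use {resourcePath}/{httpMethod} or the */* wildcard
+    #       {"op": "replace", "path": "/*/*/logging/loglevel", "value": "INFO"},
+    #       # booleans arrive as strings and are coerced in the handler
+    #       {"op": "replace", "path": "/tracingEnabled", "value": "true"},
+    #   ]
+    #
+    # An {"op": "remove", "path": "/*/*"} operation is special-cased below and
+    # clears the wildcard method settings immediately.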
+ @handler("CreateStage", expand=False) + def create_stage(self, context: RequestContext, request: CreateStageRequest) -> Stage: + call_moto(context) + moto_api = get_moto_rest_api(context, rest_api_id=request["restApiId"]) + stage = moto_api.stages.get(request["stageName"]) + if not stage: + raise NotFoundException("Invalid Stage identifier specified") + + if not hasattr(stage, "documentation_version"): + stage.documentation_version = request.get("documentationVersion") + + # make sure we update the stage_name on the deployment entity in moto + deployment = moto_api.deployments.get(request["deploymentId"]) + deployment.stage_name = stage.name + + response = stage.to_json() + self._patch_stage_response(response) + return response + + def get_stage( + self, context: RequestContext, rest_api_id: String, stage_name: String, **kwargs + ) -> Stage: + response = call_moto(context) + self._patch_stage_response(response) + return response + + def get_stages( + self, context: RequestContext, rest_api_id: String, deployment_id: String = None, **kwargs + ) -> Stages: + response = call_moto(context) + for stage in response["item"]: + self._patch_stage_response(stage) + if not stage.get("description"): + stage.pop("description", None) + return Stages(**response) + + @handler("UpdateStage") + def update_stage( + self, + context: RequestContext, + rest_api_id: String, + stage_name: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> Stage: + call_moto(context) + + moto_backend = get_moto_backend(context.account_id, context.region) + moto_rest_api: MotoRestAPI = moto_backend.apis.get(rest_api_id) + if not (moto_stage := moto_rest_api.stages.get(stage_name)): + raise NotFoundException("Invalid Stage identifier specified") + + # construct list of path regexes for validation + path_regexes = [re.sub("{[^}]+}", ".+", path) for path in STAGE_UPDATE_PATHS] + + # copy the patch operations to not mutate them, so that we're logging the correct input + patch_operations = copy.deepcopy(patch_operations) or [] + for patch_operation in patch_operations: + patch_path = patch_operation["path"] + + # special case: handle updates (op=remove) for wildcard method settings + patch_path_stripped = patch_path.strip("/") + if patch_path_stripped == "*/*" and patch_operation["op"] == "remove": + if not moto_stage.method_settings.pop(patch_path_stripped, None): + raise BadRequestException( + "Cannot remove method setting */* because there is no method setting for this method " + ) + response = moto_stage.to_json() + self._patch_stage_response(response) + return response + + path_valid = patch_path in STAGE_UPDATE_PATHS or any( + re.match(regex, patch_path) for regex in path_regexes + ) + if not path_valid: + valid_paths = f"[{', '.join(STAGE_UPDATE_PATHS)}]" + # note: weird formatting in AWS - required for snapshot testing + valid_paths = valid_paths.replace( + "/{resourcePath}/{httpMethod}/throttling/burstLimit, /{resourcePath}/{httpMethod}/throttling/rateLimit, /{resourcePath}/{httpMethod}/caching/ttlInSeconds", + "/{resourcePath}/{httpMethod}/throttling/burstLimit/{resourcePath}/{httpMethod}/throttling/rateLimit/{resourcePath}/{httpMethod}/caching/ttlInSeconds", + ) + valid_paths = valid_paths.replace("/burstLimit, /", "/burstLimit /") + valid_paths = valid_paths.replace("/rateLimit, /", "/rateLimit /") + raise BadRequestException( + f"Invalid method setting path: {patch_operation['path']}. 
Must be one of: {valid_paths}" + ) + + # TODO: check if there are other boolean, maybe add a global step in _patch_api_gateway_entity + if patch_path == "/tracingEnabled" and (value := patch_operation.get("value")): + patch_operation["value"] = value and value.lower() == "true" or False + + patch_api_gateway_entity(moto_stage, patch_operations) + moto_stage.apply_operations(patch_operations) + + response = moto_stage.to_json() + self._patch_stage_response(response) + return response + + def _patch_stage_response(self, response: dict): + """Apply a few patches required for AWS parity""" + response.setdefault("cacheClusterStatus", "NOT_AVAILABLE") + response.setdefault("tracingEnabled", False) + if not response.get("variables"): + response.pop("variables", None) + + def update_deployment( + self, + context: RequestContext, + rest_api_id: String, + deployment_id: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> Deployment: + moto_rest_api = get_moto_rest_api(context, rest_api_id) + try: + deployment = moto_rest_api.get_deployment(deployment_id) + except KeyError: + raise NotFoundException("Invalid Deployment identifier specified") + + for patch_operation in patch_operations: + # TODO: add validation for unsupported paths + # see https://docs.aws.amazon.com/apigateway/latest/api/patch-operations.html#UpdateDeployment-Patch + if ( + patch_operation.get("path") == "/description" + and patch_operation.get("op") == "replace" + ): + deployment.description = patch_operation["value"] + + deployment_response: Deployment = deployment.to_json() or {} + return deployment_response + + # authorizers + + @handler("CreateAuthorizer", expand=False) + def create_authorizer( + self, context: RequestContext, request: CreateAuthorizerRequest + ) -> Authorizer: + # TODO: add validation + api_id = request["restApiId"] + store = get_apigateway_store(context=context) + if api_id not in store.rest_apis: + # this seems like a weird exception to throw, but couldn't get anything different + # we might need to have a look again + raise ConflictException( + "Unable to complete operation due to concurrent modification. Please try again later." 
+ ) + + authorizer_id = short_uid()[:6] # length 6 to make TF tests pass + authorizer = deepcopy(select_from_typed_dict(Authorizer, request)) + authorizer["id"] = authorizer_id + authorizer["authorizerResultTtlInSeconds"] = int( + authorizer.get("authorizerResultTtlInSeconds", 300) + ) + store.rest_apis[api_id].authorizers[authorizer_id] = authorizer + + response = to_authorizer_response_json(api_id, authorizer) + return response + + def get_authorizers( + self, + context: RequestContext, + rest_api_id: String, + position: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> Authorizers: + # TODO add paging, validation + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + result = [ + to_authorizer_response_json(rest_api_id, a) + for a in rest_api_container.authorizers.values() + ] + return Authorizers(items=result) + + def get_authorizer( + self, context: RequestContext, rest_api_id: String, authorizer_id: String, **kwargs + ) -> Authorizer: + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis.get(rest_api_id) + # TODO: validate the restAPI id to remove the conditional + authorizer = ( + rest_api_container.authorizers.get(authorizer_id) if rest_api_container else None + ) + + if authorizer is None: + raise NotFoundException(f"Authorizer not found: {authorizer_id}") + return to_authorizer_response_json(rest_api_id, authorizer) + + def delete_authorizer( + self, context: RequestContext, rest_api_id: String, authorizer_id: String, **kwargs + ) -> None: + # TODO: add validation if authorizer does not exist + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis.get(rest_api_id) + if rest_api_container: + rest_api_container.authorizers.pop(authorizer_id, None) + + def update_authorizer( + self, + context: RequestContext, + rest_api_id: String, + authorizer_id: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> Authorizer: + # TODO: add validation + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis.get(rest_api_id) + # TODO: validate the restAPI id to remove the conditional + authorizer = ( + rest_api_container.authorizers.get(authorizer_id) if rest_api_container else None + ) + + if authorizer is None: + raise NotFoundException(f"Authorizer not found: {authorizer_id}") + + patched_authorizer = apply_json_patch_safe(authorizer, patch_operations) + # terraform sends this as a string in patch, so convert to int + patched_authorizer["authorizerResultTtlInSeconds"] = int( + patched_authorizer.get("authorizerResultTtlInSeconds", 300) + ) + + # store the updated Authorizer + rest_api_container.authorizers[authorizer_id] = patched_authorizer + + result = to_authorizer_response_json(rest_api_id, patched_authorizer) + return result + + # accounts + + def get_account(self, context: RequestContext, **kwargs) -> Account: + region_details = get_apigateway_store(context=context) + result = to_account_response_json(region_details.account) + return Account(**result) + + def update_account( + self, context: RequestContext, patch_operations: ListOfPatchOperation = None, **kwargs + ) -> Account: + region_details = get_apigateway_store(context=context) + apply_json_patch_safe(region_details.account, patch_operations, in_place=True) + result = to_account_response_json(region_details.account) + return Account(**result) + + # documentation parts + + def get_documentation_parts( + self, context: RequestContext, request: GetDocumentationPartsRequest, 
**kwargs + ) -> DocumentationParts: + # TODO: add validation + api_id = request["restApiId"] + rest_api_container = get_rest_api_container(context, rest_api_id=api_id) + + result = [ + to_documentation_part_response_json(api_id, a) + for a in rest_api_container.documentation_parts.values() + ] + return DocumentationParts(items=result) + + def get_documentation_part( + self, context: RequestContext, rest_api_id: String, documentation_part_id: String, **kwargs + ) -> DocumentationPart: + # TODO: add validation + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis.get(rest_api_id) + # TODO: validate the restAPI id to remove the conditional + documentation_part = ( + rest_api_container.documentation_parts.get(documentation_part_id) + if rest_api_container + else None + ) + + if documentation_part is None: + raise NotFoundException("Invalid Documentation part identifier specified") + return to_documentation_part_response_json(rest_api_id, documentation_part) + + def create_documentation_part( + self, + context: RequestContext, + rest_api_id: String, + location: DocumentationPartLocation, + properties: String, + **kwargs, + ) -> DocumentationPart: + entity_id = short_uid()[:6] # length 6 for AWS parity / Terraform compatibility + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + + # TODO: add complete validation for + # location parameter: https://docs.aws.amazon.com/apigateway/latest/api/API_DocumentationPartLocation.html + # As of now we validate only "type" + location_type = location.get("type") + valid_location_types = [ + "API", + "AUTHORIZER", + "MODEL", + "RESOURCE", + "METHOD", + "PATH_PARAMETER", + "QUERY_PARAMETER", + "REQUEST_HEADER", + "REQUEST_BODY", + "RESPONSE", + "RESPONSE_HEADER", + "RESPONSE_BODY", + ] + if location_type not in valid_location_types: + raise CommonServiceException( + "ValidationException", + f"1 validation error detected: Value '{location_type}' at " + f"'createDocumentationPartInput.location.type' failed to satisfy constraint: " + f"Member must satisfy enum value set: " + f"[RESPONSE_BODY, RESPONSE, METHOD, MODEL, AUTHORIZER, RESPONSE_HEADER, " + f"RESOURCE, PATH_PARAMETER, REQUEST_BODY, QUERY_PARAMETER, API, REQUEST_HEADER]", + ) + + doc_part = DocumentationPart( + id=entity_id, + location=location, + properties=properties, + ) + rest_api_container.documentation_parts[entity_id] = doc_part + + result = to_documentation_part_response_json(rest_api_id, doc_part) + return DocumentationPart(**result) + + def update_documentation_part( + self, + context: RequestContext, + rest_api_id: String, + documentation_part_id: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> DocumentationPart: + # TODO: add validation + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis.get(rest_api_id) + # TODO: validate the restAPI id to remove the conditional + doc_part = ( + rest_api_container.documentation_parts.get(documentation_part_id) + if rest_api_container + else None + ) + + if doc_part is None: + raise NotFoundException("Invalid Documentation part identifier specified") + + for patch_operation in patch_operations: + path = patch_operation.get("path") + operation = patch_operation.get("op") + if operation != "replace": + raise BadRequestException( + f"Invalid patch path '{path}' specified for op '{operation}'. 
" + f"Please choose supported operations" + ) + + if path != "/properties": + raise BadRequestException( + f"Invalid patch path '{path}' specified for op 'replace'. " + f"Must be one of: [/properties]" + ) + + key = path[1:] + if key == "properties" and not patch_operation.get("value"): + raise BadRequestException("Documentation part properties must be non-empty") + + patched_doc_part = apply_json_patch_safe(doc_part, patch_operations) + + rest_api_container.documentation_parts[documentation_part_id] = patched_doc_part + + return to_documentation_part_response_json(rest_api_id, patched_doc_part) + + def delete_documentation_part( + self, context: RequestContext, rest_api_id: String, documentation_part_id: String, **kwargs + ) -> None: + # TODO: add validation if document_part does not exist, or rest_api + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + + documentation_part = rest_api_container.documentation_parts.get(documentation_part_id) + + if documentation_part is None: + raise NotFoundException("Invalid Documentation part identifier specified") + + if rest_api_container: + rest_api_container.documentation_parts.pop(documentation_part_id, None) + + def import_documentation_parts( + self, + context: RequestContext, + rest_api_id: String, + body: IO[Blob], + mode: PutMode = None, + fail_on_warnings: Boolean = None, + **kwargs, + ) -> DocumentationPartIds: + body_data = body.read() + openapi_spec = parse_json_or_yaml(to_str(body_data)) + + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + + # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-documenting-api-quick-start-import-export.html + resolved_schema = resolve_references(openapi_spec, rest_api_id=rest_api_id) + documentation = resolved_schema.get(OpenAPIExt.DOCUMENTATION) + + ids = [] + # overwrite mode + if mode == PutMode.overwrite: + rest_api_container.documentation_parts.clear() + for doc_part in documentation["documentationParts"]: + entity_id = short_uid()[:6] + rest_api_container.documentation_parts[entity_id] = DocumentationPart( + id=entity_id, **doc_part + ) + ids.append(entity_id) + # TODO: implement the merge mode + return DocumentationPartIds(ids=ids) + + # documentation versions + + def create_documentation_version( + self, + context: RequestContext, + rest_api_id: String, + documentation_version: String, + stage_name: String = None, + description: String = None, + **kwargs, + ) -> DocumentationVersion: + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + + result = DocumentationVersion( + version=documentation_version, createdDate=datetime.now(), description=description + ) + rest_api_container.documentation_versions[documentation_version] = result + + return result + + def get_documentation_version( + self, context: RequestContext, rest_api_id: String, documentation_version: String, **kwargs + ) -> DocumentationVersion: + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + + result = rest_api_container.documentation_versions.get(documentation_version) + if not result: + raise NotFoundException(f"Documentation version not found: {documentation_version}") + + return result + + def get_documentation_versions( + self, + context: RequestContext, + rest_api_id: String, + position: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> DocumentationVersions: + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + result = 
list(rest_api_container.documentation_versions.values()) + return DocumentationVersions(items=result) + + def delete_documentation_version( + self, context: RequestContext, rest_api_id: String, documentation_version: String, **kwargs + ) -> None: + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + + result = rest_api_container.documentation_versions.pop(documentation_version, None) + if not result: + raise NotFoundException(f"Documentation version not found: {documentation_version}") + + def update_documentation_version( + self, + context: RequestContext, + rest_api_id: String, + documentation_version: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> DocumentationVersion: + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + + result = rest_api_container.documentation_versions.get(documentation_version) + if not result: + raise NotFoundException(f"Documentation version not found: {documentation_version}") + + patch_api_gateway_entity(result, patch_operations) + + return result + + # base path mappings + + def get_base_path_mappings( + self, + context: RequestContext, + domain_name: String, + domain_name_id: String = None, + position: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> BasePathMappings: + region_details = get_apigateway_store(context=context) + + mappings_list = region_details.base_path_mappings.get(domain_name) or [] + + result = [ + to_base_mapping_response_json(domain_name, m["basePath"], m) for m in mappings_list + ] + return BasePathMappings(items=result) + + def get_base_path_mapping( + self, + context: RequestContext, + domain_name: String, + base_path: String, + domain_name_id: String = None, + **kwargs, + ) -> BasePathMapping: + region_details = get_apigateway_store(context=context) + + mappings_list = region_details.base_path_mappings.get(domain_name) or [] + mapping = ([m for m in mappings_list if m["basePath"] == base_path] or [None])[0] + if mapping is None: + raise NotFoundException(f"Base path mapping not found: {domain_name} - {base_path}") + + result = to_base_mapping_response_json(domain_name, base_path, mapping) + return BasePathMapping(**result) + + def create_base_path_mapping( + self, + context: RequestContext, + domain_name: String, + rest_api_id: String, + domain_name_id: String = None, + base_path: String = None, + stage: String = None, + **kwargs, + ) -> BasePathMapping: + region_details = get_apigateway_store(context=context) + + # Note: "(none)" is a special value in API GW: + # https://docs.aws.amazon.com/apigateway/api-reference/link-relation/basepathmapping-by-base-path + base_path = base_path or "(none)" + + entry = { + "domainName": domain_name, + "restApiId": rest_api_id, + "basePath": base_path, + "stage": stage, + } + region_details.base_path_mappings.setdefault(domain_name, []).append(entry) + + result = to_base_mapping_response_json(domain_name, base_path, entry) + return BasePathMapping(**result) + + def update_base_path_mapping( + self, + context: RequestContext, + domain_name: String, + base_path: String, + domain_name_id: String = None, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> BasePathMapping: + region_details = get_apigateway_store(context=context) + + mappings_list = region_details.base_path_mappings.get(domain_name) or [] + + mapping = ([m for m in mappings_list if m["basePath"] == base_path] or [None])[0] + if mapping is None: + raise NotFoundException( + f"Not found: mapping for domain name 
{domain_name}, " + f"base path {base_path} in list {mappings_list}" + ) + + patch_operations = ensure_list(patch_operations) + for operation in patch_operations: + if operation["path"] == "/restapiId": + operation["path"] = "/restApiId" + result = apply_json_patch_safe(mapping, patch_operations) + + for i in range(len(mappings_list)): + if mappings_list[i]["basePath"] == base_path: + mappings_list[i] = result + + result = to_base_mapping_response_json(domain_name, base_path, result) + return BasePathMapping(**result) + + def delete_base_path_mapping( + self, + context: RequestContext, + domain_name: String, + base_path: String, + domain_name_id: String = None, + **kwargs, + ) -> None: + region_details = get_apigateway_store(context=context) + + mappings_list = region_details.base_path_mappings.get(domain_name) or [] + for i in range(len(mappings_list)): + if mappings_list[i]["basePath"] == base_path: + del mappings_list[i] + return + + raise NotFoundException(f"Base path mapping {base_path} for domain {domain_name} not found") + + # client certificates + + def get_client_certificate( + self, context: RequestContext, client_certificate_id: String, **kwargs + ) -> ClientCertificate: + region_details = get_apigateway_store(context=context) + result = region_details.client_certificates.get(client_certificate_id) + if result is None: + raise NotFoundException(f"Client certificate ID {client_certificate_id} not found") + return ClientCertificate(**result) + + def get_client_certificates( + self, + context: RequestContext, + position: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> ClientCertificates: + region_details = get_apigateway_store(context=context) + result = list(region_details.client_certificates.values()) + return ClientCertificates(items=result) + + def generate_client_certificate( + self, + context: RequestContext, + description: String = None, + tags: MapOfStringToString = None, + **kwargs, + ) -> ClientCertificate: + region_details = get_apigateway_store(context=context) + cert_id = short_uid() + creation_time = now_utc() + entry = { + "description": description, + "tags": tags, + "clientCertificateId": cert_id, + "createdDate": creation_time, + "expirationDate": creation_time + 60 * 60 * 24 * 30, # assume 30 days validity + "pemEncodedCertificate": "testcert-123", # TODO return proper certificate! 
+        }
+        region_details.client_certificates[cert_id] = entry
+        result = to_client_cert_response_json(entry)
+        return ClientCertificate(**result)
+
+    def update_client_certificate(
+        self,
+        context: RequestContext,
+        client_certificate_id: String,
+        patch_operations: ListOfPatchOperation = None,
+        **kwargs,
+    ) -> ClientCertificate:
+        region_details = get_apigateway_store(context=context)
+        entity = region_details.client_certificates.get(client_certificate_id)
+        if entity is None:
+            raise NotFoundException(f'Client certificate ID "{client_certificate_id}" not found')
+        result = apply_json_patch_safe(entity, patch_operations)
+        result = to_client_cert_response_json(result)
+        return ClientCertificate(**result)
+
+    def delete_client_certificate(
+        self, context: RequestContext, client_certificate_id: String, **kwargs
+    ) -> None:
+        region_details = get_apigateway_store(context=context)
+        entity = region_details.client_certificates.pop(client_certificate_id, None)
+        if entity is None:
+            raise NotFoundException(
+                f'Client certificate ID "{client_certificate_id}" not found for deletion'
+            )
+
+    # VPC links
+
+    def create_vpc_link(
+        self,
+        context: RequestContext,
+        name: String,
+        target_arns: ListOfString,
+        description: String = None,
+        tags: MapOfStringToString = None,
+        **kwargs,
+    ) -> VpcLink:
+        region_details = get_apigateway_store(context=context)
+        link_id = short_uid()
+        entry = {"id": link_id, "status": "AVAILABLE"}
+        region_details.vpc_links[link_id] = entry
+        result = to_vpc_link_response_json(entry)
+        return VpcLink(**result)
+
+    def get_vpc_links(
+        self,
+        context: RequestContext,
+        position: String = None,
+        limit: NullableInteger = None,
+        **kwargs,
+    ) -> VpcLinks:
+        region_details = get_apigateway_store(context=context)
+        result = region_details.vpc_links.values()
+        result = [to_vpc_link_response_json(r) for r in result]
+        result = {"items": result}
+        return result
+
+    def get_vpc_link(self, context: RequestContext, vpc_link_id: String, **kwargs) -> VpcLink:
+        region_details = get_apigateway_store(context=context)
+        vpc_link = region_details.vpc_links.get(vpc_link_id)
+        if vpc_link is None:
+            raise NotFoundException(f'VPC link ID "{vpc_link_id}" not found')
+        result = to_vpc_link_response_json(vpc_link)
+        return VpcLink(**result)
+
+    def update_vpc_link(
+        self,
+        context: RequestContext,
+        vpc_link_id: String,
+        patch_operations: ListOfPatchOperation = None,
+        **kwargs,
+    ) -> VpcLink:
+        region_details = get_apigateway_store(context=context)
+        vpc_link = region_details.vpc_links.get(vpc_link_id)
+        if vpc_link is None:
+            raise NotFoundException(f'VPC link ID "{vpc_link_id}" not found')
+        result = apply_json_patch_safe(vpc_link, patch_operations)
+        result = to_vpc_link_response_json(result)
+        return VpcLink(**result)
+
+    def delete_vpc_link(self, context: RequestContext, vpc_link_id: String, **kwargs) -> None:
+        region_details = get_apigateway_store(context=context)
+        vpc_link = region_details.vpc_links.pop(vpc_link_id, None)
+        if vpc_link is None:
+            raise NotFoundException(f'VPC link ID "{vpc_link_id}" not found for deletion')
+
+    # request validators
+
+    def get_request_validators(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String = None,
+        limit: NullableInteger = None,
+        **kwargs,
+    ) -> RequestValidators:
+        # TODO: add validation and pagination?
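+        # Illustrative sketch (editor's note; hypothetical id and name, not part
+        # of this change): validators live on the RestApiContainer keyed by the
+        # 6-char id assigned in `create_request_validator` below, e.g.:
+        #
+        #   rest_api_container.validators == {
+        #       "a1b2c3": RequestValidator(
+        #           id="a1b2c3",
+        #           name="body-validator",
+        #           validateRequestBody=True,
+        #           validateRequestParameters=False,
+        #       ),
+        #   }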
+ store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + result = [ + to_validator_response_json(rest_api_id, a) + for a in rest_api_container.validators.values() + ] + return RequestValidators(items=result) + + def get_request_validator( + self, context: RequestContext, rest_api_id: String, request_validator_id: String, **kwargs + ) -> RequestValidator: + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis.get(rest_api_id) + # TODO: validate the restAPI id to remove the conditional + validator = ( + rest_api_container.validators.get(request_validator_id) if rest_api_container else None + ) + + if validator is None: + raise NotFoundException("Invalid Request Validator identifier specified") + + result = to_validator_response_json(rest_api_id, validator) + return result + + def create_request_validator( + self, + context: RequestContext, + rest_api_id: String, + name: String = None, + validate_request_body: Boolean = None, + validate_request_parameters: Boolean = None, + **kwargs, + ) -> RequestValidator: + # TODO: add validation (ex: name cannot be blank) + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise BadRequestException("Invalid REST API identifier specified") + # length 6 for AWS parity and TF compatibility + validator_id = short_uid()[:6] + + validator = RequestValidator( + id=validator_id, + name=name, + validateRequestBody=validate_request_body or False, + validateRequestParameters=validate_request_parameters or False, + ) + + rest_api_container.validators[validator_id] = validator + + # missing to_validator_response_json ? + return validator + + def update_request_validator( + self, + context: RequestContext, + rest_api_id: String, + request_validator_id: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> RequestValidator: + # TODO: add validation + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis.get(rest_api_id) + # TODO: validate the restAPI id to remove the conditional + validator = ( + rest_api_container.validators.get(request_validator_id) if rest_api_container else None + ) + + if validator is None: + raise NotFoundException( + f"Validator {request_validator_id} for API Gateway {rest_api_id} not found" + ) + + for patch_operation in patch_operations: + path = patch_operation.get("path") + operation = patch_operation.get("op") + if operation != "replace": + raise BadRequestException( + f"Invalid patch path '{path}' specified for op '{operation}'. " + f"Please choose supported operations" + ) + if path not in ("/name", "/validateRequestBody", "/validateRequestParameters"): + raise BadRequestException( + f"Invalid patch path '{path}' specified for op 'replace'. 
" + f"Must be one of: [/name, /validateRequestParameters, /validateRequestBody]" + ) + + key = path[1:] + value = patch_operation.get("value") + if key == "name" and not value: + raise BadRequestException("Request Validator name cannot be blank") + + elif key in ("validateRequestParameters", "validateRequestBody"): + value = value and value.lower() == "true" or False + + rest_api_container.validators[request_validator_id][key] = value + + return to_validator_response_json( + rest_api_id, rest_api_container.validators[request_validator_id] + ) + + def delete_request_validator( + self, context: RequestContext, rest_api_id: String, request_validator_id: String, **kwargs + ) -> None: + # TODO: add validation if rest api does not exist + store = get_apigateway_store(context=context) + rest_api_container = store.rest_apis.get(rest_api_id) + if not rest_api_container: + raise NotFoundException("Invalid Request Validator identifier specified") + + validator = rest_api_container.validators.pop(request_validator_id, None) + if not validator: + raise NotFoundException("Invalid Request Validator identifier specified") + + # tags + + def get_tags( + self, + context: RequestContext, + resource_arn: String, + position: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> Tags: + result = get_apigateway_store(context=context).TAGS.get(resource_arn, {}) + return Tags(tags=result) + + def tag_resource( + self, context: RequestContext, resource_arn: String, tags: MapOfStringToString, **kwargs + ) -> None: + resource_tags = get_apigateway_store(context=context).TAGS.setdefault(resource_arn, {}) + resource_tags.update(tags) + + def untag_resource( + self, context: RequestContext, resource_arn: String, tag_keys: ListOfString, **kwargs + ) -> None: + resource_tags = get_apigateway_store(context=context).TAGS.setdefault(resource_arn, {}) + for key in tag_keys: + resource_tags.pop(key, None) + + def import_rest_api( + self, + context: RequestContext, + body: IO[Blob], + fail_on_warnings: Boolean = None, + parameters: MapOfStringToString = None, + **kwargs, + ) -> RestApi: + body_data = body.read() + + # create rest api + openapi_spec = parse_json_or_yaml(to_str(body_data)) + create_api_request = CreateRestApiRequest(name=openapi_spec.get("info").get("title")) + create_api_context = create_custom_context( + context, + "CreateRestApi", + create_api_request, + ) + response = self.create_rest_api(create_api_context, create_api_request) + api_id = response.get("id") + # remove the 2 default models automatically created, but not when importing + store = get_apigateway_store(context=context) + store.rest_apis[api_id].models = {} + + # put rest api + put_api_request = PutRestApiRequest( + restApiId=api_id, + failOnWarnings=str_to_bool(fail_on_warnings) or False, + parameters=parameters or {}, + body=io.BytesIO(body_data), + ) + put_api_context = create_custom_context( + context, + "PutRestApi", + put_api_request, + ) + put_api_response = self.put_rest_api(put_api_context, put_api_request) + if not put_api_response.get("tags"): + put_api_response.pop("tags", None) + return put_api_response + + # integrations + + def get_integration( + self, + context: RequestContext, + rest_api_id: String, + resource_id: String, + http_method: String, + **kwargs, + ) -> Integration: + try: + response: Integration = call_moto(context) + except CommonServiceException as e: + # the Exception raised by moto does not have the right message not status code + if e.code == "NotFoundException": + raise NotFoundException("Invalid 
Integration identifier specified") + raise + + if integration_responses := response.get("integrationResponses"): + for integration_response in integration_responses.values(): + remove_empty_attributes_from_integration_response(integration_response) + + return response + + def put_integration( + self, context: RequestContext, request: PutIntegrationRequest, **kwargs + ) -> Integration: + if (integration_type := request.get("type")) not in VALID_INTEGRATION_TYPES: + raise CommonServiceException( + "ValidationException", + f"1 validation error detected: Value '{integration_type}' at " + f"'putIntegrationInput.type' failed to satisfy constraint: " + f"Member must satisfy enum value set: [HTTP, MOCK, AWS_PROXY, HTTP_PROXY, AWS]", + ) + + elif integration_type in (IntegrationType.AWS_PROXY, IntegrationType.AWS): + if not request.get("integrationHttpMethod"): + raise BadRequestException("Enumeration value for HttpMethod must be non-empty") + if not (integration_uri := request.get("uri") or "").startswith("arn:"): + raise BadRequestException("Invalid ARN specified in the request") + + try: + parsed_arn = parse_arn(integration_uri) + except InvalidArnException: + raise BadRequestException("Invalid ARN specified in the request") + + if not any( + parsed_arn["resource"].startswith(action_type) for action_type in ("path", "action") + ): + raise BadRequestException("AWS ARN for integration must contain path or action") + + if integration_type == IntegrationType.AWS_PROXY and ( + parsed_arn["account"] != "lambda" + or not parsed_arn["resource"].startswith("path/2015-03-31/functions/") + ): + # the Firehose message is misleading, this is not implemented in AWS + raise BadRequestException( + "Integrations of type 'AWS_PROXY' currently only supports " + "Lambda function and Firehose stream invocations." 
+                )
+
+        moto_rest_api = get_moto_rest_api(context=context, rest_api_id=request.get("restApiId"))
+        resource = moto_rest_api.resources.get(request.get("resourceId"))
+        if not resource:
+            raise NotFoundException("Invalid Resource identifier specified")
+
+        method = resource.resource_methods.get(request.get("httpMethod"))
+        if not method:
+            raise NotFoundException("Invalid Method identifier specified")
+
+        # TODO: if the IntegrationType is AWS, `credentials` is mandatory
+        moto_request = copy.copy(request)
+        moto_request.setdefault("passthroughBehavior", "WHEN_NO_MATCH")
+        moto_request.setdefault("timeoutInMillis", 29000)
+        if integration_type in (IntegrationType.HTTP, IntegrationType.HTTP_PROXY):
+            moto_request.setdefault("connectionType", ConnectionType.INTERNET)
+        response = call_moto_with_request(context, moto_request)
+        remove_empty_attributes_from_integration(integration=response)
+
+        # TODO: we should fix this fundamentally once we move away from moto
+        if integration_type == "MOCK":
+            response.pop("uri", None)
+
+        return response
+
+    def update_integration(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        patch_operations: ListOfPatchOperation = None,
+        **kwargs,
+    ) -> Integration:
+        moto_rest_api = get_moto_rest_api(context=context, rest_api_id=rest_api_id)
+        resource = moto_rest_api.resources.get(resource_id)
+        if not resource:
+            raise NotFoundException("Invalid Resource identifier specified")
+
+        method = resource.resource_methods.get(http_method)
+        if not method:
+            raise NotFoundException("Invalid Integration identifier specified")
+
+        integration = method.method_integration
+        patch_api_gateway_entity(integration, patch_operations)
+
+        # fix data types
+        if integration.timeout_in_millis:
+            integration.timeout_in_millis = int(integration.timeout_in_millis)
+        if skip_verification := (integration.tls_config or {}).get("insecureSkipVerification"):
+            integration.tls_config["insecureSkipVerification"] = str_to_bool(skip_verification)
+
+        integration_dict: Integration = integration.to_json()
+        return integration_dict
+
+    def delete_integration(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        **kwargs,
+    ) -> None:
+        try:
+            call_moto(context)
+        except Exception as e:
+            raise NotFoundException("Invalid Resource identifier specified") from e
+
+    # integration responses
+
+    def get_integration_response(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        resource_id: String,
+        http_method: String,
+        status_code: StatusCode,
+        **kwargs,
+    ) -> IntegrationResponse:
+        response: IntegrationResponse = call_moto(context)
+        remove_empty_attributes_from_integration_response(response)
+        # moto does not return selectionPattern if it is set to an empty string
+        # TODO: fix upstream
+        if "selectionPattern" not in response:
+            moto_rest_api = get_moto_rest_api(context, rest_api_id)
+            moto_resource = moto_rest_api.resources.get(resource_id)
+            method_integration = moto_resource.resource_methods[http_method].method_integration
+            integration_response = method_integration.integration_responses[status_code]
+            if integration_response.selection_pattern is not None:
+                response["selectionPattern"] = integration_response.selection_pattern
+        return response
+
+    @handler("PutIntegrationResponse", expand=False)
+    def put_integration_response(
+        self,
+        context: RequestContext,
+        request: PutIntegrationResponseRequest,
+    ) -> IntegrationResponse:
+        moto_rest_api = get_moto_rest_api(context=context,
rest_api_id=request.get("restApiId")) + moto_resource = moto_rest_api.resources.get(request.get("resourceId")) + if not moto_resource: + raise NotFoundException("Invalid Resource identifier specified") + + method = moto_resource.resource_methods.get(request.get("httpMethod")) + if not method: + raise NotFoundException("Invalid Method identifier specified") + + response = call_moto(context) + # Moto has a specific case where it will set a None to an empty dict, but AWS does not behave the same + if request.get("responseTemplates") is None: + method_integration = moto_resource.resource_methods[ + request["httpMethod"] + ].method_integration + integration_response = method_integration.integration_responses[request["statusCode"]] + integration_response.response_templates = None + response.pop("responseTemplates", None) + + # Moto also does not return the selection pattern if it is set to an empty string + # TODO: fix upstream + if (selection_pattern := request.get("selectionPattern")) is not None: + response["selectionPattern"] = selection_pattern + + return response + + def get_export( + self, + context: RequestContext, + rest_api_id: String, + stage_name: String, + export_type: String, + parameters: MapOfStringToString = None, + accepts: String = None, + **kwargs, + ) -> ExportResponse: + moto_rest_api = get_moto_rest_api(context, rest_api_id) + openapi_exporter = OpenApiExporter() + # FIXME: look into parser why `parameters` is always None + has_extension = context.request.values.get("extensions") == "apigateway" + result = openapi_exporter.export_api( + api_id=rest_api_id, + stage=stage_name, + export_type=export_type, + export_format=accepts, + with_extension=has_extension, + account_id=context.account_id, + region_name=context.region, + ) + + accepts = accepts or APPLICATION_JSON + + if accepts == APPLICATION_JSON: + result = json.dumps(result, indent=2) + + file_ext = accepts.split("/")[-1] + version = moto_rest_api.version or timestamp( + moto_rest_api.create_date, format=TIMESTAMP_FORMAT_TZ + ) + return ExportResponse( + body=to_bytes(result), + contentType="application/octet-stream", + contentDisposition=f'attachment; filename="{export_type}_{version}.{file_ext}"', + ) + + def get_api_keys( + self, + context: RequestContext, + position: String = None, + limit: NullableInteger = None, + name_query: String = None, + customer_id: String = None, + include_values: NullableBoolean = None, + **kwargs, + ) -> ApiKeys: + # TODO: migrate API keys in our store + moto_backend = get_moto_backend(context.account_id, context.region) + api_keys = [api_key.to_json() for api_key in reversed(moto_backend.keys.values())] + if not include_values: + for api_key in api_keys: + api_key.pop("value") + + item_list = PaginatedList(api_keys) + + def token_generator(item): + return md5(item["id"]) + + def filter_function(item): + return item["name"].startswith(name_query) + + paginated_list, next_token = item_list.get_page( + token_generator=token_generator, + next_token=position, + page_size=limit, + filter_function=filter_function if name_query else None, + ) + + return ApiKeys(items=paginated_list, position=next_token) + + def update_api_key( + self, + context: RequestContext, + api_key: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> ApiKey: + response: ApiKey = call_moto(context) + if "value" in response: + response.pop("value", None) + + if "tags" not in response: + response["tags"] = {} + + return response + + def create_model( + self, + context: RequestContext, + rest_api_id: 
String,
+        name: String,
+        content_type: String,
+        description: String = None,
+        schema: String = None,
+        **kwargs,
+    ) -> Model:
+        store = get_apigateway_store(context=context)
+        if rest_api_id not in store.rest_apis:
+            raise NotFoundException(
+                f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+            )
+
+        if not name:
+            raise BadRequestException("Model name must be non-empty")
+
+        if name in store.rest_apis[rest_api_id].models:
+            raise ConflictException("Model name already exists for this REST API")
+
+        if not schema:
+            # TODO: maybe add more validation around the schema (e.g. is it a valid JSON string?)
+            raise BadRequestException(
+                "Model schema must have at least 1 property or array items defined"
+            )
+
+        model_id = short_uid()[:6]  # length 6 to make TF tests pass
+        model = Model(
+            id=model_id, name=name, contentType=content_type, description=description, schema=schema
+        )
+        store.rest_apis[rest_api_id].models[name] = model
+        remove_empty_attributes_from_model(model)
+        return model
+
+    def get_models(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        position: String = None,
+        limit: NullableInteger = None,
+        **kwargs,
+    ) -> Models:
+        store = get_apigateway_store(context=context)
+        if rest_api_id not in store.rest_apis:
+            raise NotFoundException(
+                f"Invalid API identifier specified {context.account_id}:{rest_api_id}"
+            )
+
+        models = [
+            remove_empty_attributes_from_model(model)
+            for model in store.rest_apis[rest_api_id].models.values()
+        ]
+        return Models(items=models)
+
+    def get_model(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        model_name: String,
+        flatten: Boolean = None,
+        **kwargs,
+    ) -> Model:
+        store = get_apigateway_store(context=context)
+        if rest_api_id not in store.rest_apis or not (
+            model := store.rest_apis[rest_api_id].models.get(model_name)
+        ):
+            raise NotFoundException(f"Invalid model name specified: {model_name}")
+
+        return model
+
+    def update_model(
+        self,
+        context: RequestContext,
+        rest_api_id: String,
+        model_name: String,
+        patch_operations: ListOfPatchOperation = None,
+        **kwargs,
+    ) -> Model:
+        # update the model manually; there is no need for JSON patch, as only 2 paths are
+        # supported with the "replace" operation:
+        # /schema
+        # /description
+        store = get_apigateway_store(context=context)
+        if rest_api_id not in store.rest_apis or not (
+            model := store.rest_apis[rest_api_id].models.get(model_name)
+        ):
+            raise NotFoundException(f"Invalid model name specified: {model_name}")
+
+        for operation in patch_operations:
+            path = operation.get("path")
+            if operation.get("op") != "replace":
+                raise BadRequestException(
+                    f"Invalid patch path '{path}' specified for op 'add'. Please choose supported operations"
+                )
+            if path not in ("/schema", "/description"):
+                raise BadRequestException(
+                    f"Invalid patch path '{path}' specified for op 'replace'. 
Must be one of: [/description, /schema]" + ) + + key = path[1:] # remove the leading slash + value = operation.get("value") + if key == "schema": + if not value: + raise BadRequestException( + "Model schema must have at least 1 property or array items defined" + ) + # delete the resolved model to invalidate it + store.rest_apis[rest_api_id].resolved_models.pop(model_name, None) + model[key] = value + remove_empty_attributes_from_model(model) + return model + + def delete_model( + self, context: RequestContext, rest_api_id: String, model_name: String, **kwargs + ) -> None: + store = get_apigateway_store(context=context) + + if ( + rest_api_id not in store.rest_apis + or model_name not in store.rest_apis[rest_api_id].models + ): + raise NotFoundException(f"Invalid model name specified: {model_name}") + + moto_rest_api = get_moto_rest_api(context, rest_api_id) + validate_model_in_use(moto_rest_api, model_name) + + store.rest_apis[rest_api_id].models.pop(model_name, None) + store.rest_apis[rest_api_id].resolved_models.pop(model_name, None) + + @handler("CreateUsagePlan") + def create_usage_plan( + self, + context: RequestContext, + name: String, + description: String = None, + api_stages: ListOfApiStage = None, + throttle: ThrottleSettings = None, + quota: QuotaSettings = None, + tags: MapOfStringToString = None, + **kwargs, + ) -> UsagePlan: + usage_plan: UsagePlan = call_moto(context=context) + if not usage_plan.get("quota"): + usage_plan.pop("quota", None) + + fix_throttle_and_quota_from_usage_plan(usage_plan) + + return usage_plan + + def update_usage_plan( + self, + context: RequestContext, + usage_plan_id: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> UsagePlan: + for patch_op in patch_operations: + if patch_op.get("op") == "remove" and patch_op.get("path") == "/apiStages": + if not (api_stage_id := patch_op.get("value")): + raise BadRequestException("Invalid API Stage specified") + if not len(split_stage_id := api_stage_id.split(":")) == 2: + raise BadRequestException("Invalid API Stage specified") + rest_api_id, stage_name = split_stage_id + moto_backend = apigw_models.apigateway_backends[context.account_id][context.region] + if not (rest_api := moto_backend.apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API Stage {{api: {rest_api_id}, stage: {stage_name}}} specified for usageplan {usage_plan_id}" + ) + if stage_name not in rest_api.stages: + raise NotFoundException( + f"Invalid API Stage {{api: {rest_api_id}, stage: {stage_name}}} specified for usageplan {usage_plan_id}" + ) + + usage_plan = call_moto(context=context) + if not usage_plan.get("quota"): + usage_plan.pop("quota", None) + + usage_plan_arn = f"arn:{get_partition(context.region)}:apigateway:{context.region}::/usageplans/{usage_plan_id}" + existing_tags = get_apigateway_store(context=context).TAGS.get(usage_plan_arn, {}) + if "tags" not in usage_plan: + usage_plan["tags"] = existing_tags + else: + usage_plan["tags"].update(existing_tags) + + fix_throttle_and_quota_from_usage_plan(usage_plan) + + return usage_plan + + def get_usage_plan(self, context: RequestContext, usage_plan_id: String, **kwargs) -> UsagePlan: + usage_plan: UsagePlan = call_moto(context=context) + if not usage_plan.get("quota"): + usage_plan.pop("quota", None) + + fix_throttle_and_quota_from_usage_plan(usage_plan) + + usage_plan_arn = f"arn:{get_partition(context.region)}:apigateway:{context.region}::/usageplans/{usage_plan_id}" + existing_tags = get_apigateway_store(context=context).TAGS.get(usage_plan_arn, 
{}) + if "tags" not in usage_plan: + usage_plan["tags"] = existing_tags + else: + usage_plan["tags"].update(existing_tags) + + return usage_plan + + @handler("GetUsagePlans") + def get_usage_plans( + self, + context: RequestContext, + position: String = None, + key_id: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> UsagePlans: + usage_plans: UsagePlans = call_moto(context=context) + if not usage_plans.get("items"): + usage_plans["items"] = [] + + items = usage_plans["items"] + for up in items: + if not up.get("quota"): + up.pop("quota", None) + + fix_throttle_and_quota_from_usage_plan(up) + + if "tags" not in up: + up.pop("tags", None) + + return usage_plans + + def get_usage_plan_keys( + self, + context: RequestContext, + usage_plan_id: String, + position: String = None, + limit: NullableInteger = None, + name_query: String = None, + **kwargs, + ) -> UsagePlanKeys: + # TODO: migrate Usage Plan and UsagePlan Keys to our store + moto_backend = get_moto_backend(context.account_id, context.region) + + if not (usage_plan_keys := moto_backend.usage_plan_keys.get(usage_plan_id)): + return UsagePlanKeys(items=[]) + + usage_plan_keys = [ + usage_plan_key.to_json() + for usage_plan_key in reversed(usage_plan_keys.values()) + if usage_plan_key.id in moto_backend.keys + ] + + item_list = PaginatedList(usage_plan_keys) + + def token_generator(item): + return md5(item["id"]) + + def filter_function(item): + return item["name"].startswith(name_query) + + paginated_list, next_token = item_list.get_page( + token_generator=token_generator, + next_token=position, + page_size=limit, + filter_function=filter_function if name_query else None, + ) + + return UsagePlanKeys(items=paginated_list, position=next_token) + + def put_gateway_response( + self, + context: RequestContext, + rest_api_id: String, + response_type: GatewayResponseType, + status_code: StatusCode = None, + response_parameters: MapOfStringToString = None, + response_templates: MapOfStringToString = None, + **kwargs, + ) -> GatewayResponse: + # There were no validation in moto, so implementing as is + # TODO: add validation + # TODO: this is only the CRUD implementation, implement it in the invocation part of the code + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + if response_type not in DEFAULT_GATEWAY_RESPONSES: + raise CommonServiceException( + code="ValidationException", + message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]", + ) + + gateway_response = GatewayResponse( + statusCode=status_code, + responseParameters=response_parameters, + responseTemplates=response_templates, + responseType=response_type, + defaultResponse=False, + ) + rest_api_container.gateway_responses[response_type] = gateway_response + return gateway_response + + def get_gateway_response( + self, + context: RequestContext, + rest_api_id: String, + response_type: GatewayResponseType, + **kwargs, + ) -> GatewayResponse: + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + if response_type not in DEFAULT_GATEWAY_RESPONSES: + raise CommonServiceException( + 
code="ValidationException", + message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]", + ) + + gateway_response = rest_api_container.gateway_responses.get( + response_type, DEFAULT_GATEWAY_RESPONSES[response_type] + ) + # TODO: add validation with the parameters? seems like it validated client side? how to try? + return gateway_response + + def get_gateway_responses( + self, + context: RequestContext, + rest_api_id: String, + position: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> GatewayResponses: + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + user_gateway_resp = rest_api_container.gateway_responses + gateway_responses = [ + user_gateway_resp.get(key) or value for key, value in DEFAULT_GATEWAY_RESPONSES.items() + ] + return GatewayResponses(items=gateway_responses) + + def delete_gateway_response( + self, + context: RequestContext, + rest_api_id: String, + response_type: GatewayResponseType, + **kwargs, + ) -> None: + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + if response_type not in DEFAULT_GATEWAY_RESPONSES: + raise CommonServiceException( + code="ValidationException", + message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]", + ) + + if not rest_api_container.gateway_responses.pop(response_type, None): + raise NotFoundException("Gateway response type not defined on api") + + def update_gateway_response( + self, + context: RequestContext, + rest_api_id: String, + response_type: GatewayResponseType, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> GatewayResponse: + """ + Support operations table: + Path | op:add | op:replace | op:remove | op:copy + /statusCode | Not supported | Supported | Not supported | Not supported + /responseParameters | Supported | Supported | Supported | Not supported + /responseTemplates | Supported | Supported | Supported | Not supported + See https://docs.aws.amazon.com/apigateway/latest/api/patch-operations.html#UpdateGatewayResponse-Patch + """ + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + if response_type not in DEFAULT_GATEWAY_RESPONSES: + raise CommonServiceException( + code="ValidationException", + message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]", + ) + + if response_type not in rest_api_container.gateway_responses: + # deep copy to avoid in place mutation of the default response when update using JSON patch + rest_api_container.gateway_responses[response_type] = copy.deepcopy( + DEFAULT_GATEWAY_RESPONSES[response_type] + ) + rest_api_container.gateway_responses[response_type]["defaultResponse"] = False + + patched_entity = rest_api_container.gateway_responses[response_type] + + for index, 
operation in enumerate(patch_operations): + if (op := operation.get("op")) not in VALID_PATCH_OPERATIONS: + raise CommonServiceException( + code="ValidationException", + message=f"1 validation error detected: Value '{op}' at 'updateGatewayResponseInput.patchOperations.{index + 1}.member.op' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(VALID_PATCH_OPERATIONS)}]", + ) + + path = operation.get("path", "null") + if not any( + path.startswith(s_path) + for s_path in ("/statusCode", "/responseParameters", "/responseTemplates") + ): + raise BadRequestException(f"Invalid patch path {path}") + + if op in ("add", "remove") and path == "/statusCode": + raise BadRequestException(f"Invalid patch path {path}") + + elif op in ("add", "replace"): + for param_type in ("responseParameters", "responseTemplates"): + if path.startswith(f"/{param_type}"): + if op == "replace": + param = path.removeprefix(f"/{param_type}/") + param = param.replace("~1", "/") + if param not in patched_entity.get(param_type): + raise NotFoundException("Invalid parameter name specified") + if operation.get("value") is None: + raise BadRequestException( + f"Invalid null or empty value in {param_type}" + ) + + patch_api_gateway_entity(patched_entity, patch_operations) + + return patched_entity + + # TODO + + +# --------------- +# UTIL FUNCTIONS +# --------------- + + +def remove_empty_attributes_from_rest_api(rest_api: RestApi, remove_tags=True) -> RestApi: + if not rest_api.get("binaryMediaTypes"): + rest_api.pop("binaryMediaTypes", None) + + if not isinstance(rest_api.get("minimumCompressionSize"), int): + rest_api.pop("minimumCompressionSize", None) + + if not rest_api.get("tags"): + if remove_tags: + rest_api.pop("tags", None) + else: + # if `tags` is falsy, set it to an empty dict + rest_api["tags"] = {} + + if not rest_api.get("version"): + rest_api.pop("version", None) + if not rest_api.get("description"): + rest_api.pop("description", None) + + return rest_api + + +def remove_empty_attributes_from_method(method: Method) -> Method: + if not method.get("methodResponses"): + method.pop("methodResponses", None) + + if method.get("requestModels") is None: + method.pop("requestModels", None) + + if method.get("requestParameters") is None: + method.pop("requestParameters", None) + + return method + + +def remove_empty_attributes_from_integration(integration: Integration): + if not integration: + return integration + + if not integration.get("integrationResponses"): + integration.pop("integrationResponses", None) + + if integration.get("requestParameters") is None: + integration.pop("requestParameters", None) + + return integration + + +def remove_empty_attributes_from_model(model: Model) -> Model: + if not model.get("description"): + model.pop("description", None) + + return model + + +def remove_empty_attributes_from_integration_response(integration_response: IntegrationResponse): + if integration_response.get("responseTemplates") is None: + integration_response.pop("responseTemplates", None) + + return integration_response + + +def fix_throttle_and_quota_from_usage_plan(usage_plan: UsagePlan) -> None: + if quota := usage_plan.get("quota"): + if "offset" not in quota: + quota["offset"] = 0 + else: + usage_plan.pop("quota", None) + + if throttle := usage_plan.get("throttle"): + if rate_limit := throttle.get("rateLimit"): + throttle["rateLimit"] = float(rate_limit) + + if burst_limit := throttle.get("burstLimit"): + throttle["burstLimit"] = int(burst_limit) + else: + usage_plan.pop("throttle", None) + 
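+# Illustrative usage sketch (comments only, not provider logic): the helper above
+# normalizes a usage plan in place. Assuming a hypothetical plan like
+#   plan = UsagePlan(throttle={"rateLimit": "100.0", "burstLimit": "200"}, quota={"limit": 10})
+# a call to fix_throttle_and_quota_from_usage_plan(plan) would leave
+#   plan["throttle"] == {"rateLimit": 100.0, "burstLimit": 200}
+#   plan["quota"] == {"limit": 10, "offset": 0}
+# while a falsy "quota" or "throttle" entry would be popped from the plan entirely.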
+ +def validate_model_in_use(moto_rest_api: MotoRestAPI, model_name: str) -> None: + for resource in moto_rest_api.resources.values(): + for method in resource.resource_methods.values(): + if method.request_models and model_name in set(method.request_models.values()): + path = f"{resource.get_path()}/{method.http_method}" + raise ConflictException( + f"Cannot delete model '{model_name}', is referenced in method request: {path}" + ) + + +def get_moto_rest_api_root_resource(moto_rest_api: MotoRestAPI) -> str: + for res_id, res_obj in moto_rest_api.resources.items(): + if res_obj.path_part == "/" and not res_obj.parent_id: + return res_id + raise Exception(f"Unable to find root resource for API {moto_rest_api.id}") + + +def create_custom_context( + context: RequestContext, action: str, parameters: ServiceRequest +) -> RequestContext: + ctx = create_aws_request_context( + service_name=context.service.service_name, + action=action, + parameters=parameters, + region=context.region, + ) + ctx.request.headers.update(context.request.headers) + ctx.account_id = context.account_id + return ctx + + +def patch_api_gateway_entity(entity: Any, patch_operations: ListOfPatchOperation): + patch_operations = patch_operations or [] + + if isinstance(entity, dict): + entity_dict = entity + else: + if not isinstance(entity.__dict__, DelSafeDict): + entity.__dict__ = DelSafeDict(entity.__dict__) + entity_dict = entity.__dict__ + + not_supported_attributes = {"/id", "/region_name", "/create_date"} + + model_attributes = list(entity_dict.keys()) + for operation in patch_operations: + path_start = operation["path"].strip("/").split("/")[0] + path_start_usc = camelcase_to_underscores(path_start) + if path_start not in model_attributes and path_start_usc in model_attributes: + operation["path"] = operation["path"].replace(path_start, path_start_usc) + if operation["path"] in not_supported_attributes: + raise BadRequestException(f"Invalid patch path {operation['path']}") + + apply_json_patch_safe(entity_dict, patch_operations, in_place=True) + + +def to_authorizer_response_json(api_id, data): + result = to_response_json("authorizer", data, api_id=api_id) + result = select_from_typed_dict(Authorizer, result) + return result + + +def to_validator_response_json(api_id, data): + result = to_response_json("validator", data, api_id=api_id) + result = select_from_typed_dict(RequestValidator, result) + return result + + +def to_documentation_part_response_json(api_id, data): + result = to_response_json("documentationpart", data, api_id=api_id) + result = select_from_typed_dict(DocumentationPart, result) + return result + + +def to_base_mapping_response_json(domain_name, base_path, data): + self_link = "/domainnames/%s/basepathmappings/%s" % (domain_name, base_path) + result = to_response_json("basepathmapping", data, self_link=self_link) + result = select_from_typed_dict(BasePathMapping, result) + return result + + +def to_account_response_json(data): + result = to_response_json("account", data, self_link="/account") + result = select_from_typed_dict(Account, result) + return result + + +def to_vpc_link_response_json(data): + result = to_response_json("vpclink", data) + result = select_from_typed_dict(VpcLink, result) + return result + + +def to_client_cert_response_json(data): + result = to_response_json("clientcertificate", data, id_attr="clientCertificateId") + result = select_from_typed_dict(ClientCertificate, result) + return result + + +def to_rest_api_response_json(data): + result = to_response_json("restapi", data) + 
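+    # keep only the keys declared on the RestApi TypedDict, so that extra or
+    # internal attributes do not leak into the serialized response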
result = select_from_typed_dict(RestApi, result) + return result + + +def to_response_json(model_type, data, api_id=None, self_link=None, id_attr=None): + if isinstance(data, list) and len(data) == 1: + data = data[0] + id_attr = id_attr or "id" + result = deepcopy(data) + if not self_link: + self_link = "/%ss/%s" % (model_type, data[id_attr]) + if api_id: + self_link = "/restapis/%s/%s" % (api_id, self_link) + # TODO: check if this is still required - "_links" are listed in the sample responses in the docs, but + # recent parity tests indicate that this field is not returned by real AWS... + # https://docs.aws.amazon.com/apigateway/latest/api/API_GetAuthorizers.html#API_GetAuthorizers_Example_1_Response + if "_links" not in result: + result["_links"] = {} + result["_links"]["self"] = {"href": self_link} + result["_links"]["curies"] = { + "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-authorizer-latest.html", + "name": model_type, + "templated": True, + } + result["_links"]["%s:delete" % model_type] = {"href": self_link} + return result + + +DEFAULT_EMPTY_MODEL = Model( + id=short_uid()[:6], + name=EMPTY_MODEL, + contentType="application/json", + description="This is a default empty schema model", + schema=json.dumps( + { + "$schema": "http://json-schema.org/draft-04/schema#", + "title": "Empty Schema", + "type": "object", + } + ), +) + +DEFAULT_ERROR_MODEL = Model( + id=short_uid()[:6], + name=ERROR_MODEL, + contentType="application/json", + description="This is a default error schema model", + schema=json.dumps( + { + "$schema": "http://json-schema.org/draft-04/schema#", + "title": "Error Schema", + "type": "object", + "properties": {"message": {"type": "string"}}, + } + ), +) + + +# TODO: maybe extract this in its own files, or find a better generalizable way +UPDATE_METHOD_PATCH_PATHS = { + "supported_paths": [ + "/authorizationScopes", + "/authorizationType", + "/authorizerId", + "/apiKeyRequired", + "/operationName", + "/requestParameters/", + "/requestModels/", + "/requestValidatorId", + ], + "add": [ + "/authorizationScopes", + "/requestParameters/", + "/requestModels/", + ], + "remove": [ + "/authorizationScopes", + "/requestParameters/", + "/requestModels/", + ], + "replace": [ + "/authorizationType", + "/authorizerId", + "/apiKeyRequired", + "/operationName", + "/requestParameters/", + "/requestModels/", + "/requestValidatorId", + ], +} + +DEFAULT_GATEWAY_RESPONSES: dict[GatewayResponseType, GatewayResponse] = { + GatewayResponseType.REQUEST_TOO_LARGE: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "REQUEST_TOO_LARGE", + "statusCode": "413", + }, + GatewayResponseType.RESOURCE_NOT_FOUND: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "RESOURCE_NOT_FOUND", + "statusCode": "404", + }, + GatewayResponseType.AUTHORIZER_CONFIGURATION_ERROR: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "AUTHORIZER_CONFIGURATION_ERROR", + "statusCode": "500", + }, + GatewayResponseType.MISSING_AUTHENTICATION_TOKEN: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "MISSING_AUTHENTICATION_TOKEN", + "statusCode": 
"403", + }, + GatewayResponseType.BAD_REQUEST_BODY: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "BAD_REQUEST_BODY", + "statusCode": "400", + }, + GatewayResponseType.INVALID_SIGNATURE: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "INVALID_SIGNATURE", + "statusCode": "403", + }, + GatewayResponseType.INVALID_API_KEY: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "INVALID_API_KEY", + "statusCode": "403", + }, + GatewayResponseType.BAD_REQUEST_PARAMETERS: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "BAD_REQUEST_PARAMETERS", + "statusCode": "400", + }, + GatewayResponseType.AUTHORIZER_FAILURE: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "AUTHORIZER_FAILURE", + "statusCode": "500", + }, + GatewayResponseType.UNAUTHORIZED: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "UNAUTHORIZED", + "statusCode": "401", + }, + GatewayResponseType.INTEGRATION_TIMEOUT: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "INTEGRATION_TIMEOUT", + "statusCode": "504", + }, + GatewayResponseType.ACCESS_DENIED: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "ACCESS_DENIED", + "statusCode": "403", + }, + GatewayResponseType.DEFAULT_4XX: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "DEFAULT_4XX", + }, + GatewayResponseType.DEFAULT_5XX: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "DEFAULT_5XX", + }, + GatewayResponseType.WAF_FILTERED: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "WAF_FILTERED", + "statusCode": "403", + }, + GatewayResponseType.QUOTA_EXCEEDED: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "QUOTA_EXCEEDED", + "statusCode": "429", + }, + GatewayResponseType.THROTTLED: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "THROTTLED", + "statusCode": "429", + }, + GatewayResponseType.API_CONFIGURATION_ERROR: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "API_CONFIGURATION_ERROR", + "statusCode": "500", + }, + GatewayResponseType.UNSUPPORTED_MEDIA_TYPE: { + "defaultResponse": True, + 
"responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "UNSUPPORTED_MEDIA_TYPE", + "statusCode": "415", + }, + GatewayResponseType.INTEGRATION_FAILURE: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "INTEGRATION_FAILURE", + "statusCode": "504", + }, + GatewayResponseType.EXPIRED_TOKEN: { + "defaultResponse": True, + "responseParameters": {}, + "responseTemplates": {"application/json": '{"message":$context.error.messageString}'}, + "responseType": "EXPIRED_TOKEN", + "statusCode": "403", + }, +} + +VALID_PATCH_OPERATIONS = ["add", "remove", "move", "test", "replace", "copy"] diff --git a/localstack-core/localstack/services/apigateway/legacy/router_asf.py b/localstack-core/localstack/services/apigateway/legacy/router_asf.py new file mode 100644 index 0000000000000..0664c98c56f20 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/legacy/router_asf.py @@ -0,0 +1,160 @@ +import logging +from typing import Any, Dict + +from requests.models import Response as RequestsResponse +from werkzeug.datastructures import Headers +from werkzeug.exceptions import NotFound + +from localstack.constants import HEADER_LOCALSTACK_EDGE_URL +from localstack.http import Request, Response, Router +from localstack.http.dispatcher import Handler +from localstack.http.request import restore_payload +from localstack.services.apigateway.legacy.context import ApiInvocationContext +from localstack.services.apigateway.legacy.helpers import get_api_account_id_and_region +from localstack.services.apigateway.legacy.invocations import invoke_rest_api_from_request +from localstack.utils.aws.aws_responses import LambdaResponse +from localstack.utils.strings import remove_leading_extra_slashes + +LOG = logging.getLogger(__name__) + + +# TODO: with the latest snapshot tests, we might start moving away from the +# invocation context property decorators and use the url_params directly, +# something asked for a long time. +def to_invocation_context( + request: Request, url_params: Dict[str, Any] = None +) -> ApiInvocationContext: + """ + Converts an HTTP Request object into an ApiInvocationContext. + + :param request: the original request + :param url_params: the parameters extracted from the URL matching rules + :return: the ApiInvocationContext + """ + if url_params is None: + url_params = {} + + method = request.method + # Base path is not URL-decoded. + # Example: test%2Balias@gmail.com => test%2Balias@gmail.com + raw_uri = path = request.environ.get("RAW_URI") + if raw_uri.startswith("//"): + # if starts with //, then replace the first // with / + path = remove_leading_extra_slashes(raw_uri) + + data = restore_payload(request) + headers = Headers(request.headers) + + # TODO: verify that this is needed + # adjust the X-Forwarded-For header + x_forwarded_for = headers.getlist("X-Forwarded-For") + x_forwarded_for.append(request.remote_addr) + x_forwarded_for.append(request.host) + headers["X-Forwarded-For"] = ", ".join(x_forwarded_for) + + # set the x-localstack-edge header, it is used to parse the domain + headers[HEADER_LOCALSTACK_EDGE_URL] = request.host_url.strip("/") + + # FIXME: Use the already parsed url params instead of parsing them into the ApiInvocationContext part-by-part. + # We already would have all params at hand to avoid _all_ the parsing, but the parsing + # has side-effects (f.e. 
setting the region in a thread local)!
+    #        It would be best to use a small (immutable) context for the already parsed params and the Request object
+    #        and use it everywhere.
+    ctx = ApiInvocationContext(method, path, data, headers, stage=url_params.get("stage"))
+    ctx.raw_uri = raw_uri
+    ctx.auth_identity["sourceIp"] = request.remote_addr
+
+    return ctx
+
+
+def convert_response(result: RequestsResponse) -> Response:
+    """
+    Utility function to convert a response from the requests library to our internal (Werkzeug based) Response object.
+    """
+    if result is None:
+        return Response()
+
+    if isinstance(result, LambdaResponse):
+        headers = Headers(dict(result.headers))
+        for k, values in result.multi_value_headers.items():
+            for value in values:
+                headers.add(k, value)
+    else:
+        headers = dict(result.headers)
+
+    response = Response(status=result.status_code, headers=headers)
+
+    if isinstance(result.content, dict):
+        response.set_json(result.content)
+    elif isinstance(result.content, (str, bytes)):
+        response.data = result.content
+    else:
+        raise ValueError(f"Unhandled content type {type(result.content)}")
+
+    return response
+
+
+class ApigatewayRouter:
+    """
+    Simple implementation around a Router to manage dynamic restapi routes (routes added by a user through the
+    apigateway API).
+    """
+
+    router: Router[Handler]
+
+    def __init__(self, router: Router[Handler]):
+        self.router = router
+        self.registered = False
+
+    def register_routes(self) -> None:
+        """Registers parameterized routes for API Gateway user invocations."""
+        if self.registered:
+            LOG.debug("Skipped API Gateway route registration (routes already registered).")
+            return
+        self.registered = True
+        LOG.debug("Registering parameterized API Gateway routes.")
+        host_pattern = "<api_id>.execute-api.<regex('.*'):server>"
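+        # two invocation styles are wired up below: the host-based execute-api style
+        # (e.g. http://<api_id>.execute-api.localhost.localstack.cloud:4566/<stage>/<path>)
+        # and the LocalStack-specific path-based style under /restapis/.../_user_request_/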
+        self.router.add(
+            "/",
+            host=host_pattern,
+            endpoint=self.invoke_rest_api,
+            defaults={"path": "", "stage": None},
+            strict_slashes=True,
+        )
+        self.router.add(
+            "/<stage>/",
+            host=host_pattern,
+            endpoint=self.invoke_rest_api,
+            defaults={"path": ""},
+            strict_slashes=False,
+        )
+        self.router.add(
+            "/<stage>/<path:path>",
+            host=host_pattern,
+            endpoint=self.invoke_rest_api,
+            strict_slashes=True,
+        )
+
+        # add the localstack-specific _user_request_ routes
+        self.router.add(
+            "/restapis/<api_id>/<stage>/_user_request_",
+            endpoint=self.invoke_rest_api,
+            defaults={"path": ""},
+        )
+        self.router.add(
+            "/restapis/<api_id>/<stage>/_user_request_/<path:path>",
+            endpoint=self.invoke_rest_api,
+            strict_slashes=True,
+        )
+
+    def invoke_rest_api(self, request: Request, **url_params: str) -> Response:
+        account_id, region_name = get_api_account_id_and_region(url_params["api_id"])
+        if not region_name:
+            return Response(status=404)
+        invocation_context = to_invocation_context(request, url_params)
+        invocation_context.region_name = region_name
+        invocation_context.account_id = account_id
+        result = invoke_rest_api_from_request(invocation_context)
+        if result is not None:
+            return convert_response(result)
+        raise NotFound()
diff --git a/localstack-core/localstack/services/apigateway/legacy/templates.py b/localstack-core/localstack/services/apigateway/legacy/templates.py
new file mode 100644
index 0000000000000..0ae853981ac02
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/legacy/templates.py
@@ -0,0 +1,381 @@
+import base64
+import copy
+import json
+import logging
+from enum import Enum
+from typing import Any, Dict, Union
+from urllib.parse import quote_plus, unquote_plus
+
+import xmltodict
+
+from localstack import config
+from localstack.constants import APPLICATION_JSON, APPLICATION_XML
+from localstack.services.apigateway.legacy.context import ApiInvocationContext
+from localstack.services.apigateway.legacy.helpers import select_integration_response
+from localstack.utils.aws.templating import APIGW_SOURCE, VelocityUtil, VtlTemplate
+from localstack.utils.json import extract_jsonpath, json_safe, try_json
+from localstack.utils.strings import to_str
+
+LOG = logging.getLogger(__name__)
+
+
+class PassthroughBehavior(Enum):
+    WHEN_NO_MATCH = "WHEN_NO_MATCH"
+    WHEN_NO_TEMPLATES = "WHEN_NO_TEMPLATES"
+    NEVER = "NEVER"
+
+
+class MappingTemplates:
+    """
+    API Gateway uses mapping templates to transform incoming requests before they are sent to the
+    integration back end. With API Gateway, you can define one mapping template for each possible
+    content type. The content type selection is based on the Content-Type header of the incoming
+    request. If no content type is specified in the request, API Gateway uses an application/json
+    mapping template. By default, mapping templates are configured to simply pass through the
+    request input. Mapping templates use Apache Velocity to generate a request to your back end.
+    """
+
+    passthrough_behavior: PassthroughBehavior
+
+    class UnsupportedMediaType(Exception):
+        pass
+
+    def __init__(self, passthrough_behaviour: str):
+        self.passthrough_behavior = self.get_passthrough_behavior(passthrough_behaviour)
+
+    def check_passthrough_behavior(self, request_template):
+        """
+        Specifies how the method request body of an unmapped content type will be passed through
+        the integration request to the back end without transformation.
+ A content type is unmapped if no mapping template is defined in the integration or the + content type does not match any of the mapped content types, as specified in requestTemplates + """ + if not request_template and self.passthrough_behavior in { + PassthroughBehavior.NEVER, + PassthroughBehavior.WHEN_NO_TEMPLATES, + }: + raise MappingTemplates.UnsupportedMediaType() + + @staticmethod + def get_passthrough_behavior(passthrough_behaviour: str): + return getattr(PassthroughBehavior, passthrough_behaviour, None) + + +class AttributeDict(dict): + """ + Wrapper returned by VelocityUtilApiGateway.parseJson to allow access to dict values as attributes (dot notation), + e.g.: $util.parseJson('$.foo').bar + """ + + def __init__(self, *args, **kwargs): + super(AttributeDict, self).__init__(*args, **kwargs) + for key, value in self.items(): + if isinstance(value, dict): + self[key] = AttributeDict(value) + + def __getattr__(self, name): + if name in self: + return self[name] + raise AttributeError(f"'AttributeDict' object has no attribute '{name}'") + + def __setattr__(self, name, value): + self[name] = value + + def __delattr__(self, name): + if name in self: + del self[name] + else: + raise AttributeError(f"'AttributeDict' object has no attribute '{name}'") + + +class VelocityUtilApiGateway(VelocityUtil): + """ + Simple class to mimic the behavior of variable '$util' in AWS API Gateway integration + velocity templates. + See: https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html + """ + + def base64Encode(self, s): + if not isinstance(s, str): + s = json.dumps(s) + encoded_str = s.encode(config.DEFAULT_ENCODING) + encoded_b64_str = base64.b64encode(encoded_str) + return encoded_b64_str.decode(config.DEFAULT_ENCODING) + + def base64Decode(self, s): + if not isinstance(s, str): + s = json.dumps(s) + return base64.b64decode(s) + + def toJson(self, obj): + return obj and json.dumps(obj) + + def urlEncode(self, s): + return quote_plus(s) + + def urlDecode(self, s): + return unquote_plus(s) + + def escapeJavaScript(self, obj: Any) -> str: + """ + Converts the given object to a string and escapes any regular single quotes (') into escaped ones (\'). + JSON dumps will escape the single quotes. + https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html + """ + if obj is None: + return "null" + if isinstance(obj, str): + # empty string escapes to empty object + if len(obj.strip()) == 0: + return "{}" + return json.dumps(obj)[1:-1] + if obj in (True, False): + return str(obj).lower() + return str(obj) + + def parseJson(self, s: str): + obj = json.loads(s) + return AttributeDict(obj) if isinstance(obj, dict) else obj + + +class VelocityInput: + """ + Simple class to mimic the behavior of variable '$input' in AWS API Gateway integration + velocity templates. 
+ See: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html + """ + + def __init__(self, body, params): + self.parameters = params or {} + self.value = body + + def path(self, path): + if not self.value: + return {} + value = self.value if isinstance(self.value, dict) else json.loads(self.value) + return extract_jsonpath(value, path) + + def json(self, path): + path = path or "$" + matching = self.path(path) + if isinstance(matching, (list, dict)): + matching = json_safe(matching) + return json.dumps(matching) + + @property + def body(self): + return self.value + + def params(self, name=None): + if not name: + return self.parameters + for k in ["path", "querystring", "header"]: + if val := self.parameters.get(k).get(name): + return val + return "" + + def __getattr__(self, name): + return self.value.get(name) + + def __repr__(self): + return "$input" + + +class ApiGatewayVtlTemplate(VtlTemplate): + """Util class for rendering VTL templates with API Gateway specific extensions""" + + def prepare_namespace(self, variables, source: str = APIGW_SOURCE) -> Dict[str, Any]: + namespace = super().prepare_namespace(variables, source) + if stage_var := variables.get("stage_variables") or {}: + namespace["stageVariables"] = stage_var + input_var = variables.get("input") or {} + variables = { + "input": VelocityInput(input_var.get("body"), input_var.get("params")), + "util": VelocityUtilApiGateway(), + } + namespace.update(variables) + return namespace + + +class Templates: + __slots__ = ["vtl"] + + def __init__(self): + self.vtl = ApiGatewayVtlTemplate() + + def render(self, api_context: ApiInvocationContext) -> Union[bytes, str]: + pass + + def render_vtl(self, template, variables): + return self.vtl.render_vtl(template, variables=variables) + + @staticmethod + def build_variables_mapping(api_context: ApiInvocationContext) -> dict[str, Any]: + # TODO: make this (dict) an object so usages of "render_vtl" variables are defined + ctx = copy.deepcopy(api_context.context or {}) + # https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-override-request-response-parameters.html + # create namespace for request override + ctx["requestOverride"] = { + "header": {}, + "path": {}, + "querystring": {}, + } + + ctx["responseOverride"] = { + "header": {}, + "status": 200, + } + + return { + "context": ctx, + "stage_variables": api_context.stage_variables or {}, + "input": { + "body": api_context.data_as_string(), + "params": { + "path": api_context.path_params, + "querystring": api_context.query_params(), + # Sometimes we get a werkzeug.datastructures.Headers object, sometimes a dict + # depending on the request. We need to convert to a dict to be able to render + # the template. 
+ "header": dict(api_context.headers), + }, + }, + } + + +class RequestTemplates(Templates): + """ + Handles request template rendering + """ + + def render( + self, api_context: ApiInvocationContext, template_key: str = APPLICATION_JSON + ) -> Union[bytes, str]: + LOG.debug( + "Method request body before transformations: %s", to_str(api_context.data_as_string()) + ) + request_templates = api_context.integration.get("requestTemplates", {}) + template = request_templates.get(template_key) + if not template: + return api_context.data_as_string() + + variables = self.build_variables_mapping(api_context) + result = self.render_vtl(template.strip(), variables=variables) + + # set the request overrides into context + api_context.headers.update( + variables.get("context", {}).get("requestOverride", {}).get("header", {}) + ) + + LOG.debug("Endpoint request body after transformations:\n%s", result) + return result + + +class ResponseTemplates(Templates): + """ + Handles response template rendering. The integration response status code is used to select + the correct template to render, if there is no template for the status code, the default + template is used. + """ + + def render(self, api_context: ApiInvocationContext, **kwargs) -> Union[bytes, str]: + # XXX: keep backwards compatibility until we migrate all integrations to this new classes + # api_context contains a response object that we want slowly remove from it + data = kwargs.get("response", "") + response = data or api_context.response + integration = api_context.integration + # we set context data with the response content because later on we use context data as + # the body field in the template. We need to improve this by using the right source + # depending on the type of templates. + api_context.data = response._content + + # status code returned by the integration + status_code = str(response.status_code) + + # get the integration responses configuration from the integration object + integration_responses = integration.get("integrationResponses") + if not integration_responses: + return response._content + + # get the configured integration response status codes, + # e.g. ["200", "400", "500"] + integration_status_codes = [str(code) for code in list(integration_responses.keys())] + # if there are no integration responses, we return the response as is + if not integration_status_codes: + return response.content + + # The following code handles two use cases.If there is an integration response for the status code returned + # by the integration, we use the template configured for that status code (1) or the errorMessage (2) for + # lambda integrations. + # For an HTTP integration, API Gateway matches the regex to the HTTP status code to return + # For a Lambda function, API Gateway matches the regex to the errorMessage header to + # return a status code. + # For example, to set a 400 response for any error that starts with Malformed, + # set the method response status code to 400 and the Lambda error regex to Malformed.*. 
+ match_resp = status_code + if isinstance(try_json(response._content), dict): + resp_dict = try_json(response._content) + if "errorMessage" in resp_dict: + match_resp = resp_dict.get("errorMessage") + + selected_integration_response = select_integration_response(match_resp, api_context) + response.status_code = int(selected_integration_response.get("statusCode", 200)) + response_templates = selected_integration_response.get("responseTemplates", {}) + + # we only support JSON and XML templates for now - if there is no template we return the response as is + # If the content type is not supported we always use application/json as default value + # TODO - support other content types, besides application/json and application/xml + # see https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html#selecting-mapping-templates + accept = api_context.headers.get("accept", APPLICATION_JSON) + supported_types = [APPLICATION_JSON, APPLICATION_XML] + media_type = accept if accept in supported_types else APPLICATION_JSON + if not (template := response_templates.get(media_type, {})): + return response._content + + # we render the template with the context data and the response content + variables = self.build_variables_mapping(api_context) + # update the response body + response._content = self._render_as_text(template, variables) + if media_type == APPLICATION_JSON: + self._validate_json(response.content) + elif media_type == APPLICATION_XML: + self._validate_xml(response.content) + + if response_overrides := variables.get("context", {}).get("responseOverride", {}): + response.headers.update(response_overrides.get("header", {}).items()) + response.status_code = response_overrides.get("status", 200) + + LOG.debug("Endpoint response body after transformations:\n%s", response._content) + return response._content + + def _render_as_text(self, template: str, variables: dict[str, Any]) -> str: + """ + Render the given Velocity template string + variables into a plain string. + :return: the template rendering result as a string + """ + rendered_tpl = self.render_vtl(template, variables=variables) + return rendered_tpl.strip() + + @staticmethod + def _validate_json(content: str): + """ + Checks that the content received is a valid JSON. + :raise JSONDecodeError: if content is not valid JSON + """ + try: + json.loads(content) + except Exception as e: + LOG.info("Unable to parse template result as JSON: %s - %s", e, content) + raise + + @staticmethod + def _validate_xml(content: str): + """ + Checks that the content received is a valid XML. 
+ :raise xml.parsers.expat.ExpatError: if content is not valid XML + """ + try: + xmltodict.parse(content) + except Exception as e: + LOG.info("Unable to parse template result as XML: %s - %s", e, content) + raise diff --git a/localstack-core/localstack/services/apigateway/models.py b/localstack-core/localstack/services/apigateway/models.py new file mode 100644 index 0000000000000..44fca6b65ae29 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/models.py @@ -0,0 +1,155 @@ +from typing import Any, Dict, List + +from requests.structures import CaseInsensitiveDict + +from localstack.aws.api.apigateway import ( + Authorizer, + DocumentationPart, + DocumentationVersion, + DomainName, + GatewayResponse, + GatewayResponseType, + Model, + RequestValidator, + Resource, + RestApi, +) +from localstack.services.stores import ( + AccountRegionBundle, + BaseStore, + CrossAccountAttribute, + CrossRegionAttribute, + LocalAttribute, +) +from localstack.utils.aws import arns + + +class RestApiContainer: + # contains the RestApi dictionary. We're not making use of it yet, still using moto data. + rest_api: RestApi + # maps AuthorizerId -> Authorizer + authorizers: Dict[str, Authorizer] + # maps RequestValidatorId -> RequestValidator + validators: Dict[str, RequestValidator] + # map DocumentationPartId -> DocumentationPart + documentation_parts: Dict[str, DocumentationPart] + # map doc version name -> DocumentationVersion + documentation_versions: Dict[str, DocumentationVersion] + # not used yet, still in moto + gateway_responses: Dict[GatewayResponseType, GatewayResponse] + # maps Model name -> Model + models: Dict[str, Model] + # maps Model name -> resolved dict Model, so we don't need to load the JSON everytime + resolved_models: Dict[str, dict] + # maps ResourceId of a Resource to its children ResourceIds + resource_children: Dict[str, List[str]] + + def __init__(self, rest_api: RestApi): + self.rest_api = rest_api + self.authorizers = {} + self.validators = {} + self.documentation_parts = {} + self.documentation_versions = {} + self.gateway_responses = {} + self.models = {} + self.resolved_models = {} + self.resource_children = {} + + +class MergedRestApi(RestApiContainer): + """Merged REST API between Moto data and LocalStack data, used in our Invocation logic""" + + # TODO: when migrating away from Moto, RestApiContainer and MergedRestApi will have the same signature, so we can + # safely remove it and only use RestApiContainer in our invocation logic + resources: dict[str, Resource] + + def __init__(self, rest_api: RestApi): + super().__init__(rest_api) + self.resources = {} + + @classmethod + def from_rest_api_container( + cls, + rest_api_container: RestApiContainer, + resources: dict[str, Resource], + ) -> "MergedRestApi": + merged = cls(rest_api=rest_api_container.rest_api) + merged.authorizers = rest_api_container.authorizers + merged.validators = rest_api_container.validators + merged.documentation_parts = rest_api_container.documentation_parts + merged.documentation_versions = rest_api_container.documentation_versions + merged.gateway_responses = rest_api_container.gateway_responses + merged.models = rest_api_container.models + merged.resolved_models = rest_api_container.resolved_models + merged.resource_children = rest_api_container.resource_children + merged.resources = resources + + return merged + + +class RestApiDeployment: + def __init__( + self, + account_id: str, + region: str, + rest_api: MergedRestApi, + ): + self.rest_api = rest_api + self.account_id = account_id + 
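+        # the account and region are pinned here together with the frozen MergedRestApi,
+        # so the invocation logic can work against this deployment snapshot directly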
+        self.region = region
+
+
+class ApiGatewayStore(BaseStore):
+    # maps (API id) -> RestApiContainer
+    # TODO: remove CaseInsensitiveDict, and lower the value of the ID when getting it from the tags
+    rest_apis: Dict[str, RestApiContainer] = LocalAttribute(default=CaseInsensitiveDict)
+
+    # account details
+    _account: Dict[str, Any] = LocalAttribute(default=dict)
+
+    # maps (domain_name) -> [path_mappings]
+    base_path_mappings: Dict[str, List[Dict]] = LocalAttribute(default=dict)
+
+    # maps ID to VPC link details
+    vpc_links: Dict[str, Dict] = LocalAttribute(default=dict)
+
+    # maps cert ID to client certificate details
+    client_certificates: Dict[str, Dict] = LocalAttribute(default=dict)
+
+    # maps domain name to domain name model
+    domain_names: Dict[str, DomainName] = LocalAttribute(default=dict)
+
+    # maps resource ARN to tags
+    TAGS: Dict[str, Dict[str, str]] = CrossRegionAttribute(default=dict)
+
+    # internal deployments, representing a frozen REST API for a deployment, used in our router
+    # TODO: make sure API IDs are unique across all accounts
+    # maps ApiID to a map of deploymentId and RestApiDeployment, an executable/snapshot of a REST API
+    internal_deployments: dict[str, dict[str, RestApiDeployment]] = CrossAccountAttribute(
+        default=dict
+    )
+
+    # active deployments, mapping API ID to a map of Stage and deployment ID
+    # TODO: make sure API IDs are unique across all accounts
+    active_deployments: dict[str, dict[str, str]] = CrossAccountAttribute(default=dict)
+
+    def __init__(self):
+        super().__init__()
+
+    @property
+    def account(self):
+        if not self._account:
+            self._account.update(
+                {
+                    "cloudwatchRoleArn": arns.iam_role_arn(
+                        "api-gw-cw-role", self._account_id, self._region_name
+                    ),
+                    "throttleSettings": {"burstLimit": 1000, "rateLimit": 500},
+                    "features": ["UsagePlans"],
+                    "apiKeyVersion": "1",
+                }
+            )
+        return self._account
+
+
+apigateway_stores = AccountRegionBundle("apigateway", ApiGatewayStore)
diff --git a/localstack/services/ec2/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/__init__.py
similarity index 100%
rename from localstack/services/ec2/__init__.py
rename to localstack-core/localstack/services/apigateway/next_gen/__init__.py
diff --git a/localstack/services/es/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/__init__.py
similarity index 100%
rename from localstack/services/es/__init__.py
rename to localstack-core/localstack/services/apigateway/next_gen/execute_api/__init__.py
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/api.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/api.py
new file mode 100644
index 0000000000000..843938e0611ed
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/api.py
@@ -0,0 +1,17 @@
+from typing import Callable, Type
+
+from rolo import Response
+from rolo.gateway.chain import HandlerChain as RoloHandlerChain
+
+from .context import RestApiInvocationContext
+
+RestApiGatewayHandler = Callable[
+    [RoloHandlerChain[RestApiInvocationContext], RestApiInvocationContext, Response], None
+]
+
+RestApiGatewayExceptionHandler = Callable[
+    [RoloHandlerChain[RestApiInvocationContext], Exception, RestApiInvocationContext, Response],
+    None,
+]
+
+RestApiGatewayHandlerChain: Type[RoloHandlerChain[RestApiInvocationContext]] = RoloHandlerChain
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/context.py
b/localstack-core/localstack/services/apigateway/next_gen/execute_api/context.py new file mode 100644 index 0000000000000..9f6be795d9af8 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/context.py @@ -0,0 +1,141 @@ +from http import HTTPMethod +from typing import Optional, TypedDict + +from rolo import Request +from rolo.gateway import RequestContext +from werkzeug.datastructures import Headers + +from localstack.aws.api.apigateway import Integration, Method, Resource, Stage +from localstack.services.apigateway.models import RestApiDeployment + +from .variables import ContextVariableOverrides, ContextVariables, LoggingContextVariables + + +class InvocationRequest(TypedDict, total=False): + http_method: HTTPMethod + """HTTP Method of the incoming request""" + raw_path: Optional[str] + # TODO: verify if raw_path is needed + """Raw path of the incoming request with no modification, needed to keep double forward slashes""" + path: Optional[str] + """Path of the request with no URL decoding""" + path_parameters: Optional[dict[str, str]] + """Path parameters of the request""" + query_string_parameters: dict[str, str] + """Query string parameters of the request""" + headers: Headers + """Raw headers using the Headers datastructure which allows access with no regards to casing""" + multi_value_query_string_parameters: dict[str, list[str]] + """Multi value query string parameters of the request""" + body: bytes + """Body content of the request""" + + +class IntegrationRequest(TypedDict, total=False): + http_method: HTTPMethod + """HTTP Method of the incoming request""" + uri: str + """URI of the integration""" + query_string_parameters: dict[str, str | list[str]] + """Query string parameters of the request""" + headers: Headers + """Headers of the request""" + body: bytes + """Body content of the request""" + + +class BaseResponse(TypedDict): + """Base class for Response objects in the context""" + + status_code: int + """Status code of the response""" + headers: Headers + """Headers of the response""" + body: bytes + """Body content of the response""" + + +class EndpointResponse(BaseResponse): + """Represents the response coming from an integration, called Endpoint Response in AWS""" + + pass + + +class InvocationResponse(BaseResponse): + """Represents the response coming after being serialized in an Integration Response in AWS""" + + pass + + +class RestApiInvocationContext(RequestContext): + """ + This context is going to be used to pass relevant information across an API Gateway invocation. 
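+
+    For illustration only (a hedged sketch; `my_handler` is hypothetical and not part
+    of this module), a handler in the chain reads from and enriches this context:
+
+        def my_handler(chain, context: RestApiInvocationContext, response):
+            # the parsing handler has populated `invocation_request` by this point
+            method = context.invocation_request["http_method"]
+            context.context_variables["httpMethod"] = method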
+ """ + + deployment: Optional[RestApiDeployment] + """Contains the invoked REST API Resources""" + integration: Optional[Integration] + """The Method Integration for the invoked request""" + api_id: Optional[str] + """The REST API identifier of the invoked API""" + stage: Optional[str] + """The REST API stage name linked to this invocation""" + base_path: Optional[str] + """The REST API base path mapped to the stage of this invocation""" + deployment_id: Optional[str] + """The REST API deployment linked to this invocation""" + region: Optional[str] + """The region the REST API is living in.""" + account_id: Optional[str] + """The account the REST API is living in.""" + trace_id: Optional[str] + """The X-Ray trace ID for the request.""" + resource: Optional[Resource] + """The resource the invocation matched""" + resource_method: Optional[Method] + """The method of the resource the invocation matched""" + stage_variables: Optional[dict[str, str]] + """The Stage variables, also used in parameters mapping and mapping templates""" + stage_configuration: Optional[Stage] + """The Stage configuration, containing canary deployment settings""" + is_canary: Optional[bool] + """If the current call was directed to a canary deployment""" + context_variables: Optional[ContextVariables] + """The $context used in data models, authorizers, mapping templates, and CloudWatch access logging""" + context_variable_overrides: Optional[ContextVariableOverrides] + """requestOverrides and responseOverrides are passed from request templates to response templates but are + not in the integration context""" + logging_context_variables: Optional[LoggingContextVariables] + """Additional $context variables available only for access logging, not yet implemented""" + invocation_request: Optional[InvocationRequest] + """Contains the data relative to the invocation request""" + integration_request: Optional[IntegrationRequest] + """Contains the data needed to construct an HTTP request to an Integration""" + endpoint_response: Optional[EndpointResponse] + """Contains the data returned by an Integration""" + invocation_response: Optional[InvocationResponse] + """Contains the data serialized and to be returned by an invocation""" + + def __init__(self, request: Request): + super().__init__(request) + self.deployment = None + self.api_id = None + self.stage = None + self.base_path = None + self.deployment_id = None + self.account_id = None + self.region = None + self.invocation_request = None + self.resource = None + self.resource_method = None + self.integration = None + self.stage_variables = None + self.stage_configuration = None + self.is_canary = None + self.context_variables = None + self.logging_context_variables = None + self.integration_request = None + self.endpoint_response = None + self.invocation_response = None + self.trace_id = None + self.context_variable_overrides = None diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway.py new file mode 100644 index 0000000000000..85a31da903fde --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway.py @@ -0,0 +1,50 @@ +from rolo import Response +from rolo.gateway import Gateway + +from . import handlers +from .context import RestApiInvocationContext + + +class RestApiGateway(Gateway): + """ + This class controls the main path of an API Gateway REST API. 
It contains the definitions of the different handlers + to be called as part of the different steps of the invocation of the API. + + For now, you can extend the behavior of the invocation by adding handlers to the `preprocess_request` + CompositeHandler. + The documentation of this class will be extended as more behavior will be added to its handlers, as well as more + ways to extend it. + """ + + def __init__(self): + super().__init__(context_class=RestApiInvocationContext) + self.request_handlers.extend( + [ + handlers.parse_request, + handlers.modify_request, + handlers.route_request, + handlers.preprocess_request, + handlers.api_key_validation_handler, + handlers.method_request_handler, + handlers.integration_request_handler, + handlers.integration_handler, + handlers.integration_response_handler, + handlers.method_response_handler, + ] + ) + self.exception_handlers.extend( + [ + handlers.gateway_exception_handler, + ] + ) + self.response_handlers.extend( + [ + handlers.response_enricher, + handlers.usage_counter, + # add composite response handlers? + ] + ) + + def process_with_context(self, context: RestApiInvocationContext, response: Response): + chain = self.new_chain() + chain.handle(context, response) diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway_response.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway_response.py new file mode 100644 index 0000000000000..a0e9935ccf775 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/gateway_response.py @@ -0,0 +1,298 @@ +from enum import Enum + +from localstack.aws.api.apigateway import ( + GatewayResponse, + GatewayResponseType, + MapOfStringToString, + StatusCode, +) +from localstack.constants import APPLICATION_JSON + + +class GatewayResponseCode(StatusCode, Enum): + REQUEST_TOO_LARGE = "413" + RESOURCE_NOT_FOUND = "404" + AUTHORIZER_CONFIGURATION_ERROR = "500" + MISSING_AUTHENTICATION_TOKEN = "403" + BAD_REQUEST_BODY = "400" + INVALID_SIGNATURE = "403" + INVALID_API_KEY = "403" + BAD_REQUEST_PARAMETERS = "400" + AUTHORIZER_FAILURE = "500" + UNAUTHORIZED = "401" + INTEGRATION_TIMEOUT = "504" + ACCESS_DENIED = "403" + DEFAULT_4XX = "" + DEFAULT_5XX = "" + WAF_FILTERED = "403" + QUOTA_EXCEEDED = "429" + THROTTLED = "429" + API_CONFIGURATION_ERROR = "500" + UNSUPPORTED_MEDIA_TYPE = "415" + INTEGRATION_FAILURE = "504" + EXPIRED_TOKEN = "403" + + +class BaseGatewayException(Exception): + """ + Base class for all Gateway exceptions + Do not raise from this class directly. Instead, raise the specific Exception + """ + + message: str = "Unimplemented Response" + type: GatewayResponseType = None + status_code: int | str = None + code: str = "" + + def __init__(self, message: str = None, status_code: int | str = None): + if message is not None: + self.message = message + if status_code is not None: + self.status_code = status_code + elif self.status_code is None and self.type: + # Fallback to the default value + self.status_code = GatewayResponseCode[self.type] + + +class Default4xxError(BaseGatewayException): + """Do not raise from this class directly. + Use one of the subclasses instead, as they contain the appropriate header + """ + + type = GatewayResponseType.DEFAULT_4XX + status_code = 400 + + +class Default5xxError(BaseGatewayException): + """Do not raise from this class directly. 
+ Use one of the subclasses instead, as they contain the appropriate header + """ + + type = GatewayResponseType.DEFAULT_5XX + status_code = 500 + + +class BadRequestException(Default4xxError): + code = "BadRequestException" + + +class InternalFailureException(Default5xxError): + code = "InternalFailureException" + + +class InternalServerError(Default5xxError): + code = "InternalServerErrorException" + + +class AccessDeniedError(BaseGatewayException): + type = GatewayResponseType.ACCESS_DENIED + # TODO validate this header with aws validated tests + code = "AccessDeniedException" + + +class ApiConfigurationError(BaseGatewayException): + type = GatewayResponseType.API_CONFIGURATION_ERROR + # TODO validate this header with aws validated tests + code = "ApiConfigurationException" + + +class AuthorizerConfigurationError(BaseGatewayException): + type = GatewayResponseType.AUTHORIZER_CONFIGURATION_ERROR + # TODO validate this header with aws validated tests + code = "AuthorizerConfigurationException" + # the message is set to None by default in AWS + message = None + + +class AuthorizerFailureError(BaseGatewayException): + type = GatewayResponseType.AUTHORIZER_FAILURE + # TODO validate this header with aws validated tests + code = "AuthorizerFailureException" + + +class BadRequestParametersError(BaseGatewayException): + type = GatewayResponseType.BAD_REQUEST_PARAMETERS + code = "BadRequestException" + + +class BadRequestBodyError(BaseGatewayException): + type = GatewayResponseType.BAD_REQUEST_BODY + code = "BadRequestException" + + +class ExpiredTokenError(BaseGatewayException): + type = GatewayResponseType.EXPIRED_TOKEN + # TODO validate this header with aws validated tests + code = "ExpiredTokenException" + + +class IntegrationFailureError(BaseGatewayException): + type = GatewayResponseType.INTEGRATION_FAILURE + code = "InternalServerErrorException" + status_code = 500 + + +class IntegrationTimeoutError(BaseGatewayException): + type = GatewayResponseType.INTEGRATION_TIMEOUT + code = "InternalServerErrorException" + + +class InvalidAPIKeyError(BaseGatewayException): + type = GatewayResponseType.INVALID_API_KEY + code = "ForbiddenException" + + +class InvalidSignatureError(BaseGatewayException): + type = GatewayResponseType.INVALID_SIGNATURE + # TODO validate this header with aws validated tests + code = "InvalidSignatureException" + + +class MissingAuthTokenError(BaseGatewayException): + type = GatewayResponseType.MISSING_AUTHENTICATION_TOKEN + code = "MissingAuthenticationTokenException" + + +class QuotaExceededError(BaseGatewayException): + type = GatewayResponseType.QUOTA_EXCEEDED + code = "LimitExceededException" + + +class RequestTooLargeError(BaseGatewayException): + type = GatewayResponseType.REQUEST_TOO_LARGE + # TODO validate this header with aws validated tests + code = "RequestTooLargeException" + + +class ResourceNotFoundError(BaseGatewayException): + type = GatewayResponseType.RESOURCE_NOT_FOUND + # TODO validate this header with aws validated tests + code = "ResourceNotFoundException" + + +class ThrottledError(BaseGatewayException): + type = GatewayResponseType.THROTTLED + code = "TooManyRequestsException" + + +class UnauthorizedError(BaseGatewayException): + type = GatewayResponseType.UNAUTHORIZED + code = "UnauthorizedException" + + +class UnsupportedMediaTypeError(BaseGatewayException): + type = GatewayResponseType.UNSUPPORTED_MEDIA_TYPE + code = "BadRequestException" + + +class WafFilteredError(BaseGatewayException): + type = GatewayResponseType.WAF_FILTERED + # TODO validate 
this header with aws validated tests
+    code = "WafFilteredException"
+
+
+def build_gateway_response(
+    response_type: GatewayResponseType,
+    status_code: StatusCode = None,
+    response_parameters: MapOfStringToString = None,
+    response_templates: MapOfStringToString = None,
+    default_response: bool = True,
+) -> GatewayResponse:
+    """Build a Gateway Response. Attributes that are not provided will use defaults."""
+    response = GatewayResponse(
+        responseParameters=response_parameters or {},
+        responseTemplates=response_templates
+        or {APPLICATION_JSON: '{"message":$context.error.messageString}'},
+        responseType=response_type,
+        defaultResponse=default_response,
+        statusCode=status_code,
+    )
+
+    return response
+
+
+def get_gateway_response_or_default(
+    response_type: GatewayResponseType,
+    gateway_responses: dict[GatewayResponseType, GatewayResponse],
+) -> GatewayResponse:
+    """Utility function that will look for a matching Gateway Response in the following order.
+    - If provided in gateway_responses, return the dict's value
+    - If a DEFAULT_XXX response was configured, create a new response from it
+    - Otherwise, return from DEFAULT_GATEWAY_RESPONSES"""
+
+    if response := gateway_responses.get(response_type):
+        # User configured response
+        return response
+    response_code = GatewayResponseCode[response_type]
+    if response_code == "":
+        # DEFAULT_XXX responses do not have a default code
+        return DEFAULT_GATEWAY_RESPONSES.get(response_type)
+    if response_code >= "500":
+        # lexical comparison is safe here: all status codes are 3-digit strings
+        # 5XX responses will either get a user-configured DEFAULT_5XX or the DEFAULT_GATEWAY_RESPONSES
+        default = gateway_responses.get(GatewayResponseType.DEFAULT_5XX)
+    else:
+        # 4XX responses will either get a user-configured DEFAULT_4XX or the DEFAULT_GATEWAY_RESPONSES
+        default = gateway_responses.get(GatewayResponseType.DEFAULT_4XX)
+
+    if not default:
+        # If DEFAULT_XXX was not provided, return the default
+        return DEFAULT_GATEWAY_RESPONSES.get(response_type)
+
+    return build_gateway_response(
+        # Build a new response from the default
+        response_type,
+        status_code=default.get("statusCode"),
+        response_parameters=default.get("responseParameters"),
+        response_templates=default.get("responseTemplates"),
+    )
+
+
+DEFAULT_GATEWAY_RESPONSES = {
+    GatewayResponseType.REQUEST_TOO_LARGE: build_gateway_response(
+        GatewayResponseType.REQUEST_TOO_LARGE
+    ),
+    GatewayResponseType.RESOURCE_NOT_FOUND: build_gateway_response(
+        GatewayResponseType.RESOURCE_NOT_FOUND
+    ),
+    GatewayResponseType.AUTHORIZER_CONFIGURATION_ERROR: build_gateway_response(
+        GatewayResponseType.AUTHORIZER_CONFIGURATION_ERROR
+    ),
+    GatewayResponseType.MISSING_AUTHENTICATION_TOKEN: build_gateway_response(
+        GatewayResponseType.MISSING_AUTHENTICATION_TOKEN
+    ),
+    GatewayResponseType.BAD_REQUEST_BODY: build_gateway_response(
+        GatewayResponseType.BAD_REQUEST_BODY
+    ),
+    GatewayResponseType.INVALID_SIGNATURE: build_gateway_response(
+        GatewayResponseType.INVALID_SIGNATURE
+    ),
+    GatewayResponseType.INVALID_API_KEY: build_gateway_response(
+        GatewayResponseType.INVALID_API_KEY
+    ),
+    GatewayResponseType.BAD_REQUEST_PARAMETERS: build_gateway_response(
+        GatewayResponseType.BAD_REQUEST_PARAMETERS
+    ),
+    GatewayResponseType.AUTHORIZER_FAILURE: build_gateway_response(
+        GatewayResponseType.AUTHORIZER_FAILURE
+    ),
+    GatewayResponseType.UNAUTHORIZED: build_gateway_response(GatewayResponseType.UNAUTHORIZED),
+    GatewayResponseType.INTEGRATION_TIMEOUT: build_gateway_response(
+        GatewayResponseType.INTEGRATION_TIMEOUT
+    ),
+    GatewayResponseType.ACCESS_DENIED:
build_gateway_response(GatewayResponseType.ACCESS_DENIED), + GatewayResponseType.DEFAULT_4XX: build_gateway_response(GatewayResponseType.DEFAULT_4XX), + GatewayResponseType.DEFAULT_5XX: build_gateway_response(GatewayResponseType.DEFAULT_5XX), + GatewayResponseType.WAF_FILTERED: build_gateway_response(GatewayResponseType.WAF_FILTERED), + GatewayResponseType.QUOTA_EXCEEDED: build_gateway_response(GatewayResponseType.QUOTA_EXCEEDED), + GatewayResponseType.THROTTLED: build_gateway_response(GatewayResponseType.THROTTLED), + GatewayResponseType.API_CONFIGURATION_ERROR: build_gateway_response( + GatewayResponseType.API_CONFIGURATION_ERROR + ), + GatewayResponseType.UNSUPPORTED_MEDIA_TYPE: build_gateway_response( + GatewayResponseType.UNSUPPORTED_MEDIA_TYPE + ), + GatewayResponseType.INTEGRATION_FAILURE: build_gateway_response( + GatewayResponseType.INTEGRATION_FAILURE + ), + GatewayResponseType.EXPIRED_TOKEN: build_gateway_response(GatewayResponseType.EXPIRED_TOKEN), +} diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py new file mode 100644 index 0000000000000..e9e1dcb618166 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/__init__.py @@ -0,0 +1,29 @@ +from rolo.gateway import CompositeHandler + +from localstack.services.apigateway.analytics import invocation_counter + +from .analytics import IntegrationUsageCounter +from .api_key_validation import ApiKeyValidationHandler +from .gateway_exception import GatewayExceptionHandler +from .integration import IntegrationHandler +from .integration_request import IntegrationRequestHandler +from .integration_response import IntegrationResponseHandler +from .method_request import MethodRequestHandler +from .method_response import MethodResponseHandler +from .parse import InvocationRequestParser +from .resource_router import InvocationRequestRouter +from .response_enricher import InvocationResponseEnricher + +parse_request = InvocationRequestParser() +modify_request = CompositeHandler() +route_request = InvocationRequestRouter() +preprocess_request = CompositeHandler() +method_request_handler = MethodRequestHandler() +integration_request_handler = IntegrationRequestHandler() +integration_handler = IntegrationHandler() +integration_response_handler = IntegrationResponseHandler() +method_response_handler = MethodResponseHandler() +gateway_exception_handler = GatewayExceptionHandler() +api_key_validation_handler = ApiKeyValidationHandler() +response_enricher = InvocationResponseEnricher() +usage_counter = IntegrationUsageCounter(counter=invocation_counter) diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py new file mode 100644 index 0000000000000..7c6525eb0e7e1 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/analytics.py @@ -0,0 +1,48 @@ +import logging + +from localstack.http import Response +from localstack.utils.analytics.metrics import LabeledCounterMetric + +from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain +from ..context import RestApiInvocationContext + +LOG = logging.getLogger(__name__) + + +class IntegrationUsageCounter(RestApiGatewayHandler): + counter: LabeledCounterMetric + + def __init__(self, counter: LabeledCounterMetric): + self.counter = counter + + 
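+    # For illustration (a hedged sketch of how this handler reports usage): the label
+    # value combines the integration type and, for AWS integrations, the service parsed
+    # from the integration URI below, e.g.
+    #   counter.labels(invocation_type="AWS:lambda").increment()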
+    def __call__(
+        self,
+        chain: RestApiGatewayHandlerChain,
+        context: RestApiInvocationContext,
+        response: Response,
+    ):
+        if context.integration:
+            invocation_type = context.integration["type"]
+            if invocation_type == "AWS":
+                service_name = self._get_aws_integration_service(context.integration.get("uri"))
+                invocation_type = f"{invocation_type}:{service_name}"
+        else:
+            # if the invocation does not have an integration attached, it probably failed before routing the request,
+            # hence we should count it as a NOT_FOUND invocation
+            invocation_type = "NOT_FOUND"
+
+        self.counter.labels(invocation_type=invocation_type).increment()
+
+    @staticmethod
+    def _get_aws_integration_service(integration_uri: str) -> str:
+        if not integration_uri:
+            return "null"
+
+        # a service ARN has at least 5 colon-separated parts; guard against shorter
+        # values before accessing index 4 below
+        if len(split_arn := integration_uri.split(":", maxsplit=5)) < 5:
+            return "null"
+
+        service = split_arn[4]
+        # the URI can also contain some .-api kind of route like `execute-api` or `appsync-api`
+        # we need to make sure we do not pass the full value back
+        service = service.split(".")[-1]
+        return service
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/api_key_validation.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/api_key_validation.py
new file mode 100644
index 0000000000000..ba8ada9769f17
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/api_key_validation.py
@@ -0,0 +1,113 @@
+import logging
+from typing import Optional
+
+from localstack.aws.api.apigateway import ApiKey, ApiKeySourceType, RestApi
+from localstack.http import Response
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import InvocationRequest, RestApiInvocationContext
+from ..gateway_response import InvalidAPIKeyError
+from ..moto_helpers import get_api_key, get_usage_plan_keys, get_usage_plans
+from ..variables import ContextVarsIdentity
+
+LOG = logging.getLogger(__name__)
+
+
+class ApiKeyValidationHandler(RestApiGatewayHandler):
+    """
+    Handles API key validation.
+    If an API key is required, we will validate that a usage plan associated with that stage
+    has a usage plan key with the corresponding value.
+    """
+
+    # TODO We currently do not support rate limiting or quota limits. As such we are not raising any related Exception
+
+    def __call__(
+        self,
+        chain: RestApiGatewayHandlerChain,
+        context: RestApiInvocationContext,
+        response: Response,
+    ):
+        method = context.resource_method
+        request = context.invocation_request
+        rest_api = context.deployment.rest_api.rest_api
+
+        # If an api key is not required by the method, we can exit the handler
+        if not method.get("apiKeyRequired"):
+            return
+
+        identity = context.context_variables.get("identity")
+
+        # Look for the api key value in the request.
If it is not found, raise an exception
+        if not (api_key_value := self.get_request_api_key(rest_api, request, identity)):
+            LOG.debug("API Key is empty")
+            raise InvalidAPIKeyError("Forbidden")
+
+        # Get the validated key; if none is found, raise an exception
+        if not (validated_key := self.validate_api_key(api_key_value, context)):
+            LOG.debug("Provided API Key is not valid")
+            raise InvalidAPIKeyError("Forbidden")
+
+        # Update the context's identity with the key value and id
+        if not identity.get("apiKey"):
+            LOG.debug("Updating $context.identity.apiKey='%s'", validated_key["value"])
+            identity["apiKey"] = validated_key["value"]
+
+        LOG.debug("Updating $context.identity.apiKeyId='%s'", validated_key["id"])
+        identity["apiKeyId"] = validated_key["id"]
+
+    def validate_api_key(
+        self, api_key_value: str, context: RestApiInvocationContext
+    ) -> Optional[ApiKey]:
+        api_id = context.api_id
+        stage = context.stage
+        account_id = context.account_id
+        region = context.region
+
+        # Get usage plans from the store
+        usage_plans = get_usage_plans(account_id=account_id, region_name=region)
+
+        # Loop through usage plans and keep the ids of the plans associated with the deployment stage
+        usage_plan_ids = []
+        for usage_plan in usage_plans:
+            api_stages = usage_plan.get("apiStages", [])
+            usage_plan_ids.extend(
+                usage_plan.get("id")
+                for api_stage in api_stages
+                if (api_stage.get("stage") == stage and api_stage.get("apiId") == api_id)
+            )
+        if not usage_plan_ids:
+            LOG.debug("No associated usage plans found for stage '%s'", stage)
+            return
+
+        # Loop through the plans associated with the stage and find a key with a matching value
+        for usage_plan_id in usage_plan_ids:
+            usage_plan_keys = get_usage_plan_keys(
+                usage_plan_id=usage_plan_id, account_id=account_id, region_name=region
+            )
+            for key in usage_plan_keys:
+                if key["value"] == api_key_value:
+                    api_key = get_api_key(
+                        api_key_id=key["id"], account_id=account_id, region_name=region
+                    )
+                    LOG.debug("Found Api Key '%s'", api_key["id"])
+                    return api_key if api_key["enabled"] else None
+
+    def get_request_api_key(
+        self, rest_api: RestApi, request: InvocationRequest, identity: ContextVarsIdentity
+    ) -> Optional[str]:
+        """https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-api-key-source.html
+        The source of the API key for metering requests according to a usage plan.
+        Valid values are:
+        - HEADER to read the API key from the X-API-Key header of a request.
+        - AUTHORIZER to read the API key from the Context Variables.
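+
+        For illustration (a hedged sketch with hypothetical values): with the default
+        HEADER source, the client sends the key with the request, e.g.
+            curl -H "x-api-key: my-key-value" https://<api-id>.execute-api.<region>.amazonaws.com/<stage>/<path>
+        and the value is then matched against the usage plan keys of the stage.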
+ """ + match api_key_source := rest_api.get("apiKeySource"): + case ApiKeySourceType.HEADER: + LOG.debug("Looking for api key in header 'X-API-Key'") + return request.get("headers", {}).get("X-API-Key") + case ApiKeySourceType.AUTHORIZER: + LOG.debug("Looking for api key in Identity Context") + return identity.get("apiKey") + case _: + LOG.debug("Api Key Source is not valid: '%s'", api_key_source) diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/gateway_exception.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/gateway_exception.py new file mode 100644 index 0000000000000..174b2cf8c1bc2 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/gateway_exception.py @@ -0,0 +1,98 @@ +import json +import logging + +from rolo import Response +from werkzeug.datastructures import Headers + +from localstack.constants import APPLICATION_JSON +from localstack.services.apigateway.next_gen.execute_api.api import ( + RestApiGatewayExceptionHandler, + RestApiGatewayHandlerChain, +) +from localstack.services.apigateway.next_gen.execute_api.context import RestApiInvocationContext +from localstack.services.apigateway.next_gen.execute_api.gateway_response import ( + AccessDeniedError, + BaseGatewayException, + get_gateway_response_or_default, +) +from localstack.services.apigateway.next_gen.execute_api.variables import ( + GatewayResponseContextVarsError, +) + +LOG = logging.getLogger(__name__) + + +class GatewayExceptionHandler(RestApiGatewayExceptionHandler): + """ + Exception handler that serializes the Gateway Exceptions into Gateway Responses + """ + + def __call__( + self, + chain: RestApiGatewayHandlerChain, + exception: Exception, + context: RestApiInvocationContext, + response: Response, + ): + if not isinstance(exception, BaseGatewayException): + LOG.warning( + "Non Gateway Exception raised: %s", + exception, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + response.update_from( + Response(response=f"Error in apigateway invocation: {exception}", status="500") + ) + return + + LOG.info("Error raised during invocation: %s", exception.type) + self.set_error_context(exception, context) + error = self.create_exception_response(exception, context) + if error: + response.update_from(error) + + @staticmethod + def set_error_context(exception: BaseGatewayException, context: RestApiInvocationContext): + context.context_variables["error"] = GatewayResponseContextVarsError( + message=exception.message, + messageString=exception.message, + responseType=exception.type, + validationErrorString="", # TODO + ) + + def create_exception_response( + self, exception: BaseGatewayException, context: RestApiInvocationContext + ): + gateway_response = get_gateway_response_or_default( + exception.type, context.deployment.rest_api.gateway_responses + ) + + content = self._build_response_content(exception) + + headers = self._build_response_headers(exception) + + status_code = gateway_response.get("statusCode") + if not status_code: + status_code = exception.status_code or 500 + + response = Response(response=content, headers=headers, status=status_code) + return response + + @staticmethod + def _build_response_content(exception: BaseGatewayException) -> str: + # TODO apply responseTemplates to the content. 
We should also handle the default simply by managing the default + # template body `{"message":$context.error.messageString}` + + # TODO: remove this workaround by properly managing the responseTemplate for UnauthorizedError + # on the CRUD level, it returns the same template as all other errors but in reality the message field is + # capitalized + if isinstance(exception, AccessDeniedError): + return json.dumps({"Message": exception.message}, separators=(",", ":")) + + return json.dumps({"message": exception.message}) + + @staticmethod + def _build_response_headers(exception: BaseGatewayException) -> dict: + # TODO apply responseParameters to the headers and get content-type from the gateway_response + headers = Headers({"Content-Type": APPLICATION_JSON, "x-amzn-ErrorType": exception.code}) + return headers diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration.py new file mode 100644 index 0000000000000..a05e87e201cd4 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration.py @@ -0,0 +1,33 @@ +import logging + +from localstack.http import Response + +from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain +from ..context import EndpointResponse, RestApiInvocationContext +from ..integrations import REST_API_INTEGRATIONS + +LOG = logging.getLogger(__name__) + + +class IntegrationHandler(RestApiGatewayHandler): + def __call__( + self, + chain: RestApiGatewayHandlerChain, + context: RestApiInvocationContext, + response: Response, + ): + integration_type = context.integration["type"] + is_proxy = "PROXY" in integration_type + + integration = REST_API_INTEGRATIONS.get(integration_type) + + if not integration: + # this should not happen, as we validated the type in the provider + raise NotImplementedError( + f"This integration type is not yet supported: {integration_type}" + ) + + endpoint_response: EndpointResponse = integration.invoke(context) + context.endpoint_response = endpoint_response + if is_proxy: + context.invocation_response = endpoint_response diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_request.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_request.py new file mode 100644 index 0000000000000..b9cf68b1ab006 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_request.py @@ -0,0 +1,349 @@ +import base64 +import logging +from http import HTTPMethod + +from werkzeug.datastructures import Headers + +from localstack.aws.api.apigateway import ContentHandlingStrategy, Integration, IntegrationType +from localstack.constants import APPLICATION_JSON +from localstack.http import Request, Response +from localstack.utils.collections import merge_recursive +from localstack.utils.strings import to_bytes, to_str + +from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain +from ..context import IntegrationRequest, InvocationRequest, RestApiInvocationContext +from ..gateway_response import InternalServerError, UnsupportedMediaTypeError +from ..header_utils import drop_headers, set_default_headers +from ..helpers import mime_type_matches_binary_media_types, render_integration_uri +from ..parameters_mapping import ParametersMapper, RequestDataMapping +from ..template_mapping import ( + ApiGatewayVtlTemplate, + MappingTemplateInput, 
+    MappingTemplateParams,
+    MappingTemplateVariables,
+)
+from ..variables import ContextVariableOverrides, ContextVarsRequestOverride
+
+LOG = logging.getLogger(__name__)
+
+# Illegal headers to include in transformation
+ILLEGAL_INTEGRATION_REQUESTS_COMMON = [
+    "content-length",
+    "transfer-encoding",
+    "x-amzn-trace-id",
+    "X-Amzn-Apigateway-Api-Id",
+]
+ILLEGAL_INTEGRATION_REQUESTS_AWS = [
+    *ILLEGAL_INTEGRATION_REQUESTS_COMMON,
+    "authorization",
+    "connection",
+    "expect",
+    "proxy-authenticate",
+    "te",
+]
+
+# These are dropped after the template overrides are applied; they will never make it to the requests.
+DROPPED_FROM_INTEGRATION_REQUESTS_COMMON = ["Expect", "Proxy-Authenticate", "TE"]
+DROPPED_FROM_INTEGRATION_REQUESTS_AWS = [*DROPPED_FROM_INTEGRATION_REQUESTS_COMMON, "Referer"]
+DROPPED_FROM_INTEGRATION_REQUESTS_HTTP = [*DROPPED_FROM_INTEGRATION_REQUESTS_COMMON, "Via"]
+
+# Default headers
+DEFAULT_REQUEST_HEADERS = {"Accept": APPLICATION_JSON, "Connection": "keep-alive"}
+
+
+class PassthroughBehavior(str):
+    # TODO maybe this class should be moved where it can also be used for validation in
+    # the provider when we switch out of moto
+    WHEN_NO_MATCH = "WHEN_NO_MATCH"
+    WHEN_NO_TEMPLATES = "WHEN_NO_TEMPLATES"
+    NEVER = "NEVER"
+
+
+class IntegrationRequestHandler(RestApiGatewayHandler):
+    """
+    This class will take care of the Integration Request part, which is mostly linked to template mapping
+    See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-integration-settings-integration-request.html
+    """
+
+    def __init__(self):
+        self._param_mapper = ParametersMapper()
+        self._vtl_template = ApiGatewayVtlTemplate()
+
+    def __call__(
+        self,
+        chain: RestApiGatewayHandlerChain,
+        context: RestApiInvocationContext,
+        response: Response,
+    ):
+        integration: Integration = context.integration
+        integration_type = integration["type"]
+
+        integration_request_parameters = integration["requestParameters"] or {}
+        request_data_mapping = self.get_integration_request_data(
+            context, integration_request_parameters
+        )
+        path_parameters = request_data_mapping["path"]
+
+        if integration_type in (IntegrationType.AWS_PROXY, IntegrationType.HTTP_PROXY):
+            # `PROXY` types cannot use integration mapping templates; they pass most of the data straight through
+            # We make a copy to avoid modifying the invocation headers and keep a cleaner history
+            headers = context.invocation_request["headers"].copy()
+            query_string_parameters: dict[str, list[str]] = context.invocation_request[
+                "multi_value_query_string_parameters"
+            ]
+            body = context.invocation_request["body"]
+
+            # HTTP_PROXY still makes use of the request data mappings and merges them with the invocation request;
+            # this is undocumented but validated behavior
+            if integration_type == IntegrationType.HTTP_PROXY:
+                # These headers won't be passed through by default from the invocation.
+                # They can however be added through request mappings.
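+                # For illustration (a hedged sketch with a hypothetical mapping): a request
+                # parameter such as
+                #   "integration.request.header.X-Tenant": "method.request.header.Accept"
+                # ends up in request_data_mapping["header"] and is merged below, even though
+                # Host and Content-Encoding are dropped first.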
+                drop_headers(headers, ["Host", "Content-Encoding"])
+                headers.update(request_data_mapping["header"])
+
+                query_string_parameters = self._merge_http_proxy_query_string(
+                    query_string_parameters, request_data_mapping["querystring"]
+                )
+
+            else:
+                self._set_proxy_headers(headers, context.request)
+                # AWS_PROXY does not allow URI path rendering
+                # TODO: verify this
+                path_parameters = {}
+
+        else:
+            # find request template to raise UnsupportedMediaTypeError early
+            request_template = self.get_request_template(
+                integration=integration, request=context.invocation_request
+            )
+
+            converted_body = self.convert_body(context)
+
+            body, mapped_overrides = self.render_request_template_mapping(
+                context=context, body=converted_body, template=request_template
+            )
+            # Update the context with the returned mapped overrides
+            context.context_variable_overrides = mapped_overrides
+            # mutate the ContextVariables with the requestOverride result, as we copy the context when rendering the
+            # template to avoid mutation on other fields
+            request_override: ContextVarsRequestOverride = mapped_overrides.get(
+                "requestOverride", {}
+            )
+            # TODO: log every override that happens afterwards (in a loop on `request_override`)
+            merge_recursive(request_override, request_data_mapping, overwrite=True)
+
+            headers = Headers(request_data_mapping["header"])
+            query_string_parameters = request_data_mapping["querystring"]
+
+        # Some headers can't be modified by parameter mappings or mapping templates.
+        # AWS will raise an error if those are present, even for AWS_PROXY, where it does not apply them.
+        if header_mappings := request_data_mapping["header"]:
+            self._validate_headers_mapping(header_mappings, integration_type)
+
+        self._apply_header_transforms(headers, integration_type, context)
+
+        # looks like the stageVariables rendering part is done in the Integration part in AWS,
+        # but we can avoid duplication by doing it here for now
+        # TODO: if the integration is of AWS Lambda type and the Lambda is in another account, we cannot render
Work on that special case later (we can add a quick check for the URI region and set the + # stage variables to an empty dict) + rendered_integration_uri = render_integration_uri( + uri=integration["uri"], + path_parameters=path_parameters, + stage_variables=context.stage_variables, + ) + + # if the integration method is defined and is not ANY, we can use it for the integration + if not (integration_method := integration["httpMethod"]) or integration_method == "ANY": + # otherwise, fallback to the request's method + integration_method = context.invocation_request["http_method"] + + integration_request = IntegrationRequest( + http_method=integration_method, + uri=rendered_integration_uri, + query_string_parameters=query_string_parameters, + headers=headers, + body=body, + ) + + context.integration_request = integration_request + + def get_integration_request_data( + self, context: RestApiInvocationContext, request_parameters: dict[str, str] + ) -> RequestDataMapping: + return self._param_mapper.map_integration_request( + request_parameters=request_parameters, + invocation_request=context.invocation_request, + context_variables=context.context_variables, + stage_variables=context.stage_variables, + ) + + def render_request_template_mapping( + self, + context: RestApiInvocationContext, + body: str | bytes, + template: str, + ) -> tuple[bytes, ContextVariableOverrides]: + request: InvocationRequest = context.invocation_request + + if not template: + return to_bytes(body), context.context_variable_overrides + + try: + body_utf8 = to_str(body) + except UnicodeError: + raise InternalServerError("Internal server error") + + body, mapped_overrides = self._vtl_template.render_request( + template=template, + variables=MappingTemplateVariables( + context=context.context_variables, + stageVariables=context.stage_variables or {}, + input=MappingTemplateInput( + body=body_utf8, + params=MappingTemplateParams( + path=request.get("path_parameters"), + querystring=request.get("query_string_parameters", {}), + header=request.get("headers"), + ), + ), + ), + context_overrides=context.context_variable_overrides, + ) + return to_bytes(body), mapped_overrides + + @staticmethod + def get_request_template(integration: Integration, request: InvocationRequest) -> str: + """ + Attempts to return the request template. + Will raise UnsupportedMediaTypeError if there are no match according to passthrough behavior. 
+ """ + request_templates = integration.get("requestTemplates") or {} + passthrough_behavior = integration.get("passthroughBehavior") + # If content-type is not provided aws assumes application/json + content_type = request["headers"].get("Content-Type", APPLICATION_JSON) + # first look to for a template associated to the content-type, otherwise look for the $default template + request_template = request_templates.get(content_type) or request_templates.get("$default") + + if request_template or passthrough_behavior == PassthroughBehavior.WHEN_NO_MATCH: + return request_template + + match passthrough_behavior: + case PassthroughBehavior.NEVER: + LOG.debug( + "No request template found for '%s' and passthrough behavior set to NEVER", + content_type, + ) + raise UnsupportedMediaTypeError("Unsupported Media Type") + case PassthroughBehavior.WHEN_NO_TEMPLATES: + if request_templates: + LOG.debug( + "No request template found for '%s' and passthrough behavior set to WHEN_NO_TEMPLATES", + content_type, + ) + raise UnsupportedMediaTypeError("Unsupported Media Type") + case _: + LOG.debug("Unknown passthrough behavior: '%s'", passthrough_behavior) + + return request_template + + @staticmethod + def convert_body(context: RestApiInvocationContext) -> bytes | str: + """ + https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-payload-encodings.html + https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-payload-encodings-workflow.html + :param context: + :return: the body, either as is, or converted depending on the table in the second link + """ + request: InvocationRequest = context.invocation_request + body = request["body"] + + is_binary_request = mime_type_matches_binary_media_types( + mime_type=request["headers"].get("Content-Type"), + binary_media_types=context.deployment.rest_api.rest_api.get("binaryMediaTypes", []), + ) + content_handling = context.integration.get("contentHandling") + if is_binary_request: + if content_handling and content_handling == ContentHandlingStrategy.CONVERT_TO_TEXT: + body = base64.b64encode(body) + # if the content handling is not defined, or CONVERT_TO_BINARY, we do not touch the body and leave it as + # proper binary + else: + if not content_handling or content_handling == ContentHandlingStrategy.CONVERT_TO_TEXT: + body = body.decode(encoding="UTF-8", errors="replace") + else: + # it means we have CONVERT_TO_BINARY, so we need to try to decode the base64 string + try: + body = base64.b64decode(body) + except ValueError: + raise InternalServerError("Internal server error") + + return body + + @staticmethod + def _merge_http_proxy_query_string( + query_string_parameters: dict[str, list[str]], + mapped_query_string: dict[str, str | list[str]], + ): + new_query_string_parameters = {k: v.copy() for k, v in query_string_parameters.items()} + for param, value in mapped_query_string.items(): + if existing := new_query_string_parameters.get(param): + if isinstance(value, list): + existing.extend(value) + else: + existing.append(value) + else: + new_query_string_parameters[param] = value + + return new_query_string_parameters + + @staticmethod + def _set_proxy_headers(headers: Headers, request: Request): + headers.set("X-Forwarded-For", request.remote_addr) + headers.set("X-Forwarded-Port", request.environ.get("SERVER_PORT")) + headers.set( + "X-Forwarded-Proto", + request.environ.get("SERVER_PROTOCOL", "").split("/")[0], + ) + + @staticmethod + def _apply_header_transforms( + headers: Headers, integration_type: IntegrationType, context: 
RestApiInvocationContext + ): + # Dropping matching headers for the provided integration type + match integration_type: + case IntegrationType.AWS: + drop_headers(headers, DROPPED_FROM_INTEGRATION_REQUESTS_AWS) + case IntegrationType.HTTP | IntegrationType.HTTP_PROXY: + drop_headers(headers, DROPPED_FROM_INTEGRATION_REQUESTS_HTTP) + case _: + drop_headers(headers, DROPPED_FROM_INTEGRATION_REQUESTS_COMMON) + + # Adding default headers to the requests headers + default_headers = { + **DEFAULT_REQUEST_HEADERS, + "User-Agent": f"AmazonAPIGateway_{context.api_id}", + } + if ( + content_type := context.request.headers.get("Content-Type") + ) and context.request.method not in {HTTPMethod.OPTIONS, HTTPMethod.GET, HTTPMethod.HEAD}: + default_headers["Content-Type"] = content_type + + set_default_headers(headers, default_headers) + headers.set("X-Amzn-Trace-Id", context.trace_id) + if integration_type not in (IntegrationType.AWS_PROXY, IntegrationType.AWS): + headers.set("X-Amzn-Apigateway-Api-Id", context.api_id) + + @staticmethod + def _validate_headers_mapping(headers: dict[str, str], integration_type: IntegrationType): + """Validates and raises an error when attempting to set an illegal header""" + to_validate = ILLEGAL_INTEGRATION_REQUESTS_COMMON + if integration_type in {IntegrationType.AWS, IntegrationType.AWS_PROXY}: + to_validate = ILLEGAL_INTEGRATION_REQUESTS_AWS + + for header in headers: + if header.lower() in to_validate: + LOG.debug( + "Execution failed due to configuration error: %s header already present", header + ) + raise InternalServerError("Internal server error") diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py new file mode 100644 index 0000000000000..2dccb39c74a6b --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/integration_response.py @@ -0,0 +1,312 @@ +import base64 +import json +import logging +import re + +from werkzeug.datastructures import Headers + +from localstack.aws.api.apigateway import ( + ContentHandlingStrategy, + Integration, + IntegrationResponse, + IntegrationType, +) +from localstack.constants import APPLICATION_JSON +from localstack.http import Response +from localstack.utils.strings import to_bytes + +from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain +from ..context import ( + EndpointResponse, + InvocationRequest, + InvocationResponse, + RestApiInvocationContext, +) +from ..gateway_response import ApiConfigurationError, InternalServerError +from ..helpers import mime_type_matches_binary_media_types +from ..parameters_mapping import ParametersMapper, ResponseDataMapping +from ..template_mapping import ( + ApiGatewayVtlTemplate, + MappingTemplateInput, + MappingTemplateParams, + MappingTemplateVariables, +) +from ..variables import ContextVarsResponseOverride + +LOG = logging.getLogger(__name__) + + +class IntegrationResponseHandler(RestApiGatewayHandler): + """ + This class will take care of the Integration Response part, which is mostly linked to template mapping + See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-integration-settings-integration-response.html + """ + + def __init__(self): + self._param_mapper = ParametersMapper() + self._vtl_template = ApiGatewayVtlTemplate() + + def __call__( + self, + chain: RestApiGatewayHandlerChain, + context: RestApiInvocationContext, + response: Response, + 
+    ):
+        # TODO: we should log the response coming in from the Integration, either in Integration or here.
+        #  before modification / after?
+        integration: Integration = context.integration
+        integration_type = integration["type"]
+
+        if integration_type in (IntegrationType.AWS_PROXY, IntegrationType.HTTP_PROXY):
+            # `PROXY` types cannot use integration response mapping templates
+            # TODO: verify assumptions against AWS
+            return
+
+        endpoint_response: EndpointResponse = context.endpoint_response
+        status_code = endpoint_response["status_code"]
+        body = endpoint_response["body"]
+
+        # we first need to find the right IntegrationResponse based on its selectionPattern, matched against
+        # the status code of the response (or the parsed Lambda errorMessage for non-proxy Lambda integrations)
+        if integration_type == IntegrationType.AWS and "lambda:path/" in integration["uri"]:
+            selection_value = self.parse_error_message_from_lambda(body)
+        else:
+            selection_value = str(status_code)
+
+        integration_response: IntegrationResponse = self.select_integration_response(
+            selection_value,
+            integration["integrationResponses"],
+        )
+
+        # we then need to apply the Integration Response parameters mapping, to only return select headers
+        response_parameters = integration_response.get("responseParameters") or {}
+        response_data_mapping = self.get_method_response_data(
+            context=context,
+            response=endpoint_response,
+            response_parameters=response_parameters,
+        )
+
+        # We then fetch the response template and apply the template mapping
+        response_template = self.get_response_template(
+            integration_response=integration_response, request=context.invocation_request
+        )
+        # binary support
+        converted_body = self.convert_body(
+            context,
+            body=body,
+            content_handling=integration_response.get("contentHandling"),
+        )
+
+        body, response_override = self.render_response_template_mapping(
+            context=context, template=response_template, body=converted_body
+        )
+
+        # We basically need to remove all headers and replace them with the mapping, then
+        # override them if there are overrides.
+        # The status code is straightforward:
by default, it is set by the integration response,
+        # unless there is an override
+        response_status_code = int(integration_response["statusCode"])
+        if response_status_override := response_override["status"]:
+            # maybe make a better error message format, same for the overrides for request too
+            LOG.debug("Overriding response status code: '%s'", response_status_override)
+            response_status_code = response_status_override
+
+        # Create a new headers object that we can manipulate before overriding the original response headers
+        response_headers = Headers(response_data_mapping.get("header"))
+        if header_override := response_override["header"]:
+            LOG.debug("Response header overrides: %s", header_override)
+            response_headers.update(header_override)
+
+        LOG.debug("Method response body after transformations: %s", body)
+        context.invocation_response = InvocationResponse(
+            body=body,
+            headers=response_headers,
+            status_code=response_status_code,
+        )
+
+    def get_method_response_data(
+        self,
+        context: RestApiInvocationContext,
+        response: EndpointResponse,
+        response_parameters: dict[str, str],
+    ) -> ResponseDataMapping:
+        return self._param_mapper.map_integration_response(
+            response_parameters=response_parameters,
+            integration_response=response,
+            context_variables=context.context_variables,
+            stage_variables=context.stage_variables,
+        )
+
+    @staticmethod
+    def select_integration_response(
+        selection_value: str, integration_responses: dict[str, IntegrationResponse]
+    ) -> IntegrationResponse:
+        if not integration_responses:
+            LOG.warning(
+                "Configuration error: No match for output mapping and no default output mapping configured. "
+                "Endpoint Response Status Code: %s",
+                selection_value,
+            )
+            raise ApiConfigurationError("Internal server error")
+
+        if select_by_pattern := [
+            response
+            for response in integration_responses.values()
+            if (selection_pattern := response.get("selectionPattern"))
+            and re.match(selection_pattern, selection_value)
+        ]:
+            selected_response = select_by_pattern[0]
+            if len(select_by_pattern) > 1:
+                LOG.warning(
+                    "Multiple integration responses matching '%s' status code. Choosing '%s' (first).",
+                    selection_value,
+                    selected_response["statusCode"],
+                )
+        else:
+            # choose the default return code
+            # TODO: the provider should check this, as we should only have one default with no value in selectionPattern
+            default_responses = [
+                response
+                for response in integration_responses.values()
+                if not response.get("selectionPattern")
+            ]
+            if not default_responses:
+                # TODO: verify log message when the selection_value is a lambda errorMessage
+                LOG.warning(
+                    "Configuration error: No match for output mapping and no default output mapping configured. "
+                    "Endpoint Response Status Code: %s",
+                    selection_value,
+                )
+                raise ApiConfigurationError("Internal server error")
+
+            selected_response = default_responses[0]
+            if len(default_responses) > 1:
+                LOG.warning(
+                    "Multiple default integration responses. Choosing %s (first).",
+                    selected_response["statusCode"],
+                )
+        return selected_response
+
+    @staticmethod
+    def get_response_template(
+        integration_response: IntegrationResponse, request: InvocationRequest
+    ) -> str:
+        """The Response Template is selected from the response templates.
+        If there are no templates defined, the body will pass through.
+        API Gateway looks at the invocation request `Accept` header and defaults to `application/json`.
+        If no template is matched, API Gateway will use the "first" existing template and use it as default.
+        https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html#transforming-request-response-body
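+
+        For illustration (a hedged sketch): with responseTemplates
+            {"application/json": tpl_a, "application/xml": tpl_b}
+        an invocation Accept header of application/xml selects tpl_b, while an unmatched
+        value such as text/plain falls back to the application/json template tpl_a.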
+        """
+        if not (response_templates := integration_response["responseTemplates"]):
+            return ""
+
+        # The invocation request Accept header is used to find the right response template
+        accepts = request["headers"].getlist("accept")
+        if accepts and (template := response_templates.get(accepts[-1])):
+            return template
+        # TODO aws seemed to favor application/json as default when unmatched, regardless of "first"
+        if template := response_templates.get(APPLICATION_JSON):
+            return template
+        # TODO What is "first"? do we need to keep an order as to when they were added/modified?
+        template = next(iter(response_templates.values()))
+        LOG.warning("No templates were matched, using template: %s", template)
+        return template
+
+    @staticmethod
+    def convert_body(
+        context: RestApiInvocationContext,
+        body: bytes,
+        content_handling: ContentHandlingStrategy | None,
+    ) -> bytes | str:
+        """
+        https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-payload-encodings.html
+        https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-payload-encodings-workflow.html
+        :param context: RestApiInvocationContext
+        :param body: the endpoint response body
+        :param content_handling: the contentHandling of the IntegrationResponse
+        :return: the body, either as is, or converted depending on the table in the second link
+        """
+
+        request: InvocationRequest = context.invocation_request
+        response: EndpointResponse = context.endpoint_response
+        binary_media_types = context.deployment.rest_api.rest_api.get("binaryMediaTypes", [])
+
+        is_binary_payload = mime_type_matches_binary_media_types(
+            mime_type=response["headers"].get("Content-Type"),
+            binary_media_types=binary_media_types,
+        )
+        is_binary_accept = mime_type_matches_binary_media_types(
+            mime_type=request["headers"].get("Accept"),
+            binary_media_types=binary_media_types,
+        )
+
+        if is_binary_payload:
+            if (
+                content_handling and content_handling == ContentHandlingStrategy.CONVERT_TO_TEXT
+            ) or (not content_handling and not is_binary_accept):
+                body = base64.b64encode(body)
+        else:
+            # this means the Payload is of type `Text` in AWS terms for the table
+            if (
+                content_handling and content_handling == ContentHandlingStrategy.CONVERT_TO_TEXT
+            ) or (not content_handling and not is_binary_accept):
+                body = body.decode(encoding="UTF-8", errors="replace")
+            else:
+                try:
+                    body = base64.b64decode(body)
+                except ValueError:
+                    raise InternalServerError("Internal server error")
+
+        return body
+
+    def render_response_template_mapping(
+        self, context: RestApiInvocationContext, template: str, body: bytes | str
+    ) -> tuple[bytes, ContextVarsResponseOverride]:
+        if not template:
+            return to_bytes(body), context.context_variable_overrides["responseOverride"]
+
+        # binary data can only pass through when there is no template;
+        # with a template present, the body must be text
+        if not isinstance(body, str):
+            # TODO: check, this might be ApiConfigurationError
+            raise InternalServerError("Internal server error")
+
+        body, response_override = self._vtl_template.render_response(
+            template=template,
+            variables=MappingTemplateVariables(
+                context=context.context_variables,
+                stageVariables=context.stage_variables or {},
+                input=MappingTemplateInput(
+                    body=body,
+                    params=MappingTemplateParams(
+                        path=context.invocation_request.get("path_parameters"),
+                        querystring=context.invocation_request.get("query_string_parameters", {}),
+                        header=context.invocation_request.get("headers",
{}),
+ ),
+ ),
+ ),
+ context_overrides=context.context_variable_overrides,
+ )
+
+ # AWS ignores the status if the override isn't an integer between 100 and 599
+ if (status := response_override["status"]) and not (
+ isinstance(status, int) and 100 <= status < 600
+ ):
+ response_override["status"] = 0
+ return to_bytes(body), response_override
+
+ @staticmethod
+ def parse_error_message_from_lambda(payload: bytes) -> str:
+ try:
+ lambda_response = json.loads(payload)
+ if not isinstance(lambda_response, dict):
+ return ""
+
+ # very weird case: for the AWS integration type, AWS will not return the error from Lambda, while it
+ # does for Kinesis and such. This Lambda-only behavior is concentrated in this method
+ if lambda_response.get("__type") == "AccessDeniedException":
+ raise InternalServerError("Internal server error")
+
+ return lambda_response.get("errorMessage", "")
+
+ except json.JSONDecodeError:
+ return ""
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_request.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_request.py
new file mode 100644
index 0000000000000..00a35129225b1
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_request.py
@@ -0,0 +1,147 @@
+import json
+import logging
+
+from jsonschema import ValidationError, validate
+
+from localstack.aws.api.apigateway import Method
+from localstack.constants import APPLICATION_JSON
+from localstack.http import Response
+from localstack.services.apigateway.helpers import EMPTY_MODEL, ModelResolver
+from localstack.services.apigateway.models import RestApiContainer
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import InvocationRequest, RestApiInvocationContext
+from ..gateway_response import BadRequestBodyError, BadRequestParametersError
+
+LOG = logging.getLogger(__name__)
+
+
+class MethodRequestHandler(RestApiGatewayHandler):
+ """
+ This class mostly takes care of request validation with Models
+ See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-method-settings-method-request.html
+ """
+
+ def __call__(
+ self,
+ chain: RestApiGatewayHandlerChain,
+ context: RestApiInvocationContext,
+ response: Response,
+ ):
+ self.validate_request(
+ context.resource_method,
+ context.deployment.rest_api,
+ context.invocation_request,
+ )
+
+ def validate_request(
+ self, method: Method, rest_api: RestApiContainer, request: InvocationRequest
+ ) -> None:
+ """
+ :raises BadRequestParametersError if the request is missing required parameters
+ :raises BadRequestBodyError if the request body is validated against a model and does not conform to it
+ :return: None
+ """
+
+ # check if there is a validator for the method
+ if not (request_validator_id := method.get("requestValidatorId") or "").strip():
+ return
+
+ # check if there is a validator with a matching id
+ if not (validator := rest_api.validators.get(request_validator_id)):
+ # TODO Should we raise an exception instead? 
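+ # For reference, a validator entry in the container typically has this shape (illustrative values only):
+ # {"id": "abc123", "name": "params-only", "validateRequestBody": False, "validateRequestParameters": True}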
+ LOG.exception("No validator were found with matching id: '%s'", request_validator_id) + return + + if self.should_validate_request(validator) and ( + missing_parameters := self._get_missing_required_parameters(method, request) + ): + message = f"Missing required request parameters: [{', '.join(missing_parameters)}]" + raise BadRequestParametersError(message=message) + + if self.should_validate_body(validator) and not self._is_body_valid( + method, rest_api, request + ): + raise BadRequestBodyError(message="Invalid request body") + + return + + @staticmethod + def _is_body_valid( + method: Method, rest_api: RestApiContainer, request: InvocationRequest + ) -> bool: + # if there's no model to validate the body, use the Empty model + # https://docs.aws.amazon.com/cdk/api/v1/docs/@aws-cdk_aws-apigateway.EmptyModel.html + if not (request_models := method.get("requestModels")): + model_name = EMPTY_MODEL + else: + model_name = request_models.get( + APPLICATION_JSON, request_models.get("$default", EMPTY_MODEL) + ) + + model_resolver = ModelResolver( + rest_api_container=rest_api, + model_name=model_name, + ) + + # try to get the resolved model first + resolved_schema = model_resolver.get_resolved_model() + if not resolved_schema: + LOG.exception( + "An exception occurred while trying to validate the request: could not resolve the model '%s'", + model_name, + ) + return False + + try: + # if the body is empty, replace it with an empty JSON body + validate( + instance=json.loads(request.get("body") or "{}"), + schema=resolved_schema, + ) + return True + except ValidationError as e: + LOG.debug("failed to validate request body %s", e) + return False + except json.JSONDecodeError as e: + LOG.debug("failed to validate request body, request data is not valid JSON %s", e) + return False + + @staticmethod + def _get_missing_required_parameters(method: Method, request: InvocationRequest) -> list[str]: + missing_params = [] + if not (request_parameters := method.get("requestParameters")): + return missing_params + + case_sensitive_headers = list(request.get("headers").keys()) + + for request_parameter, required in sorted(request_parameters.items()): + if not required: + continue + + param_type, param_value = request_parameter.removeprefix("method.request.").split(".") + match param_type: + case "header": + is_missing = param_value not in case_sensitive_headers + case "path": + path = request.get("path_parameters", "") + is_missing = param_value not in path + case "querystring": + is_missing = param_value not in request.get("query_string_parameters", []) + case _: + # This shouldn't happen + LOG.debug("Found an invalid request parameter: %s", request_parameter) + is_missing = False + + if is_missing: + missing_params.append(param_value) + + return missing_params + + @staticmethod + def should_validate_body(validator): + return validator.get("validateRequestBody") + + @staticmethod + def should_validate_request(validator): + return validator.get("validateRequestParameters") diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_response.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_response.py new file mode 100644 index 0000000000000..004f99b98a4da --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/method_response.py @@ -0,0 +1,96 @@ +import logging + +from werkzeug.datastructures import Headers + +from localstack.aws.api.apigateway import IntegrationType +from localstack.http 
import Response
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import InvocationResponse, RestApiInvocationContext
+from ..header_utils import drop_headers
+
+LOG = logging.getLogger(__name__)
+
+# These headers are dropped after the template overrides have been applied; they will never make it to the
+# method response.
+DROPPED_FROM_INTEGRATION_RESPONSES_COMMON = ["Transfer-Encoding"]
+DROPPED_FROM_INTEGRATION_RESPONSES_HTTP_PROXY = [
+ *DROPPED_FROM_INTEGRATION_RESPONSES_COMMON,
+ "Content-Encoding",
+ "Via",
+]
+
+
+# Headers that will be remapped
+REMAPPED_FROM_INTEGRATION_RESPONSE_COMMON = [
+ "Connection",
+ "Content-Length",
+ "Date",
+ "Server",
+]
+REMAPPED_FROM_INTEGRATION_RESPONSE_NON_PROXY = [
+ *REMAPPED_FROM_INTEGRATION_RESPONSE_COMMON,
+ "Authorization",
+ "Content-MD5",
+ "Expect",
+ "Host",
+ "Max-Forwards",
+ "Proxy-Authenticate",
+ "Trailer",
+ "Upgrade",
+ "User-Agent",
+ "WWW-Authenticate",
+]
+
+
+class MethodResponseHandler(RestApiGatewayHandler):
+ """
+ Last handler of the chain, responsible for serializing the Response object
+ """
+
+ def __call__(
+ self,
+ chain: RestApiGatewayHandlerChain,
+ context: RestApiInvocationContext,
+ response: Response,
+ ):
+ invocation_response = context.invocation_response
+ integration_type = context.integration["type"]
+ headers = invocation_response["headers"]
+
+ self._transform_headers(headers, integration_type)
+
+ method_response = self.serialize_invocation_response(invocation_response)
+ response.update_from(method_response)
+
+ @staticmethod
+ def serialize_invocation_response(invocation_response: InvocationResponse) -> Response:
+ is_content_type_set = invocation_response["headers"].get("content-type") is not None
+ response = Response(
+ response=invocation_response["body"],
+ headers=invocation_response["headers"],
+ status=invocation_response["status_code"],
+ )
+ if not is_content_type_set:
+ # Response sets a content-type by default; AWS does not, so this default is always removed
+ response.headers.remove("content-type")
+ return response
+
+ @staticmethod
+ def _transform_headers(headers: Headers, integration_type: IntegrationType):
+ """Remaps the provided headers in place, adding new `x-amzn-Remapped-` headers and dropping the
+ original headers"""
+ to_remap = REMAPPED_FROM_INTEGRATION_RESPONSE_COMMON
+ to_drop = DROPPED_FROM_INTEGRATION_RESPONSES_COMMON
+
+ match integration_type:
+ case IntegrationType.HTTP | IntegrationType.AWS:
+ to_remap = REMAPPED_FROM_INTEGRATION_RESPONSE_NON_PROXY
+ case IntegrationType.HTTP_PROXY:
+ to_drop = DROPPED_FROM_INTEGRATION_RESPONSES_HTTP_PROXY
+
+ for header in to_remap:
+ if headers.get(header):
+ LOG.debug("Remapping header: %s", header)
+ remapped = headers.pop(header)
+ headers[f"x-amzn-Remapped-{header}"] = remapped
+
+ drop_headers(headers, to_drop)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/parse.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/parse.py
new file mode 100644
index 0000000000000..3da898bf8845e
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/parse.py
@@ -0,0 +1,204 @@
+import datetime
+import logging
+import re
+from collections import defaultdict
+from typing import Optional
+from urllib.parse import urlparse
+
+from rolo.request import restore_payload
+from werkzeug.datastructures import Headers, MultiDict
+
+from localstack.http import Response
+from localstack.services.apigateway.helpers import REQUEST_TIME_DATE_FORMAT
+from localstack.utils.strings import long_uid, short_uid
+from localstack.utils.time import timestamp
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import InvocationRequest, RestApiInvocationContext
+from ..header_utils import should_drop_header_from_invocation
+from ..helpers import generate_trace_id, generate_trace_parent, parse_trace_id
+from ..variables import (
+ ContextVariableOverrides,
+ ContextVariables,
+ ContextVarsIdentity,
+ ContextVarsRequestOverride,
+ ContextVarsResponseOverride,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class InvocationRequestParser(RestApiGatewayHandler):
+ def __call__(
+ self,
+ chain: RestApiGatewayHandlerChain,
+ context: RestApiInvocationContext,
+ response: Response,
+ ):
+ context.account_id = context.deployment.account_id
+ context.region = context.deployment.region
+ self.parse_and_enrich(context)
+
+ def parse_and_enrich(self, context: RestApiInvocationContext):
+ # first, create the InvocationRequest with the incoming request
+ context.invocation_request = self.create_invocation_request(context)
+ # then we can create the ContextVariables, used throughout the invocation as payload and to render authorizer
+ # payload, mapping templates and such. 
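+ # A truncated, illustrative example of the variables created below:
+ # {"apiId": "a1b2c3", "httpMethod": "GET", "stage": "dev", "requestId": "...", "identity": {...}, ...}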
+ context.context_variables = self.create_context_variables(context) + context.context_variable_overrides = ContextVariableOverrides( + requestOverride=ContextVarsRequestOverride(header={}, querystring={}, path={}), + responseOverride=ContextVarsResponseOverride(header={}, status=0), + ) + # TODO: maybe adjust the logging + LOG.debug("Initializing $context='%s'", context.context_variables) + # then populate the stage variables + context.stage_variables = self.get_stage_variables(context) + LOG.debug("Initializing $stageVariables='%s'", context.stage_variables) + + context.trace_id = self.populate_trace_id(context.request.headers) + + def create_invocation_request(self, context: RestApiInvocationContext) -> InvocationRequest: + request = context.request + params, multi_value_params = self._get_single_and_multi_values_from_multidict(request.args) + headers = self._get_invocation_headers(request.headers) + invocation_request = InvocationRequest( + http_method=request.method, + query_string_parameters=params, + multi_value_query_string_parameters=multi_value_params, + headers=headers, + body=restore_payload(request), + ) + self._enrich_with_raw_path(context, invocation_request) + + return invocation_request + + @staticmethod + def _enrich_with_raw_path( + context: RestApiInvocationContext, invocation_request: InvocationRequest + ): + # Base path is not URL-decoded, so we need to get the `RAW_URI` from the request + request = context.request + raw_uri = request.environ.get("RAW_URI") or request.path + + # if the request comes from the LocalStack only `_user_request_` route, we need to remove this prefix from the + # path, in order to properly route the request + if "_user_request_" in raw_uri: + # in this format, the stage is before `_user_request_`, so we don't need to remove it + raw_uri = raw_uri.partition("_user_request_")[2] + else: + if raw_uri.startswith("/_aws/execute-api"): + # the API can be cased in the path, so we need to ignore it to remove it + raw_uri = re.sub( + f"^/_aws/execute-api/{context.api_id}", + "", + raw_uri, + flags=re.IGNORECASE, + ) + + # remove the stage from the path, only replace the first occurrence + raw_uri = raw_uri.replace(f"/{context.stage}", "", 1) + + if raw_uri.startswith("//"): + # TODO: AWS validate this assumption + # if the RAW_URI starts with double slashes, `urlparse` will fail to decode it as path only + # it also means that we already only have the path, so we just need to remove the query string + raw_uri = raw_uri.split("?")[0] + raw_path = "/" + raw_uri.lstrip("/") + + else: + # we need to make sure we have a path here, sometimes RAW_URI can be a full URI (when proxied) + raw_path = raw_uri = urlparse(raw_uri).path + + invocation_request["path"] = raw_path + invocation_request["raw_path"] = raw_uri + + @staticmethod + def _get_single_and_multi_values_from_multidict( + multi_dict: MultiDict, + ) -> tuple[dict[str, str], dict[str, list[str]]]: + single_values = {} + multi_values = defaultdict(list) + + for key, value in multi_dict.items(multi=True): + multi_values[key].append(value) + # for the single value parameters, AWS only keeps the last value of the list + single_values[key] = value + + return single_values, dict(multi_values) + + @staticmethod + def _get_invocation_headers(headers: Headers) -> Headers: + invocation_headers = Headers() + for key, value in headers: + if should_drop_header_from_invocation(key): + LOG.debug("Dropping header from invocation request: '%s'", key) + continue + invocation_headers.add(key, value) + return 
invocation_headers + + @staticmethod + def create_context_variables(context: RestApiInvocationContext) -> ContextVariables: + invocation_request: InvocationRequest = context.invocation_request + domain_name = invocation_request["headers"].get("Host", "") + domain_prefix = domain_name.split(".")[0] + now = datetime.datetime.now() + + context_variables = ContextVariables( + accountId=context.account_id, + apiId=context.api_id, + deploymentId=context.deployment_id, + domainName=domain_name, + domainPrefix=domain_prefix, + extendedRequestId=short_uid(), # TODO: use snapshot tests to verify format + httpMethod=invocation_request["http_method"], + identity=ContextVarsIdentity( + accountId=None, + accessKey=None, + caller=None, + cognitoAuthenticationProvider=None, + cognitoAuthenticationType=None, + cognitoIdentityId=None, + cognitoIdentityPoolId=None, + principalOrgId=None, + sourceIp="127.0.0.1", # TODO: get the sourceIp from the Request + user=None, + userAgent=invocation_request["headers"].get("User-Agent"), + userArn=None, + ), + path=f"/{context.stage}{invocation_request['raw_path']}", + protocol="HTTP/1.1", + requestId=long_uid(), + requestTime=timestamp(time=now, format=REQUEST_TIME_DATE_FORMAT), + requestTimeEpoch=int(now.timestamp() * 1000), + stage=context.stage, + ) + if context.is_canary is not None: + context_variables["isCanaryRequest"] = context.is_canary + + return context_variables + + @staticmethod + def get_stage_variables(context: RestApiInvocationContext) -> Optional[dict[str, str]]: + stage_variables = context.stage_configuration.get("variables") + if context.is_canary: + overrides = ( + context.stage_configuration["canarySettings"].get("stageVariableOverrides") or {} + ) + stage_variables = (stage_variables or {}) | overrides + + if not stage_variables: + return None + + return stage_variables + + @staticmethod + def populate_trace_id(headers: Headers) -> str: + incoming_trace = parse_trace_id(headers.get("x-amzn-trace-id", "")) + # parse_trace_id always return capitalized keys + + trace = incoming_trace.get("Root", generate_trace_id()) + incoming_parent = incoming_trace.get("Parent") + parent = incoming_parent or generate_trace_parent() + sampled = incoming_trace.get("Sampled", "1" if incoming_parent else "0") + # TODO: lineage? 
not sure what it relates to
+ return f"Root={trace};Parent={parent};Sampled={sampled}"
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/resource_router.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/resource_router.py
new file mode 100644
index 0000000000000..4dfe6f95dbcbe
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/resource_router.py
@@ -0,0 +1,170 @@
+import logging
+from functools import cache
+from http import HTTPMethod
+from typing import Iterable
+
+from werkzeug.exceptions import MethodNotAllowed, NotFound
+from werkzeug.routing import Map, MapAdapter, Rule
+
+from localstack.aws.api.apigateway import Resource
+from localstack.aws.protocol.routing import (
+ path_param_regex,
+ post_process_arg_name,
+ transform_path_params_to_rule_vars,
+)
+from localstack.http import Response
+from localstack.http.router import GreedyPathConverter
+from localstack.services.apigateway.models import RestApiDeployment
+
+from ..api import RestApiGatewayHandler, RestApiGatewayHandlerChain
+from ..context import RestApiInvocationContext
+from ..gateway_response import MissingAuthTokenError
+from ..variables import ContextVariables
+
+LOG = logging.getLogger(__name__)
+
+
+class ApiGatewayMethodRule(Rule):
+ """
+ Small extension to Werkzeug's Rule class which reverts unwanted assumptions made by Werkzeug.
+ Reverted assumptions:
+ - Werkzeug automatically matches HEAD requests to the corresponding GET request (i.e. Werkzeug's rule
+ automatically adds the HEAD HTTP method to a rule which should only match GET requests).
+ Added behavior:
+ - ANY is equivalent to the 7 HTTP methods listed below; we manually set them as the rule's methods.
+ """
+
+ def __init__(self, string: str, method: str, **kwargs) -> None:
+ super().__init__(string=string, methods=[method], **kwargs)
+
+ if method == "ANY":
+ self.methods = {
+ HTTPMethod.DELETE,
+ HTTPMethod.GET,
+ HTTPMethod.HEAD,
+ HTTPMethod.OPTIONS,
+ HTTPMethod.PATCH,
+ HTTPMethod.POST,
+ HTTPMethod.PUT,
+ }
+ else:
+ # Make sure Werkzeug's Rule does not add any other methods
+ # (e.g. the HEAD method even though the rule should only match GET)
+ self.methods = {method.upper()}
+
+
+class RestAPIResourceRouter:
+ """
+ A router implementation which abstracts the routing of an incoming REST API invocation to a specific
+ resource of the Deployment.
+ """
+
+ _map: Map
+
+ def __init__(self, deployment: RestApiDeployment):
+ self._resources = deployment.rest_api.resources
+ self._map = get_rule_map_for_resources(self._resources.values())
+
+ def match(self, context: RestApiInvocationContext) -> tuple[Resource, dict[str, str]]:
+ """
+ Matches the given request to the resource it targets (or raises an exception if no resource matches).
+
+ :param context:
+ :return: A tuple with the matched resource and the (already parsed) path params
+ :raises: MissingAuthTokenError; weird naming, but that is the default NotFound error for REST APIs
+ """
+
+ request = context.request
+ # bind the map to get the actual matcher
+ matcher: MapAdapter = self._map.bind(context.request.host)
+
+ # perform the matching
+ # trailing slashes are ignored in APIGW
+ path = context.invocation_request["path"].rstrip("/")
+ try:
+ rule, args = matcher.match(path, method=request.method, return_rule=True)
+ except (MethodNotAllowed, NotFound) as e:
+ # a MethodNotAllowed (405) exception is raised if a path matches, but the method does not. 
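+ # For example (illustrative): a resource defining only GET raises MethodNotAllowed for a POST on the
+ # same path, while a path matching no resource at all raises NotFound; both are surfaced to the client
+ # as the MissingAuthTokenError raised below.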
+ # Our router might handle this as a 404, validate with AWS. + LOG.warning( + "API Gateway: No resource or method was found for: %s %s", + request.method, + path, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + raise MissingAuthTokenError("Missing Authentication Token") from e + + # post process the arg keys and values + # - the path param keys need to be "un-sanitized", i.e. sanitized rule variable names need to be reverted + # - the path param values might still be url-encoded + args = {post_process_arg_name(k): v for k, v in args.items()} + + # extract the operation model from the rule + resource_id: str = rule.endpoint + resource = self._resources[resource_id] + + return resource, args + + +class InvocationRequestRouter(RestApiGatewayHandler): + def __call__( + self, + chain: RestApiGatewayHandlerChain, + context: RestApiInvocationContext, + response: Response, + ): + self.route_and_enrich(context) + + def route_and_enrich(self, context: RestApiInvocationContext): + router = self.get_router_for_deployment(context.deployment) + + resource, path_parameters = router.match(context) + resource: Resource + + context.invocation_request["path_parameters"] = path_parameters + context.resource = resource + + method = ( + resource["resourceMethods"].get(context.request.method) + or resource["resourceMethods"]["ANY"] + ) + context.resource_method = method + context.integration = method["methodIntegration"] + + self.update_context_variables_with_resource(context.context_variables, resource) + + @staticmethod + def update_context_variables_with_resource( + context_variables: ContextVariables, resource: Resource + ): + LOG.debug("Updating $context.resourcePath='%s'", resource["path"]) + context_variables["resourcePath"] = resource["path"] + LOG.debug("Updating $context.resourceId='%s'", resource["id"]) + context_variables["resourceId"] = resource["id"] + + @staticmethod + @cache + def get_router_for_deployment(deployment: RestApiDeployment) -> RestAPIResourceRouter: + return RestAPIResourceRouter(deployment) + + +def get_rule_map_for_resources(resources: Iterable[Resource]) -> Map: + rules = [] + for resource in resources: + for method, resource_method in resource.get("resourceMethods", {}).items(): + path = resource["path"] + # translate the requestUri to a Werkzeug rule string + rule_string = path_param_regex.sub(transform_path_params_to_rule_vars, path) + rules.append( + ApiGatewayMethodRule(string=rule_string, method=method, endpoint=resource["id"]) + ) # type: ignore + + return Map( + rules=rules, + # don't be strict about trailing slashes when matching + strict_slashes=False, + # we can't really use werkzeug's merge-slashes since it uses HTTP redirects to solve it + merge_slashes=False, + # get service-specific converters + converters={"path": GreedyPathConverter}, + ) diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/response_enricher.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/response_enricher.py new file mode 100644 index 0000000000000..8b6308e7e3d2c --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/handlers/response_enricher.py @@ -0,0 +1,30 @@ +from localstack.aws.api.apigateway import IntegrationType +from localstack.http import Response +from localstack.services.apigateway.next_gen.execute_api.api import ( + RestApiGatewayHandler, + RestApiGatewayHandlerChain, +) +from localstack.services.apigateway.next_gen.execute_api.context import RestApiInvocationContext +from 
localstack.utils.strings import short_uid
+
+
+class InvocationResponseEnricher(RestApiGatewayHandler):
+ def __call__(
+ self,
+ chain: RestApiGatewayHandlerChain,
+ context: RestApiInvocationContext,
+ response: Response,
+ ):
+ headers = response.headers
+
+ headers.set("x-amzn-RequestId", context.context_variables["requestId"])
+
+ # TODO: as we go into monitoring, we will want to have these values come from the context
+ headers.set("x-amz-apigw-id", short_uid() + "=")
+ if (
+ context.integration
+ and context.integration["type"]
+ not in (IntegrationType.HTTP_PROXY, IntegrationType.MOCK)
+ and not context.context_variables.get("error")
+ ):
+ headers.set("X-Amzn-Trace-Id", context.trace_id)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/header_utils.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/header_utils.py
new file mode 100644
index 0000000000000..1b1fcbfa3f35a
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/header_utils.py
@@ -0,0 +1,56 @@
+import logging
+from collections import defaultdict
+from typing import Iterable
+
+from werkzeug.datastructures.headers import Headers
+
+LOG = logging.getLogger(__name__)
+
+# Headers dropped during request parsing. They will never make it to the invocation requests,
+# and won't be available for request mapping.
+DROPPED_FROM_REQUEST_COMMON = [
+ "Connection",
+ "Content-Length",
+ "Content-MD5",
+ "Expect",
+ "Max-Forwards",
+ "Proxy-Authenticate",
+ "Server",
+ "TE",
+ "Transfer-Encoding",
+ "Trailer",
+ "Upgrade",
+ "WWW-Authenticate",
+]
+DROPPED_FROM_REQUEST_COMMON_LOWER = [header.lower() for header in DROPPED_FROM_REQUEST_COMMON]
+
+
+def should_drop_header_from_invocation(header: str) -> bool:
+ """These headers never make it to the invocation requests; even proxy integrations do not send them."""
+ return header.lower() in DROPPED_FROM_REQUEST_COMMON_LOWER
+
+
+def build_multi_value_headers(headers: Headers) -> dict[str, list[str]]:
+ multi_value_headers = defaultdict(list)
+ for key, value in headers:
+ multi_value_headers[key].append(value)
+
+ return multi_value_headers
+
+
+def drop_headers(headers: Headers, to_drop: Iterable[str]):
+ """Modifies the provided headers in place, dropping the headers matching the provided list"""
+ dropped_headers = []
+
+ for header in to_drop:
+ if headers.get(header):
+ headers.remove(header)
+ dropped_headers.append(header)
+
+ LOG.debug("Dropping headers: %s", dropped_headers)
+
+
+def set_default_headers(headers: Headers, default_headers: dict[str, str]):
+ for header, value in default_headers.items():
+ if not headers.get(header):
+ headers.set(header, value)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/helpers.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/helpers.py
new file mode 100644
index 0000000000000..33999b69ea1a9
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/helpers.py
@@ -0,0 +1,183 @@
+import copy
+import logging
+import random
+import re
+import time
+from secrets import token_hex
+from typing import Type, TypedDict
+
+from moto.apigateway.models import RestAPI as MotoRestAPI
+
+from localstack.services.apigateway.models import MergedRestApi, RestApiContainer, RestApiDeployment
+from localstack.utils.aws.arns import get_partition
+
+from .context import RestApiInvocationContext
+from .moto_helpers import get_resources_from_moto_rest_api
+
+LOG = logging.getLogger(__name__)
+
+_stage_variable_pattern = re.compile(r"\${stageVariables\.(?P<varName>.*?)}")
+
+
+def freeze_rest_api(
+ account_id: str, region: str, moto_rest_api: MotoRestAPI, localstack_rest_api: RestApiContainer
+) -> RestApiDeployment:
+ """
+ Snapshot a REST API in time to create a deployment
+ This will merge the Moto and LocalStack data into one `MergedRestApi`
+ """
+ moto_resources = get_resources_from_moto_rest_api(moto_rest_api)
+
+ rest_api = MergedRestApi.from_rest_api_container(
+ rest_api_container=localstack_rest_api,
+ resources=moto_resources,
+ )
+
+ return RestApiDeployment(
+ account_id=account_id,
+ region=region,
+ rest_api=copy.deepcopy(rest_api),
+ )
+
+
+def render_uri_with_stage_variables(
+ uri: str | None, stage_variables: dict[str, str] | None
+) -> str | None:
+ """
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/aws-api-gateway-stage-variables-reference.html#stage-variables-in-integration-HTTP-uris
+ URI=https://${stageVariables.<variableName>}
+ This format is the same as VTL, but we're using a simplified version to only replace
+ `${stageVariables.<variableName>}` values, as AWS will ignore `${path}` for example
+ """
+ if not uri:
+ return uri
+ stage_vars = stage_variables or {}
+
+ def replace_match(match_obj: re.Match) -> str:
+ return stage_vars.get(match_obj.group("varName"), "")
+
+ return _stage_variable_pattern.sub(replace_match, uri)
+
+
+def render_uri_with_path_parameters(uri: str | None, path_parameters: dict[str, str]) -> str | None:
+ if not uri:
+ return uri
+
+ for key, value in path_parameters.items():
+ uri = uri.replace(f"{{{key}}}", value)
+
+ return uri
+
+
+def render_integration_uri(
+ uri: str | None, path_parameters: dict[str, str], stage_variables: dict[str, str]
+) -> str:
+ """
+ A URI can contain different values to interpolate / render
+ It can have path parameter substitutions with this shape (it can also add a querystring).
+ URI=http://myhost.test/rootpath/{path}
+
+ It can also have another format, for stage variables, documented here:
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/aws-api-gateway-stage-variables-reference.html#stage-variables-in-integration-HTTP-uris
+ URI=https://${stageVariables.<variableName>}
+ This format is the same as VTL. 
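+ For example (illustrative): with path_parameters={"petId": "42"} and stage_variables={"host": "example.com"},
+ URI=http://${stageVariables.host}/pets/{petId} renders to http://example.com/pets/42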
+
+ :param uri: the integration URI
+ :param path_parameters: the list of path parameters, coming from the parameters mapping and override
+ :param stage_variables: -
+ :return: the rendered URI
+ """
+ if not uri:
+ return ""
+
+ uri_with_path = render_uri_with_path_parameters(uri, path_parameters)
+ return render_uri_with_stage_variables(uri_with_path, stage_variables)
+
+
+def get_source_arn(context: RestApiInvocationContext):
+ method = context.resource_method["httpMethod"]
+ path = context.resource["path"]
+ return (
+ f"arn:{get_partition(context.region)}:execute-api"
+ f":{context.region}"
+ f":{context.account_id}"
+ f":{context.api_id}"
+ f"/{context.stage}/{method}{path}"
+ )
+
+
+def get_lambda_function_arn_from_invocation_uri(uri: str) -> str:
+ """
+ "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:123456789012:function:SimpleLambda4ProxyResource/invocations",
+ :param uri: the integration URI value for a lambda function
+ :return: the lambda function ARN
+ """
+ return uri.split("functions/")[1].removesuffix("/invocations")
+
+
+def validate_sub_dict_of_typed_dict(typed_dict: Type[TypedDict], obj: dict) -> bool:
+ """
+ Validate that the object is a subset of the keys of a given `TypedDict`.
+ :param typed_dict: the `TypedDict` blueprint
+ :param obj: the object to validate
+ :return: True if it is a subset, False otherwise
+ """
+ typed_dict_keys = {*typed_dict.__required_keys__, *typed_dict.__optional_keys__}
+
+ return not bool(set(obj) - typed_dict_keys)
+
+
+def generate_trace_id():
+ """https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids"""
+ original_request_epoch = int(time.time())
+ timestamp_hex = hex(original_request_epoch)[2:]
+ version_number = "1"
+ unique_id = token_hex(12)
+ return f"{version_number}-{timestamp_hex}-{unique_id}"
+
+
+def generate_trace_parent():
+ return token_hex(8)
+
+
+def parse_trace_id(trace_id: str) -> dict[str, str]:
+ split_trace = trace_id.split(";")
+ trace_values = {}
+ for trace_part in split_trace:
+ key_value = trace_part.split("=")
+ if len(key_value) == 2:
+ trace_values[key_value[0].capitalize()] = key_value[1]
+
+ return trace_values
+
+
+def mime_type_matches_binary_media_types(mime_type: str | None, binary_media_types: list[str]):
+ if not mime_type or not binary_media_types:
+ return False
+
+ mime_type_and_subtype = mime_type.split(",")[0].split(";")[0].split("/")
+ if len(mime_type_and_subtype) != 2:
+ return False
+ mime_type, mime_subtype = mime_type_and_subtype
+
+ for bmt in binary_media_types:
+ type_and_subtype = bmt.split(";")[0].split("/")
+ if len(type_and_subtype) != 2:
+ continue
+ _type, subtype = type_and_subtype
+ if _type == "*":
+ continue
+
+ if subtype == "*" and mime_type == _type:
+ return True
+
+ if mime_type == _type and mime_subtype == subtype:
+ return True
+
+ return False
+
+
+def should_divert_to_canary(percent_traffic: float) -> bool:
+ if int(percent_traffic) == 100:
+ return True
+ return percent_traffic > random.random() * 100
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/__init__.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/__init__.py
new file mode 100644
index 0000000000000..7900965784631
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/__init__.py
@@ -0,0 +1,15 @@
+from .aws import RestApiAwsIntegration, RestApiAwsProxyIntegration
+from .http import 
RestApiHttpIntegration, RestApiHttpProxyIntegration +from .mock import RestApiMockIntegration + +REST_API_INTEGRATIONS = { + RestApiAwsIntegration.name: RestApiAwsIntegration(), + RestApiAwsProxyIntegration.name: RestApiAwsProxyIntegration(), + RestApiHttpIntegration.name: RestApiHttpIntegration(), + RestApiHttpProxyIntegration.name: RestApiHttpProxyIntegration(), + RestApiMockIntegration.name: RestApiMockIntegration(), +} + +__all__ = [ + "REST_API_INTEGRATIONS", +] diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/aws.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/aws.py new file mode 100644 index 0000000000000..5e65458ed4ac3 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/aws.py @@ -0,0 +1,598 @@ +import base64 +import json +import logging +from functools import lru_cache +from http import HTTPMethod +from typing import Literal, Optional, TypedDict +from urllib.parse import urlparse + +import requests +from botocore.exceptions import ClientError +from werkzeug.datastructures import Headers + +from localstack import config +from localstack.aws.connect import ( + INTERNAL_REQUEST_PARAMS_HEADER, + InternalRequestParameters, + connect_to, + dump_dto, +) +from localstack.aws.spec import get_service_catalog +from localstack.constants import APPLICATION_JSON, INTERNAL_AWS_ACCESS_KEY_ID +from localstack.utils.aws.arns import extract_region_from_arn +from localstack.utils.aws.client_types import ServicePrincipal +from localstack.utils.strings import to_bytes, to_str + +from ..context import ( + EndpointResponse, + IntegrationRequest, + InvocationRequest, + RestApiInvocationContext, +) +from ..gateway_response import IntegrationFailureError, InternalServerError +from ..header_utils import build_multi_value_headers +from ..helpers import ( + get_lambda_function_arn_from_invocation_uri, + get_source_arn, + mime_type_matches_binary_media_types, + render_uri_with_stage_variables, + validate_sub_dict_of_typed_dict, +) +from ..variables import ContextVariables +from .core import RestApiIntegration + +LOG = logging.getLogger(__name__) + +NO_BODY_METHODS = { + HTTPMethod.OPTIONS, + HTTPMethod.GET, + HTTPMethod.HEAD, +} + + +class LambdaProxyResponse(TypedDict, total=False): + body: Optional[str] + statusCode: Optional[int | str] + headers: Optional[dict[str, str]] + isBase64Encoded: Optional[bool] + multiValueHeaders: Optional[dict[str, list[str]]] + + +class LambdaInputEvent(TypedDict, total=False): + body: str + isBase64Encoded: bool + httpMethod: str | HTTPMethod + resource: str + path: str + headers: dict[str, str] + multiValueHeaders: dict[str, list[str]] + queryStringParameters: dict[str, str] + multiValueQueryStringParameters: dict[str, list[str]] + requestContext: ContextVariables + pathParameters: dict[str, str] + stageVariables: dict[str, str] + + +class ParsedAwsIntegrationUri(TypedDict): + service_name: str + region_name: str + action_type: Literal["path", "action"] + path: str + + +@lru_cache(maxsize=64) +def get_service_factory(region_name: str, role_arn: str): + if role_arn: + return connect_to.with_assumed_role( + role_arn=role_arn, + region_name=region_name, + service_principal=ServicePrincipal.apigateway, + session_name="BackplaneAssumeRoleSession", + ) + else: + return connect_to(region_name=region_name) + + +@lru_cache(maxsize=64) +def get_internal_mocked_headers( + service_name: str, + region_name: str, + source_arn: str, + role_arn: str | None, +) 
-> dict[str, str]:
+ if role_arn:
+ access_key_id = (
+ connect_to()
+ .sts.request_metadata(service_principal=ServicePrincipal.apigateway)
+ .assume_role(RoleArn=role_arn, RoleSessionName="BackplaneAssumeRoleSession")[
+ "Credentials"
+ ]["AccessKeyId"]
+ )
+ else:
+ access_key_id = INTERNAL_AWS_ACCESS_KEY_ID
+
+ dto = InternalRequestParameters(
+ service_principal=ServicePrincipal.apigateway, source_arn=source_arn
+ )
+ # TODO: maybe use the localstack.utils.aws.client.SigningHttpClient instead of directly mocking the Authorization
+ # header (but we will need to select the right signer depending on the service?)
+ headers = {
+ "Authorization": (
+ "AWS4-HMAC-SHA256 "
+ + f"Credential={access_key_id}/20160623/{region_name}/{service_name}/aws4_request, "
+ + "SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=1234"
+ ),
+ INTERNAL_REQUEST_PARAMS_HEADER: dump_dto(dto),
+ }
+
+ return headers
+
+
+@lru_cache(maxsize=64)
+def get_target_prefix_for_service(service_name: str) -> str | None:
+ return get_service_catalog().get(service_name).metadata.get("targetPrefix")
+
+
+class RestApiAwsIntegration(RestApiIntegration):
+ """
+ This is a REST API integration responsible for directly interacting with AWS services. It uses the `uri` to
+ map the incoming request to the targeted AWS service, and can have 2 types.
+ - `path`: the request targets the direct URI of the AWS service, like you would with an HTTP client
+ example: for an S3 GetObject call: arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}
+ - `action`: this is a simpler way, where you can pass the request parameters like you would do with an SDK, and you
+ can specify the service action (for ex. here S3 `GetObject`). It seems the request parameters can be passed as
+ query string parameters, JSON body and maybe more. TODO: verify, 2 documentation pages indicate divergent
+ information (one indicates parameters through QS, one through request body)
+ example: arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key}
+
+ https://docs.aws.amazon.com/apigateway/latest/developerguide/integration-request-basic-setup.html
+
+
+ TODO: it seems we could have a generic AWS integration type; we should not need to subclass for each service,
+ we just need to separate usage between the `path` URI type and the `action` URI type.
+ - `path`: we can simply pass along the full rendered request along with specific `mocked` AWS headers
+ that are dependent on the service (retrieved from the ARN in the uri)
+ - `action`: we might need either a full Boto call or use the Boto request serializer, as it seems the request
+ parameters are expected as parameters
+ """
+
+ name = "AWS"
+
+ # TODO: it seems in AWS, you don't need to manually set the `X-Amz-Target` header when using the `action` type.
+ # for now, we know `events` needs the user to manually add the header, but Kinesis and DynamoDB don't.
+ # Maybe reverse the list to exclude instead of include.
+ SERVICES_AUTO_TARGET = ["dynamodb", "kinesis", "ssm", "stepfunctions"]
+
+ # TODO: some services still target the Query protocol (validated with AWS), even though SSM for example has been
+ # JSON for as long as the Boto SDK has existed. We will need to emulate the Query protocol and translate it to JSON
+ SERVICES_LEGACY_QUERY_PROTOCOL = ["ssm"]
+
+ SERVICE_MAP = {
+ "states": "stepfunctions",
+ }
+
+ def __init__(self):
+ self._base_domain = config.internal_service_url()
+ self._base_host = ""
+ self._service_names = get_service_catalog().service_names
+
+ def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
+ integration_req: IntegrationRequest = context.integration_request
+ method = integration_req["http_method"]
+ parsed_uri = self.parse_aws_integration_uri(integration_req["uri"])
+ service_name = parsed_uri["service_name"]
+ integration_region = parsed_uri["region_name"]
+
+ if credentials := context.integration.get("credentials"):
+ credentials = render_uri_with_stage_variables(credentials, context.stage_variables)
+
+ headers = integration_req["headers"]
+ # Some integrations use a special format for the service in the URI, like AppSync; those requests are not
+ # directed to a service directly, so there is no need to add the Authorization header. Such URIs would fail
+ # parsing by our service name parser anyway
+ if service_name in self._service_names:
+ headers.update(
+ get_internal_mocked_headers(
+ service_name=service_name,
+ region_name=integration_region,
+ source_arn=get_source_arn(context),
+ role_arn=credentials,
+ )
+ )
+ query_params = integration_req["query_string_parameters"].copy()
+ data = integration_req["body"]
+
+ if parsed_uri["action_type"] == "path":
+ # the Path action type allows you to override the path the request is sent to, like you would when
+ # sending directly to AWS
+ path = f"/{parsed_uri['path']}"
+ else:
+ # Action passes the `Action` query string parameter
+ path = ""
+ action = parsed_uri["path"]
+
+ if target := self.get_action_service_target(service_name, action):
+ headers["X-Amz-Target"] = target
+
+ query_params["Action"] = action
+
+ if service_name in self.SERVICES_LEGACY_QUERY_PROTOCOL:
+ # this has been tested in AWS: for `ssm`, it fully overrides the body because SSM uses the Query
+ # protocol, so we simulate it that way
+ data = self.get_payload_from_query_string(query_params)
+
+ url = f"{self._base_domain}{path}"
+ headers["Host"] = self.get_internal_host_for_service(
+ service_name=service_name, region_name=integration_region
+ )
+
+ request_parameters = {
+ "method": method,
+ "url": url,
+ "params": query_params,
+ "headers": headers,
+ }
+
+ if method not in NO_BODY_METHODS:
+ request_parameters["data"] = data
+
+ request_response = requests.request(**request_parameters)
+ response_content = request_response.content
+
+ if (
+ parsed_uri["action_type"] == "action"
+ and service_name in self.SERVICES_LEGACY_QUERY_PROTOCOL
+ ):
+ response_content = self.format_response_content_legacy(
+ payload=response_content,
+ service_name=service_name,
+ action=parsed_uri["path"],
+ request_id=context.context_variables["requestId"],
+ )
+
+ return EndpointResponse(
+ body=response_content,
+ status_code=request_response.status_code,
+ headers=Headers(dict(request_response.headers)),
+ )
+
+ def parse_aws_integration_uri(self, uri: str) -> ParsedAwsIntegrationUri:
+ """
+ The URI can be of 2 shapes: Path or Action. 
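+ Both are ARNs of the form `arn:aws:apigateway:{region}:{service}:{action_type}/...`, where the action type
+ is either `path` or `action`: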
+ Path : arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}
+ Action: arn:aws:apigateway:us-east-1:kinesis:action/PutRecord
+ :param uri: the URI of the AWS integration
+ :return: a ParsedAwsIntegrationUri containing the service name, the region and the type of action
+ """
+ arn, _, path = uri.partition("/")
+ split_arn = arn.split(":", maxsplit=5)
+ *_, region_name, service_name, action_type = split_arn
+ boto_service_name = self.SERVICE_MAP.get(service_name, service_name)
+ return ParsedAwsIntegrationUri(
+ region_name=region_name,
+ service_name=boto_service_name,
+ action_type=action_type,
+ path=path,
+ )
+
+ def get_action_service_target(self, service_name: str, action: str) -> str | None:
+ if service_name not in self.SERVICES_AUTO_TARGET:
+ return None
+
+ target_prefix = get_target_prefix_for_service(service_name)
+ if not target_prefix:
+ return None
+
+ return f"{target_prefix}.{action}"
+
+ def get_internal_host_for_service(self, service_name: str, region_name: str):
+ url = self._base_domain
+ if service_name == "sqs":
+ # This follows the new SQS_ENDPOINT_STRATEGY=standard
+ url = config.external_service_url(subdomains=f"sqs.{region_name}")
+ elif "-api" in service_name:
+ # this could be a `<subdomain>.<service>-api` style host, used by some services
+ url = config.external_service_url(subdomains=service_name)
+
+ return urlparse(url).netloc
+
+ @staticmethod
+ def get_payload_from_query_string(query_string_parameters: dict) -> str:
+ return json.dumps(query_string_parameters)
+
+ @staticmethod
+ def format_response_content_legacy(
+ service_name: str, action: str, payload: bytes, request_id: str
+ ) -> bytes:
+ # TODO: not sure how much we need to support this; it supports SSM for now. Once we write more tests for
+ # the `action` type, see if we can generalize more
+ data = json.loads(payload)
+ try:
+ # we try to populate the missing fields from the OperationModel of the operation
+ operation_model = get_service_catalog().get(service_name).operation_model(action)
+ for key in operation_model.output_shape.members:
+ if key not in data:
+ data[key] = None
+
+ except Exception:
+ # the operation above is only for parity reasons, so we skip it if it fails
+ pass
+
+ wrapped = {
+ f"{action}Response": {
+ f"{action}Result": data,
+ "ResponseMetadata": {
+ "RequestId": request_id,
+ },
+ }
+ }
+ return to_bytes(json.dumps(wrapped))
+
+
+class RestApiAwsProxyIntegration(RestApiIntegration):
+ """
+ This is a custom, simplified REST API integration focused only on the Lambda service, with minimal modification
+ from API Gateway. It passes the incoming request almost as is, in a custom-created event payload, to the
+ configured Lambda function. 
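+ A truncated, illustrative example of the event payload sent to the function:
+ {"httpMethod": "GET", "path": "/pets", "headers": {...}, "queryStringParameters": null,
+ "body": null, "isBase64Encoded": false, "requestContext": {...}, ...}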
+ + https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html + """ + + name = "AWS_PROXY" + + def invoke(self, context: RestApiInvocationContext) -> EndpointResponse: + integration_req: IntegrationRequest = context.integration_request + method = integration_req["http_method"] + + if method != HTTPMethod.POST: + LOG.warning( + "The 'AWS_PROXY' integration can only be used with the POST integration method.", + ) + raise IntegrationFailureError("Internal server error") + + input_event = self.create_lambda_input_event(context) + + # TODO: verify stage variables rendering in AWS_PROXY + integration_uri = integration_req["uri"] + + function_arn = get_lambda_function_arn_from_invocation_uri(integration_uri) + source_arn = get_source_arn(context) + + # TODO: write test for credentials rendering + if credentials := context.integration.get("credentials"): + credentials = render_uri_with_stage_variables(credentials, context.stage_variables) + + try: + lambda_payload = self.call_lambda( + function_arn=function_arn, + event=to_bytes(json.dumps(input_event)), + source_arn=source_arn, + credentials=credentials, + ) + + except ClientError as e: + LOG.warning( + "Exception during integration invocation: '%s'", + e, + ) + status_code = 502 + if e.response["Error"]["Code"] == "AccessDeniedException": + status_code = 500 + raise IntegrationFailureError("Internal server error", status_code=status_code) from e + + except Exception as e: + LOG.warning( + "Unexpected exception during integration invocation: '%s'", + e, + ) + raise IntegrationFailureError("Internal server error", status_code=502) from e + + lambda_response = self.parse_lambda_response(lambda_payload) + + headers = Headers({"Content-Type": APPLICATION_JSON}) + + response_headers = self._merge_lambda_response_headers(lambda_response) + headers.update(response_headers) + + # TODO: maybe centralize this flag inside the context, when we are also using it for other integration types + # AWS_PROXY behaves a bit differently, but this could checked only once earlier + binary_response_accepted = mime_type_matches_binary_media_types( + mime_type=context.invocation_request["headers"].get("Accept"), + binary_media_types=context.deployment.rest_api.rest_api.get("binaryMediaTypes", []), + ) + body = self._parse_body( + body=lambda_response.get("body"), + is_base64_encoded=binary_response_accepted and lambda_response.get("isBase64Encoded"), + ) + + return EndpointResponse( + headers=headers, + body=body, + status_code=int(lambda_response.get("statusCode") or 200), + ) + + @staticmethod + def call_lambda( + function_arn: str, + event: bytes, + source_arn: str, + credentials: str = None, + ) -> bytes: + lambda_client = get_service_factory( + region_name=extract_region_from_arn(function_arn), + role_arn=credentials, + ).lambda_ + inv_result = lambda_client.request_metadata( + service_principal=ServicePrincipal.apigateway, + source_arn=source_arn, + ).invoke( + FunctionName=function_arn, + Payload=event, + InvocationType="RequestResponse", + ) + if payload := inv_result.get("Payload"): + return payload.read() + return b"" + + def parse_lambda_response(self, payload: bytes) -> LambdaProxyResponse: + try: + lambda_response = json.loads(payload) + except json.JSONDecodeError: + LOG.warning( + 'Lambda output should follow the next JSON format: { "isBase64Encoded": true|false, "statusCode": httpStatusCode, "headers": { "headerName": "headerValue", ... 
},"body": "..."} but was: %s', + payload, + ) + LOG.debug( + "Execution failed due to configuration error: Malformed Lambda proxy response" + ) + raise InternalServerError("Internal server error", status_code=502) + + # none of the lambda response fields are mandatory, but you cannot return any other fields + if not self._is_lambda_response_valid(lambda_response): + if "errorMessage" in lambda_response: + LOG.debug( + "Lambda execution failed with status 200 due to customer function error: %s. Lambda request id: %s", + lambda_response["errorMessage"], + lambda_response.get("requestId", ""), + ) + else: + LOG.warning( + 'Lambda output should follow the next JSON format: { "isBase64Encoded": true|false, "statusCode": httpStatusCode, "headers": { "headerName": "headerValue", ... },"body": "..."} but was: %s', + payload, + ) + LOG.debug( + "Execution failed due to configuration error: Malformed Lambda proxy response" + ) + raise InternalServerError("Internal server error", status_code=502) + + def serialize_header(value: bool | str) -> str: + if isinstance(value, bool): + return "true" if value else "false" + return value + + if headers := lambda_response.get("headers"): + lambda_response["headers"] = {k: serialize_header(v) for k, v in headers.items()} + + if multi_value_headers := lambda_response.get("multiValueHeaders"): + lambda_response["multiValueHeaders"] = { + k: [serialize_header(v) for v in values] + for k, values in multi_value_headers.items() + } + + return lambda_response + + @staticmethod + def _is_lambda_response_valid(lambda_response: dict) -> bool: + if not isinstance(lambda_response, dict): + return False + + if not validate_sub_dict_of_typed_dict(LambdaProxyResponse, lambda_response): + return False + + if (headers := lambda_response.get("headers")) is not None: + if not isinstance(headers, dict): + return False + if any(not isinstance(header_value, (str, bool)) for header_value in headers.values()): + return False + + if (multi_value_headers := lambda_response.get("multiValueHeaders")) is not None: + if not isinstance(multi_value_headers, dict): + return False + if any( + not isinstance(header_value, list) for header_value in multi_value_headers.values() + ): + return False + + if "statusCode" in lambda_response: + try: + int(lambda_response["statusCode"]) + except ValueError: + return False + + # TODO: add more validations of the values' type + return True + + def create_lambda_input_event(self, context: RestApiInvocationContext) -> LambdaInputEvent: + # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format + # for building the Lambda Payload, we need access to the Invocation Request, as some data is not available in + # the integration request and does not make sense for it + invocation_req: InvocationRequest = context.invocation_request + integration_req: IntegrationRequest = context.integration_request + + body, is_b64_encoded = self._format_body(integration_req["body"]) + + if context.base_path: + path = context.context_variables["path"] + else: + path = invocation_req["path"] + + input_event = LambdaInputEvent( + headers=self._format_headers(dict(integration_req["headers"])), + multiValueHeaders=self._format_headers( + build_multi_value_headers(integration_req["headers"]) + ), + body=body or None, + isBase64Encoded=is_b64_encoded, + requestContext=context.context_variables, + stageVariables=context.stage_variables, + # still using the InvocationRequest query string parameters 
as the logic is the same, maybe refactor? + queryStringParameters=invocation_req["query_string_parameters"] or None, + multiValueQueryStringParameters=invocation_req["multi_value_query_string_parameters"] + or None, + pathParameters=invocation_req["path_parameters"] or None, + httpMethod=invocation_req["http_method"], + path=path, + resource=context.resource["path"], + ) + + return input_event + + @staticmethod + def _format_headers(headers: dict[str, str | list[str]]) -> dict[str, str | list[str]]: + # Some headers get capitalized like in CloudFront, see + # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/add-origin-custom-headers.html#add-origin-custom-headers-forward-authorization + # It seems AWS_PROXY lambda integrations are behind CloudFront, as seen by the returned headers in AWS + to_capitalize: list[str] = ["authorization", "user-agent"] # some headers get capitalized + to_filter: list[str] = ["content-length", "connection"] + headers = { + k.title() if k.lower() in to_capitalize else k: v + for k, v in headers.items() + if k.lower() not in to_filter + } + + return headers + + @staticmethod + def _format_body(body: bytes) -> tuple[str, bool]: + try: + return body.decode("utf-8"), False + except UnicodeDecodeError: + return to_str(base64.b64encode(body)), True + + @staticmethod + def _parse_body(body: str | None, is_base64_encoded: bool) -> bytes: + if not body: + return b"" + + if is_base64_encoded: + try: + return base64.b64decode(body) + except Exception: + raise InternalServerError("Internal server error", status_code=500) + + return to_bytes(body) + + @staticmethod + def _merge_lambda_response_headers(lambda_response: LambdaProxyResponse) -> dict: + headers = lambda_response.get("headers") or {} + + if multi_value_headers := lambda_response.get("multiValueHeaders"): + # multiValueHeaders has the priority and will decide the casing of the final headers, as they are merged + headers_low_keys = {k.lower(): v for k, v in headers.items()} + + for k, values in multi_value_headers.items(): + if (k_lower := k.lower()) in headers_low_keys: + headers[k] = [*values, headers_low_keys[k_lower]] + else: + headers[k] = values + + return headers diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/core.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/core.py new file mode 100644 index 0000000000000..c65b1a9539d7f --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/core.py @@ -0,0 +1,19 @@ +from abc import abstractmethod + +from ..api import RestApiInvocationContext +from ..context import EndpointResponse + + +class RestApiIntegration: + """ + This REST API Integration exposes an API to invoke the specific Integration with a common interface. 
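+ Each subclass sets a `name` matching its integration type (e.g. "AWS_PROXY") and implements `invoke`,
+ which receives the full invocation context and returns an EndpointResponse.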
+ + https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-integration-settings.html + TODO: Add more abstractmethods when starting to work on the Integration handler + """ + + name: str + + @abstractmethod + def invoke(self, context: RestApiInvocationContext) -> EndpointResponse: + pass diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/http.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/http.py new file mode 100644 index 0000000000000..fa0511072c9d1 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/http.py @@ -0,0 +1,147 @@ +import logging +from http import HTTPMethod +from typing import Optional, TypedDict + +import requests +from werkzeug.datastructures import Headers + +from localstack.aws.api.apigateway import Integration + +from ..context import EndpointResponse, IntegrationRequest, RestApiInvocationContext +from ..gateway_response import ApiConfigurationError, IntegrationFailureError +from ..header_utils import build_multi_value_headers +from .core import RestApiIntegration + +LOG = logging.getLogger(__name__) + +NO_BODY_METHODS = {HTTPMethod.OPTIONS, HTTPMethod.GET, HTTPMethod.HEAD} + + +class SimpleHttpRequest(TypedDict, total=False): + method: HTTPMethod | str + url: str + params: Optional[dict[str, str | list[str]]] + data: bytes + headers: Optional[dict[str, str]] + cookies: Optional[dict[str, str]] + timeout: Optional[int] + allow_redirects: Optional[bool] + stream: Optional[bool] + verify: Optional[bool] + # TODO: check if there was a situation where we'd pass certs? + cert: Optional[str | tuple[str, str]] + + +class BaseRestApiHttpIntegration(RestApiIntegration): + @staticmethod + def _get_integration_timeout(integration: Integration) -> float: + return int(integration.get("timeoutInMillis", 29000)) / 1000 + + +class RestApiHttpIntegration(BaseRestApiHttpIntegration): + """ + This is a REST API integration responsible to send a request to another HTTP API. 
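+ Unlike HTTP_PROXY, this integration type applies the configured request and response mapping templates to the
+ exchanged payloads.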
+class RestApiHttpIntegration(BaseRestApiHttpIntegration):
+    """
+    This is a REST API integration responsible for sending a request to another HTTP API.
+    https://docs.aws.amazon.com/apigateway/latest/developerguide/setup-http-integrations.html#api-gateway-set-up-http-proxy-integration-on-proxy-resource
+    """
+
+    name = "HTTP"
+
+    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
+        integration_req: IntegrationRequest = context.integration_request
+        method = integration_req["http_method"]
+        uri = integration_req["uri"]
+
+        request_parameters: SimpleHttpRequest = {
+            "method": method,
+            "url": uri,
+            "params": integration_req["query_string_parameters"],
+            "headers": integration_req["headers"],
+        }
+
+        if method not in NO_BODY_METHODS:
+            request_parameters["data"] = integration_req["body"]
+
+        # TODO: configurable timeout (29s by default) (check type and default value in provider)
+        # integration: Integration = context.resource_method["methodIntegration"]
+        # request_parameters["timeout"] = self._get_integration_timeout(integration)
+        # TODO: check for redirects
+        # request_parameters["allow_redirects"] = False
+        try:
+            request_response = requests.request(**request_parameters)
+
+        except (requests.exceptions.InvalidURL, requests.exceptions.InvalidSchema) as e:
+            LOG.warning("Execution failed due to configuration error: Invalid endpoint address")
+            LOG.debug("The URI specified for the HTTP/HTTP_PROXY integration is invalid: %s", uri)
+            raise ApiConfigurationError("Internal server error") from e
+
+        except (requests.exceptions.Timeout, requests.exceptions.SSLError) as e:
+            # TODO: make the exception catching more fine-grained
+            # this can be reproduced in AWS if you try to hit an HTTP endpoint which is HTTPS-only, like a Lambda URL
+            LOG.warning("Execution failed due to a network error communicating with endpoint")
+            raise IntegrationFailureError("Network error communicating with endpoint") from e
+
+        except requests.exceptions.ConnectionError as e:
+            raise ApiConfigurationError("Internal server error") from e
+
+        return EndpointResponse(
+            body=request_response.content,
+            status_code=request_response.status_code,
+            headers=Headers(dict(request_response.headers)),
+        )
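The proxy variant below forwards headers nearly untouched, but multi-value headers still have to be folded into single comma-separated values before they are handed to `requests`. A standalone sketch of that folding step, with invented values:

# Standalone sketch of the header folding performed in invoke() below.
multi_value_headers = {"Accept": ["application/json", "text/html"], "X-Req-Id": ["abc-123"]}
request_headers = {key: ",".join(values) for key, values in multi_value_headers.items()}
assert request_headers == {"Accept": "application/json,text/html", "X-Req-Id": "abc-123"}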
+class RestApiHttpProxyIntegration(BaseRestApiHttpIntegration):
+    """
+    This is a simplified REST API integration responsible for sending a request to another HTTP API by proxying it
+    almost directly.
+    https://docs.aws.amazon.com/apigateway/latest/developerguide/setup-http-integrations.html#api-gateway-set-up-http-proxy-integration-on-proxy-resource
+    """
+
+    name = "HTTP_PROXY"
+
+    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
+        integration_req: IntegrationRequest = context.integration_request
+        method = integration_req["http_method"]
+        uri = integration_req["uri"]
+
+        multi_value_headers = build_multi_value_headers(integration_req["headers"])
+        request_headers = {key: ",".join(value) for key, value in multi_value_headers.items()}
+
+        request_parameters: SimpleHttpRequest = {
+            "method": method,
+            "url": uri,
+            "params": integration_req["query_string_parameters"],
+            "headers": request_headers,
+        }
+
+        # TODO: validate this for HTTP_PROXY
+        if method not in NO_BODY_METHODS:
+            request_parameters["data"] = integration_req["body"]
+
+        # TODO: configurable timeout (29s by default) (check type and default value in provider)
+        # integration: Integration = context.resource_method["methodIntegration"]
+        # request_parameters["timeout"] = self._get_integration_timeout(integration)
+        try:
+            request_response = requests.request(**request_parameters)
+
+        except (requests.exceptions.InvalidURL, requests.exceptions.InvalidSchema) as e:
+            LOG.warning("Execution failed due to configuration error: Invalid endpoint address")
+            LOG.debug("The URI specified for the HTTP/HTTP_PROXY integration is invalid: %s", uri)
+            raise ApiConfigurationError("Internal server error") from e
+
+        except (requests.exceptions.Timeout, requests.exceptions.SSLError) as e:
+            # TODO: make the exception catching more fine-grained
+            # this can be reproduced in AWS if you try to hit an HTTP endpoint which is HTTPS-only, like a Lambda URL
+            LOG.warning("Execution failed due to a network error communicating with endpoint")
+            raise IntegrationFailureError("Network error communicating with endpoint") from e
+
+        except requests.exceptions.ConnectionError as e:
+            raise ApiConfigurationError("Internal server error") from e
+
+        response_headers = Headers(dict(request_response.headers))
+
+        return EndpointResponse(
+            body=request_response.content,
+            status_code=request_response.status_code,
+            headers=response_headers,
+        )
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/mock.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/mock.py
new file mode 100644
index 0000000000000..84ddecc05862e
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/integrations/mock.py
@@ -0,0 +1,108 @@
+import json
+import logging
+import re
+from json import JSONDecodeError
+
+from werkzeug.datastructures import Headers
+
+from localstack.utils.strings import to_str
+
+from ..context import EndpointResponse, IntegrationRequest, RestApiInvocationContext
+from ..gateway_response import InternalServerError
+from .core import RestApiIntegration
+
+LOG = logging.getLogger(__name__)
+
+
+class RestApiMockIntegration(RestApiIntegration):
+    """
+    This is a simple but quite limited REST API integration, allowing you to quickly test your APIs or return
+    hardcoded responses to the client.
+    This integration never forwards the request to a backend: the whole response is constructed from the
+    integration request and response mappings.
+    It can be used, for example, to set up CORS responses for `OPTIONS` requests.
+    https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-mock-integration.html
+    """
+
+    name = "MOCK"
+
+    def invoke(self, context: RestApiInvocationContext) -> EndpointResponse:
+        integration_req: IntegrationRequest = context.integration_request
+
+        status_code = self.get_status_code(integration_req)
+
+        if status_code is None:
+            LOG.debug(
+                "Execution failed due to configuration error: Unable to parse statusCode. "
+                "It should be an integer that is defined in the request template."
+            )
+            raise InternalServerError("Internal server error")
+
+        return EndpointResponse(status_code=status_code, body=b"", headers=Headers())
+
+    def get_status_code(self, integration_req: IntegrationRequest) -> int | None:
+        try:
+            body = json.loads(integration_req["body"])
+        except JSONDecodeError as e:
+            LOG.debug(
+                "Exception while JSON parsing integration request body: %s. "
+                "Falling back to custom parser",
+                e,
+                exc_info=LOG.isEnabledFor(logging.DEBUG),
+            )
+            body = self.parse_invalid_json(to_str(integration_req["body"]))
+
+        status_code = body.get("statusCode")
+        if not isinstance(status_code, int):
+            return
+
+        return status_code
+
+    def parse_invalid_json(self, body: str) -> dict:
+        """This is a quick fix to unblock CDK users setting a CORS policy for their REST APIs.
+        CDK creates a MOCK OPTIONS route with invalid JSON, e.g. `{statusCode: 200}`.
+        AWS probably has a custom token parser; we can implement one at some point if users request it."""
+
+        def convert_null_value(value) -> str:
+            if (value := value.strip()) in ("null", ""):
+                return '""'
+            return value
+
+        try:
+            statuscode = ""
+            matched = re.match(r"^\s*{(.+)}\s*$", body).group(1)
+            pairs = [m.strip() for m in matched.split(",")]
+            # TODO: this is not right, but nested objects would otherwise break the parsing
+            key_values = [s.split(":", maxsplit=1) for s in pairs if s]
+            for key_value in key_values:
+                assert len(key_value) == 2
+                key, value = [convert_null_value(el) for el in key_value]
+
+                if key in ("statusCode", "'statusCode'", '"statusCode"'):
+                    statuscode = int(value)
+                    continue
+
+                assert (leading_key_char := key[0]) not in "[{"
+                if leading_key_char in "'\"":
+                    assert len(key) >= 2
+                    assert key[-1] == leading_key_char
+
+                if (leading_value_char := value[0]) in "[{'\"":
+                    assert len(value) >= 2
+                    if leading_value_char == "{":
+                        # TODO: reparse objects
+                        assert value[-1] == "}"
+                    elif leading_value_char == "[":
+                        # TODO: validate arrays
+                        assert value[-1] == "]"
+                    else:
+                        assert value[-1] == leading_value_char
+
+            return {"statusCode": statuscode}
+
+        except Exception as e:
+            LOG.debug(
+                "Error parsing an invalid JSON body: %s", e, exc_info=LOG.isEnabledFor(logging.DEBUG)
+            )
+            return {"statusCode": ""}
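For context, a typical MOCK setup drives this code through its request template. A hedged boto3 sketch (all ids are placeholders) of how such an integration is usually configured, e.g. for CORS:

# Illustrative sketch (placeholder ids): the request template must evaluate to a
# JSON object containing a statusCode key, which get_status_code() extracts.
import boto3

apigw = boto3.client("apigateway")
apigw.put_integration(
    restApiId="<api-id>",        # placeholder
    resourceId="<resource-id>",  # placeholder
    httpMethod="OPTIONS",
    type="MOCK",
    requestTemplates={"application/json": '{"statusCode": 200}'},
)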
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/moto_helpers.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/moto_helpers.py
new file mode 100644
index 0000000000000..d54b25b560759
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/moto_helpers.py
@@ -0,0 +1,82 @@
+from moto.apigateway.models import APIGatewayBackend, apigateway_backends
+from moto.apigateway.models import RestAPI as MotoRestAPI
+
+from localstack.aws.api.apigateway import (
+    ApiKey,
+    ListOfUsagePlan,
+    ListOfUsagePlanKey,
+    Resource,
+    Stage,
+)
+
+
+def get_resources_from_moto_rest_api(moto_rest_api: MotoRestAPI) -> dict[str, Resource]:
+    """
+    This returns the `Resources` from a Moto REST API.
+    It decouples us from the underlying split of resources between Moto and LocalStack, and always returns the
+    right format.
+    """
+    moto_resources = moto_rest_api.resources
+
+    resources: dict[str, Resource] = {}
+    for moto_resource in moto_resources.values():
+        resource = Resource(
+            id=moto_resource.id,
+            parentId=moto_resource.parent_id,
+            pathPart=moto_resource.path_part,
+            path=moto_resource.get_path(),
+            resourceMethods={
+                # TODO: check if resource_methods.to_json() returns everything we need/want
+                k: v.to_json()
+                for k, v in moto_resource.resource_methods.items()
+            },
+        )
+
+        resources[moto_resource.id] = resource
+
+    return resources
+
+
+def get_stage_variables(
+    account_id: str, region: str, api_id: str, stage_name: str
+) -> dict[str, str]:
+    apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region]
+    moto_rest_api = apigateway_backend.get_rest_api(api_id)
+    stage = moto_rest_api.stages[stage_name]
+    return stage.variables
+
+
+def get_stage_configuration(account_id: str, region: str, api_id: str, stage_name: str) -> Stage:
+    apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region]
+    moto_rest_api = apigateway_backend.get_rest_api(api_id)
+    stage = moto_rest_api.stages[stage_name]
+    return stage.to_json()
+
+
+def get_usage_plans(account_id: str, region_name: str) -> ListOfUsagePlan:
+    """
+    Returns a list of usage plans from the Moto store.
+    """
+    apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region_name]
+    return [usage_plan.to_json() for usage_plan in apigateway_backend.usage_plans.values()]
+
+
+def get_api_key(api_key_id: str, account_id: str, region_name: str) -> ApiKey:
+    """
+    Returns an API key from the Moto store.
+    """
+    apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region_name]
+    return apigateway_backend.keys[api_key_id].to_json()
+
+
+def get_usage_plan_keys(
+    usage_plan_id: str, account_id: str, region_name: str
+) -> ListOfUsagePlanKey:
+    """
+    Returns a list of usage plan keys from the Moto store.
+    """
+    apigateway_backend: APIGatewayBackend = apigateway_backends[account_id][region_name]
+    return [
+        usage_plan_key.to_json()
+        for usage_plan_key in apigateway_backend.usage_plan_keys.get(usage_plan_id, {}).values()
+    ]
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/parameters_mapping.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/parameters_mapping.py
new file mode 100644
index 0000000000000..bb723e58ea4ef
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/parameters_mapping.py
@@ -0,0 +1,298 @@
+# > This section explains how to set up data mappings from an API's method request data, including other data
+# stored in context, stage, or util variables, to the corresponding integration request parameters and from an
+# integration response data, including the other data, to the method response parameters. The method request
+# data includes request parameters (path, query string, headers) and the body. The integration response data
+# includes response parameters (headers) and the body. For more information about using the stage variables,
+# see API Gateway stage variables reference.
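To illustrate the mapping direction described above: the left-hand side of a `requestParameters` entry names the integration request location, and the right-hand side names the method request source or a static value. The sample values below are invented:

# Invented sample values, showing the shapes consumed and produced by the mapper in this module.
request_parameters = {
    "integration.request.header.X-Api-Key": "method.request.header.x-api-key",
    "integration.request.querystring.who": "'static-value'",
}
# map_integration_request(...) would then roughly produce:
expected_mapping = {
    "header": {"X-Api-Key": "<value of the incoming x-api-key header>"},
    "path": {},
    "querystring": {"who": "static-value"},
}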
+# +# https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html +import json +import logging +from typing import Any, TypedDict + +from localstack.utils.json import extract_jsonpath +from localstack.utils.strings import to_str + +from .context import EndpointResponse, InvocationRequest +from .gateway_response import BadRequestException, InternalFailureException +from .header_utils import build_multi_value_headers +from .variables import ContextVariables + +LOG = logging.getLogger(__name__) + + +class RequestDataMapping(TypedDict): + # Integration request parameters, in the form of path variables, query strings or headers, can be mapped from any + # defined method request parameters and the payload. + header: dict[str, str] + path: dict[str, str] + querystring: dict[str, str | list[str]] + + +class ResponseDataMapping(TypedDict): + # Method response header parameters can be mapped from any integration response header or integration response body, + # $context variables, or static values. + header: dict[str, str] + + +class ParametersMapper: + def map_integration_request( + self, + request_parameters: dict[str, str], + invocation_request: InvocationRequest, + context_variables: ContextVariables, + stage_variables: dict[str, str], + ) -> RequestDataMapping: + request_data_mapping = RequestDataMapping( + header={}, + path={}, + querystring={}, + ) + # storing the case-sensitive headers once, the mapping is strict + case_sensitive_headers = build_multi_value_headers(invocation_request["headers"]) + + for integration_mapping, request_mapping in request_parameters.items(): + # TODO: remove this once the validation has been added to the provider, to avoid breaking + if not isinstance(integration_mapping, str) or not isinstance(request_mapping, str): + LOG.warning( + "Wrong parameter mapping value type: %s: %s. They should both be string. Skipping this mapping.", + integration_mapping, + request_mapping, + ) + continue + + integration_param_location, param_name = integration_mapping.removeprefix( + "integration.request." 
+ ).split(".") + + if request_mapping.startswith("method.request."): + method_req_expr = request_mapping.removeprefix("method.request.") + value = self._retrieve_parameter_from_invocation_request( + method_req_expr, invocation_request, case_sensitive_headers + ) + + else: + value = self._retrieve_parameter_from_variables_and_static( + mapping_value=request_mapping, + context_variables=context_variables, + stage_variables=stage_variables, + ) + + if value: + request_data_mapping[integration_param_location][param_name] = value + + return request_data_mapping + + def map_integration_response( + self, + response_parameters: dict[str, str], + integration_response: EndpointResponse, + context_variables: ContextVariables, + stage_variables: dict[str, str], + ) -> ResponseDataMapping: + response_data_mapping = ResponseDataMapping(header={}) + + # storing the case-sensitive headers once, the mapping is strict + case_sensitive_headers = build_multi_value_headers(integration_response["headers"]) + + for response_mapping, integration_mapping in response_parameters.items(): + header_name = response_mapping.removeprefix("method.response.header.") + + if integration_mapping.startswith("integration.response."): + method_req_expr = integration_mapping.removeprefix("integration.response.") + value = self._retrieve_parameter_from_integration_response( + method_req_expr, integration_response, case_sensitive_headers + ) + else: + value = self._retrieve_parameter_from_variables_and_static( + mapping_value=integration_mapping, + context_variables=context_variables, + stage_variables=stage_variables, + ) + + if value: + response_data_mapping["header"][header_name] = value + + return response_data_mapping + + def _retrieve_parameter_from_variables_and_static( + self, + mapping_value: str, + context_variables: dict[str, Any], + stage_variables: dict[str, str], + ) -> str | None: + if mapping_value.startswith("context."): + context_var_expr = mapping_value.removeprefix("context.") + return self._retrieve_parameter_from_context_variables( + context_var_expr, context_variables + ) + + elif mapping_value.startswith("stageVariables."): + stage_var_name = mapping_value.removeprefix("stageVariables.") + return self._retrieve_parameter_from_stage_variables(stage_var_name, stage_variables) + + elif mapping_value.startswith("'") and mapping_value.endswith("'"): + return mapping_value.strip("'") + + else: + LOG.warning( + "Unrecognized parameter mapping value: '%s'. 
Skipping this mapping.",
+                mapping_value,
+            )
+            return None
+
+    def _retrieve_parameter_from_integration_response(
+        self,
+        expr: str,
+        integration_response: EndpointResponse,
+        case_sensitive_headers: dict[str, list[str]],
+    ) -> str | None:
+        """
+        See https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html#mapping-response-parameters
+        :param expr: mapping expression stripped of `integration.response.`:
+            can be one of the following: `header.<name>`, `multivalueheader.<name>`, `body` and
+            `body.<JSONPath expression>`
+        :param integration_response: the Response to map parameters from
+        :return: the value to map in the ResponseDataMapping
+        """
+        if expr.startswith("body"):
+            body = integration_response.get("body") or b"{}"
+            body = body.strip()
+            try:
+                decoded_body = self._json_load(body)
+            except ValueError:
+                raise InternalFailureException(message="Internal server error")
+
+            if expr == "body":
+                return to_str(body)
+
+            elif expr.startswith("body."):
+                json_path = expr.removeprefix("body.")
+                return self._get_json_path_from_dict(decoded_body, json_path)
+            else:
+                LOG.warning(
+                    "Unrecognized integration.response parameter: '%s'. Skipping the parameter mapping.",
+                    expr,
+                )
+                return None
+
+        param_type, param_name = expr.split(".")
+
+        if param_type == "header":
+            if header := case_sensitive_headers.get(param_name):
+                return header[-1]
+
+        elif param_type == "multivalueheader":
+            if header := case_sensitive_headers.get(param_name):
+                return ",".join(header)
+
+        else:
+            LOG.warning(
+                "Unrecognized integration.response parameter: '%s'. Skipping the parameter mapping.",
+                expr,
+            )
+
+    def _retrieve_parameter_from_invocation_request(
+        self,
+        expr: str,
+        invocation_request: InvocationRequest,
+        case_sensitive_headers: dict[str, list[str]],
+    ) -> str | list[str] | None:
+        """
+        See https://docs.aws.amazon.com/apigateway/latest/developerguide/request-response-data-mappings.html#mapping-response-parameters
+        :param expr: mapping expression stripped of `method.request.`:
+            can be one of the following: `path.<name>`, `querystring.<name>`,
+            `multivaluequerystring.<name>`, `header.<name>`, `multivalueheader.<name>`,
+            `body` and `body.<JSONPath expression>`
+        :param invocation_request: the InvocationRequest to map parameters from
+        :return: the value to map in the RequestDataMapping
+        """
+        if expr.startswith("body"):
+            body = invocation_request["body"] or b"{}"
+            body = body.strip()
+            try:
+                decoded_body = self._json_load(body)
+            except ValueError:
+                raise BadRequestException(message="Invalid JSON in request body")
+
+            if expr == "body":
+                return to_str(body)
+
+            elif expr.startswith("body."):
+                json_path = expr.removeprefix("body.")
+                return self._get_json_path_from_dict(decoded_body, json_path)
+            else:
+                LOG.warning(
+                    "Unrecognized method.request parameter: '%s'. Skipping the parameter mapping.",
+                    expr,
+                )
+                return None
+
+        param_type, param_name = expr.split(".")
+        if param_type == "path":
+            return invocation_request["path_parameters"].get(param_name)
+
+        elif param_type == "querystring":
+            multi_qs_params = invocation_request["multi_value_query_string_parameters"].get(
+                param_name
+            )
+            if multi_qs_params:
+                return multi_qs_params[-1]
+
+        elif param_type == "multivaluequerystring":
+            multi_qs_params = invocation_request["multi_value_query_string_parameters"].get(
+                param_name
+            )
+            # guard against a missing parameter before taking its length
+            if not multi_qs_params:
+                return None
+            if len(multi_qs_params) == 1:
+                return multi_qs_params[0]
+            return multi_qs_params
+
+        elif param_type == "header":
+            if header := case_sensitive_headers.get(param_name):
+                return header[-1]
+
+        elif param_type == "multivalueheader":
+            if header := case_sensitive_headers.get(param_name):
+                return ",".join(header)
+
+        else:
+            LOG.warning(
+                "Unrecognized method.request parameter: '%s'. Skipping the parameter mapping.",
+                expr,
+            )
+
+    def _retrieve_parameter_from_context_variables(
+        self, expr: str, context_variables: dict[str, Any]
+    ) -> str | None:
+        # we're using JSONPath here because we could access nested properties like `context.identity.sourceIp`
+        if (value := self._get_json_path_from_dict(context_variables, expr)) and isinstance(
+            value, str
+        ):
+            return value
+
+    @staticmethod
+    def _retrieve_parameter_from_stage_variables(
+        stage_var_name: str, stage_variables: dict[str, str]
+    ) -> str | None:
+        return stage_variables.get(stage_var_name)
+
+    @staticmethod
+    def _get_json_path_from_dict(body: dict, path: str) -> str | None:
+        # TODO: verify we don't have special cases
+        try:
+            return extract_jsonpath(body, f"$.{path}")
+        except KeyError:
+            return None
+
+    @staticmethod
+    def _json_load(body: bytes) -> dict | list:
+        """
+        AWS only tries to JSON-decode the body if it starts with some leading characters ({, [, ", ');
+        otherwise, it ignores it.
+        :param body: the raw body bytes
+        :return: the decoded JSON, or an empty dict if the body does not look like JSON
+        """
+        if any(body.startswith(c) for c in (b"{", b"[", b"'", b'"')):
+            return json.loads(body)
+
+        return {}
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py
new file mode 100644
index 0000000000000..6c0ca3245164b
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/router.py
@@ -0,0 +1,222 @@
+import logging
+from typing import TypedDict, Unpack
+
+from rolo import Request, Router
+from rolo.routing.handler import Handler
+from werkzeug.routing import Rule
+
+from localstack.aws.api.apigateway import Stage
+from localstack.constants import APPLICATION_JSON, AWS_REGION_US_EAST_1, DEFAULT_AWS_ACCOUNT_ID
+from localstack.deprecations import deprecated_endpoint
+from localstack.http import Response
+from localstack.services.apigateway.models import ApiGatewayStore, apigateway_stores
+from localstack.services.edge import ROUTER
+from localstack.services.stores import AccountRegionBundle
+
+from .context import RestApiInvocationContext
+from .gateway import RestApiGateway
+from .helpers import should_divert_to_canary
+from .moto_helpers import get_stage_configuration
+
+LOG = logging.getLogger(__name__)
+
+
+class RouteHostPathParameters(TypedDict, total=False):
+    """
+    Represents the kwargs typing for calling APIGatewayEndpoint.
+    Each field might be populated from the route host and path parameters, defined when registering a route.
+    """
+
+    api_id: str
+    path: str
+    port: int | None
+    server: str | None
+    stage: str | None
+    vpce_suffix: str | None
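As an illustration (hypothetical values), a host-based invocation URL would decompose into these fields:

# Hypothetical example: https://abc123.execute-api.localhost.localstack.cloud:4566/dev/orders/42
route_params = {
    "api_id": "abc123",
    "stage": "dev",
    "path": "orders/42",
    "server": "localhost.localstack.cloud",
    "port": 4566,
}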
+ """ + + api_id: str + path: str + port: int | None + server: str | None + stage: str | None + vpce_suffix: str | None + + +class ApiGatewayEndpoint: + """ + This class is the endpoint for API Gateway invocations of the `execute-api` route. It will take the incoming + invocation request, create a context from the API matching the route parameters, and dispatch the request to the + Gateway to be processed by the handler chain. + """ + + def __init__(self, rest_gateway: RestApiGateway = None, store: AccountRegionBundle = None): + self.rest_gateway = rest_gateway or RestApiGateway() + # we only access CrossAccount attributes in the handler, so we use a global store in default account and region + self._store = store or apigateway_stores + + @property + def _global_store(self) -> ApiGatewayStore: + return self._store[DEFAULT_AWS_ACCOUNT_ID][AWS_REGION_US_EAST_1] + + def __call__(self, request: Request, **kwargs: Unpack[RouteHostPathParameters]) -> Response: + """ + :param request: the incoming Request object + :param kwargs: can contain all the field of RouteHostPathParameters. Those values are defined on the registered + routes in ApiGatewayRouter, through host and path parameters in the shape or only. + :return: the Response object to return to the client + """ + # api_id can be cased because of custom-tag id + api_id, stage = kwargs.get("api_id", "").lower(), kwargs.get("stage") + if self.is_rest_api(api_id, stage): + context, response = self.prepare_rest_api_invocation(request, api_id, stage) + self.rest_gateway.process_with_context(context, response) + return response + else: + return self.create_not_found_response(api_id) + + def prepare_rest_api_invocation( + self, request: Request, api_id: str, stage: str + ) -> tuple[RestApiInvocationContext, Response]: + LOG.debug("APIGW v1 Endpoint called") + response = self.create_response(request) + context = RestApiInvocationContext(request) + self.populate_rest_api_invocation_context(context, api_id, stage) + + return context, response + + def is_rest_api(self, api_id: str, stage: str): + return stage in self._global_store.active_deployments.get(api_id, {}) + + def populate_rest_api_invocation_context( + self, context: RestApiInvocationContext, api_id: str, stage: str + ): + try: + deployment_id = self._global_store.active_deployments[api_id][stage] + frozen_deployment = self._global_store.internal_deployments[api_id][deployment_id] + + except KeyError: + # TODO: find proper error when trying to hit an API with no deployment/stage linked + return + + stage_configuration = self.fetch_stage_configuration( + account_id=frozen_deployment.account_id, + region=frozen_deployment.region, + api_id=api_id, + stage_name=stage, + ) + if canary_settings := stage_configuration.get("canarySettings"): + if should_divert_to_canary(canary_settings["percentTraffic"]): + deployment_id = canary_settings["deploymentId"] + frozen_deployment = self._global_store.internal_deployments[api_id][deployment_id] + context.is_canary = True + else: + context.is_canary = False + + context.deployment = frozen_deployment + context.api_id = api_id + context.stage = stage + context.stage_configuration = stage_configuration + context.deployment_id = deployment_id + + @staticmethod + def fetch_stage_configuration( + account_id: str, region: str, api_id: str, stage_name: str + ) -> Stage: + # this will be migrated once we move away from Moto, so we won't need the helper anymore and the logic will + # be implemented here + stage_variables = get_stage_configuration( + 
+
+    @staticmethod
+    def fetch_stage_configuration(
+        account_id: str, region: str, api_id: str, stage_name: str
+    ) -> Stage:
+        # this will be migrated once we move away from Moto; then we won't need the helper anymore and the logic
+        # will be implemented here
+        stage_configuration = get_stage_configuration(
+            account_id=account_id,
+            region=region,
+            api_id=api_id,
+            stage_name=stage_name,
+        )
+
+        return stage_configuration
+
+    @staticmethod
+    def create_response(request: Request) -> Response:
+        # Creates a default apigw response.
+        response = Response(headers={"Content-Type": APPLICATION_JSON})
+        if not (connection := request.headers.get("Connection")) or connection != "close":
+            # We only set the connection if it isn't "close".
+            # There appears to be an issue in LocalStack where setting "close" will result in "close, close"
+            response.headers.set("Connection", "keep-alive")
+        return response
+
+    @staticmethod
+    def create_not_found_response(api_id: str) -> Response:
+        not_found = Response(status=404)
+        not_found.set_json(
+            {"message": f"The API id '{api_id}' does not correspond to a deployed API Gateway API"}
+        )
+        return not_found
+
+
+class ApiGatewayRouter:
+    router: Router[Handler]
+    handler: ApiGatewayEndpoint
+    EXECUTE_API_INTERNAL_PATH = "/_aws/execute-api"
+
+    def __init__(self, router: Router[Handler] = None, handler: ApiGatewayEndpoint = None):
+        self.router = router or ROUTER
+        self.handler = handler or ApiGatewayEndpoint()
+        self.registered_rules: list[Rule] = []
+
+    def register_routes(self) -> None:
+        LOG.debug("Registering API Gateway routes.")
+        host_pattern = "<regex('[^-]+'):api_id><regex('(-vpce-[^.]*)?'):vpce_suffix>.execute-api.<regex('.*'):server>"
+        deprecated_route_endpoint = deprecated_endpoint(
+            endpoint=self.handler,
+            previous_path="/restapis/<api_id>/<stage>/_user_request_",
+            deprecation_version="3.8.0",
+            new_path=f"{self.EXECUTE_API_INTERNAL_PATH}/<api_id>/<stage>",
+        )
+        rules = [
+            self.router.add(
+                path="/",
+                host=host_pattern,
+                endpoint=self.handler,
+                defaults={"path": "", "stage": None},
+                strict_slashes=True,
+            ),
+            self.router.add(
+                path="/<stage>/",
+                host=host_pattern,
+                endpoint=self.handler,
+                defaults={"path": ""},
+                strict_slashes=False,
+            ),
+            self.router.add(
+                path="/<stage>/<greedy_path:path>",
+                host=host_pattern,
+                endpoint=self.handler,
+                strict_slashes=True,
+            ),
+            # add the deprecated localstack-specific _user_request_ routes
+            self.router.add(
+                path="/restapis/<api_id>/<stage>/_user_request_",
+                endpoint=deprecated_route_endpoint,
+                defaults={"path": "", "random": "?"},
+            ),
+            self.router.add(
+                path="/restapis/<api_id>/<stage>/_user_request_/<greedy_path:path>",
+                endpoint=deprecated_route_endpoint,
+                strict_slashes=True,
+            ),
+            # add the localstack-specific so-called "path-style" routes, used when DNS resolving is not possible
+            self.router.add(
+                path=f"{self.EXECUTE_API_INTERNAL_PATH}/<api_id>/",
+                endpoint=self.handler,
+                defaults={"path": "", "stage": None},
+                strict_slashes=True,
+            ),
+            self.router.add(
+                path=f"{self.EXECUTE_API_INTERNAL_PATH}/<api_id>/<stage>/",
+                endpoint=self.handler,
+                defaults={"path": ""},
+                strict_slashes=False,
+            ),
+            self.router.add(
+                path=f"{self.EXECUTE_API_INTERNAL_PATH}/<api_id>/<stage>/<greedy_path:path>",
+                endpoint=self.handler,
+                strict_slashes=True,
+            ),
+        ]
+        for rule in rules:
+            self.registered_rules.append(rule)
+
+    def unregister_routes(self):
+        self.router.remove(self.registered_rules)
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/template_mapping.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/template_mapping.py
new file mode 100644
index 0000000000000..01beb0114f598
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/template_mapping.py
@@ -0,0 +1,296 @@
+# > In API Gateway, an API's method request or response can take a payload in a different format from the integration
+# request or response.
+#
+# You can transform your data to:
+# - Match the payload to an API-specified format.
+# - Override an API's request and response parameters and status codes.
+# - Return client selected response headers.
+# - Associate path parameters, query string parameters, or header parameters in the method request of HTTP proxy
+#   or AWS service proxy. TODO: this is from the documentation. Can we use requestOverrides for proxy integrations?
+# - Select which data to send using integration with AWS services, such as Amazon DynamoDB or Lambda functions,
+#   or HTTP endpoints.
+#
+# You can use mapping templates to transform your data. A mapping template is a script expressed in Velocity Template
+# Language (VTL) and applied to the payload using JSONPath expressions.
+#
+# https://docs.aws.amazon.com/apigateway/latest/developerguide/models-mappings.html
+import base64
+import copy
+import json
+import logging
+from typing import Any, TypedDict
+from urllib.parse import quote_plus, unquote_plus
+
+import airspeed
+from airspeed.operators import dict_to_string
+
+from localstack import config
+from localstack.services.apigateway.next_gen.execute_api.variables import (
+    ContextVariableOverrides,
+    ContextVariables,
+    ContextVarsResponseOverride,
+)
+from localstack.utils.aws.templating import APIGW_SOURCE, VelocityUtil, VtlTemplate
+from localstack.utils.json import extract_jsonpath, json_safe
+
+LOG = logging.getLogger(__name__)
+
+
+class MappingTemplateParams(TypedDict, total=False):
+    path: dict[str, str]
+    querystring: dict[str, str]
+    header: dict[str, str]
+
+
+class MappingTemplateInput(TypedDict, total=False):
+    body: str
+    params: MappingTemplateParams
+
+
+class MappingTemplateVariables(TypedDict, total=False):
+    context: ContextVariables
+    input: MappingTemplateInput
+    stageVariables: dict[str, str]
+
+
+def cast_to_vtl_object(value):
+    if isinstance(value, dict):
+        return VTLMap(value)
+    if isinstance(value, list):
+        return [cast_to_vtl_object(item) for item in value]
+    return value
+
+
+def cast_to_vtl_json_object(value: Any) -> Any:
+    if isinstance(value, dict):
+        return VTLJsonDict(value)
+    if isinstance(value, list):
+        return VTLJsonList(value)
+    return value
+
+
+class VTLMap(dict):
+    """Overrides __str__ of a Python dict (and all child dicts) to return a Java-like string representation"""
+
+    # TODO: apply this class more generally through the template mappings
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.update(*args, **kwargs)
+
+    @staticmethod
+    def cast_factory(value: Any) -> Any:
+        return cast_to_vtl_object(value)
+
+    def update(self, *args, **kwargs):
+        for k, v in self.items():
+            self[k] = self.cast_factory(v)
+
+    def __str__(self) -> str:
+        return dict_to_string(self)
+
+
+class VTLJsonList(list):
+    """Some VTL lists behave differently when represented as a string: everything
+    inside will be represented as a JSON string
+
+    Example: $input.path('$').b  // Where path is {"a": 1, "b": [{"c": 5}]}
+    Results: '[{"c":5}]'         // Where everything inside the list is a valid JSON object
+    """
+
+    def __init__(self, *args):
+        super(VTLJsonList, self).__init__(*args)
+        for idx, item in enumerate(self):
+            self[idx] = cast_to_vtl_json_object(item)
+
+    def __str__(self):
+        if isinstance(self, list):
+            return json.dumps(self, separators=(",", ":"))
+
+
+class VTLJsonDict(VTLMap):
+    """Some VTL maps behave differently when represented as a string: a list
+    encountered in the dictionary will be represented as a JSON string
+
+    Example: $input.path('$')      // Where path is {"a": 1, "b": [{"c": 5}]}
+    Results: '{a=1, b=[{"c":5}]}'  // Where everything
inside the list is a valid json object + """ + + @staticmethod + def cast_factory(value: Any) -> Any: + return cast_to_vtl_json_object(value) + + +class AttributeDict(dict): + """ + Wrapper returned by VelocityUtilApiGateway.parseJson to allow access to dict values as attributes (dot notation), + e.g.: $util.parseJson('$.foo').bar + """ + + def __init__(self, *args, **kwargs): + super(AttributeDict, self).__init__(*args, **kwargs) + for key, value in self.items(): + if isinstance(value, dict): + self[key] = AttributeDict(value) + + def __getattr__(self, name): + if name in self: + return self[name] + raise AttributeError(f"'AttributeDict' object has no attribute '{name}'") + + def __setattr__(self, name, value): + self[name] = value + + def __delattr__(self, name): + if name in self: + del self[name] + else: + raise AttributeError(f"'AttributeDict' object has no attribute '{name}'") + + +class VelocityUtilApiGateway(VelocityUtil): + """ + Simple class to mimic the behavior of variable '$util' in AWS API Gateway integration + velocity templates. + See: https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html + """ + + def base64Encode(self, s): + if not isinstance(s, str): + s = json.dumps(s) + encoded_str = s.encode(config.DEFAULT_ENCODING) + encoded_b64_str = base64.b64encode(encoded_str) + return encoded_b64_str.decode(config.DEFAULT_ENCODING) + + def base64Decode(self, s): + if not isinstance(s, str): + s = json.dumps(s) + return base64.b64decode(s) + + def toJson(self, obj): + return obj and json.dumps(obj) + + def urlEncode(self, s): + return quote_plus(s) + + def urlDecode(self, s): + return unquote_plus(s) + + def escapeJavaScript(self, obj: Any) -> str: + """ + Converts the given object to a string and escapes any regular single quotes (') into escaped ones (\'). + JSON dumps will escape the single quotes. + https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html + """ + if obj is None: + return "null" + if isinstance(obj, str): + # empty string escapes to empty object + if len(obj.strip()) == 0: + return "{}" + return json.dumps(obj)[1:-1] + if obj in (True, False): + return str(obj).lower() + return str(obj) + + def parseJson(self, s: str): + obj = json.loads(s) + return AttributeDict(obj) if isinstance(obj, dict) else obj + + +class VelocityInput: + """ + Simple class to mimic the behavior of variable '$input' in AWS API Gateway integration + velocity templates. 
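A standalone usage sketch of the `$input` semantics this class implements; the sample body and parameters are invented:

# Invented sample values, mirroring the VelocityInput behavior defined here.
body = '{"user": {"name": "test"}, "ids": [1, 2]}'
params = {"path": {}, "querystring": {"id": "42"}, "header": {}}
velocity_input = VelocityInput(body, params)

velocity_input.params("id")        # "42": searches path, then querystring, then header
velocity_input.json("$.user")      # '{"name": "test"}': always a JSON string
str(velocity_input.path("$.ids"))  # '[1,2]': lists stringify to compact JSON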
+    See: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+    """
+
+    def __init__(self, body, params):
+        self.parameters = params or {}
+        self.value = body
+
+    def _extract_json_path(self, path):
+        if not self.value:
+            return {}
+        value = self.value if isinstance(self.value, dict) else json.loads(self.value)
+        return extract_jsonpath(value, path)
+
+    def path(self, path):
+        return cast_to_vtl_json_object(self._extract_json_path(path))
+
+    def json(self, path):
+        path = path or "$"
+        matching = self._extract_json_path(path)
+        if isinstance(matching, (list, dict)):
+            matching = json_safe(matching)
+        return json.dumps(matching)
+
+    @property
+    def body(self):
+        if not self.value:
+            return "{}"
+
+        return self.value
+
+    def params(self, name=None):
+        if not name:
+            return self.parameters
+        for k in ["path", "querystring", "header"]:
+            # default to an empty dict: not all parameter locations need to be present
+            if val := self.parameters.get(k, {}).get(name):
+                return val
+        return ""
+
+    def __getattr__(self, name):
+        return self.value.get(name)
+
+    def __repr__(self):
+        return "$input"
+
+
+class ApiGatewayVtlTemplate(VtlTemplate):
+    """Util class for rendering VTL templates with API Gateway specific extensions"""
+
+    def prepare_namespace(self, variables, source: str = APIGW_SOURCE) -> dict[str, Any]:
+        namespace = super().prepare_namespace(variables, source)
+        input_var = variables.get("input") or {}
+        variables = {
+            "input": VelocityInput(input_var.get("body"), input_var.get("params")),
+            "util": VelocityUtilApiGateway(),
+        }
+        namespace.update(variables)
+        return namespace
+
+    def render_request(
+        self,
+        template: str,
+        variables: MappingTemplateVariables,
+        context_overrides: ContextVariableOverrides,
+    ) -> tuple[str, ContextVariableOverrides]:
+        variables_copy: MappingTemplateVariables = copy.deepcopy(variables)
+        variables_copy["context"].update(copy.deepcopy(context_overrides))
+        result = self.render_vtl(template=template.strip(), variables=variables_copy)
+        return result, ContextVariableOverrides(
+            requestOverride=variables_copy["context"]["requestOverride"],
+            responseOverride=variables_copy["context"]["responseOverride"],
+        )
+
+    def render_response(
+        self,
+        template: str,
+        variables: MappingTemplateVariables,
+        context_overrides: ContextVariableOverrides,
+    ) -> tuple[str, ContextVarsResponseOverride]:
+        variables_copy: MappingTemplateVariables = copy.deepcopy(variables)
+        variables_copy["context"].update(copy.deepcopy(context_overrides))
+        result = self.render_vtl(template=template.strip(), variables=variables_copy)
+        return result, variables_copy["context"]["responseOverride"]
+
+
+# patches required to allow our custom class operations in VTL templates processed by airspeed
+airspeed.operators.__additional_methods__[VTLMap] = airspeed.operators.__additional_methods__[dict]
+airspeed.operators.__additional_methods__[VTLJsonDict] = airspeed.operators.__additional_methods__[
+    dict
+]
+airspeed.operators.__additional_methods__[VTLJsonList] = airspeed.operators.__additional_methods__[
+    list
+]
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py
new file mode 100644
index 0000000000000..0d871077aa707
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/test_invoke.py
@@ -0,0 +1,214 @@
+import datetime
+from urllib.parse import parse_qs
+
+from rolo import Request
+from rolo.gateway.chain import HandlerChain
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.apigateway import TestInvokeMethodRequest, TestInvokeMethodResponse
+from localstack.constants import APPLICATION_JSON
+from localstack.http import Response
+from localstack.utils.strings import to_bytes, to_str
+
+from ...models import RestApiDeployment
+from . import handlers
+from .context import InvocationRequest, RestApiInvocationContext
+from .handlers.resource_router import RestAPIResourceRouter
+from .header_utils import build_multi_value_headers
+from .template_mapping import dict_to_string
+from .variables import (
+    ContextVariableOverrides,
+    ContextVarsRequestOverride,
+    ContextVarsResponseOverride,
+)
+
+# TODO: we probably need to write and populate those logs as part of the handler chain itself
+#  and store them in the InvocationContext. That way, we could also retrieve them when calling TestInvoke
+
+TEST_INVOKE_TEMPLATE = """Execution log for request {request_id}
+{formatted_date} : Starting execution for request: {request_id}
+{formatted_date} : HTTP Method: {request_method}, Resource Path: {resource_path}
+{formatted_date} : Method request path: {method_request_path_parameters}
+{formatted_date} : Method request query string: {method_request_query_string}
+{formatted_date} : Method request headers: {method_request_headers}
+{formatted_date} : Method request body before transformations: {method_request_body}
+{formatted_date} : Endpoint request URI: {endpoint_uri}
+{formatted_date} : Endpoint request headers: {endpoint_request_headers}
+{formatted_date} : Endpoint request body after transformations: {endpoint_request_body}
+{formatted_date} : Sending request to {endpoint_uri}
+{formatted_date} : Received response. Status: {endpoint_response_status_code}, Integration latency: {endpoint_response_latency} ms
+{formatted_date} : Endpoint response headers: {endpoint_response_headers}
+{formatted_date} : Endpoint response body before transformations: {endpoint_response_body}
+{formatted_date} : Method response body after transformations: {method_response_body}
+{formatted_date} : Method response headers: {method_response_headers}
+{formatted_date} : Successfully completed execution
+{formatted_date} : Method completed with status: {method_response_status}
+"""
+
+
+def _dump_headers(headers: Headers) -> str:
+    if not headers:
+        return "{}"
+    multi_headers = {key: ",".join(headers.getlist(key)) for key in headers.keys()}
+    string_headers = dict_to_string(multi_headers)
+    if len(string_headers) > 998:
+        return f"{string_headers[:998]} [TRUNCATED]"
+
+    return string_headers
+
+
+def log_template(invocation_context: RestApiInvocationContext, response_headers: Headers) -> str:
+    # TODO: funnily enough, for the `endpoint_response_headers` in AWS_PROXY, AWS logs the response headers of the
+    #  Lambda HTTP invoke call, even though we use the headers from the Lambda response itself
+    formatted_date = datetime.datetime.now(tz=datetime.UTC).strftime("%a %b %d %H:%M:%S %Z %Y")
+    request = invocation_context.invocation_request
+    context_var = invocation_context.context_variables
+    integration_req = invocation_context.integration_request
+    endpoint_resp = invocation_context.endpoint_response
+    method_resp = invocation_context.invocation_response
+    # TODO: if endpoint_uri is an ARN, it means it's an AWS_PROXY integration
+    #  this should be transformed to the true URL of a lambda invoke call
+    endpoint_uri = integration_req.get("uri", "")
+
+    return TEST_INVOKE_TEMPLATE.format(
+        formatted_date=formatted_date,
+        request_id=context_var["requestId"],
resource_path=request["path"], + request_method=request["http_method"], + method_request_path_parameters=dict_to_string(request["path_parameters"]), + method_request_query_string=dict_to_string(request["query_string_parameters"]), + method_request_headers=_dump_headers(request.get("headers")), + method_request_body=to_str(request.get("body", "")), + endpoint_uri=endpoint_uri, + endpoint_request_headers=_dump_headers(integration_req.get("headers")), + endpoint_request_body=to_str(integration_req.get("body", "")), + # TODO: measure integration latency + endpoint_response_latency=150, + endpoint_response_status_code=endpoint_resp.get("status_code"), + endpoint_response_body=to_str(endpoint_resp.get("body", "")), + endpoint_response_headers=_dump_headers(endpoint_resp.get("headers")), + method_response_status=method_resp.get("status_code"), + method_response_body=to_str(method_resp.get("body", "")), + method_response_headers=_dump_headers(response_headers), + ) + + +def create_test_chain() -> HandlerChain[RestApiInvocationContext]: + return HandlerChain( + request_handlers=[ + handlers.method_request_handler, + handlers.integration_request_handler, + handlers.integration_handler, + handlers.integration_response_handler, + handlers.method_response_handler, + ], + exception_handlers=[ + handlers.gateway_exception_handler, + ], + ) + + +def create_test_invocation_context( + test_request: TestInvokeMethodRequest, + deployment: RestApiDeployment, +) -> RestApiInvocationContext: + parse_handler = handlers.parse_request + http_method = test_request["httpMethod"] + + # we do not need a true HTTP request for the context, as we are skipping all the parsing steps and using the + # provider data + invocation_context = RestApiInvocationContext( + request=Request(method=http_method), + ) + path_query = test_request.get("pathWithQueryString", "/").split("?") + path = path_query[0] + multi_query_args: dict[str, list[str]] = {} + + if len(path_query) > 1: + multi_query_args = parse_qs(path_query[1]) + + # for the single value parameters, AWS only keeps the last value of the list + single_query_args = {k: v[-1] for k, v in multi_query_args.items()} + + invocation_request = InvocationRequest( + http_method=http_method, + path=path, + raw_path=path, + query_string_parameters=single_query_args, + multi_value_query_string_parameters=multi_query_args, + headers=Headers(test_request.get("headers")), + # TODO: handle multiValueHeaders + body=to_bytes(test_request.get("body") or ""), + ) + invocation_context.invocation_request = invocation_request + + _, path_parameters = RestAPIResourceRouter(deployment).match(invocation_context) + invocation_request["path_parameters"] = path_parameters + + invocation_context.deployment = deployment + invocation_context.api_id = test_request["restApiId"] + invocation_context.stage = None + invocation_context.deployment_id = "" + invocation_context.account_id = deployment.account_id + invocation_context.region = deployment.region + invocation_context.stage_variables = test_request.get("stageVariables", {}) + invocation_context.context_variables = parse_handler.create_context_variables( + invocation_context + ) + invocation_context.context_variable_overrides = ContextVariableOverrides( + requestOverride=ContextVarsRequestOverride(header={}, path={}, querystring={}), + responseOverride=ContextVarsResponseOverride(header={}, status=0), + ) + invocation_context.trace_id = parse_handler.populate_trace_id({}) + resource = deployment.rest_api.resources[test_request["resourceId"]] + 
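The context assembled here serves the public `TestInvokeMethod` operation; for reference, a hedged client-side sketch (placeholder ids) of the corresponding call:

# Illustrative boto3 call (placeholder ids) exercising TestInvokeMethod.
import boto3

apigw = boto3.client("apigateway")
response = apigw.test_invoke_method(
    restApiId="<api-id>",        # placeholder
    resourceId="<resource-id>",  # placeholder
    httpMethod="GET",
    pathWithQueryString="/orders?id=42",
    headers={"Accept": "application/json"},
)
print(response["status"], response["log"])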
resource_method = resource["resourceMethods"][http_method]
+    invocation_context.resource = resource
+    invocation_context.resource_method = resource_method
+    invocation_context.integration = resource_method["methodIntegration"]
+    handlers.route_request.update_context_variables_with_resource(
+        invocation_context.context_variables, resource
+    )
+
+    return invocation_context
+
+
+def run_test_invocation(
+    test_request: TestInvokeMethodRequest, deployment: RestApiDeployment
+) -> TestInvokeMethodResponse:
+    # validate that the resource exists in the deployment
+    invocation_context = create_test_invocation_context(test_request, deployment)
+
+    test_chain = create_test_chain()
+    # header order is important
+    if invocation_context.integration["type"] == "MOCK":
+        base_headers = {"Content-Type": APPLICATION_JSON}
+    else:
+        # we manually add the trace-id, as it is normally added by handlers.response_enricher, which adds too much
+        # data for the TestInvoke. It needs to be first
+        base_headers = {
+            "X-Amzn-Trace-Id": invocation_context.trace_id,
+            "Content-Type": APPLICATION_JSON,
+        }
+
+    test_response = Response(headers=base_headers)
+    start_time = datetime.datetime.now()
+    test_chain.handle(context=invocation_context, response=test_response)
+    end_time = datetime.datetime.now()
+
+    response_headers = test_response.headers.copy()
+    # AWS does not return the Content-Length for TestInvokeMethod
+    response_headers.remove("Content-Length")
+
+    log = log_template(invocation_context, response_headers)
+
+    headers = dict(response_headers)
+    multi_value_headers = build_multi_value_headers(response_headers)
+
+    return TestInvokeMethodResponse(
+        log=log,
+        status=test_response.status_code,
+        body=test_response.get_data(as_text=True),
+        headers=headers,
+        multiValueHeaders=multi_value_headers,
+        # the latency is reported in milliseconds
+        latency=int((end_time - start_time).total_seconds() * 1000),
+    )
diff --git a/localstack-core/localstack/services/apigateway/next_gen/execute_api/variables.py b/localstack-core/localstack/services/apigateway/next_gen/execute_api/variables.py
new file mode 100644
index 0000000000000..e457c61180353
--- /dev/null
+++ b/localstack-core/localstack/services/apigateway/next_gen/execute_api/variables.py
@@ -0,0 +1,195 @@
+from typing import Optional, TypedDict
+
+
+class ContextVarsAuthorizer(TypedDict, total=False):
+    # this is merged with the Context returned by the Authorizer, which can attach any property to this dict in string
+    # format
+
+    # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
+    claims: Optional[dict[str, str]]
+    """Claims returned from the Amazon Cognito user pool after the method caller is successfully authenticated"""
+    principalId: Optional[str]
+    """The principal user identification associated with the token sent by the client and returned from an API Gateway Lambda authorizer"""
+
+
+class ContextVarsIdentityClientCertValidity(TypedDict, total=False):
+    notBefore: str
+    notAfter: str
+
+
+class ContextVarsIdentityClientCert(TypedDict, total=False):
+    """Certificate that a client presents.
Present only in access logs if mutual TLS authentication fails.""" + + clientCertPem: str + subjectDN: str + issuerDN: str + serialNumber: str + validity: ContextVarsIdentityClientCertValidity + + +class ContextVarsIdentity(TypedDict, total=False): + # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html + accountId: Optional[str] + """The AWS account ID associated with the request.""" + accessKey: Optional[str] + """The AWS access key associated with the request.""" + apiKey: Optional[str] + """For API methods that require an API key, this variable is the API key associated with the method request.""" + apiKeyId: Optional[str] + """The API key ID associated with an API request that requires an API key.""" + caller: Optional[str] + """The principal identifier of the caller that signed the request. Supported for resources that use IAM authorization.""" + cognitoAuthenticationProvider: Optional[str] + """A comma-separated list of the Amazon Cognito authentication providers used by the caller making the request""" + cognitoAuthenticationType: Optional[str] + """The Amazon Cognito authentication type of the caller making the request""" + cognitoIdentityId: Optional[str] + """The Amazon Cognito identity ID of the caller making the request""" + cognitoIdentityPoolId: Optional[str] + """The Amazon Cognito identity pool ID of the caller making the request""" + principalOrgId: Optional[str] + """The AWS organization ID.""" + sourceIp: Optional[str] + """The source IP address of the immediate TCP connection making the request to the API Gateway endpoint""" + clientCert: ContextVarsIdentityClientCert + vpcId: Optional[str] + """The VPC ID of the VPC making the request to the API Gateway endpoint.""" + vpceId: Optional[str] + """The VPC endpoint ID of the VPC endpoint making the request to the API Gateway endpoint.""" + user: Optional[str] + """The principal identifier of the user that will be authorized against resource access for resources that use IAM authorization.""" + userAgent: Optional[str] + """The User-Agent header of the API caller.""" + userArn: Optional[str] + """The Amazon Resource Name (ARN) of the effective user identified after authentication.""" + + +class ContextVarsRequestOverride(TypedDict, total=False): + header: dict[str, str] + path: dict[str, str] + querystring: dict[str, str] + + +class ContextVarsResponseOverride(TypedDict): + header: dict[str, str] + status: int + + +class ContextVariableOverrides(TypedDict): + requestOverride: ContextVarsRequestOverride + responseOverride: ContextVarsResponseOverride + + +class GatewayResponseContextVarsError(TypedDict, total=False): + # This variable can only be used for simple variable substitution in a GatewayResponse body-mapping template, + # which is not processed by the Velocity Template Language engine, and in access logging. 
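As a hedged illustration of that "simple variable substitution" (plain string replacement, no VTL engine; the values below are invented):

# Invented illustration: how $context.error variables could be substituted into a
# gateway response body-mapping template without going through the VTL engine.
template = '{"message":$context.error.messageString}'
error = {"messageString": '"Missing Authentication Token"'}
body = template.replace("$context.error.messageString", error["messageString"])
assert body == '{"message":"Missing Authentication Token"}'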
+ message: str + messageString: str + responseType: str + validationErrorString: str + + +class ContextVariables(TypedDict, total=False): + # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference + accountId: str + """The API owner's AWS account ID.""" + apiId: str + """The identifier API Gateway assigns to your API.""" + authorizer: Optional[ContextVarsAuthorizer] + """The principal user identification associated with the token.""" + awsEndpointRequestId: Optional[str] + """The AWS endpoint's request ID.""" + deploymentId: str + """The ID of the API deployment.""" + domainName: str + """The full domain name used to invoke the API. This should be the same as the incoming Host header.""" + domainPrefix: str + """The first label of the $context.domainName.""" + error: GatewayResponseContextVarsError + """The error context variables.""" + extendedRequestId: str + """The extended ID that API Gateway generates and assigns to the API request. """ + httpMethod: str + """The HTTP method used""" + identity: Optional[ContextVarsIdentity] + isCanaryRequest: Optional[bool] + """Indicates if the request was directed to the canary""" + path: str + """The request path.""" + protocol: str + """The request protocol""" + requestId: str + """An ID for the request. Clients can override this request ID. """ + requestOverride: Optional[ContextVarsRequestOverride] + """Request override. Only exists for request mapping template""" + requestTime: str + """The CLF-formatted request time (dd/MMM/yyyy:HH:mm:ss +-hhmm).""" + requestTimeEpoch: int + """The Epoch-formatted request time, in milliseconds.""" + resourceId: Optional[str] + """The identifier that API Gateway assigns to your resource.""" + resourcePath: Optional[str] + """The path to your resource""" + responseOverride: Optional[ContextVarsResponseOverride] + """Response override. Only exists for response mapping template""" + stage: str + """The deployment stage of the API request """ + wafResponseCode: Optional[str] + """The response received from AWS WAF: WAF_ALLOW or WAF_BLOCK. Will not be set if the stage is not associated with a web ACL""" + webaclArn: Optional[str] + """The complete ARN of the web ACL that is used to decide whether to allow or block the request. 
Will not be set if the stage is not associated with a web ACL.""" + + +class LoggingContextVarsAuthorize(TypedDict, total=False): + error: Optional[str] + latency: Optional[str] + status: Optional[str] + + +class LoggingContextVarsAuthorizer(TypedDict, total=False): + error: Optional[str] + integrationLatency: Optional[str] + integrationStatus: Optional[str] + latency: Optional[str] + requestId: Optional[str] + status: Optional[str] + + +class LoggingContextVarsAuthenticate(TypedDict, total=False): + error: Optional[str] + latency: Optional[str] + status: Optional[str] + + +class LoggingContextVarsCustomDomain(TypedDict, total=False): + basePathMatched: Optional[str] + + +class LoggingContextVarsIntegration(TypedDict, total=False): + error: Optional[str] + integrationStatus: Optional[str] + latency: Optional[str] + requestId: Optional[str] + status: Optional[str] + + +class LoggingContextVarsWaf(TypedDict, total=False): + error: Optional[str] + latency: Optional[str] + status: Optional[str] + + +class LoggingContextVariables(TypedDict, total=False): + authorize: Optional[LoggingContextVarsAuthorize] + authorizer: Optional[LoggingContextVarsAuthorizer] + authenticate: Optional[LoggingContextVarsAuthenticate] + customDomain: Optional[LoggingContextVarsCustomDomain] + endpointType: Optional[str] + integration: Optional[LoggingContextVarsIntegration] + integrationLatency: Optional[str] + integrationStatus: Optional[str] + responseLatency: Optional[str] + responseLength: Optional[str] + status: Optional[str] + waf: Optional[LoggingContextVarsWaf] + xrayTraceId: Optional[str] diff --git a/localstack-core/localstack/services/apigateway/next_gen/provider.py b/localstack-core/localstack/services/apigateway/next_gen/provider.py new file mode 100644 index 0000000000000..5153463c60a4c --- /dev/null +++ b/localstack-core/localstack/services/apigateway/next_gen/provider.py @@ -0,0 +1,490 @@ +import copy +import datetime +import re + +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.apigateway import ( + BadRequestException, + CacheClusterSize, + CreateStageRequest, + Deployment, + DeploymentCanarySettings, + GatewayResponse, + GatewayResponses, + GatewayResponseType, + ListOfPatchOperation, + MapOfStringToString, + NotFoundException, + NullableBoolean, + NullableInteger, + Stage, + StatusCode, + String, + TestInvokeMethodRequest, + TestInvokeMethodResponse, +) +from localstack.services.apigateway.helpers import ( + get_apigateway_store, + get_moto_rest_api, + get_rest_api_container, +) +from localstack.services.apigateway.legacy.provider import ( + STAGE_UPDATE_PATHS, + ApigatewayProvider, + patch_api_gateway_entity, +) +from localstack.services.apigateway.patches import apply_patches +from localstack.services.edge import ROUTER +from localstack.services.moto import call_moto + +from ..models import apigateway_stores +from .execute_api.gateway_response import ( + DEFAULT_GATEWAY_RESPONSES, + GatewayResponseCode, + build_gateway_response, + get_gateway_response_or_default, +) +from .execute_api.helpers import freeze_rest_api +from .execute_api.router import ApiGatewayEndpoint, ApiGatewayRouter +from .execute_api.test_invoke import run_test_invocation + + +class ApigatewayNextGenProvider(ApigatewayProvider): + router: ApiGatewayRouter + + def __init__(self, router: ApiGatewayRouter = None): + # we initialize the route handler with a global store with default account and region, because it only ever + # access values with CrossAccount attributes + if 
not router: + route_handler = ApiGatewayEndpoint(store=apigateway_stores) + router = ApiGatewayRouter(ROUTER, handler=route_handler) + + super().__init__(router=router) + + def on_after_init(self): + apply_patches() + self.router.register_routes() + + @handler("DeleteRestApi") + def delete_rest_api(self, context: RequestContext, rest_api_id: String, **kwargs) -> None: + super().delete_rest_api(context, rest_api_id, **kwargs) + store = get_apigateway_store(context=context) + api_id_lower = rest_api_id.lower() + store.active_deployments.pop(api_id_lower, None) + store.internal_deployments.pop(api_id_lower, None) + + @handler("CreateStage", expand=False) + def create_stage(self, context: RequestContext, request: CreateStageRequest) -> Stage: + # TODO: we need to internalize Stages and Deployments in LocalStack, we have a lot of split logic + super().create_stage(context, request) + rest_api_id = request["restApiId"].lower() + stage_name = request["stageName"] + moto_api = get_moto_rest_api(context, rest_api_id) + stage = moto_api.stages[stage_name] + + if canary_settings := request.get("canarySettings"): + if ( + deployment_id := canary_settings.get("deploymentId") + ) and deployment_id not in moto_api.deployments: + raise BadRequestException("Deployment id does not exist") + + default_settings = { + "deploymentId": stage.deployment_id, + "percentTraffic": 0.0, + "useStageCache": False, + } + default_settings.update(canary_settings) + stage.canary_settings = default_settings + else: + stage.canary_settings = None + + store = get_apigateway_store(context=context) + + store.active_deployments.setdefault(rest_api_id, {}) + store.active_deployments[rest_api_id][stage_name] = request["deploymentId"] + response: Stage = stage.to_json() + self._patch_stage_response(response) + return response + + @handler("UpdateStage") + def update_stage( + self, + context: RequestContext, + rest_api_id: String, + stage_name: String, + patch_operations: ListOfPatchOperation = None, + **kwargs, + ) -> Stage: + moto_rest_api = get_moto_rest_api(context, rest_api_id) + if not (moto_stage := moto_rest_api.stages.get(stage_name)): + raise NotFoundException("Invalid Stage identifier specified") + + # construct list of path regexes for validation + path_regexes = [re.sub("{[^}]+}", ".+", path) for path in STAGE_UPDATE_PATHS] + + # copy the patch operations to not mutate them, so that we're logging the correct input + patch_operations = copy.deepcopy(patch_operations) or [] + # we are only passing a subset of operations to Moto as it does not handle properly all of them + moto_patch_operations = [] + moto_stage_copy = copy.deepcopy(moto_stage) + for patch_operation in patch_operations: + skip_moto_apply = False + patch_path = patch_operation["path"] + patch_op = patch_operation["op"] + + # special case: handle updates (op=remove) for wildcard method settings + patch_path_stripped = patch_path.strip("/") + if patch_path_stripped == "*/*" and patch_op == "remove": + if not moto_stage.method_settings.pop(patch_path_stripped, None): + raise BadRequestException( + "Cannot remove method setting */* because there is no method setting for this method " + ) + response = moto_stage.to_json() + self._patch_stage_response(response) + return response + + path_valid = patch_path in STAGE_UPDATE_PATHS or any( + re.match(regex, patch_path) for regex in path_regexes + ) + if is_canary := patch_path.startswith("/canarySettings"): + skip_moto_apply = True + path_valid = is_canary_settings_update_patch_valid(op=patch_op, path=patch_path) + # 
it seems our JSON Patch utility does not handle replace properly if the value does not exist beforehand; + # it may be a Stage-only issue, so we replace the operation here + if patch_op == "replace": + patch_operation["op"] = "add" + + if patch_op == "copy": + copy_from = patch_operation.get("from") + if patch_path not in ("/deploymentId", "/variables") or copy_from not in ( + "/canarySettings/deploymentId", + "/canarySettings/stageVariableOverrides", + ): + raise BadRequestException( + "Invalid copy operation with path: /canarySettings/stageVariableOverrides and from /variables. Valid copy:path are [/deploymentId, /variables] and valid copy:from are [/canarySettings/deploymentId, /canarySettings/stageVariableOverrides]" + ) + + if copy_from.startswith("/canarySettings") and not getattr( + moto_stage_copy, "canary_settings", None + ): + raise BadRequestException("Promotion not available. Canary does not exist.") + + if patch_path == "/variables": + moto_stage_copy.variables.update( + moto_stage_copy.canary_settings.get("stageVariableOverrides", {}) + ) + elif patch_path == "/deploymentId": + moto_stage_copy.deployment_id = moto_stage_copy.canary_settings["deploymentId"] + + # we manually assign `copy` ops, no need to apply them + continue + + if not path_valid: + valid_paths = f"[{', '.join(STAGE_UPDATE_PATHS)}]" + # note: weird formatting in AWS - required for snapshot testing + valid_paths = valid_paths.replace( + "/{resourcePath}/{httpMethod}/throttling/burstLimit, /{resourcePath}/{httpMethod}/throttling/rateLimit, /{resourcePath}/{httpMethod}/caching/ttlInSeconds", + "/{resourcePath}/{httpMethod}/throttling/burstLimit/{resourcePath}/{httpMethod}/throttling/rateLimit/{resourcePath}/{httpMethod}/caching/ttlInSeconds", + ) + valid_paths = valid_paths.replace("/burstLimit, /", "/burstLimit /") + valid_paths = valid_paths.replace("/rateLimit, /", "/rateLimit /") + raise BadRequestException( + f"Invalid method setting path: {patch_operation['path']}. 
Must be one of: {valid_paths}" + ) + + # TODO: check if there are other boolean, maybe add a global step in _patch_api_gateway_entity + if patch_path == "/tracingEnabled" and (value := patch_operation.get("value")): + patch_operation["value"] = value and value.lower() == "true" or False + + elif patch_path in ("/canarySettings/deploymentId", "/deploymentId"): + if patch_op != "copy" and not moto_rest_api.deployments.get( + patch_operation.get("value") + ): + raise BadRequestException("Deployment id does not exist") + + if not skip_moto_apply: + # we need to copy the patch operation because `_patch_api_gateway_entity` is mutating it in place + moto_patch_operations.append(dict(patch_operation)) + + # we need to apply patch operation individually to be able to validate the logic + # TODO: rework the patching logic + patch_api_gateway_entity(moto_stage_copy, [patch_operation]) + if is_canary and (canary_settings := getattr(moto_stage_copy, "canary_settings", None)): + default_canary_settings = { + "deploymentId": moto_stage_copy.deployment_id, + "percentTraffic": 0.0, + "useStageCache": False, + } + default_canary_settings.update(canary_settings) + default_canary_settings["percentTraffic"] = float( + default_canary_settings["percentTraffic"] + ) + moto_stage_copy.canary_settings = default_canary_settings + + moto_rest_api.stages[stage_name] = moto_stage_copy + moto_stage_copy.apply_operations(moto_patch_operations) + if moto_stage.deployment_id != moto_stage_copy.deployment_id: + store = get_apigateway_store(context=context) + store.active_deployments.setdefault(rest_api_id.lower(), {})[stage_name] = ( + moto_stage_copy.deployment_id + ) + + moto_stage_copy.last_updated_date = datetime.datetime.now(tz=datetime.UTC) + + response = moto_stage_copy.to_json() + self._patch_stage_response(response) + return response + + def delete_stage( + self, context: RequestContext, rest_api_id: String, stage_name: String, **kwargs + ) -> None: + call_moto(context) + store = get_apigateway_store(context=context) + store.active_deployments[rest_api_id.lower()].pop(stage_name, None) + + def create_deployment( + self, + context: RequestContext, + rest_api_id: String, + stage_name: String = None, + stage_description: String = None, + description: String = None, + cache_cluster_enabled: NullableBoolean = None, + cache_cluster_size: CacheClusterSize = None, + variables: MapOfStringToString = None, + canary_settings: DeploymentCanarySettings = None, + tracing_enabled: NullableBoolean = None, + **kwargs, + ) -> Deployment: + moto_rest_api = get_moto_rest_api(context, rest_api_id) + if canary_settings: + # TODO: add validation to the canary settings + if not stage_name: + error_stage = stage_name if stage_name is not None else "null" + raise BadRequestException( + f"Invalid deployment content specified.Non null and non empty stageName must be provided for canary deployment. 
Provided value is {error_stage}" + ) + if stage_name not in moto_rest_api.stages: + raise BadRequestException( + "Invalid deployment content specified.Stage non-existing must already be created before making a canary release deployment" + ) + + # FIXME: moto has an issue and is not handling canarySettings, hence overwriting the current stage with the + # canary deployment + current_stage = None + if stage_name: + current_stage = copy.deepcopy(moto_rest_api.stages.get(stage_name)) + + # TODO: if the REST API does not contain any method, we should raise an exception + deployment: Deployment = call_moto(context) + # https://docs.aws.amazon.com/apigateway/latest/developerguide/updating-api.html + # TODO: the deployment is not accessible until it is linked to a stage + # you can combine a stage or later update the deployment with a stage id + store = get_apigateway_store(context=context) + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + frozen_deployment = freeze_rest_api( + account_id=context.account_id, + region=context.region, + moto_rest_api=moto_rest_api, + localstack_rest_api=rest_api_container, + ) + router_api_id = rest_api_id.lower() + deployment_id = deployment["id"] + store.internal_deployments.setdefault(router_api_id, {})[deployment_id] = frozen_deployment + + if stage_name: + moto_stage = moto_rest_api.stages[stage_name] + if canary_settings: + moto_stage = current_stage + moto_rest_api.stages[stage_name] = current_stage + + default_settings = { + "deploymentId": deployment_id, + "percentTraffic": 0.0, + "useStageCache": False, + } + default_settings.update(canary_settings) + moto_stage.canary_settings = default_settings + else: + store.active_deployments.setdefault(router_api_id, {})[stage_name] = deployment_id + moto_stage.canary_settings = None + + if variables: + moto_stage.variables = variables + + moto_stage.description = stage_description or moto_stage.description or None + + if cache_cluster_enabled is not None: + moto_stage.cache_cluster_enabled = cache_cluster_enabled + + if cache_cluster_size is not None: + moto_stage.cache_cluster_size = cache_cluster_size + + if tracing_enabled is not None: + moto_stage.tracing_enabled = tracing_enabled + + return deployment + + def delete_deployment( + self, context: RequestContext, rest_api_id: String, deployment_id: String, **kwargs + ) -> None: + call_moto(context) + store = get_apigateway_store(context=context) + store.internal_deployments.get(rest_api_id.lower(), {}).pop(deployment_id, None) + + def put_gateway_response( + self, + context: RequestContext, + rest_api_id: String, + response_type: GatewayResponseType, + status_code: StatusCode = None, + response_parameters: MapOfStringToString = None, + response_templates: MapOfStringToString = None, + **kwargs, + ) -> GatewayResponse: + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + if response_type not in DEFAULT_GATEWAY_RESPONSES: + raise CommonServiceException( + code="ValidationException", + message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]", + ) + + gateway_response = build_gateway_response( + status_code=status_code, + response_parameters=response_parameters, + response_templates=response_templates, + response_type=response_type, + 
default_response=False, + ) + + rest_api_container.gateway_responses[response_type] = gateway_response + + # The CRUD provider has a weird behavior: for some responses (for now, INTEGRATION_FAILURE), it sets the default + # status code to `504`. However, in the actual invocation logic, it returns 500. To deal with the inconsistency, + # we need to set the value to None if not provided by the user, so that the invocation logic can properly return + # 500, and the CRUD layer can still return 504 even though it is technically wrong. + response = gateway_response.copy() + if response.get("statusCode") is None: + response["statusCode"] = GatewayResponseCode[response_type] + + return response + + def get_gateway_response( + self, + context: RequestContext, + rest_api_id: String, + response_type: GatewayResponseType, + **kwargs, + ) -> GatewayResponse: + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + if response_type not in DEFAULT_GATEWAY_RESPONSES: + raise CommonServiceException( + code="ValidationException", + message=f"1 validation error detected: Value '{response_type}' at 'responseType' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(DEFAULT_GATEWAY_RESPONSES)}]", + ) + + gateway_response = _get_gateway_response_or_default( + response_type, rest_api_container.gateway_responses + ) + # TODO: add validation with the parameters? seems like it validated client side? how to try? + return gateway_response + + def get_gateway_responses( + self, + context: RequestContext, + rest_api_id: String, + position: String = None, + limit: NullableInteger = None, + **kwargs, + ) -> GatewayResponses: + store = get_apigateway_store(context=context) + if not (rest_api_container := store.rest_apis.get(rest_api_id)): + raise NotFoundException( + f"Invalid API identifier specified {context.account_id}:{rest_api_id}" + ) + + user_gateway_resp = rest_api_container.gateway_responses + gateway_responses = [ + _get_gateway_response_or_default(response_type, user_gateway_resp) + for response_type in DEFAULT_GATEWAY_RESPONSES + ] + return GatewayResponses(items=gateway_responses) + + def test_invoke_method( + self, context: RequestContext, request: TestInvokeMethodRequest + ) -> TestInvokeMethodResponse: + rest_api_id = request["restApiId"] + moto_rest_api = get_moto_rest_api(context=context, rest_api_id=rest_api_id) + resource = moto_rest_api.resources.get(request["resourceId"]) + if not resource: + raise NotFoundException("Invalid Resource identifier specified") + + # test httpMethod + + rest_api_container = get_rest_api_container(context, rest_api_id=rest_api_id) + frozen_deployment = freeze_rest_api( + account_id=context.account_id, + region=context.region, + moto_rest_api=moto_rest_api, + localstack_rest_api=rest_api_container, + ) + + response = run_test_invocation( + test_request=request, + deployment=frozen_deployment, + ) + + return response + + +def is_canary_settings_update_patch_valid(op: str, path: str) -> bool: + path_regexes = ( + r"\/canarySettings\/percentTraffic", + r"\/canarySettings\/deploymentId", + r"\/canarySettings\/stageVariableOverrides\/.+", + r"\/canarySettings\/useStageCache", + ) + if path == "/canarySettings" and op == "remove": + return True + + matches_path = any(re.match(regex, path) for regex in path_regexes) + + if op not in ("replace", "copy"): + if matches_path: + raise 
BadRequestException(f"Invalid {op} operation with path: {path}") + + raise BadRequestException( + f"Cannot {op} method setting {path.lstrip('/')} because there is no method setting for this method " + ) + + # stageVariableOverrides is a bit special as it's nested, it doesn't return the same error message + if not matches_path and path != "/canarySettings/stageVariableOverrides": + return False + + return True + + +def _get_gateway_response_or_default( + response_type: GatewayResponseType, + gateway_responses: dict[GatewayResponseType, GatewayResponse], +) -> GatewayResponse: + """ + Utility function that overrides the behavior of `get_gateway_response_or_default` by setting a default status code + from the `GatewayResponseCode` values. In reality, some default values in the invocation layer are different from + what the CRUD layer of API Gateway is returning. + """ + response = get_gateway_response_or_default(response_type, gateway_responses) + if response.get("statusCode") is None and (status_code := GatewayResponseCode[response_type]): + response["statusCode"] = status_code + + return response diff --git a/localstack-core/localstack/services/apigateway/patches.py b/localstack-core/localstack/services/apigateway/patches.py new file mode 100644 index 0000000000000..ca12f96284fff --- /dev/null +++ b/localstack-core/localstack/services/apigateway/patches.py @@ -0,0 +1,207 @@ +import datetime +import json +import logging + +from moto.apigateway import models as apigateway_models +from moto.apigateway.exceptions import ( + DeploymentNotFoundException, + NoIntegrationDefined, + RestAPINotFound, + StageStillActive, +) +from moto.apigateway.responses import APIGatewayResponse +from moto.core.utils import camelcase_to_underscores + +from localstack.constants import TAG_KEY_CUSTOM_ID +from localstack.services.apigateway.helpers import apply_json_patch_safe +from localstack.utils.common import str_to_bool +from localstack.utils.patch import patch + +LOG = logging.getLogger(__name__) + + +def apply_patches(): + # TODO refactor patches in this module (e.g., use @patch decorator, simplify, ...) 
+ + def apigateway_models_Stage_init( + self, cacheClusterEnabled=False, cacheClusterSize=None, **kwargs + ): + apigateway_models_Stage_init_orig( + self, + cacheClusterEnabled=cacheClusterEnabled, + cacheClusterSize=cacheClusterSize, + **kwargs, + ) + + if (cacheClusterSize or cacheClusterEnabled) and not self.cache_cluster_status: + self.cache_cluster_status = "AVAILABLE" + + now = datetime.datetime.now(tz=datetime.UTC) + self.created_date = now + self.last_updated_date = now + + apigateway_models_Stage_init_orig = apigateway_models.Stage.__init__ + apigateway_models.Stage.__init__ = apigateway_models_Stage_init + + @patch(APIGatewayResponse.put_integration) + def apigateway_put_integration(fn, self, *args, **kwargs): + # TODO: verify if this patch is still necessary, this might have been fixed upstream + fn(self, *args, **kwargs) + + url_path_parts = self.path.split("/") + function_id = url_path_parts[2] + resource_id = url_path_parts[4] + method_type = url_path_parts[6] + integration = self.backend.get_integration(function_id, resource_id, method_type) + + timeout_milliseconds = self._get_param("timeoutInMillis") + cache_key_parameters = self._get_param("cacheKeyParameters") or [] + content_handling = self._get_param("contentHandling") + integration.cache_namespace = resource_id + integration.timeout_in_millis = timeout_milliseconds + integration.cache_key_parameters = cache_key_parameters + integration.content_handling = content_handling + return 201, {}, json.dumps(integration.to_json()) + + # define json-patch operations for backend models + + def backend_model_apply_operations(self, patch_operations): + # run pre-actions + if isinstance(self, apigateway_models.Stage) and [ + op for op in patch_operations if "/accessLogSettings" in op.get("path", "") + ]: + self.access_log_settings = self.access_log_settings or {} + # apply patches + apply_json_patch_safe(self, patch_operations, in_place=True) + # run post-actions + if isinstance(self, apigateway_models.Stage): + bool_params = ["cacheClusterEnabled", "tracingEnabled"] + for bool_param in bool_params: + if getattr(self, camelcase_to_underscores(bool_param), None): + value = getattr(self, camelcase_to_underscores(bool_param), None) + setattr(self, camelcase_to_underscores(bool_param), str_to_bool(value)) + return self + + model_classes = [ + apigateway_models.Authorizer, + apigateway_models.DomainName, + apigateway_models.MethodResponse, + ] + for model_class in model_classes: + model_class.apply_operations = model_class.apply_patch_operations = ( + backend_model_apply_operations + ) + + # fix data types for some json-patch operation values + + @patch(apigateway_models.Stage._get_default_method_settings) + def _get_default_method_settings(fn, self): + result = fn(self) + default_settings = self.method_settings.get("*/*", {}) + result["cacheDataEncrypted"] = default_settings.get("cacheDataEncrypted", False) + result["throttlingRateLimit"] = default_settings.get("throttlingRateLimit", 10000.0) + result["throttlingBurstLimit"] = default_settings.get("throttlingBurstLimit", 5000) + result["metricsEnabled"] = default_settings.get("metricsEnabled", False) + result["dataTraceEnabled"] = default_settings.get("dataTraceEnabled", False) + result["unauthorizedCacheControlHeaderStrategy"] = default_settings.get( + "unauthorizedCacheControlHeaderStrategy", "SUCCEED_WITH_RESPONSE_HEADER" + ) + result["cacheTtlInSeconds"] = default_settings.get("cacheTtlInSeconds", 300) + result["cachingEnabled"] = default_settings.get("cachingEnabled", False) + 
result["requireAuthorizationForCacheControl"] = default_settings.get( + "requireAuthorizationForCacheControl", True + ) + return result + + # patch integration error responses + @patch(apigateway_models.Resource.get_integration) + def apigateway_models_resource_get_integration(fn, self, method_type): + resource_method = self.resource_methods.get(method_type, {}) + if not resource_method.method_integration: + raise NoIntegrationDefined() + return resource_method.method_integration + + @patch(apigateway_models.RestAPI.to_dict) + def apigateway_models_rest_api_to_dict(fn, self): + resp = fn(self) + resp["policy"] = None + if self.policy: + # Strip whitespaces for TF compatibility (not entirely sure why we need double-dumps, + # but otherwise: "error normalizing policy JSON: invalid character 'V' after top-level value") + resp["policy"] = json.dumps(json.dumps(json.loads(self.policy), separators=(",", ":")))[ + 1:-1 + ] + + if not self.tags: + resp["tags"] = None + + resp["disableExecuteApiEndpoint"] = ( + str(resp.get("disableExecuteApiEndpoint")).lower() == "true" + ) + + return resp + + @patch(apigateway_models.Stage.to_json) + def apigateway_models_stage_to_json(fn, self): + result = fn(self) + + if "documentationVersion" not in result: + result["documentationVersion"] = getattr(self, "documentation_version", None) + + if "canarySettings" not in result: + result["canarySettings"] = getattr(self, "canary_settings", None) + + if "createdDate" not in result: + created_date = getattr(self, "created_date", None) + if created_date: + created_date = int(created_date.timestamp()) + result["createdDate"] = created_date + + if "lastUpdatedDate" not in result: + last_updated_date = getattr(self, "last_updated_date", None) + if last_updated_date: + last_updated_date = int(last_updated_date.timestamp()) + result["lastUpdatedDate"] = last_updated_date + + return result + + @patch(apigateway_models.Stage._str2bool, pass_target=False) + def apigateway_models_stage_str_to_bool(self, v: bool | str) -> bool: + return str_to_bool(v) + + # TODO remove this patch when the behavior is implemented in moto + @patch(apigateway_models.APIGatewayBackend.create_rest_api) + def create_rest_api(fn, self, *args, tags=None, **kwargs): + """ + https://github.com/localstack/localstack/pull/4413/files + Add ability to specify custom IDs for API GW REST APIs via tags + """ + tags = tags or {} + result = fn(self, *args, tags=tags, **kwargs) + # TODO: lower the custom_id when getting it from the tags, as AWS is case insensitive + if custom_id := tags.get(TAG_KEY_CUSTOM_ID): + self.apis.pop(result.id) + result.id = custom_id + self.apis[custom_id] = result + return result + + @patch(apigateway_models.APIGatewayBackend.get_rest_api, pass_target=False) + def get_rest_api(self, function_id): + for key in self.apis.keys(): + if key.lower() == function_id.lower(): + return self.apis[key] + raise RestAPINotFound() + + @patch(apigateway_models.RestAPI.delete_deployment, pass_target=False) + def patch_delete_deployment(self, deployment_id: str) -> apigateway_models.Deployment: + if deployment_id not in self.deployments: + raise DeploymentNotFoundException() + deployment = self.deployments[deployment_id] + if deployment.stage_name and ( + (stage := self.stages.get(deployment.stage_name)) + and stage.deployment_id == deployment.id + ): + # Stage is still active + raise StageStillActive() + + return self.deployments.pop(deployment_id) diff --git a/localstack/services/events/__init__.py 
b/localstack-core/localstack/services/apigateway/resource_providers/__init__.py similarity index 100% rename from localstack/services/events/__init__.py rename to localstack-core/localstack/services/apigateway/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.py new file mode 100644 index 0000000000000..8c78925a5a8b8 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.py @@ -0,0 +1,110 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class ApiGatewayAccountProperties(TypedDict): + CloudWatchRoleArn: Optional[str] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayAccountProvider(ResourceProvider[ApiGatewayAccountProperties]): + TYPE = "AWS::ApiGateway::Account" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayAccountProperties], + ) -> ProgressEvent[ApiGatewayAccountProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + + + + + Read-only properties: + - /properties/Id + + IAM permissions required: + - apigateway:PATCH + - iam:GetRole + - iam:PassRole + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + role_arn = model["CloudWatchRoleArn"] + apigw.update_account( + patchOperations=[{"op": "replace", "path": "/cloudwatchRoleArn", "value": role_arn}] + ) + + model["Id"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayAccountProperties], + ) -> ProgressEvent[ApiGatewayAccountProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayAccountProperties], + ) -> ProgressEvent[ApiGatewayAccountProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + + # note: deletion of accounts is currently a no-op + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayAccountProperties], + ) -> ProgressEvent[ApiGatewayAccountProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:PATCH + - iam:GetRole + - iam:PassRole + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.schema.json new file mode 100644 index 0000000000000..3192ca8c3b443 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account.schema.json @@ -0,0 +1,46 @@ +{ + 
"typeName": "AWS::ApiGateway::Account", + "description": "Resource Type definition for AWS::ApiGateway::Account", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "Id": { + "description": "Primary identifier which is manually generated.", + "type": "string" + }, + "CloudWatchRoleArn": { + "description": "The Amazon Resource Name (ARN) of an IAM role that has write access to CloudWatch Logs in your account.", + "type": "string" + } + }, + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:PATCH", + "iam:GetRole", + "iam:PassRole" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:PATCH", + "iam:GetRole", + "iam:PassRole" + ] + }, + "delete": { + "permissions": [] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account_plugin.py new file mode 100644 index 0000000000000..d7dc5c91ce0d1 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_account_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayAccountProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::Account" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_account import ( + ApiGatewayAccountProvider, + ) + + self.factory = ApiGatewayAccountProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.py new file mode 100644 index 0000000000000..1385cd6c5d01c --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.py @@ -0,0 +1,136 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.objects import keys_to_lower + + +class ApiGatewayApiKeyProperties(TypedDict): + APIKeyId: Optional[str] + CustomerId: Optional[str] + Description: Optional[str] + Enabled: Optional[bool] + GenerateDistinctId: Optional[bool] + Name: Optional[str] + StageKeys: Optional[list[StageKey]] + Tags: Optional[list[Tag]] + Value: Optional[str] + + +class StageKey(TypedDict): + RestApiId: Optional[str] + StageName: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayApiKeyProvider(ResourceProvider[ApiGatewayApiKeyProperties]): + TYPE = "AWS::ApiGateway::ApiKey" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayApiKeyProperties], + ) -> ProgressEvent[ApiGatewayApiKeyProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/APIKeyId + + + Create-only properties: + - /properties/GenerateDistinctId + - /properties/Name + - /properties/Value + + Read-only properties: + - /properties/APIKeyId + + IAM permissions required: + - apigateway:POST + - apigateway:GET + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + params = util.select_attributes( + model, ["Description", "CustomerId", "Name", "Value", "Enabled", "StageKeys"] + ) + params = keys_to_lower(params.copy()) + if "enabled" in params: + params["enabled"] = bool(params["enabled"]) + + if model.get("Tags"): + params["tags"] = {tag["Key"]: tag["Value"] for tag in model["Tags"]} + + response = apigw.create_api_key(**params) + model["APIKeyId"] = response["id"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayApiKeyProperties], + ) -> ProgressEvent[ApiGatewayApiKeyProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayApiKeyProperties], + ) -> ProgressEvent[ApiGatewayApiKeyProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + apigw.delete_api_key(apiKey=model["APIKeyId"]) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayApiKeyProperties], + ) -> ProgressEvent[ApiGatewayApiKeyProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:GET + - apigateway:PATCH + - apigateway:PUT + - apigateway:DELETE + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.schema.json new file mode 100644 index 0000000000000..4d58557451ff8 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey.schema.json @@ -0,0 +1,135 @@ +{ + "typeName": "AWS::ApiGateway::ApiKey", + "description": "Resource Type definition for AWS::ApiGateway::ApiKey", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "APIKeyId": { + "description": "A Unique Key ID which identifies the API Key. Generated by the Create API and returned by the Read and List APIs ", + "type": "string" + }, + "CustomerId": { + "description": "An AWS Marketplace customer identifier to use when integrating with the AWS SaaS Marketplace.", + "type": "string" + }, + "Description": { + "description": "A description of the purpose of the API key.", + "type": "string" + }, + "Enabled": { + "description": "Indicates whether the API key can be used by clients.", + "default": false, + "type": "boolean" + }, + "GenerateDistinctId": { + "description": "Specifies whether the key identifier is distinct from the created API key value. 
This parameter is deprecated and should not be used.", + "type": "boolean" + }, + "Name": { + "description": "A name for the API key. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the API key name.", + "type": "string" + }, + "StageKeys": { + "description": "A list of stages to associate with this API key.", + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/StageKey" + } + }, + "Tags": { + "description": "An array of arbitrary tags (key-value pairs) to associate with the API key.", + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Value": { + "description": "The value of the API key. Must be at least 20 characters long.", + "type": "string" + } + }, + "definitions": { + "StageKey": { + "type": "object", + "additionalProperties": false, + "properties": { + "RestApiId": { + "description": "The ID of a RestApi resource that includes the stage with which you want to associate the API key.", + "type": "string" + }, + "StageName": { + "description": "The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property. ", + "type": "string" + } + } + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. 
", + "type": "string", + "maxLength": 256 + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "createOnlyProperties": [ + "/properties/GenerateDistinctId", + "/properties/Name", + "/properties/Value" + ], + "writeOnlyProperties": [ + "/properties/GenerateDistinctId" + ], + "primaryIdentifier": [ + "/properties/APIKeyId" + ], + "readOnlyProperties": [ + "/properties/APIKeyId" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:POST", + "apigateway:GET" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:GET", + "apigateway:PATCH", + "apigateway:PUT", + "apigateway:DELETE" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + }, + "list": { + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey_plugin.py new file mode 100644 index 0000000000000..352ec19eec4d3 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_apikey_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayApiKeyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::ApiKey" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_apikey import ( + ApiGatewayApiKeyProvider, + ) + + self.factory = ApiGatewayApiKeyProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.py new file mode 100644 index 0000000000000..51debd7811631 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.py @@ -0,0 +1,122 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class ApiGatewayBasePathMappingProperties(TypedDict): + DomainName: Optional[str] + BasePath: Optional[str] + RestApiId: Optional[str] + Stage: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayBasePathMappingProvider(ResourceProvider[ApiGatewayBasePathMappingProperties]): + TYPE = "AWS::ApiGateway::BasePathMapping" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayBasePathMappingProperties], + ) -> ProgressEvent[ApiGatewayBasePathMappingProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/DomainName + - /properties/BasePath + + Required properties: + - DomainName + + Create-only properties: + - /properties/DomainName + - /properties/BasePath + + + + IAM permissions required: + - apigateway:POST + - apigateway:GET + + """ + + # TODO we are using restApiId for PhysicalResourceId + # check if we need to change it + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + params = { + "domainName": model.get("DomainName"), + "restApiId": model.get("RestApiId"), + **({"basePath": model.get("BasePath")} if model.get("BasePath") else {}), + **({"stage": model.get("Stage")} if model.get("Stage") else {}), + } + response = apigw.create_base_path_mapping(**params) + model["RestApiId"] = response["restApiId"] + # TODO: validations + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayBasePathMappingProperties], + ) -> ProgressEvent[ApiGatewayBasePathMappingProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayBasePathMappingProperties], + ) -> ProgressEvent[ApiGatewayBasePathMappingProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + apigw.delete_base_path_mapping(domainName=model["DomainName"], basePath=model["BasePath"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayBasePathMappingProperties], + ) -> ProgressEvent[ApiGatewayBasePathMappingProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:GET + - apigateway:DELETE + - apigateway:PATCH + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.schema.json new file mode 100644 index 0000000000000..ded5541adedac --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping.schema.json @@ -0,0 +1,81 @@ +{ + "typeName": "AWS::ApiGateway::BasePathMapping", + "description": "Resource Type definition for AWS::ApiGateway::BasePathMapping", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "BasePath": { + "type": "string", + "description": "The base path name that callers of the API must provide in the URL after the domain name." + }, + "DomainName": { + "type": "string", + "description": "The DomainName of an AWS::ApiGateway::DomainName resource." + }, + "RestApiId": { + "type": "string", + "description": "The ID of the API." + }, + "Stage": { + "type": "string", + "description": "The name of the API's stage." 
+ } + }, + "required": [ + "DomainName" + ], + "createOnlyProperties": [ + "/properties/DomainName", + "/properties/BasePath" + ], + "primaryIdentifier": [ + "/properties/DomainName", + "/properties/BasePath" + ], + "tagging": { + "taggable": false, + "tagOnCreate": false, + "tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "handlers": { + "create": { + "permissions": [ + "apigateway:POST", + "apigateway:GET" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:GET", + "apigateway:DELETE", + "apigateway:PATCH" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + }, + "list": { + "handlerSchema": { + "properties": { + "DomainName": { + "$ref": "resource-schema.json#/properties/DomainName" + } + }, + "required": [ + "DomainName" + ] + }, + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping_plugin.py new file mode 100644 index 0000000000000..2dcb4b036e9ef --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_basepathmapping_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayBasePathMappingProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::BasePathMapping" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_basepathmapping import ( + ApiGatewayBasePathMappingProvider, + ) + + self.factory = ApiGatewayBasePathMappingProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.py new file mode 100644 index 0000000000000..68bae12d2af24 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.py @@ -0,0 +1,196 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class ApiGatewayDeploymentProperties(TypedDict): + RestApiId: Optional[str] + DeploymentCanarySettings: Optional[DeploymentCanarySettings] + DeploymentId: Optional[str] + Description: Optional[str] + StageDescription: Optional[StageDescription] + StageName: Optional[str] + + +class DeploymentCanarySettings(TypedDict): + PercentTraffic: Optional[float] + StageVariableOverrides: Optional[dict] + UseStageCache: Optional[bool] + + +class AccessLogSetting(TypedDict): + DestinationArn: Optional[str] + Format: Optional[str] + + +class CanarySetting(TypedDict): + PercentTraffic: Optional[float] + StageVariableOverrides: Optional[dict] + UseStageCache: Optional[bool] + + +class MethodSetting(TypedDict): + CacheDataEncrypted: Optional[bool] + CacheTtlInSeconds: Optional[int] + CachingEnabled: Optional[bool] + DataTraceEnabled: Optional[bool] + HttpMethod: 
Optional[str] + LoggingLevel: Optional[str] + MetricsEnabled: Optional[bool] + ResourcePath: Optional[str] + ThrottlingBurstLimit: Optional[int] + ThrottlingRateLimit: Optional[float] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class StageDescription(TypedDict): + AccessLogSetting: Optional[AccessLogSetting] + CacheClusterEnabled: Optional[bool] + CacheClusterSize: Optional[str] + CacheDataEncrypted: Optional[bool] + CacheTtlInSeconds: Optional[int] + CachingEnabled: Optional[bool] + CanarySetting: Optional[CanarySetting] + ClientCertificateId: Optional[str] + DataTraceEnabled: Optional[bool] + Description: Optional[str] + DocumentationVersion: Optional[str] + LoggingLevel: Optional[str] + MethodSettings: Optional[list[MethodSetting]] + MetricsEnabled: Optional[bool] + Tags: Optional[list[Tag]] + ThrottlingBurstLimit: Optional[int] + ThrottlingRateLimit: Optional[float] + TracingEnabled: Optional[bool] + Variables: Optional[dict] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayDeploymentProvider(ResourceProvider[ApiGatewayDeploymentProperties]): + TYPE = "AWS::ApiGateway::Deployment" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayDeploymentProperties], + ) -> ProgressEvent[ApiGatewayDeploymentProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/DeploymentId + - /properties/RestApiId + + Required properties: + - RestApiId + + Create-only properties: + - /properties/DeploymentCanarySettings + - /properties/RestApiId + + Read-only properties: + - /properties/DeploymentId + + IAM permissions required: + - apigateway:POST + + """ + model = request.desired_state + api = request.aws_client_factory.apigateway + + params = {"restApiId": model["RestApiId"]} + + if model.get("StageName"): + params["stageName"] = model["StageName"] + + if model.get("StageDescription"): + params["stageDescription"] = json.dumps(model["StageDescription"]) + + if model.get("Description"): + params["description"] = model["Description"] + + response = api.create_deployment(**params) + + model["DeploymentId"] = response["id"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayDeploymentProperties], + ) -> ProgressEvent[ApiGatewayDeploymentProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayDeploymentProperties], + ) -> ProgressEvent[ApiGatewayDeploymentProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:GET + - apigateway:DELETE + """ + model = request.desired_state + api = request.aws_client_factory.apigateway + + try: + # TODO: verify if AWS behaves the same? 
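+            # AWS refuses to delete a deployment while a stage still references it (it returns a BadRequestException), so any stages pointing at this deployment are deleted first; the NotFoundException handler below keeps the operation idempotent.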
+ get_stages = api.get_stages( + restApiId=model["RestApiId"], deploymentId=model["DeploymentId"] + ) + if stages := get_stages["item"]: + for stage in stages: + api.delete_stage(restApiId=model["RestApiId"], stageName=stage["stageName"]) + + api.delete_deployment(restApiId=model["RestApiId"], deploymentId=model["DeploymentId"]) + except api.exceptions.NotFoundException: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayDeploymentProperties], + ) -> ProgressEvent[ApiGatewayDeploymentProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:PATCH + - apigateway:GET + - apigateway:PUT + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.schema.json new file mode 100644 index 0000000000000..ab10bbf5e2a7a --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment.schema.json @@ -0,0 +1,318 @@ +{ + "typeName": "AWS::ApiGateway::Deployment", + "description": "Resource Type definition for AWS::ApiGateway::Deployment", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "DeploymentId": { + "type": "string", + "description": "Primary Id for this resource" + }, + "DeploymentCanarySettings": { + "$ref": "#/definitions/DeploymentCanarySettings", + "description": "Specifies settings for the canary deployment." + }, + "Description": { + "type": "string", + "description": "A description of the purpose of the API Gateway deployment." + }, + "RestApiId": { + "type": "string", + "description": "The ID of the RestApi resource to deploy. " + }, + "StageDescription": { + "$ref": "#/definitions/StageDescription", + "description": "Configures the stage that API Gateway creates with this deployment." + }, + "StageName": { + "type": "string", + "description": "A name for the stage that API Gateway creates with this deployment. Use only alphanumeric characters." + } + }, + "definitions": { + "StageDescription": { + "type": "object", + "additionalProperties": false, + "properties": { + "AccessLogSetting": { + "description": "Specifies settings for logging access in this stage.", + "$ref": "#/definitions/AccessLogSetting" + }, + "CacheClusterEnabled": { + "description": "Indicates whether cache clustering is enabled for the stage.", + "type": "boolean" + }, + "CacheClusterSize": { + "description": "The size of the stage's cache cluster.", + "type": "string" + }, + "CacheDataEncrypted": { + "description": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. ", + "type": "boolean" + }, + "CacheTtlInSeconds": { + "description": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. ", + "type": "integer" + }, + "CachingEnabled": { + "description": "Indicates whether responses are cached and returned for requests. 
You must enable a cache cluster on the stage to cache responses.", + "type": "boolean" + }, + "CanarySetting": { + "description": "Specifies settings for the canary deployment in this stage.", + "$ref": "#/definitions/CanarySetting" + }, + "ClientCertificateId": { + "description": "The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage. ", + "type": "string" + }, + "DataTraceEnabled": { + "description": "Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. ", + "type": "boolean" + }, + "Description": { + "description": "A description of the purpose of the stage.", + "type": "string" + }, + "DocumentationVersion": { + "description": "The version identifier of the API documentation snapshot.", + "type": "string" + }, + "LoggingLevel": { + "description": "The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference. ", + "type": "string" + }, + "MethodSettings": { + "description": "Configures settings for all of the stage's methods.", + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/MethodSetting" + } + }, + "MetricsEnabled": { + "description": "Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.", + "type": "boolean" + }, + "Tags": { + "description": "An array of arbitrary tags (key-value pairs) to associate with the stage.", + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "ThrottlingBurstLimit": { + "description": "The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.", + "type": "integer" + }, + "ThrottlingRateLimit": { + "description": "The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.", + "type": "number" + }, + "TracingEnabled": { + "description": "Specifies whether active tracing with X-ray is enabled for this stage.", + "type": "boolean" + }, + "Variables": { + "description": "A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+. ", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + } + } + }, + "DeploymentCanarySettings": { + "type": "object", + "additionalProperties": false, + "properties": { + "PercentTraffic": { + "description": "The percentage (0-100) of traffic diverted to a canary deployment.", + "type": "number" + }, + "StageVariableOverrides": { + "description": "Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. 
Duplicates are not allowed.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "UseStageCache": { + "description": "Whether the canary deployment uses the stage cache.", + "type": "boolean" + } + } + }, + "AccessLogSetting": { + "type": "object", + "additionalProperties": false, + "properties": { + "DestinationArn": { + "description": "The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. ", + "type": "string" + }, + "Format": { + "description": "A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId. ", + "type": "string" + } + } + }, + "CanarySetting": { + "type": "object", + "additionalProperties": false, + "properties": { + "PercentTraffic": { + "description": "The percent (0-100) of traffic diverted to a canary deployment.", + "type": "number" + }, + "StageVariableOverrides": { + "description": "Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. ", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "UseStageCache": { + "description": "Whether the canary deployment uses the stage cache or not.", + "type": "boolean" + } + } + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "description": "The key name of the tag", + "type": "string" + }, + "Value": { + "description": "The value for the tag", + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "MethodSetting": { + "type": "object", + "additionalProperties": false, + "properties": { + "CacheDataEncrypted": { + "description": "Indicates whether the cached responses are encrypted", + "type": "boolean" + }, + "CacheTtlInSeconds": { + "description": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. ", + "type": "integer" + }, + "CachingEnabled": { + "description": "Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.", + "type": "boolean" + }, + "DataTraceEnabled": { + "description": "Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. ", + "type": "boolean" + }, + "HttpMethod": { + "description": "The HTTP method.", + "type": "string" + }, + "LoggingLevel": { + "description": "The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference. ", + "type": "string" + }, + "MetricsEnabled": { + "description": "Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.", + "type": "boolean" + }, + "ResourcePath": { + "description": "The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. 
", + "type": "string" + }, + "ThrottlingBurstLimit": { + "description": "The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.", + "type": "integer" + }, + "ThrottlingRateLimit": { + "description": "The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.", + "type": "number" + } + } + } + }, + "taggable": true, + "required": [ + "RestApiId" + ], + "createOnlyProperties": [ + "/properties/DeploymentCanarySettings", + "/properties/RestApiId" + ], + "primaryIdentifier": [ + "/properties/DeploymentId", + "/properties/RestApiId" + ], + "readOnlyProperties": [ + "/properties/DeploymentId" + ], + "writeOnlyProperties": [ + "/properties/StageName", + "/properties/StageDescription", + "/properties/DeploymentCanarySettings" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:POST" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:PATCH", + "apigateway:GET", + "apigateway:PUT" + ] + }, + "delete": { + "permissions": [ + "apigateway:GET", + "apigateway:DELETE" + ] + }, + "list": { + "handlerSchema": { + "properties": { + "RestApiId": { + "$ref": "resource-schema.json#/properties/RestApiId" + } + }, + "required": [ + "RestApiId" + ] + }, + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment_plugin.py new file mode 100644 index 0000000000000..80ff9801a1ed5 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_deployment_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayDeploymentProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::Deployment" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_deployment import ( + ApiGatewayDeploymentProvider, + ) + + self.factory = ApiGatewayDeploymentProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.py new file mode 100644 index 0000000000000..778ec9da3cbf8 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.py @@ -0,0 +1,164 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.objects import keys_to_lower + + +class ApiGatewayDomainNameProperties(TypedDict): + CertificateArn: Optional[str] + DistributionDomainName: Optional[str] + DistributionHostedZoneId: Optional[str] + DomainName: Optional[str] + EndpointConfiguration: Optional[EndpointConfiguration] + MutualTlsAuthentication: Optional[MutualTlsAuthentication] + 
OwnershipVerificationCertificateArn: Optional[str] + RegionalCertificateArn: Optional[str] + RegionalDomainName: Optional[str] + RegionalHostedZoneId: Optional[str] + SecurityPolicy: Optional[str] + Tags: Optional[list[Tag]] + + + class EndpointConfiguration(TypedDict): + Types: Optional[list[str]] + + + class MutualTlsAuthentication(TypedDict): + TruststoreUri: Optional[str] + TruststoreVersion: Optional[str] + + + class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + + REPEATED_INVOCATION = "repeated_invocation" + + + class ApiGatewayDomainNameProvider(ResourceProvider[ApiGatewayDomainNameProperties]): + TYPE = "AWS::ApiGateway::DomainName" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayDomainNameProperties], + ) -> ProgressEvent[ApiGatewayDomainNameProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/DomainName + + Create-only properties: + - /properties/DomainName + + Read-only properties: + - /properties/RegionalHostedZoneId + - /properties/DistributionDomainName + - /properties/RegionalDomainName + - /properties/DistributionHostedZoneId + + IAM permissions required: + - apigateway:* + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + params = keys_to_lower(model.copy()) + param_names = [ + "certificateArn", + "domainName", + "endpointConfiguration", + "mutualTlsAuthentication", + "ownershipVerificationCertificateArn", + "regionalCertificateArn", + "securityPolicy", + ] + params = util.select_attributes(params, param_names) + if model.get("Tags"): + params["tags"] = {tag["key"]: tag["value"] for tag in model["Tags"]} + + result = apigw.create_domain_name(**params) + + hosted_zones = request.aws_client_factory.route53.list_hosted_zones() + """ + The hardcoded value is the only one that should be returned, but due to limitations it is not possible to + use it. 
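+ (In real AWS, Z2FDTNDATAQYW2 is the fixed hosted zone ID shared by all CloudFront distributions, which is why it is kept as the fallback below.)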
+ """ + if hosted_zones["HostedZones"]: + model["DistributionHostedZoneId"] = hosted_zones["HostedZones"][0]["Id"] + else: + model["DistributionHostedZoneId"] = "Z2FDTNDATAQYW2" + + model["DistributionDomainName"] = result.get("distributionDomainName") or result.get( + "domainName" + ) + model["RegionalDomainName"] = ( + result.get("regionalDomainName") or model["DistributionDomainName"] + ) + model["RegionalHostedZoneId"] = ( + result.get("regionalHostedZoneId") or model["DistributionHostedZoneId"] + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayDomainNameProperties], + ) -> ProgressEvent[ApiGatewayDomainNameProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:* + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayDomainNameProperties], + ) -> ProgressEvent[ApiGatewayDomainNameProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:* + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + apigw.delete_domain_name(domainName=model["DomainName"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayDomainNameProperties], + ) -> ProgressEvent[ApiGatewayDomainNameProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:* + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.schema.json new file mode 100644 index 0000000000000..c0b50b24f2c33 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname.schema.json @@ -0,0 +1,124 @@ +{ + "typeName": "AWS::ApiGateway::DomainName", + "description": "Resource Type definition for AWS::ApiGateway::DomainName.", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "definitions": { + "EndpointConfiguration": { + "type": "object", + "properties": { + "Types": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "MutualTlsAuthentication": { + "type": "object", + "properties": { + "TruststoreUri": { + "type": "string" + }, + "TruststoreVersion": { + "type": "string" + } + }, + "additionalProperties": false + }, + "Tag": { + "type": "object", + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "properties": { + "DomainName": { + "type": "string" + }, + "DistributionDomainName": { + "type": "string" + }, + "DistributionHostedZoneId": { + "type": "string" + }, + "EndpointConfiguration": { + "$ref": "#/definitions/EndpointConfiguration" + }, + "MutualTlsAuthentication": { + "$ref": "#/definitions/MutualTlsAuthentication" + }, + "RegionalDomainName": { + "type": "string" + }, + "RegionalHostedZoneId": { + "type": "string" + }, + "CertificateArn": { + "type": "string" + }, + "RegionalCertificateArn": { + "type": "string" + }, + "OwnershipVerificationCertificateArn": { + "type": "string" + }, + "SecurityPolicy": { + "type": "string" + }, + "Tags": { + "type": "array", + "items": { + "$ref": "#/definitions/Tag" + } 
+ } + }, + "additionalProperties": false, + "primaryIdentifier": [ + "/properties/DomainName" + ], + "createOnlyProperties": [ + "/properties/DomainName" + ], + "readOnlyProperties": [ + "/properties/RegionalHostedZoneId", + "/properties/DistributionDomainName", + "/properties/RegionalDomainName", + "/properties/DistributionHostedZoneId" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:*" + ] + }, + "read": { + "permissions": [ + "apigateway:*" + ] + }, + "update": { + "permissions": [ + "apigateway:*" + ] + }, + "delete": { + "permissions": [ + "apigateway:*" + ] + }, + "list": { + "permissions": [ + "apigateway:*" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname_plugin.py new file mode 100644 index 0000000000000..49e6db22f12d8 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_domainname_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayDomainNameProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::DomainName" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_domainname import ( + ApiGatewayDomainNameProvider, + ) + + self.factory = ApiGatewayDomainNameProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.py new file mode 100644 index 0000000000000..bb52d43256e7b --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.py @@ -0,0 +1,122 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.objects import keys_to_lower + + +class ApiGatewayGatewayResponseProperties(TypedDict): + ResponseType: Optional[str] + RestApiId: Optional[str] + Id: Optional[str] + ResponseParameters: Optional[dict] + ResponseTemplates: Optional[dict] + StatusCode: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayGatewayResponseProvider(ResourceProvider[ApiGatewayGatewayResponseProperties]): + TYPE = "AWS::ApiGateway::GatewayResponse" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayGatewayResponseProperties], + ) -> ProgressEvent[ApiGatewayGatewayResponseProperties]: + """ + Create a new resource. 
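+ The service call does not return an identifier, so the Id property is generated below from the logical resource id.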
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - ResponseType + - RestApiId + + Create-only properties: + - /properties/ResponseType + - /properties/RestApiId + + Read-only properties: + - /properties/Id + + IAM permissions required: + - apigateway:PUT + - apigateway:GET + + """ + model = request.desired_state + api = request.aws_client_factory.apigateway + # TODO: validations + model["Id"] = util.generate_default_name_without_stack(request.logical_resource_id) + + params = util.select_attributes( + model, + ["RestApiId", "ResponseType", "StatusCode", "ResponseParameters", "ResponseTemplates"], + ) + params = keys_to_lower(params.copy()) + + api.put_gateway_response(**params) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayGatewayResponseProperties], + ) -> ProgressEvent[ApiGatewayGatewayResponseProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayGatewayResponseProperties], + ) -> ProgressEvent[ApiGatewayGatewayResponseProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:GET + - apigateway:DELETE + """ + model = request.desired_state + api = request.aws_client_factory.apigateway + + api.delete_gateway_response( + restApiId=model["RestApiId"], responseType=model["ResponseType"] + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayGatewayResponseProperties], + ) -> ProgressEvent[ApiGatewayGatewayResponseProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:GET + - apigateway:PUT + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.schema.json new file mode 100644 index 0000000000000..063b2c6c91ca4 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse.schema.json @@ -0,0 +1,84 @@ +{ + "typeName": "AWS::ApiGateway::GatewayResponse", + "description": "Resource Type definition for AWS::ApiGateway::GatewayResponse", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "additionalProperties": false, + "properties": { + "Id": { + "description": "A Cloudformation auto generated ID.", + "type": "string" + }, + "RestApiId": { + "description": "The identifier of the API.", + "type": "string" + }, + "ResponseType": { + "description": "The type of the Gateway Response.", + "type": "string" + }, + "StatusCode": { + "description": "The HTTP status code for the response.", + "type": "string" + }, + "ResponseParameters": { + "description": "The response parameters (paths, query strings, and headers) for the response.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "ResponseTemplates": { + "description": "The response templates for the response.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + } + }, + "required": [ + "ResponseType", + "RestApiId" + ], + "createOnlyProperties": [ + 
"/properties/ResponseType", + "/properties/RestApiId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "taggable": false, + "handlers": { + "create": { + "permissions": [ + "apigateway:PUT", + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:GET", + "apigateway:PUT" + ] + }, + "delete": { + "permissions": [ + "apigateway:GET", + "apigateway:DELETE" + ] + }, + "list": { + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse_plugin.py new file mode 100644 index 0000000000000..86f43d46cdd21 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_gatewayresponse_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayGatewayResponseProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::GatewayResponse" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_gatewayresponse import ( + ApiGatewayGatewayResponseProvider, + ) + + self.factory = ApiGatewayGatewayResponseProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.py new file mode 100644 index 0000000000000..64598a4463898 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.py @@ -0,0 +1,234 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from copy import deepcopy +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class ApiGatewayMethodProperties(TypedDict): + HttpMethod: Optional[str] + ResourceId: Optional[str] + RestApiId: Optional[str] + ApiKeyRequired: Optional[bool] + AuthorizationScopes: Optional[list[str]] + AuthorizationType: Optional[str] + AuthorizerId: Optional[str] + Integration: Optional[Integration] + MethodResponses: Optional[list[MethodResponse]] + OperationName: Optional[str] + RequestModels: Optional[dict] + RequestParameters: Optional[dict] + RequestValidatorId: Optional[str] + + +class IntegrationResponse(TypedDict): + StatusCode: Optional[str] + ContentHandling: Optional[str] + ResponseParameters: Optional[dict] + ResponseTemplates: Optional[dict] + SelectionPattern: Optional[str] + + +class Integration(TypedDict): + Type: Optional[str] + CacheKeyParameters: Optional[list[str]] + CacheNamespace: Optional[str] + ConnectionId: Optional[str] + ConnectionType: Optional[str] + ContentHandling: Optional[str] + Credentials: Optional[str] + IntegrationHttpMethod: Optional[str] + IntegrationResponses: Optional[list[IntegrationResponse]] + PassthroughBehavior: Optional[str] + RequestParameters: Optional[dict] + RequestTemplates: Optional[dict] + TimeoutInMillis: Optional[int] + Uri: Optional[str] + + +class 
MethodResponse(TypedDict): + StatusCode: Optional[str] + ResponseModels: Optional[dict] + ResponseParameters: Optional[dict] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayMethodProvider(ResourceProvider[ApiGatewayMethodProperties]): + TYPE = "AWS::ApiGateway::Method" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayMethodProperties], + ) -> ProgressEvent[ApiGatewayMethodProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/RestApiId + - /properties/ResourceId + - /properties/HttpMethod + + Required properties: + - RestApiId + - ResourceId + - HttpMethod + + Create-only properties: + - /properties/RestApiId + - /properties/ResourceId + - /properties/HttpMethod + + + + IAM permissions required: + - apigateway:PUT + - apigateway:GET + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + operation_model = apigw.meta.service_model.operation_model + + apigw.put_method( + **util.convert_request_kwargs(model, operation_model("PutMethod").input_shape) + ) + + # setting up integrations + integration = model.get("Integration") + if integration: + apigw.put_integration( + restApiId=model.get("RestApiId"), + resourceId=model.get("ResourceId"), + httpMethod=model.get("HttpMethod"), + **util.convert_request_kwargs( + integration, operation_model("PutIntegration").input_shape + ), + ) + + integration_responses = integration.pop("IntegrationResponses", []) + for integration_response in integration_responses: + apigw.put_integration_response( + restApiId=model.get("RestApiId"), + resourceId=model.get("ResourceId"), + httpMethod=model.get("HttpMethod"), + **util.convert_request_kwargs( + integration_response, operation_model("PutIntegrationResponse").input_shape + ), + ) + + responses = model.get("MethodResponses", []) + for response in responses: + apigw.put_method_response( + restApiId=model.get("RestApiId"), + resourceId=model.get("ResourceId"), + httpMethod=model.get("HttpMethod"), + **util.convert_request_kwargs( + response, operation_model("PutMethodResponse").input_shape + ), + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayMethodProperties], + ) -> ProgressEvent[ApiGatewayMethodProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayMethodProperties], + ) -> ProgressEvent[ApiGatewayMethodProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + + # FIXME we sometimes get warnings when calling this method, probably because + # restAPI or resource has been already deleted + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + try: + apigw.delete_method( + **util.convert_request_kwargs( + model, apigw.meta.service_model.operation_model("DeleteMethod").input_shape + ) + ) + except apigw.exceptions.NotFoundException: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayMethodProperties], + ) -> ProgressEvent[ApiGatewayMethodProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:GET 
+ - apigateway:DELETE + - apigateway:PUT + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + operation_model = apigw.meta.service_model.operation_model + + must_params = util.select_attributes( + model, + [ + "RestApiId", + "ResourceId", + "HttpMethod", + ], + ) + + if integration := deepcopy(model.get("Integration")): + integration.update(must_params) + apigw.put_integration( + **util.convert_request_kwargs( + integration, operation_model("PutIntegration").input_shape + ) + ) + + else: + must_params.update({"AuthorizationType": model.get("AuthorizationType")}) + apigw.put_method( + **util.convert_request_kwargs(must_params, operation_model("PutMethod").input_shape) + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.schema.json new file mode 100644 index 0000000000000..1b64f208e9c6d --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method.schema.json @@ -0,0 +1,318 @@ +{ + "typeName": "AWS::ApiGateway::Method", + "description": "Resource Type definition for AWS::ApiGateway::Method", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway.git", + "definitions": { + "Integration": { + "type": "object", + "additionalProperties": false, + "properties": { + "CacheKeyParameters": { + "description": "A list of request parameters whose values API Gateway caches.", + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "CacheNamespace": { + "description": "An API-specific tag group of related cached parameters.", + "type": "string" + }, + "ConnectionId": { + "description": "The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined.", + "type": "string" + }, + "ConnectionType": { + "description": "The type of the network connection to the integration endpoint.", + "type": "string", + "enum": [ + "INTERNET", + "VPC_LINK" + ] + }, + "ContentHandling": { + "description": "Specifies how to handle request payload content type conversions.", + "type": "string", + "enum": [ + "CONVERT_TO_BINARY", + "CONVERT_TO_TEXT" + ] + }, + "Credentials": { + "description": "The credentials that are required for the integration.", + "type": "string" + }, + "IntegrationHttpMethod": { + "description": "The integration's HTTP method type.", + "type": "string" + }, + "IntegrationResponses": { + "description": "The response that API Gateway provides after a method's backend completes processing a request.", + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/IntegrationResponse" + } + }, + "PassthroughBehavior": { + "description": "Indicates when API Gateway passes requests to the targeted backend.", + "type": "string", + "enum": [ + "WHEN_NO_MATCH", + "WHEN_NO_TEMPLATES", + "NEVER" + ] + }, + "RequestParameters": { + "description": "The request parameters that API Gateway sends with the backend request.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "RequestTemplates": { + "description": "A map of Apache Velocity templates that are applied on the request payload.", + "type": "object", + "additionalProperties": false, + 
"patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "TimeoutInMillis": { + "description": "Custom timeout between 50 and 29,000 milliseconds.", + "type": "integer", + "minimum": 50, + "maximum": 29000 + }, + "Type": { + "description": "The type of backend that your method is running.", + "type": "string", + "enum": [ + "AWS", + "AWS_PROXY", + "HTTP", + "HTTP_PROXY", + "MOCK" + ] + }, + "Uri": { + "description": "The Uniform Resource Identifier (URI) for the integration.", + "type": "string" + } + }, + "required": [ + "Type" + ] + }, + "MethodResponse": { + "type": "object", + "additionalProperties": false, + "properties": { + "ResponseModels": { + "description": "The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "ResponseParameters": { + "description": "Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "boolean" + } + } + }, + "StatusCode": { + "description": "The method response's status code, which you map to an IntegrationResponse.", + "type": "string" + } + }, + "required": [ + "StatusCode" + ] + }, + "IntegrationResponse": { + "type": "object", + "additionalProperties": false, + "properties": { + "ContentHandling": { + "description": "Specifies how to handle request payload content type conversions.", + "type": "string", + "enum": [ + "CONVERT_TO_BINARY", + "CONVERT_TO_TEXT" + ] + }, + "ResponseParameters": { + "description": "The response parameters from the backend response that API Gateway sends to the method response.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "ResponseTemplates": { + "description": "The templates that are used to transform the integration response body. 
Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "SelectionPattern": { + "description": "A regular expression that specifies which error strings or status codes from the backend map to the integration response.", + "type": "string" + }, + "StatusCode": { + "description": "The status code that API Gateway uses to map the integration response to a MethodResponse status code.", + "type": "string" + } + }, + "required": [ + "StatusCode" + ] + } + }, + "properties": { + "ApiKeyRequired": { + "description": "Indicates whether the method requires clients to submit a valid API key.", + "type": "boolean" + }, + "AuthorizationScopes": { + "description": "A list of authorization scopes configured on the method.", + "type": "array", + "items": { + "type": "string" + } + }, + "AuthorizationType": { + "description": "The method's authorization type.", + "type": "string", + "enum": [ + "NONE", + "AWS_IAM", + "CUSTOM", + "COGNITO_USER_POOLS" + ] + }, + "AuthorizerId": { + "description": "The identifier of the authorizer to use on this method.", + "type": "string" + }, + "HttpMethod": { + "description": "The method's HTTP verb.", + "type": "string" + }, + "Integration": { + "description": "The backend system that the method calls when it receives a request.", + "$ref": "#/definitions/Integration" + }, + "MethodResponses": { + "description": "The responses that can be sent to the client who calls the method.", + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/MethodResponse" + } + }, + "OperationName": { + "description": "A friendly operation name for the method.", + "type": "string" + }, + "RequestModels": { + "description": "The resources that are used for the request's content type. Specify request models as key-value pairs (string-to-string mapping), with a content type as the key and a Model resource name as the value.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "RequestParameters": { + "description": "The request parameters that API Gateway accepts. 
Specify request parameters as key-value pairs (string-to-Boolean mapping), with a source as the key and a Boolean as the value.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "boolean" + } + } + }, + "RequestValidatorId": { + "description": "The ID of the associated request validator.", + "type": "string" + }, + "ResourceId": { + "description": "The ID of an API Gateway resource.", + "type": "string" + }, + "RestApiId": { + "description": "The ID of the RestApi resource in which API Gateway creates the method.", + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "RestApiId", + "ResourceId", + "HttpMethod" + ], + "primaryIdentifier": [ + "/properties/RestApiId", + "/properties/ResourceId", + "/properties/HttpMethod" + ], + "createOnlyProperties": [ + "/properties/RestApiId", + "/properties/ResourceId", + "/properties/HttpMethod" + ], + "tagging": { + "taggable": false, + "tagOnCreate": false, + "tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "handlers": { + "create": { + "permissions": [ + "apigateway:PUT", + "apigateway:GET" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:GET", + "apigateway:DELETE", + "apigateway:PUT" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method_plugin.py new file mode 100644 index 0000000000000..34e0cec7971a9 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_method_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayMethodProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::Method" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_method import ( + ApiGatewayMethodProvider, + ) + + self.factory = ApiGatewayMethodProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.py new file mode 100644 index 0000000000000..07883e62983ca --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.py @@ -0,0 +1,134 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class ApiGatewayModelProperties(TypedDict): + RestApiId: Optional[str] + ContentType: Optional[str] + Description: Optional[str] + Name: Optional[str] + Schema: Optional[dict | str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayModelProvider(ResourceProvider[ApiGatewayModelProperties]): + TYPE = "AWS::ApiGateway::Model" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayModelProperties], + ) -> ProgressEvent[ApiGatewayModelProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/RestApiId + - /properties/Name + + Required properties: + - RestApiId + + Create-only properties: + - /properties/ContentType + - /properties/Name + - /properties/RestApiId + + + + IAM permissions required: + - apigateway:POST + - apigateway:GET + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + if not model.get("Name"): + model["Name"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + if not model.get("ContentType"): + model["ContentType"] = "application/json" + + schema = json.dumps(model.get("Schema", {})) + + apigw.create_model( + restApiId=model["RestApiId"], + name=model["Name"], + contentType=model["ContentType"], + schema=schema, + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayModelProperties], + ) -> ProgressEvent[ApiGatewayModelProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayModelProperties], + ) -> ProgressEvent[ApiGatewayModelProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:GET + - apigateway:DELETE + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + try: + apigw.delete_model(modelName=model["Name"], restApiId=model["RestApiId"]) + except apigw.exceptions.NotFoundException: + # We are using try/except since, at the moment, + # CFN doesn't properly resolve dependencies between resources, + # so this resource could already be deleted if the parent resource was deleted first + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayModelProperties], + ) -> ProgressEvent[ApiGatewayModelProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:PATCH + - apigateway:GET + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.schema.json new file mode 100644 index 0000000000000..7196fd5cc44b0 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model.schema.json @@ -0,0 +1,83 @@ +{ + "typeName": "AWS::ApiGateway::Model", + "description": "Resource Type definition for AWS::ApiGateway::Model", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "ContentType": { + "type": "string", + "description": "The content type for the model." + }, + "Description": { + "type": "string", + "description": "A description that identifies this model." + }, + "Name": { + "type": "string", + "description": "A name for the model. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the model name." + }, + "RestApiId": { + "type": "string", + "description": "The ID of a REST API with which to associate this model." 
+ }, + "Schema": { + "description": "The schema to use to transform data to one or more output formats. Specify null ({}) if you don't want to specify a schema.", + "type": [ + "object", + "string" + ] + } + }, + "required": [ + "RestApiId" + ], + "createOnlyProperties": [ + "/properties/ContentType", + "/properties/Name", + "/properties/RestApiId" + ], + "primaryIdentifier": [ + "/properties/RestApiId", + "/properties/Name" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:POST", + "apigateway:GET" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:PATCH", + "apigateway:GET" + ] + }, + "delete": { + "permissions": [ + "apigateway:GET", + "apigateway:DELETE" + ] + }, + "list": { + "handlerSchema": { + "properties": { + "RestApiId": { + "$ref": "resource-schema.json#/properties/RestApiId" + } + }, + "required": [ + "RestApiId" + ] + }, + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model_plugin.py new file mode 100644 index 0000000000000..d1bd727b602e5 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_model_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayModelProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::Model" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_model import ( + ApiGatewayModelProvider, + ) + + self.factory = ApiGatewayModelProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.py new file mode 100644 index 0000000000000..55d2a3bc4964e --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.py @@ -0,0 +1,125 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class ApiGatewayRequestValidatorProperties(TypedDict): + RestApiId: Optional[str] + Name: Optional[str] + RequestValidatorId: Optional[str] + ValidateRequestBody: Optional[bool] + ValidateRequestParameters: Optional[bool] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayRequestValidatorProvider(ResourceProvider[ApiGatewayRequestValidatorProperties]): + TYPE = "AWS::ApiGateway::RequestValidator" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayRequestValidatorProperties], + ) -> ProgressEvent[ApiGatewayRequestValidatorProperties]: + """ + Create a new resource. 
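+ ValidateRequestBody and ValidateRequestParameters default to False when not supplied, matching the service defaults.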
+ + Primary identifier fields: + - /properties/RestApiId + - /properties/RequestValidatorId + + Required properties: + - RestApiId + + Create-only properties: + - /properties/Name + - /properties/RestApiId + + Read-only properties: + - /properties/RequestValidatorId + + IAM permissions required: + - apigateway:POST + - apigateway:GET + + """ + model = request.desired_state + api = request.aws_client_factory.apigateway + + if not model.get("Name"): + model["Name"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + response = api.create_request_validator( + name=model["Name"], + restApiId=model["RestApiId"], + validateRequestBody=model.get("ValidateRequestBody", False), + validateRequestParameters=model.get("ValidateRequestParameters", False), + ) + model["RequestValidatorId"] = response["id"] + # FIXME error happens when other resources try to reference this one + # "An error occurred (BadRequestException) when calling the PutMethod operation: + # Invalid Request Validator identifier specified" + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayRequestValidatorProperties], + ) -> ProgressEvent[ApiGatewayRequestValidatorProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayRequestValidatorProperties], + ) -> ProgressEvent[ApiGatewayRequestValidatorProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + model = request.desired_state + api = request.aws_client_factory.apigateway + + api.delete_request_validator( + restApiId=model["RestApiId"], requestValidatorId=model["RequestValidatorId"] + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayRequestValidatorProperties], + ) -> ProgressEvent[ApiGatewayRequestValidatorProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:PATCH + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.schema.json new file mode 100644 index 0000000000000..39d00e7be7d6d --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator.schema.json @@ -0,0 +1,80 @@ +{ + "typeName": "AWS::ApiGateway::RequestValidator", + "description": "Resource Type definition for AWS::ApiGateway::RequestValidator", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "RequestValidatorId": { + "description": "ID of the request validator.", + "type": "string" + }, + "Name": { + "description": "Name of the request validator.", + "type": "string" + }, + "RestApiId": { + "description": "The identifier of the targeted API entity.", + "type": "string" + }, + "ValidateRequestBody": { + "description": "Indicates whether to validate the request body according to the configured schema for the targeted API and method. 
", + "type": "boolean" + }, + "ValidateRequestParameters": { + "description": "Indicates whether to validate request parameters.", + "type": "boolean" + } + }, + "required": [ + "RestApiId" + ], + "createOnlyProperties": [ + "/properties/Name", + "/properties/RestApiId" + ], + "readOnlyProperties": [ + "/properties/RequestValidatorId" + ], + "primaryIdentifier": [ + "/properties/RestApiId", + "/properties/RequestValidatorId" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:POST", + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:PATCH" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "list": { + "handlerSchema": { + "properties": { + "RestApiId": { + "$ref": "resource-schema.json#/properties/RestApiId" + } + }, + "required": [ + "RestApiId" + ] + }, + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator_plugin.py new file mode 100644 index 0000000000000..41175341a69de --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_requestvalidator_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayRequestValidatorProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::RequestValidator" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_requestvalidator import ( + ApiGatewayRequestValidatorProvider, + ) + + self.factory = ApiGatewayRequestValidatorProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.py new file mode 100644 index 0000000000000..89b868306e68d --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.py @@ -0,0 +1,168 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +from botocore.exceptions import ClientError + +import localstack.services.cloudformation.provider_utils as util +from localstack.aws.api.cloudcontrol import InvalidRequestException, ResourceNotFoundException +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class ApiGatewayResourceProperties(TypedDict): + ParentId: Optional[str] + PathPart: Optional[str] + RestApiId: Optional[str] + ResourceId: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayResourceProvider(ResourceProvider[ApiGatewayResourceProperties]): + TYPE = "AWS::ApiGateway::Resource" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayResourceProperties], + ) -> ProgressEvent[ApiGatewayResourceProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/RestApiId + - /properties/ResourceId + + Required properties: + - ParentId + - PathPart + - RestApiId + + Create-only properties: + - /properties/PathPart + - /properties/ParentId + - /properties/RestApiId + + Read-only properties: + - /properties/ResourceId + + IAM permissions required: + - apigateway:POST + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + params = { + "restApiId": model.get("RestApiId"), + "pathPart": model.get("PathPart"), + "parentId": model.get("ParentId"), + } + if not params.get("parentId"): + # get root resource id + resources = apigw.get_resources(restApiId=params["restApiId"])["items"] + root_resource = ([r for r in resources if r["path"] == "/"] or [None])[0] + if not root_resource: + raise Exception( + "Unable to find root resource for REST API %s" % params["restApiId"] + ) + params["parentId"] = root_resource["id"] + response = apigw.create_resource(**params) + + model["ResourceId"] = response["id"] + model["ParentId"] = response["parentId"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayResourceProperties], + ) -> ProgressEvent[ApiGatewayResourceProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def list( + self, + request: ResourceRequest[ApiGatewayResourceProperties], + ) -> ProgressEvent[ApiGatewayResourceProperties]: + if "RestApiId" not in request.desired_state: + # TODO: parity + raise InvalidRequestException( + f"Missing or invalid ResourceModel property in {self.TYPE} list handler request input: 'RestApiId'" + ) + + rest_api_id = request.desired_state["RestApiId"] + try: + resources = request.aws_client_factory.apigateway.get_resources(restApiId=rest_api_id)[ + "items" + ] + except ClientError as exc: + if exc.response.get("Error", {}).get("Code", {}) == "NotFoundException": + raise ResourceNotFoundException(f"Invalid API identifier specified: {rest_api_id}") + raise + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + ApiGatewayResourceProperties( + RestApiId=rest_api_id, + ResourceId=resource["id"], + ParentId=resource.get("parentId"), + PathPart=resource.get("path"), + ) + for resource in resources + ], + ) + + def delete( + self, + request: ResourceRequest[ApiGatewayResourceProperties], + ) -> ProgressEvent[ApiGatewayResourceProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + try: + apigw.delete_resource(restApiId=model["RestApiId"], resourceId=model["ResourceId"]) + except apigw.exceptions.NotFoundException: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayResourceProperties], + ) -> ProgressEvent[ApiGatewayResourceProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:GET + - apigateway:PATCH + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.schema.json new file mode 100644 index 0000000000000..7eaa8175b1827 --- /dev/null 
+++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource.schema.json @@ -0,0 +1,80 @@ +{ + "typeName": "AWS::ApiGateway::Resource", + "description": "Resource Type definition for AWS::ApiGateway::Resource", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "ResourceId": { + "description": "A unique primary identifier for a Resource", + "type": "string" + }, + "RestApiId": { + "description": "The ID of the RestApi resource in which you want to create this resource..", + "type": "string" + }, + "ParentId": { + "description": "The parent resource's identifier.", + "type": "string" + }, + "PathPart": { + "description": "The last path segment for this resource.", + "type": "string" + } + }, + "taggable": false, + "required": [ + "ParentId", + "PathPart", + "RestApiId" + ], + "createOnlyProperties": [ + "/properties/PathPart", + "/properties/ParentId", + "/properties/RestApiId" + ], + "primaryIdentifier": [ + "/properties/RestApiId", + "/properties/ResourceId" + ], + "readOnlyProperties": [ + "/properties/ResourceId" + ], + "handlers": { + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "create": { + "permissions": [ + "apigateway:POST" + ] + }, + "update": { + "permissions": [ + "apigateway:GET", + "apigateway:PATCH" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + }, + "list": { + "handlerSchema": { + "properties": { + "RestApiId": { + "$ref": "resource-schema.json#/properties/RestApiId" + } + }, + "required": [ + "RestApiId" + ] + }, + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource_plugin.py new file mode 100644 index 0000000000000..f7ece7204435d --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_resource_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayResourceProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::Resource" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_resource import ( + ApiGatewayResourceProvider, + ) + + self.factory = ApiGatewayResourceProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.py new file mode 100644 index 0000000000000..c90e2b36f328b --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.py @@ -0,0 +1,245 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.objects import keys_to_lower +from localstack.utils.strings import to_bytes + + +class 
ApiGatewayRestApiProperties(TypedDict): + ApiKeySourceType: Optional[str] + BinaryMediaTypes: Optional[list[str]] + Body: Optional[dict | str] + BodyS3Location: Optional[S3Location] + CloneFrom: Optional[str] + Description: Optional[str] + DisableExecuteApiEndpoint: Optional[bool] + EndpointConfiguration: Optional[EndpointConfiguration] + FailOnWarnings: Optional[bool] + MinimumCompressionSize: Optional[int] + Mode: Optional[str] + Name: Optional[str] + Parameters: Optional[dict | str] + Policy: Optional[dict | str] + RestApiId: Optional[str] + RootResourceId: Optional[str] + Tags: Optional[list[Tag]] + + + class S3Location(TypedDict): + Bucket: Optional[str] + ETag: Optional[str] + Key: Optional[str] + Version: Optional[str] + + + class EndpointConfiguration(TypedDict): + Types: Optional[list[str]] + VpcEndpointIds: Optional[list[str]] + + + class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + + REPEATED_INVOCATION = "repeated_invocation" + + + class ApiGatewayRestApiProvider(ResourceProvider[ApiGatewayRestApiProperties]): + TYPE = "AWS::ApiGateway::RestApi" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayRestApiProperties], + ) -> ProgressEvent[ApiGatewayRestApiProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/RestApiId + + + Read-only properties: + - /properties/RestApiId + - /properties/RootResourceId + + IAM permissions required: + - apigateway:GET + - apigateway:POST + - apigateway:UpdateRestApiPolicy + - s3:GetObject + - iam:PassRole + + """ + model = request.desired_state + api = request.aws_client_factory.apigateway + + # FIXME: defaulting the name should only happen when Body or BodyS3Location is set; otherwise the deployment should fail if no Name is given + api_name = model.get("Name") + if not api_name: + model["Name"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + params = util.select_attributes( + model, + [ + "Name", + "Description", + "Version", + "CloneFrom", + "BinaryMediaTypes", + "MinimumCompressionSize", + "ApiKeySource", + "EndpointConfiguration", + "Policy", + "Tags", + "DisableExecuteApiEndpoint", + ], + ) + params = keys_to_lower(params, skip_children_of=["policy"]) + params["tags"] = {tag["key"]: tag["value"] for tag in params.get("tags", [])} + + cfn_client = request.aws_client_factory.cloudformation + stack_id = cfn_client.describe_stacks(StackName=request.stack_name)["Stacks"][0]["StackId"] + params["tags"].update( + { + "aws:cloudformation:logical-id": request.logical_resource_id, + "aws:cloudformation:stack-name": request.stack_name, + "aws:cloudformation:stack-id": stack_id, + } + ) + if isinstance(params.get("policy"), dict): + params["policy"] = json.dumps(params["policy"]) + + result = api.create_rest_api(**params) + model["RestApiId"] = result["id"] + + body = model.get("Body") + s3_body_location = model.get("BodyS3Location") + if body or s3_body_location: + # the default behavior for imports via CFn is basepath=ignore (validated against AWS) + import_parameters = model.get("Parameters", {}) + import_parameters.setdefault("basepath", "ignore") + + if body: + body = json.dumps(body) if isinstance(body, dict) else body + else: + get_obj_kwargs = {} + if version_id := s3_body_location.get("Version"): + get_obj_kwargs["VersionId"] = version_id + + # TODO: what should happen when the client call fails? Do we bubble it up? 
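+ # Fetch the definition body from S3; the supplied ETag (if any) is checked against the fetched object below before the body is imported.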
+ s3_client = request.aws_client_factory.s3 + get_obj_req = s3_client.get_object( + Bucket=s3_body_location.get("Bucket"), + Key=s3_body_location.get("Key"), + **get_obj_kwargs, + ) + if etag := s3_body_location.get("ETag"): + if etag != get_obj_req["ETag"]: + # TODO: validate the exception message + raise Exception( + "The ETag provided for the S3BodyLocation does not match the S3 Object" + ) + body = get_obj_req["Body"].read() + + put_kwargs = {} + if import_mode := model.get("Mode"): + put_kwargs["mode"] = import_mode + if fail_on_warnings_mode := model.get("FailOnWarnings"): + put_kwargs["failOnWarnings"] = fail_on_warnings_mode + + api.put_rest_api( + restApiId=result["id"], + body=to_bytes(body), + parameters=import_parameters, + **put_kwargs, + ) + + resources = api.get_resources(restApiId=result["id"])["items"] + for res in resources: + if res["path"] == "/" and not res.get("parentId"): + model["RootResourceId"] = res["id"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayRestApiProperties], + ) -> ProgressEvent[ApiGatewayRestApiProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def list( + self, + request: ResourceRequest[ApiGatewayRestApiProperties], + ) -> ProgressEvent[ApiGatewayRestApiProperties]: + # TODO: pagination + resources = request.aws_client_factory.apigateway.get_rest_apis()["items"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + ApiGatewayRestApiProperties(RestApiId=resource["id"], Name=resource["name"]) + for resource in resources + ], + ) + + def delete( + self, + request: ResourceRequest[ApiGatewayRestApiProperties], + ) -> ProgressEvent[ApiGatewayRestApiProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + model = request.desired_state + api = request.aws_client_factory.apigateway + + api.delete_rest_api(restApiId=model["RestApiId"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayRestApiProperties], + ) -> ProgressEvent[ApiGatewayRestApiProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:GET + - apigateway:DELETE + - apigateway:PATCH + - apigateway:PUT + - apigateway:UpdateRestApiPolicy + - s3:GetObject + - iam:PassRole + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.schema.json new file mode 100644 index 0000000000000..73e6f5dda9447 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi.schema.json @@ -0,0 +1,197 @@ +{ + "typeName": "AWS::ApiGateway::RestApi", + "description": "Resource Type definition for AWS::ApiGateway::RestApi.", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "additionalProperties": false, + "definitions": { + "EndpointConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Types": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "VpcEndpointIds": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" 
+ } + } + } + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ] + }, + "S3Location": { + "type": "object", + "additionalProperties": false, + "properties": { + "Bucket": { + "type": "string" + }, + "ETag": { + "type": "string" + }, + "Version": { + "type": "string" + }, + "Key": { + "type": "string" + } + } + } + }, + "properties": { + "RestApiId": { + "type": "string" + }, + "RootResourceId": { + "type": "string" + }, + "ApiKeySourceType": { + "type": "string" + }, + "BinaryMediaTypes": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Body": { + "type": [ + "object", + "string" + ] + }, + "BodyS3Location": { + "$ref": "#/definitions/S3Location" + }, + "CloneFrom": { + "type": "string" + }, + "EndpointConfiguration": { + "$ref": "#/definitions/EndpointConfiguration" + }, + "Description": { + "type": "string" + }, + "DisableExecuteApiEndpoint": { + "type": "boolean" + }, + "FailOnWarnings": { + "type": "boolean" + }, + "Name": { + "type": "string" + }, + "MinimumCompressionSize": { + "type": "integer" + }, + "Mode": { + "type": "string" + }, + "Policy": { + "type": [ + "object", + "string" + ] + }, + "Parameters": { + "type": [ + "object", + "string" + ], + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "primaryIdentifier": [ + "/properties/RestApiId" + ], + "readOnlyProperties": [ + "/properties/RestApiId", + "/properties/RootResourceId" + ], + "writeOnlyProperties": [ + "/properties/Body", + "/properties/BodyS3Location", + "/properties/CloneFrom", + "/properties/FailOnWarnings", + "/properties/Mode", + "/properties/Parameters" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:GET", + "apigateway:POST", + "apigateway:UpdateRestApiPolicy", + "s3:GetObject", + "iam:PassRole" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:GET", + "apigateway:DELETE", + "apigateway:PATCH", + "apigateway:PUT", + "apigateway:UpdateRestApiPolicy", + "s3:GetObject", + "iam:PassRole" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + }, + "list": { + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi_plugin.py new file mode 100644 index 0000000000000..e53c4a4d8205f --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_restapi_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayRestApiProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::RestApi" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_restapi import ( + ApiGatewayRestApiProvider, + ) + + 
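+ # editor note: the import above is deferred into load(), presumably so that plugin + # discovery stays cheap - the provider module is only imported when this resource + # type is first used in a stack.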
self.factory = ApiGatewayRestApiProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.py new file mode 100644 index 0000000000000..b2b98bc715455 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.py @@ -0,0 +1,183 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import copy +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.objects import keys_to_lower + + +class ApiGatewayStageProperties(TypedDict): + RestApiId: Optional[str] + AccessLogSetting: Optional[AccessLogSetting] + CacheClusterEnabled: Optional[bool] + CacheClusterSize: Optional[str] + CanarySetting: Optional[CanarySetting] + ClientCertificateId: Optional[str] + DeploymentId: Optional[str] + Description: Optional[str] + DocumentationVersion: Optional[str] + MethodSettings: Optional[list[MethodSetting]] + StageName: Optional[str] + Tags: Optional[list[Tag]] + TracingEnabled: Optional[bool] + Variables: Optional[dict] + + +class AccessLogSetting(TypedDict): + DestinationArn: Optional[str] + Format: Optional[str] + + +class CanarySetting(TypedDict): + DeploymentId: Optional[str] + PercentTraffic: Optional[float] + StageVariableOverrides: Optional[dict] + UseStageCache: Optional[bool] + + +class MethodSetting(TypedDict): + CacheDataEncrypted: Optional[bool] + CacheTtlInSeconds: Optional[int] + CachingEnabled: Optional[bool] + DataTraceEnabled: Optional[bool] + HttpMethod: Optional[str] + LoggingLevel: Optional[str] + MetricsEnabled: Optional[bool] + ResourcePath: Optional[str] + ThrottlingBurstLimit: Optional[int] + ThrottlingRateLimit: Optional[float] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayStageProvider(ResourceProvider[ApiGatewayStageProperties]): + TYPE = "AWS::ApiGateway::Stage" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayStageProperties], + ) -> ProgressEvent[ApiGatewayStageProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/RestApiId + - /properties/StageName + + Required properties: + - RestApiId + + Create-only properties: + - /properties/RestApiId + - /properties/StageName + + + + IAM permissions required: + - apigateway:POST + - apigateway:PATCH + - apigateway:GET + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + stage_name = model.get("StageName", "default") + stage_variables = model.get("Variables") + # we need to deep copy as several fields are nested dicts and arrays + params = keys_to_lower(copy.deepcopy(model)) + # TODO: add methodSettings + # TODO: add custom CfN tags + param_names = [ + "restApiId", + "deploymentId", + "description", + "cacheClusterEnabled", + "cacheClusterSize", + "documentationVersion", + "canarySettings", + "tracingEnabled", + "tags", + ] + params = util.select_attributes(params, param_names) + params["tags"] = {t["key"]: t["value"] for t in params.get("tags", [])} + params["stageName"] = stage_name + if stage_variables: + params["variables"] = stage_variables + + result = apigw.create_stage(**params) + model["StageName"] = result["stageName"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayStageProperties], + ) -> ProgressEvent[ApiGatewayStageProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayStageProperties], + ) -> ProgressEvent[ApiGatewayStageProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + try: + # check whether the stage has already been deleted before calling delete + apigw.get_stage(restApiId=model["RestApiId"], stageName=model["StageName"]) + apigw.delete_stage(restApiId=model["RestApiId"], stageName=model["StageName"]) + except apigw.exceptions.NotFoundException: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayStageProperties], + ) -> ProgressEvent[ApiGatewayStageProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:GET + - apigateway:PATCH + - apigateway:PUT + - apigateway:DELETE + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.schema.json new file mode 100644 index 0000000000000..fe67c2c0c626f --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage.schema.json @@ -0,0 +1,260 @@ +{ + "typeName": "AWS::ApiGateway::Stage", + "description": "Resource Type definition for AWS::ApiGateway::Stage", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "AccessLogSetting": { + "description": "Specifies settings for logging access in this stage.", + "$ref": "#/definitions/AccessLogSetting" + }, + "CacheClusterEnabled": { + "description": "Indicates whether cache clustering is enabled for the stage.", + "type": "boolean" + }, + "CacheClusterSize": { + "description": "The 
stage's cache cluster size.", + "type": "string" + }, + "CanarySetting": { + "description": "Specifies settings for the canary deployment in this stage.", + "$ref": "#/definitions/CanarySetting" + }, + "ClientCertificateId": { + "description": "The ID of the client certificate that API Gateway uses to call your integration endpoints in the stage. ", + "type": "string" + }, + "DeploymentId": { + "description": "The ID of the deployment that the stage is associated with. This parameter is required to create a stage. ", + "type": "string" + }, + "Description": { + "description": "A description of the stage.", + "type": "string" + }, + "DocumentationVersion": { + "description": "The version ID of the API documentation snapshot.", + "type": "string" + }, + "MethodSettings": { + "description": "Settings for all methods in the stage.", + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/MethodSetting" + } + }, + "RestApiId": { + "description": "The ID of the RestApi resource that you're deploying with this stage.", + "type": "string" + }, + "StageName": { + "description": "The name of the stage, which API Gateway uses as the first path segment in the invoked Uniform Resource Identifier (URI).", + "type": "string" + }, + "Tags": { + "description": "An array of arbitrary tags (key-value pairs) to associate with the stage.", + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "TracingEnabled": { + "description": "Specifies whether active X-Ray tracing is enabled for this stage.", + "type": "boolean" + }, + "Variables": { + "description": "A map (string-to-string map) that defines the stage variables, where the variable name is the key and the variable value is the value.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + } + }, + "definitions": { + "CanarySetting": { + "description": "Specifies settings for the canary deployment in this stage.", + "type": "object", + "additionalProperties": false, + "properties": { + "DeploymentId": { + "description": "The identifier of the deployment that the stage points to.", + "type": "string" + }, + "PercentTraffic": { + "description": "The percentage (0-100) of traffic diverted to a canary deployment.", + "type": "number", + "minimum": 0, + "maximum": 100 + }, + "StageVariableOverrides": { + "description": "Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values.", + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "UseStageCache": { + "description": "Whether the canary deployment uses the stage cache or not.", + "type": "boolean" + } + } + }, + "AccessLogSetting": { + "description": "Specifies settings for logging access in this stage.", + "type": "object", + "additionalProperties": false, + "properties": { + "DestinationArn": { + "description": "The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. 
This parameter is required to enable access logging.", + "type": "string" + }, + "Format": { + "description": "A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging.", + "type": "string" + } + } + }, + "MethodSetting": { + "description": "Configures settings for all methods in a stage.", + "type": "object", + "additionalProperties": false, + "properties": { + "CacheDataEncrypted": { + "description": "Indicates whether the cached responses are encrypted.", + "type": "boolean" + }, + "CacheTtlInSeconds": { + "description": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.", + "type": "integer" + }, + "CachingEnabled": { + "description": "Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses.", + "type": "boolean" + }, + "DataTraceEnabled": { + "description": "Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs.", + "type": "boolean" + }, + "HttpMethod": { + "description": "The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.", + "type": "string" + }, + "LoggingLevel": { + "description": "The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference.", + "type": "string" + }, + "MetricsEnabled": { + "description": "Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage.", + "type": "boolean" + }, + "ResourcePath": { + "description": "The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods.", + "type": "string" + }, + "ThrottlingBurstLimit": { + "description": "The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.", + "type": "integer", + "minimum": 0 + }, + "ThrottlingRateLimit": { + "description": "The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account.", + "type": "number", + "minimum": 0 + } + } + }, + "Tag": { + "description": "Identify and categorize resources.", + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:.", + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "description": "The value for the tag. 
You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.", + "type": "string", + "minLength": 0, + "maxLength": 256 + } + }, + "required": [ + "Key", + "Value" + ] + } + }, + "required": [ + "RestApiId" + ], + "createOnlyProperties": [ + "/properties/RestApiId", + "/properties/StageName" + ], + "primaryIdentifier": [ + "/properties/RestApiId", + "/properties/StageName" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "handlers": { + "create": { + "permissions": [ + "apigateway:POST", + "apigateway:PATCH", + "apigateway:GET" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:GET", + "apigateway:PATCH", + "apigateway:PUT", + "apigateway:DELETE" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + }, + "list": { + "handlerSchema": { + "properties": { + "RestApiId": { + "$ref": "resource-schema.json#/properties/RestApiId" + } + }, + "required": [ + "RestApiId" + ] + }, + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage_plugin.py new file mode 100644 index 0000000000000..e0898bae2c695 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_stage_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayStageProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::Stage" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_stage import ( + ApiGatewayStageProvider, + ) + + self.factory = ApiGatewayStageProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.py new file mode 100644 index 0000000000000..1e10c9badfc3f --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.py @@ -0,0 +1,215 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.aws.arns import get_partition +from localstack.utils.objects import keys_to_lower +from localstack.utils.strings import first_char_to_lower + + +class ApiGatewayUsagePlanProperties(TypedDict): + ApiStages: Optional[list[ApiStage]] + Description: Optional[str] + Id: Optional[str] + Quota: Optional[QuotaSettings] + Tags: Optional[list[Tag]] + Throttle: Optional[ThrottleSettings] + UsagePlanName: Optional[str] + + +class ApiStage(TypedDict): + ApiId: Optional[str] + Stage: Optional[str] + Throttle: Optional[dict] + + +class QuotaSettings(TypedDict): + Limit: Optional[int] + Offset: Optional[int] + Period: Optional[str] + + +class 
Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class ThrottleSettings(TypedDict): + BurstLimit: Optional[int] + RateLimit: Optional[float] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayUsagePlanProvider(ResourceProvider[ApiGatewayUsagePlanProperties]): + TYPE = "AWS::ApiGateway::UsagePlan" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayUsagePlanProperties], + ) -> ProgressEvent[ApiGatewayUsagePlanProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Read-only properties: + - /properties/Id + + IAM permissions required: + - apigateway:POST + - apigateway:GET + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + plan_name = model.get("UsagePlanName") + if not plan_name: + model["UsagePlanName"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + params = util.select_attributes(model, ["Description", "ApiStages", "Quota", "Throttle"]) + params = keys_to_lower(params.copy()) + params["name"] = model["UsagePlanName"] + + if model.get("Tags"): + params["tags"] = {tag["Key"]: tag["Value"] for tag in model["Tags"]} + + # set int and float types + if params.get("quota"): + params["quota"]["limit"] = int(params["quota"]["limit"]) + + if params.get("throttle"): + params["throttle"]["burstLimit"] = int(params["throttle"]["burstLimit"]) + params["throttle"]["rateLimit"] = float(params["throttle"]["rateLimit"]) + + response = apigw.create_usage_plan(**params) + + model["Id"] = response["id"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayUsagePlanProperties], + ) -> ProgressEvent[ApiGatewayUsagePlanProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayUsagePlanProperties], + ) -> ProgressEvent[ApiGatewayUsagePlanProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + apigw.delete_usage_plan(usagePlanId=model["Id"]) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayUsagePlanProperties], + ) -> ProgressEvent[ApiGatewayUsagePlanProperties]: + """ + Update a resource + + IAM permissions required: + - apigateway:GET + - apigateway:DELETE + - apigateway:PATCH + - apigateway:PUT + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + parameters_to_select = [ + "UsagePlanName", + "Description", + "ApiStages", + "Quota", + "Throttle", + "Tags", + ] + update_config_props = util.select_attributes(model, parameters_to_select) + + updated_tags = update_config_props.pop("Tags", []) + + usage_plan_id = request.previous_state["Id"] + + patch_operations = [] + + for parameter in update_config_props: + value = update_config_props[parameter] + if parameter == "ApiStages": + for stage in value: + patch_operations.append( + { + "op": "replace", + "path": f"/{first_char_to_lower(parameter)}", + "value": f"{stage['ApiId']}:{stage['Stage']}", + } + ) + + if "Throttle" in stage: + 
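+ # illustrative, with hypothetical values: ApiId "a1b2c3", Stage "prod" and + # Throttle {"/pets/GET": {"burstLimit": 100}} produce the patch operation + # {"op": "replace", "path": "/apiStages/a1b2c3:prod", "value": "{\"/pets/GET\": {\"burstLimit\": 100}}"}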
patch_operations.append( + { + "op": "replace", + "path": f"/{first_char_to_lower(parameter)}/{stage['ApiId']}:{stage['Stage']}", + "value": json.dumps(stage["Throttle"]), + } + ) + + elif isinstance(value, dict): + for item in value: + last_value = value[item] + path = f"/{first_char_to_lower(parameter)}/{first_char_to_lower(item)}" + patch_operations.append({"op": "replace", "path": path, "value": last_value}) + else: + patch_operations.append( + {"op": "replace", "path": f"/{first_char_to_lower(parameter)}", "value": value} + ) + apigw.update_usage_plan(usagePlanId=usage_plan_id, patchOperations=patch_operations) + + if updated_tags: + tags = {tag["Key"]: tag["Value"] for tag in updated_tags} + usage_plan_arn = f"arn:{get_partition(request.region_name)}:apigateway:{request.region_name}::/usageplans/{usage_plan_id}" + apigw.tag_resource(resourceArn=usage_plan_arn, tags=tags) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={**request.previous_state, **request.desired_state}, + custom_context=request.custom_context, + ) diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.schema.json new file mode 100644 index 0000000000000..96f6f07bb01ca --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan.schema.json @@ -0,0 +1,173 @@ +{ + "typeName": "AWS::ApiGateway::UsagePlan", + "description": "Resource Type definition for AWS::ApiGateway::UsagePlan", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway.git", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string", + "description": "The provider-assigned unique ID for this managed resource." + }, + "ApiStages": { + "type": "array", + "description": "The API stages to associate with this usage plan.", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/ApiStage" + } + }, + "Description": { + "type": "string", + "description": "A description of the usage plan." + }, + "Quota": { + "$ref": "#/definitions/QuotaSettings", + "description": "Configures the number of requests that users can make within a given interval." + }, + "Tags": { + "type": "array", + "description": "An array of arbitrary tags (key-value pairs) to associate with the usage plan.", + "insertionOrder": false, + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Throttle": { + "$ref": "#/definitions/ThrottleSettings", + "description": "Configures the overall request rate (average requests per second) and burst capacity." + }, + "UsagePlanName": { + "type": "string", + "description": "A name for the usage plan." + } + }, + "definitions": { + "ApiStage": { + "type": "object", + "additionalProperties": false, + "properties": { + "ApiId": { + "type": "string", + "description": "The ID of an API that is in the specified Stage property that you want to associate with the usage plan." + }, + "Stage": { + "type": "string", + "description": "The name of the stage to associate with the usage plan." + }, + "Throttle": { + "type": "object", + "description": "Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. 
Duplicates are not allowed.", + "additionalProperties": false, + "patternProperties": { + ".*": { + "$ref": "#/definitions/ThrottleSettings" + } + } + } + } + }, + "ThrottleSettings": { + "type": "object", + "additionalProperties": false, + "properties": { + "BurstLimit": { + "type": "integer", + "minimum": 0, + "description": "The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity." + }, + "RateLimit": { + "type": "number", + "minimum": 0, + "description": "The API request steady-state rate limit (average requests per second over an extended period of time)." + } + } + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string", + "minLength": 1, + "maxLength": 128, + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -." + }, + "Value": { + "type": "string", + "minLength": 0, + "maxLength": 256, + "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -." + } + }, + "required": [ + "Value", + "Key" + ] + }, + "QuotaSettings": { + "type": "object", + "additionalProperties": false, + "properties": { + "Limit": { + "type": "integer", + "minimum": 0, + "description": "The maximum number of requests that users can make within the specified time period." + }, + "Offset": { + "type": "integer", + "minimum": 0, + "description": "For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period." + }, + "Period": { + "type": "string", + "description": "The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference." 
+ } + } + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:POST", + "apigateway:GET" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "update": { + "permissions": [ + "apigateway:GET", + "apigateway:DELETE", + "apigateway:PATCH", + "apigateway:PUT" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + }, + "list": { + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan_plugin.py new file mode 100644 index 0000000000000..154207ac69b58 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplan_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayUsagePlanProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::UsagePlan" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_usageplan import ( + ApiGatewayUsagePlanProvider, + ) + + self.factory = ApiGatewayUsagePlanProvider diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.py new file mode 100644 index 0000000000000..33a6e155d5c4f --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.py @@ -0,0 +1,114 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.objects import keys_to_lower + + +class ApiGatewayUsagePlanKeyProperties(TypedDict): + KeyId: Optional[str] + KeyType: Optional[str] + UsagePlanId: Optional[str] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ApiGatewayUsagePlanKeyProvider(ResourceProvider[ApiGatewayUsagePlanKeyProperties]): + TYPE = "AWS::ApiGateway::UsagePlanKey" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ApiGatewayUsagePlanKeyProperties], + ) -> ProgressEvent[ApiGatewayUsagePlanKeyProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - KeyType + - UsagePlanId + - KeyId + + Create-only properties: + - /properties/KeyId + - /properties/UsagePlanId + - /properties/KeyType + + Read-only properties: + - /properties/Id + + IAM permissions required: + - apigateway:POST + - apigateway:GET + + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + params = keys_to_lower(model.copy()) + result = apigw.create_usage_plan_key(**params) + + model["Id"] = result["id"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ApiGatewayUsagePlanKeyProperties], + ) -> ProgressEvent[ApiGatewayUsagePlanKeyProperties]: + """ + Fetch resource information + + IAM permissions required: + - apigateway:GET + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ApiGatewayUsagePlanKeyProperties], + ) -> ProgressEvent[ApiGatewayUsagePlanKeyProperties]: + """ + Delete a resource + + IAM permissions required: + - apigateway:DELETE + """ + model = request.desired_state + apigw = request.aws_client_factory.apigateway + + apigw.delete_usage_plan_key(usagePlanId=model["UsagePlanId"], keyId=model["KeyId"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ApiGatewayUsagePlanKeyProperties], + ) -> ProgressEvent[ApiGatewayUsagePlanKeyProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.schema.json b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.schema.json new file mode 100644 index 0000000000000..997f3be9a0d49 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey.schema.json @@ -0,0 +1,77 @@ +{ + "typeName": "AWS::ApiGateway::UsagePlanKey", + "description": "Resource Type definition for AWS::ApiGateway::UsagePlanKey", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-apigateway", + "additionalProperties": false, + "properties": { + "KeyId": { + "description": "The ID of the usage plan key.", + "type": "string" + }, + "KeyType": { + "description": "The type of usage plan key. 
Currently, the only valid key type is API_KEY.", + "type": "string", + "enum": [ + "API_KEY" + ] + }, + "UsagePlanId": { + "description": "The ID of the usage plan.", + "type": "string" + }, + "Id": { + "description": "An autogenerated ID which is a combination of the ID of the key and ID of the usage plan combined with a : such as 123abcdef:abc123.", + "type": "string" + } + }, + "taggable": false, + "required": [ + "KeyType", + "UsagePlanId", + "KeyId" + ], + "createOnlyProperties": [ + "/properties/KeyId", + "/properties/UsagePlanId", + "/properties/KeyType" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "handlers": { + "create": { + "permissions": [ + "apigateway:POST", + "apigateway:GET" + ] + }, + "read": { + "permissions": [ + "apigateway:GET" + ] + }, + "delete": { + "permissions": [ + "apigateway:DELETE" + ] + }, + "list": { + "handlerSchema": { + "properties": { + "UsagePlanId": { + "$ref": "resource-schema.json#/properties/UsagePlanId" + } + }, + "required": [ + "UsagePlanId" + ] + }, + "permissions": [ + "apigateway:GET" + ] + } + } +} diff --git a/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey_plugin.py b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey_plugin.py new file mode 100644 index 0000000000000..eb21b610bfc22 --- /dev/null +++ b/localstack-core/localstack/services/apigateway/resource_providers/aws_apigateway_usageplankey_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ApiGatewayUsagePlanKeyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ApiGateway::UsagePlanKey" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.apigateway.resource_providers.aws_apigateway_usageplankey import ( + ApiGatewayUsagePlanKeyProvider, + ) + + self.factory = ApiGatewayUsagePlanKeyProvider diff --git a/localstack/services/firehose/__init__.py b/localstack-core/localstack/services/cdk/__init__.py similarity index 100% rename from localstack/services/firehose/__init__.py rename to localstack-core/localstack/services/cdk/__init__.py diff --git a/localstack/services/iam/__init__.py b/localstack-core/localstack/services/cdk/resource_providers/__init__.py similarity index 100% rename from localstack/services/iam/__init__.py rename to localstack-core/localstack/services/cdk/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.py b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.py new file mode 100644 index 0000000000000..7e5eb5ca2f988 --- /dev/null +++ b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.py @@ -0,0 +1,90 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class CDKMetadataProperties(TypedDict): + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class CDKMetadataProvider(ResourceProvider[CDKMetadataProperties]): + TYPE = "AWS::CDK::Metadata" 
# Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[CDKMetadataProperties], + ) -> ProgressEvent[CDKMetadataProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + + + """ + model = request.desired_state + model["Id"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[CDKMetadataProperties], + ) -> ProgressEvent[CDKMetadataProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[CDKMetadataProperties], + ) -> ProgressEvent[CDKMetadataProperties]: + """ + Delete a resource + + + """ + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=request.previous_state, + ) + + def update( + self, + request: ResourceRequest[CDKMetadataProperties], + ) -> ProgressEvent[CDKMetadataProperties]: + """ + Update a resource + + + """ + model = request.desired_state + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) diff --git a/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.schema.json b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.schema.json new file mode 100644 index 0000000000000..636fc68e2e9c0 --- /dev/null +++ b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata.schema.json @@ -0,0 +1,22 @@ +{ + "typeName": "AWS::CDK::Metadata", + "description": "Resource Type definition for AWS::CDK::Metadata", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + } + }, + "definitions": { + }, + "required": [ + ], + "createOnlyProperties": [ + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata_plugin.py b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata_plugin.py new file mode 100644 index 0000000000000..924ca3cb79eae --- /dev/null +++ b/localstack-core/localstack/services/cdk/resource_providers/cdk_metadata_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class CDKMetadataProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::CDK::Metadata" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.cdk.resource_providers.cdk_metadata import CDKMetadataProvider + + self.factory = CDKMetadataProvider diff --git a/localstack/services/kinesis/__init__.py b/localstack-core/localstack/services/certificatemanager/__init__.py similarity index 100% rename from localstack/services/kinesis/__init__.py rename to localstack-core/localstack/services/certificatemanager/__init__.py diff --git a/localstack/services/kms/__init__.py b/localstack-core/localstack/services/certificatemanager/resource_providers/__init__.py similarity index 100% rename from localstack/services/kms/__init__.py rename to localstack-core/localstack/services/certificatemanager/resource_providers/__init__.py diff --git 
a/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.py b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.py new file mode 100644 index 0000000000000..d79d62975e87f --- /dev/null +++ b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.py @@ -0,0 +1,151 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class CertificateManagerCertificateProperties(TypedDict): + DomainName: Optional[str] + CertificateAuthorityArn: Optional[str] + CertificateTransparencyLoggingPreference: Optional[str] + DomainValidationOptions: Optional[list[DomainValidationOption]] + Id: Optional[str] + SubjectAlternativeNames: Optional[list[str]] + Tags: Optional[list[Tag]] + ValidationMethod: Optional[str] + + +class DomainValidationOption(TypedDict): + DomainName: Optional[str] + HostedZoneId: Optional[str] + ValidationDomain: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class CertificateManagerCertificateProvider( + ResourceProvider[CertificateManagerCertificateProperties] +): + TYPE = "AWS::CertificateManager::Certificate" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[CertificateManagerCertificateProperties], + ) -> ProgressEvent[CertificateManagerCertificateProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - DomainName + + Create-only properties: + - /properties/SubjectAlternativeNames + - /properties/DomainValidationOptions + - /properties/ValidationMethod + - /properties/DomainName + - /properties/CertificateAuthorityArn + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + acm = request.aws_client_factory.acm + + params = util.select_attributes( + model, + [ + "CertificateAuthorityArn", + "DomainName", + "DomainValidationOptions", + "SubjectAlternativeNames", + "Tags", + "ValidationMethod", + ], + ) + # adjust domain validation options + valid_opts = params.get("DomainValidationOptions") + if valid_opts: + + def _convert(opt): + res = util.select_attributes(opt, ["DomainName", "ValidationDomain"]) + res.setdefault("ValidationDomain", res["DomainName"]) + return res + + params["DomainValidationOptions"] = [_convert(opt) for opt in valid_opts] + + # adjust logging preferences + logging_pref = params.get("CertificateTransparencyLoggingPreference") + if logging_pref: + params["Options"] = {"CertificateTransparencyLoggingPreference": logging_pref} + + response = acm.request_certificate(**params) + model["Id"] = response["CertificateArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[CertificateManagerCertificateProperties], + ) -> ProgressEvent[CertificateManagerCertificateProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[CertificateManagerCertificateProperties], + ) -> ProgressEvent[CertificateManagerCertificateProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + acm = request.aws_client_factory.acm + + acm.delete_certificate(CertificateArn=model["Id"]) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[CertificateManagerCertificateProperties], + ) -> ProgressEvent[CertificateManagerCertificateProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.schema.json b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.schema.json new file mode 100644 index 0000000000000..a4d90a42f0839 --- /dev/null +++ b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate.schema.json @@ -0,0 +1,95 @@ +{ + "typeName": "AWS::CertificateManager::Certificate", + "description": "Resource Type definition for AWS::CertificateManager::Certificate", + "additionalProperties": false, + "properties": { + "CertificateAuthorityArn": { + "type": "string" + }, + "DomainValidationOptions": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/DomainValidationOption" + } + }, + "CertificateTransparencyLoggingPreference": { + "type": "string" + }, + "DomainName": { + "type": "string" + }, + "ValidationMethod": { + "type": "string" + }, + "SubjectAlternativeNames": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Id": { + "type": "string" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": 
"#/definitions/Tag" + } + } + }, + "definitions": { + "DomainValidationOption": { + "type": "object", + "additionalProperties": false, + "properties": { + "DomainName": { + "type": "string" + }, + "ValidationDomain": { + "type": "string" + }, + "HostedZoneId": { + "type": "string" + } + }, + "required": [ + "DomainName" + ] + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "required": [ + "DomainName" + ], + "createOnlyProperties": [ + "/properties/SubjectAlternativeNames", + "/properties/DomainValidationOptions", + "/properties/ValidationMethod", + "/properties/DomainName", + "/properties/CertificateAuthorityArn" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate_plugin.py b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate_plugin.py new file mode 100644 index 0000000000000..5aae4de01c7b3 --- /dev/null +++ b/localstack-core/localstack/services/certificatemanager/resource_providers/aws_certificatemanager_certificate_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class CertificateManagerCertificateProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::CertificateManager::Certificate" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.certificatemanager.resource_providers.aws_certificatemanager_certificate import ( + CertificateManagerCertificateProvider, + ) + + self.factory = CertificateManagerCertificateProvider diff --git a/localstack/services/logs/__init__.py b/localstack-core/localstack/services/cloudformation/__init__.py similarity index 100% rename from localstack/services/logs/__init__.py rename to localstack-core/localstack/services/cloudformation/__init__.py diff --git a/localstack-core/localstack/services/cloudformation/api_utils.py b/localstack-core/localstack/services/cloudformation/api_utils.py new file mode 100644 index 0000000000000..c4172974cec35 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/api_utils.py @@ -0,0 +1,162 @@ +import logging +import re +from urllib.parse import urlparse + +from localstack import config, constants +from localstack.aws.connect import connect_to +from localstack.services.cloudformation.engine.validations import ValidationError +from localstack.services.s3.utils import ( + extract_bucket_name_and_key_from_headers_and_path, + normalize_bucket_name, +) +from localstack.utils.functions import run_safe +from localstack.utils.http import safe_requests +from localstack.utils.strings import to_str +from localstack.utils.urls import localstack_host + +LOG = logging.getLogger(__name__) + + +def prepare_template_body(req_data: dict) -> str | bytes | None: # TODO: mutating and returning + template_url = req_data.get("TemplateURL") + if template_url: + req_data["TemplateURL"] = convert_s3_to_local_url(template_url) + url = req_data.get("TemplateURL", "") + if is_local_service_url(url): + modified_template_body = get_template_body(req_data) + if modified_template_body: + req_data.pop("TemplateURL", None) + 
req_data["TemplateBody"] = modified_template_body + modified_template_body = get_template_body(req_data) + if modified_template_body: + req_data["TemplateBody"] = modified_template_body + return modified_template_body + + +def extract_template_body(request: dict) -> str: + """ + Given a request payload, fetch the body of the template either from S3 or from the payload itself + """ + if template_body := request.get("TemplateBody"): + if request.get("TemplateURL"): + raise ValidationError( + "Specify exactly one of 'TemplateBody' or 'TemplateUrl'" + ) # TODO: check proper message + + return template_body + + elif template_url := request.get("TemplateURL"): + template_url = convert_s3_to_local_url(template_url) + return get_remote_template_body(template_url) + + else: + raise ValidationError( + "Specify exactly one of 'TemplateBody' or 'TemplateUrl'" + ) # TODO: check proper message + + +def get_remote_template_body(url: str) -> str: + response = run_safe(lambda: safe_requests.get(url, verify=False)) + # check error codes, and code 301 - fixes https://github.com/localstack/localstack/issues/1884 + status_code = 0 if response is None else response.status_code + if 200 <= status_code < 300: + # request was ok + return response.text + elif response is None or status_code == 301 or status_code >= 400: + # check if this is an S3 URL, then get the file directly from there + url = convert_s3_to_local_url(url) + if is_local_service_url(url): + parsed_path = urlparse(url).path.lstrip("/") + parts = parsed_path.partition("/") + client = connect_to().s3 + LOG.debug( + "Download CloudFormation template content from local S3: %s - %s", + parts[0], + parts[2], + ) + result = client.get_object(Bucket=parts[0], Key=parts[2]) + body = to_str(result["Body"].read()) + return body + raise RuntimeError( + "Unable to fetch template body (code %s) from URL %s" % (status_code, url) + ) + else: + raise RuntimeError( + f"Bad status code from fetching template from url '{url}' ({status_code})", + url, + status_code, + ) + + +def get_template_body(req_data: dict) -> str: + body = req_data.get("TemplateBody") + if body: + return body + url = req_data.get("TemplateURL") + if url: + response = run_safe(lambda: safe_requests.get(url, verify=False)) + # check error codes, and code 301 - fixes https://github.com/localstack/localstack/issues/1884 + status_code = 0 if response is None else response.status_code + if response is None or status_code == 301 or status_code >= 400: + # check if this is an S3 URL, then get the file directly from there + url = convert_s3_to_local_url(url) + if is_local_service_url(url): + parsed_path = urlparse(url).path.lstrip("/") + parts = parsed_path.partition("/") + client = connect_to().s3 + LOG.debug( + "Download CloudFormation template content from local S3: %s - %s", + parts[0], + parts[2], + ) + result = client.get_object(Bucket=parts[0], Key=parts[2]) + body = to_str(result["Body"].read()) + return body + raise Exception( + "Unable to fetch template body (code %s) from URL %s" % (status_code, url) + ) + return to_str(response.content) + raise Exception("Unable to get template body from input: %s" % req_data) + + +def is_local_service_url(url: str) -> bool: + if not url: + return False + candidates = ( + constants.LOCALHOST, + constants.LOCALHOST_HOSTNAME, + localstack_host().host, + ) + if any(re.match(r"^[^:]+://[^:/]*%s([:/]|$)" % host, url) for host in candidates): + return True + host = url.split("://")[-1].split("/")[0] + return "localhost" in host + + +def convert_s3_to_local_url(url: 
str) -> str: + from localstack.services.cloudformation.provider import ValidationError + + url_parsed = urlparse(url) + path = url_parsed.path + + headers = {"host": url_parsed.netloc} + bucket_name, key_name = extract_bucket_name_and_key_from_headers_and_path(headers, path) + + if url_parsed.scheme == "s3": + raise ValidationError( + f"S3 error: Domain name specified in {url_parsed.netloc} is not a valid S3 domain" + ) + + if not bucket_name or not key_name: + if not (url_parsed.netloc.startswith("s3.") or ".s3." in url_parsed.netloc): + raise ValidationError("TemplateURL must be a supported URL.") + + # note: make sure to normalize the bucket name here! + bucket_name = normalize_bucket_name(bucket_name) + local_url = f"{config.internal_service_url()}/{bucket_name}/{key_name}" + return local_url + + +def validate_stack_name(stack_name): + pattern = r"[a-zA-Z][-a-zA-Z0-9]*|arn:[-a-zA-Z0-9:/._+]*" + return re.match(pattern, stack_name) is not None diff --git a/localstack-core/localstack/services/cloudformation/cfn_utils.py b/localstack-core/localstack/services/cloudformation/cfn_utils.py new file mode 100644 index 0000000000000..6fcc5d16fb573 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/cfn_utils.py @@ -0,0 +1,84 @@ +import json +from typing import Callable + +from localstack.utils.objects import recurse_object + + +def rename_params(func, rename_map): + def do_rename(account_id, region_name, params, logical_resource_id, *args, **kwargs): + values = ( + func(account_id, region_name, params, logical_resource_id, *args, **kwargs) + if func + else params + ) + for old_param, new_param in rename_map.items(): + values[new_param] = values.pop(old_param, None) + return values + + return do_rename + + +def lambda_convert_types(func, types): + return ( + lambda account_id, region_name, params, logical_resource_id, *args, **kwargs: convert_types( + func(account_id, region_name, params, *args, **kwargs), types + ) + ) + + +def lambda_to_json(attr): + return lambda account_id, region_name, params, logical_resource_id, *args, **kwargs: json.dumps( + params[attr] + ) + + +def lambda_rename_attributes(attrs, func=None): + def recurse(o, path): + if isinstance(o, dict): + for k in list(o.keys()): + for a in attrs.keys(): + if k == a: + o[attrs[k]] = o.pop(k) + return o + + func = func or (lambda account_id, region_name, x, logical_resource_id, *args, **kwargs: x) + return ( + lambda account_id, + region_name, + params, + logical_resource_id, + *args, + **kwargs: recurse_object( + func(account_id, region_name, params, logical_resource_id, *args, **kwargs), recurse + ) + ) + + +def convert_types(obj, types): + def fix_types(key, type_class): + def recurse(o, path): + if isinstance(o, dict): + for k, v in dict(o).items(): + key_path = "%s%s" % (path or ".", k) + if key in [k, key_path]: + o[k] = type_class(v) + return o + + return recurse_object(obj, recurse) + + for key, type_class in types.items(): + fix_types(key, type_class) + return obj + + +def get_tags_param(resource_type: str) -> Callable: + """Return a tag parameters creation function for the given resource type""" + + def _param(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs): + tags = params.get("Tags") + if not tags: + return None + + return [{"ResourceType": resource_type, "Tags": tags}] + + return _param diff --git a/localstack/services/cloudformation/deploy.html b/localstack-core/localstack/services/cloudformation/deploy.html similarity index 100% rename from 
localstack/services/cloudformation/deploy.html rename to localstack-core/localstack/services/cloudformation/deploy.html diff --git a/localstack-core/localstack/services/cloudformation/deploy_ui.py b/localstack-core/localstack/services/cloudformation/deploy_ui.py new file mode 100644 index 0000000000000..deac95b408b1f --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/deploy_ui.py @@ -0,0 +1,47 @@ +import json +import logging +import os + +import requests +from rolo import Response + +from localstack import constants +from localstack.utils.files import load_file +from localstack.utils.json import parse_json_or_yaml + +LOG = logging.getLogger(__name__) + + +class CloudFormationUi: + def on_get(self, request): + from localstack.utils.aws.aws_stack import get_valid_regions + + deploy_html_file = os.path.join( + constants.MODULE_MAIN_PATH, "services", "cloudformation", "deploy.html" + ) + deploy_html = load_file(deploy_html_file) + req_params = request.values + params = { + "stackName": "stack1", + "templateBody": "{}", + "errorMessage": "''", + "regions": json.dumps(sorted(get_valid_regions())), + } + + download_url = req_params.get("templateURL") + if download_url: + try: + LOG.debug("Attempting to download CloudFormation template URL: %s", download_url) + template_body = requests.get(download_url).text + template_body = parse_json_or_yaml(template_body) + params["templateBody"] = json.dumps(template_body) + except Exception as e: + msg = f"Unable to download CloudFormation template URL: {e}" + LOG.info(msg) + params["errorMessage"] = json.dumps(msg.replace("\n", " - ")) + + # using simple string replacement here, for simplicity (could be replaced with, e.g., jinja) + for key, value in params.items(): + deploy_html = deploy_html.replace(f"<{key}>", value) + + return Response(deploy_html, mimetype="text/html") diff --git a/localstack-core/localstack/services/cloudformation/deployment_utils.py b/localstack-core/localstack/services/cloudformation/deployment_utils.py new file mode 100644 index 0000000000000..6355db6b5c27a --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/deployment_utils.py @@ -0,0 +1,319 @@ +import builtins +import json +import logging +import re +from copy import deepcopy +from typing import Callable, List + +from localstack import config +from localstack.utils import common +from localstack.utils.aws import aws_stack +from localstack.utils.common import select_attributes, short_uid +from localstack.utils.functions import run_safe +from localstack.utils.json import json_safe +from localstack.utils.objects import recurse_object +from localstack.utils.strings import is_string + +# placeholders +PLACEHOLDER_AWS_NO_VALUE = "__aws_no_value__" + +LOG = logging.getLogger(__name__) + + +def dump_json_params(param_func=None, *param_names): + def replace(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs): + result = ( + param_func(account_id, region_name, params, logical_resource_id, *args, **kwargs) + if param_func + else params + ) + for name in param_names: + if isinstance(result.get(name), (dict, list)): + # Fix for https://github.com/localstack/localstack/issues/2022 + # Convert any date instances to date strings, etc, Version: "2012-10-17" + param_value = common.json_safe(result[name]) + result[name] = json.dumps(param_value) + return result + + return replace + + +# TODO: remove +def param_defaults(param_func, defaults): + def replace( + account_id: str, + region_name: str, + properties: dict, + 
logical_resource_id: str, + *args, + **kwargs, + ): + result = param_func( + account_id, region_name, properties, logical_resource_id, *args, **kwargs + ) + for key, value in defaults.items(): + if result.get(key) in ["", None]: + result[key] = value + return result + + return replace + + +def remove_none_values(params): + """Remove None values and AWS::NoValue placeholders (recursively) in the given object.""" + + def remove_nones(o, **kwargs): + if isinstance(o, dict): + for k, v in dict(o).items(): + if v in [None, PLACEHOLDER_AWS_NO_VALUE]: + o.pop(k) + if isinstance(o, list): + common.run_safe(o.remove, None) + common.run_safe(o.remove, PLACEHOLDER_AWS_NO_VALUE) + return o + + result = common.recurse_object(params, remove_nones) + return result + + +def params_list_to_dict(param_name, key_attr_name="Key", value_attr_name="Value"): + def do_replace(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs): + result = {} + for entry in params.get(param_name, []): + key = entry[key_attr_name] + value = entry[value_attr_name] + result[key] = value + return result + + return do_replace + + +def lambda_keys_to_lower(key=None, skip_children_of: List[str] = None): + return ( + lambda account_id, + region_name, + params, + logical_resource_id, + *args, + **kwargs: common.keys_to_lower( + obj=(params.get(key) if key else params), skip_children_of=skip_children_of + ) + ) + + +def merge_parameters(func1, func2): + return ( + lambda account_id, + region_name, + properties, + logical_resource_id, + *args, + **kwargs: common.merge_dicts( + func1(account_id, region_name, properties, logical_resource_id, *args, **kwargs), + func2(account_id, region_name, properties, logical_resource_id, *args, **kwargs), + ) + ) + + +def str_or_none(o): + return o if o is None else json.dumps(o) if isinstance(o, (dict, list)) else str(o) + + +def params_dict_to_list(param_name, key_attr_name="Key", value_attr_name="Value", wrapper=None): + def do_replace(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs): + result = [] + for key, value in params.get(param_name, {}).items(): + result.append({key_attr_name: key, value_attr_name: value}) + if wrapper: + result = {wrapper: result} + return result + + return do_replace + + +# TODO: remove +def params_select_attributes(*attrs): + def do_select(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs): + result = {} + for attr in attrs: + if params.get(attr) is not None: + result[attr] = str_or_none(params.get(attr)) + return result + + return do_select + + +def param_json_to_str(name): + def _convert(account_id: str, region_name: str, params, logical_resource_id, *args, **kwargs): + result = params.get(name) + if result: + result = json.dumps(result) + return result + + return _convert + + +def lambda_select_params(*selected): + # TODO: remove and merge with function below + return select_parameters(*selected) + + +def select_parameters(*param_names): + return ( + lambda account_id, + region_name, + properties, + logical_resource_id, + *args, + **kwargs: select_attributes(properties, param_names) + ) + + +def is_none_or_empty_value(value): + return not value or value == PLACEHOLDER_AWS_NO_VALUE + + +def generate_default_name(stack_name: str, logical_resource_id: str): + random_id_part = short_uid() + resource_id_part = logical_resource_id[:24] + stack_name_part = stack_name[: 63 - 2 - (len(random_id_part) + len(resource_id_part))] + return f"{stack_name_part}-{resource_id_part}-{random_id_part}" + 
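+# Example of the helper above (illustrative sketch; assumes short_uid() yields
+# an 8-character random suffix):
+#
+#   >>> generate_default_name("my-stack", "MyQueue")  # doctest: +SKIP
+#   'my-stack-MyQueue-1a2b3c4d'
+#
+# The slicing keeps the full name within the 63-character limit that many AWS
+# resource names share: the random suffix and up to 24 characters of the
+# logical ID are always kept, and the stack name is truncated to fill the rest.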
+
+def generate_default_name_without_stack(logical_resource_id: str):
+    random_id_part = short_uid()
+    resource_id_part = logical_resource_id[: 63 - 1 - len(random_id_part)]
+    return f"{resource_id_part}-{random_id_part}"
+
+
+# Utils for parameter conversion
+
+# TODO: handling of multiple valid types
+param_validation = re.compile(
+    r"Invalid type for parameter (?P<param>[\w.]+), value: (?P<value>\w+), type: <class '(?P<wrong_class>\w+)'>, valid types: <class '(?P<valid_class>\w+)'>"
+)
+
+
+def get_nested(obj: dict, path: str):
+    parts = path.split(".")
+    result = obj
+    for p in parts[:-1]:
+        result = result.get(p, {})
+    return result.get(parts[-1])
+
+
+def set_nested(obj: dict, path: str, value):
+    parts = path.split(".")
+    result = obj
+    for p in parts[:-1]:
+        result = result.get(p, {})
+    result[parts[-1]] = value
+
+
+def fix_boto_parameters_based_on_report(original_params: dict, report: str) -> dict:
+    """
+    Fix invalid type parameter validation errors in boto request parameters
+
+    :param original_params: original boto request parameters that lead to the parameter validation error
+    :param report: error report from botocore ParamValidator
+    :return: a copy of original_params with all values replaced by their correctly cast ones
+    """
+    params = deepcopy(original_params)
+    for found in param_validation.findall(report):
+        param_name, value, wrong_class, valid_class = found
+        cast_class = getattr(builtins, valid_class)
+        old_value = get_nested(params, param_name)
+
+        if cast_class == bool and str(old_value).lower() in ["true", "false"]:
+            new_value = str(old_value).lower() == "true"
+        else:
+            new_value = cast_class(old_value)
+        set_nested(params, param_name, new_value)
+    return params
+
+
+def fix_account_id_in_arns(params: dict, replacement_account_id: str) -> dict:
+    def fix_ids(o, **kwargs):
+        if isinstance(o, dict):
+            for k, v in o.items():
+                if is_string(v, exclude_binary=True):
+                    o[k] = aws_stack.fix_account_id_in_arns(v, replacement=replacement_account_id)
+        elif is_string(o, exclude_binary=True):
+            o = aws_stack.fix_account_id_in_arns(o, replacement=replacement_account_id)
+        return o
+
+    result = recurse_object(params, fix_ids)
+    return result
+
+
+def convert_data_types(type_conversions: dict[str, Callable], params: dict) -> dict:
+    """Convert data types in the "params" object, with the type defs
+    specified in the 'types' attribute of "func_details"."""
+    attr_names = type_conversions.keys() or []
+
+    def cast(_obj, _type):
+        if _type == bool:
+            return _obj in ["True", "true", True]
+        if _type == str:
+            if isinstance(_obj, bool):
+                return str(_obj).lower()
+            return str(_obj)
+        if _type in (int, float):
+            return _type(_obj)
+        return _obj
+
+    def fix_types(o, **kwargs):
+        if isinstance(o, dict):
+            for k, v in o.items():
+                if k in attr_names:
+                    o[k] = cast(v, type_conversions[k])
+        return o
+
+    result = recurse_object(params, fix_types)
+    return result
+
+
+def log_not_available_message(resource_type: str, message: str):
+    LOG.warning(
+        "%s. 
To find out if %s is supported in LocalStack Pro, " + "please check out our docs at https://docs.localstack.cloud/user-guide/aws/cloudformation/#resources-pro--enterprise-edition", + message, + resource_type, + ) + + +def dump_resource_as_json(resource: dict) -> str: + return str(run_safe(lambda: json.dumps(json_safe(resource))) or resource) + + +def get_action_name_for_resource_change(res_change: str) -> str: + return {"Add": "CREATE", "Remove": "DELETE", "Modify": "UPDATE"}.get(res_change) + + +def check_not_found_exception(e, resource_type, resource, resource_status=None): + # we expect this to be a "not found" exception + markers = [ + "NoSuchBucket", + "ResourceNotFound", + "NoSuchEntity", + "NotFoundException", + "404", + "not found", + "not exist", + ] + + markers_hit = [m for m in markers if m in str(e)] + if not markers_hit: + LOG.warning( + "Unexpected error processing resource type %s: Exception: %s - %s - status: %s", + resource_type, + str(e), + resource, + resource_status, + ) + if config.CFN_VERBOSE_ERRORS: + raise e + else: + return False + + return True diff --git a/localstack/services/opensearch/__init__.py b/localstack-core/localstack/services/cloudformation/engine/__init__.py similarity index 100% rename from localstack/services/opensearch/__init__.py rename to localstack-core/localstack/services/cloudformation/engine/__init__.py diff --git a/localstack-core/localstack/services/cloudformation/engine/changes.py b/localstack-core/localstack/services/cloudformation/engine/changes.py new file mode 100644 index 0000000000000..ae6ced9e5563e --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/changes.py @@ -0,0 +1,18 @@ +from typing import Literal, Optional, TypedDict + +Action = str + + +class ResourceChange(TypedDict): + Action: Action + LogicalResourceId: str + PhysicalResourceId: Optional[str] + ResourceType: str + Scope: list + Details: list + Replacement: Optional[Literal["False"]] + + +class ChangeConfig(TypedDict): + Type: str + ResourceChange: ResourceChange diff --git a/localstack-core/localstack/services/cloudformation/engine/entities.py b/localstack-core/localstack/services/cloudformation/engine/entities.py new file mode 100644 index 0000000000000..d9f07f0281e0b --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/entities.py @@ -0,0 +1,438 @@ +import logging +from typing import Optional, TypedDict + +from localstack.aws.api.cloudformation import Capability, ChangeSetType, Parameter +from localstack.services.cloudformation.engine.parameters import ( + StackParameter, + convert_stack_parameters_to_list, + mask_no_echo, + strip_parameter_type, +) +from localstack.services.cloudformation.engine.v2.change_set_model import ( + ChangeSetModel, + NodeTemplate, +) +from localstack.utils.aws import arns +from localstack.utils.collections import select_attributes +from localstack.utils.id_generator import ExistingIds, ResourceIdentifier, Tags, generate_short_uid +from localstack.utils.json import clone_safe +from localstack.utils.objects import recurse_object +from localstack.utils.strings import long_uid, short_uid +from localstack.utils.time import timestamp_millis + +LOG = logging.getLogger(__name__) + + +class StackSet: + """A stack set contains multiple stack instances.""" + + # FIXME: confusing name. 
metadata is the complete incoming request object + def __init__(self, metadata: dict): + self.metadata = metadata + # list of stack instances + self.stack_instances = [] + # maps operation ID to stack set operation details + self.operations = {} + + @property + def stack_set_name(self): + return self.metadata.get("StackSetName") + + +class StackInstance: + """A stack instance belongs to a stack set and is specific to a region / account ID.""" + + # FIXME: confusing name. metadata is the complete incoming request object + def __init__(self, metadata: dict): + self.metadata = metadata + # reference to the deployed stack belonging to this stack instance + self.stack = None + + +class CreateChangeSetInput(TypedDict): + StackName: str + Capabilities: list[Capability] + ChangeSetName: Optional[str] + ChangSetType: Optional[ChangeSetType] + Parameters: list[Parameter] + + +class StackTemplate(TypedDict): + StackName: str + ChangeSetName: Optional[str] + Outputs: dict + Resources: dict + + +class StackIdentifier(ResourceIdentifier): + service = "cloudformation" + resource = "stack" + + def __init__(self, account_id: str, region: str, stack_name: str): + super().__init__(account_id, region, stack_name) + + def generate(self, existing_ids: ExistingIds = None, tags: Tags = None) -> str: + return generate_short_uid(resource_identifier=self, existing_ids=existing_ids, tags=tags) + + +# TODO: remove metadata (flatten into individual fields) +class Stack: + change_sets: list["StackChangeSet"] + + def __init__( + self, + account_id: str, + region_name: str, + metadata: Optional[CreateChangeSetInput] = None, + template: Optional[StackTemplate] = None, + template_body: Optional[str] = None, + ): + self.account_id = account_id + self.region_name = region_name + + if template is None: + template = {} + + self.resolved_outputs = list() # TODO + self.resolved_parameters: dict[str, StackParameter] = {} + self.resolved_conditions: dict[str, bool] = {} + + self.metadata = metadata or {} + self.template = template or {} + self.template_body = template_body + self._template_raw = clone_safe(self.template) + self.template_original = clone_safe(self.template) + # initialize resources + for resource_id, resource in self.template_resources.items(): + resource["LogicalResourceId"] = self.template_original["Resources"][resource_id][ + "LogicalResourceId" + ] = resource.get("LogicalResourceId") or resource_id + # initialize stack template attributes + stack_id = self.metadata.get("StackId") or arns.cloudformation_stack_arn( + self.stack_name, + stack_id=StackIdentifier( + account_id=account_id, region=region_name, stack_name=metadata.get("StackName") + ).generate(tags=metadata.get("tags")), + account_id=account_id, + region_name=region_name, + ) + self.template["StackId"] = self.metadata["StackId"] = stack_id + self.template["Parameters"] = self.template.get("Parameters") or {} + self.template["Outputs"] = self.template.get("Outputs") or {} + self.template["Conditions"] = self.template.get("Conditions") or {} + # initialize metadata + self.metadata["Parameters"] = self.metadata.get("Parameters") or [] + self.metadata["StackStatus"] = "CREATE_IN_PROGRESS" + self.metadata["CreationTime"] = self.metadata.get("CreationTime") or timestamp_millis() + self.metadata["LastUpdatedTime"] = self.metadata["CreationTime"] + self.metadata.setdefault("Description", self.template.get("Description")) + self.metadata.setdefault("RollbackConfiguration", {}) + self.metadata.setdefault("DisableRollback", False) + 
self.metadata.setdefault("EnableTerminationProtection", False) + # maps resource id to resource state + self._resource_states = {} + # list of stack events + self.events = [] + # list of stack change sets + self.change_sets = [] + # self.evaluated_conditions = {} + + def set_resolved_parameters(self, resolved_parameters: dict[str, StackParameter]): + self.resolved_parameters = resolved_parameters + if resolved_parameters: + self.metadata["Parameters"] = list(resolved_parameters.values()) + + def set_resolved_stack_conditions(self, resolved_conditions: dict[str, bool]): + self.resolved_conditions = resolved_conditions + + def describe_details(self): + attrs = [ + "StackId", + "StackName", + "Description", + "StackStatusReason", + "StackStatus", + "Capabilities", + "ParentId", + "RootId", + "RoleARN", + "CreationTime", + "DeletionTime", + "LastUpdatedTime", + "ChangeSetId", + "RollbackConfiguration", + "DisableRollback", + "EnableTerminationProtection", + "DriftInformation", + ] + result = select_attributes(self.metadata, attrs) + result["Tags"] = self.tags + outputs = self.resolved_outputs + if outputs: + result["Outputs"] = outputs + stack_parameters = convert_stack_parameters_to_list(self.resolved_parameters) + if stack_parameters: + result["Parameters"] = [ + mask_no_echo(strip_parameter_type(sp)) for sp in stack_parameters + ] + if not result.get("DriftInformation"): + result["DriftInformation"] = {"StackDriftStatus": "NOT_CHECKED"} + for attr in ["Tags", "NotificationARNs"]: + result.setdefault(attr, []) + return result + + def set_stack_status(self, status: str, status_reason: Optional[str] = None): + self.metadata["StackStatus"] = status + if "FAILED" in status: + self.metadata["StackStatusReason"] = status_reason or "Deployment failed" + self.log_stack_errors() + self.add_stack_event( + self.stack_name, self.stack_id, status, status_reason=status_reason or "" + ) + + def log_stack_errors(self, level=logging.WARNING): + for event in self.events: + if event["ResourceStatus"].endswith("FAILED"): + if reason := event.get("ResourceStatusReason"): + reason = reason.replace("\n", "; ") + LOG.log( + level, + "CFn resource failed to deploy: %s (%s)", + event["LogicalResourceId"], + reason, + ) + else: + LOG.warning("CFn resource failed to deploy: %s", event["LogicalResourceId"]) + + def set_time_attribute(self, attribute, new_time=None): + self.metadata[attribute] = new_time or timestamp_millis() + + def add_stack_event( + self, + resource_id: str = None, + physical_res_id: str = None, + status: str = "", + status_reason: str = "", + ): + resource_id = resource_id or self.stack_name + physical_res_id = physical_res_id or self.stack_id + resource_type = ( + self.template.get("Resources", {}) + .get(resource_id, {}) + .get("Type", "AWS::CloudFormation::Stack") + ) + + event = { + "EventId": long_uid(), + "Timestamp": timestamp_millis(), + "StackId": self.stack_id, + "StackName": self.stack_name, + "LogicalResourceId": resource_id, + "PhysicalResourceId": physical_res_id, + "ResourceStatus": status, + "ResourceType": resource_type, + } + + if status_reason: + event["ResourceStatusReason"] = status_reason + + self.events.insert(0, event) + + def set_resource_status(self, resource_id: str, status: str, status_reason: str = ""): + """Update the deployment status of the given resource ID and publish a corresponding stack event.""" + physical_res_id = self.resources.get(resource_id, {}).get("PhysicalResourceId") + self._set_resource_status_details(resource_id, physical_res_id=physical_res_id) + 
state = self.resource_states.setdefault(resource_id, {}) + state["PreviousResourceStatus"] = state.get("ResourceStatus") + state["ResourceStatus"] = status + state["LastUpdatedTimestamp"] = timestamp_millis() + self.add_stack_event(resource_id, physical_res_id, status, status_reason=status_reason) + + def _set_resource_status_details(self, resource_id: str, physical_res_id: str = None): + """Helper function to ensure that the status details for the given resource ID are up-to-date.""" + resource = self.resources.get(resource_id) + if resource is None or resource.get("Type") == "Parameter": + # make sure we delete the states for any non-existing/deleted resources + self._resource_states.pop(resource_id, None) + return + state = self._resource_states.setdefault(resource_id, {}) + attr_defaults = ( + ("LogicalResourceId", resource_id), + ("PhysicalResourceId", physical_res_id), + ) + for res in [resource, state]: + for attr, default in attr_defaults: + res[attr] = res.get(attr) or default + state["StackName"] = state.get("StackName") or self.stack_name + state["StackId"] = state.get("StackId") or self.stack_id + state["ResourceType"] = state.get("ResourceType") or self.resources[resource_id].get("Type") + state["Timestamp"] = timestamp_millis() + return state + + def resource_status(self, resource_id: str): + result = self._lookup(self.resource_states, resource_id) + return result + + def latest_template_raw(self): + if self.change_sets: + return self.change_sets[-1]._template_raw + return self._template_raw + + @property + def resource_states(self): + for resource_id in list(self._resource_states.keys()): + self._set_resource_status_details(resource_id) + return self._resource_states + + @property + def stack_name(self): + return self.metadata["StackName"] + + @property + def stack_id(self): + return self.metadata["StackId"] + + @property + def resources(self): + """Return dict of resources""" + return dict(self.template_resources) + + @resources.setter + def resources(self, resources: dict): + self.template["Resources"] = resources + + @property + def template_resources(self): + return self.template.setdefault("Resources", {}) + + @property + def tags(self): + return self.metadata.get("Tags", []) + + @property + def imports(self): + def _collect(o, **kwargs): + if isinstance(o, dict): + import_val = o.get("Fn::ImportValue") + if import_val: + result.add(import_val) + return o + + result = set() + recurse_object(self.resources, _collect) + return result + + @property + def template_parameters(self): + return self.template["Parameters"] + + @property + def conditions(self): + """Returns the (mutable) dict of stack conditions.""" + return self.template.setdefault("Conditions", {}) + + @property + def mappings(self): + """Returns the (mutable) dict of stack mappings.""" + return self.template.setdefault("Mappings", {}) + + @property + def outputs(self): + """Returns the (mutable) dict of stack outputs.""" + return self.template.setdefault("Outputs", {}) + + @property + def status(self): + return self.metadata["StackStatus"] + + @property + def resource_types(self): + return [r.get("Type") for r in self.template_resources.values()] + + def resource(self, resource_id): + return self._lookup(self.resources, resource_id) + + def _lookup(self, resource_map, resource_id): + resource = resource_map.get(resource_id) + if not resource: + raise Exception( + 'Unable to find details for resource "%s" in stack "%s"' + % (resource_id, self.stack_name) + ) + return resource + + def copy(self): + return Stack( 
+ account_id=self.account_id, + region_name=self.region_name, + metadata=dict(self.metadata), + template=dict(self.template), + ) + + +# FIXME: remove inheritance +# TODO: what functionality of the Stack object do we rely on here? +class StackChangeSet(Stack): + update_graph: NodeTemplate | None + change_set_type: ChangeSetType | None + + def __init__( + self, + account_id: str, + region_name: str, + stack: Stack, + params=None, + template=None, + change_set_type: ChangeSetType | None = None, + ): + if template is None: + template = {} + if params is None: + params = {} + super(StackChangeSet, self).__init__(account_id, region_name, params, template) + + name = self.metadata["ChangeSetName"] + if not self.metadata.get("ChangeSetId"): + self.metadata["ChangeSetId"] = arns.cloudformation_change_set_arn( + name, change_set_id=short_uid(), account_id=account_id, region_name=region_name + ) + + self.account_id = account_id + self.region_name = region_name + self.stack = stack + self.metadata["StackId"] = stack.stack_id + self.metadata["Status"] = "CREATE_PENDING" + self.change_set_type = change_set_type + + @property + def change_set_id(self): + return self.metadata["ChangeSetId"] + + @property + def change_set_name(self): + return self.metadata["ChangeSetName"] + + @property + def resources(self): + return dict(self.stack.resources) + + @property + def changes(self): + result = self.metadata["Changes"] = self.metadata.get("Changes", []) + return result + + # V2 only + def populate_update_graph( + self, + before_template: Optional[dict], + after_template: Optional[dict], + before_parameters: Optional[dict], + after_parameters: Optional[dict], + ) -> None: + change_set_model = ChangeSetModel( + before_template=before_template, + after_template=after_template, + before_parameters=before_parameters, + after_parameters=after_parameters, + ) + self.update_graph = change_set_model.get_update_model() diff --git a/localstack-core/localstack/services/cloudformation/engine/errors.py b/localstack-core/localstack/services/cloudformation/engine/errors.py new file mode 100644 index 0000000000000..0ee44f3530e58 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/errors.py @@ -0,0 +1,4 @@ +class TemplateError(RuntimeError): + """ + Error thrown on a programming error from the user + """ diff --git a/localstack-core/localstack/services/cloudformation/engine/parameters.py b/localstack-core/localstack/services/cloudformation/engine/parameters.py new file mode 100644 index 0000000000000..ba39fafc40db2 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/parameters.py @@ -0,0 +1,209 @@ +""" +TODO: ordering & grouping of parameters +TODO: design proper structure for parameters to facilitate validation etc. +TODO: clearer language around both parameters and "resolving" + +Documentation extracted from AWS docs (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html): + The following requirements apply when using parameters: + + You can have a maximum of 200 parameters in an AWS CloudFormation template. + Each parameter must be given a logical name (also called logical ID), which must be alphanumeric and unique among all logical names within the template. + Each parameter must be assigned a parameter type that is supported by AWS CloudFormation. For more information, see Type. + Each parameter must be assigned a value at runtime for AWS CloudFormation to successfully provision the stack. 
You can optionally specify a default value for AWS CloudFormation to use unless another value is provided.
+    Parameters must be declared and referenced from within the same template. You can reference parameters from the Resources and Outputs sections of the template.
+
+    When you create or update stacks and create change sets, AWS CloudFormation uses whatever values exist in Parameter Store at the time the operation is run. If a specified parameter doesn't exist in Parameter Store under the caller's AWS account, AWS CloudFormation returns a validation error.
+
+    For stack updates, the Use existing value option in the console and the UsePreviousValue attribute for update-stack tell AWS CloudFormation to use the existing Systems Manager parameter key—not its value. AWS CloudFormation always fetches the latest values from Parameter Store when it updates stacks.
+
+"""
+
+import logging
+from typing import Literal, Optional, TypedDict
+
+from botocore.exceptions import ClientError
+
+from localstack.aws.api.cloudformation import Parameter, ParameterDeclaration
+from localstack.aws.connect import connect_to
+
+LOG = logging.getLogger(__name__)
+
+
+def extract_stack_parameter_declarations(template: dict) -> dict[str, ParameterDeclaration]:
+    """
+    Extract and build a dict of stack parameter declarations from a CloudFormation stack template
+
+    :param template: the parsed CloudFormation stack template
+    :return: a dictionary of declared parameters, mapping logical IDs to the corresponding parameter declaration
+    """
+    result = {}
+    for param_key, param in template.get("Parameters", {}).items():
+        result[param_key] = ParameterDeclaration(
+            ParameterKey=param_key,
+            DefaultValue=param.get("Default"),
+            ParameterType=param.get("Type"),
+            NoEcho=param.get("NoEcho", False),
+            # TODO: test & implement rest here
+            # ParameterConstraints=?,
+            # Description=?
+        )
+    return result
+
+
+class StackParameter(Parameter):
+    # we need the type information downstream when actually using the resolved value
+    # e.g. in case of lists so that we know that we should interpret the string as a comma-separated list.
+    ParameterType: str
+
+
+def resolve_parameters(
+    account_id: str,
+    region_name: str,
+    parameter_declarations: dict[str, ParameterDeclaration],
+    new_parameters: dict[str, Parameter],
+    old_parameters: dict[str, Parameter],
+) -> dict[str, StackParameter]:
+    """
+    Resolves stack parameters or raises an exception if any parameter cannot be resolved.
+
+    Assumptions:
+    - There are no extra undeclared parameters given (validate before calling this method)
+
+    TODO: is UsePreviousValue=False equivalent to not specifying it, in all situations?
+
+    :param parameter_declarations: The parameter declaration from the (potentially new) template, i.e. 
the "Parameters" section + :param new_parameters: The parameters to resolve + :param old_parameters: The old parameters from the previous stack deployment, if available + :return: a copy of new_parameters with resolved values + """ + resolved_parameters = dict() + + # populate values for every parameter declared in the template + for pm in parameter_declarations.values(): + pm_key = pm["ParameterKey"] + resolved_param = StackParameter(ParameterKey=pm_key, ParameterType=pm["ParameterType"]) + new_parameter = new_parameters.get(pm_key) + old_parameter = old_parameters.get(pm_key) + + if new_parameter is None: + # since no value has been specified for the deployment, we need to be able to resolve the default or fail + default_value = pm["DefaultValue"] + if default_value is None: + LOG.error("New parameter without a default value: %s", pm_key) + raise Exception( + f"Invalid. Parameter '{pm_key}' needs to have either param specified or Default." + ) # TODO: test and verify + + resolved_param["ParameterValue"] = default_value + else: + if ( + new_parameter.get("UsePreviousValue", False) + and new_parameter.get("ParameterValue") is not None + ): + raise Exception( + f"Can't set both 'UsePreviousValue' and a concrete value for parameter '{pm_key}'." + ) # TODO: test and verify + + if new_parameter.get("UsePreviousValue", False): + if old_parameter is None: + raise Exception( + f"Set 'UsePreviousValue' but stack has no previous value for parameter '{pm_key}'." + ) # TODO: test and verify + + resolved_param["ParameterValue"] = old_parameter["ParameterValue"] + else: + resolved_param["ParameterValue"] = new_parameter["ParameterValue"] + + resolved_param["NoEcho"] = pm.get("NoEcho", False) + resolved_parameters[pm_key] = resolved_param + + # Note that SSM parameters always need to be resolved anew here + # TODO: support more parameter types + if pm["ParameterType"].startswith("AWS::SSM"): + if pm["ParameterType"] in [ + "AWS::SSM::Parameter::Value", + "AWS::SSM::Parameter::Value", + "AWS::SSM::Parameter::Value", + ]: + # TODO: error handling (e.g. no permission to lookup SSM parameter or SSM parameter doesn't exist) + resolved_param["ResolvedValue"] = resolve_ssm_parameter( + account_id, region_name, resolved_param["ParameterValue"] + ) + else: + raise Exception(f"Unsupported stack parameter type: {pm['ParameterType']}") + + return resolved_parameters + + +# TODO: inject credentials / client factory for proper account/region lookup +def resolve_ssm_parameter(account_id: str, region_name: str, stack_parameter_value: str) -> str: + """ + Resolve the SSM stack parameter from the SSM service with a name equal to the stack parameter value. 
+ """ + ssm_client = connect_to(aws_access_key_id=account_id, region_name=region_name).ssm + try: + return ssm_client.get_parameter(Name=stack_parameter_value)["Parameter"]["Value"] + except ClientError: + LOG.error("client error fetching parameter '%s'", stack_parameter_value) + raise + + +def strip_parameter_type(in_param: StackParameter) -> Parameter: + result = in_param.copy() + result.pop("ParameterType", None) + return result + + +def mask_no_echo(in_param: StackParameter) -> Parameter: + result = in_param.copy() + no_echo = result.pop("NoEcho", False) + if no_echo: + result["ParameterValue"] = "****" + return result + + +def convert_stack_parameters_to_list( + in_params: dict[str, StackParameter] | None, +) -> list[StackParameter]: + if not in_params: + return [] + return list(in_params.values()) + + +def convert_stack_parameters_to_dict(in_params: list[Parameter] | None) -> dict[str, Parameter]: + if not in_params: + return {} + return {p["ParameterKey"]: p for p in in_params} + + +class LegacyParameterProperties(TypedDict): + Value: str + ParameterType: str + ParameterValue: Optional[str] + ResolvedValue: Optional[str] + + +class LegacyParameter(TypedDict): + LogicalResourceId: str + Type: Literal["Parameter"] + Properties: LegacyParameterProperties + + +# TODO: not actually parameter_type but the logical "ID" +def map_to_legacy_structure(parameter_name: str, new_parameter: StackParameter) -> LegacyParameter: + """ + Helper util to convert a normal (resolved) stack parameter to a legacy parameter structure that can then be merged with stack resources. + + :param new_parameter: a resolved stack parameter + :return: legacy parameter that can be merged with stack resources for uniform lookup based on logical ID + """ + return LegacyParameter( + LogicalResourceId=new_parameter["ParameterKey"], + Type="Parameter", + Properties=LegacyParameterProperties( + ParameterType=new_parameter.get("ParameterType"), + ParameterValue=new_parameter.get("ParameterValue"), + ResolvedValue=new_parameter.get("ResolvedValue"), + Value=new_parameter.get("ResolvedValue", new_parameter.get("ParameterValue")), + ), + ) diff --git a/localstack/services/cloudformation/engine/policy_loader.py b/localstack-core/localstack/services/cloudformation/engine/policy_loader.py similarity index 77% rename from localstack/services/cloudformation/engine/policy_loader.py rename to localstack-core/localstack/services/cloudformation/engine/policy_loader.py index f69722b217001..8f3d11be79244 100644 --- a/localstack/services/cloudformation/engine/policy_loader.py +++ b/localstack-core/localstack/services/cloudformation/engine/policy_loader.py @@ -2,7 +2,7 @@ from samtranslator.translator.managed_policy_translator import ManagedPolicyLoader -from localstack.utils.aws import aws_stack +from localstack.aws.connect import connect_to LOG = logging.getLogger(__name__) @@ -13,6 +13,6 @@ def create_policy_loader() -> ManagedPolicyLoader: global policy_loader if not policy_loader: - iam_client = aws_stack.connect_to_service("iam") + iam_client = connect_to().iam policy_loader = ManagedPolicyLoader(iam_client=iam_client) return policy_loader diff --git a/localstack-core/localstack/services/cloudformation/engine/quirks.py b/localstack-core/localstack/services/cloudformation/engine/quirks.py new file mode 100644 index 0000000000000..964d5b603d960 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/quirks.py @@ -0,0 +1,68 @@ +""" +We can't always automatically determine which value serves as the physical resource 
ID. +=> This needs to be determined manually by testing against AWS (!) + +There's also a reason that the mapping is located here instead of closer to the resource providers themselves. +If the resources were compliant with the generic AWS resource provider framework that AWS provides for your own resource types, we wouldn't need this. +For legacy resources (and even some of the ones where they are open-sourced), AWS still has a layer of "secret sauce" that defines what the actual physical resource ID is. +An extension schema only defines the primary identifiers but not directly the physical resource ID that is generated based on those. +Since this is therefore rather part of the cloudformation layer and *not* the resource providers responsibility, we've put the mapping closer to the cloudformation engine. +""" + +# note: format here is subject to change (e.g. it might not be a pure str -> str mapping, it could also involve more sophisticated handlers +PHYSICAL_RESOURCE_ID_SPECIAL_CASES = { + "AWS::ApiGateway::Authorizer": "/properties/AuthorizerId", + "AWS::ApiGateway::RequestValidator": "/properties/RequestValidatorId", + "AWS::ApiGatewayV2::Authorizer": "/properties/AuthorizerId", + "AWS::ApiGatewayV2::Deployment": "/properties/DeploymentId", + "AWS::ApiGatewayV2::IntegrationResponse": "/properties/IntegrationResponseId", + "AWS::ApiGatewayV2::Route": "/properties/RouteId", + "AWS::ApiGateway::BasePathMapping": "/properties/RestApiId", + "AWS::ApiGateway::Deployment": "/properties/DeploymentId", + "AWS::ApiGateway::Model": "/properties/Name", + "AWS::ApiGateway::Resource": "/properties/ResourceId", + "AWS::ApiGateway::Stage": "/properties/StageName", + "AWS::Cognito::UserPoolClient": "/properties/ClientId", + "AWS::ECS::Service": "/properties/ServiceArn", + "AWS::EKS::FargateProfile": "|", # composite + "AWS::Events::EventBus": "/properties/Name", + "AWS::Logs::LogStream": "/properties/LogStreamName", + "AWS::Logs::SubscriptionFilter": "/properties/LogGroupName", + "AWS::RDS::DBProxyTargetGroup": "/properties/TargetGroupName", + "AWS::Glue::SchemaVersionMetadata": "||", # composite + "AWS::VerifiedPermissions::IdentitySource": "|", # composite + "AWS::VerifiedPermissions::Policy": "|", # composite + "AWS::VerifiedPermissions::PolicyTemplate": "|", # composite + "AWS::WAFv2::WebACL": "||", + "AWS::WAFv2::WebACLAssociation": "|", + "AWS::WAFv2::IPSet": "||", + # composite +} + +# You can usually find the available GetAtt targets in the official resource documentation: +# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html +# Use the scaffolded exploration test to verify against AWS which attributes you can access. +# This mapping is not in use yet (!) 
+VALID_GETATT_PROPERTIES = { + # Other Examples + # "AWS::ApiGateway::Resource": ["ResourceId"], + # "AWS::IAM::User": ["Arn"], # TODO: not validated yet + "AWS::SSM::Parameter": ["Type", "Value"], # TODO: not validated yet + # "AWS::OpenSearchService::Domain": [ + # "AdvancedSecurityOptions.AnonymousAuthDisableDate", + # "Arn", + # "DomainArn", + # "DomainEndpoint", + # "DomainEndpoints", + # "Id", + # "ServiceSoftwareOptions", + # "ServiceSoftwareOptions.AutomatedUpdateDate", + # "ServiceSoftwareOptions.Cancellable", + # "ServiceSoftwareOptions.CurrentVersion", + # "ServiceSoftwareOptions.Description", + # "ServiceSoftwareOptions.NewVersion", + # "ServiceSoftwareOptions.OptionalDeployment", + # "ServiceSoftwareOptions.UpdateAvailable", + # "ServiceSoftwareOptions.UpdateStatus", + # ], # TODO: not validated yet +} diff --git a/localstack-core/localstack/services/cloudformation/engine/resource_ordering.py b/localstack-core/localstack/services/cloudformation/engine/resource_ordering.py new file mode 100644 index 0000000000000..f65f57093ed50 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/resource_ordering.py @@ -0,0 +1,109 @@ +from collections import OrderedDict + +from localstack.services.cloudformation.engine.changes import ChangeConfig +from localstack.services.cloudformation.engine.parameters import StackParameter +from localstack.services.cloudformation.engine.template_utils import get_deps_for_resource + + +class NoResourceInStack(ValueError): + """Raised when we preprocess the template and do not find a resource""" + + def __init__(self, logical_resource_id: str): + msg = f"Template format error: Unresolved resource dependencies [{logical_resource_id}] in the Resources block of the template" + + super().__init__(msg) + + +def order_resources( + resources: dict, + resolved_parameters: dict[str, StackParameter], + resolved_conditions: dict[str, bool], + reverse: bool = False, +) -> OrderedDict: + """ + Given a dictionary of resources, topologically sort the resources based on + inter-resource dependencies (e.g. usages of intrinsic functions). 
+ """ + nodes: dict[str, list[str]] = {} + for logical_resource_id, properties in resources.items(): + nodes.setdefault(logical_resource_id, []) + deps = get_deps_for_resource(properties, resolved_conditions) + for dep in deps: + if dep in resolved_parameters: + # we only care about other resources + continue + nodes.setdefault(dep, []) + nodes[dep].append(logical_resource_id) + + # implementation from https://dev.to/leopfeiffer/topological-sort-with-kahns-algorithm-3dl1 + indegrees = dict.fromkeys(nodes.keys(), 0) + for dependencies in nodes.values(): + for dependency in dependencies: + indegrees[dependency] += 1 + + # Place all elements with indegree 0 in queue + queue = [k for k in nodes.keys() if indegrees[k] == 0] + + sorted_logical_resource_ids = [] + + # Continue until all nodes have been dealt with + while len(queue) > 0: + # node of current iteration is the first one from the queue + curr = queue.pop(0) + sorted_logical_resource_ids.append(curr) + + # remove the current node from other dependencies + for dependency in nodes[curr]: + indegrees[dependency] -= 1 + + if indegrees[dependency] == 0: + queue.append(dependency) + + # check for circular dependencies + if len(sorted_logical_resource_ids) != len(nodes): + raise Exception("Circular dependency found.") + + sorted_mapping = [] + for logical_resource_id in sorted_logical_resource_ids: + if properties := resources.get(logical_resource_id): + sorted_mapping.append((logical_resource_id, properties)) + else: + if ( + logical_resource_id not in resolved_parameters + and logical_resource_id not in resolved_conditions + ): + raise NoResourceInStack(logical_resource_id) + + if reverse: + sorted_mapping = sorted_mapping[::-1] + return OrderedDict(sorted_mapping) + + +def order_changes( + given_changes: list[ChangeConfig], + resources: dict, + resolved_parameters: dict[str, StackParameter], + # TODO: remove resolved conditions somehow + resolved_conditions: dict[str, bool], + reverse: bool = False, +) -> list[ChangeConfig]: + """ + Given a list of changes, a dictionary of resources and a dictionary of resolved conditions, topologically sort the + changes based on inter-resource dependencies (e.g. usages of intrinsic functions). 
+ """ + ordered_resources = order_resources( + resources=resources, + resolved_parameters=resolved_parameters, + resolved_conditions=resolved_conditions, + reverse=reverse, + ) + sorted_changes = [] + for logical_resource_id in ordered_resources.keys(): + for change in given_changes: + if change["ResourceChange"]["LogicalResourceId"] == logical_resource_id: + sorted_changes.append(change) + break + assert len(sorted_changes) > 0 + if reverse: + sorted_changes = sorted_changes[::-1] + return sorted_changes diff --git a/localstack-core/localstack/services/cloudformation/engine/schema.py b/localstack-core/localstack/services/cloudformation/engine/schema.py new file mode 100644 index 0000000000000..1a8e3d0a9d402 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/schema.py @@ -0,0 +1,15 @@ +import json +import os +import zipfile + + +# TODO: unify with scaffolding +class SchemaProvider: + def __init__(self, zipfile_path: str | os.PathLike[str]): + self.schemas = {} + with zipfile.ZipFile(zipfile_path) as infile: + for filename in infile.namelist(): + with infile.open(filename) as schema_file: + schema = json.load(schema_file) + typename = schema["typeName"] + self.schemas[typename] = schema diff --git a/localstack-core/localstack/services/cloudformation/engine/template_deployer.py b/localstack-core/localstack/services/cloudformation/engine/template_deployer.py new file mode 100644 index 0000000000000..a0ae9c286d61c --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/template_deployer.py @@ -0,0 +1,1609 @@ +import base64 +import json +import logging +import re +import traceback +import uuid +from typing import Optional + +from botocore.exceptions import ClientError + +from localstack import config +from localstack.aws.connect import connect_to +from localstack.constants import INTERNAL_AWS_SECRET_ACCESS_KEY +from localstack.services.cloudformation.deployment_utils import ( + PLACEHOLDER_AWS_NO_VALUE, + get_action_name_for_resource_change, + remove_none_values, +) +from localstack.services.cloudformation.engine.changes import ChangeConfig, ResourceChange +from localstack.services.cloudformation.engine.entities import Stack, StackChangeSet +from localstack.services.cloudformation.engine.parameters import StackParameter +from localstack.services.cloudformation.engine.quirks import VALID_GETATT_PROPERTIES +from localstack.services.cloudformation.engine.resource_ordering import ( + order_changes, + order_resources, +) +from localstack.services.cloudformation.engine.template_utils import ( + AWS_URL_SUFFIX, + fn_equals_type_conversion, + get_deps_for_resource, +) +from localstack.services.cloudformation.resource_provider import ( + Credentials, + OperationStatus, + ProgressEvent, + ResourceProviderExecutor, + ResourceProviderPayload, + get_resource_type, +) +from localstack.services.cloudformation.service_models import ( + DependencyNotYetSatisfied, +) +from localstack.services.cloudformation.stores import exports_map, find_stack +from localstack.utils.aws.arns import get_partition +from localstack.utils.functions import prevent_stack_overflow +from localstack.utils.json import clone_safe +from localstack.utils.strings import to_bytes, to_str +from localstack.utils.threads import start_worker_thread + +from localstack.services.cloudformation.models import * # noqa: F401, F403, isort:skip +from localstack.utils.urls import localstack_host + +ACTION_CREATE = "create" +ACTION_DELETE = "delete" + +REGEX_OUTPUT_APIGATEWAY = re.compile( + 
rf"^(https?://.+\.execute-api\.)(?:[^-]+-){{2,3}}\d\.(amazonaws\.com|{AWS_URL_SUFFIX})/?(.*)$" +) +REGEX_DYNAMIC_REF = re.compile("{{resolve:([^:]+):(.+)}}") + +LOG = logging.getLogger(__name__) + +# list of static attribute references to be replaced in {'Fn::Sub': '...'} strings +STATIC_REFS = ["AWS::Region", "AWS::Partition", "AWS::StackName", "AWS::AccountId"] + +# Mock value for unsupported type references +MOCK_REFERENCE = "unknown" + + +class NoStackUpdates(Exception): + """Exception indicating that no actions are to be performed in a stack update (which is not allowed)""" + + pass + + +# --------------------- +# CF TEMPLATE HANDLING +# --------------------- + + +def get_attr_from_model_instance( + resource: dict, + attribute_name: str, + resource_type: str, + resource_id: str, + attribute_sub_name: Optional[str] = None, +) -> str: + if resource["PhysicalResourceId"] == MOCK_REFERENCE: + LOG.warning( + "Attribute '%s' requested from unsupported resource with id %s", + attribute_name, + resource_id, + ) + return MOCK_REFERENCE + + properties = resource.get("Properties", {}) + # if there's no entry in VALID_GETATT_PROPERTIES for the resource type we still default to "open" and accept anything + valid_atts = VALID_GETATT_PROPERTIES.get(resource_type) + if valid_atts is not None and attribute_name not in valid_atts: + LOG.warning( + "Invalid attribute in Fn::GetAtt for %s: | %s.%s", + resource_type, + resource_id, + attribute_name, + ) + raise Exception( + f"Resource type {resource_type} does not support attribute {{{attribute_name}}}" + ) # TODO: check CFn behavior via snapshot + + attribute_candidate = properties.get(attribute_name) + if attribute_sub_name: + return attribute_candidate.get(attribute_sub_name) + if "." in attribute_name: + # was used for legacy, but keeping it since it might have to work for a custom resource as well + if attribute_candidate: + return attribute_candidate + + # some resources (e.g. ElastiCache) have their readOnly attributes defined as Aa.Bb but the property is named AaBb + if attribute_candidate := properties.get(attribute_name.replace(".", "")): + return attribute_candidate + + # accessing nested properties + parts = attribute_name.split(".") + attribute = properties + # TODO: the attribute fetching below is a temporary workaround for the dependency resolution. + # It is caused by trying to access the resource attribute that has not been deployed yet. + # This should be a hard error.“ + for part in parts: + if attribute is None: + return None + attribute = attribute.get(part) + return attribute + + # If we couldn't find the attribute, this is actually an irrecoverable error. + # After the resource has a state of CREATE_COMPLETE, all attributes should already be set. + # TODO: raise here instead + # if attribute_candidate is None: + # raise Exception( + # f"Failed to resolve attribute for Fn::GetAtt in {resource_type}: {resource_id}.{attribute_name}" + # ) # TODO: check CFn behavior via snapshot + return attribute_candidate + + +def resolve_ref( + account_id: str, + region_name: str, + stack_name: str, + resources: dict, + parameters: dict[str, StackParameter], + ref: str, +): + """ + ref always needs to be a static string + ref can be one of these: + 1. a pseudo-parameter (e.g. AWS::Region) + 2. a parameter + 3. 
the id of a resource (PhysicalResourceId)
+    """
+    # pseudo parameter
+    if ref == "AWS::Region":
+        return region_name
+    if ref == "AWS::Partition":
+        return get_partition(region_name)
+    if ref == "AWS::StackName":
+        return stack_name
+    if ref == "AWS::StackId":
+        stack = find_stack(account_id, region_name, stack_name)
+        if not stack:
+            raise ValueError(f"No stack {stack_name} found")
+        return stack.stack_id
+    if ref == "AWS::AccountId":
+        return account_id
+    if ref == "AWS::NoValue":
+        return PLACEHOLDER_AWS_NO_VALUE
+    if ref == "AWS::NotificationARNs":
+        # TODO!
+        return {}
+    if ref == "AWS::URLSuffix":
+        return AWS_URL_SUFFIX
+
+    # parameter
+    if parameter := parameters.get(ref):
+        parameter_type: str = parameter["ParameterType"]
+        parameter_value = parameter.get("ResolvedValue") or parameter.get("ParameterValue")
+
+        if "CommaDelimitedList" in parameter_type or parameter_type.startswith("List<"):
+            return [p.strip() for p in parameter_value.split(",")]
+        else:
+            return parameter_value
+
+    # resource
+    resource = resources.get(ref)
+    if not resource:
+        raise Exception(
+            f"Resource target for `Ref {ref}` could not be found. Is there a resource with name {ref} in your stack?"
+        )
+
+    return resources[ref].get("PhysicalResourceId")
+
+
+# Using a @prevent_stack_overflow decorator here to avoid infinite recursion
+# in case we load stack exports that have circular dependencies (see issue 3438)
+# TODO: Potentially think about a better approach in the future
+@prevent_stack_overflow(match_parameters=True)
+def resolve_refs_recursively(
+    account_id: str,
+    region_name: str,
+    stack_name: str,
+    resources: dict,
+    mappings: dict,
+    conditions: dict[str, bool],
+    parameters: dict,
+    value,
+):
+    result = _resolve_refs_recursively(
+        account_id, region_name, stack_name, resources, mappings, conditions, parameters, value
+    )
+
+    # localstack specific patches
+    if isinstance(result, str):
+        # we're trying to filter constructed API urls here (e.g. 
via Join in the template) + api_match = REGEX_OUTPUT_APIGATEWAY.match(result) + if api_match and result in config.CFN_STRING_REPLACEMENT_DENY_LIST: + return result + elif api_match: + prefix = api_match[1] + host = api_match[2] + path = api_match[3] + port = localstack_host().port + return f"{prefix}{host}:{port}/{path}" + + # basic dynamic reference support + # see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html + # technically there are more restrictions for each of these services but checking each of these + # isn't really necessary for the current level of emulation + dynamic_ref_match = REGEX_DYNAMIC_REF.match(result) + if dynamic_ref_match: + service_name = dynamic_ref_match[1] + reference_key = dynamic_ref_match[2] + + # only these 3 services are supported for dynamic references right now + if service_name == "ssm": + ssm_client = connect_to(aws_access_key_id=account_id, region_name=region_name).ssm + try: + return ssm_client.get_parameter(Name=reference_key)["Parameter"]["Value"] + except ClientError as e: + LOG.error("client error accessing SSM parameter '%s': %s", reference_key, e) + raise + elif service_name == "ssm-secure": + ssm_client = connect_to(aws_access_key_id=account_id, region_name=region_name).ssm + try: + return ssm_client.get_parameter(Name=reference_key, WithDecryption=True)[ + "Parameter" + ]["Value"] + except ClientError as e: + LOG.error("client error accessing SSM parameter '%s': %s", reference_key, e) + raise + elif service_name == "secretsmanager": + # reference key needs to be parsed further + # because {{resolve:secretsmanager:secret-id:secret-string:json-key:version-stage:version-id}} + # we match for "secret-id:secret-string:json-key:version-stage:version-id" + # where + # secret-id can either be the secret name or the full ARN of the secret + # secret-string *must* be SecretString + # all other values are optional + secret_id = reference_key + [json_key, version_stage, version_id] = [None, None, None] + if "SecretString" in reference_key: + parts = reference_key.split(":SecretString:") + secret_id = parts[0] + # json-key, version-stage and version-id are optional. 
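+                    # appending "::" pads the split so that any of the optional
+                    # fields that are absent resolve to empty strings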
+                    [json_key, version_stage, version_id] = f"{parts[1]}::".split(":")[:3]
+
+                kwargs = {}  # optional args for get_secret_value
+                if version_id:
+                    kwargs["VersionId"] = version_id
+                if version_stage:
+                    kwargs["VersionStage"] = version_stage
+
+                secretsmanager_client = connect_to(
+                    aws_access_key_id=account_id, region_name=region_name
+                ).secretsmanager
+                try:
+                    secret_value = secretsmanager_client.get_secret_value(
+                        SecretId=secret_id, **kwargs
+                    )["SecretString"]
+                except ClientError as e:
+                    LOG.error(
+                        "client error while trying to access secret '%s': %s", secret_id, e
+                    )
+                    raise
+
+                if json_key:
+                    json_secret = json.loads(secret_value)
+                    if json_key not in json_secret:
+                        raise DependencyNotYetSatisfied(
+                            resource_ids=secret_id,
+                            message=f"Key {json_key} is not yet available in secret {secret_id}.",
+                        )
+                    return json_secret[json_key]
+                else:
+                    return secret_value
+            else:
+                LOG.warning(
+                    "Unsupported service for dynamic parameter: service_name=%s", service_name
+                )
+
+    return result
+
+
+@prevent_stack_overflow(match_parameters=True)
+def _resolve_refs_recursively(
+    account_id: str,
+    region_name: str,
+    stack_name: str,
+    resources: dict,
+    mappings: dict,
+    conditions: dict,
+    parameters: dict,
+    value: dict | list | str | bytes | None,
+):
+    if isinstance(value, dict):
+        keys_list = list(value.keys())
+        stripped_fn_lower = keys_list[0].lower().split("::")[-1] if len(keys_list) == 1 else None
+
+        # process special operators
+        if keys_list == ["Ref"]:
+            ref = resolve_ref(
+                account_id, region_name, stack_name, resources, parameters, value["Ref"]
+            )
+            if ref is None:
+                msg = 'Unable to resolve Ref for resource "%s" (yet)' % value["Ref"]
+                LOG.debug("%s - %s", msg, resources.get(value["Ref"]) or set(resources.keys()))
+
+                raise DependencyNotYetSatisfied(resource_ids=value["Ref"], message=msg)
+
+            ref = resolve_refs_recursively(
+                account_id,
+                region_name,
+                stack_name,
+                resources,
+                mappings,
+                conditions,
+                parameters,
+                ref,
+            )
+            return ref
+
+        if stripped_fn_lower == "getatt":
+            attr_ref = value[keys_list[0]]
+            attr_ref = attr_ref.split(".") if isinstance(attr_ref, str) else attr_ref
+            resource_logical_id = attr_ref[0]
+            attribute_name = attr_ref[1]
+            attribute_sub_name = attr_ref[2] if len(attr_ref) > 2 else None
+
+            # the attribute name can be a Ref
+            attribute_name = resolve_refs_recursively(
+                account_id,
+                region_name,
+                stack_name,
+                resources,
+                mappings,
+                conditions,
+                parameters,
+                attribute_name,
+            )
+            resource = resources.get(resource_logical_id)
+
+            resource_type = get_resource_type(resource)
+            resolved_getatt = get_attr_from_model_instance(
+                resource,
+                attribute_name,
+                resource_type,
+                resource_logical_id,
+                attribute_sub_name,
+            )
+
+            # TODO: we should check the deployment state and not try to GetAtt from a resource that is still IN_PROGRESS or hasn't started yet.
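+            # Example (illustrative): both {"Fn::GetAtt": "MyBucket.Arn"} and
+            # {"Fn::GetAtt": ["MyBucket", "Arn"]} arrive here normalized to
+            # ["MyBucket", "Arn"]; a third segment, as in "MyStack.Outputs.MyOutput"
+            # on nested stacks, is carried along as attribute_sub_name.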
+ if resolved_getatt is None: + raise DependencyNotYetSatisfied( + resource_ids=resource_logical_id, + message=f"Could not resolve attribute '{attribute_name}' on resource '{resource_logical_id}'", + ) + + return resolved_getatt + + if stripped_fn_lower == "join": + join_values = value[keys_list[0]][1] + + # this can actually be another ref that produces a list as output + if isinstance(join_values, dict): + join_values = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + join_values, + ) + + # resolve reference in the items list + assert isinstance(join_values, list) + join_values = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + join_values, + ) + + none_values = [v for v in join_values if v is None] + if none_values: + LOG.warning( + "Cannot resolve Fn::Join '%s' due to null values: '%s'", value, join_values + ) + raise Exception( + f"Cannot resolve CF Fn::Join {value} due to null values: {join_values}" + ) + return value[keys_list[0]][0].join([str(v) for v in join_values]) + + if stripped_fn_lower == "sub": + item_to_sub = value[keys_list[0]] + + attr_refs = {r: {"Ref": r} for r in STATIC_REFS} + if not isinstance(item_to_sub, list): + item_to_sub = [item_to_sub, {}] + result = item_to_sub[0] + item_to_sub[1].update(attr_refs) + + for key, val in item_to_sub[1].items(): + resolved_val = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + val, + ) + + if isinstance(resolved_val, (list, dict, tuple)): + # We don't have access to the resource that's a dependency in this case, + # so do the best we can with the resource ids + raise DependencyNotYetSatisfied( + resource_ids=key, message=f"Could not resolve {val} to terminal value type" + ) + result = result.replace("${%s}" % key, str(resolved_val)) + + # resolve placeholders + result = resolve_placeholders_in_string( + account_id, + region_name, + result, + stack_name, + resources, + mappings, + conditions, + parameters, + ) + return result + + if stripped_fn_lower == "findinmap": + # "Fn::FindInMap" + mapping_id = value[keys_list[0]][0] + + if isinstance(mapping_id, dict) and "Ref" in mapping_id: + # TODO: ?? 
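+                # Illustrative example: a template may parameterize the map name,
+                # e.g. {"Fn::FindInMap": [{"Ref": "EnvMapParam"}, "prod", "AMI"]};
+                # the Ref is resolved to a plain string before the lookups below.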
+                mapping_id = resolve_ref(
+                    account_id, region_name, stack_name, resources, parameters, mapping_id["Ref"]
+                )
+
+            selected_map = mappings.get(mapping_id)
+            if not selected_map:
+                raise Exception(
+                    f"Cannot find Mapping with ID {mapping_id} for Fn::FindInMap: {value[keys_list[0]]} {list(resources.keys())}"
+                    # TODO: verify
+                )
+
+            first_level_attribute = value[keys_list[0]][1]
+            first_level_attribute = resolve_refs_recursively(
+                account_id,
+                region_name,
+                stack_name,
+                resources,
+                mappings,
+                conditions,
+                parameters,
+                first_level_attribute,
+            )
+
+            if first_level_attribute not in selected_map:
+                raise Exception(
+                    f"Cannot find map key '{first_level_attribute}' in mapping '{mapping_id}'"
+                )
+            first_level_mapping = selected_map[first_level_attribute]
+
+            second_level_attribute = value[keys_list[0]][2]
+            if not isinstance(second_level_attribute, str):
+                second_level_attribute = resolve_refs_recursively(
+                    account_id,
+                    region_name,
+                    stack_name,
+                    resources,
+                    mappings,
+                    conditions,
+                    parameters,
+                    second_level_attribute,
+                )
+            if second_level_attribute not in first_level_mapping:
+                raise Exception(
+                    f"Cannot find map key '{second_level_attribute}' in mapping '{mapping_id}' under key '{first_level_attribute}'"
+                )
+
+            return first_level_mapping[second_level_attribute]
+
+        if stripped_fn_lower == "importvalue":
+            import_value_key = resolve_refs_recursively(
+                account_id,
+                region_name,
+                stack_name,
+                resources,
+                mappings,
+                conditions,
+                parameters,
+                value[keys_list[0]],
+            )
+            exports = exports_map(account_id, region_name)
+            stack_export = exports.get(import_value_key) or {}
+            if not stack_export.get("Value"):
+                LOG.info(
+                    'Unable to find export "%s" in stack "%s", existing export names: %s',
+                    import_value_key,
+                    stack_name,
+                    list(exports.keys()),
+                )
+                return None
+            return stack_export["Value"]
+
+        if stripped_fn_lower == "if":
+            condition_name, option1, option2 = value[keys_list[0]]
+            condition = conditions.get(condition_name)
+            if condition is None:
+                LOG.warning(
+                    "Cannot find condition '%s' in conditions mapping: '%s'",
+                    condition_name,
+                    conditions.keys(),
+                )
+                raise KeyError(
+                    f"Cannot find condition '{condition_name}' in conditions mapping: '{conditions.keys()}'"
+                )
+
+            result = resolve_refs_recursively(
+                account_id,
+                region_name,
+                stack_name,
+                resources,
+                mappings,
+                conditions,
+                parameters,
+                option1 if condition else option2,
+            )
+            return result
+
+        if stripped_fn_lower == "condition":
+            # FIXME: this should only allow strings, no evaluation should be performed here
+            # see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-condition.html
+            key = value[keys_list[0]]
+            result = conditions.get(key)
+            if result is None:
+                LOG.warning("Cannot find key '%s' in conditions: '%s'", key, conditions.keys())
+                raise KeyError(f"Cannot find key '{key}' in conditions: '{conditions.keys()}'")
+            return result
+
+        if stripped_fn_lower == "not":
+            condition = value[keys_list[0]][0]
+            condition = resolve_refs_recursively(
+                account_id,
+                region_name,
+                stack_name,
+                resources,
+                mappings,
+                conditions,
+                parameters,
+                condition,
+            )
+            return not condition
+
+        if stripped_fn_lower in ["and", "or"]:
+            # note: do not shadow the `conditions` mapping here; the recursive
+            # calls below still need it to resolve nested condition references
+            sub_conditions = value[keys_list[0]]
+            results = [
+                resolve_refs_recursively(
+                    account_id,
+                    region_name,
+                    stack_name,
+                    resources,
+                    mappings,
+                    conditions,
+                    parameters,
+                    cond,
+                )
+                for cond in sub_conditions
+            ]
+            result = all(results) if stripped_fn_lower == "and" else any(results)
+            return result
+
+        if stripped_fn_lower == "equals":
+            operand1, operand2
= value[keys_list[0]] + operand1 = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + operand1, + ) + operand2 = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + operand2, + ) + # TODO: investigate type coercion here + return fn_equals_type_conversion(operand1) == fn_equals_type_conversion(operand2) + + if stripped_fn_lower == "select": + index, values = value[keys_list[0]] + index = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + index, + ) + values = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + values, + ) + try: + return values[index] + except TypeError: + return values[int(index)] + + if stripped_fn_lower == "split": + delimiter, string = value[keys_list[0]] + delimiter = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + delimiter, + ) + string = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + string, + ) + return string.split(delimiter) + + if stripped_fn_lower == "getazs": + region = ( + resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + value["Fn::GetAZs"], + ) + or region_name + ) + + ec2_client = connect_to(aws_access_key_id=account_id, region_name=region).ec2 + try: + get_availability_zones = ec2_client.describe_availability_zones()[ + "AvailabilityZones" + ] + except ClientError: + LOG.error("client error describing availability zones") + raise + + azs = [az["ZoneName"] for az in get_availability_zones] + + return azs + + if stripped_fn_lower == "base64": + value_to_encode = value[keys_list[0]] + value_to_encode = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + value_to_encode, + ) + return to_str(base64.b64encode(to_bytes(value_to_encode))) + + for key, val in dict(value).items(): + value[key] = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + val, + ) + + if isinstance(value, list): + # in some cases, intrinsic functions are passed in as, e.g., `[['Fn::Sub', '${MyRef}']]` + if len(value) == 1 and isinstance(value[0], list) and len(value[0]) == 2: + inner_list = value[0] + if str(inner_list[0]).lower().startswith("fn::"): + return resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + {inner_list[0]: inner_list[1]}, + ) + + # remove _aws_no_value_ from resulting references + clean_list = [] + for item in value: + temp_value = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + parameters, + item, + ) + if not (isinstance(temp_value, str) and temp_value == PLACEHOLDER_AWS_NO_VALUE): + clean_list.append(temp_value) + value = clean_list + + return value + + +def resolve_placeholders_in_string( + account_id: str, + region_name: str, + result, + stack_name: str, + resources: dict, + mappings: dict, + conditions: dict[str, bool], + parameters: dict, +): + """ + Resolve individual Fn::Sub variable replacements + + Variables can be template parameter names, resource logical IDs, resource 
attributes, or a variable in a key-value map
+    """
+
+    def _validate_result_type(value: str):
+        is_another_account_id = value.isdigit() and len(value) == len(account_id)
+        if value == account_id or is_another_account_id:
+            return value
+
+        if value.isdigit():
+            return int(value)
+        else:
+            try:
+                res = float(value)
+                return res
+            except ValueError:
+                return value
+
+    def _replace(match):
+        ref_expression = match.group(1)
+        parts = ref_expression.split(".")
+        if len(parts) >= 2:
+            # Resource attributes specified => Use GetAtt to resolve
+            logical_resource_id, _, attr_name = ref_expression.partition(".")
+            resolved = get_attr_from_model_instance(
+                resources[logical_resource_id],
+                attr_name,
+                get_resource_type(resources[logical_resource_id]),
+                logical_resource_id,
+            )
+            if resolved is None:
+                raise DependencyNotYetSatisfied(
+                    resource_ids=logical_resource_id,
+                    message=f"Unable to resolve attribute ref {ref_expression}",
+                )
+            if not isinstance(resolved, str):
+                resolved = str(resolved)
+            return resolved
+        if len(parts) == 1:
+            if parts[0] in resources or parts[0].startswith("AWS::"):
+                # Logical resource ID or parameter name specified => Use Ref for lookup
+                result = resolve_ref(
+                    account_id, region_name, stack_name, resources, parameters, parts[0]
+                )
+
+                if result is None:
+                    raise DependencyNotYetSatisfied(
+                        resource_ids=parts[0],
+                        message=f"Unable to resolve attribute ref {ref_expression}",
+                    )
+                # TODO: is this valid?
+                # make sure we resolve any functions/placeholders in the extracted string
+                result = resolve_refs_recursively(
+                    account_id,
+                    region_name,
+                    stack_name,
+                    resources,
+                    mappings,
+                    conditions,
+                    parameters,
+                    result,
+                )
+                # make sure we convert the result to string
+                # TODO: do this more systematically
+                result = "" if result is None else str(result)
+                return result
+            elif parts[0] in parameters:
+                parameter = parameters[parts[0]]
+                parameter_type: str = parameter["ParameterType"]
+                parameter_value = parameter.get("ResolvedValue") or parameter.get("ParameterValue")
+
+                if parameter_type in ["CommaDelimitedList"] or parameter_type.startswith("List<"):
+                    # re.sub requires the replacement to be a string, so re-join
+                    # the normalized list items instead of returning a list
+                    return ",".join(p.strip() for p in parameter_value.split(","))
+                elif parameter_type == "Number":
+                    return str(parameter_value)
+                else:
+                    return parameter_value
+            else:
+                raise DependencyNotYetSatisfied(
+                    resource_ids=parts[0],
+                    message=f"Unable to resolve attribute ref {ref_expression}",
+                )
+        # TODO raise exception here?
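+        # Note: every branch above either returns or raises, so this fallback is
+        # effectively unreachable today; it would echo back the original "${...}"
+        # text unchanged if a new, unhandled case were ever added above.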
+ return match.group(0) + + regex = r"\$\{([^\}]+)\}" + result = re.sub(regex, _replace, result) + return _validate_result_type(result) + + +def evaluate_resource_condition(conditions: dict[str, bool], resource: dict) -> bool: + if condition := resource.get("Condition"): + return conditions.get(condition, True) + return True + + +# ----------------------- +# MAIN TEMPLATE DEPLOYER +# ----------------------- + + +class TemplateDeployer: + def __init__(self, account_id: str, region_name: str, stack): + self.stack = stack + self.account_id = account_id + self.region_name = region_name + + @property + def resources(self): + return self.stack.resources + + @property + def mappings(self): + return self.stack.mappings + + @property + def stack_name(self): + return self.stack.stack_name + + # ------------------ + # MAIN ENTRY POINTS + # ------------------ + + def deploy_stack(self): + self.stack.set_stack_status("CREATE_IN_PROGRESS") + try: + self.apply_changes( + self.stack, + self.stack, + initialize=True, + action="CREATE", + ) + except Exception as e: + log_method = LOG.info + if config.CFN_VERBOSE_ERRORS: + log_method = LOG.exception + log_method("Unable to create stack %s: %s", self.stack.stack_name, e) + self.stack.set_stack_status("CREATE_FAILED") + raise + + def apply_change_set(self, change_set: StackChangeSet): + action = ( + "UPDATE" + if change_set.stack.status in {"CREATE_COMPLETE", "UPDATE_COMPLETE"} + else "CREATE" + ) + change_set.stack.set_stack_status(f"{action}_IN_PROGRESS") + # update parameters on parent stack + change_set.stack.set_resolved_parameters(change_set.resolved_parameters) + # update conditions on parent stack + change_set.stack.set_resolved_stack_conditions(change_set.resolved_conditions) + + # update attributes that the stack inherits from the changeset + change_set.stack.metadata["Capabilities"] = change_set.metadata.get("Capabilities") + + try: + self.apply_changes( + change_set.stack, + change_set, + action=action, + ) + except Exception as e: + LOG.info( + "Unable to apply change set %s: %s", change_set.metadata.get("ChangeSetName"), e + ) + change_set.metadata["Status"] = f"{action}_FAILED" + self.stack.set_stack_status(f"{action}_FAILED") + raise + + def update_stack(self, new_stack): + self.stack.set_stack_status("UPDATE_IN_PROGRESS") + # apply changes + self.apply_changes(self.stack, new_stack, action="UPDATE") + self.stack.set_time_attribute("LastUpdatedTime") + + # ---------------------------- + # DEPENDENCY RESOLUTION UTILS + # ---------------------------- + + def is_deployed(self, resource): + return self.stack.resource_states.get(resource["LogicalResourceId"], {}).get( + "ResourceStatus" + ) in [ + "CREATE_COMPLETE", + "UPDATE_COMPLETE", + ] + + def all_resource_dependencies_satisfied(self, resource) -> bool: + unsatisfied = self.get_unsatisfied_dependencies(resource) + return not unsatisfied + + def get_unsatisfied_dependencies(self, resource): + res_deps = self.get_resource_dependencies( + resource + ) # the output here is currently a set of merged IDs from both resources and parameters + parameter_deps = {d for d in res_deps if d in self.stack.resolved_parameters} + resource_deps = res_deps.difference(parameter_deps) + res_deps_mapped = {v: self.stack.resources.get(v) for v in resource_deps} + return self.get_unsatisfied_dependencies_for_resources(res_deps_mapped, resource) + + def get_unsatisfied_dependencies_for_resources( + self, resources, depending_resource=None, return_first=True + ): + result = {} + for resource_id, resource in 
resources.items(): + if not resource: + raise Exception( + f"Resource '{resource_id}' not found in stack {self.stack.stack_name}" + ) + if not self.is_deployed(resource): + LOG.debug( + "Dependency for resource %s not yet deployed: %s %s", + depending_resource, + resource_id, + resource, + ) + result[resource_id] = resource + if return_first: + break + return result + + def get_resource_dependencies(self, resource: dict) -> set[str]: + """ + Takes a resource and returns its dependencies on other resources via a str -> str mapping + """ + # Note: using the original, unmodified template here to preserve Ref's ... + raw_resources = self.stack.template_original["Resources"] + raw_resource = raw_resources[resource["LogicalResourceId"]] + return get_deps_for_resource(raw_resource, self.stack.resolved_conditions) + + # ----------------- + # DEPLOYMENT UTILS + # ----------------- + + def init_resource_status(self, resources=None, stack=None, action="CREATE"): + resources = resources or self.resources + stack = stack or self.stack + for resource_id, resource in resources.items(): + stack.set_resource_status(resource_id, f"{action}_IN_PROGRESS") + + def get_change_config( + self, action: str, resource: dict, change_set_id: Optional[str] = None + ) -> ChangeConfig: + result = ChangeConfig( + **{ + "Type": "Resource", + "ResourceChange": ResourceChange( + **{ + "Action": action, + # TODO(srw): how can the resource not contain a logical resource id? + "LogicalResourceId": resource.get("LogicalResourceId"), + "PhysicalResourceId": resource.get("PhysicalResourceId"), + "ResourceType": resource["Type"], + # TODO ChangeSetId is only set for *nested* change sets + # "ChangeSetId": change_set_id, + "Scope": [], # TODO + "Details": [], # TODO + } + ), + } + ) + if action == "Modify": + result["ResourceChange"]["Replacement"] = "False" + return result + + def resource_config_differs(self, resource_new): + """Return whether the given resource properties differ from the existing config (for stack updates).""" + # TODO: this is broken for default fields and result_handler property modifications when they're added to the properties in the model + resource_id = resource_new["LogicalResourceId"] + resource_old = self.resources[resource_id] + props_old = resource_old.get("SpecifiedProperties", {}) + props_new = resource_new["Properties"] + ignored_keys = ["LogicalResourceId", "PhysicalResourceId"] + old_keys = set(props_old.keys()) - set(ignored_keys) + new_keys = set(props_new.keys()) - set(ignored_keys) + if old_keys != new_keys: + return True + for key in old_keys: + if props_old[key] != props_new[key]: + return True + old_status = self.stack.resource_states.get(resource_id) or {} + previous_state = ( + old_status.get("PreviousResourceStatus") or old_status.get("ResourceStatus") or "" + ) + if old_status and "DELETE" in previous_state: + return True + + # TODO: ? 
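+        # If none of the checks above match, the method falls through and
+        # implicitly returns None, which callers treat the same as False
+        # ("no config change detected").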
+ def merge_properties(self, resource_id: str, old_stack, new_stack) -> None: + old_resources = old_stack.template["Resources"] + new_resources = new_stack.template["Resources"] + new_resource = new_resources[resource_id] + + old_resource = old_resources[resource_id] = old_resources.get(resource_id) or {} + for key, value in new_resource.items(): + if key == "Properties": + continue + old_resource[key] = old_resource.get(key, value) + old_res_props = old_resource["Properties"] = old_resource.get("Properties", {}) + for key, value in new_resource["Properties"].items(): + old_res_props[key] = value + + old_res_props = { + k: v for k, v in old_res_props.items() if k in new_resource["Properties"].keys() + } + old_resource["Properties"] = old_res_props + + # overwrite original template entirely + old_stack.template_original["Resources"][resource_id] = new_stack.template_original[ + "Resources" + ][resource_id] + + def construct_changes( + self, + existing_stack, + new_stack, + # TODO: remove initialize argument from here, and determine action based on resource status + initialize: Optional[bool] = False, + change_set_id=None, + append_to_changeset: Optional[bool] = False, + filter_unchanged_resources: Optional[bool] = False, + ) -> list[ChangeConfig]: + old_resources = existing_stack.template["Resources"] + new_resources = new_stack.template["Resources"] + deletes = [val for key, val in old_resources.items() if key not in new_resources] + adds = [val for key, val in new_resources.items() if initialize or key not in old_resources] + modifies = [ + val for key, val in new_resources.items() if not initialize and key in old_resources + ] + + changes = [] + for action, items in (("Remove", deletes), ("Add", adds), ("Modify", modifies)): + for item in items: + item["Properties"] = item.get("Properties", {}) + if ( + not filter_unchanged_resources # TODO: find out purpose of this + or action != "Modify" + or self.resource_config_differs(item) + ): + change = self.get_change_config(action, item, change_set_id=change_set_id) + changes.append(change) + + # append changes to change set + if append_to_changeset and isinstance(new_stack, StackChangeSet): + new_stack.changes.extend(changes) + + return changes + + def apply_changes( + self, + existing_stack: Stack, + new_stack: StackChangeSet, + change_set_id: Optional[str] = None, + initialize: Optional[bool] = False, + action: Optional[str] = None, + ): + old_resources = existing_stack.template["Resources"] + new_resources = new_stack.template["Resources"] + action = action or "CREATE" + # TODO: this seems wrong, not every resource here will be in an UPDATE_IN_PROGRESS state? 
(only the ones that will actually be updated) + self.init_resource_status(old_resources, action="UPDATE") + + # apply parameter changes to existing stack + # self.apply_parameter_changes(existing_stack, new_stack) + + # construct changes + changes = self.construct_changes( + existing_stack, + new_stack, + initialize=initialize, + change_set_id=change_set_id, + ) + + # check if we have actual changes in the stack, and prepare properties + contains_changes = False + for change in changes: + res_action = change["ResourceChange"]["Action"] + resource = new_resources.get(change["ResourceChange"]["LogicalResourceId"]) + # FIXME: we need to resolve refs before diffing to detect if for example a parameter causes the change or not + # unfortunately this would currently cause issues because we might not be able to resolve everything yet + # resource = resolve_refs_recursively( + # self.stack_name, + # self.resources, + # self.mappings, + # self.stack.resolved_conditions, + # self.stack.resolved_parameters, + # resource, + # ) + if res_action in ["Add", "Remove"] or self.resource_config_differs(resource): + contains_changes = True + if res_action in ["Modify", "Add"]: + # mutating call that overwrites resource properties with new properties and overwrites the template in old stack with new template + self.merge_properties(resource["LogicalResourceId"], existing_stack, new_stack) + if not contains_changes: + raise NoStackUpdates("No updates are to be performed.") + + # merge stack outputs and conditions + existing_stack.outputs.update(new_stack.outputs) + existing_stack.conditions.update(new_stack.conditions) + + # TODO: ideally the entire template has to be replaced, but tricky at this point + existing_stack.template["Metadata"] = new_stack.template.get("Metadata") + existing_stack.template_body = new_stack.template_body + + # start deployment loop + return self.apply_changes_in_loop( + changes, existing_stack, action=action, new_stack=new_stack + ) + + def apply_changes_in_loop( + self, + changes: list[ChangeConfig], + stack: Stack, + action: Optional[str] = None, + new_stack=None, + ): + def _run(*args): + status_reason = None + try: + self.do_apply_changes_in_loop(changes, stack) + status = f"{action}_COMPLETE" + except Exception as e: + log_method = LOG.debug + if config.CFN_VERBOSE_ERRORS: + log_method = LOG.exception + log_method( + 'Error applying changes for CloudFormation stack "%s": %s %s', + stack.stack_name, + e, + traceback.format_exc(), + ) + status = f"{action}_FAILED" + status_reason = str(e) + stack.set_stack_status(status, status_reason) + if isinstance(new_stack, StackChangeSet): + new_stack.metadata["Status"] = status + exec_result = "EXECUTE_FAILED" if "FAILED" in status else "EXECUTE_COMPLETE" + new_stack.metadata["ExecutionStatus"] = exec_result + result = "failed" if "FAILED" in status else "succeeded" + new_stack.metadata["StatusReason"] = status_reason or f"Deployment {result}" + + # run deployment in background loop, to avoid client network timeouts + return start_worker_thread(_run) + + def prepare_should_deploy_change( + self, resource_id: str, change: ResourceChange, stack, new_resources: dict + ) -> bool: + """ + TODO: document + """ + resource = new_resources[resource_id] + res_change = change["ResourceChange"] + action = res_change["Action"] + + # check resource condition, if present + if not evaluate_resource_condition(stack.resolved_conditions, resource): + LOG.debug( + 'Skipping deployment of "%s", as resource condition evaluates to false', resource_id + ) + return 
False + + # resolve refs in resource details + resolve_refs_recursively( + self.account_id, + self.region_name, + stack.stack_name, + stack.resources, + stack.mappings, + stack.resolved_conditions, + stack.resolved_parameters, + resource, + ) + + if action in ["Add", "Modify"]: + is_deployed = self.is_deployed(resource) + # TODO: Attaching the cached _deployed info here, as we should not change the "Add"/"Modify" attribute + # here, which is used further down the line to determine the resource action CREATE/UPDATE. This is a + # temporary workaround for now - to be refactored once we introduce proper stack resource state models. + res_change["_deployed"] = is_deployed + if not is_deployed: + return True + if action == "Add": + return False + elif action == "Remove": + return True + return True + + # Stack is needed here + def apply_change(self, change: ChangeConfig, stack: Stack) -> None: + change_details = change["ResourceChange"] + action = change_details["Action"] + resource_id = change_details["LogicalResourceId"] + resources = stack.resources + resource = resources[resource_id] + + # TODO: this should not be needed as resources are filtered out if the + # condition evaluates to False. + if not evaluate_resource_condition(stack.resolved_conditions, resource): + return + + # remove AWS::NoValue entries + resource_props = resource.get("Properties") + if resource_props: + resource["Properties"] = remove_none_values(resource_props) + + executor = self.create_resource_provider_executor() + resource_provider_payload = self.create_resource_provider_payload( + action, logical_resource_id=resource_id + ) + + resource_provider = executor.try_load_resource_provider(get_resource_type(resource)) + if resource_provider is not None: + # add in-progress event + resource_status = f"{get_action_name_for_resource_change(action)}_IN_PROGRESS" + physical_resource_id = None + if action in ("Modify", "Remove"): + previous_state = self.resources[resource_id].get("_last_deployed_state") + if not previous_state: + # TODO: can this happen? + previous_state = self.resources[resource_id]["Properties"] + physical_resource_id = executor.extract_physical_resource_id_from_model_with_schema( + resource_model=previous_state, + resource_type=resource["Type"], + resource_type_schema=resource_provider.SCHEMA, + ) + stack.add_stack_event( + resource_id=resource_id, + physical_res_id=physical_resource_id, + status=resource_status, + ) + + # perform the deploy + progress_event = executor.deploy_loop( + resource_provider, resource, resource_provider_payload + ) + else: + resource["PhysicalResourceId"] = MOCK_REFERENCE + progress_event = ProgressEvent(OperationStatus.SUCCESS, resource_model={}) + + # TODO: clean up the surrounding loop (do_apply_changes_in_loop) so that the responsibilities are clearer + stack_action = get_action_name_for_resource_change(action) + match progress_event.status: + case OperationStatus.FAILED: + stack.set_resource_status( + resource_id, + f"{stack_action}_FAILED", + status_reason=progress_event.message or "", + ) + # TODO: remove exception raising here? 
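+                # The message below mimics the CloudFormation event text for a
+                # failed resource handler; note that the RequestToken is a fixed
+                # placeholder value, not a real per-request token (see TODO below).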
+ # TODO: fix request token + raise Exception( + f'Resource handler returned message: "{progress_event.message}" (RequestToken: 10c10335-276a-33d3-5c07-018b684c3d26, HandlerErrorCode: InvalidRequest){progress_event.error_code}' + ) + case OperationStatus.SUCCESS: + stack.set_resource_status(resource_id, f"{stack_action}_COMPLETE") + case OperationStatus.PENDING: + # signal to the main loop that we should come back to this resource in the future + raise DependencyNotYetSatisfied( + resource_ids=[], message="Resource dependencies not yet satisfied" + ) + case OperationStatus.IN_PROGRESS: + raise Exception("Resource deployment loop should not finish in this state") + case unknown_status: + raise Exception(f"Unknown operation status: {unknown_status}") + + # TODO: this is probably already done in executor, try removing this + resource["Properties"] = progress_event.resource_model + + def create_resource_provider_executor(self) -> ResourceProviderExecutor: + return ResourceProviderExecutor( + stack_name=self.stack.stack_name, + stack_id=self.stack.stack_id, + ) + + def create_resource_provider_payload( + self, action: str, logical_resource_id: str + ) -> ResourceProviderPayload: + # FIXME: use proper credentials + creds: Credentials = { + "accessKeyId": self.account_id, + "secretAccessKey": INTERNAL_AWS_SECRET_ACCESS_KEY, + "sessionToken": "", + } + resource = self.resources[logical_resource_id] + + resource_provider_payload: ResourceProviderPayload = { + "awsAccountId": self.account_id, + "callbackContext": {}, + "stackId": self.stack.stack_name, + "resourceType": resource["Type"], + "resourceTypeVersion": "000000", + # TODO: not actually a UUID + "bearerToken": str(uuid.uuid4()), + "region": self.region_name, + "action": action, + "requestData": { + "logicalResourceId": logical_resource_id, + "resourceProperties": resource["Properties"], + "previousResourceProperties": resource.get("_last_deployed_state"), # TODO + "callerCredentials": creds, + "providerCredentials": creds, + "systemTags": {}, + "previousSystemTags": {}, + "stackTags": {}, + "previousStackTags": {}, + }, + } + return resource_provider_payload + + def delete_stack(self): + if not self.stack: + return + self.stack.set_stack_status("DELETE_IN_PROGRESS") + stack_resources = list(self.stack.resources.values()) + resources = {r["LogicalResourceId"]: clone_safe(r) for r in stack_resources} + original_resources = self.stack.template_original["Resources"] + + # TODO: what is this doing? + for key, resource in resources.items(): + resource["Properties"] = resource.get( + "Properties", clone_safe(resource) + ) # TODO: why is there a fallback? 
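+            # note: the resources themselves are deleted further below in reverse
+            # dependency order (children before parents), as computed by
+            # order_resources(..., reverse=True) over the original template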
+ resource["ResourceType"] = get_resource_type(resource) + + ordered_resource_ids = list( + order_resources( + resources=original_resources, + resolved_conditions=self.stack.resolved_conditions, + resolved_parameters=self.stack.resolved_parameters, + reverse=True, + ).keys() + ) + for i, resource_id in enumerate(ordered_resource_ids): + resource = resources[resource_id] + try: + # TODO: cache condition value in resource details on deployment and use cached value here + if not evaluate_resource_condition( + self.stack.resolved_conditions, + resource, + ): + continue + + executor = self.create_resource_provider_executor() + resource_provider_payload = self.create_resource_provider_payload( + "Remove", logical_resource_id=resource_id + ) + LOG.debug( + 'Handling "Remove" for resource "%s" (%s/%s) type "%s"', + resource_id, + i + 1, + len(resources), + resource["ResourceType"], + ) + resource_provider = executor.try_load_resource_provider(get_resource_type(resource)) + if resource_provider is not None: + event = executor.deploy_loop( + resource_provider, resource, resource_provider_payload + ) + else: + event = ProgressEvent(OperationStatus.SUCCESS, resource_model={}) + match event.status: + case OperationStatus.SUCCESS: + self.stack.set_resource_status(resource_id, "DELETE_COMPLETE") + case OperationStatus.PENDING: + # the resource is still being deleted, specifically the provider has + # signalled that the deployment loop should skip this resource this + # time and come back to it later, likely due to unmet child + # resources still existing because we don't delete things in the + # correct order yet. + continue + case OperationStatus.FAILED: + LOG.exception( + "Failed to delete resource with id %s. Reason: %s", + resource_id, + event.message or "unknown", + ) + case OperationStatus.IN_PROGRESS: + # the resource provider executor should not return this state, so + # this state is a programming error + raise Exception( + "Programming error: ResourceProviderExecutor cannot return IN_PROGRESS" + ) + case other_status: + raise Exception(f"Use of unsupported status found: {other_status}") + + except Exception as e: + LOG.exception( + "Failed to delete resource with id %s. 
Final exception: %s", + resource_id, + e, + ) + + # update status + self.stack.set_stack_status("DELETE_COMPLETE") + self.stack.set_time_attribute("DeletionTime") + + def do_apply_changes_in_loop(self, changes: list[ChangeConfig], stack: Stack) -> list: + # apply changes in a retry loop, to resolve resource dependencies and converge to the target state + changes_done = [] + new_resources = stack.resources + + sorted_changes = order_changes( + given_changes=changes, + resources=new_resources, + resolved_conditions=stack.resolved_conditions, + resolved_parameters=stack.resolved_parameters, + ) + for change_idx, change in enumerate(sorted_changes): + res_change = change["ResourceChange"] + action = res_change["Action"] + is_add_or_modify = action in ["Add", "Modify"] + resource_id = res_change["LogicalResourceId"] + + # TODO: do resolve_refs_recursively once here + try: + if is_add_or_modify: + should_deploy = self.prepare_should_deploy_change( + resource_id, change, stack, new_resources + ) + LOG.debug( + 'Handling "%s" for resource "%s" (%s/%s) type "%s" (should_deploy=%s)', + action, + resource_id, + change_idx + 1, + len(changes), + res_change["ResourceType"], + should_deploy, + ) + if not should_deploy: + stack_action = get_action_name_for_resource_change(action) + stack.set_resource_status(resource_id, f"{stack_action}_COMPLETE") + continue + elif action == "Remove": + should_remove = self.prepare_should_deploy_change( + resource_id, change, stack, new_resources + ) + if not should_remove: + continue + LOG.debug( + 'Handling "%s" for resource "%s" (%s/%s) type "%s"', + action, + resource_id, + change_idx + 1, + len(changes), + res_change["ResourceType"], + ) + self.apply_change(change, stack=stack) + changes_done.append(change) + except Exception as e: + status_action = { + "Add": "CREATE", + "Modify": "UPDATE", + "Dynamic": "UPDATE", + "Remove": "DELETE", + }[action] + stack.add_stack_event( + resource_id=resource_id, + physical_res_id=new_resources[resource_id].get("PhysicalResourceId"), + status=f"{status_action}_FAILED", + status_reason=str(e), + ) + if config.CFN_VERBOSE_ERRORS: + LOG.exception("Failed to deploy resource %s, stack deploy failed", resource_id) + raise + + # clean up references to deleted resources in stack + deletes = [c for c in changes_done if c["ResourceChange"]["Action"] == "Remove"] + for delete in deletes: + stack.template["Resources"].pop(delete["ResourceChange"]["LogicalResourceId"], None) + + # resolve outputs + stack.resolved_outputs = resolve_outputs(self.account_id, self.region_name, stack) + + return changes_done + + +# FIXME: resolve_refs_recursively should not be needed, the resources themselves should have those values available already +def resolve_outputs(account_id: str, region_name: str, stack) -> list[dict]: + result = [] + for k, details in stack.outputs.items(): + if not evaluate_resource_condition(stack.resolved_conditions, details): + continue + value = None + try: + resolve_refs_recursively( + account_id, + region_name, + stack.stack_name, + stack.resources, + stack.mappings, + stack.resolved_conditions, + stack.resolved_parameters, + details, + ) + value = details["Value"] + except Exception as e: + log_method = LOG.debug + if config.CFN_VERBOSE_ERRORS: + raise # unresolvable outputs cause a stack failure + # log_method = getattr(LOG, "exception") + log_method("Unable to resolve references in stack outputs: %s - %s", details, e) + exports = details.get("Export") or {} + export = exports.get("Name") + export = resolve_refs_recursively( + 
account_id,
+            region_name,
+            stack.stack_name,
+            stack.resources,
+            stack.mappings,
+            stack.resolved_conditions,
+            stack.resolved_parameters,
+            export,
+        )
+        description = details.get("Description")
+        entry = {
+            "OutputKey": k,
+            "OutputValue": value,
+            "Description": description,
+            "ExportName": export,
+        }
+        result.append(entry)
+    return result
diff --git a/localstack-core/localstack/services/cloudformation/engine/template_preparer.py b/localstack-core/localstack/services/cloudformation/engine/template_preparer.py
new file mode 100644
index 0000000000000..8206a7d6a99fc
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/engine/template_preparer.py
@@ -0,0 +1,68 @@
+import json
+import logging
+
+from localstack.services.cloudformation.engine import yaml_parser
+from localstack.services.cloudformation.engine.transformers import (
+    apply_global_transformations,
+    apply_intrinsic_transformations,
+)
+from localstack.utils.json import clone_safe
+
+LOG = logging.getLogger(__name__)
+
+
+def parse_template(template: str) -> dict:
+    try:
+        return json.loads(template)
+    except Exception:
+        try:
+            return clone_safe(yaml_parser.parse_yaml(template))
+        except Exception as e:
+            LOG.debug("Unable to parse CloudFormation template (%s): %s", e, template)
+            raise
+
+
+def template_to_json(template: str) -> str:
+    template = parse_template(template)
+    return json.dumps(template)
+
+
+# TODO: consider moving to transformers.py as well
+def transform_template(
+    account_id: str,
+    region_name: str,
+    template: dict,
+    stack_name: str,
+    resources: dict,
+    mappings: dict,
+    conditions: dict[str, bool],
+    resolved_parameters: dict,
+) -> dict:
+    processed_template = dict(template)
+
+    # apply 'Fn::Transform' intrinsic functions (note: needs to be applied before global
+    # transforms below, as some utils - incl samtransformer - expect them to be resolved already)
+    processed_template = apply_intrinsic_transformations(
+        account_id,
+        region_name,
+        processed_template,
+        stack_name,
+        resources,
+        mappings,
+        conditions,
+        resolved_parameters,
+    )
+
+    # apply global transforms
+    processed_template = apply_global_transformations(
+        account_id,
+        region_name,
+        processed_template,
+        stack_name,
+        resources,
+        mappings,
+        conditions,
+        resolved_parameters,
+    )
+
+    return processed_template
diff --git a/localstack-core/localstack/services/cloudformation/engine/template_utils.py b/localstack-core/localstack/services/cloudformation/engine/template_utils.py
new file mode 100644
index 0000000000000..062e4a3f1f840
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/engine/template_utils.py
@@ -0,0 +1,430 @@
+import re
+from typing import Any
+
+from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_AWS_NO_VALUE
+from localstack.services.cloudformation.engine.errors import TemplateError
+from localstack.utils.urls import localstack_host
+
+AWS_URL_SUFFIX = localstack_host().host  # value is "amazonaws.com" in real AWS
+
+
+def get_deps_for_resource(resource: dict, evaluated_conditions: dict[str, bool]) -> set[str]:
+    """
+    :param resource: the resource definition to be checked for dependencies
+    :param evaluated_conditions: mapping of condition name to its evaluated boolean value
+    :return: a set of logical resource IDs which this resource depends on
+    """
+    property_dependencies = resolve_dependencies(
+        resource.get("Properties", {}), evaluated_conditions
+    )
+    explicit_dependencies = resource.get("DependsOn", [])
+    if not isinstance(explicit_dependencies, list):
+        explicit_dependencies = [explicit_dependencies]
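+    # Example (illustrative): "DependsOn": "MyBucket" is normalized to
+    # ["MyBucket"] above, so a resource declaring that clause plus a
+    # {"Ref": "MyRole"} in its Properties ends up with {"MyBucket", "MyRole"}.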
return property_dependencies.union(explicit_dependencies) + + +def resolve_dependencies(d: dict, evaluated_conditions: dict[str, bool]) -> set[str]: + items = set() + + if isinstance(d, dict): + for k, v in d.items(): + if k == "Fn::If": + # check the condition and only traverse down the correct path + condition_name, true_value, false_value = v + if evaluated_conditions[condition_name]: + items = items.union(resolve_dependencies(true_value, evaluated_conditions)) + else: + items = items.union(resolve_dependencies(false_value, evaluated_conditions)) + elif k == "Ref": + items.add(v) + elif k == "Fn::GetAtt": + items.add(v[0] if isinstance(v, list) else v.split(".")[0]) + elif k == "Fn::Sub": + # we can assume anything in there is a ref + if isinstance(v, str): + # { "Fn::Sub" : "Hello ${Name}" } + variables_found = re.findall("\\${([^}]+)}", v) + for var in variables_found: + if "." in var: + var = var.split(".")[0] + items.add(var) + elif isinstance(v, list): + # { "Fn::Sub" : [ "Hello ${Name}", { "Name": "SomeName" } ] } + variables_found = re.findall("\\${([^}]+)}", v[0]) + for var in variables_found: + if var in v[1]: + # variable is included in provided mapping and can either be a static value or another reference + if isinstance(v[1][var], dict): + # e.g. { "Fn::Sub" : [ "Hello ${Name}", { "Name": {"Ref": "NameParam"} } ] } + # the values can have references, so we need to go deeper + items = items.union( + resolve_dependencies(v[1][var], evaluated_conditions) + ) + else: + # it's now either a GetAtt call or a direct reference + if "." in var: + var = var.split(".")[0] + items.add(var) + else: + raise Exception(f"Invalid template structure in Fn::Sub: {v}") + elif isinstance(v, dict): + items = items.union(resolve_dependencies(v, evaluated_conditions)) + elif isinstance(v, list): + for item in v: + # TODO: assumption that every element is a dict might not be true + items = items.union(resolve_dependencies(item, evaluated_conditions)) + else: + pass + elif isinstance(d, list): + for item in d: + items = items.union(resolve_dependencies(item, evaluated_conditions)) + r = {i for i in items if not i.startswith("AWS::")} + return r + + +def resolve_stack_conditions( + account_id: str, + region_name: str, + conditions: dict, + parameters: dict, + mappings: dict, + stack_name: str, +) -> dict[str, bool]: + """ + Within each condition, you can reference another: + condition + parameter value + mapping + + You can use the following intrinsic functions to define conditions: + Fn::And + Fn::Equals + Fn::If + Fn::Not + Fn::Or + + TODO: more checks on types from references (e.g. in a mapping value) + TODO: does a ref ever return a non-string value? + TODO: when unifying/reworking intrinsic functions rework this to a class structure + """ + result = {} + for condition_name, condition in conditions.items(): + result[condition_name] = resolve_condition( + account_id, region_name, condition, conditions, parameters, mappings, stack_name + ) + return result + + +def resolve_pseudo_parameter( + account_id: str, region_name: str, pseudo_parameter: str, stack_name: str +) -> Any: + """ + TODO: this function needs access to more stack context + """ + # pseudo parameters + match pseudo_parameter: + case "AWS::Region": + return region_name + case "AWS::Partition": + return "aws" + case "AWS::StackName": + return stack_name + case "AWS::StackId": + # TODO return proper stack id! 
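+            # A real stack id is an ARN of the form
+            # arn:aws:cloudformation:<region>:<account-id>:stack/<stack-name>/<uuid>;
+            # returning the plain stack name here is a known simplification.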
+ return stack_name + case "AWS::AccountId": + return account_id + case "AWS::NoValue": + return PLACEHOLDER_AWS_NO_VALUE + case "AWS::NotificationARNs": + # TODO! + return {} + case "AWS::URLSuffix": + return AWS_URL_SUFFIX + + +def resolve_conditional_mapping_ref( + ref_name, account_id: str, region_name: str, stack_name: str, parameters +): + if ref_name.startswith("AWS::"): + ref_value = resolve_pseudo_parameter(account_id, region_name, ref_name, stack_name) + if ref_value is None: + raise TemplateError(f"Invalid pseudo parameter '{ref_name}'") + else: + param = parameters.get(ref_name) + if not param: + raise TemplateError( + f"Invalid reference: '{ref_name}' does not exist in parameters: '{parameters}'" + ) + ref_value = param.get("ResolvedValue") or param.get("ParameterValue") + + return ref_value + + +def resolve_condition( + account_id: str, region_name: str, condition, conditions, parameters, mappings, stack_name +): + if isinstance(condition, dict): + for k, v in condition.items(): + match k: + case "Ref": + if isinstance(v, str) and v.startswith("AWS::"): + return resolve_pseudo_parameter( + account_id, region_name, v, stack_name + ) # TODO: this pseudo parameter resolving needs context(!) + # TODO: add util function for resolving individual refs (e.g. one util for resolving pseudo parameters) + # TODO: pseudo-parameters like AWS::Region + # can only really be a parameter here + # TODO: how are conditions references written here? as {"Condition": "ConditionA"} or via Ref? + # TODO: test for a boolean parameter? + param = parameters[v] + parameter_type: str = param["ParameterType"] + parameter_value = param.get("ResolvedValue") or param.get("ParameterValue") + + if parameter_type in ["CommaDelimitedList"] or parameter_type.startswith( + "List<" + ): + return [p.strip() for p in parameter_value.split(",")] + else: + return parameter_value + + case "Condition": + return resolve_condition( + account_id, + region_name, + conditions[v], + conditions, + parameters, + mappings, + stack_name, + ) + case "Fn::FindInMap": + map_name, top_level_key, second_level_key = v + if isinstance(map_name, dict) and "Ref" in map_name: + ref_name = map_name["Ref"] + map_name = resolve_conditional_mapping_ref( + ref_name, account_id, region_name, stack_name, parameters + ) + + if isinstance(top_level_key, dict) and "Ref" in top_level_key: + ref_name = top_level_key["Ref"] + top_level_key = resolve_conditional_mapping_ref( + ref_name, account_id, region_name, stack_name, parameters + ) + + if isinstance(second_level_key, dict) and "Ref" in second_level_key: + ref_name = second_level_key["Ref"] + second_level_key = resolve_conditional_mapping_ref( + ref_name, account_id, region_name, stack_name, parameters + ) + + mapping = mappings.get(map_name) + if not mapping: + raise TemplateError( + f"Invalid reference: '{map_name}' could not be found in the template mappings: '{list(mappings.keys())}'" + ) + + top_level_map = mapping.get(top_level_key) + if not top_level_map: + raise TemplateError( + f"Invalid reference: '{top_level_key}' could not be found in the '{map_name}' mapping: '{list(mapping.keys())}'" + ) + + value = top_level_map.get(second_level_key) + if not value: + raise TemplateError( + f"Invalid reference: '{second_level_key}' could not be found in the '{top_level_key}' mapping: '{top_level_map}'" + ) + + return value + case "Fn::If": + if_condition_name, true_branch, false_branch = v + if resolve_condition( + account_id, + region_name, + if_condition_name, + conditions, + parameters, + mappings, 
+ stack_name, + ): + return resolve_condition( + account_id, + region_name, + true_branch, + conditions, + parameters, + mappings, + stack_name, + ) + else: + return resolve_condition( + account_id, + region_name, + false_branch, + conditions, + parameters, + mappings, + stack_name, + ) + case "Fn::Not": + return not resolve_condition( + account_id, region_name, v[0], conditions, parameters, mappings, stack_name + ) + case "Fn::And": + # TODO: should actually restrict this a bit + return resolve_condition( + account_id, region_name, v[0], conditions, parameters, mappings, stack_name + ) and resolve_condition( + account_id, region_name, v[1], conditions, parameters, mappings, stack_name + ) + case "Fn::Or": + return resolve_condition( + account_id, region_name, v[0], conditions, parameters, mappings, stack_name + ) or resolve_condition( + account_id, region_name, v[1], conditions, parameters, mappings, stack_name + ) + case "Fn::Equals": + left = resolve_condition( + account_id, region_name, v[0], conditions, parameters, mappings, stack_name + ) + right = resolve_condition( + account_id, region_name, v[1], conditions, parameters, mappings, stack_name + ) + return fn_equals_type_conversion(left) == fn_equals_type_conversion(right) + case "Fn::Join": + join_list = v[1] + if isinstance(v[1], dict): + join_list = resolve_condition( + account_id, + region_name, + v[1], + conditions, + parameters, + mappings, + stack_name, + ) + result = v[0].join( + [ + resolve_condition( + account_id, + region_name, + x, + conditions, + parameters, + mappings, + stack_name, + ) + for x in join_list + ] + ) + return result + case "Fn::Select": + index = v[0] + options = v[1] + for i, option in enumerate(options): + if isinstance(option, dict): + options[i] = resolve_condition( + account_id, + region_name, + option, + conditions, + parameters, + mappings, + stack_name, + ) + return options[index] + case "Fn::Sub": + # we can assume anything in there is a ref + if isinstance(v, str): + # { "Fn::Sub" : "Hello ${Name}" } + result = v + variables_found = re.findall("\\${([^}]+)}", v) + for var in variables_found: + # can't be a resource here (!), so also not attribute access + if var.startswith("AWS::"): + # pseudo-parameter + resolved_pseudo_param = resolve_pseudo_parameter( + account_id, region_name, var, stack_name + ) + result = result.replace(f"${{{var}}}", resolved_pseudo_param) + else: + # parameter + param = parameters[var] + parameter_type: str = param["ParameterType"] + resolved_parameter = param.get("ResolvedValue") or param.get( + "ParameterValue" + ) + + if parameter_type in [ + "CommaDelimitedList" + ] or parameter_type.startswith("List<"): + resolved_parameter = [ + p.strip() for p in resolved_parameter.split(",") + ] + + result = result.replace(f"${{{var}}}", resolved_parameter) + + return result + elif isinstance(v, list): + # { "Fn::Sub" : [ "Hello ${Name}", { "Name": "SomeName" } ] } + result = v[0] + variables_found = re.findall("\\${([^}]+)}", v[0]) + for var in variables_found: + if var in v[1]: + # variable is included in provided mapping and can either be a static value or another reference + if isinstance(v[1][var], dict): + # e.g. 
{ "Fn::Sub" : [ "Hello ${Name}", { "Name": {"Ref": "NameParam"} } ] } + # the values can have references, so we need to go deeper + resolved_var = resolve_condition( + account_id, + region_name, + v[1][var], + conditions, + parameters, + mappings, + stack_name, + ) + result = result.replace(f"${{{var}}}", resolved_var) + else: + result = result.replace(f"${{{var}}}", v[1][var]) + else: + # it's now either a GetAtt call or a direct reference + if var.startswith("AWS::"): + # pseudo-parameter + resolved_pseudo_param = resolve_pseudo_parameter( + account_id, region_name, var, stack_name + ) + result = result.replace(f"${{{var}}}", resolved_pseudo_param) + else: + # parameter + param = parameters[var] + parameter_type: str = param["ParameterType"] + resolved_parameter = param.get("ResolvedValue") or param.get( + "ParameterValue" + ) + + if parameter_type in [ + "CommaDelimitedList" + ] or parameter_type.startswith("List<"): + resolved_parameter = [ + p.strip() for p in resolved_parameter.split(",") + ] + + result = result.replace(f"${{{var}}}", resolved_parameter) + return result + else: + raise Exception(f"Invalid template structure in Fn::Sub: {v}") + case _: + raise Exception(f"Invalid condition structure encountered: {condition=}") + else: + return condition + + +def fn_equals_type_conversion(value) -> str: + if isinstance(value, str): + return value + elif isinstance(value, bool): + return "true" if value else "false" + else: + return str(value) # TODO: investigate correct behavior diff --git a/localstack-core/localstack/services/cloudformation/engine/transformers.py b/localstack-core/localstack/services/cloudformation/engine/transformers.py new file mode 100644 index 0000000000000..fea83f5ca4533 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/transformers.py @@ -0,0 +1,304 @@ +import json +import logging +import os +from copy import deepcopy +from typing import Dict, Optional, Type, Union + +import boto3 +from botocore.exceptions import ClientError +from samtranslator.translator.transform import transform as transform_sam + +from localstack.aws.api import CommonServiceException +from localstack.aws.connect import connect_to +from localstack.services.cloudformation.engine.policy_loader import create_policy_loader +from localstack.services.cloudformation.engine.template_deployer import resolve_refs_recursively +from localstack.services.cloudformation.stores import get_cloudformation_store +from localstack.utils import testutil +from localstack.utils.objects import recurse_object +from localstack.utils.strings import long_uid + +LOG = logging.getLogger(__name__) + +SERVERLESS_TRANSFORM = "AWS::Serverless-2016-10-31" +EXTENSIONS_TRANSFORM = "AWS::LanguageExtensions" +SECRETSMANAGER_TRANSFORM = "AWS::SecretsManager-2020-07-23" + +TransformResult = Union[dict, str] + + +class Transformer: + """Abstract class for Fn::Transform intrinsic functions""" + + def transform(self, account_id: str, region_name: str, parameters: dict) -> TransformResult: + """Apply the transformer to the given parameters and return the modified construct""" + + +class AwsIncludeTransformer(Transformer): + """Implements the 'AWS::Include' transform intrinsic function""" + + def transform(self, account_id: str, region_name: str, parameters: dict) -> TransformResult: + from localstack.services.cloudformation.engine.template_preparer import parse_template + + location = parameters.get("Location") + if location and location.startswith("s3://"): + s3_client = 
connect_to(aws_access_key_id=account_id, region_name=region_name).s3 + bucket, _, path = location.removeprefix("s3://").partition("/") + try: + content = testutil.download_s3_object(s3_client, bucket, path) + except ClientError: + LOG.error("client error downloading S3 object '%s/%s'", bucket, path) + raise + content = parse_template(content) + return content + else: + LOG.warning("Unexpected Location parameter for AWS::Include transformer: %s", location) + return parameters + + +# maps transformer names to implementing classes +transformers: Dict[str, Type] = {"AWS::Include": AwsIncludeTransformer} + + +def apply_intrinsic_transformations( + account_id: str, + region_name: str, + template: dict, + stack_name: str, + resources: dict, + mappings: dict, + conditions: dict[str, bool], + stack_parameters: dict, +) -> dict: + """Resolve constructs using the 'Fn::Transform' intrinsic function.""" + + def _visit(obj, path, **_): + if isinstance(obj, dict) and "Fn::Transform" in obj: + transform = ( + obj["Fn::Transform"] + if isinstance(obj["Fn::Transform"], dict) + else {"Name": obj["Fn::Transform"]} + ) + transform_name = transform.get("Name") + transformer_class = transformers.get(transform_name) + macro_store = get_cloudformation_store(account_id, region_name).macros + parameters = transform.get("Parameters") or {} + parameters = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + stack_parameters, + parameters, + ) + if transformer_class: + transformer = transformer_class() + transformed = transformer.transform(account_id, region_name, parameters) + obj_copy = deepcopy(obj) + obj_copy.pop("Fn::Transform") + obj_copy.update(transformed) + return obj_copy + + elif transform_name in macro_store: + obj_copy = deepcopy(obj) + obj_copy.pop("Fn::Transform") + result = execute_macro( + account_id, region_name, obj_copy, transform, stack_parameters, parameters, True + ) + return result + else: + LOG.warning( + "Unsupported transform function '%s' used in %s", transform_name, stack_name + ) + return obj + + return recurse_object(template, _visit) + + +def apply_global_transformations( + account_id: str, + region_name: str, + template: dict, + stack_name: str, + resources: dict, + mappings: dict, + conditions: dict[str, bool], + stack_parameters: dict, +) -> dict: + processed_template = deepcopy(template) + transformations = format_template_transformations_into_list( + processed_template.get("Transform", []) + ) + for transformation in transformations: + transformation_parameters = resolve_refs_recursively( + account_id, + region_name, + stack_name, + resources, + mappings, + conditions, + stack_parameters, + transformation.get("Parameters", {}), + ) + + if not isinstance(transformation["Name"], str): + # TODO this should be done during template validation + raise CommonServiceException( + code="ValidationError", + status_code=400, + message="Key Name of transform definition must be a string.", + sender_fault=True, + ) + elif transformation["Name"] == SERVERLESS_TRANSFORM: + processed_template = apply_serverless_transformation( + account_id, region_name, processed_template, stack_parameters + ) + elif transformation["Name"] == EXTENSIONS_TRANSFORM: + continue + elif transformation["Name"] == SECRETSMANAGER_TRANSFORM: + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-secretsmanager.html + LOG.warning("%s is not yet supported. 
Ignoring.", SECRETSMANAGER_TRANSFORM) + else: + processed_template = execute_macro( + account_id, + region_name, + parsed_template=template, + macro=transformation, + stack_parameters=stack_parameters, + transformation_parameters=transformation_parameters, + ) + + return processed_template + + +def format_template_transformations_into_list(transforms: list | dict | str) -> list[dict]: + """ + The value of the Transform attribute can be: + - a transformation name + - an object like {Name: transformation, Parameters:{}} + - a list a list of names of the transformations to apply + - a list of objects defining a transformation + so the objective of this function is to normalize the list of transformations to apply into a list of transformation objects + """ + formatted_transformations = [] + if isinstance(transforms, str): + formatted_transformations.append({"Name": transforms}) + + if isinstance(transforms, dict): + formatted_transformations.append(transforms) + + if isinstance(transforms, list): + for transformation in transforms: + if isinstance(transformation, str): + formatted_transformations.append({"Name": transformation}) + if isinstance(transformation, dict): + formatted_transformations.append(transformation) + + return formatted_transformations + + +def execute_macro( + account_id: str, + region_name: str, + parsed_template: dict, + macro: dict, + stack_parameters: dict, + transformation_parameters: dict, + is_intrinsic=False, +) -> str: + macro_definition = get_cloudformation_store(account_id, region_name).macros.get(macro["Name"]) + if not macro_definition: + raise FailedTransformationException( + macro["Name"], f"Transformation {macro['Name']} is not supported." + ) + + formatted_stack_parameters = {} + for key, value in stack_parameters.items(): + # TODO: we want to support other types of parameters + if value.get("ParameterType") == "CommaDelimitedList": + formatted_stack_parameters[key] = value.get("ParameterValue").split(",") + else: + formatted_stack_parameters[key] = value.get("ParameterValue") + + transformation_id = f"{account_id}::{macro['Name']}" + event = { + "region": region_name, + "accountId": account_id, + "fragment": parsed_template, + "transformId": transformation_id, + "params": transformation_parameters, + "requestId": long_uid(), + "templateParameterValues": formatted_stack_parameters, + } + + client = connect_to(aws_access_key_id=account_id, region_name=region_name).lambda_ + try: + invocation = client.invoke( + FunctionName=macro_definition["FunctionName"], Payload=json.dumps(event) + ) + except ClientError: + LOG.error( + "client error executing lambda function '%s' with payload '%s'", + macro_definition["FunctionName"], + json.dumps(event), + ) + raise + if invocation.get("StatusCode") != 200 or invocation.get("FunctionError") == "Unhandled": + raise FailedTransformationException( + transformation=macro["Name"], + message=f"Received malformed response from transform {transformation_id}. Rollback requested by user.", + ) + result = json.loads(invocation["Payload"].read()) + + if result.get("status") != "success": + error_message = result.get("errorMessage") + message = ( + f"Transform {transformation_id} failed with: {error_message}. Rollback requested by user." + if error_message + else f"Transform {transformation_id} failed without an error message.. Rollback requested by user." 
+ ) + raise FailedTransformationException(transformation=macro["Name"], message=message) + + if not isinstance(result.get("fragment"), dict) and not is_intrinsic: + raise FailedTransformationException( + transformation=macro["Name"], + message="Template format error: unsupported structure.. Rollback requested by user.", + ) + + return result.get("fragment") + + +def apply_serverless_transformation( + account_id: str, region_name: str, parsed_template: dict, template_parameters: dict +) -> Optional[str]: + """only returns string when parsing SAM template, otherwise None""" + # TODO: we might also want to override the access key ID to account ID + region_before = os.environ.get("AWS_DEFAULT_REGION") + if boto3.session.Session().region_name is None: + os.environ["AWS_DEFAULT_REGION"] = region_name + loader = create_policy_loader() + simplified_parameters = { + k: v.get("ResolvedValue") or v["ParameterValue"] for k, v in template_parameters.items() + } + + try: + transformed = transform_sam(parsed_template, simplified_parameters, loader) + return transformed + except Exception as e: + raise FailedTransformationException(transformation=SERVERLESS_TRANSFORM, message=str(e)) + finally: + # Note: we need to fix boto3 region, otherwise AWS SAM transformer fails + os.environ.pop("AWS_DEFAULT_REGION", None) + if region_before is not None: + os.environ["AWS_DEFAULT_REGION"] = region_before + + +class FailedTransformationException(Exception): + transformation: str + msg: str + + def __init__(self, transformation: str, message: str = ""): + self.transformation = transformation + self.message = message + super().__init__(self.message) diff --git a/localstack-core/localstack/services/cloudformation/engine/types.py b/localstack-core/localstack/services/cloudformation/engine/types.py new file mode 100644 index 0000000000000..2a4f6efa06031 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/types.py @@ -0,0 +1,45 @@ +from typing import Any, Callable, Optional, TypedDict + +# --------------------- +# TYPES +# --------------------- + +# Callable here takes the arguments: +# - resource_props +# - stack_name +# - resources +# - resource_id +ResourceProp = str | Callable[[dict, str, dict, str], dict] +ResourceDefinition = dict[str, ResourceProp] + + +class FuncDetailsValue(TypedDict): + # Callable here takes the arguments: + # - logical_resource_id + # - resource + # - stack_name + function: str | Callable[[str, dict, str], Any] + """Either an api method to call directly with `parameters` or a callable to directly invoke""" + # Callable here takes the arguments: + # - resource_props + # - stack_name + # - resources + # - resource_id + parameters: Optional[ResourceDefinition | Callable[[dict, str, list[dict], str], dict]] + """arguments to the function, or a function that generates the arguments to the function""" + # Callable here takes the arguments + # - result + # - resource_id + # - resources + # - resource_type + result_handler: Optional[Callable[[dict, str, list[dict], str], None]] + """Take the result of the operation and patch the state of the resources, yuck...""" + types: Optional[dict[str, Callable]] + """Possible type conversions""" + + +# Type definition for func_details supplied to invoke_function +FuncDetails = list[FuncDetailsValue] | FuncDetailsValue + +# Type definition returned by GenericBaseModel.get_deploy_templates +DeployTemplates = dict[str, FuncDetails | Callable] diff --git a/localstack/services/redshift/__init__.py 
b/localstack-core/localstack/services/cloudformation/engine/v2/__init__.py similarity index 100% rename from localstack/services/redshift/__init__.py rename to localstack-core/localstack/services/cloudformation/engine/v2/__init__.py diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model.py new file mode 100644 index 0000000000000..5a4cae3e042d1 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model.py @@ -0,0 +1,1280 @@ +from __future__ import annotations + +import abc +import enum +from itertools import zip_longest +from typing import Any, Final, Generator, Optional, Union, cast + +from typing_extensions import TypeVar + +from localstack.utils.strings import camel_to_snake_case + +T = TypeVar("T") + + +class NothingType: + """A sentinel that denotes 'no value' (distinct from None).""" + + _singleton = None + __slots__ = () + + def __new__(cls): + if cls._singleton is None: + cls._singleton = super().__new__(cls) + return cls._singleton + + def __eq__(self, other): + return is_nothing(other) + + def __str__(self): + return repr(self) + + def __repr__(self) -> str: + return "Nothing" + + def __bool__(self): + return False + + def __iter__(self): + return iter(()) + + def __contains__(self, item): + return False + + +Maybe = Union[T, NothingType] +Nothing = NothingType() + + +def is_nothing(value: Any) -> bool: + return isinstance(value, NothingType) + + +def is_created(before: Maybe[Any], after: Maybe[Any]) -> bool: + return is_nothing(before) and not is_nothing(after) + + +def is_removed(before: Maybe[Any], after: Maybe[Any]) -> bool: + return not is_nothing(before) and is_nothing(after) + + +def parent_change_type_of(children: list[Maybe[ChangeSetEntity]]): + change_types = [c.change_type for c in children if not is_nothing(c)] + if not change_types: + return ChangeType.UNCHANGED + first_type = change_types[0] + if all(ct == first_type for ct in change_types): + return first_type + return ChangeType.MODIFIED + + +def change_type_of(before: Maybe[Any], after: Maybe[Any], children: list[Maybe[ChangeSetEntity]]): + if is_created(before, after): + change_type = ChangeType.CREATED + elif is_removed(before, after): + change_type = ChangeType.REMOVED + else: + change_type = parent_change_type_of(children) + return change_type + + +class Scope(str): + _ROOT_SCOPE: Final[str] = str() + _SEPARATOR: Final[str] = "/" + + def __new__(cls, scope: str = _ROOT_SCOPE) -> Scope: + return cast(Scope, super().__new__(cls, scope)) + + def open_scope(self, name: Scope | str) -> Scope: + return Scope(self._SEPARATOR.join([self, name])) + + def open_index(self, index: int) -> Scope: + return Scope(self._SEPARATOR.join([self, str(index)])) + + def unwrap(self) -> list[str]: + return self.split(self._SEPARATOR) + + +class ChangeType(enum.Enum): + UNCHANGED = "Unchanged" + CREATED = "Created" + MODIFIED = "Modified" + REMOVED = "Removed" + + def __str__(self): + return self.value + + +class ChangeSetEntity(abc.ABC): + scope: Final[Scope] + change_type: Final[ChangeType] + + def __init__(self, scope: Scope, change_type: ChangeType): + self.scope = scope + self.change_type = change_type + + def get_children(self) -> Generator[ChangeSetEntity]: + for child in self.__dict__.values(): + yield from self._get_children_in(child) + + @staticmethod + def _get_children_in(obj: Any) -> Generator[ChangeSetEntity]: + # TODO: could avoid the inductive logic here, and 
check for loops? + if isinstance(obj, ChangeSetEntity): + yield obj + elif isinstance(obj, list): + for item in obj: + yield from ChangeSetEntity._get_children_in(item) + elif isinstance(obj, dict): + for item in obj.values(): + yield from ChangeSetEntity._get_children_in(item) + + def __str__(self): + return f"({self.__class__.__name__}| {vars(self)}" + + def __repr__(self): + return str(self) + + +class ChangeSetNode(ChangeSetEntity, abc.ABC): ... + + +class ChangeSetTerminal(ChangeSetEntity, abc.ABC): ... + + +class NodeTemplate(ChangeSetNode): + mappings: Final[NodeMappings] + parameters: Final[NodeParameters] + conditions: Final[NodeConditions] + resources: Final[NodeResources] + outputs: Final[NodeOutputs] + + def __init__( + self, + scope: Scope, + mappings: NodeMappings, + parameters: NodeParameters, + conditions: NodeConditions, + resources: NodeResources, + outputs: NodeOutputs, + ): + change_type = parent_change_type_of([resources, outputs]) + super().__init__(scope=scope, change_type=change_type) + self.mappings = mappings + self.parameters = parameters + self.conditions = conditions + self.resources = resources + self.outputs = outputs + + +class NodeDivergence(ChangeSetNode): + value: Final[ChangeSetEntity] + divergence: Final[ChangeSetEntity] + + def __init__(self, scope: Scope, value: ChangeSetEntity, divergence: ChangeSetEntity): + super().__init__(scope=scope, change_type=ChangeType.MODIFIED) + self.value = value + self.divergence = divergence + + +class NodeParameter(ChangeSetNode): + name: Final[str] + type_: Final[ChangeSetEntity] + dynamic_value: Final[ChangeSetEntity] + default_value: Final[Maybe[ChangeSetEntity]] + + def __init__( + self, + scope: Scope, + name: str, + type_: ChangeSetEntity, + dynamic_value: ChangeSetEntity, + default_value: Maybe[ChangeSetEntity], + ): + change_type = parent_change_type_of([type_, default_value, dynamic_value]) + super().__init__(scope=scope, change_type=change_type) + self.name = name + self.type_ = type_ + self.dynamic_value = dynamic_value + self.default_value = default_value + + +class NodeParameters(ChangeSetNode): + parameters: Final[list[NodeParameter]] + + def __init__(self, scope: Scope, parameters: list[NodeParameter]): + change_type = parent_change_type_of(parameters) + super().__init__(scope=scope, change_type=change_type) + self.parameters = parameters + + +class NodeMapping(ChangeSetNode): + name: Final[str] + bindings: Final[NodeObject] + + def __init__(self, scope: Scope, name: str, bindings: NodeObject): + super().__init__(scope=scope, change_type=bindings.change_type) + self.name = name + self.bindings = bindings + + +class NodeMappings(ChangeSetNode): + mappings: Final[list[NodeMapping]] + + def __init__(self, scope: Scope, mappings: list[NodeMapping]): + change_type = parent_change_type_of(mappings) + super().__init__(scope=scope, change_type=change_type) + self.mappings = mappings + + +class NodeOutput(ChangeSetNode): + name: Final[str] + value: Final[ChangeSetEntity] + export: Final[Maybe[ChangeSetEntity]] + condition_reference: Final[Maybe[TerminalValue]] + + def __init__( + self, + scope: Scope, + name: str, + value: ChangeSetEntity, + export: Maybe[ChangeSetEntity], + conditional_reference: Maybe[TerminalValue], + ): + change_type = parent_change_type_of([value, export, conditional_reference]) + super().__init__(scope=scope, change_type=change_type) + self.name = name + self.value = value + self.export = export + self.condition_reference = conditional_reference + + +class NodeOutputs(ChangeSetNode): + 
outputs: Final[list[NodeOutput]] + + def __init__(self, scope: Scope, outputs: list[NodeOutput]): + change_type = parent_change_type_of(outputs) + super().__init__(scope=scope, change_type=change_type) + self.outputs = outputs + + +class NodeCondition(ChangeSetNode): + name: Final[str] + body: Final[ChangeSetEntity] + + def __init__(self, scope: Scope, name: str, body: ChangeSetEntity): + super().__init__(scope=scope, change_type=body.change_type) + self.name = name + self.body = body + + +class NodeConditions(ChangeSetNode): + conditions: Final[list[NodeCondition]] + + def __init__(self, scope: Scope, conditions: list[NodeCondition]): + change_type = parent_change_type_of(conditions) + super().__init__(scope=scope, change_type=change_type) + self.conditions = conditions + + +class NodeResources(ChangeSetNode): + resources: Final[list[NodeResource]] + + def __init__(self, scope: Scope, resources: list[NodeResource]): + change_type = parent_change_type_of(resources) + super().__init__(scope=scope, change_type=change_type) + self.resources = resources + + +class NodeResource(ChangeSetNode): + name: Final[str] + type_: Final[ChangeSetTerminal] + properties: Final[NodeProperties] + condition_reference: Final[Maybe[TerminalValue]] + depends_on: Final[Maybe[NodeDependsOn]] + + def __init__( + self, + scope: Scope, + change_type: ChangeType, + name: str, + type_: ChangeSetTerminal, + properties: NodeProperties, + condition_reference: Maybe[TerminalValue], + depends_on: Maybe[NodeDependsOn], + ): + super().__init__(scope=scope, change_type=change_type) + self.name = name + self.type_ = type_ + self.properties = properties + self.condition_reference = condition_reference + self.depends_on = depends_on + + +class NodeProperties(ChangeSetNode): + properties: Final[list[NodeProperty]] + + def __init__(self, scope: Scope, properties: list[NodeProperty]): + change_type = parent_change_type_of(properties) + super().__init__(scope=scope, change_type=change_type) + self.properties = properties + + +class NodeDependsOn(ChangeSetNode): + depends_on: Final[NodeArray] + + def __init__(self, scope: Scope, depends_on: NodeArray): + super().__init__(scope=scope, change_type=depends_on.change_type) + self.depends_on = depends_on + + +class NodeProperty(ChangeSetNode): + name: Final[str] + value: Final[ChangeSetEntity] + + def __init__(self, scope: Scope, name: str, value: ChangeSetEntity): + super().__init__(scope=scope, change_type=value.change_type) + self.name = name + self.value = value + + +class NodeIntrinsicFunction(ChangeSetNode): + intrinsic_function: Final[str] + arguments: Final[ChangeSetEntity] + + def __init__( + self, + scope: Scope, + change_type: ChangeType, + intrinsic_function: str, + arguments: ChangeSetEntity, + ): + super().__init__(scope=scope, change_type=change_type) + self.intrinsic_function = intrinsic_function + self.arguments = arguments + + +class NodeObject(ChangeSetNode): + bindings: Final[dict[str, ChangeSetEntity]] + + def __init__(self, scope: Scope, change_type: ChangeType, bindings: dict[str, ChangeSetEntity]): + super().__init__(scope=scope, change_type=change_type) + self.bindings = bindings + + +class NodeArray(ChangeSetNode): + array: Final[list[ChangeSetEntity]] + + def __init__(self, scope: Scope, change_type: ChangeType, array: list[ChangeSetEntity]): + super().__init__(scope=scope, change_type=change_type) + self.array = array + + +class TerminalValue(ChangeSetTerminal, abc.ABC): + value: Final[Any] + + def __init__(self, scope: Scope, change_type: ChangeType, value: 
Any): + super().__init__(scope=scope, change_type=change_type) + self.value = value + + +class TerminalValueModified(TerminalValue): + modified_value: Final[Any] + + def __init__(self, scope: Scope, value: Any, modified_value: Any): + super().__init__(scope=scope, change_type=ChangeType.MODIFIED, value=value) + self.modified_value = modified_value + + +class TerminalValueCreated(TerminalValue): + def __init__(self, scope: Scope, value: Any): + super().__init__(scope=scope, change_type=ChangeType.CREATED, value=value) + + +class TerminalValueRemoved(TerminalValue): + def __init__(self, scope: Scope, value: Any): + super().__init__(scope=scope, change_type=ChangeType.REMOVED, value=value) + + +class TerminalValueUnchanged(TerminalValue): + def __init__(self, scope: Scope, value: Any): + super().__init__(scope=scope, change_type=ChangeType.UNCHANGED, value=value) + + +TypeKey: Final[str] = "Type" +ConditionKey: Final[str] = "Condition" +ConditionsKey: Final[str] = "Conditions" +MappingsKey: Final[str] = "Mappings" +ResourcesKey: Final[str] = "Resources" +PropertiesKey: Final[str] = "Properties" +ParametersKey: Final[str] = "Parameters" +DefaultKey: Final[str] = "Default" +ValueKey: Final[str] = "Value" +ExportKey: Final[str] = "Export" +OutputsKey: Final[str] = "Outputs" +DependsOnKey: Final[str] = "DependsOn" +# TODO: expand intrinsic functions set. +RefKey: Final[str] = "Ref" +RefConditionKey: Final[str] = "Condition" +FnIfKey: Final[str] = "Fn::If" +FnAnd: Final[str] = "Fn::And" +FnOr: Final[str] = "Fn::Or" +FnNotKey: Final[str] = "Fn::Not" +FnJoinKey: Final[str] = "Fn::Join" +FnGetAttKey: Final[str] = "Fn::GetAtt" +FnEqualsKey: Final[str] = "Fn::Equals" +FnFindInMapKey: Final[str] = "Fn::FindInMap" +FnSubKey: Final[str] = "Fn::Sub" +FnTransform: Final[str] = "Fn::Transform" +FnSelect: Final[str] = "Fn::Select" +FnSplit: Final[str] = "Fn::Split" +FnGetAZs: Final[str] = "Fn::GetAZs" +FnBase64: Final[str] = "Fn::Base64" +INTRINSIC_FUNCTIONS: Final[set[str]] = { + RefKey, + RefConditionKey, + FnIfKey, + FnAnd, + FnOr, + FnNotKey, + FnJoinKey, + FnEqualsKey, + FnGetAttKey, + FnFindInMapKey, + FnSubKey, + FnTransform, + FnSelect, + FnSplit, + FnGetAZs, + FnBase64, +} + + +class ChangeSetModel: + # TODO: should this instead be generalised to work on "Stack" objects instead of just "Template"s? + + # TODO: can probably improve the typehints to use CFN's 'language' eg. dict -> Template|Properties, etc. + + # TODO: add support for 'replacement' computation, and ensure this state is propagated in tree traversals + # such as intrinsic functions. + + _before_template: Final[Maybe[dict]] + _after_template: Final[Maybe[dict]] + _before_parameters: Final[Maybe[dict]] + _after_parameters: Final[Maybe[dict]] + _visited_scopes: Final[dict[str, ChangeSetEntity]] + _node_template: Final[NodeTemplate] + + def __init__( + self, + before_template: Optional[dict], + after_template: Optional[dict], + before_parameters: Optional[dict], + after_parameters: Optional[dict], + ): + self._before_template = before_template or Nothing + self._after_template = after_template or Nothing + self._before_parameters = before_parameters or Nothing + self._after_parameters = after_parameters or Nothing + self._visited_scopes = dict() + self._node_template = self._model( + before_template=self._before_template, after_template=self._after_template + ) + # TODO: need to do template preprocessing e.g. parameter resolution, conditions etc. 
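For orientation, here is a standalone sketch (not LocalStack code; `_MISSING` stands in for the `Nothing` singleton defined earlier in this file) of why the diffing logic needs a sentinel distinct from `None`:

```python
# Minimal sketch: a dedicated "absent" sentinel lets the model tell an
# explicit null apart from a missing key when diffing two template dicts.
_MISSING = object()  # stand-in for the Nothing singleton

def classify(before: dict, after: dict, key: str) -> str:
    b = before.get(key, _MISSING)
    a = after.get(key, _MISSING)
    if b is _MISSING and a is not _MISSING:
        return "Created"
    if b is not _MISSING and a is _MISSING:
        return "Removed"
    return "Unchanged" if b == a else "Modified"

# {"X": None} is an explicit null, not an absent key; None alone could
# not express that distinction.
assert classify({"X": None}, {"X": None}, "X") == "Unchanged"
assert classify({}, {"X": None}, "X") == "Created"
```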
+ + def get_update_model(self) -> NodeTemplate: + # TODO: rethink naming of this for outer utils + return self._node_template + + def _visit_terminal_value( + self, scope: Scope, before_value: Maybe[Any], after_value: Maybe[Any] + ) -> TerminalValue: + terminal_value = self._visited_scopes.get(scope) + if isinstance(terminal_value, TerminalValue): + return terminal_value + if is_created(before=before_value, after=after_value): + terminal_value = TerminalValueCreated(scope=scope, value=after_value) + elif is_removed(before=before_value, after=after_value): + terminal_value = TerminalValueRemoved(scope=scope, value=before_value) + elif before_value == after_value: + terminal_value = TerminalValueUnchanged(scope=scope, value=before_value) + else: + terminal_value = TerminalValueModified( + scope=scope, value=before_value, modified_value=after_value + ) + self._visited_scopes[scope] = terminal_value + return terminal_value + + def _visit_intrinsic_function( + self, + scope: Scope, + intrinsic_function: str, + before_arguments: Maybe[Any], + after_arguments: Maybe[Any], + ) -> NodeIntrinsicFunction: + node_intrinsic_function = self._visited_scopes.get(scope) + if isinstance(node_intrinsic_function, NodeIntrinsicFunction): + return node_intrinsic_function + arguments = self._visit_value( + scope=scope, before_value=before_arguments, after_value=after_arguments + ) + if is_created(before=before_arguments, after=after_arguments): + change_type = ChangeType.CREATED + elif is_removed(before=before_arguments, after=after_arguments): + change_type = ChangeType.REMOVED + else: + function_name = intrinsic_function.replace("::", "_") + function_name = camel_to_snake_case(function_name) + resolve_function_name = f"_resolve_intrinsic_function_{function_name}" + if hasattr(self, resolve_function_name): + resolve_function = getattr(self, resolve_function_name) + change_type = resolve_function(arguments) + else: + change_type = arguments.change_type + node_intrinsic_function = NodeIntrinsicFunction( + scope=scope, + change_type=change_type, + intrinsic_function=intrinsic_function, + arguments=arguments, + ) + self._visited_scopes[scope] = node_intrinsic_function + return node_intrinsic_function + + def _resolve_intrinsic_function_fn_sub(self, arguments: ChangeSetEntity) -> ChangeType: + # TODO: This routine should instead export the implicit Ref and GetAtt calls within the first + # string template parameter and compute the respective change set types. Currently, + # changes referenced by Fn::Sub templates are only picked up during preprocessing; not + # at modelling. + return arguments.change_type + + def _resolve_intrinsic_function_fn_get_att(self, arguments: ChangeSetEntity) -> ChangeType: + # TODO: add support for nested intrinsic functions. + # TODO: validate arguments structure and type. + # TODO: should this check for deletion of resources and/or properties, if so what error should be raised? 
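A minimal sketch of the resolver-name derivation used in `_visit_intrinsic_function` above; the regex is a simplified stand-in for `localstack.utils.strings.camel_to_snake_case`, assumed to behave equivalently for these inputs:

```python
import re

def camel_to_snake_case(name: str) -> str:
    # insert "_" before a capital that follows a lowercase letter or digit
    return re.sub(r"(?<=[a-z0-9])([A-Z])", r"_\1", name).lower()

def resolver_name(intrinsic_function: str) -> str:
    # mirrors the dispatch in _visit_intrinsic_function: "::" -> "_",
    # then snake_case, then the method-name prefix
    return "_resolve_intrinsic_function_" + camel_to_snake_case(
        intrinsic_function.replace("::", "_")
    )

assert resolver_name("Fn::GetAtt") == "_resolve_intrinsic_function_fn_get_att"
assert resolver_name("Fn::FindInMap") == "_resolve_intrinsic_function_fn_find_in_map"
```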
+ + if not isinstance(arguments, NodeArray) or not arguments.array: + raise RuntimeError() + logical_name_of_resource_entity = arguments.array[0] + if not isinstance(logical_name_of_resource_entity, TerminalValue): + raise RuntimeError() + logical_name_of_resource: str = logical_name_of_resource_entity.value + if not isinstance(logical_name_of_resource, str): + raise RuntimeError() + node_resource: NodeResource = self._retrieve_or_visit_resource( + resource_name=logical_name_of_resource + ) + + node_property_attribute_name = arguments.array[1] + if not isinstance(node_property_attribute_name, TerminalValue): + raise RuntimeError() + if isinstance(node_property_attribute_name, TerminalValueModified): + attribute_name = node_property_attribute_name.modified_value + else: + attribute_name = node_property_attribute_name.value + + # TODO: this is another use case for which properties should be referenced by name + for node_property in node_resource.properties.properties: + if node_property.name == attribute_name: + return node_property.change_type + + return ChangeType.UNCHANGED + + def _resolve_intrinsic_function_ref(self, arguments: ChangeSetEntity) -> ChangeType: + if arguments.change_type != ChangeType.UNCHANGED: + return arguments.change_type + if not isinstance(arguments, TerminalValue): + return arguments.change_type + + logical_id = arguments.value + + node_condition = self._retrieve_condition_if_exists(condition_name=logical_id) + if isinstance(node_condition, NodeCondition): + return node_condition.change_type + + node_parameter = self._retrieve_parameter_if_exists(parameter_name=logical_id) + if isinstance(node_parameter, NodeParameter): + return node_parameter.change_type + + # TODO: this should check the replacement flag for a resource update. + node_resource = self._retrieve_or_visit_resource(resource_name=logical_id) + return node_resource.change_type + + def _resolve_intrinsic_function_condition(self, arguments: ChangeSetEntity) -> ChangeType: + if arguments.change_type != ChangeType.UNCHANGED: + return arguments.change_type + if not isinstance(arguments, TerminalValue): + return arguments.change_type + + condition_name = arguments.value + node_condition = self._retrieve_condition_if_exists(condition_name=condition_name) + if isinstance(node_condition, NodeCondition): + return node_condition.change_type + raise RuntimeError(f"Undefined condition '{condition_name}'") + + def _resolve_intrinsic_function_fn_find_in_map(self, arguments: ChangeSetEntity) -> ChangeType: + if arguments.change_type != ChangeType.UNCHANGED: + return arguments.change_type + # TODO: validate arguments structure and type. + # TODO: add support for nested functions, here we assume the arguments are string literals. 
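As context for the lookup implemented just below, a standalone sketch of the two-level traversal `Fn::FindInMap` performs (argument values assumed to be string literals, as the TODO notes; the mapping data is illustrative):

```python
# Mappings -> top-level key -> second-level key, mirroring the traversal in
# _resolve_intrinsic_function_fn_find_in_map.
mappings = {
    "RegionMap": {
        "us-east-1": {"ami": "ami-111"},
        "eu-west-1": {"ami": "ami-222"},
    }
}

def find_in_map(mapping_name: str, top_level_key: str, second_level_key: str):
    return mappings[mapping_name][top_level_key][second_level_key]

assert find_in_map("RegionMap", "eu-west-1", "ami") == "ami-222"
# In the model, only the targeted leaf's change_type is propagated: editing
# the "us-east-1" entries leaves a lookup into "eu-west-1" Unchanged.
```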
+ + if not isinstance(arguments, NodeArray) or not arguments.array: + raise RuntimeError() + argument_mapping_name = arguments.array[0] + if not isinstance(argument_mapping_name, TerminalValue): + raise NotImplementedError() + argument_top_level_key = arguments.array[1] + if not isinstance(argument_top_level_key, TerminalValue): + raise NotImplementedError() + argument_second_level_key = arguments.array[2] + if not isinstance(argument_second_level_key, TerminalValue): + raise NotImplementedError() + mapping_name = argument_mapping_name.value + top_level_key = argument_top_level_key.value + second_level_key = argument_second_level_key.value + + node_mapping = self._retrieve_mapping(mapping_name=mapping_name) + # TODO: a lookup would be beneficial in this scenario too; + # consider implications downstream and for replication. + top_level_object = node_mapping.bindings.bindings.get(top_level_key) + if not isinstance(top_level_object, NodeObject): + raise RuntimeError() + target_map_value = top_level_object.bindings.get(second_level_key) + return target_map_value.change_type + + def _resolve_intrinsic_function_fn_if(self, arguments: ChangeSetEntity) -> ChangeType: + # TODO: validate arguments structure and type. + if not isinstance(arguments, NodeArray) or not arguments.array: + raise RuntimeError() + logical_name_of_condition_entity = arguments.array[0] + if not isinstance(logical_name_of_condition_entity, TerminalValue): + raise RuntimeError() + logical_name_of_condition: str = logical_name_of_condition_entity.value + if not isinstance(logical_name_of_condition, str): + raise RuntimeError() + + node_condition = self._retrieve_condition_if_exists( + condition_name=logical_name_of_condition + ) + if not isinstance(node_condition, NodeCondition): + raise RuntimeError() + change_type = parent_change_type_of([node_condition, *arguments[1:]]) + return change_type + + def _visit_array( + self, scope: Scope, before_array: Maybe[list], after_array: Maybe[list] + ) -> NodeArray: + array: list[ChangeSetEntity] = list() + for index, (before_value, after_value) in enumerate( + zip_longest(before_array, after_array, fillvalue=Nothing) + ): + value_scope = scope.open_index(index=index) + value = self._visit_value( + scope=value_scope, before_value=before_value, after_value=after_value + ) + array.append(value) + change_type = change_type_of(before_array, after_array, array) + return NodeArray(scope=scope, change_type=change_type, array=array) + + def _visit_object( + self, scope: Scope, before_object: Maybe[dict], after_object: Maybe[dict] + ) -> NodeObject: + node_object = self._visited_scopes.get(scope) + if isinstance(node_object, NodeObject): + return node_object + binding_names = self._safe_keys_of(before_object, after_object) + bindings: dict[str, ChangeSetEntity] = dict() + for binding_name in binding_names: + binding_scope, (before_value, after_value) = self._safe_access_in( + scope, binding_name, before_object, after_object + ) + value = self._visit_value( + scope=binding_scope, before_value=before_value, after_value=after_value + ) + bindings[binding_name] = value + change_type = change_type_of(before_object, after_object, list(bindings.values())) + node_object = NodeObject(scope=scope, change_type=change_type, bindings=bindings) + self._visited_scopes[scope] = node_object + return node_object + + def _visit_divergence( + self, scope: Scope, before_value: Maybe[Any], after_value: Maybe[Any] + ) -> NodeDivergence: + scope_value = scope.open_scope("value") + value = 
self._visit_value(scope=scope_value, before_value=before_value, after_value=Nothing) + scope_divergence = scope.open_scope("divergence") + divergence = self._visit_value( + scope=scope_divergence, before_value=Nothing, after_value=after_value + ) + return NodeDivergence(scope=scope, value=value, divergence=divergence) + + def _visit_value( + self, scope: Scope, before_value: Maybe[Any], after_value: Maybe[Any] + ) -> ChangeSetEntity: + value = self._visited_scopes.get(scope) + if isinstance(value, ChangeSetEntity): + return value + + before_type_name = self._type_name_of(before_value) + after_type_name = self._type_name_of(after_value) + unset = object() + if before_type_name == after_type_name: + dominant_value = before_value + elif is_created(before=before_value, after=after_value): + dominant_value = after_value + elif is_removed(before=before_value, after=after_value): + dominant_value = before_value + else: + dominant_value = unset + if dominant_value is not unset: + dominant_type_name = self._type_name_of(dominant_value) + if self._is_terminal(value=dominant_value): + value = self._visit_terminal_value( + scope=scope, before_value=before_value, after_value=after_value + ) + elif self._is_object(value=dominant_value): + value = self._visit_object( + scope=scope, before_object=before_value, after_object=after_value + ) + elif self._is_array(value=dominant_value): + value = self._visit_array( + scope=scope, before_array=before_value, after_array=after_value + ) + elif self._is_intrinsic_function_name(dominant_type_name): + intrinsic_function_scope, (before_arguments, after_arguments) = ( + self._safe_access_in(scope, dominant_type_name, before_value, after_value) + ) + value = self._visit_intrinsic_function( + scope=scope, + intrinsic_function=dominant_type_name, + before_arguments=before_arguments, + after_arguments=after_arguments, + ) + else: + raise RuntimeError(f"Unsupported type {type(dominant_value)}") + # Case: type divergence. 
+ else: + value = self._visit_divergence( + scope=scope, before_value=before_value, after_value=after_value + ) + self._visited_scopes[scope] = value + return value + + def _visit_property( + self, + scope: Scope, + property_name: str, + before_property: Maybe[Any], + after_property: Maybe[Any], + ) -> NodeProperty: + node_property = self._visited_scopes.get(scope) + if isinstance(node_property, NodeProperty): + return node_property + value = self._visit_value( + scope=scope, before_value=before_property, after_value=after_property + ) + node_property = NodeProperty(scope=scope, name=property_name, value=value) + self._visited_scopes[scope] = node_property + return node_property + + def _visit_properties( + self, scope: Scope, before_properties: Maybe[dict], after_properties: Maybe[dict] + ) -> NodeProperties: + node_properties = self._visited_scopes.get(scope) + if isinstance(node_properties, NodeProperties): + return node_properties + property_names: list[str] = self._safe_keys_of(before_properties, after_properties) + properties: list[NodeProperty] = list() + for property_name in property_names: + property_scope, (before_property, after_property) = self._safe_access_in( + scope, property_name, before_properties, after_properties + ) + property_ = self._visit_property( + scope=property_scope, + property_name=property_name, + before_property=before_property, + after_property=after_property, + ) + properties.append(property_) + node_properties = NodeProperties(scope=scope, properties=properties) + self._visited_scopes[scope] = node_properties + return node_properties + + def _visit_type(self, scope: Scope, before_type: Any, after_type: Any) -> TerminalValue: + value = self._visit_value(scope=scope, before_value=before_type, after_value=after_type) + if not isinstance(value, TerminalValue): + # TODO: decide where template schema validation should occur. 
+ raise RuntimeError() + return value + + def _visit_resource( + self, + scope: Scope, + resource_name: str, + before_resource: Maybe[dict], + after_resource: Maybe[dict], + ) -> NodeResource: + node_resource = self._visited_scopes.get(scope) + if isinstance(node_resource, NodeResource): + return node_resource + + scope_type, (before_type, after_type) = self._safe_access_in( + scope, TypeKey, before_resource, after_resource + ) + terminal_value_type = self._visit_type( + scope=scope_type, before_type=before_type, after_type=after_type + ) + + condition_reference = Nothing + scope_condition, (before_condition, after_condition) = self._safe_access_in( + scope, ConditionKey, before_resource, after_resource + ) + if before_condition or after_condition: + condition_reference = self._visit_terminal_value( + scope_condition, before_condition, after_condition + ) + + depends_on = Nothing + scope_depends_on, (before_depends_on, after_depends_on) = self._safe_access_in( + scope, DependsOnKey, before_resource, after_resource + ) + if before_depends_on or after_depends_on: + depends_on = self._visit_depends_on( + scope_depends_on, before_depends_on, after_depends_on + ) + + scope_properties, (before_properties, after_properties) = self._safe_access_in( + scope, PropertiesKey, before_resource, after_resource + ) + properties = self._visit_properties( + scope=scope_properties, + before_properties=before_properties, + after_properties=after_properties, + ) + + change_type = change_type_of( + before_resource, after_resource, [properties, condition_reference, depends_on] + ) + node_resource = NodeResource( + scope=scope, + change_type=change_type, + name=resource_name, + type_=terminal_value_type, + properties=properties, + condition_reference=condition_reference, + depends_on=depends_on, + ) + self._visited_scopes[scope] = node_resource + return node_resource + + def _visit_resources( + self, scope: Scope, before_resources: Maybe[dict], after_resources: Maybe[dict] + ) -> NodeResources: + # TODO: investigate type changes behavior. 
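For reference, the parent/child rule these resource visitors inherit from `parent_change_type_of` (defined near the top of this file) reduces to the following sketch:

```python
# A parent node takes its children's change type when they all agree, and
# falls back to Modified on any mix; Nothing-valued children are skipped.
def parent_change_type(children: list) -> str:
    change_types = [c for c in children if c is not None]  # None ~ Nothing
    if not change_types:
        return "Unchanged"
    if all(ct == change_types[0] for ct in change_types):
        return change_types[0]
    return "Modified"

assert parent_change_type(["Created", "Created"]) == "Created"
assert parent_change_type(["Unchanged", "Modified"]) == "Modified"
assert parent_change_type([]) == "Unchanged"
```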
+ resources: list[NodeResource] = list() + resource_names = self._safe_keys_of(before_resources, after_resources) + for resource_name in resource_names: + resource_scope, (before_resource, after_resource) = self._safe_access_in( + scope, resource_name, before_resources, after_resources + ) + resource = self._visit_resource( + scope=resource_scope, + resource_name=resource_name, + before_resource=before_resource, + after_resource=after_resource, + ) + resources.append(resource) + return NodeResources(scope=scope, resources=resources) + + def _visit_mapping( + self, scope: Scope, name: str, before_mapping: Maybe[dict], after_mapping: Maybe[dict] + ) -> NodeMapping: + bindings = self._visit_object( + scope=scope, before_object=before_mapping, after_object=after_mapping + ) + return NodeMapping(scope=scope, name=name, bindings=bindings) + + def _visit_mappings( + self, scope: Scope, before_mappings: Maybe[dict], after_mappings: Maybe[dict] + ) -> NodeMappings: + mappings: list[NodeMapping] = list() + mapping_names = self._safe_keys_of(before_mappings, after_mappings) + for mapping_name in mapping_names: + scope_mapping, (before_mapping, after_mapping) = self._safe_access_in( + scope, mapping_name, before_mappings, after_mappings + ) + mapping = self._visit_mapping( + scope=scope, + name=mapping_name, + before_mapping=before_mapping, + after_mapping=after_mapping, + ) + mappings.append(mapping) + return NodeMappings(scope=scope, mappings=mappings) + + def _visit_dynamic_parameter(self, parameter_name: str) -> ChangeSetEntity: + scope = Scope("Dynamic").open_scope("Parameters") + scope_parameter, (before_parameter, after_parameter) = self._safe_access_in( + scope, parameter_name, self._before_parameters, self._after_parameters + ) + parameter = self._visit_value( + scope=scope_parameter, before_value=before_parameter, after_value=after_parameter + ) + return parameter + + def _visit_parameter( + self, + scope: Scope, + parameter_name: str, + before_parameter: Maybe[dict], + after_parameter: Maybe[dict], + ) -> NodeParameter: + node_parameter = self._visited_scopes.get(scope) + if isinstance(node_parameter, NodeParameter): + return node_parameter + + type_scope, (before_type, after_type) = self._safe_access_in( + scope, TypeKey, before_parameter, after_parameter + ) + type_ = self._visit_value(type_scope, before_type, after_type) + + default_scope, (before_default, after_default) = self._safe_access_in( + scope, DefaultKey, before_parameter, after_parameter + ) + default_value = self._visit_value(default_scope, before_default, after_default) + + dynamic_value = self._visit_dynamic_parameter(parameter_name=parameter_name) + + node_parameter = NodeParameter( + scope=scope, + name=parameter_name, + type_=type_, + default_value=default_value, + dynamic_value=dynamic_value, + ) + self._visited_scopes[scope] = node_parameter + return node_parameter + + def _visit_parameters( + self, scope: Scope, before_parameters: Maybe[dict], after_parameters: Maybe[dict] + ) -> NodeParameters: + node_parameters = self._visited_scopes.get(scope) + if isinstance(node_parameters, NodeParameters): + return node_parameters + parameter_names: list[str] = self._safe_keys_of(before_parameters, after_parameters) + parameters: list[NodeParameter] = list() + for parameter_name in parameter_names: + parameter_scope, (before_parameter, after_parameter) = self._safe_access_in( + scope, parameter_name, before_parameters, after_parameters + ) + parameter = self._visit_parameter( + scope=parameter_scope, + 
parameter_name=parameter_name, + before_parameter=before_parameter, + after_parameter=after_parameter, + ) + parameters.append(parameter) + node_parameters = NodeParameters(scope=scope, parameters=parameters) + self._visited_scopes[scope] = node_parameters + return node_parameters + + @staticmethod + def _normalise_depends_on_value(value: Maybe[str | list[str]]) -> Maybe[list[str]]: + # To simplify downstream logics, reduce the type options to array of strings. + # TODO: Add integrations tests for DependsOn validations (invalid types, duplicate identifiers, etc.) + if isinstance(value, NothingType): + return value + if isinstance(value, str): + value = [value] + elif isinstance(value, list): + value.sort() + else: + raise RuntimeError( + f"Invalid type for DependsOn, expected a String or Array of String, but got: '{value}'" + ) + return value + + def _visit_depends_on( + self, + scope: Scope, + before_depends_on: Maybe[str | list[str]], + after_depends_on: Maybe[str | list[str]], + ) -> NodeDependsOn: + before_depends_on = self._normalise_depends_on_value(value=before_depends_on) + after_depends_on = self._normalise_depends_on_value(value=after_depends_on) + node_array = self._visit_array( + scope=scope, before_array=before_depends_on, after_array=after_depends_on + ) + node_depends_on = NodeDependsOn(scope=scope, depends_on=node_array) + return node_depends_on + + def _visit_condition( + self, + scope: Scope, + condition_name: str, + before_condition: Maybe[dict], + after_condition: Maybe[dict], + ) -> NodeCondition: + node_condition = self._visited_scopes.get(scope) + if isinstance(node_condition, NodeCondition): + return node_condition + body = self._visit_value( + scope=scope, before_value=before_condition, after_value=after_condition + ) + node_condition = NodeCondition(scope=scope, name=condition_name, body=body) + self._visited_scopes[scope] = node_condition + return node_condition + + def _visit_conditions( + self, scope: Scope, before_conditions: Maybe[dict], after_conditions: Maybe[dict] + ) -> NodeConditions: + node_conditions = self._visited_scopes.get(scope) + if isinstance(node_conditions, NodeConditions): + return node_conditions + condition_names: list[str] = self._safe_keys_of(before_conditions, after_conditions) + conditions: list[NodeCondition] = list() + for condition_name in condition_names: + condition_scope, (before_condition, after_condition) = self._safe_access_in( + scope, condition_name, before_conditions, after_conditions + ) + condition = self._visit_condition( + scope=condition_scope, + condition_name=condition_name, + before_condition=before_condition, + after_condition=after_condition, + ) + conditions.append(condition) + node_conditions = NodeConditions(scope=scope, conditions=conditions) + self._visited_scopes[scope] = node_conditions + return node_conditions + + def _visit_output( + self, scope: Scope, name: str, before_output: Maybe[dict], after_output: Maybe[dict] + ) -> NodeOutput: + scope_value, (before_value, after_value) = self._safe_access_in( + scope, ValueKey, before_output, after_output + ) + value = self._visit_value(scope_value, before_value, after_value) + + export: Maybe[ChangeSetEntity] = Nothing + scope_export, (before_export, after_export) = self._safe_access_in( + scope, ExportKey, before_output, after_output + ) + if before_export or after_export: + export = self._visit_value(scope_export, before_export, after_export) + + # TODO: condition references should be resolved for the condition's change_type? 
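A small sketch mirroring `_normalise_depends_on_value` above (error handling simplified): a bare string becomes a one-element list, and lists are sorted so downstream traversal is deterministic.

```python
def normalise_depends_on(value: str | list[str]) -> list[str]:
    if isinstance(value, str):
        return [value]
    if isinstance(value, list):
        return sorted(value)
    raise RuntimeError(f"Invalid type for DependsOn: '{value}'")

assert normalise_depends_on("BucketA") == ["BucketA"]
assert normalise_depends_on(["QueueB", "BucketA"]) == ["BucketA", "QueueB"]
```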
+ condition_reference: Maybe[TerminalValue] = Nothing + scope_condition, (before_condition, after_condition) = self._safe_access_in( + scope, ConditionKey, before_output, after_output + ) + if before_condition or after_condition: + condition_reference = self._visit_terminal_value( + scope_condition, before_condition, after_condition + ) + + return NodeOutput( + scope=scope, + name=name, + value=value, + export=export, + conditional_reference=condition_reference, + ) + + def _visit_outputs( + self, scope: Scope, before_outputs: Maybe[dict], after_outputs: Maybe[dict] + ) -> NodeOutputs: + outputs: list[NodeOutput] = list() + output_names: list[str] = self._safe_keys_of(before_outputs, after_outputs) + for output_name in output_names: + scope_output, (before_output, after_output) = self._safe_access_in( + scope, output_name, before_outputs, after_outputs + ) + output = self._visit_output( + scope=scope_output, + name=output_name, + before_output=before_output, + after_output=after_output, + ) + outputs.append(output) + return NodeOutputs(scope=scope, outputs=outputs) + + def _model(self, before_template: Maybe[dict], after_template: Maybe[dict]) -> NodeTemplate: + root_scope = Scope() + # TODO: visit other child types + + mappings_scope, (before_mappings, after_mappings) = self._safe_access_in( + root_scope, MappingsKey, before_template, after_template + ) + mappings = self._visit_mappings( + scope=mappings_scope, before_mappings=before_mappings, after_mappings=after_mappings + ) + + parameters_scope, (before_parameters, after_parameters) = self._safe_access_in( + root_scope, ParametersKey, before_template, after_template + ) + parameters = self._visit_parameters( + scope=parameters_scope, + before_parameters=before_parameters, + after_parameters=after_parameters, + ) + + conditions_scope, (before_conditions, after_conditions) = self._safe_access_in( + root_scope, ConditionsKey, before_template, after_template + ) + conditions = self._visit_conditions( + scope=conditions_scope, + before_conditions=before_conditions, + after_conditions=after_conditions, + ) + + resources_scope, (before_resources, after_resources) = self._safe_access_in( + root_scope, ResourcesKey, before_template, after_template + ) + resources = self._visit_resources( + scope=resources_scope, + before_resources=before_resources, + after_resources=after_resources, + ) + + outputs_scope, (before_outputs, after_outputs) = self._safe_access_in( + root_scope, OutputsKey, before_template, after_template + ) + outputs = self._visit_outputs( + scope=outputs_scope, before_outputs=before_outputs, after_outputs=after_outputs + ) + + # TODO: compute the change_type of the template properly. 
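A hedged usage sketch of the model assembled by `_model`: it assumes a LocalStack checkout in which the module added by this diff is importable, and the templates are illustrative only.

```python
from localstack.services.cloudformation.engine.v2.change_set_model import ChangeSetModel

# a single resource whose TopicName property changes between revisions
before = {"Resources": {"Topic": {"Type": "AWS::SNS::Topic", "Properties": {"TopicName": "t0"}}}}
after = {"Resources": {"Topic": {"Type": "AWS::SNS::Topic", "Properties": {"TopicName": "t1"}}}}

model = ChangeSetModel(
    before_template=before,
    after_template=after,
    before_parameters=None,
    after_parameters=None,
)
node_template = model.get_update_model()
# the modified property propagates up through the resource to the section node
print(node_template.resources.change_type)  # Modified
```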
+ return NodeTemplate( + scope=root_scope, + mappings=mappings, + parameters=parameters, + conditions=conditions, + resources=resources, + outputs=outputs, + ) + + def _retrieve_condition_if_exists(self, condition_name: str) -> Maybe[NodeCondition]: + conditions_scope, (before_conditions, after_conditions) = self._safe_access_in( + Scope(), ConditionsKey, self._before_template, self._after_template + ) + before_conditions = before_conditions or dict() + after_conditions = after_conditions or dict() + if condition_name in before_conditions or condition_name in after_conditions: + condition_scope, (before_condition, after_condition) = self._safe_access_in( + conditions_scope, condition_name, before_conditions, after_conditions + ) + node_condition = self._visit_condition( + conditions_scope, + condition_name, + before_condition=before_condition, + after_condition=after_condition, + ) + return node_condition + return Nothing + + def _retrieve_parameter_if_exists(self, parameter_name: str) -> Maybe[NodeParameter]: + parameters_scope, (before_parameters, after_parameters) = self._safe_access_in( + Scope(), ParametersKey, self._before_template, self._after_template + ) + if parameter_name in before_parameters or parameter_name in after_parameters: + parameter_scope, (before_parameter, after_parameter) = self._safe_access_in( + parameters_scope, parameter_name, before_parameters, after_parameters + ) + node_parameter = self._visit_parameter( + parameter_scope, + parameter_name, + before_parameter=before_parameter, + after_parameter=after_parameter, + ) + return node_parameter + return Nothing + + def _retrieve_mapping(self, mapping_name) -> NodeMapping: + # TODO: add caching mechanism, and raise appropriate error if missing. + scope_mappings, (before_mappings, after_mappings) = self._safe_access_in( + Scope(), MappingsKey, self._before_template, self._after_template + ) + if mapping_name in before_mappings or mapping_name in after_mappings: + scope_mapping, (before_mapping, after_mapping) = self._safe_access_in( + scope_mappings, mapping_name, before_mappings, after_mappings + ) + node_mapping = self._visit_mapping( + scope_mapping, mapping_name, before_mapping, after_mapping + ) + return node_mapping + raise RuntimeError() + + def _retrieve_or_visit_resource(self, resource_name: str) -> NodeResource: + resources_scope, (before_resources, after_resources) = self._safe_access_in( + Scope(), + ResourcesKey, + self._before_template, + self._after_template, + ) + resource_scope, (before_resource, after_resource) = self._safe_access_in( + resources_scope, resource_name, before_resources, after_resources + ) + return self._visit_resource( + scope=resource_scope, + resource_name=resource_name, + before_resource=before_resource, + after_resource=after_resource, + ) + + @staticmethod + def _is_intrinsic_function_name(function_name: str) -> bool: + # TODO: are intrinsic functions soft keywords? 
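The retrieval helpers above lean on `_safe_access_in` and `_safe_keys_of`, defined just below; a standalone sketch of the pattern, with a stand-in sentinel:

```python
# Missing keys yield the Nothing sentinel instead of raising, and key unions
# are sorted so the update graph builds reproducibly.
_NOTHING = object()  # stand-in for the Nothing singleton

def safe_access(key, *objects):
    return tuple(
        obj.get(key, _NOTHING) if isinstance(obj, dict) else _NOTHING for obj in objects
    )

def safe_keys(*objects):
    keys = set()
    for obj in objects:
        if isinstance(obj, dict):
            keys.update(obj)
    return sorted(keys)

before, after = {"A": 1}, {"B": 2}
assert safe_keys(before, after) == ["A", "B"]
assert safe_access("A", before, after) == (1, _NOTHING)
```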
+ return function_name in INTRINSIC_FUNCTIONS + + @staticmethod + def _safe_access_in(scope: Scope, key: str, *objects: Maybe[dict]) -> tuple[Scope, Maybe[Any]]: + results = list() + for obj in objects: + # TODO: raise errors if not dict + if not isinstance(obj, NothingType): + results.append(obj.get(key, Nothing)) + else: + results.append(obj) + new_scope = scope.open_scope(name=key) + return new_scope, results[0] if len(objects) == 1 else tuple(results) + + @staticmethod + def _safe_keys_of(*objects: Maybe[dict]) -> list[str]: + key_set: set[str] = set() + for obj in objects: + # TODO: raise errors if not dict + if isinstance(obj, dict): + key_set.update(obj.keys()) + # The keys list is sorted to increase reproducibility of the + # update graph build process or downstream logics. + keys = sorted(key_set) + return keys + + @staticmethod + def _name_if_intrinsic_function(value: Maybe[Any]) -> Optional[str]: + if isinstance(value, dict): + keys = ChangeSetModel._safe_keys_of(value) + if len(keys) == 1: + key_name = keys[0] + if ChangeSetModel._is_intrinsic_function_name(key_name): + return key_name + return None + + @staticmethod + def _type_name_of(value: Maybe[Any]) -> str: + maybe_intrinsic_function_name = ChangeSetModel._name_if_intrinsic_function(value) + if maybe_intrinsic_function_name is not None: + return maybe_intrinsic_function_name + return type(value).__name__ + + @staticmethod + def _is_terminal(value: Any) -> bool: + return type(value) in {int, float, bool, str, None, NothingType} + + @staticmethod + def _is_object(value: Any) -> bool: + return isinstance(value, dict) and ChangeSetModel._name_if_intrinsic_function(value) is None + + @staticmethod + def _is_array(value: Any) -> bool: + return isinstance(value, list) diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_describer.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_describer.py new file mode 100644 index 0000000000000..8c5f19b900a16 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_describer.py @@ -0,0 +1,195 @@ +from __future__ import annotations + +import json +from typing import Final, Optional + +import localstack.aws.api.cloudformation as cfn_api +from localstack.services.cloudformation.engine.v2.change_set_model import ( + NodeIntrinsicFunction, + NodeProperty, + NodeResource, + PropertiesKey, + is_nothing, +) +from localstack.services.cloudformation.engine.v2.change_set_model_preproc import ( + ChangeSetModelPreproc, + PreprocEntityDelta, + PreprocProperties, + PreprocResource, +) +from localstack.services.cloudformation.v2.entities import ChangeSet + +CHANGESET_KNOWN_AFTER_APPLY: Final[str] = "{{changeSet:KNOWN_AFTER_APPLY}}" + + +class ChangeSetModelDescriber(ChangeSetModelPreproc): + _include_property_values: Final[bool] + _changes: Final[cfn_api.Changes] + + def __init__( + self, + change_set: ChangeSet, + include_property_values: bool, + ): + super().__init__(change_set=change_set) + self._include_property_values = include_property_values + self._changes = list() + + def get_changes(self) -> cfn_api.Changes: + self._changes.clear() + self.process() + return self._changes + + def _resolve_attribute(self, arguments: str | list[str], select_before: bool) -> str: + if select_before: + return super()._resolve_attribute(arguments=arguments, select_before=select_before) + + # Replicate AWS's limitations in describing change set's updated values. 
+ # Consideration: If we can properly compute the before and after value, why should we + # artificially limit the precision of our output to match AWS's? + + arguments_list: list[str] + if isinstance(arguments, str): + arguments_list = arguments.split(".") + else: + arguments_list = arguments + logical_name_of_resource = arguments_list[0] + attribute_name = arguments_list[1] + + node_resource = self._get_node_resource_for( + resource_name=logical_name_of_resource, node_template=self._node_template + ) + node_property: Optional[NodeProperty] = self._get_node_property_for( + property_name=attribute_name, node_resource=node_resource + ) + if node_property is not None: + property_delta = self.visit(node_property) + if property_delta.before == property_delta.after: + value = property_delta.after + else: + value = CHANGESET_KNOWN_AFTER_APPLY + else: + try: + value = self._after_deployed_property_value_of( + resource_logical_id=logical_name_of_resource, + property_name=attribute_name, + ) + except RuntimeError: + value = CHANGESET_KNOWN_AFTER_APPLY + + return value + + def visit_node_intrinsic_function_fn_join( + self, node_intrinsic_function: NodeIntrinsicFunction + ) -> PreprocEntityDelta: + # TODO: investigate the behaviour and impact of this logic with the user defining + # {{changeSet:KNOWN_AFTER_APPLY}} string literals as delimiters or arguments. + delta = super().visit_node_intrinsic_function_fn_join( + node_intrinsic_function=node_intrinsic_function + ) + delta_before = delta.before + if isinstance(delta_before, str) and CHANGESET_KNOWN_AFTER_APPLY in delta_before: + delta.before = CHANGESET_KNOWN_AFTER_APPLY + delta_after = delta.after + if isinstance(delta_after, str) and CHANGESET_KNOWN_AFTER_APPLY in delta_after: + delta.after = CHANGESET_KNOWN_AFTER_APPLY + return delta + + def _register_resource_change( + self, + logical_id: str, + type_: str, + physical_id: Optional[str], + before_properties: Optional[PreprocProperties], + after_properties: Optional[PreprocProperties], + ) -> None: + action = cfn_api.ChangeAction.Modify + if before_properties is None: + action = cfn_api.ChangeAction.Add + elif after_properties is None: + action = cfn_api.ChangeAction.Remove + + resource_change = cfn_api.ResourceChange() + resource_change["Action"] = action + resource_change["LogicalResourceId"] = logical_id + resource_change["ResourceType"] = type_ + if physical_id: + resource_change["PhysicalResourceId"] = physical_id + if self._include_property_values and before_properties is not None: + before_context_properties = {PropertiesKey: before_properties.properties} + before_context_properties_json_str = json.dumps(before_context_properties) + resource_change["BeforeContext"] = before_context_properties_json_str + if self._include_property_values and after_properties is not None: + after_context_properties = {PropertiesKey: after_properties.properties} + after_context_properties_json_str = json.dumps(after_context_properties) + resource_change["AfterContext"] = after_context_properties_json_str + self._changes.append( + cfn_api.Change(Type=cfn_api.ChangeType.Resource, ResourceChange=resource_change) + ) + + def _describe_resource_change( + self, name: str, before: Optional[PreprocResource], after: Optional[PreprocResource] + ) -> None: + if before == after: + # unchanged: nothing to do. + return + if not is_nothing(before) and not is_nothing(after): + # Case: change on same type. + if before.resource_type == after.resource_type: + # Register a Modified if changed. 
+ self._register_resource_change( + logical_id=name, + physical_id=before.physical_resource_id, + type_=before.resource_type, + before_properties=before.properties, + after_properties=after.properties, + ) + # Case: type migration. + # TODO: Add test to assert that on type change the resources are replaced. + else: + # Register a Removed for the previous type. + self._register_resource_change( + logical_id=name, + physical_id=before.physical_resource_id, + type_=before.resource_type, + before_properties=before.properties, + after_properties=None, + ) + # Register a Create for the next type. + self._register_resource_change( + logical_id=name, + physical_id=None, + type_=after.resource_type, + before_properties=None, + after_properties=after.properties, + ) + elif not is_nothing(before): + # Case: removal + self._register_resource_change( + logical_id=name, + physical_id=before.physical_resource_id, + type_=before.resource_type, + before_properties=before.properties, + after_properties=None, + ) + elif not is_nothing(after): + # Case: addition + self._register_resource_change( + logical_id=name, + physical_id=None, + type_=after.resource_type, + before_properties=None, + after_properties=after.properties, + ) + + def visit_node_resource( + self, node_resource: NodeResource + ) -> PreprocEntityDelta[PreprocResource, PreprocResource]: + delta = super().visit_node_resource(node_resource=node_resource) + after_resource = delta.after + if not is_nothing(after_resource) and after_resource.physical_resource_id is None: + after_resource.physical_resource_id = CHANGESET_KNOWN_AFTER_APPLY + self._describe_resource_change( + name=node_resource.name, before=delta.before, after=delta.after + ) + return delta diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_executor.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_executor.py new file mode 100644 index 0000000000000..d80b7e5ecf067 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_executor.py @@ -0,0 +1,416 @@ +import copy +import logging +import uuid +from dataclasses import dataclass +from typing import Final, Optional + +from localstack.aws.api.cloudformation import ( + ChangeAction, + ResourceStatus, + StackStatus, +) +from localstack.constants import INTERNAL_AWS_SECRET_ACCESS_KEY +from localstack.services.cloudformation.engine.parameters import resolve_ssm_parameter +from localstack.services.cloudformation.engine.v2.change_set_model import ( + NodeDependsOn, + NodeOutput, + NodeParameter, + NodeResource, + is_nothing, +) +from localstack.services.cloudformation.engine.v2.change_set_model_preproc import ( + ChangeSetModelPreproc, + PreprocEntityDelta, + PreprocOutput, + PreprocProperties, + PreprocResource, +) +from localstack.services.cloudformation.resource_provider import ( + Credentials, + OperationStatus, + ProgressEvent, + ResourceProviderExecutor, + ResourceProviderPayload, +) +from localstack.services.cloudformation.v2.entities import ChangeSet + +LOG = logging.getLogger(__name__) + + +@dataclass +class ChangeSetModelExecutorResult: + resources: dict + parameters: dict + outputs: dict + + +class ChangeSetModelExecutor(ChangeSetModelPreproc): + # TODO: add typing for resolved resources and parameters. 
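A sketch of the action selection in `_describe_resource_change` above, with plain dicts standing in for the `cfn_api` typed dicts: a type migration is reported as a Remove of the old type followed by an Add of the new one.

```python
def describe(logical_id: str, before_type: str, after_type: str) -> list[dict]:
    if before_type == after_type:
        return [{"Action": "Modify", "LogicalResourceId": logical_id, "ResourceType": before_type}]
    return [
        {"Action": "Remove", "LogicalResourceId": logical_id, "ResourceType": before_type},
        {"Action": "Add", "LogicalResourceId": logical_id, "ResourceType": after_type},
    ]

changes = describe("Store", "AWS::S3::Bucket", "AWS::SNS::Topic")
assert [c["Action"] for c in changes] == ["Remove", "Add"]
```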
+    resources: Final[dict]
+    outputs: Final[dict]
+    resolved_parameters: Final[dict]
+
+    def __init__(self, change_set: ChangeSet):
+        super().__init__(change_set=change_set)
+        self.resources = dict()
+        self.outputs = dict()
+        self.resolved_parameters = dict()
+
+    # TODO: use a structured type for the return value
+    def execute(self) -> ChangeSetModelExecutorResult:
+        self.process()
+        return ChangeSetModelExecutorResult(
+            resources=self.resources, parameters=self.resolved_parameters, outputs=self.outputs
+        )
+
+    def visit_node_parameter(self, node_parameter: NodeParameter) -> PreprocEntityDelta:
+        delta = super().visit_node_parameter(node_parameter)
+
+        # handle dynamic references, e.g. references to SSM parameters
+        # TODO: support more parameter types
+        parameter_type: str = node_parameter.type_.value
+        if parameter_type.startswith("AWS::SSM"):
+            if parameter_type in [
+                "AWS::SSM::Parameter::Value<String>",
+                "AWS::SSM::Parameter::Value<List<String>>",
+                "AWS::SSM::Parameter::Value<CommaDelimitedList>",
+            ]:
+                delta.after = resolve_ssm_parameter(
+                    account_id=self._change_set.account_id,
+                    region_name=self._change_set.region_name,
+                    stack_parameter_value=delta.after,
+                )
+            else:
+                raise Exception(f"Unsupported stack parameter type: {parameter_type}")
+
+        self.resolved_parameters[node_parameter.name] = delta.after
+        return delta
+
+    def _after_deployed_property_value_of(
+        self, resource_logical_id: str, property_name: str
+    ) -> str:
+        after_resolved_resources = self.resources
+        return self._deployed_property_value_of(
+            resource_logical_id=resource_logical_id,
+            property_name=property_name,
+            resolved_resources=after_resolved_resources,
+        )
+
+    def _after_resource_physical_id(self, resource_logical_id: str) -> str:
+        after_resolved_resources = self.resources
+        return self._resource_physical_resource_id_from(
+            logical_resource_id=resource_logical_id, resolved_resources=after_resolved_resources
+        )
+
+    def visit_node_depends_on(self, node_depends_on: NodeDependsOn) -> PreprocEntityDelta:
+        array_identifiers_delta = super().visit_node_depends_on(node_depends_on=node_depends_on)
+
+        # Visit depends_on resources before returning.
+        depends_on_resource_logical_ids: set[str] = set()
+        if array_identifiers_delta.before:
+            depends_on_resource_logical_ids.update(array_identifiers_delta.before)
+        if array_identifiers_delta.after:
+            depends_on_resource_logical_ids.update(array_identifiers_delta.after)
+        for depends_on_resource_logical_id in depends_on_resource_logical_ids:
+            node_resource = self._get_node_resource_for(
+                resource_name=depends_on_resource_logical_id, node_template=self._node_template
+            )
+            self.visit_node_resource(node_resource)
+
+        return array_identifiers_delta
+
+    def visit_node_resource(
+        self, node_resource: NodeResource
+    ) -> PreprocEntityDelta[PreprocResource, PreprocResource]:
+        """
+        Overrides the default preprocessing for NodeResource objects by annotating the
+        `after` delta with the physical resource ID, if side effects resulted in an update.
+        """
+        delta = super().visit_node_resource(node_resource=node_resource)
+        before = delta.before
+        after = delta.after
+
+        if before != after:
+            # There are changes for this resource.
+            self._execute_resource_change(name=node_resource.name, before=before, after=after)
+        else:
+            # There are no updates for this resource; iff the resource was previously
+            # deployed, then the resolved details are copied in the current state for
+            # references or other downstream operations.
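+            # e.g. an unchanged, already-deployed "MyBucket" (hypothetical) is
+            # copied into self.resources so that later {"Ref": "MyBucket"} and
+            # Fn::GetAtt lookups against it still resolve.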
+ if not is_nothing(before): + before_logical_id = delta.before.logical_id + before_resource = self._before_resolved_resources.get(before_logical_id, dict()) + self.resources[before_logical_id] = before_resource + + # Update the latest version of this resource for downstream references. + if not is_nothing(after): + after_logical_id = after.logical_id + after_physical_id: str = self._after_resource_physical_id( + resource_logical_id=after_logical_id + ) + after.physical_resource_id = after_physical_id + return delta + + def visit_node_output( + self, node_output: NodeOutput + ) -> PreprocEntityDelta[PreprocOutput, PreprocOutput]: + delta = super().visit_node_output(node_output=node_output) + after = delta.after + if is_nothing(after) or (isinstance(after, PreprocOutput) and after.condition is False): + return delta + self.outputs[delta.after.name] = delta.after.value + return delta + + def _execute_resource_change( + self, name: str, before: Optional[PreprocResource], after: Optional[PreprocResource] + ) -> None: + # Changes are to be made about this resource. + # TODO: this logic is a POC and should be revised. + if not is_nothing(before) and not is_nothing(after): + # Case: change on same type. + if before.resource_type == after.resource_type: + # Register a Modified if changed. + # XXX hacky, stick the previous resources' properties into the payload + before_properties = self._merge_before_properties(name, before) + + self._execute_resource_action( + action=ChangeAction.Modify, + logical_resource_id=name, + resource_type=before.resource_type, + before_properties=before_properties, + after_properties=after.properties, + ) + # Case: type migration. + # TODO: Add test to assert that on type change the resources are replaced. + else: + # XXX hacky, stick the previous resources' properties into the payload + before_properties = self._merge_before_properties(name, before) + # Register a Removed for the previous type. + self._execute_resource_action( + action=ChangeAction.Remove, + logical_resource_id=name, + resource_type=before.resource_type, + before_properties=before_properties, + after_properties=None, + ) + # Register a Create for the next type. 
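+                # e.g. changing a logical id's Type from AWS::SQS::Queue to
+                # AWS::SNS::Topic (hypothetical) is executed as the Remove
+                # above followed by the Add below.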
+            self._execute_resource_action(
+                action=ChangeAction.Add,
+                logical_resource_id=name,
+                resource_type=after.resource_type,
+                before_properties=None,
+                after_properties=after.properties,
+            )
+        elif not is_nothing(before):
+            # Case: removal
+            # XXX hacky, stick the previous resources' properties into the payload
+            before_properties = self._merge_before_properties(name, before)
+
+            self._execute_resource_action(
+                action=ChangeAction.Remove,
+                logical_resource_id=name,
+                resource_type=before.resource_type,
+                before_properties=before_properties,
+                after_properties=None,
+            )
+        elif not is_nothing(after):
+            # Case: addition
+            self._execute_resource_action(
+                action=ChangeAction.Add,
+                logical_resource_id=name,
+                resource_type=after.resource_type,
+                before_properties=None,
+                after_properties=after.properties,
+            )
+
+    def _merge_before_properties(
+        self, name: str, preproc_resource: PreprocResource
+    ) -> PreprocProperties:
+        if previous_resource_properties := self._change_set.stack.resolved_resources.get(
+            name, {}
+        ).get("Properties"):
+            return PreprocProperties(properties=previous_resource_properties)
+
+        # XXX fall back to returning the input value
+        return copy.deepcopy(preproc_resource.properties)
+
+    def _execute_resource_action(
+        self,
+        action: ChangeAction,
+        logical_resource_id: str,
+        resource_type: str,
+        before_properties: Optional[PreprocProperties],
+        after_properties: Optional[PreprocProperties],
+    ) -> None:
+        LOG.debug("Executing resource action: %s for resource '%s'", action, logical_resource_id)
+        resource_provider_executor = ResourceProviderExecutor(
+            stack_name=self._change_set.stack.stack_name, stack_id=self._change_set.stack.stack_id
+        )
+        payload = self.create_resource_provider_payload(
+            action=action,
+            logical_resource_id=logical_resource_id,
+            resource_type=resource_type,
+            before_properties=before_properties,
+            after_properties=after_properties,
+        )
+        resource_provider = resource_provider_executor.try_load_resource_provider(resource_type)
+
+        extra_resource_properties = {}
+        if resource_provider is not None:
+            # TODO: stack events
+            try:
+                event = resource_provider_executor.deploy_loop(
+                    resource_provider, extra_resource_properties, payload
+                )
+            except Exception as e:
+                reason = str(e)
+                LOG.warning(
+                    "Resource provider operation failed: '%s'",
+                    reason,
+                    exc_info=LOG.isEnabledFor(logging.DEBUG),
+                )
+                stack = self._change_set.stack
+                stack_status = stack.status
+                if stack_status == StackStatus.CREATE_IN_PROGRESS:
+                    stack.set_stack_status(StackStatus.CREATE_FAILED, reason=reason)
+                elif stack_status == StackStatus.UPDATE_IN_PROGRESS:
+                    stack.set_stack_status(StackStatus.UPDATE_FAILED, reason=reason)
+                # update resource status
+                stack.set_resource_status(
+                    logical_resource_id=logical_resource_id,
+                    # TODO
+                    physical_resource_id="",
+                    resource_type=resource_type,
+                    status=ResourceStatus.CREATE_FAILED
+                    if action == ChangeAction.Add
+                    else ResourceStatus.UPDATE_FAILED,
+                    resource_status_reason=reason,
+                )
+                return
+        else:
+            event = ProgressEvent(OperationStatus.SUCCESS, resource_model={})
+
+        self.resources.setdefault(logical_resource_id, {"Properties": {}})
+        match event.status:
+            case OperationStatus.SUCCESS:
+                # merge the resources state with the external state
+                # TODO: this is likely a duplicate of updating from extra_resource_properties
+
+                # TODO: add typing
+                # TODO: avoid the use of string literals for sampling from the object, use typed classes instead
+                # TODO: avoid sampling from resources and use tmp var reference
+                # TODO: add utils functions to abstract this logic away (resource.update(..))
+                # TODO: avoid the use of setdefault (debuggability/readability)
+                # TODO: review the use of merge
+
+                self.resources[logical_resource_id]["Properties"].update(event.resource_model)
+                self.resources[logical_resource_id].update(extra_resource_properties)
+                # XXX for legacy delete_stack compatibility
+                self.resources[logical_resource_id]["LogicalResourceId"] = logical_resource_id
+                self.resources[logical_resource_id]["Type"] = resource_type
+
+                # TODO: review why the physical id is returned as None during updates
+                # TODO: abstract this in member function of resource classes instead
+                physical_resource_id = None
+                try:
+                    physical_resource_id = self._after_resource_physical_id(logical_resource_id)
+                except RuntimeError:
+                    # The physical id is missing or is set to None, which is invalid.
+                    pass
+                if physical_resource_id is None:
+                    # The physical resource id is None after an update that didn't rewrite the
+                    # resource; the previous resource id is therefore still the current physical
+                    # id of this resource.
+                    physical_resource_id = self._before_resource_physical_id(logical_resource_id)
+                self.resources[logical_resource_id]["PhysicalResourceId"] = physical_resource_id
+
+                self._change_set.stack.set_resource_status(
+                    logical_resource_id=logical_resource_id,
+                    physical_resource_id=physical_resource_id,
+                    resource_type=resource_type,
+                    status=ResourceStatus.CREATE_COMPLETE
+                    if action == ChangeAction.Add
+                    else ResourceStatus.UPDATE_COMPLETE,
+                )
+
+            case OperationStatus.FAILED:
+                reason = event.message
+                LOG.warning(
+                    "Resource provider operation failed: '%s'",
+                    reason,
+                )
+                # TODO: duplication
+                stack = self._change_set.stack
+                stack_status = stack.status
+                if stack_status == StackStatus.CREATE_IN_PROGRESS:
+                    stack.set_stack_status(StackStatus.CREATE_FAILED, reason=reason)
+                elif stack_status == StackStatus.UPDATE_IN_PROGRESS:
+                    stack.set_stack_status(StackStatus.UPDATE_FAILED, reason=reason)
+                else:
+                    raise NotImplementedError(f"Unhandled stack status: '{stack.status}'")
+                stack.set_resource_status(
+                    logical_resource_id=logical_resource_id,
+                    # TODO
+                    physical_resource_id="",
+                    resource_type=resource_type,
+                    status=ResourceStatus.CREATE_FAILED
+                    if action == ChangeAction.Add
+                    else ResourceStatus.UPDATE_FAILED,
+                    resource_status_reason=reason,
+                )
+            case other:
+                raise NotImplementedError(f"Event status '{other}' not handled")
+
+    def create_resource_provider_payload(
+        self,
+        action: ChangeAction,
+        logical_resource_id: str,
+        resource_type: str,
+        before_properties: Optional[PreprocProperties],
+        after_properties: Optional[PreprocProperties],
+    ) -> Optional[ResourceProviderPayload]:
+        # FIXME: use proper credentials
+        creds: Credentials = {
+            "accessKeyId": self._change_set.stack.account_id,
+            "secretAccessKey": INTERNAL_AWS_SECRET_ACCESS_KEY,
+            "sessionToken": "",
+        }
+        before_properties_value = before_properties.properties if before_properties else None
+        after_properties_value = after_properties.properties if after_properties else None
+
+        match action:
+            case ChangeAction.Add:
+                resource_properties = after_properties_value or {}
+                previous_resource_properties = None
+            case ChangeAction.Modify | ChangeAction.Dynamic:
+                resource_properties = after_properties_value or {}
+                previous_resource_properties = before_properties_value or {}
+            case ChangeAction.Remove:
+                resource_properties = before_properties_value or {}
+                previous_resource_properties = None
+            case _:
+                raise 
NotImplementedError(f"Action '{action}' not handled") + + resource_provider_payload: ResourceProviderPayload = { + "awsAccountId": self._change_set.stack.account_id, + "callbackContext": {}, + "stackId": self._change_set.stack.stack_name, + "resourceType": resource_type, + "resourceTypeVersion": "000000", + # TODO: not actually a UUID + "bearerToken": str(uuid.uuid4()), + "region": self._change_set.stack.region_name, + "action": str(action), + "requestData": { + "logicalResourceId": logical_resource_id, + "resourceProperties": resource_properties, + "previousResourceProperties": previous_resource_properties, + "callerCredentials": creds, + "providerCredentials": creds, + "systemTags": {}, + "previousSystemTags": {}, + "stackTags": {}, + "previousStackTags": {}, + }, + } + return resource_provider_payload diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_preproc.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_preproc.py new file mode 100644 index 0000000000000..5fc274f0e5107 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_preproc.py @@ -0,0 +1,1148 @@ +from __future__ import annotations + +import base64 +import re +from typing import Any, Final, Generic, Optional, TypeVar + +from botocore.exceptions import ClientError + +from localstack.aws.api.ec2 import AvailabilityZoneList, DescribeAvailabilityZonesResult +from localstack.aws.connect import connect_to +from localstack.services.cloudformation.engine.transformers import ( + Transformer, + execute_macro, + transformers, +) +from localstack.services.cloudformation.engine.v2.change_set_model import ( + ChangeSetEntity, + ChangeType, + Maybe, + NodeArray, + NodeCondition, + NodeDependsOn, + NodeDivergence, + NodeIntrinsicFunction, + NodeMapping, + NodeObject, + NodeOutput, + NodeOutputs, + NodeParameter, + NodeProperties, + NodeProperty, + NodeResource, + NodeTemplate, + Nothing, + Scope, + TerminalValue, + TerminalValueCreated, + TerminalValueModified, + TerminalValueRemoved, + TerminalValueUnchanged, + is_nothing, +) +from localstack.services.cloudformation.engine.v2.change_set_model_visitor import ( + ChangeSetModelVisitor, +) +from localstack.services.cloudformation.stores import get_cloudformation_store +from localstack.services.cloudformation.v2.entities import ChangeSet +from localstack.utils.aws.arns import get_partition +from localstack.utils.run import to_str +from localstack.utils.strings import to_bytes +from localstack.utils.urls import localstack_host + +_AWS_URL_SUFFIX = localstack_host().host # The value in AWS is "amazonaws.com" + +_PSEUDO_PARAMETERS: Final[set[str]] = { + "AWS::Partition", + "AWS::AccountId", + "AWS::Region", + "AWS::StackName", + "AWS::StackId", + "AWS::URLSuffix", + "AWS::NoValue", + "AWS::NotificationARNs", +} + +TBefore = TypeVar("TBefore") +TAfter = TypeVar("TAfter") + + +class PreprocEntityDelta(Generic[TBefore, TAfter]): + before: Maybe[TBefore] + after: Maybe[TAfter] + + def __init__(self, before: Maybe[TBefore] = Nothing, after: Maybe[TAfter] = Nothing): + self.before = before + self.after = after + + def __eq__(self, other): + if not isinstance(other, PreprocEntityDelta): + return False + return self.before == other.before and self.after == other.after + + +class PreprocProperties: + properties: dict[str, Any] + + def __init__(self, properties: dict[str, Any]): + self.properties = properties + + def __eq__(self, other): + if not isinstance(other, PreprocProperties): + return 
False + return self.properties == other.properties + + +class PreprocResource: + logical_id: str + physical_resource_id: Optional[str] + condition: Optional[bool] + resource_type: str + properties: PreprocProperties + depends_on: Optional[list[str]] + + def __init__( + self, + logical_id: str, + physical_resource_id: str, + condition: Optional[bool], + resource_type: str, + properties: PreprocProperties, + depends_on: Optional[list[str]], + ): + self.logical_id = logical_id + self.physical_resource_id = physical_resource_id + self.condition = condition + self.resource_type = resource_type + self.properties = properties + self.depends_on = depends_on + + @staticmethod + def _compare_conditions(c1: bool, c2: bool): + # The lack of condition equates to a true condition. + c1 = c1 if isinstance(c1, bool) else True + c2 = c2 if isinstance(c2, bool) else True + return c1 == c2 + + def __eq__(self, other): + if not isinstance(other, PreprocResource): + return False + return all( + [ + self.logical_id == other.logical_id, + self._compare_conditions(self.condition, other.condition), + self.resource_type == other.resource_type, + self.properties == other.properties, + ] + ) + + +class PreprocOutput: + name: str + value: Any + export: Optional[Any] + condition: Optional[bool] + + def __init__(self, name: str, value: Any, export: Optional[Any], condition: Optional[bool]): + self.name = name + self.value = value + self.export = export + self.condition = condition + + def __eq__(self, other): + if not isinstance(other, PreprocOutput): + return False + return all( + [ + self.name == other.name, + self.value == other.value, + self.export == other.export, + self.condition == other.condition, + ] + ) + + +class ChangeSetModelPreproc(ChangeSetModelVisitor): + _change_set: Final[ChangeSet] + _node_template: Final[NodeTemplate] + _before_resolved_resources: Final[dict] + _processed: dict[Scope, Any] + + def __init__(self, change_set: ChangeSet): + self._change_set = change_set + self._node_template = change_set.update_graph + self._before_resolved_resources = change_set.stack.resolved_resources + self._processed = dict() + + def process(self) -> None: + self._processed.clear() + self.visit(self._node_template) + + def _get_node_resource_for( + self, resource_name: str, node_template: NodeTemplate + ) -> NodeResource: + # TODO: this could be improved with hashmap lookups if the Node contained bindings and not lists. + for node_resource in node_template.resources.resources: + if node_resource.name == resource_name: + self.visit(node_resource) + return node_resource + raise RuntimeError(f"No resource '{resource_name}' was found") + + def _get_node_property_for( + self, property_name: str, node_resource: NodeResource + ) -> Optional[NodeProperty]: + # TODO: this could be improved with hashmap lookups if the Node contained bindings and not lists. + for node_property in node_resource.properties.properties: + if node_property.name == property_name: + self.visit(node_property) + return node_property + return None + + def _deployed_property_value_of( + self, resource_logical_id: str, property_name: str, resolved_resources: dict + ) -> Any: + # TODO: typing around resolved resources is needed and should be reflected here. + + # Before we can obtain deployed value for a resource, we need to first ensure to + # process the resource if this wasn't processed already. Ideally, values should only + # be accessible through delta objects, to ensure computation is always complete at + # every level. 
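+        # Sketch (hypothetical template): for Fn::GetAtt ["MyQueue", "Arn"] this
+        # visits the MyQueue node first and then reads
+        # resolved_resources["MyQueue"]["Properties"]["Arn"].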
+        _ = self._get_node_resource_for(
+            resource_name=resource_logical_id, node_template=self._node_template
+        )
+        resolved_resource = resolved_resources.get(resource_logical_id)
+        if resolved_resource is None:
+            raise RuntimeError(
+                f"No deployed instances of resource '{resource_logical_id}' were found"
+            )
+        properties = resolved_resource.get("Properties", dict())
+        property_value: Optional[Any] = properties.get(property_name)
+        if property_value is None:
+            raise RuntimeError(
+                f"No '{property_name}' was found for deployed resource '{resource_logical_id}'"
+            )
+        return property_value
+
+    def _before_deployed_property_value_of(
+        self, resource_logical_id: str, property_name: str
+    ) -> Any:
+        return self._deployed_property_value_of(
+            resource_logical_id=resource_logical_id,
+            property_name=property_name,
+            resolved_resources=self._before_resolved_resources,
+        )
+
+    def _after_deployed_property_value_of(
+        self, resource_logical_id: str, property_name: str
+    ) -> Optional[str]:
+        return self._before_deployed_property_value_of(
+            resource_logical_id=resource_logical_id, property_name=property_name
+        )
+
+    def _get_node_mapping(self, map_name: str) -> NodeMapping:
+        mappings: list[NodeMapping] = self._node_template.mappings.mappings
+        # TODO: another scenario suggesting property lookups might be preferable.
+        for mapping in mappings:
+            if mapping.name == map_name:
+                self.visit(mapping)
+                return mapping
+        raise RuntimeError(f"Undefined '{map_name}' mapping")
+
+    def _get_node_parameter_if_exists(self, parameter_name: str) -> Maybe[NodeParameter]:
+        parameters: list[NodeParameter] = self._node_template.parameters.parameters
+        # TODO: another scenario suggesting property lookups might be preferable.
+        for parameter in parameters:
+            if parameter.name == parameter_name:
+                self.visit(parameter)
+                return parameter
+        return Nothing
+
+    def _get_node_condition_if_exists(self, condition_name: str) -> Maybe[NodeCondition]:
+        conditions: list[NodeCondition] = self._node_template.conditions.conditions
+        # TODO: another scenario suggesting property lookups might be preferable.
+        for condition in conditions:
+            if condition.name == condition_name:
+                self.visit(condition)
+                return condition
+        return Nothing
+
+    def _resolve_condition(self, logical_id: str) -> PreprocEntityDelta:
+        node_condition = self._get_node_condition_if_exists(condition_name=logical_id)
+        if isinstance(node_condition, NodeCondition):
+            condition_delta = self.visit(node_condition)
+            return condition_delta
+        raise RuntimeError(f"No condition '{logical_id}' was found.")
+
+    def _resolve_pseudo_parameter(self, pseudo_parameter_name: str) -> Any:
+        match pseudo_parameter_name:
+            case "AWS::Partition":
+                return get_partition(self._change_set.region_name)
+            case "AWS::AccountId":
+                return self._change_set.stack.account_id
+            case "AWS::Region":
+                return self._change_set.stack.region_name
+            case "AWS::StackName":
+                return self._change_set.stack.stack_name
+            case "AWS::StackId":
+                return self._change_set.stack.stack_id
+            case "AWS::URLSuffix":
+                return _AWS_URL_SUFFIX
+            case "AWS::NoValue":
+                return None
+            case _:
+                raise RuntimeError(f"The use of '{pseudo_parameter_name}' is currently unsupported")
+
+    def _resolve_reference(self, logical_id: str) -> PreprocEntityDelta:
+        if logical_id in _PSEUDO_PARAMETERS:
+            pseudo_parameter_value = self._resolve_pseudo_parameter(
+                pseudo_parameter_name=logical_id
+            )
+            # Pseudo parameters are constants within the lifecycle of a template.
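+            # e.g. {"Ref": "AWS::Region"} resolves to the same region string on
+            # both sides of the delta.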
+ return PreprocEntityDelta(before=pseudo_parameter_value, after=pseudo_parameter_value) + + node_parameter = self._get_node_parameter_if_exists(parameter_name=logical_id) + if isinstance(node_parameter, NodeParameter): + parameter_delta = self.visit(node_parameter) + return parameter_delta + + node_resource = self._get_node_resource_for( + resource_name=logical_id, node_template=self._node_template + ) + resource_delta = self.visit(node_resource) + before = resource_delta.before + after = resource_delta.after + return PreprocEntityDelta(before=before, after=after) + + def _resolve_mapping( + self, map_name: str, top_level_key: str, second_level_key + ) -> PreprocEntityDelta: + # TODO: add support for nested intrinsic functions, and KNOWN AFTER APPLY logical ids. + node_mapping: NodeMapping = self._get_node_mapping(map_name=map_name) + top_level_value = node_mapping.bindings.bindings.get(top_level_key) + if not isinstance(top_level_value, NodeObject): + raise RuntimeError() + second_level_value = top_level_value.bindings.get(second_level_key) + mapping_value_delta = self.visit(second_level_value) + return mapping_value_delta + + def visit(self, change_set_entity: ChangeSetEntity) -> PreprocEntityDelta: + scope = change_set_entity.scope + if scope in self._processed: + delta = self._processed[scope] + return delta + delta = super().visit(change_set_entity=change_set_entity) + self._processed[scope] = delta + return delta + + def visit_terminal_value_modified( + self, terminal_value_modified: TerminalValueModified + ) -> PreprocEntityDelta: + return PreprocEntityDelta( + before=terminal_value_modified.value, + after=terminal_value_modified.modified_value, + ) + + def visit_terminal_value_created( + self, terminal_value_created: TerminalValueCreated + ) -> PreprocEntityDelta: + return PreprocEntityDelta(after=terminal_value_created.value) + + def visit_terminal_value_removed( + self, terminal_value_removed: TerminalValueRemoved + ) -> PreprocEntityDelta: + return PreprocEntityDelta(before=terminal_value_removed.value) + + def visit_terminal_value_unchanged( + self, terminal_value_unchanged: TerminalValueUnchanged + ) -> PreprocEntityDelta: + return PreprocEntityDelta( + before=terminal_value_unchanged.value, + after=terminal_value_unchanged.value, + ) + + def visit_node_divergence(self, node_divergence: NodeDivergence) -> PreprocEntityDelta: + before_delta = self.visit(node_divergence.value) + after_delta = self.visit(node_divergence.divergence) + return PreprocEntityDelta(before=before_delta.before, after=after_delta.after) + + def visit_node_object(self, node_object: NodeObject) -> PreprocEntityDelta: + node_change_type = node_object.change_type + before = dict() if node_change_type != ChangeType.CREATED else Nothing + after = dict() if node_change_type != ChangeType.REMOVED else Nothing + for name, change_set_entity in node_object.bindings.items(): + delta: PreprocEntityDelta = self.visit(change_set_entity=change_set_entity) + delta_before = delta.before + delta_after = delta.after + if not is_nothing(before) and not is_nothing(delta_before) and delta_before is not None: + before[name] = delta_before + if not is_nothing(after) and not is_nothing(delta_after) and delta_after is not None: + after[name] = delta_after + return PreprocEntityDelta(before=before, after=after) + + def _resolve_attribute(self, arguments: str | list[str], select_before: bool) -> str: + # TODO: add arguments validation. 
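+        # Fn::GetAtt arguments arrive either as the string "MyBucket.Arn" or as
+        # the list ["MyBucket", "Arn"] (hypothetical names); both normalise to
+        # the same [logical_name, attribute_name] pair below.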
+ arguments_list: list[str] + if isinstance(arguments, str): + arguments_list = arguments.split(".") + else: + arguments_list = arguments + logical_name_of_resource = arguments_list[0] + attribute_name = arguments_list[1] + + node_resource = self._get_node_resource_for( + resource_name=logical_name_of_resource, node_template=self._node_template + ) + node_property: Optional[NodeProperty] = self._get_node_property_for( + property_name=attribute_name, node_resource=node_resource + ) + if node_property is not None: + # The property is statically defined in the template and its value can be computed. + property_delta = self.visit(node_property) + value = property_delta.before if select_before else property_delta.after + else: + # The property is not statically defined and must therefore be available in + # the properties deployed set. + if select_before: + value = self._before_deployed_property_value_of( + resource_logical_id=logical_name_of_resource, + property_name=attribute_name, + ) + else: + value = self._after_deployed_property_value_of( + resource_logical_id=logical_name_of_resource, + property_name=attribute_name, + ) + return value + + def visit_node_intrinsic_function_fn_get_att( + self, node_intrinsic_function: NodeIntrinsicFunction + ) -> PreprocEntityDelta: + # TODO: validate the return value according to the spec. + arguments_delta = self.visit(node_intrinsic_function.arguments) + before_arguments: Maybe[str | list[str]] = arguments_delta.before + after_arguments: Maybe[str | list[str]] = arguments_delta.after + + before = Nothing + if not is_nothing(before_arguments): + before = self._resolve_attribute(arguments=before_arguments, select_before=True) + + after = Nothing + if not is_nothing(after_arguments): + after = self._resolve_attribute(arguments=after_arguments, select_before=False) + + return PreprocEntityDelta(before=before, after=after) + + def visit_node_intrinsic_function_fn_equals( + self, node_intrinsic_function: NodeIntrinsicFunction + ) -> PreprocEntityDelta: + arguments_delta = self.visit(node_intrinsic_function.arguments) + before_values = arguments_delta.before + after_values = arguments_delta.after + before = Nothing + if before_values: + before = before_values[0] == before_values[1] + after = Nothing + if after_values: + after = after_values[0] == after_values[1] + return PreprocEntityDelta(before=before, after=after) + + def visit_node_intrinsic_function_fn_if( + self, node_intrinsic_function: NodeIntrinsicFunction + ) -> PreprocEntityDelta: + arguments_delta = self.visit(node_intrinsic_function.arguments) + arguments_before = arguments_delta.before + arguments_after = arguments_delta.after + + def _compute_delta_for_if_statement(args: list[Any]) -> PreprocEntityDelta: + condition_name = args[0] + boolean_expression_delta = self._resolve_condition(logical_id=condition_name) + return PreprocEntityDelta( + before=args[1] if boolean_expression_delta.before else args[2], + after=args[1] if boolean_expression_delta.after else args[2], + ) + + # TODO: add support for this being created or removed. 
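+        # Sketch (hypothetical): {"Fn::If": ["IsProd", "m5.large", "t3.micro"]}
+        # yields args[1] where the named condition resolves truthy and args[2]
+        # otherwise, independently for the before and after states.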
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before_outcome_delta = _compute_delta_for_if_statement(arguments_before)
+            before = before_outcome_delta.before
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after_outcome_delta = _compute_delta_for_if_statement(arguments_after)
+            after = after_outcome_delta.after
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_and(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        def _compute_fn_and(args: list[bool]):
+            result = all(args)
+            return result
+
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before = _compute_fn_and(arguments_before)
+
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after = _compute_fn_and(arguments_after)
+
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_or(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        def _compute_fn_or(args: list[bool]):
+            result = any(args)
+            return result
+
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before = _compute_fn_or(arguments_before)
+
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after = _compute_fn_or(arguments_after)
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_not(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        before_condition = arguments_delta.before
+        after_condition = arguments_delta.after
+        before = Nothing
+        if not is_nothing(before_condition):
+            before_condition_outcome = before_condition[0]
+            before = not before_condition_outcome
+        after = Nothing
+        if not is_nothing(after_condition):
+            after_condition_outcome = after_condition[0]
+            after = not after_condition_outcome
+        # Implicit change type computation.
+        return PreprocEntityDelta(before=before, after=after)
+
+    def _compute_fn_transform(self, args: dict[str, Any]) -> Any:
+        # TODO: add typing to arguments before this level.
+        # TODO: add schema validation
+        # TODO: add support for other transform types
+
+        account_id = self._change_set.account_id
+        region_name = self._change_set.region_name
+        transform_name: str = args.get("Name")
+        if not isinstance(transform_name, str):
+            raise RuntimeError("Invalid or missing Fn::Transform 'Name' argument")
+        transform_parameters: dict = args.get("Parameters")
+        if not isinstance(transform_parameters, dict):
+            raise RuntimeError("Invalid or missing Fn::Transform 'Parameters' argument")
+
+        if transform_name in transformers:
+            # TODO: port and refactor this 'transformers' logic to this package.
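+            # Sketch: a name registered in `transformers` (e.g. AWS::Include,
+            # if registered) dispatches to an in-process Transformer instance;
+            # the macro store below covers user-defined macros instead.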
+            builtin_transformer_class = transformers[transform_name]
+            builtin_transformer: Transformer = builtin_transformer_class()
+            transform_output: Any = builtin_transformer.transform(
+                account_id=account_id, region_name=region_name, parameters=transform_parameters
+            )
+            return transform_output
+
+        macros_store = get_cloudformation_store(
+            account_id=account_id, region_name=region_name
+        ).macros
+        if transform_name in macros_store:
+            # TODO: this formatting of stack parameters is odd but required to integrate with v1 execute_macro util.
+            #  consider porting this utils and passing the plain list of parameters instead.
+            stack_parameters = {
+                parameter["ParameterKey"]: parameter
+                for parameter in self._change_set.stack.parameters
+            }
+            transform_output: Any = execute_macro(
+                account_id=account_id,
+                region_name=region_name,
+                parsed_template=dict(),  # TODO: review the requirements for this argument.
+                macro=args,  # TODO: review support for non dict bindings (v1).
+                stack_parameters=stack_parameters,
+                transformation_parameters=transform_parameters,
+                is_intrinsic=True,
+            )
+            return transform_output
+
+        raise RuntimeError(
+            f"Unsupported transform function '{transform_name}' in '{self._change_set.stack.stack_name}'"
+        )
+
+    def visit_node_intrinsic_function_fn_transform(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        # TODO: review the use of cache in self._processed from the 'before' run to
+        #  ensure changes to the lambda (such as after UpdateFunctionCode) do not
+        #  generalise to the before value at this depth (thus making it seem as
+        #  though for this transformation before==after). Another option may be to
+        #  have specialised caching for transformations.
+
+        # TODO: add tests to review the behaviour of CFN with changes to transformation
+        #  function code and no changes to the template.
+
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before = self._compute_fn_transform(args=arguments_before)
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after = self._compute_fn_transform(args=arguments_after)
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_sub(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        def _compute_sub(args: str | list[Any], select_before: bool) -> str:
+            # TODO: add further schema validation.
+            string_template: str
+            sub_parameters: dict
+            if isinstance(args, str):
+                string_template = args
+                sub_parameters = dict()
+            elif (
+                isinstance(args, list)
+                and len(args) == 2
+                and isinstance(args[0], str)
+                and isinstance(args[1], dict)
+            ):
+                string_template = args[0]
+                sub_parameters = args[1]
+            else:
+                raise RuntimeError(
+                    "Invalid arguments shape for Fn::Sub, expected a String "
+                    f"or a Tuple of String and Map but got '{args}'"
+                )
+            sub_string = string_template
+            template_variable_names = re.findall("\\${([^}]+)}", string_template)
+            for template_variable_name in template_variable_names:
+                template_variable_value = Nothing
+
+                # Try to resolve the variable name as pseudo parameter.
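+                # e.g. (hypothetical) "arn:${AWS::Partition}:s3:::${BucketName}":
+                # ${AWS::Partition} is handled here, while ${BucketName} falls
+                # through to the parameter, GetAtt, or Ref branches below.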
+                if template_variable_name in _PSEUDO_PARAMETERS:
+                    template_variable_value = self._resolve_pseudo_parameter(
+                        pseudo_parameter_name=template_variable_name
+                    )
+
+                # Try to resolve the variable name as an entry to the defined parameters.
+                elif template_variable_name in sub_parameters:
+                    template_variable_value = sub_parameters[template_variable_name]
+
+                # Try to resolve the variable name as GetAtt.
+                elif "." in template_variable_name:
+                    try:
+                        template_variable_value = self._resolve_attribute(
+                            arguments=template_variable_name, select_before=select_before
+                        )
+                    except RuntimeError:
+                        pass
+
+                # Try to resolve the variable name as Ref.
+                else:
+                    try:
+                        resource_delta = self._resolve_reference(logical_id=template_variable_name)
+                        template_variable_value = (
+                            resource_delta.before if select_before else resource_delta.after
+                        )
+                        if isinstance(template_variable_value, PreprocResource):
+                            template_variable_value = template_variable_value.physical_resource_id
+                    except RuntimeError:
+                        pass
+
+                if is_nothing(template_variable_value):
+                    raise RuntimeError(
+                        f"Undefined variable name in Fn::Sub string template '{template_variable_name}'"
+                    )
+
+                if not isinstance(template_variable_value, str):
+                    template_variable_value = str(template_variable_value)
+
+                sub_string = sub_string.replace(
+                    f"${{{template_variable_name}}}", template_variable_value
+                )
+
+            # FIXME: the following type reduction is ported from v1; however it appears as though such
+            #  reduction is not performed by the engine, and certainly not at this depth given the
+            #  lack of context. This section should be removed with Fn::Sub always returning a string
+            #  and the resource providers reviewed.
+            account_id = self._change_set.account_id
+            is_another_account_id = sub_string.isdigit() and len(sub_string) == len(account_id)
+            if sub_string == account_id or is_another_account_id:
+                result = sub_string
+            elif sub_string.isdigit():
+                result = int(sub_string)
+            else:
+                try:
+                    result = float(sub_string)
+                except ValueError:
+                    result = sub_string
+            return result
+
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before = _compute_sub(args=arguments_before, select_before=True)
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after = _compute_sub(args=arguments_after, select_before=False)
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_join(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        def _compute_join(args: list[Any]) -> str:
+            # TODO: add support for schema validation.
+            # TODO: add tests for joining non string values.
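+            # e.g. {"Fn::Join": ["-", ["a", None, "b", 1]]} -> "a-b-1": None
+            # entries are dropped and non-string values are stringified below.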
+            delimiter: str = str(args[0])
+            values: list[Any] = args[1]
+            if not isinstance(values, list):
+                # shortcut if values is the empty string, for example:
+                # {"Fn::Join": ["", {"Ref": }]}
+                # CDK bootstrap does this
+                if values == "":
+                    return ""
+                raise RuntimeError(f"Invalid arguments list definition for Fn::Join: '{args}'")
+            str_values: list[str] = list()
+            for value in values:
+                if value is None:
+                    continue
+                str_value = str(value)
+                str_values.append(str_value)
+            join_result = delimiter.join(str_values)
+            return join_result
+
+        before = Nothing
+        if isinstance(arguments_before, list) and len(arguments_before) == 2:
+            before = _compute_join(arguments_before)
+        after = Nothing
+        if isinstance(arguments_after, list) and len(arguments_after) == 2:
+            after = _compute_join(arguments_after)
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_select(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ):
+        # TODO: add further support for schema validation
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        def _compute_fn_select(args: list[Any]) -> Any:
+            values: list[Any] = args[1]
+            if not isinstance(values, list) or not values:
+                raise RuntimeError(f"Invalid arguments list value for Fn::Select: '{values}'")
+            values_len = len(values)
+            index: int = int(args[0])
+            if not isinstance(index, int) or index < 0 or index >= values_len:
+                raise RuntimeError(f"Invalid or out of range index value for Fn::Select: '{index}'")
+            selection = values[index]
+            return selection
+
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before = _compute_fn_select(arguments_before)
+
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after = _compute_fn_select(arguments_after)
+
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_split(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ):
+        # TODO: add further support for schema validation
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        def _compute_fn_split(args: list[Any]) -> Any:
+            delimiter = args[0]
+            if not isinstance(delimiter, str) or not delimiter:
+                raise RuntimeError(f"Invalid delimiter value for Fn::Split: '{delimiter}'")
+            source_string = args[1]
+            if not isinstance(source_string, str):
+                raise RuntimeError(f"Invalid source string value for Fn::Split: '{source_string}'")
+            split_string = source_string.split(delimiter)
+            return split_string
+
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before = _compute_fn_split(arguments_before)
+
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after = _compute_fn_split(arguments_after)
+
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_get_a_zs(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        # TODO: add further support for schema validation
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        def _compute_fn_get_a_zs(region) -> Any:
+            if not isinstance(region, str):
+                raise RuntimeError(f"Invalid region value for Fn::GetAZs: '{region}'")
+
+            if not region:
+                region = self._change_set.region_name
+
+            account_id = self._change_set.account_id
+            ec2_client = connect_to(aws_access_key_id=account_id, region_name=region).ec2
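+            # Sketch: for region "us-east-1" (hypothetical) this surfaces zone
+            # names such as ["us-east-1a", "us-east-1b", ...] from the
+            # DescribeAvailabilityZones call below.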
+            try:
+                get_availability_zones_result: DescribeAvailabilityZonesResult = (
+                    ec2_client.describe_availability_zones()
+                )
+            except ClientError:
+                raise RuntimeError(
+                    "Could not describe availability zones whilst evaluating Fn::GetAZs"
+                )
+            availability_zones: AvailabilityZoneList = get_availability_zones_result[
+                "AvailabilityZones"
+            ]
+            azs = [az["ZoneName"] for az in availability_zones]
+            return azs
+
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before = _compute_fn_get_a_zs(arguments_before)
+
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after = _compute_fn_get_a_zs(arguments_after)
+
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_base64(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        # TODO: add further support for schema validation
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        arguments_before = arguments_delta.before
+        arguments_after = arguments_delta.after
+
+        def _compute_fn_base_64(string) -> Any:
+            if not isinstance(string, str):
+                raise RuntimeError(f"Invalid valueToEncode for Fn::Base64: '{string}'")
+            # Ported from v1:
+            base64_string = to_str(base64.b64encode(to_bytes(string)))
+            return base64_string
+
+        before = Nothing
+        if not is_nothing(arguments_before):
+            before = _compute_fn_base_64(arguments_before)
+
+        after = Nothing
+        if not is_nothing(arguments_after):
+            after = _compute_fn_base_64(arguments_after)
+
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_intrinsic_function_fn_find_in_map(
+        self, node_intrinsic_function: NodeIntrinsicFunction
+    ) -> PreprocEntityDelta:
+        # TODO: add type checking/validation for result unit?
+        arguments_delta = self.visit(node_intrinsic_function.arguments)
+        before_arguments = arguments_delta.before
+        after_arguments = arguments_delta.after
+        before = Nothing
+        if before_arguments:
+            before_value_delta = self._resolve_mapping(*before_arguments)
+            before = before_value_delta.before
+        after = Nothing
+        if after_arguments:
+            after_value_delta = self._resolve_mapping(*after_arguments)
+            after = after_value_delta.after
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_mapping(self, node_mapping: NodeMapping) -> PreprocEntityDelta:
+        bindings_delta = self.visit(node_mapping.bindings)
+        return bindings_delta
+
+    def visit_node_parameter(self, node_parameter: NodeParameter) -> PreprocEntityDelta:
+        dynamic_value = node_parameter.dynamic_value
+        dynamic_delta = self.visit(dynamic_value)
+
+        default_value = node_parameter.default_value
+        default_delta = self.visit(default_value)
+
+        before = dynamic_delta.before or default_delta.before
+        after = dynamic_delta.after or default_delta.after
+
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_depends_on(self, node_depends_on: NodeDependsOn) -> PreprocEntityDelta:
+        array_identifiers_delta = self.visit(node_depends_on.depends_on)
+        return array_identifiers_delta
+
+    def visit_node_condition(self, node_condition: NodeCondition) -> PreprocEntityDelta:
+        delta = self.visit(node_condition.body)
+        return delta
+
+    def _resource_physical_resource_id_from(
+        self, logical_resource_id: str, resolved_resources: dict
+    ) -> str:
+        # TODO: typing around resolved resources is needed and should be reflected here.
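+        # Sketch (hypothetical): a resolved_resources entry carrying
+        # {"PhysicalResourceId": "my-bucket-1a2b", ...} resolves here; a missing
+        # or non-string id means the resource was never actually deployed.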
+ resolved_resource = resolved_resources.get(logical_resource_id, dict()) + physical_resource_id: Optional[str] = resolved_resource.get("PhysicalResourceId") + if not isinstance(physical_resource_id, str): + raise RuntimeError(f"No PhysicalResourceId found for resource '{logical_resource_id}'") + return physical_resource_id + + def _before_resource_physical_id(self, resource_logical_id: str) -> str: + # TODO: typing around resolved resources is needed and should be reflected here. + return self._resource_physical_resource_id_from( + logical_resource_id=resource_logical_id, + resolved_resources=self._before_resolved_resources, + ) + + def _after_resource_physical_id(self, resource_logical_id: str) -> str: + return self._before_resource_physical_id(resource_logical_id=resource_logical_id) + + def visit_node_intrinsic_function_ref( + self, node_intrinsic_function: NodeIntrinsicFunction + ) -> PreprocEntityDelta: + arguments_delta = self.visit(node_intrinsic_function.arguments) + before_logical_id = arguments_delta.before + after_logical_id = arguments_delta.after + + # TODO: extend this to support references to other types. + before = Nothing + if not is_nothing(before_logical_id): + before_delta = self._resolve_reference(logical_id=before_logical_id) + before = before_delta.before + if isinstance(before, PreprocResource): + before = before.physical_resource_id + + after = Nothing + if not is_nothing(after_logical_id): + after_delta = self._resolve_reference(logical_id=after_logical_id) + after = after_delta.after + if isinstance(after, PreprocResource): + after = after.physical_resource_id + + return PreprocEntityDelta(before=before, after=after) + + def visit_node_intrinsic_function_condition( + self, node_intrinsic_function: NodeIntrinsicFunction + ) -> PreprocEntityDelta: + arguments_delta = self.visit(node_intrinsic_function.arguments) + before_condition_name = arguments_delta.before + after_condition_name = arguments_delta.after + + def _delta_of_condition(name: str) -> PreprocEntityDelta: + node_condition = self._get_node_condition_if_exists(condition_name=name) + if is_nothing(node_condition): + raise RuntimeError(f"Undefined condition '{name}'") + delta = self.visit(node_condition) + return delta + + before = Nothing + if not is_nothing(before_condition_name): + before_delta = _delta_of_condition(before_condition_name) + before = before_delta.before + + after = Nothing + if not is_nothing(after_condition_name): + after_delta = _delta_of_condition(after_condition_name) + after = after_delta.after + + return PreprocEntityDelta(before=before, after=after) + + def visit_node_array(self, node_array: NodeArray) -> PreprocEntityDelta: + node_change_type = node_array.change_type + before = list() if node_change_type != ChangeType.CREATED else Nothing + after = list() if node_change_type != ChangeType.REMOVED else Nothing + for change_set_entity in node_array.array: + delta: PreprocEntityDelta = self.visit(change_set_entity=change_set_entity) + delta_before = delta.before + delta_after = delta.after + if not is_nothing(before) and not is_nothing(delta_before): + before.append(delta_before) + if not is_nothing(after) and not is_nothing(delta_after): + after.append(delta_after) + return PreprocEntityDelta(before=before, after=after) + + def visit_node_property(self, node_property: NodeProperty) -> PreprocEntityDelta: + return self.visit(node_property.value) + + def visit_node_properties( + self, node_properties: NodeProperties + ) -> PreprocEntityDelta[PreprocProperties, PreprocProperties]: + 
node_change_type = node_properties.change_type
+        before_bindings = dict() if node_change_type != ChangeType.CREATED else Nothing
+        after_bindings = dict() if node_change_type != ChangeType.REMOVED else Nothing
+        for node_property in node_properties.properties:
+            property_name = node_property.name
+            delta = self.visit(node_property)
+            delta_before = delta.before
+            delta_after = delta.after
+            if (
+                not is_nothing(before_bindings)
+                and not is_nothing(delta_before)
+                and delta_before is not None
+            ):
+                before_bindings[property_name] = delta_before
+            if (
+                not is_nothing(after_bindings)
+                and not is_nothing(delta_after)
+                and delta_after is not None
+            ):
+                after_bindings[property_name] = delta_after
+        before = Nothing
+        if not is_nothing(before_bindings):
+            before = PreprocProperties(properties=before_bindings)
+        after = Nothing
+        if not is_nothing(after_bindings):
+            after = PreprocProperties(properties=after_bindings)
+        return PreprocEntityDelta(before=before, after=after)
+
+    def _resolve_resource_condition_reference(self, reference: TerminalValue) -> PreprocEntityDelta:
+        reference_delta = self.visit(reference)
+        before_reference = reference_delta.before
+        before = Nothing
+        if isinstance(before_reference, str):
+            before_delta = self._resolve_condition(logical_id=before_reference)
+            before = before_delta.before
+        after = Nothing
+        after_reference = reference_delta.after
+        if isinstance(after_reference, str):
+            after_delta = self._resolve_condition(logical_id=after_reference)
+            after = after_delta.after
+        return PreprocEntityDelta(before=before, after=after)
+
+    def visit_node_resource(
+        self, node_resource: NodeResource
+    ) -> PreprocEntityDelta[PreprocResource, PreprocResource]:
+        change_type = node_resource.change_type
+        condition_before = Nothing
+        condition_after = Nothing
+        if not is_nothing(node_resource.condition_reference):
+            condition_delta = self._resolve_resource_condition_reference(
+                node_resource.condition_reference
+            )
+            condition_before = condition_delta.before
+            condition_after = condition_delta.after
+
+        depends_on_before = Nothing
+        depends_on_after = Nothing
+        if not is_nothing(node_resource.depends_on):
+            depends_on_delta = self.visit(node_resource.depends_on)
+            depends_on_before = depends_on_delta.before
+            depends_on_after = depends_on_delta.after
+
+        type_delta = self.visit(node_resource.type_)
+        properties_delta: PreprocEntityDelta[PreprocProperties, PreprocProperties] = self.visit(
+            node_resource.properties
+        )
+
+        before = Nothing
+        after = Nothing
+        if change_type != ChangeType.CREATED and (
+            is_nothing(condition_before) or condition_before
+        ):
+            logical_resource_id = node_resource.name
+            before_physical_resource_id = self._before_resource_physical_id(
+                resource_logical_id=logical_resource_id
+            )
+            before = PreprocResource(
+                logical_id=logical_resource_id,
+                physical_resource_id=before_physical_resource_id,
+                condition=condition_before,
+                resource_type=type_delta.before,
+                properties=properties_delta.before,
+                depends_on=depends_on_before,
+            )
+        if change_type != ChangeType.REMOVED and (
+            is_nothing(condition_after) or condition_after
+        ):
+            logical_resource_id = node_resource.name
+            try:
+                after_physical_resource_id = self._after_resource_physical_id(
+                    resource_logical_id=logical_resource_id
+                )
+            except RuntimeError:
+                after_physical_resource_id = None
+            after = PreprocResource(
+                logical_id=logical_resource_id,
+                physical_resource_id=after_physical_resource_id,
+                condition=condition_after,
+                resource_type=type_delta.after,
+                properties=properties_delta.after,
+                
depends_on=depends_on_after, + ) + return PreprocEntityDelta(before=before, after=after) + + def visit_node_output( + self, node_output: NodeOutput + ) -> PreprocEntityDelta[PreprocOutput, PreprocOutput]: + change_type = node_output.change_type + value_delta = self.visit(node_output.value) + + condition_delta = Nothing + if not is_nothing(node_output.condition_reference): + condition_delta = self._resolve_resource_condition_reference( + node_output.condition_reference + ) + condition_before = condition_delta.before + condition_after = condition_delta.after + if not condition_before and condition_after: + change_type = ChangeType.CREATED + elif condition_before and not condition_after: + change_type = ChangeType.REMOVED + + export_delta = Nothing + if not is_nothing(node_output.export): + export_delta = self.visit(node_output.export) + + before: Maybe[PreprocOutput] = Nothing + if change_type != ChangeType.CREATED: + before = PreprocOutput( + name=node_output.name, + value=value_delta.before, + export=export_delta.before if export_delta else None, + condition=condition_delta.before if condition_delta else None, + ) + after: Maybe[PreprocOutput] = Nothing + if change_type != ChangeType.REMOVED: + after = PreprocOutput( + name=node_output.name, + value=value_delta.after, + export=export_delta.after if export_delta else None, + condition=condition_delta.after if condition_delta else None, + ) + return PreprocEntityDelta(before=before, after=after) + + def visit_node_outputs( + self, node_outputs: NodeOutputs + ) -> PreprocEntityDelta[list[PreprocOutput], list[PreprocOutput]]: + before: list[PreprocOutput] = list() + after: list[PreprocOutput] = list() + for node_output in node_outputs.outputs: + output_delta: PreprocEntityDelta[PreprocOutput, PreprocOutput] = self.visit(node_output) + output_before = output_delta.before + output_after = output_delta.after + if not is_nothing(output_before): + before.append(output_before) + if not is_nothing(output_after): + after.append(output_after) + return PreprocEntityDelta(before=before, after=after) diff --git a/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_visitor.py b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_visitor.py new file mode 100644 index 0000000000000..732141270fb65 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/v2/change_set_model_visitor.py @@ -0,0 +1,191 @@ +import abc + +from localstack.services.cloudformation.engine.v2.change_set_model import ( + ChangeSetEntity, + NodeArray, + NodeCondition, + NodeConditions, + NodeDependsOn, + NodeDivergence, + NodeIntrinsicFunction, + NodeMapping, + NodeMappings, + NodeObject, + NodeOutput, + NodeOutputs, + NodeParameter, + NodeParameters, + NodeProperties, + NodeProperty, + NodeResource, + NodeResources, + NodeTemplate, + TerminalValueCreated, + TerminalValueModified, + TerminalValueRemoved, + TerminalValueUnchanged, +) +from localstack.utils.strings import camel_to_snake_case + + +class ChangeSetModelVisitor(abc.ABC): + # TODO: this class should be auto generated. + + # TODO: add visitors for abstract classes so shared logic can be implemented + # just once in classes extending this. 
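+    # Dispatch sketch: the entity's class name drives the lookup, so a
+    # NodeIntrinsicFunction holding "Fn::GetAtt" is routed (via "::" -> "_" and
+    # camel_to_snake_case) to visit_node_intrinsic_function_fn_get_att.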
+ + def visit(self, change_set_entity: ChangeSetEntity): + # TODO: speed up this lookup logic + type_str = change_set_entity.__class__.__name__ + type_str = camel_to_snake_case(type_str) + visit_function_name = f"visit_{type_str}" + visit_function = getattr(self, visit_function_name) + return visit_function(change_set_entity) + + def visit_children(self, change_set_entity: ChangeSetEntity): + children = change_set_entity.get_children() + for child in children: + self.visit(child) + + def visit_node_template(self, node_template: NodeTemplate): + # Visit the resources, which will lazily evaluate all the referenced (direct and indirect) + # entities (parameters, mappings, conditions, etc.). Then compute the output fields; computing + # only the output fields would only result in the deployment logic of the referenced outputs + # being evaluated, hence enforce the visiting of all the resources first. + self.visit(node_template.resources) + self.visit(node_template.outputs) + + def visit_node_outputs(self, node_outputs: NodeOutputs): + self.visit_children(node_outputs) + + def visit_node_output(self, node_output: NodeOutput): + self.visit_children(node_output) + + def visit_node_mapping(self, node_mapping: NodeMapping): + self.visit_children(node_mapping) + + def visit_node_mappings(self, node_mappings: NodeMappings): + self.visit_children(node_mappings) + + def visit_node_parameters(self, node_parameters: NodeParameters): + self.visit_children(node_parameters) + + def visit_node_parameter(self, node_parameter: NodeParameter): + self.visit_children(node_parameter) + + def visit_node_conditions(self, node_conditions: NodeConditions): + self.visit_children(node_conditions) + + def visit_node_condition(self, node_condition: NodeCondition): + self.visit_children(node_condition) + + def visit_node_depends_on(self, node_depends_on: NodeDependsOn): + self.visit_children(node_depends_on) + + def visit_node_resources(self, node_resources: NodeResources): + self.visit_children(node_resources) + + def visit_node_resource(self, node_resource: NodeResource): + self.visit_children(node_resource) + + def visit_node_properties(self, node_properties: NodeProperties): + self.visit_children(node_properties) + + def visit_node_property(self, node_property: NodeProperty): + self.visit_children(node_property) + + def visit_node_intrinsic_function(self, node_intrinsic_function: NodeIntrinsicFunction): + # TODO: speed up this lookup logic + function_name = node_intrinsic_function.intrinsic_function + function_name = function_name.replace("::", "_") + function_name = camel_to_snake_case(function_name) + visit_function_name = f"visit_node_intrinsic_function_{function_name}" + visit_function = getattr(self, visit_function_name) + return visit_function(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_get_att( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_equals( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_transform( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_select( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_split( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + 
self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_get_a_zs( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_base64( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_sub(self, node_intrinsic_function: NodeIntrinsicFunction): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_if(self, node_intrinsic_function: NodeIntrinsicFunction): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_and(self, node_intrinsic_function: NodeIntrinsicFunction): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_or(self, node_intrinsic_function: NodeIntrinsicFunction): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_not(self, node_intrinsic_function: NodeIntrinsicFunction): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_join(self, node_intrinsic_function: NodeIntrinsicFunction): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_fn_find_in_map( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_ref(self, node_intrinsic_function: NodeIntrinsicFunction): + self.visit_children(node_intrinsic_function) + + def visit_node_intrinsic_function_condition( + self, node_intrinsic_function: NodeIntrinsicFunction + ): + self.visit_children(node_intrinsic_function) + + def visit_node_divergence(self, node_divergence: NodeDivergence): + self.visit_children(node_divergence) + + def visit_node_object(self, node_object: NodeObject): + self.visit_children(node_object) + + def visit_node_array(self, node_array: NodeArray): + self.visit_children(node_array) + + def visit_terminal_value_modified(self, terminal_value_modified: TerminalValueModified): + self.visit_children(terminal_value_modified) + + def visit_terminal_value_created(self, terminal_value_created: TerminalValueCreated): + self.visit_children(terminal_value_created) + + def visit_terminal_value_removed(self, terminal_value_removed: TerminalValueRemoved): + self.visit_children(terminal_value_removed) + + def visit_terminal_value_unchanged(self, terminal_value_unchanged: TerminalValueUnchanged): + self.visit_children(terminal_value_unchanged) diff --git a/localstack-core/localstack/services/cloudformation/engine/validations.py b/localstack-core/localstack/services/cloudformation/engine/validations.py new file mode 100644 index 0000000000000..c65d0a5b307fc --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/engine/validations.py @@ -0,0 +1,86 @@ +""" +Provide validations for use within the CFn engine +""" + +from typing import Protocol + +from localstack.aws.api import CommonServiceException + + +class ValidationError(CommonServiceException): + """General validation error type (defined in the AWS docs, but not part of the botocore spec)""" + + def __init__(self, message=None): + super().__init__("ValidationError", message=message, sender_fault=True) + + +class TemplateValidationStep(Protocol): + """ + Base class for static analysis of the template + """ + + def __call__(self, template: dict): + """ + Execute a specific validation on the template + """ + + +def outputs_have_values(template: dict): + outputs: 
dict[str, dict] = template.get("Outputs", {}) + + for output_name, output_defn in outputs.items(): + if "Value" not in output_defn: + raise ValidationError( + "Template format error: Every Outputs member must contain a Value object" + ) + + if output_defn["Value"] is None: + key = f"/Outputs/{output_name}/Value" + raise ValidationError(f"[{key}] 'null' values are not allowed in templates") + + +# TODO: this would need to be split into different validations pre- and post-transform +def resources_top_level_keys(template: dict): + """ + Validate the top-level keys of each resource: + - the template defines a Resources key + - every resource includes a Type member + - no resource includes keys outside the allowed set + """ + resources = template.get("Resources") + if resources is None: + raise ValidationError( + "Template format error: At least one Resources member must be defined." + ) + + allowed_keys = { + "Type", + "Properties", + "DependsOn", + "CreationPolicy", + "DeletionPolicy", + "Metadata", + "UpdatePolicy", + "UpdateReplacePolicy", + "Condition", + } + for resource_id, resource in resources.items(): + if "Type" not in resource: + raise ValidationError( + f"Template format error: [/Resources/{resource_id}] Every Resources object must contain a Type member." + ) + + # check for invalid keys + for key in resource: + if key not in allowed_keys: + raise ValidationError(f"Invalid template resource property '{key}'") + + +DEFAULT_TEMPLATE_VALIDATIONS: list[TemplateValidationStep] = [ + # FIXME: disabled for now due to the template validation not fitting well with the template that we use here. + # We don't have access to a "raw" processed template here and it's questionable if we should have it at all, + # since later transformations can again introduce issues. + # => Reevaluate this when reworking how we mutate the template dict in the provider + # outputs_have_values, + # resources_top_level_keys, +] diff --git a/localstack/services/cloudformation/engine/yaml_parser.py b/localstack-core/localstack/services/cloudformation/engine/yaml_parser.py similarity index 91% rename from localstack/services/cloudformation/engine/yaml_parser.py rename to localstack-core/localstack/services/cloudformation/engine/yaml_parser.py index ef7fda0330d11..c0b72ead58f8f 100644 --- a/localstack/services/cloudformation/engine/yaml_parser.py +++ b/localstack-core/localstack/services/cloudformation/engine/yaml_parser.py @@ -30,7 +30,13 @@ def shorthand_constructor(loader: yaml.Loader, tag_suffix: str, node: yaml.Node) !Select [2, !Split [",", !ImportValue AccountSubnetIDs]] shorthand: 2 => canonical "2" """ - fn_name = "Ref" if tag_suffix == "Ref" else f"Fn::{tag_suffix}" + match tag_suffix: + case "Ref": + fn_name = "Ref" + case "Condition": + fn_name = "Condition" + case _: + fn_name = f"Fn::{tag_suffix}" if tag_suffix == "GetAtt" and isinstance(node, yaml.ScalarNode): # !GetAtt A.B.C => {"Fn::GetAtt": ["A", "B.C"]} diff --git a/localstack-core/localstack/services/cloudformation/models/__init__.py b/localstack-core/localstack/services/cloudformation/models/__init__.py new file mode 100644 index 0000000000000..a9a2c5b3bb437 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/models/__init__.py @@ -0,0 +1 @@ +__all__ = [] diff --git a/localstack-core/localstack/services/cloudformation/plugins.py b/localstack-core/localstack/services/cloudformation/plugins.py new file mode 100644 index 0000000000000..72ef0104aaeb2 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/plugins.py @@ -0,0 +1,12 @@ +from rolo
import Resource + +from localstack.runtime import hooks + + +@hooks.on_infra_start() +def register_cloudformation_deploy_ui(): + from localstack.services.internal import get_internal_apis + + from .deploy_ui import CloudFormationUi + + get_internal_apis().add(Resource("/_localstack/cloudformation/deploy", CloudFormationUi())) diff --git a/localstack-core/localstack/services/cloudformation/provider.py b/localstack-core/localstack/services/cloudformation/provider.py new file mode 100644 index 0000000000000..f1ba0d6cfeb07 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/provider.py @@ -0,0 +1,1334 @@ +import copy +import json +import logging +import re +from collections import defaultdict +from copy import deepcopy + +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.cloudformation import ( + AlreadyExistsException, + CallAs, + ChangeSetNameOrId, + ChangeSetNotFoundException, + ChangeSetType, + ClientRequestToken, + CloudformationApi, + CreateChangeSetInput, + CreateChangeSetOutput, + CreateStackInput, + CreateStackInstancesInput, + CreateStackInstancesOutput, + CreateStackOutput, + CreateStackSetInput, + CreateStackSetOutput, + DeleteChangeSetOutput, + DeleteStackInstancesInput, + DeleteStackInstancesOutput, + DeleteStackSetOutput, + DeletionMode, + DescribeChangeSetOutput, + DescribeStackEventsOutput, + DescribeStackResourceOutput, + DescribeStackResourcesOutput, + DescribeStackSetOperationOutput, + DescribeStackSetOutput, + DescribeStacksOutput, + DisableRollback, + EnableTerminationProtection, + ExecuteChangeSetOutput, + ExecutionStatus, + ExportName, + GetTemplateOutput, + GetTemplateSummaryInput, + GetTemplateSummaryOutput, + IncludePropertyValues, + InsufficientCapabilitiesException, + InvalidChangeSetStatusException, + ListChangeSetsOutput, + ListExportsOutput, + ListImportsOutput, + ListStackInstancesInput, + ListStackInstancesOutput, + ListStackResourcesOutput, + ListStackSetsInput, + ListStackSetsOutput, + ListStacksOutput, + ListTypesInput, + ListTypesOutput, + LogicalResourceId, + NextToken, + Parameter, + PhysicalResourceId, + RegisterTypeInput, + RegisterTypeOutput, + RegistryType, + RetainExceptOnCreate, + RetainResources, + RoleARN, + StackName, + StackNameOrId, + StackSetName, + StackStatus, + StackStatusFilter, + TemplateParameter, + TemplateStage, + TypeSummary, + UpdateStackInput, + UpdateStackOutput, + UpdateStackSetInput, + UpdateStackSetOutput, + UpdateTerminationProtectionOutput, + ValidateTemplateInput, + ValidateTemplateOutput, +) +from localstack.aws.connect import connect_to +from localstack.services.cloudformation import api_utils +from localstack.services.cloudformation.engine import parameters as param_resolver +from localstack.services.cloudformation.engine import template_deployer, template_preparer +from localstack.services.cloudformation.engine.entities import ( + Stack, + StackChangeSet, + StackInstance, + StackSet, +) +from localstack.services.cloudformation.engine.parameters import mask_no_echo, strip_parameter_type +from localstack.services.cloudformation.engine.resource_ordering import ( + NoResourceInStack, + order_resources, +) +from localstack.services.cloudformation.engine.template_deployer import ( + NoStackUpdates, +) +from localstack.services.cloudformation.engine.template_utils import resolve_stack_conditions +from localstack.services.cloudformation.engine.transformers import ( + FailedTransformationException, +) +from 
localstack.services.cloudformation.engine.validations import ( + DEFAULT_TEMPLATE_VALIDATIONS, + ValidationError, +) +from localstack.services.cloudformation.resource_provider import ( + PRO_RESOURCE_PROVIDERS, + ResourceProvider, +) +from localstack.services.cloudformation.stores import ( + cloudformation_stores, + find_active_stack_by_name_or_id, + find_change_set, + find_stack, + find_stack_by_id, + get_cloudformation_store, +) +from localstack.state import StateVisitor +from localstack.utils.collections import ( + remove_attributes, + select_attributes, + select_from_typed_dict, +) +from localstack.utils.json import clone +from localstack.utils.strings import long_uid, short_uid + +LOG = logging.getLogger(__name__) + +ARN_CHANGESET_REGEX = re.compile( + r"arn:(aws|aws-us-gov|aws-cn):cloudformation:[-a-zA-Z0-9]+:\d{12}:changeSet/[a-zA-Z][-a-zA-Z0-9]*/[-a-zA-Z0-9:/._+]+" +) +ARN_STACK_REGEX = re.compile( + r"arn:(aws|aws-us-gov|aws-cn):cloudformation:[-a-zA-Z0-9]+:\d{12}:stack/[a-zA-Z][-a-zA-Z0-9]*/[-a-zA-Z0-9:/._+]+" +) + + +def clone_stack_params(stack_params): + try: + return clone(stack_params) + except Exception as e: + LOG.info("Unable to clone stack parameters: %s", e) + return stack_params + + +def find_stack_instance(stack_set: StackSet, account: str, region: str): + for instance in stack_set.stack_instances: + if instance.metadata["Account"] == account and instance.metadata["Region"] == region: + return instance + return None + + +def stack_not_found_error(stack_name: str): + # FIXME + raise ValidationError("Stack with id %s does not exist" % stack_name) + + +def not_found_error(message: str): + # FIXME + raise ResourceNotFoundException(message) + + +class ResourceNotFoundException(CommonServiceException): + def __init__(self, message=None): + super().__init__("ResourceNotFoundException", status_code=404, message=message) + + +class InternalFailure(CommonServiceException): + def __init__(self, message=None): + super().__init__("InternalFailure", status_code=500, message=message, sender_fault=False) + + +class CloudformationProvider(CloudformationApi): + def _stack_status_is_active(self, stack_status: str) -> bool: + return stack_status not in [StackStatus.DELETE_COMPLETE] + + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(cloudformation_stores) + + @handler("CreateStack", expand=False) + def create_stack(self, context: RequestContext, request: CreateStackInput) -> CreateStackOutput: + # TODO: test what happens when both TemplateUrl and Body are specified + state = get_cloudformation_store(context.account_id, context.region) + + stack_name = request.get("StackName") + + # get stacks by name + active_stack_candidates = [ + s + for s in state.stacks.values() + if s.stack_name == stack_name and self._stack_status_is_active(s.status) + ] + + # TODO: fix/implement this code path + # this needs more investigation how Cloudformation handles it (e.g. normal stack create or does it create a separate changeset?) 
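+ # For now we only guard against duplicate active stacks (see the AlreadyExistsException below).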
+ # REVIEW_IN_PROGRESS is another special status + # in this case existing changesets are set to obsolete and the stack is created + # review_stack_candidates = [s for s in stack_candidates if s.status == StackStatus.REVIEW_IN_PROGRESS] + # if review_stack_candidates: + # set changesets to obsolete + # for cs in review_stack_candidates[0].change_sets: + # cs.execution_status = ExecutionStatus.OBSOLETE + + if active_stack_candidates: + raise AlreadyExistsException(f"Stack [{stack_name}] already exists") + + template_body = request.get("TemplateBody") or "" + if len(template_body) > 51200: + raise ValidationError( + f"1 validation error detected: Value '{request['TemplateBody']}' at 'templateBody' " + "failed to satisfy constraint: Member must have length less than or equal to 51200" + ) + api_utils.prepare_template_body(request) # TODO: avoid mutating request directly + + template = template_preparer.parse_template(request["TemplateBody"]) + + stack_name = template["StackName"] = request.get("StackName") + if api_utils.validate_stack_name(stack_name) is False: + raise ValidationError( + f"1 validation error detected: Value '{stack_name}' at 'stackName' failed to satisfy constraint:\ + Member must satisfy regular expression pattern: [a-zA-Z][-a-zA-Z0-9]*|arn:[-a-zA-Z0-9:/._+]*" + ) + + if ( + "CAPABILITY_AUTO_EXPAND" not in request.get("Capabilities", []) + and "Transform" in template.keys() + ): + raise InsufficientCapabilitiesException( + "Requires capabilities : [CAPABILITY_AUTO_EXPAND]" + ) + + # resolve stack parameters + new_parameters = param_resolver.convert_stack_parameters_to_dict(request.get("Parameters")) + parameter_declarations = param_resolver.extract_stack_parameter_declarations(template) + resolved_parameters = param_resolver.resolve_parameters( + account_id=context.account_id, + region_name=context.region, + parameter_declarations=parameter_declarations, + new_parameters=new_parameters, + old_parameters={}, + ) + + # handle conditions + stack = Stack(context.account_id, context.region, request, template) + + try: + template = template_preparer.transform_template( + context.account_id, + context.region, + template, + stack.stack_name, + stack.resources, + stack.mappings, + {}, # TODO + resolved_parameters, + ) + except FailedTransformationException as e: + stack.add_stack_event( + stack.stack_name, + stack.stack_id, + status="ROLLBACK_IN_PROGRESS", + status_reason=e.message, + ) + stack.set_stack_status("ROLLBACK_COMPLETE") + state.stacks[stack.stack_id] = stack + return CreateStackOutput(StackId=stack.stack_id) + + # perform basic static analysis on the template + for validation_fn in DEFAULT_TEMPLATE_VALIDATIONS: + validation_fn(template) + + stack = Stack(context.account_id, context.region, request, template) + + # resolve conditions + raw_conditions = template.get("Conditions", {}) + resolved_stack_conditions = resolve_stack_conditions( + account_id=context.account_id, + region_name=context.region, + conditions=raw_conditions, + parameters=resolved_parameters, + mappings=stack.mappings, + stack_name=stack_name, + ) + stack.set_resolved_stack_conditions(resolved_stack_conditions) + + stack.set_resolved_parameters(resolved_parameters) + stack.template_body = template_body + state.stacks[stack.stack_id] = stack + LOG.debug( + 'Creating stack "%s" with %s resources ...', + stack.stack_name, + len(stack.template_resources), + ) + deployer = template_deployer.TemplateDeployer(context.account_id, context.region, stack) + try: + deployer.deploy_stack() + except Exception as e: + 
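# deployment failed: record the failure and surface the root cause as a ValidationError +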
stack.set_stack_status("CREATE_FAILED") + msg = 'Unable to create stack "%s": %s' % (stack.stack_name, e) + LOG.exception("%s", msg) + raise ValidationError(msg) from e + + return CreateStackOutput(StackId=stack.stack_id) + + @handler("DeleteStack") + def delete_stack( + self, + context: RequestContext, + stack_name: StackName, + retain_resources: RetainResources = None, + role_arn: RoleARN = None, + client_request_token: ClientRequestToken = None, + deletion_mode: DeletionMode = None, + **kwargs, + ) -> None: + stack = find_active_stack_by_name_or_id(context.account_id, context.region, stack_name) + if not stack: + # aws will silently ignore invalid stack names - we should do the same + return + deployer = template_deployer.TemplateDeployer(context.account_id, context.region, stack) + deployer.delete_stack() + + @handler("UpdateStack", expand=False) + def update_stack( + self, + context: RequestContext, + request: UpdateStackInput, + ) -> UpdateStackOutput: + stack_name = request.get("StackName") + stack = find_stack(context.account_id, context.region, stack_name) + if not stack: + return not_found_error(f'Unable to update non-existing stack "{stack_name}"') + + api_utils.prepare_template_body(request) + template = template_preparer.parse_template(request["TemplateBody"]) + + if ( + "CAPABILITY_AUTO_EXPAND" not in request.get("Capabilities", []) + and "Transform" in template.keys() + ): + raise InsufficientCapabilitiesException( + "Requires capabilities : [CAPABILITY_AUTO_EXPAND]" + ) + + new_parameters: dict[str, Parameter] = param_resolver.convert_stack_parameters_to_dict( + request.get("Parameters") + ) + parameter_declarations = param_resolver.extract_stack_parameter_declarations(template) + resolved_parameters = param_resolver.resolve_parameters( + account_id=context.account_id, + region_name=context.region, + parameter_declarations=parameter_declarations, + new_parameters=new_parameters, + old_parameters=stack.resolved_parameters, + ) + + resolved_stack_conditions = resolve_stack_conditions( + account_id=context.account_id, + region_name=context.region, + conditions=template.get("Conditions", {}), + parameters=resolved_parameters, + mappings=template.get("Mappings", {}), + stack_name=stack_name, + ) + + raw_new_template = copy.deepcopy(template) + try: + template = template_preparer.transform_template( + context.account_id, + context.region, + template, + stack.stack_name, + stack.resources, + stack.mappings, + resolved_stack_conditions, + resolved_parameters, + ) + processed_template = copy.deepcopy( + template + ) # copying it here since it's being mutated somewhere downstream + except FailedTransformationException as e: + stack.add_stack_event( + stack.stack_name, + stack.stack_id, + status="ROLLBACK_IN_PROGRESS", + status_reason=e.message, + ) + stack.set_stack_status("ROLLBACK_COMPLETE") + return UpdateStackOutput(StackId=stack.stack_id) + + # perform basic static analysis on the template + for validation_fn in DEFAULT_TEMPLATE_VALIDATIONS: + validation_fn(template) + + # update the template + stack.template_original = template + + deployer = template_deployer.TemplateDeployer(context.account_id, context.region, stack) + # TODO: there shouldn't be a "new" stack on update + new_stack = Stack( + context.account_id, context.region, request, template, request["TemplateBody"] + ) + new_stack.set_resolved_parameters(resolved_parameters) + stack.set_resolved_parameters(resolved_parameters) + stack.set_resolved_stack_conditions(resolved_stack_conditions) + try: +
deployer.update_stack(new_stack) + except NoStackUpdates as e: + stack.set_stack_status("UPDATE_COMPLETE") + if raw_new_template != processed_template: + # processed templates seem to never return an exception here + return UpdateStackOutput(StackId=stack.stack_id) + raise ValidationError(str(e)) + except Exception as e: + stack.set_stack_status("UPDATE_FAILED") + msg = f'Unable to update stack "{stack_name}": {e}' + LOG.exception("%s", msg) + raise ValidationError(msg) from e + + return UpdateStackOutput(StackId=stack.stack_id) + + @handler("DescribeStacks") + def describe_stacks( + self, + context: RequestContext, + stack_name: StackName = None, + next_token: NextToken = None, + **kwargs, + ) -> DescribeStacksOutput: + # TODO: test & implement pagination + state = get_cloudformation_store(context.account_id, context.region) + + if stack_name: + if ARN_STACK_REGEX.match(stack_name): + # we can get the stack directly since we index the store by ARN/stackID + stack = state.stacks.get(stack_name) + stacks = [stack.describe_details()] if stack else [] + else: + # otherwise we have to find the active stack with the given name + stack_candidates: list[Stack] = [ + s for stack_arn, s in state.stacks.items() if s.stack_name == stack_name + ] + active_stack_candidates = [ + s for s in stack_candidates if self._stack_status_is_active(s.status) + ] + stacks = [s.describe_details() for s in active_stack_candidates] + else: + # return all active stacks + stack_list = list(state.stacks.values()) + stacks = [ + s.describe_details() for s in stack_list if self._stack_status_is_active(s.status) + ] + + if stack_name and not stacks: + raise ValidationError(f"Stack with id {stack_name} does not exist") + + return DescribeStacksOutput(Stacks=stacks) + + @handler("ListStacks") + def list_stacks( + self, + context: RequestContext, + next_token: NextToken = None, + stack_status_filter: StackStatusFilter = None, + **kwargs, + ) -> ListStacksOutput: + state = get_cloudformation_store(context.account_id, context.region) + + stacks = [ + s.describe_details() + for s in state.stacks.values() + if not stack_status_filter or s.status in stack_status_filter + ] + + attrs = [ + "StackId", + "StackName", + "TemplateDescription", + "CreationTime", + "LastUpdatedTime", + "DeletionTime", + "StackStatus", + "StackStatusReason", + "ParentId", + "RootId", + "DriftInformation", + ] + stacks = [select_attributes(stack, attrs) for stack in stacks] + return ListStacksOutput(StackSummaries=stacks) + + @handler("GetTemplate") + def get_template( + self, + context: RequestContext, + stack_name: StackName = None, + change_set_name: ChangeSetNameOrId = None, + template_stage: TemplateStage = None, + **kwargs, + ) -> GetTemplateOutput: + if change_set_name: + stack = find_change_set( + context.account_id, context.region, stack_name=stack_name, cs_name=change_set_name + ) + else: + stack = find_stack(context.account_id, context.region, stack_name) + if not stack: + return stack_not_found_error(stack_name) + + if template_stage == TemplateStage.Processed and "Transform" in stack.template_body: + copy_template = clone(stack.template_original) + copy_template.pop("ChangeSetName", None) + copy_template.pop("StackName", None) + for resource in copy_template.get("Resources", {}).values(): + resource.pop("LogicalResourceId", None) + template_body = json.dumps(copy_template) + else: + template_body = stack.template_body + + return GetTemplateOutput( + TemplateBody=template_body, + StagesAvailable=[TemplateStage.Original, 
TemplateStage.Processed], + ) + + @handler("GetTemplateSummary", expand=False) + def get_template_summary( + self, + context: RequestContext, + request: GetTemplateSummaryInput, + ) -> GetTemplateSummaryOutput: + stack_name = request.get("StackName") + + if stack_name: + stack = find_stack(context.account_id, context.region, stack_name) + if not stack: + return stack_not_found_error(stack_name) + template = stack.template + else: + api_utils.prepare_template_body(request) + template = template_preparer.parse_template(request["TemplateBody"]) + request["StackName"] = "tmp-stack" + stack = Stack(context.account_id, context.region, request, template) + + result: GetTemplateSummaryOutput = stack.describe_details() + + # build parameter declarations + result["Parameters"] = list( + param_resolver.extract_stack_parameter_declarations(template).values() + ) + + id_summaries = defaultdict(list) + for resource_id, resource in stack.template_resources.items(): + res_type = resource["Type"] + id_summaries[res_type].append(resource_id) + + result["ResourceTypes"] = list(id_summaries.keys()) + result["ResourceIdentifierSummaries"] = [ + {"ResourceType": key, "LogicalResourceIds": values} + for key, values in id_summaries.items() + ] + result["Metadata"] = stack.template.get("Metadata") + result["Version"] = stack.template.get("AWSTemplateFormatVersion", "2010-09-09") + # these do not appear in the output + result.pop("Capabilities", None) + + return select_from_typed_dict(GetTemplateSummaryOutput, result) + + def update_termination_protection( + self, + context: RequestContext, + enable_termination_protection: EnableTerminationProtection, + stack_name: StackNameOrId, + **kwargs, + ) -> UpdateTerminationProtectionOutput: + stack = find_stack(context.account_id, context.region, stack_name) + if not stack: + raise ValidationError(f"Stack '{stack_name}' does not exist.") + stack.metadata["EnableTerminationProtection"] = enable_termination_protection + return UpdateTerminationProtectionOutput(StackId=stack.stack_id) + + @handler("CreateChangeSet", expand=False) + def create_change_set( + self, context: RequestContext, request: CreateChangeSetInput + ) -> CreateChangeSetOutput: + state = get_cloudformation_store(context.account_id, context.region) + + req_params = request + change_set_type = req_params.get("ChangeSetType", "UPDATE") + stack_name = req_params.get("StackName") + change_set_name = req_params.get("ChangeSetName") + template_body = req_params.get("TemplateBody") + # s3 or secretsmanager url + template_url = req_params.get("TemplateURL") + + # validate and resolve template + if template_body and template_url: + raise ValidationError( + "Specify exactly one of 'TemplateBody' or 'TemplateUrl'" + ) # TODO: check proper message + + if not template_body and not template_url: + raise ValidationError( + "Specify exactly one of 'TemplateBody' or 'TemplateUrl'" + ) # TODO: check proper message + + api_utils.prepare_template_body( + req_params + ) # TODO: function has too many unclear responsibilities + if not template_body: + template_body = req_params[ + "TemplateBody" + ] # should then have been set by prepare_template_body + template = template_preparer.parse_template(req_params["TemplateBody"]) + + del req_params["TemplateBody"] # TODO: stop mutating req_params + template["StackName"] = stack_name + # TODO: validate with AWS what this is actually doing? + template["ChangeSetName"] = change_set_name + + # this is intentionally not in a util yet. 
Let's first see how the different operations deal with these before generalizing + # handle ARN stack_name here (not valid for initial CREATE, since stack doesn't exist yet) + if ARN_STACK_REGEX.match(stack_name): + if not (stack := state.stacks.get(stack_name)): + raise ValidationError(f"Stack '{stack_name}' does not exist.") + else: + # stack name specified, so fetch the stack by name + stack_candidates: list[Stack] = [ + s for stack_arn, s in state.stacks.items() if s.stack_name == stack_name + ] + active_stack_candidates = [ + s for s in stack_candidates if self._stack_status_is_active(s.status) + ] + + # on a CREATE an empty Stack should be generated if we didn't find an active one + if not active_stack_candidates and change_set_type == ChangeSetType.CREATE: + empty_stack_template = dict(template) + empty_stack_template["Resources"] = {} + req_params_copy = clone_stack_params(req_params) + stack = Stack( + context.account_id, + context.region, + req_params_copy, + empty_stack_template, + template_body=template_body, + ) + state.stacks[stack.stack_id] = stack + stack.set_stack_status("REVIEW_IN_PROGRESS") + else: + if not active_stack_candidates: + raise ValidationError(f"Stack '{stack_name}' does not exist.") + stack = active_stack_candidates[0] + + # TODO: test if rollback status is allowed as well + if ( + change_set_type == ChangeSetType.CREATE + and stack.status != StackStatus.REVIEW_IN_PROGRESS + ): + raise ValidationError( + f"Stack [{stack_name}] already exists and cannot be created again with the changeSet [{change_set_name}]." + ) + + old_parameters: dict[str, Parameter] = {} + match change_set_type: + case ChangeSetType.UPDATE: + # add changeset to existing stack + old_parameters = { + k: mask_no_echo(strip_parameter_type(v)) + for k, v in stack.resolved_parameters.items() + } + case ChangeSetType.IMPORT: + raise NotImplementedError() # TODO: implement importing resources + case ChangeSetType.CREATE: + pass + case _: + msg = ( + f"1 validation error detected: Value '{change_set_type}' at 'changeSetType' failed to satisfy " + f"constraint: Member must satisfy enum value set: [IMPORT, UPDATE, CREATE] " + ) + raise ValidationError(msg) + + # resolve parameters + new_parameters: dict[str, Parameter] = param_resolver.convert_stack_parameters_to_dict( + request.get("Parameters") + ) + parameter_declarations = param_resolver.extract_stack_parameter_declarations(template) + resolved_parameters = param_resolver.resolve_parameters( + account_id=context.account_id, + region_name=context.region, + parameter_declarations=parameter_declarations, + new_parameters=new_parameters, + old_parameters=old_parameters, + ) + + # TODO: remove this when fixing Stack.resources and transformation order + # currently we need to create a stack with existing resources + parameters so that resolve refs recursively in here will work. 
+ # The correct way to do it would be at a later stage anyway just like a normal intrinsic function + req_params_copy = clone_stack_params(req_params) + temp_stack = Stack(context.account_id, context.region, req_params_copy, template) + temp_stack.set_resolved_parameters(resolved_parameters) + + # TODO: everything below should be async + # apply template transformations + transformed_template = template_preparer.transform_template( + context.account_id, + context.region, + template, + stack_name=temp_stack.stack_name, + resources=temp_stack.resources, + mappings=temp_stack.mappings, + conditions={}, # TODO: we don't have any resolved conditions yet at this point but we need the conditions because of the samtranslator... + resolved_parameters=resolved_parameters, + ) + + # perform basic static analysis on the template + for validation_fn in DEFAULT_TEMPLATE_VALIDATIONS: + validation_fn(template) + + # create change set for the stack and apply changes + change_set = StackChangeSet( + context.account_id, context.region, stack, req_params, transformed_template + ) + # only set parameters for the changeset, then switch to stack on execute_change_set + change_set.set_resolved_parameters(resolved_parameters) + change_set.template_body = template_body + + # TODO: evaluate conditions + raw_conditions = transformed_template.get("Conditions", {}) + resolved_stack_conditions = resolve_stack_conditions( + account_id=context.account_id, + region_name=context.region, + conditions=raw_conditions, + parameters=resolved_parameters, + mappings=temp_stack.mappings, + stack_name=stack_name, + ) + change_set.set_resolved_stack_conditions(resolved_stack_conditions) + + # a bit gross but use the template ordering to validate missing resources + try: + order_resources( + transformed_template["Resources"], + resolved_parameters=resolved_parameters, + resolved_conditions=resolved_stack_conditions, + ) + except NoResourceInStack as e: + raise ValidationError(str(e)) from e + + deployer = template_deployer.TemplateDeployer( + context.account_id, context.region, change_set + ) + changes = deployer.construct_changes( + stack, + change_set, + change_set_id=change_set.change_set_id, + append_to_changeset=True, + filter_unchanged_resources=True, + ) + stack.change_sets.append(change_set) + if not changes: + change_set.metadata["Status"] = "FAILED" + change_set.metadata["ExecutionStatus"] = "UNAVAILABLE" + change_set.metadata["StatusReason"] = ( + "The submitted information didn't contain changes. Submit different information to create a change set." + ) + else: + change_set.metadata["Status"] = ( + "CREATE_COMPLETE" # technically for some time this should first be CREATE_PENDING + ) + change_set.metadata["ExecutionStatus"] = ( + "AVAILABLE" # technically for some time this should first be UNAVAILABLE + ) + + return CreateChangeSetOutput(StackId=change_set.stack_id, Id=change_set.change_set_id) + + @handler("DescribeChangeSet") + def describe_change_set( + self, + context: RequestContext, + change_set_name: ChangeSetNameOrId, + stack_name: StackNameOrId = None, + next_token: NextToken = None, + include_property_values: IncludePropertyValues = None, + **kwargs, + ) -> DescribeChangeSetOutput: + # TODO add support for include_property_values + # only relevant if change_set_name isn't an ARN + if not ARN_CHANGESET_REGEX.match(change_set_name): + if not stack_name: + raise ValidationError( + "StackName must be specified if ChangeSetName is not specified as an ARN." 
+ ) + + stack = find_stack(context.account_id, context.region, stack_name) + if not stack: + raise ValidationError(f"Stack [{stack_name}] does not exist") + + change_set = find_change_set( + context.account_id, context.region, change_set_name, stack_name=stack_name + ) + if not change_set: + raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist") + + attrs = [ + "ChangeSetType", + "StackStatus", + "LastUpdatedTime", + "DisableRollback", + "EnableTerminationProtection", + "Transform", + ] + result = remove_attributes(deepcopy(change_set.metadata), attrs) + # TODO: replace this patch with a better solution + result["Parameters"] = [ + mask_no_echo(strip_parameter_type(p)) for p in result.get("Parameters", []) + ] + return result + + @handler("DeleteChangeSet") + def delete_change_set( + self, + context: RequestContext, + change_set_name: ChangeSetNameOrId, + stack_name: StackNameOrId = None, + **kwargs, + ) -> DeleteChangeSetOutput: + # only relevant if change_set_name isn't an ARN + if not ARN_CHANGESET_REGEX.match(change_set_name): + if not stack_name: + raise ValidationError( + "StackName must be specified if ChangeSetName is not specified as an ARN." + ) + + stack = find_stack(context.account_id, context.region, stack_name) + if not stack: + raise ValidationError(f"Stack [{stack_name}] does not exist") + + change_set = find_change_set( + context.account_id, context.region, change_set_name, stack_name=stack_name + ) + if not change_set: + raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist") + change_set.stack.change_sets = [ + cs + for cs in change_set.stack.change_sets + if change_set_name not in (cs.change_set_name, cs.change_set_id) + ] + return DeleteChangeSetOutput() + + @handler("ExecuteChangeSet") + def execute_change_set( + self, + context: RequestContext, + change_set_name: ChangeSetNameOrId, + stack_name: StackNameOrId = None, + client_request_token: ClientRequestToken = None, + disable_rollback: DisableRollback = None, + retain_except_on_create: RetainExceptOnCreate = None, + **kwargs, + ) -> ExecuteChangeSetOutput: + change_set = find_change_set( + context.account_id, + context.region, + change_set_name, + stack_name=stack_name, + active_only=True, + ) + if not change_set: + raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist") + if change_set.metadata.get("ExecutionStatus") != ExecutionStatus.AVAILABLE: + LOG.debug("Change set %s not in execution status 'AVAILABLE'", change_set_name) + raise InvalidChangeSetStatusException( + f"ChangeSet [{change_set.metadata['ChangeSetId']}] cannot be executed in its current status of [{change_set.metadata.get('Status')}]" + ) + stack_name = change_set.stack.stack_name + LOG.debug( + 'Executing change set "%s" for stack "%s" with %s resources ...', + change_set_name, + stack_name, + len(change_set.template_resources), + ) + deployer = template_deployer.TemplateDeployer( + context.account_id, context.region, change_set.stack + ) + try: + deployer.apply_change_set(change_set) + change_set.stack.metadata["ChangeSetId"] = change_set.change_set_id + except NoStackUpdates: + # TODO: parity-check if this exception should be re-raised or swallowed + raise ValidationError("No updates to be performed for stack change set") + + return ExecuteChangeSetOutput() + + @handler("ListChangeSets") + def list_change_sets( + self, + context: RequestContext, + stack_name: StackNameOrId, + next_token: NextToken = None, + **kwargs, + ) -> ListChangeSetsOutput: + stack = 
find_stack(context.account_id, context.region, stack_name) + if not stack: + return not_found_error(f'Unable to find stack "{stack_name}"') + result = [cs.metadata for cs in stack.change_sets] + return ListChangeSetsOutput(Summaries=result) + + @handler("ListExports") + def list_exports( + self, context: RequestContext, next_token: NextToken = None, **kwargs + ) -> ListExportsOutput: + state = get_cloudformation_store(context.account_id, context.region) + return ListExportsOutput(Exports=state.exports) + + @handler("ListImports") + def list_imports( + self, + context: RequestContext, + export_name: ExportName, + next_token: NextToken = None, + **kwargs, + ) -> ListImportsOutput: + state = get_cloudformation_store(context.account_id, context.region) + + importing_stack_names = [] + for stack in state.stacks.values(): + if export_name in stack.imports: + importing_stack_names.append(stack.stack_name) + + return ListImportsOutput(Imports=importing_stack_names) + + @handler("DescribeStackEvents") + def describe_stack_events( + self, + context: RequestContext, + stack_name: StackName = None, + next_token: NextToken = None, + **kwargs, + ) -> DescribeStackEventsOutput: + if stack_name is None: + raise ValidationError( + "1 validation error detected: Value null at 'stackName' failed to satisfy constraint: Member must not be null" + ) + + stack = find_active_stack_by_name_or_id(context.account_id, context.region, stack_name) + if not stack: + stack = find_stack_by_id( + account_id=context.account_id, region_name=context.region, stack_id=stack_name + ) + if not stack: + raise ValidationError(f"Stack [{stack_name}] does not exist") + return DescribeStackEventsOutput(StackEvents=stack.events) + + @handler("DescribeStackResource") + def describe_stack_resource( + self, + context: RequestContext, + stack_name: StackName, + logical_resource_id: LogicalResourceId, + **kwargs, + ) -> DescribeStackResourceOutput: + stack = find_stack(context.account_id, context.region, stack_name) + + if not stack: + return stack_not_found_error(stack_name) + + try: + details = stack.resource_status(logical_resource_id) + except Exception as e: + if "Unable to find details" in str(e): + raise ValidationError( + f"Resource {logical_resource_id} does not exist for stack {stack_name}" + ) + raise + + return DescribeStackResourceOutput(StackResourceDetail=details) + + @handler("DescribeStackResources") + def describe_stack_resources( + self, + context: RequestContext, + stack_name: StackName = None, + logical_resource_id: LogicalResourceId = None, + physical_resource_id: PhysicalResourceId = None, + **kwargs, + ) -> DescribeStackResourcesOutput: + if physical_resource_id and stack_name: + raise ValidationError("Cannot specify both StackName and PhysicalResourceId") + # TODO: filter stack by PhysicalResourceId! 
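+        # NOTE: a lookup by PhysicalResourceId alone currently falls through to find_stack with stack_name=None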
+ stack = find_stack(context.account_id, context.region, stack_name) + if not stack: + return stack_not_found_error(stack_name) + statuses = [ + res_status + for res_id, res_status in stack.resource_states.items() + if logical_resource_id in [res_id, None] + ] + for status in statuses: + status.setdefault("DriftInformation", {"StackResourceDriftStatus": "NOT_CHECKED"}) + return DescribeStackResourcesOutput(StackResources=statuses) + + @handler("ListStackResources") + def list_stack_resources( + self, context: RequestContext, stack_name: StackName, next_token: NextToken = None, **kwargs + ) -> ListStackResourcesOutput: + result = self.describe_stack_resources(context, stack_name) + + resources = deepcopy(result.get("StackResources", [])) + for resource in resources: + attrs = ["StackName", "StackId", "Timestamp", "PreviousResourceStatus"] + remove_attributes(resource, attrs) + + return ListStackResourcesOutput(StackResourceSummaries=resources) + + @handler("ValidateTemplate", expand=False) + def validate_template( + self, context: RequestContext, request: ValidateTemplateInput + ) -> ValidateTemplateOutput: + try: + # TODO implement actual validation logic + template_body = api_utils.get_template_body(request) + valid_template = json.loads(template_preparer.template_to_json(template_body)) + + parameters = [ + TemplateParameter( + ParameterKey=k, + DefaultValue=v.get("Default", ""), + NoEcho=v.get("NoEcho", False), + Description=v.get("Description", ""), + ) + for k, v in valid_template.get("Parameters", {}).items() + ] + + return ValidateTemplateOutput( + Description=valid_template.get("Description"), Parameters=parameters + ) + except Exception as e: + LOG.exception("Error validating template") + raise ValidationError("Template Validation Error") from e + + # ======================================= + # ============= Stack Set ============= + # ======================================= + + @handler("CreateStackSet", expand=False) + def create_stack_set( + self, context: RequestContext, request: CreateStackSetInput + ) -> CreateStackSetOutput: + state = get_cloudformation_store(context.account_id, context.region) + stack_set = StackSet(request) + stack_set_id = f"{stack_set.stack_set_name}:{long_uid()}" + stack_set.metadata["StackSetId"] = stack_set_id + state.stack_sets[stack_set_id] = stack_set + + return CreateStackSetOutput(StackSetId=stack_set_id) + + @handler("DescribeStackSetOperation") + def describe_stack_set_operation( + self, + context: RequestContext, + stack_set_name: StackSetName, + operation_id: ClientRequestToken, + call_as: CallAs = None, + **kwargs, + ) -> DescribeStackSetOperationOutput: + state = get_cloudformation_store(context.account_id, context.region) + + set_name = stack_set_name + + stack_set = [sset for sset in state.stack_sets.values() if sset.stack_set_name == set_name] + if not stack_set: + return not_found_error(f'Unable to find stack set "{set_name}"') + stack_set = stack_set[0] + result = stack_set.operations.get(operation_id) + if not result: + LOG.debug( + 'Unable to find operation ID "%s" for stack set "%s" in list: %s', + operation_id, + set_name, + list(stack_set.operations.keys()), + ) + return not_found_error( + f'Unable to find operation ID "{operation_id}" for stack set "{set_name}"' + ) + + return DescribeStackSetOperationOutput(StackSetOperation=result) + + @handler("DescribeStackSet") + def describe_stack_set( + self, + context: RequestContext, + stack_set_name: StackSetName, + call_as: CallAs = None, + **kwargs, + ) -> DescribeStackSetOutput: + 
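# stack sets are stored by StackSetId, so scan the store for a matching name +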
state = get_cloudformation_store(context.account_id, context.region) + result = [ + sset.metadata + for sset in state.stack_sets.values() + if sset.stack_set_name == stack_set_name + ] + if not result: + return not_found_error(f'Unable to find stack set "{stack_set_name}"') + + return DescribeStackSetOutput(StackSet=result[0]) + + @handler("ListStackSets", expand=False) + def list_stack_sets( + self, context: RequestContext, request: ListStackSetsInput + ) -> ListStackSetsOutput: + state = get_cloudformation_store(context.account_id, context.region) + result = [sset.metadata for sset in state.stack_sets.values()] + return ListStackSetsOutput(Summaries=result) + + @handler("UpdateStackSet", expand=False) + def update_stack_set( + self, context: RequestContext, request: UpdateStackSetInput + ) -> UpdateStackSetOutput: + state = get_cloudformation_store(context.account_id, context.region) + set_name = request.get("StackSetName") + stack_set = [sset for sset in state.stack_sets.values() if sset.stack_set_name == set_name] + if not stack_set: + return not_found_error(f'Stack set named "{set_name}" does not exist') + stack_set = stack_set[0] + stack_set.metadata.update(request) + op_id = request.get("OperationId") or short_uid() + operation = { + "OperationId": op_id, + "StackSetId": stack_set.metadata["StackSetId"], + "Action": "UPDATE", + "Status": "SUCCEEDED", + } + stack_set.operations[op_id] = operation + return UpdateStackSetOutput(OperationId=op_id) + + @handler("DeleteStackSet") + def delete_stack_set( + self, + context: RequestContext, + stack_set_name: StackSetName, + call_as: CallAs = None, + **kwargs, + ) -> DeleteStackSetOutput: + state = get_cloudformation_store(context.account_id, context.region) + stack_set = [ + sset for sset in state.stack_sets.values() if sset.stack_set_name == stack_set_name + ] + + if not stack_set: + return not_found_error(f'Stack set named "{stack_set_name}" does not exist') + + # TODO: add a check for remaining stack instances + + for instance in stack_set[0].stack_instances: + deployer = template_deployer.TemplateDeployer( + context.account_id, context.region, instance.stack + ) + deployer.delete_stack() + return DeleteStackSetOutput() + + @handler("CreateStackInstances", expand=False) + def create_stack_instances( + self, + context: RequestContext, + request: CreateStackInstancesInput, + ) -> CreateStackInstancesOutput: + state = get_cloudformation_store(context.account_id, context.region) + + set_name = request.get("StackSetName") + stack_set = [sset for sset in state.stack_sets.values() if sset.stack_set_name == set_name] + + if not stack_set: + return not_found_error(f'Stack set named "{set_name}" does not exist') + + stack_set = stack_set[0] + op_id = request.get("OperationId") or short_uid() + sset_meta = stack_set.metadata + accounts = request["Accounts"] + regions = request["Regions"] + + stacks_to_await = [] + for account in accounts: + for region in regions: + # deploy new stack + LOG.debug( + 'Deploying instance for stack set "%s" in account: %s region %s', + set_name, + account, + region, + ) + cf_client = connect_to(aws_access_key_id=account, region_name=region).cloudformation + kwargs = select_attributes(sset_meta, ["TemplateBody"]) or select_attributes( + sset_meta, ["TemplateURL"] + ) + stack_name = f"sset-{set_name}-{account}" + + # skip creation of existing stacks + if find_stack(context.account_id, context.region, stack_name): + continue + + result = cf_client.create_stack(StackName=stack_name, **kwargs) + 
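# creation is kicked off for all instances first; completion is awaited in the loop below +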
stacks_to_await.append((stack_name, account, region)) + # store stack instance + instance = { + "StackSetId": sset_meta["StackSetId"], + "OperationId": op_id, + "Account": account, + "Region": region, + "StackId": result["StackId"], + "Status": "CURRENT", + "StackInstanceStatus": {"DetailedStatus": "SUCCEEDED"}, + } + instance = StackInstance(instance) + stack_set.stack_instances.append(instance) + + # wait for completion of stack + for stack_name, account_id, region_name in stacks_to_await: + client = connect_to( + aws_access_key_id=account_id, region_name=region_name + ).cloudformation + client.get_waiter("stack_create_complete").wait(StackName=stack_name) + + # record operation + operation = { + "OperationId": op_id, + "StackSetId": stack_set.metadata["StackSetId"], + "Action": "CREATE", + "Status": "SUCCEEDED", + } + stack_set.operations[op_id] = operation + + return CreateStackInstancesOutput(OperationId=op_id) + + @handler("ListStackInstances", expand=False) + def list_stack_instances( + self, + context: RequestContext, + request: ListStackInstancesInput, + ) -> ListStackInstancesOutput: + set_name = request.get("StackSetName") + state = get_cloudformation_store(context.account_id, context.region) + stack_set = [sset for sset in state.stack_sets.values() if sset.stack_set_name == set_name] + if not stack_set: + return not_found_error(f'Stack set named "{set_name}" does not exist') + + stack_set = stack_set[0] + result = [inst.metadata for inst in stack_set.stack_instances] + return ListStackInstancesOutput(Summaries=result) + + @handler("DeleteStackInstances", expand=False) + def delete_stack_instances( + self, + context: RequestContext, + request: DeleteStackInstancesInput, + ) -> DeleteStackInstancesOutput: + op_id = request.get("OperationId") or short_uid() + + accounts = request["Accounts"] + regions = request["Regions"] + + state = get_cloudformation_store(context.account_id, context.region) + stack_sets = state.stack_sets.values() + + set_name = request.get("StackSetName") + stack_set = next((sset for sset in stack_sets if sset.stack_set_name == set_name), None) + + if not stack_set: + return not_found_error(f'Stack set named "{set_name}" does not exist') + + for account in accounts: + for region in regions: + instance = find_stack_instance(stack_set, account, region) + if instance: + stack_set.stack_instances.remove(instance) + + # record operation + operation = { + "OperationId": op_id, + "StackSetId": stack_set.metadata["StackSetId"], + "Action": "DELETE", + "Status": "SUCCEEDED", + } + stack_set.operations[op_id] = operation + + return DeleteStackInstancesOutput(OperationId=op_id) + + @handler("RegisterType", expand=False) + def register_type( + self, + context: RequestContext, + request: RegisterTypeInput, + ) -> RegisterTypeOutput: + return RegisterTypeOutput() + + def list_types( + self, context: RequestContext, request: ListTypesInput, **kwargs + ) -> ListTypesOutput: + def is_list_overridden(child_class, parent_class): + if hasattr(child_class, "list"): + import inspect + + child_method = child_class.list + parent_method = parent_class.list + return inspect.unwrap(child_method) is not inspect.unwrap(parent_method) + return False + + def get_listable_types_summaries(plugin_manager): + plugins = plugin_manager.list_names() + type_summaries = [] + for plugin in plugins: + type_summary = TypeSummary( + Type=RegistryType.RESOURCE, + TypeName=plugin, + ) + provider = plugin_manager.load(plugin) + if is_list_overridden(provider.factory, ResourceProvider): + 
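# only advertise resource types whose provider actually overrides list() +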
type_summaries.append(type_summary) + return type_summaries + + from localstack.services.cloudformation.resource_provider import ( + plugin_manager, + ) + + type_summaries = get_listable_types_summaries(plugin_manager) + if PRO_RESOURCE_PROVIDERS: + from localstack.services.cloudformation.resource_provider import ( + pro_plugin_manager, + ) + + type_summaries.extend(get_listable_types_summaries(pro_plugin_manager)) + + return ListTypesOutput(TypeSummaries=type_summaries) diff --git a/localstack-core/localstack/services/cloudformation/provider_utils.py b/localstack-core/localstack/services/cloudformation/provider_utils.py new file mode 100644 index 0000000000000..d7e3eb49b79f2 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/provider_utils.py @@ -0,0 +1,252 @@ +""" +A set of utils for use in resource providers. + +Avoid any imports to localstack here and keep external imports to a minimum! +This is because we want to be able to package a resource provider without including localstack code. +""" + +import builtins +import json +import re +import uuid +from copy import deepcopy +from pathlib import Path +from typing import Callable, List, Optional + +from botocore.model import Shape, StructureShape + + +def generate_default_name(stack_name: str, logical_resource_id: str): + random_id_part = str(uuid.uuid4())[0:8] + resource_id_part = logical_resource_id[:24] + stack_name_part = stack_name[: 63 - 2 - (len(random_id_part) + len(resource_id_part))] + return f"{stack_name_part}-{resource_id_part}-{random_id_part}" + + +def generate_default_name_without_stack(logical_resource_id: str): + random_id_part = str(uuid.uuid4())[0:8] + resource_id_part = logical_resource_id[: 63 - 1 - len(random_id_part)] + return f"{resource_id_part}-{random_id_part}" + + +# ========= Helpers for boto calls ========== +# (equivalent to the old ones in deployment_utils.py) + + +def deselect_attributes(model: dict, params: list[str]) -> dict: + return {k: v for k, v in model.items() if k not in params} + + +def select_attributes(model: dict, params: list[str]) -> dict: + return {k: v for k, v in model.items() if k in params} + + +def keys_lower(model: dict) -> dict: + return {k.lower(): v for k, v in model.items()} + + +def convert_pascalcase_to_lower_camelcase(item: str) -> str: + if len(item) <= 1: + return item.lower() + else: + return f"{item[0].lower()}{item[1:]}" + + +def convert_lower_camelcase_to_pascalcase(item: str) -> str: + if len(item) <= 1: + return item.upper() + else: + return f"{item[0].upper()}{item[1:]}" + + +def _recurse_properties(obj: dict | list, fn: Callable) -> dict | list: + obj = fn(obj) + if isinstance(obj, dict): + return {k: _recurse_properties(v, fn) for k, v in obj.items()} + elif isinstance(obj, list): + return [_recurse_properties(v, fn) for v in obj] + else: + return obj + + +def recurse_properties(properties: dict, fn: Callable) -> dict: + return _recurse_properties(deepcopy(properties), fn) + + +def keys_pascalcase_to_lower_camelcase(model: dict) -> dict: + """Recursively change any dicts keys to lower camelcase""" + + def _keys_pascalcase_to_lower_camelcase(obj): + if isinstance(obj, dict): + return {convert_pascalcase_to_lower_camelcase(k): v for k, v in obj.items()} + else: + return obj + + return _recurse_properties(model, _keys_pascalcase_to_lower_camelcase) + + +def keys_lower_camelcase_to_pascalcase(model: dict) -> dict: + """Recursively change any dicts keys to PascalCase""" + + def _keys_lower_camelcase_to_pascalcase(obj): + if isinstance(obj, dict): + 
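# only the keys are rewritten here; nested values are handled by _recurse_properties +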
return {convert_lower_camelcase_to_pascalcase(k): v for k, v in obj.items()} + else: + return obj + + return _recurse_properties(model, _keys_lower_camelcase_to_pascalcase) + + +def transform_list_to_dict(param, key_attr_name="Key", value_attr_name="Value"): + result = {} + for entry in param: + key = entry[key_attr_name] + value = entry[value_attr_name] + result[key] = value + return result + + +def remove_none_values(obj): + """Remove None values (recursively) in the given object.""" + if isinstance(obj, dict): + return {k: remove_none_values(v) for k, v in obj.items() if v is not None} + elif isinstance(obj, list): + return [o for o in obj if o is not None] + else: + return obj + + +# FIXME: this shouldn't be necessary in the future +param_validation = re.compile( + r"Invalid type for parameter (?P<param_name>[\w.]+), value: (?P<value>\w+), type: <class '(?P<wrong_class>\w+)'>, valid types: <class '(?P<valid_class>\w+)'>" +) + + +def get_nested(obj: dict, path: str): + parts = path.split(".") + result = obj + for p in parts[:-1]: + result = result.get(p, {}) + return result.get(parts[-1]) + + +def set_nested(obj: dict, path: str, value): + parts = path.split(".") + result = obj + for p in parts[:-1]: + result = result.get(p, {}) + result[parts[-1]] = value + + +def fix_boto_parameters_based_on_report(original_params: dict, report: str) -> dict: + """ + Fix invalid type parameter validation errors in boto request parameters + + :param original_params: original boto request parameters that led to the parameter validation error + :param report: error report from botocore ParamValidator + :return: a copy of original_params with all values replaced by their correctly cast ones + """ + params = deepcopy(original_params) + for found in param_validation.findall(report): + param_name, value, wrong_class, valid_class = found + cast_class = getattr(builtins, valid_class) + old_value = get_nested(params, param_name) + + if cast_class == bool and str(old_value).lower() in ["true", "false"]: + new_value = str(old_value).lower() == "true" + else: + new_value = cast_class(old_value) + set_nested(params, param_name, new_value) + return params + + +def convert_request_kwargs(parameters: dict, input_shape: StructureShape) -> dict: + """ + Transform a dict of request kwargs for a boto3 request by making sure the keys in the structure recursively conform to the specified input shape. + :param parameters: the kwargs that would be passed to the boto3 client call, e.g.
boto3.client("s3").create_bucket(**parameters) + :param input_shape: The botocore input shape of the operation that you want to call later with the fixed inputs + :return: a transformed dictionary with the correct casing recursively applied + """ + + def get_fixed_key(key: str, members: dict[str, Shape]) -> str: + """return the case-insensitively matched key from the shape or default to the current key""" + for k in members: + if k.lower() == key.lower(): + return k + return key + + def transform_value(value, member_shape): + if isinstance(value, dict) and hasattr(member_shape, "members"): + return convert_request_kwargs(value, member_shape) + elif isinstance(value, list) and hasattr(member_shape, "member"): + return [transform_value(item, member_shape.member) for item in value] + + # fix the typing of the value + match member_shape.type_name: + case "string": + return str(value) + case "integer" | "long": + return int(value) + case "boolean": + if isinstance(value, bool): + return value + return True if value.lower() == "true" else False + case _: + return value + + transformed_dict = {} + for key, value in parameters.items(): + correct_key = get_fixed_key(key, input_shape.members) + member_shape = input_shape.members.get(correct_key) + + if member_shape is None: + continue # skipping this entry, so it's not included in the transformed dict + elif isinstance(value, dict) and hasattr(member_shape, "members"): + transformed_dict[correct_key] = convert_request_kwargs(value, member_shape) + elif isinstance(value, list) and hasattr(member_shape, "member"): + transformed_dict[correct_key] = [ + transform_value(item, member_shape.member) for item in value + ] + else: + transformed_dict[correct_key] = transform_value(value, member_shape) + + return transformed_dict + + +def convert_values_to_numbers(input_dict: dict, keys_to_skip: Optional[List[str]] = None): + """ + Recursively converts all string values that represent valid integers + in a dictionary (including nested dictionaries and lists) to integers. 
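+ + Note: as implemented below, only strings for which str.isdigit() is true are converted, so values like "1.5" or "-7" are left unchanged.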
+ + Example: + original_dict = {'Gid': '1322', 'SecondaryGids': ['1344', '1452'], 'Uid': '13234'} + output_dict = {'Gid': 1322, 'SecondaryGids': [1344, 1452], 'Uid': 13234} + + :param input_dict: input dict with values to convert + :param keys_to_skip: keys whose values are not meant to be converted + :return: output_dict + """ + + keys_to_skip = keys_to_skip or [] + + def recursive_convert(obj): + if isinstance(obj, dict): + return { + key: recursive_convert(value) if key not in keys_to_skip else value + for key, value in obj.items() + } + elif isinstance(obj, list): + return [recursive_convert(item) for item in obj] + elif isinstance(obj, str) and obj.isdigit(): + return int(obj) + else: + return obj + + return recursive_convert(input_dict) + + +# LocalStack specific utilities +def get_schema_path(file_path: Path) -> dict: + file_name_base = file_path.name.removesuffix(".py").removesuffix(".py.enc") + with Path(file_path).parent.joinpath(f"{file_name_base}.schema.json").open() as fd: + return json.load(fd) diff --git a/localstack-core/localstack/services/cloudformation/resource_provider.py b/localstack-core/localstack/services/cloudformation/resource_provider.py new file mode 100644 index 0000000000000..7e48ed8ca5703 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_provider.py @@ -0,0 +1,644 @@ +from __future__ import annotations + +import copy +import logging +import re +import time +import uuid +from dataclasses import dataclass, field +from enum import Enum, auto +from logging import Logger +from math import ceil +from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, Type, TypedDict, TypeVar + +import botocore +from botocore.client import BaseClient +from botocore.exceptions import ClientError +from botocore.model import OperationModel +from plux import Plugin, PluginManager + +from localstack import config +from localstack.aws.connect import InternalClientFactory, ServiceLevelClientFactory +from localstack.services.cloudformation import usage +from localstack.services.cloudformation.deployment_utils import ( + check_not_found_exception, + convert_data_types, + fix_account_id_in_arns, + fix_boto_parameters_based_on_report, + log_not_available_message, + remove_none_values, +) +from localstack.services.cloudformation.engine.quirks import PHYSICAL_RESOURCE_ID_SPECIAL_CASES +from localstack.services.cloudformation.provider_utils import convert_request_kwargs +from localstack.services.cloudformation.service_models import KEY_RESOURCE_STATE + +PRO_RESOURCE_PROVIDERS = False +try: + from localstack.pro.core.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPluginExt, + ) + + PRO_RESOURCE_PROVIDERS = True +except ImportError: + pass + +if TYPE_CHECKING: + from localstack.services.cloudformation.engine.types import ( + FuncDetails, + FuncDetailsValue, + ResourceDefinition, + ) + +LOG = logging.getLogger(__name__) + +Properties = TypeVar("Properties") + +PUBLIC_REGISTRY: dict[str, Type[ResourceProvider]] = {} + +PROVIDER_DEFAULTS = {} # TODO: remove this after removing patching in -ext + + +class OperationStatus(Enum): + PENDING = auto() + IN_PROGRESS = auto() + SUCCESS = auto() + FAILED = auto() + + +@dataclass +class ProgressEvent(Generic[Properties]): + status: OperationStatus + resource_model: Optional[Properties] = None + resource_models: Optional[list[Properties]] = None + + message: str = "" + result: Optional[str] = None + error_code: Optional[str] = None # TODO: enum + custom_context: dict =
field(default_factory=dict) + + +class Credentials(TypedDict): + accessKeyId: str + secretAccessKey: str + sessionToken: str + + +class ResourceProviderPayloadRequestData(TypedDict): + logicalResourceId: str + resourceProperties: Properties + previousResourceProperties: Optional[Properties] + callerCredentials: Credentials + providerCredentials: Credentials + systemTags: dict[str, str] + previousSystemTags: dict[str, str] + stackTags: dict[str, str] + previousStackTags: dict[str, str] + + +class ResourceProviderPayload(TypedDict): + callbackContext: dict + stackId: str + requestData: ResourceProviderPayloadRequestData + resourceType: str + resourceTypeVersion: str + awsAccountId: str + bearerToken: str + region: str + action: str + + +ResourceProperties = TypeVar("ResourceProperties") + + +def _handler_provide_client_params(event_name: str, params: dict, model: OperationModel, **kwargs): + """ + A botocore hook handler that will try to convert the passed parameters according to the given operation model + """ + return convert_request_kwargs(params, model.input_shape) + + +class ConvertingInternalClientFactory(InternalClientFactory): + def _get_client_post_hook(self, client: BaseClient) -> BaseClient: + """ + Register handlers that modify the passed properties to make them compatible with the API structure + """ + + client.meta.events.register( + "provide-client-params.*.*", handler=_handler_provide_client_params + ) + + return super()._get_client_post_hook(client) + + +_cfn_resource_client_factory = ConvertingInternalClientFactory(use_ssl=config.DISTRIBUTED_MODE) + + +def convert_payload( + stack_name: str, stack_id: str, payload: ResourceProviderPayload +) -> ResourceRequest[Properties]: + client_factory = _cfn_resource_client_factory( + aws_access_key_id=payload["requestData"]["callerCredentials"]["accessKeyId"], + aws_session_token=payload["requestData"]["callerCredentials"]["sessionToken"], + aws_secret_access_key=payload["requestData"]["callerCredentials"]["secretAccessKey"], + region_name=payload["region"], + ) + desired_state = payload["requestData"]["resourceProperties"] + rr = ResourceRequest( + _original_payload=desired_state, + aws_client_factory=client_factory, + request_token=str(uuid.uuid4()), # TODO: not actually a UUID + stack_name=stack_name, + stack_id=stack_id, + account_id=payload["awsAccountId"], + region_name=payload["region"], + desired_state=desired_state, + logical_resource_id=payload["requestData"]["logicalResourceId"], + resource_type=payload["resourceType"], + logger=logging.getLogger("abc"), + custom_context=payload["callbackContext"], + action=payload["action"], + ) + + if previous_properties := payload["requestData"].get("previousResourceProperties"): + rr.previous_state = previous_properties + + return rr + + +@dataclass +class ResourceRequest(Generic[Properties]): + _original_payload: Properties + + aws_client_factory: ServiceLevelClientFactory + request_token: str + stack_name: str + stack_id: str + account_id: str + region_name: str + action: str + + desired_state: Properties + + logical_resource_id: str + resource_type: str + + logger: Logger + + custom_context: dict = field(default_factory=dict) + + previous_state: Optional[Properties] = None + previous_tags: Optional[dict[str, str]] = None + tags: dict[str, str] = field(default_factory=dict) + + +class CloudFormationResourceProviderPlugin(Plugin): + """ + Base class for resource provider plugins. 
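+ + Concrete plugins (see e.g. CloudFormationMacroProviderPlugin further below) set name to the CloudFormation type name and point self.factory to the provider class inside load(), so that a PluginManager can lazily resolve a type name such as "AWS::CloudFormation::Macro" to its ResourceProvider class.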
+ """ + + namespace = "localstack.cloudformation.resource_providers" + + +class ResourceProvider(Generic[Properties]): + """ + This provides a base class onto which service-specific resource providers are built. + """ + + SCHEMA: dict + + def create(self, request: ResourceRequest[Properties]) -> ProgressEvent[Properties]: + raise NotImplementedError + + def update(self, request: ResourceRequest[Properties]) -> ProgressEvent[Properties]: + raise NotImplementedError + + def delete(self, request: ResourceRequest[Properties]) -> ProgressEvent[Properties]: + raise NotImplementedError + + def read(self, request: ResourceRequest[Properties]) -> ProgressEvent[Properties]: + raise NotImplementedError + + def list(self, request: ResourceRequest[Properties]) -> ProgressEvent[Properties]: + raise NotImplementedError + + +# legacy helpers +def get_resource_type(resource: dict) -> str: + """this is currently overwritten in PRO to add support for custom resources""" + if isinstance(resource, str): + raise ValueError(f"Invalid argument: {resource}") + try: + resource_type: str = resource["Type"] + + if resource_type.startswith("Custom::"): + return "AWS::CloudFormation::CustomResource" + return resource_type + except Exception: + LOG.warning( + "Failed to retrieve resource type %s", + resource.get("Type"), + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + +def invoke_function( + account_id: str, + region_name: str, + function: Callable, + params: dict, + resource_type: str, + func_details: FuncDetails, + action_name: str, + resource: Any, +) -> Any: + try: + LOG.debug( + 'Request for resource type "%s" in account %s region %s: %s %s', + resource_type, + account_id, + region_name, + func_details["function"], + params, + ) + try: + result = function(**params) + except botocore.exceptions.ParamValidationError as e: + # alternatively we could also use the ParamValidator directly + report = e.kwargs.get("report") + if not report: + raise + + LOG.debug("Converting parameters to allowed types") + LOG.debug("Report: %s", report) + converted_params = fix_boto_parameters_based_on_report(params, report) + LOG.debug("Original parameters: %s", params) + LOG.debug("Converted parameters: %s", converted_params) + + result = function(**converted_params) + except Exception as e: + if action_name == "Remove" and check_not_found_exception(e, resource_type, resource): + return + log_method = LOG.warning + if config.CFN_VERBOSE_ERRORS: + log_method = LOG.exception + log_method("Error calling %s with params: %s for resource: %s", function, params, resource) + raise e + + return result + + +def get_service_name(resource): + res_type = resource["Type"] + parts = res_type.split("::") + if len(parts) == 1: + return None + if "Cognito::IdentityPool" in res_type: + return "cognito-identity" + if res_type.endswith("Cognito::UserPool"): + return "cognito-idp" + if parts[-2] == "Cognito": + return "cognito-idp" + if parts[-2] == "Elasticsearch": + return "es" + if parts[-2] == "OpenSearchService": + return "opensearch" + if parts[-2] == "KinesisFirehose": + return "firehose" + if parts[-2] == "ResourceGroups": + return "resource-groups" + if parts[-2] == "CertificateManager": + return "acm" + if "ElasticLoadBalancing::" in res_type: + return "elb" + if "ElasticLoadBalancingV2::" in res_type: + return "elbv2" + if "ApplicationAutoScaling::" in res_type: + return "application-autoscaling" + if "MSK::" in res_type: + return "kafka" + if "Timestream::" in res_type: + return "timestream-write" + return parts[1].lower() + + +def 
resolve_resource_parameters( + account_id_: str, + region_name_: str, + stack_name: str, + resource_definition: ResourceDefinition, + resources: dict[str, ResourceDefinition], + resource_id: str, + func_details: FuncDetailsValue, +) -> dict | None: + params = func_details.get("parameters") or ( + lambda account_id, region_name, properties, logical_resource_id, *args, **kwargs: properties + ) + resource_props = resource_definition["Properties"] = resource_definition.get("Properties", {}) + resource_props = dict(resource_props) + resource_state = resource_definition.get(KEY_RESOURCE_STATE, {}) + last_deployed_state = resource_definition.get("_last_deployed_state", {}) + + if callable(params): + # resolve parameter map via custom function + params = params( + account_id_, region_name_, resource_props, resource_id, resource_definition, stack_name + ) + else: + # it could be a list like ['param1', 'param2', {'apiCallParamName': 'cfResourcePropName'}] + if isinstance(params, list): + _params = {} + for param in params: + if isinstance(param, dict): + _params.update(param) + else: + _params[param] = param + params = _params + + params = dict(params) + # TODO(srw): mutably mapping params :( + for param_key, prop_keys in dict(params).items(): + params.pop(param_key, None) + if not isinstance(prop_keys, list): + prop_keys = [prop_keys] + for prop_key in prop_keys: + if callable(prop_key): + prop_value = prop_key( + account_id_, + region_name_, + resource_props, + resource_id, + resource_definition, + stack_name, + ) + else: + prop_value = resource_props.get( + prop_key, + resource_definition.get( + prop_key, + resource_state.get(prop_key, last_deployed_state.get(prop_key)), + ), + ) + if prop_value is not None: + params[param_key] = prop_value + break + + # this is an indicator that we should skip this resource deployment, and return + if params is None: + return + + # FIXME: move this to a single place after template processing is finished + # convert any moto account IDs (123456789012) in ARNs to our format (000000000000) + params = fix_account_id_in_arns(params, account_id_) + # convert data types (e.g., boolean strings to bool) + # TODO: this might not be needed anymore + params = convert_data_types(func_details.get("types", {}), params) + # remove None values, as they usually raise boto3 errors + params = remove_none_values(params) + + return params + + +class NoResourceProvider(Exception): + pass + + +def resolve_json_pointer(resource_props: Properties, primary_id_path: str) -> str: + primary_id_path = primary_id_path.replace("/properties", "") + parts = [p for p in primary_id_path.split("/") if p] + + resolved_part = resource_props.copy() + for i in range(len(parts)): + part = parts[i] + resolved_part = resolved_part.get(part) + if i == len(parts) - 1: + # last part + return resolved_part + + raise Exception(f"Resource properties is missing field: {part}") + + +class ResourceProviderExecutor: + """ + Point of abstraction between our integration with generic base models, and the new providers. 
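+ + A rough usage sketch (names as defined in this module; the real call site in the deployment engine may differ): + + executor = ResourceProviderExecutor(stack_name=stack_name, stack_id=stack_id) + provider = executor.try_load_resource_provider(payload["resourceType"]) + if provider is not None: + event = executor.deploy_loop(provider, resource, payload)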
+ """ + + def __init__( + self, + *, + stack_name: str, + stack_id: str, + ): + self.stack_name = stack_name + self.stack_id = stack_id + + def deploy_loop( + self, + resource_provider: ResourceProvider, + resource: dict, + raw_payload: ResourceProviderPayload, + max_timeout: int = config.CFN_PER_RESOURCE_TIMEOUT, + sleep_time: float = 5, + ) -> ProgressEvent[Properties]: + payload = copy.deepcopy(raw_payload) + + max_iterations = max(ceil(max_timeout / sleep_time), 2) + + for current_iteration in range(max_iterations): + resource_type = get_resource_type({"Type": raw_payload["resourceType"]}) + resource["SpecifiedProperties"] = raw_payload["requestData"]["resourceProperties"] + + try: + event = self.execute_action(resource_provider, payload) + except ClientError: + LOG.error( + "client error invoking '%s' handler for resource '%s' (type '%s')", + raw_payload["action"], + raw_payload["requestData"]["logicalResourceId"], + resource_type, + ) + raise + + match event.status: + case OperationStatus.FAILED: + return event + case OperationStatus.SUCCESS: + if not hasattr(resource_provider, "SCHEMA"): + raise Exception( + "A ResourceProvider should always have a SCHEMA property defined." + ) + resource_type_schema = resource_provider.SCHEMA + physical_resource_id = self.extract_physical_resource_id_from_model_with_schema( + event.resource_model, + raw_payload["resourceType"], + resource_type_schema, + ) + + resource["PhysicalResourceId"] = physical_resource_id + resource["Properties"] = event.resource_model + resource["_last_deployed_state"] = copy.deepcopy(event.resource_model) + return event + case OperationStatus.IN_PROGRESS: + # update the shared state + context = {**payload["callbackContext"], **event.custom_context} + payload["callbackContext"] = context + payload["requestData"]["resourceProperties"] = event.resource_model + resource["Properties"] = event.resource_model + + if current_iteration == 0: + time.sleep(0) + else: + time.sleep(sleep_time) + case OperationStatus.PENDING: + # come back to this resource in another iteration + return event + case invalid_status: + raise ValueError( + f"Invalid OperationStatus ({invalid_status}) returned for resource {raw_payload['requestData']['logicalResourceId']} (type {raw_payload['resourceType']})" + ) + + else: + raise TimeoutError( + f"Resource deployment for resource {raw_payload['requestData']['logicalResourceId']} (type {raw_payload['resourceType']}) timed out." + ) + + def execute_action( + self, resource_provider: ResourceProvider, raw_payload: ResourceProviderPayload + ) -> ProgressEvent[Properties]: + change_type = raw_payload["action"] + request = convert_payload( + stack_name=self.stack_name, stack_id=self.stack_id, payload=raw_payload + ) + + match change_type: + case "Add": + return resource_provider.create(request) + case "Dynamic" | "Modify": + try: + return resource_provider.update(request) + except NotImplementedError: + LOG.warning( + 'Unable to update resource type "%s", id "%s"', + request.resource_type, + request.logical_resource_id, + ) + if request.previous_state is None: + # this is an issue with our update detection. We should never be in this state. 
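+ # Fallback: without a recorded previous state there is nothing to diff against, + # so the safest recovery is to re-dispatch this request as an initial create: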
request.action = "Add" + return resource_provider.create(request) + + return ProgressEvent( + status=OperationStatus.SUCCESS, resource_model=request.previous_state + ) + except Exception as e: + # FIXME: this fallback should be removed after fixing updates in general (order/dependencies) + # catch-all for any exception that looks like a not found exception + if check_not_found_exception(e, request.resource_type, request.desired_state): + return ProgressEvent( + status=OperationStatus.SUCCESS, resource_model=request.previous_state + ) + + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model={}, + message=f"Failed to update resource with id {request.logical_resource_id} of type {request.resource_type}", + ) + case "Remove": + try: + return resource_provider.delete(request) + except Exception as e: + # catch-all for any exception that looks like a not found exception + if check_not_found_exception(e, request.resource_type, request.desired_state): + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model={}, + message=f"Failed to delete resource with id {request.logical_resource_id} of type {request.resource_type}", + ) + case _: + raise NotImplementedError(change_type) # TODO: change error type + + @staticmethod + def try_load_resource_provider(resource_type: str) -> ResourceProvider | None: + # TODO: unify namespace of plugins + + # 1. try to load pro resource provider + # prioritise pro resource providers + if PRO_RESOURCE_PROVIDERS: + try: + plugin = pro_plugin_manager.load(resource_type) + return plugin.factory() + except ValueError: + # could not find a plugin for that name + pass + except Exception: + LOG.warning( + "Failed to load PRO resource type %s as a ResourceProvider.", + resource_type, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + # 2. try to load community resource provider + try: + plugin = plugin_manager.load(resource_type) + usage.resources.labels(resource_type=resource_type, missing=False).increment() + return plugin.factory() + except ValueError: + # could not find a plugin for that name + pass + except Exception: + if config.CFN_VERBOSE_ERRORS: + LOG.warning( + "Failed to load community resource type %s as a ResourceProvider.", + resource_type, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + # 3. we could not find the resource provider so log the missing resource provider + log_not_available_message( + resource_type, + f'No resource provider found for "{resource_type}"', + ) + + usage.resources.labels(resource_type=resource_type, missing=True).increment() + + if config.CFN_IGNORE_UNSUPPORTED_RESOURCE_TYPES: + # TODO: figure out a better way to handle non-implemented here? + return None + else: + raise NoResourceProvider + + def extract_physical_resource_id_from_model_with_schema( + self, resource_model: Properties, resource_type: str, resource_type_schema: dict + ) -> str: + if resource_type in PHYSICAL_RESOURCE_ID_SPECIAL_CASES: + primary_id_path = PHYSICAL_RESOURCE_ID_SPECIAL_CASES[resource_type] + + if "<" in primary_id_path: + # composite quirk, e.g.
something like MyRef|MyName + # try to extract parts + physical_resource_id = primary_id_path + find_results = re.findall("<([^>]+)>", primary_id_path) + for found_part in find_results: + resolved_part = resolve_json_pointer(resource_model, found_part) + physical_resource_id = physical_resource_id.replace( + f"<{found_part}>", resolved_part + ) + else: + physical_resource_id = resolve_json_pointer(resource_model, primary_id_path) + else: + primary_id_paths = resource_type_schema["primaryIdentifier"] + if len(primary_id_paths) > 1: + # TODO: auto-merge. Verify logic here with AWS + physical_resource_id = "-".join( + [resolve_json_pointer(resource_model, pip) for pip in primary_id_paths] + ) + else: + physical_resource_id = resolve_json_pointer(resource_model, primary_id_paths[0]) + + return physical_resource_id + + +plugin_manager = PluginManager(CloudFormationResourceProviderPlugin.namespace) +if PRO_RESOURCE_PROVIDERS: + pro_plugin_manager = PluginManager(CloudFormationResourceProviderPluginExt.namespace) diff --git a/localstack/services/resourcegroups/__init__.py b/localstack-core/localstack/services/cloudformation/resource_providers/__init__.py similarity index 100% rename from localstack/services/resourcegroups/__init__.py rename to localstack-core/localstack/services/cloudformation/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro.py b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro.py new file mode 100644 index 0000000000000..8f17b3d36368e --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro.py @@ -0,0 +1,102 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.services.cloudformation.stores import get_cloudformation_store + + +class CloudFormationMacroProperties(TypedDict): + FunctionName: Optional[str] + Name: Optional[str] + Description: Optional[str] + Id: Optional[str] + LogGroupName: Optional[str] + LogRoleARN: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class CloudFormationMacroProvider(ResourceProvider[CloudFormationMacroProperties]): + TYPE = "AWS::CloudFormation::Macro" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[CloudFormationMacroProperties], + ) -> ProgressEvent[CloudFormationMacroProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - FunctionName + - Name + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + + # TODO: fix or validate that we want to keep this here. 
+ # AWS::CloudFormation:: resources need special handling since they seem to require access to internal APIs + store = get_cloudformation_store(request.account_id, request.region_name) + store.macros[model["Name"]] = model + model["Id"] = model["Name"] + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[CloudFormationMacroProperties], + ) -> ProgressEvent[CloudFormationMacroProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[CloudFormationMacroProperties], + ) -> ProgressEvent[CloudFormationMacroProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + + store = get_cloudformation_store(request.account_id, request.region_name) + store.macros.pop(model["Name"], None) + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def update( + self, + request: ResourceRequest[CloudFormationMacroProperties], + ) -> ProgressEvent[CloudFormationMacroProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro.schema.json b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro.schema.json new file mode 100644 index 0000000000000..a04056992eb09 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro.schema.json @@ -0,0 +1,38 @@ +{ + "typeName": "AWS::CloudFormation::Macro", + "description": "Resource Type definition for AWS::CloudFormation::Macro", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "FunctionName": { + "type": "string" + }, + "LogGroupName": { + "type": "string" + }, + "LogRoleARN": { + "type": "string" + }, + "Name": { + "type": "string" + } + }, + "required": [ + "FunctionName", + "Name" + ], + "createOnlyProperties": [ + "/properties/Name" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro_plugin.py b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro_plugin.py new file mode 100644 index 0000000000000..9c6572792fc21 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_macro_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class CloudFormationMacroProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::CloudFormation::Macro" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.cloudformation.resource_providers.aws_cloudformation_macro import ( + CloudFormationMacroProvider, + ) + + self.factory = CloudFormationMacroProvider diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack.py b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack.py new file mode 100644 index 0000000000000..b30c629682cc6 --- /dev/null +++ 
b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack.py @@ -0,0 +1,220 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class CloudFormationStackProperties(TypedDict): + TemplateURL: Optional[str] + Id: Optional[str] + NotificationARNs: Optional[list[str]] + Parameters: Optional[dict] + Tags: Optional[list[Tag]] + TimeoutInMinutes: Optional[int] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class CloudFormationStackProvider(ResourceProvider[CloudFormationStackProperties]): + TYPE = "AWS::CloudFormation::Stack" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[CloudFormationStackProperties], + ) -> ProgressEvent[CloudFormationStackProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - TemplateURL + + + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + + # TODO: validations + + if not request.custom_context.get(REPEATED_INVOCATION): + if not model.get("StackName"): + model["StackName"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + create_params = util.select_attributes( + model, + [ + "StackName", + "Parameters", + "NotificationARNs", + "TemplateURL", + "TimeoutInMinutes", + "Tags", + ], + ) + + create_params["Capabilities"] = [ + "CAPABILITY_IAM", + "CAPABILITY_NAMED_IAM", + "CAPABILITY_AUTO_EXPAND", + ] + + create_params["Parameters"] = [ + { + "ParameterKey": k, + "ParameterValue": str(v).lower() if isinstance(v, bool) else str(v), + } + for k, v in create_params.get("Parameters", {}).items() + ] + + result = request.aws_client_factory.cloudformation.create_stack(**create_params) + model["Id"] = result["StackId"] + + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + stack = request.aws_client_factory.cloudformation.describe_stacks(StackName=model["Id"])[ + "Stacks" + ][0] + match stack["StackStatus"]: + case "CREATE_COMPLETE": + # only store nested stack outputs when we know the deploy has completed + model["Outputs"] = { + o["OutputKey"]: o["OutputValue"] for o in stack.get("Outputs", []) + } + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + case "CREATE_IN_PROGRESS": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + case "CREATE_FAILED": + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + custom_context=request.custom_context, + ) + case _: + raise Exception(f"Unexpected status: {stack['StackStatus']}") + + def read( + self, + request: ResourceRequest[CloudFormationStackProperties], + ) -> ProgressEvent[CloudFormationStackProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: 
ResourceRequest[CloudFormationStackProperties], + ) -> ProgressEvent[CloudFormationStackProperties]: + """ + Delete a resource + """ + + model = request.desired_state + if not request.custom_context.get(REPEATED_INVOCATION): + request.aws_client_factory.cloudformation.delete_stack(StackName=model["Id"]) + + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + try: + stack = request.aws_client_factory.cloudformation.describe_stacks( + StackName=model["Id"] + )["Stacks"][0] + except Exception as e: + if "does not exist" in str(e): + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + raise e + + match stack["StackStatus"]: + case "DELETE_COMPLETE": + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + case "DELETE_IN_PROGRESS": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + case "DELETE_FAILED": + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + custom_context=request.custom_context, + ) + case _: + raise Exception(f"Unexpected status: {stack['StackStatus']}") + + def update( + self, + request: ResourceRequest[CloudFormationStackProperties], + ) -> ProgressEvent[CloudFormationStackProperties]: + """ + Update a resource + + + """ + raise NotImplementedError + + def list( + self, + request: ResourceRequest[CloudFormationStackProperties], + ) -> ProgressEvent[CloudFormationStackProperties]: + resources = request.aws_client_factory.cloudformation.describe_stacks() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + CloudFormationStackProperties(Id=resource["StackId"]) + for resource in resources["Stacks"] + ], + ) diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack.schema.json b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack.schema.json new file mode 100644 index 0000000000000..a26835e77ba10 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack.schema.json @@ -0,0 +1,65 @@ +{ + "typeName": "AWS::CloudFormation::Stack", + "description": "Resource Type definition for AWS::CloudFormation::Stack", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "NotificationARNs": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Parameters": { + "type": "object", + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "TemplateURL": { + "type": "string" + }, + "TimeoutInMinutes": { + "type": "integer" + } + }, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "required": [ + "TemplateURL" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack_plugin.py 
b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack_plugin.py new file mode 100644 index 0000000000000..9dc020a564aa4 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_stack_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class CloudFormationStackProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::CloudFormation::Stack" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.cloudformation.resource_providers.aws_cloudformation_stack import ( + CloudFormationStackProvider, + ) + + self.factory = CloudFormationStackProvider diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition.py b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition.py new file mode 100644 index 0000000000000..051c901e425d9 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition.py @@ -0,0 +1,83 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import uuid +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class CloudFormationWaitConditionProperties(TypedDict): + Count: Optional[int] + Data: Optional[dict] + Handle: Optional[str] + Id: Optional[str] + Timeout: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class CloudFormationWaitConditionProvider(ResourceProvider[CloudFormationWaitConditionProperties]): + TYPE = "AWS::CloudFormation::WaitCondition" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[CloudFormationWaitConditionProperties], + ) -> ProgressEvent[CloudFormationWaitConditionProperties]: + """ + Create a new resource. 
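+ + Note: as implemented below this is a stub; no signal handling takes place, Data is initialised to an empty dict, and the Id is synthesised locally as "<stack_id>/<uuid4>/<logical_resource_id>".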
+ + Primary identifier fields: + - /properties/Id + + Read-only properties: + - /properties/Data + - /properties/Id + + """ + model = request.desired_state + model["Data"] = {} # TODO + model["Id"] = f"{request.stack_id}/{uuid.uuid4()}/{request.logical_resource_id}" + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[CloudFormationWaitConditionProperties], + ) -> ProgressEvent[CloudFormationWaitConditionProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[CloudFormationWaitConditionProperties], + ) -> ProgressEvent[CloudFormationWaitConditionProperties]: + """ + Delete a resource + + + """ + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) # NO-OP + + def update( + self, + request: ResourceRequest[CloudFormationWaitConditionProperties], + ) -> ProgressEvent[CloudFormationWaitConditionProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition.schema.json b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition.schema.json new file mode 100644 index 0000000000000..232d5c012e745 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition.schema.json @@ -0,0 +1,29 @@ +{ + "typeName": "AWS::CloudFormation::WaitCondition", + "description": "Resource Type definition for AWS::CloudFormation::WaitCondition", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "Data": { + "type": "object" + }, + "Count": { + "type": "integer" + }, + "Handle": { + "type": "string" + }, + "Timeout": { + "type": "string" + } + }, + "readOnlyProperties": [ + "/properties/Data", + "/properties/Id" + ], + "primaryIdentifier": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition_plugin.py b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition_plugin.py new file mode 100644 index 0000000000000..bdc8b49fd2e6d --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitcondition_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class CloudFormationWaitConditionProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::CloudFormation::WaitCondition" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.cloudformation.resource_providers.aws_cloudformation_waitcondition import ( + CloudFormationWaitConditionProvider, + ) + + self.factory = CloudFormationWaitConditionProvider diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle.py b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle.py new file mode 100644 index 0000000000000..f2b5237876fe0 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle.py @@ -0,0 +1,94 @@ +# LocalStack Resource Provider Scaffolding v2 
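+# Note on the implementation below: the "physical ID" (and thus the Ref) of a wait condition handle is a presigned S3 put_object URL, mirroring AWS, where wait condition signals are uploaded to exactly such a URL; the bucket name "cloudformation-waitcondition-<region>" is LocalStack-internal.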
+from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class CloudFormationWaitConditionHandleProperties(TypedDict): + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class CloudFormationWaitConditionHandleProvider( + ResourceProvider[CloudFormationWaitConditionHandleProperties] +): + TYPE = "AWS::CloudFormation::WaitConditionHandle" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[CloudFormationWaitConditionHandleProperties], + ) -> ProgressEvent[CloudFormationWaitConditionHandleProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + + + + + Read-only properties: + - /properties/Id + + + + """ + # TODO: properly test this and fix s3 bucket usage + model = request.desired_state + + s3 = request.aws_client_factory.s3 + region = s3.meta.region_name + + bucket = f"cloudformation-waitcondition-{region}" + waitcondition_url = s3.generate_presigned_url( + "put_object", Params={"Bucket": bucket, "Key": request.stack_id} + ) + model["Id"] = waitcondition_url + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[CloudFormationWaitConditionHandleProperties], + ) -> ProgressEvent[CloudFormationWaitConditionHandleProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[CloudFormationWaitConditionHandleProperties], + ) -> ProgressEvent[CloudFormationWaitConditionHandleProperties]: + """ + Delete a resource + + + """ + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[CloudFormationWaitConditionHandleProperties], + ) -> ProgressEvent[CloudFormationWaitConditionHandleProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle.schema.json b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle.schema.json new file mode 100644 index 0000000000000..34c317b900bf4 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle.schema.json @@ -0,0 +1,16 @@ +{ + "typeName": "AWS::CloudFormation::WaitConditionHandle", + "description": "Resource Type definition for AWS::CloudFormation::WaitConditionHandle", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + } + }, + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle_plugin.py b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle_plugin.py new file mode 100644 index 0000000000000..f5888171517ab --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/resource_providers/aws_cloudformation_waitconditionhandle_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from 
localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class CloudFormationWaitConditionHandleProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::CloudFormation::WaitConditionHandle" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.cloudformation.resource_providers.aws_cloudformation_waitconditionhandle import ( + CloudFormationWaitConditionHandleProvider, + ) + + self.factory = CloudFormationWaitConditionHandleProvider diff --git a/localstack-core/localstack/services/cloudformation/scaffolding/CloudformationSchema.zip b/localstack-core/localstack/services/cloudformation/scaffolding/CloudformationSchema.zip new file mode 100644 index 0000000000000..f9c8e2f6dbf4d Binary files /dev/null and b/localstack-core/localstack/services/cloudformation/scaffolding/CloudformationSchema.zip differ diff --git a/localstack-core/localstack/services/cloudformation/scaffolding/__main__.py b/localstack-core/localstack/services/cloudformation/scaffolding/__main__.py new file mode 100644 index 0000000000000..d6eb97f8dbbf1 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/scaffolding/__main__.py @@ -0,0 +1,824 @@ +from __future__ import annotations + +import json +import os +import zipfile +from dataclasses import dataclass +from enum import Enum, auto +from functools import reduce +from pathlib import Path +from typing import Any, Generator, Literal, Optional, TypedDict, TypeVar + +import click +from jinja2 import Environment, FileSystemLoader +from yaml import safe_dump + +from .propgen import generate_ir_for_type + +try: + from rich.console import Console + from rich.syntax import Syntax +except ImportError: + + class Console: + def print(self, text: str): + print("# " + text.replace("[underline]", "").replace("[/underline]", "")) + + def Syntax(text: str, *args, **kwargs) -> str: + return text + + +# increase when any major changes are done to the scaffolding, +# so that we can reason better about previously scaffolded resources in the future +SCAFFOLDING_VERSION = 2 + +# Some services require their names to be re-written as we know them by different names +SERVICE_NAME_MAP = { + "OpenSearchService": "OpenSearch", + "Lambda": "lambda_", +} + + +class Property(TypedDict): + type: Optional[Literal["str"]] + items: Optional[dict] + + +class HandlerDefinition(TypedDict): + permissions: Optional[list[str]] + + +class HandlersDefinition(TypedDict): + create: HandlerDefinition + read: HandlerDefinition + update: HandlerDefinition + delete: HandlerDefinition + list: HandlerDefinition + + +class ResourceSchema(TypedDict): + typeName: str + description: Optional[str] + required: Optional[list[str]] + properties: dict[str, Property] + handlers: HandlersDefinition + + +def resolve_ref(schema: ResourceSchema, target: str) -> dict: + """ + Given a schema {"a": {"b": "c"}} and the ref "#/a/b" return "c" + """ + target_path = filter(None, (elem.strip() for elem in target.lstrip("#").split("/"))) + + T = TypeVar("T") + + def lookup(d: dict[str, T], key: str) -> dict | T: + return d[key] + + return reduce(lookup, target_path, schema) + + +@dataclass +class ResourceName: + full_name: str + namespace: str + service: str + resource: str + python_compatible_service_name: str + + def provider_name(self) -> str: + return f"{self.service}{self.resource}" + + def schema_filename(self) -> str: + return 
f"{self.namespace.lower()}-{self.service.lower()}-{self.resource.lower()}.json" + + def path_compatible_full_name(self) -> str: + return f"{self.namespace.lower()}_{self.service.lower()}_{self.resource.lower()}" + + @classmethod + def from_name(cls, name: str) -> ResourceName: + parts = name.split("::") + if len(parts) != 3 or parts[0] != "AWS": + raise ValueError(f"Invalid CloudFormation resource name {name}") + + raw_service_name = parts[1].strip() + renamed_service = SERVICE_NAME_MAP.get(raw_service_name, raw_service_name) + + return ResourceName( + full_name=name, + namespace=parts[0], + service=raw_service_name, + python_compatible_service_name=renamed_service, + resource=parts[2].strip(), + ) + + +def get_formatted_template_output( + env: Environment, template_name: str, *render_args, **render_kwargs +) -> str: + template = env.get_template(template_name) + return template.render(*render_args, **render_kwargs) + + +class SchemaProvider: + def __init__(self, zipfile_path: Path): + self.schemas = {} + with zipfile.ZipFile(zipfile_path) as infile: + for filename in infile.namelist(): + with infile.open(filename) as schema_file: + schema = json.load(schema_file) + typename = schema["typeName"] + self.schemas[typename] = schema + + def schema(self, resource_name: ResourceName) -> ResourceSchema: + try: + return self.schemas[resource_name.full_name] + except KeyError as e: + raise click.ClickException( + f"Could not find schema for CloudFormation resource type: {resource_name.full_name}" + ) from e + + +LOCALSTACK_ROOT_DIR = Path(__file__).parent.joinpath("../../../../..").resolve() +LOCALSTACK_PRO_ROOT_DIR = LOCALSTACK_ROOT_DIR.joinpath("../localstack-ext").resolve() +TESTS_ROOT_DIR = LOCALSTACK_ROOT_DIR.joinpath( + "tests/aws/services/cloudformation/resource_providers" +) +TESTS_PRO_ROOT_DIR = LOCALSTACK_PRO_ROOT_DIR.joinpath( + "localstack-pro-core/tests/aws/services/cloudformation/resource_providers" +) + +assert LOCALSTACK_ROOT_DIR.is_dir(), f"{LOCALSTACK_ROOT_DIR} does not exist" +assert LOCALSTACK_PRO_ROOT_DIR.is_dir(), f"{LOCALSTACK_PRO_ROOT_DIR} does not exist" +assert TESTS_ROOT_DIR.is_dir(), f"{TESTS_ROOT_DIR} does not exist" +assert TESTS_PRO_ROOT_DIR.is_dir(), f"{TESTS_PRO_ROOT_DIR} does not exist" + + +def root_dir(pro: bool = False) -> Path: + if pro: + return LOCALSTACK_PRO_ROOT_DIR + else: + return LOCALSTACK_ROOT_DIR + + +def tests_root_dir(pro: bool = False) -> Path: + if pro: + return TESTS_PRO_ROOT_DIR + else: + return TESTS_ROOT_DIR + + +def template_path( + resource_name: ResourceName, + file_type: FileType, + root: Optional[Path] = None, + pro: bool = False, +) -> Path: + """ + Given a resource name and file type, return the path of the template relative to the template root. 
+ """ + match file_type: + case FileType.minimal_template: + stub = "basic.yaml" + case FileType.attribute_template: + stub = "getatt_exploration.yaml" + case FileType.update_without_replacement_template: + stub = "update.yaml" + case FileType.autogenerated_template: + stub = "basic_autogenerated.yaml" + case _: + raise ValueError(f"File type {file_type} is not a template") + + output_path = ( + tests_root_dir(pro) + .joinpath( + f"{resource_name.python_compatible_service_name.lower()}/{resource_name.path_compatible_full_name()}/templates/{stub}" + ) + .resolve() + ) + + if root: + test_path = ( + root_dir(pro) + .joinpath( + f"tests/aws/cloudformation/resource_providers/{resource_name.python_compatible_service_name.lower()}/{resource_name.path_compatible_full_name()}" + ) + .resolve() + ) + + common_root = os.path.relpath(output_path, test_path) + return Path(common_root) + else: + return output_path + + +class FileType(Enum): + # service code + plugin = auto() + provider = auto() + + # test files + integration_test = auto() + getatt_test = auto() + # cloudcontrol_test = auto() + parity_test = auto() + + # templates + attribute_template = auto() + minimal_template = auto() + update_without_replacement_template = auto() + autogenerated_template = auto() + + # schema + schema = auto() + + +class TemplateRenderer: + def __init__(self, schema: ResourceSchema, environment: Environment, pro: bool = False): + self.schema = schema + self.environment = environment + self.pro = pro + + def render( + self, + file_type: FileType, + resource_name: ResourceName, + ) -> str: + # Generated outputs (template, schema) + # templates + if file_type == FileType.attribute_template: + return self.render_attribute_template(resource_name) + elif file_type == FileType.minimal_template: + return self.render_minimal_template(resource_name) + elif file_type == FileType.update_without_replacement_template: + return self.render_update_without_replacement_template(resource_name) + elif file_type == FileType.autogenerated_template: + return self.render_autogenerated_template(resource_name) + # schema + elif file_type == FileType.schema: + return json.dumps(self.schema, indent=2) + + template_mapping = { + FileType.plugin: "plugin_template.py.j2", + FileType.provider: "provider_template.py.j2", + FileType.getatt_test: "test_getatt_template.py.j2", + FileType.integration_test: "test_integration_template.py.j2", + # FileType.cloudcontrol_test: "test_cloudcontrol_template.py.j2", + FileType.parity_test: "test_parity_template.py.j2", + } + kwargs = dict( + name=resource_name.full_name, # AWS::SNS::Topic + resource=resource_name.provider_name(), # SNSTopic + scaffolding_version=f"v{SCAFFOLDING_VERSION}", + ) + # TODO: we might want to segregate each provider in its own directory + # e.g. .../resource_providers/aws_iam_role/test_X.py vs. 
.../resource_providers/iam/test_X.py + # add extra parameters + tests_output_path = root_dir(self.pro).joinpath( + f"tests/aws/cloudformation/resource_providers/{resource_name.python_compatible_service_name.lower()}/{resource_name.full_name.lower()}" + ) + match file_type: + case FileType.getatt_test: + kwargs["getatt_targets"] = list(self.get_getatt_targets()) + kwargs["service"] = resource_name.service.lower() + kwargs["resource"] = resource_name.resource.lower() + kwargs["template_path"] = str( + template_path(resource_name, FileType.attribute_template, tests_output_path) + ) + case FileType.provider: + property_ir = generate_ir_for_type( + [self.schema], + resource_name.full_name, + provider_prefix=resource_name.provider_name(), + ) + kwargs["provider_properties"] = property_ir + kwargs["required_properties"] = self.schema.get("required") + kwargs["create_only_properties"] = self.schema.get("createOnlyProperties") + kwargs["read_only_properties"] = self.schema.get("readOnlyProperties") + kwargs["primary_identifier"] = self.schema.get("primaryIdentifier") + kwargs["create_permissions"] = ( + self.schema.get("handlers", {}).get("create", {}).get("permissions") + ) + kwargs["delete_permissions"] = ( + self.schema.get("handlers", {}).get("delete", {}).get("permissions") + ) + kwargs["read_permissions"] = ( + self.schema.get("handlers", {}).get("read", {}).get("permissions") + ) + kwargs["update_permissions"] = ( + self.schema.get("handlers", {}).get("update", {}).get("permissions") + ) + kwargs["list_permissions"] = ( + self.schema.get("handlers", {}).get("list", {}).get("permissions") + ) + case FileType.plugin: + kwargs["service"] = resource_name.python_compatible_service_name.lower() + kwargs["lower_resource"] = resource_name.resource.lower() + kwargs["pro"] = self.pro + case FileType.integration_test: + kwargs["black_box_template_path"] = str( + template_path(resource_name, FileType.minimal_template, tests_output_path) + ) + kwargs["update_template_path"] = str( + template_path( + resource_name, + FileType.update_without_replacement_template, + tests_output_path, + ) + ) + kwargs["autogenerated_template_path"] = str( + template_path(resource_name, FileType.autogenerated_template, tests_output_path) + ) + # case FileType.cloudcontrol_test: + case FileType.parity_test: + kwargs["parity_test_filename"] = "test_parity.py" + case _: + raise NotImplementedError(f"Rendering template of type {file_type}") + + return get_formatted_template_output( + self.environment, template_mapping[file_type], **kwargs + ) + + def get_getatt_targets(self) -> Generator[str, None, None]: + for name, defn in self.schema["properties"].items(): + if "type" in defn and defn["type"] in ["string"]: + yield name + + def render_minimal_template(self, resource_name: ResourceName) -> str: + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": f"Template to exercise create and delete operations for {resource_name.full_name}", + "Resources": { + "MyResource": { + "Type": resource_name.full_name, + "Properties": {}, + }, + }, + "Outputs": { + "MyRef": { + "Value": { + "Ref": "MyResource", + }, + }, + }, + } + + return safe_dump(template, sort_keys=False) + + def render_update_without_replacement_template(self, resource_name: ResourceName) -> str: + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": f"Template to exercise updating {resource_name.full_name}", + "Parameters": { + "AttributeValue": { + "Type": "String", + "Description": "Value of property to change to force an 
update", + }, + }, + "Resources": { + "MyResource": { + "Type": resource_name.full_name, + "Properties": { + "SomeProperty": "!Ref AttributeValue", + }, + }, + }, + "Outputs": { + "MyRef": { + "Value": { + "Ref": "MyResource", + }, + }, + "MyOutput": { + "Value": "# TODO: the value to verify", + }, + }, + } + return safe_dump(template, sort_keys=False) + + def render_autogenerated_template(self, resource_name: ResourceName) -> str: + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": f"Template to exercise updating autogenerated properties of {resource_name.full_name}", + "Resources": { + "MyResource": { + "Type": resource_name.full_name, + }, + }, + "Outputs": { + "MyRef": { + "Value": { + "Ref": "MyResource", + }, + }, + }, + } + return safe_dump(template, sort_keys=False) + + def render_attribute_template(self, resource_name: ResourceName) -> str: + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": f"Template to exercise getting attributes of {resource_name.full_name}", + "Parameters": { + "AttributeName": { + "Type": "String", + "Description": "Name of the attribute to fetch from the resource", + }, + }, + "Resources": { + "MyResource": { + "Type": resource_name.full_name, + "Properties": {}, + }, + }, + "Outputs": self.render_outputs(), + } + + return safe_dump(template, sort_keys=False) + + def required_properties(self) -> dict[str, Property]: + return PropertyRenderer(self.schema).properties() + + def render_outputs(self) -> dict: + """ + Generate an output for each property in the schema + """ + outputs = {} + + # ref + outputs["MyRef"] = {"Value": {"Ref": "MyResource"}} + + # getatt + outputs["MyOutput"] = {"Value": {"Fn::GetAtt": ["MyResource", {"Ref": "AttributeName"}]}} + + return outputs + + +class PropertyRenderer: + def __init__(self, schema: ResourceSchema): + self.schema = schema + + def properties(self) -> dict: + required_properties = self.schema.get("required", []) + + result = {} + for name, defn in self.schema["properties"].items(): + if name not in required_properties: + continue + + value = self.render_property(defn) + result[name] = value + + return result + + def render_property(self, property: Property) -> str | dict | list: + if prop_type := property.get("type"): + if prop_type in {"string"}: + return self._render_basic(prop_type) + elif prop_type == "array": + return [self.render_property(item) for item in property["items"]] + elif oneof := property.get("oneOf"): + return self._render_one_of(oneof) + else: + raise NotImplementedError(property) + + def _render_basic(self, type: str) -> str: + return "CHANGEME" + + def _render_one_of(self, options: list[Property]) -> Any: + return self.render_property(options[0]) + + +class FileWriter: + destination_files: dict[FileType, Path] + + def __init__( + self, resource_name: ResourceName, console: Console, overwrite: bool, pro: bool = False + ): + self.resource_name = resource_name + self.console = console + self.overwrite = overwrite + self.pro = pro + + base_path = ( + ["localstack-pro-core", "localstack", "pro", "core"] + if self.pro + else ["localstack-core", "localstack"] + ) + + self.destination_files = { + FileType.provider: root_dir(self.pro).joinpath( + *base_path, + "services", + self.resource_name.python_compatible_service_name.lower(), + "resource_providers", + f"{self.resource_name.namespace.lower()}_{self.resource_name.service.lower()}_{self.resource_name.resource.lower()}.py", + ), + FileType.plugin: root_dir(self.pro).joinpath( + *base_path, + "services", + 
self.resource_name.python_compatible_service_name.lower(), + "resource_providers", + f"{self.resource_name.namespace.lower()}_{self.resource_name.service.lower()}_{self.resource_name.resource.lower()}_plugin.py", + ), + FileType.schema: root_dir(self.pro).joinpath( + *base_path, + "services", + self.resource_name.python_compatible_service_name.lower(), + "resource_providers", + f"aws_{self.resource_name.service.lower()}_{self.resource_name.resource.lower()}.schema.json", + ), + FileType.integration_test: tests_root_dir(self.pro).joinpath( + self.resource_name.python_compatible_service_name.lower(), + self.resource_name.path_compatible_full_name(), + "test_basic.py", + ), + FileType.getatt_test: tests_root_dir(self.pro).joinpath( + self.resource_name.python_compatible_service_name.lower(), + self.resource_name.path_compatible_full_name(), + "test_exploration.py", + ), + # FileType.cloudcontrol_test: tests_root_dir(self.pro).joinpath( + # self.resource_name.python_compatible_service_name.lower(), + # f"test_aws_{self.resource_name.service.lower()}_{self.resource_name.resource.lower()}_cloudcontrol.py", + # ), + FileType.parity_test: tests_root_dir(self.pro).joinpath( + self.resource_name.python_compatible_service_name.lower(), + self.resource_name.path_compatible_full_name(), + "test_parity.py", + ), + } + + # output files that are templates + templates = [ + FileType.attribute_template, + FileType.minimal_template, + FileType.update_without_replacement_template, + FileType.autogenerated_template, + ] + for template_type in templates: + self.destination_files[template_type] = template_path(self.resource_name, template_type) + + def write(self, file_type: FileType, contents: str): + file_destination = self.destination_files[file_type] + destination_path = file_destination.parent + destination_path.mkdir(parents=True, exist_ok=True) + + if file_destination.exists(): + should_overwrite = self.confirm_overwrite(file_destination) + if not should_overwrite: + self.console.print(f"Skipping {file_destination}") + return + + match file_type: + # provider + case FileType.provider: + self.ensure_python_init_files(destination_path) + self.write_text(contents, file_destination) + self.console.print(f"Written provider to {file_destination}") + case FileType.plugin: + self.ensure_python_init_files(destination_path) + self.write_text(contents, file_destination) + self.console.print(f"Written plugin to {file_destination}") + + # tests + case FileType.integration_test: + self.ensure_python_init_files(destination_path) + self.write_text(contents, file_destination) + self.console.print(f"Written integration test to {file_destination}") + case FileType.getatt_test: + self.write_text(contents, file_destination) + self.console.print(f"Written getatt tests to {file_destination}") + # case FileType.cloudcontrol_test: + # self.write_text(contents, file_destination) + # self.console.print(f"Written cloudcontrol tests to {file_destination}") + case FileType.parity_test: + self.write_text(contents, file_destination) + self.console.print(f"Written parity tests to {file_destination}") + + # templates + case FileType.attribute_template: + self.write_text(contents, file_destination) + self.console.print(f"Written attribute template to {file_destination}") + case FileType.minimal_template: + self.write_text(contents, file_destination) + self.console.print(f"Written minimal template to {file_destination}") + case FileType.update_without_replacement_template: + self.write_text(contents, file_destination) + self.console.print( 
+ f"Written update without replacement template to {file_destination}" + ) + case FileType.autogenerated_template: + self.write_text(contents, file_destination) + self.console.print( + f"Written autogenerated properties template to {file_destination}" + ) + + # schema + case FileType.schema: + self.write_text(contents, file_destination) + self.console.print(f"Written schema to {file_destination}") + case _: + raise NotImplementedError(f"Writing {file_type}") + + def confirm_overwrite(self, destination_file: Path) -> bool: + """ + If a file we are about to write to exists, overwrite or ignore. + + :return True if file should be (over-)written, False otherwise + """ + return self.overwrite or click.confirm("Destination files already exist, overwrite?") + + @staticmethod + def write_text(contents: str, destination: Path): + with destination.open("wt") as outfile: + print(contents, file=outfile) + + @staticmethod + def ensure_python_init_files(path: Path): + """ + Make sure __init__.py files are created correctly + """ + project_root = path.parent.parent.parent.parent + path_relative_to_root = path.relative_to(project_root) + dir = project_root + for part in path_relative_to_root.parts: + dir = dir / part + test_path = dir.joinpath("__init__.py") + if not test_path.is_file(): + # touch file + with test_path.open("w"): + pass + + +class OutputFactory: + def __init__( + self, + template_renderer: TemplateRenderer, + printer: Console, + writer: FileWriter, + ): + self.template_renderer = template_renderer + self.printer = printer + self.writer = writer + + def get(self, file_type: FileType, resource_name: ResourceName) -> Output: + contents = self.template_renderer.render(file_type, resource_name) + return Output(contents, file_type, self.printer, self.writer, resource_name) + + +class Output: + def __init__( + self, + contents: str, + file_type: FileType, + printer: Console, + writer: FileWriter, + resource_name: ResourceName, + ): + self.contents = contents + self.file_type = file_type + self.printer = printer + self.writer = writer + self.resource_name = resource_name + + def handle(self, should_write: bool = False): + if should_write: + self.write() + else: + self.print() + + def write(self): + self.writer.write(self.file_type, self.contents) + + def print(self): + match self.file_type: + # service code + case FileType.provider: + self.printer.print("\n[underline]Provider template[/underline]\n") + self.printer.print(Syntax(self.contents, "python")) + case FileType.plugin: + self.printer.print("\n[underline]Plugin[/underline]\n") + self.printer.print(Syntax(self.contents, "python")) + # tests + case FileType.integration_test: + self.printer.print("\n[underline]Integration test file[/underline]\n") + self.printer.print(Syntax(self.contents, "python")) + case FileType.getatt_test: + self.printer.print("\n[underline]GetAtt test file[/underline]\n") + self.printer.print(Syntax(self.contents, "python")) + # case FileType.cloudcontrol_test: + # self.printer.print("\n[underline]CloudControl test[/underline]\n") + # self.printer.print(Syntax(self.contents, "python")) + case FileType.parity_test: + self.printer.print("\n[underline]Parity test[/underline]\n") + self.printer.print(Syntax(self.contents, "python")) + + # templates + case FileType.attribute_template: + self.printer.print("\n[underline]Attribute Test Template[/underline]\n") + self.printer.print(Syntax(self.contents, "yaml")) + case FileType.minimal_template: + self.printer.print("\n[underline]Minimal template[/underline]\n") + 
self.printer.print(Syntax(self.contents, "yaml")) + case FileType.update_without_replacement_template: + self.printer.print("\n[underline]Update test template[/underline]\n") + self.printer.print(Syntax(self.contents, "yaml")) + case FileType.autogenerated_template: + self.printer.print("\n[underline]Autogenerated properties template[/underline]\n") + self.printer.print(Syntax(self.contents, "yaml")) + + # schema + case FileType.schema: + self.printer.print("\n[underline]Schema[/underline]\n") + self.printer.print(Syntax(self.contents, "json")) + case _: + raise NotImplementedError(self.file_type) + + +@click.group() +def cli(): + pass + + +@cli.command() +@click.option( + "-r", + "--resource-type", + required=True, + help="CloudFormation resource type (e.g. 'AWS::SSM::Parameter') to generate", +) +@click.option("-w", "--write/--no-write", default=False) +@click.option("--overwrite", is_flag=True, default=False) +@click.option("-t", "--write-tests/--no-write-tests", default=False) +@click.option("--pro", is_flag=True, default=False) +def generate( + resource_type: str, + write: bool, + write_tests: bool, + overwrite: bool, + pro: bool, +): + console = Console() + console.rule(title=resource_type) + + schema_provider = SchemaProvider( + zipfile_path=Path(__file__).parent.joinpath("CloudformationSchema.zip") + ) + + template_root = Path(__file__).parent.joinpath("templates") + env = Environment( + loader=FileSystemLoader(template_root), + ) + + parts = resource_type.rpartition("::") + if parts[-1] == "*": + # generate all resource types for that service + matching_resources = [x for x in schema_provider.schemas.keys() if x.startswith(parts[0])] + else: + matching_resources = [resource_type] + + for matching_resource in matching_resources: + console.rule(title=matching_resource) + resource_name = ResourceName.from_name(matching_resource) + schema = schema_provider.schema(resource_name) + + template_renderer = TemplateRenderer(schema, env, pro) + writer = FileWriter(resource_name, console, overwrite, pro) + output_factory = OutputFactory(template_renderer, console, writer) # noqa + for file_type in FileType: + if not write_tests and file_type in { + FileType.integration_test, + FileType.getatt_test, + FileType.parity_test, + FileType.minimal_template, + FileType.update_without_replacement_template, + FileType.attribute_template, + FileType.autogenerated_template, + }: + # skip test generation + continue + output_factory.get(file_type, resource_name).handle(should_write=write) + + console.rule(title="Resources & Instructions") + console.print( + "Resource types: https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-types.html" + ) + # TODO: print for every resource + for matching_resource in matching_resources: + resource_name = ResourceName.from_name(matching_resource) + console.print( + # lambda_ should become lambda (re-use the same list we use for generating the models) + f"{matching_resource}: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-{resource_name.service.lower()}-{resource_name.resource.lower()}.html" + ) + console.print("\nWondering where to get started?") + console.print( + "First run `make entrypoints` to make sure your resource provider plugin is actually registered." + ) + console.print( + 'Then start off by finalizing the generated minimal ("basic") template and get it to deploy against AWS.' 
+ ) + + +if __name__ == "__main__": + cli() diff --git a/localstack-core/localstack/services/cloudformation/scaffolding/propgen.py b/localstack-core/localstack/services/cloudformation/scaffolding/propgen.py new file mode 100644 index 0000000000000..6a7e90166b490 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/scaffolding/propgen.py @@ -0,0 +1,227 @@ +""" +Implementation of generating the types for a provider from the schema +""" + +from __future__ import annotations + +import logging +import textwrap +from dataclasses import dataclass +from typing import Optional, TypedDict + +LOG = logging.getLogger(__name__) + + +@dataclass +class Item: + """An Item is a single field definition""" + + name: str + type: str + required: bool + + def __str__(self) -> str: + return f"{self.name}: {self.type}" + + @classmethod + def new(cls, name: str, type: str, required: bool = False) -> Item: + if required: + return cls(name=name, type=type, required=required) + else: + return cls(name=name, type=f"Optional[{type}]", required=required) + + +@dataclass +class PrimitiveStruct: + name: str + primitive_type: str + + def __str__(self) -> str: + return f""" +{self.name} = {self.primitive_type} +""" + + +@dataclass +class Struct: + """A struct represents a single rendered class""" + + name: str + items: list[Item] + + def __str__(self) -> str: + if self.items: + raw_text = "\n".join(map(str, self.sorted_items)) + else: + raw_text = "pass" + formatted_items = textwrap.indent(raw_text, " ") + return f""" +class {self.name}(TypedDict): +{formatted_items} +""" + + @property + def sorted_items(self) -> list[Item]: + required_items = sorted( + [item for item in self.items if item.required], key=lambda item: item.name + ) + optional_items = sorted( + [item for item in self.items if not item.required], key=lambda item: item.name + ) + return required_items + optional_items + + +@dataclass +class IR: + structs: list[Struct] + + def __str__(self) -> str: + """ + Pretty print the IR + """ + return "\n\n".join(map(str, self.structs)) + + +class Schema(TypedDict): + properties: dict + definitions: dict + typeName: str + required: Optional[list[str]] + + +TYPE_MAP = { + "string": "str", + "boolean": "bool", + "integer": "int", + "number": "float", + "object": "dict", + "array": "list", +} + + +class PropertyTypeScaffolding: + resource_type: str + provider_prefix: str + schema: Schema + + structs: list[Struct] + + required_properties: list[str] + + def __init__(self, resource_type: str, provider_prefix: str, schema: Schema): + self.resource_type = resource_type + self.provider_prefix = provider_prefix + self.schema = schema + self.structs = [] + self.required_properties = schema.get("required", []) + + def get_structs(self) -> list[Struct]: + root_struct = Struct(f"{self.provider_prefix}Properties", items=[]) + self._add_struct(root_struct) + + for property_name, property_def in self.schema["properties"].items(): + is_required = property_name in self.required_properties + item = self.property_to_item(property_name, property_def, is_required) + root_struct.items.append(item) + + return self.structs + + def _add_struct(self, struct: Struct): + if struct.name in [s.name for s in self.structs]: + return + else: + self.structs.append(struct) + + def get_ref_definition(self, property_ref: str) -> dict: + property_ref_name = property_ref.lstrip("#").rpartition("/")[-1] + return self.schema["definitions"][property_ref_name] + + def resolve_type_of_property(self, property_def: dict) -> str: + if property_ref := 
property_def.get("$ref"): + ref_definition = self.get_ref_definition(property_ref) + ref_type = ref_definition.get("type") + if ref_type not in ["object", "array"]: + # in this case we simply flatten it (instead of for example creating a type alias) + resolved_type = TYPE_MAP.get(ref_type) + if resolved_type is None: + LOG.warning( + "Type for %s not found in the TYPE_MAP. Using `Any` as fallback.", ref_type + ) + resolved_type = "Any" + else: + if ref_type == "object": + # the object might only have a pattern defined and no actual properties + if "properties" not in ref_definition: + resolved_type = "dict" + else: + nested_struct = self.ref_to_struct(property_ref) + resolved_type = nested_struct.name + self._add_struct(nested_struct) + elif ref_type == "array": + item_def = ref_definition["items"] + item_type = self.resolve_type_of_property(item_def) + resolved_type = f"list[{item_type}]" + else: + raise Exception(f"Unknown property type encountered: {ref_type}") + else: + match property_type := property_def.get("type"): + # primitives + case "string": + resolved_type = "str" + case "boolean": + resolved_type = "bool" + case "integer": + resolved_type = "int" + case "number": + resolved_type = "float" + # complex objects + case "object": + resolved_type = "dict" # TODO: any cases where we need to continue here? + case "array": + try: + item_type = self.resolve_type_of_property(property_def["items"]) + resolved_type = f"list[{item_type}]" + except RecursionError: + resolved_type = "list[Any]" + case _: + # TODO: allOf, anyOf, patternProperties (?) + # AWS::ApiGateway::RestApi passes a ["object", "string"] here for the "Body" property + # it probably makes sense to assume this behaves the same as a "oneOf" + if one_of := property_def.get("oneOf"): + resolved_type = "|".join([self.resolve_type_of_property(o) for o in one_of]) + elif isinstance(property_type, list): + resolved_type = "|".join([TYPE_MAP[pt] for pt in property_type]) + else: + raise Exception(f"Unknown property type: {property_type}") + return resolved_type + + def property_to_item(self, property_name: str, property_def: dict, required: bool) -> Item: + resolved_type = self.resolve_type_of_property(property_def) + return Item(name=property_name, type=f"Optional[{resolved_type}]", required=required) + + def ref_to_struct(self, property_ref: str) -> Struct: + property_ref_name = property_ref.lstrip("#").rpartition("/")[-1] + resolved_def = self.schema["definitions"][property_ref_name] + nested_struct = Struct(name=property_ref_name, items=[]) + if resolved_properties := resolved_def.get("properties"): + required_props = resolved_def.get("required", []) + for k, v in resolved_properties.items(): + is_required = k in required_props + item = self.property_to_item(k, v, is_required) + nested_struct.items.append(item) + else: + raise Exception("Unknown resource format. 
Expected properties on object") + + return nested_struct + + +def generate_ir_for_type(schema: list[Schema], type_name: str, provider_prefix: str = "") -> IR: + try: + resource_schema = [every for every in schema if every["typeName"] == type_name][0] + except IndexError: + raise ValueError(f"could not find schema for type {type_name}") + + structs = PropertyTypeScaffolding( + resource_type=type_name, provider_prefix=provider_prefix, schema=resource_schema + ).get_structs() + return IR(structs=structs) diff --git a/localstack-core/localstack/services/cloudformation/scaffolding/templates/plugin_template.py.j2 b/localstack-core/localstack/services/cloudformation/scaffolding/templates/plugin_template.py.j2 new file mode 100644 index 0000000000000..0a9a530cdfccc --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/scaffolding/templates/plugin_template.py.j2 @@ -0,0 +1,22 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ResourceProvider +{%- if pro %} +{%- set base_class = "CloudFormationResourceProviderPluginExt" %} +{%- set root_module = "localstack.pro.core" %} +{%- else %} +{%- set base_class = "CloudFormationResourceProviderPlugin" %} +{%- set root_module = "localstack" %} +{%- endif %} +from {{ root_module }}.services.cloudformation.resource_provider import {{ base_class }} + +class {{ resource }}ProviderPlugin({{ base_class }}): + name = "{{ name }}" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from {{ root_module }}.services.{{ service }}.resource_providers.aws_{{ service }}_{{ lower_resource }} import {{ resource }}Provider + + self.factory = {{ resource }}Provider diff --git a/localstack-core/localstack/services/cloudformation/scaffolding/templates/provider_template.py.j2 b/localstack-core/localstack/services/cloudformation/scaffolding/templates/provider_template.py.j2 new file mode 100644 index 0000000000000..3d52dbd6b7a83 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/scaffolding/templates/provider_template.py.j2 @@ -0,0 +1,138 @@ +# LocalStack Resource Provider Scaffolding {{ scaffolding_version }} +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + +{{ provider_properties }} + + +REPEATED_INVOCATION = "repeated_invocation" + +class {{ resource }}Provider(ResourceProvider[{{ resource }}Properties]): + + TYPE = "{{ name }}" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[{{ resource }}Properties], + ) -> ProgressEvent[{{ resource }}Properties]: + """ + Create a new resource. 
+ + {% if primary_identifier -%} + Primary identifier fields: + {%- for property in primary_identifier %} + - {{ property }} + {%- endfor %} + {%- endif %} + + {% if required_properties -%} + Required properties: + {%- for property in required_properties %} + - {{ property }} + {%- endfor %} + {%- endif %} + + {% if create_only_properties -%} + Create-only properties: + {%- for property in create_only_properties %} + - {{ property }} + {%- endfor %} + {%- endif %} + + {% if read_only_properties -%} + Read-only properties: + {%- for property in read_only_properties %} + - {{ property }} + {%- endfor %} + {%- endif %} + + {% if create_permissions -%} + IAM permissions required: + {%- for permission in create_permissions %} + - {{ permission }} + {%- endfor -%} + {%- endif %} + + """ + model = request.desired_state + + # TODO: validations + + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + # TODO: defaults + # TODO: idempotency + # TODO: actually create the resource + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + # TODO: check the status of the resource + # - if finished, update the model with all fields and return success event: + # return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + # - else + # return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model) + + raise NotImplementedError + + def read( + self, + request: ResourceRequest[{{ resource }}Properties], + ) -> ProgressEvent[{{ resource }}Properties]: + """ + Fetch resource information + + {% if read_permissions -%} + IAM permissions required: + {%- for permission in read_permissions %} + - {{ permission }} + {%- endfor %} + {%- endif %} + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[{{ resource }}Properties], + ) -> ProgressEvent[{{ resource }}Properties]: + """ + Delete a resource + + {% if delete_permissions -%} + IAM permissions required: + {%- for permission in delete_permissions %} + - {{ permission }} + {%- endfor %} + {%- endif %} + """ + raise NotImplementedError + + def update( + self, + request: ResourceRequest[{{ resource }}Properties], + ) -> ProgressEvent[{{ resource }}Properties]: + """ + Update a resource + + {% if update_permissions -%} + IAM permissions required: + {%- for permission in update_permissions %} + - {{ permission }} + {%- endfor %} + {%- endif %} + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_getatt_template.py.j2 b/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_getatt_template.py.j2 new file mode 100644 index 0000000000000..24f59945903b5 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_getatt_template.py.j2 @@ -0,0 +1,41 @@ +# LocalStack Resource Provider Scaffolding {{ scaffolding_version }} +import os + +import pytest + +from localstack.testing.aws.util import is_aws_cloud + + +RESOURCE_GETATT_TARGETS = {{getatt_targets}} + + +class TestAttributeAccess: + @pytest.mark.parametrize("attribute", RESOURCE_GETATT_TARGETS) + @pytest.mark.skipif(condition=not is_aws_cloud(), reason="Exploratory test only") + def test_getatt( + self, + aws_client, + deploy_cfn_template, + attribute, + snapshot, + ): + """ + Use this test to find out which properties support GetAtt access + + 
Fn::GetAtt documentation: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-getatt.html + """ + + stack = deploy_cfn_template( + template_path=os.path.join( + os.path.dirname(__file__), + "{{ template_path }}", + ), + parameters={"AttributeName": attribute}, + ) + snapshot.match("stack_outputs", stack.outputs) + + # check physical resource id + res = aws_client.cloudformation.describe_stack_resource( + StackName=stack.stack_name, LogicalResourceId="MyResource" + )["StackResourceDetail"] + snapshot.match("physical_resource_id", res.get("PhysicalResourceId")) diff --git a/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_integration_template.py.j2 b/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_integration_template.py.j2 new file mode 100644 index 0000000000000..98bd596be3b89 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_integration_template.py.j2 @@ -0,0 +1,93 @@ +# LocalStack Resource Provider Scaffolding {{ scaffolding_version }} +import os + +import pytest +# from botocore.exceptions import ClientError + + +class TestBasicCRD: + + def test_black_box(self, deploy_cfn_template, aws_client, snapshot): + """ + Simple test that + - deploys a stack containing the resource + - verifies that the resource has been created correctly by querying the service directly + - deletes the stack ensuring that the delete operation has been implemented correctly + - verifies that the resource no longer exists by querying the service directly + """ + stack = deploy_cfn_template( + template_path=os.path.join( + os.path.dirname(__file__), + "{{ black_box_template_path }}", + ), + ) + snapshot.match("stack-outputs", stack.outputs) + + # TODO: fetch the resource and perform any required validations here + # e.g. + # parameter_name = stack.outputs["MyRef"] + # snapshot.add_transformer(snapshot.transform.regex(parameter_name, "")) + + # res = aws_client.ssm.get_parameter(Name=stack.outputs["MyRef"]) + # - this snapshot also asserts that the value set in the template is correct + # snapshot.match("describe-resource", res) + + # verify that the delete operation works + stack.destroy() + + # TODO: fetch the resource again and assert that it no longer exists + # e.g. + # with pytest.raises(ClientError): + # aws_client.ssm.get_parameter(Name=stack.outputs["MyRef"]) + + def test_autogenerated_values(self, aws_client, deploy_cfn_template, snapshot): + stack = deploy_cfn_template( + template_path=os.path.join( + os.path.dirname(__file__), + "{{ autogenerated_template_path }}", + ), + ) + snapshot.match("stack_outputs", stack.outputs) + + # user_name = stack.outputs["MyRef"] + + # verify resource has been correctly deployed with the autogenerated field + # e.g. aws_client.iam.get_user(UserName=user_name) + + # check the auto-generated pattern + # TODO: add a sample of the auto-generated value here for reference, e.g. "TestStack-CustomUser-13AA838" + + +class TestUpdates: + @pytest.mark.skip(reason="TODO") + def test_update_without_replacement(self, deploy_cfn_template, aws_client, snapshot): + """ + Test an UPDATE of a simple property that does not require replacing the entire resource. 
+
+        Check out the official resource documentation at https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html to see if a property needs replacement
+        """
+        stack = deploy_cfn_template(
+            template_path=os.path.join(
+                os.path.dirname(__file__),
+                "{{ update_template_path }}",
+            ),
+            parameters={"AttributeValue": "first"},
+        )
+
+        # TODO: implement fetching the resource and performing any required validations here
+        res = aws_client.ssm.get_parameter(Name=stack.outputs["MyRef"])
+        snapshot.match("describe-resource-before-update", res)
+
+        # TODO: update the stack
+        deploy_cfn_template(
+            stack_name=stack.stack_name,
+            template_path=os.path.join(
+                os.path.dirname(__file__),
+                "{{ update_template_path }}",
+            ),
+            parameters={"AttributeValue": "second"},
+            is_update=True,
+        )
+
+        # TODO: check the value has changed
+        res = aws_client.ssm.get_parameter(Name=stack.outputs["MyRef"])
+        snapshot.match("describe-resource-after-update", res)
diff --git a/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_parity_template.py.j2 b/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_parity_template.py.j2
new file mode 100644
index 0000000000000..6cf269aa392db
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/scaffolding/templates/test_parity_template.py.j2
@@ -0,0 +1,30 @@
+# ruff: noqa
+# LocalStack Resource Provider Scaffolding {{ scaffolding_version }}
+
+import pytest
+
+
+@pytest.mark.skip(reason="TODO")
+class TestParity:
+    """
+    Pro-active parity-focused tests that go into more detail than the basic test skeleton
+
+    TODO: add more focused detailed tests for updates, different combinations, etc.
+    Use snapshots here to capture detailed parity with AWS
+
+    Other ideas for tests in here:
+    - Negative test: invalid combination of properties
+    - Negative test: missing required properties
+    """
+
+    def test_create_with_full_properties(self, aws_client, deploy_cfn_template):
+        """ A sort of smoke test that simply covers as many properties as possible """
+        ...
+
+
+
+
+@pytest.mark.skip(reason="TODO")
+class TestSamples:
+    """ User-provided samples and other reactively added scenarios (e.g. reported and reproduced GitHub issues) """
+    ...
diff --git a/localstack-core/localstack/services/cloudformation/service_models.py b/localstack-core/localstack/services/cloudformation/service_models.py
new file mode 100644
index 0000000000000..aeadbeb85f305
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/service_models.py
@@ -0,0 +1,128 @@
+import logging
+from typing import TypedDict
+
+from localstack.services.cloudformation.deployment_utils import check_not_found_exception
+
+LOG = logging.getLogger(__name__)
+
+# dict key used to store the deployment state of a resource
+KEY_RESOURCE_STATE = "_state_"
+
+
+class DependencyNotYetSatisfied(Exception):
+    """Exception indicating that a resource dependency is not (yet) deployed/available."""
+
+    def __init__(self, resource_ids, message=None):
+        message = message or "Unresolved dependencies: %s" % resource_ids
+        super(DependencyNotYetSatisfied, self).__init__(message)
+        resource_ids = resource_ids if isinstance(resource_ids, list) else [resource_ids]
+        self.resource_ids = resource_ids
+
+
+class ResourceJson(TypedDict):
+    Type: str
+    Properties: dict
+
+
+class GenericBaseModel:
+    """Abstract base class representing a resource model class in LocalStack.
+ This class keeps references to a combination of (1) the CF resource + properties (as defined in the template), and (2) the current deployment + state of a resource. + + Concrete subclasses will implement convenience methods to manage resources, + e.g., fetching the latest deployment state, getting the resource name, etc. + """ + + def __init__(self, account_id: str, region_name: str, resource_json: dict, **params): + # self.stack_name = stack_name # TODO: add stack name to params + self.account_id = account_id + self.region_name = region_name + self.resource_json = resource_json + self.resource_type = resource_json["Type"] + # Properties, as defined in the resource template + self.properties = resource_json["Properties"] = resource_json.get("Properties") or {} + # State, as determined from the deployed resource; use a special dict key here to keep + # track of state changes within resource_json (this way we encapsulate all state details + # in `resource_json` and the changes will survive creation of multiple instances of this class) + self.state = resource_json[KEY_RESOURCE_STATE] = resource_json.get(KEY_RESOURCE_STATE) or {} + + # ---------------------- + # ABSTRACT BASE METHODS + # ---------------------- + + def fetch_state(self, stack_name, resources): + """Fetch the latest deployment state of this resource, or return None if not currently deployed (NOTE: THIS IS NOT ALWAYS TRUE).""" + return None + + def update_resource(self, new_resource, stack_name, resources): + """Update the deployment of this resource, using the updated properties (implemented by subclasses).""" + raise NotImplementedError + + def is_updatable(self) -> bool: + return type(self).update_resource != GenericBaseModel.update_resource + + @classmethod + def cloudformation_type(cls): + """Return the CloudFormation resource type name, e.g., "AWS::S3::Bucket" (implemented by subclasses).""" + pass + + @staticmethod + def get_deploy_templates(): + """Return template configurations used to create the final API requests (implemented by subclasses).""" + pass + + # TODO: rework to normal instance method when resources aren't mutated in different place anymore + @staticmethod + def add_defaults(resource, stack_name: str): + """Set any defaults required, including auto-generating names. Must be called before deploying the resource""" + pass + + # --------------------- + # GENERIC UTIL METHODS + # --------------------- + + # TODO: remove + def fetch_and_update_state(self, *args, **kwargs): + if self.physical_resource_id is None: + return None + + try: + state = self.fetch_state(*args, **kwargs) + self.update_state(state) + return state + except Exception as e: + if not check_not_found_exception(e, self.resource_type, self.properties): + LOG.warning( + "Unable to fetch state for resource %s: %s", + self, + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + # TODO: remove + def update_state(self, details): + """Update the deployment state of this resource (existing attributes will be overwritten).""" + details = details or {} + self.state.update(details) + + @property + def physical_resource_id(self) -> str | None: + """Return the (cached) physical resource ID.""" + return self.resource_json.get("PhysicalResourceId") + + @property + def logical_resource_id(self) -> str: + """Return the logical resource ID.""" + return self.resource_json["LogicalResourceId"] + + # TODO: rename? 
make it clearer what props are in comparison with state, properties and resource_json + @property + def props(self) -> dict: + """Return a copy of (1) the resource properties (from the template), combined with + (2) the current deployment state properties of the resource.""" + result = dict(self.properties) + result.update(self.state or {}) + last_state = self.resource_json.get("_last_deployed_state", {}) + result.update(last_state) + return result diff --git a/localstack-core/localstack/services/cloudformation/stores.py b/localstack-core/localstack/services/cloudformation/stores.py new file mode 100644 index 0000000000000..7191f5491b4e1 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/stores.py @@ -0,0 +1,134 @@ +import logging +from typing import Optional + +from localstack.aws.api.cloudformation import StackStatus +from localstack.services.cloudformation.engine.entities import Stack, StackChangeSet, StackSet +from localstack.services.cloudformation.v2.entities import ChangeSet as ChangeSetV2 +from localstack.services.cloudformation.v2.entities import Stack as StackV2 +from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute + +LOG = logging.getLogger(__name__) + + +class CloudFormationStore(BaseStore): + # maps stack ID to stack details + stacks: dict[str, Stack] = LocalAttribute(default=dict) + stacks_v2: dict[str, StackV2] = LocalAttribute(default=dict) + + change_sets: dict[str, ChangeSetV2] = LocalAttribute(default=dict) + + # maps stack set ID to stack set details + stack_sets: dict[str, StackSet] = LocalAttribute(default=dict) + + # maps macro ID to macros + macros: dict[str, dict] = LocalAttribute(default=dict) + + # exports: dict[str, str] + @property + def exports(self): + exports = [] + output_keys = {} + for stack_id, stack in self.stacks.items(): + for output in stack.resolved_outputs: + export_name = output.get("ExportName") + if not export_name: + continue + if export_name in output_keys: + # TODO: raise exception on stack creation in case of duplicate exports + LOG.warning( + "Found duplicate export name %s in stacks: %s %s", + export_name, + output_keys[export_name], + stack.stack_id, + ) + entry = { + "ExportingStackId": stack.stack_id, + "Name": export_name, + "Value": output["OutputValue"], + } + exports.append(entry) + output_keys[export_name] = stack.stack_id + return exports + + +cloudformation_stores = AccountRegionBundle("cloudformation", CloudFormationStore) + + +def get_cloudformation_store(account_id: str, region_name: str) -> CloudFormationStore: + return cloudformation_stores[account_id][region_name] + + +# TODO: rework / fix usage of this +def find_stack(account_id: str, region_name: str, stack_name: str) -> Stack | None: + # Warning: This function may not return the correct stack if multiple stacks with same name exist. + state = get_cloudformation_store(account_id, region_name) + return ( + [s for s in state.stacks.values() if stack_name in [s.stack_name, s.stack_id]] or [None] + )[0] + + +def find_stack_by_id(account_id: str, region_name: str, stack_id: str) -> Stack | None: + """ + Find the stack by id. 
+
+    :param account_id: account of the stack
+    :param region_name: region of the stack
+    :param stack_id: stack id
+    :return: Stack if it is found, None otherwise
+    """
+    state = get_cloudformation_store(account_id, region_name)
+    for stack in state.stacks.values():
+        # there can only be one stack with an id
+        if stack_id == stack.stack_id:
+            return stack
+    return None
+
+
+def find_active_stack_by_name_or_id(
+    account_id: str, region_name: str, stack_name_or_id: str
+) -> Stack | None:
+    """
+    Find the active stack by name. Some cloudformation operations only allow referencing by stack name if the stack is
+    "active", which we currently interpret as not DELETE_COMPLETE.
+
+    :param account_id: account of the stack
+    :param region_name: region of the stack
+    :param stack_name_or_id: stack name or stack id
+    :return: Stack if it is found, None otherwise
+    """
+    state = get_cloudformation_store(account_id, region_name)
+    for stack in state.stacks.values():
+        # there can only be one stack where this condition is true for each region
+        # as there can only be one active stack with a given name
+        if (
+            stack_name_or_id in [stack.stack_name, stack.stack_id]
+            and stack.status != "DELETE_COMPLETE"
+        ):
+            return stack
+    return None
+
+
+def find_change_set(
+    account_id: str,
+    region_name: str,
+    cs_name: str,
+    stack_name: Optional[str] = None,
+    active_only: bool = False,
+) -> Optional[StackChangeSet]:
+    store = get_cloudformation_store(account_id, region_name)
+    for stack in store.stacks.values():
+        if active_only and stack.status == StackStatus.DELETE_COMPLETE:
+            continue
+        if stack_name in (stack.stack_name, stack.stack_id, None):
+            for change_set in stack.change_sets:
+                if cs_name in (change_set.change_set_id, change_set.change_set_name):
+                    return change_set
+    return None
+
+
+def exports_map(account_id: str, region_name: str):
+    result = {}
+    store = get_cloudformation_store(account_id, region_name)
+    for export in store.exports:
+        result[export["Name"]] = export
+    return result
diff --git a/localstack-core/localstack/services/cloudformation/usage.py b/localstack-core/localstack/services/cloudformation/usage.py
new file mode 100644
index 0000000000000..66d99b2e4cab0
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/usage.py
@@ -0,0 +1,7 @@
+from localstack.utils.analytics.metrics import Counter
+
+COUNTER_NAMESPACE = "cloudformation"
+
+resources = Counter(
+    namespace=COUNTER_NAMESPACE, name="resources", labels=["resource_type", "missing"]
+)
diff --git a/localstack/services/resourcegroupstaggingapi/__init__.py b/localstack-core/localstack/services/cloudformation/v2/__init__.py
similarity index 100%
rename from localstack/services/resourcegroupstaggingapi/__init__.py
rename to localstack-core/localstack/services/cloudformation/v2/__init__.py
diff --git a/localstack-core/localstack/services/cloudformation/v2/entities.py b/localstack-core/localstack/services/cloudformation/v2/entities.py
new file mode 100644
index 0000000000000..481cbdbd9896c
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/v2/entities.py
@@ -0,0 +1,208 @@
+from datetime import datetime, timezone
+from typing import TypedDict
+
+from localstack.aws.api.cloudformation import (
+    ChangeSetStatus,
+    ChangeSetType,
+    CreateChangeSetInput,
+    ExecutionStatus,
+    Output,
+    Parameter,
+    ResourceStatus,
+    StackDriftInformation,
+    StackDriftStatus,
+    StackResource,
+    StackStatus,
+    StackStatusReason,
+)
+from localstack.aws.api.cloudformation import (
+    Stack as ApiStack,
+)
+from
localstack.services.cloudformation.engine.entities import ( + StackIdentifier, + StackTemplate, +) +from localstack.services.cloudformation.engine.v2.change_set_model import ( + ChangeSetModel, + NodeTemplate, +) +from localstack.utils.aws import arns +from localstack.utils.strings import short_uid + + +class ResolvedResource(TypedDict): + Properties: dict + + +class Stack: + stack_name: str + parameters: list[Parameter] + change_set_id: str | None + change_set_name: str | None + status: StackStatus + status_reason: StackStatusReason | None + stack_id: str + creation_time: datetime + + # state after deploy + resolved_parameters: dict[str, str] + resolved_resources: dict[str, ResolvedResource] + resolved_outputs: dict[str, str] + resource_states: dict[str, StackResource] + + def __init__( + self, + account_id: str, + region_name: str, + request_payload: CreateChangeSetInput, + template: StackTemplate | None = None, + template_body: str | None = None, + change_set_ids: list[str] | None = None, + ): + self.account_id = account_id + self.region_name = region_name + self.template = template + self.template_body = template_body + self.status = StackStatus.CREATE_IN_PROGRESS + self.status_reason = None + self.change_set_ids = change_set_ids or [] + self.creation_time = datetime.now(tz=timezone.utc) + + self.stack_name = request_payload["StackName"] + self.change_set_name = request_payload.get("ChangeSetName") + self.parameters = request_payload.get("Parameters", []) + self.stack_id = arns.cloudformation_stack_arn( + self.stack_name, + stack_id=StackIdentifier( + account_id=self.account_id, region=self.region_name, stack_name=self.stack_name + ).generate(tags=request_payload.get("Tags")), + account_id=self.account_id, + region_name=self.region_name, + ) + + # TODO: only kept for v1 compatibility + self.request_payload = request_payload + + # state after deploy + self.resolved_parameters = {} + self.resolved_resources = {} + self.resolved_outputs = {} + self.resource_states = {} + + def set_stack_status(self, status: StackStatus, reason: StackStatusReason | None = None): + self.status = status + if reason: + self.status_reason = reason + + def set_resource_status( + self, + *, + logical_resource_id: str, + physical_resource_id: str | None, + resource_type: str, + status: ResourceStatus, + resource_status_reason: str | None = None, + ): + self.resource_states[logical_resource_id] = StackResource( + StackName=self.stack_name, + StackId=self.stack_id, + LogicalResourceId=logical_resource_id, + PhysicalResourceId=physical_resource_id, + ResourceType=resource_type, + Timestamp=datetime.now(tz=timezone.utc), + ResourceStatus=status, + ResourceStatusReason=resource_status_reason, + ) + + def describe_details(self) -> ApiStack: + result = { + "ChangeSetId": self.change_set_id, + "CreationTime": self.creation_time, + "StackId": self.stack_id, + "StackName": self.stack_name, + "StackStatus": self.status, + "StackStatusReason": self.status_reason, + # fake values + "DisableRollback": False, + "DriftInformation": StackDriftInformation( + StackDriftStatus=StackDriftStatus.NOT_CHECKED + ), + "EnableTerminationProtection": False, + "LastUpdatedTime": self.creation_time, + "RollbackConfiguration": {}, + "Tags": [], + } + if self.resolved_outputs: + describe_outputs = [] + for key, value in self.resolved_outputs.items(): + describe_outputs.append( + Output( + # TODO(parity): Description, ExportName + # TODO(parity): what happens on describe stack when the stack has not been deployed yet? 
+ OutputKey=key, + OutputValue=value, + ) + ) + result["Outputs"] = describe_outputs + return result + + +class ChangeSet: + change_set_name: str + change_set_id: str + change_set_type: ChangeSetType + update_graph: NodeTemplate | None + status: ChangeSetStatus + execution_status: ExecutionStatus + creation_time: datetime + + def __init__( + self, + stack: Stack, + request_payload: CreateChangeSetInput, + template: StackTemplate | None = None, + ): + self.stack = stack + self.template = template + self.status = ChangeSetStatus.CREATE_IN_PROGRESS + self.execution_status = ExecutionStatus.AVAILABLE + self.update_graph = None + self.creation_time = datetime.now(tz=timezone.utc) + + self.change_set_name = request_payload["ChangeSetName"] + self.change_set_type = request_payload.get("ChangeSetType", ChangeSetType.UPDATE) + self.change_set_id = arns.cloudformation_change_set_arn( + self.change_set_name, + change_set_id=short_uid(), + account_id=self.stack.account_id, + region_name=self.stack.region_name, + ) + + def set_change_set_status(self, status: ChangeSetStatus): + self.status = status + + def set_execution_status(self, execution_status: ExecutionStatus): + self.execution_status = execution_status + + @property + def account_id(self) -> str: + return self.stack.account_id + + @property + def region_name(self) -> str: + return self.stack.region_name + + def populate_update_graph( + self, + before_template: dict | None = None, + after_template: dict | None = None, + before_parameters: dict | None = None, + after_parameters: dict | None = None, + ) -> None: + change_set_model = ChangeSetModel( + before_template=before_template, + after_template=after_template, + before_parameters=before_parameters, + after_parameters=after_parameters, + ) + self.update_graph = change_set_model.get_update_model() diff --git a/localstack-core/localstack/services/cloudformation/v2/provider.py b/localstack-core/localstack/services/cloudformation/v2/provider.py new file mode 100644 index 0000000000000..07f09a0cd2ae5 --- /dev/null +++ b/localstack-core/localstack/services/cloudformation/v2/provider.py @@ -0,0 +1,462 @@ +import copy +import logging +from typing import Any + +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.cloudformation import ( + Changes, + ChangeSetNameOrId, + ChangeSetNotFoundException, + ChangeSetStatus, + ChangeSetType, + ClientRequestToken, + CreateChangeSetInput, + CreateChangeSetOutput, + DeletionMode, + DescribeChangeSetOutput, + DescribeStackEventsOutput, + DescribeStackResourcesOutput, + DescribeStacksOutput, + DisableRollback, + ExecuteChangeSetOutput, + ExecutionStatus, + IncludePropertyValues, + InvalidChangeSetStatusException, + LogicalResourceId, + NextToken, + Parameter, + PhysicalResourceId, + RetainExceptOnCreate, + RetainResources, + RoleARN, + RollbackConfiguration, + StackName, + StackNameOrId, + StackStatus, +) +from localstack.services.cloudformation import api_utils +from localstack.services.cloudformation.engine import template_preparer +from localstack.services.cloudformation.engine.v2.change_set_model_describer import ( + ChangeSetModelDescriber, +) +from localstack.services.cloudformation.engine.v2.change_set_model_executor import ( + ChangeSetModelExecutor, +) +from localstack.services.cloudformation.engine.validations import ValidationError +from localstack.services.cloudformation.provider import ( + ARN_CHANGESET_REGEX, + ARN_STACK_REGEX, + CloudformationProvider, +) +from localstack.services.cloudformation.stores import ( + 
CloudFormationStore,
+    get_cloudformation_store,
+)
+from localstack.services.cloudformation.v2.entities import ChangeSet, Stack
+from localstack.utils.threads import start_worker_thread
+
+LOG = logging.getLogger(__name__)
+
+
+def is_stack_arn(stack_name_or_id: str) -> bool:
+    return ARN_STACK_REGEX.match(stack_name_or_id) is not None
+
+
+def is_changeset_arn(change_set_name_or_id: str) -> bool:
+    return ARN_CHANGESET_REGEX.match(change_set_name_or_id) is not None
+
+
+def find_stack_v2(state: CloudFormationStore, stack_name: str | None) -> Stack:
+    if stack_name:
+        if is_stack_arn(stack_name):
+            return state.stacks_v2[stack_name]
+        else:
+            stack_candidates = []
+            for stack in state.stacks_v2.values():
+                if stack.stack_name == stack_name and stack.status != StackStatus.DELETE_COMPLETE:
+                    stack_candidates.append(stack)
+            if len(stack_candidates) == 0:
+                raise ValidationError(f"No stack with name {stack_name} found")
+            elif len(stack_candidates) > 1:
+                raise RuntimeError("Programming error, duplicate stacks found")
+            else:
+                return stack_candidates[0]
+    else:
+        raise NotImplementedError
+
+
+def find_change_set_v2(
+    state: CloudFormationStore, change_set_name: str, stack_name: str | None = None
+) -> ChangeSet | None:
+    change_set: ChangeSet | None = None
+    if is_changeset_arn(change_set_name):
+        change_set = state.change_sets[change_set_name]
+    else:
+        if stack_name is not None:
+            stack: Stack | None = None
+            if is_stack_arn(stack_name):
+                stack = state.stacks_v2[stack_name]
+            else:
+                for stack_candidate in state.stacks_v2.values():
+                    # TODO: check for active stacks
+                    if (
+                        stack_candidate.stack_name == stack_name
+                        and stack_candidate.status != StackStatus.DELETE_COMPLETE
+                    ):
+                        stack = stack_candidate
+                        break
+
+            if not stack:
+                raise NotImplementedError(f"no stack found for change set {change_set_name}")
+
+            for change_set_id in stack.change_set_ids:
+                change_set_candidate = state.change_sets[change_set_id]
+                if change_set_candidate.change_set_name == change_set_name:
+                    change_set = change_set_candidate
+                    break
+        else:
+            raise NotImplementedError
+
+    return change_set
+
+
+class CloudformationProviderV2(CloudformationProvider):
+    @handler("CreateChangeSet", expand=False)
+    def create_change_set(
+        self, context: RequestContext, request: CreateChangeSetInput
+    ) -> CreateChangeSetOutput:
+        try:
+            stack_name = request["StackName"]
+        except KeyError:
+            # TODO: proper exception
+            raise ValidationError("StackName must be specified")
+        try:
+            change_set_name = request["ChangeSetName"]
+        except KeyError:
+            # TODO: proper exception
+            raise ValidationError("ChangeSetName must be specified")
+
+        state = get_cloudformation_store(context.account_id, context.region)
+
+        change_set_type = request.get("ChangeSetType", "UPDATE")
+        template_body = request.get("TemplateBody")
+        # s3 or secretsmanager url
+        template_url = request.get("TemplateURL")
+
+        # validate and resolve template
+        if template_body and template_url:
+            raise ValidationError(
+                "Specify exactly one of 'TemplateBody' or 'TemplateUrl'"
+            )  # TODO: check proper message
+
+        if not template_body and not template_url:
+            raise ValidationError(
+                "Specify exactly one of 'TemplateBody' or 'TemplateUrl'"
+            )  # TODO: check proper message
+
+        template_body = api_utils.extract_template_body(request)
+        structured_template = template_preparer.parse_template(template_body)
+
+        # this is intentionally not in a util yet. Let's first see how the different operations deal with these before generalizing
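+        # For reference (illustrative values only, not taken from the handler): a stack
+        # ARN has the shape arn:aws:cloudformation:us-east-1:000000000000:stack/my-stack/<uuid>,
+        # whereas a plain stack name is just "my-stack"; both forms are handled below.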
+        # handle ARN stack_name here (not valid for initial CREATE, since stack doesn't exist yet)
+        if is_stack_arn(stack_name):
+            stack = state.stacks_v2.get(stack_name)
+            if not stack:
+                raise ValidationError(f"Stack '{stack_name}' does not exist.")
+        else:
+            # stack name specified, so fetch the stack by name
+            stack_candidates: list[Stack] = [
+                s for stack_arn, s in state.stacks_v2.items() if s.stack_name == stack_name
+            ]
+            active_stack_candidates = [
+                s for s in stack_candidates if self._stack_status_is_active(s.status)
+            ]
+
+            # on a CREATE an empty Stack should be generated if we didn't find an active one
+            if not active_stack_candidates and change_set_type == ChangeSetType.CREATE:
+                stack = Stack(
+                    context.account_id,
+                    context.region,
+                    request,
+                    structured_template,
+                    template_body=template_body,
+                )
+                state.stacks_v2[stack.stack_id] = stack
+            else:
+                if not active_stack_candidates:
+                    raise ValidationError(f"Stack '{stack_name}' does not exist.")
+                stack = active_stack_candidates[0]
+
+        stack.set_stack_status(StackStatus.REVIEW_IN_PROGRESS)
+
+        # TODO: test if rollback status is allowed as well
+        if (
+            change_set_type == ChangeSetType.CREATE
+            and stack.status != StackStatus.REVIEW_IN_PROGRESS
+        ):
+            raise ValidationError(
+                f"Stack [{stack_name}] already exists and cannot be created again with the changeSet [{change_set_name}]."
+            )
+
+        before_parameters: dict[str, Parameter] | None = None
+        match change_set_type:
+            case ChangeSetType.UPDATE:
+                before_parameters = stack.resolved_parameters
+                # add changeset to existing stack
+                # old_parameters = {
+                #     k: mask_no_echo(strip_parameter_type(v))
+                #     for k, v in stack.resolved_parameters.items()
+                # }
+            case ChangeSetType.IMPORT:
+                raise NotImplementedError()  # TODO: implement importing resources
+            case ChangeSetType.CREATE:
+                pass
+            case _:
+                msg = (
+                    f"1 validation error detected: Value '{change_set_type}' at 'changeSetType' failed to satisfy "
+                    f"constraint: Member must satisfy enum value set: [IMPORT, UPDATE, CREATE] "
+                )
+                raise ValidationError(msg)
+
+        # TODO: transformations
+
+        # TODO: reconsider the way parameters are modelled in the update graph process.
+        # The options might be reduced to using the current style, or passing the extra information
+        # as a metadata object. The choice should be made considering when the extra information
+        # is needed for the update graph building, or only looked up in downstream tasks (metadata).
+        request_parameters = request.get("Parameters", list())
+        # TODO: handle parameter defaults and resolution
+        after_parameters: dict[str, Any] = {
+            parameter["ParameterKey"]: parameter["ParameterValue"]
+            for parameter in request_parameters
+        }
+
+        # TODO: update this logic to always pass the clean template object if one exists. The
+        # current issue with relying on stack.template_original is that this appears to have
+        # its parameters and conditions populated.
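+        # Informal sketch of the template pairing computed below (illustration, not normative):
+        # - CREATE: before_template=None, after_template=<parsed new template>
+        # - UPDATE: before_template=<currently deployed template>, after_template=<parsed new template>
+        # The change set's update graph is then derived from diffing the two sides.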
+        before_template = None
+        if change_set_type == ChangeSetType.UPDATE:
+            before_template = stack.template
+        after_template = structured_template
+
+        # create change set for the stack and apply changes
+        change_set = ChangeSet(stack, request)
+
+        # only set parameters for the changeset, then switch to stack on execute_change_set
+        change_set.populate_update_graph(
+            before_template=before_template,
+            after_template=after_template,
+            before_parameters=before_parameters,
+            after_parameters=after_parameters,
+        )
+        change_set.set_change_set_status(ChangeSetStatus.CREATE_COMPLETE)
+        stack.change_set_id = change_set.change_set_id
+        state.change_sets[change_set.change_set_id] = change_set
+
+        return CreateChangeSetOutput(StackId=stack.stack_id, Id=change_set.change_set_id)
+
+    @handler("ExecuteChangeSet")
+    def execute_change_set(
+        self,
+        context: RequestContext,
+        change_set_name: ChangeSetNameOrId,
+        stack_name: StackNameOrId | None = None,
+        client_request_token: ClientRequestToken | None = None,
+        disable_rollback: DisableRollback | None = None,
+        retain_except_on_create: RetainExceptOnCreate | None = None,
+        **kwargs,
+    ) -> ExecuteChangeSetOutput:
+        state = get_cloudformation_store(context.account_id, context.region)
+
+        change_set = find_change_set_v2(state, change_set_name, stack_name)
+        if not change_set:
+            raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist")
+
+        if change_set.execution_status != ExecutionStatus.AVAILABLE:
+            LOG.debug("Change set %s not in execution status 'AVAILABLE'", change_set_name)
+            raise InvalidChangeSetStatusException(
+                f"ChangeSet [{change_set.change_set_id}] cannot be executed in its current status of [{change_set.status}]"
+            )
+        # LOG.debug(
+        #     'Executing change set "%s" for stack "%s" with %s resources ...',
+        #     change_set_name,
+        #     stack_name,
+        #     len(change_set.template_resources),
+        # )
+        if not change_set.update_graph:
+            raise RuntimeError("Programming error: no update graph found for change set")
+
+        change_set.set_execution_status(ExecutionStatus.EXECUTE_IN_PROGRESS)
+        change_set.stack.set_stack_status(
+            StackStatus.UPDATE_IN_PROGRESS
+            if change_set.change_set_type == ChangeSetType.UPDATE
+            else StackStatus.CREATE_IN_PROGRESS
+        )
+
+        change_set_executor = ChangeSetModelExecutor(
+            change_set,
+        )
+
+        def _run(*args):
+            try:
+                result = change_set_executor.execute()
+                new_stack_status = StackStatus.UPDATE_COMPLETE
+                if change_set.change_set_type == ChangeSetType.CREATE:
+                    new_stack_status = StackStatus.CREATE_COMPLETE
+                change_set.stack.set_stack_status(new_stack_status)
+                change_set.set_execution_status(ExecutionStatus.EXECUTE_COMPLETE)
+                change_set.stack.resolved_resources = result.resources
+                change_set.stack.resolved_parameters = result.parameters
+                change_set.stack.resolved_outputs = result.outputs
+            except Exception as e:
+                LOG.error(
+                    "Execute change set failed: %s", e, exc_info=LOG.isEnabledFor(logging.WARNING)
+                )
+                new_stack_status = StackStatus.UPDATE_FAILED
+                if change_set.change_set_type == ChangeSetType.CREATE:
+                    new_stack_status = StackStatus.CREATE_FAILED
+
+                change_set.stack.set_stack_status(new_stack_status)
+                change_set.set_execution_status(ExecutionStatus.EXECUTE_FAILED)
+
+        start_worker_thread(_run)
+
+        return ExecuteChangeSetOutput()
+
+    def _describe_change_set(
+        self, change_set: ChangeSet, include_property_values: bool
+    ) -> DescribeChangeSetOutput:
+        # TODO: The ChangeSetModelDescriber currently matches AWS behavior by listing
+        #  resource
changes in the order they appear in the template. However, when + # a resource change is triggered indirectly (e.g., via Ref or GetAtt), the + # dependency's change appears first in the list. + # Snapshot tests using the `capture_update_process` fixture rely on a + # normalizer to account for this ordering. This should be removed in the + # future by enforcing a consistently correct change ordering at the source. + change_set_describer = ChangeSetModelDescriber( + change_set=change_set, include_property_values=include_property_values + ) + changes: Changes = change_set_describer.get_changes() + + result = DescribeChangeSetOutput( + Status=change_set.status, + ChangeSetId=change_set.change_set_id, + ChangeSetName=change_set.change_set_name, + ExecutionStatus=change_set.execution_status, + RollbackConfiguration=RollbackConfiguration(), + StackId=change_set.stack.stack_id, + StackName=change_set.stack.stack_name, + CreationTime=change_set.creation_time, + Parameters=[ + # TODO: add masking support. + Parameter(ParameterKey=key, ParameterValue=value) + for (key, value) in change_set.stack.resolved_parameters.items() + ], + Changes=changes, + ) + return result + + @handler("DescribeChangeSet") + def describe_change_set( + self, + context: RequestContext, + change_set_name: ChangeSetNameOrId, + stack_name: StackNameOrId | None = None, + next_token: NextToken | None = None, + include_property_values: IncludePropertyValues | None = None, + **kwargs, + ) -> DescribeChangeSetOutput: + # TODO add support for include_property_values + # only relevant if change_set_name isn't an ARN + state = get_cloudformation_store(context.account_id, context.region) + change_set = find_change_set_v2(state, change_set_name, stack_name) + if not change_set: + raise ChangeSetNotFoundException(f"ChangeSet [{change_set_name}] does not exist") + result = self._describe_change_set( + change_set=change_set, include_property_values=include_property_values or False + ) + return result + + @handler("DescribeStacks") + def describe_stacks( + self, + context: RequestContext, + stack_name: StackName = None, + next_token: NextToken = None, + **kwargs, + ) -> DescribeStacksOutput: + state = get_cloudformation_store(context.account_id, context.region) + stack = find_stack_v2(state, stack_name) + return DescribeStacksOutput(Stacks=[stack.describe_details()]) + + @handler("DescribeStackResources") + def describe_stack_resources( + self, + context: RequestContext, + stack_name: StackName = None, + logical_resource_id: LogicalResourceId = None, + physical_resource_id: PhysicalResourceId = None, + **kwargs, + ) -> DescribeStackResourcesOutput: + if physical_resource_id and stack_name: + raise ValidationError("Cannot specify both StackName and PhysicalResourceId") + state = get_cloudformation_store(context.account_id, context.region) + stack = find_stack_v2(state, stack_name) + # TODO: filter stack by PhysicalResourceId! 
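+        # (hypothetical sketch for the TODO above, untested: look up the
+        #  PhysicalResourceId recorded in each entry of stack.resource_states and
+        #  drop non-matching entries before returning)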
+        statuses = []
+        for resource_id, resource_status in stack.resource_states.items():
+            if resource_id == logical_resource_id or logical_resource_id is None:
+                status = copy.deepcopy(resource_status)
+                status.setdefault("DriftInformation", {"StackResourceDriftStatus": "NOT_CHECKED"})
+                statuses.append(status)
+        return DescribeStackResourcesOutput(StackResources=statuses)
+
+    @handler("DescribeStackEvents")
+    def describe_stack_events(
+        self,
+        context: RequestContext,
+        stack_name: StackName = None,
+        next_token: NextToken = None,
+        **kwargs,
+    ) -> DescribeStackEventsOutput:
+        return DescribeStackEventsOutput(StackEvents=[])
+
+    @handler("DeleteStack")
+    def delete_stack(
+        self,
+        context: RequestContext,
+        stack_name: StackName,
+        retain_resources: RetainResources = None,
+        role_arn: RoleARN = None,
+        client_request_token: ClientRequestToken = None,
+        deletion_mode: DeletionMode = None,
+        **kwargs,
+    ) -> None:
+        state = get_cloudformation_store(context.account_id, context.region)
+        if stack_name:
+            if is_stack_arn(stack_name):
+                stack = state.stacks_v2[stack_name]
+            else:
+                stack_candidates = []
+                for stack in state.stacks_v2.values():
+                    if (
+                        stack.stack_name == stack_name
+                        and stack.status != StackStatus.DELETE_COMPLETE
+                    ):
+                        stack_candidates.append(stack)
+                if len(stack_candidates) == 0:
+                    raise ValidationError(f"No stack with name {stack_name} found")
+                elif len(stack_candidates) > 1:
+                    raise RuntimeError("Programming error: duplicate stacks found")
+                else:
+                    stack = stack_candidates[0]
+        else:
+            raise NotImplementedError
+
+        if not stack:
+            # aws will silently ignore invalid stack names - we should do the same
+            return
+
+        # TODO: actually delete
+        stack.set_stack_status(StackStatus.DELETE_COMPLETE)
diff --git a/localstack-core/localstack/services/cloudformation/v2/utils.py b/localstack-core/localstack/services/cloudformation/v2/utils.py
new file mode 100644
index 0000000000000..02a6cbb971a99
--- /dev/null
+++ b/localstack-core/localstack/services/cloudformation/v2/utils.py
@@ -0,0 +1,5 @@
+from localstack import config
+
+
+def is_v2_engine() -> bool:
+    return config.SERVICE_PROVIDER_CONFIG.get_provider("cloudformation") == "engine-v2"
diff --git a/localstack/services/route53/__init__.py b/localstack-core/localstack/services/cloudwatch/__init__.py
similarity index 100%
rename from localstack/services/route53/__init__.py
rename to localstack-core/localstack/services/cloudwatch/__init__.py
diff --git a/localstack/services/cloudwatch/alarm_scheduler.py b/localstack-core/localstack/services/cloudwatch/alarm_scheduler.py
similarity index 93%
rename from localstack/services/cloudwatch/alarm_scheduler.py
rename to localstack-core/localstack/services/cloudwatch/alarm_scheduler.py
index a429566273e8f..2b0675f121450
--- a/localstack/services/cloudwatch/alarm_scheduler.py
+++ b/localstack-core/localstack/services/cloudwatch/alarm_scheduler.py
@@ -5,7 +5,8 @@
 from datetime import datetime, timedelta, timezone
 from typing import TYPE_CHECKING, List, Optional
 
-from localstack.aws.api.cloudwatch import MetricAlarm, MetricDataQuery, StateValue
+from localstack.aws.api.cloudwatch import MetricAlarm, MetricDataQuery, MetricStat, StateValue
+from localstack.aws.connect import connect_to
 from localstack.utils.aws import arns, aws_stack
 from localstack.utils.scheduler import Scheduler
 
@@ -37,7 +38,7 @@ def __init__(self) -> None:
         """
         super().__init__()
         self.scheduler = Scheduler()
-        self.thread = threading.Thread(target=self.scheduler.run)
+        self.thread =
threading.Thread(target=self.scheduler.run, name="cloudwatch-scheduler") self.thread.start() self.scheduled_alarms = {} @@ -46,7 +47,7 @@ def shutdown_scheduler(self) -> None: Shutsdown the scheduler, must be called before application stops """ self.scheduler.close() - self.thread.join(5) + self.thread.join(10) def schedule_metric_alarm(self, alarm_arn: str) -> None: """(Re-)schedules the alarm, if the alarm is re-scheduled, the running alarm scheduler will be cancelled before @@ -94,9 +95,8 @@ def restart_existing_alarms(self) -> None: """ Only used re-create persistent state. Reschedules alarms that already exist """ - service = "cloudwatch" - for region in aws_stack.get_valid_regions_for_service(service): - client = aws_stack.connect_to_service(service, region_name=region) + for region in aws_stack.get_valid_regions_for_service("cloudwatch"): + client = connect_to(region_name=region).cloudwatch result = client.describe_alarms() for metric_alarm in result["MetricAlarms"]: arn = metric_alarm["AlarmArn"] @@ -105,14 +105,16 @@ def restart_existing_alarms(self) -> None: def _is_alarm_supported(self, alarm_details: MetricAlarm) -> bool: required_parameters = ["Period", "Statistic", "MetricName", "Threshold"] for param in required_parameters: - if param not in alarm_details.keys(): + if param not in alarm_details: LOG.debug( - f"Currently only simple MetricAlarm are supported. Alarm is missing '{param}'. ExtendedStatistic is not yet supported." + "Currently only simple MetricAlarm are supported. Alarm is missing '%s'. ExtendedStatistic is not yet supported.", + param, ) return False - if alarm_details["ComparisonOperator"] not in COMPARISON_OPS.keys(): + if alarm_details["ComparisonOperator"] not in COMPARISON_OPS: LOG.debug( - f"ComparisonOperator '{alarm_details['ComparisonOperator']}' not yet supported." 
+ "ComparisonOperator '%s' not yet supported.", + alarm_details["ComparisonOperator"], ) return False return True @@ -126,8 +128,10 @@ def get_metric_alarm_details_for_alarm_arn(alarm_arn: str) -> Optional[MetricAla def get_cloudwatch_client_for_region_of_alarm(alarm_arn: str) -> "CloudWatchClient": - region = arns.extract_region_from_arn(alarm_arn) - return aws_stack.connect_to_service("cloudwatch", region_name=region) + parsed_arn = arns.parse_arn(alarm_arn) + region = parsed_arn["region"] + access_key_id = parsed_arn["account"] + return connect_to(region_name=region, aws_access_key_id=access_key_id).cloudwatch def generate_metric_query(alarm_details: MetricAlarm) -> MetricDataQuery: @@ -140,15 +144,15 @@ def generate_metric_query(alarm_details: MetricAlarm) -> MetricDataQuery: metric["Namespace"] = alarm_details["Namespace"] if alarm_details.get("Dimensions"): metric["Dimensions"] = alarm_details["Dimensions"] - return { - "Id": alarm_details["AlarmName"], - "MetricStat": { - "Metric": metric, - "Period": alarm_details["Period"], - "Stat": alarm_details["Statistic"].capitalize(), - }, + return MetricDataQuery( + Id=alarm_details["AlarmName"], + MetricStat=MetricStat( + Metric=metric, + Period=alarm_details["Period"], + Stat=alarm_details["Statistic"], + ), # TODO other fields might be required in the future - } + ) def is_threshold_exceeded(metric_values: List[float], alarm_details: MetricAlarm) -> bool: diff --git a/localstack-core/localstack/services/cloudwatch/cloudwatch_database_helper.py b/localstack-core/localstack/services/cloudwatch/cloudwatch_database_helper.py new file mode 100644 index 0000000000000..43383cf2782ad --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/cloudwatch_database_helper.py @@ -0,0 +1,460 @@ +import logging +import os +import sqlite3 +import threading +from datetime import datetime, timezone +from typing import Dict, List, Optional + +from localstack import config +from localstack.aws.api.cloudwatch import MetricData, MetricDataQuery, ScanBy +from localstack.utils.files import mkdir + +LOG = logging.getLogger(__name__) + +STAT_TO_SQLITE_AGGREGATION_FUNC = { + "Sum": "SUM(value)", + "Average": "SUM(value)", # we need to calculate the avg manually as we have also a table with aggregated data + "Minimum": "MIN(value)", + "Maximum": "MAX(value)", + "SampleCount": "Sum(count)", +} + +STAT_TO_SQLITE_COL_NAME_HELPER = { + "Sum": "sum", + "Average": "sum", + "Minimum": "min", + "Maximum": "max", + "SampleCount": "sample_count", +} + + +class CloudwatchDatabase: + DB_NAME = "metrics.db" + CLOUDWATCH_DATA_ROOT: str = os.path.join(config.dirs.data, "cloudwatch") + METRICS_DB: str = os.path.join(CLOUDWATCH_DATA_ROOT, DB_NAME) + METRICS_DB_READ_ONLY: str = f"file:{METRICS_DB}?mode=ro" + TABLE_SINGLE_METRICS = "SINGLE_METRICS" + TABLE_AGGREGATED_METRICS = "AGGREGATED_METRICS" + DATABASE_LOCK: threading.RLock + + def __init__(self): + self.DATABASE_LOCK = threading.RLock() + if os.path.exists(self.METRICS_DB): + LOG.debug("database for metrics already exists (%s)", self.METRICS_DB) + return + + mkdir(self.CLOUDWATCH_DATA_ROOT) + with self.DATABASE_LOCK, sqlite3.connect(self.METRICS_DB) as conn: + cur = conn.cursor() + common_columns = """ + "id" INTEGER, + "account_id" TEXT, + "region" TEXT, + "metric_name" TEXT, + "namespace" TEXT, + "timestamp" NUMERIC, + "dimensions" TEXT, + "unit" TEXT, + "storage_resolution" INTEGER + """ + cur.execute( + f""" + CREATE TABLE "{self.TABLE_SINGLE_METRICS}" ( + {common_columns}, + "value" NUMERIC, + PRIMARY KEY("id") + 
); + """ + ) + + cur.execute( + f""" + CREATE TABLE "{self.TABLE_AGGREGATED_METRICS}" ( + {common_columns}, + "sample_count" NUMERIC, + "sum" NUMERIC, + "min" NUMERIC, + "max" NUMERIC, + PRIMARY KEY("id") + ); + """ + ) + # create indexes + cur.executescript( + """ + CREATE INDEX idx_single_metrics_comp ON SINGLE_METRICS (metric_name, namespace); + CREATE INDEX idx_aggregated_metrics_comp ON AGGREGATED_METRICS (metric_name, namespace); + """ + ) + conn.commit() + + def add_metric_data( + self, account_id: str, region: str, namespace: str, metric_data: MetricData + ): + def _get_current_unix_timestamp_utc(): + now = datetime.utcnow().replace(tzinfo=timezone.utc) + return int(now.timestamp()) + + for metric in metric_data: + unix_timestamp = ( + self._convert_timestamp_to_unix(metric.get("Timestamp")) + if metric.get("Timestamp") + else _get_current_unix_timestamp_utc() + ) + + inserts = [] + if metric.get("Value") is not None: + inserts.append({"Value": metric.get("Value"), "TimesToInsert": 1}) + elif metric.get("Values"): + counts = metric.get("Counts", [1] * len(metric.get("Values"))) + inserts = [ + {"Value": value, "TimesToInsert": int(counts[indexValue])} + for indexValue, value in enumerate(metric.get("Values")) + ] + all_data = [] + for insert in inserts: + times_to_insert = insert.get("TimesToInsert") + + data = ( + account_id, + region, + metric.get("MetricName"), + namespace, + unix_timestamp, + self._get_ordered_dimensions_with_separator(metric.get("Dimensions")), + metric.get("Unit"), + metric.get("StorageResolution"), + insert.get("Value"), + ) + all_data.extend([data] * times_to_insert) + + if all_data: + with self.DATABASE_LOCK, sqlite3.connect(self.METRICS_DB) as conn: + cur = conn.cursor() + query = f"INSERT INTO {self.TABLE_SINGLE_METRICS} (account_id, region, metric_name, namespace, timestamp, dimensions, unit, storage_resolution, value) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)" + cur.executemany(query, all_data) + conn.commit() + + if statistic_values := metric.get("StatisticValues"): + with self.DATABASE_LOCK, sqlite3.connect(self.METRICS_DB) as conn: + cur = conn.cursor() + cur.execute( + f"""INSERT INTO {self.TABLE_AGGREGATED_METRICS} + ("account_id", "region", "metric_name", "namespace", "timestamp", "dimensions", "unit", "storage_resolution", "sample_count", "sum", "min", "max") + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + ( + account_id, + region, + metric.get("MetricName"), + namespace, + unix_timestamp, + self._get_ordered_dimensions_with_separator(metric.get("Dimensions")), + metric.get("Unit"), + metric.get("StorageResolution"), + statistic_values.get("SampleCount"), + statistic_values.get("Sum"), + statistic_values.get("Minimum"), + statistic_values.get("Maximum"), + ), + ) + + conn.commit() + + def get_units_for_metric_data_stat( + self, + account_id: str, + region: str, + start_time: datetime, + end_time: datetime, + metric_name: str, + namespace: str, + ): + # prepare SQL query + start_time_unix = self._convert_timestamp_to_unix(start_time) + end_time_unix = self._convert_timestamp_to_unix(end_time) + + data = ( + account_id, + region, + namespace, + metric_name, + start_time_unix, + end_time_unix, + ) + + sql_query = f""" + SELECT GROUP_CONCAT(unit) AS unit_values + FROM( + SELECT + DISTINCT COALESCE(unit, 'NULL_VALUE') AS unit + FROM ( + SELECT + account_id, region, metric_name, namespace, timestamp, unit + FROM {self.TABLE_SINGLE_METRICS} + UNION ALL + SELECT + account_id, region, metric_name, namespace, timestamp, unit + FROM 
{self.TABLE_AGGREGATED_METRICS} + ) AS combined + WHERE account_id = ? AND region = ? + AND namespace = ? AND metric_name = ? + AND timestamp >= ? AND timestamp < ? + ) AS subquery + """ + with self.DATABASE_LOCK, sqlite3.connect(self.METRICS_DB_READ_ONLY, uri=True) as conn: + cur = conn.cursor() + cur.execute( + sql_query, + data, + ) + result_row = cur.fetchone() + return result_row[0].split(",") if result_row[0] else ["NULL_VALUE"] + + def get_metric_data_stat( + self, + account_id: str, + region: str, + query: MetricDataQuery, + start_time: datetime, + end_time: datetime, + scan_by: str, + ) -> Dict[str, List]: + metric_stat = query.get("MetricStat") + metric = metric_stat.get("Metric") + period = metric_stat.get("Period") + stat = metric_stat.get("Stat") + dimensions = metric.get("Dimensions", []) + unit = metric_stat.get("Unit") + + # prepare SQL query + start_time_unix = self._convert_timestamp_to_unix(start_time) + end_time_unix = self._convert_timestamp_to_unix(end_time) + + data = ( + account_id, + region, + metric.get("Namespace"), + metric.get("MetricName"), + ) + + dimension_filter = "AND dimensions is null " if not dimensions else "AND dimensions LIKE ? " + if dimensions: + data = data + ( + self._get_ordered_dimensions_with_separator(dimensions, for_search=True), + ) + + unit_filter = "" + if unit: + if unit == "NULL_VALUE": + unit_filter = "AND unit IS NULL" + else: + unit_filter = "AND unit = ? " + data += (unit,) + + sql_query = f""" + SELECT + {STAT_TO_SQLITE_AGGREGATION_FUNC[stat]}, + SUM(count) + FROM ( + SELECT + value, 1 as count, + account_id, region, metric_name, namespace, timestamp, dimensions, unit, storage_resolution + FROM {self.TABLE_SINGLE_METRICS} + UNION ALL + SELECT + {STAT_TO_SQLITE_COL_NAME_HELPER[stat]} as value, sample_count as count, + account_id, region, metric_name, namespace, timestamp, dimensions, unit, storage_resolution + FROM {self.TABLE_AGGREGATED_METRICS} + ) AS combined + WHERE account_id = ? AND region = ? + AND namespace = ? AND metric_name = ? + {dimension_filter} + {unit_filter} + AND timestamp >= ? AND timestamp < ? 
+            ORDER BY timestamp ASC
+        """
+
+        timestamps = []
+        values = []
+        query_params = []
+
+        # Prepare all the query parameters
+        while start_time_unix < end_time_unix:
+            next_start_time = start_time_unix + period
+            query_params.append(data + (start_time_unix, next_start_time))
+            start_time_unix = next_start_time
+
+        all_results = []
+        with self.DATABASE_LOCK, sqlite3.connect(self.METRICS_DB_READ_ONLY, uri=True) as conn:
+            cur = conn.cursor()
+            batch_size = 500
+            for i in range(0, len(query_params), batch_size):
+                batch = query_params[i : i + batch_size]
+                cur.execute(
+                    f"""
+                    SELECT * FROM (
+                        {" UNION ALL ".join(["SELECT * FROM (" + sql_query + ")"] * len(batch))}
+                    )
+                    """,
+                    sum(batch, ()),  # flatten the list of tuples in batch into a single tuple
+                )
+                all_results.extend(cur.fetchall())
+
+        # Process results outside the lock
+        for i, result_row in enumerate(all_results):
+            if result_row[1]:
+                calculated_result = (
+                    result_row[0] / result_row[1] if stat == "Average" else result_row[0]
+                )
+                timestamps.append(query_params[i][-2])  # start_time_unix
+                values.append(calculated_result)
+
+        # The while loop will always give us the timestamps in ascending order, as we start with the start_time
+        # and increase it by the period until we reach the end_time.
+        # If we want the timestamps in descending order we need to reverse the list.
+        if scan_by is None or scan_by == ScanBy.TimestampDescending:
+            timestamps = timestamps[::-1]
+            values = values[::-1]
+
+        return {
+            "timestamps": timestamps,
+            "values": values,
+        }
+
+    def list_metrics(
+        self,
+        account_id: str,
+        region: str,
+        namespace: str,
+        metric_name: str,
+        dimensions: list[dict[str, str]],
+    ) -> dict:
+        data = (account_id, region)
+
+        namespace_filter = ""
+        if namespace:
+            namespace_filter = " AND namespace = ?"
+            data = data + (namespace,)
+
+        metric_name_filter = ""
+        if metric_name:
+            metric_name_filter = " AND metric_name = ?"
+            data = data + (metric_name,)
+
+        dimension_filter = "" if not dimensions else " AND dimensions LIKE ? "
+        if dimensions:
+            data = data + (
+                self._get_ordered_dimensions_with_separator(dimensions, for_search=True),
+            )
+
+        query = f"""
+            SELECT DISTINCT metric_name, namespace, dimensions
+            FROM (
+                SELECT metric_name, namespace, dimensions, account_id, region, timestamp
+                FROM SINGLE_METRICS
+                UNION
+                SELECT metric_name, namespace, dimensions, account_id, region, timestamp
+                FROM AGGREGATED_METRICS
+            ) AS combined
+            WHERE account_id = ? AND region = ?
+            {namespace_filter}
+            {metric_name_filter}
+            {dimension_filter}
+            ORDER BY timestamp DESC
+        """
+        with self.DATABASE_LOCK, sqlite3.connect(self.METRICS_DB_READ_ONLY, uri=True) as conn:
+            cur = conn.cursor()
+
+            cur.execute(
+                query,
+                data,
+            )
+            metrics_result = [
+                {
+                    "metric_name": r[0],
+                    "namespace": r[1],
+                    "dimensions": self._restore_dimensions_from_string(r[2]),
+                }
+                for r in cur.fetchall()
+            ]
+
+        return {"metrics": metrics_result}
+
+    def clear_tables(self):
+        with self.DATABASE_LOCK, sqlite3.connect(self.METRICS_DB) as conn:
+            cur = conn.cursor()
+            cur.execute(f"DELETE FROM {self.TABLE_SINGLE_METRICS}")
+            cur.execute(f"DELETE FROM {self.TABLE_AGGREGATED_METRICS}")
+            conn.commit()
+            cur.execute("VACUUM")
+            conn.commit()
+
+    def _get_ordered_dimensions_with_separator(self, dims: Optional[List[Dict]], for_search=False):
+        """
+        Returns a string with the dimensions in the format "Name=Value\tName=Value\tName=Value" in order to store the metric
+        with the dimensions in a single column in the database
+
+        :param dims: List of dimensions in the format [{"Name": "name", "Value": "value"}, ...]
+        :param for_search: If True, the dimensions will be formatted in a way that can be used in a LIKE query to search. Default is False. Example: " %{Name}={Value}% "
+        :return: String with the dimensions in the format "Name=Value\tName=Value\tName=Value"
+        """
+        if not dims:
+            return None
+        dims.sort(key=lambda d: d["Name"])
+        dimensions = ""
+        if not for_search:
+            for d in dims:
+                dimensions += f"{d['Name']}={d['Value']}\t"  # AWS does not allow ASCII control characters, so we can use one as a separator
+        else:
+            for d in dims:
+                dimensions += f"%{d.get('Name')}={d.get('Value', '')}%"
+
+        return dimensions
+
+    def _restore_dimensions_from_string(self, dimensions: str):
+        if not dimensions:
+            return None
+        dims = []
+        for d in dimensions.split("\t"):
+            if not d:
+                continue
+            name, value = d.split("=")
+            dims.append({"Name": name, "Value": value})
+
+        return dims
+
+    def _convert_timestamp_to_unix(
+        self, timestamp: datetime
+    ):  # TODO verify if this is the standard format, might need to convert
+        return int(timestamp.timestamp())
+
+    def get_all_metric_data(self):
+        with self.DATABASE_LOCK, sqlite3.connect(self.METRICS_DB_READ_ONLY, uri=True) as conn:
+            cur = conn.cursor()
+            """ shape for each data entry:
+                {
+                    "ns": r.namespace,
+                    "n": r.name,
+                    "v": r.value,
+                    "t": r.timestamp,
+                    "d": [{"n": d.name, "v": d.value} for d in r.dimensions],
+                    "account": account-id,  # new for v2
+                    "region": region_name,  # new for v2
+                }
+            """
+            query = f"SELECT namespace, metric_name, value, timestamp, dimensions, account_id, region from {self.TABLE_SINGLE_METRICS}"
+            cur.execute(query)
+            metrics_result = [
+                {
+                    "ns": r[0],
+                    "n": r[1],
+                    "v": r[2],
+                    "t": r[3],
+                    "d": r[4],
+                    "account": r[5],
+                    "region": r[6],
+                }
+                for r in cur.fetchall()
+            ]
+            # TODO add aggregated metrics (was not handled by v1 either)
+            return metrics_result
diff --git a/localstack-core/localstack/services/cloudwatch/models.py b/localstack-core/localstack/services/cloudwatch/models.py
new file mode 100644
index 0000000000000..a1246569f4f97
--- /dev/null
+++ b/localstack-core/localstack/services/cloudwatch/models.py
@@ -0,0 +1,109 @@
+import datetime
+from datetime import timezone
+from typing import Dict, List
+
+from localstack.aws.api.cloudwatch import CompositeAlarm, DashboardBody, MetricAlarm, StateValue
+from localstack.services.stores import (
+    AccountRegionBundle,
+    BaseStore,
+    CrossRegionAttribute,
+    LocalAttribute,
+)
+from
localstack.utils.aws import arns +from localstack.utils.tagging import TaggingService + + +class LocalStackMetricAlarm: + region: str + account_id: str + alarm: MetricAlarm + + def __init__(self, account_id: str, region: str, alarm: MetricAlarm): + self.account_id = account_id + self.region = region + self.alarm = alarm + self.set_default_attributes() + + def set_default_attributes(self): + current_time = datetime.datetime.now(timezone.utc) + self.alarm["AlarmArn"] = arns.cloudwatch_alarm_arn( + self.alarm["AlarmName"], account_id=self.account_id, region_name=self.region + ) + self.alarm["AlarmConfigurationUpdatedTimestamp"] = current_time + self.alarm.setdefault("ActionsEnabled", True) + self.alarm.setdefault("OKActions", []) + self.alarm.setdefault("AlarmActions", []) + self.alarm.setdefault("InsufficientDataActions", []) + self.alarm["StateValue"] = StateValue.INSUFFICIENT_DATA + self.alarm["StateReason"] = "Unchecked: Initial alarm creation" + self.alarm["StateUpdatedTimestamp"] = current_time + self.alarm.setdefault("Dimensions", []) + self.alarm["StateTransitionedTimestamp"] = current_time + + +class LocalStackCompositeAlarm: + region: str + account_id: str + alarm: CompositeAlarm + + def __init__(self, account_id: str, region: str, alarm: CompositeAlarm): + self.account_id = account_id + self.region = region + self.alarm = alarm + self.set_default_attributes() + + def set_default_attributes(self): + current_time = datetime.datetime.now(timezone.utc) + self.alarm["AlarmArn"] = arns.cloudwatch_alarm_arn( + self.alarm["AlarmName"], account_id=self.account_id, region_name=self.region + ) + self.alarm["AlarmConfigurationUpdatedTimestamp"] = current_time + self.alarm.setdefault("ActionsEnabled", True) + self.alarm.setdefault("OKActions", []) + self.alarm.setdefault("AlarmActions", []) + self.alarm.setdefault("InsufficientDataActions", []) + self.alarm["StateValue"] = StateValue.INSUFFICIENT_DATA + self.alarm["StateReason"] = "Unchecked: Initial alarm creation" + self.alarm["StateUpdatedTimestamp"] = current_time + self.alarm["StateTransitionedTimestamp"] = current_time + + +class LocalStackDashboard: + region: str + account_id: str + dashboard_name: str + dashboard_arn: str + dashboard_body: DashboardBody + + def __init__( + self, account_id: str, region: str, dashboard_name: str, dashboard_body: DashboardBody + ): + self.account_id = account_id + self.region = region + self.dashboard_name = dashboard_name + self.dashboard_arn = arns.cloudwatch_dashboard_arn( + self.dashboard_name, account_id=self.account_id, region_name=self.region + ) + self.dashboard_body = dashboard_body + self.last_modified = datetime.datetime.now() + self.size = 225 # TODO: calculate size + + +LocalStackAlarm = LocalStackMetricAlarm | LocalStackCompositeAlarm + + +class CloudWatchStore(BaseStore): + # maps resource ARN to tags + TAGS: TaggingService = CrossRegionAttribute(default=TaggingService) + + # maps resource ARN to alarms + alarms: Dict[str, LocalStackAlarm] = LocalAttribute(default=dict) + + # Contains all the Alarm Histories. 
Per documentation, an alarm history is retained even if the alarm is deleted, + # making it necessary to save this at store level + histories: List[Dict] = LocalAttribute(default=list) + + dashboards: Dict[str, LocalStackDashboard] = LocalAttribute(default=dict) + + +cloudwatch_stores = AccountRegionBundle("cloudwatch", CloudWatchStore) diff --git a/localstack-core/localstack/services/cloudwatch/provider.py b/localstack-core/localstack/services/cloudwatch/provider.py new file mode 100644 index 0000000000000..42e4b5fe94e58 --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/provider.py @@ -0,0 +1,530 @@ +import json +import logging +import uuid +from typing import Any, Optional +from xml.sax.saxutils import escape + +from moto.cloudwatch import cloudwatch_backends +from moto.cloudwatch.models import CloudWatchBackend, FakeAlarm, MetricDatum + +from localstack.aws.accounts import get_account_id_from_access_key_id +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.cloudwatch import ( + AlarmNames, + AmazonResourceName, + CloudwatchApi, + DescribeAlarmsInput, + DescribeAlarmsOutput, + GetMetricDataInput, + GetMetricDataOutput, + GetMetricStatisticsInput, + GetMetricStatisticsOutput, + ListTagsForResourceOutput, + PutCompositeAlarmInput, + PutMetricAlarmInput, + StateValue, + TagKeyList, + TagList, + TagResourceOutput, + UntagResourceOutput, +) +from localstack.aws.connect import connect_to +from localstack.constants import DEFAULT_AWS_ACCOUNT_ID +from localstack.http import Request +from localstack.services import moto +from localstack.services.cloudwatch.alarm_scheduler import AlarmScheduler +from localstack.services.edge import ROUTER +from localstack.services.plugins import SERVICE_PLUGINS, ServiceLifecycleHook +from localstack.utils.aws import arns +from localstack.utils.aws.arns import extract_account_id_from_arn, lambda_function_name +from localstack.utils.aws.request_context import ( + extract_access_key_id_from_auth_header, + extract_region_from_auth_header, +) +from localstack.utils.patch import patch +from localstack.utils.strings import camel_to_snake_case +from localstack.utils.sync import poll_condition +from localstack.utils.tagging import TaggingService +from localstack.utils.threads import start_worker_thread + +PATH_GET_RAW_METRICS = "/_aws/cloudwatch/metrics/raw" +DEPRECATED_PATH_GET_RAW_METRICS = "/cloudwatch/metrics/raw" +MOTO_INITIAL_UNCHECKED_REASON = "Unchecked: Initial alarm creation" + +LOG = logging.getLogger(__name__) + + +@patch(target=FakeAlarm.update_state) +def update_state(target, self, reason, reason_data, state_value): + if reason_data is None: + reason_data = "" + if self.state_reason == MOTO_INITIAL_UNCHECKED_REASON: + old_state = StateValue.INSUFFICIENT_DATA + else: + old_state = self.state_value + + old_state_reason = self.state_reason + old_state_update_timestamp = self.state_updated_timestamp + target(self, reason, reason_data, state_value) + + # check the state and trigger required actions + if not self.actions_enabled or old_state == self.state_value: + return + if self.state_value == "OK": + actions = self.ok_actions + elif self.state_value == "ALARM": + actions = self.alarm_actions + else: + actions = self.insufficient_data_actions + for action in actions: + data = arns.parse_arn(action) + if data["service"] == "sns": + service = connect_to(region_name=data["region"], aws_access_key_id=data["account"]).sns + subject = f"""{self.state_value}: "{self.name}" in {self.region_name}""" + 
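+            # the published message mirrors AWS's SNS notification for an alarm
+            # state change: subject '<NewState>: "<AlarmName>" in <region>' plus a
+            # JSON body assembled in create_message_response_update_state_sns below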
message = create_message_response_update_state_sns(self, old_state) + service.publish(TopicArn=action, Subject=subject, Message=message) + elif data["service"] == "lambda": + service = connect_to( + region_name=data["region"], aws_access_key_id=data["account"] + ).lambda_ + message = create_message_response_update_state_lambda( + self, old_state, old_state_reason, old_state_update_timestamp + ) + service.invoke(FunctionName=lambda_function_name(action), Payload=message) + else: + # TODO: support other actions + LOG.warning( + "Action for service %s not implemented, action '%s' will not be triggered.", + data["service"], + action, + ) + + +@patch(target=CloudWatchBackend.put_metric_alarm) +def put_metric_alarm( + target, + self, + name: str, + namespace: str, + metric_name: str, + comparison_operator: str, + evaluation_periods: int, + period: int, + threshold: float, + statistic: str, + description: str, + dimensions: list[dict[str, str]], + alarm_actions: list[str], + metric_data_queries: Optional[list[Any]] = None, + datapoints_to_alarm: Optional[int] = None, + extended_statistic: Optional[str] = None, + ok_actions: Optional[list[str]] = None, + insufficient_data_actions: Optional[list[str]] = None, + unit: Optional[str] = None, + actions_enabled: bool = True, + treat_missing_data: Optional[str] = None, + evaluate_low_sample_count_percentile: Optional[str] = None, + threshold_metric_id: Optional[str] = None, + rule: Optional[str] = None, + tags: Optional[list[dict[str, str]]] = None, +) -> FakeAlarm: + if description: + description = escape(description) + return target( + self, + name, + namespace, + metric_name, + comparison_operator, + evaluation_periods, + period, + threshold, + statistic, + description, + dimensions, + alarm_actions, + metric_data_queries, + datapoints_to_alarm, + extended_statistic, + ok_actions, + insufficient_data_actions, + unit, + actions_enabled, + treat_missing_data, + evaluate_low_sample_count_percentile, + threshold_metric_id, + rule, + tags, + ) + + +def create_metric_data_query_from_alarm(alarm: FakeAlarm): + # TODO may need to be adapted for other use cases + # verified return value with a snapshot test + return [ + { + "id": str(uuid.uuid4()), + "metricStat": { + "metric": { + "namespace": alarm.namespace, + "name": alarm.metric_name, + "dimensions": alarm.dimensions or {}, + }, + "period": int(alarm.period), + "stat": alarm.statistic, + }, + "returnData": True, + } + ] + + +def create_message_response_update_state_lambda( + alarm: FakeAlarm, old_state, old_state_reason, old_state_timestamp +): + response = { + "accountId": extract_account_id_from_arn(alarm.alarm_arn), + "alarmArn": alarm.alarm_arn, + "alarmData": { + "alarmName": alarm.name, + "state": { + "value": alarm.state_value, + "reason": alarm.state_reason, + "timestamp": alarm.state_updated_timestamp, + }, + "previousState": { + "value": old_state, + "reason": old_state_reason, + "timestamp": old_state_timestamp, + }, + "configuration": { + "description": alarm.description or "", + "metrics": alarm.metric_data_queries + or create_metric_data_query_from_alarm( + alarm + ), # TODO: add test with metric_data_queries + }, + }, + "time": alarm.state_updated_timestamp, + "region": alarm.region_name, + "source": "aws.cloudwatch", + } + return json.dumps(response) + + +def create_message_response_update_state_sns(alarm, old_state): + response = { + "AWSAccountId": extract_account_id_from_arn(alarm.alarm_arn), + "OldStateValue": old_state, + "AlarmName": alarm.name, + "AlarmDescription": 
alarm.description or "", + "AlarmConfigurationUpdatedTimestamp": alarm.configuration_updated_timestamp, + "NewStateValue": alarm.state_value, + "NewStateReason": alarm.state_reason, + "StateChangeTime": alarm.state_updated_timestamp, + # the long-name for 'region' should be used - as we don't have it, we use the short name + # which needs to be slightly changed to make snapshot tests work + "Region": alarm.region_name.replace("-", " ").capitalize(), + "AlarmArn": alarm.alarm_arn, + "OKActions": alarm.ok_actions or [], + "AlarmActions": alarm.alarm_actions or [], + "InsufficientDataActions": alarm.insufficient_data_actions or [], + } + + # collect trigger details + details = { + "MetricName": alarm.metric_name or "", + "Namespace": alarm.namespace or "", + "Unit": alarm.unit or None, # testing with AWS revealed this currently returns None + "Period": int(alarm.period) if alarm.period else 0, + "EvaluationPeriods": int(alarm.evaluation_periods) if alarm.evaluation_periods else 0, + "ComparisonOperator": alarm.comparison_operator or "", + "Threshold": float(alarm.threshold) if alarm.threshold else 0.0, + "TreatMissingData": alarm.treat_missing_data or "", + "EvaluateLowSampleCountPercentile": alarm.evaluate_low_sample_count_percentile or "", + } + + # Dimensions not serializable + dimensions = [] + if alarm.dimensions: + for d in alarm.dimensions: + dimensions.append({"value": d.value, "name": d.name}) + + details["Dimensions"] = dimensions or "" + + if alarm.statistic: + details["StatisticType"] = "Statistic" + details["Statistic"] = camel_to_snake_case(alarm.statistic).upper() # AWS returns uppercase + elif alarm.extended_statistic: + details["StatisticType"] = "ExtendedStatistic" + details["ExtendedStatistic"] = alarm.extended_statistic + + response["Trigger"] = details + + return json.dumps(response) + + +class ValidationError(CommonServiceException): + def __init__(self, message: str): + super().__init__("ValidationError", message, 400, True) + + +def _set_alarm_actions(context, alarm_names, enabled): + backend = cloudwatch_backends[context.account_id][context.region] + for name in alarm_names: + alarm = backend.alarms.get(name) + if alarm: + alarm.actions_enabled = enabled + + +def _cleanup_describe_output(alarm): + if "Metrics" in alarm and len(alarm["Metrics"]) == 0: + alarm.pop("Metrics") + reason_data = alarm.get("StateReasonData") + if reason_data is not None and reason_data in ("{}", ""): + alarm.pop("StateReasonData") + if ( + alarm.get("StateReason", "") == MOTO_INITIAL_UNCHECKED_REASON + and alarm.get("StateValue") != StateValue.INSUFFICIENT_DATA + ): + alarm["StateValue"] = StateValue.INSUFFICIENT_DATA + + +class CloudwatchProvider(CloudwatchApi, ServiceLifecycleHook): + """ + Cloudwatch provider. 
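+
+    This v1 provider delegates most operations to moto's CloudWatch backend via
+    moto.call_moto, patching and post-processing the results for parity.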
+ + LIMITATIONS: + - no alarm rule evaluation + """ + + def __init__(self): + self.tags = TaggingService() + self.alarm_scheduler = None + + def on_after_init(self): + ROUTER.add(PATH_GET_RAW_METRICS, self.get_raw_metrics) + self.start_alarm_scheduler() + + def on_before_state_reset(self): + self.shutdown_alarm_scheduler() + + def on_after_state_reset(self): + self.start_alarm_scheduler() + + def on_before_state_load(self): + self.shutdown_alarm_scheduler() + + def on_after_state_load(self): + self.start_alarm_scheduler() + + def restart_alarms(*args): + poll_condition(lambda: SERVICE_PLUGINS.is_running("cloudwatch")) + self.alarm_scheduler.restart_existing_alarms() + + start_worker_thread(restart_alarms) + + def on_before_stop(self): + self.shutdown_alarm_scheduler() + + def start_alarm_scheduler(self): + if not self.alarm_scheduler: + LOG.debug("starting cloudwatch scheduler") + self.alarm_scheduler = AlarmScheduler() + + def shutdown_alarm_scheduler(self): + LOG.debug("stopping cloudwatch scheduler") + self.alarm_scheduler.shutdown_scheduler() + self.alarm_scheduler = None + + def delete_alarms(self, context: RequestContext, alarm_names: AlarmNames, **kwargs) -> None: + moto.call_moto(context) + for alarm_name in alarm_names: + arn = arns.cloudwatch_alarm_arn(alarm_name, context.account_id, context.region) + self.alarm_scheduler.delete_scheduler_for_alarm(arn) + + def get_raw_metrics(self, request: Request): + region = extract_region_from_auth_header(request.headers) + account_id = ( + get_account_id_from_access_key_id( + extract_access_key_id_from_auth_header(request.headers) + ) + or DEFAULT_AWS_ACCOUNT_ID + ) + backend = cloudwatch_backends[account_id][region] + if backend: + result = [m for m in backend.metric_data if isinstance(m, MetricDatum)] + # TODO handle aggregated metrics as well (MetricAggregatedDatum) + else: + result = [] + + result = [ + { + "ns": r.namespace, + "n": r.name, + "v": r.value, + "t": r.timestamp, + "d": [{"n": d.name, "v": d.value} for d in r.dimensions], + "account": account_id, + "region": region, + } + for r in result + ] + return {"metrics": result} + + def list_tags_for_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, **kwargs + ) -> ListTagsForResourceOutput: + tags = self.tags.list_tags_for_resource(resource_arn) + return ListTagsForResourceOutput(Tags=tags.get("Tags", [])) + + def untag_resource( + self, + context: RequestContext, + resource_arn: AmazonResourceName, + tag_keys: TagKeyList, + **kwargs, + ) -> UntagResourceOutput: + self.tags.untag_resource(resource_arn, tag_keys) + return UntagResourceOutput() + + def tag_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, tags: TagList, **kwargs + ) -> TagResourceOutput: + self.tags.tag_resource(resource_arn, tags) + return TagResourceOutput() + + @handler("GetMetricData", expand=False) + def get_metric_data( + self, context: RequestContext, request: GetMetricDataInput + ) -> GetMetricDataOutput: + result = moto.call_moto(context) + # moto currently uses hardcoded label metric_name + stat + # parity tests shows that default is MetricStat, but there might also be a label explicitly set + metric_data_queries = request["MetricDataQueries"] + for i in range(0, len(metric_data_queries)): + metric_query = metric_data_queries[i] + label = metric_query.get("Label") or metric_query.get("MetricStat", {}).get( + "Metric", {} + ).get("MetricName") + if label: + result["MetricDataResults"][i]["Label"] = label + if "Messages" not in result: + # parity tests 
+            #  reveals that an empty messages list is added
+            result["Messages"] = []
+        return result
+
+    @handler("PutMetricAlarm", expand=False)
+    def put_metric_alarm(
+        self,
+        context: RequestContext,
+        request: PutMetricAlarmInput,
+    ) -> None:
+        # missing will be the default, when not set (but it will not explicitly be set)
+        if request.get("TreatMissingData", "missing") not in [
+            "breaching",
+            "notBreaching",
+            "ignore",
+            "missing",
+        ]:
+            raise ValidationError(
+                f"The value {request['TreatMissingData']} is not supported for TreatMissingData parameter. Supported values are [breaching, notBreaching, ignore, missing]."
+            )
+        # do some sanity checks:
+        if request.get("Period"):
+            # Valid values are 10, 30, and any multiple of 60.
+            value = request.get("Period")
+            if value not in (10, 30):
+                if value % 60 != 0:
+                    raise ValidationError("Period must be 10, 30 or a multiple of 60")
+        if request.get("Statistic"):
+            if request.get("Statistic") not in [
+                "SampleCount",
+                "Average",
+                "Sum",
+                "Minimum",
+                "Maximum",
+            ]:
+                raise ValidationError(
+                    f"Value '{request.get('Statistic')}' at 'statistic' failed to satisfy constraint: Member must satisfy enum value set: [Maximum, SampleCount, Sum, Minimum, Average]"
+                )
+
+        moto.call_moto(context)
+
+        name = request.get("AlarmName")
+        arn = arns.cloudwatch_alarm_arn(name, context.account_id, context.region)
+        self.tags.tag_resource(arn, request.get("Tags"))
+        self.alarm_scheduler.schedule_metric_alarm(arn)
+
+    @handler("PutCompositeAlarm", expand=False)
+    def put_composite_alarm(
+        self,
+        context: RequestContext,
+        request: PutCompositeAlarmInput,
+    ) -> None:
+        backend = cloudwatch_backends[context.account_id][context.region]
+        backend.put_metric_alarm(
+            name=request.get("AlarmName"),
+            namespace=None,
+            metric_name=None,
+            metric_data_queries=None,
+            comparison_operator=None,
+            evaluation_periods=None,
+            datapoints_to_alarm=None,
+            period=None,
+            threshold=None,
+            statistic=None,
+            extended_statistic=None,
+            description=request.get("AlarmDescription"),
+            dimensions=[],
+            alarm_actions=request.get("AlarmActions", []),
+            ok_actions=request.get("OKActions", []),
+            insufficient_data_actions=request.get("InsufficientDataActions", []),
+            unit=None,
+            actions_enabled=request.get("ActionsEnabled"),
+            treat_missing_data=None,
+            evaluate_low_sample_count_percentile=None,
+            threshold_metric_id=None,
+            rule=request.get("AlarmRule"),
+            tags=request.get("Tags", []),
+        )
+        LOG.warning(
+            "Composite Alarms configuration is not yet supported, alarm state will not be evaluated"
+        )
+
+    @handler("EnableAlarmActions")
+    def enable_alarm_actions(
+        self, context: RequestContext, alarm_names: AlarmNames, **kwargs
+    ) -> None:
+        _set_alarm_actions(context, alarm_names, enabled=True)
+
+    @handler("DisableAlarmActions")
+    def disable_alarm_actions(
+        self, context: RequestContext, alarm_names: AlarmNames, **kwargs
+    ) -> None:
+        _set_alarm_actions(context, alarm_names, enabled=False)
+
+    @handler("DescribeAlarms", expand=False)
+    def describe_alarms(
+        self, context: RequestContext, request: DescribeAlarmsInput
+    ) -> DescribeAlarmsOutput:
+        response = moto.call_moto(context)
+
+        for c in response["CompositeAlarms"]:
+            _cleanup_describe_output(c)
+        for m in response["MetricAlarms"]:
+            _cleanup_describe_output(m)
+
+        return response
+
+    @handler("GetMetricStatistics", expand=False)
+    def get_metric_statistics(
+        self, context: RequestContext, request: GetMetricStatisticsInput
+    ) -> GetMetricStatisticsOutput:
+        response = moto.call_moto(context)
+
+        # cleanup -> ExtendedStatistics
is not included in AWS response if it returned empty + for datapoint in response.get("Datapoints"): + if "ExtendedStatistics" in datapoint and not datapoint.get("ExtendedStatistics"): + datapoint.pop("ExtendedStatistics") + + return response diff --git a/localstack-core/localstack/services/cloudwatch/provider_v2.py b/localstack-core/localstack/services/cloudwatch/provider_v2.py new file mode 100644 index 0000000000000..88b700e8d562f --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/provider_v2.py @@ -0,0 +1,1109 @@ +import datetime +import json +import logging +import re +import threading +import uuid +from datetime import timezone +from typing import List + +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.cloudwatch import ( + AccountId, + ActionPrefix, + AlarmName, + AlarmNamePrefix, + AlarmNames, + AlarmTypes, + AmazonResourceName, + CloudwatchApi, + DashboardBody, + DashboardName, + DashboardNamePrefix, + DashboardNames, + Datapoint, + DeleteDashboardsOutput, + DescribeAlarmHistoryOutput, + DescribeAlarmsForMetricOutput, + DescribeAlarmsOutput, + DimensionFilters, + Dimensions, + EntityMetricDataList, + ExtendedStatistic, + ExtendedStatistics, + GetDashboardOutput, + GetMetricDataMaxDatapoints, + GetMetricDataOutput, + GetMetricStatisticsOutput, + HistoryItemType, + IncludeLinkedAccounts, + InvalidParameterCombinationException, + InvalidParameterValueException, + LabelOptions, + ListDashboardsOutput, + ListMetricsOutput, + ListTagsForResourceOutput, + MaxRecords, + MetricData, + MetricDataQueries, + MetricDataQuery, + MetricDataResult, + MetricDataResultMessages, + MetricName, + MetricStat, + Namespace, + NextToken, + Period, + PutCompositeAlarmInput, + PutDashboardOutput, + PutMetricAlarmInput, + RecentlyActive, + ResourceNotFound, + ScanBy, + StandardUnit, + StateReason, + StateReasonData, + StateValue, + Statistic, + Statistics, + StrictEntityValidation, + TagKeyList, + TagList, + TagResourceOutput, + Timestamp, + UntagResourceOutput, +) +from localstack.aws.connect import connect_to +from localstack.http import Request +from localstack.services.cloudwatch.alarm_scheduler import AlarmScheduler +from localstack.services.cloudwatch.cloudwatch_database_helper import CloudwatchDatabase +from localstack.services.cloudwatch.models import ( + CloudWatchStore, + LocalStackAlarm, + LocalStackCompositeAlarm, + LocalStackDashboard, + LocalStackMetricAlarm, + cloudwatch_stores, +) +from localstack.services.edge import ROUTER +from localstack.services.plugins import SERVICE_PLUGINS, ServiceLifecycleHook +from localstack.state import AssetDirectory, StateVisitor +from localstack.utils.aws import arns +from localstack.utils.aws.arns import extract_account_id_from_arn, lambda_function_name +from localstack.utils.collections import PaginatedList +from localstack.utils.json import CustomEncoder as JSONEncoder +from localstack.utils.strings import camel_to_snake_case +from localstack.utils.sync import poll_condition +from localstack.utils.threads import start_worker_thread +from localstack.utils.time import timestamp_millis + +PATH_GET_RAW_METRICS = "/_aws/cloudwatch/metrics/raw" +MOTO_INITIAL_UNCHECKED_REASON = "Unchecked: Initial alarm creation" +LIST_METRICS_MAX_RESULTS = 500 +# If the values in these fields are not the same, their values are added when generating labels +LABEL_DIFFERENTIATORS = ["Stat", "Period"] +HISTORY_VERSION = "1.0" + +LOG = logging.getLogger(__name__) +_STORE_LOCK = threading.RLock() 
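+# guards all mutations of the CloudWatch store (alarms, dashboards) across handlers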
+AWS_MAX_DATAPOINTS_ACCEPTED: int = 1440 + + +class ValidationError(CommonServiceException): + # TODO: check this error against AWS (doesn't exist in the API) + def __init__(self, message: str): + super().__init__("ValidationError", message, 400, True) + + +class InvalidParameterCombination(CommonServiceException): + def __init__(self, message: str): + super().__init__("InvalidParameterCombination", message, 400, True) + + +def _validate_parameters_for_put_metric_data(metric_data: MetricData) -> None: + for index, metric_item in enumerate(metric_data): + indexplusone = index + 1 + if metric_item.get("Value") and metric_item.get("Values"): + raise InvalidParameterCombinationException( + f"The parameters MetricData.member.{indexplusone}.Value and MetricData.member.{indexplusone}.Values are mutually exclusive and you have specified both." + ) + + if metric_item.get("StatisticValues") and metric_item.get("Value"): + raise InvalidParameterCombinationException( + f"The parameters MetricData.member.{indexplusone}.Value and MetricData.member.{indexplusone}.StatisticValues are mutually exclusive and you have specified both." + ) + + if metric_item.get("Values") and metric_item.get("Counts"): + values = metric_item.get("Values") + counts = metric_item.get("Counts") + if len(values) != len(counts): + raise InvalidParameterValueException( + f"The parameters MetricData.member.{indexplusone}.Values and MetricData.member.{indexplusone}.Counts must be of the same size." + ) + + +class CloudwatchProvider(CloudwatchApi, ServiceLifecycleHook): + """ + Cloudwatch provider. + + LIMITATIONS: + - simplified composite alarm rule evaluation: + - only OR operator is supported + - only ALARM expression is supported + - only metric alarms can be included in the rule and they should be referenced by ARN only + """ + + def __init__(self): + self.alarm_scheduler: AlarmScheduler = None + self.store = None + self.cloudwatch_database = CloudwatchDatabase() + + @staticmethod + def get_store(account_id: str, region: str) -> CloudWatchStore: + return cloudwatch_stores[account_id][region] + + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(cloudwatch_stores) + visitor.visit(AssetDirectory(self.service, CloudwatchDatabase.CLOUDWATCH_DATA_ROOT)) + + def on_after_init(self): + ROUTER.add(PATH_GET_RAW_METRICS, self.get_raw_metrics) + self.start_alarm_scheduler() + + def on_before_state_reset(self): + self.shutdown_alarm_scheduler() + self.cloudwatch_database.clear_tables() + + def on_after_state_reset(self): + self.start_alarm_scheduler() + + def on_before_state_load(self): + self.shutdown_alarm_scheduler() + + def on_after_state_load(self): + self.start_alarm_scheduler() + + def restart_alarms(*args): + poll_condition(lambda: SERVICE_PLUGINS.is_running("cloudwatch")) + self.alarm_scheduler.restart_existing_alarms() + + start_worker_thread(restart_alarms) + + def on_before_stop(self): + self.shutdown_alarm_scheduler() + + def start_alarm_scheduler(self): + if not self.alarm_scheduler: + LOG.debug("starting cloudwatch scheduler") + self.alarm_scheduler = AlarmScheduler() + + def shutdown_alarm_scheduler(self): + LOG.debug("stopping cloudwatch scheduler") + self.alarm_scheduler.shutdown_scheduler() + self.alarm_scheduler = None + + def delete_alarms(self, context: RequestContext, alarm_names: AlarmNames, **kwargs) -> None: + """ + Delete alarms. 
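+
+        Cancels the scheduler for each alarm and removes it from the store; alarm
+        names that do not resolve to a stored alarm are silently ignored.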
+ """ + with _STORE_LOCK: + for alarm_name in alarm_names: + alarm_arn = arns.cloudwatch_alarm_arn( + alarm_name, account_id=context.account_id, region_name=context.region + ) # obtain alarm ARN from alarm name + self.alarm_scheduler.delete_scheduler_for_alarm(alarm_arn) + store = self.get_store(context.account_id, context.region) + store.alarms.pop(alarm_arn, None) + + def put_metric_data( + self, + context: RequestContext, + namespace: Namespace, + metric_data: MetricData = None, + entity_metric_data: EntityMetricDataList = None, + strict_entity_validation: StrictEntityValidation = None, + **kwargs, + ) -> None: + # TODO add support for entity_metric_data and strict_entity_validation + _validate_parameters_for_put_metric_data(metric_data) + + self.cloudwatch_database.add_metric_data( + context.account_id, context.region, namespace, metric_data + ) + + def get_metric_data( + self, + context: RequestContext, + metric_data_queries: MetricDataQueries, + start_time: Timestamp, + end_time: Timestamp, + next_token: NextToken = None, + scan_by: ScanBy = None, + max_datapoints: GetMetricDataMaxDatapoints = None, + label_options: LabelOptions = None, + **kwargs, + ) -> GetMetricDataOutput: + results: List[MetricDataResult] = [] + limit = max_datapoints or 100_800 + messages: MetricDataResultMessages = [] + nxt = None + label_additions = [] + + for diff in LABEL_DIFFERENTIATORS: + non_unique = [] + for query in metric_data_queries: + non_unique.append(query["MetricStat"][diff]) + if len(set(non_unique)) > 1: + label_additions.append(diff) + + for query in metric_data_queries: + query_result = self.cloudwatch_database.get_metric_data_stat( + account_id=context.account_id, + region=context.region, + query=query, + start_time=start_time, + end_time=end_time, + scan_by=scan_by, + ) + if query_result.get("messages"): + messages.extend(query_result.get("messages")) + + label = query.get("Label") or f"{query['MetricStat']['Metric']['MetricName']}" + # TODO: does this happen even if a label is set in the query? 
+ for label_addition in label_additions: + label = f"{label} {query['MetricStat'][label_addition]}" + + timestamps = query_result.get("timestamps", {}) + values = query_result.get("values", {}) + + # Paginate + timestamp_value_dicts = [ + { + "Timestamp": timestamp, + "Value": value, + } + for timestamp, value in zip(timestamps, values, strict=False) + ] + + pagination = PaginatedList(timestamp_value_dicts) + timestamp_page, nxt = pagination.get_page( + lambda item: item.get("Timestamp"), + next_token=next_token, + page_size=limit, + ) + + timestamps = [item.get("Timestamp") for item in timestamp_page] + values = [item.get("Value") for item in timestamp_page] + + metric_data_result = { + "Id": query.get("Id"), + "Label": label, + "StatusCode": "Complete", + "Timestamps": timestamps, + "Values": values, + } + results.append(MetricDataResult(**metric_data_result)) + + return GetMetricDataOutput(MetricDataResults=results, NextToken=nxt, Messages=messages) + + def set_alarm_state( + self, + context: RequestContext, + alarm_name: AlarmName, + state_value: StateValue, + state_reason: StateReason, + state_reason_data: StateReasonData = None, + **kwargs, + ) -> None: + try: + if state_reason_data: + state_reason_data = json.loads(state_reason_data) + except ValueError: + raise InvalidParameterValueException( + "TODO: check right error message: Json was not correctly formatted" + ) + with _STORE_LOCK: + store = self.get_store(context.account_id, context.region) + alarm = store.alarms.get( + arns.cloudwatch_alarm_arn( + alarm_name, account_id=context.account_id, region_name=context.region + ) + ) + if not alarm: + raise ResourceNotFound() + + old_state = alarm.alarm["StateValue"] + if state_value not in ("OK", "ALARM", "INSUFFICIENT_DATA"): + raise ValidationError( + f"1 validation error detected: Value '{state_value}' at 'stateValue' failed to satisfy constraint: Member must satisfy enum value set: [INSUFFICIENT_DATA, ALARM, OK]" + ) + + old_state_reason = alarm.alarm["StateReason"] + old_state_update_timestamp = alarm.alarm["StateUpdatedTimestamp"] + + if old_state == state_value: + return + + alarm.alarm["StateTransitionedTimestamp"] = datetime.datetime.now(timezone.utc) + # update startDate (=last ALARM date) - should only update when a new alarm is triggered + # the date is only updated if we have a reason-data, which is set by an alarm + if state_reason_data: + state_reason_data["startDate"] = state_reason_data.get("queryDate") + + self._update_state( + context, + alarm, + state_value, + state_reason, + state_reason_data, + ) + + self._evaluate_composite_alarms(context, alarm) + + if not alarm.alarm["ActionsEnabled"]: + return + if state_value == "OK": + actions = alarm.alarm["OKActions"] + elif state_value == "ALARM": + actions = alarm.alarm["AlarmActions"] + else: + actions = alarm.alarm["InsufficientDataActions"] + for action in actions: + data = arns.parse_arn(action) + # test for sns - can this be done in a more generic way? 
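+            # the action ARN determines the target, e.g.
+            #   arn:aws:sns:us-east-1:000000000000:my-topic        -> SNS publish
+            #   arn:aws:lambda:us-east-1:000000000000:function:fn  -> Lambda invoke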
+            if data["service"] == "sns":
+                service = connect_to(
+                    region_name=data["region"], aws_access_key_id=data["account"]
+                ).sns
+                subject = f"""{state_value}: "{alarm_name}" in {context.region}"""
+                message = create_message_response_update_state_sns(alarm, old_state)
+                service.publish(TopicArn=action, Subject=subject, Message=message)
+            elif data["service"] == "lambda":
+                service = connect_to(
+                    region_name=data["region"], aws_access_key_id=data["account"]
+                ).lambda_
+                message = create_message_response_update_state_lambda(
+                    alarm, old_state, old_state_reason, old_state_update_timestamp
+                )
+                service.invoke(FunctionName=lambda_function_name(action), Payload=message)
+            else:
+                # TODO: support other actions
+                LOG.warning(
+                    "Action for service %s not implemented, action '%s' will not be triggered.",
+                    data["service"],
+                    action,
+                )
+
+    def get_raw_metrics(self, request: Request):
+        """This feature was introduced with https://github.com/localstack/localstack/pull/3535.
+        In the meantime, it required a valid AWS header so that the account ID/region could be extracted.
+        With the new implementation, we want to return all data, but add the account ID/region as additional attributes.
+
+        TODO: the endpoint should be refactored or deprecated at some point
+          - the result should be paginated
+          - include aggregated metrics (but we would also need to change/adapt the shape of "metrics" that we return)
+
+        :returns: json {"metrics": [{"ns": "namespace", "n": "metric_name", "v": value, "t": timestamp,
+        "d": [], "account": account, "region": region}]}
+        """
+        return {"metrics": self.cloudwatch_database.get_all_metric_data() or []}
+
+    @handler("PutMetricAlarm", expand=False)
+    def put_metric_alarm(self, context: RequestContext, request: PutMetricAlarmInput) -> None:
+        # "missing" is the default when TreatMissingData is not set (but it is not set explicitly)
+        if request.get("TreatMissingData", "missing") not in [
+            "breaching",
+            "notBreaching",
+            "ignore",
+            "missing",
+        ]:
+            raise ValidationError(
+                f"The value {request['TreatMissingData']} is not supported for TreatMissingData parameter. Supported values are [breaching, notBreaching, ignore, missing]."
+            )
+        # do some sanity checks:
+        if request.get("Period"):
+            # valid values are 10, 30, and any multiple of 60
+            value = request.get("Period")
+            if value not in (10, 30) and value % 60 != 0:
+                raise ValidationError("Period must be 10, 30 or a multiple of 60")
+        if request.get("Statistic"):
+            if request.get("Statistic") not in [
+                "SampleCount",
+                "Average",
+                "Sum",
+                "Minimum",
+                "Maximum",
+            ]:
+                raise ValidationError(
+                    f"Value '{request.get('Statistic')}' at 'statistic' failed to satisfy constraint: Member must satisfy enum value set: [Maximum, SampleCount, Sum, Minimum, Average]"
+                )
+
+        extended_statistic = request.get("ExtendedStatistic")
+        if extended_statistic and not extended_statistic.startswith("p"):
+            raise InvalidParameterValueException(
+                f"The value {extended_statistic} for parameter ExtendedStatistic is not supported."
+            )
+        evaluate_low_sample_count_percentile = request.get("EvaluateLowSampleCountPercentile")
+        if evaluate_low_sample_count_percentile and evaluate_low_sample_count_percentile not in (
+            "evaluate",
+            "ignore",
+        ):
+            raise ValidationError(
+                f"Option {evaluate_low_sample_count_percentile} is not supported. "
+                "Supported options for parameter EvaluateLowSampleCountPercentile are evaluate and ignore."
+            )
+        with _STORE_LOCK:
+            store = self.get_store(context.account_id, context.region)
+            metric_alarm = LocalStackMetricAlarm(context.account_id, context.region, {**request})
+            alarm_arn = metric_alarm.alarm["AlarmArn"]
+            store.alarms[alarm_arn] = metric_alarm
+            self.alarm_scheduler.schedule_metric_alarm(alarm_arn)
+
+    @handler("PutCompositeAlarm", expand=False)
+    def put_composite_alarm(self, context: RequestContext, request: PutCompositeAlarmInput) -> None:
+        with _STORE_LOCK:
+            store = self.get_store(context.account_id, context.region)
+            composite_alarm = LocalStackCompositeAlarm(
+                context.account_id, context.region, {**request}
+            )
+
+            alarm_rule = composite_alarm.alarm["AlarmRule"]
+            rule_expression_validation_result = self._validate_alarm_rule_expression(alarm_rule)
+            for validation_warning in rule_expression_validation_result:
+                LOG.warning(validation_warning)
+
+            alarm_arn = composite_alarm.alarm["AlarmArn"]
+            store.alarms[alarm_arn] = composite_alarm
+
+    def describe_alarms(
+        self,
+        context: RequestContext,
+        alarm_names: AlarmNames = None,
+        alarm_name_prefix: AlarmNamePrefix = None,
+        alarm_types: AlarmTypes = None,
+        children_of_alarm_name: AlarmName = None,
+        parents_of_alarm_name: AlarmName = None,
+        state_value: StateValue = None,
+        action_prefix: ActionPrefix = None,
+        max_records: MaxRecords = None,
+        next_token: NextToken = None,
+        **kwargs,
+    ) -> DescribeAlarmsOutput:
+        store = self.get_store(context.account_id, context.region)
+        alarms = list(store.alarms.values())
+        if action_prefix:
+            # an alarm matches if any of its configured alarm actions starts with the given prefix
+            alarms = [
+                a.alarm
+                for a in alarms
+                if any(
+                    action.startswith(action_prefix) for action in a.alarm.get("AlarmActions", [])
+                )
+            ]
+        elif alarm_name_prefix:
+            alarms = [a.alarm for a in alarms if a.alarm["AlarmName"].startswith(alarm_name_prefix)]
+        elif alarm_names:
+            alarms = [a.alarm for a in alarms if a.alarm["AlarmName"] in alarm_names]
+        elif state_value:
+            alarms = [a.alarm for a in alarms if a.alarm["StateValue"] == state_value]
+        else:
+            alarms = [a.alarm for a in alarms]
+
+        # TODO: Pagination
+        metric_alarms = [a for a in alarms if a.get("AlarmRule") is None]
+        composite_alarms = [a for a in alarms if a.get("AlarmRule") is not None]
+        return DescribeAlarmsOutput(CompositeAlarms=composite_alarms, MetricAlarms=metric_alarms)
+
+    def describe_alarms_for_metric(
+        self,
+        context: RequestContext,
+        metric_name: MetricName,
+        namespace: Namespace,
+        statistic: Statistic = None,
+        extended_statistic: ExtendedStatistic = None,
+        dimensions: Dimensions = None,
+        period: Period = None,
+        unit: StandardUnit = None,
+        **kwargs,
+    ) -> DescribeAlarmsForMetricOutput:
+        store = self.get_store(context.account_id, context.region)
+        alarms = [
+            a.alarm
+            for a in store.alarms.values()
+            if isinstance(a, LocalStackMetricAlarm)
+            and a.alarm.get("MetricName") == metric_name
+            and a.alarm.get("Namespace") == namespace
+        ]
+
+        if statistic:
+            alarms = [a for a in alarms if a.get("Statistic") == statistic]
+        if dimensions:
+            alarms = [a for a in alarms if a.get("Dimensions") == dimensions]
+        if period:
+            alarms = [a for a in alarms if a.get("Period") == period]
+        if unit:
+            alarms = [a for a in alarms if a.get("Unit") == unit]
+        return DescribeAlarmsForMetricOutput(MetricAlarms=alarms)
+
+    def list_tags_for_resource(
+        self, context: RequestContext, resource_arn: AmazonResourceName, **kwargs
+    ) -> ListTagsForResourceOutput:
+        store = self.get_store(context.account_id, context.region)
+        tags = store.TAGS.list_tags_for_resource(resource_arn)
+        return ListTagsForResourceOutput(Tags=tags.get("Tags", []))
+
+    def untag_resource(
+        self,
+        context: 
RequestContext, + resource_arn: AmazonResourceName, + tag_keys: TagKeyList, + **kwargs, + ) -> UntagResourceOutput: + store = self.get_store(context.account_id, context.region) + store.TAGS.untag_resource(resource_arn, tag_keys) + return UntagResourceOutput() + + def tag_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, tags: TagList, **kwargs + ) -> TagResourceOutput: + store = self.get_store(context.account_id, context.region) + store.TAGS.tag_resource(resource_arn, tags) + return TagResourceOutput() + + def put_dashboard( + self, + context: RequestContext, + dashboard_name: DashboardName, + dashboard_body: DashboardBody, + **kwargs, + ) -> PutDashboardOutput: + pattern = r"^[a-zA-Z0-9_-]+$" + if not re.match(pattern, dashboard_name): + raise InvalidParameterValueException( + "The value for field DashboardName contains invalid characters. " + "It can only contain alphanumerics, dash (-) and underscore (_).\n" + ) + + store = self.get_store(context.account_id, context.region) + store.dashboards[dashboard_name] = LocalStackDashboard( + context.account_id, context.region, dashboard_name, dashboard_body + ) + return PutDashboardOutput() + + def get_dashboard( + self, context: RequestContext, dashboard_name: DashboardName, **kwargs + ) -> GetDashboardOutput: + store = self.get_store(context.account_id, context.region) + dashboard = store.dashboards.get(dashboard_name) + if not dashboard: + raise InvalidParameterValueException(f"Dashboard {dashboard_name} does not exist.") + + return GetDashboardOutput( + DashboardName=dashboard_name, + DashboardBody=dashboard.dashboard_body, + DashboardArn=dashboard.dashboard_arn, + ) + + def delete_dashboards( + self, context: RequestContext, dashboard_names: DashboardNames, **kwargs + ) -> DeleteDashboardsOutput: + store = self.get_store(context.account_id, context.region) + for dashboard_name in dashboard_names: + store.dashboards.pop(dashboard_name, None) + return DeleteDashboardsOutput() + + def list_dashboards( + self, + context: RequestContext, + dashboard_name_prefix: DashboardNamePrefix = None, + next_token: NextToken = None, + **kwargs, + ) -> ListDashboardsOutput: + store = self.get_store(context.account_id, context.region) + dashboard_names = list(store.dashboards.keys()) + dashboard_names = [ + name for name in dashboard_names if name.startswith(dashboard_name_prefix or "") + ] + + entries = [ + { + "DashboardName": name, + "DashboardArn": store.dashboards[name].dashboard_arn, + "LastModified": store.dashboards[name].last_modified, + "Size": store.dashboards[name].size, + } + for name in dashboard_names + ] + return ListDashboardsOutput( + DashboardEntries=entries, + ) + + def list_metrics( + self, + context: RequestContext, + namespace: Namespace = None, + metric_name: MetricName = None, + dimensions: DimensionFilters = None, + next_token: NextToken = None, + recently_active: RecentlyActive = None, + include_linked_accounts: IncludeLinkedAccounts = None, + owning_account: AccountId = None, + **kwargs, + ) -> ListMetricsOutput: + result = self.cloudwatch_database.list_metrics( + context.account_id, + context.region, + namespace, + metric_name, + dimensions or [], + ) + + metrics = [ + { + "Namespace": metric.get("namespace"), + "MetricName": metric.get("metric_name"), + "Dimensions": metric.get("dimensions"), + } + for metric in result.get("metrics", []) + ] + aliases_list = PaginatedList(metrics) + page, nxt = aliases_list.get_page( + lambda metric: 
f"{metric.get('Namespace')}-{metric.get('MetricName')}-{metric.get('Dimensions')}", + next_token=next_token, + page_size=LIST_METRICS_MAX_RESULTS, + ) + return ListMetricsOutput(Metrics=page, NextToken=nxt) + + def get_metric_statistics( + self, + context: RequestContext, + namespace: Namespace, + metric_name: MetricName, + start_time: Timestamp, + end_time: Timestamp, + period: Period, + dimensions: Dimensions = None, + statistics: Statistics = None, + extended_statistics: ExtendedStatistics = None, + unit: StandardUnit = None, + **kwargs, + ) -> GetMetricStatisticsOutput: + start_time_unix = int(start_time.timestamp()) + end_time_unix = int(end_time.timestamp()) + + if not start_time_unix < end_time_unix: + raise InvalidParameterValueException( + "The parameter StartTime must be less than the parameter EndTime." + ) + + expected_datapoints = (end_time_unix - start_time_unix) / period + + if expected_datapoints > AWS_MAX_DATAPOINTS_ACCEPTED: + raise InvalidParameterCombination( + f"You have requested up to {int(expected_datapoints)} datapoints, which exceeds the limit of {AWS_MAX_DATAPOINTS_ACCEPTED}. " + f"You may reduce the datapoints requested by increasing Period, or decreasing the time range." + ) + + stat_datapoints = {} + + units = ( + [unit] + if unit + else self.cloudwatch_database.get_units_for_metric_data_stat( + account_id=context.account_id, + region=context.region, + start_time=start_time, + end_time=end_time, + metric_name=metric_name, + namespace=namespace, + ) + ) + + for stat in statistics: + for selected_unit in units: + query_result = self.cloudwatch_database.get_metric_data_stat( + account_id=context.account_id, + region=context.region, + start_time=start_time, + end_time=end_time, + scan_by="TimestampDescending", + query=MetricDataQuery( + MetricStat=MetricStat( + Metric={ + "MetricName": metric_name, + "Namespace": namespace, + "Dimensions": dimensions or [], + }, + Period=period, + Stat=stat, + Unit=selected_unit, + ) + ), + ) + + timestamps = query_result.get("timestamps", []) + values = query_result.get("values", []) + for i, timestamp in enumerate(timestamps): + stat_datapoints.setdefault(selected_unit, {}) + stat_datapoints[selected_unit].setdefault(timestamp, {}) + stat_datapoints[selected_unit][timestamp][stat] = values[i] + stat_datapoints[selected_unit][timestamp]["Unit"] = selected_unit + + datapoints: list[Datapoint] = [] + for selected_unit, results in stat_datapoints.items(): + for timestamp, stats in results.items(): + datapoints.append( + Datapoint( + Timestamp=timestamp, + SampleCount=stats.get("SampleCount"), + Average=stats.get("Average"), + Sum=stats.get("Sum"), + Minimum=stats.get("Minimum"), + Maximum=stats.get("Maximum"), + Unit="None" if selected_unit == "NULL_VALUE" else selected_unit, + ) + ) + + return GetMetricStatisticsOutput(Datapoints=datapoints, Label=metric_name) + + def _update_state( + self, + context: RequestContext, + alarm: LocalStackAlarm, + state_value: str, + state_reason: str, + state_reason_data: dict = None, + ): + old_state = alarm.alarm["StateValue"] + old_state_reason = alarm.alarm["StateReason"] + store = self.get_store(context.account_id, context.region) + current_time = datetime.datetime.now() + # version is not present in state reason data for composite alarm, hence the check + if state_reason_data and isinstance(alarm, LocalStackMetricAlarm): + state_reason_data["version"] = HISTORY_VERSION + history_data = { + "version": HISTORY_VERSION, + "oldState": {"stateValue": old_state, "stateReason": old_state_reason}, + 
"newState": { + "stateValue": state_value, + "stateReason": state_reason, + "stateReasonData": state_reason_data, + }, + } + store.histories.append( + { + "Timestamp": timestamp_millis(alarm.alarm["StateUpdatedTimestamp"]), + "HistoryItemType": HistoryItemType.StateUpdate, + "AlarmName": alarm.alarm["AlarmName"], + "HistoryData": json.dumps(history_data), + "HistorySummary": f"Alarm updated from {old_state} to {state_value}", + "AlarmType": "MetricAlarm" + if isinstance(alarm, LocalStackMetricAlarm) + else "CompositeAlarm", + } + ) + alarm.alarm["StateValue"] = state_value + alarm.alarm["StateReason"] = state_reason + if state_reason_data: + alarm.alarm["StateReasonData"] = json.dumps(state_reason_data) + alarm.alarm["StateUpdatedTimestamp"] = current_time + + def disable_alarm_actions( + self, context: RequestContext, alarm_names: AlarmNames, **kwargs + ) -> None: + self._set_alarm_actions(context, alarm_names, enabled=False) + + def enable_alarm_actions( + self, context: RequestContext, alarm_names: AlarmNames, **kwargs + ) -> None: + self._set_alarm_actions(context, alarm_names, enabled=True) + + def _set_alarm_actions(self, context, alarm_names, enabled): + store = self.get_store(context.account_id, context.region) + for name in alarm_names: + alarm_arn = arns.cloudwatch_alarm_arn( + name, account_id=context.account_id, region_name=context.region + ) + alarm = store.alarms.get(alarm_arn) + if alarm: + alarm.alarm["ActionsEnabled"] = enabled + + def describe_alarm_history( + self, + context: RequestContext, + alarm_name: AlarmName = None, + alarm_types: AlarmTypes = None, + history_item_type: HistoryItemType = None, + start_date: Timestamp = None, + end_date: Timestamp = None, + max_records: MaxRecords = None, + next_token: NextToken = None, + scan_by: ScanBy = None, + **kwargs, + ) -> DescribeAlarmHistoryOutput: + store = self.get_store(context.account_id, context.region) + history = store.histories + if alarm_name: + history = [h for h in history if h["AlarmName"] == alarm_name] + + def _get_timestamp(input: dict): + if timestamp_string := input.get("Timestamp"): + return datetime.datetime.fromisoformat(timestamp_string) + return None + + if start_date: + history = [h for h in history if (date := _get_timestamp(h)) and date >= start_date] + if end_date: + history = [h for h in history if (date := _get_timestamp(h)) and date <= end_date] + return DescribeAlarmHistoryOutput(AlarmHistoryItems=history) + + def _evaluate_composite_alarms(self, context: RequestContext, triggering_alarm): + # TODO either pass store as a parameter or acquire RLock (with _STORE_LOCK:) + # everything works ok now but better ensure protection of critical section in front of future changes + store = self.get_store(context.account_id, context.region) + alarms = list(store.alarms.values()) + composite_alarms = [a for a in alarms if isinstance(a, LocalStackCompositeAlarm)] + for composite_alarm in composite_alarms: + self._evaluate_composite_alarm(context, composite_alarm, triggering_alarm) + + def _evaluate_composite_alarm(self, context, composite_alarm, triggering_alarm): + store = self.get_store(context.account_id, context.region) + alarm_rule = composite_alarm.alarm["AlarmRule"] + rule_expression_validation = self._validate_alarm_rule_expression(alarm_rule) + if rule_expression_validation: + LOG.warning( + "Alarm rule contains unsupported expressions and will not be evaluated: %s", + rule_expression_validation, + ) + return + new_state_value = StateValue.OK + # assuming that a rule consists only of ALARM 
evaluations of metric alarms, with OR logic applied + for metric_alarm_arn in self._get_alarm_arns(alarm_rule): + metric_alarm = store.alarms.get(metric_alarm_arn) + if not metric_alarm: + LOG.warning( + "Alarm rule won't be evaluated as there is no alarm with ARN %s", + metric_alarm_arn, + ) + return + if metric_alarm.alarm["StateValue"] == StateValue.ALARM: + triggering_alarm = metric_alarm + new_state_value = StateValue.ALARM + break + old_state_value = composite_alarm.alarm["StateValue"] + if old_state_value == new_state_value: + return + triggering_alarm_arn = triggering_alarm.alarm.get("AlarmArn") + triggering_alarm_state = triggering_alarm.alarm.get("StateValue") + triggering_alarm_state_change_timestamp = triggering_alarm.alarm.get( + "StateTransitionedTimestamp" + ) + state_reason_formatted_timestamp = triggering_alarm_state_change_timestamp.strftime( + "%A %d %B, %Y %H:%M:%S %Z" + ) + state_reason = ( + f"{triggering_alarm_arn} " + f"transitioned to {triggering_alarm_state} " + f"at {state_reason_formatted_timestamp}" + ) + state_reason_data = { + "triggeringAlarms": [ + { + "arn": triggering_alarm_arn, + "state": { + "value": triggering_alarm_state, + "timestamp": timestamp_millis(triggering_alarm_state_change_timestamp), + }, + } + ] + } + self._update_state( + context, composite_alarm, new_state_value, state_reason, state_reason_data + ) + if composite_alarm.alarm["ActionsEnabled"]: + self._run_composite_alarm_actions( + context, composite_alarm, old_state_value, triggering_alarm + ) + + def _validate_alarm_rule_expression(self, alarm_rule): + validation_result = [] + alarms_conditions = [alarm.strip() for alarm in alarm_rule.split("OR")] + for alarm_condition in alarms_conditions: + if not alarm_condition.startswith("ALARM"): + validation_result.append( + f"Unsupported expression in alarm rule condition {alarm_condition}: Only ALARM expression is supported by Localstack as of now" + ) + return validation_result + + def _get_alarm_arns(self, composite_alarm_rule): + # regexp for everything within (" ") + return re.findall(r'\("([^"]*)"\)', composite_alarm_rule) + + def _run_composite_alarm_actions( + self, context, composite_alarm, old_state_value, triggering_alarm + ): + new_state_value = composite_alarm.alarm["StateValue"] + if new_state_value == StateValue.OK: + actions = composite_alarm.alarm["OKActions"] + elif new_state_value == StateValue.ALARM: + actions = composite_alarm.alarm["AlarmActions"] + else: + actions = composite_alarm.alarm["InsufficientDataActions"] + for action in actions: + data = arns.parse_arn(action) + if data["service"] == "sns": + service = connect_to( + region_name=data["region"], aws_access_key_id=data["account"] + ).sns + subject = f"""{new_state_value}: "{composite_alarm.alarm["AlarmName"]}" in {context.region}""" + message = create_message_response_update_composite_alarm_state_sns( + composite_alarm, triggering_alarm, old_state_value + ) + service.publish(TopicArn=action, Subject=subject, Message=message) + else: + # TODO: support other actions + LOG.warning( + "Action for service %s not implemented, action '%s' will not be triggered.", + data["service"], + action, + ) + + +def create_metric_data_query_from_alarm(alarm: LocalStackMetricAlarm): + # TODO may need to be adapted for other use cases + # verified return value with a snapshot test + return [ + { + "id": str(uuid.uuid4()), + "metricStat": { + "metric": { + "namespace": alarm.alarm["Namespace"], + "name": alarm.alarm["MetricName"], + "dimensions": alarm.alarm.get("Dimensions") or {}, + 
}, + "period": int(alarm.alarm["Period"]), + "stat": alarm.alarm["Statistic"], + }, + "returnData": True, + } + ] + + +def create_message_response_update_state_lambda( + alarm: LocalStackMetricAlarm, old_state, old_state_reason, old_state_timestamp +): + _alarm = alarm.alarm + response = { + "accountId": extract_account_id_from_arn(_alarm["AlarmArn"]), + "alarmArn": _alarm["AlarmArn"], + "alarmData": { + "alarmName": _alarm["AlarmName"], + "state": { + "value": _alarm["StateValue"], + "reason": _alarm["StateReason"], + "timestamp": _alarm["StateUpdatedTimestamp"], + }, + "previousState": { + "value": old_state, + "reason": old_state_reason, + "timestamp": old_state_timestamp, + }, + "configuration": { + "description": _alarm.get("AlarmDescription", ""), + "metrics": _alarm.get( + "Metrics", create_metric_data_query_from_alarm(alarm) + ), # TODO: add test with metric_data_queries + }, + }, + "time": _alarm["StateUpdatedTimestamp"], + "region": alarm.region, + "source": "aws.cloudwatch", + } + return json.dumps(response, cls=JSONEncoder) + + +def create_message_response_update_state_sns(alarm: LocalStackMetricAlarm, old_state: StateValue): + _alarm = alarm.alarm + response = { + "AWSAccountId": alarm.account_id, + "OldStateValue": old_state, + "AlarmName": _alarm["AlarmName"], + "AlarmDescription": _alarm.get("AlarmDescription"), + "AlarmConfigurationUpdatedTimestamp": _alarm["AlarmConfigurationUpdatedTimestamp"], + "NewStateValue": _alarm["StateValue"], + "NewStateReason": _alarm["StateReason"], + "StateChangeTime": _alarm["StateUpdatedTimestamp"], + # the long-name for 'region' should be used - as we don't have it, we use the short name + # which needs to be slightly changed to make snapshot tests work + "Region": alarm.region.replace("-", " ").capitalize(), + "AlarmArn": _alarm["AlarmArn"], + "OKActions": _alarm.get("OKActions", []), + "AlarmActions": _alarm.get("AlarmActions", []), + "InsufficientDataActions": _alarm.get("InsufficientDataActions", []), + } + + # collect trigger details + details = { + "MetricName": _alarm.get("MetricName", ""), + "Namespace": _alarm.get("Namespace", ""), + "Unit": _alarm.get("Unit", None), # testing with AWS revealed this currently returns None + "Period": int(_alarm.get("Period", 0)), + "EvaluationPeriods": int(_alarm.get("EvaluationPeriods", 0)), + "ComparisonOperator": _alarm.get("ComparisonOperator", ""), + "Threshold": float(_alarm.get("Threshold", 0.0)), + "TreatMissingData": _alarm.get("TreatMissingData", ""), + "EvaluateLowSampleCountPercentile": _alarm.get("EvaluateLowSampleCountPercentile", ""), + } + + # Dimensions not serializable + dimensions = [] + alarm_dimensions = _alarm.get("Dimensions", []) + if alarm_dimensions: + for d in _alarm["Dimensions"]: + dimensions.append({"value": d["Value"], "name": d["Name"]}) + details["Dimensions"] = dimensions or "" + + alarm_statistic = _alarm.get("Statistic") + alarm_extended_statistic = _alarm.get("ExtendedStatistic") + + if alarm_statistic: + details["StatisticType"] = "Statistic" + details["Statistic"] = camel_to_snake_case(alarm_statistic).upper() # AWS returns uppercase + elif alarm_extended_statistic: + details["StatisticType"] = "ExtendedStatistic" + details["ExtendedStatistic"] = alarm_extended_statistic + + response["Trigger"] = details + + return json.dumps(response, cls=JSONEncoder) + + +def create_message_response_update_composite_alarm_state_sns( + composite_alarm: LocalStackCompositeAlarm, + triggering_alarm: LocalStackMetricAlarm, + old_state: StateValue, +): + _alarm = 
composite_alarm.alarm + response = { + "AWSAccountId": composite_alarm.account_id, + "AlarmName": _alarm["AlarmName"], + "AlarmDescription": _alarm.get("AlarmDescription"), + "AlarmRule": _alarm.get("AlarmRule"), + "OldStateValue": old_state, + "NewStateValue": _alarm["StateValue"], + "NewStateReason": _alarm["StateReason"], + "StateChangeTime": _alarm["StateUpdatedTimestamp"], + # the long-name for 'region' should be used - as we don't have it, we use the short name + # which needs to be slightly changed to make snapshot tests work + "Region": composite_alarm.region.replace("-", " ").capitalize(), + "AlarmArn": _alarm["AlarmArn"], + "OKActions": _alarm.get("OKActions", []), + "AlarmActions": _alarm.get("AlarmActions", []), + "InsufficientDataActions": _alarm.get("InsufficientDataActions", []), + } + + triggering_children = [ + { + "Arn": triggering_alarm.alarm.get("AlarmArn"), + "State": { + "Value": triggering_alarm.alarm["StateValue"], + "Timestamp": triggering_alarm.alarm["StateUpdatedTimestamp"], + }, + } + ] + + response["TriggeringChildren"] = triggering_children + + return json.dumps(response, cls=JSONEncoder) diff --git a/localstack/services/route53resolver/__init__.py b/localstack-core/localstack/services/cloudwatch/resource_providers/__init__.py similarity index 100% rename from localstack/services/route53resolver/__init__.py rename to localstack-core/localstack/services/cloudwatch/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm.py b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm.py new file mode 100644 index 0000000000000..56aa3292de1f4 --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm.py @@ -0,0 +1,194 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class CloudWatchAlarmProperties(TypedDict): + ComparisonOperator: Optional[str] + EvaluationPeriods: Optional[int] + ActionsEnabled: Optional[bool] + AlarmActions: Optional[list[str]] + AlarmDescription: Optional[str] + AlarmName: Optional[str] + Arn: Optional[str] + DatapointsToAlarm: Optional[int] + Dimensions: Optional[list[Dimension]] + EvaluateLowSampleCountPercentile: Optional[str] + ExtendedStatistic: Optional[str] + Id: Optional[str] + InsufficientDataActions: Optional[list[str]] + MetricName: Optional[str] + Metrics: Optional[list[MetricDataQuery]] + Namespace: Optional[str] + OKActions: Optional[list[str]] + Period: Optional[int] + Statistic: Optional[str] + Threshold: Optional[float] + ThresholdMetricId: Optional[str] + TreatMissingData: Optional[str] + Unit: Optional[str] + + +class Dimension(TypedDict): + Name: Optional[str] + Value: Optional[str] + + +class Metric(TypedDict): + Dimensions: Optional[list[Dimension]] + MetricName: Optional[str] + Namespace: Optional[str] + + +class MetricStat(TypedDict): + Metric: Optional[Metric] + Period: Optional[int] + Stat: Optional[str] + Unit: Optional[str] + + +class MetricDataQuery(TypedDict): + Id: Optional[str] + AccountId: Optional[str] + Expression: Optional[str] + Label: Optional[str] + MetricStat: Optional[MetricStat] + Period: Optional[int] + ReturnData: Optional[bool] + + 
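+# Illustrative example of a payload matching the TypedDicts above; the values are
+# made up for documentation purposes and are not used by the provider:
+_EXAMPLE_METRIC_DATA_QUERY: MetricDataQuery = {
+    "Id": "m1",
+    "ReturnData": True,
+    "MetricStat": {
+        "Metric": {"Namespace": "AWS/EC2", "MetricName": "CPUUtilization"},
+        "Period": 300,
+        "Stat": "Average",
+    },
+}
+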
+REPEATED_INVOCATION = "repeated_invocation" + + +class CloudWatchAlarmProvider(ResourceProvider[CloudWatchAlarmProperties]): + TYPE = "AWS::CloudWatch::Alarm" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[CloudWatchAlarmProperties], + ) -> ProgressEvent[CloudWatchAlarmProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - ComparisonOperator + - EvaluationPeriods + + Create-only properties: + - /properties/AlarmName + + Read-only properties: + - /properties/Id + - /properties/Arn + + + + """ + model = request.desired_state + cloudwatch = request.aws_client_factory.cloudwatch + + if not model.get("AlarmName"): + model["AlarmName"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + create_params = util.select_attributes( + model, + [ + "AlarmName", + "ComparisonOperator", + "EvaluationPeriods", + "Period", + "MetricName", + "Namespace", + "Statistic", + "Threshold", + "ActionsEnabled", + "AlarmActions", + "AlarmDescription", + "DatapointsToAlarm", + "Dimensions", + "EvaluateLowSampleCountPercentile", + "ExtendedStatistic", + "InsufficientDataActions", + "Metrics", + "OKActions", + "ThresholdMetricId", + "TreatMissingData", + "Unit", + ], + ) + + cloudwatch.put_metric_alarm(**create_params) + alarms = cloudwatch.describe_alarms(AlarmNames=[model["AlarmName"]])["MetricAlarms"] + if not alarms: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + message="Alarm not found", + ) + + alarm = alarms[0] + model["Arn"] = alarm["AlarmArn"] + model["Id"] = alarm["AlarmName"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[CloudWatchAlarmProperties], + ) -> ProgressEvent[CloudWatchAlarmProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[CloudWatchAlarmProperties], + ) -> ProgressEvent[CloudWatchAlarmProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + cloud_watch = request.aws_client_factory.cloudwatch + cloud_watch.delete_alarms(AlarmNames=[model["AlarmName"]]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[CloudWatchAlarmProperties], + ) -> ProgressEvent[CloudWatchAlarmProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm.schema.json b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm.schema.json new file mode 100644 index 0000000000000..c30c227e6aff9 --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm.schema.json @@ -0,0 +1,200 @@ +{ + "typeName": "AWS::CloudWatch::Alarm", + "description": "Resource Type definition for AWS::CloudWatch::Alarm", + "additionalProperties": false, + "properties": { + "ThresholdMetricId": { + "type": "string" + }, + "EvaluateLowSampleCountPercentile": { + "type": "string" + }, + "ExtendedStatistic": { + "type": "string" + }, + "ComparisonOperator": { + "type": "string" + }, + "TreatMissingData": { + "type": "string" + 
}, + "Dimensions": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Dimension" + } + }, + "Period": { + "type": "integer" + }, + "EvaluationPeriods": { + "type": "integer" + }, + "Unit": { + "type": "string" + }, + "Namespace": { + "type": "string" + }, + "OKActions": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "AlarmActions": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "MetricName": { + "type": "string" + }, + "ActionsEnabled": { + "type": "boolean" + }, + "Metrics": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/MetricDataQuery" + } + }, + "AlarmDescription": { + "type": "string" + }, + "AlarmName": { + "type": "string" + }, + "Statistic": { + "type": "string" + }, + "InsufficientDataActions": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Id": { + "type": "string" + }, + "Arn": { + "type": "string" + }, + "DatapointsToAlarm": { + "type": "integer" + }, + "Threshold": { + "type": "number" + } + }, + "definitions": { + "MetricStat": { + "type": "object", + "additionalProperties": false, + "properties": { + "Period": { + "type": "integer" + }, + "Metric": { + "$ref": "#/definitions/Metric" + }, + "Stat": { + "type": "string" + }, + "Unit": { + "type": "string" + } + }, + "required": [ + "Stat", + "Period", + "Metric" + ] + }, + "Metric": { + "type": "object", + "additionalProperties": false, + "properties": { + "MetricName": { + "type": "string" + }, + "Dimensions": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Dimension" + } + }, + "Namespace": { + "type": "string" + } + } + }, + "Dimension": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Name": { + "type": "string" + } + }, + "required": [ + "Value", + "Name" + ] + }, + "MetricDataQuery": { + "type": "object", + "additionalProperties": false, + "properties": { + "AccountId": { + "type": "string" + }, + "ReturnData": { + "type": "boolean" + }, + "Expression": { + "type": "string" + }, + "Label": { + "type": "string" + }, + "MetricStat": { + "$ref": "#/definitions/MetricStat" + }, + "Period": { + "type": "integer" + }, + "Id": { + "type": "string" + } + }, + "required": [ + "Id" + ] + } + }, + "required": [ + "ComparisonOperator", + "EvaluationPeriods" + ], + "createOnlyProperties": [ + "/properties/AlarmName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/Arn" + ] +} diff --git a/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm_plugin.py b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm_plugin.py new file mode 100644 index 0000000000000..6dfffe39b52a4 --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_alarm_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class CloudWatchAlarmProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::CloudWatch::Alarm" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.cloudwatch.resource_providers.aws_cloudwatch_alarm import ( + CloudWatchAlarmProvider, + ) + + self.factory = 
CloudWatchAlarmProvider diff --git a/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm.py b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm.py new file mode 100644 index 0000000000000..b6ca22b2e9f3f --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm.py @@ -0,0 +1,168 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.strings import str_to_bool + + +class CloudWatchCompositeAlarmProperties(TypedDict): + AlarmRule: Optional[str] + ActionsEnabled: Optional[bool] + ActionsSuppressor: Optional[str] + ActionsSuppressorExtensionPeriod: Optional[int] + ActionsSuppressorWaitPeriod: Optional[int] + AlarmActions: Optional[list[str]] + AlarmDescription: Optional[str] + AlarmName: Optional[str] + Arn: Optional[str] + InsufficientDataActions: Optional[list[str]] + OKActions: Optional[list[str]] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class CloudWatchCompositeAlarmProvider(ResourceProvider[CloudWatchCompositeAlarmProperties]): + TYPE = "AWS::CloudWatch::CompositeAlarm" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[CloudWatchCompositeAlarmProperties], + ) -> ProgressEvent[CloudWatchCompositeAlarmProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/AlarmName + + Required properties: + - AlarmRule + + Create-only properties: + - /properties/AlarmName + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - cloudwatch:DescribeAlarms + - cloudwatch:PutCompositeAlarm + + """ + model = request.desired_state + cloud_watch = request.aws_client_factory.cloudwatch + + params = util.select_attributes( + model, + [ + "AlarmName", + "AlarmRule", + "ActionsEnabled", + "ActionsSuppressor", + "ActionsSuppressorWaitPeriod", + "ActionsSuppressorExtensionPeriod", + "AlarmActions", + "AlarmDescription", + "InsufficientDataActions", + "OKActions", + ], + ) + if not params.get("AlarmName"): + model["AlarmName"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + params["AlarmName"] = model["AlarmName"] + + if "ActionsEnabled" in params: + params["ActionsEnabled"] = str_to_bool(params["ActionsEnabled"]) + + create_params = util.select_attributes( + model, + [ + "AlarmName", + "AlarmRule", + "ActionsEnabled", + "ActionsSuppressor", + "ActionsSuppressorExtensionPeriod", + "ActionsSuppressorWaitPeriod", + "AlarmActions", + "AlarmDescription", + "InsufficientDataActions", + "OKActions", + ], + ) + + cloud_watch.put_composite_alarm(**create_params) + alarms = cloud_watch.describe_alarms( + AlarmNames=[model["AlarmName"]], AlarmTypes=["CompositeAlarm"] + )["CompositeAlarms"] + + if not alarms: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + message="Composite Alarm not found", + ) + model["Arn"] = alarms[0]["AlarmArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + 
self, + request: ResourceRequest[CloudWatchCompositeAlarmProperties], + ) -> ProgressEvent[CloudWatchCompositeAlarmProperties]: + """ + Fetch resource information + + IAM permissions required: + - cloudwatch:DescribeAlarms + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[CloudWatchCompositeAlarmProperties], + ) -> ProgressEvent[CloudWatchCompositeAlarmProperties]: + """ + Delete a resource + + IAM permissions required: + - cloudwatch:DescribeAlarms + - cloudwatch:DeleteAlarms + """ + model = request.desired_state + cloud_watch = request.aws_client_factory.cloudwatch + cloud_watch.delete_alarms(AlarmNames=[model["AlarmName"]]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[CloudWatchCompositeAlarmProperties], + ) -> ProgressEvent[CloudWatchCompositeAlarmProperties]: + """ + Update a resource + + IAM permissions required: + - cloudwatch:DescribeAlarms + - cloudwatch:PutCompositeAlarm + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm.schema.json b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm.schema.json new file mode 100644 index 0000000000000..36464ecf204be --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm.schema.json @@ -0,0 +1,130 @@ +{ + "typeName": "AWS::CloudWatch::CompositeAlarm", + "description": "The AWS::CloudWatch::CompositeAlarm type specifies an alarm which aggregates the states of other Alarms (Metric or Composite Alarms) as defined by the AlarmRule expression", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-cloudwatch.git", + "properties": { + "Arn": { + "type": "string", + "description": "Amazon Resource Name (ARN) of the alarm", + "minLength": 1, + "maxLength": 1600 + }, + "AlarmName": { + "description": "The name of the Composite Alarm", + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "AlarmRule": { + "type": "string", + "description": "Expression which aggregates the state of other Alarms (Metric or Composite Alarms)", + "minLength": 1, + "maxLength": 10240 + }, + "AlarmDescription": { + "type": "string", + "description": "The description of the alarm", + "minLength": 0, + "maxLength": 1024 + }, + "ActionsEnabled": { + "description": "Indicates whether actions should be executed during any changes to the alarm state. The default is TRUE.", + "type": "boolean" + }, + "OKActions": { + "type": "array", + "items": { + "type": "string", + "description": "Amazon Resource Name (ARN) of the action", + "minLength": 1, + "maxLength": 1024 + }, + "description": "The actions to execute when this alarm transitions to the OK state from any other state. Each action is specified as an Amazon Resource Name (ARN).", + "maxItems": 5 + }, + "AlarmActions": { + "type": "array", + "items": { + "type": "string", + "description": "Amazon Resource Name (ARN) of the action", + "minLength": 1, + "maxLength": 1024 + }, + "description": "The list of actions to execute when this alarm transitions into an ALARM state from any other state. 
Specify each action as an Amazon Resource Name (ARN).", + "maxItems": 5 + }, + "InsufficientDataActions": { + "type": "array", + "items": { + "type": "string", + "description": "Amazon Resource Name (ARN) of the action", + "minLength": 1, + "maxLength": 1024 + }, + "description": "The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN).", + "maxItems": 5 + }, + "ActionsSuppressor": { + "description": "Actions will be suppressed if the suppressor alarm is in the ALARM state. ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm. ", + "type": "string", + "minLength": 1, + "maxLength": 1600 + }, + "ActionsSuppressorWaitPeriod": { + "description": "Actions will be suppressed if ExtensionPeriod is active. The length of time that actions are suppressed is in seconds.", + "type": "integer", + "minimum": 0 + }, + "ActionsSuppressorExtensionPeriod": { + "description": "Actions will be suppressed if WaitPeriod is active. The length of time that actions are suppressed is in seconds.", + "type": "integer", + "minimum": 0 + } + }, + "required": [ + "AlarmRule" + ], + "readOnlyProperties": [ + "/properties/Arn" + ], + "createOnlyProperties": [ + "/properties/AlarmName" + ], + "primaryIdentifier": [ + "/properties/AlarmName" + ], + "additionalProperties": false, + "handlers": { + "create": { + "permissions": [ + "cloudwatch:DescribeAlarms", + "cloudwatch:PutCompositeAlarm" + ] + }, + "read": { + "permissions": [ + "cloudwatch:DescribeAlarms" + ] + }, + "update": { + "permissions": [ + "cloudwatch:DescribeAlarms", + "cloudwatch:PutCompositeAlarm" + ] + }, + "delete": { + "permissions": [ + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ] + }, + "list": { + "permissions": [ + "cloudwatch:DescribeAlarms" + ] + } + }, + "tagging": { + "taggable": false + } +} diff --git a/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm_plugin.py b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm_plugin.py new file mode 100644 index 0000000000000..867cebdbfe31d --- /dev/null +++ b/localstack-core/localstack/services/cloudwatch/resource_providers/aws_cloudwatch_compositealarm_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class CloudWatchCompositeAlarmProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::CloudWatch::CompositeAlarm" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.cloudwatch.resource_providers.aws_cloudwatch_compositealarm import ( + CloudWatchCompositeAlarmProvider, + ) + + self.factory = CloudWatchCompositeAlarmProvider diff --git a/localstack/services/s3/__init__.py b/localstack-core/localstack/services/configservice/__init__.py similarity index 100% rename from localstack/services/s3/__init__.py rename to localstack-core/localstack/services/configservice/__init__.py diff --git a/localstack/services/configservice/provider.py b/localstack-core/localstack/services/configservice/provider.py similarity index 100% rename from localstack/services/configservice/provider.py rename to localstack-core/localstack/services/configservice/provider.py diff --git a/localstack/services/s3control/__init__.py 
b/localstack-core/localstack/services/dynamodb/__init__.py similarity index 100% rename from localstack/services/s3control/__init__.py rename to localstack-core/localstack/services/dynamodb/__init__.py diff --git a/localstack-core/localstack/services/dynamodb/models.py b/localstack-core/localstack/services/dynamodb/models.py new file mode 100644 index 0000000000000..cc6d7ee2e4939 --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/models.py @@ -0,0 +1,122 @@ +import dataclasses +from typing import TypedDict + +from localstack.aws.api.dynamodb import ( + AttributeMap, + Key, + RegionName, + ReplicaDescription, + StreamViewType, + TableName, + TimeToLiveSpecification, +) +from localstack.services.stores import ( + AccountRegionBundle, + BaseStore, + CrossRegionAttribute, + LocalAttribute, +) + + +@dataclasses.dataclass +class TableStreamType: + """ + When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. + - KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + - NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + - OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream. + Special case: + is_kinesis: equivalent to NEW_AND_OLD_IMAGES, can be set at the same time as StreamViewType + """ + + stream_view_type: StreamViewType | None + is_kinesis: bool + + @property + def needs_old_image(self): + return self.is_kinesis or self.stream_view_type in ( + StreamViewType.OLD_IMAGE, + StreamViewType.NEW_AND_OLD_IMAGES, + ) + + @property + def needs_new_image(self): + return self.is_kinesis or self.stream_view_type in ( + StreamViewType.NEW_IMAGE, + StreamViewType.NEW_AND_OLD_IMAGES, + ) + + +class DynamoDbStreamRecord(TypedDict, total=False): + ApproximateCreationDateTime: int + SizeBytes: int + Keys: Key + StreamViewType: StreamViewType | None + OldImage: AttributeMap | None + NewImage: AttributeMap | None + SequenceNumber: int | None + + +class StreamRecord(TypedDict, total=False): + """ + Related to DynamoDB Streams and Kinesis Destinations + This class contains data necessary for both KinesisRecord and DynamoDBStreams record + """ + + eventName: str + eventID: str + eventVersion: str + dynamodb: DynamoDbStreamRecord + awsRegion: str + eventSource: str + + +StreamRecords = list[StreamRecord] + + +class TableRecords(TypedDict): + """ + Container class used to forward events from DynamoDB to DDB Streams and Kinesis destinations. + It contains the records to be forwarded and data about the streams to be forwarded to. 
+ """ + + table_stream_type: TableStreamType + records: StreamRecords + + +# the RecordsMap maps the TableName to TableRecords, allowing forwarding to the destinations +# some DynamoDB calls can modify several tables at once, which is why we need to group those events per table, as each +# table can have different destinations +RecordsMap = dict[TableName, TableRecords] + + +class DynamoDBStore(BaseStore): + # maps global table names to configurations (for the legacy v.2017 tables) + GLOBAL_TABLES: dict[str, dict] = CrossRegionAttribute(default=dict) + + # Maps table name to the region they exist in on DDBLocal (for v.2019 global tables) + TABLE_REGION: dict[TableName, RegionName] = CrossRegionAttribute(default=dict) + + # Maps the table replicas (for v.2019 global tables) + REPLICAS: dict[TableName, dict[RegionName, ReplicaDescription]] = CrossRegionAttribute( + default=dict + ) + + # cache table taggings - maps table ARN to tags dict + TABLE_TAGS: dict[str, dict] = CrossRegionAttribute(default=dict) + + # maps table names to cached table definitions + table_definitions: dict[str, dict] = LocalAttribute(default=dict) + + # maps table names to additional table properties that are not stored upstream (e.g., ReplicaUpdates) + table_properties: dict[str, dict] = LocalAttribute(default=dict) + + # maps table names to TTL specifications + ttl_specifications: dict[str, TimeToLiveSpecification] = LocalAttribute(default=dict) + + # maps backups + backups: dict[str, dict] = LocalAttribute(default=dict) + + +dynamodb_stores = AccountRegionBundle("dynamodb", DynamoDBStore) diff --git a/localstack-core/localstack/services/dynamodb/packages.py b/localstack-core/localstack/services/dynamodb/packages.py new file mode 100644 index 0000000000000..db2ca14c49bf6 --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/packages.py @@ -0,0 +1,105 @@ +import os +from typing import List + +from localstack import config +from localstack.constants import ARTIFACTS_REPO, MAVEN_REPO_URL +from localstack.packages import InstallTarget, Package, PackageInstaller +from localstack.packages.java import java_package +from localstack.utils.archives import ( + download_and_extract_with_retry, + update_jar_manifest, + upgrade_jar_file, +) +from localstack.utils.files import rm_rf, save_file +from localstack.utils.functions import run_safe +from localstack.utils.http import download +from localstack.utils.run import run + +DDB_AGENT_JAR_URL = f"{ARTIFACTS_REPO}/raw/388cd73f45bfd3bcf7ad40aa35499093061c7962/dynamodb-local-patch/target/ddb-local-loader-0.1.jar" +JAVASSIST_JAR_URL = f"{MAVEN_REPO_URL}/org/javassist/javassist/3.30.2-GA/javassist-3.30.2-GA.jar" + +DDBLOCAL_URL = "https://d1ni2b6xgvw0s0.cloudfront.net/v2.x/dynamodb_local_latest.zip" + + +class DynamoDBLocalPackage(Package): + def __init__(self): + super().__init__(name="DynamoDBLocal", default_version="2") + + def _get_installer(self, _) -> PackageInstaller: + return DynamoDBLocalPackageInstaller() + + def get_versions(self) -> List[str]: + return ["2"] + + +class DynamoDBLocalPackageInstaller(PackageInstaller): + def __init__(self): + super().__init__("dynamodb-local", "2") + + # DDBLocal v2 requires JRE 17+ + # See: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.DownloadingAndRunning.html + self.java_version = "21" + + def _prepare_installation(self, target: InstallTarget) -> None: + java_package.get_installer(self.java_version).install(target) + + def get_java_env_vars(self) -> dict[str, str]: + java_home = 
java_package.get_installer(self.java_version).get_java_home() + path = f"{java_home}/bin:{os.environ['PATH']}" + + return { + "JAVA_HOME": java_home, + "PATH": path, + } + + def _install(self, target: InstallTarget): + # download and extract archive + tmp_archive = os.path.join(config.dirs.cache, f"DynamoDBLocal-{self.version}.zip") + install_dir = self._get_install_dir(target) + + download_and_extract_with_retry(DDBLOCAL_URL, tmp_archive, install_dir) + rm_rf(tmp_archive) + + # Use custom log formatting + log4j2_config = """ + + + + + + + + + + + + """ + log4j2_file = os.path.join(install_dir, "log4j2.xml") + run_safe(lambda: save_file(log4j2_file, log4j2_config)) + run_safe(lambda: run(["zip", "-u", "DynamoDBLocal.jar", "log4j2.xml"], cwd=install_dir)) + + # Add patch that enables 20+ GSIs + ddb_agent_jar_path = self.get_ddb_agent_jar_path() + if not os.path.exists(ddb_agent_jar_path): + download(DDB_AGENT_JAR_URL, ddb_agent_jar_path) + + javassit_jar_path = os.path.join(install_dir, "javassist.jar") + if not os.path.exists(javassit_jar_path): + download(JAVASSIST_JAR_URL, javassit_jar_path) + + # Add javassist in the manifest classpath + update_jar_manifest( + "DynamoDBLocal.jar", install_dir, "Class-Path: .", "Class-Path: javassist.jar ." + ) + + ddb_local_lib_dir = os.path.join(install_dir, "DynamoDBLocal_lib") + upgrade_jar_file(ddb_local_lib_dir, "slf4j-ext-*.jar", "org/slf4j/slf4j-ext:2.0.13") + + def _get_install_marker_path(self, install_dir: str) -> str: + return os.path.join(install_dir, "DynamoDBLocal.jar") + + def get_ddb_agent_jar_path(self): + return os.path.join(self.get_installed_dir(), "ddb-local-loader-0.1.jar") + + +dynamodblocal_package = DynamoDBLocalPackage() diff --git a/localstack/services/dynamodb/plugins.py b/localstack-core/localstack/services/dynamodb/plugins.py similarity index 100% rename from localstack/services/dynamodb/plugins.py rename to localstack-core/localstack/services/dynamodb/plugins.py diff --git a/localstack-core/localstack/services/dynamodb/provider.py b/localstack-core/localstack/services/dynamodb/provider.py new file mode 100644 index 0000000000000..407e6400414ca --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/provider.py @@ -0,0 +1,2271 @@ +import copy +import json +import logging +import os +import random +import re +import threading +import time +import traceback +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor +from contextlib import contextmanager +from datetime import datetime +from operator import itemgetter +from typing import Dict, List, Optional + +import requests +import werkzeug + +from localstack import config +from localstack.aws import handlers +from localstack.aws.api import ( + CommonServiceException, + RequestContext, + ServiceRequest, + ServiceResponse, + handler, +) +from localstack.aws.api.dynamodb import ( + AttributeMap, + BatchExecuteStatementOutput, + BatchGetItemOutput, + BatchGetRequestMap, + BatchGetResponseMap, + BatchWriteItemInput, + BatchWriteItemOutput, + BatchWriteItemRequestMap, + BillingMode, + ContinuousBackupsDescription, + ContinuousBackupsStatus, + CreateGlobalTableOutput, + CreateTableInput, + CreateTableOutput, + Delete, + DeleteItemInput, + DeleteItemOutput, + DeleteRequest, + DeleteTableOutput, + DescribeContinuousBackupsOutput, + DescribeGlobalTableOutput, + DescribeKinesisStreamingDestinationOutput, + DescribeTableOutput, + DescribeTimeToLiveOutput, + DestinationStatus, + DynamodbApi, + EnableKinesisStreamingConfiguration, + 
ExecuteStatementInput, + ExecuteStatementOutput, + ExecuteTransactionInput, + ExecuteTransactionOutput, + GetItemInput, + GetItemOutput, + GlobalTableAlreadyExistsException, + GlobalTableNotFoundException, + KinesisStreamingDestinationOutput, + ListGlobalTablesOutput, + ListTablesInputLimit, + ListTablesOutput, + ListTagsOfResourceOutput, + NextTokenString, + PartiQLBatchRequest, + PointInTimeRecoveryDescription, + PointInTimeRecoverySpecification, + PointInTimeRecoveryStatus, + PositiveIntegerObject, + ProvisionedThroughputExceededException, + Put, + PutItemInput, + PutItemOutput, + PutRequest, + QueryInput, + QueryOutput, + RegionName, + ReplicaDescription, + ReplicaList, + ReplicaStatus, + ReplicaUpdateList, + ResourceArnString, + ResourceInUseException, + ResourceNotFoundException, + ReturnConsumedCapacity, + ScanInput, + ScanOutput, + StreamArn, + TableDescription, + TableName, + TagKeyList, + TagList, + TimeToLiveSpecification, + TransactGetItemList, + TransactGetItemsOutput, + TransactWriteItem, + TransactWriteItemList, + TransactWriteItemsInput, + TransactWriteItemsOutput, + Update, + UpdateContinuousBackupsOutput, + UpdateGlobalTableOutput, + UpdateItemInput, + UpdateItemOutput, + UpdateTableInput, + UpdateTableOutput, + UpdateTimeToLiveOutput, + WriteRequest, +) +from localstack.aws.api.dynamodbstreams import StreamStatus +from localstack.aws.connect import connect_to +from localstack.constants import ( + AUTH_CREDENTIAL_REGEX, + AWS_REGION_US_EAST_1, + INTERNAL_AWS_SECRET_ACCESS_KEY, +) +from localstack.http import Request, Response, route +from localstack.services.dynamodb.models import ( + DynamoDBStore, + RecordsMap, + StreamRecord, + StreamRecords, + TableRecords, + TableStreamType, + dynamodb_stores, +) +from localstack.services.dynamodb.server import DynamodbServer +from localstack.services.dynamodb.utils import ( + ItemFinder, + ItemSet, + SchemaExtractor, + de_dynamize_record, + extract_table_name_from_partiql_update, + get_ddb_access_key, + modify_ddblocal_arns, +) +from localstack.services.dynamodbstreams import dynamodbstreams_api +from localstack.services.dynamodbstreams.models import dynamodbstreams_stores +from localstack.services.edge import ROUTER +from localstack.services.plugins import ServiceLifecycleHook +from localstack.state import AssetDirectory, StateVisitor +from localstack.utils.aws import arns +from localstack.utils.aws.arns import ( + extract_account_id_from_arn, + extract_region_from_arn, + get_partition, +) +from localstack.utils.aws.aws_stack import get_valid_regions_for_service +from localstack.utils.aws.request_context import ( + extract_account_id_from_headers, + extract_region_from_headers, +) +from localstack.utils.collections import select_attributes, select_from_typed_dict +from localstack.utils.common import short_uid, to_bytes +from localstack.utils.json import BytesEncoder, canonical_json +from localstack.utils.scheduler import Scheduler +from localstack.utils.strings import long_uid, md5, to_str +from localstack.utils.threads import FuncThread, start_thread + +# set up logger +LOG = logging.getLogger(__name__) + +# action header prefix +ACTION_PREFIX = "DynamoDB_20120810." 
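+# e.g. a PutItem call arrives with the header "X-Amz-Target: DynamoDB_20120810.PutItem",
+# i.e. ACTION_PREFIX followed by the operation name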
+ +# list of actions subject to throughput limitations +READ_THROTTLED_ACTIONS = [ + "GetItem", + "Query", + "Scan", + "TransactGetItems", + "BatchGetItem", +] +WRITE_THROTTLED_ACTIONS = [ + "PutItem", + "BatchWriteItem", + "UpdateItem", + "DeleteItem", + "TransactWriteItems", +] +THROTTLED_ACTIONS = READ_THROTTLED_ACTIONS + WRITE_THROTTLED_ACTIONS + +MANAGED_KMS_KEYS = {} + + +def dynamodb_table_exists(table_name: str, client=None) -> bool: + client = client or connect_to().dynamodb + paginator = client.get_paginator("list_tables") + pages = paginator.paginate(PaginationConfig={"PageSize": 100}) + table_name = to_str(table_name) + return any(table_name in page["TableNames"] for page in pages) + + +class EventForwarder: + def __init__(self, num_thread: int = 10): + self.executor = ThreadPoolExecutor(num_thread, thread_name_prefix="ddb_stream_fwd") + + def shutdown(self): + self.executor.shutdown(wait=False) + + def forward_to_targets( + self, account_id: str, region_name: str, records_map: RecordsMap, background: bool = True + ) -> None: + if background: + self._submit_records( + account_id=account_id, + region_name=region_name, + records_map=records_map, + ) + else: + self._forward(account_id, region_name, records_map) + + def _submit_records(self, account_id: str, region_name: str, records_map: RecordsMap): + """Required for patching submit with local thread context for EventStudio""" + self.executor.submit( + self._forward, + account_id, + region_name, + records_map, + ) + + def _forward(self, account_id: str, region_name: str, records_map: RecordsMap) -> None: + try: + self.forward_to_kinesis_stream(account_id, region_name, records_map) + except Exception as e: + LOG.debug( + "Error while publishing to Kinesis streams: '%s'", + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + try: + self.forward_to_ddb_stream(account_id, region_name, records_map) + except Exception as e: + LOG.debug( + "Error while publishing to DynamoDB streams, '%s'", + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + @staticmethod + def forward_to_ddb_stream(account_id: str, region_name: str, records_map: RecordsMap) -> None: + dynamodbstreams_api.forward_events(account_id, region_name, records_map) + + @staticmethod + def forward_to_kinesis_stream( + account_id: str, region_name: str, records_map: RecordsMap + ) -> None: + # You can only stream data from DynamoDB to Kinesis Data Streams in the same AWS account and AWS Region as your + # table. + # You can only stream data from a DynamoDB table to one Kinesis data stream. 
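
The EventForwarder above decouples stream fan-out from the API call itself: records are submitted to a small ThreadPoolExecutor, and delivery failures are logged rather than raised, so a broken Kinesis or DDB Streams target can never fail the originating write. A self-contained sketch of that fire-and-forget pattern, with illustrative names only:

import logging
from concurrent.futures import ThreadPoolExecutor

LOG = logging.getLogger(__name__)

class FireAndForgetForwarder:
    """Sketch of the EventForwarder pattern: submit work, never block or raise."""

    def __init__(self, num_threads: int = 10):
        self.executor = ThreadPoolExecutor(num_threads, thread_name_prefix="fwd")

    def forward(self, records: dict, deliver) -> None:
        # returns immediately; delivery happens on a worker thread
        self.executor.submit(self._deliver_safely, records, deliver)

    @staticmethod
    def _deliver_safely(records: dict, deliver) -> None:
        try:
            deliver(records)
        except Exception as e:
            # failures are only logged, mirroring EventForwarder._forward above
            LOG.debug("Error while forwarding records: %s", e)

    def shutdown(self) -> None:
        self.executor.shutdown(wait=False)
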
+ store = get_store(account_id, region_name) + + for table_name, table_records in records_map.items(): + table_stream_type = table_records["table_stream_type"] + if not table_stream_type.is_kinesis: + continue + + kinesis_records = [] + + table_arn = arns.dynamodb_table_arn(table_name, account_id, region_name) + records = table_records["records"] + table_def = store.table_definitions.get(table_name) or {} + stream_arn = table_def["KinesisDataStreamDestinations"][-1]["StreamArn"] + for record in records: + kinesis_record = dict( + tableName=table_name, + recordFormat="application/json", + userIdentity=None, + **record, + ) + fields_to_remove = {"StreamViewType", "SequenceNumber"} + kinesis_record["dynamodb"] = { + k: v for k, v in record["dynamodb"].items() if k not in fields_to_remove + } + kinesis_record.pop("eventVersion", None) + + hash_keys = list( + filter(lambda key: key["KeyType"] == "HASH", table_def["KeySchema"]) + ) + # TODO: reverse properly how AWS creates the partition key, it seems to be an MD5 hash + kinesis_partition_key = md5(f"{table_name}{hash_keys[0]['AttributeName']}") + + kinesis_records.append( + { + "Data": json.dumps(kinesis_record, cls=BytesEncoder), + "PartitionKey": kinesis_partition_key, + } + ) + + kinesis = connect_to( + aws_access_key_id=account_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + region_name=region_name, + ).kinesis.request_metadata(service_principal="dynamodb", source_arn=table_arn) + + kinesis.put_records( + StreamARN=stream_arn, + Records=kinesis_records, + ) + + @classmethod + def is_kinesis_stream_exists(cls, stream_arn): + account_id = extract_account_id_from_arn(stream_arn) + region_name = extract_region_from_arn(stream_arn) + + kinesis = connect_to( + aws_access_key_id=account_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + region_name=region_name, + ).kinesis + stream_name_from_arn = stream_arn.split("/", 1)[1] + # check if the stream exists in kinesis for the user + filtered = list( + filter( + lambda stream_name: stream_name == stream_name_from_arn, + kinesis.list_streams()["StreamNames"], + ) + ) + return bool(filtered) + + +class SSEUtils: + """Utils for server-side encryption (SSE)""" + + @classmethod + def get_sse_kms_managed_key(cls, account_id: str, region_name: str): + from localstack.services.kms import provider + + existing_key = MANAGED_KMS_KEYS.get(region_name) + if existing_key: + return existing_key + kms_client = connect_to( + aws_access_key_id=account_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + region_name=region_name, + ).kms + key_data = kms_client.create_key( + Description="Default key that protects my DynamoDB data when no other key is defined" + ) + key_id = key_data["KeyMetadata"]["KeyId"] + + provider.set_key_managed(key_id, account_id, region_name) + MANAGED_KMS_KEYS[region_name] = key_id + return key_id + + @classmethod + def get_sse_description(cls, account_id: str, region_name: str, data): + if data.get("Enabled"): + kms_master_key_id = data.get("KMSMasterKeyId") + if not kms_master_key_id: + # this is of course not the actual key for dynamodb, just a better, since existing, mock + kms_master_key_id = cls.get_sse_kms_managed_key(account_id, region_name) + kms_master_key_id = arns.kms_key_arn(kms_master_key_id, account_id, region_name) + return { + "Status": "ENABLED", + "SSEType": "KMS", # no other value is allowed here + "KMSMasterKeyArn": kms_master_key_id, + } + return {} + + +class ValidationException(CommonServiceException): + def __init__(self, message: 
str): + super().__init__(code="ValidationException", status_code=400, message=message) + + +def get_store(account_id: str, region_name: str) -> DynamoDBStore: + # special case: AWS NoSQL Workbench sends "localhost" as region - replace with proper region here + region_name = DynamoDBProvider.ddb_region_name(region_name) + return dynamodb_stores[account_id][region_name] + + +@contextmanager +def modify_context_region(context: RequestContext, region: str): + """ + Context manager that modifies the region of a `RequestContext`. On exit, the context is restored to its + original state. + + :param context: the context to modify + :param region: the modified region + :return: a modified `RequestContext` + """ + original_region = context.region + original_authorization = context.request.headers.get("Authorization") + + key = get_ddb_access_key(context.account_id, region) + + context.region = region + context.request.headers["Authorization"] = re.sub( + AUTH_CREDENTIAL_REGEX, + rf"Credential={key}/\2/{region}/\4/", + original_authorization or "", + flags=re.IGNORECASE, + ) + + try: + yield context + finally: + # restore the original context + context.region = original_region + context.request.headers["Authorization"] = original_authorization + + +class DynamoDBDeveloperEndpoints: + """ + Developer endpoints for DynamoDB + DELETE /_aws/dynamodb/expired - delete expired items from tables with TTL enabled; return the number of expired + items deleted + """ + + @route("/_aws/dynamodb/expired", methods=["DELETE"]) + def delete_expired_messages(self, _: Request): + no_expired_items = delete_expired_items() + return {"ExpiredItems": no_expired_items} + + +def delete_expired_items() -> int: + """ + This utility function iterates over all stores, looks for tables with TTL enabled, + scans such tables, and deletes expired items.
+ """ + no_expired_items = 0 + for account_id, region_name, state in dynamodb_stores.iter_stores(): + ttl_specs = state.ttl_specifications + client = connect_to(aws_access_key_id=account_id, region_name=region_name).dynamodb + for table_name, ttl_spec in ttl_specs.items(): + if ttl_spec.get("Enabled", False): + attribute_name = ttl_spec.get("AttributeName") + current_time = int(datetime.now().timestamp()) + try: + result = client.scan( + TableName=table_name, + FilterExpression="#ttl <= :threshold", + ExpressionAttributeValues={":threshold": {"N": str(current_time)}}, + ExpressionAttributeNames={"#ttl": attribute_name}, + ) + items_to_delete = result.get("Items", []) + no_expired_items += len(items_to_delete) + table_description = client.describe_table(TableName=table_name) + partition_key, range_key = _get_hash_and_range_key(table_description) + keys_to_delete = [ + {partition_key: item.get(partition_key)} + if range_key is None + else { + partition_key: item.get(partition_key), + range_key: item.get(range_key), + } + for item in items_to_delete + ] + delete_requests = [{"DeleteRequest": {"Key": key}} for key in keys_to_delete] + for i in range(0, len(delete_requests), 25): + batch = delete_requests[i : i + 25] + client.batch_write_item(RequestItems={table_name: batch}) + except Exception as e: + LOG.warning( + "An error occurred when deleting expired items from table %s: %s", + table_name, + e, + ) + return no_expired_items + + +def _get_hash_and_range_key(table_description: DescribeTableOutput) -> [str, str | None]: + key_schema = table_description.get("Table", {}).get("KeySchema", []) + hash_key, range_key = None, None + for key in key_schema: + if key["KeyType"] == "HASH": + hash_key = key["AttributeName"] + if key["KeyType"] == "RANGE": + range_key = key["AttributeName"] + return hash_key, range_key + + +class ExpiredItemsWorker: + """A worker that periodically computes and deletes expired items from DynamoDB tables""" + + def __init__(self) -> None: + super().__init__() + self.scheduler = Scheduler() + self.thread: Optional[FuncThread] = None + self.mutex = threading.RLock() + + def start(self): + with self.mutex: + if self.thread: + return + + self.scheduler = Scheduler() + self.scheduler.schedule( + delete_expired_items, period=60 * 60 + ) # the background process seems slow on AWS + + def _run(*_args): + self.scheduler.run() + + self.thread = start_thread(_run, name="ddb-remove-expired-items") + + def stop(self): + with self.mutex: + if self.scheduler: + self.scheduler.close() + + if self.thread: + self.thread.stop() + + self.thread = None + self.scheduler = None + + +class DynamoDBProvider(DynamodbApi, ServiceLifecycleHook): + server: DynamodbServer + """The instance of the server managing the instance of DynamoDB local""" + + def __init__(self): + self.server = self._new_dynamodb_server() + self._expired_items_worker = ExpiredItemsWorker() + self._router_rules = [] + self._event_forwarder = EventForwarder() + + def on_before_start(self): + self.server.start_dynamodb() + if config.DYNAMODB_REMOVE_EXPIRED_ITEMS: + self._expired_items_worker.start() + self._router_rules = ROUTER.add(DynamoDBDeveloperEndpoints()) + + def on_before_stop(self): + self._expired_items_worker.stop() + ROUTER.remove(self._router_rules) + self._event_forwarder.shutdown() + + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(dynamodb_stores) + visitor.visit(dynamodbstreams_stores) + visitor.visit(AssetDirectory(self.service, os.path.join(config.dirs.data, self.service))) + + def 
on_before_state_reset(self): + self.server.stop_dynamodb() + + def on_before_state_load(self): + self.server.stop_dynamodb() + + def on_after_state_reset(self): + self.server.start_dynamodb() + + @staticmethod + def _new_dynamodb_server() -> DynamodbServer: + return DynamodbServer.get() + + def on_after_state_load(self): + self.server.start_dynamodb() + + def on_after_init(self): + # add response processor specific to ddblocal + handlers.modify_service_response.append(self.service, modify_ddblocal_arns) + + # routes for the shell ui + ROUTER.add( + path="/shell", + endpoint=self.handle_shell_ui_redirect, + methods=["GET"], + ) + ROUTER.add( + path="/shell/", + endpoint=self.handle_shell_ui_request, + ) + + def _forward_request( + self, + context: RequestContext, + region: str | None, + service_request: ServiceRequest | None = None, + ) -> ServiceResponse: + """ + Modify the context region and then forward request to DynamoDB Local. + + This is used for operations impacted by global tables. In LocalStack, a single copy of global table + is kept, and any requests to replicated tables are forwarded to this original table. + """ + if region: + with modify_context_region(context, region): + return self.forward_request(context, service_request=service_request) + return self.forward_request(context, service_request=service_request) + + def forward_request( + self, context: RequestContext, service_request: ServiceRequest = None + ) -> ServiceResponse: + """ + Forward a request to DynamoDB Local. + """ + self.check_provisioned_throughput(context.operation.name) + self.prepare_request_headers( + context.request.headers, account_id=context.account_id, region_name=context.region + ) + return self.server.proxy(context, service_request) + + def get_forward_url(self, account_id: str, region_name: str) -> str: + """Return the URL of the backend DynamoDBLocal server to forward requests to""" + return self.server.url + + def handle_shell_ui_redirect(self, request: werkzeug.Request) -> Response: + headers = {"Refresh": f"0; url={config.external_service_url()}/shell/index.html"} + return Response("", headers=headers) + + def handle_shell_ui_request(self, request: werkzeug.Request, req_path: str) -> Response: + # TODO: "DynamoDB Local Web Shell was deprecated with version 1.16.X and is not available any + # longer from 1.17.X to latest. There are no immediate plans for a new Web Shell to be introduced." 
+ # -> keeping this for now, to allow configuring custom installs; should consider removing it in the future + # https://repost.aws/questions/QUHyIzoEDqQ3iOKlUEp1LPWQ#ANdBm9Nz9TRf6VqR3jZtcA1g + req_path = f"/{req_path}" if not req_path.startswith("/") else req_path + account_id = extract_account_id_from_headers(request.headers) + region_name = extract_region_from_headers(request.headers) + url = f"{self.get_forward_url(account_id, region_name)}/shell{req_path}" + result = requests.request( + method=request.method, url=url, headers=request.headers, data=request.data + ) + return Response(result.content, headers=dict(result.headers), status=result.status_code) + + # + # Table ops + # + + @handler("CreateTable", expand=False) + def create_table( + self, + context: RequestContext, + create_table_input: CreateTableInput, + ) -> CreateTableOutput: + table_name = create_table_input["TableName"] + + # Return this specific error message to keep parity with AWS + if self.table_exists(context.account_id, context.region, table_name): + raise ResourceInUseException(f"Table already exists: {table_name}") + + billing_mode = create_table_input.get("BillingMode") + provisioned_throughput = create_table_input.get("ProvisionedThroughput") + if billing_mode == BillingMode.PAY_PER_REQUEST and provisioned_throughput is not None: + raise ValidationException( + "One or more parameter values were invalid: Neither ReadCapacityUnits nor WriteCapacityUnits can be " + "specified when BillingMode is PAY_PER_REQUEST" + ) + + result = self.forward_request(context) + + table_description = result["TableDescription"] + table_description["TableArn"] = table_arn = self.fix_table_arn( + context.account_id, context.region, table_description["TableArn"] + ) + + backend = get_store(context.account_id, context.region) + backend.table_definitions[table_name] = table_definitions = dict(create_table_input) + backend.TABLE_REGION[table_name] = context.region + + if "TableId" not in table_definitions: + table_definitions["TableId"] = long_uid() + + if "SSESpecification" in table_definitions: + sse_specification = table_definitions.pop("SSESpecification") + table_definitions["SSEDescription"] = SSEUtils.get_sse_description( + context.account_id, context.region, sse_specification + ) + + if table_definitions: + table_content = result.get("Table", {}) + table_content.update(table_definitions) + table_description.update(table_content) + + if "StreamSpecification" in table_definitions: + create_dynamodb_stream( + context.account_id, + context.region, + table_definitions, + table_description.get("LatestStreamLabel"), + ) + + if "TableClass" in table_definitions: + table_class = table_description.pop("TableClass", None) or table_definitions.pop( + "TableClass" + ) + table_description["TableClassSummary"] = {"TableClass": table_class} + + if "GlobalSecondaryIndexes" in table_description: + gsis = copy.deepcopy(table_description["GlobalSecondaryIndexes"]) + # update the different values, as DynamoDB-local v2 has a regression around GSI and does not return anything + # anymore + for gsi in gsis: + index_name = gsi.get("IndexName", "") + gsi.update( + { + "IndexArn": f"{table_arn}/index/{index_name}", + "IndexSizeBytes": 0, + "IndexStatus": "ACTIVE", + "ItemCount": 0, + } + ) + gsi_provisioned_throughput = gsi.setdefault("ProvisionedThroughput", {}) + gsi_provisioned_throughput["NumberOfDecreasesToday"] = 0 + + if billing_mode == BillingMode.PAY_PER_REQUEST: + gsi_provisioned_throughput["ReadCapacityUnits"] = 0 + 
gsi_provisioned_throughput["WriteCapacityUnits"] = 0 + + table_description["GlobalSecondaryIndexes"] = gsis + + if "ProvisionedThroughput" in table_description: + if "NumberOfDecreasesToday" not in table_description["ProvisionedThroughput"]: + table_description["ProvisionedThroughput"]["NumberOfDecreasesToday"] = 0 + + tags = table_definitions.pop("Tags", []) + if tags: + get_store(context.account_id, context.region).TABLE_TAGS[table_arn] = { + tag["Key"]: tag["Value"] for tag in tags + } + + # remove invalid attributes from result + table_description.pop("Tags", None) + table_description.pop("BillingMode", None) + + return result + + def delete_table( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DeleteTableOutput: + global_table_region = self.get_global_table_region(context, table_name) + + # Limitation note: On AWS, for a replicated table, if the source table is deleted, the replicated tables continue to exist. + # This is not the case for LocalStack, where all replicated tables will also be removed if source is deleted. + + result = self._forward_request(context=context, region=global_table_region) + + table_arn = result.get("TableDescription", {}).get("TableArn") + table_arn = self.fix_table_arn(context.account_id, context.region, table_arn) + dynamodbstreams_api.delete_streams(context.account_id, context.region, table_arn) + + store = get_store(context.account_id, context.region) + store.TABLE_TAGS.pop(table_arn, None) + store.REPLICAS.pop(table_name, None) + + return result + + def describe_table( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DescribeTableOutput: + global_table_region = self.get_global_table_region(context, table_name) + + result = self._forward_request(context=context, region=global_table_region) + table_description: TableDescription = result["Table"] + + # Update table properties from LocalStack stores + if table_props := get_store(context.account_id, context.region).table_properties.get( + table_name + ): + table_description.update(table_props) + + store = get_store(context.account_id, context.region) + + # Update replication details + replicas: Dict[RegionName, ReplicaDescription] = store.REPLICAS.get(table_name, {}) + + replica_description_list = [] + + if global_table_region != context.region: + replica_description_list.append( + ReplicaDescription( + RegionName=global_table_region, ReplicaStatus=ReplicaStatus.ACTIVE + ) + ) + + for replica_region, replica_description in replicas.items(): + # The replica in the region being queried must not be returned + if replica_region != context.region: + replica_description_list.append(replica_description) + + if replica_description_list: + table_description.update({"Replicas": replica_description_list}) + + # update only TableId and SSEDescription if present + if table_definitions := store.table_definitions.get(table_name): + for key in ["TableId", "SSEDescription"]: + if table_definitions.get(key): + table_description[key] = table_definitions[key] + if "TableClass" in table_definitions: + table_description["TableClassSummary"] = { + "TableClass": table_definitions["TableClass"] + } + + if "GlobalSecondaryIndexes" in table_description: + for gsi in table_description["GlobalSecondaryIndexes"]: + default_values = { + "NumberOfDecreasesToday": 0, + "ReadCapacityUnits": 0, + "WriteCapacityUnits": 0, + } + # even if the billing mode is PAY_PER_REQUEST, AWS returns the Read and Write Capacity Units + # Terraform depends on this parity for update operations + 
gsi["ProvisionedThroughput"] = default_values | gsi.get("ProvisionedThroughput", {}) + + return DescribeTableOutput( + Table=select_from_typed_dict(TableDescription, table_description) + ) + + @handler("UpdateTable", expand=False) + def update_table( + self, context: RequestContext, update_table_input: UpdateTableInput + ) -> UpdateTableOutput: + table_name = update_table_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + + try: + result = self._forward_request(context=context, region=global_table_region) + except CommonServiceException as exc: + # DynamoDBLocal refuses to update certain table params and raises. + # But we still need to update this info in LocalStack stores + if not (exc.code == "ValidationException" and exc.message == "Nothing to update"): + raise + + if table_class := update_table_input.get("TableClass"): + table_definitions = get_store( + context.account_id, context.region + ).table_definitions.setdefault(table_name, {}) + table_definitions["TableClass"] = table_class + + if replica_updates := update_table_input.get("ReplicaUpdates"): + store = get_store(context.account_id, global_table_region) + + # Dict with source region to set of replicated regions + replicas: Dict[RegionName, ReplicaDescription] = store.REPLICAS.get(table_name, {}) + + for replica_update in replica_updates: + for key, details in replica_update.items(): + # Replicated region + target_region = details.get("RegionName") + + # Check if replicated region is valid + if target_region not in get_valid_regions_for_service("dynamodb"): + raise ValidationException(f"Region {target_region} is not supported") + + match key: + case "Create": + if target_region in replicas: + raise ValidationException( + f"Failed to create a the new replica of table with name: '{table_name}' because one or more replicas already existed as tables." + ) + replicas[target_region] = ReplicaDescription( + RegionName=target_region, + KMSMasterKeyId=details.get("KMSMasterKeyId"), + ProvisionedThroughputOverride=details.get( + "ProvisionedThroughputOverride" + ), + GlobalSecondaryIndexes=details.get("GlobalSecondaryIndexes"), + ReplicaStatus=ReplicaStatus.ACTIVE, + ) + case "Delete": + try: + replicas.pop(target_region) + except KeyError: + raise ValidationException( + "Update global table operation failed because one or more replicas were not part of the global table." 
+ ) + + store.REPLICAS[table_name] = replicas + + # update response content + SchemaExtractor.invalidate_table_schema( + table_name, context.account_id, global_table_region + ) + + schema = SchemaExtractor.get_table_schema( + table_name, context.account_id, global_table_region + ) + + if sse_specification_input := update_table_input.get("SSESpecification"): + # If SSESpecification is changed, update store and return the 'UPDATING' status in the response + table_definition = get_store( + context.account_id, context.region + ).table_definitions.setdefault(table_name, {}) + if not sse_specification_input["Enabled"]: + table_definition.pop("SSEDescription", None) + schema["Table"]["SSEDescription"]["Status"] = "UPDATING" + + return UpdateTableOutput(TableDescription=schema["Table"]) + + SchemaExtractor.invalidate_table_schema(table_name, context.account_id, global_table_region) + + schema = SchemaExtractor.get_table_schema( + table_name, context.account_id, global_table_region + ) + + # TODO: DDB streams must also be created for replicas + if update_table_input.get("StreamSpecification"): + create_dynamodb_stream( + context.account_id, + context.region, + update_table_input, + result["TableDescription"].get("LatestStreamLabel"), + ) + + return UpdateTableOutput(TableDescription=schema["Table"]) + + def list_tables( + self, + context: RequestContext, + exclusive_start_table_name: TableName = None, + limit: ListTablesInputLimit = None, + **kwargs, + ) -> ListTablesOutput: + response = self.forward_request(context) + + # Add replicated tables + replicas = get_store(context.account_id, context.region).REPLICAS + for replicated_table, replications in replicas.items(): + for replica_region, replica_description in replications.items(): + if context.region == replica_region: + response["TableNames"].append(replicated_table) + + return response + + # + # Item ops + # + + @handler("PutItem", expand=False) + def put_item(self, context: RequestContext, put_item_input: PutItemInput) -> PutItemOutput: + table_name = put_item_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + + has_return_values = put_item_input.get("ReturnValues") == "ALL_OLD" + stream_type = get_table_stream_type(context.account_id, context.region, table_name) + + # if the request doesn't ask for ReturnValues and we have stream enabled, we need to modify the request to + # force DDBLocal to return those values + if stream_type and not has_return_values: + service_req = copy.copy(context.service_request) + service_req["ReturnValues"] = "ALL_OLD" + result = self._forward_request( + context=context, region=global_table_region, service_request=service_req + ) + else: + result = self._forward_request(context=context, region=global_table_region) + + # Since this operation makes use of global table region, we need to use the same region for all + # calls made via the inter-service client. This is taken care of by passing the account ID and + # region, e.g. 
when getting the stream spec + + # Get stream specifications details for the table + if stream_type: + item = put_item_input["Item"] + # prepare record keys + keys = SchemaExtractor.extract_keys( + item=item, + table_name=table_name, + account_id=context.account_id, + region_name=global_table_region, + ) + # because we modified the request, we will always have the ReturnValues if we have streams enabled + if has_return_values: + existing_item = result.get("Attributes") + else: + # remove the ReturnValues if the client didn't ask for it + existing_item = result.pop("Attributes", None) + + if existing_item == item: + return result + + # create record + record = self.get_record_template( + context.region, + ) + record["eventName"] = "INSERT" if not existing_item else "MODIFY" + record["dynamodb"]["Keys"] = keys + record["dynamodb"]["SizeBytes"] = _get_size_bytes(item) + + if stream_type.needs_new_image: + record["dynamodb"]["NewImage"] = item + if stream_type.stream_view_type: + record["dynamodb"]["StreamViewType"] = stream_type.stream_view_type + if existing_item and stream_type.needs_old_image: + record["dynamodb"]["OldImage"] = existing_item + + records_map = { + table_name: TableRecords(records=[record], table_stream_type=stream_type) + } + self.forward_stream_records(context.account_id, context.region, records_map) + return result + + @handler("DeleteItem", expand=False) + def delete_item( + self, + context: RequestContext, + delete_item_input: DeleteItemInput, + ) -> DeleteItemOutput: + table_name = delete_item_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + + has_return_values = delete_item_input.get("ReturnValues") == "ALL_OLD" + stream_type = get_table_stream_type(context.account_id, context.region, table_name) + + # if the request doesn't ask for ReturnValues and we have stream enabled, we need to modify the request to + # force DDBLocal to return those values + if stream_type and not has_return_values: + service_req = copy.copy(context.service_request) + service_req["ReturnValues"] = "ALL_OLD" + result = self._forward_request( + context=context, region=global_table_region, service_request=service_req + ) + else: + result = self._forward_request(context=context, region=global_table_region) + + # determine and forward stream record + if stream_type: + # because we modified the request, we will always have the ReturnValues if we have streams enabled + if has_return_values: + existing_item = result.get("Attributes") + else: + # remove the ReturnValues if the client didn't ask for it + existing_item = result.pop("Attributes", None) + + if not existing_item: + return result + + # create record + record = self.get_record_template(context.region) + record["eventName"] = "REMOVE" + record["dynamodb"]["Keys"] = delete_item_input["Key"] + record["dynamodb"]["SizeBytes"] = _get_size_bytes(existing_item) + + if stream_type.stream_view_type: + record["dynamodb"]["StreamViewType"] = stream_type.stream_view_type + if stream_type.needs_old_image: + record["dynamodb"]["OldImage"] = existing_item + + records_map = { + table_name: TableRecords(records=[record], table_stream_type=stream_type) + } + self.forward_stream_records(context.account_id, context.region, records_map) + + return result + + @handler("UpdateItem", expand=False) + def update_item( + self, + context: RequestContext, + update_item_input: UpdateItemInput, + ) -> UpdateItemOutput: + # TODO: UpdateItem is harder to use ReturnValues for Streams, because it needs the Before and After images. 
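
The ReturnValues trick used by put_item above (and by delete_item below) in isolation: when a stream is enabled but the client did not request ReturnValues=ALL_OLD, the forwarded request is amended so DDBLocal returns the old item for the stream record, and Attributes is stripped again before the response goes back. A simplified sketch, assuming dict-shaped requests and responses; forward stands in for the provider's _forward_request:

import copy
from typing import Callable

def forward_with_old_image(
    request: dict, stream_enabled: bool, forward: Callable[[dict], dict]
) -> tuple[dict, dict | None]:
    """Sketch: force ALL_OLD so the old item is available for stream records."""
    client_asked = request.get("ReturnValues") == "ALL_OLD"
    if stream_enabled and not client_asked:
        request = copy.copy(request)
        request["ReturnValues"] = "ALL_OLD"
    result = forward(request)
    if client_asked:
        old_image = result.get("Attributes")  # client wants it: keep it in the response
    else:
        old_image = result.pop("Attributes", None)  # strip what the client didn't ask for
    return result, old_image
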
+ table_name = update_item_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + + existing_item = None + stream_type = get_table_stream_type(context.account_id, context.region, table_name) + + # even if we don't need the OldImage, we still need to fetch the existing item to know if the event is INSERT + # or MODIFY (UpdateItem will create the object if it doesn't exist, and you don't use a ConditionExpression) + if stream_type: + existing_item = ItemFinder.find_existing_item( + put_item=update_item_input, + table_name=table_name, + account_id=context.account_id, + region_name=context.region, + endpoint_url=self.server.url, + ) + + result = self._forward_request(context=context, region=global_table_region) + + # construct and forward stream record + if stream_type: + updated_item = ItemFinder.find_existing_item( + put_item=update_item_input, + table_name=table_name, + account_id=context.account_id, + region_name=context.region, + endpoint_url=self.server.url, + ) + if not updated_item or updated_item == existing_item: + return result + + record = self.get_record_template(context.region) + record["eventName"] = "INSERT" if not existing_item else "MODIFY" + record["dynamodb"]["Keys"] = update_item_input["Key"] + record["dynamodb"]["SizeBytes"] = _get_size_bytes(updated_item) + + if stream_type.stream_view_type: + record["dynamodb"]["StreamViewType"] = stream_type.stream_view_type + if existing_item and stream_type.needs_old_image: + record["dynamodb"]["OldImage"] = existing_item + if stream_type.needs_new_image: + record["dynamodb"]["NewImage"] = updated_item + + records_map = { + table_name: TableRecords(records=[record], table_stream_type=stream_type) + } + self.forward_stream_records(context.account_id, context.region, records_map) + + return result + + @handler("GetItem", expand=False) + def get_item(self, context: RequestContext, get_item_input: GetItemInput) -> GetItemOutput: + table_name = get_item_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + result = self._forward_request(context=context, region=global_table_region) + self.fix_consumed_capacity(get_item_input, result) + return result + + # + # Queries + # + + @handler("Query", expand=False) + def query(self, context: RequestContext, query_input: QueryInput) -> QueryOutput: + index_name = query_input.get("IndexName") + if index_name: + if not is_index_query_valid(context.account_id, context.region, query_input): + raise ValidationException( + "One or more parameter values were invalid: Select type ALL_ATTRIBUTES " + "is not supported for global secondary index id-index because its projection " + "type is not ALL", + ) + + table_name = query_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + result = self._forward_request(context=context, region=global_table_region) + self.fix_consumed_capacity(query_input, result) + return result + + @handler("Scan", expand=False) + def scan(self, context: RequestContext, scan_input: ScanInput) -> ScanOutput: + table_name = scan_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + result = self._forward_request(context=context, region=global_table_region) + return result + + # + # Batch ops + # + + @handler("BatchWriteItem", expand=False) + def batch_write_item( + self, + context: RequestContext, + batch_write_item_input: BatchWriteItemInput, + ) -> BatchWriteItemOutput: + # TODO: add global table support + existing_items = {} 
+ existing_items_to_fetch: BatchWriteItemRequestMap = {} + # UnprocessedItems should have the same format as RequestItems + unprocessed_items = {} + request_items = batch_write_item_input["RequestItems"] + + tables_stream_type: dict[TableName, TableStreamType] = {} + + for table_name, items in sorted(request_items.items(), key=itemgetter(0)): + if stream_type := get_table_stream_type(context.account_id, context.region, table_name): + tables_stream_type[table_name] = stream_type + + for request in items: + request: WriteRequest + for key, inner_request in request.items(): + inner_request: PutRequest | DeleteRequest + if self.should_throttle("BatchWriteItem"): + unprocessed_items_for_table = unprocessed_items.setdefault(table_name, []) + unprocessed_items_for_table.append(request) + + elif stream_type: + existing_items_to_fetch_for_table = existing_items_to_fetch.setdefault( + table_name, [] + ) + existing_items_to_fetch_for_table.append(inner_request) + + if existing_items_to_fetch: + existing_items = ItemFinder.find_existing_items( + put_items_per_table=existing_items_to_fetch, + account_id=context.account_id, + region_name=context.region, + endpoint_url=self.server.url, + ) + + try: + result = self.forward_request(context) + except CommonServiceException as e: + # TODO: validate if DynamoDB still raises `One of the required keys was not given a value` + # for now, replace with the schema error validation + if e.message == "One of the required keys was not given a value": + raise ValidationException("The provided key element does not match the schema") + raise e + + # determine and forward stream records + if tables_stream_type: + records_map = self.prepare_batch_write_item_records( + account_id=context.account_id, + region_name=context.region, + tables_stream_type=tables_stream_type, + request_items=request_items, + existing_items=existing_items, + ) + self.forward_stream_records(context.account_id, context.region, records_map) + + # TODO: should unprocessed item which have mutated by `prepare_batch_write_item_records` be returned + for table_name, unprocessed_items_in_table in unprocessed_items.items(): + unprocessed: dict = result["UnprocessedItems"] + result_unprocessed_table = unprocessed.setdefault(table_name, []) + + # add the Unprocessed items to the response + # TODO: check before if the same request has not been Unprocessed by DDB local already? + # those might actually have been processed? shouldn't we remove them from the proxied request? 
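
As the comments above note, UnprocessedItems uses the same shape as RequestItems, which is what makes the standard client-side retry loop work. A sketch of that loop from the caller's perspective, assuming a boto3 client and omitting the backoff a real client would add; this illustrates the response contract, not the provider internals:

import boto3

def batch_write_all(client, request_items: dict) -> None:
    """Keep re-submitting until DynamoDB reports nothing unprocessed."""
    pending = request_items
    while pending:
        response = client.batch_write_item(RequestItems=pending)
        # UnprocessedItems mirrors the RequestItems shape, so it can be fed
        # straight back into the next call
        pending = response.get("UnprocessedItems") or {}

# e.g. against LocalStack (the endpoint URL is an assumption, adjust as needed):
# client = boto3.client("dynamodb", endpoint_url="http://localhost:4566")
# batch_write_all(client, {"my-table": [{"PutRequest": {"Item": {"id": {"S": "1"}}}}]})
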
+ for request in unprocessed_items_in_table: + result_unprocessed_table.append(request) + + # remove any table entry if it's empty + result["UnprocessedItems"] = {k: v for k, v in unprocessed.items() if v} + + return result + + @handler("BatchGetItem") + def batch_get_item( + self, + context: RequestContext, + request_items: BatchGetRequestMap, + return_consumed_capacity: ReturnConsumedCapacity = None, + **kwargs, + ) -> BatchGetItemOutput: + # TODO: add global table support + return self.forward_request(context) + + # + # Transactions + # + + @handler("TransactWriteItems", expand=False) + def transact_write_items( + self, + context: RequestContext, + transact_write_items_input: TransactWriteItemsInput, + ) -> TransactWriteItemsOutput: + # TODO: add global table support + existing_items = {} + existing_items_to_fetch: dict[str, list[Put | Update | Delete]] = {} + updated_items_to_fetch: dict[str, list[Update]] = {} + transact_items = transact_write_items_input["TransactItems"] + tables_stream_type: dict[TableName, TableStreamType] = {} + no_stream_tables = set() + + for item in transact_items: + item: TransactWriteItem + for key in ["Put", "Update", "Delete"]: + inner_item: Put | Delete | Update = item.get(key) + if inner_item: + table_name = inner_item["TableName"] + # if we've seen the table already and it does not have streams, skip + if table_name in no_stream_tables: + continue + + # if we have not seen the table, fetch its streaming status + if table_name not in tables_stream_type: + if stream_type := get_table_stream_type( + context.account_id, context.region, table_name + ): + tables_stream_type[table_name] = stream_type + else: + # no stream, + no_stream_tables.add(table_name) + continue + + existing_items_to_fetch_for_table = existing_items_to_fetch.setdefault( + table_name, [] + ) + existing_items_to_fetch_for_table.append(inner_item) + if key == "Update": + updated_items_to_fetch_for_table = updated_items_to_fetch.setdefault( + table_name, [] + ) + updated_items_to_fetch_for_table.append(inner_item) + + continue + + if existing_items_to_fetch: + existing_items = ItemFinder.find_existing_items( + put_items_per_table=existing_items_to_fetch, + account_id=context.account_id, + region_name=context.region, + endpoint_url=self.server.url, + ) + + client_token: str | None = transact_write_items_input.get("ClientRequestToken") + + if client_token: + # we sort the payload since identical payload but with different order could cause + # IdempotentParameterMismatchException error if a client token is provided + context.request.data = to_bytes(canonical_json(json.loads(context.request.data))) + + result = self.forward_request(context) + + # determine and forward stream records + if tables_stream_type: + updated_items = ( + ItemFinder.find_existing_items( + put_items_per_table=existing_items_to_fetch, + account_id=context.account_id, + region_name=context.region, + endpoint_url=self.server.url, + ) + if updated_items_to_fetch + else {} + ) + + records_map = self.prepare_transact_write_item_records( + account_id=context.account_id, + region_name=context.region, + transact_items=transact_items, + existing_items=existing_items, + updated_items=updated_items, + tables_stream_type=tables_stream_type, + ) + self.forward_stream_records(context.account_id, context.region, records_map) + + return result + + @handler("TransactGetItems", expand=False) + def transact_get_items( + self, + context: RequestContext, + transact_items: TransactGetItemList, + return_consumed_capacity: ReturnConsumedCapacity = 
None, + ) -> TransactGetItemsOutput: + return self.forward_request(context) + + @handler("ExecuteTransaction", expand=False) + def execute_transaction( + self, context: RequestContext, execute_transaction_input: ExecuteTransactionInput + ) -> ExecuteTransactionOutput: + result = self.forward_request(context) + return result + + @handler("ExecuteStatement", expand=False) + def execute_statement( + self, + context: RequestContext, + execute_statement_input: ExecuteStatementInput, + ) -> ExecuteStatementOutput: + # TODO: this operation is still really slow with streams enabled + # find a way to make it better, same way as the other operations, by using returnvalues + # see https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ql-reference.update.html + statement = execute_statement_input["Statement"] + # We found out that 'Parameters' can be an empty list when the request comes from the AWS JS client. + if execute_statement_input.get("Parameters", None) == []: # noqa + raise ValidationException( + "1 validation error detected: Value '[]' at 'parameters' failed to satisfy constraint: Member must have length greater than or equal to 1" + ) + table_name = extract_table_name_from_partiql_update(statement) + existing_items = None + stream_type = table_name and get_table_stream_type( + context.account_id, context.region, table_name + ) + if stream_type: + # Note: fetching the entire list of items is hugely inefficient, especially for larger tables + # TODO: find a mechanism to hook into the PartiQL update mechanism of DynamoDB Local directly! + existing_items = ItemFinder.list_existing_items_for_statement( + partiql_statement=statement, + account_id=context.account_id, + region_name=context.region, + endpoint_url=self.server.url, + ) + + result = self.forward_request(context) + + # construct and forward stream record + if stream_type: + records = get_updated_records( + account_id=context.account_id, + region_name=context.region, + table_name=table_name, + existing_items=existing_items, + server_url=self.server.url, + table_stream_type=stream_type, + ) + self.forward_stream_records(context.account_id, context.region, records) + + return result + + # + # Tags + # + + def tag_resource( + self, context: RequestContext, resource_arn: ResourceArnString, tags: TagList, **kwargs + ) -> None: + table_tags = get_store(context.account_id, context.region).TABLE_TAGS + if resource_arn not in table_tags: + table_tags[resource_arn] = {} + table_tags[resource_arn].update({tag["Key"]: tag["Value"] for tag in tags}) + + def untag_resource( + self, + context: RequestContext, + resource_arn: ResourceArnString, + tag_keys: TagKeyList, + **kwargs, + ) -> None: + for tag_key in tag_keys or []: + get_store(context.account_id, context.region).TABLE_TAGS.get(resource_arn, {}).pop( + tag_key, None + ) + + def list_tags_of_resource( + self, + context: RequestContext, + resource_arn: ResourceArnString, + next_token: NextTokenString = None, + **kwargs, + ) -> ListTagsOfResourceOutput: + result = [ + {"Key": k, "Value": v} + for k, v in get_store(context.account_id, context.region) + .TABLE_TAGS.get(resource_arn, {}) + .items() + ] + return ListTagsOfResourceOutput(Tags=result) + + # + # TTLs + # + + def describe_time_to_live( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DescribeTimeToLiveOutput: + if not self.table_exists(context.account_id, context.region, table_name): + raise ResourceNotFoundException( + f"Requested resource not found: Table: {table_name} not found" + ) + + backend = 
get_store(context.account_id, context.region) + ttl_spec = backend.ttl_specifications.get(table_name) + + result = {"TimeToLiveStatus": "DISABLED"} + if ttl_spec: + if ttl_spec.get("Enabled"): + ttl_status = "ENABLED" + else: + ttl_status = "DISABLED" + result = { + "AttributeName": ttl_spec.get("AttributeName"), + "TimeToLiveStatus": ttl_status, + } + + return DescribeTimeToLiveOutput(TimeToLiveDescription=result) + + def update_time_to_live( + self, + context: RequestContext, + table_name: TableName, + time_to_live_specification: TimeToLiveSpecification, + **kwargs, + ) -> UpdateTimeToLiveOutput: + if not self.table_exists(context.account_id, context.region, table_name): + raise ResourceNotFoundException( + f"Requested resource not found: Table: {table_name} not found" + ) + + # TODO: TTL status is maintained/mocked but no real expiry is happening for items + backend = get_store(context.account_id, context.region) + backend.ttl_specifications[table_name] = time_to_live_specification + return UpdateTimeToLiveOutput(TimeToLiveSpecification=time_to_live_specification) + + # + # Global tables + # + + def create_global_table( + self, + context: RequestContext, + global_table_name: TableName, + replication_group: ReplicaList, + **kwargs, + ) -> CreateGlobalTableOutput: + global_tables: Dict = get_store(context.account_id, context.region).GLOBAL_TABLES + if global_table_name in global_tables: + raise GlobalTableAlreadyExistsException("Global table with this name already exists") + replication_group = [grp.copy() for grp in replication_group or []] + data = {"GlobalTableName": global_table_name, "ReplicationGroup": replication_group} + global_tables[global_table_name] = data + for group in replication_group: + group["ReplicaStatus"] = "ACTIVE" + group["ReplicaStatusDescription"] = "Replica active" + return CreateGlobalTableOutput(GlobalTableDescription=data) + + def describe_global_table( + self, context: RequestContext, global_table_name: TableName, **kwargs + ) -> DescribeGlobalTableOutput: + details = get_store(context.account_id, context.region).GLOBAL_TABLES.get(global_table_name) + if not details: + raise GlobalTableNotFoundException("Global table with this name does not exist") + return DescribeGlobalTableOutput(GlobalTableDescription=details) + + def list_global_tables( + self, + context: RequestContext, + exclusive_start_global_table_name: TableName = None, + limit: PositiveIntegerObject = None, + region_name: RegionName = None, + **kwargs, + ) -> ListGlobalTablesOutput: + # TODO: add paging support + result = [ + select_attributes(tab, ["GlobalTableName", "ReplicationGroup"]) + for tab in get_store(context.account_id, context.region).GLOBAL_TABLES.values() + ] + return ListGlobalTablesOutput(GlobalTables=result) + + def update_global_table( + self, + context: RequestContext, + global_table_name: TableName, + replica_updates: ReplicaUpdateList, + **kwargs, + ) -> UpdateGlobalTableOutput: + details = get_store(context.account_id, context.region).GLOBAL_TABLES.get(global_table_name) + if not details: + raise GlobalTableNotFoundException("Global table with this name does not exist") + for update in replica_updates or []: + repl_group = details["ReplicationGroup"] + # delete existing + delete = update.get("Delete") + if delete: + details["ReplicationGroup"] = [ + g for g in repl_group if g["RegionName"] != delete["RegionName"] + ] + # create new + create = update.get("Create") + if create: + exists = [g for g in repl_group if g["RegionName"] == create["RegionName"]] + if exists: + continue 
+ new_group = { + "RegionName": create["RegionName"], + "ReplicaStatus": "ACTIVE", + "ReplicaStatusDescription": "Replica active", + } + details["ReplicationGroup"].append(new_group) + return UpdateGlobalTableOutput(GlobalTableDescription=details) + + # + # Kinesis Streaming + # + + def enable_kinesis_streaming_destination( + self, + context: RequestContext, + table_name: TableName, + stream_arn: StreamArn, + enable_kinesis_streaming_configuration: EnableKinesisStreamingConfiguration = None, + **kwargs, + ) -> KinesisStreamingDestinationOutput: + self.ensure_table_exists(context.account_id, context.region, table_name) + + stream = self._event_forwarder.is_kinesis_stream_exists(stream_arn=stream_arn) + if not stream: + raise ValidationException("User does not have a permission to use kinesis stream") + + table_def = get_store(context.account_id, context.region).table_definitions.setdefault( + table_name, {} + ) + + dest_status = table_def.get("KinesisDataStreamDestinationStatus") + if dest_status not in ["DISABLED", "ENABLE_FAILED", None]: + raise ValidationException( + "Table is not in a valid state to enable Kinesis Streaming " + "Destination:EnableKinesisStreamingDestination must be DISABLED or ENABLE_FAILED " + "to perform ENABLE operation." + ) + + table_def["KinesisDataStreamDestinations"] = ( + table_def.get("KinesisDataStreamDestinations") or [] + ) + # remove the stream destination if already present + table_def["KinesisDataStreamDestinations"] = [ + t for t in table_def["KinesisDataStreamDestinations"] if t["StreamArn"] != stream_arn + ] + # append the active stream destination at the end of the list + table_def["KinesisDataStreamDestinations"].append( + { + "DestinationStatus": DestinationStatus.ACTIVE, + "DestinationStatusDescription": "Stream is active", + "StreamArn": stream_arn, + } + ) + table_def["KinesisDataStreamDestinationStatus"] = DestinationStatus.ACTIVE + return KinesisStreamingDestinationOutput( + DestinationStatus=DestinationStatus.ACTIVE, StreamArn=stream_arn, TableName=table_name + ) + + def disable_kinesis_streaming_destination( + self, + context: RequestContext, + table_name: TableName, + stream_arn: StreamArn, + enable_kinesis_streaming_configuration: EnableKinesisStreamingConfiguration = None, + **kwargs, + ) -> KinesisStreamingDestinationOutput: + self.ensure_table_exists(context.account_id, context.region, table_name) + + stream = self._event_forwarder.is_kinesis_stream_exists(stream_arn=stream_arn) + if not stream: + raise ValidationException( + "User does not have a permission to use kinesis stream", + ) + + table_def = get_store(context.account_id, context.region).table_definitions.setdefault( + table_name, {} + ) + + stream_destinations = table_def.get("KinesisDataStreamDestinations") + if stream_destinations: + if table_def["KinesisDataStreamDestinationStatus"] == DestinationStatus.ACTIVE: + for dest in stream_destinations: + if ( + dest["StreamArn"] == stream_arn + and dest["DestinationStatus"] == DestinationStatus.ACTIVE + ): + dest["DestinationStatus"] = DestinationStatus.DISABLED + dest["DestinationStatusDescription"] = "Stream is disabled" + table_def["KinesisDataStreamDestinationStatus"] = DestinationStatus.DISABLED + return KinesisStreamingDestinationOutput( + DestinationStatus=DestinationStatus.DISABLED, + StreamArn=stream_arn, + TableName=table_name, + ) + raise ValidationException( + "Table is not in a valid state to disable Kinesis Streaming Destination:" + "DisableKinesisStreamingDestination must be ACTIVE to perform DISABLE operation."
+ ) + + def describe_kinesis_streaming_destination( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DescribeKinesisStreamingDestinationOutput: + self.ensure_table_exists(context.account_id, context.region, table_name) + + table_def = ( + get_store(context.account_id, context.region).table_definitions.get(table_name) or {} + ) + + stream_destinations = table_def.get("KinesisDataStreamDestinations") or [] + return DescribeKinesisStreamingDestinationOutput( + KinesisDataStreamDestinations=stream_destinations, TableName=table_name + ) + + # + # Continuous Backups + # + + def describe_continuous_backups( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DescribeContinuousBackupsOutput: + self.get_global_table_region(context, table_name) + store = get_store(context.account_id, context.region) + continuous_backup_description = ( + store.table_properties.get(table_name, {}).get("ContinuousBackupsDescription") + ) or ContinuousBackupsDescription( + ContinuousBackupsStatus=ContinuousBackupsStatus.ENABLED, + PointInTimeRecoveryDescription=PointInTimeRecoveryDescription( + PointInTimeRecoveryStatus=PointInTimeRecoveryStatus.DISABLED + ), + ) + + return DescribeContinuousBackupsOutput( + ContinuousBackupsDescription=continuous_backup_description + ) + + def update_continuous_backups( + self, + context: RequestContext, + table_name: TableName, + point_in_time_recovery_specification: PointInTimeRecoverySpecification, + **kwargs, + ) -> UpdateContinuousBackupsOutput: + self.get_global_table_region(context, table_name) + + store = get_store(context.account_id, context.region) + pit_recovery_status = ( + PointInTimeRecoveryStatus.ENABLED + if point_in_time_recovery_specification["PointInTimeRecoveryEnabled"] + else PointInTimeRecoveryStatus.DISABLED + ) + continuous_backup_description = ContinuousBackupsDescription( + ContinuousBackupsStatus=ContinuousBackupsStatus.ENABLED, + PointInTimeRecoveryDescription=PointInTimeRecoveryDescription( + PointInTimeRecoveryStatus=pit_recovery_status + ), + ) + table_props = store.table_properties.setdefault(table_name, {}) + table_props["ContinuousBackupsDescription"] = continuous_backup_description + + return UpdateContinuousBackupsOutput( + ContinuousBackupsDescription=continuous_backup_description + ) + + # + # Helpers + # + + @staticmethod + def ddb_region_name(region_name: str) -> str: + """Map `local` or `localhost` region to the us-east-1 region. These values are used by NoSQL Workbench.""" + # TODO: could this be somehow moved into the request handler chain? + if region_name in ("local", "localhost"): + region_name = AWS_REGION_US_EAST_1 + + return region_name + + @staticmethod + def table_exists(account_id: str, region_name: str, table_name: str) -> bool: + region_name = DynamoDBProvider.ddb_region_name(region_name) + + client = connect_to( + aws_access_key_id=account_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + region_name=region_name, + ).dynamodb + return dynamodb_table_exists(table_name, client) + + @staticmethod + def ensure_table_exists(account_id: str, region_name: str, table_name: str): + """ + Raise ResourceNotFoundException if the given table does not exist. 
+ + :param account_id: account id + :param region_name: region name + :param table_name: table name + :raise: ResourceNotFoundException if table does not exist in DynamoDB Local + """ + if not DynamoDBProvider.table_exists(account_id, region_name, table_name): + raise ResourceNotFoundException("Cannot do operations on a non-existent table") + + @staticmethod + def get_global_table_region(context: RequestContext, table_name: str) -> str: + """ + Return the table region considering that it might be a replicated table. + + Replication in LocalStack works by keeping a single copy of a table and forwarding + requests to the region where this table exists. + + This method does not check whether the table actually exists in DDBLocal. + + :param context: request context + :param table_name: table name + :return: region + """ + store = get_store(context.account_id, context.region) + + table_region = store.TABLE_REGION.get(table_name) + replicated_at = store.REPLICAS.get(table_name, {}).keys() + + if context.region == table_region or context.region in replicated_at: + return table_region + + return context.region + + @staticmethod + def prepare_request_headers(headers: Dict, account_id: str, region_name: str): + """ + Modify the Credentials field of the Authorization header to achieve namespacing in DynamoDBLocal. + """ + region_name = DynamoDBProvider.ddb_region_name(region_name) + key = get_ddb_access_key(account_id, region_name) + + # DynamoDBLocal namespaces based on the value of Credentials + # Since we want to namespace by both account ID and region, use an aggregate key + # We also replace the region to keep compatibility with NoSQL Workbench + headers["Authorization"] = re.sub( + AUTH_CREDENTIAL_REGEX, + rf"Credential={key}/\2/{region_name}/\4/", + headers.get("Authorization") or "", + flags=re.IGNORECASE, + ) + + def fix_consumed_capacity(self, request: Dict, result: Dict): + # make sure we append 'ConsumedCapacity', which is properly + # returned by dynalite, but not by AWS's DynamoDBLocal + table_name = request.get("TableName") + return_cap = request.get("ReturnConsumedCapacity") + if "ConsumedCapacity" not in result and return_cap in ["TOTAL", "INDEXES"]: + result["ConsumedCapacity"] = { + "TableName": table_name, + "CapacityUnits": 5, # TODO hardcoded + "ReadCapacityUnits": 2, + "WriteCapacityUnits": 3, + } + + def fix_table_arn(self, account_id: str, region_name: str, arn: str) -> str: + """ + Set the correct account ID and region in ARNs returned by DynamoDB Local.
+ """ + partition = get_partition(region_name) + return ( + arn.replace("arn:aws:", f"arn:{partition}:") + .replace(":ddblocal:", f":{region_name}:") + .replace(":000000000000:", f":{account_id}:") + ) + + def prepare_transact_write_item_records( + self, + account_id: str, + region_name: str, + transact_items: TransactWriteItemList, + existing_items: BatchGetResponseMap, + updated_items: BatchGetResponseMap, + tables_stream_type: dict[TableName, TableStreamType], + ) -> RecordsMap: + records_only_map: dict[TableName, StreamRecords] = defaultdict(list) + + for request in transact_items: + record = self.get_record_template(region_name) + match request: + case {"Put": {"TableName": table_name, "Item": new_item}}: + if not (stream_type := tables_stream_type.get(table_name)): + continue + keys = SchemaExtractor.extract_keys( + item=new_item, + table_name=table_name, + account_id=account_id, + region_name=region_name, + ) + existing_item = find_item_for_keys_values_in_batch( + table_name, keys, existing_items + ) + if existing_item == new_item: + continue + + if stream_type.stream_view_type: + record["dynamodb"]["StreamViewType"] = stream_type.stream_view_type + + record["eventID"] = short_uid() + record["eventName"] = "INSERT" if not existing_item else "MODIFY" + record["dynamodb"]["Keys"] = keys + if stream_type.needs_new_image: + record["dynamodb"]["NewImage"] = new_item + if existing_item and stream_type.needs_old_image: + record["dynamodb"]["OldImage"] = existing_item + + record_item = de_dynamize_record(new_item) + record["dynamodb"]["SizeBytes"] = _get_size_bytes(record_item) + records_only_map[table_name].append(record) + continue + + case {"Update": {"TableName": table_name, "Key": keys}}: + if not (stream_type := tables_stream_type.get(table_name)): + continue + updated_item = find_item_for_keys_values_in_batch( + table_name, keys, updated_items + ) + if not updated_item: + continue + + existing_item = find_item_for_keys_values_in_batch( + table_name, keys, existing_items + ) + if existing_item == updated_item: + # if the item is the same as the previous version, AWS does not send an event + continue + + if stream_type.stream_view_type: + record["dynamodb"]["StreamViewType"] = stream_type.stream_view_type + + record["eventID"] = short_uid() + record["eventName"] = "MODIFY" if existing_item else "INSERT" + record["dynamodb"]["Keys"] = keys + + if existing_item and stream_type.needs_old_image: + record["dynamodb"]["OldImage"] = existing_item + if stream_type.needs_new_image: + record["dynamodb"]["NewImage"] = updated_item + + record["dynamodb"]["SizeBytes"] = _get_size_bytes(updated_item) + records_only_map[table_name].append(record) + continue + + case {"Delete": {"TableName": table_name, "Key": keys}}: + if not (stream_type := tables_stream_type.get(table_name)): + continue + + existing_item = find_item_for_keys_values_in_batch( + table_name, keys, existing_items + ) + if not existing_item: + continue + + if stream_type.stream_view_type: + record["dynamodb"]["StreamViewType"] = stream_type.stream_view_type + + record["eventID"] = short_uid() + record["eventName"] = "REMOVE" + record["dynamodb"]["Keys"] = keys + if stream_type.needs_old_image: + record["dynamodb"]["OldImage"] = existing_item + record_item = de_dynamize_record(existing_item) + record["dynamodb"]["SizeBytes"] = _get_size_bytes(record_item) + + records_only_map[table_name].append(record) + continue + + records_map = { + table_name: TableRecords( + records=records, table_stream_type=tables_stream_type[table_name] + ) + for 
table_name, records in records_only_map.items()
+        }
+
+        return records_map
+
+    def batch_execute_statement(
+        self,
+        context: RequestContext,
+        statements: PartiQLBatchRequest,
+        return_consumed_capacity: ReturnConsumedCapacity = None,
+        **kwargs,
+    ) -> BatchExecuteStatementOutput:
+        result = self.forward_request(context)
+        return result
+
+    def prepare_batch_write_item_records(
+        self,
+        account_id: str,
+        region_name: str,
+        tables_stream_type: dict[TableName, TableStreamType],
+        request_items: BatchWriteItemRequestMap,
+        existing_items: BatchGetResponseMap,
+    ) -> RecordsMap:
+        records_map: RecordsMap = {}
+
+        # only iterate over tables with streams
+        for table_name, stream_type in tables_stream_type.items():
+            existing_items_for_table_unordered = existing_items.get(table_name, [])
+            table_records: StreamRecords = []
+
+            def find_existing_item_for_keys_values(item_keys: dict) -> AttributeMap | None:
+                """
+                Look up the provided key subset in the existing items for this table and, if
+                present, return the full item.
+                :param item_keys: the request item keys
+                :return: the matching item, or None if no existing item contains the keys
+                """
+                keys_items = item_keys.items()
+                for item in existing_items_for_table_unordered:
+                    if keys_items <= item.items():
+                        return item
+
+            for write_request in request_items[table_name]:
+                record = self.get_record_template(
+                    region_name,
+                    stream_view_type=stream_type.stream_view_type,
+                )
+                match write_request:
+                    case {"PutRequest": request}:
+                        keys = SchemaExtractor.extract_keys(
+                            item=request["Item"],
+                            table_name=table_name,
+                            account_id=account_id,
+                            region_name=region_name,
+                        )
+                        # we need to know whether an existing item was there even if we don't
+                        # need it for `OldImage`, because its presence determines the `eventName`
+                        existing_item = find_existing_item_for_keys_values(keys)
+                        if existing_item == request["Item"]:
+                            # if the item is the same as the previous version, AWS does not send an event
+                            continue
+                        record["eventID"] = short_uid()
+                        record["dynamodb"]["SizeBytes"] = _get_size_bytes(request["Item"])
+                        record["eventName"] = "INSERT" if not existing_item else "MODIFY"
+                        record["dynamodb"]["Keys"] = keys
+
+                        if stream_type.needs_new_image:
+                            record["dynamodb"]["NewImage"] = request["Item"]
+                        if existing_item and stream_type.needs_old_image:
+                            record["dynamodb"]["OldImage"] = existing_item
+
+                        table_records.append(record)
+                        continue
+
+                    case {"DeleteRequest": request}:
+                        keys = request["Key"]
+                        if not (existing_item := find_existing_item_for_keys_values(keys)):
+                            continue
+
+                        record["eventID"] = short_uid()
+                        record["eventName"] = "REMOVE"
+                        record["dynamodb"]["Keys"] = keys
+                        if stream_type.needs_old_image:
+                            record["dynamodb"]["OldImage"] = existing_item
+                        record["dynamodb"]["SizeBytes"] = _get_size_bytes(existing_item)
+                        table_records.append(record)
+                        continue
+
+            records_map[table_name] = TableRecords(
+                records=table_records, table_stream_type=stream_type
+            )
+
+        return records_map
+
+    def forward_stream_records(
+        self,
+        account_id: str,
+        region_name: str,
+        records_map: RecordsMap,
+    ) -> None:
+        if not records_map:
+            return
+
+        self._event_forwarder.forward_to_targets(
+            account_id, region_name, records_map, background=True
+        )
+
+    @staticmethod
+    def get_record_template(region_name: str, stream_view_type: str | None = None) -> StreamRecord:
+        record = {
+            "eventID": short_uid(),
+            "eventVersion": "1.1",
+            "dynamodb": {
+                # expects nearest second rounded down
+                "ApproximateCreationDateTime": int(time.time()),
+                "SizeBytes": -1,
+            },
+            "awsRegion": region_name,
+            "eventSource": "aws:dynamodb",
+        }
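+        # For orientation, the template built above looks roughly like this
+        # (illustrative values, not actual output):
+        #   {"eventID": "...", "eventVersion": "1.1",
+        #    "dynamodb": {"ApproximateCreationDateTime": 1700000000, "SizeBytes": -1},
+        #    "awsRegion": "us-east-1", "eventSource": "aws:dynamodb"}
+        # Callers fill in "eventName", "Keys", the images, and the real "SizeBytes".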
+        if stream_view_type:
+            record["dynamodb"]["StreamViewType"] = stream_view_type
+
+        return record
+
+    def check_provisioned_throughput(self, action):
+        """
+        Check rate limiting for an API operation and raise an error if provisioned throughput is exceeded.
+        """
+        if self.should_throttle(action):
+            message = (
+                "The level of configured provisioned throughput for the table was exceeded. "
+                + "Consider increasing your provisioning level with the UpdateTable API"
+            )
+            raise ProvisionedThroughputExceededException(message)
+
+    def action_should_throttle(self, action, actions):
+        throttled = [f"{ACTION_PREFIX}{a}" for a in actions]
+        return (action in throttled) or (action in actions)
+
+    def should_throttle(self, action):
+        if (
+            not config.DYNAMODB_READ_ERROR_PROBABILITY
+            and not config.DYNAMODB_ERROR_PROBABILITY
+            and not config.DYNAMODB_WRITE_ERROR_PROBABILITY
+        ):
+            # early exit so we don't need to call random()
+            return False
+
+        rand = random.random()
+        if rand < config.DYNAMODB_READ_ERROR_PROBABILITY and self.action_should_throttle(
+            action, READ_THROTTLED_ACTIONS
+        ):
+            return True
+        elif rand < config.DYNAMODB_WRITE_ERROR_PROBABILITY and self.action_should_throttle(
+            action, WRITE_THROTTLED_ACTIONS
+        ):
+            return True
+        elif rand < config.DYNAMODB_ERROR_PROBABILITY and self.action_should_throttle(
+            action, THROTTLED_ACTIONS
+        ):
+            return True
+        return False
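+
+
+# Illustrative note, not part of the provider logic: the throttling above is
+# configuration-driven. For example, starting LocalStack with
+# DYNAMODB_ERROR_PROBABILITY=0.2 makes roughly 20% of the throttled actions fail
+# with a ProvisionedThroughputExceededException via check_provisioned_throughput().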
+
+
+# ---
+# Misc. util functions
+# ---
+
+
+def _get_size_bytes(item: dict) -> int:
+    try:
+        size_bytes = len(json.dumps(item, separators=(",", ":")))
+    except TypeError:
+        size_bytes = len(str(item))
+    return size_bytes
+
+
+def get_global_secondary_index(account_id: str, region_name: str, table_name: str, index_name: str):
+    schema = SchemaExtractor.get_table_schema(table_name, account_id, region_name)
+    for index in schema["Table"].get("GlobalSecondaryIndexes", []):
+        if index["IndexName"] == index_name:
+            return index
+    raise ResourceNotFoundException("Index not found")
+
+
+def is_local_secondary_index(
+    account_id: str, region_name: str, table_name: str, index_name: str
+) -> bool:
+    schema = SchemaExtractor.get_table_schema(table_name, account_id, region_name)
+    for index in schema["Table"].get("LocalSecondaryIndexes", []):
+        if index["IndexName"] == index_name:
+            return True
+    return False
+
+
+def is_index_query_valid(account_id: str, region_name: str, query_data: dict) -> bool:
+    table_name = to_str(query_data["TableName"])
+    index_name = to_str(query_data["IndexName"])
+    if is_local_secondary_index(account_id, region_name, table_name, index_name):
+        return True
+    index_query_type = query_data.get("Select")
+    index = get_global_secondary_index(account_id, region_name, table_name, index_name)
+    index_projection_type = index.get("Projection").get("ProjectionType")
+    if index_query_type == "ALL_ATTRIBUTES" and index_projection_type != "ALL":
+        return False
+    return True
+
+
+def get_table_stream_type(
+    account_id: str, region_name: str, table_name_or_arn: str
+) -> TableStreamType | None:
+    """
+    :param account_id: the account id of the table
+    :param region_name: the region of the table
+    :param table_name_or_arn: the table name or ARN
+    :return: a TableStreamType object if the table has streams enabled or an active Kinesis
+      streaming destination; None otherwise
+    """
+    if not table_name_or_arn:
+        return
+
+    table_name = table_name_or_arn.split(":table/")[-1]
+
+    is_kinesis = False
+    stream_view_type = None
+
+    if table_definition := get_store(account_id, region_name).table_definitions.get(table_name):
+        if table_definition.get("KinesisDataStreamDestinationStatus") == "ACTIVE":
+            is_kinesis = True
+
+    table_arn = arns.dynamodb_table_arn(table_name, account_id=account_id, region_name=region_name)
+
+    if (
+        stream := dynamodbstreams_api.get_stream_for_table(account_id, region_name, table_arn)
+    ) and stream["StreamStatus"] in (StreamStatus.ENABLING, StreamStatus.ENABLED):
+        stream_view_type = stream["StreamViewType"]
+
+    if is_kinesis or stream_view_type:
+        return TableStreamType(stream_view_type, is_kinesis=is_kinesis)
+
+
+def get_updated_records(
+    account_id: str,
+    region_name: str,
+    table_name: str,
+    existing_items: List,
+    server_url: str,
+    table_stream_type: TableStreamType,
+) -> RecordsMap:
+    """
+    Determine the list of record updates, to be sent to a DDB stream after a PartiQL update operation.
+
+    Note: This is currently a fairly expensive operation, as we need to retrieve the list of all items
+    from the table and compare them against the previously available items. This is a limitation, as
+    we're currently using the DynamoDB Local backend as a blackbox. In the future, we should consider
+    hooking into the PartiQL query execution inside DynamoDB Local and directly extracting the list of
+    updated items.
+    """
+    result = []
+
+    key_schema = SchemaExtractor.get_key_schema(table_name, account_id, region_name)
+    before = ItemSet(existing_items, key_schema=key_schema)
+    all_table_items = ItemFinder.get_all_table_items(
+        account_id=account_id,
+        region_name=region_name,
+        table_name=table_name,
+        endpoint_url=server_url,
+    )
+    after = ItemSet(all_table_items, key_schema=key_schema)
+
+    def _add_record(item, comparison_set: ItemSet):
+        matching_item = comparison_set.find_item(item)
+        if matching_item == item:
+            return
+
+        # determine event type
+        if comparison_set == after:
+            if matching_item:
+                return
+            event_name = "REMOVE"
+        else:
+            event_name = "INSERT" if not matching_item else "MODIFY"
+
+        old_image = item if event_name == "REMOVE" else matching_item
+        new_image = matching_item if event_name == "REMOVE" else item
+
+        # prepare record
+        keys = SchemaExtractor.extract_keys_for_schema(item=item, key_schema=key_schema)
+
+        record = DynamoDBProvider.get_record_template(region_name)
+        record["eventName"] = event_name
+        record["dynamodb"]["Keys"] = keys
+        record["dynamodb"]["SizeBytes"] = _get_size_bytes(item)
+
+        if table_stream_type.stream_view_type:
+            record["dynamodb"]["StreamViewType"] = table_stream_type.stream_view_type
+        if table_stream_type.needs_new_image:
+            record["dynamodb"]["NewImage"] = new_image
+        if old_image and table_stream_type.needs_old_image:
+            record["dynamodb"]["OldImage"] = old_image
+
+        result.append(record)
+
+    # loop over items in new item list (find INSERT/MODIFY events)
+    for item in after.items_list:
+        _add_record(item, before)
+    # loop over items in old item list (find REMOVE events)
+    for item in before.items_list:
+        _add_record(item, after)
+
+    return {table_name: TableRecords(records=result, table_stream_type=table_stream_type)}
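+
+
+# Illustrative walk-through of get_updated_records() with assumed data (not an
+# actual test): given before = [{"pk": {"S": "1"}, "v": {"N": "1"}}] and
+# after = [{"pk": {"S": "1"}, "v": {"N": "2"}}, {"pk": {"S": "2"}, "v": {"N": "1"}}],
+# the result contains a MODIFY record for pk=1 and an INSERT record for pk=2;
+# an item present only in `before` would instead produce a REMOVE record.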
stream["StreamViewType"] + + dynamodbstreams_api.add_dynamodb_stream( + account_id=account_id, + region_name=region_name, + table_name=table_name, + latest_stream_label=latest_stream_label, + view_type=view_type, + enabled=enabled, + ) + + +def dynamodb_get_table_stream_specification(account_id: str, region_name: str, table_name: str): + try: + table_schema = SchemaExtractor.get_table_schema( + table_name, account_id=account_id, region_name=region_name + ) + return table_schema["Table"].get("StreamSpecification") + except Exception as e: + LOG.info( + "Unable to get stream specification for table %s: %s %s", + table_name, + e, + traceback.format_exc(), + ) + raise e + + +def find_item_for_keys_values_in_batch( + table_name: str, item_keys: dict, batch: BatchGetResponseMap +) -> AttributeMap | None: + """ + This function looks up in the existing items for the provided item keys subset. If present, returns the + full item. + :param table_name: the table name for the item + :param item_keys: the request item keys + :param batch: the values in which to look for the item + :return: a DynamoDB Item (AttributeMap) + """ + keys = item_keys.items() + for item in batch.get(table_name, []): + if keys <= item.items(): + return item diff --git a/localstack/services/secretsmanager/__init__.py b/localstack-core/localstack/services/dynamodb/resource_providers/__init__.py similarity index 100% rename from localstack/services/secretsmanager/__init__.py rename to localstack-core/localstack/services/dynamodb/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable.py b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable.py new file mode 100644 index 0000000000000..af199a479576c --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable.py @@ -0,0 +1,423 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class DynamoDBGlobalTableProperties(TypedDict): + AttributeDefinitions: Optional[list[AttributeDefinition]] + KeySchema: Optional[list[KeySchema]] + Replicas: Optional[list[ReplicaSpecification]] + Arn: Optional[str] + BillingMode: Optional[str] + GlobalSecondaryIndexes: Optional[list[GlobalSecondaryIndex]] + LocalSecondaryIndexes: Optional[list[LocalSecondaryIndex]] + SSESpecification: Optional[SSESpecification] + StreamArn: Optional[str] + StreamSpecification: Optional[StreamSpecification] + TableId: Optional[str] + TableName: Optional[str] + TimeToLiveSpecification: Optional[TimeToLiveSpecification] + WriteProvisionedThroughputSettings: Optional[WriteProvisionedThroughputSettings] + + +class AttributeDefinition(TypedDict): + AttributeName: Optional[str] + AttributeType: Optional[str] + + +class KeySchema(TypedDict): + AttributeName: Optional[str] + KeyType: Optional[str] + + +class Projection(TypedDict): + NonKeyAttributes: Optional[list[str]] + ProjectionType: Optional[str] + + +class TargetTrackingScalingPolicyConfiguration(TypedDict): + TargetValue: Optional[float] + DisableScaleIn: Optional[bool] + ScaleInCooldown: Optional[int] + ScaleOutCooldown: Optional[int] + + +class CapacityAutoScalingSettings(TypedDict): + MaxCapacity: 
Optional[int] + MinCapacity: Optional[int] + TargetTrackingScalingPolicyConfiguration: Optional[TargetTrackingScalingPolicyConfiguration] + SeedCapacity: Optional[int] + + +class WriteProvisionedThroughputSettings(TypedDict): + WriteCapacityAutoScalingSettings: Optional[CapacityAutoScalingSettings] + + +class GlobalSecondaryIndex(TypedDict): + IndexName: Optional[str] + KeySchema: Optional[list[KeySchema]] + Projection: Optional[Projection] + WriteProvisionedThroughputSettings: Optional[WriteProvisionedThroughputSettings] + + +class LocalSecondaryIndex(TypedDict): + IndexName: Optional[str] + KeySchema: Optional[list[KeySchema]] + Projection: Optional[Projection] + + +class ContributorInsightsSpecification(TypedDict): + Enabled: Optional[bool] + + +class ReadProvisionedThroughputSettings(TypedDict): + ReadCapacityAutoScalingSettings: Optional[CapacityAutoScalingSettings] + ReadCapacityUnits: Optional[int] + + +class ReplicaGlobalSecondaryIndexSpecification(TypedDict): + IndexName: Optional[str] + ContributorInsightsSpecification: Optional[ContributorInsightsSpecification] + ReadProvisionedThroughputSettings: Optional[ReadProvisionedThroughputSettings] + + +class PointInTimeRecoverySpecification(TypedDict): + PointInTimeRecoveryEnabled: Optional[bool] + + +class ReplicaSSESpecification(TypedDict): + KMSMasterKeyId: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class KinesisStreamSpecification(TypedDict): + StreamArn: Optional[str] + + +class ReplicaSpecification(TypedDict): + Region: Optional[str] + ContributorInsightsSpecification: Optional[ContributorInsightsSpecification] + DeletionProtectionEnabled: Optional[bool] + GlobalSecondaryIndexes: Optional[list[ReplicaGlobalSecondaryIndexSpecification]] + KinesisStreamSpecification: Optional[KinesisStreamSpecification] + PointInTimeRecoverySpecification: Optional[PointInTimeRecoverySpecification] + ReadProvisionedThroughputSettings: Optional[ReadProvisionedThroughputSettings] + SSESpecification: Optional[ReplicaSSESpecification] + TableClass: Optional[str] + Tags: Optional[list[Tag]] + + +class SSESpecification(TypedDict): + SSEEnabled: Optional[bool] + SSEType: Optional[str] + + +class StreamSpecification(TypedDict): + StreamViewType: Optional[str] + + +class TimeToLiveSpecification(TypedDict): + Enabled: Optional[bool] + AttributeName: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class DynamoDBGlobalTableProvider(ResourceProvider[DynamoDBGlobalTableProperties]): + TYPE = "AWS::DynamoDB::GlobalTable" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[DynamoDBGlobalTableProperties], + ) -> ProgressEvent[DynamoDBGlobalTableProperties]: + """ + Create a new resource. 
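+
+        An illustrative sketch of the desired state this handler consumes. The
+        property names come from the TypedDicts above; the concrete values are
+        assumptions for illustration only:
+
+            {
+                "TableName": "my-global-table",
+                "AttributeDefinitions": [{"AttributeName": "pk", "AttributeType": "S"}],
+                "KeySchema": [{"AttributeName": "pk", "KeyType": "HASH"}],
+                "Replicas": [{"Region": "us-east-1"}, {"Region": "eu-central-1"}],
+            }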
+ + Primary identifier fields: + - /properties/TableName + + Required properties: + - KeySchema + - AttributeDefinitions + - Replicas + + Create-only properties: + - /properties/LocalSecondaryIndexes + - /properties/TableName + - /properties/KeySchema + + Read-only properties: + - /properties/Arn + - /properties/StreamArn + - /properties/TableId + + IAM permissions required: + - dynamodb:CreateTable + - dynamodb:CreateTableReplica + - dynamodb:Describe* + - dynamodb:UpdateTimeToLive + - dynamodb:UpdateContributorInsights + - dynamodb:UpdateContinuousBackups + - dynamodb:ListTagsOfResource + - dynamodb:Query + - dynamodb:Scan + - dynamodb:UpdateItem + - dynamodb:PutItem + - dynamodb:GetItem + - dynamodb:DeleteItem + - dynamodb:BatchWriteItem + - dynamodb:TagResource + - dynamodb:EnableKinesisStreamingDestination + - dynamodb:DisableKinesisStreamingDestination + - dynamodb:DescribeKinesisStreamingDestination + - dynamodb:DescribeTableReplicaAutoScaling + - dynamodb:UpdateTableReplicaAutoScaling + - dynamodb:TagResource + - application-autoscaling:DeleteScalingPolicy + - application-autoscaling:DeleteScheduledAction + - application-autoscaling:DeregisterScalableTarget + - application-autoscaling:Describe* + - application-autoscaling:PutScalingPolicy + - application-autoscaling:PutScheduledAction + - application-autoscaling:RegisterScalableTarget + - kinesis:ListStreams + - kinesis:DescribeStream + - kinesis:PutRecords + - kms:CreateGrant + - kms:Describe* + - kms:Get* + - kms:List* + - kms:RevokeGrant + - cloudwatch:PutMetricData + + """ + model = request.desired_state + + if not request.custom_context.get(REPEATED_INVOCATION): + request.custom_context[REPEATED_INVOCATION] = True + + if not model.get("TableName"): + model["TableName"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + create_params = util.select_attributes( + model, + [ + "AttributeDefinitions", + "BillingMode", + "GlobalSecondaryIndexes", + "KeySchema", + "LocalSecondaryIndexes", + "Replicas", + "SSESpecification", + "StreamSpecification", + "TableName", + "WriteProvisionedThroughputSettings", + ], + ) + + replicas = create_params.pop("Replicas", []) + + if sse_specification := create_params.get("SSESpecification"): + # rename bool attribute to fit boto call + sse_specification["Enabled"] = sse_specification.pop("SSEEnabled") + + if stream_spec := model.get("StreamSpecification"): + create_params["StreamSpecification"] = { + "StreamEnabled": True, + **stream_spec, + } + + creation_response = request.aws_client_factory.dynamodb.create_table(**create_params) + model["Arn"] = creation_response["TableDescription"]["TableArn"] + model["TableId"] = creation_response["TableDescription"]["TableId"] + + if creation_response["TableDescription"].get("LatestStreamArn"): + model["StreamArn"] = creation_response["TableDescription"]["LatestStreamArn"] + + replicas_to_create = [] + for replica in replicas: + create = { + "RegionName": replica.get("Region"), + "KMSMasterKeyId": replica.get("KMSMasterKeyId"), + "ProvisionedThroughputOverride": replica.get("ProvisionedThroughputOverride"), + "GlobalSecondaryIndexes": replica.get("GlobalSecondaryIndexes"), + "TableClassOverride": replica.get("TableClassOverride"), + } + + create = {k: v for k, v in create.items() if v is not None} + + replicas_to_create.append({"Create": create}) + + request.aws_client_factory.dynamodb.update_table( + ReplicaUpdates=replicas_to_create, TableName=model["TableName"] + ) + + # add TTL config + if 
ttl_config := model.get("TimeToLiveSpecification"): + request.aws_client_factory.dynamodb.update_time_to_live( + TableName=model["TableName"], TimeToLiveSpecification=ttl_config + ) + + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + status = request.aws_client_factory.dynamodb.describe_table(TableName=model["TableName"])[ + "Table" + ]["TableStatus"] + if status == "ACTIVE": + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + elif status == "CREATING": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + else: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + custom_context=request.custom_context, + message=f"Table creation failed with status {status}", + ) + + def read( + self, + request: ResourceRequest[DynamoDBGlobalTableProperties], + ) -> ProgressEvent[DynamoDBGlobalTableProperties]: + """ + Fetch resource information + + IAM permissions required: + - dynamodb:Describe* + - application-autoscaling:Describe* + - cloudwatch:PutMetricData + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[DynamoDBGlobalTableProperties], + ) -> ProgressEvent[DynamoDBGlobalTableProperties]: + """ + Delete a resource + + IAM permissions required: + - dynamodb:Describe* + - application-autoscaling:DeleteScalingPolicy + - application-autoscaling:DeleteScheduledAction + - application-autoscaling:DeregisterScalableTarget + - application-autoscaling:Describe* + - application-autoscaling:PutScalingPolicy + - application-autoscaling:PutScheduledAction + - application-autoscaling:RegisterScalableTarget + """ + + model = request.desired_state + if not request.custom_context.get(REPEATED_INVOCATION): + request.custom_context[REPEATED_INVOCATION] = True + request.aws_client_factory.dynamodb.delete_table(TableName=model["TableName"]) + + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + try: + request.aws_client_factory.dynamodb.describe_table(TableName=model["TableName"]) + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + except Exception as ex: + if "ResourceNotFoundException" in str(ex): + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + custom_context=request.custom_context, + message=str(ex), + ) + + def update( + self, + request: ResourceRequest[DynamoDBGlobalTableProperties], + ) -> ProgressEvent[DynamoDBGlobalTableProperties]: + """ + Update a resource + + IAM permissions required: + - dynamodb:Describe* + - dynamodb:CreateTableReplica + - dynamodb:UpdateTable + - dynamodb:UpdateTimeToLive + - dynamodb:UpdateContinuousBackups + - dynamodb:UpdateContributorInsights + - dynamodb:ListTagsOfResource + - dynamodb:Query + - dynamodb:Scan + - dynamodb:UpdateItem + - dynamodb:PutItem + - dynamodb:GetItem + - dynamodb:DeleteItem + - dynamodb:BatchWriteItem + - dynamodb:DeleteTable + - dynamodb:DeleteTableReplica + - dynamodb:UpdateItem + - dynamodb:TagResource + - dynamodb:UntagResource + - dynamodb:EnableKinesisStreamingDestination + - dynamodb:DisableKinesisStreamingDestination + - 
dynamodb:DescribeKinesisStreamingDestination + - dynamodb:DescribeTableReplicaAutoScaling + - dynamodb:UpdateTableReplicaAutoScaling + - application-autoscaling:DeleteScalingPolicy + - application-autoscaling:DeleteScheduledAction + - application-autoscaling:DeregisterScalableTarget + - application-autoscaling:Describe* + - application-autoscaling:PutScalingPolicy + - application-autoscaling:PutScheduledAction + - application-autoscaling:RegisterScalableTarget + - kinesis:ListStreams + - kinesis:DescribeStream + - kinesis:PutRecords + - kms:CreateGrant + - kms:Describe* + - kms:Get* + - kms:List* + - kms:RevokeGrant + - cloudwatch:PutMetricData + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable.schema.json b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable.schema.json new file mode 100644 index 0000000000000..3caa6a203393a --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable.schema.json @@ -0,0 +1,574 @@ +{ + "typeName": "AWS::DynamoDB::GlobalTable", + "description": "Version: None. Resource Type definition for AWS::DynamoDB::GlobalTable", + "additionalProperties": false, + "properties": { + "Arn": { + "type": "string" + }, + "StreamArn": { + "type": "string" + }, + "AttributeDefinitions": { + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/AttributeDefinition" + }, + "minItems": 1 + }, + "BillingMode": { + "type": "string" + }, + "GlobalSecondaryIndexes": { + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/GlobalSecondaryIndex" + } + }, + "KeySchema": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/KeySchema" + }, + "minItems": 1, + "maxItems": 2 + }, + "LocalSecondaryIndexes": { + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/LocalSecondaryIndex" + } + }, + "WriteProvisionedThroughputSettings": { + "$ref": "#/definitions/WriteProvisionedThroughputSettings" + }, + "Replicas": { + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/ReplicaSpecification" + }, + "minItems": 1 + }, + "SSESpecification": { + "$ref": "#/definitions/SSESpecification" + }, + "StreamSpecification": { + "$ref": "#/definitions/StreamSpecification" + }, + "TableName": { + "type": "string" + }, + "TableId": { + "type": "string" + }, + "TimeToLiveSpecification": { + "$ref": "#/definitions/TimeToLiveSpecification" + } + }, + "definitions": { + "StreamSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "StreamViewType": { + "type": "string" + } + }, + "required": [ + "StreamViewType" + ] + }, + "KinesisStreamSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "StreamArn": { + "type": "string" + } + }, + "required": [ + "StreamArn" + ] + }, + "KeySchema": { + "type": "object", + "additionalProperties": false, + "properties": { + "AttributeName": { + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "KeyType": { + "type": "string" + } + }, + "required": [ + "KeyType", + "AttributeName" + ] + }, + "PointInTimeRecoverySpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "PointInTimeRecoveryEnabled": { + "type": "boolean" + } + } + }, + "ReplicaSpecification": { + 
"type": "object", + "additionalProperties": false, + "properties": { + "Region": { + "type": "string" + }, + "GlobalSecondaryIndexes": { + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/ReplicaGlobalSecondaryIndexSpecification" + } + }, + "ContributorInsightsSpecification": { + "$ref": "#/definitions/ContributorInsightsSpecification" + }, + "PointInTimeRecoverySpecification": { + "$ref": "#/definitions/PointInTimeRecoverySpecification" + }, + "TableClass": { + "type": "string" + }, + "DeletionProtectionEnabled": { + "type": "boolean" + }, + "SSESpecification": { + "$ref": "#/definitions/ReplicaSSESpecification" + }, + "Tags": { + "type": "array", + "insertionOrder": false, + "uniqueItems": true, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "ReadProvisionedThroughputSettings": { + "$ref": "#/definitions/ReadProvisionedThroughputSettings" + }, + "KinesisStreamSpecification": { + "$ref": "#/definitions/KinesisStreamSpecification" + } + }, + "required": [ + "Region" + ] + }, + "TimeToLiveSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "AttributeName": { + "type": "string" + }, + "Enabled": { + "type": "boolean" + } + }, + "required": [ + "Enabled" + ] + }, + "LocalSecondaryIndex": { + "type": "object", + "additionalProperties": false, + "properties": { + "IndexName": { + "type": "string", + "minLength": 3, + "maxLength": 255 + }, + "KeySchema": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/KeySchema" + }, + "maxItems": 2 + }, + "Projection": { + "$ref": "#/definitions/Projection" + } + }, + "required": [ + "IndexName", + "Projection", + "KeySchema" + ] + }, + "GlobalSecondaryIndex": { + "type": "object", + "additionalProperties": false, + "properties": { + "IndexName": { + "type": "string", + "minLength": 3, + "maxLength": 255 + }, + "KeySchema": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/KeySchema" + }, + "minItems": 1, + "maxItems": 2 + }, + "Projection": { + "$ref": "#/definitions/Projection" + }, + "WriteProvisionedThroughputSettings": { + "$ref": "#/definitions/WriteProvisionedThroughputSettings" + } + }, + "required": [ + "IndexName", + "Projection", + "KeySchema" + ] + }, + "SSESpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "SSEEnabled": { + "type": "boolean" + }, + "SSEType": { + "type": "string" + } + }, + "required": [ + "SSEEnabled" + ] + }, + "ReplicaSSESpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "KMSMasterKeyId": { + "type": "string" + } + }, + "required": [ + "KMSMasterKeyId" + ] + }, + "AttributeDefinition": { + "type": "object", + "additionalProperties": false, + "properties": { + "AttributeName": { + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "AttributeType": { + "type": "string" + } + }, + "required": [ + "AttributeName", + "AttributeType" + ] + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "Projection": { + "type": "object", + "additionalProperties": false, + "properties": { + "NonKeyAttributes": { + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "type": "string" + }, + "maxItems": 20 + }, + "ProjectionType": { + "type": "string" + } + } + }, + "ReplicaGlobalSecondaryIndexSpecification": { + 
"type": "object", + "additionalProperties": false, + "properties": { + "IndexName": { + "type": "string", + "minLength": 3, + "maxLength": 255 + }, + "ContributorInsightsSpecification": { + "$ref": "#/definitions/ContributorInsightsSpecification" + }, + "ReadProvisionedThroughputSettings": { + "$ref": "#/definitions/ReadProvisionedThroughputSettings" + } + }, + "required": [ + "IndexName" + ] + }, + "ContributorInsightsSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + } + }, + "required": [ + "Enabled" + ] + }, + "ReadProvisionedThroughputSettings": { + "type": "object", + "additionalProperties": false, + "properties": { + "ReadCapacityUnits": { + "type": "integer", + "minimum": 1 + }, + "ReadCapacityAutoScalingSettings": { + "$ref": "#/definitions/CapacityAutoScalingSettings" + } + } + }, + "WriteProvisionedThroughputSettings": { + "type": "object", + "additionalProperties": false, + "properties": { + "WriteCapacityAutoScalingSettings": { + "$ref": "#/definitions/CapacityAutoScalingSettings" + } + } + }, + "CapacityAutoScalingSettings": { + "type": "object", + "additionalProperties": false, + "properties": { + "MinCapacity": { + "type": "integer", + "minimum": 1 + }, + "MaxCapacity": { + "type": "integer", + "minimum": 1 + }, + "SeedCapacity": { + "type": "integer", + "minimum": 1 + }, + "TargetTrackingScalingPolicyConfiguration": { + "$ref": "#/definitions/TargetTrackingScalingPolicyConfiguration" + } + }, + "required": [ + "MinCapacity", + "MaxCapacity", + "TargetTrackingScalingPolicyConfiguration" + ] + }, + "TargetTrackingScalingPolicyConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "DisableScaleIn": { + "type": "boolean" + }, + "ScaleInCooldown": { + "type": "integer", + "minimum": 0 + }, + "ScaleOutCooldown": { + "type": "integer", + "minimum": 0 + }, + "TargetValue": { + "type": "number", + "format": "double" + } + }, + "required": [ + "TargetValue" + ] + } + }, + "required": [ + "KeySchema", + "AttributeDefinitions", + "Replicas" + ], + "readOnlyProperties": [ + "/properties/Arn", + "/properties/StreamArn", + "/properties/TableId" + ], + "createOnlyProperties": [ + "/properties/LocalSecondaryIndexes", + "/properties/TableName", + "/properties/KeySchema" + ], + "primaryIdentifier": [ + "/properties/TableName" + ], + "additionalIdentifiers": [ + [ + "/properties/Arn" + ], + [ + "/properties/StreamArn" + ] + ], + "handlers": { + "create": { + "permissions": [ + "dynamodb:CreateTable", + "dynamodb:CreateTableReplica", + "dynamodb:Describe*", + "dynamodb:UpdateTimeToLive", + "dynamodb:UpdateContributorInsights", + "dynamodb:UpdateContinuousBackups", + "dynamodb:ListTagsOfResource", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:UpdateItem", + "dynamodb:PutItem", + "dynamodb:GetItem", + "dynamodb:DeleteItem", + "dynamodb:BatchWriteItem", + "dynamodb:TagResource", + "dynamodb:EnableKinesisStreamingDestination", + "dynamodb:DisableKinesisStreamingDestination", + "dynamodb:DescribeKinesisStreamingDestination", + "dynamodb:DescribeTableReplicaAutoScaling", + "dynamodb:UpdateTableReplicaAutoScaling", + "dynamodb:TagResource", + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DeleteScheduledAction", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:Describe*", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:PutScheduledAction", + "application-autoscaling:RegisterScalableTarget", + 
"kinesis:ListStreams", + "kinesis:DescribeStream", + "kinesis:PutRecords", + "kms:CreateGrant", + "kms:Describe*", + "kms:Get*", + "kms:List*", + "kms:RevokeGrant", + "cloudwatch:PutMetricData" + ] + }, + "read": { + "permissions": [ + "dynamodb:Describe*", + "application-autoscaling:Describe*", + "cloudwatch:PutMetricData" + ] + }, + "update": { + "permissions": [ + "dynamodb:Describe*", + "dynamodb:CreateTableReplica", + "dynamodb:UpdateTable", + "dynamodb:UpdateTimeToLive", + "dynamodb:UpdateContinuousBackups", + "dynamodb:UpdateContributorInsights", + "dynamodb:ListTagsOfResource", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:UpdateItem", + "dynamodb:PutItem", + "dynamodb:GetItem", + "dynamodb:DeleteItem", + "dynamodb:BatchWriteItem", + "dynamodb:DeleteTable", + "dynamodb:DeleteTableReplica", + "dynamodb:UpdateItem", + "dynamodb:TagResource", + "dynamodb:UntagResource", + "dynamodb:EnableKinesisStreamingDestination", + "dynamodb:DisableKinesisStreamingDestination", + "dynamodb:DescribeKinesisStreamingDestination", + "dynamodb:DescribeTableReplicaAutoScaling", + "dynamodb:UpdateTableReplicaAutoScaling", + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DeleteScheduledAction", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:Describe*", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:PutScheduledAction", + "application-autoscaling:RegisterScalableTarget", + "kinesis:ListStreams", + "kinesis:DescribeStream", + "kinesis:PutRecords", + "kms:CreateGrant", + "kms:Describe*", + "kms:Get*", + "kms:List*", + "kms:RevokeGrant", + "cloudwatch:PutMetricData" + ], + "timeoutInMinutes": 1200 + }, + "delete": { + "permissions": [ + "dynamodb:Describe*", + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DeleteScheduledAction", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:Describe*", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:PutScheduledAction", + "application-autoscaling:RegisterScalableTarget" + ] + }, + "list": { + "permissions": [ + "dynamodb:ListTables", + "cloudwatch:PutMetricData" + ] + } + } +} diff --git a/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable_plugin.py b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable_plugin.py new file mode 100644 index 0000000000000..8de0265d3d5f1 --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_globaltable_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class DynamoDBGlobalTableProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::DynamoDB::GlobalTable" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.dynamodb.resource_providers.aws_dynamodb_globaltable import ( + DynamoDBGlobalTableProvider, + ) + + self.factory = DynamoDBGlobalTableProvider diff --git a/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table.py b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table.py new file mode 100644 index 0000000000000..469c944cca898 --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table.py @@ -0,0 +1,442 @@ +# LocalStack 
Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class DynamoDBTableProperties(TypedDict): + KeySchema: Optional[list[KeySchema] | dict] + Arn: Optional[str] + AttributeDefinitions: Optional[list[AttributeDefinition]] + BillingMode: Optional[str] + ContributorInsightsSpecification: Optional[ContributorInsightsSpecification] + DeletionProtectionEnabled: Optional[bool] + GlobalSecondaryIndexes: Optional[list[GlobalSecondaryIndex]] + ImportSourceSpecification: Optional[ImportSourceSpecification] + KinesisStreamSpecification: Optional[KinesisStreamSpecification] + LocalSecondaryIndexes: Optional[list[LocalSecondaryIndex]] + PointInTimeRecoverySpecification: Optional[PointInTimeRecoverySpecification] + ProvisionedThroughput: Optional[ProvisionedThroughput] + SSESpecification: Optional[SSESpecification] + StreamArn: Optional[str] + StreamSpecification: Optional[StreamSpecification] + TableClass: Optional[str] + TableName: Optional[str] + Tags: Optional[list[Tag]] + TimeToLiveSpecification: Optional[TimeToLiveSpecification] + + +class AttributeDefinition(TypedDict): + AttributeName: Optional[str] + AttributeType: Optional[str] + + +class KeySchema(TypedDict): + AttributeName: Optional[str] + KeyType: Optional[str] + + +class Projection(TypedDict): + NonKeyAttributes: Optional[list[str]] + ProjectionType: Optional[str] + + +class ProvisionedThroughput(TypedDict): + ReadCapacityUnits: Optional[int] + WriteCapacityUnits: Optional[int] + + +class ContributorInsightsSpecification(TypedDict): + Enabled: Optional[bool] + + +class GlobalSecondaryIndex(TypedDict): + IndexName: Optional[str] + KeySchema: Optional[list[KeySchema]] + Projection: Optional[Projection] + ContributorInsightsSpecification: Optional[ContributorInsightsSpecification] + ProvisionedThroughput: Optional[ProvisionedThroughput] + + +class LocalSecondaryIndex(TypedDict): + IndexName: Optional[str] + KeySchema: Optional[list[KeySchema]] + Projection: Optional[Projection] + + +class PointInTimeRecoverySpecification(TypedDict): + PointInTimeRecoveryEnabled: Optional[bool] + + +class SSESpecification(TypedDict): + SSEEnabled: Optional[bool] + KMSMasterKeyId: Optional[str] + SSEType: Optional[str] + + +class StreamSpecification(TypedDict): + StreamViewType: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class TimeToLiveSpecification(TypedDict): + AttributeName: Optional[str] + Enabled: Optional[bool] + + +class KinesisStreamSpecification(TypedDict): + StreamArn: Optional[str] + + +class S3BucketSource(TypedDict): + S3Bucket: Optional[str] + S3BucketOwner: Optional[str] + S3KeyPrefix: Optional[str] + + +class Csv(TypedDict): + Delimiter: Optional[str] + HeaderList: Optional[list[str]] + + +class InputFormatOptions(TypedDict): + Csv: Optional[Csv] + + +class ImportSourceSpecification(TypedDict): + InputFormat: Optional[str] + S3BucketSource: Optional[S3BucketSource] + InputCompressionType: Optional[str] + InputFormatOptions: Optional[InputFormatOptions] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class DynamoDBTableProvider(ResourceProvider[DynamoDBTableProperties]): + TYPE = "AWS::DynamoDB::Table" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[DynamoDBTableProperties], + ) -> ProgressEvent[DynamoDBTableProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/TableName + + Required properties: + - KeySchema + + Create-only properties: + - /properties/TableName + - /properties/ImportSourceSpecification + + Read-only properties: + - /properties/Arn + - /properties/StreamArn + + IAM permissions required: + - dynamodb:CreateTable + - dynamodb:DescribeImport + - dynamodb:DescribeTable + - dynamodb:DescribeTimeToLive + - dynamodb:UpdateTimeToLive + - dynamodb:UpdateContributorInsights + - dynamodb:UpdateContinuousBackups + - dynamodb:DescribeContinuousBackups + - dynamodb:DescribeContributorInsights + - dynamodb:EnableKinesisStreamingDestination + - dynamodb:DisableKinesisStreamingDestination + - dynamodb:DescribeKinesisStreamingDestination + - dynamodb:ImportTable + - dynamodb:ListTagsOfResource + - dynamodb:TagResource + - dynamodb:UpdateTable + - kinesis:DescribeStream + - kinesis:PutRecords + - iam:CreateServiceLinkedRole + - kms:CreateGrant + - kms:Decrypt + - kms:Describe* + - kms:Encrypt + - kms:Get* + - kms:List* + - kms:RevokeGrant + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:DescribeLogGroups + - logs:DescribeLogStreams + - logs:PutLogEvents + - logs:PutRetentionPolicy + - s3:GetObject + - s3:GetObjectMetadata + - s3:ListBucket + + """ + model = request.desired_state + + if not request.custom_context.get(REPEATED_INVOCATION): + request.custom_context[REPEATED_INVOCATION] = True + + if not model.get("TableName"): + model["TableName"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + if model.get("ProvisionedThroughput"): + model["ProvisionedThroughput"] = self.get_ddb_provisioned_throughput(model) + + if model.get("GlobalSecondaryIndexes"): + model["GlobalSecondaryIndexes"] = self.get_ddb_global_sec_indexes(model) + + properties = [ + "TableName", + "AttributeDefinitions", + "KeySchema", + "BillingMode", + "ProvisionedThroughput", + "LocalSecondaryIndexes", + "GlobalSecondaryIndexes", + "Tags", + "SSESpecification", + ] + create_params = util.select_attributes(model, properties) + + if sse_specification := create_params.get("SSESpecification"): + # rename bool attribute to fit boto call + sse_specification["Enabled"] = sse_specification.pop("SSEEnabled") + + if stream_spec := model.get("StreamSpecification"): + create_params["StreamSpecification"] = { + "StreamEnabled": True, + **(stream_spec or {}), + } + + response = request.aws_client_factory.dynamodb.create_table(**create_params) + model["Arn"] = response["TableDescription"]["TableArn"] + + if model.get("KinesisStreamSpecification"): + request.aws_client_factory.dynamodb.enable_kinesis_streaming_destination( + **self.get_ddb_kinesis_stream_specification(model) + ) + + # add TTL config + if ttl_config := model.get("TimeToLiveSpecification"): + request.aws_client_factory.dynamodb.update_time_to_live( + TableName=model["TableName"], TimeToLiveSpecification=ttl_config + ) + + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + description = request.aws_client_factory.dynamodb.describe_table( + TableName=model["TableName"] + ) + + if description["Table"]["TableStatus"] != "ACTIVE": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + if 
model.get("TimeToLiveSpecification"): + request.aws_client_factory.dynamodb.update_time_to_live( + TableName=model["TableName"], + TimeToLiveSpecification=model["TimeToLiveSpecification"], + ) + + if description["Table"].get("LatestStreamArn"): + model["StreamArn"] = description["Table"]["LatestStreamArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[DynamoDBTableProperties], + ) -> ProgressEvent[DynamoDBTableProperties]: + """ + Fetch resource information + + IAM permissions required: + - dynamodb:DescribeTable + - dynamodb:DescribeContinuousBackups + - dynamodb:DescribeContributorInsights + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[DynamoDBTableProperties], + ) -> ProgressEvent[DynamoDBTableProperties]: + """ + Delete a resource + + IAM permissions required: + - dynamodb:DeleteTable + - dynamodb:DescribeTable + """ + model = request.desired_state + if not request.custom_context.get(REPEATED_INVOCATION): + request.custom_context[REPEATED_INVOCATION] = True + request.aws_client_factory.dynamodb.delete_table(TableName=model["TableName"]) + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + try: + table_state = request.aws_client_factory.dynamodb.describe_table( + TableName=model["TableName"] + ) + + match table_state["Table"]["TableStatus"]: + case "DELETING": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + case invalid_state: + return ProgressEvent( + status=OperationStatus.FAILED, + message=f"Table deletion failed. Table {model['TableName']} found in state {invalid_state}", # TODO: not validated yet + resource_model={}, + ) + except request.aws_client_factory.dynamodb.exceptions.TableNotFoundException: + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={}, + ) + + def update( + self, + request: ResourceRequest[DynamoDBTableProperties], + ) -> ProgressEvent[DynamoDBTableProperties]: + """ + Update a resource + + IAM permissions required: + - dynamodb:UpdateTable + - dynamodb:DescribeTable + - dynamodb:DescribeTimeToLive + - dynamodb:UpdateTimeToLive + - dynamodb:UpdateContinuousBackups + - dynamodb:UpdateContributorInsights + - dynamodb:DescribeContinuousBackups + - dynamodb:DescribeKinesisStreamingDestination + - dynamodb:ListTagsOfResource + - dynamodb:TagResource + - dynamodb:UntagResource + - dynamodb:DescribeContributorInsights + - dynamodb:EnableKinesisStreamingDestination + - dynamodb:DisableKinesisStreamingDestination + - kinesis:DescribeStream + - kinesis:PutRecords + - iam:CreateServiceLinkedRole + - kms:CreateGrant + - kms:Describe* + - kms:Get* + - kms:List* + - kms:RevokeGrant + """ + raise NotImplementedError + + def get_ddb_provisioned_throughput( + self, + properties: dict, + ) -> dict | None: + # see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html#cfn-dynamodb-table-provisionedthroughput + args = properties.get("ProvisionedThroughput") + if args == "AWS::NoValue": + return None + is_ondemand = properties.get("BillingMode") == "PAY_PER_REQUEST" + # if the BillingMode is set to PAY_PER_REQUEST, you cannot specify ProvisionedThroughput + # if the BillingMode is set to PROVISIONED (default), you have to specify ProvisionedThroughput + + if args is None: + if is_ondemand: + # do not return default value if it's 
on demand + return + + # return default values if it's not on demand + return { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5, + } + + if isinstance(args["ReadCapacityUnits"], str): + args["ReadCapacityUnits"] = int(args["ReadCapacityUnits"]) + if isinstance(args["WriteCapacityUnits"], str): + args["WriteCapacityUnits"] = int(args["WriteCapacityUnits"]) + + return args + + def get_ddb_global_sec_indexes( + self, + properties: dict, + ) -> list | None: + args: list = properties.get("GlobalSecondaryIndexes") + is_ondemand = properties.get("BillingMode") == "PAY_PER_REQUEST" + if not args: + return + + for index in args: + # we ignore ContributorInsightsSpecification as not supported yet in DynamoDB and CloudWatch + index.pop("ContributorInsightsSpecification", None) + provisioned_throughput = index.get("ProvisionedThroughput") + if is_ondemand and provisioned_throughput is None: + pass # optional for API calls + elif provisioned_throughput is not None: + # convert types + if isinstance((read_units := provisioned_throughput["ReadCapacityUnits"]), str): + provisioned_throughput["ReadCapacityUnits"] = int(read_units) + if isinstance((write_units := provisioned_throughput["WriteCapacityUnits"]), str): + provisioned_throughput["WriteCapacityUnits"] = int(write_units) + else: + raise Exception("Can't specify ProvisionedThroughput with PAY_PER_REQUEST") + return args + + def get_ddb_kinesis_stream_specification( + self, + properties: dict, + ) -> dict: + args = properties.get("KinesisStreamSpecification") + if args: + args["TableName"] = properties["TableName"] + return args + + def list( + self, + request: ResourceRequest[DynamoDBTableProperties], + ) -> ProgressEvent[DynamoDBTableProperties]: + resources = request.aws_client_factory.dynamodb.list_tables() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + DynamoDBTableProperties(TableName=resource) for resource in resources["TableNames"] + ], + ) diff --git a/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table.schema.json b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table.schema.json new file mode 100644 index 0000000000000..c4dd5ef70eb3d --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table.schema.json @@ -0,0 +1,514 @@ +{ + "typeName": "AWS::DynamoDB::Table", + "description": "Version: None. 
Resource Type definition for AWS::DynamoDB::Table", + "additionalProperties": false, + "properties": { + "Arn": { + "type": "string" + }, + "StreamArn": { + "type": "string" + }, + "AttributeDefinitions": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/AttributeDefinition" + } + }, + "BillingMode": { + "type": "string" + }, + "DeletionProtectionEnabled": { + "type": "boolean" + }, + "GlobalSecondaryIndexes": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/GlobalSecondaryIndex" + } + }, + "KeySchema": { + "oneOf": [ + { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/KeySchema" + } + }, + { + "type": "object" + } + ] + }, + "LocalSecondaryIndexes": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/LocalSecondaryIndex" + } + }, + "PointInTimeRecoverySpecification": { + "$ref": "#/definitions/PointInTimeRecoverySpecification" + }, + "TableClass": { + "type": "string" + }, + "ProvisionedThroughput": { + "$ref": "#/definitions/ProvisionedThroughput" + }, + "SSESpecification": { + "$ref": "#/definitions/SSESpecification" + }, + "StreamSpecification": { + "$ref": "#/definitions/StreamSpecification" + }, + "TableName": { + "type": "string" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "TimeToLiveSpecification": { + "$ref": "#/definitions/TimeToLiveSpecification" + }, + "ContributorInsightsSpecification": { + "$ref": "#/definitions/ContributorInsightsSpecification" + }, + "KinesisStreamSpecification": { + "$ref": "#/definitions/KinesisStreamSpecification" + }, + "ImportSourceSpecification": { + "$ref": "#/definitions/ImportSourceSpecification" + } + }, + "propertyTransform": { + "/properties/SSESpecification/KMSMasterKeyId": "$join([\"arn:(aws)[-]{0,1}[a-z]{0,2}[-]{0,1}[a-z]{0,3}:kms:[a-z]{2}[-]{1}[a-z]{3,10}[-]{0,1}[a-z]{0,4}[-]{1}[1-4]{1}:[0-9]{12}[:]{1}key\\/\", SSESpecification.KMSMasterKeyId])" + }, + "definitions": { + "StreamSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "StreamViewType": { + "type": "string" + } + }, + "required": [ + "StreamViewType" + ] + }, + "DeprecatedKeySchema": { + "type": "object", + "additionalProperties": false, + "properties": { + "HashKeyElement": { + "$ref": "#/definitions/DeprecatedHashKeyElement" + } + }, + "required": [ + "HashKeyElement" + ] + }, + "DeprecatedHashKeyElement": { + "type": "object", + "additionalProperties": false, + "properties": { + "AttributeType": { + "type": "string" + }, + "AttributeName": { + "type": "string" + } + }, + "required": [ + "AttributeType", + "AttributeName" + ] + }, + "KeySchema": { + "type": "object", + "additionalProperties": false, + "properties": { + "AttributeName": { + "type": "string" + }, + "KeyType": { + "type": "string" + } + }, + "required": [ + "KeyType", + "AttributeName" + ] + }, + "PointInTimeRecoverySpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "PointInTimeRecoveryEnabled": { + "type": "boolean" + } + } + }, + "KinesisStreamSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "StreamArn": { + "type": "string" + } + }, + "required": [ + "StreamArn" + ] + }, + "TimeToLiveSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "AttributeName": { + "type": "string" + }, + "Enabled": { + "type": "boolean" + } + }, + "required": [ + "Enabled", + 
"AttributeName" + ] + }, + "LocalSecondaryIndex": { + "type": "object", + "additionalProperties": false, + "properties": { + "IndexName": { + "type": "string" + }, + "KeySchema": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/KeySchema" + } + }, + "Projection": { + "$ref": "#/definitions/Projection" + } + }, + "required": [ + "IndexName", + "Projection", + "KeySchema" + ] + }, + "GlobalSecondaryIndex": { + "type": "object", + "additionalProperties": false, + "properties": { + "IndexName": { + "type": "string" + }, + "KeySchema": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/KeySchema" + } + }, + "Projection": { + "$ref": "#/definitions/Projection" + }, + "ProvisionedThroughput": { + "$ref": "#/definitions/ProvisionedThroughput" + }, + "ContributorInsightsSpecification": { + "$ref": "#/definitions/ContributorInsightsSpecification" + } + }, + "required": [ + "IndexName", + "Projection", + "KeySchema" + ] + }, + "SSESpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "KMSMasterKeyId": { + "type": "string" + }, + "SSEEnabled": { + "type": "boolean" + }, + "SSEType": { + "type": "string" + } + }, + "required": [ + "SSEEnabled" + ] + }, + "AttributeDefinition": { + "type": "object", + "additionalProperties": false, + "properties": { + "AttributeName": { + "type": "string" + }, + "AttributeType": { + "type": "string" + } + }, + "required": [ + "AttributeName", + "AttributeType" + ] + }, + "ProvisionedThroughput": { + "type": "object", + "additionalProperties": false, + "properties": { + "ReadCapacityUnits": { + "type": "integer" + }, + "WriteCapacityUnits": { + "type": "integer" + } + }, + "required": [ + "WriteCapacityUnits", + "ReadCapacityUnits" + ] + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "Projection": { + "type": "object", + "additionalProperties": false, + "properties": { + "NonKeyAttributes": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "ProjectionType": { + "type": "string" + } + } + }, + "ContributorInsightsSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + } + }, + "required": [ + "Enabled" + ] + }, + "ImportSourceSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "S3BucketSource": { + "$ref": "#/definitions/S3BucketSource" + }, + "InputFormat": { + "type": "string" + }, + "InputFormatOptions": { + "$ref": "#/definitions/InputFormatOptions" + }, + "InputCompressionType": { + "type": "string" + } + }, + "required": [ + "S3BucketSource", + "InputFormat" + ] + }, + "S3BucketSource": { + "type": "object", + "additionalProperties": false, + "properties": { + "S3BucketOwner": { + "type": "string" + }, + "S3Bucket": { + "type": "string" + }, + "S3KeyPrefix": { + "type": "string" + } + }, + "required": [ + "S3Bucket" + ] + }, + "InputFormatOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Csv": { + "$ref": "#/definitions/Csv" + } + } + }, + "Csv": { + "type": "object", + "additionalProperties": false, + "properties": { + "HeaderList": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Delimiter": { + "type": "string" + } + } + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + 
"tagUpdatable": true, + "cloudFormationSystemTags": false, + "tagProperty": "/properties/Tags" + }, + "required": [ + "KeySchema" + ], + "readOnlyProperties": [ + "/properties/Arn", + "/properties/StreamArn" + ], + "createOnlyProperties": [ + "/properties/TableName", + "/properties/ImportSourceSpecification" + ], + "primaryIdentifier": [ + "/properties/TableName" + ], + "writeOnlyProperties": [ + "/properties/ImportSourceSpecification" + ], + "handlers": { + "create": { + "permissions": [ + "dynamodb:CreateTable", + "dynamodb:DescribeImport", + "dynamodb:DescribeTable", + "dynamodb:DescribeTimeToLive", + "dynamodb:UpdateTimeToLive", + "dynamodb:UpdateContributorInsights", + "dynamodb:UpdateContinuousBackups", + "dynamodb:DescribeContinuousBackups", + "dynamodb:DescribeContributorInsights", + "dynamodb:EnableKinesisStreamingDestination", + "dynamodb:DisableKinesisStreamingDestination", + "dynamodb:DescribeKinesisStreamingDestination", + "dynamodb:ImportTable", + "dynamodb:ListTagsOfResource", + "dynamodb:TagResource", + "dynamodb:UpdateTable", + "kinesis:DescribeStream", + "kinesis:PutRecords", + "iam:CreateServiceLinkedRole", + "kms:CreateGrant", + "kms:Decrypt", + "kms:Describe*", + "kms:Encrypt", + "kms:Get*", + "kms:List*", + "kms:RevokeGrant", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:PutRetentionPolicy", + "s3:GetObject", + "s3:GetObjectMetadata", + "s3:ListBucket" + ], + "timeoutInMinutes": 720 + }, + "read": { + "permissions": [ + "dynamodb:DescribeTable", + "dynamodb:DescribeContinuousBackups", + "dynamodb:DescribeContributorInsights" + ] + }, + "update": { + "permissions": [ + "dynamodb:UpdateTable", + "dynamodb:DescribeTable", + "dynamodb:DescribeTimeToLive", + "dynamodb:UpdateTimeToLive", + "dynamodb:UpdateContinuousBackups", + "dynamodb:UpdateContributorInsights", + "dynamodb:DescribeContinuousBackups", + "dynamodb:DescribeKinesisStreamingDestination", + "dynamodb:ListTagsOfResource", + "dynamodb:TagResource", + "dynamodb:UntagResource", + "dynamodb:DescribeContributorInsights", + "dynamodb:EnableKinesisStreamingDestination", + "dynamodb:DisableKinesisStreamingDestination", + "kinesis:DescribeStream", + "kinesis:PutRecords", + "iam:CreateServiceLinkedRole", + "kms:CreateGrant", + "kms:Describe*", + "kms:Get*", + "kms:List*", + "kms:RevokeGrant" + ], + "timeoutInMinutes": 720 + }, + "delete": { + "permissions": [ + "dynamodb:DeleteTable", + "dynamodb:DescribeTable" + ], + "timeoutInMinutes": 720 + }, + "list": { + "permissions": [ + "dynamodb:ListTables" + ] + } + } +} diff --git a/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table_plugin.py b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table_plugin.py new file mode 100644 index 0000000000000..5f263b9e9d068 --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/resource_providers/aws_dynamodb_table_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class DynamoDBTableProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::DynamoDB::Table" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.dynamodb.resource_providers.aws_dynamodb_table import ( + DynamoDBTableProvider, + ) + + self.factory = DynamoDBTableProvider diff --git 
a/localstack-core/localstack/services/dynamodb/server.py b/localstack-core/localstack/services/dynamodb/server.py new file mode 100644 index 0000000000000..dba7c321ebbd2 --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/server.py @@ -0,0 +1,220 @@ +import logging +import os +import threading + +from localstack import config +from localstack.aws.connect import connect_externally_to +from localstack.aws.forwarder import AwsRequestProxy +from localstack.config import is_env_true +from localstack.constants import DEFAULT_AWS_ACCOUNT_ID +from localstack.services.dynamodb.packages import dynamodblocal_package +from localstack.utils.common import TMP_THREADS, ShellCommandThread, get_free_tcp_port, mkdir +from localstack.utils.functions import run_safe +from localstack.utils.net import wait_for_port_closed +from localstack.utils.objects import singleton_factory +from localstack.utils.platform import Arch, get_arch +from localstack.utils.run import FuncThread, run +from localstack.utils.serving import Server +from localstack.utils.sync import retry, synchronized + +LOG = logging.getLogger(__name__) +RESTART_LOCK = threading.RLock() + + +def _log_listener(line, **_kwargs): + LOG.debug(line.rstrip()) + + +class DynamodbServer(Server): + db_path: str | None + heap_size: str + + delay_transient_statuses: bool + optimize_db_before_startup: bool + share_db: bool + cors: str | None + + proxy: AwsRequestProxy + + def __init__( + self, + port: int | None = None, + host: str = "localhost", + db_path: str | None = None, + ) -> None: + """ + Creates a DynamoDB server from the local configuration. + + :param port: optional, the port to start the server on (defaults to a random port) + :param host: localhost by default + :param db_path: path to the persistence state files used by the DynamoDB Local process + """ + + port = port or get_free_tcp_port() + super().__init__(port, host) + + self.db_path = ( + f"{config.dirs.data}/dynamodb" if not db_path and config.dirs.data else db_path + ) + + # the DYNAMODB_IN_MEMORY variable takes precedence and will set the DB path to None which forces inMemory=true + if is_env_true("DYNAMODB_IN_MEMORY"): + # note: with DYNAMODB_IN_MEMORY we do not support persistence + self.db_path = None + + if self.db_path: + self.db_path = os.path.abspath(self.db_path) + + self.heap_size = config.DYNAMODB_HEAP_SIZE + self.delay_transient_statuses = is_env_true("DYNAMODB_DELAY_TRANSIENT_STATUSES") + self.optimize_db_before_startup = is_env_true("DYNAMODB_OPTIMIZE_DB_BEFORE_STARTUP") + self.share_db = is_env_true("DYNAMODB_SHARE_DB") + self.cors = os.getenv("DYNAMODB_CORS", None) + self.proxy = AwsRequestProxy(self.url) + + @staticmethod + @singleton_factory + def get() -> "DynamodbServer": + return DynamodbServer(config.DYNAMODB_LOCAL_PORT) + + @synchronized(lock=RESTART_LOCK) + def start_dynamodb(self) -> bool: + """Start the DynamoDB server.""" + + # We want this method to be idempotent. + if self.is_running() and self.is_up(): + return True + + # For the v2 provider, the DynamodbServer has been made a singleton. Yet, the Server abstraction is modelled + # after threading.Thread, where Start -> Stop -> Start is not allowed. This flow happens during state resets. + # The following is a workaround that permits this flow + self._started.clear() + self._stopped.clear() + + # Note: when starting the server, we had a flag for wiping the assets directory before the actual start. 
+ # This behavior was needed in some particular cases: + # - pod load with some assets already lying in the asset folder + # - ... + # The cleaning is now done via the reset endpoint + if self.db_path: + mkdir(self.db_path) + + started = self.start() + self.wait_for_dynamodb() + return started + + @synchronized(lock=RESTART_LOCK) + def stop_dynamodb(self) -> None: + """Stop the DynamoDB server.""" + import psutil + + if self._thread is None: + return + self._thread.auto_restart = False + self.shutdown() + self.join(timeout=10) + try: + wait_for_port_closed(self.port, sleep_time=0.8, retries=10) + except Exception: + LOG.warning( + "DynamoDB server port %s (%s) unexpectedly still open; running processes: %s", + self.port, + self._thread, + run(["ps", "aux"]), + ) + + # attempt to terminate/kill the process manually + server_pid = self._thread.process.pid # noqa + LOG.info("Attempting to kill DynamoDB process %s", server_pid) + process = psutil.Process(server_pid) + run_safe(process.terminate) + run_safe(process.kill) + wait_for_port_closed(self.port, sleep_time=0.5, retries=8) + + @property + def in_memory(self) -> bool: + return self.db_path is None + + @property + def jar_path(self) -> str: + return f"{dynamodblocal_package.get_installed_dir()}/DynamoDBLocal.jar" + + @property + def library_path(self) -> str: + return f"{dynamodblocal_package.get_installed_dir()}/DynamoDBLocal_lib" + + def _get_java_vm_options(self) -> list[str]: + # Workaround for JVM SIGILL crash on Apple Silicon M4 + # See https://bugs.openjdk.org/browse/JDK-8345296 + # To be removed after Java is bumped to 17.0.15+ and 21.0.7+ + return ["-XX:UseSVE=0"] if Arch.arm64 == get_arch() else [] + + def _create_shell_command(self) -> list[str]: + cmd = [ + "java", + *self._get_java_vm_options(), + "-Xmx%s" % self.heap_size, + f"-javaagent:{dynamodblocal_package.get_installer().get_ddb_agent_jar_path()}", + f"-Djava.library.path={self.library_path}", + "-jar", + self.jar_path, + ] + parameters = [] + + parameters.extend(["-port", str(self.port)]) + if self.in_memory: + parameters.append("-inMemory") + if self.db_path: + parameters.extend(["-dbPath", self.db_path]) + if self.delay_transient_statuses: + parameters.extend(["-delayTransientStatuses"]) + if self.optimize_db_before_startup: + parameters.extend(["-optimizeDbBeforeStartup"]) + if self.share_db: + parameters.extend(["-sharedDb"]) + + return cmd + parameters + + def do_start_thread(self) -> FuncThread: + dynamodblocal_installer = dynamodblocal_package.get_installer() + dynamodblocal_installer.install() + + cmd = self._create_shell_command() + env_vars = { + **dynamodblocal_installer.get_java_env_vars(), + "DDB_LOCAL_TELEMETRY": "0", + } + + LOG.debug("Starting DynamoDB Local: %s", cmd) + t = ShellCommandThread( + cmd, + strip_color=True, + log_listener=_log_listener, + auto_restart=True, + name="dynamodb-local", + env_vars=env_vars, + ) + TMP_THREADS.append(t) + t.start() + return t + + def check_dynamodb(self, expect_shutdown: bool = False) -> None: + """Checks if DynamoDB server is up""" + out = None + + try: + self.wait_is_up() + out = connect_externally_to( + endpoint_url=self.url, + aws_access_key_id=DEFAULT_AWS_ACCOUNT_ID, + aws_secret_access_key=DEFAULT_AWS_ACCOUNT_ID, + ).dynamodb.list_tables() + except Exception: + LOG.exception("DynamoDB health check failed") + if expect_shutdown: + assert out is None + else: + assert isinstance(out["TableNames"], list) + + def wait_for_dynamodb(self) -> None: + retry(self.check_dynamodb, sleep=0.4, retries=10)
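For orientation, a hedged usage sketch of the `DynamodbServer` lifecycle defined above; the printed URL is illustrative and depends on `config.DYNAMODB_LOCAL_PORT`.

```python
from localstack.services.dynamodb.server import DynamodbServer

server = DynamodbServer.get()  # singleton bound to config.DYNAMODB_LOCAL_PORT
server.start_dynamodb()  # idempotent: returns early when the server is already up
print(server.url)  # e.g. "http://localhost:4566" (illustrative)
server.stop_dynamodb()  # shuts DDBLocal down and waits for the port to close
```

diff --git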
a/localstack-core/localstack/services/dynamodb/utils.py b/localstack-core/localstack/services/dynamodb/utils.py new file mode 100644 index 0000000000000..4ff065440abec --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/utils.py @@ -0,0 +1,387 @@ +import logging +import re +from binascii import crc32 +from typing import Dict, List, Optional + +from boto3.dynamodb.types import TypeDeserializer, TypeSerializer +from cachetools import TTLCache +from moto.core.exceptions import JsonRESTError + +from localstack.aws.api import RequestContext +from localstack.aws.api.dynamodb import ( + AttributeMap, + BatchGetRequestMap, + BatchGetResponseMap, + Delete, + DeleteRequest, + Put, + PutRequest, + ResourceNotFoundException, + TableName, + Update, +) +from localstack.aws.api.dynamodbstreams import ( + ResourceNotFoundException as DynamoDBStreamsResourceNotFoundException, +) +from localstack.aws.connect import connect_to +from localstack.constants import INTERNAL_AWS_SECRET_ACCESS_KEY +from localstack.http import Response +from localstack.utils.aws.arns import ( + dynamodb_stream_arn, + dynamodb_table_arn, + get_partition, + parse_arn, +) +from localstack.utils.json import canonical_json +from localstack.utils.testutil import list_all_resources + +LOG = logging.getLogger(__name__) + +# cache schema definitions +SCHEMA_CACHE = TTLCache(maxsize=50, ttl=20) + +_ddb_local_arn_pattern = re.compile( + r'("TableArn"|"LatestStreamArn"|"StreamArn"|"ShardIterator"|"IndexArn")\s*:\s*"arn:[a-z-]+:dynamodb:ddblocal:000000000000:([^"]+)"' +) +_ddb_local_region_pattern = re.compile(r'"awsRegion"\s*:\s*"([^"]+)"') +_ddb_local_exception_arn_pattern = re.compile(r'arn:[a-z-]+:dynamodb:ddblocal:000000000000:([^"]+)') + + +def get_ddb_access_key(account_id: str, region_name: str) -> str: + """ + Get the access key to be used while communicating with DynamoDB Local. + + DDBLocal supports namespacing as an undocumented feature. It works based on the value of the `Credentials` + field of the `Authorization` header. We use a concatenated value of account ID and region to achieve + namespacing. 
+ """ + return f"{account_id}{region_name}".replace("-", "") + + +class ItemSet: + """Represents a set of items and provides utils to find individual items in the set""" + + def __init__(self, items: List[Dict], key_schema: List[Dict]): + self.items_list = items + self.key_schema = key_schema + self._build_dict() + + def _build_dict(self): + self.items_dict = {} + for item in self.items_list: + self.items_dict[self._hashable_key(item)] = item + + def _hashable_key(self, item: Dict): + keys = SchemaExtractor.extract_keys_for_schema(item=item, key_schema=self.key_schema) + return canonical_json(keys) + + def find_item(self, item: Dict) -> Optional[Dict]: + key = self._hashable_key(item) + return self.items_dict.get(key) + + +class SchemaExtractor: + @classmethod + def extract_keys( + cls, item: Dict, table_name: str, account_id: str, region_name: str + ) -> Optional[Dict]: + key_schema = cls.get_key_schema(table_name, account_id, region_name) + return cls.extract_keys_for_schema(item, key_schema) + + @classmethod + def extract_keys_for_schema(cls, item: Dict, key_schema: List[Dict]): + result = {} + for key in key_schema: + attr_name = key["AttributeName"] + if attr_name not in item: + raise JsonRESTError( + error_type="ValidationException", + message="One of the required keys was not given a value", + ) + result[attr_name] = item[attr_name] + return result + + @classmethod + def get_key_schema( + cls, table_name: str, account_id: str, region_name: str + ) -> Optional[List[Dict]]: + from localstack.services.dynamodb.provider import get_store + + table_definitions: Dict = get_store( + account_id=account_id, + region_name=region_name, + ).table_definitions + table_def = table_definitions.get(table_name) + if not table_def: + # Try fetching from the backend in case table_definitions has been reset + schema = cls.get_table_schema( + table_name=table_name, account_id=account_id, region_name=region_name + ) + if not schema: + raise ResourceNotFoundException(f"Unknown table: {table_name} not found") + # Save the schema in the cache + table_definitions[table_name] = schema["Table"] + table_def = table_definitions[table_name] + return table_def["KeySchema"] + + @classmethod + def get_table_schema(cls, table_name: str, account_id: str, region_name: str): + key = dynamodb_table_arn( + table_name=table_name, account_id=account_id, region_name=region_name + ) + schema = SCHEMA_CACHE.get(key) + if not schema: + # TODO: consider making in-memory lookup instead of API call + ddb_client = connect_to( + aws_access_key_id=account_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + region_name=region_name, + ).dynamodb + try: + schema = ddb_client.describe_table(TableName=table_name) + SCHEMA_CACHE[key] = schema + except Exception as e: + if "ResourceNotFoundException" in str(e): + raise ResourceNotFoundException(f"Unknown table: {table_name}") from e + raise + return schema + + @classmethod + def invalidate_table_schema(cls, table_name: str, account_id: str, region_name: str): + """ + Allow cached table schemas to be invalidated without waiting for the TTL to expire + """ + key = dynamodb_table_arn( + table_name=table_name, account_id=account_id, region_name=region_name + ) + SCHEMA_CACHE.pop(key, None) + + +class ItemFinder: + @staticmethod + def get_ddb_local_client(account_id: str, region_name: str, endpoint_url: str): + ddb_client = connect_to( + aws_access_key_id=get_ddb_access_key(account_id, region_name), + region_name=region_name, + endpoint_url=endpoint_url, + ).dynamodb + return ddb_client 
+ + @staticmethod + def find_existing_item( + put_item: Dict, + table_name: str, + account_id: str, + region_name: str, + endpoint_url: str, + ) -> Optional[AttributeMap]: + from localstack.services.dynamodb.provider import ValidationException + + ddb_client = ItemFinder.get_ddb_local_client(account_id, region_name, endpoint_url) + + search_key = {} + if "Key" in put_item: + search_key = put_item["Key"] + else: + schema = SchemaExtractor.get_table_schema(table_name, account_id, region_name) + schemas = [schema["Table"]["KeySchema"]] + for index in schema["Table"].get("GlobalSecondaryIndexes", []): + # TODO + # schemas.append(index['KeySchema']) + pass + for schema in schemas: + for key in schema: + key_name = key["AttributeName"] + key_value = put_item["Item"].get(key_name) + if not key_value: + raise ValidationException( + "The provided key element does not match the schema" + ) + search_key[key_name] = key_value + if not search_key: + return + + try: + existing_item = ddb_client.get_item(TableName=table_name, Key=search_key) + except ddb_client.exceptions.ClientError as e: + LOG.warning( + "Unable to get item from DynamoDB table '%s': %s", + table_name, + e, + ) + return + + return existing_item.get("Item") + + @staticmethod + def find_existing_items( + put_items_per_table: dict[ + TableName, list[PutRequest | DeleteRequest | Put | Update | Delete] + ], + account_id: str, + region_name: str, + endpoint_url: str, + ) -> BatchGetResponseMap: + from localstack.services.dynamodb.provider import ValidationException + + ddb_client = ItemFinder.get_ddb_local_client(account_id, region_name, endpoint_url) + + get_items_request: BatchGetRequestMap = {} + for table_name, put_item_reqs in put_items_per_table.items(): + table_schema = None + for put_item in put_item_reqs: + search_key = {} + if "Key" in put_item: + search_key = put_item["Key"] + else: + if not table_schema: + table_schema = SchemaExtractor.get_table_schema( + table_name, account_id, region_name + ) + + schemas = [table_schema["Table"]["KeySchema"]] + for index in table_schema["Table"].get("GlobalSecondaryIndexes", []): + # TODO + # schemas.append(index['KeySchema']) + pass + for schema in schemas: + for key in schema: + key_name = key["AttributeName"] + key_value = put_item["Item"].get(key_name) + if not key_value: + raise ValidationException( + "The provided key element does not match the schema" + ) + search_key[key_name] = key_value + if not search_key: + continue + table_keys = get_items_request.setdefault(table_name, {"Keys": []}) + table_keys["Keys"].append(search_key) + + try: + existing_items = ddb_client.batch_get_item(RequestItems=get_items_request) + except ddb_client.exceptions.ClientError as e: + LOG.warning( + "Unable to get items from DynamoDB tables '%s': %s", + list(put_items_per_table.values()), + e, + ) + return {} + + return existing_items.get("Responses", {}) + + @classmethod + def list_existing_items_for_statement( + cls, partiql_statement: str, account_id: str, region_name: str, endpoint_url: str + ) -> List: + table_name = extract_table_name_from_partiql_update(partiql_statement) + if not table_name: + return [] + all_items = cls.get_all_table_items( + account_id=account_id, + region_name=region_name, + table_name=table_name, + endpoint_url=endpoint_url, + ) + return all_items + + @staticmethod + def get_all_table_items( + account_id: str, region_name: str, table_name: str, endpoint_url: str + ) -> List: + ddb_client = ItemFinder.get_ddb_local_client(account_id, region_name, endpoint_url) + dynamodb_kwargs = 
{"TableName": table_name} + all_items = list_all_resources( + lambda kwargs: ddb_client.scan(**{**kwargs, **dynamodb_kwargs}), + last_token_attr_name="LastEvaluatedKey", + next_token_attr_name="ExclusiveStartKey", + list_attr_name="Items", + ) + return all_items + + +def extract_table_name_from_partiql_update(statement: str) -> Optional[str]: + regex = r"^\s*(UPDATE|INSERT\s+INTO|DELETE\s+FROM)\s+([^\s]+).*" + match = re.match(regex, statement, flags=re.IGNORECASE | re.MULTILINE) + return match and match.group(2) + + +def dynamize_value(value) -> dict: + """ + Take a scalar Python value or dict/list and return a dict consisting of the Amazon DynamoDB type specification and + the value that needs to be sent to Amazon DynamoDB. If the type of the value is not supported, raise a TypeError + """ + return TypeSerializer().serialize(value) + + +def de_dynamize_record(item: dict) -> dict: + """ + Return the given item in DynamoDB format parsed as regular dict object, i.e., convert + something like `{'foo': {'S': 'test'}, 'bar': {'N': 123}}` to `{'foo': 'test', 'bar': 123}`. + Note: This is the reverse operation of `dynamize_value(...)` above. + """ + deserializer = TypeDeserializer() + return {k: deserializer.deserialize(v) for k, v in item.items()} + + +def modify_ddblocal_arns(chain, context: RequestContext, response: Response): + """A service response handler that modifies the dynamodb backend response.""" + if response_content := response.get_data(as_text=True): + partition = get_partition(context.region) + + def _convert_arn(matchobj): + key = matchobj.group(1) + table_name = matchobj.group(2) + return f'{key}: "arn:{partition}:dynamodb:{context.region}:{context.account_id}:{table_name}"' + + # fix the table and latest stream ARNs (DynamoDBLocal hardcodes "ddblocal" as the region) + content_replaced = _ddb_local_arn_pattern.sub( + _convert_arn, + response_content, + ) + if context.service.service_name == "dynamodbstreams": + content_replaced = _ddb_local_region_pattern.sub( + f'"awsRegion": "{context.region}"', content_replaced + ) + if context.service_exception: + content_replaced = _ddb_local_exception_arn_pattern.sub( + rf"arn:{partition}:dynamodb:{context.region}:{context.account_id}:\g<1>", + content_replaced, + ) + + if content_replaced != response_content: + response.data = content_replaced + # make sure the service response is parsed again later + context.service_response = None + + # update x-amz-crc32 header required by some clients + response.headers["x-amz-crc32"] = crc32(response.data) & 0xFFFFFFFF + + +def change_region_in_ddb_stream_arn(arn: str, region: str) -> str: + """ + Modify the ARN or a DynamoDB Stream by changing its region. + We need this logic when dealing with global tables, as we create a stream only in the originating region, and we + need to modify the ARN to mimic the stream of the replica regions. 
+ """ + arn_data = parse_arn(arn) + if arn_data["region"] == region: + return arn + + if arn_data["service"] != "dynamodb": + raise Exception(f"{arn} is not a DynamoDB Streams ARN") + + # Note: a DynamoDB Streams ARN has the following pattern: + # arn:aws:dynamodb:::table//stream/ + resource_splits = arn_data["resource"].split("/") + if len(resource_splits) != 4: + raise DynamoDBStreamsResourceNotFoundException( + f"The format of the '{arn}' ARN is not valid" + ) + + return dynamodb_stream_arn( + table_name=resource_splits[1], + latest_stream_label=resource_splits[-1], + account_id=arn_data["account"], + region_name=region, + ) diff --git a/localstack/services/ses/__init__.py b/localstack-core/localstack/services/dynamodb/v2/__init__.py similarity index 100% rename from localstack/services/ses/__init__.py rename to localstack-core/localstack/services/dynamodb/v2/__init__.py diff --git a/localstack-core/localstack/services/dynamodb/v2/provider.py b/localstack-core/localstack/services/dynamodb/v2/provider.py new file mode 100644 index 0000000000000..f6dee3a68e854 --- /dev/null +++ b/localstack-core/localstack/services/dynamodb/v2/provider.py @@ -0,0 +1,1477 @@ +import copy +import json +import logging +import os +import random +import re +import threading +import time +from contextlib import contextmanager +from datetime import datetime +from operator import itemgetter +from typing import Dict, Optional + +import requests +import werkzeug + +from localstack import config +from localstack.aws import handlers +from localstack.aws.api import ( + CommonServiceException, + RequestContext, + ServiceRequest, + ServiceResponse, + handler, +) +from localstack.aws.api.dynamodb import ( + BatchExecuteStatementOutput, + BatchGetItemOutput, + BatchGetRequestMap, + BatchWriteItemInput, + BatchWriteItemOutput, + BillingMode, + ContinuousBackupsDescription, + ContinuousBackupsStatus, + CreateGlobalTableOutput, + CreateTableInput, + CreateTableOutput, + DeleteItemInput, + DeleteItemOutput, + DeleteRequest, + DeleteTableOutput, + DescribeContinuousBackupsOutput, + DescribeGlobalTableOutput, + DescribeKinesisStreamingDestinationOutput, + DescribeTableOutput, + DescribeTimeToLiveOutput, + DestinationStatus, + DynamodbApi, + EnableKinesisStreamingConfiguration, + ExecuteStatementInput, + ExecuteStatementOutput, + ExecuteTransactionInput, + ExecuteTransactionOutput, + GetItemInput, + GetItemOutput, + GlobalTableAlreadyExistsException, + GlobalTableNotFoundException, + KinesisStreamingDestinationOutput, + ListGlobalTablesOutput, + ListTablesInputLimit, + ListTablesOutput, + ListTagsOfResourceOutput, + NextTokenString, + PartiQLBatchRequest, + PointInTimeRecoveryDescription, + PointInTimeRecoverySpecification, + PointInTimeRecoveryStatus, + PositiveIntegerObject, + ProvisionedThroughputExceededException, + PutItemInput, + PutItemOutput, + PutRequest, + QueryInput, + QueryOutput, + RegionName, + ReplicaDescription, + ReplicaList, + ReplicaStatus, + ReplicaUpdateList, + ResourceArnString, + ResourceInUseException, + ResourceNotFoundException, + ReturnConsumedCapacity, + ScanInput, + ScanOutput, + StreamArn, + TableDescription, + TableName, + TagKeyList, + TagList, + TimeToLiveSpecification, + TransactGetItemList, + TransactGetItemsOutput, + TransactWriteItemsInput, + TransactWriteItemsOutput, + UpdateContinuousBackupsOutput, + UpdateGlobalTableOutput, + UpdateItemInput, + UpdateItemOutput, + UpdateTableInput, + UpdateTableOutput, + UpdateTimeToLiveOutput, + WriteRequest, +) +from localstack.aws.connect import 
connect_to +from localstack.constants import ( + AUTH_CREDENTIAL_REGEX, + AWS_REGION_US_EAST_1, + INTERNAL_AWS_SECRET_ACCESS_KEY, +) +from localstack.http import Request, Response, route +from localstack.services.dynamodb.models import ( + DynamoDBStore, + StreamRecord, + dynamodb_stores, +) +from localstack.services.dynamodb.server import DynamodbServer +from localstack.services.dynamodb.utils import ( + SchemaExtractor, + get_ddb_access_key, + modify_ddblocal_arns, +) +from localstack.services.dynamodbstreams.models import dynamodbstreams_stores +from localstack.services.edge import ROUTER +from localstack.services.plugins import ServiceLifecycleHook +from localstack.state import AssetDirectory, StateVisitor +from localstack.utils.aws import arns +from localstack.utils.aws.arns import ( + extract_account_id_from_arn, + extract_region_from_arn, + get_partition, +) +from localstack.utils.aws.aws_stack import get_valid_regions_for_service +from localstack.utils.aws.request_context import ( + extract_account_id_from_headers, + extract_region_from_headers, +) +from localstack.utils.collections import select_attributes, select_from_typed_dict +from localstack.utils.common import short_uid, to_bytes +from localstack.utils.json import canonical_json +from localstack.utils.scheduler import Scheduler +from localstack.utils.strings import long_uid, to_str +from localstack.utils.threads import FuncThread, start_thread + +# set up logger +LOG = logging.getLogger(__name__) + +# action header prefix +ACTION_PREFIX = "DynamoDB_20120810." + +# list of actions subject to throughput limitations +READ_THROTTLED_ACTIONS = [ + "GetItem", + "Query", + "Scan", + "TransactGetItems", + "BatchGetItem", +] +WRITE_THROTTLED_ACTIONS = [ + "PutItem", + "BatchWriteItem", + "UpdateItem", + "DeleteItem", + "TransactWriteItems", +] +THROTTLED_ACTIONS = READ_THROTTLED_ACTIONS + WRITE_THROTTLED_ACTIONS + +MANAGED_KMS_KEYS = {} + + +def dynamodb_table_exists(table_name: str, client=None) -> bool: + client = client or connect_to().dynamodb + paginator = client.get_paginator("list_tables") + pages = paginator.paginate(PaginationConfig={"PageSize": 100}) + table_name = to_str(table_name) + return any(table_name in page["TableNames"] for page in pages) + + +class SSEUtils: + """Utils for server-side encryption (SSE)""" + + @classmethod + def get_sse_kms_managed_key(cls, account_id: str, region_name: str): + from localstack.services.kms import provider + + existing_key = MANAGED_KMS_KEYS.get(region_name) + if existing_key: + return existing_key + kms_client = connect_to( + aws_access_key_id=account_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + region_name=region_name, + ).kms + key_data = kms_client.create_key( + Description="Default key that protects my DynamoDB data when no other key is defined" + ) + key_id = key_data["KeyMetadata"]["KeyId"] + + provider.set_key_managed(key_id, account_id, region_name) + MANAGED_KMS_KEYS[region_name] = key_id + return key_id + + @classmethod + def get_sse_description(cls, account_id: str, region_name: str, data): + if data.get("Enabled"): + kms_master_key_id = data.get("KMSMasterKeyId") + if not kms_master_key_id: + # this is of course not the actual key for dynamodb, just a better, since existing, mock + kms_master_key_id = cls.get_sse_kms_managed_key(account_id, region_name) + kms_master_key_id = arns.kms_key_arn(kms_master_key_id, account_id, region_name) + return { + "Status": "ENABLED", + "SSEType": "KMS", # no other value is allowed here + "KMSMasterKeyArn": 
kms_master_key_id, + } + return {} + + +class ValidationException(CommonServiceException): + def __init__(self, message: str): + super().__init__(code="ValidationException", status_code=400, message=message) + + +def get_store(account_id: str, region_name: str) -> DynamoDBStore: + # special case: AWS NoSQL Workbench sends "localhost" as region - replace with proper region here + region_name = DynamoDBProvider.ddb_region_name(region_name) + return dynamodb_stores[account_id][region_name] + + +@contextmanager +def modify_context_region(context: RequestContext, region: str): + """ + Context manager that modifies the region of a `RequestContext`. At the exit, the context is restored to its + original state. + + :param context: the context to modify + :param region: the modified region + :return: a modified `RequestContext` + """ + original_region = context.region + original_authorization = context.request.headers.get("Authorization") + + key = get_ddb_access_key(context.account_id, region) + + context.region = region + context.request.headers["Authorization"] = re.sub( + AUTH_CREDENTIAL_REGEX, + rf"Credential={key}/\2/{region}/\4/", + original_authorization or "", + flags=re.IGNORECASE, + ) + + try: + yield context + except Exception: + raise + finally: + # revert the original context + context.region = original_region + context.request.headers["Authorization"] = original_authorization + + +class DynamoDBDeveloperEndpoints: + """ + Developer endpoints for DynamoDB + DELETE /_aws/dynamodb/expired - delete expired items from tables with TTL enabled; return the number of expired + items deleted + """ + + @route("/_aws/dynamodb/expired", methods=["DELETE"]) + def delete_expired_messages(self, _: Request): + no_expired_items = delete_expired_items() + return {"ExpiredItems": no_expired_items} + + +def delete_expired_items() -> int: + """ + This utility function iterates over all stores, looks for tables with TTL enabled, + scans those tables, and deletes expired items.
+ """ + no_expired_items = 0 + for account_id, region_name, state in dynamodb_stores.iter_stores(): + ttl_specs = state.ttl_specifications + client = connect_to(aws_access_key_id=account_id, region_name=region_name).dynamodb + for table_name, ttl_spec in ttl_specs.items(): + if ttl_spec.get("Enabled", False): + attribute_name = ttl_spec.get("AttributeName") + current_time = int(datetime.now().timestamp()) + try: + result = client.scan( + TableName=table_name, + FilterExpression="#ttl <= :threshold", + ExpressionAttributeValues={":threshold": {"N": str(current_time)}}, + ExpressionAttributeNames={"#ttl": attribute_name}, + ) + items_to_delete = result.get("Items", []) + no_expired_items += len(items_to_delete) + table_description = client.describe_table(TableName=table_name) + partition_key, range_key = _get_hash_and_range_key(table_description) + keys_to_delete = [ + {partition_key: item.get(partition_key)} + if range_key is None + else { + partition_key: item.get(partition_key), + range_key: item.get(range_key), + } + for item in items_to_delete + ] + delete_requests = [{"DeleteRequest": {"Key": key}} for key in keys_to_delete] + for i in range(0, len(delete_requests), 25): + batch = delete_requests[i : i + 25] + client.batch_write_item(RequestItems={table_name: batch}) + except Exception as e: + LOG.warning( + "An error occurred when deleting expired items from table %s: %s", + table_name, + e, + ) + return no_expired_items + + +def _get_hash_and_range_key(table_description: DescribeTableOutput) -> tuple[str | None, str | None]: + key_schema = table_description.get("Table", {}).get("KeySchema", []) + hash_key, range_key = None, None + for key in key_schema: + if key["KeyType"] == "HASH": + hash_key = key["AttributeName"] + if key["KeyType"] == "RANGE": + range_key = key["AttributeName"] + return hash_key, range_key + + +class ExpiredItemsWorker: + """A worker that periodically computes and deletes expired items from DynamoDB tables""" + + def __init__(self) -> None: + super().__init__() + self.scheduler = Scheduler() + self.thread: Optional[FuncThread] = None + self.mutex = threading.RLock() + + def start(self): + with self.mutex: + if self.thread: + return + + self.scheduler = Scheduler() + self.scheduler.schedule( + delete_expired_items, period=60 * 60 + ) # the background process seems slow on AWS + + def _run(*_args): + self.scheduler.run() + + self.thread = start_thread(_run, name="ddb-remove-expired-items") + + def stop(self): + with self.mutex: + if self.scheduler: + self.scheduler.close() + + if self.thread: + self.thread.stop() + + self.thread = None + self.scheduler = None + + +class DynamoDBProvider(DynamodbApi, ServiceLifecycleHook): + server: DynamodbServer + """The instance of the server managing the instance of DynamoDB local""" + + def __init__(self): + self.server = self._new_dynamodb_server() + self._expired_items_worker = ExpiredItemsWorker() + self._router_rules = [] + + def on_before_start(self): + self.server.start_dynamodb() + if config.DYNAMODB_REMOVE_EXPIRED_ITEMS: + self._expired_items_worker.start() + self._router_rules = ROUTER.add(DynamoDBDeveloperEndpoints()) + + def on_before_stop(self): + self._expired_items_worker.stop() + ROUTER.remove(self._router_rules) + + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(dynamodb_stores) + visitor.visit(dynamodbstreams_stores) + visitor.visit(AssetDirectory(self.service, os.path.join(config.dirs.data, self.service))) + + def on_before_state_reset(self): + self.server.stop_dynamodb() + + def
on_before_state_load(self): + self.server.stop_dynamodb() + + def on_after_state_reset(self): + self.server.start_dynamodb() + + @staticmethod + def _new_dynamodb_server() -> DynamodbServer: + return DynamodbServer.get() + + def on_after_state_load(self): + self.server.start_dynamodb() + + def on_after_init(self): + # add response processor specific to ddblocal + handlers.modify_service_response.append(self.service, modify_ddblocal_arns) + + # routes for the shell ui + ROUTER.add( + path="/shell", + endpoint=self.handle_shell_ui_redirect, + methods=["GET"], + ) + ROUTER.add( + path="/shell/", + endpoint=self.handle_shell_ui_request, + ) + + def _forward_request( + self, + context: RequestContext, + region: str | None, + service_request: ServiceRequest | None = None, + ) -> ServiceResponse: + """ + Modify the context region and then forward request to DynamoDB Local. + + This is used for operations impacted by global tables. In LocalStack, a single copy of global table + is kept, and any requests to replicated tables are forwarded to this original table. + """ + if region: + with modify_context_region(context, region): + return self.forward_request(context, service_request=service_request) + return self.forward_request(context, service_request=service_request) + + def forward_request( + self, context: RequestContext, service_request: ServiceRequest = None + ) -> ServiceResponse: + """ + Forward a request to DynamoDB Local. + """ + self.check_provisioned_throughput(context.operation.name) + self.prepare_request_headers( + context.request.headers, account_id=context.account_id, region_name=context.region + ) + return self.server.proxy(context, service_request) + + def get_forward_url(self, account_id: str, region_name: str) -> str: + """Return the URL of the backend DynamoDBLocal server to forward requests to""" + return self.server.url + + def handle_shell_ui_redirect(self, request: werkzeug.Request) -> Response: + headers = {"Refresh": f"0; url={config.external_service_url()}/shell/index.html"} + return Response("", headers=headers) + + def handle_shell_ui_request(self, request: werkzeug.Request, req_path: str) -> Response: + # TODO: "DynamoDB Local Web Shell was deprecated with version 1.16.X and is not available any + # longer from 1.17.X to latest. There are no immediate plans for a new Web Shell to be introduced." 
+ # -> keeping this for now, to allow configuring custom installs; should consider removing it in the future + # https://repost.aws/questions/QUHyIzoEDqQ3iOKlUEp1LPWQ#ANdBm9Nz9TRf6VqR3jZtcA1g + req_path = f"/{req_path}" if not req_path.startswith("/") else req_path + account_id = extract_account_id_from_headers(request.headers) + region_name = extract_region_from_headers(request.headers) + url = f"{self.get_forward_url(account_id, region_name)}/shell{req_path}" + result = requests.request( + method=request.method, url=url, headers=request.headers, data=request.data + ) + return Response(result.content, headers=dict(result.headers), status=result.status_code) + + # + # Table ops + # + + @handler("CreateTable", expand=False) + def create_table( + self, + context: RequestContext, + create_table_input: CreateTableInput, + ) -> CreateTableOutput: + table_name = create_table_input["TableName"] + + # Return this specific error message to keep parity with AWS + if self.table_exists(context.account_id, context.region, table_name): + raise ResourceInUseException(f"Table already exists: {table_name}") + + billing_mode = create_table_input.get("BillingMode") + provisioned_throughput = create_table_input.get("ProvisionedThroughput") + if billing_mode == BillingMode.PAY_PER_REQUEST and provisioned_throughput is not None: + raise ValidationException( + "One or more parameter values were invalid: Neither ReadCapacityUnits nor WriteCapacityUnits can be " + "specified when BillingMode is PAY_PER_REQUEST" + ) + + result = self.forward_request(context) + + table_description = result["TableDescription"] + table_description["TableArn"] = table_arn = self.fix_table_arn( + context.account_id, context.region, table_description["TableArn"] + ) + + backend = get_store(context.account_id, context.region) + backend.table_definitions[table_name] = table_definitions = dict(create_table_input) + backend.TABLE_REGION[table_name] = context.region + + if "TableId" not in table_definitions: + table_definitions["TableId"] = long_uid() + + if "SSESpecification" in table_definitions: + sse_specification = table_definitions.pop("SSESpecification") + table_definitions["SSEDescription"] = SSEUtils.get_sse_description( + context.account_id, context.region, sse_specification + ) + + if table_definitions: + table_content = result.get("Table", {}) + table_content.update(table_definitions) + table_description.update(table_content) + + if "TableClass" in table_definitions: + table_class = table_description.pop("TableClass", None) or table_definitions.pop( + "TableClass" + ) + table_description["TableClassSummary"] = {"TableClass": table_class} + + if "GlobalSecondaryIndexes" in table_description: + gsis = copy.deepcopy(table_description["GlobalSecondaryIndexes"]) + # update the different values, as DynamoDB-local v2 has a regression around GSI and does not return anything + # anymore + for gsi in gsis: + index_name = gsi.get("IndexName", "") + gsi.update( + { + "IndexArn": f"{table_arn}/index/{index_name}", + "IndexSizeBytes": 0, + "IndexStatus": "ACTIVE", + "ItemCount": 0, + } + ) + gsi_provisioned_throughput = gsi.setdefault("ProvisionedThroughput", {}) + gsi_provisioned_throughput["NumberOfDecreasesToday"] = 0 + + if billing_mode == BillingMode.PAY_PER_REQUEST: + gsi_provisioned_throughput["ReadCapacityUnits"] = 0 + gsi_provisioned_throughput["WriteCapacityUnits"] = 0 + + # table_definitions["GlobalSecondaryIndexes"] = gsis + table_description["GlobalSecondaryIndexes"] = gsis + + if "ProvisionedThroughput" in table_description: + if 
"NumberOfDecreasesToday" not in table_description["ProvisionedThroughput"]: + table_description["ProvisionedThroughput"]["NumberOfDecreasesToday"] = 0 + + tags = table_definitions.pop("Tags", []) + if tags: + get_store(context.account_id, context.region).TABLE_TAGS[table_arn] = { + tag["Key"]: tag["Value"] for tag in tags + } + + # remove invalid attributes from result + table_description.pop("Tags", None) + table_description.pop("BillingMode", None) + + return result + + def delete_table( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DeleteTableOutput: + global_table_region = self.get_global_table_region(context, table_name) + + # Limitation note: On AWS, for a replicated table, if the source table is deleted, the replicated tables continue to exist. + # This is not the case for LocalStack, where all replicated tables will also be removed if source is deleted. + + result = self._forward_request(context=context, region=global_table_region) + + table_arn = result.get("TableDescription", {}).get("TableArn") + table_arn = self.fix_table_arn(context.account_id, context.region, table_arn) + + store = get_store(context.account_id, context.region) + store.TABLE_TAGS.pop(table_arn, None) + store.REPLICAS.pop(table_name, None) + + return result + + def describe_table( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DescribeTableOutput: + global_table_region = self.get_global_table_region(context, table_name) + + result = self._forward_request(context=context, region=global_table_region) + table_description: TableDescription = result["Table"] + + # Update table properties from LocalStack stores + if table_props := get_store(context.account_id, context.region).table_properties.get( + table_name + ): + table_description.update(table_props) + + store = get_store(context.account_id, context.region) + + # Update replication details + replicas: Dict[RegionName, ReplicaDescription] = store.REPLICAS.get(table_name, {}) + + replica_description_list = [] + + if global_table_region != context.region: + replica_description_list.append( + ReplicaDescription( + RegionName=global_table_region, ReplicaStatus=ReplicaStatus.ACTIVE + ) + ) + + for replica_region, replica_description in replicas.items(): + # The replica in the region being queried must not be returned + if replica_region != context.region: + replica_description_list.append(replica_description) + + if replica_description_list: + table_description.update({"Replicas": replica_description_list}) + + # update only TableId and SSEDescription if present + if table_definitions := store.table_definitions.get(table_name): + for key in ["TableId", "SSEDescription"]: + if table_definitions.get(key): + table_description[key] = table_definitions[key] + if "TableClass" in table_definitions: + table_description["TableClassSummary"] = { + "TableClass": table_definitions["TableClass"] + } + + if "GlobalSecondaryIndexes" in table_description: + for gsi in table_description["GlobalSecondaryIndexes"]: + default_values = { + "NumberOfDecreasesToday": 0, + "ReadCapacityUnits": 0, + "WriteCapacityUnits": 0, + } + # even if the billing mode is PAY_PER_REQUEST, AWS returns the Read and Write Capacity Units + # Terraform depends on this parity for update operations + gsi["ProvisionedThroughput"] = default_values | gsi.get("ProvisionedThroughput", {}) + + return DescribeTableOutput( + Table=select_from_typed_dict(TableDescription, table_description) + ) + + @handler("UpdateTable", expand=False) + def update_table( + self, context: 
RequestContext, update_table_input: UpdateTableInput + ) -> UpdateTableOutput: + table_name = update_table_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + + try: + self._forward_request(context=context, region=global_table_region) + except CommonServiceException as exc: + # DynamoDBLocal refuses to update certain table params and raises. + # But we still need to update this info in LocalStack stores + if not (exc.code == "ValidationException" and exc.message == "Nothing to update"): + raise + + if table_class := update_table_input.get("TableClass"): + table_definitions = get_store( + context.account_id, context.region + ).table_definitions.setdefault(table_name, {}) + table_definitions["TableClass"] = table_class + + if replica_updates := update_table_input.get("ReplicaUpdates"): + store = get_store(context.account_id, global_table_region) + + # Dict with source region to set of replicated regions + replicas: Dict[RegionName, ReplicaDescription] = store.REPLICAS.get(table_name, {}) + + for replica_update in replica_updates: + for key, details in replica_update.items(): + # Replicated region + target_region = details.get("RegionName") + + # Check if replicated region is valid + if target_region not in get_valid_regions_for_service("dynamodb"): + raise ValidationException(f"Region {target_region} is not supported") + + match key: + case "Create": + if target_region in replicas.keys(): + raise ValidationException( + f"Failed to create a the new replica of table with name: '{table_name}' because one or more replicas already existed as tables." + ) + replicas[target_region] = ReplicaDescription( + RegionName=target_region, + KMSMasterKeyId=details.get("KMSMasterKeyId"), + ProvisionedThroughputOverride=details.get( + "ProvisionedThroughputOverride" + ), + GlobalSecondaryIndexes=details.get("GlobalSecondaryIndexes"), + ReplicaStatus=ReplicaStatus.ACTIVE, + ) + case "Delete": + try: + replicas.pop(target_region) + except KeyError: + raise ValidationException( + "Update global table operation failed because one or more replicas were not part of the global table." 
+ ) + + store.REPLICAS[table_name] = replicas + + # update response content + SchemaExtractor.invalidate_table_schema( + table_name, context.account_id, global_table_region + ) + + schema = SchemaExtractor.get_table_schema( + table_name, context.account_id, global_table_region + ) + + if sse_specification_input := update_table_input.get("SSESpecification"): + # If SSESpecification is changed, update store and return the 'UPDATING' status in the response + table_definition = get_store( + context.account_id, context.region + ).table_definitions.setdefault(table_name, {}) + if not sse_specification_input["Enabled"]: + table_definition.pop("SSEDescription", None) + schema["Table"]["SSEDescription"]["Status"] = "UPDATING" + + return UpdateTableOutput(TableDescription=schema["Table"]) + + SchemaExtractor.invalidate_table_schema(table_name, context.account_id, global_table_region) + + schema = SchemaExtractor.get_table_schema( + table_name, context.account_id, global_table_region + ) + + return UpdateTableOutput(TableDescription=schema["Table"]) + + def list_tables( + self, + context: RequestContext, + exclusive_start_table_name: TableName = None, + limit: ListTablesInputLimit = None, + **kwargs, + ) -> ListTablesOutput: + response = self.forward_request(context) + + # Add replicated tables + replicas = get_store(context.account_id, context.region).REPLICAS + for replicated_table, replications in replicas.items(): + for replica_region, replica_description in replications.items(): + if context.region == replica_region: + response["TableNames"].append(replicated_table) + + return response + + # + # Item ops + # + + @handler("PutItem", expand=False) + def put_item(self, context: RequestContext, put_item_input: PutItemInput) -> PutItemOutput: + table_name = put_item_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + + return self._forward_request(context=context, region=global_table_region) + + @handler("DeleteItem", expand=False) + def delete_item( + self, + context: RequestContext, + delete_item_input: DeleteItemInput, + ) -> DeleteItemOutput: + table_name = delete_item_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + + return self._forward_request(context=context, region=global_table_region) + + @handler("UpdateItem", expand=False) + def update_item( + self, + context: RequestContext, + update_item_input: UpdateItemInput, + ) -> UpdateItemOutput: + # TODO: UpdateItem is harder to use ReturnValues for Streams, because it needs the Before and After images. 
+ table_name = update_item_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + + return self._forward_request(context=context, region=global_table_region) + + @handler("GetItem", expand=False) + def get_item(self, context: RequestContext, get_item_input: GetItemInput) -> GetItemOutput: + table_name = get_item_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + result = self._forward_request(context=context, region=global_table_region) + self.fix_consumed_capacity(get_item_input, result) + return result + + # + # Queries + # + + @handler("Query", expand=False) + def query(self, context: RequestContext, query_input: QueryInput) -> QueryOutput: + index_name = query_input.get("IndexName") + if index_name: + if not is_index_query_valid(context.account_id, context.region, query_input): + raise ValidationException( + "One or more parameter values were invalid: Select type ALL_ATTRIBUTES " + "is not supported for global secondary index id-index because its projection " + "type is not ALL", + ) + + table_name = query_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + result = self._forward_request(context=context, region=global_table_region) + self.fix_consumed_capacity(query_input, result) + return result + + @handler("Scan", expand=False) + def scan(self, context: RequestContext, scan_input: ScanInput) -> ScanOutput: + table_name = scan_input["TableName"] + global_table_region = self.get_global_table_region(context, table_name) + result = self._forward_request(context=context, region=global_table_region) + return result + + # + # Batch ops + # + + @handler("BatchWriteItem", expand=False) + def batch_write_item( + self, + context: RequestContext, + batch_write_item_input: BatchWriteItemInput, + ) -> BatchWriteItemOutput: + # TODO: add global table support + # UnprocessedItems should have the same format as RequestItems + unprocessed_items = {} + request_items = batch_write_item_input["RequestItems"] + + for table_name, items in sorted(request_items.items(), key=itemgetter(0)): + for request in items: + request: WriteRequest + for key, inner_request in request.items(): + inner_request: PutRequest | DeleteRequest + if self.should_throttle("BatchWriteItem"): + unprocessed_items_for_table = unprocessed_items.setdefault(table_name, []) + unprocessed_items_for_table.append(request) + + try: + result = self.forward_request(context) + except CommonServiceException as e: + # TODO: validate if DynamoDB still raises `One of the required keys was not given a value` + # for now, replace with the schema error validation + if e.message == "One of the required keys was not given a value": + raise ValidationException("The provided key element does not match the schema") + raise e + + # TODO: should unprocessed item which have mutated by `prepare_batch_write_item_records` be returned + for table_name, unprocessed_items_in_table in unprocessed_items.items(): + unprocessed: dict = result["UnprocessedItems"] + result_unprocessed_table = unprocessed.setdefault(table_name, []) + + # add the Unprocessed items to the response + # TODO: check before if the same request has not been Unprocessed by DDB local already? + # those might actually have been processed? shouldn't we remove them from the proxied request? 
+ for request in unprocessed_items_in_table: + result_unprocessed_table.append(request) + + # remove any table entry if it's empty + result["UnprocessedItems"] = {k: v for k, v in unprocessed.items() if v} + + return result + + @handler("BatchGetItem") + def batch_get_item( + self, + context: RequestContext, + request_items: BatchGetRequestMap, + return_consumed_capacity: ReturnConsumedCapacity = None, + **kwargs, + ) -> BatchGetItemOutput: + # TODO: add global table support + return self.forward_request(context) + + # + # Transactions + # + + @handler("TransactWriteItems", expand=False) + def transact_write_items( + self, + context: RequestContext, + transact_write_items_input: TransactWriteItemsInput, + ) -> TransactWriteItemsOutput: + # TODO: add global table support + client_token: str | None = transact_write_items_input.get("ClientRequestToken") + + if client_token: + # we sort the payload since identical payload but with different order could cause + # IdempotentParameterMismatchException error if a client token is provided + context.request.data = to_bytes(canonical_json(json.loads(context.request.data))) + + return self.forward_request(context) + + @handler("TransactGetItems", expand=False) + def transact_get_items( + self, + context: RequestContext, + transact_items: TransactGetItemList, + return_consumed_capacity: ReturnConsumedCapacity = None, + ) -> TransactGetItemsOutput: + return self.forward_request(context) + + @handler("ExecuteTransaction", expand=False) + def execute_transaction( + self, context: RequestContext, execute_transaction_input: ExecuteTransactionInput + ) -> ExecuteTransactionOutput: + result = self.forward_request(context) + return result + + @handler("ExecuteStatement", expand=False) + def execute_statement( + self, + context: RequestContext, + execute_statement_input: ExecuteStatementInput, + ) -> ExecuteStatementOutput: + # TODO: this operation is still really slow with streams enabled + # find a way to make it better, same way as the other operations, by using returnvalues + # see https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ql-reference.update.html + + # We found out that 'Parameters' can be an empty list when the request comes from the AWS JS client. 
+ if execute_statement_input.get("Parameters", None) == []: # noqa + raise ValidationException( + "1 validation error detected: Value '[]' at 'parameters' failed to satisfy constraint: Member must have length greater than or equal to 1" + ) + return self.forward_request(context) + + # + # Tags + # + + def tag_resource( + self, context: RequestContext, resource_arn: ResourceArnString, tags: TagList, **kwargs + ) -> None: + table_tags = get_store(context.account_id, context.region).TABLE_TAGS + if resource_arn not in table_tags: + table_tags[resource_arn] = {} + table_tags[resource_arn].update({tag["Key"]: tag["Value"] for tag in tags}) + + def untag_resource( + self, + context: RequestContext, + resource_arn: ResourceArnString, + tag_keys: TagKeyList, + **kwargs, + ) -> None: + for tag_key in tag_keys or []: + get_store(context.account_id, context.region).TABLE_TAGS.get(resource_arn, {}).pop( + tag_key, None + ) + + def list_tags_of_resource( + self, + context: RequestContext, + resource_arn: ResourceArnString, + next_token: NextTokenString = None, + **kwargs, + ) -> ListTagsOfResourceOutput: + result = [ + {"Key": k, "Value": v} + for k, v in get_store(context.account_id, context.region) + .TABLE_TAGS.get(resource_arn, {}) + .items() + ] + return ListTagsOfResourceOutput(Tags=result) + + # + # TTLs + # + + def describe_time_to_live( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DescribeTimeToLiveOutput: + if not self.table_exists(context.account_id, context.region, table_name): + raise ResourceNotFoundException( + f"Requested resource not found: Table: {table_name} not found" + ) + + backend = get_store(context.account_id, context.region) + ttl_spec = backend.ttl_specifications.get(table_name) + + result = {"TimeToLiveStatus": "DISABLED"} + if ttl_spec: + if ttl_spec.get("Enabled"): + ttl_status = "ENABLED" + else: + ttl_status = "DISABLED" + result = { + "AttributeName": ttl_spec.get("AttributeName"), + "TimeToLiveStatus": ttl_status, + } + + return DescribeTimeToLiveOutput(TimeToLiveDescription=result) + + def update_time_to_live( + self, + context: RequestContext, + table_name: TableName, + time_to_live_specification: TimeToLiveSpecification, + **kwargs, + ) -> UpdateTimeToLiveOutput: + if not self.table_exists(context.account_id, context.region, table_name): + raise ResourceNotFoundException( + f"Requested resource not found: Table: {table_name} not found" + ) + + # TODO: TTL status is maintained/mocked but no real expiry is happening for items + backend = get_store(context.account_id, context.region) + backend.ttl_specifications[table_name] = time_to_live_specification + return UpdateTimeToLiveOutput(TimeToLiveSpecification=time_to_live_specification) + + # + # Global tables + # + + def create_global_table( + self, + context: RequestContext, + global_table_name: TableName, + replication_group: ReplicaList, + **kwargs, + ) -> CreateGlobalTableOutput: + global_tables: Dict = get_store(context.account_id, context.region).GLOBAL_TABLES + if global_table_name in global_tables: + raise GlobalTableAlreadyExistsException("Global table with this name already exists") + replication_group = [grp.copy() for grp in replication_group or []] + data = {"GlobalTableName": global_table_name, "ReplicationGroup": replication_group} + global_tables[global_table_name] = data + for group in replication_group: + group["ReplicaStatus"] = "ACTIVE" + group["ReplicaStatusDescription"] = "Replica active" + return CreateGlobalTableOutput(GlobalTableDescription=data) + + def 
describe_global_table( + self, context: RequestContext, global_table_name: TableName, **kwargs + ) -> DescribeGlobalTableOutput: + details = get_store(context.account_id, context.region).GLOBAL_TABLES.get(global_table_name) + if not details: + raise GlobalTableNotFoundException("Global table with this name does not exist") + return DescribeGlobalTableOutput(GlobalTableDescription=details) + + def list_global_tables( + self, + context: RequestContext, + exclusive_start_global_table_name: TableName = None, + limit: PositiveIntegerObject = None, + region_name: RegionName = None, + **kwargs, + ) -> ListGlobalTablesOutput: + # TODO: add paging support + result = [ + select_attributes(tab, ["GlobalTableName", "ReplicationGroup"]) + for tab in get_store(context.account_id, context.region).GLOBAL_TABLES.values() + ] + return ListGlobalTablesOutput(GlobalTables=result) + + def update_global_table( + self, + context: RequestContext, + global_table_name: TableName, + replica_updates: ReplicaUpdateList, + **kwargs, + ) -> UpdateGlobalTableOutput: + details = get_store(context.account_id, context.region).GLOBAL_TABLES.get(global_table_name) + if not details: + raise GlobalTableNotFoundException("Global table with this name does not exist") + for update in replica_updates or []: + repl_group = details["ReplicationGroup"] + # delete existing + delete = update.get("Delete") + if delete: + details["ReplicationGroup"] = [ + g for g in repl_group if g["RegionName"] != delete["RegionName"] + ] + # create new + create = update.get("Create") + if create: + exists = [g for g in repl_group if g["RegionName"] == create["RegionName"]] + if exists: + continue + new_group = { + "RegionName": create["RegionName"], + "ReplicaStatus": "ACTIVE", + "ReplicaStatusDescription": "Replica active", + } + details["ReplicationGroup"].append(new_group) + return UpdateGlobalTableOutput(GlobalTableDescription=details) + + # + # Kinesis Streaming + # + + def enable_kinesis_streaming_destination( + self, + context: RequestContext, + table_name: TableName, + stream_arn: StreamArn, + enable_kinesis_streaming_configuration: EnableKinesisStreamingConfiguration = None, + **kwargs, + ) -> KinesisStreamingDestinationOutput: + self.ensure_table_exists(context.account_id, context.region, table_name) + + if not kinesis_stream_exists(stream_arn=stream_arn): + raise ValidationException("User does not have a permission to use kinesis stream") + + table_def = get_store(context.account_id, context.region).table_definitions.setdefault( + table_name, {} + ) + + dest_status = table_def.get("KinesisDataStreamDestinationStatus") + if dest_status not in ["DISABLED", "ENABLE_FAILED", None]: + raise ValidationException( + "Table is not in a valid state to enable Kinesis Streaming " + "Destination:EnableKinesisStreamingDestination must be DISABLED or ENABLE_FAILED " + "to perform ENABLE operation." 
+            )
+
+        table_def["KinesisDataStreamDestinations"] = (
+            table_def.get("KinesisDataStreamDestinations") or []
+        )
+        # remove the stream destination if already present
+        table_def["KinesisDataStreamDestinations"] = [
+            t for t in table_def["KinesisDataStreamDestinations"] if t["StreamArn"] != stream_arn
+        ]
+        # append the active stream destination at the end of the list
+        table_def["KinesisDataStreamDestinations"].append(
+            {
+                "DestinationStatus": DestinationStatus.ACTIVE,
+                "DestinationStatusDescription": "Stream is active",
+                "StreamArn": stream_arn,
+            }
+        )
+        table_def["KinesisDataStreamDestinationStatus"] = DestinationStatus.ACTIVE
+        return KinesisStreamingDestinationOutput(
+            DestinationStatus=DestinationStatus.ACTIVE, StreamArn=stream_arn, TableName=table_name
+        )
+
+    def disable_kinesis_streaming_destination(
+        self,
+        context: RequestContext,
+        table_name: TableName,
+        stream_arn: StreamArn,
+        enable_kinesis_streaming_configuration: EnableKinesisStreamingConfiguration = None,
+        **kwargs,
+    ) -> KinesisStreamingDestinationOutput:
+        self.ensure_table_exists(context.account_id, context.region, table_name)
+        if not kinesis_stream_exists(stream_arn):
+            raise ValidationException(
+                "User does not have a permission to use kinesis stream",
+            )
+
+        table_def = get_store(context.account_id, context.region).table_definitions.setdefault(
+            table_name, {}
+        )
+
+        stream_destinations = table_def.get("KinesisDataStreamDestinations")
+        if stream_destinations:
+            if table_def["KinesisDataStreamDestinationStatus"] == DestinationStatus.ACTIVE:
+                for dest in stream_destinations:
+                    if (
+                        dest["StreamArn"] == stream_arn
+                        and dest["DestinationStatus"] == DestinationStatus.ACTIVE
+                    ):
+                        dest["DestinationStatus"] = DestinationStatus.DISABLED
+                        dest["DestinationStatusDescription"] = "Stream is disabled"
+                        table_def["KinesisDataStreamDestinationStatus"] = DestinationStatus.DISABLED
+                        return KinesisStreamingDestinationOutput(
+                            DestinationStatus=DestinationStatus.DISABLED,
+                            StreamArn=stream_arn,
+                            TableName=table_name,
+                        )
+        raise ValidationException(
+            "Table is not in a valid state to disable Kinesis Streaming Destination:"
+            "DisableKinesisStreamingDestination must be ACTIVE to perform DISABLE operation."
+ ) + + def describe_kinesis_streaming_destination( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DescribeKinesisStreamingDestinationOutput: + self.ensure_table_exists(context.account_id, context.region, table_name) + + table_def = ( + get_store(context.account_id, context.region).table_definitions.get(table_name) or {} + ) + + stream_destinations = table_def.get("KinesisDataStreamDestinations") or [] + return DescribeKinesisStreamingDestinationOutput( + KinesisDataStreamDestinations=stream_destinations, TableName=table_name + ) + + # + # Continuous Backups + # + + def describe_continuous_backups( + self, context: RequestContext, table_name: TableName, **kwargs + ) -> DescribeContinuousBackupsOutput: + self.get_global_table_region(context, table_name) + store = get_store(context.account_id, context.region) + continuous_backup_description = ( + store.table_properties.get(table_name, {}).get("ContinuousBackupsDescription") + ) or ContinuousBackupsDescription( + ContinuousBackupsStatus=ContinuousBackupsStatus.ENABLED, + PointInTimeRecoveryDescription=PointInTimeRecoveryDescription( + PointInTimeRecoveryStatus=PointInTimeRecoveryStatus.DISABLED + ), + ) + + return DescribeContinuousBackupsOutput( + ContinuousBackupsDescription=continuous_backup_description + ) + + def update_continuous_backups( + self, + context: RequestContext, + table_name: TableName, + point_in_time_recovery_specification: PointInTimeRecoverySpecification, + **kwargs, + ) -> UpdateContinuousBackupsOutput: + self.get_global_table_region(context, table_name) + + store = get_store(context.account_id, context.region) + pit_recovery_status = ( + PointInTimeRecoveryStatus.ENABLED + if point_in_time_recovery_specification["PointInTimeRecoveryEnabled"] + else PointInTimeRecoveryStatus.DISABLED + ) + continuous_backup_description = ContinuousBackupsDescription( + ContinuousBackupsStatus=ContinuousBackupsStatus.ENABLED, + PointInTimeRecoveryDescription=PointInTimeRecoveryDescription( + PointInTimeRecoveryStatus=pit_recovery_status + ), + ) + table_props = store.table_properties.setdefault(table_name, {}) + table_props["ContinuousBackupsDescription"] = continuous_backup_description + + return UpdateContinuousBackupsOutput( + ContinuousBackupsDescription=continuous_backup_description + ) + + # + # Helpers + # + + @staticmethod + def ddb_region_name(region_name: str) -> str: + """Map `local` or `localhost` region to the us-east-1 region. These values are used by NoSQL Workbench.""" + # TODO: could this be somehow moved into the request handler chain? + if region_name in ("local", "localhost"): + region_name = AWS_REGION_US_EAST_1 + + return region_name + + @staticmethod + def table_exists(account_id: str, region_name: str, table_name: str) -> bool: + region_name = DynamoDBProvider.ddb_region_name(region_name) + + client = connect_to( + aws_access_key_id=account_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + region_name=region_name, + ).dynamodb + return dynamodb_table_exists(table_name, client) + + @staticmethod + def ensure_table_exists(account_id: str, region_name: str, table_name: str): + """ + Raise ResourceNotFoundException if the given table does not exist. 
+
+        :param account_id: account id
+        :param region_name: region name
+        :param table_name: table name
+        :raise: ResourceNotFoundException if table does not exist in DynamoDB Local
+        """
+        if not DynamoDBProvider.table_exists(account_id, region_name, table_name):
+            raise ResourceNotFoundException("Cannot do operations on a non-existent table")
+
+    @staticmethod
+    def get_global_table_region(context: RequestContext, table_name: str) -> str:
+        """
+        Return the table region, considering that it might be a replicated table.
+
+        Replication in LocalStack works by keeping a single copy of a table and forwarding
+        requests to the region where this table exists.
+
+        This method does not check whether the table actually exists in DDBLocal.
+
+        :param context: request context
+        :param table_name: table name
+        :return: region
+        """
+        store = get_store(context.account_id, context.region)
+
+        table_region = store.TABLE_REGION.get(table_name)
+        replicated_at = store.REPLICAS.get(table_name, {}).keys()
+
+        if context.region == table_region or context.region in replicated_at:
+            return table_region
+
+        return context.region
+
+    @staticmethod
+    def prepare_request_headers(headers: Dict, account_id: str, region_name: str):
+        """
+        Modify the Credentials field of the Authorization header to achieve namespacing in DynamoDBLocal.
+        """
+        region_name = DynamoDBProvider.ddb_region_name(region_name)
+        key = get_ddb_access_key(account_id, region_name)
+
+        # DynamoDBLocal namespaces based on the value of Credentials
+        # Since we want to namespace by both account ID and region, use an aggregate key
+        # We also replace the region to keep compatibility with NoSQL Workbench
+        headers["Authorization"] = re.sub(
+            AUTH_CREDENTIAL_REGEX,
+            rf"Credential={key}/\2/{region_name}/\4/",
+            headers.get("Authorization") or "",
+            flags=re.IGNORECASE,
+        )
+
+    def fix_consumed_capacity(self, request: Dict, result: Dict):
+        # make sure we append 'ConsumedCapacity', which is properly
+        # returned by dynalite, but not by AWS's DynamoDBLocal
+        table_name = request.get("TableName")
+        return_cap = request.get("ReturnConsumedCapacity")
+        if "ConsumedCapacity" not in result and return_cap in ["TOTAL", "INDEXES"]:
+            result["ConsumedCapacity"] = {
+                "TableName": table_name,
+                "CapacityUnits": 5,  # TODO hardcoded
+                "ReadCapacityUnits": 2,
+                "WriteCapacityUnits": 3,
+            }
+
+    def fix_table_arn(self, account_id: str, region_name: str, arn: str) -> str:
+        """
+        Set the correct account ID and region in ARNs returned by DynamoDB Local.
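+
+        For example (illustrative account/region values), DynamoDB Local's
+        ``arn:aws:dynamodb:ddblocal:000000000000:table/foo`` becomes
+        ``arn:aws:dynamodb:us-east-1:123456789012:table/foo``.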
+ """ + partition = get_partition(region_name) + return ( + arn.replace("arn:aws:", f"arn:{partition}:") + .replace(":ddblocal:", f":{region_name}:") + .replace(":000000000000:", f":{account_id}:") + ) + + def batch_execute_statement( + self, + context: RequestContext, + statements: PartiQLBatchRequest, + return_consumed_capacity: ReturnConsumedCapacity = None, + **kwargs, + ) -> BatchExecuteStatementOutput: + result = self.forward_request(context) + return result + + @staticmethod + def get_record_template(region_name: str, stream_view_type: str | None = None) -> StreamRecord: + record = { + "eventID": short_uid(), + "eventVersion": "1.1", + "dynamodb": { + # expects nearest second rounded down + "ApproximateCreationDateTime": int(time.time()), + "SizeBytes": -1, + }, + "awsRegion": region_name, + "eventSource": "aws:dynamodb", + } + if stream_view_type: + record["dynamodb"]["StreamViewType"] = stream_view_type + + return record + + def check_provisioned_throughput(self, action): + """ + Check rate limiting for an API operation and raise an error if provisioned throughput is exceeded. + """ + if self.should_throttle(action): + message = ( + "The level of configured provisioned throughput for the table was exceeded. " + + "Consider increasing your provisioning level with the UpdateTable API" + ) + raise ProvisionedThroughputExceededException(message) + + def action_should_throttle(self, action, actions): + throttled = [f"{ACTION_PREFIX}{a}" for a in actions] + return (action in throttled) or (action in actions) + + def should_throttle(self, action): + if ( + not config.DYNAMODB_READ_ERROR_PROBABILITY + and not config.DYNAMODB_ERROR_PROBABILITY + and not config.DYNAMODB_WRITE_ERROR_PROBABILITY + ): + # early exit so we don't need to call random() + return False + + rand = random.random() + if rand < config.DYNAMODB_READ_ERROR_PROBABILITY and self.action_should_throttle( + action, READ_THROTTLED_ACTIONS + ): + return True + elif rand < config.DYNAMODB_WRITE_ERROR_PROBABILITY and self.action_should_throttle( + action, WRITE_THROTTLED_ACTIONS + ): + return True + elif rand < config.DYNAMODB_ERROR_PROBABILITY and self.action_should_throttle( + action, THROTTLED_ACTIONS + ): + return True + return False + + +# --- +# Misc. 
util functions +# --- + + +def get_global_secondary_index(account_id: str, region_name: str, table_name: str, index_name: str): + schema = SchemaExtractor.get_table_schema(table_name, account_id, region_name) + for index in schema["Table"].get("GlobalSecondaryIndexes", []): + if index["IndexName"] == index_name: + return index + raise ResourceNotFoundException("Index not found") + + +def is_local_secondary_index( + account_id: str, region_name: str, table_name: str, index_name: str +) -> bool: + schema = SchemaExtractor.get_table_schema(table_name, account_id, region_name) + for index in schema["Table"].get("LocalSecondaryIndexes", []): + if index["IndexName"] == index_name: + return True + return False + + +def is_index_query_valid(account_id: str, region_name: str, query_data: dict) -> bool: + table_name = to_str(query_data["TableName"]) + index_name = to_str(query_data["IndexName"]) + if is_local_secondary_index(account_id, region_name, table_name, index_name): + return True + index_query_type = query_data.get("Select") + index = get_global_secondary_index(account_id, region_name, table_name, index_name) + index_projection_type = index.get("Projection").get("ProjectionType") + if index_query_type == "ALL_ATTRIBUTES" and index_projection_type != "ALL": + return False + return True + + +def kinesis_stream_exists(stream_arn): + account_id = extract_account_id_from_arn(stream_arn) + region_name = extract_region_from_arn(stream_arn) + + kinesis = connect_to( + aws_access_key_id=account_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + region_name=region_name, + ).kinesis + stream_name_from_arn = stream_arn.split("/", 1)[1] + # check if the stream exists in kinesis for the user + filtered = list( + filter( + lambda stream_name: stream_name == stream_name_from_arn, + kinesis.list_streams()["StreamNames"], + ) + ) + return bool(filtered) diff --git a/localstack/services/sns/__init__.py b/localstack-core/localstack/services/dynamodbstreams/__init__.py similarity index 100% rename from localstack/services/sns/__init__.py rename to localstack-core/localstack/services/dynamodbstreams/__init__.py diff --git a/localstack-core/localstack/services/dynamodbstreams/dynamodbstreams_api.py b/localstack-core/localstack/services/dynamodbstreams/dynamodbstreams_api.py new file mode 100644 index 0000000000000..e9164465fdd57 --- /dev/null +++ b/localstack-core/localstack/services/dynamodbstreams/dynamodbstreams_api.py @@ -0,0 +1,235 @@ +import logging +import threading +from typing import TYPE_CHECKING, Dict + +from bson.json_util import dumps + +from localstack import config +from localstack.aws.api import RequestContext +from localstack.aws.api.dynamodbstreams import StreamStatus, StreamViewType, TableName +from localstack.aws.connect import connect_to +from localstack.services.dynamodb.v2.provider import DynamoDBProvider +from localstack.services.dynamodbstreams.models import DynamoDbStreamsStore, dynamodbstreams_stores +from localstack.utils.aws import arns, resources +from localstack.utils.common import now_utc +from localstack.utils.threads import FuncThread + +if TYPE_CHECKING: + from mypy_boto3_kinesis import KinesisClient + +DDB_KINESIS_STREAM_NAME_PREFIX = "__ddb_stream_" + +LOG = logging.getLogger(__name__) + +_SEQUENCE_MTX = threading.RLock() +_SEQUENCE_NUMBER_COUNTER = 1 + + +def get_dynamodbstreams_store(account_id: str, region: str) -> DynamoDbStreamsStore: + return dynamodbstreams_stores[account_id][region] + + +def get_and_increment_sequence_number_counter() -> int: + global 
_SEQUENCE_NUMBER_COUNTER + with _SEQUENCE_MTX: + cnt = _SEQUENCE_NUMBER_COUNTER + _SEQUENCE_NUMBER_COUNTER += 1 + return cnt + + +def get_kinesis_client(account_id: str, region_name: str) -> "KinesisClient": + # specifically specify endpoint url here to ensure we always hit the local kinesis instance + return connect_to( + aws_access_key_id=account_id, + region_name=region_name, + endpoint_url=config.internal_service_url(), + ).kinesis + + +def add_dynamodb_stream( + account_id: str, + region_name: str, + table_name: str, + latest_stream_label: str | None = None, + view_type: StreamViewType = StreamViewType.NEW_AND_OLD_IMAGES, + enabled: bool = True, +) -> None: + if not enabled: + return + + store = get_dynamodbstreams_store(account_id, region_name) + # create kinesis stream as a backend + stream_name = get_kinesis_stream_name(table_name) + resources.create_kinesis_stream( + get_kinesis_client(account_id, region_name), + stream_name=stream_name, + ) + latest_stream_label = latest_stream_label or "latest" + stream = { + "StreamArn": arns.dynamodb_stream_arn( + table_name=table_name, + latest_stream_label=latest_stream_label, + account_id=account_id, + region_name=region_name, + ), + "TableName": table_name, + "StreamLabel": latest_stream_label, + "StreamStatus": StreamStatus.ENABLING, + "KeySchema": [], + "Shards": [], + "StreamViewType": view_type, + "shards_id_map": {}, + } + store.ddb_streams[table_name] = stream + + +def get_stream_for_table(account_id: str, region_name: str, table_arn: str) -> dict: + store = get_dynamodbstreams_store(account_id, region_name) + table_name = table_name_from_stream_arn(table_arn) + return store.ddb_streams.get(table_name) + + +def _process_forwarded_records( + account_id: str, region_name: str, table_name: TableName, table_records: dict, kinesis +) -> None: + records = table_records["records"] + stream_type = table_records["table_stream_type"] + # if the table does not have a DynamoDB Streams enabled, skip publishing anything + if not stream_type.stream_view_type: + return + + # in this case, Kinesis forces the record to have both OldImage and NewImage, so we need to filter it + # as the settings are different for DDB Streams and Kinesis + if stream_type.is_kinesis and stream_type.stream_view_type != StreamViewType.NEW_AND_OLD_IMAGES: + kinesis_records = [] + + # StreamViewType determines what information is written to the stream for the table + # When an item in the table is inserted, updated or deleted + image_filter = set() + if stream_type.stream_view_type == StreamViewType.KEYS_ONLY: + image_filter = {"OldImage", "NewImage"} + elif stream_type.stream_view_type == StreamViewType.OLD_IMAGE: + image_filter = {"NewImage"} + elif stream_type.stream_view_type == StreamViewType.NEW_IMAGE: + image_filter = {"OldImage"} + + for record in records: + record["dynamodb"] = { + k: v for k, v in record["dynamodb"].items() if k not in image_filter + } + + if "SequenceNumber" not in record["dynamodb"]: + record["dynamodb"]["SequenceNumber"] = str( + get_and_increment_sequence_number_counter() + ) + + kinesis_records.append({"Data": dumps(record), "PartitionKey": "TODO"}) + + else: + kinesis_records = [] + for record in records: + if "SequenceNumber" not in record["dynamodb"]: + # we can mutate the record for SequenceNumber, the Kinesis forwarding takes care of filtering it + record["dynamodb"]["SequenceNumber"] = str( + get_and_increment_sequence_number_counter() + ) + + # simply pass along the records, they already have the right format + 
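+                # Roughly, a record here looks like (illustrative values):
+                #   {"eventID": "...", "eventSource": "aws:dynamodb", "awsRegion": "us-east-1",
+                #    "dynamodb": {"Keys": {...}, "NewImage": {...}, "SequenceNumber": "42"}}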
kinesis_records.append({"Data": dumps(record), "PartitionKey": "TODO"}) + + stream_name = get_kinesis_stream_name(table_name) + kinesis.put_records( + StreamName=stream_name, + Records=kinesis_records, + ) + + +def forward_events(account_id: str, region_name: str, records_map: dict[TableName, dict]) -> None: + kinesis = get_kinesis_client(account_id, region_name) + + for table_name, table_records in records_map.items(): + _process_forwarded_records(account_id, region_name, table_name, table_records, kinesis) + + +def delete_streams(account_id: str, region_name: str, table_arn: str) -> None: + store = get_dynamodbstreams_store(account_id, region_name) + table_name = table_name_from_table_arn(table_arn) + if store.ddb_streams.pop(table_name, None): + stream_name = get_kinesis_stream_name(table_name) + # stream_arn = stream["StreamArn"] + + # we're basically asynchronously trying to delete the stream, or should we do this "synchronous" with the table + # deletion? + def _delete_stream(*args, **kwargs): + try: + kinesis_client = get_kinesis_client(account_id, region_name) + # needs to be active otherwise we can't delete it + kinesis_client.get_waiter("stream_exists").wait(StreamName=stream_name) + kinesis_client.delete_stream(StreamName=stream_name, EnforceConsumerDeletion=True) + kinesis_client.get_waiter("stream_not_exists").wait(StreamName=stream_name) + except Exception: + LOG.warning( + "Failed to delete underlying kinesis stream for dynamodb table table_arn=%s", + table_arn, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + FuncThread(_delete_stream).start() # fire & forget + + +def get_kinesis_stream_name(table_name: str) -> str: + return DDB_KINESIS_STREAM_NAME_PREFIX + table_name + + +def table_name_from_stream_arn(stream_arn: str) -> str: + return stream_arn.split(":table/", 1)[-1].split("/")[0] + + +def table_name_from_table_arn(table_arn: str) -> str: + return table_name_from_stream_arn(table_arn) + + +def stream_name_from_stream_arn(stream_arn: str) -> str: + table_name = table_name_from_stream_arn(stream_arn) + return get_kinesis_stream_name(table_name) + + +def shard_id(kinesis_shard_id: str) -> str: + timestamp = str(int(now_utc())) + timestamp = f"{timestamp[:-5]}00000000".rjust(20, "0") + kinesis_shard_params = kinesis_shard_id.split("-") + return f"{kinesis_shard_params[0]}-{timestamp}-{kinesis_shard_params[-1][:32]}" + + +def kinesis_shard_id(dynamodbstream_shard_id: str) -> str: + shard_params = dynamodbstream_shard_id.rsplit("-") + return f"{shard_params[0]}-{shard_params[-1]}" + + +def get_shard_id(stream: Dict, kinesis_shard_id: str) -> str: + ddb_stream_shard_id = stream.get("shards_id_map", {}).get(kinesis_shard_id) + if not ddb_stream_shard_id: + ddb_stream_shard_id = shard_id(kinesis_shard_id) + stream["shards_id_map"][kinesis_shard_id] = ddb_stream_shard_id + + return ddb_stream_shard_id + + +def get_original_region( + context: RequestContext, stream_arn: str | None = None, table_name: str | None = None +) -> str: + """ + In DDB Global tables, we forward all the requests to the original region, instead of really replicating the data. + Since each table has a separate stream associated, we need to have a similar forwarding logic for DDB Streams. + To determine the original region, we need the table name, that can be either provided here or determined from the + ARN of the stream. + """ + if not stream_arn and not table_name: + LOG.debug( + "No Stream ARN or table name provided. 
Returning region '%s' from the request", + context.region, + ) + return context.region + + table_name = table_name or table_name_from_stream_arn(stream_arn) + return DynamoDBProvider.get_global_table_region(context=context, table_name=table_name) diff --git a/localstack/services/dynamodbstreams/models.py b/localstack-core/localstack/services/dynamodbstreams/models.py similarity index 100% rename from localstack/services/dynamodbstreams/models.py rename to localstack-core/localstack/services/dynamodbstreams/models.py diff --git a/localstack-core/localstack/services/dynamodbstreams/provider.py b/localstack-core/localstack/services/dynamodbstreams/provider.py new file mode 100644 index 0000000000000..6c9548bb81ebf --- /dev/null +++ b/localstack-core/localstack/services/dynamodbstreams/provider.py @@ -0,0 +1,195 @@ +import copy +import logging + +from bson.json_util import loads + +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.dynamodbstreams import ( + DescribeStreamOutput, + DynamodbstreamsApi, + ExpiredIteratorException, + GetRecordsInput, + GetRecordsOutput, + GetShardIteratorOutput, + ListStreamsOutput, + PositiveIntegerObject, + ResourceNotFoundException, + SequenceNumber, + ShardId, + ShardIteratorType, + Stream, + StreamArn, + StreamDescription, + StreamStatus, + TableName, +) +from localstack.aws.connect import connect_to +from localstack.services.dynamodb.utils import change_region_in_ddb_stream_arn +from localstack.services.dynamodbstreams.dynamodbstreams_api import ( + get_dynamodbstreams_store, + get_kinesis_client, + get_kinesis_stream_name, + get_original_region, + get_shard_id, + kinesis_shard_id, + stream_name_from_stream_arn, + table_name_from_stream_arn, +) +from localstack.services.plugins import ServiceLifecycleHook +from localstack.utils.collections import select_from_typed_dict + +LOG = logging.getLogger(__name__) + +STREAM_STATUS_MAP = { + "ACTIVE": StreamStatus.ENABLED, + "CREATING": StreamStatus.ENABLING, + "DELETING": StreamStatus.DISABLING, + "UPDATING": StreamStatus.ENABLING, +} + + +class DynamoDBStreamsProvider(DynamodbstreamsApi, ServiceLifecycleHook): + shard_to_region: dict[str, str] + """Map a shard iterator to the originating region. 
This is used in case of replica tables, as LocalStack keeps the + data in one region only, redirecting all the requests from replica regions.""" + + def __init__(self): + self.shard_to_region = {} + + def describe_stream( + self, + context: RequestContext, + stream_arn: StreamArn, + limit: PositiveIntegerObject = None, + exclusive_start_shard_id: ShardId = None, + **kwargs, + ) -> DescribeStreamOutput: + og_region = get_original_region(context=context, stream_arn=stream_arn) + store = get_dynamodbstreams_store(context.account_id, og_region) + kinesis = get_kinesis_client(account_id=context.account_id, region_name=og_region) + for stream in store.ddb_streams.values(): + _stream_arn = stream_arn + if context.region != og_region: + _stream_arn = change_region_in_ddb_stream_arn(_stream_arn, og_region) + if stream["StreamArn"] == _stream_arn: + # get stream details + dynamodb = connect_to( + aws_access_key_id=context.account_id, region_name=og_region + ).dynamodb + table_name = table_name_from_stream_arn(stream["StreamArn"]) + stream_name = get_kinesis_stream_name(table_name) + stream_details = kinesis.describe_stream(StreamName=stream_name) + table_details = dynamodb.describe_table(TableName=table_name) + stream["KeySchema"] = table_details["Table"]["KeySchema"] + stream["StreamStatus"] = STREAM_STATUS_MAP.get( + stream_details["StreamDescription"]["StreamStatus"] + ) + + # Replace Kinesis ShardIDs with ones that mimic actual + # DynamoDBStream ShardIDs. + stream_shards = copy.deepcopy(stream_details["StreamDescription"]["Shards"]) + start_index = 0 + for index, shard in enumerate(stream_shards): + shard["ShardId"] = get_shard_id(stream, shard["ShardId"]) + shard.pop("HashKeyRange", None) + # we want to ignore the shards before exclusive_start_shard_id parameters + # we store the index where we encounter then slice the shards + if exclusive_start_shard_id and exclusive_start_shard_id == shard["ShardId"]: + start_index = index + + if exclusive_start_shard_id: + # slicing the resulting shards after the exclusive_start_shard_id parameters + stream_shards = stream_shards[start_index + 1 :] + + stream["Shards"] = stream_shards + stream_description = select_from_typed_dict(StreamDescription, stream) + stream_description["StreamArn"] = _stream_arn + return DescribeStreamOutput(StreamDescription=stream_description) + + raise ResourceNotFoundException( + f"Requested resource not found: Stream: {stream_arn} not found" + ) + + @handler("GetRecords", expand=False) + def get_records(self, context: RequestContext, payload: GetRecordsInput) -> GetRecordsOutput: + _shard_iterator = payload["ShardIterator"] + region_name = context.region + if payload["ShardIterator"] in self.shard_to_region: + region_name = self.shard_to_region[_shard_iterator] + + kinesis = get_kinesis_client(account_id=context.account_id, region_name=region_name) + prefix, _, payload["ShardIterator"] = _shard_iterator.rpartition("|") + try: + kinesis_records = kinesis.get_records(**payload) + except kinesis.exceptions.ExpiredIteratorException: + self.shard_to_region.pop(_shard_iterator, None) + LOG.debug("Shard iterator for underlying kinesis stream expired") + raise ExpiredIteratorException("Shard iterator has expired") + result = { + "Records": [], + "NextShardIterator": f"{prefix}|{kinesis_records.get('NextShardIterator')}", + } + for record in kinesis_records["Records"]: + record_data = loads(record["Data"]) + record_data["dynamodb"]["SequenceNumber"] = record["SequenceNumber"] + result["Records"].append(record_data) + + # Similar as the 
logic in GetShardIterator, we need to track the originating region when we get the + # NextShardIterator in the results. + if region_name != context.region and "NextShardIterator" in result: + self.shard_to_region[result["NextShardIterator"]] = region_name + return GetRecordsOutput(**result) + + def get_shard_iterator( + self, + context: RequestContext, + stream_arn: StreamArn, + shard_id: ShardId, + shard_iterator_type: ShardIteratorType, + sequence_number: SequenceNumber = None, + **kwargs, + ) -> GetShardIteratorOutput: + stream_name = stream_name_from_stream_arn(stream_arn) + og_region = get_original_region(context=context, stream_arn=stream_arn) + stream_shard_id = kinesis_shard_id(shard_id) + kinesis = get_kinesis_client(account_id=context.account_id, region_name=og_region) + + kwargs = {"StartingSequenceNumber": sequence_number} if sequence_number else {} + result = kinesis.get_shard_iterator( + StreamName=stream_name, + ShardId=stream_shard_id, + ShardIteratorType=shard_iterator_type, + **kwargs, + ) + del result["ResponseMetadata"] + # TODO not quite clear what the |1| exactly denotes, because at AWS it's sometimes other numbers + result["ShardIterator"] = f"{stream_arn}|1|{result['ShardIterator']}" + + # In case of a replica table, we need to keep track of the real region originating the shard iterator. + # This region will be later used in GetRecords to redirect to the originating region, holding the data. + if og_region != context.region: + self.shard_to_region[result["ShardIterator"]] = og_region + return GetShardIteratorOutput(**result) + + def list_streams( + self, + context: RequestContext, + table_name: TableName = None, + limit: PositiveIntegerObject = None, + exclusive_start_stream_arn: StreamArn = None, + **kwargs, + ) -> ListStreamsOutput: + og_region = get_original_region(context=context, table_name=table_name) + store = get_dynamodbstreams_store(context.account_id, og_region) + result = [select_from_typed_dict(Stream, res) for res in store.ddb_streams.values()] + if table_name: + result: list[Stream] = [res for res in result if res["TableName"] == table_name] + # If this is a stream from a table replica, we need to change the region in the stream ARN, as LocalStack + # keeps a stream only in the originating region. 
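+        # e.g. (illustrative): a caller in eu-west-1 listing streams of a replica of
+        # a table homed in us-east-1 sees an eu-west-1 stream ARN, even though the
+        # backing stream only exists in us-east-1.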
+ if context.region != og_region: + for stream in result: + stream["StreamArn"] = change_region_in_ddb_stream_arn( + stream["StreamArn"], context.region + ) + + return ListStreamsOutput(Streams=result) diff --git a/localstack/services/sqs/__init__.py b/localstack-core/localstack/services/dynamodbstreams/v2/__init__.py similarity index 100% rename from localstack/services/sqs/__init__.py rename to localstack-core/localstack/services/dynamodbstreams/v2/__init__.py diff --git a/localstack-core/localstack/services/dynamodbstreams/v2/provider.py b/localstack-core/localstack/services/dynamodbstreams/v2/provider.py new file mode 100644 index 0000000000000..a91fbc592a992 --- /dev/null +++ b/localstack-core/localstack/services/dynamodbstreams/v2/provider.py @@ -0,0 +1,130 @@ +import logging + +from localstack.aws import handlers +from localstack.aws.api import RequestContext, ServiceRequest, ServiceResponse, handler +from localstack.aws.api.dynamodbstreams import ( + DescribeStreamInput, + DescribeStreamOutput, + DynamodbstreamsApi, + GetRecordsInput, + GetRecordsOutput, + GetShardIteratorInput, + GetShardIteratorOutput, + ListStreamsInput, + ListStreamsOutput, +) +from localstack.services.dynamodb.server import DynamodbServer +from localstack.services.dynamodb.utils import modify_ddblocal_arns +from localstack.services.dynamodb.v2.provider import DynamoDBProvider, modify_context_region +from localstack.services.dynamodbstreams.dynamodbstreams_api import get_original_region +from localstack.services.plugins import ServiceLifecycleHook +from localstack.utils.aws.arns import parse_arn + +LOG = logging.getLogger(__name__) + + +class DynamoDBStreamsProvider(DynamodbstreamsApi, ServiceLifecycleHook): + shard_to_region: dict[str, str] + """Map a shard iterator to the originating region. This is used in case of replica tables, as LocalStack keeps the + data in one region only, redirecting all the requests from replica regions.""" + + def __init__(self): + self.server = DynamodbServer.get() + self.shard_to_region = {} + + def on_after_init(self): + # add response processor specific to ddblocal + handlers.modify_service_response.append(self.service, modify_ddblocal_arns) + + def on_before_start(self): + self.server.start_dynamodb() + + def _forward_request( + self, context: RequestContext, region: str | None, service_request: ServiceRequest + ) -> ServiceResponse: + """ + Modify the context region and then forward request to DynamoDB Local. + + This is used for operations impacted by global tables. In LocalStack, a single copy of global table + is kept, and any requests to replicated tables are forwarded to this original table. + """ + if region: + with modify_context_region(context, region): + return self.forward_request(context, service_request=service_request) + return self.forward_request(context, service_request=service_request) + + def forward_request( + self, context: RequestContext, service_request: ServiceRequest = None + ) -> ServiceResponse: + """ + Forward a request to DynamoDB Local. 
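+
+        Before proxying, the Authorization header is rewritten (see
+        ``DynamoDBProvider.prepare_request_headers`` below) so that DynamoDB Local
+        namespaces the data by account ID and region.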
+ """ + DynamoDBProvider.prepare_request_headers( + context.request.headers, account_id=context.account_id, region_name=context.region + ) + return self.server.proxy(context, service_request) + + def modify_stream_arn_for_ddb_local(self, stream_arn: str) -> str: + parsed_arn = parse_arn(stream_arn) + + return f"arn:aws:dynamodb:ddblocal:000000000000:{parsed_arn['resource']}" + + @handler("DescribeStream", expand=False) + def describe_stream( + self, + context: RequestContext, + payload: DescribeStreamInput, + ) -> DescribeStreamOutput: + global_table_region = get_original_region(context=context, stream_arn=payload["StreamArn"]) + request = payload.copy() + request["StreamArn"] = self.modify_stream_arn_for_ddb_local(request.get("StreamArn", "")) + return self._forward_request( + context=context, service_request=request, region=global_table_region + ) + + @handler("GetRecords", expand=False) + def get_records(self, context: RequestContext, payload: GetRecordsInput) -> GetRecordsOutput: + request = payload.copy() + request["ShardIterator"] = self.modify_stream_arn_for_ddb_local( + request.get("ShardIterator", "") + ) + region = self.shard_to_region.pop(request["ShardIterator"], None) + response = self._forward_request(context=context, region=region, service_request=request) + # Similar as the logic in GetShardIterator, we need to track the originating region when we get the + # NextShardIterator in the results. + if ( + region + and region != context.region + and (next_shard := response.get("NextShardIterator")) + ): + self.shard_to_region[next_shard] = region + return response + + @handler("GetShardIterator", expand=False) + def get_shard_iterator( + self, context: RequestContext, payload: GetShardIteratorInput + ) -> GetShardIteratorOutput: + global_table_region = get_original_region(context=context, stream_arn=payload["StreamArn"]) + request = payload.copy() + request["StreamArn"] = self.modify_stream_arn_for_ddb_local(request.get("StreamArn", "")) + response = self._forward_request( + context=context, service_request=request, region=global_table_region + ) + + # In case of a replica table, we need to keep track of the real region originating the shard iterator. + # This region will be later used in GetRecords to redirect to the originating region, holding the data. 
+ if global_table_region != context.region and ( + shard_iterator := response.get("ShardIterator") + ): + self.shard_to_region[shard_iterator] = global_table_region + return response + + @handler("ListStreams", expand=False) + def list_streams(self, context: RequestContext, payload: ListStreamsInput) -> ListStreamsOutput: + global_table_region = get_original_region( + context=context, stream_arn=payload.get("TableName") + ) + # TODO: look into `ExclusiveStartStreamArn` param + return self._forward_request( + context=context, service_request=payload, region=global_table_region + ) diff --git a/localstack/services/ssm/__init__.py b/localstack-core/localstack/services/ec2/__init__.py similarity index 100% rename from localstack/services/ssm/__init__.py rename to localstack-core/localstack/services/ec2/__init__.py diff --git a/localstack-core/localstack/services/ec2/exceptions.py b/localstack-core/localstack/services/ec2/exceptions.py new file mode 100644 index 0000000000000..cb968ba2e6e68 --- /dev/null +++ b/localstack-core/localstack/services/ec2/exceptions.py @@ -0,0 +1,80 @@ +from localstack.aws.api import CommonServiceException + + +class InternalError(CommonServiceException): + def __init__(self, message): + super().__init__( + code="InternalError", + message=message, + ) + + +class IncorrectInstanceStateError(CommonServiceException): + def __init__(self, instance_id): + super().__init__( + code="IncorrectInstanceState", + message=f"The instance '{instance_id}' is not in a state from which it can be started", + ) + + +class InvalidAMIIdError(CommonServiceException): + def __init__(self, ami_id): + super().__init__( + code="InvalidAMIID.NotFound", message=f"The image id '{ami_id}' does not exist" + ) + + +class InvalidInstanceIdError(CommonServiceException): + def __init__(self, instance_id): + super().__init__( + code="InvalidInstanceID.NotFound", + message=f"The instance ID '{instance_id}' does not exist", + ) + + +class MissingParameterError(CommonServiceException): + def __init__(self, parameter): + super().__init__( + code="MissingParameter", + message=f"The request must contain the parameter {parameter}", + ) + + +class InvalidLaunchTemplateNameError(CommonServiceException): + def __init__(self): + super().__init__( + code="InvalidLaunchTemplateName.MalformedException", + message="A launch template name must be between 3 and 128 characters, and may contain letters, numbers, and the following characters: - ( ) . 
/ _.'", + ) + + +class InvalidLaunchTemplateIdError(CommonServiceException): + def __init__(self): + super().__init__( + code="InvalidLaunchTemplateId.VersionNotFound", + message="Could not find launch template version", + ) + + +class InvalidSubnetDuplicateCustomIdError(CommonServiceException): + def __init__(self, custom_id): + super().__init__( + code="InvalidSubnet.DuplicateCustomId", + message=f"Subnet with custom id '{custom_id}' already exists", + ) + + +class InvalidSecurityGroupDuplicateCustomIdError(CommonServiceException): + def __init__(self, custom_id): + super().__init__( + code="InvalidSecurityGroupId.DuplicateCustomId", + message=f"Security group with custom id '{custom_id}' already exists", + ) + + +class InvalidVpcDuplicateCustomIdError(CommonServiceException): + def __init__(self, custom_id): + super().__init__( + code="InvalidVpc.DuplicateCustomId", + message=f"VPC with custom id '{custom_id}' already exists", + ) diff --git a/localstack/services/ec2/models.py b/localstack-core/localstack/services/ec2/models.py similarity index 100% rename from localstack/services/ec2/models.py rename to localstack-core/localstack/services/ec2/models.py diff --git a/localstack-core/localstack/services/ec2/patches.py b/localstack-core/localstack/services/ec2/patches.py new file mode 100644 index 0000000000000..d2037015905ef --- /dev/null +++ b/localstack-core/localstack/services/ec2/patches.py @@ -0,0 +1,254 @@ +import logging +from typing import Optional + +from moto.ec2 import models as ec2_models +from moto.utilities.id_generator import Tags + +from localstack.services.ec2.exceptions import ( + InvalidSecurityGroupDuplicateCustomIdError, + InvalidSubnetDuplicateCustomIdError, + InvalidVpcDuplicateCustomIdError, +) +from localstack.utils.id_generator import ( + ExistingIds, + ResourceIdentifier, + localstack_id, +) +from localstack.utils.patch import patch + +LOG = logging.getLogger(__name__) + + +@localstack_id +def generate_vpc_id( + resource_identifier: ResourceIdentifier, + existing_ids: ExistingIds = None, + tags: Tags = None, +) -> str: + # We return an empty string here to differentiate between when a custom ID was used, or when it was randomly generated by `moto`. + return "" + + +@localstack_id +def generate_security_group_id( + resource_identifier: ResourceIdentifier, + existing_ids: ExistingIds = None, + tags: Tags = None, +) -> str: + # We return an empty string here to differentiate between when a custom ID was used, or when it was randomly generated by `moto`. + return "" + + +@localstack_id +def generate_subnet_id( + resource_identifier: ResourceIdentifier, + existing_ids: ExistingIds = None, + tags: Tags = None, +) -> str: + # We return an empty string here to differentiate between when a custom ID was used, or when it was randomly generated by `moto`. 
+ return "" + + +class VpcIdentifier(ResourceIdentifier): + service = "ec2" + resource = "vpc" + + def __init__(self, account_id: str, region: str, cidr_block: str): + super().__init__(account_id, region, name=cidr_block) + + def generate(self, existing_ids: ExistingIds = None, tags: Tags = None) -> str: + return generate_vpc_id( + resource_identifier=self, + existing_ids=existing_ids, + tags=tags, + ) + + +class SecurityGroupIdentifier(ResourceIdentifier): + service = "ec2" + resource = "securitygroup" + + def __init__(self, account_id: str, region: str, vpc_id: str, group_name: str): + super().__init__(account_id, region, name=f"sg-{vpc_id}-{group_name}") + + def generate(self, existing_ids: ExistingIds = None, tags: Tags = None) -> str: + return generate_security_group_id( + resource_identifier=self, existing_ids=existing_ids, tags=tags + ) + + +class SubnetIdentifier(ResourceIdentifier): + service = "ec2" + resource = "subnet" + + def __init__(self, account_id: str, region: str, vpc_id: str, cidr_block: str): + super().__init__(account_id, region, name=f"subnet-{vpc_id}-{cidr_block}") + + def generate(self, existing_ids: ExistingIds = None, tags: Tags = None) -> str: + return generate_subnet_id( + resource_identifier=self, + existing_ids=existing_ids, + tags=tags, + ) + + +def apply_patches(): + @patch(ec2_models.subnets.SubnetBackend.create_subnet) + def ec2_create_subnet( + fn: ec2_models.subnets.SubnetBackend.create_subnet, + self: ec2_models.subnets.SubnetBackend, + *args, + tags: Optional[dict[str, str]] = None, + **kwargs, + ): + # Patch this method so that we can create a subnet with a specific "custom" + # ID. The custom ID that we will use is contained within a special tag. + vpc_id: str = args[0] if len(args) >= 1 else kwargs["vpc_id"] + cidr_block: str = args[1] if len(args) >= 1 else kwargs["cidr_block"] + resource_identifier = SubnetIdentifier( + self.account_id, self.region_name, vpc_id, cidr_block + ) + + # tags has the format: {"subnet": {"Key": ..., "Value": ...}}, but we need + # to pass this to the generate method as {"Key": ..., "Value": ...}. Take + # care not to alter the original tags dict otherwise moto will not be able + # to understand it. + subnet_tags = None + if tags is not None: + subnet_tags = tags.get("subnet", tags) + custom_id = resource_identifier.generate(tags=subnet_tags) + + if custom_id: + # Check if custom id is unique within a given VPC + for az_subnets in self.subnets.values(): + for subnet in az_subnets.values(): + if subnet.vpc_id == vpc_id and subnet.id == custom_id: + raise InvalidSubnetDuplicateCustomIdError(custom_id) + + # Generate subnet with moto library + result: ec2_models.subnets.Subnet = fn(self, *args, tags=tags, **kwargs) + availability_zone = result.availability_zone + + if custom_id: + # Remove the subnet from the default dict and add it back with the custom id + self.subnets[availability_zone].pop(result.id) + old_id = result.id + result.id = custom_id + self.subnets[availability_zone][custom_id] = result + + # Tags are not stored in the Subnet object, but instead stored in a separate + # dict in the EC2 backend, keyed by subnet id. That therefore requires + # updating as well. 
+ if old_id in self.tags: + self.tags[custom_id] = self.tags.pop(old_id) + + # Return the subnet with the patched custom id + return result + + @patch(ec2_models.security_groups.SecurityGroupBackend.create_security_group) + def ec2_create_security_group( + fn: ec2_models.security_groups.SecurityGroupBackend.create_security_group, + self: ec2_models.security_groups.SecurityGroupBackend, + name: str, + *args, + vpc_id: Optional[str] = None, + tags: Optional[dict[str, str]] = None, + force: bool = False, + **kwargs, + ): + vpc_id = vpc_id or self.default_vpc.id + resource_identifier = SecurityGroupIdentifier( + self.account_id, self.region_name, vpc_id, name + ) + custom_id = resource_identifier.generate(tags=tags) + + if not force and self.get_security_group_from_id(custom_id): + raise InvalidSecurityGroupDuplicateCustomIdError(custom_id) + + # Generate security group with moto library + result: ec2_models.security_groups.SecurityGroup = fn( + self, name, *args, vpc_id=vpc_id, tags=tags, force=force, **kwargs + ) + + if custom_id: + # Remove the security group from the default dict and add it back with the custom id + self.groups[result.vpc_id].pop(result.group_id) + old_id = result.group_id + result.group_id = result.id = custom_id + self.groups[result.vpc_id][custom_id] = result + + # Tags are not stored in the Security Group object, but instead are stored in a + # separate dict in the EC2 backend, keyed by id. That therefore requires + # updating as well. + if old_id in self.tags: + self.tags[custom_id] = self.tags.pop(old_id) + + return result + + @patch(ec2_models.vpcs.VPCBackend.create_vpc) + def ec2_create_vpc( + fn: ec2_models.vpcs.VPCBackend.create_vpc, + self: ec2_models.vpcs.VPCBackend, + cidr_block: str, + *args, + tags: Optional[list[dict[str, str]]] = None, + is_default: bool = False, + **kwargs, + ): + resource_identifier = VpcIdentifier(self.account_id, self.region_name, cidr_block) + custom_id = resource_identifier.generate(tags=tags) + + # Check if custom id is unique + if custom_id and custom_id in self.vpcs: + raise InvalidVpcDuplicateCustomIdError(custom_id) + + # Generate VPC with moto library + result: ec2_models.vpcs.VPC = fn( + self, cidr_block, *args, tags=tags, is_default=is_default, **kwargs + ) + vpc_id = result.id + + if custom_id: + # Remove security group associated with unique non-custom VPC ID + default = self.get_security_group_from_name("default", vpc_id=vpc_id) + if not default: + self.delete_security_group( + name="default", + vpc_id=vpc_id, + ) + + # Delete route table if only main route table remains. + for route_table in self.describe_route_tables(filters={"vpc-id": vpc_id}): + self.delete_route_table(route_table.id) # type: ignore[attr-defined] + + # Remove the VPC from the default dict and add it back with the custom id + self.vpcs.pop(vpc_id) + old_id = result.id + result.id = custom_id + self.vpcs[custom_id] = result + + # Tags are not stored in the VPC object, but instead stored in a separate + # dict in the EC2 backend, keyed by VPC id. That therefore requires + # updating as well. 
+ if old_id in self.tags: + self.tags[custom_id] = self.tags.pop(old_id) + + # Create default network ACL, route table, and security group for custom ID VPC + self.create_route_table( + vpc_id=custom_id, + main=True, + ) + self.create_network_acl( + vpc_id=custom_id, + default=True, + ) + # Associate default security group with custom ID VPC + if not default: + self.create_security_group( + name="default", + description="default VPC security group", + vpc_id=custom_id, + is_default=is_default, + ) + + return result diff --git a/localstack-core/localstack/services/ec2/provider.py b/localstack-core/localstack/services/ec2/provider.py new file mode 100644 index 0000000000000..ab52195e4cfa8 --- /dev/null +++ b/localstack-core/localstack/services/ec2/provider.py @@ -0,0 +1,628 @@ +import copy +import json +import logging +import re +from abc import ABC +from datetime import datetime, timezone + +from botocore.parsers import ResponseParserError +from moto.core.utils import camelcase_to_underscores, underscores_to_camelcase +from moto.ec2.exceptions import InvalidVpcEndPointIdError +from moto.ec2.models import ( + EC2Backend, + FlowLogsBackend, + SubnetBackend, + TransitGatewayAttachmentBackend, + VPCBackend, + ec2_backends, +) +from moto.ec2.models.launch_templates import LaunchTemplate as MotoLaunchTemplate +from moto.ec2.models.subnets import Subnet + +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.ec2 import ( + AvailabilityZone, + Boolean, + CreateFlowLogsRequest, + CreateFlowLogsResult, + CreateLaunchTemplateRequest, + CreateLaunchTemplateResult, + CreateSubnetRequest, + CreateSubnetResult, + CreateTransitGatewayRequest, + CreateTransitGatewayResult, + CurrencyCodeValues, + DescribeAvailabilityZonesRequest, + DescribeAvailabilityZonesResult, + DescribeReservedInstancesOfferingsRequest, + DescribeReservedInstancesOfferingsResult, + DescribeReservedInstancesRequest, + DescribeReservedInstancesResult, + DescribeSubnetsRequest, + DescribeSubnetsResult, + DescribeTransitGatewaysRequest, + DescribeTransitGatewaysResult, + DescribeVpcEndpointServicesRequest, + DescribeVpcEndpointServicesResult, + DescribeVpcEndpointsRequest, + DescribeVpcEndpointsResult, + DnsOptions, + DnsOptionsSpecification, + DnsRecordIpType, + Ec2Api, + GetSecurityGroupsForVpcRequest, + GetSecurityGroupsForVpcResult, + InstanceType, + IpAddressType, + LaunchTemplate, + ModifyLaunchTemplateRequest, + ModifyLaunchTemplateResult, + ModifySubnetAttributeRequest, + ModifyVpcEndpointResult, + OfferingClassType, + OfferingTypeValues, + PricingDetail, + PurchaseReservedInstancesOfferingRequest, + PurchaseReservedInstancesOfferingResult, + RecurringCharge, + RecurringChargeFrequency, + ReservedInstances, + ReservedInstancesOffering, + ReservedInstanceState, + RevokeSecurityGroupEgressRequest, + RevokeSecurityGroupEgressResult, + RIProductDescription, + SecurityGroupForVpc, + String, + SubnetConfigurationsList, + Tenancy, + UnsuccessfulItem, + UnsuccessfulItemError, + VpcEndpointId, + VpcEndpointRouteTableIdList, + VpcEndpointSecurityGroupIdList, + VpcEndpointSubnetIdList, + scope, +) +from localstack.aws.connect import connect_to +from localstack.services.ec2.exceptions import ( + InvalidLaunchTemplateIdError, + InvalidLaunchTemplateNameError, + MissingParameterError, +) +from localstack.services.ec2.models import get_ec2_backend +from localstack.services.ec2.patches import apply_patches +from localstack.services.moto import call_moto, call_moto_with_request +from 
localstack.services.plugins import ServiceLifecycleHook +from localstack.utils.patch import patch +from localstack.utils.strings import first_char_to_upper, long_uid, short_uid + +LOG = logging.getLogger(__name__) + +# additional subnet attributes not yet supported upstream +ADDITIONAL_SUBNET_ATTRS = ("private_dns_name_options_on_launch", "enable_dns64") + + +class Ec2Provider(Ec2Api, ABC, ServiceLifecycleHook): + def on_after_init(self): + apply_patches() + + @handler("DescribeAvailabilityZones", expand=False) + def describe_availability_zones( + self, + context: RequestContext, + describe_availability_zones_request: DescribeAvailabilityZonesRequest, + ) -> DescribeAvailabilityZonesResult: + backend = get_ec2_backend(context.account_id, context.region) + zone_names = describe_availability_zones_request.get("ZoneNames") + zone_ids = describe_availability_zones_request.get("ZoneIds") + if zone_names or zone_ids: + filtered_zones = backend.describe_availability_zones( + zone_names=zone_names, zone_ids=zone_ids + ) + availability_zones = [ + AvailabilityZone( + State="available", + Messages=[], + RegionName=zone.region_name, + ZoneName=zone.name, + ZoneId=zone.zone_id, + ZoneType=zone.zone_type, + ) + for zone in filtered_zones + ] + return DescribeAvailabilityZonesResult(AvailabilityZones=availability_zones) + return call_moto(context) + + @handler("DescribeReservedInstancesOfferings", expand=False) + def describe_reserved_instances_offerings( + self, + context: RequestContext, + describe_reserved_instances_offerings_request: DescribeReservedInstancesOfferingsRequest, + ) -> DescribeReservedInstancesOfferingsResult: + return DescribeReservedInstancesOfferingsResult( + ReservedInstancesOfferings=[ + ReservedInstancesOffering( + AvailabilityZone="eu-central-1a", + Duration=2628000, + FixedPrice=0.0, + InstanceType=InstanceType.t2_small, + ProductDescription=RIProductDescription.Linux_UNIX, + ReservedInstancesOfferingId=long_uid(), + UsagePrice=0.0, + CurrencyCode=CurrencyCodeValues.USD, + InstanceTenancy=Tenancy.default, + Marketplace=True, + PricingDetails=[PricingDetail(Price=0.0, Count=3)], + RecurringCharges=[ + RecurringCharge(Amount=0.25, Frequency=RecurringChargeFrequency.Hourly) + ], + Scope=scope.Availability_Zone, + ) + ] + ) + + @handler("DescribeReservedInstances", expand=False) + def describe_reserved_instances( + self, + context: RequestContext, + describe_reserved_instances_request: DescribeReservedInstancesRequest, + ) -> DescribeReservedInstancesResult: + return DescribeReservedInstancesResult( + ReservedInstances=[ + ReservedInstances( + AvailabilityZone="eu-central-1a", + Duration=2628000, + End=datetime(2016, 6, 30, tzinfo=timezone.utc), + FixedPrice=0.0, + InstanceCount=2, + InstanceType=InstanceType.t2_small, + ProductDescription=RIProductDescription.Linux_UNIX, + ReservedInstancesId=long_uid(), + Start=datetime(2016, 1, 1, tzinfo=timezone.utc), + State=ReservedInstanceState.active, + UsagePrice=0.05, + CurrencyCode=CurrencyCodeValues.USD, + InstanceTenancy=Tenancy.default, + OfferingClass=OfferingClassType.standard, + OfferingType=OfferingTypeValues.Partial_Upfront, + RecurringCharges=[ + RecurringCharge(Amount=0.05, Frequency=RecurringChargeFrequency.Hourly) + ], + Scope=scope.Availability_Zone, + ) + ] + ) + + @handler("PurchaseReservedInstancesOffering", expand=False) + def purchase_reserved_instances_offering( + self, + context: RequestContext, + purchase_reserved_instances_offerings_request: PurchaseReservedInstancesOfferingRequest, + ) -> 
PurchaseReservedInstancesOfferingResult: + return PurchaseReservedInstancesOfferingResult( + ReservedInstancesId=long_uid(), + ) + + @handler("ModifyVpcEndpoint") + def modify_vpc_endpoint( + self, + context: RequestContext, + vpc_endpoint_id: VpcEndpointId, + dry_run: Boolean = None, + reset_policy: Boolean = None, + policy_document: String = None, + add_route_table_ids: VpcEndpointRouteTableIdList = None, + remove_route_table_ids: VpcEndpointRouteTableIdList = None, + add_subnet_ids: VpcEndpointSubnetIdList = None, + remove_subnet_ids: VpcEndpointSubnetIdList = None, + add_security_group_ids: VpcEndpointSecurityGroupIdList = None, + remove_security_group_ids: VpcEndpointSecurityGroupIdList = None, + ip_address_type: IpAddressType = None, + dns_options: DnsOptionsSpecification = None, + private_dns_enabled: Boolean = None, + subnet_configurations: SubnetConfigurationsList = None, + **kwargs, + ) -> ModifyVpcEndpointResult: + backend = get_ec2_backend(context.account_id, context.region) + + vpc_endpoint = backend.vpc_end_points.get(vpc_endpoint_id) + if not vpc_endpoint: + raise InvalidVpcEndPointIdError(vpc_endpoint_id) + + if policy_document is not None: + vpc_endpoint.policy_document = policy_document + + if add_route_table_ids is not None: + vpc_endpoint.route_table_ids.extend(add_route_table_ids) + + if remove_route_table_ids is not None: + vpc_endpoint.route_table_ids = [ + id_ for id_ in vpc_endpoint.route_table_ids if id_ not in remove_route_table_ids + ] + + if add_subnet_ids is not None: + vpc_endpoint.subnet_ids.extend(add_subnet_ids) + + if remove_subnet_ids is not None: + vpc_endpoint.subnet_ids = [ + id_ for id_ in vpc_endpoint.subnet_ids if id_ not in remove_subnet_ids + ] + + if private_dns_enabled is not None: + vpc_endpoint.private_dns_enabled = private_dns_enabled + + return ModifyVpcEndpointResult(Return=True) + + @handler("ModifySubnetAttribute", expand=False) + def modify_subnet_attribute( + self, context: RequestContext, request: ModifySubnetAttributeRequest + ) -> None: + try: + return call_moto(context) + except Exception as e: + if not isinstance(e, ResponseParserError) and "InvalidParameterValue" not in str(e): + raise + + backend = get_ec2_backend(context.account_id, context.region) + + # fix setting subnet attributes currently not supported upstream + subnet_id = request["SubnetId"] + host_type = request.get("PrivateDnsHostnameTypeOnLaunch") + a_record_on_launch = request.get("EnableResourceNameDnsARecordOnLaunch") + aaaa_record_on_launch = request.get("EnableResourceNameDnsAAAARecordOnLaunch") + enable_dns64 = request.get("EnableDns64") + + if host_type: + attr_name = camelcase_to_underscores("PrivateDnsNameOptionsOnLaunch") + value = {"HostnameType": host_type} + backend.modify_subnet_attribute(subnet_id, attr_name, value) + ## explicitly checking None value as this could contain a False value + if aaaa_record_on_launch is not None: + attr_name = camelcase_to_underscores("PrivateDnsNameOptionsOnLaunch") + value = {"EnableResourceNameDnsAAAARecord": aaaa_record_on_launch["Value"]} + backend.modify_subnet_attribute(subnet_id, attr_name, value) + if a_record_on_launch is not None: + attr_name = camelcase_to_underscores("PrivateDnsNameOptionsOnLaunch") + value = {"EnableResourceNameDnsARecord": a_record_on_launch["Value"]} + backend.modify_subnet_attribute(subnet_id, attr_name, value) + if enable_dns64 is not None: + attr_name = camelcase_to_underscores("EnableDns64") + backend.modify_subnet_attribute(subnet_id, attr_name, enable_dns64["Value"]) + + 
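+    # A hypothetical call exercising the fallback above:
+    #   ec2.modify_subnet_attribute(SubnetId="subnet-123", EnableDns64={"Value": True})
+    # moto rejects the unsupported attribute, and the except-branch persists it on
+    # the backend via backend.modify_subnet_attribute(...) instead.
+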
@handler("CreateSubnet", expand=False) + def create_subnet( + self, context: RequestContext, request: CreateSubnetRequest + ) -> CreateSubnetResult: + response = call_moto(context) + backend = get_ec2_backend(context.account_id, context.region) + subnet_id = response["Subnet"]["SubnetId"] + host_type = request.get("PrivateDnsHostnameTypeOnLaunch", "ip-name") + attr_name = camelcase_to_underscores("PrivateDnsNameOptionsOnLaunch") + value = {"HostnameType": host_type} + backend.modify_subnet_attribute(subnet_id, attr_name, value) + return response + + @handler("RevokeSecurityGroupEgress", expand=False) + def revoke_security_group_egress( + self, + context: RequestContext, + revoke_security_group_egress_request: RevokeSecurityGroupEgressRequest, + ) -> RevokeSecurityGroupEgressResult: + try: + return call_moto(context) + except Exception as e: + if "specified rule does not exist" in str(e): + backend = get_ec2_backend(context.account_id, context.region) + group_id = revoke_security_group_egress_request["GroupId"] + group = backend.get_security_group_by_name_or_id(group_id) + if group and not group.egress_rules: + return RevokeSecurityGroupEgressResult(Return=True) + raise + + @handler("DescribeSubnets", expand=False) + def describe_subnets( + self, + context: RequestContext, + request: DescribeSubnetsRequest, + ) -> DescribeSubnetsResult: + result = call_moto(context) + backend = get_ec2_backend(context.account_id, context.region) + # add additional/missing attributes in subnet responses + for subnet in result.get("Subnets", []): + subnet_obj = backend.subnets[subnet["AvailabilityZone"]].get(subnet["SubnetId"]) + for attr in ADDITIONAL_SUBNET_ATTRS: + if hasattr(subnet_obj, attr): + attr_name = first_char_to_upper(underscores_to_camelcase(attr)) + if attr_name not in subnet: + subnet[attr_name] = getattr(subnet_obj, attr) + return result + + @handler("CreateTransitGateway", expand=False) + def create_transit_gateway( + self, + context: RequestContext, + request: CreateTransitGatewayRequest, + ) -> CreateTransitGatewayResult: + result = call_moto(context) + backend = get_ec2_backend(context.account_id, context.region) + transit_gateway_id = result["TransitGateway"]["TransitGatewayId"] + transit_gateway = backend.transit_gateways.get(transit_gateway_id) + result.get("TransitGateway").get("Options").update(transit_gateway.options) + return result + + @handler("DescribeTransitGateways", expand=False) + def describe_transit_gateways( + self, + context: RequestContext, + request: DescribeTransitGatewaysRequest, + ) -> DescribeTransitGatewaysResult: + result = call_moto(context) + backend = get_ec2_backend(context.account_id, context.region) + for transit_gateway in result.get("TransitGateways", []): + transit_gateway_id = transit_gateway["TransitGatewayId"] + tgw = backend.transit_gateways.get(transit_gateway_id) + transit_gateway["Options"].update(tgw.options) + return result + + @handler("CreateLaunchTemplate", expand=False) + def create_launch_template( + self, + context: RequestContext, + request: CreateLaunchTemplateRequest, + ) -> CreateLaunchTemplateResult: + # parameter validation + if not request["LaunchTemplateData"]: + raise MissingParameterError(parameter="LaunchTemplateData") + + name = request["LaunchTemplateName"] + if len(name) < 3 or len(name) > 128 or not re.fullmatch(r"[a-zA-Z0-9.\-_()/]*", name): + raise InvalidLaunchTemplateNameError() + + return call_moto(context) + + @handler("ModifyLaunchTemplate", expand=False) + def modify_launch_template( + self, + context: 
RequestContext, + request: ModifyLaunchTemplateRequest, + ) -> ModifyLaunchTemplateResult: + backend = get_ec2_backend(context.account_id, context.region) + template_id = ( + request["LaunchTemplateId"] + or backend.launch_template_name_to_ids[request["LaunchTemplateName"]] + ) + template: MotoLaunchTemplate = backend.launch_templates[template_id] + + # check if defaultVersion exists + if request["DefaultVersion"]: + try: + template.versions[int(request["DefaultVersion"]) - 1] + except IndexError: + raise InvalidLaunchTemplateIdError() + + template.default_version_number = int(request["DefaultVersion"]) + + return ModifyLaunchTemplateResult( + LaunchTemplate=LaunchTemplate( + LaunchTemplateId=template.id, + LaunchTemplateName=template.name, + CreateTime=template.create_time, + DefaultVersionNumber=template.default_version_number, + LatestVersionNumber=template.latest_version_number, + Tags=template.tags, + ) + ) + + @handler("DescribeVpcEndpointServices", expand=False) + def describe_vpc_endpoint_services( + self, + context: RequestContext, + request: DescribeVpcEndpointServicesRequest, + ) -> DescribeVpcEndpointServicesResult: + ep_services = VPCBackend._collect_default_endpoint_services( + account_id=context.account_id, region=context.region + ) + + moto_backend = get_moto_backend(context) + service_names = [s["ServiceName"] for s in ep_services] + execute_api_name = f"com.amazonaws.{context.region}.execute-api" + + if execute_api_name not in service_names: + # ensure that the service entry for execute-api exists + zones = moto_backend.describe_availability_zones() + zones = [zone.name for zone in zones] + private_dns_name = f"*.execute-api.{context.region}.amazonaws.com" + service = { + "ServiceName": execute_api_name, + "ServiceId": f"vpce-svc-{short_uid()}", + "ServiceType": [{"ServiceType": "Interface"}], + "AvailabilityZones": zones, + "Owner": "amazon", + "BaseEndpointDnsNames": [f"execute-api.{context.region}.vpce.amazonaws.com"], + "PrivateDnsName": private_dns_name, + "PrivateDnsNames": [{"PrivateDnsName": private_dns_name}], + "VpcEndpointPolicySupported": True, + "AcceptanceRequired": False, + "ManagesVpcEndpoints": False, + "PrivateDnsNameVerificationState": "verified", + "SupportedIpAddressTypes": ["ipv4"], + } + ep_services.append(service) + + return call_moto(context) + + @handler("DescribeVpcEndpoints", expand=False) + def describe_vpc_endpoints( + self, + context: RequestContext, + request: DescribeVpcEndpointsRequest, + ) -> DescribeVpcEndpointsResult: + result: DescribeVpcEndpointsResult = call_moto(context) + + for endpoint in result.get("VpcEndpoints"): + endpoint.setdefault("DnsOptions", DnsOptions(DnsRecordIpType=DnsRecordIpType.ipv4)) + endpoint.setdefault("IpAddressType", IpAddressType.ipv4) + endpoint.setdefault("RequesterManaged", False) + endpoint.setdefault("RouteTableIds", []) + # AWS parity: Version should not be contained in the policy response + policy = endpoint.get("PolicyDocument") + if policy and '"Version":' in policy: + policy = json.loads(policy) + policy.pop("Version", None) + endpoint["PolicyDocument"] = json.dumps(policy) + + return result + + @handler("CreateFlowLogs", expand=False) + def create_flow_logs( + self, + context: RequestContext, + request: CreateFlowLogsRequest, + **kwargs, + ) -> CreateFlowLogsResult: + if request.get("LogDestination") and request.get("LogGroupName"): + raise CommonServiceException( + code="InvalidParameter", + message="Please only provide LogGroupName or only provide LogDestination.", + ) + if 
request.get("LogDestinationType") == "s3": + if request.get("LogGroupName"): + raise CommonServiceException( + code="InvalidParameter", + message="LogDestination type must be cloud-watch-logs if LogGroupName is provided.", + ) + elif not (bucket_arn := request.get("LogDestination")): + raise CommonServiceException( + code="InvalidParameter", + message="LogDestination can't be empty if LogGroupName is not provided.", + ) + + # Moto will check in memory whether the bucket exists in Moto itself + # we modify the request to not send a destination, so that the validation does not happen + # we can add the validation ourselves + service_request = copy.deepcopy(request) + service_request["LogDestinationType"] = "__placeholder__" + bucket_name = bucket_arn.split(":", 5)[5].split("/")[0] + # TODO: validate how IAM is enforced? probably with DeliverLogsPermissionArn + s3_client = connect_to().s3 + try: + s3_client.head_bucket(Bucket=bucket_name) + except Exception as e: + LOG.debug( + "An exception occurred when trying to create FlowLogs with S3 destination: %s", + e, + ) + return CreateFlowLogsResult( + FlowLogIds=[], + Unsuccessful=[ + UnsuccessfulItem( + Error=UnsuccessfulItemError( + Code="400", + Message=f"LogDestination: {bucket_name} does not exist", + ), + ResourceId=resource_id, + ) + for resource_id in request.get("ResourceIds", []) + ], + ) + + response: CreateFlowLogsResult = call_moto_with_request(context, service_request) + moto_backend = get_moto_backend(context) + for flow_log_id in response["FlowLogIds"]: + if flow_log := moto_backend.flow_logs.get(flow_log_id): + # just to be sure to not override another value, we only replace if it's the placeholder + flow_log.log_destination_type = flow_log.log_destination_type.replace( + "__placeholder__", "s3" + ) + else: + response = call_moto(context) + + return response + + @handler("GetSecurityGroupsForVpc", expand=False) + def get_security_groups_for_vpc( + self, + context: RequestContext, + get_security_groups_for_vpc_request: GetSecurityGroupsForVpcRequest, + ) -> GetSecurityGroupsForVpcResult: + vpc_id = get_security_groups_for_vpc_request.get("VpcId") + backend = get_ec2_backend(context.account_id, context.region) + filters = {"vpc-id": [vpc_id]} + filtered_sgs = backend.describe_security_groups(filters=filters) + + sgs = [ + SecurityGroupForVpc( + Description=sg.description, + GroupId=sg.id, + GroupName=sg.name, + OwnerId=context.account_id, + PrimaryVpcId=sg.vpc_id, + Tags=[{"Key": tag.get("key"), "Value": tag.get("value")} for tag in sg.get_tags()], + ) + for sg in filtered_sgs + ] + return GetSecurityGroupsForVpcResult(SecurityGroupForVpcs=sgs, NextToken=None) + + +@patch(SubnetBackend.modify_subnet_attribute) +def modify_subnet_attribute(fn, self, subnet_id, attr_name, attr_value): + subnet = self.get_subnet(subnet_id) + if attr_name in ADDITIONAL_SUBNET_ATTRS: + # private dns name options on launch contains dict with keys EnableResourceNameDnsARecord and EnableResourceNameDnsAAAARecord, HostnameType + if attr_name == "private_dns_name_options_on_launch": + if hasattr(subnet, attr_name): + getattr(subnet, attr_name).update(attr_value) + return + else: + setattr(subnet, attr_name, attr_value) + return + setattr(subnet, attr_name, attr_value) + return + return fn(self, subnet_id, attr_name, attr_value) + + +def get_moto_backend(context: RequestContext) -> EC2Backend: + """Get the moto EC2 backend for the given request context""" + return ec2_backends[context.account_id][context.region] + + +@patch(Subnet.get_filter_value) +def 
get_filter_value(fn, self, filter_name): + if filter_name in ( + "ipv6CidrBlockAssociationSet.associationId", + "ipv6-cidr-block-association.association-id", + ): + return self.ipv6_cidr_block_associations + return fn(self, filter_name) + + +@patch(TransitGatewayAttachmentBackend.delete_transit_gateway_vpc_attachment) +def delete_transit_gateway_vpc_attachment(fn, self, transit_gateway_attachment_id, **kwargs): + transit_gateway_attachment = self.transit_gateway_attachments.get(transit_gateway_attachment_id) + transit_gateway_attachment.state = "deleted" + return transit_gateway_attachment + + +@patch(FlowLogsBackend._validate_request) +def _validate_request( + fn, + self, + log_group_name: str, + log_destination: str, + log_destination_type: str, + max_aggregation_interval: str, + deliver_logs_permission_arn: str, +) -> None: + if not log_destination_type and log_destination: + # this is to fix the S3 destination issue, the validation will occur in the provider + return + + fn( + self, + log_group_name, + log_destination, + log_destination_type, + max_aggregation_interval, + deliver_logs_permission_arn, + ) diff --git a/localstack/services/stepfunctions/__init__.py b/localstack-core/localstack/services/ec2/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/__init__.py rename to localstack-core/localstack/services/ec2/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions.py new file mode 100644 index 0000000000000..03665a7c45fb6 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions.py @@ -0,0 +1,149 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2DHCPOptionsProperties(TypedDict): + DhcpOptionsId: Optional[str] + DomainName: Optional[str] + DomainNameServers: Optional[list[str]] + NetbiosNameServers: Optional[list[str]] + NetbiosNodeType: Optional[int] + NtpServers: Optional[list[str]] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2DHCPOptionsProvider(ResourceProvider[EC2DHCPOptionsProperties]): + TYPE = "AWS::EC2::DHCPOptions" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2DHCPOptionsProperties], + ) -> ProgressEvent[EC2DHCPOptionsProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/DhcpOptionsId + + + + Create-only properties: + - /properties/NetbiosNameServers + - /properties/NetbiosNodeType + - /properties/NtpServers + - /properties/DomainName + - /properties/DomainNameServers + + Read-only properties: + - /properties/DhcpOptionsId + + IAM permissions required: + - ec2:CreateDhcpOptions + - ec2:DescribeDhcpOptions + - ec2:CreateTags + + """ + model = request.desired_state + + dhcp_configurations = [] + if model.get("DomainName"): + dhcp_configurations.append({"Key": "domain-name", "Values": [model["DomainName"]]}) + if model.get("DomainNameServers"): + dhcp_configurations.append( + {"Key": "domain-name-servers", "Values": model["DomainNameServers"]} + ) + if model.get("NetbiosNameServers"): + dhcp_configurations.append( + {"Key": "netbios-name-servers", "Values": model["NetbiosNameServers"]} + ) + if model.get("NetbiosNodeType"): + dhcp_configurations.append( + {"Key": "netbios-node-type", "Values": [str(model["NetbiosNodeType"])]} + ) + if model.get("NtpServers"): + dhcp_configurations.append({"Key": "ntp-servers", "Values": model["NtpServers"]}) + + create_params = { + "DhcpConfigurations": dhcp_configurations, + } + if model.get("Tags"): + tags = [{"Key": str(tag["Key"]), "Value": str(tag["Value"])} for tag in model["Tags"]] + else: + tags = [] + + default_tags = [ + {"Key": "aws:cloudformation:logical-id", "Value": request.logical_resource_id}, + {"Key": "aws:cloudformation:stack-id", "Value": request.stack_id}, + {"Key": "aws:cloudformation:stack-name", "Value": request.stack_name}, + ] + + create_params["TagSpecifications"] = [ + {"ResourceType": "dhcp-options", "Tags": (tags + default_tags)} + ] + + result = request.aws_client_factory.ec2.create_dhcp_options(**create_params) + model["DhcpOptionsId"] = result["DhcpOptions"]["DhcpOptionsId"] + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[EC2DHCPOptionsProperties], + ) -> ProgressEvent[EC2DHCPOptionsProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeDhcpOptions + - ec2:DescribeTags + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2DHCPOptionsProperties], + ) -> ProgressEvent[EC2DHCPOptionsProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DeleteDhcpOptions + - ec2:DeleteTags + """ + model = request.desired_state + request.aws_client_factory.ec2.delete_dhcp_options(DhcpOptionsId=model["DhcpOptionsId"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[EC2DHCPOptionsProperties], + ) -> ProgressEvent[EC2DHCPOptionsProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:CreateTags + - ec2:DescribeDhcpOptions + - ec2:DeleteTags + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions.schema.json new file mode 100644 index 0000000000000..93e8fd3d62171 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions.schema.json @@ -0,0 +1,120 @@ +{ + "typeName": "AWS::EC2::DHCPOptions", + "description": "Resource Type definition for AWS::EC2::DHCPOptions", + "additionalProperties": false, + "properties": { + "DhcpOptionsId": { + "type": "string" + }, + "DomainName": { + "type": "string", + 
"description": "This value is used to complete unqualified DNS hostnames." + }, + "DomainNameServers": { + "type": "array", + "description": "The IPv4 addresses of up to four domain name servers, or AmazonProvidedDNS.", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "NetbiosNameServers": { + "type": "array", + "description": "The IPv4 addresses of up to four NetBIOS name servers.", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "NetbiosNodeType": { + "type": "integer", + "description": "The NetBIOS node type (1, 2, 4, or 8)." + }, + "NtpServers": { + "type": "array", + "description": "The IPv4 addresses of up to four Network Time Protocol (NTP) servers.", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Tags": { + "type": "array", + "description": "Any tags assigned to the DHCP options set.", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "minLength": 0, + "maxLength": 256 + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "taggable": true, + "createOnlyProperties": [ + "/properties/NetbiosNameServers", + "/properties/NetbiosNodeType", + "/properties/NtpServers", + "/properties/DomainName", + "/properties/DomainNameServers" + ], + "readOnlyProperties": [ + "/properties/DhcpOptionsId" + ], + "primaryIdentifier": [ + "/properties/DhcpOptionsId" + ], + "handlers": { + "create": { + "permissions": [ + "ec2:CreateDhcpOptions", + "ec2:DescribeDhcpOptions", + "ec2:CreateTags" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeDhcpOptions", + "ec2:DescribeTags" + ] + }, + "update": { + "permissions": [ + "ec2:CreateTags", + "ec2:DescribeDhcpOptions", + "ec2:DeleteTags" + ] + }, + "delete": { + "permissions": [ + "ec2:DeleteDhcpOptions", + "ec2:DeleteTags" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeDhcpOptions" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions_plugin.py new file mode 100644 index 0000000000000..c3ac8bb5a5827 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_dhcpoptions_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2DHCPOptionsProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::DHCPOptions" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_dhcpoptions import ( + EC2DHCPOptionsProvider, + ) + + self.factory = EC2DHCPOptionsProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance.py new file mode 100644 index 0000000000000..8c33cde7b2ab8 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance.py @@ -0,0 +1,342 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import base64 +from pathlib import Path +from typing import Optional, TypedDict + +import 
localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.strings import to_str + + +class EC2InstanceProperties(TypedDict): + AdditionalInfo: Optional[str] + Affinity: Optional[str] + AvailabilityZone: Optional[str] + BlockDeviceMappings: Optional[list[BlockDeviceMapping]] + CpuOptions: Optional[CpuOptions] + CreditSpecification: Optional[CreditSpecification] + DisableApiTermination: Optional[bool] + EbsOptimized: Optional[bool] + ElasticGpuSpecifications: Optional[list[ElasticGpuSpecification]] + ElasticInferenceAccelerators: Optional[list[ElasticInferenceAccelerator]] + EnclaveOptions: Optional[EnclaveOptions] + HibernationOptions: Optional[HibernationOptions] + HostId: Optional[str] + HostResourceGroupArn: Optional[str] + IamInstanceProfile: Optional[str] + Id: Optional[str] + ImageId: Optional[str] + InstanceInitiatedShutdownBehavior: Optional[str] + InstanceType: Optional[str] + Ipv6AddressCount: Optional[int] + Ipv6Addresses: Optional[list[InstanceIpv6Address]] + KernelId: Optional[str] + KeyName: Optional[str] + LaunchTemplate: Optional[LaunchTemplateSpecification] + LicenseSpecifications: Optional[list[LicenseSpecification]] + Monitoring: Optional[bool] + NetworkInterfaces: Optional[list[NetworkInterface]] + PlacementGroupName: Optional[str] + PrivateDnsName: Optional[str] + PrivateDnsNameOptions: Optional[PrivateDnsNameOptions] + PrivateIp: Optional[str] + PrivateIpAddress: Optional[str] + PropagateTagsToVolumeOnCreation: Optional[bool] + PublicDnsName: Optional[str] + PublicIp: Optional[str] + RamdiskId: Optional[str] + SecurityGroupIds: Optional[list[str]] + SecurityGroups: Optional[list[str]] + SourceDestCheck: Optional[bool] + SsmAssociations: Optional[list[SsmAssociation]] + SubnetId: Optional[str] + Tags: Optional[list[Tag]] + Tenancy: Optional[str] + UserData: Optional[str] + Volumes: Optional[list[Volume]] + + +class Ebs(TypedDict): + DeleteOnTermination: Optional[bool] + Encrypted: Optional[bool] + Iops: Optional[int] + KmsKeyId: Optional[str] + SnapshotId: Optional[str] + VolumeSize: Optional[int] + VolumeType: Optional[str] + + +class BlockDeviceMapping(TypedDict): + DeviceName: Optional[str] + Ebs: Optional[Ebs] + NoDevice: Optional[dict] + VirtualName: Optional[str] + + +class InstanceIpv6Address(TypedDict): + Ipv6Address: Optional[str] + + +class ElasticGpuSpecification(TypedDict): + Type: Optional[str] + + +class ElasticInferenceAccelerator(TypedDict): + Type: Optional[str] + Count: Optional[int] + + +class Volume(TypedDict): + Device: Optional[str] + VolumeId: Optional[str] + + +class LaunchTemplateSpecification(TypedDict): + Version: Optional[str] + LaunchTemplateId: Optional[str] + LaunchTemplateName: Optional[str] + + +class EnclaveOptions(TypedDict): + Enabled: Optional[bool] + + +class PrivateIpAddressSpecification(TypedDict): + Primary: Optional[bool] + PrivateIpAddress: Optional[str] + + +class NetworkInterface(TypedDict): + DeviceIndex: Optional[str] + AssociateCarrierIpAddress: Optional[bool] + AssociatePublicIpAddress: Optional[bool] + DeleteOnTermination: Optional[bool] + Description: Optional[str] + GroupSet: Optional[list[str]] + Ipv6AddressCount: Optional[int] + Ipv6Addresses: Optional[list[InstanceIpv6Address]] + NetworkInterfaceId: Optional[str] + PrivateIpAddress: Optional[str] + PrivateIpAddresses: Optional[list[PrivateIpAddressSpecification]] + SecondaryPrivateIpAddressCount: 
Optional[int] + SubnetId: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class HibernationOptions(TypedDict): + Configured: Optional[bool] + + +class LicenseSpecification(TypedDict): + LicenseConfigurationArn: Optional[str] + + +class CpuOptions(TypedDict): + CoreCount: Optional[int] + ThreadsPerCore: Optional[int] + + +class PrivateDnsNameOptions(TypedDict): + EnableResourceNameDnsAAAARecord: Optional[bool] + EnableResourceNameDnsARecord: Optional[bool] + HostnameType: Optional[str] + + +class AssociationParameter(TypedDict): + Key: Optional[str] + Value: Optional[list[str]] + + +class SsmAssociation(TypedDict): + DocumentName: Optional[str] + AssociationParameters: Optional[list[AssociationParameter]] + + +class CreditSpecification(TypedDict): + CPUCredits: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2InstanceProvider(ResourceProvider[EC2InstanceProperties]): + TYPE = "AWS::EC2::Instance"  # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2InstanceProperties], + ) -> ProgressEvent[EC2InstanceProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + + + Create-only properties: + - /properties/ElasticGpuSpecifications + - /properties/Ipv6Addresses + - /properties/PlacementGroupName + - /properties/HostResourceGroupArn + - /properties/ImageId + - /properties/CpuOptions + - /properties/PrivateIpAddress + - /properties/ElasticInferenceAccelerators + - /properties/EnclaveOptions + - /properties/HibernationOptions + - /properties/KeyName + - /properties/LicenseSpecifications + - /properties/NetworkInterfaces + - /properties/AvailabilityZone + - /properties/SubnetId + - /properties/LaunchTemplate + - /properties/SecurityGroups + - /properties/Ipv6AddressCount + + Read-only properties: + - /properties/PublicIp + - /properties/Id + - /properties/PublicDnsName + - /properties/PrivateDnsName + - /properties/PrivateIp + + + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + # TODO: validations + + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + # TODO: idempotency + params = util.select_attributes( + model, + ["InstanceType", "SecurityGroups", "KeyName", "ImageId", "MaxCount", "MinCount"], + ) + + # These parameters are not defined in the schema but are required by the API + params["MaxCount"] = 1 + params["MinCount"] = 1 + + if model.get("UserData"): + params["UserData"] = to_str(base64.b64decode(model["UserData"])) + + response = ec2.run_instances(**params) + model["Id"] = response["Instances"][0]["InstanceId"] + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + response = ec2.describe_instances(InstanceIds=[model["Id"]]) + instance = response["Reservations"][0]["Instances"][0] + if instance["State"]["Name"] != "running": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + model["PrivateIp"] = instance["PrivateIpAddress"] + model["PrivateDnsName"] = instance["PrivateDnsName"] + model["AvailabilityZone"] = instance["Placement"]["AvailabilityZone"] + + # PublicIp is not guaranteed to be returned by the request: + # 
https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.aws_ec2.Instance.html#instancepublicip + # it says it is supposed to return an empty string, but trying to add an output with the value will result in + # an error: `Attribute 'PublicIp' does not exist` + if public_ip := instance.get("PublicIpAddress"): + model["PublicIp"] = public_ip + + if public_dns_name := instance.get("PublicDnsName"): + model["PublicDnsName"] = public_dns_name + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2InstanceProperties], + ) -> ProgressEvent[EC2InstanceProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2InstanceProperties], + ) -> ProgressEvent[EC2InstanceProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + ec2.terminate_instances(InstanceIds=[model["Id"]]) + # TODO add checking of ec2 instance state + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2InstanceProperties], + ) -> ProgressEvent[EC2InstanceProperties]: + """ + Update a resource + + + """ + desired_state = request.desired_state + ec2 = request.aws_client_factory.ec2 + + groups = desired_state.get("SecurityGroups", desired_state.get("SecurityGroupIds")) + + kwargs = {} + if groups: + kwargs["Groups"] = groups + ec2.modify_instance_attribute( + InstanceId=desired_state["Id"], + InstanceType={"Value": desired_state["InstanceType"]}, + **kwargs, + ) + + response = ec2.describe_instances(InstanceIds=[desired_state["Id"]]) + instance = response["Reservations"][0]["Instances"][0] + if instance["State"]["Name"] != "running": + return ProgressEvent( + status=OperationStatus.PENDING, + resource_model=desired_state, + custom_context=request.custom_context, + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=desired_state, + custom_context=request.custom_context, + ) diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance.schema.json new file mode 100644 index 0000000000000..85ff4e3fd9d10 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance.schema.json @@ -0,0 +1,540 @@ +{ + "typeName": "AWS::EC2::Instance", + "description": "Resource Type definition for AWS::EC2::Instance", + "additionalProperties": false, + "properties": { + "Tenancy": { + "type": "string" + }, + "SecurityGroups": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "PrivateDnsName": { + "type": "string" + }, + "PrivateIpAddress": { + "type": "string" + }, + "UserData": { + "type": "string" + }, + "BlockDeviceMappings": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/BlockDeviceMapping" + } + }, + "IamInstanceProfile": { + "type": "string" + }, + "Ipv6Addresses": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/InstanceIpv6Address" + } + }, + "KernelId": { + "type": "string" + }, + "SubnetId": { + "type": "string" + }, + "EbsOptimized": { + "type": "boolean" + }, + "PropagateTagsToVolumeOnCreation": { + "type": "boolean" + }, + "ElasticGpuSpecifications": { + "type": "array", + 
"uniqueItems": true, + "items": { + "$ref": "#/definitions/ElasticGpuSpecification" + } + }, + "ElasticInferenceAccelerators": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/ElasticInferenceAccelerator" + } + }, + "Volumes": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Volume" + } + }, + "PrivateIp": { + "type": "string" + }, + "Ipv6AddressCount": { + "type": "integer" + }, + "LaunchTemplate": { + "$ref": "#/definitions/LaunchTemplateSpecification" + }, + "EnclaveOptions": { + "$ref": "#/definitions/EnclaveOptions" + }, + "NetworkInterfaces": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/NetworkInterface" + } + }, + "ImageId": { + "type": "string" + }, + "InstanceType": { + "type": "string" + }, + "Monitoring": { + "type": "boolean" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "AdditionalInfo": { + "type": "string" + }, + "HibernationOptions": { + "$ref": "#/definitions/HibernationOptions" + }, + "LicenseSpecifications": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/LicenseSpecification" + } + }, + "PublicIp": { + "type": "string" + }, + "InstanceInitiatedShutdownBehavior": { + "type": "string" + }, + "CpuOptions": { + "$ref": "#/definitions/CpuOptions" + }, + "AvailabilityZone": { + "type": "string" + }, + "PrivateDnsNameOptions": { + "$ref": "#/definitions/PrivateDnsNameOptions" + }, + "HostId": { + "type": "string" + }, + "HostResourceGroupArn": { + "type": "string" + }, + "PublicDnsName": { + "type": "string" + }, + "SecurityGroupIds": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "DisableApiTermination": { + "type": "boolean" + }, + "KeyName": { + "type": "string" + }, + "RamdiskId": { + "type": "string" + }, + "SourceDestCheck": { + "type": "boolean" + }, + "PlacementGroupName": { + "type": "string" + }, + "SsmAssociations": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/SsmAssociation" + } + }, + "Affinity": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "CreditSpecification": { + "$ref": "#/definitions/CreditSpecification" + } + }, + "definitions": { + "LaunchTemplateSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "LaunchTemplateName": { + "type": "string" + }, + "LaunchTemplateId": { + "type": "string" + }, + "Version": { + "type": "string" + } + }, + "required": [ + "Version" + ] + }, + "HibernationOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Configured": { + "type": "boolean" + } + } + }, + "LicenseSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "LicenseConfigurationArn": { + "type": "string" + } + }, + "required": [ + "LicenseConfigurationArn" + ] + }, + "CpuOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "ThreadsPerCore": { + "type": "integer" + }, + "CoreCount": { + "type": "integer" + } + } + }, + "NoDevice": { + "type": "object", + "additionalProperties": false + }, + "InstanceIpv6Address": { + "type": "object", + "additionalProperties": false, + "properties": { + "Ipv6Address": { + "type": "string" + } + }, + "required": [ + "Ipv6Address" + ] + }, + "NetworkInterface": { + "type": "object", + "additionalProperties": false, + "properties": { + "Description": { + "type": "string" + }, + "PrivateIpAddress": { + 
"type": "string" + }, + "PrivateIpAddresses": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/PrivateIpAddressSpecification" + } + }, + "SecondaryPrivateIpAddressCount": { + "type": "integer" + }, + "DeviceIndex": { + "type": "string" + }, + "GroupSet": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Ipv6Addresses": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/InstanceIpv6Address" + } + }, + "SubnetId": { + "type": "string" + }, + "AssociatePublicIpAddress": { + "type": "boolean" + }, + "NetworkInterfaceId": { + "type": "string" + }, + "AssociateCarrierIpAddress": { + "type": "boolean" + }, + "Ipv6AddressCount": { + "type": "integer" + }, + "DeleteOnTermination": { + "type": "boolean" + } + }, + "required": [ + "DeviceIndex" + ] + }, + "PrivateDnsNameOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "HostnameType": { + "type": "string" + }, + "EnableResourceNameDnsAAAARecord": { + "type": "boolean" + }, + "EnableResourceNameDnsARecord": { + "type": "boolean" + } + } + }, + "ElasticGpuSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "Type": { + "type": "string" + } + }, + "required": [ + "Type" + ] + }, + "ElasticInferenceAccelerator": { + "type": "object", + "additionalProperties": false, + "properties": { + "Type": { + "type": "string" + }, + "Count": { + "type": "integer" + } + }, + "required": [ + "Type" + ] + }, + "SsmAssociation": { + "type": "object", + "additionalProperties": false, + "properties": { + "AssociationParameters": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/AssociationParameter" + } + }, + "DocumentName": { + "type": "string" + } + }, + "required": [ + "DocumentName" + ] + }, + "AssociationParameter": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "PrivateIpAddressSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "PrivateIpAddress": { + "type": "string" + }, + "Primary": { + "type": "boolean" + } + }, + "required": [ + "PrivateIpAddress", + "Primary" + ] + }, + "Volume": { + "type": "object", + "additionalProperties": false, + "properties": { + "VolumeId": { + "type": "string" + }, + "Device": { + "type": "string" + } + }, + "required": [ + "VolumeId", + "Device" + ] + }, + "EnclaveOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + } + } + }, + "Ebs": { + "type": "object", + "additionalProperties": false, + "properties": { + "SnapshotId": { + "type": "string" + }, + "VolumeType": { + "type": "string" + }, + "KmsKeyId": { + "type": "string" + }, + "Encrypted": { + "type": "boolean" + }, + "Iops": { + "type": "integer" + }, + "VolumeSize": { + "type": "integer" + }, + "DeleteOnTermination": { + "type": "boolean" + } + } + }, + "BlockDeviceMapping": { + "type": "object", + "additionalProperties": false, + "properties": { + "NoDevice": { + "$ref": "#/definitions/NoDevice" + }, + "VirtualName": { + "type": "string" + }, + "Ebs": { + "$ref": "#/definitions/Ebs" + }, + "DeviceName": { + "type": "string" + } + }, + "required": [ + "DeviceName" + ] + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + 
"Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "CreditSpecification": { + "type": "object", + "additionalProperties": false, + "properties": { + "CPUCredits": { + "type": "string" + } + } + } + }, + "createOnlyProperties": [ + "/properties/ElasticGpuSpecifications", + "/properties/Ipv6Addresses", + "/properties/PlacementGroupName", + "/properties/HostResourceGroupArn", + "/properties/ImageId", + "/properties/CpuOptions", + "/properties/PrivateIpAddress", + "/properties/ElasticInferenceAccelerators", + "/properties/EnclaveOptions", + "/properties/HibernationOptions", + "/properties/KeyName", + "/properties/LicenseSpecifications", + "/properties/NetworkInterfaces", + "/properties/AvailabilityZone", + "/properties/SubnetId", + "/properties/LaunchTemplate", + "/properties/SecurityGroups", + "/properties/Ipv6AddressCount" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/PublicIp", + "/properties/Id", + "/properties/PublicDnsName", + "/properties/PrivateDnsName", + "/properties/PrivateIp" + ] +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance_plugin.py new file mode 100644 index 0000000000000..60f400297a47f --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_instance_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2InstanceProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::Instance" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_instance import EC2InstanceProvider + + self.factory = EC2InstanceProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway.py new file mode 100644 index 0000000000000..1ad0d6981b9c0 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway.py @@ -0,0 +1,116 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2InternetGatewayProperties(TypedDict): + InternetGatewayId: Optional[str] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2InternetGatewayProvider(ResourceProvider[EC2InternetGatewayProperties]): + TYPE = "AWS::EC2::InternetGateway" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2InternetGatewayProperties], + ) -> ProgressEvent[EC2InternetGatewayProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/InternetGatewayId + + Read-only properties: + - /properties/InternetGatewayId + + IAM permissions required: + - ec2:CreateInternetGateway + - ec2:CreateTags + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + tags = [{"ResourceType": "internet-gateway", "Tags": model.get("Tags", [])}] + + response = ec2.create_internet_gateway(TagSpecifications=tags) + model["InternetGatewayId"] = response["InternetGateway"]["InternetGatewayId"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2InternetGatewayProperties], + ) -> ProgressEvent[EC2InternetGatewayProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeInternetGateways + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2InternetGatewayProperties], + ) -> ProgressEvent[EC2InternetGatewayProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DeleteInternetGateway + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + # detach the gateway from any attached VPCs before deleting it + response = ec2.describe_internet_gateways(InternetGatewayIds=[model["InternetGatewayId"]]) + + for gateway in response.get("InternetGateways", []): + for attachment in gateway.get("Attachments", []): + ec2.detach_internet_gateway( + InternetGatewayId=model["InternetGatewayId"], VpcId=attachment["VpcId"] + ) + ec2.delete_internet_gateway(InternetGatewayId=model["InternetGatewayId"]) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2InternetGatewayProperties], + ) -> ProgressEvent[EC2InternetGatewayProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:DeleteTags + - ec2:CreateTags + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway.schema.json new file mode 100644 index 0000000000000..62fd843a46c3f --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway.schema.json @@ -0,0 +1,78 @@ +{ + "typeName": "AWS::EC2::InternetGateway", + "description": "Resource Type definition for AWS::EC2::InternetGateway", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "additionalProperties": false, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "maxLength": 256 + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "properties": { + "InternetGatewayId": { + "description": "ID of internet gateway.", + "type": "string" + }, + "Tags": { + "description": "Any tags to assign to the internet gateway.", + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "taggable": true, + "readOnlyProperties": [ + "/properties/InternetGatewayId" + ], + "primaryIdentifier": [ + "/properties/InternetGatewayId" + ], + "handlers": { + "create": { + "permissions": [ + "ec2:CreateInternetGateway", + "ec2:CreateTags" + ] + }, + "read": { + "permissions": [ + 
"ec2:DescribeInternetGateways" + ] + }, + "delete": { + "permissions": [ + "ec2:DeleteInternetGateway" + ] + }, + "update": { + "permissions": [ + "ec2:DeleteTags", + "ec2:CreateTags" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeInternetGateways" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway_plugin.py new file mode 100644 index 0000000000000..51c889fae01a0 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_internetgateway_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2InternetGatewayProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::InternetGateway" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_internetgateway import ( + EC2InternetGatewayProvider, + ) + + self.factory = EC2InternetGatewayProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair.py new file mode 100644 index 0000000000000..8c03d6bc738b5 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair.py @@ -0,0 +1,148 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2KeyPairProperties(TypedDict): + KeyName: Optional[str] + KeyFingerprint: Optional[str] + KeyFormat: Optional[str] + KeyPairId: Optional[str] + KeyType: Optional[str] + PublicKeyMaterial: Optional[str] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2KeyPairProvider(ResourceProvider[EC2KeyPairProperties]): + TYPE = "AWS::EC2::KeyPair" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2KeyPairProperties], + ) -> ProgressEvent[EC2KeyPairProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/KeyName + + Required properties: + - KeyName + + Create-only properties: + - /properties/KeyName + - /properties/KeyType + - /properties/KeyFormat + - /properties/PublicKeyMaterial + - /properties/Tags + + Read-only properties: + - /properties/KeyPairId + - /properties/KeyFingerprint + + IAM permissions required: + - ec2:CreateKeyPair + - ec2:ImportKeyPair + - ec2:CreateTags + - ssm:PutParameter + + """ + model = request.desired_state + + if "KeyName" not in model: + raise ValueError("Property 'KeyName' is required") + + if public_key_material := model.get("PublicKeyMaterial"): + response = request.aws_client_factory.ec2.import_key_pair( + KeyName=model["KeyName"], + PublicKeyMaterial=public_key_material, + ) + else: + create_params = util.select_attributes( + model, ["KeyName", "KeyType", "KeyFormat", "Tags"] + ) + response = request.aws_client_factory.ec2.create_key_pair(**create_params) + + model["KeyPairId"] = response["KeyPairId"] + model["KeyFingerprint"] = response["KeyFingerprint"] + + request.aws_client_factory.ssm.put_parameter( + Name=f"/ec2/keypair/{model['KeyPairId']}", + Value=model["KeyName"], + Type="String", + Overwrite=True, + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[EC2KeyPairProperties], + ) -> ProgressEvent[EC2KeyPairProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeKeyPairs + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2KeyPairProperties], + ) -> ProgressEvent[EC2KeyPairProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DeleteKeyPair + - ssm:DeleteParameter + - ec2:DescribeKeyPairs + """ + + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + ec2.delete_key_pair(KeyName=model["KeyName"]) + + request.aws_client_factory.ssm.delete_parameter( + Name=f"/ec2/keypair/{model['KeyPairId']}", + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2KeyPairProperties], + ) -> ProgressEvent[EC2KeyPairProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair.schema.json new file mode 100644 index 0000000000000..d5b65ffc19a74 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair.schema.json @@ -0,0 +1,133 @@ +{ + "typeName": "AWS::EC2::KeyPair", + "description": "The AWS::EC2::KeyPair creates an SSH key pair", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "definitions": { + "Tag": { + "description": "A key-value pair to associate with a resource.", + "type": "object", + "properties": { + "Key": { + "type": "string", + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. 
You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "minLength": 0, + "maxLength": 256 + } + }, + "required": [ + "Key", + "Value" + ], + "additionalProperties": false + } + }, + "properties": { + "KeyName": { + "description": "The name of the SSH key pair", + "type": "string" + }, + "KeyType": { + "description": "The crypto-system used to generate a key pair.", + "type": "string", + "default": "rsa", + "enum": [ + "rsa", + "ed25519" + ] + }, + "KeyFormat": { + "description": "The format of the private key", + "type": "string", + "default": "pem", + "enum": [ + "pem", + "ppk" + ] + }, + "PublicKeyMaterial": { + "description": "Plain text public key to import", + "type": "string" + }, + "KeyFingerprint": { + "description": "A short sequence of bytes used for public key verification", + "type": "string" + }, + "KeyPairId": { + "description": "An AWS generated ID for the key pair", + "type": "string" + }, + "Tags": { + "description": "An array of key-value pairs to apply to this resource.", + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "additionalProperties": false, + "required": [ + "KeyName" + ], + "primaryIdentifier": [ + "/properties/KeyName" + ], + "additionalIdentifiers": [ + [ + "/properties/KeyPairId" + ] + ], + "createOnlyProperties": [ + "/properties/KeyName", + "/properties/KeyType", + "/properties/KeyFormat", + "/properties/PublicKeyMaterial", + "/properties/Tags" + ], + "writeOnlyProperties": [ + "/properties/KeyFormat" + ], + "readOnlyProperties": [ + "/properties/KeyPairId", + "/properties/KeyFingerprint" + ], + "tagging": { + "taggable": true, + "tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "handlers": { + "create": { + "permissions": [ + "ec2:CreateKeyPair", + "ec2:ImportKeyPair", + "ec2:CreateTags", + "ssm:PutParameter" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeKeyPairs" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeKeyPairs" + ] + }, + "delete": { + "permissions": [ + "ec2:DeleteKeyPair", + "ssm:DeleteParameter", + "ec2:DescribeKeyPairs" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair_plugin.py new file mode 100644 index 0000000000000..5bb9524b1f667 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_keypair_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2KeyPairProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::KeyPair" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_keypair import EC2KeyPairProvider + + self.factory = EC2KeyPairProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway.py new file mode 100644 index 0000000000000..de03079d89699 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway.py @@ -0,0 +1,183 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, 
TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2NatGatewayProperties(TypedDict): + SubnetId: Optional[str] + AllocationId: Optional[str] + ConnectivityType: Optional[str] + MaxDrainDurationSeconds: Optional[int] + NatGatewayId: Optional[str] + PrivateIpAddress: Optional[str] + SecondaryAllocationIds: Optional[list[str]] + SecondaryPrivateIpAddressCount: Optional[int] + SecondaryPrivateIpAddresses: Optional[list[str]] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2NatGatewayProvider(ResourceProvider[EC2NatGatewayProperties]): + TYPE = "AWS::EC2::NatGateway"  # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2NatGatewayProperties], + ) -> ProgressEvent[EC2NatGatewayProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/NatGatewayId + + Required properties: + - SubnetId + + Create-only properties: + - /properties/SubnetId + - /properties/ConnectivityType + - /properties/AllocationId + - /properties/PrivateIpAddress + + Read-only properties: + - /properties/NatGatewayId + + IAM permissions required: + - ec2:CreateNatGateway + - ec2:DescribeNatGateways + - ec2:CreateTags + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + # TODO: validations + # TODO: add tests for this resource; at the moment it is not covered + + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + # TODO: defaults + # TODO: idempotency + params = util.select_attributes( + model, + ["SubnetId", "AllocationId"], + ) + + if model.get("Tags"): + tags = [{"ResourceType": "natgateway", "Tags": model.get("Tags")}] + params["TagSpecifications"] = tags + + # pass the assembled params so that TagSpecifications are not dropped + response = ec2.create_nat_gateway(**params) + model["NatGatewayId"] = response["NatGateway"]["NatGatewayId"] + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + response = ec2.describe_nat_gateways(NatGatewayIds=[model["NatGatewayId"]]) + if response["NatGateways"][0]["State"] == "pending": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + # TODO add handling for failed events + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2NatGatewayProperties], + ) -> ProgressEvent[EC2NatGatewayProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeNatGateways + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2NatGatewayProperties], + ) -> ProgressEvent[EC2NatGatewayProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DeleteNatGateway + - ec2:DescribeNatGateways + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + if not request.custom_context.get(REPEATED_INVOCATION): + request.custom_context[REPEATED_INVOCATION] = True + 
ec2.delete_nat_gateway(NatGatewayId=model["NatGatewayId"]) + + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + is_deleting = False + try: + response = ec2.describe_nat_gateways(NatGatewayIds=[model["NatGatewayId"]]) + is_deleting = response["NatGateways"][0]["State"] == "deleting" + except ec2.exceptions.ClientError: + pass + + if is_deleting: + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2NatGatewayProperties], + ) -> ProgressEvent[EC2NatGatewayProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:DescribeNatGateways + - ec2:CreateTags + - ec2:DeleteTags + - ec2:AssociateNatGatewayAddress + - ec2:DisassociateNatGatewayAddress + - ec2:AssignPrivateNatGatewayAddress + - ec2:UnassignPrivateNatGatewayAddress + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway.schema.json new file mode 100644 index 0000000000000..99f268a2dfc29 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway.schema.json @@ -0,0 +1,131 @@ +{ + "typeName": "AWS::EC2::NatGateway", + "description": "Resource Type definition for AWS::EC2::NatGateway", + "additionalProperties": false, + "properties": { + "SubnetId": { + "type": "string" + }, + "NatGatewayId": { + "type": "string" + }, + "ConnectivityType": { + "type": "string" + }, + "PrivateIpAddress": { + "type": "string" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "AllocationId": { + "type": "string" + }, + "SecondaryAllocationIds": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "type": "string" + } + }, + "SecondaryPrivateIpAddresses": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "type": "string" + } + }, + "SecondaryPrivateIpAddressCount": { + "type": "integer", + "minimum": 1 + }, + "MaxDrainDurationSeconds": { + "type": "integer" + } + }, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "required": [ + "SubnetId" + ], + "createOnlyProperties": [ + "/properties/SubnetId", + "/properties/ConnectivityType", + "/properties/AllocationId", + "/properties/PrivateIpAddress" + ], + "primaryIdentifier": [ + "/properties/NatGatewayId" + ], + "readOnlyProperties": [ + "/properties/NatGatewayId" + ], + "writeOnlyProperties": [ + "/properties/MaxDrainDurationSeconds" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true + }, + "handlers": { + "create": { + "permissions": [ + "ec2:CreateNatGateway", + "ec2:DescribeNatGateways", + "ec2:CreateTags" + ] + }, + "delete": { + "permissions": [ + "ec2:DeleteNatGateway", + "ec2:DescribeNatGateways" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeNatGateways" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeNatGateways" + ] + }, + "update": { + 
"permissions": [ + "ec2:DescribeNatGateways", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:AssociateNatGatewayAddress", + "ec2:DisassociateNatGatewayAddress", + "ec2:AssignPrivateNatGatewayAddress", + "ec2:UnassignPrivateNatGatewayAddress" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway_plugin.py new file mode 100644 index 0000000000000..e8036702f5e79 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_natgateway_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2NatGatewayProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::NatGateway" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_natgateway import ( + EC2NatGatewayProvider, + ) + + self.factory = EC2NatGatewayProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl.py new file mode 100644 index 0000000000000..47d36951d0068 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl.py @@ -0,0 +1,116 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2NetworkAclProperties(TypedDict): + VpcId: Optional[str] + Id: Optional[str] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2NetworkAclProvider(ResourceProvider[EC2NetworkAclProperties]): + TYPE = "AWS::EC2::NetworkAcl" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2NetworkAclProperties], + ) -> ProgressEvent[EC2NetworkAclProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - VpcId + + Create-only properties: + - /properties/VpcId + + Read-only properties: + - /properties/Id + + IAM permissions required: + - ec2:CreateNetworkAcl + - ec2:DescribeNetworkAcls + + """ + model = request.desired_state + + create_params = { + "VpcId": model["VpcId"], + } + + if model.get("Tags"): + create_params["TagSpecifications"] = [ + { + "ResourceType": "network-acl", + "Tags": [{"Key": tag["Key"], "Value": tag["Value"]} for tag in model["Tags"]], + } + ] + + response = request.aws_client_factory.ec2.create_network_acl(**create_params) + model["Id"] = response["NetworkAcl"]["NetworkAclId"] + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[EC2NetworkAclProperties], + ) -> ProgressEvent[EC2NetworkAclProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeNetworkAcls + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2NetworkAclProperties], + ) -> ProgressEvent[EC2NetworkAclProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DeleteNetworkAcl + - ec2:DescribeNetworkAcls + """ + model = request.desired_state + request.aws_client_factory.ec2.delete_network_acl(NetworkAclId=model["Id"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[EC2NetworkAclProperties], + ) -> ProgressEvent[EC2NetworkAclProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:DescribeNetworkAcls + - ec2:DeleteTags + - ec2:CreateTags + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl.schema.json new file mode 100644 index 0000000000000..52bdc7cdca1ca --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl.schema.json @@ -0,0 +1,92 @@ +{ + "typeName": "AWS::EC2::NetworkAcl", + "description": "Resource Type definition for AWS::EC2::NetworkAcl", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-ec2.git", + "additionalProperties": false, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "properties": { + "Id": { + "type": "string" + }, + "Tags": { + "description": "The tags to assign to the network ACL.", + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "VpcId": { + "description": "The ID of the VPC.", + "type": "string" + } + }, + "required": [ + "VpcId" + ], + "createOnlyProperties": [ + "/properties/VpcId" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "handlers": { + "create": { + "permissions": [ + "ec2:CreateNetworkAcl", + "ec2:DescribeNetworkAcls" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeNetworkAcls" + ] + }, + "update": { + "permissions": [ + "ec2:DescribeNetworkAcls", + "ec2:DeleteTags", + "ec2:CreateTags" + ] + }, + "delete": { + 
"permissions": [ + "ec2:DeleteNetworkAcl", + "ec2:DescribeNetworkAcls" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeNetworkAcls" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl_plugin.py new file mode 100644 index 0000000000000..0f24a9cd40adc --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_networkacl_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2NetworkAclProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::NetworkAcl" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_networkacl import ( + EC2NetworkAclProvider, + ) + + self.factory = EC2NetworkAclProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist.py new file mode 100644 index 0000000000000..8308fb5bfa990 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist.py @@ -0,0 +1,167 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2PrefixListProperties(TypedDict): + AddressFamily: Optional[str] + MaxEntries: Optional[int] + PrefixListName: Optional[str] + Arn: Optional[str] + Entries: Optional[list[Entry]] + OwnerId: Optional[str] + PrefixListId: Optional[str] + Tags: Optional[list[Tag]] + Version: Optional[int] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class Entry(TypedDict): + Cidr: Optional[str] + Description: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2PrefixListProvider(ResourceProvider[EC2PrefixListProperties]): + TYPE = "AWS::EC2::PrefixList" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2PrefixListProperties], + ) -> ProgressEvent[EC2PrefixListProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/PrefixListId + + Required properties: + - PrefixListName + - MaxEntries + - AddressFamily + + + + Read-only properties: + - /properties/PrefixListId + - /properties/OwnerId + - /properties/Version + - /properties/Arn + + IAM permissions required: + - EC2:CreateManagedPrefixList + - EC2:DescribeManagedPrefixLists + - EC2:CreateTags + + """ + model = request.desired_state + + if not request.custom_context.get(REPEATED_INVOCATION): + create_params = util.select_attributes( + model, ["PrefixListName", "Entries", "MaxEntries", "AddressFamily", "Tags"] + ) + + if "Tags" in create_params: + create_params["TagSpecifications"] = [ + {"ResourceType": "prefix-list", "Tags": create_params.pop("Tags")} + ] + + response = request.aws_client_factory.ec2.create_managed_prefix_list(**create_params) + model["Arn"] = response["PrefixList"]["PrefixListArn"] + model["OwnerId"] = response["PrefixList"]["OwnerId"] + model["PrefixListId"] = response["PrefixList"]["PrefixListId"] + model["Version"] = response["PrefixList"]["Version"] + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + response = request.aws_client_factory.ec2.describe_managed_prefix_lists( + PrefixListIds=[model["PrefixListId"]] + ) + if not response["PrefixLists"]: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + custom_context=request.custom_context, + message="Resource not found after creation", + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2PrefixListProperties], + ) -> ProgressEvent[EC2PrefixListProperties]: + """ + Fetch resource information + + IAM permissions required: + - EC2:GetManagedPrefixListEntries + - EC2:DescribeManagedPrefixLists + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2PrefixListProperties], + ) -> ProgressEvent[EC2PrefixListProperties]: + """ + Delete a resource + + IAM permissions required: + - EC2:DeleteManagedPrefixList + - EC2:DescribeManagedPrefixLists + """ + + model = request.previous_state + response = request.aws_client_factory.ec2.describe_managed_prefix_lists( + PrefixListIds=[model["PrefixListId"]] + ) + + if not response["PrefixLists"]: + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + request.aws_client_factory.ec2.delete_managed_prefix_list( + PrefixListId=request.previous_state["PrefixListId"] + ) + return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model) + + def update( + self, + request: ResourceRequest[EC2PrefixListProperties], + ) -> ProgressEvent[EC2PrefixListProperties]: + """ + Update a resource + + IAM permissions required: + - EC2:DescribeManagedPrefixLists + - EC2:GetManagedPrefixListEntries + - EC2:ModifyManagedPrefixList + - EC2:CreateTags + - EC2:DeleteTags + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist.schema.json new file mode 100644 index 0000000000000..cb27aefee2bd3 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist.schema.json @@ -0,0 +1,152 @@ +{ + "typeName": "AWS::EC2::PrefixList", + "description": "Resource schema of 
AWS::EC2::PrefixList Type", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "definitions": { + "Tag": { + "type": "object", + "properties": { + "Key": { + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "maxLength": 256 + } + }, + "required": [ + "Key" + ], + "additionalProperties": false + }, + "Entry": { + "type": "object", + "properties": { + "Cidr": { + "type": "string", + "minLength": 1, + "maxLength": 46 + }, + "Description": { + "type": "string", + "minLength": 0, + "maxLength": 255 + } + }, + "required": [ + "Cidr" + ], + "additionalProperties": false + } + }, + "properties": { + "PrefixListName": { + "description": "Name of Prefix List.", + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "PrefixListId": { + "description": "Id of Prefix List.", + "type": "string" + }, + "OwnerId": { + "description": "Owner Id of Prefix List.", + "type": "string" + }, + "AddressFamily": { + "description": "Ip Version of Prefix List.", + "type": "string", + "enum": [ + "IPv4", + "IPv6" + ] + }, + "MaxEntries": { + "description": "Max Entries of Prefix List.", + "type": "integer", + "minimum": 1 + }, + "Version": { + "description": "Version of Prefix List.", + "type": "integer" + }, + "Tags": { + "description": "Tags for Prefix List", + "type": "array", + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Entries": { + "description": "Entries of Prefix List.", + "type": "array", + "items": { + "$ref": "#/definitions/Entry" + } + }, + "Arn": { + "description": "The Amazon Resource Name (ARN) of the Prefix List.", + "type": "string" + } + }, + "required": [ + "PrefixListName", + "MaxEntries", + "AddressFamily" + ], + "readOnlyProperties": [ + "/properties/PrefixListId", + "/properties/OwnerId", + "/properties/Version", + "/properties/Arn" + ], + "primaryIdentifier": [ + "/properties/PrefixListId" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true + }, + "handlers": { + "create": { + "permissions": [ + "EC2:CreateManagedPrefixList", + "EC2:DescribeManagedPrefixLists", + "EC2:CreateTags" + ] + }, + "read": { + "permissions": [ + "EC2:GetManagedPrefixListEntries", + "EC2:DescribeManagedPrefixLists" + ] + }, + "update": { + "permissions": [ + "EC2:DescribeManagedPrefixLists", + "EC2:GetManagedPrefixListEntries", + "EC2:ModifyManagedPrefixList", + "EC2:CreateTags", + "EC2:DeleteTags" + ] + }, + "delete": { + "permissions": [ + "EC2:DeleteManagedPrefixList", + "EC2:DescribeManagedPrefixLists" + ] + }, + "list": { + "permissions": [ + "EC2:DescribeManagedPrefixLists", + "EC2:GetManagedPrefixListEntries" + ] + } + }, + "additionalProperties": false +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist_plugin.py new file mode 100644 index 0000000000000..5d8b993d28409 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_prefixlist_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2PrefixListProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::PrefixList" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from 
localstack.services.ec2.resource_providers.aws_ec2_prefixlist import ( + EC2PrefixListProvider, + ) + + self.factory = EC2PrefixListProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route.py new file mode 100644 index 0000000000000..c779541d04229 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route.py @@ -0,0 +1,137 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +from moto.ec2.utils import generate_route_id + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2RouteProperties(TypedDict): + RouteTableId: Optional[str] + CarrierGatewayId: Optional[str] + DestinationCidrBlock: Optional[str] + DestinationIpv6CidrBlock: Optional[str] + EgressOnlyInternetGatewayId: Optional[str] + GatewayId: Optional[str] + Id: Optional[str] + InstanceId: Optional[str] + LocalGatewayId: Optional[str] + NatGatewayId: Optional[str] + NetworkInterfaceId: Optional[str] + TransitGatewayId: Optional[str] + VpcEndpointId: Optional[str] + VpcPeeringConnectionId: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2RouteProvider(ResourceProvider[EC2RouteProperties]): + TYPE = "AWS::EC2::Route" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2RouteProperties], + ) -> ProgressEvent[EC2RouteProperties]: + """ + Create a new resource. 
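Routes have no server-generated identifier, so the physical ID is synthesized locally with moto's generate_route_id from the route table ID and destination CIDR (e.g. "rtb-0abc~10.0.0.0/24"; exact format illustrative).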
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - RouteTableId + + Create-only properties: + - /properties/RouteTableId + - /properties/DestinationCidrBlock + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + cidr_block = model.get("DestinationCidrBlock") + ipv6_cidr_block = model.get("DestinationIpv6CidrBlock", "") + + ec2.create_route( + DestinationCidrBlock=cidr_block, + DestinationIpv6CidrBlock=ipv6_cidr_block, + RouteTableId=model["RouteTableId"], + ) + model["Id"] = generate_route_id( + model["RouteTableId"], + cidr_block, + ipv6_cidr_block, + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2RouteProperties], + ) -> ProgressEvent[EC2RouteProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2RouteProperties], + ) -> ProgressEvent[EC2RouteProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + cidr_block = model.get("DestinationCidrBlock") + ipv6_cidr_block = model.get("DestinationIpv6CidrBlock", "") + + try: + ec2.delete_route( + DestinationCidrBlock=cidr_block, + DestinationIpv6CidrBlock=ipv6_cidr_block, + RouteTableId=model["RouteTableId"], + ) + except ec2.exceptions.ClientError: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2RouteProperties], + ) -> ProgressEvent[EC2RouteProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route.schema.json new file mode 100644 index 0000000000000..151c2d115972e --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route.schema.json @@ -0,0 +1,62 @@ +{ + "typeName": "AWS::EC2::Route", + "description": "Resource Type definition for AWS::EC2::Route", + "additionalProperties": false, + "properties": { + "DestinationIpv6CidrBlock": { + "type": "string" + }, + "RouteTableId": { + "type": "string" + }, + "InstanceId": { + "type": "string" + }, + "LocalGatewayId": { + "type": "string" + }, + "CarrierGatewayId": { + "type": "string" + }, + "DestinationCidrBlock": { + "type": "string" + }, + "GatewayId": { + "type": "string" + }, + "NetworkInterfaceId": { + "type": "string" + }, + "VpcEndpointId": { + "type": "string" + }, + "TransitGatewayId": { + "type": "string" + }, + "VpcPeeringConnectionId": { + "type": "string" + }, + "EgressOnlyInternetGatewayId": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "NatGatewayId": { + "type": "string" + } + }, + "required": [ + "RouteTableId" + ], + "createOnlyProperties": [ + "/properties/RouteTableId", + "/properties/DestinationCidrBlock" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route_plugin.py new file mode 100644 index 0000000000000..abd759b08aaca --- /dev/null +++ 
b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_route_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2RouteProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::Route" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_route import EC2RouteProvider + + self.factory = EC2RouteProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable.py new file mode 100644 index 0000000000000..618c3fad99c08 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable.py @@ -0,0 +1,123 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2RouteTableProperties(TypedDict): + VpcId: Optional[str] + RouteTableId: Optional[str] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2RouteTableProvider(ResourceProvider[EC2RouteTableProperties]): + TYPE = "AWS::EC2::RouteTable" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2RouteTableProperties], + ) -> ProgressEvent[EC2RouteTableProperties]: + """ + Create a new resource. 
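Tags are attached in the same API call via TagSpecifications on create_route_table, so no separate create_tags request is needed.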
+ + Primary identifier fields: + - /properties/RouteTableId + + Required properties: + - VpcId + + Create-only properties: + - /properties/VpcId + + Read-only properties: + - /properties/RouteTableId + + IAM permissions required: + - ec2:CreateRouteTable + - ec2:CreateTags + - ec2:DescribeRouteTables + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + # TODO: validations + params = util.select_attributes(model, ["VpcId", "Tags"]) + + tags = [{"ResourceType": "route-table", "Tags": params.get("Tags", [])}] + + response = ec2.create_route_table(VpcId=params["VpcId"], TagSpecifications=tags) + model["RouteTableId"] = response["RouteTable"]["RouteTableId"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2RouteTableProperties], + ) -> ProgressEvent[EC2RouteTableProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeRouteTables + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2RouteTableProperties], + ) -> ProgressEvent[EC2RouteTableProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DescribeRouteTables + - ec2:DeleteRouteTable + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + try: + ec2.delete_route_table(RouteTableId=model["RouteTableId"]) + except ec2.exceptions.ClientError: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2RouteTableProperties], + ) -> ProgressEvent[EC2RouteTableProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:CreateTags + - ec2:DeleteTags + - ec2:DescribeRouteTables + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable.schema.json new file mode 100644 index 0000000000000..491be25027a62 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable.schema.json @@ -0,0 +1,94 @@ +{ + "typeName": "AWS::EC2::RouteTable", + "description": "Resource Type definition for AWS::EC2::RouteTable", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-ec2", + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "properties": { + "RouteTableId": { + "description": "The route table ID.", + "type": "string" + }, + "Tags": { + "description": "Any tags assigned to the route table.", + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "VpcId": { + "description": "The ID of the VPC.", + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "VpcId" + ], + "createOnlyProperties": [ + "/properties/VpcId" + ], + "readOnlyProperties": [ + "/properties/RouteTableId" + ], + "primaryIdentifier": [ + "/properties/RouteTableId" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "handlers": { + "create": { + "permissions": [ + 
"ec2:CreateRouteTable", + "ec2:CreateTags", + "ec2:DescribeRouteTables" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeRouteTables" + ] + }, + "update": { + "permissions": [ + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:DescribeRouteTables" + ] + }, + "delete": { + "permissions": [ + "ec2:DescribeRouteTables", + "ec2:DeleteRouteTable" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeRouteTables" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable_plugin.py new file mode 100644 index 0000000000000..07396c832bf66 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_routetable_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2RouteTableProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::RouteTable" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_routetable import ( + EC2RouteTableProvider, + ) + + self.factory = EC2RouteTableProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup.py new file mode 100644 index 0000000000000..39621b8e5178e --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup.py @@ -0,0 +1,230 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + Properties, + ResourceProvider, + ResourceRequest, +) + + +class EC2SecurityGroupProperties(TypedDict): + GroupDescription: Optional[str] + GroupId: Optional[str] + GroupName: Optional[str] + Id: Optional[str] + SecurityGroupEgress: Optional[list[Egress]] + SecurityGroupIngress: Optional[list[Ingress]] + Tags: Optional[list[Tag]] + VpcId: Optional[str] + + +class Ingress(TypedDict): + IpProtocol: Optional[str] + CidrIp: Optional[str] + CidrIpv6: Optional[str] + Description: Optional[str] + FromPort: Optional[int] + SourcePrefixListId: Optional[str] + SourceSecurityGroupId: Optional[str] + SourceSecurityGroupName: Optional[str] + SourceSecurityGroupOwnerId: Optional[str] + ToPort: Optional[int] + + +class Egress(TypedDict): + IpProtocol: Optional[str] + CidrIp: Optional[str] + CidrIpv6: Optional[str] + Description: Optional[str] + DestinationPrefixListId: Optional[str] + DestinationSecurityGroupId: Optional[str] + FromPort: Optional[int] + ToPort: Optional[int] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +def model_from_description(sg_description: dict) -> dict: + model = { + "Id": sg_description.get("GroupId"), + "GroupId": sg_description.get("GroupId"), + "GroupName": sg_description.get("GroupName"), + "GroupDescription": sg_description.get("Description"), + "SecurityGroupEgress": [], + "SecurityGroupIngress": [], + } + if tags := sg_description.get("Tags"): + model["Tags"] = tags + + for i, egress in 
enumerate(sg_description.get("IpPermissionsEgress", [])): + for ip_range in egress.get("IpRanges", []): + model["SecurityGroupEgress"].append( + { + "CidrIp": ip_range.get("CidrIp"), + "FromPort": egress.get("FromPort", -1), + "IpProtocol": egress.get("IpProtocol", "-1"), + "ToPort": egress.get("ToPort", -1), + } + ) + + for i, ingress in enumerate(sg_description.get("IpPermissions", [])): + for ip_range in ingress.get("IpRanges", []): + model["SecurityGroupIngress"].append( + { + "CidrIp": ip_range.get("CidrIp"), + "FromPort": ingress.get("FromPort", -1), + "IpProtocol": ingress.get("IpProtocol", "-1"), + "ToPort": ingress.get("ToPort", -1), + } + ) + + model["VpcId"] = sg_description.get("VpcId") + return model + + +class EC2SecurityGroupProvider(ResourceProvider[EC2SecurityGroupProperties]): + TYPE = "AWS::EC2::SecurityGroup" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2SecurityGroupProperties], + ) -> ProgressEvent[EC2SecurityGroupProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - GroupDescription + + Create-only properties: + - /properties/GroupDescription + - /properties/GroupName + - /properties/VpcId + + Read-only properties: + - /properties/Id + - /properties/GroupId + + + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + params = {} + + if not model.get("GroupName"): + params["GroupName"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + else: + params["GroupName"] = model["GroupName"] + + if vpc_id := model.get("VpcId"): + params["VpcId"] = vpc_id + + params["Description"] = model.get("GroupDescription", "") + + tags = [ + {"Key": "aws:cloudformation:logical-id", "Value": request.logical_resource_id}, + {"Key": "aws:cloudformation:stack-id", "Value": request.stack_id}, + {"Key": "aws:cloudformation:stack-name", "Value": request.stack_name}, + ] + + if model_tags := model.get("Tags"): + tags += model_tags + + params["TagSpecifications"] = [{"ResourceType": "security-group", "Tags": tags}] + + response = ec2.create_security_group(**params) + model["GroupId"] = response["GroupId"] + + # When you pass the logical ID of this resource to the intrinsic Ref function, + # Ref returns the ID of the security group if you specified the VpcId property. + # Otherwise, it returns the name of the security group. 
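+ # e.g. Ref resolves to "sg-0123456789abcdef0" for a VPC group, or to the generated GroupName (such as "mystack-MySG-1ABC") otherwise -- example values illustrative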
+ # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-securitygroup.html#aws-resource-ec2-securitygroup-return-values-ref + if "VpcId" in model: + model["Id"] = response["GroupId"] + else: + model["Id"] = params["GroupName"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2SecurityGroupProperties], + ) -> ProgressEvent[EC2SecurityGroupProperties]: + """ + Fetch resource information + """ + + model = request.desired_state + + security_group = request.aws_client_factory.ec2.describe_security_groups( + GroupIds=[model["Id"]] + )["SecurityGroups"][0] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model_from_description(security_group), + ) + + def list(self, request: ResourceRequest[Properties]) -> ProgressEvent[Properties]: + security_groups = request.aws_client_factory.ec2.describe_security_groups()[ + "SecurityGroups" + ] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[{"Id": description["GroupId"]} for description in security_groups], + ) + + def delete( + self, + request: ResourceRequest[EC2SecurityGroupProperties], + ) -> ProgressEvent[EC2SecurityGroupProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + ec2.delete_security_group(GroupId=model["GroupId"]) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2SecurityGroupProperties], + ) -> ProgressEvent[EC2SecurityGroupProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup.schema.json new file mode 100644 index 0000000000000..5ccdf924ac598 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup.schema.json @@ -0,0 +1,148 @@ +{ + "typeName": "AWS::EC2::SecurityGroup", + "description": "Resource Type definition for AWS::EC2::SecurityGroup", + "additionalProperties": false, + "properties": { + "GroupDescription": { + "type": "string" + }, + "GroupName": { + "type": "string" + }, + "VpcId": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "SecurityGroupIngress": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Ingress" + } + }, + "SecurityGroupEgress": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Egress" + } + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "GroupId": { + "type": "string" + } + }, + "definitions": { + "Ingress": { + "type": "object", + "additionalProperties": false, + "properties": { + "CidrIp": { + "type": "string" + }, + "CidrIpv6": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "FromPort": { + "type": "integer" + }, + "SourceSecurityGroupName": { + "type": "string" + }, + "ToPort": { + "type": "integer" + }, + "SourceSecurityGroupOwnerId": { + "type": "string" + }, + "IpProtocol": { + "type": "string" + }, + "SourceSecurityGroupId": { + "type": "string" + }, + "SourcePrefixListId": { + "type": "string" + } + }, + "required": [ + "IpProtocol" + ] + }, + "Egress": { + "type": "object", + 
"additionalProperties": false, + "properties": { + "CidrIp": { + "type": "string" + }, + "CidrIpv6": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "FromPort": { + "type": "integer" + }, + "ToPort": { + "type": "integer" + }, + "IpProtocol": { + "type": "string" + }, + "DestinationSecurityGroupId": { + "type": "string" + }, + "DestinationPrefixListId": { + "type": "string" + } + }, + "required": [ + "IpProtocol" + ] + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "required": [ + "GroupDescription" + ], + "createOnlyProperties": [ + "/properties/GroupDescription", + "/properties/GroupName", + "/properties/VpcId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/GroupId" + ] +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup_plugin.py new file mode 100644 index 0000000000000..176bddb74e703 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_securitygroup_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2SecurityGroupProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::SecurityGroup" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_securitygroup import ( + EC2SecurityGroupProvider, + ) + + self.factory = EC2SecurityGroupProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet.py new file mode 100644 index 0000000000000..e7c82a0d3669c --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet.py @@ -0,0 +1,248 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.strings import str_to_bool + + +class EC2SubnetProperties(TypedDict): + VpcId: Optional[str] + AssignIpv6AddressOnCreation: Optional[bool] + AvailabilityZone: Optional[str] + AvailabilityZoneId: Optional[str] + CidrBlock: Optional[str] + EnableDns64: Optional[bool] + Ipv6CidrBlock: Optional[str] + Ipv6CidrBlocks: Optional[list[str]] + Ipv6Native: Optional[bool] + MapPublicIpOnLaunch: Optional[bool] + NetworkAclAssociationId: Optional[str] + OutpostArn: Optional[str] + PrivateDnsNameOptionsOnLaunch: Optional[dict] + SubnetId: Optional[str] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +def generate_subnet_read_payload( + ec2_client, schema, subnet_ids: Optional[list[str]] = None +) -> list[EC2SubnetProperties]: + kwargs = {} + if subnet_ids: + kwargs["SubnetIds"] = subnet_ids + subnets = ec2_client.describe_subnets(**kwargs)["Subnets"] 
+ + models = [] + for subnet in subnets: + subnet_id = subnet["SubnetId"] + + model = EC2SubnetProperties(**util.select_attributes(subnet, schema)) + + if "Tags" not in model: + model["Tags"] = [] + + if "EnableDns64" not in model: + model["EnableDns64"] = False + + private_dns_name_options = model.setdefault("PrivateDnsNameOptionsOnLaunch", {}) + + if "HostnameType" not in private_dns_name_options: + private_dns_name_options["HostnameType"] = "ip-name" + + optional_bool_attrs = ["EnableResourceNameDnsAAAARecord", "EnableResourceNameDnsARecord"] + for attr in optional_bool_attrs: + if attr not in private_dns_name_options: + private_dns_name_options[attr] = False + + network_acl_associations = ec2_client.describe_network_acls( + Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}] + ) + model["NetworkAclAssociationId"] = network_acl_associations["NetworkAcls"][0][ + "NetworkAclId" + ] + models.append(model) + + return models + + +class EC2SubnetProvider(ResourceProvider[EC2SubnetProperties]): + TYPE = "AWS::EC2::Subnet" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2SubnetProperties], + ) -> ProgressEvent[EC2SubnetProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/SubnetId + + Required properties: + - VpcId + + Create-only properties: + - /properties/VpcId + - /properties/AvailabilityZone + - /properties/AvailabilityZoneId + - /properties/CidrBlock + - /properties/OutpostArn + - /properties/Ipv6Native + + Read-only properties: + - /properties/NetworkAclAssociationId + - /properties/SubnetId + - /properties/Ipv6CidrBlocks + + IAM permissions required: + - ec2:DescribeSubnets + - ec2:CreateSubnet + - ec2:CreateTags + - ec2:ModifySubnetAttribute + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + params = util.select_attributes( + model, + [ + "AvailabilityZone", + "AvailabilityZoneId", + "CidrBlock", + "Ipv6CidrBlock", + "Ipv6Native", + "OutpostArn", + "VpcId", + ], + ) + if model.get("Tags"): + tags = [{"ResourceType": "subnet", "Tags": model.get("Tags")}] + params["TagSpecifications"] = tags + + response = ec2.create_subnet(**params) + model["SubnetId"] = response["Subnet"]["SubnetId"] + bool_attrs = [ + "AssignIpv6AddressOnCreation", + "EnableDns64", + "MapPublicIpOnLaunch", + ] + custom_attrs = bool_attrs + ["PrivateDnsNameOptionsOnLaunch"] + if not any(attr in model for attr in custom_attrs): + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + # update boolean attributes + for attr in bool_attrs: + if attr in model: + kwargs = {attr: {"Value": str_to_bool(model[attr])}} + ec2.modify_subnet_attribute(SubnetId=model["SubnetId"], **kwargs) + + # determine DNS hostname type on launch + dns_options = model.get("PrivateDnsNameOptionsOnLaunch") + if dns_options: + if isinstance(dns_options, str): + dns_options = json.loads(dns_options) + if dns_options.get("HostnameType"): + ec2.modify_subnet_attribute( + SubnetId=model["SubnetId"], + PrivateDnsHostnameTypeOnLaunch=dns_options.get("HostnameType"), + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2SubnetProperties], + ) -> ProgressEvent[EC2SubnetProperties]: + """ + Fetch resource information + + IAM permissions required: + - 
ec2:DescribeSubnets + - ec2:DescribeNetworkAcls + """ + models = generate_subnet_read_payload( + ec2_client=request.aws_client_factory.ec2, + schema=self.SCHEMA["properties"], + subnet_ids=[request.desired_state["SubnetId"]], + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=models[0], + custom_context=request.custom_context, + ) + + def delete( + self, + request: ResourceRequest[EC2SubnetProperties], + ) -> ProgressEvent[EC2SubnetProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DescribeSubnets + - ec2:DeleteSubnet + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + ec2.delete_subnet(SubnetId=model["SubnetId"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def update( + self, + request: ResourceRequest[EC2SubnetProperties], + ) -> ProgressEvent[EC2SubnetProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:DescribeSubnets + - ec2:ModifySubnetAttribute + - ec2:CreateTags + - ec2:DeleteTags + - ec2:AssociateSubnetCidrBlock + - ec2:DisassociateSubnetCidrBlock + """ + raise NotImplementedError + + def list( + self, request: ResourceRequest[EC2SubnetProperties] + ) -> ProgressEvent[EC2SubnetProperties]: + """ + List resources + + IAM permissions required: + - ec2:DescribeSubnets + - ec2:DescribeNetworkAcls + """ + models = generate_subnet_read_payload( + request.aws_client_factory.ec2, self.SCHEMA["properties"] + ) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_models=models) diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet.schema.json new file mode 100644 index 0000000000000..806f82f3ed8c7 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet.schema.json @@ -0,0 +1,157 @@ +{ + "typeName": "AWS::EC2::Subnet", + "description": "Resource Type definition for AWS::EC2::Subnet", + "additionalProperties": false, + "properties": { + "AssignIpv6AddressOnCreation": { + "type": "boolean" + }, + "VpcId": { + "type": "string" + }, + "MapPublicIpOnLaunch": { + "type": "boolean" + }, + "NetworkAclAssociationId": { + "type": "string" + }, + "AvailabilityZone": { + "type": "string" + }, + "AvailabilityZoneId": { + "type": "string" + }, + "CidrBlock": { + "type": "string" + }, + "SubnetId": { + "type": "string" + }, + "Ipv6CidrBlocks": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Ipv6CidrBlock": { + "type": "string" + }, + "OutpostArn": { + "type": "string" + }, + "Ipv6Native": { + "type": "boolean" + }, + "EnableDns64": { + "type": "boolean" + }, + "PrivateDnsNameOptionsOnLaunch": { + "type": "object", + "additionalProperties": false, + "properties": { + "HostnameType": { + "type": "string" + }, + "EnableResourceNameDnsARecord": { + "type": "boolean" + }, + "EnableResourceNameDnsAAAARecord": { + "type": "boolean" + } + } + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "required": [ + "VpcId" + ], + 
"createOnlyProperties": [ + "/properties/VpcId", + "/properties/AvailabilityZone", + "/properties/AvailabilityZoneId", + "/properties/CidrBlock", + "/properties/OutpostArn", + "/properties/Ipv6Native" + ], + "conditionalCreateOnlyProperties": [ + "/properties/Ipv6CidrBlock" + ], + "primaryIdentifier": [ + "/properties/SubnetId" + ], + "readOnlyProperties": [ + "/properties/NetworkAclAssociationId", + "/properties/SubnetId", + "/properties/Ipv6CidrBlocks" + ], + "handlers": { + "create": { + "permissions": [ + "ec2:DescribeSubnets", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:ModifySubnetAttribute" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeSubnets", + "ec2:DescribeNetworkAcls" + ] + }, + "update": { + "permissions": [ + "ec2:DescribeSubnets", + "ec2:ModifySubnetAttribute", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:AssociateSubnetCidrBlock", + "ec2:DisassociateSubnetCidrBlock" + ] + }, + "delete": { + "permissions": [ + "ec2:DescribeSubnets", + "ec2:DeleteSubnet" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeSubnets", + "ec2:DescribeNetworkAcls" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet_plugin.py new file mode 100644 index 0000000000000..65349afd2f656 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnet_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2SubnetProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::Subnet" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_subnet import EC2SubnetProvider + + self.factory = EC2SubnetProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation.py new file mode 100644 index 0000000000000..d07bbdcb6665e --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation.py @@ -0,0 +1,142 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2SubnetRouteTableAssociationProperties(TypedDict): + RouteTableId: Optional[str] + SubnetId: Optional[str] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2SubnetRouteTableAssociationProvider( + ResourceProvider[EC2SubnetRouteTableAssociationProperties] +): + TYPE = "AWS::EC2::SubnetRouteTableAssociation" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2SubnetRouteTableAssociationProperties], + ) -> ProgressEvent[EC2SubnetRouteTableAssociationProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - RouteTableId + - SubnetId + + Create-only properties: + - /properties/SubnetId + - /properties/RouteTableId + + Read-only properties: + - /properties/Id + + IAM permissions required: + - ec2:AssociateRouteTable + - ec2:ReplaceRouteTableAssociation + - ec2:DescribeSubnets + - ec2:DescribeRouteTables + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + # TODO: validations + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + # TODO: defaults + # TODO: idempotency + model["Id"] = ec2.associate_route_table( + RouteTableId=model["RouteTableId"], SubnetId=model["SubnetId"] + )["AssociationId"] + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + # we need to check association status + route_table = ec2.describe_route_tables(RouteTableIds=[model["RouteTableId"]])[ + "RouteTables" + ][0] + for association in route_table["Associations"]: + if association["RouteTableAssociationId"] == model["Id"]: + # if it is showing up here, it's associated + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2SubnetRouteTableAssociationProperties], + ) -> ProgressEvent[EC2SubnetRouteTableAssociationProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeRouteTables + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2SubnetRouteTableAssociationProperties], + ) -> ProgressEvent[EC2SubnetRouteTableAssociationProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DisassociateRouteTable + - ec2:DescribeSubnets + - ec2:DescribeRouteTables + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + # TODO add async + try: + ec2.disassociate_route_table(AssociationId=model["Id"]) + except ec2.exceptions.ClientError: + pass + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2SubnetRouteTableAssociationProperties], + ) -> ProgressEvent[EC2SubnetRouteTableAssociationProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation.schema.json new file mode 100644 index 0000000000000..d0dab1cba2a02 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation.schema.json @@ -0,0 +1,64 @@ +{ + "typeName": "AWS::EC2::SubnetRouteTableAssociation", + "description": "Resource Type definition for AWS::EC2::SubnetRouteTableAssociation", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-ec2.git", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "RouteTableId": { + "type": "string" + }, + "SubnetId": { + "type": "string" + } + }, + "tagging": { + "taggable": false, + "tagOnCreate": false, + 
"tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "required": [ + "RouteTableId", + "SubnetId" + ], + "createOnlyProperties": [ + "/properties/SubnetId", + "/properties/RouteTableId" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "handlers": { + "create": { + "permissions": [ + "ec2:AssociateRouteTable", + "ec2:ReplaceRouteTableAssociation", + "ec2:DescribeSubnets", + "ec2:DescribeRouteTables" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeRouteTables" + ] + }, + "delete": { + "permissions": [ + "ec2:DisassociateRouteTable", + "ec2:DescribeSubnets", + "ec2:DescribeRouteTables" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeRouteTables" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation_plugin.py new file mode 100644 index 0000000000000..6841f27741847 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_subnetroutetableassociation_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2SubnetRouteTableAssociationProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::SubnetRouteTableAssociation" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_subnetroutetableassociation import ( + EC2SubnetRouteTableAssociationProvider, + ) + + self.factory = EC2SubnetRouteTableAssociationProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway.py new file mode 100644 index 0000000000000..4a4b5825966cc --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway.py @@ -0,0 +1,144 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2TransitGatewayProperties(TypedDict): + AmazonSideAsn: Optional[int] + AssociationDefaultRouteTableId: Optional[str] + AutoAcceptSharedAttachments: Optional[str] + DefaultRouteTableAssociation: Optional[str] + DefaultRouteTablePropagation: Optional[str] + Description: Optional[str] + DnsSupport: Optional[str] + Id: Optional[str] + MulticastSupport: Optional[str] + PropagationDefaultRouteTableId: Optional[str] + Tags: Optional[list[Tag]] + TransitGatewayCidrBlocks: Optional[list[str]] + VpnEcmpSupport: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2TransitGatewayProvider(ResourceProvider[EC2TransitGatewayProperties]): + TYPE = "AWS::EC2::TransitGateway" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[EC2TransitGatewayProperties], + ) -> ProgressEvent[EC2TransitGatewayProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + + + Create-only properties: + - /properties/AmazonSideAsn + - /properties/MulticastSupport + + Read-only properties: + - /properties/Id + + IAM permissions required: + - ec2:CreateTransitGateway + - ec2:CreateTags + + """ + model = request.desired_state + create_params = { + "Options": util.select_attributes( + model, + [ + "AmazonSideAsn", + "AssociationDefaultRouteTableId", + "AutoAcceptSharedAttachments", + "DefaultRouteTableAssociation", + "DefaultRouteTablePropagation", + "DnsSupport", + "MulticastSupport", + "PropagationDefaultRouteTableId", + "TransitGatewayCidrBlocks", + "VpnEcmpSupport", + ], + ) + } + + if model.get("Description"): + create_params["Description"] = model["Description"] + + if model.get("Tags", []): + create_params["TagSpecifications"] = [ + {"ResourceType": "transit-gateway", "Tags": model["Tags"]} + ] + + response = request.aws_client_factory.ec2.create_transit_gateway(**create_params) + model["Id"] = response["TransitGateway"]["TransitGatewayId"] + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[EC2TransitGatewayProperties], + ) -> ProgressEvent[EC2TransitGatewayProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeTransitGateways + - ec2:DescribeTags + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2TransitGatewayProperties], + ) -> ProgressEvent[EC2TransitGatewayProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DeleteTransitGateway + - ec2:DeleteTags + """ + model = request.desired_state + request.aws_client_factory.ec2.delete_transit_gateway(TransitGatewayId=model["Id"]) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={}, + ) + + def update( + self, + request: ResourceRequest[EC2TransitGatewayProperties], + ) -> ProgressEvent[EC2TransitGatewayProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:ModifyTransitGateway + - ec2:DeleteTags + - ec2:CreateTags + - ec2:ModifyTransitGatewayOptions + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway.schema.json new file mode 100644 index 0000000000000..afa8ae6ecd09f --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway.schema.json @@ -0,0 +1,118 @@ +{ + "typeName": "AWS::EC2::TransitGateway", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-transitgateway", + "description": "Resource Type definition for AWS::EC2::TransitGateway", + "additionalProperties": false, + "properties": { + "DefaultRouteTablePropagation": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "AutoAcceptSharedAttachments": { + "type": "string" + }, + "DefaultRouteTableAssociation": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "VpnEcmpSupport": { + "type": "string" + }, + "DnsSupport": { + "type": "string" + }, + "MulticastSupport": { + "type": "string" + }, + "AmazonSideAsn": { + "type": "integer", + "format": "int64" + }, + "TransitGatewayCidrBlocks": { + "type": "array", + "items": { + "type": 
"string" + } + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "AssociationDefaultRouteTableId": { + "type": "string" + }, + "PropagationDefaultRouteTableId": { + "type": "string" + } + }, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "createOnlyProperties": [ + "/properties/AmazonSideAsn", + "/properties/MulticastSupport" + ], + "taggable": true, + "handlers": { + "create": { + "permissions": [ + "ec2:CreateTransitGateway", + "ec2:CreateTags" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeTransitGateways", + "ec2:DescribeTags" + ] + }, + "delete": { + "permissions": [ + "ec2:DeleteTransitGateway", + "ec2:DeleteTags" + ] + }, + "update": { + "permissions": [ + "ec2:ModifyTransitGateway", + "ec2:DeleteTags", + "ec2:CreateTags", + "ec2:ModifyTransitGatewayOptions" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeTransitGateways", + "ec2:DescribeTags" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway_plugin.py new file mode 100644 index 0000000000000..eac947d512bd5 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgateway_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2TransitGatewayProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::TransitGateway" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_transitgateway import ( + EC2TransitGatewayProvider, + ) + + self.factory = EC2TransitGatewayProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment.py new file mode 100644 index 0000000000000..59aac3a6a15d4 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment.py @@ -0,0 +1,131 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2TransitGatewayAttachmentProperties(TypedDict): + SubnetIds: Optional[list[str]] + TransitGatewayId: Optional[str] + VpcId: Optional[str] + Id: Optional[str] + Options: Optional[dict] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2TransitGatewayAttachmentProvider(ResourceProvider[EC2TransitGatewayAttachmentProperties]): + TYPE = "AWS::EC2::TransitGatewayAttachment" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[EC2TransitGatewayAttachmentProperties], + ) -> ProgressEvent[EC2TransitGatewayAttachmentProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - VpcId + - SubnetIds + - TransitGatewayId + + Create-only properties: + - /properties/TransitGatewayId + - /properties/VpcId + + Read-only properties: + - /properties/Id + + IAM permissions required: + - ec2:CreateTransitGatewayVpcAttachment + - ec2:CreateTags + + """ + model = request.desired_state + create_params = util.select_attributes( + model, ["SubnetIds", "TransitGatewayId", "VpcId", "Options"] + ) + + if model.get("Tags", []): + create_params["TagSpecifications"] = [ + {"ResourceType": "transit-gateway-attachment", "Tags": model["Tags"]} + ] + + result = request.aws_client_factory.ec2.create_transit_gateway_vpc_attachment( + **create_params + ) + model["Id"] = result["TransitGatewayVpcAttachment"]["TransitGatewayAttachmentId"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[EC2TransitGatewayAttachmentProperties], + ) -> ProgressEvent[EC2TransitGatewayAttachmentProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeTransitGatewayAttachments + - ec2:DescribeTransitGatewayVpcAttachments + - ec2:DescribeTags + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2TransitGatewayAttachmentProperties], + ) -> ProgressEvent[EC2TransitGatewayAttachmentProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DeleteTransitGatewayVpcAttachment + - ec2:DeleteTags + """ + model = request.desired_state + request.aws_client_factory.ec2.delete_transit_gateway_vpc_attachment( + TransitGatewayAttachmentId=model["Id"] + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={}, + ) + + def update( + self, + request: ResourceRequest[EC2TransitGatewayAttachmentProperties], + ) -> ProgressEvent[EC2TransitGatewayAttachmentProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:ModifyTransitGatewayVpcAttachment + - ec2:DescribeTransitGatewayVpcAttachments + - ec2:DeleteTags + - ec2:CreateTags + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment.schema.json new file mode 100644 index 0000000000000..075af98c71c9a --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment.schema.json @@ -0,0 +1,128 @@ +{ + "typeName": "AWS::EC2::TransitGatewayAttachment", + "description": "Resource Type definition for AWS::EC2::TransitGatewayAttachment", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-transitgateway", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "TransitGatewayId": { + "type": "string" + }, + "VpcId": { + "type": "string" + }, + "SubnetIds": { + "type": "array", + "insertionOrder": false, + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Tags": { + "type": "array", + "insertionOrder": false, + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Options": { + "description": "The options for the transit gateway vpc attachment.", + "type": "object", + 
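Both transit gateway create() handlers above translate the CloudFormation-style Tags list into the TagSpecifications shape that the EC2 create_* APIs expect. A minimal sketch of that translation; to_tag_specifications is a hypothetical helper name, not part of this PR.

```python
# Hypothetical helper mirroring the Tags handling in the create() methods above:
# CloudFormation models carry [{"Key": ..., "Value": ...}], while EC2 create_*
# calls expect them wrapped in TagSpecifications per resource type.
def to_tag_specifications(resource_type: str, tags: list[dict] | None) -> list[dict]:
    if not tags:
        return []
    return [{"ResourceType": resource_type, "Tags": tags}]


model = {"Tags": [{"Key": "env", "Value": "test"}]}
create_params = {}
if tag_specs := to_tag_specifications("transit-gateway-attachment", model.get("Tags")):
    create_params["TagSpecifications"] = tag_specs
print(create_params)
# {'TagSpecifications': [{'ResourceType': 'transit-gateway-attachment',
#                         'Tags': [{'Key': 'env', 'Value': 'test'}]}]}
```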
"properties": { + "DnsSupport": { + "description": "Indicates whether to enable DNS Support for Vpc Attachment. Valid Values: enable | disable", + "type": "string" + }, + "Ipv6Support": { + "description": "Indicates whether to enable Ipv6 Support for Vpc Attachment. Valid Values: enable | disable", + "type": "string" + }, + "ApplianceModeSupport": { + "description": "Indicates whether to enable Ipv6 Support for Vpc Attachment. Valid Values: enable | disable", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "required": [ + "VpcId", + "SubnetIds", + "TransitGatewayId" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": false, + "tagProperty": "/properties/Tags" + }, + "createOnlyProperties": [ + "/properties/TransitGatewayId", + "/properties/VpcId" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "handlers": { + "create": { + "permissions": [ + "ec2:CreateTransitGatewayVpcAttachment", + "ec2:CreateTags" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeTransitGatewayAttachments", + "ec2:DescribeTransitGatewayVpcAttachments", + "ec2:DescribeTags" + ] + }, + "delete": { + "permissions": [ + "ec2:DeleteTransitGatewayVpcAttachment", + "ec2:DeleteTags" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeTransitGatewayAttachments", + "ec2:DescribeTransitGatewayVpcAttachments", + "ec2:DescribeTags" + ] + }, + "update": { + "permissions": [ + "ec2:ModifyTransitGatewayVpcAttachment", + "ec2:DescribeTransitGatewayVpcAttachments", + "ec2:DeleteTags", + "ec2:CreateTags" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment_plugin.py new file mode 100644 index 0000000000000..7b34a535f56e6 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_transitgatewayattachment_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2TransitGatewayAttachmentProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::TransitGatewayAttachment" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_transitgatewayattachment import ( + EC2TransitGatewayAttachmentProvider, + ) + + self.factory = EC2TransitGatewayAttachmentProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc.py new file mode 100644 index 0000000000000..3244a72b8b863 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc.py @@ -0,0 +1,242 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, 
+ ResourceProvider, + ResourceRequest, +) + +LOG = logging.getLogger(__name__) + + +class EC2VPCProperties(TypedDict): + CidrBlock: Optional[str] + CidrBlockAssociations: Optional[list[str]] + DefaultNetworkAcl: Optional[str] + DefaultSecurityGroup: Optional[str] + EnableDnsHostnames: Optional[bool] + EnableDnsSupport: Optional[bool] + InstanceTenancy: Optional[str] + Ipv4IpamPoolId: Optional[str] + Ipv4NetmaskLength: Optional[int] + Ipv6CidrBlocks: Optional[list[str]] + Tags: Optional[list[Tag]] + VpcId: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +def _get_default_security_group_for_vpc(ec2_client, vpc_id: str) -> str: + sgs = ec2_client.describe_security_groups( + Filters=[ + {"Name": "group-name", "Values": ["default"]}, + {"Name": "vpc-id", "Values": [vpc_id]}, + ] + )["SecurityGroups"] + if len(sgs) != 1: + raise Exception(f"There should only be one default group for this VPC ({vpc_id=})") + return sgs[0]["GroupId"] + + +def _get_default_acl_for_vpc(ec2_client, vpc_id: str) -> str: + acls = ec2_client.describe_network_acls( + Filters=[ + {"Name": "default", "Values": ["true"]}, + {"Name": "vpc-id", "Values": [vpc_id]}, + ] + )["NetworkAcls"] + if len(acls) != 1: + raise Exception(f"There should only be one default network ACL for this VPC ({vpc_id=})") + return acls[0]["NetworkAclId"] + + +def generate_vpc_read_payload(ec2_client, vpc_id: str) -> EC2VPCProperties: + vpc = ec2_client.describe_vpcs(VpcIds=[vpc_id])["Vpcs"][0] + + model = EC2VPCProperties( + **util.select_attributes(vpc, EC2VPCProvider.SCHEMA["properties"].keys()) + ) + model["CidrBlockAssociations"] = [ + cba["AssociationId"] for cba in vpc["CidrBlockAssociationSet"] + ] + model["Ipv6CidrBlocks"] = [ + ipv6_ass["Ipv6CidrBlock"] for ipv6_ass in vpc.get("Ipv6CidrBlockAssociationSet", []) + ] + model["DefaultNetworkAcl"] = _get_default_acl_for_vpc(ec2_client, model["VpcId"]) + model["DefaultSecurityGroup"] = _get_default_security_group_for_vpc(ec2_client, model["VpcId"]) + model["EnableDnsHostnames"] = ec2_client.describe_vpc_attribute( + Attribute="enableDnsHostnames", VpcId=vpc_id + )["EnableDnsHostnames"]["Value"] + model["EnableDnsSupport"] = ec2_client.describe_vpc_attribute( + Attribute="enableDnsSupport", VpcId=vpc_id + )["EnableDnsSupport"]["Value"] + + return model + + +class EC2VPCProvider(ResourceProvider[EC2VPCProperties]): + TYPE = "AWS::EC2::VPC" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2VPCProperties], + ) -> ProgressEvent[EC2VPCProperties]: + """ + Create a new resource. 
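The _get_default_security_group_for_vpc helper above relies on EC2's server-side filters rather than listing everything and filtering client-side. A standalone sketch of the same lookup via boto3; the endpoint_url and dummy credentials are assumptions for pointing the client at a locally running LocalStack and are not part of this change.

```python
# Standalone version of the filter-based default-security-group lookup above.
# endpoint_url and the dummy credentials are assumptions for local testing.
import boto3

ec2 = boto3.client(
    "ec2",
    region_name="us-east-1",
    endpoint_url="http://localhost:4566",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)


def default_security_group(vpc_id: str) -> str:
    groups = ec2.describe_security_groups(
        Filters=[
            {"Name": "group-name", "Values": ["default"]},
            {"Name": "vpc-id", "Values": [vpc_id]},
        ]
    )["SecurityGroups"]
    if len(groups) != 1:
        raise RuntimeError(f"expected exactly one default security group for {vpc_id}")
    return groups[0]["GroupId"]


vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
print(default_security_group(vpc_id))
```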
+ + Primary identifier fields: + - /properties/VpcId + + Create-only properties: + - /properties/CidrBlock + - /properties/Ipv4IpamPoolId + - /properties/Ipv4NetmaskLength + + Read-only properties: + - /properties/CidrBlockAssociations + - /properties/DefaultNetworkAcl + - /properties/DefaultSecurityGroup + - /properties/Ipv6CidrBlocks + - /properties/VpcId + + IAM permissions required: + - ec2:CreateVpc + - ec2:DescribeVpcs + - ec2:ModifyVpcAttribute + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + # TODO: validations + + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + # TODO: defaults + # TODO: idempotency + params = util.select_attributes( + model, + ["CidrBlock", "InstanceTenancy"], + ) + if model.get("Tags"): + tags = [{"ResourceType": "vpc", "Tags": model.get("Tags")}] + params["TagSpecifications"] = tags + + response = ec2.create_vpc(**params) + + request.custom_context[REPEATED_INVOCATION] = True + model = generate_vpc_read_payload(ec2, response["Vpc"]["VpcId"]) + + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + response = ec2.describe_vpcs(VpcIds=[model["VpcId"]])["Vpcs"][0] + if response["State"] == "pending": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2VPCProperties], + ) -> ProgressEvent[EC2VPCProperties]: + """ + Fetch resource information + + IAM permissions required: + - ec2:DescribeVpcs + - ec2:DescribeSecurityGroups + - ec2:DescribeNetworkAcls + - ec2:DescribeVpcAttribute + """ + ec2 = request.aws_client_factory.ec2 + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=generate_vpc_read_payload(ec2, request.desired_state["VpcId"]), + custom_context=request.custom_context, + ) + + def delete( + self, + request: ResourceRequest[EC2VPCProperties], + ) -> ProgressEvent[EC2VPCProperties]: + """ + Delete a resource + + IAM permissions required: + - ec2:DeleteVpc + - ec2:DescribeVpcs + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + + # remove routes and route tables first + resp = ec2.describe_route_tables( + Filters=[ + {"Name": "vpc-id", "Values": [model["VpcId"]]}, + {"Name": "association.main", "Values": ["false"]}, + ] + ) + for rt in resp["RouteTables"]: + for assoc in rt.get("Associations", []): + # skipping Main association (upstream moto includes default association that cannot be deleted) + if assoc.get("Main"): + continue + ec2.disassociate_route_table(AssociationId=assoc["RouteTableAssociationId"]) + ec2.delete_route_table(RouteTableId=rt["RouteTableId"]) + + # TODO security groups, gateways and other attached resources need to be deleted as well + ec2.delete_vpc(VpcId=model["VpcId"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def update( + self, + request: ResourceRequest[EC2VPCProperties], + ) -> ProgressEvent[EC2VPCProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:CreateTags + - ec2:ModifyVpcAttribute + - ec2:DeleteTags + - ec2:ModifyVpcTenancy + """ + raise NotImplementedError + + def list( + self, + request: ResourceRequest[EC2VPCProperties], + ) -> ProgressEvent[EC2VPCProperties]: + resources = 
request.aws_client_factory.ec2.describe_vpcs() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + EC2VPCProperties(VpcId=resource["VpcId"]) for resource in resources["Vpcs"] + ], + ) diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc.schema.json new file mode 100644 index 0000000000000..0f8838c52d008 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc.schema.json @@ -0,0 +1,155 @@ +{ + "typeName": "AWS::EC2::VPC", + "description": "Resource Type definition for AWS::EC2::VPC", + "additionalProperties": false, + "properties": { + "VpcId": { + "type": "string", + "description": "The Id for the model." + }, + "CidrBlock": { + "type": "string", + "description": "The primary IPv4 CIDR block for the VPC." + }, + "CidrBlockAssociations": { + "type": "array", + "description": "A list of IPv4 CIDR block association IDs for the VPC.", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "DefaultNetworkAcl": { + "type": "string", + "insertionOrder": false, + "description": "The default network ACL ID that is associated with the VPC." + }, + "DefaultSecurityGroup": { + "type": "string", + "insertionOrder": false, + "description": "The default security group ID that is associated with the VPC." + }, + "Ipv6CidrBlocks": { + "type": "array", + "description": "A list of IPv6 CIDR blocks that are associated with the VPC.", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "EnableDnsHostnames": { + "type": "boolean", + "description": "Indicates whether the instances launched in the VPC get DNS hostnames. If enabled, instances in the VPC get DNS hostnames; otherwise, they do not. Disabled by default for nondefault VPCs." + }, + "EnableDnsSupport": { + "type": "boolean", + "description": "Indicates whether the DNS resolution is supported for the VPC. If enabled, queries to the Amazon provided DNS server at the 169.254.169.253 IP address, or the reserved IP address at the base of the VPC network range \"plus two\" succeed. If disabled, the Amazon provided DNS service in the VPC that resolves public DNS hostnames to IP addresses is not enabled. Enabled by default." + }, + "InstanceTenancy": { + "type": "string", + "description": "The allowed tenancy of instances launched into the VPC.\n\n\"default\": An instance launched into the VPC runs on shared hardware by default, unless you explicitly specify a different tenancy during instance launch.\n\n\"dedicated\": An instance launched into the VPC is a Dedicated Instance by default, unless you explicitly specify a tenancy of host during instance launch. You cannot specify a tenancy of default during instance launch.\n\nUpdating InstanceTenancy requires no replacement only if you are updating its value from \"dedicated\" to \"default\". Updating InstanceTenancy from \"default\" to \"dedicated\" requires replacement." 
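The list() handler above returns a minimal model per VPC from a single DescribeVpcs call. If result sets ever exceed one page, boto3's built-in paginator would take over the NextToken handling; a hedged sketch follows, with the client setup again an assumption for local testing.

```python
# Hedged sketch: the list() handler above issues a single DescribeVpcs call.
# boto3's paginator covers NextToken handling if result sets grow; the client
# setup is an assumption for local testing, not part of this change.
import boto3

ec2 = boto3.client(
    "ec2",
    region_name="us-east-1",
    endpoint_url="http://localhost:4566",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)


def list_vpc_ids() -> list[str]:
    vpc_ids: list[str] = []
    for page in ec2.get_paginator("describe_vpcs").paginate():
        vpc_ids.extend(vpc["VpcId"] for vpc in page["Vpcs"])
    return vpc_ids


print(list_vpc_ids())
```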
+ }, + "Ipv4IpamPoolId": { + "type": "string", + "description": "The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR" + }, + "Ipv4NetmaskLength": { + "type": "integer", + "description": "The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool" + }, + "Tags": { + "type": "array", + "description": "The tags for the VPC.", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "createOnlyProperties": [ + "/properties/CidrBlock", + "/properties/Ipv4IpamPoolId", + "/properties/Ipv4NetmaskLength" + ], + "conditionalCreateOnlyProperties": [ + "/properties/InstanceTenancy" + ], + "readOnlyProperties": [ + "/properties/CidrBlockAssociations", + "/properties/DefaultNetworkAcl", + "/properties/DefaultSecurityGroup", + "/properties/Ipv6CidrBlocks", + "/properties/VpcId" + ], + "primaryIdentifier": [ + "/properties/VpcId" + ], + "writeOnlyProperties": [ + "/properties/Ipv4IpamPoolId", + "/properties/Ipv4NetmaskLength" + ], + "handlers": { + "create": { + "permissions": [ + "ec2:CreateVpc", + "ec2:DescribeVpcs", + "ec2:ModifyVpcAttribute" + ] + }, + "read": { + "permissions": [ + "ec2:DescribeVpcs", + "ec2:DescribeSecurityGroups", + "ec2:DescribeNetworkAcls", + "ec2:DescribeVpcAttribute" + ] + }, + "update": { + "permissions": [ + "ec2:CreateTags", + "ec2:ModifyVpcAttribute", + "ec2:DeleteTags", + "ec2:ModifyVpcTenancy" + ] + }, + "delete": { + "permissions": [ + "ec2:DeleteVpc", + "ec2:DescribeVpcs" + ] + }, + "list": { + "permissions": [ + "ec2:DescribeVpcs" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc_plugin.py new file mode 100644 index 0000000000000..3f4aea38386f0 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpc_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2VPCProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::VPC" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_vpc import EC2VPCProvider + + self.factory = EC2VPCProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint.py new file mode 100644 index 0000000000000..420efcb8029ee --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint.py @@ -0,0 +1,180 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + 
ResourceRequest,
+)
+
+
+class EC2VPCEndpointProperties(TypedDict):
+    ServiceName: Optional[str]
+    VpcId: Optional[str]
+    CreationTimestamp: Optional[str]
+    DnsEntries: Optional[list[str]]
+    Id: Optional[str]
+    NetworkInterfaceIds: Optional[list[str]]
+    PolicyDocument: Optional[str | dict]
+    PrivateDnsEnabled: Optional[bool]
+    RouteTableIds: Optional[list[str]]
+    SecurityGroupIds: Optional[list[str]]
+    SubnetIds: Optional[list[str]]
+    VpcEndpointType: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class EC2VPCEndpointProvider(ResourceProvider[EC2VPCEndpointProperties]):
+    TYPE = "AWS::EC2::VPCEndpoint"  # Autogenerated. Don't change
+    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[EC2VPCEndpointProperties],
+    ) -> ProgressEvent[EC2VPCEndpointProperties]:
+        """
+        Create a new resource.
+
+        Primary identifier fields:
+        - /properties/Id
+
+        Required properties:
+        - VpcId
+        - ServiceName
+
+        Create-only properties:
+        - /properties/ServiceName
+        - /properties/VpcEndpointType
+        - /properties/VpcId
+
+        Read-only properties:
+        - /properties/NetworkInterfaceIds
+        - /properties/CreationTimestamp
+        - /properties/DnsEntries
+        - /properties/Id
+
+        IAM permissions required:
+        - ec2:CreateVpcEndpoint
+        - ec2:DescribeVpcEndpoints
+
+        """
+        model = request.desired_state
+        create_params = util.select_attributes(
+            model,
+            [
+                "PolicyDocument",
+                "PrivateDnsEnabled",
+                "RouteTableIds",
+                "SecurityGroupIds",
+                "ServiceName",
+                "SubnetIds",
+                "VpcEndpointType",
+                "VpcId",
+            ],
+        )
+
+        if not request.custom_context.get(REPEATED_INVOCATION):
+            response = request.aws_client_factory.ec2.create_vpc_endpoint(**create_params)
+            model["Id"] = response["VpcEndpoint"]["VpcEndpointId"]
+            model["DnsEntries"] = response["VpcEndpoint"]["DnsEntries"]
+            model["CreationTimestamp"] = response["VpcEndpoint"]["CreationTimestamp"]
+            model["NetworkInterfaceIds"] = response["VpcEndpoint"]["NetworkInterfaceIds"]
+            request.custom_context[REPEATED_INVOCATION] = True
+            return ProgressEvent(
+                status=OperationStatus.IN_PROGRESS,
+                resource_model=model,
+                custom_context=request.custom_context,
+            )
+
+        response = request.aws_client_factory.ec2.describe_vpc_endpoints(
+            VpcEndpointIds=[model["Id"]]
+        )
+        if not response["VpcEndpoints"]:
+            return ProgressEvent(
+                status=OperationStatus.FAILED,
+                resource_model=model,
+                custom_context=request.custom_context,
+                message="Resource not found after creation",
+            )
+
+        state = response["VpcEndpoints"][0][
+            "State"
+        ].lower()  # API specifies capital but lowercase is returned
+        match state:
+            case "available":
+                return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model)
+            case "pending":
+                return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model)
+            case "pendingacceptance":
+                return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model)
+            case _:
+                return ProgressEvent(
+                    status=OperationStatus.FAILED,
+                    resource_model=model,
+                    message=f"Invalid state '{state}' for resource",
+                )
+
+    def read(
+        self,
+        request: ResourceRequest[EC2VPCEndpointProperties],
+    ) -> ProgressEvent[EC2VPCEndpointProperties]:
+        """
+        Fetch resource information
+
+        IAM permissions required:
+        - ec2:DescribeVpcEndpoints
+        """
+        raise NotImplementedError
+
+    def delete(
+        self,
+        request: ResourceRequest[EC2VPCEndpointProperties],
+    ) -> ProgressEvent[EC2VPCEndpointProperties]:
+        """
+        Delete a resource
+
+        IAM permissions required:
+        - ec2:DeleteVpcEndpoints
+        - 
ec2:DescribeVpcEndpoints + """ + model = request.previous_state + response = request.aws_client_factory.ec2.describe_vpc_endpoints( + VpcEndpointIds=[model["Id"]] + ) + + if not response["VpcEndpoints"]: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + message="Resource not found for deletion", + ) + + state = response["VpcEndpoints"][0]["State"].lower() + match state: + case "deleted": + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + case "deleting": + return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model) + case _: + request.aws_client_factory.ec2.delete_vpc_endpoints(VpcEndpointIds=[model["Id"]]) + return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model) + + def update( + self, + request: ResourceRequest[EC2VPCEndpointProperties], + ) -> ProgressEvent[EC2VPCEndpointProperties]: + """ + Update a resource + + IAM permissions required: + - ec2:ModifyVpcEndpoint + - ec2:DescribeVpcEndpoints + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint.schema.json new file mode 100644 index 0000000000000..c8dcc84644d4c --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint.schema.json @@ -0,0 +1,140 @@ +{ + "typeName": "AWS::EC2::VPCEndpoint", + "description": "Resource Type definition for AWS::EC2::VPCEndpoint", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "CreationTimestamp": { + "type": "string" + }, + "DnsEntries": { + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "NetworkInterfaceIds": { + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "PolicyDocument": { + "type": [ + "string", + "object" + ], + "description": "A policy to attach to the endpoint that controls access to the service." + }, + "PrivateDnsEnabled": { + "type": "boolean", + "description": "Indicate whether to associate a private hosted zone with the specified VPC." + }, + "RouteTableIds": { + "type": "array", + "description": "One or more route table IDs.", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "SecurityGroupIds": { + "type": "array", + "description": "The ID of one or more security groups to associate with the endpoint network interface.", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "ServiceName": { + "type": "string", + "description": "The service name." + }, + "SubnetIds": { + "type": "array", + "description": "The ID of one or more subnets in which to create an endpoint network interface.", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "VpcEndpointType": { + "type": "string", + "enum": [ + "Interface", + "Gateway", + "GatewayLoadBalancer" + ] + }, + "VpcId": { + "type": "string", + "description": "The ID of the VPC in which the endpoint will be used." 
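The delete() handler above maps the endpoint's lower-cased State onto an operation status: "deleted" finishes, "deleting" keeps polling, and anything else triggers the DeleteVpcEndpoints call before polling again. The same branching, factored into a pure function so it can be unit-tested without an AWS client; Status and delete_status are illustrative names.

```python
# The delete-state branching above as a pure function. Status and delete_status
# are illustrative names; the state strings mirror the lower-cased values
# handled in this PR.
from enum import Enum


class Status(Enum):
    SUCCESS = "SUCCESS"
    IN_PROGRESS = "IN_PROGRESS"


def delete_status(state: str) -> tuple[Status, bool]:
    """Map a lower-cased endpoint state to (status, should_issue_delete)."""
    match state:
        case "deleted":
            return Status.SUCCESS, False
        case "deleting":
            return Status.IN_PROGRESS, False
        case _:
            # any other state: fire DeleteVpcEndpoints, then keep polling
            return Status.IN_PROGRESS, True


assert delete_status("deleted") == (Status.SUCCESS, False)
assert delete_status("deleting") == (Status.IN_PROGRESS, False)
assert delete_status("available") == (Status.IN_PROGRESS, True)
```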
+ } + }, + "required": [ + "VpcId", + "ServiceName" + ], + "readOnlyProperties": [ + "/properties/NetworkInterfaceIds", + "/properties/CreationTimestamp", + "/properties/DnsEntries", + "/properties/Id" + ], + "createOnlyProperties": [ + "/properties/ServiceName", + "/properties/VpcEndpointType", + "/properties/VpcId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "tagging": { + "taggable": false, + "tagOnCreate": false, + "tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "handlers": { + "create": { + "permissions": [ + "ec2:CreateVpcEndpoint", + "ec2:DescribeVpcEndpoints" + ], + "timeoutInMinutes": 210 + }, + "read": { + "permissions": [ + "ec2:DescribeVpcEndpoints" + ] + }, + "update": { + "permissions": [ + "ec2:ModifyVpcEndpoint", + "ec2:DescribeVpcEndpoints" + ], + "timeoutInMinutes": 210 + }, + "delete": { + "permissions": [ + "ec2:DeleteVpcEndpoints", + "ec2:DescribeVpcEndpoints" + ], + "timeoutInMinutes": 210 + }, + "list": { + "permissions": [ + "ec2:DescribeVpcEndpoints" + ] + } + } +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint_plugin.py new file mode 100644 index 0000000000000..e0e1d228a95de --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcendpoint_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2VPCEndpointProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::VPCEndpoint" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_vpcendpoint import ( + EC2VPCEndpointProvider, + ) + + self.factory = EC2VPCEndpointProvider diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment.py new file mode 100644 index 0000000000000..8f4656e317b7f --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment.py @@ -0,0 +1,116 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EC2VPCGatewayAttachmentProperties(TypedDict): + VpcId: Optional[str] + Id: Optional[str] + InternetGatewayId: Optional[str] + VpnGatewayId: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EC2VPCGatewayAttachmentProvider(ResourceProvider[EC2VPCGatewayAttachmentProperties]): + TYPE = "AWS::EC2::VPCGatewayAttachment" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EC2VPCGatewayAttachmentProperties], + ) -> ProgressEvent[EC2VPCGatewayAttachmentProperties]: + """ + Create a new resource. 
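Each *_plugin.py module above follows the same shape: a plugin class that pairs the CloudFormation type name with a deferred import, so the provider module is only loaded when the resource type is first used. A self-contained sketch of that lazy-loading pattern; BasePlugin and DemoProvider are stand-ins, not LocalStack classes.

```python
# Self-contained sketch of the lazy-loading plugin pattern used by the
# *_plugin.py modules in this PR: the provider class is only imported when
# load() runs, which keeps start-up cheap. BasePlugin and DemoProvider are
# stand-ins, not LocalStack classes.
from typing import Optional, Type


class DemoProvider:  # stand-in for a ResourceProvider subclass
    TYPE = "Demo::Resource::Type"


class BasePlugin:
    name = "Demo::Resource::Type"

    def __init__(self):
        self.factory: Optional[Type[DemoProvider]] = None

    def load(self):
        # a real plugin performs the (potentially expensive) import here, e.g.
        # from my_pkg.providers.demo import DemoProvider
        self.factory = DemoProvider


plugin = BasePlugin()
plugin.load()
assert plugin.factory is DemoProvider
print(f"{plugin.name} -> {plugin.factory.__name__}")
```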
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - VpcId + + + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + # TODO: validations + if model.get("InternetGatewayId"): + ec2.attach_internet_gateway( + InternetGatewayId=model["InternetGatewayId"], VpcId=model["VpcId"] + ) + else: + ec2.attach_vpn_gateway(VpnGatewayId=model["VpnGatewayId"], VpcId=model["VpcId"]) + + # TODO: idempotency + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EC2VPCGatewayAttachmentProperties], + ) -> ProgressEvent[EC2VPCGatewayAttachmentProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EC2VPCGatewayAttachmentProperties], + ) -> ProgressEvent[EC2VPCGatewayAttachmentProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + ec2 = request.aws_client_factory.ec2 + # TODO: validations + try: + if model.get("InternetGatewayId"): + ec2.detach_internet_gateway( + InternetGatewayId=model["InternetGatewayId"], VpcId=model["VpcId"] + ) + else: + ec2.detach_vpn_gateway(VpnGatewayId=model["VpnGatewayId"], VpcId=model["VpcId"]) + except ec2.exceptions.ClientError: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EC2VPCGatewayAttachmentProperties], + ) -> ProgressEvent[EC2VPCGatewayAttachmentProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment.schema.json b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment.schema.json new file mode 100644 index 0000000000000..856548db1f173 --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment.schema.json @@ -0,0 +1,28 @@ +{ + "typeName": "AWS::EC2::VPCGatewayAttachment", + "description": "Resource Type definition for AWS::EC2::VPCGatewayAttachment", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "InternetGatewayId": { + "type": "string" + }, + "VpcId": { + "type": "string" + }, + "VpnGatewayId": { + "type": "string" + } + }, + "required": [ + "VpcId" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "primaryIdentifier": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment_plugin.py b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment_plugin.py new file mode 100644 index 0000000000000..f210fa0ff8c1d --- /dev/null +++ b/localstack-core/localstack/services/ec2/resource_providers/aws_ec2_vpcgatewayattachment_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EC2VPCGatewayAttachmentProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::EC2::VPCGatewayAttachment" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ec2.resource_providers.aws_ec2_vpcgatewayattachment import ( + EC2VPCGatewayAttachmentProvider, + ) + + 
self.factory = EC2VPCGatewayAttachmentProvider diff --git a/localstack/services/stepfunctions/asl/__init__.py b/localstack-core/localstack/services/ecr/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/__init__.py rename to localstack-core/localstack/services/ecr/__init__.py diff --git a/localstack/services/stepfunctions/asl/antlt4utils/__init__.py b/localstack-core/localstack/services/ecr/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/antlt4utils/__init__.py rename to localstack-core/localstack/services/ecr/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository.py b/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository.py new file mode 100644 index 0000000000000..a42735467d146 --- /dev/null +++ b/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository.py @@ -0,0 +1,169 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.constants import AWS_REGION_US_EAST_1, DEFAULT_AWS_ACCOUNT_ID +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.aws import arns + +LOG = logging.getLogger(__name__) + +# simple mock state +default_repos_per_stack = {} + + +class ECRRepositoryProperties(TypedDict): + Arn: Optional[str] + EncryptionConfiguration: Optional[EncryptionConfiguration] + ImageScanningConfiguration: Optional[ImageScanningConfiguration] + ImageTagMutability: Optional[str] + LifecyclePolicy: Optional[LifecyclePolicy] + RepositoryName: Optional[str] + RepositoryPolicyText: Optional[dict | str] + RepositoryUri: Optional[str] + Tags: Optional[list[Tag]] + + +class LifecyclePolicy(TypedDict): + LifecyclePolicyText: Optional[str] + RegistryId: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class ImageScanningConfiguration(TypedDict): + ScanOnPush: Optional[bool] + + +class EncryptionConfiguration(TypedDict): + EncryptionType: Optional[str] + KmsKey: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ECRRepositoryProvider(ResourceProvider[ECRRepositoryProperties]): + TYPE = "AWS::ECR::Repository" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ECRRepositoryProperties], + ) -> ProgressEvent[ECRRepositoryProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/RepositoryName + + Create-only properties: + - /properties/RepositoryName + - /properties/EncryptionConfiguration + - /properties/EncryptionConfiguration/EncryptionType + - /properties/EncryptionConfiguration/KmsKey + + Read-only properties: + - /properties/Arn + - /properties/RepositoryUri + + IAM permissions required: + - ecr:CreateRepository + - ecr:PutLifecyclePolicy + - ecr:SetRepositoryPolicy + - ecr:TagResource + - kms:DescribeKey + - kms:CreateGrant + - kms:RetireGrant + + """ + model = request.desired_state + + default_repos_per_stack[request.stack_name] = model["RepositoryName"] + LOG.warning( + "Creating a Mock ECR Repository for CloudFormation. 
This is only intended to be used for allowing a successful CDK bootstrap and does not provision any underlying ECR repository." + ) + model.update( + { + "Arn": arns.ecr_repository_arn( + model["RepositoryName"], DEFAULT_AWS_ACCOUNT_ID, AWS_REGION_US_EAST_1 + ), + "RepositoryUri": "http://localhost:4566", + "ImageTagMutability": "MUTABLE", + "ImageScanningConfiguration": {"scanOnPush": True}, + } + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[ECRRepositoryProperties], + ) -> ProgressEvent[ECRRepositoryProperties]: + """ + Fetch resource information + + IAM permissions required: + - ecr:DescribeRepositories + - ecr:GetLifecyclePolicy + - ecr:GetRepositoryPolicy + - ecr:ListTagsForResource + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ECRRepositoryProperties], + ) -> ProgressEvent[ECRRepositoryProperties]: + """ + Delete a resource + + IAM permissions required: + - ecr:DeleteRepository + - kms:RetireGrant + """ + if default_repos_per_stack.get(request.stack_name): + del default_repos_per_stack[request.stack_name] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=request.desired_state, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[ECRRepositoryProperties], + ) -> ProgressEvent[ECRRepositoryProperties]: + """ + Update a resource + + IAM permissions required: + - ecr:PutLifecyclePolicy + - ecr:SetRepositoryPolicy + - ecr:TagResource + - ecr:UntagResource + - ecr:DeleteLifecyclePolicy + - ecr:DeleteRepositoryPolicy + - ecr:PutImageScanningConfiguration + - ecr:PutImageTagMutability + - kms:DescribeKey + - kms:CreateGrant + - kms:RetireGrant + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository.schema.json b/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository.schema.json new file mode 100644 index 0000000000000..ef4f7c01e3a74 --- /dev/null +++ b/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository.schema.json @@ -0,0 +1,210 @@ +{ + "typeName": "AWS::ECR::Repository", + "description": "The AWS::ECR::Repository resource specifies an Amazon Elastic Container Registry (Amazon ECR) repository, where users can push and pull Docker images. For more information, see https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-ecr.git", + "definitions": { + "LifecyclePolicy": { + "type": "object", + "description": "The LifecyclePolicy property type specifies a lifecycle policy. For information about lifecycle policy syntax, see https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html", + "properties": { + "LifecyclePolicyText": { + "$ref": "#/definitions/LifecyclePolicyText" + }, + "RegistryId": { + "$ref": "#/definitions/RegistryId" + } + }, + "additionalProperties": false + }, + "LifecyclePolicyText": { + "type": "string", + "description": "The JSON repository policy text to apply to the repository.", + "minLength": 100, + "maxLength": 30720 + }, + "RegistryId": { + "type": "string", + "description": "The AWS account ID associated with the registry that contains the repository. If you do not specify a registry, the default registry is assumed. 
", + "minLength": 12, + "maxLength": 12, + "pattern": "^[0-9]{12}$" + }, + "Tag": { + "description": "A key-value pair to associate with a resource.", + "type": "object", + "properties": { + "Key": { + "type": "string", + "description": "The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. ", + "minLength": 1, + "maxLength": 127 + }, + "Value": { + "type": "string", + "description": "The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. ", + "minLength": 1, + "maxLength": 255 + } + }, + "required": [ + "Value", + "Key" + ], + "additionalProperties": false + }, + "ImageScanningConfiguration": { + "type": "object", + "description": "The image scanning configuration for the repository. This setting determines whether images are scanned for known vulnerabilities after being pushed to the repository.", + "properties": { + "ScanOnPush": { + "$ref": "#/definitions/ScanOnPush" + } + }, + "additionalProperties": false + }, + "ScanOnPush": { + "type": "boolean", + "description": "The setting that determines whether images are scanned after being pushed to a repository." + }, + "EncryptionConfiguration": { + "type": "object", + "description": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the AES256 encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more information, see https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html", + "properties": { + "EncryptionType": { + "$ref": "#/definitions/EncryptionType" + }, + "KmsKey": { + "$ref": "#/definitions/KmsKey" + } + }, + "required": [ + "EncryptionType" + ], + "additionalProperties": false + }, + "EncryptionType": { + "type": "string", + "description": "The encryption type to use.", + "enum": [ + "AES256", + "KMS" + ] + }, + "KmsKey": { + "type": "string", + "description": "If you use the KMS encryption type, specify the CMK to use for encryption. The alias, key ID, or full ARN of the CMK can be specified. The key must exist in the same Region as the repository. If no key is specified, the default AWS managed CMK for Amazon ECR will be used.", + "minLength": 1, + "maxLength": 2048 + } + }, + "properties": { + "LifecyclePolicy": { + "$ref": "#/definitions/LifecyclePolicy" + }, + "RepositoryName": { + "type": "string", + "description": "The name to use for the repository. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app). If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the repository name. 
For more information, see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html.", + "minLength": 2, + "maxLength": 256, + "pattern": "^(?=.{2,256}$)((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*)$" + }, + "RepositoryPolicyText": { + "type": [ + "object", + "string" + ], + "description": "The JSON repository policy text to apply to the repository. For more information, see https://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicyExamples.html in the Amazon Elastic Container Registry User Guide. " + }, + "Tags": { + "type": "array", + "maxItems": 50, + "uniqueItems": true, + "insertionOrder": false, + "description": "An array of key-value pairs to apply to this resource.", + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Arn": { + "type": "string" + }, + "RepositoryUri": { + "type": "string" + }, + "ImageTagMutability": { + "type": "string", + "description": "The image tag mutability setting for the repository.", + "enum": [ + "MUTABLE", + "IMMUTABLE" + ] + }, + "ImageScanningConfiguration": { + "$ref": "#/definitions/ImageScanningConfiguration" + }, + "EncryptionConfiguration": { + "$ref": "#/definitions/EncryptionConfiguration" + } + }, + "createOnlyProperties": [ + "/properties/RepositoryName", + "/properties/EncryptionConfiguration", + "/properties/EncryptionConfiguration/EncryptionType", + "/properties/EncryptionConfiguration/KmsKey" + ], + "readOnlyProperties": [ + "/properties/Arn", + "/properties/RepositoryUri" + ], + "primaryIdentifier": [ + "/properties/RepositoryName" + ], + "handlers": { + "create": { + "permissions": [ + "ecr:CreateRepository", + "ecr:PutLifecyclePolicy", + "ecr:SetRepositoryPolicy", + "ecr:TagResource", + "kms:DescribeKey", + "kms:CreateGrant", + "kms:RetireGrant" + ] + }, + "read": { + "permissions": [ + "ecr:DescribeRepositories", + "ecr:GetLifecyclePolicy", + "ecr:GetRepositoryPolicy", + "ecr:ListTagsForResource" + ] + }, + "update": { + "permissions": [ + "ecr:PutLifecyclePolicy", + "ecr:SetRepositoryPolicy", + "ecr:TagResource", + "ecr:UntagResource", + "ecr:DeleteLifecyclePolicy", + "ecr:DeleteRepositoryPolicy", + "ecr:PutImageScanningConfiguration", + "ecr:PutImageTagMutability", + "kms:DescribeKey", + "kms:CreateGrant", + "kms:RetireGrant" + ] + }, + "delete": { + "permissions": [ + "ecr:DeleteRepository", + "kms:RetireGrant" + ] + }, + "list": { + "permissions": [ + "ecr:DescribeRepositories" + ] + } + }, + "additionalProperties": false +} diff --git a/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository_plugin.py b/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository_plugin.py new file mode 100644 index 0000000000000..7d7ba440a668d --- /dev/null +++ b/localstack-core/localstack/services/ecr/resource_providers/aws_ecr_repository_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ECRRepositoryProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ECR::Repository" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ecr.resource_providers.aws_ecr_repository import ( + ECRRepositoryProvider, + ) + + self.factory = ECRRepositoryProvider diff --git a/localstack-core/localstack/services/edge.py b/localstack-core/localstack/services/edge.py new file mode 100644 index 0000000000000..5c3ede66b65e5 --- 
/dev/null +++ b/localstack-core/localstack/services/edge.py @@ -0,0 +1,213 @@ +import argparse +import logging +import shlex +import subprocess +import sys +from typing import List, Optional, TypeVar + +from localstack import config, constants +from localstack.config import HostAndPort +from localstack.constants import ( + LOCALSTACK_ROOT_FOLDER, +) +from localstack.http import Router +from localstack.http.dispatcher import Handler, handler_dispatcher +from localstack.http.router import GreedyPathConverter +from localstack.utils.collections import split_list_by +from localstack.utils.net import get_free_tcp_port +from localstack.utils.run import is_root, run +from localstack.utils.server.tcp_proxy import TCPProxy +from localstack.utils.threads import start_thread + +T = TypeVar("T") + +LOG = logging.getLogger(__name__) + + +ROUTER: Router[Handler] = Router( + dispatcher=handler_dispatcher(), converters={"greedy_path": GreedyPathConverter} +) +"""This special Router is part of the edge proxy. Use the router to inject custom handlers that are handled before +the actual AWS service call is made.""" + + +def do_start_edge( + listen: HostAndPort | List[HostAndPort], use_ssl: bool, asynchronous: bool = False +): + from localstack.aws.serving.edge import serve_gateway + + return serve_gateway(listen, use_ssl, asynchronous) + + +def can_use_sudo(): + try: + run("sudo -n -v", print_error=False) + return True + except Exception: + return False + + +def ensure_can_use_sudo(): + if not is_root() and not can_use_sudo(): + if not sys.stdin.isatty(): + raise IOError("cannot get sudo password from non-tty input") + print("Please enter your sudo password (required to configure local network):") + run("sudo -v", stdin=True) + + +def start_component( + component: str, listen_str: str | None = None, target_address: str | None = None +): + if component == "edge": + return start_edge(listen_str=listen_str) + if component == "proxy": + if target_address is None: + raise ValueError("no target address specified") + + return start_proxy( + listen_str=listen_str, + target_address=HostAndPort.parse( + target_address, + default_host=config.default_ip, + default_port=constants.DEFAULT_PORT_EDGE, + ), + ) + raise Exception("Unexpected component name '%s' received during start up" % component) + + +def start_proxy( + listen_str: str, target_address: HostAndPort, asynchronous: bool = False +) -> TCPProxy: + """ + Starts a TCP proxy to perform a low-level forwarding of incoming requests. + + :param listen_str: address to listen on + :param target_address: target address to proxy requests to + :param asynchronous: False if the function should join the proxy thread and block until it terminates. 
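+    Example (with hypothetical values): start_proxy(":443", HostAndPort("127.0.0.1", 4566)) would forward TCP connections on port 443 to a gateway listening on 127.0.0.1:4566.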
+ :return: created thread executing the proxy + """ + listen_hosts = parse_gateway_listen( + listen_str, + default_host=constants.LOCALHOST_IP, + default_port=constants.DEFAULT_PORT_EDGE, + ) + listen = listen_hosts[0] + return do_start_tcp_proxy(listen, target_address, asynchronous) + + +def do_start_tcp_proxy( + listen: HostAndPort, target_address: HostAndPort, asynchronous: bool = False +) -> TCPProxy: + src = str(listen) + dst = str(target_address) + + LOG.debug("Starting Local TCP Proxy: %s -> %s", src, dst) + proxy = TCPProxy( + target_address=target_address.host, + target_port=target_address.port, + host=listen.host, + port=listen.port, + ) + proxy.start() + if not asynchronous: + proxy.join() + return proxy + + +def start_edge(listen_str: str, use_ssl: bool = True, asynchronous: bool = False): + if listen_str: + listen = parse_gateway_listen( + listen_str, default_host=config.default_ip, default_port=constants.DEFAULT_PORT_EDGE + ) + else: + listen = config.GATEWAY_LISTEN + + if len(listen) == 0: + raise ValueError("no listen addresses provided") + + # separate privileged and unprivileged addresses + unprivileged, privileged = split_list_by(listen, lambda addr: addr.is_unprivileged() or False) + + # if we are root, we can directly bind to privileged ports as well + if is_root(): + unprivileged = unprivileged + privileged + privileged = [] + + # make sure we actually start the gateway server on at least one address + if not unprivileged: + unprivileged = parse_gateway_listen( + f":{get_free_tcp_port()}", + default_host=config.default_ip, + default_port=constants.DEFAULT_PORT_EDGE, + ) + + # bind the gateway server to unprivileged addresses + edge_thread = do_start_edge(unprivileged, use_ssl=use_ssl, asynchronous=True) + + # start TCP proxies for the remaining addresses + proxy_destination = unprivileged[0] + for address in privileged: + # escalate to root + args = [ + "proxy", + "--gateway-listen", + str(address), + "--target-address", + str(proxy_destination), + ] + run_module_as_sudo( + module="localstack.services.edge", + arguments=args, + asynchronous=True, + ) + + if edge_thread is not None: + edge_thread.join() + + +def run_module_as_sudo( + module: str, arguments: Optional[List[str]] = None, asynchronous=False, env_vars=None +): + # prepare environment + env_vars = env_vars or {} + env_vars["PYTHONPATH"] = f".:{LOCALSTACK_ROOT_FOLDER}" + + # start the process as sudo + python_cmd = sys.executable + cmd = ["sudo", "-n", "--preserve-env", python_cmd, "-m", module] + arguments = arguments or [] + shell_cmd = shlex.join(cmd + arguments) + + # make sure we can run sudo commands + try: + ensure_can_use_sudo() + except Exception as e: + LOG.error("cannot run command as root (%s): %s ", str(e), shell_cmd) + return + + def run_command(*_): + run(shell_cmd, outfile=subprocess.PIPE, print_error=False, env_vars=env_vars) + + LOG.debug("Running command as sudo: %s", shell_cmd) + result = ( + start_thread(run_command, quiet=True, name="sudo-edge") if asynchronous else run_command() + ) + return result + + +def parse_gateway_listen(listen: str, default_host: str, default_port: int) -> List[HostAndPort]: + addresses = [] + for address in listen.split(","): + addresses.append(HostAndPort.parse(address, default_host, default_port)) + return addresses + + +if __name__ == "__main__": + logging.basicConfig() + parser = argparse.ArgumentParser() + parser.add_argument("component") + parser.add_argument("-l", "--gateway-listen", required=False, type=str) + parser.add_argument("-t", "--target-address",
required=False, type=str) + args = parser.parse_args() + + start_component(args.component, args.gateway_listen, args.target_address) diff --git a/localstack/services/stepfunctions/asl/component/__init__.py b/localstack-core/localstack/services/es/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/__init__.py rename to localstack-core/localstack/services/es/__init__.py diff --git a/localstack/services/es/plugins.py b/localstack-core/localstack/services/es/plugins.py similarity index 100% rename from localstack/services/es/plugins.py rename to localstack-core/localstack/services/es/plugins.py diff --git a/localstack-core/localstack/services/es/provider.py b/localstack-core/localstack/services/es/provider.py new file mode 100644 index 0000000000000..4519e417bceaa --- /dev/null +++ b/localstack-core/localstack/services/es/provider.py @@ -0,0 +1,441 @@ +from contextlib import contextmanager +from typing import Dict, Optional, cast + +from botocore.exceptions import ClientError + +from localstack import constants +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.es import ( + ARN, + AccessDeniedException, + AdvancedOptions, + AdvancedSecurityOptionsInput, + AutoTuneOptionsInput, + CognitoOptions, + CompatibleElasticsearchVersionsList, + CompatibleVersionsMap, + ConflictException, + CreateElasticsearchDomainResponse, + DeleteElasticsearchDomainResponse, + DescribeElasticsearchDomainConfigResponse, + DescribeElasticsearchDomainResponse, + DescribeElasticsearchDomainsResponse, + DisabledOperationException, + DomainEndpointOptions, + DomainInfoList, + DomainName, + DomainNameList, + EBSOptions, + ElasticsearchClusterConfig, + ElasticsearchClusterConfigStatus, + ElasticsearchDomainConfig, + ElasticsearchDomainStatus, + ElasticsearchVersionStatus, + ElasticsearchVersionString, + EncryptionAtRestOptions, + EngineType, + EsApi, + GetCompatibleElasticsearchVersionsResponse, + InternalException, + InvalidPaginationTokenException, + InvalidTypeException, + LimitExceededException, + ListDomainNamesResponse, + ListElasticsearchVersionsResponse, + ListTagsResponse, + LogPublishingOptions, + MaxResults, + NextToken, + NodeToNodeEncryptionOptions, + OptionStatus, + PolicyDocument, + ResourceAlreadyExistsException, + ResourceNotFoundException, + SnapshotOptions, + StringList, + TagList, + UpdateElasticsearchDomainConfigRequest, + UpdateElasticsearchDomainConfigResponse, + ValidationException, + VPCOptions, +) +from localstack.aws.api.es import BaseException as EsBaseException +from localstack.aws.api.opensearch import ( + ClusterConfig, + CompatibleVersionsList, + DomainConfig, + DomainStatus, + VersionString, +) +from localstack.aws.connect import connect_to + + +def _version_to_opensearch( + version: Optional[ElasticsearchVersionString], +) -> Optional[VersionString]: + if version is not None: + if version.startswith("OpenSearch_"): + return version + else: + return f"Elasticsearch_{version}" + + +def _version_from_opensearch( + version: Optional[VersionString], +) -> Optional[ElasticsearchVersionString]: + if version is not None: + if version.startswith("Elasticsearch_"): + return version.split("_")[1] + else: + return version + + +def _instancetype_to_opensearch(instance_type: Optional[str]) -> Optional[str]: + if instance_type is not None: + return instance_type.replace("elasticsearch", "search") + + +def _instancetype_from_opensearch(instance_type: Optional[str]) -> Optional[str]: + if instance_type is not None: + return 
instance_type.replace("search", "elasticsearch") + + +def _clusterconfig_from_opensearch( + cluster_config: Optional[ClusterConfig], +) -> Optional[ElasticsearchClusterConfig]: + if cluster_config is not None: + # Just take the whole typed dict and typecast it to our target type + result = cast(ElasticsearchClusterConfig, cluster_config) + + # Adjust the instance type names + result["InstanceType"] = _instancetype_from_opensearch(cluster_config.get("InstanceType")) + result["DedicatedMasterType"] = _instancetype_from_opensearch( + cluster_config.get("DedicatedMasterType") + ) + result["WarmType"] = _instancetype_from_opensearch(cluster_config.get("WarmType")) + return result + + +def _domainstatus_from_opensearch( + domain_status: Optional[DomainStatus], +) -> Optional[ElasticsearchDomainStatus]: + if domain_status is not None: + # Just take the whole typed dict and typecast it to our target type + result = cast(ElasticsearchDomainStatus, domain_status) + # Only specifically handle keys which are named differently or their values differ (version and clusterconfig) + result["ElasticsearchVersion"] = _version_from_opensearch( + domain_status.get("EngineVersion") + ) + result["ElasticsearchClusterConfig"] = _clusterconfig_from_opensearch( + domain_status.get("ClusterConfig") + ) + result.pop("EngineVersion", None) + result.pop("ClusterConfig", None) + return result + + +def _clusterconfig_to_opensearch( + elasticsearch_cluster_config: Optional[ElasticsearchClusterConfig], +) -> Optional[ClusterConfig]: + if elasticsearch_cluster_config is not None: + result = cast(ClusterConfig, elasticsearch_cluster_config) + if instance_type := result.get("InstanceType"): + result["InstanceType"] = _instancetype_to_opensearch(instance_type) + if dedicated_master_type := result.get("DedicatedMasterType"): + result["DedicatedMasterType"] = _instancetype_to_opensearch(dedicated_master_type) + if warm_type := result.get("WarmType"): + result["WarmType"] = _instancetype_to_opensearch(warm_type) + return result + + +def _domainconfig_from_opensearch( + domain_config: Optional[DomainConfig], +) -> Optional[ElasticsearchDomainConfig]: + if domain_config is not None: + result = cast(ElasticsearchDomainConfig, domain_config) + engine_version = domain_config.get("EngineVersion", {}) + result["ElasticsearchVersion"] = ElasticsearchVersionStatus( + Options=_version_from_opensearch(engine_version.get("Options")), + Status=cast(OptionStatus, engine_version.get("Status")), + ) + cluster_config = domain_config.get("ClusterConfig", {}) + result["ElasticsearchClusterConfig"] = ElasticsearchClusterConfigStatus( + Options=_clusterconfig_from_opensearch(cluster_config.get("Options")), + Status=cluster_config.get("Status"), + ) + result.pop("EngineVersion", None) + result.pop("ClusterConfig", None) + return result + + +def _compatible_version_list_from_opensearch( + compatible_version_list: Optional[CompatibleVersionsList], +) -> Optional[CompatibleElasticsearchVersionsList]: + if compatible_version_list is not None: + return [ + CompatibleVersionsMap( + SourceVersion=_version_from_opensearch(version_map["SourceVersion"]), + TargetVersions=[ + _version_from_opensearch(target_version) + for target_version in version_map["TargetVersions"] + ], + ) + for version_map in compatible_version_list + ] + + +@contextmanager +def exception_mapper(): + """Maps an exception thrown by the OpenSearch client to an exception thrown by the ElasticSearch API.""" + try: + yield + except ClientError as err: + exception_types = { + 
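+            # map the error code from the botocore ClientError (err.response["Error"]["Code"]) to the corresponding typed exception of the generated ES API; unknown codes fall back to EsBaseException below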
"AccessDeniedException": AccessDeniedException, + "BaseException": EsBaseException, + "ConflictException": ConflictException, + "DisabledOperationException": DisabledOperationException, + "InternalException": InternalException, + "InvalidPaginationTokenException": InvalidPaginationTokenException, + "InvalidTypeException": InvalidTypeException, + "LimitExceededException": LimitExceededException, + "ResourceAlreadyExistsException": ResourceAlreadyExistsException, + "ResourceNotFoundException": ResourceNotFoundException, + "ValidationException": ValidationException, + } + mapped_exception_type = exception_types.get(err.response["Error"]["Code"], EsBaseException) + raise mapped_exception_type(err.response["Error"]["Message"]) + + +class EsProvider(EsApi): + def create_elasticsearch_domain( + self, + context: RequestContext, + domain_name: DomainName, + elasticsearch_version: ElasticsearchVersionString = None, + elasticsearch_cluster_config: ElasticsearchClusterConfig = None, + ebs_options: EBSOptions = None, + access_policies: PolicyDocument = None, + snapshot_options: SnapshotOptions = None, + vpc_options: VPCOptions = None, + cognito_options: CognitoOptions = None, + encryption_at_rest_options: EncryptionAtRestOptions = None, + node_to_node_encryption_options: NodeToNodeEncryptionOptions = None, + advanced_options: AdvancedOptions = None, + log_publishing_options: LogPublishingOptions = None, + domain_endpoint_options: DomainEndpointOptions = None, + advanced_security_options: AdvancedSecurityOptionsInput = None, + auto_tune_options: AutoTuneOptionsInput = None, + tag_list: TagList = None, + **kwargs, + ) -> CreateElasticsearchDomainResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + # If no version is given, we set our default elasticsearch version + engine_version = ( + _version_to_opensearch(elasticsearch_version) + if elasticsearch_version + else constants.ELASTICSEARCH_DEFAULT_VERSION + ) + kwargs = { + "DomainName": domain_name, + "EngineVersion": engine_version, + "ClusterConfig": _clusterconfig_to_opensearch(elasticsearch_cluster_config), + "EBSOptions": ebs_options, + "AccessPolicies": access_policies, + "SnapshotOptions": snapshot_options, + "VPCOptions": vpc_options, + "CognitoOptions": cognito_options, + "EncryptionAtRestOptions": encryption_at_rest_options, + "NodeToNodeEncryptionOptions": node_to_node_encryption_options, + "AdvancedOptions": advanced_options, + "LogPublishingOptions": log_publishing_options, + "DomainEndpointOptions": domain_endpoint_options, + "AdvancedSecurityOptions": advanced_security_options, + "AutoTuneOptions": auto_tune_options, + "TagList": tag_list, + } + + # Filter the kwargs to not set None values at all (boto doesn't like that) + kwargs = {key: value for key, value in kwargs.items() if value is not None} + + with exception_mapper(): + domain_status = opensearch_client.create_domain(**kwargs)["DomainStatus"] + + status = _domainstatus_from_opensearch(domain_status) + return CreateElasticsearchDomainResponse(DomainStatus=status) + + def delete_elasticsearch_domain( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DeleteElasticsearchDomainResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + + with exception_mapper(): + domain_status = opensearch_client.delete_domain( + DomainName=domain_name, + )["DomainStatus"] + + status = _domainstatus_from_opensearch(domain_status) + return 
DeleteElasticsearchDomainResponse(DomainStatus=status) + + def describe_elasticsearch_domain( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeElasticsearchDomainResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + + with exception_mapper(): + opensearch_status = opensearch_client.describe_domain( + DomainName=domain_name, + )["DomainStatus"] + + status = _domainstatus_from_opensearch(opensearch_status) + return DescribeElasticsearchDomainResponse(DomainStatus=status) + + @handler("UpdateElasticsearchDomainConfig", expand=False) + def update_elasticsearch_domain_config( + self, context: RequestContext, payload: UpdateElasticsearchDomainConfigRequest + ) -> UpdateElasticsearchDomainConfigResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + + payload: Dict + if "ElasticsearchClusterConfig" in payload: + payload["ClusterConfig"] = payload["ElasticsearchClusterConfig"] + payload["ClusterConfig"]["InstanceType"] = _instancetype_to_opensearch( + payload["ClusterConfig"]["InstanceType"] + ) + payload.pop("ElasticsearchClusterConfig") + + with exception_mapper(): + opensearch_config = opensearch_client.update_domain_config(**payload)["DomainConfig"] + + config = _domainconfig_from_opensearch(opensearch_config) + return UpdateElasticsearchDomainConfigResponse(DomainConfig=config) + + def describe_elasticsearch_domains( + self, context: RequestContext, domain_names: DomainNameList, **kwargs + ) -> DescribeElasticsearchDomainsResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + + with exception_mapper(): + opensearch_status_list = opensearch_client.describe_domains( + DomainNames=domain_names, + )["DomainStatusList"] + + status_list = [_domainstatus_from_opensearch(s) for s in opensearch_status_list] + return DescribeElasticsearchDomainsResponse(DomainStatusList=status_list) + + def list_domain_names( + self, context: RequestContext, engine_type: EngineType = None, **kwargs + ) -> ListDomainNamesResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + # Only hand the EngineType param to boto if it's set + kwargs = {} + if engine_type: + kwargs["EngineType"] = engine_type + + with exception_mapper(): + domain_names = opensearch_client.list_domain_names(**kwargs)["DomainNames"] + + return ListDomainNamesResponse(DomainNames=cast(Optional[DomainInfoList], domain_names)) + + def list_elasticsearch_versions( + self, + context: RequestContext, + max_results: MaxResults = None, + next_token: NextToken = None, + **kwargs, + ) -> ListElasticsearchVersionsResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + # Construct the arguments as kwargs to not set None values at all (boto doesn't like that) + kwargs = { + key: value + for key, value in {"MaxResults": max_results, "NextToken": next_token}.items() + if value is not None + } + with exception_mapper(): + versions = opensearch_client.list_versions(**kwargs) + + return ListElasticsearchVersionsResponse( + ElasticsearchVersions=[ + _version_from_opensearch(version) for version in versions["Versions"] + ], + NextToken=versions.get("NextToken"), + ) + + def get_compatible_elasticsearch_versions( + self, context: RequestContext, domain_name: DomainName = None, **kwargs
+ ) -> GetCompatibleElasticsearchVersionsResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + # Only hand the DomainName param to boto if it's set + kwargs = {} + if domain_name: + kwargs["DomainName"] = domain_name + + with exception_mapper(): + compatible_versions_response = opensearch_client.get_compatible_versions(**kwargs) + + compatible_versions = compatible_versions_response.get("CompatibleVersions") + return GetCompatibleElasticsearchVersionsResponse( + CompatibleElasticsearchVersions=_compatible_version_list_from_opensearch( + compatible_versions + ) + ) + + def describe_elasticsearch_domain_config( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeElasticsearchDomainConfigResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + + with exception_mapper(): + domain_config = opensearch_client.describe_domain_config(DomainName=domain_name).get( + "DomainConfig" + ) + + return DescribeElasticsearchDomainConfigResponse( + DomainConfig=_domainconfig_from_opensearch(domain_config) + ) + + def add_tags(self, context: RequestContext, arn: ARN, tag_list: TagList, **kwargs) -> None: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + + with exception_mapper(): + opensearch_client.add_tags(ARN=arn, TagList=tag_list) + + def list_tags(self, context: RequestContext, arn: ARN, **kwargs) -> ListTagsResponse: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + + with exception_mapper(): + response = opensearch_client.list_tags(ARN=arn) + + return ListTagsResponse(TagList=response.get("TagList")) + + def remove_tags( + self, context: RequestContext, arn: ARN, tag_keys: StringList, **kwargs + ) -> None: + opensearch_client = connect_to( + region_name=context.region, aws_access_key_id=context.account_id + ).opensearch + + with exception_mapper(): + opensearch_client.remove_tags(ARN=arn, TagKeys=tag_keys) diff --git a/localstack/services/stepfunctions/asl/component/common/__init__.py b/localstack-core/localstack/services/events/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/__init__.py rename to localstack-core/localstack/services/events/__init__.py diff --git a/localstack-core/localstack/services/events/analytics.py b/localstack-core/localstack/services/events/analytics.py new file mode 100644 index 0000000000000..f47924d04fdb4 --- /dev/null +++ b/localstack-core/localstack/services/events/analytics.py @@ -0,0 +1,14 @@ +from enum import StrEnum + +from localstack.utils.analytics.metrics import Counter + + +class InvocationStatus(StrEnum): + success = "success" + error = "error" + + +# number of EventBridge rule invocations per target (e.g., aws:lambda) +# - status label can be `success` or `error`, see InvocationStatus +# - service label is the target service name +rule_invocation = Counter(namespace="events", name="rule_invocations", labels=["status", "service"]) diff --git a/localstack-core/localstack/services/events/api_destination.py b/localstack-core/localstack/services/events/api_destination.py new file mode 100644 index 0000000000000..0bb9f097ffb4b --- /dev/null +++ b/localstack-core/localstack/services/events/api_destination.py @@ -0,0 +1,308 @@ +import base64 +import json +import logging +import re + +import requests + +from 
localstack.aws.api.events import ( + ApiDestinationDescription, + ApiDestinationHttpMethod, + ApiDestinationInvocationRateLimitPerSecond, + ApiDestinationName, + ApiDestinationState, + Arn, + ConnectionArn, + ConnectionAuthorizationType, + ConnectionState, + HttpsEndpoint, + Timestamp, +) +from localstack.aws.connect import connect_to +from localstack.services.events.models import ApiDestination, Connection, ValidationException +from localstack.utils.aws.arns import ( + extract_account_id_from_arn, + extract_region_from_arn, + parse_arn, +) +from localstack.utils.aws.message_forwarding import ( + list_of_parameters_to_object, +) +from localstack.utils.http import add_query_params_to_url +from localstack.utils.strings import to_str + +VALID_AUTH_TYPES = [t.value for t in ConnectionAuthorizationType] +LOG = logging.getLogger(__name__) + + +class APIDestinationService: + def __init__( + self, + name: ApiDestinationName, + region: str, + account_id: str, + connection_arn: ConnectionArn, + connection: Connection, + invocation_endpoint: HttpsEndpoint, + http_method: ApiDestinationHttpMethod, + invocation_rate_limit_per_second: ApiDestinationInvocationRateLimitPerSecond | None, + description: ApiDestinationDescription | None = None, + ): + self.validate_input(name, connection_arn, http_method, invocation_endpoint) + self.connection = connection + state = self._get_state() + + self.api_destination = ApiDestination( + name, + region, + account_id, + connection_arn, + invocation_endpoint, + http_method, + state, + invocation_rate_limit_per_second, + description, + ) + + @classmethod + def restore_from_api_destination_and_connection( + cls, api_destination: ApiDestination, connection: Connection + ): + api_destination_service = cls( + name=api_destination.name, + region=api_destination.region, + account_id=api_destination.account_id, + connection_arn=api_destination.connection_arn, + connection=connection, + invocation_endpoint=api_destination.invocation_endpoint, + http_method=api_destination.http_method, + invocation_rate_limit_per_second=api_destination.invocation_rate_limit_per_second, + ) + api_destination_service.api_destination = api_destination + return api_destination_service + + @property + def arn(self) -> Arn: + return self.api_destination.arn + + @property + def state(self) -> ApiDestinationState: + return self.api_destination.state + + @property + def creation_time(self) -> Timestamp: + return self.api_destination.creation_time + + @property + def last_modified_time(self) -> Timestamp: + return self.api_destination.last_modified_time + + def set_state(self, state: ApiDestinationState) -> None: + if hasattr(self, "api_destination"): + if state == ApiDestinationState.ACTIVE: + state = self._get_state() + self.api_destination.state = state + + def update( + self, + connection, + invocation_endpoint, + http_method, + invocation_rate_limit_per_second, + description, + ): + self.set_state(ApiDestinationState.INACTIVE) + self.connection = connection + self.api_destination.connection_arn = connection.arn + if invocation_endpoint: + self.api_destination.invocation_endpoint = invocation_endpoint + if http_method: + self.api_destination.http_method = http_method + if invocation_rate_limit_per_second: + self.api_destination.invocation_rate_limit_per_second = invocation_rate_limit_per_second + if description: + self.api_destination.description = description + self.api_destination.last_modified_time = Timestamp.now() + self.set_state(ApiDestinationState.ACTIVE) + + def _get_state(self) -> 
ApiDestinationState: + """Determine ApiDestinationState based on ConnectionState.""" + return ( + ApiDestinationState.ACTIVE + if self.connection.state == ConnectionState.AUTHORIZED + else ApiDestinationState.INACTIVE + ) + + @classmethod + def validate_input( + cls, + name: ApiDestinationName, + connection_arn: ConnectionArn, + http_method: ApiDestinationHttpMethod, + invocation_endpoint: HttpsEndpoint, + ) -> None: + errors = [] + errors.extend(cls._validate_api_destination_name(name)) + errors.extend(cls._validate_connection_arn(connection_arn)) + errors.extend(cls._validate_http_method(http_method)) + errors.extend(cls._validate_invocation_endpoint(invocation_endpoint)) + + if errors: + error_message = ( + f"{len(errors)} validation error{'s' if len(errors) > 1 else ''} detected: " + ) + error_message += "; ".join(errors) + raise ValidationException(error_message) + + @staticmethod + def _validate_api_destination_name(name: str) -> list[str]: + """Validate the API destination name according to AWS rules. Returns a list of validation errors.""" + errors = [] + if not re.match(r"^[\.\-_A-Za-z0-9]+$", name): + errors.append( + f"Value '{name}' at 'name' failed to satisfy constraint: " + "Member must satisfy regular expression pattern: [\\.\\-_A-Za-z0-9]+" + ) + if not (1 <= len(name) <= 64): + errors.append( + f"Value '{name}' at 'name' failed to satisfy constraint: " + "Member must have length less than or equal to 64" + ) + return errors + + @staticmethod + def _validate_connection_arn(connection_arn: ConnectionArn) -> list[str]: + errors = [] + if not re.match( + r"^arn:aws([a-z]|\-)*:events:[a-z0-9\-]+:\d{12}:connection/[\.\-_A-Za-z0-9]+/[\-A-Za-z0-9]+$", + connection_arn, + ): + errors.append( + f"Value '{connection_arn}' at 'connectionArn' failed to satisfy constraint: " + "Member must satisfy regular expression pattern: " + "^arn:aws([a-z]|\\-)*:events:([a-z]|\\d|\\-)*:([0-9]{12})?:connection\\/[\\.\\-_A-Za-z0-9]+\\/[\\-A-Za-z0-9]+$" + ) + return errors + + @staticmethod + def _validate_http_method(http_method: ApiDestinationHttpMethod) -> list[str]: + errors = [] + allowed_methods = ["HEAD", "POST", "PATCH", "DELETE", "PUT", "GET", "OPTIONS"] + if http_method not in allowed_methods: + errors.append( + f"Value '{http_method}' at 'httpMethod' failed to satisfy constraint: " + f"Member must satisfy enum value set: [{', '.join(allowed_methods)}]" + ) + return errors + + @staticmethod + def _validate_invocation_endpoint(invocation_endpoint: HttpsEndpoint) -> list[str]: + errors = [] + endpoint_pattern = r"^((%[0-9A-Fa-f]{2}|[-()_.!~*';/?:@&=+$,A-Za-z0-9])+)([).!';/?:,])?$" + if not re.match(endpoint_pattern, invocation_endpoint): + errors.append( + f"Value '{invocation_endpoint}' at 'invocationEndpoint' failed to satisfy constraint: " + "Member must satisfy regular expression pattern: " + "^((%[0-9A-Fa-f]{2}|[-()_.!~*';/?:@&=+$,A-Za-z0-9])+)([).!';/?:,])?$" + ) + return errors + + +ApiDestinationServiceDict = dict[Arn, APIDestinationService] + + +def add_api_destination_authorization(destination, headers, event): + connection_arn = destination.get("ConnectionArn", "") + connection_name = re.search(r"connection\/([a-zA-Z0-9-_]+)\/", connection_arn).group(1) + + account_id = extract_account_id_from_arn(connection_arn) + region = extract_region_from_arn(connection_arn) + + events_client = connect_to(aws_access_key_id=account_id, region_name=region).events + connection_details = events_client.describe_connection(Name=connection_name) + secret_arn = connection_details["SecretArn"] + 
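+    # the connection's credentials are stored as a JSON secret in Secrets Manager; derive the owning account and region from the secret ARN, then fetch and decode the secret value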
parsed_arn = parse_arn(secret_arn) + secretsmanager_client = connect_to( + aws_access_key_id=parsed_arn["account"], region_name=parsed_arn["region"] + ).secretsmanager + auth_secret = json.loads( + secretsmanager_client.get_secret_value(SecretId=secret_arn)["SecretString"] + ) + + headers.update(_auth_keys_from_connection(connection_details, auth_secret)) + + auth_parameters = connection_details.get("AuthParameters", {}) + invocation_parameters = auth_parameters.get("InvocationHttpParameters") + + endpoint = destination.get("InvocationEndpoint") + if invocation_parameters: + header_parameters = list_of_parameters_to_object( + invocation_parameters.get("HeaderParameters", []) + ) + headers.update(header_parameters) + + body_parameters = list_of_parameters_to_object( + invocation_parameters.get("BodyParameters", []) + ) + event.update(body_parameters) + + query_parameters = invocation_parameters.get("QueryStringParameters", []) + query_object = list_of_parameters_to_object(query_parameters) + endpoint = add_query_params_to_url(endpoint, query_object) + + return endpoint + + +def _auth_keys_from_connection(connection_details, auth_secret): + headers = {} + + auth_type = connection_details.get("AuthorizationType").upper() + auth_parameters = connection_details.get("AuthParameters") + match auth_type: + case ConnectionAuthorizationType.BASIC: + username = auth_secret.get("username", "") + password = auth_secret.get("password", "") + auth = "Basic " + to_str(base64.b64encode(f"{username}:{password}".encode("ascii"))) + headers.update({"authorization": auth}) + + case ConnectionAuthorizationType.API_KEY: + api_key_name = auth_secret.get("api_key_name", "") + api_key_value = auth_secret.get("api_key_value", "") + headers.update({api_key_name: api_key_value}) + + case ConnectionAuthorizationType.OAUTH_CLIENT_CREDENTIALS: + oauth_parameters = auth_parameters.get("OAuthParameters", {}) + oauth_method = auth_secret.get("http_method") + + oauth_http_parameters = oauth_parameters.get("OAuthHttpParameters", {}) + oauth_endpoint = auth_secret.get("authorization_endpoint", "") + query_object = list_of_parameters_to_object( + oauth_http_parameters.get("QueryStringParameters", []) + ) + oauth_endpoint = add_query_params_to_url(oauth_endpoint, query_object) + + client_id = auth_secret.get("client_id", "") + client_secret = auth_secret.get("client_secret", "") + + oauth_body = list_of_parameters_to_object( + oauth_http_parameters.get("BodyParameters", []) + ) + oauth_body.update({"client_id": client_id, "client_secret": client_secret}) + + oauth_header = list_of_parameters_to_object( + oauth_http_parameters.get("HeaderParameters", []) + ) + oauth_result = requests.request( + method=oauth_method, + url=oauth_endpoint, + data=json.dumps(oauth_body), + headers=oauth_header, + ) + oauth_data = json.loads(oauth_result.text) + + token_type = oauth_data.get("token_type", "") + access_token = oauth_data.get("access_token", "") + auth_header = f"{token_type} {access_token}" + headers.update({"authorization": auth_header}) + + return headers diff --git a/localstack-core/localstack/services/events/archive.py b/localstack-core/localstack/services/events/archive.py new file mode 100644 index 0000000000000..12d7e4601747f --- /dev/null +++ b/localstack-core/localstack/services/events/archive.py @@ -0,0 +1,189 @@ +import json +import logging +from datetime import datetime, timezone +from typing import Self + +from botocore.client import BaseClient + +from localstack.aws.api.events import ( + ArchiveState, + Arn, + 
EventBusName, + TargetId, + Timestamp, +) +from localstack.aws.connect import connect_to +from localstack.services.events.models import ( + Archive, + ArchiveDescription, + ArchiveName, + EventPattern, + FormattedEvent, + FormattedEventList, + RetentionDays, + RuleName, +) +from localstack.services.events.utils import extract_event_bus_name +from localstack.utils.aws.client_types import ServicePrincipal + +LOG = logging.getLogger(__name__) + + +class ArchiveService: + archive_name: ArchiveName + region: str + account_id: str + event_source_arn: Arn + description: ArchiveDescription + event_pattern: EventPattern + retention_days: RetentionDays + archive: Archive + client: BaseClient + event_bus_name: EventBusName + rule_name: RuleName + target_id: TargetId + + def __init__(self, archive: Archive): + self.archive = archive + self.set_state(ArchiveState.CREATING) + self.set_creation_time() + self.client: BaseClient = self._initialize_client() + self.event_bus_name: EventBusName = extract_event_bus_name(archive.event_source_arn) + self.set_state(ArchiveState.ENABLED) + self.rule_name = f"Events-Archive-{self.archive_name}" + self.target_id = f"Events-Archive-{self.archive_name}" + + @classmethod + def create_archive_service( + cls, + archive_name: ArchiveName, + region: str, + account_id: str, + event_source_arn: Arn, + description: ArchiveDescription, + event_pattern: EventPattern, + retention_days: RetentionDays, + ) -> Self: + return cls( + Archive( + archive_name, + region, + account_id, + event_source_arn, + description, + event_pattern, + retention_days, + ) + ) + + def register_archive_rule_and_targets(self): + self._create_archive_rule() + self._create_archive_target() + + def __getattr__(self, name): + return getattr(self.archive, name) + + @property + def archive_name(self) -> ArchiveName: + return self.archive.name + + @property + def archive_arn(self) -> Arn: + return self.archive.arn + + def set_state(self, state: ArchiveState) -> None: + self.archive.state = state + + def set_creation_time(self) -> None: + self.archive.creation_time = datetime.now(timezone.utc) + + def update( + self, + description: ArchiveDescription, + event_pattern: EventPattern, + retention_days: RetentionDays, + ) -> None: + self.set_state(ArchiveState.UPDATING) + if description is not None: + self.archive.description = description + if event_pattern is not None: + self.archive.event_pattern = event_pattern + if retention_days is not None: + self.archive.retention_days = retention_days + self.set_state(ArchiveState.ENABLED) + + def delete(self) -> None: + self.set_state(ArchiveState.DISABLED) + try: + self.client.remove_targets( + Rule=self.rule_name, EventBusName=self.event_bus_name, Ids=[self.target_id] + ) + except Exception as e: + LOG.debug("Target %s could not be removed, %s", self.target_id, e) + try: + self.client.delete_rule(Name=self.rule_name, EventBusName=self.event_bus_name) + except Exception as e: + LOG.debug("Rule %s could not be deleted, %s", self.rule_name, e) + + def put_events(self, events: FormattedEventList) -> None: + for event in events: + self.archive.events[event["id"]] = event + + def get_events(self, start_time: Timestamp, end_time: Timestamp) -> FormattedEventList: + events_to_replay = self._filter_events_start_end_time(start_time, end_time) + return events_to_replay + + def _initialize_client(self) -> BaseClient: + client_factory = connect_to(aws_access_key_id=self.account_id, region_name=self.region) + client = client_factory.get_client("events") + + service_principal = 
ServicePrincipal.events + client = client.request_metadata(service_principal=service_principal, source_arn=self.arn) + return client + + def _create_archive_rule( + self, + ): + default_event_pattern = { + "replay-name": [{"exists": False}], + } + if self.event_pattern: + updated_event_pattern = json.loads(self.event_pattern) + updated_event_pattern.update(default_event_pattern) + else: + updated_event_pattern = default_event_pattern + self.client.put_rule( + Name=self.rule_name, + EventBusName=self.event_bus_name, + EventPattern=json.dumps(updated_event_pattern), + ) + + def _create_archive_target( + self, + ): + """Creates a target for the archive rule. The target is required so that the provider can access its + parameters when sending events to the target, but it is never actually invoked: + events are put into the archive directly so as not to overload the gateway""" + self.client.put_targets( + Rule=self.rule_name, + EventBusName=self.event_bus_name, + Targets=[{"Id": self.target_id, "Arn": self.arn}], + ) + + def _normalize_datetime(self, dt: datetime) -> datetime: + return dt.replace(second=0, microsecond=0) + + def _filter_events_start_end_time( + self, event_start_time: Timestamp, event_end_time: Timestamp + ) -> list[FormattedEvent]: + events = self.archive.events + event_start_time = self._normalize_datetime(event_start_time) + event_end_time = self._normalize_datetime(event_end_time) + return [ + event + for event in events.values() + if event_start_time <= self._normalize_datetime(event["time"]) <= event_end_time + ] + + +ArchiveServiceDict = dict[Arn, ArchiveService] diff --git a/localstack-core/localstack/services/events/connection.py b/localstack-core/localstack/services/events/connection.py new file mode 100644 index 0000000000000..c2b72a2025328 --- /dev/null +++ b/localstack-core/localstack/services/events/connection.py @@ -0,0 +1,344 @@ +import json +import logging +import re +import uuid +from datetime import datetime, timezone + +from localstack.aws.api.events import ( + Arn, + ConnectionAuthorizationType, + ConnectionDescription, + ConnectionName, + ConnectionState, + ConnectivityResourceParameters, + CreateConnectionAuthRequestParameters, + Timestamp, + UpdateConnectionAuthRequestParameters, +) +from localstack.aws.connect import connect_to +from localstack.services.events.models import Connection, ValidationException + +VALID_AUTH_TYPES = [t.value for t in ConnectionAuthorizationType] +LOG = logging.getLogger(__name__) + + +class ConnectionService: + def __init__( + self, + name: ConnectionName, + region: str, + account_id: str, + authorization_type: ConnectionAuthorizationType, + auth_parameters: CreateConnectionAuthRequestParameters, + description: ConnectionDescription | None = None, + invocation_connectivity_parameters: ConnectivityResourceParameters | None = None, + create_secret: bool = True, + ): + self._validate_input(name, authorization_type) + state = self._get_initial_state(authorization_type) + + secret_arn = None + if create_secret: + secret_arn = self.create_connection_secret( + region, account_id, name, authorization_type, auth_parameters + ) + public_auth_parameters = self._get_public_parameters(authorization_type, auth_parameters) + + self.connection = Connection( + name, + region, + account_id, + authorization_type, + public_auth_parameters, + state, + secret_arn, + description, + invocation_connectivity_parameters, + ) + + @classmethod + def restore_from_connection(cls, connection: Connection): + connection_service = cls( + connection.name,
connection.region, + connection.account_id, + connection.authorization_type, + connection.auth_parameters, + create_secret=False, + ) + connection_service.connection = connection + return connection_service + + @property + def arn(self) -> Arn: + return self.connection.arn + + @property + def state(self) -> ConnectionState: + return self.connection.state + + @property + def creation_time(self) -> Timestamp: + return self.connection.creation_time + + @property + def last_modified_time(self) -> Timestamp: + return self.connection.last_modified_time + + @property + def last_authorized_time(self) -> Timestamp: + return self.connection.last_authorized_time + + @property + def secret_arn(self) -> Arn: + return self.connection.secret_arn + + @property + def auth_parameters(self) -> CreateConnectionAuthRequestParameters: + return self.connection.auth_parameters + + def set_state(self, state: ConnectionState) -> None: + if hasattr(self, "connection"): + self.connection.state = state + + def update( + self, + description: ConnectionDescription, + authorization_type: ConnectionAuthorizationType, + auth_parameters: UpdateConnectionAuthRequestParameters, + invocation_connectivity_parameters: ConnectivityResourceParameters | None = None, + ) -> None: + self.set_state(ConnectionState.UPDATING) + if description: + self.connection.description = description + if invocation_connectivity_parameters: + self.connection.invocation_connectivity_parameters = invocation_connectivity_parameters + # Use existing values if not provided in update + if authorization_type: + auth_type = ( + authorization_type.value + if hasattr(authorization_type, "value") + else authorization_type + ) + self._validate_auth_type(auth_type) + else: + auth_type = self.connection.authorization_type + + try: + if self.connection.secret_arn: + self.update_connection_secret( + self.connection.secret_arn, auth_type, auth_parameters + ) + else: + secret_arn = self.create_connection_secret( + self.connection.region, + self.connection.account_id, + self.connection.name, + auth_type, + auth_parameters, + ) + self.connection.secret_arn = secret_arn + self.connection.last_authorized_time = datetime.now(timezone.utc) + + # Set new values + self.connection.authorization_type = auth_type + public_auth_parameters = ( + self._get_public_parameters(authorization_type, auth_parameters) + if auth_parameters + else self.connection.auth_parameters + ) + self.connection.auth_parameters = public_auth_parameters + self.set_state(ConnectionState.AUTHORIZED) + self.connection.last_modified_time = datetime.now(timezone.utc) + + except Exception as error: + LOG.warning( + "Connection with name %s updating failed with errors: %s.", + self.connection.name, + error, + ) + + def delete(self) -> None: + self.set_state(ConnectionState.DELETING) + self.delete_connection_secret(self.connection.secret_arn) + self.set_state(ConnectionState.DELETING) # required for AWS parity + self.connection.last_modified_time = datetime.now(timezone.utc) + + def create_connection_secret( + self, + region: str, + account_id: str, + name: str, + authorization_type: ConnectionAuthorizationType, + auth_parameters: CreateConnectionAuthRequestParameters + | UpdateConnectionAuthRequestParameters, + ) -> Arn | None: + self.set_state(ConnectionState.AUTHORIZING) + secretsmanager_client = connect_to( + aws_access_key_id=account_id, region_name=region + ).secretsmanager + secret_value = self._get_secret_value(authorization_type, auth_parameters) + secret_name = 
f"events!connection/{name}/{str(uuid.uuid4())}" + try: + secret_arn = secretsmanager_client.create_secret( + Name=secret_name, + SecretString=secret_value, + Tags=[{"Key": "BYPASS_SECRET_ID_VALIDATION", "Value": "1"}], + )["ARN"] + self.set_state(ConnectionState.AUTHORIZED) + return secret_arn + except Exception as error: + LOG.warning("Secret with name %s creation failed with errors: %s.", secret_name, error) + + def update_connection_secret( + self, + secret_arn: str, + authorization_type: ConnectionAuthorizationType, + auth_parameters: UpdateConnectionAuthRequestParameters, + ) -> None: + self.set_state(ConnectionState.AUTHORIZING) + secretsmanager_client = connect_to( + aws_access_key_id=self.connection.account_id, region_name=self.connection.region + ).secretsmanager + secret_value = self._get_secret_value(authorization_type, auth_parameters) + try: + secretsmanager_client.update_secret(SecretId=secret_arn, SecretString=secret_value) + self.set_state(ConnectionState.AUTHORIZED) + self.connection.last_authorized_time = datetime.now(timezone.utc) + except Exception as error: + LOG.warning("Secret with id %s updating failed with errors: %s.", secret_arn, error) + + def delete_connection_secret(self, secret_arn: str) -> None: + self.set_state(ConnectionState.DEAUTHORIZING) + secretsmanager_client = connect_to( + aws_access_key_id=self.connection.account_id, region_name=self.connection.region + ).secretsmanager + try: + secretsmanager_client.delete_secret( + SecretId=secret_arn, ForceDeleteWithoutRecovery=True + ) + self.set_state(ConnectionState.DEAUTHORIZED) + except Exception as error: + LOG.warning("Secret with id %s deleting failed with errors: %s.", secret_arn, error) + + def _get_initial_state(self, auth_type: str) -> ConnectionState: + if auth_type == "OAUTH_CLIENT_CREDENTIALS": + return ConnectionState.AUTHORIZING + return ConnectionState.AUTHORIZED + + def _get_secret_value( + self, + authorization_type: ConnectionAuthorizationType, + auth_parameters: CreateConnectionAuthRequestParameters + | UpdateConnectionAuthRequestParameters, + ) -> str: + result = {} + match authorization_type: + case ConnectionAuthorizationType.BASIC: + params = auth_parameters.get("BasicAuthParameters", {}) + result = {"username": params.get("Username"), "password": params.get("Password")} + case ConnectionAuthorizationType.API_KEY: + params = auth_parameters.get("ApiKeyAuthParameters", {}) + result = { + "api_key_name": params.get("ApiKeyName"), + "api_key_value": params.get("ApiKeyValue"), + } + case ConnectionAuthorizationType.OAUTH_CLIENT_CREDENTIALS: + params = auth_parameters.get("OAuthParameters", {}) + client_params = params.get("ClientParameters", {}) + result = { + "client_id": client_params.get("ClientID"), + "client_secret": client_params.get("ClientSecret"), + "authorization_endpoint": params.get("AuthorizationEndpoint"), + "http_method": params.get("HttpMethod"), + } + + if "InvocationHttpParameters" in auth_parameters: + result["invocation_http_parameters"] = auth_parameters["InvocationHttpParameters"] + + return json.dumps(result) + + def _get_public_parameters( + self, + auth_type: ConnectionAuthorizationType, + auth_parameters: CreateConnectionAuthRequestParameters + | UpdateConnectionAuthRequestParameters, + ) -> CreateConnectionAuthRequestParameters: + """Extract public parameters (without secrets) based on auth type.""" + public_params = {} + + if ( + auth_type == ConnectionAuthorizationType.BASIC + and "BasicAuthParameters" in auth_parameters + ): + 
public_params["BasicAuthParameters"] = { + "Username": auth_parameters["BasicAuthParameters"]["Username"] + } + + elif ( + auth_type == ConnectionAuthorizationType.API_KEY + and "ApiKeyAuthParameters" in auth_parameters + ): + public_params["ApiKeyAuthParameters"] = { + "ApiKeyName": auth_parameters["ApiKeyAuthParameters"]["ApiKeyName"] + } + + elif ( + auth_type == ConnectionAuthorizationType.OAUTH_CLIENT_CREDENTIALS + and "OAuthParameters" in auth_parameters + ): + oauth_params = auth_parameters["OAuthParameters"] + public_params["OAuthParameters"] = { + "AuthorizationEndpoint": oauth_params["AuthorizationEndpoint"], + "HttpMethod": oauth_params["HttpMethod"], + "ClientParameters": {"ClientID": oauth_params["ClientParameters"]["ClientID"]}, + } + if "OAuthHttpParameters" in oauth_params: + public_params["OAuthParameters"]["OAuthHttpParameters"] = oauth_params.get( + "OAuthHttpParameters" + ) + + if "InvocationHttpParameters" in auth_parameters: + public_params["InvocationHttpParameters"] = auth_parameters["InvocationHttpParameters"] + + return public_params + + def _validate_input( + self, + name: ConnectionName, + authorization_type: ConnectionAuthorizationType, + ) -> None: + errors = [] + errors.extend(self._validate_connection_name(name)) + errors.extend(self._validate_auth_type(authorization_type)) + if errors: + error_message = ( + f"{len(errors)} validation error{'s' if len(errors) > 1 else ''} detected: " + ) + error_message += "; ".join(errors) + raise ValidationException(error_message) + + def _validate_connection_name(self, name: str) -> list[str]: + errors = [] + if not re.match("^[\\.\\-_A-Za-z0-9]+$", name): + errors.append( + f"Value '{name}' at 'name' failed to satisfy constraint: " + "Member must satisfy regular expression pattern: [\\.\\-_A-Za-z0-9]+" + ) + if not (1 <= len(name) <= 64): + errors.append( + f"Value '{name}' at 'name' failed to satisfy constraint: " + "Member must have length less than or equal to 64" + ) + return errors + + def _validate_auth_type(self, auth_type: str) -> list[str]: + if auth_type not in VALID_AUTH_TYPES: + return [ + f"Value '{auth_type}' at 'authorizationType' failed to satisfy constraint: " + f"Member must satisfy enum value set: [{', '.join(VALID_AUTH_TYPES)}]" + ] + return [] + + +ConnectionServiceDict = dict[Arn, ConnectionService] diff --git a/localstack-core/localstack/services/events/event_bus.py b/localstack-core/localstack/services/events/event_bus.py new file mode 100644 index 0000000000000..1ea6f332a493b --- /dev/null +++ b/localstack-core/localstack/services/events/event_bus.py @@ -0,0 +1,131 @@ +import json +from datetime import datetime, timezone +from typing import Optional, Self + +from localstack.aws.api.events import ( + Action, + Arn, + Condition, + EventBusName, + Principal, + ResourceNotFoundException, + StatementId, + TagList, +) +from localstack.services.events.models import EventBus, ResourcePolicy, RuleDict, Statement +from localstack.utils.aws.arns import get_partition + + +class EventBusService: + name: EventBusName + region: str + account_id: str + event_source_name: str | None + tags: TagList | None + policy: str | None + event_bus: EventBus + + def __init__(self, event_bus: EventBus): + self.event_bus = event_bus + + @classmethod + def create_event_bus_service( + cls, + name: EventBusName, + region: str, + account_id: str, + event_source_name: Optional[str] = None, + description: Optional[str] = None, + tags: Optional[TagList] = None, + policy: Optional[str] = None, + rules: Optional[RuleDict] = None, + ) 
-> Self: + return cls( + EventBus( + name, + region, + account_id, + event_source_name, + description, + tags, + policy, + rules, + ) + ) + + @property + def arn(self) -> Arn: + return self.event_bus.arn + + def put_permission( + self, + action: Action, + principal: Principal, + statement_id: StatementId, + condition: Condition, + policy: str, + ): + # TODO: cover via test + # if policy and any([action, principal, statement_id, condition]): + # raise ValueError("Combination of policy with other arguments is not allowed") + self.event_bus.last_modified_time = datetime.now(timezone.utc) + if policy: # policy document replaces all existing permissions + policy = json.loads(policy) + parsed_policy = ResourcePolicy(**policy) + self.event_bus.policy = parsed_policy + else: + permission_statement = self._parse_statement( + statement_id, action, principal, self.arn, condition + ) + + if existing_policy := self.event_bus.policy: + if permission_statement["Principal"] == "*": + for statement in existing_policy["Statement"]: + if "*" == statement["Principal"]: + return + existing_policy["Statement"].append(permission_statement) + else: + parsed_policy = ResourcePolicy( + Version="2012-10-17", Statement=[permission_statement] + ) + self.event_bus.policy = parsed_policy + + def revoke_put_events_permission(self, statement_id: str): + policy = self.event_bus.policy + if not policy or not any( + statement.get("Sid") == statement_id for statement in policy["Statement"] + ): + raise ResourceNotFoundException("Statement with the provided id does not exist.") + if policy: + policy["Statement"] = [ + statement + for statement in policy["Statement"] + if statement.get("Sid") != statement_id + ] + self.event_bus.last_modified_time = datetime.now(timezone.utc) + + def _parse_statement( + self, + statement_id: StatementId, + action: Action, + principal: Principal, + resource_arn: Arn, + condition: Condition, + ) -> Statement: + # TODO: cover via test + # if condition and principal != "*": + # raise ValueError("Condition can only be set when principal is '*'") + if principal != "*": + principal = {"AWS": f"arn:{get_partition(self.event_bus.region)}:iam::{principal}:root"} + statement = Statement( + Sid=statement_id, + Effect="Allow", + Principal=principal, + Action=action, + Resource=resource_arn, + Condition=condition, + ) + return statement + + +EventBusServiceDict = dict[Arn, EventBusService] diff --git a/localstack-core/localstack/services/events/event_rule_engine.py b/localstack-core/localstack/services/events/event_rule_engine.py new file mode 100644 index 0000000000000..a1af9a9cdb339 --- /dev/null +++ b/localstack-core/localstack/services/events/event_rule_engine.py @@ -0,0 +1,624 @@ +import ipaddress +import json +import re +import typing as t + +from localstack.aws.api.events import InvalidEventPatternException + + +class EventRuleEngine: + def evaluate_pattern_on_event(self, compiled_event_pattern: dict, event: str | dict): + if isinstance(event, str): + try: + body = json.loads(event) + if not isinstance(body, dict): + return False + except json.JSONDecodeError: + # Event patterns for the message body assume that the message payload is a well-formed JSON object. + return False + else: + body = event + + return self._evaluate_nested_event_pattern_on_dict(compiled_event_pattern, payload=body) + + def _evaluate_nested_event_pattern_on_dict(self, event_pattern, payload: dict) -> bool: + """ + This method evaluates the event pattern against the JSON decoded payload.
+ Although it's not documented anywhere, AWS allows `.` in field names in the event pattern and the payload, + and will evaluate them. However, it's not JSONPath compatible. + See: + https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-create-pattern.html#eb-create-pattern-considerations + Example: + Pattern: `{"field1.field2": "value1"}` + This pattern will match both `{"field1.field2": "value1"}` and `{"field1": {"field2": "value1"}}`, unlike JSONPath + for which `.` points to a child node. + This suggests that both dictionaries are flattened to a single level for easier matching, without + recursion. + :param event_pattern: a dict, starting at the Event Pattern + :param payload: a dict, starting at the MessageBody + :return: True if the payload respects the event pattern, otherwise False + """ + if not event_pattern: + return True + + # TODO: maybe save/cache the flattened/expanded pattern? + flat_pattern_conditions = self.flatten_pattern(event_pattern) + flat_payloads = self.flatten_payload(payload, flat_pattern_conditions) + + return any( + all( + any( + self._evaluate_condition( + flat_payload.get(key), condition, field_exists=key in flat_payload + ) + for condition in conditions + for flat_payload in flat_payloads + ) + for key, conditions in flat_pattern.items() + ) + for flat_pattern in flat_pattern_conditions + ) + + def _evaluate_condition(self, value, condition, field_exists: bool): + if not isinstance(condition, dict): + return field_exists and value == condition + elif (must_exist := condition.get("exists")) is not None: + # if must_exist is True then field_exists must be True + # if must_exist is False then field_exists must be False + return must_exist == field_exists + elif (anything_but := condition.get("anything-but")) is not None: + if isinstance(anything_but, dict): + if (not_condition := anything_but.get("prefix")) is not None: + predicate = self._evaluate_prefix + elif (not_condition := anything_but.get("suffix")) is not None: + predicate = self._evaluate_suffix + elif (not_condition := anything_but.get("equals-ignore-case")) is not None: + predicate = self._evaluate_equal_ignore_case + elif (not_condition := anything_but.get("wildcard")) is not None: + predicate = self._evaluate_wildcard + else: + # this should not happen as we validate the EventPattern before + return False + + if isinstance(not_condition, str): + return not predicate(not_condition, value) + elif isinstance(not_condition, list): + return all( + not predicate(sub_condition, value) for sub_condition in not_condition + ) + + elif isinstance(anything_but, list): + return value not in anything_but + else: + return value != anything_but + + elif value is None: + # the remaining conditions require the value to not be None + return False + elif (prefix := condition.get("prefix")) is not None: + if isinstance(prefix, dict): + if (prefix_equal_ignore_case := prefix.get("equals-ignore-case")) is not None: + return self._evaluate_prefix(prefix_equal_ignore_case.lower(), value.lower()) + else: + return self._evaluate_prefix(prefix, value) + + elif (suffix := condition.get("suffix")) is not None: + if isinstance(suffix, dict): + if suffix_equal_ignore_case := suffix.get("equals-ignore-case"): + return self._evaluate_suffix(suffix_equal_ignore_case.lower(), value.lower()) + else: + return self._evaluate_suffix(suffix, value) + + elif (equal_ignore_case := condition.get("equals-ignore-case")) is not None: + return self._evaluate_equal_ignore_case(equal_ignore_case, value) + + # we validated
that `numeric` should be a non-empty list when creating the rule, we don't need the None check + elif numeric_condition := condition.get("numeric"): + return self._evaluate_numeric_condition(numeric_condition, value) + + # we also validated the `cidr` that it cannot be empty + elif cidr := condition.get("cidr"): + return self._evaluate_cidr(cidr, value) + + elif (wildcard := condition.get("wildcard")) is not None: + return self._evaluate_wildcard(wildcard, value) + + return False + + @staticmethod + def _evaluate_prefix(condition: str | list, value: str) -> bool: + return value.startswith(condition) + + @staticmethod + def _evaluate_suffix(condition: str | list, value: str) -> bool: + return value.endswith(condition) + + @staticmethod + def _evaluate_equal_ignore_case(condition: str, value: str) -> bool: + return condition.lower() == value.lower() + + @staticmethod + def _evaluate_cidr(condition: str, value: str) -> bool: + try: + ip = ipaddress.ip_address(value) + return ip in ipaddress.ip_network(condition) + except ValueError: + return False + + @staticmethod + def _evaluate_wildcard(condition: str, value: str) -> bool: + return bool(re.match(re.escape(condition).replace("\\*", ".+") + "$", value)) + + @staticmethod + def _evaluate_numeric_condition(conditions: list, value: t.Any) -> bool: + if not isinstance(value, (int, float)): + return False + try: + # try if the value is numeric + value = float(value) + except ValueError: + # the value is not numeric, the condition is False + return False + + for i in range(0, len(conditions), 2): + operator = conditions[i] + operand = float(conditions[i + 1]) + + if operator == "=": + if value != operand: + return False + elif operator == ">": + if value <= operand: + return False + elif operator == "<": + if value >= operand: + return False + elif operator == ">=": + if value < operand: + return False + elif operator == "<=": + if value > operand: + return False + + return True + + @staticmethod + def flatten_pattern(nested_dict: dict) -> list[dict]: + """ + Takes a dictionary as input and will output the dictionary on a single level. + Input: + `{"field1": {"field2": {"field3": "val1", "field4": "val2"}}}` + Output: + `[ + { + "field1.field2.field3": "val1", + "field1.field2.field4": "val2" + } + ]` + Input with $or will create multiple outputs: + `{"$or": [{"field1": "val1"}, {"field2": "val2"}], "field3": "val3"}` + Output: + `[ + {"field1": "val1", "field3": "val3"}, + {"field2": "val2", "field3": "val3"} + ]` + :param nested_dict: a (nested) dictionary + :return: a list of flattened dictionaries with no nested dict or list inside, flattened to a + single level, one list item for every list item encountered + """ + + def _traverse_event_pattern(obj, array=None, parent_key=None) -> list: + if array is None: + array = [{}] + + for key, values in obj.items(): + if key == "$or" and isinstance(values, list) and len(values) > 1: + # $or will create multiple new branches in the array. 
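To make the flattening behaviour concrete, here is a small usage sketch mirroring the docstring examples above; it assumes only the `EventRuleEngine` class defined in this file:

```python
from localstack.services.events.event_rule_engine import EventRuleEngine

# Nested keys are joined with "." into a single flattened level.
assert EventRuleEngine.flatten_pattern(
    {"field1": {"field2": {"field3": "val1", "field4": "val2"}}}
) == [{"field1.field2.field3": "val1", "field1.field2.field4": "val2"}]

# "$or" fans out into one flattened dict per alternative; sibling keys
# outside the $or are copied into every branch.
assert EventRuleEngine.flatten_pattern(
    {"$or": [{"field1": "val1"}, {"field2": "val2"}], "field3": "val3"}
) == [
    {"field1": "val1", "field3": "val3"},
    {"field2": "val2", "field3": "val3"},
]
```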
+                    array = [
+                        i
+                        for value in values
+                        for i in _traverse_event_pattern(value, array, parent_key)
+                    ]
+                else:
+                    # We update the parent key so that {"key1": {"key2": ""}} becomes "key1.key2"
+                    _parent_key = f"{parent_key}.{key}" if parent_key else key
+                    if isinstance(values, dict):
+                        # If the current key has a child dict -- key: "key1", child: {"key2": ["val1", "val2"]}
+                        # we only update the parent_key and traverse its children with the current branches
+                        array = _traverse_event_pattern(values, array, _parent_key)
+                    else:
+                        # If the current key has no child, this means we found the values to match -- child: ["val1", "val2"]
+                        # we update the branches with the parent chain and the values -- {"key1.key2": ["val1", "val2"]}
+                        array = [{**item, _parent_key: values} for item in array]
+
+            return array
+
+        return _traverse_event_pattern(nested_dict)
+
+    @staticmethod
+    def flatten_payload(payload: dict, patterns: list[dict]) -> list[dict]:
+        """
+        Takes a dictionary as input and outputs it flattened to a single level.
+        The dictionary can have lists containing other dictionaries, and one root-level entry will be created for
+        every item in a list if it corresponds to the entries of the patterns.
+        Input:
+        payload:
+        `{"field1": {
+            "field2": [
+                {"field3": "val1", "field4": "val2"},
+                {"field3": "val3", "field4": "val4"}
+            ]
+        }}`
+        patterns:
+        `[
+            {"field1.field2.field3": <condition>,
+             "field1.field2.field4": <condition>}
+        ]`
+        Output:
+        `[
+            {
+                "field1.field2.field3": "val1",
+                "field1.field2.field4": "val2"
+            },
+            {
+                "field1.field2.field3": "val3",
+                "field1.field2.field4": "val4"
+            },
+        ]`
+        :param payload: a (nested) dictionary, the event payload
+        :param patterns: the flattened patterns from the EventPattern (see flatten_pattern)
+        :return: a list of flattened dictionaries, each with no nested dict inside, flattened to a single level
+        """
+        patterns_keys = {key for keys in patterns for key in keys}
+
+        def _is_key_in_patterns(key: str) -> bool:
+            return key is None or any(pattern_key.startswith(key) for pattern_key in patterns_keys)
+
+        def _traverse(_object: dict, array=None, parent_key=None) -> list:
+            if isinstance(_object, dict):
+                for key, values in _object.items():
+                    # We update the parent key so that {"key1": {"key2": ""}} becomes "key1.key2"
+                    _parent_key = f"{parent_key}.{key}" if parent_key else key
+
+                    # we make sure that we are building only the relevant parts of the payload related to the pattern
+                    # the payload could be very complex, and the pattern only applies to part of it
+                    if _is_key_in_patterns(_parent_key):
+                        array = _traverse(values, array, _parent_key)
+
+            elif isinstance(_object, list):
+                if not _object:
+                    return array
+                array = [i for value in _object for i in _traverse(value, array, parent_key)]
+            else:
+                array = [{**item, parent_key: _object} for item in array]
+            return array
+
+        return _traverse(payload, array=[{}], parent_key=None)
+
+
+class EventPatternCompiler:
+    def __init__(self):
+        self.error_prefix = "Event pattern is not valid. Reason: "
+
+    def compile_event_pattern(self, event_pattern: str | dict) -> dict[str, t.Any]:
+        if isinstance(event_pattern, str):
+            try:
+                event_pattern = json.loads(event_pattern)
+                if not isinstance(event_pattern, dict):
+                    raise InvalidEventPatternException(
+                        f"{self.error_prefix}Filter is not an object"
+                    )
+            except json.JSONDecodeError:
+                # this error message is not in parity, as it is tightly coupled to AWS parsing engine
+                raise InvalidEventPatternException(f"{self.error_prefix}Filter is not valid JSON")
+
+        aggregated_rules, combinations = self.aggregate_rules(event_pattern)
+
+        for rules in aggregated_rules:
+            for rule in rules:
+                self._validate_rule(rule)
+
+        return event_pattern
+
+    def aggregate_rules(self, event_pattern: dict[str, t.Any]) -> tuple[list[list[t.Any]], int]:
+        """
+        This method evaluates the event pattern recursively and returns a list of lists of rules.
+        It also calculates the number of rule combinations, which depends on the nesting of the rules.
+        Example:
+        nested_event_pattern = {
+            "key_a": {
+                "key_b": {
+                    "key_c": ["value_one", "value_two", "value_three", "value_four"]
+                }
+            },
+            "key_d": {
+                "key_e": ["value_one", "value_two", "value_three"]
+            }
+        }
+        This function then iterates over the values of the top-level keys of the event pattern: ("key_a", "key_d")
+        If the iterated value is not a list, it is a nested property: we call this method on the value, adding a
+        level to the depth to keep track of how deep the key is.
+        If the value is a list, it means it contains rules: we append this list of rules to _rules, and
+        calculate the combinations it adds.
+        For the example event pattern containing nested properties, we calculate it this way:
+        the first array has four values under a three-level nested key, and the second has three values under a
+        two-level nested key, giving (3 x 4) x (2 x 3) = 72.
+        The return value would be:
+        [["value_one", "value_two", "value_three", "value_four"], ["value_one", "value_two", "value_three"]]
+        This allows us to later iterate over the list of rules in an easy way, verifying only their conditions.
+
+        :param event_pattern: a dict, starting at the Event Pattern
+        :return: a tuple with a list of lists of rules and the calculated number of combinations
+        """
+
+        def _inner(
+            pattern_elements: dict[str, t.Any], depth: int = 1, combinations: int = 1
+        ) -> tuple[list[list[t.Any]], int]:
+            _rules = []
+            for key, _value in pattern_elements.items():
+                if isinstance(_value, dict):
+                    # From AWS docs: "unlike attribute-based policies, payload-based policies support property nesting."
+ sub_rules, combinations = _inner( + _value, depth=depth + 1, combinations=combinations + ) + _rules.extend(sub_rules) + elif isinstance(_value, list): + if not _value: + raise InvalidEventPatternException( + f"{self.error_prefix}Empty arrays are not allowed" + ) + + current_combination = 0 + if key == "$or": + for val in _value: + sub_rules, or_combinations = _inner( + val, depth=depth, combinations=combinations + ) + _rules.extend(sub_rules) + current_combination += or_combinations + + combinations = current_combination + else: + _rules.append(_value) + combinations = combinations * len(_value) * depth + else: + raise InvalidEventPatternException( + f'{self.error_prefix}"{key}" must be an object or an array' + ) + + return _rules, combinations + + return _inner(event_pattern) + + def _validate_rule(self, rule: t.Any, from_: str | None = None) -> None: + match rule: + case None | str() | bool(): + return + + case int() | float(): + # TODO: AWS says they support only from -10^9 to 10^9 but seems to accept it, so we just return + # if rule <= -1000000000 or rule >= 1000000000: + # raise "" + return + + case {**kwargs}: + if len(kwargs) != 1: + raise InvalidEventPatternException( + f"{self.error_prefix}Only one key allowed in match expression" + ) + + operator, value = None, None + for k, v in kwargs.items(): + operator, value = k, v + + if operator in ( + "prefix", + "suffix", + ): + if from_ == "anything-but": + if isinstance(value, dict): + raise InvalidEventPatternException( + f"{self.error_prefix}Value of {from_} must be an array or single string/number value." + ) + + if not self._is_str_or_list_of_str(value): + raise InvalidEventPatternException( + f"{self.error_prefix}prefix/suffix match pattern must be a string" + ) + elif not value: + raise InvalidEventPatternException( + f"{self.error_prefix}Null prefix/suffix not allowed" + ) + + elif isinstance(value, dict): + for inner_operator in value.keys(): + if inner_operator != "equals-ignore-case": + raise InvalidEventPatternException( + f"{self.error_prefix}Unsupported anything-but pattern: {inner_operator}" + ) + + elif not isinstance(value, str): + raise InvalidEventPatternException( + f"{self.error_prefix}{operator} match pattern must be a string" + ) + return + + elif operator == "equals-ignore-case": + if from_ == "anything-but": + if not self._is_str_or_list_of_str(value): + raise InvalidEventPatternException( + f"{self.error_prefix}Inside {from_}/{operator} list, number|start|null|boolean is not supported." + ) + elif not isinstance(value, str): + raise InvalidEventPatternException( + f"{self.error_prefix}{operator} match pattern must be a string" + ) + return + + elif operator == "anything-but": + # anything-but can actually contain any kind of simple rule (str, number, and list) + if isinstance(value, list): + for v in value: + self._validate_rule(v) + + return + + # or have a nested `prefix`, `suffix` or `equals-ignore-case` pattern + elif isinstance(value, dict): + for inner_operator in value.keys(): + if inner_operator not in ( + "prefix", + "equals-ignore-case", + "suffix", + "wildcard", + ): + raise InvalidEventPatternException( + f"{self.error_prefix}Unsupported anything-but pattern: {inner_operator}" + ) + + self._validate_rule(value, from_="anything-but") + return + + elif operator == "exists": + if not isinstance(value, bool): + raise InvalidEventPatternException( + f"{self.error_prefix}exists match pattern must be either true or false." 
+ ) + return + + elif operator == "numeric": + self._validate_numeric_condition(value) + + elif operator == "cidr": + self._validate_cidr_condition(value) + + elif operator == "wildcard": + if from_ == "anything-but" and isinstance(value, list): + for v in value: + self._validate_wildcard(v) + else: + self._validate_wildcard(value) + + else: + raise InvalidEventPatternException( + f"{self.error_prefix}Unrecognized match type {operator}" + ) + + case _: + raise InvalidEventPatternException( + f"{self.error_prefix}Match value must be String, number, true, false, or null" + ) + + def _validate_numeric_condition(self, value): + if not isinstance(value, list): + raise InvalidEventPatternException( + f"{self.error_prefix}Value of numeric must be an array." + ) + if not value: + raise InvalidEventPatternException( + f"{self.error_prefix}Invalid member in numeric match: ]" + ) + num_values = value[::-1] + + operator = num_values.pop() + if not isinstance(operator, str): + raise InvalidEventPatternException( + f"{self.error_prefix}Invalid member in numeric match: {operator}" + ) + elif operator not in ("<", "<=", "=", ">", ">="): + raise InvalidEventPatternException( + f"{self.error_prefix}Unrecognized numeric range operator: {operator}" + ) + + value = num_values.pop() if num_values else None + if not isinstance(value, (int, float)): + exc_operator = "equals" if operator == "=" else operator + raise InvalidEventPatternException( + f"{self.error_prefix}Value of {exc_operator} must be numeric" + ) + + if not num_values: + return + + if operator not in (">", ">="): + raise InvalidEventPatternException( + f"{self.error_prefix}Too many elements in numeric expression" + ) + + second_operator = num_values.pop() + if not isinstance(second_operator, str): + raise InvalidEventPatternException( + f"{self.error_prefix}Bad value in numeric range: {second_operator}" + ) + elif second_operator not in ("<", "<="): + raise InvalidEventPatternException( + f"{self.error_prefix}Bad numeric range operator: {second_operator}" + ) + + second_value = num_values.pop() if num_values else None + if not isinstance(second_value, (int, float)): + exc_operator = "equals" if second_operator == "=" else second_operator + raise InvalidEventPatternException( + f"{self.error_prefix}Value of {exc_operator} must be numeric" + ) + + elif second_value <= value: + raise InvalidEventPatternException(f"{self.error_prefix}Bottom must be less than top") + + elif num_values: + raise InvalidEventPatternException( + f"{self.error_prefix}Too many terms in numeric range expression" + ) + + def _validate_wildcard(self, value: t.Any): + if not isinstance(value, str): + raise InvalidEventPatternException( + f"{self.error_prefix}wildcard match pattern must be a string" + ) + # TODO: properly calculate complexity of wildcard + # https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-create-pattern-operators.html#eb-filtering-wildcard-matching-complexity + # > calculate complexity of repeating character sequences that occur after a wildcard character + if "**" in value: + raise InvalidEventPatternException( + f"{self.error_prefix}Consecutive wildcard characters at pos {value.index('**') + 1}" + ) + + if value.count("*") > 5: + raise InvalidEventPatternException( + f"{self.error_prefix}Rule is too complex - try using fewer wildcard characters or fewer repeating character sequences after a wildcard character" + ) + + def _validate_cidr_condition(self, value): + if not isinstance(value, str): + # `cidr` returns the prefix error + raise 
InvalidEventPatternException( + f"{self.error_prefix}prefix match pattern must be a string" + ) + ip_and_mask = value.split("/") + if len(ip_and_mask) != 2: + raise InvalidEventPatternException( + f"{self.error_prefix}Malformed CIDR, one '/' required" + ) + ip_addr, mask = value.split("/") + try: + int(mask) + except ValueError: + raise InvalidEventPatternException( + f"{self.error_prefix}Malformed CIDR, mask bits must be an integer" + ) + try: + ipaddress.ip_network(value) + except ValueError: + raise InvalidEventPatternException( + f"{self.error_prefix}Nonstandard IP address: {ip_addr}" + ) + + @staticmethod + def _is_str_or_list_of_str(value: t.Any) -> bool: + if not isinstance(value, (str, list)): + return False + if isinstance(value, list) and not all(isinstance(v, str) for v in value): + return False + + return True diff --git a/localstack-core/localstack/services/events/models.py b/localstack-core/localstack/services/events/models.py new file mode 100644 index 0000000000000..95e64ece83711 --- /dev/null +++ b/localstack-core/localstack/services/events/models.py @@ -0,0 +1,340 @@ +import uuid +from dataclasses import dataclass, field +from datetime import datetime, timezone +from enum import Enum +from typing import Literal, Optional, TypeAlias, TypedDict + +from localstack.aws.api.core import ServiceException +from localstack.aws.api.events import ( + ApiDestinationDescription, + ApiDestinationHttpMethod, + ApiDestinationInvocationRateLimitPerSecond, + ApiDestinationName, + ApiDestinationState, + ArchiveDescription, + ArchiveName, + ArchiveState, + Arn, + ConnectionArn, + ConnectionAuthorizationType, + ConnectionDescription, + ConnectionName, + ConnectionState, + ConnectivityResourceParameters, + CreateConnectionAuthRequestParameters, + CreatedBy, + EventBusName, + EventPattern, + EventResourceList, + EventSourceName, + EventTime, + HttpsEndpoint, + ManagedBy, + ReplayDescription, + ReplayDestination, + ReplayName, + ReplayState, + ReplayStateReason, + RetentionDays, + RoleArn, + RuleDescription, + RuleName, + RuleState, + ScheduleExpression, + TagList, + Target, + TargetId, + Timestamp, +) +from localstack.services.stores import ( + AccountRegionBundle, + BaseStore, + CrossRegionAttribute, + LocalAttribute, +) +from localstack.utils.aws.arns import ( + event_bus_arn, + events_api_destination_arn, + events_archive_arn, + events_connection_arn, + events_replay_arn, + events_rule_arn, +) +from localstack.utils.strings import short_uid +from localstack.utils.tagging import TaggingService + +TargetDict = dict[TargetId, Target] + + +class ValidationException(ServiceException): + code: str = "ValidationException" + sender_fault: bool = True + status_code: int = 400 + + +class InvalidEventPatternException(Exception): + reason: str + + def __init__(self, reason=None, message=None) -> None: + self.reason = reason + self.message = message or f"Event pattern is not valid. 
Reason: {reason}" + + +FormattedEvent = TypedDict( # functional syntax required due to name-name keys + "FormattedEvent", + { + "version": str, + "id": str, + "detail-type": Optional[str], + "source": Optional[EventSourceName], + "account": str, + "time": EventTime, + "region": str, + "resources": Optional[EventResourceList], + "detail": dict[str, str | dict], + "replay-name": Optional[ReplayName], + "event-bus-name": EventBusName, + }, +) + + +FormattedEventDict = dict[str, FormattedEvent] +FormattedEventList = list[FormattedEvent] + +TransformedEvent: TypeAlias = FormattedEvent | dict | str + + +class ResourceType(Enum): + EVENT_BUS = "event_bus" + RULE = "rule" + + +class Condition(TypedDict): + Type: Literal["StringEquals"] + Key: Literal["aws:PrincipalOrgID"] + Value: str + + +class Statement(TypedDict): + Sid: str + Effect: str + Principal: str | dict[str, str] + Action: str + Resource: str + Condition: Condition + + +class ResourcePolicy(TypedDict): + Version: str + Statement: list[Statement] + + +@dataclass +class Rule: + name: RuleName + region: str + account_id: str + schedule_expression: Optional[ScheduleExpression] = None + event_pattern: Optional[EventPattern] = None + state: Optional[RuleState] = None + description: Optional[RuleDescription] = None + role_arn: Optional[RoleArn] = None + tags: TagList = field(default_factory=list) + event_bus_name: EventBusName = "default" + targets: TargetDict = field(default_factory=dict) + managed_by: Optional[ManagedBy] = None # can only be set by AWS services + created_by: CreatedBy = field(init=False) + + def __post_init__(self): + self.created_by = self.account_id + if self.tags is None: + self.tags = [] + if self.targets is None: + self.targets = {} + if self.state is None: + self.state = RuleState.ENABLED + + @property + def arn(self) -> Arn: + return events_rule_arn(self.name, self.account_id, self.region, self.event_bus_name) + + +RuleDict = dict[RuleName, Rule] + + +@dataclass +class Replay: + name: str + region: str + account_id: str + event_source_arn: Arn + destination: ReplayDestination # Event Bus Arn or Rule Arns + event_start_time: Timestamp + event_end_time: Timestamp + description: Optional[ReplayDescription] = None + state: Optional[ReplayState] = None + state_reason: Optional[ReplayStateReason] = None + event_last_replayed_time: Optional[Timestamp] = None + replay_start_time: Optional[Timestamp] = None + replay_end_time: Optional[Timestamp] = None + + @property + def arn(self) -> Arn: + return events_replay_arn(self.name, self.account_id, self.region) + + +ReplayDict = dict[ReplayName, Replay] + + +@dataclass +class Archive: + name: ArchiveName + region: str + account_id: str + event_source_arn: Arn + description: ArchiveDescription = None + event_pattern: EventPattern = None + retention_days: RetentionDays = None + state: ArchiveState = ArchiveState.DISABLED + creation_time: Timestamp = None + size_bytes: int = 0 # TODO how to deal with updating this value? 
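+    # One possible answer to the TODO above (a sketch, not implemented here): grow the
+    # counter on every write, e.g. `self.size_bytes += len(json.dumps(event).encode("utf-8"))`
+    # for each event appended to `events`, and reset it when the archive is cleared.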
+    events: FormattedEventDict = field(default_factory=dict)
+
+    @property
+    def arn(self) -> Arn:
+        return events_archive_arn(self.name, self.account_id, self.region)
+
+    @property
+    def event_count(self) -> int:
+        return len(self.events)
+
+
+ArchiveDict = dict[ArchiveName, Archive]
+
+
+@dataclass
+class EventBus:
+    name: EventBusName
+    region: str
+    account_id: str
+    event_source_name: Optional[str] = None
+    description: Optional[str] = None
+    tags: TagList = field(default_factory=list)
+    policy: Optional[ResourcePolicy] = None
+    rules: RuleDict = field(default_factory=dict)
+    creation_time: Timestamp = field(init=False)
+    last_modified_time: Timestamp = field(init=False)
+
+    def __post_init__(self):
+        self.creation_time = datetime.now(timezone.utc)
+        self.last_modified_time = datetime.now(timezone.utc)
+        if self.rules is None:
+            self.rules = {}
+        if self.tags is None:
+            self.tags = []
+
+    @property
+    def arn(self) -> Arn:
+        return event_bus_arn(self.name, self.account_id, self.region)
+
+
+EventBusDict = dict[EventBusName, EventBus]
+
+
+@dataclass
+class Connection:
+    name: ConnectionName
+    region: str
+    account_id: str
+    authorization_type: ConnectionAuthorizationType
+    auth_parameters: CreateConnectionAuthRequestParameters
+    state: ConnectionState
+    secret_arn: Arn
+    description: ConnectionDescription | None = None
+    invocation_connectivity_parameters: ConnectivityResourceParameters | None = None
+    creation_time: Timestamp = field(init=False)
+    last_modified_time: Timestamp = field(init=False)
+    last_authorized_time: Timestamp = field(init=False)
+    tags: TagList = field(default_factory=list)
+    # default_factory ensures every instance gets a fresh id; a plain class-level
+    # default would be evaluated once and shared by all connections
+    id: str = field(default_factory=lambda: str(uuid.uuid4()))
+
+    def __post_init__(self):
+        timestamp_now = datetime.now(timezone.utc)
+        self.creation_time = timestamp_now
+        self.last_modified_time = timestamp_now
+        self.last_authorized_time = timestamp_now
+        if self.tags is None:
+            self.tags = []
+
+    @property
+    def arn(self) -> Arn:
+        return events_connection_arn(self.name, self.id, self.account_id, self.region)
+
+
+ConnectionDict = dict[ConnectionName, Connection]
+
+
+@dataclass
+class ApiDestination:
+    name: ApiDestinationName
+    region: str
+    account_id: str
+    connection_arn: ConnectionArn
+    invocation_endpoint: HttpsEndpoint
+    http_method: ApiDestinationHttpMethod
+    state: ApiDestinationState
+    _invocation_rate_limit_per_second: ApiDestinationInvocationRateLimitPerSecond | None = None
+    description: ApiDestinationDescription | None = None
+    creation_time: Timestamp = field(init=False)
+    last_modified_time: Timestamp = field(init=False)
+    last_authorized_time: Timestamp = field(init=False)
+    tags: TagList = field(default_factory=list)
+    # default_factory for the same reason as in Connection above
+    id: str = field(default_factory=short_uid)
+
+    def __post_init__(self):
+        timestamp_now = datetime.now(timezone.utc)
+        self.creation_time = timestamp_now
+        self.last_modified_time = timestamp_now
+        self.last_authorized_time = timestamp_now
+        if self.tags is None:
+            self.tags = []
+
+    @property
+    def arn(self) -> Arn:
+        return events_api_destination_arn(self.name, self.id, self.account_id, self.region)
+
+    @property
+    def invocation_rate_limit_per_second(self) -> int:
+        return self._invocation_rate_limit_per_second or 300  # Default value
+
+    @invocation_rate_limit_per_second.setter
+    def invocation_rate_limit_per_second(
+        self, value: ApiDestinationInvocationRateLimitPerSecond | None
+    ):
+        self._invocation_rate_limit_per_second = value
+
+
+ApiDestinationDict = dict[ApiDestinationName, ApiDestination]
+
+
+class EventsStore(BaseStore):
+    # Map of eventbus names to eventbus objects.
The name MUST be unique per account and region (works with AccountRegionBundle) + event_buses: EventBusDict = LocalAttribute(default=dict) + + # Map of archive names to archive objects. The name MUST be unique per account and region (works with AccountRegionBundle) + archives: ArchiveDict = LocalAttribute(default=dict) + + # Map of replay names to replay objects. The name MUST be unique per account and region (works with AccountRegionBundle) + replays: ReplayDict = LocalAttribute(default=dict) + + # Map of connection names to connection objects. + connections: ConnectionDict = LocalAttribute(default=dict) + + # Map of api destination names to api destination objects + api_destinations: ApiDestinationDict = LocalAttribute(default=dict) + + # Maps resource ARN to tags + TAGS: TaggingService = CrossRegionAttribute(default=TaggingService) + + +events_stores = AccountRegionBundle("events", EventsStore) diff --git a/localstack-core/localstack/services/events/provider.py b/localstack-core/localstack/services/events/provider.py new file mode 100644 index 0000000000000..91e95b5100374 --- /dev/null +++ b/localstack-core/localstack/services/events/provider.py @@ -0,0 +1,1984 @@ +import base64 +import json +import logging +import re +from typing import Callable, Optional + +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.config import TagsList +from localstack.aws.api.events import ( + Action, + ApiDestinationDescription, + ApiDestinationHttpMethod, + ApiDestinationInvocationRateLimitPerSecond, + ApiDestinationName, + ApiDestinationResponseList, + ArchiveDescription, + ArchiveName, + ArchiveResponseList, + ArchiveState, + Arn, + Boolean, + CancelReplayResponse, + Condition, + ConnectionArn, + ConnectionAuthorizationType, + ConnectionDescription, + ConnectionName, + ConnectionResponseList, + ConnectionState, + ConnectivityResourceParameters, + CreateApiDestinationResponse, + CreateArchiveResponse, + CreateConnectionAuthRequestParameters, + CreateConnectionResponse, + CreateEventBusResponse, + DeadLetterConfig, + DeleteApiDestinationResponse, + DeleteArchiveResponse, + DeleteConnectionResponse, + DescribeApiDestinationResponse, + DescribeArchiveResponse, + DescribeConnectionResponse, + DescribeEventBusResponse, + DescribeReplayResponse, + DescribeRuleResponse, + EndpointId, + EventBusArn, + EventBusDescription, + EventBusList, + EventBusName, + EventBusNameOrArn, + EventPattern, + EventsApi, + EventSourceName, + HttpsEndpoint, + InternalException, + KmsKeyIdentifier, + LimitMax100, + ListApiDestinationsResponse, + ListArchivesResponse, + ListConnectionsResponse, + ListEventBusesResponse, + ListReplaysResponse, + ListRuleNamesByTargetResponse, + ListRulesResponse, + ListTagsForResourceResponse, + ListTargetsByRuleResponse, + NextToken, + NonPartnerEventBusName, + Principal, + PutEventsRequestEntry, + PutEventsRequestEntryList, + PutEventsResponse, + PutEventsResultEntry, + PutEventsResultEntryList, + PutPartnerEventsRequestEntryList, + PutPartnerEventsResponse, + PutRuleResponse, + PutTargetsResponse, + RemoveTargetsResponse, + ReplayDescription, + ReplayDestination, + ReplayList, + ReplayName, + ReplayState, + ResourceAlreadyExistsException, + ResourceNotFoundException, + RetentionDays, + RoleArn, + RuleDescription, + RuleName, + RuleResponseList, + RuleState, + ScheduleExpression, + StartReplayResponse, + StatementId, + String, + TagKeyList, + TagList, + TagResourceResponse, + Target, + TargetArn, + TargetId, + TargetIdList, + TargetList, + TestEventPatternResponse, + 
Timestamp, + UntagResourceResponse, + UpdateApiDestinationResponse, + UpdateArchiveResponse, + UpdateConnectionAuthRequestParameters, + UpdateConnectionResponse, +) +from localstack.aws.api.events import ApiDestination as ApiTypeApiDestination +from localstack.aws.api.events import Archive as ApiTypeArchive +from localstack.aws.api.events import Connection as ApiTypeConnection +from localstack.aws.api.events import EventBus as ApiTypeEventBus +from localstack.aws.api.events import Replay as ApiTypeReplay +from localstack.aws.api.events import Rule as ApiTypeRule +from localstack.services.events.api_destination import ( + APIDestinationService, + ApiDestinationServiceDict, +) +from localstack.services.events.archive import ArchiveService, ArchiveServiceDict +from localstack.services.events.connection import ( + ConnectionService, + ConnectionServiceDict, +) +from localstack.services.events.event_bus import EventBusService, EventBusServiceDict +from localstack.services.events.models import ( + ApiDestination, + ApiDestinationDict, + Archive, + ArchiveDict, + Connection, + ConnectionDict, + EventBus, + EventBusDict, + EventsStore, + FormattedEvent, + Replay, + ReplayDict, + ResourceType, + Rule, + RuleDict, + TargetDict, + ValidationException, + events_stores, +) +from localstack.services.events.replay import ReplayService, ReplayServiceDict +from localstack.services.events.rule import RuleService, RuleServiceDict +from localstack.services.events.scheduler import JobScheduler +from localstack.services.events.target import ( + TargetSender, + TargetSenderDict, + TargetSenderFactory, +) +from localstack.services.events.utils import ( + TARGET_ID_PATTERN, + extract_connection_name, + extract_event_bus_name, + extract_region_and_account_id, + format_event, + get_resource_type, + get_trace_header_encoded_region_account, + is_archive_arn, + recursive_remove_none_values_from_dict, +) +from localstack.services.plugins import ServiceLifecycleHook +from localstack.utils.common import truncate +from localstack.utils.event_matcher import matches_event +from localstack.utils.strings import long_uid +from localstack.utils.time import TIMESTAMP_FORMAT_TZ, timestamp +from localstack.utils.xray.trace_header import TraceHeader + +from .analytics import InvocationStatus, rule_invocation + +LOG = logging.getLogger(__name__) + +ARCHIVE_TARGET_ID_NAME_PATTERN = re.compile(r"^Events-Archive-(?P[a-zA-Z0-9_-]+)$") + + +def decode_next_token(token: NextToken) -> int: + """Decode a pagination token from base64 to integer.""" + return int.from_bytes(base64.b64decode(token), "big") + + +def encode_next_token(token: int) -> NextToken: + """Encode a pagination token to base64 from integer.""" + return base64.b64encode(token.to_bytes(128, "big")).decode("utf-8") + + +def get_filtered_dict(name_prefix: str, input_dict: dict) -> dict: + """Filter dictionary by prefix.""" + return {name: value for name, value in input_dict.items() if name.startswith(name_prefix)} + + +def validate_event(event: PutEventsRequestEntry) -> None | PutEventsResultEntry: + if not event.get("Source"): + return { + "ErrorCode": "InvalidArgument", + "ErrorMessage": "Parameter Source is not valid. Reason: Source is a required argument.", + } + elif not event.get("DetailType"): + return { + "ErrorCode": "InvalidArgument", + "ErrorMessage": "Parameter DetailType is not valid. Reason: DetailType is a required argument.", + } + elif not event.get("Detail"): + return { + "ErrorCode": "InvalidArgument", + "ErrorMessage": "Parameter Detail is not valid. 
Reason: Detail is a required argument.", + } + elif event.get("Detail") and len(event["Detail"]) >= 262144: + raise ValidationException("Total size of the entries in the request is over the limit.") + elif event.get("Detail"): + try: + json_detail = json.loads(event.get("Detail")) + if isinstance(json_detail, dict): + return + except json.JSONDecodeError: + pass + + return { + "ErrorCode": "MalformedDetail", + "ErrorMessage": "Detail is malformed.", + } + + +def check_unique_tags(tags: TagsList) -> None: + unique_tag_keys = {tag["Key"] for tag in tags} + if len(unique_tag_keys) < len(tags): + raise ValidationException("Invalid parameter: Duplicated keys are not allowed.") + + +class EventsProvider(EventsApi, ServiceLifecycleHook): + # api methods are grouped by resource type and sorted in alphabetical order + # functions in each group is sorted alphabetically + def __init__(self): + self._event_bus_services_store: EventBusServiceDict = {} + self._rule_services_store: RuleServiceDict = {} + self._target_sender_store: TargetSenderDict = {} + self._archive_service_store: ArchiveServiceDict = {} + self._replay_service_store: ReplayServiceDict = {} + self._connection_service_store: ConnectionServiceDict = {} + self._api_destination_service_store: ApiDestinationServiceDict = {} + + def on_before_start(self): + JobScheduler.start() + + def on_before_stop(self): + JobScheduler.shutdown() + + ################## + # API Destinations + ################## + @handler("CreateApiDestination") + def create_api_destination( + self, + context: RequestContext, + name: ApiDestinationName, + connection_arn: ConnectionArn, + invocation_endpoint: HttpsEndpoint, + http_method: ApiDestinationHttpMethod, + description: ApiDestinationDescription = None, + invocation_rate_limit_per_second: ApiDestinationInvocationRateLimitPerSecond = None, + **kwargs, + ) -> CreateApiDestinationResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if name in store.api_destinations: + raise ResourceAlreadyExistsException(f"An api-destination '{name}' already exists.") + APIDestinationService.validate_input(name, connection_arn, http_method, invocation_endpoint) + connection_name = extract_connection_name(connection_arn) + connection = self.get_connection(connection_name, store) + api_destination_service = self.create_api_destinations_service( + name, + region, + account_id, + connection_arn, + connection, + invocation_endpoint, + http_method, + invocation_rate_limit_per_second, + description, + ) + store.api_destinations[api_destination_service.api_destination.name] = ( + api_destination_service.api_destination + ) + + response = CreateApiDestinationResponse( + ApiDestinationArn=api_destination_service.arn, + ApiDestinationState=api_destination_service.state, + CreationTime=api_destination_service.creation_time, + LastModifiedTime=api_destination_service.last_modified_time, + ) + return response + + @handler("DescribeApiDestination") + def describe_api_destination( + self, context: RequestContext, name: ApiDestinationName, **kwargs + ) -> DescribeApiDestinationResponse: + store = self.get_store(context.region, context.account_id) + api_destination = self.get_api_destination(name, store) + + response = self._api_destination_to_api_type_api_destination(api_destination) + return response + + @handler("DeleteApiDestination") + def delete_api_destination( + self, context: RequestContext, name: ApiDestinationName, **kwargs + ) -> DeleteApiDestinationResponse: + store = 
self.get_store(context.region, context.account_id) + if api_destination := self.get_api_destination(name, store): + del self._api_destination_service_store[api_destination.arn] + del store.api_destinations[name] + del store.TAGS[api_destination.arn] + + return DeleteApiDestinationResponse() + + @handler("ListApiDestinations") + def list_api_destinations( + self, + context: RequestContext, + name_prefix: ApiDestinationName = None, + connection_arn: ConnectionArn = None, + next_token: NextToken = None, + limit: LimitMax100 = None, + **kwargs, + ) -> ListApiDestinationsResponse: + store = self.get_store(context.region, context.account_id) + api_destinations = ( + get_filtered_dict(name_prefix, store.api_destinations) + if name_prefix + else store.api_destinations + ) + limited_rules, next_token = self._get_limited_dict_and_next_token( + api_destinations, next_token, limit + ) + + response = ListApiDestinationsResponse( + ApiDestinations=list( + self._api_destination_dict_to_api_destination_response_list(limited_rules) + ) + ) + if next_token is not None: + response["NextToken"] = next_token + return response + + @handler("UpdateApiDestination") + def update_api_destination( + self, + context: RequestContext, + name: ApiDestinationName, + description: ApiDestinationDescription = None, + connection_arn: ConnectionArn = None, + invocation_endpoint: HttpsEndpoint = None, + http_method: ApiDestinationHttpMethod = None, + invocation_rate_limit_per_second: ApiDestinationInvocationRateLimitPerSecond = None, + **kwargs, + ) -> UpdateApiDestinationResponse: + store = self.get_store(context.region, context.account_id) + api_destination = self.get_api_destination(name, store) + api_destination_service = self._api_destination_service_store[api_destination.arn] + if connection_arn: + connection_name = extract_connection_name(connection_arn) + connection = self.get_connection(connection_name, store) + else: + connection = api_destination_service.connection + api_destination_service.update( + connection, + invocation_endpoint, + http_method, + invocation_rate_limit_per_second, + description, + ) + + response = UpdateApiDestinationResponse( + ApiDestinationArn=api_destination_service.arn, + ApiDestinationState=api_destination_service.state, + CreationTime=api_destination_service.creation_time, + LastModifiedTime=api_destination_service.last_modified_time, + ) + return response + + ############# + # Connections + ############# + @handler("CreateConnection") + def create_connection( + self, + context: RequestContext, + name: ConnectionName, + authorization_type: ConnectionAuthorizationType, + auth_parameters: CreateConnectionAuthRequestParameters, + description: ConnectionDescription = None, + invocation_connectivity_parameters: ConnectivityResourceParameters = None, + kms_key_identifier: KmsKeyIdentifier = None, + **kwargs, + ) -> CreateConnectionResponse: + # TODO add support for kms_key_identifier + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if name in store.connections: + raise ResourceAlreadyExistsException(f"Connection {name} already exists.") + connection_service = self.create_connection_service( + name, + region, + account_id, + authorization_type, + auth_parameters, + description, + invocation_connectivity_parameters, + ) + store.connections[connection_service.connection.name] = connection_service.connection + + response = CreateConnectionResponse( + ConnectionArn=connection_service.arn, + ConnectionState=connection_service.state, + 
CreationTime=connection_service.creation_time, + LastModifiedTime=connection_service.last_modified_time, + ) + return response + + @handler("DescribeConnection") + def describe_connection( + self, context: RequestContext, name: ConnectionName, **kwargs + ) -> DescribeConnectionResponse: + store = self.get_store(context.region, context.account_id) + connection = self.get_connection(name, store) + + response = self._connection_to_api_type_connection(connection) + return response + + @handler("DeleteConnection") + def delete_connection( + self, context: RequestContext, name: ConnectionName, **kwargs + ) -> DeleteConnectionResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if connection := self.get_connection(name, store): + connection_service = self._connection_service_store.pop(connection.arn) + connection_service.delete() + del store.connections[name] + del store.TAGS[connection.arn] + + response = DeleteConnectionResponse( + ConnectionArn=connection.arn, + ConnectionState=connection.state, + CreationTime=connection.creation_time, + LastModifiedTime=connection.last_modified_time, + LastAuthorizedTime=connection.last_authorized_time, + ) + return response + + @handler("ListConnections") + def list_connections( + self, + context: RequestContext, + name_prefix: ConnectionName = None, + connection_state: ConnectionState = None, + next_token: NextToken = None, + limit: LimitMax100 = None, + **kwargs, + ) -> ListConnectionsResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + connections = ( + get_filtered_dict(name_prefix, store.connections) if name_prefix else store.connections + ) + limited_rules, next_token = self._get_limited_dict_and_next_token( + connections, next_token, limit + ) + + response = ListConnectionsResponse( + Connections=list(self._connection_dict_to_connection_response_list(limited_rules)) + ) + if next_token is not None: + response["NextToken"] = next_token + return response + + @handler("UpdateConnection") + def update_connection( + self, + context: RequestContext, + name: ConnectionName, + description: ConnectionDescription = None, + authorization_type: ConnectionAuthorizationType = None, + auth_parameters: UpdateConnectionAuthRequestParameters = None, + invocation_connectivity_parameters: ConnectivityResourceParameters = None, + kms_key_identifier: KmsKeyIdentifier = None, + **kwargs, + ) -> UpdateConnectionResponse: + # TODO add support for kms_key_identifier + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + connection = self.get_connection(name, store) + connection_service = self._connection_service_store[connection.arn] + connection_service.update( + description, authorization_type, auth_parameters, invocation_connectivity_parameters + ) + + response = UpdateConnectionResponse( + ConnectionArn=connection_service.arn, + ConnectionState=connection_service.state, + CreationTime=connection_service.creation_time, + LastModifiedTime=connection_service.last_modified_time, + LastAuthorizedTime=connection_service.last_authorized_time, + ) + return response + + ########## + # EventBus + ########## + + @handler("CreateEventBus") + def create_event_bus( + self, + context: RequestContext, + name: EventBusName, + event_source_name: EventSourceName = None, + description: EventBusDescription = None, + kms_key_identifier: KmsKeyIdentifier = None, + dead_letter_config: DeadLetterConfig = None, + tags: 
TagList = None, + **kwargs, + ) -> CreateEventBusResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if name in store.event_buses: + raise ResourceAlreadyExistsException(f"Event bus {name} already exists.") + event_bus_service = self.create_event_bus_service( + name, region, account_id, event_source_name, description, tags + ) + store.event_buses[event_bus_service.event_bus.name] = event_bus_service.event_bus + + if tags: + self.tag_resource(context, event_bus_service.arn, tags) + + response = CreateEventBusResponse( + EventBusArn=event_bus_service.arn, + ) + if description := getattr(event_bus_service.event_bus, "description", None): + response["Description"] = description + return response + + @handler("DeleteEventBus") + def delete_event_bus(self, context: RequestContext, name: EventBusName, **kwargs) -> None: + if name == "default": + raise ValidationException("Cannot delete event bus default.") + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + try: + if event_bus := self.get_event_bus(name, store): + del self._event_bus_services_store[event_bus.arn] + if rules := event_bus.rules: + self._delete_rule_services(rules) + del store.event_buses[name] + del store.TAGS[event_bus.arn] + except ResourceNotFoundException as error: + return error + + @handler("DescribeEventBus") + def describe_event_bus( + self, context: RequestContext, name: EventBusNameOrArn = None, **kwargs + ) -> DescribeEventBusResponse: + name = extract_event_bus_name(name) + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus = self.get_event_bus(name, store) + + response = self._event_bus_to_api_type_event_bus(event_bus) + return response + + @handler("ListEventBuses") + def list_event_buses( + self, + context: RequestContext, + name_prefix: EventBusName = None, + next_token: NextToken = None, + limit: LimitMax100 = None, + **kwargs, + ) -> ListEventBusesResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_buses = ( + get_filtered_dict(name_prefix, store.event_buses) if name_prefix else store.event_buses + ) + limited_event_buses, next_token = self._get_limited_dict_and_next_token( + event_buses, next_token, limit + ) + + response = ListEventBusesResponse( + EventBuses=self._event_bust_dict_to_event_bus_response_list(limited_event_buses) + ) + if next_token is not None: + response["NextToken"] = next_token + return response + + @handler("PutPermission") + def put_permission( + self, + context: RequestContext, + event_bus_name: NonPartnerEventBusName = None, + action: Action = None, + principal: Principal = None, + statement_id: StatementId = None, + condition: Condition = None, + policy: String = None, + **kwargs, + ) -> None: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus = self.get_event_bus(event_bus_name, store) + event_bus_service = self._event_bus_services_store[event_bus.arn] + event_bus_service.put_permission(action, principal, statement_id, condition, policy) + + @handler("RemovePermission") + def remove_permission( + self, + context: RequestContext, + statement_id: StatementId = None, + remove_all_permissions: Boolean = None, + event_bus_name: NonPartnerEventBusName = None, + **kwargs, + ) -> None: + region = context.region + account_id = context.account_id + store = self.get_store(region, 
account_id) + event_bus = self.get_event_bus(event_bus_name, store) + event_bus_service = self._event_bus_services_store[event_bus.arn] + if remove_all_permissions: + event_bus_service.event_bus.policy = None + return + if not statement_id: + raise ValidationException("Parameter StatementId is required.") + event_bus_service.revoke_put_events_permission(statement_id) + + ####### + # Rules + ####### + @handler("EnableRule") + def enable_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn = None, + **kwargs, + ) -> None: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + rule = self.get_rule(name, event_bus) + rule.state = RuleState.ENABLED + + @handler("DeleteRule") + def delete_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn = None, + force: Boolean = None, + **kwargs, + ) -> None: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + try: + rule = self.get_rule(name, event_bus) + if rule.targets and not force: + raise ValidationException("Rule can't be deleted since it has targets.") + self._delete_rule_services(rule) + del event_bus.rules[name] + del store.TAGS[rule.arn] + except ResourceNotFoundException as error: + return error + + @handler("DescribeRule") + def describe_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn = None, + **kwargs, + ) -> DescribeRuleResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + rule = self.get_rule(name, event_bus) + + response = self._rule_to_api_type_rule(rule) + return response + + @handler("DisableRule") + def disable_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn = None, + **kwargs, + ) -> None: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + rule = self.get_rule(name, event_bus) + rule.state = RuleState.DISABLED + + @handler("ListRules") + def list_rules( + self, + context: RequestContext, + name_prefix: RuleName = None, + event_bus_name: EventBusNameOrArn = None, + next_token: NextToken = None, + limit: LimitMax100 = None, + **kwargs, + ) -> ListRulesResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + rules = get_filtered_dict(name_prefix, event_bus.rules) if name_prefix else event_bus.rules + limited_rules, next_token = self._get_limited_dict_and_next_token(rules, next_token, limit) + + response = ListRulesResponse( + Rules=list(self._rule_dict_to_rule_response_list(limited_rules)) + ) + if next_token is not None: + response["NextToken"] = next_token + return response + + @handler("ListRuleNamesByTarget") + def list_rule_names_by_target( + self, + context: RequestContext, + target_arn: TargetArn, + event_bus_name: 
EventBusNameOrArn = None, + next_token: NextToken = None, + limit: LimitMax100 = None, + **kwargs, + ) -> ListRuleNamesByTargetResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + + # Find all rules that have a target with the specified ARN + matching_rule_names = [] + for rule_name, rule in event_bus.rules.items(): + for target_id, target in rule.targets.items(): + if target["Arn"] == target_arn: + matching_rule_names.append(rule_name) + break # Found a match in this rule, no need to check other targets + + limited_rules, next_token = self._get_limited_list_and_next_token( + matching_rule_names, next_token, limit + ) + + response = ListRuleNamesByTargetResponse(RuleNames=limited_rules) + if next_token is not None: + response["NextToken"] = next_token + + return response + + @handler("PutRule") + def put_rule( + self, + context: RequestContext, + name: RuleName, + schedule_expression: ScheduleExpression = None, + event_pattern: EventPattern = None, + state: RuleState = None, + description: RuleDescription = None, + role_arn: RoleArn = None, + tags: TagList = None, + event_bus_name: EventBusNameOrArn = None, + **kwargs, + ) -> PutRuleResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + existing_rule = event_bus.rules.get(name) + targets = existing_rule.targets if existing_rule else None + rule_service = self.create_rule_service( + name, + region, + account_id, + schedule_expression, + event_pattern, + state, + description, + role_arn, + tags, + event_bus_name, + targets, + ) + event_bus.rules[name] = rule_service.rule + + if tags: + self.tag_resource(context, rule_service.arn, tags) + + response = PutRuleResponse(RuleArn=rule_service.arn) + return response + + @handler("TestEventPattern") + def test_event_pattern( + self, context: RequestContext, event_pattern: EventPattern, event: str, **kwargs + ) -> TestEventPatternResponse: + """Test event pattern uses EventBridge event pattern matching: + https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html + """ + try: + json_event = json.loads(event) + except json.JSONDecodeError: + raise ValidationException("Parameter Event is not valid.") + + mandatory_fields = { + "id", + "account", + "source", + "time", + "region", + "detail-type", + } + # https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_TestEventPattern.html + # the documentation says that `resources` is mandatory, but it is not in reality + + if not isinstance(json_event, dict) or not mandatory_fields.issubset(json_event): + raise ValidationException("Parameter Event is not valid.") + + result = matches_event(event_pattern, event) + return TestEventPatternResponse(Result=result) + + ######### + # Targets + ######### + + @handler("ListTargetsByRule") + def list_targets_by_rule( + self, + context: RequestContext, + rule: RuleName, + event_bus_name: EventBusNameOrArn = None, + next_token: NextToken = None, + limit: LimitMax100 = None, + **kwargs, + ) -> ListTargetsByRuleResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + rule = 
self.get_rule(rule, event_bus) + targets = rule.targets + limited_targets, next_token = self._get_limited_dict_and_next_token( + targets, next_token, limit + ) + + response = ListTargetsByRuleResponse(Targets=list(limited_targets.values())) + if next_token is not None: + response["NextToken"] = next_token + return response + + @handler("PutTargets") + def put_targets( + self, + context: RequestContext, + rule: RuleName, + targets: TargetList, + event_bus_name: EventBusNameOrArn = None, + **kwargs, + ) -> PutTargetsResponse: + region = context.region + account_id = context.account_id + rule_service = self.get_rule_service(region, account_id, rule, event_bus_name) + failed_entries = rule_service.add_targets(targets) + rule_arn = rule_service.arn + rule_name = rule_service.rule.name + for index, target in enumerate(targets): # TODO only add successful targets + target_id = target["Id"] + if len(target_id) > 64: + raise ValidationException( + rf"1 validation error detected: Value '{target_id}' at 'targets.{index + 1}.member.id' failed to satisfy constraint: Member must have length less than or equal to 64" + ) + if not bool(TARGET_ID_PATTERN.match(target_id)): + raise ValidationException( + rf"1 validation error detected: Value '{target_id}' at 'targets.{index + 1}.member.id' failed to satisfy constraint: Member must satisfy regular expression pattern: [\.\-_A-Za-z0-9]+" + ) + self.create_target_sender(target, rule_arn, rule_name, region, account_id) + + if rule_service.schedule_cron: + schedule_job_function = self._get_scheduled_rule_job_function( + account_id, region, rule_service.rule + ) + rule_service.create_schedule_job(schedule_job_function) + response = PutTargetsResponse( + FailedEntryCount=len(failed_entries), FailedEntries=failed_entries + ) + return response + + @handler("RemoveTargets") + def remove_targets( + self, + context: RequestContext, + rule: RuleName, + ids: TargetIdList, + event_bus_name: EventBusNameOrArn = None, + force: Boolean = None, + **kwargs, + ) -> RemoveTargetsResponse: + region = context.region + account_id = context.account_id + rule_service = self.get_rule_service(region, account_id, rule, event_bus_name) + failed_entries = rule_service.remove_targets(ids) + self._delete_target_sender(ids, rule_service.rule) + + response = RemoveTargetsResponse( + FailedEntryCount=len(failed_entries), FailedEntries=failed_entries + ) + return response + + ######### + # Archive + ######### + @handler("CreateArchive") + def create_archive( + self, + context: RequestContext, + archive_name: ArchiveName, + event_source_arn: EventBusArn, + description: ArchiveDescription = None, + event_pattern: EventPattern = None, + retention_days: RetentionDays = None, + kms_key_identifier: KmsKeyIdentifier = None, + **kwargs, + ) -> CreateArchiveResponse: + # TODO add support for kms_key_identifier + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if archive_name in store.archives: + raise ResourceAlreadyExistsException(f"Archive {archive_name} already exists.") + self._check_event_bus_exists(event_source_arn, store) + archive_service = self.create_archive_service( + archive_name, + region, + account_id, + event_source_arn, + description, + event_pattern, + retention_days, + ) + store.archives[archive_service.archive.name] = archive_service.archive + + response = CreateArchiveResponse( + ArchiveArn=archive_service.arn, + State=archive_service.state, + CreationTime=archive_service.creation_time, + ) + return response + + 
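To illustrate the archive API implemented above from the client side, a small boto3 sketch; the endpoint URL and dummy credentials are assumptions (common LocalStack defaults), not part of this change:

```python
import boto3

# Point the client at a locally running LocalStack instance (assumed endpoint).
events = boto3.client(
    "events",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# CreateArchive requires an event bus ARN as the event source.
bus_arn = events.describe_event_bus(Name="default")["Arn"]
archive = events.create_archive(
    ArchiveName="my-archive",
    EventSourceArn=bus_arn,
    EventPattern='{"source": ["my.app"]}',
    RetentionDays=1,
)
# Calling create_archive again with the same name raises
# ResourceAlreadyExistsException, matching the duplicate-name check above.
```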
@handler("DeleteArchive") + def delete_archive( + self, context: RequestContext, archive_name: ArchiveName, **kwargs + ) -> DeleteArchiveResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if archive := self.get_archive(archive_name, store): + try: + archive_service = self._archive_service_store.pop(archive.arn) + archive_service.delete() + del store.archives[archive_name] + except ResourceNotFoundException as error: + return error + + @handler("DescribeArchive") + def describe_archive( + self, context: RequestContext, archive_name: ArchiveName, **kwargs + ) -> DescribeArchiveResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + archive = self.get_archive(archive_name, store) + + response = self._archive_to_describe_archive_response(archive) + return response + + @handler("ListArchives") + def list_archives( + self, + context: RequestContext, + name_prefix: ArchiveName = None, + event_source_arn: Arn = None, + state: ArchiveState = None, + next_token: NextToken = None, + limit: LimitMax100 = None, + **kwargs, + ) -> ListArchivesResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if event_source_arn: + self._check_event_bus_exists(event_source_arn, store) + archives = { + key: archive + for key, archive in store.archives.items() + if archive.event_source_arn == event_source_arn + } + elif name_prefix: + archives = get_filtered_dict(name_prefix, store.archives) + else: + archives = store.archives + limited_archives, next_token = self._get_limited_dict_and_next_token( + archives, next_token, limit + ) + + response = ListArchivesResponse( + Archives=list(self._archive_dict_to_archive_response_list(limited_archives)) + ) + if next_token is not None: + response["NextToken"] = next_token + return response + + @handler("UpdateArchive") + def update_archive( + self, + context: RequestContext, + archive_name: ArchiveName, + description: ArchiveDescription = None, + event_pattern: EventPattern = None, + retention_days: RetentionDays = None, + kms_key_identifier: KmsKeyIdentifier = None, + **kwargs, + ) -> UpdateArchiveResponse: + # TODO add support for kms_key_identifier + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + try: + archive = self.get_archive(archive_name, store) + except ResourceNotFoundException: + raise InternalException("Service encountered unexpected problem. 
Please try again.") + archive_service = self._archive_service_store[archive.arn] + archive_service.update(description, event_pattern, retention_days) + + response = UpdateArchiveResponse( + ArchiveArn=archive_service.arn, + State=archive.state, + # StateReason=archive.state_reason, + CreationTime=archive.creation_time, + ) + return response + + ######## + # Events + ######## + + @handler("PutEvents") + def put_events( + self, + context: RequestContext, + entries: PutEventsRequestEntryList, + endpoint_id: EndpointId = None, + **kwargs, + ) -> PutEventsResponse: + if len(entries) > 10: + formatted_entries = [self._event_to_error_type_event(entry) for entry in entries] + formatted_entries = f"[{', '.join(formatted_entries)}]" + raise ValidationException( + f"1 validation error detected: Value '{formatted_entries}' at 'entries' failed to satisfy constraint: Member must have length less than or equal to 10" + ) + entries, failed_entry_count = self._process_entries(context, entries) + + response = PutEventsResponse( + Entries=entries, + FailedEntryCount=failed_entry_count, + ) + return response + + @handler("PutPartnerEvents") + def put_partner_events( + self, + context: RequestContext, + entries: PutPartnerEventsRequestEntryList, + **kwargs, + ) -> PutPartnerEventsResponse: + raise NotImplementedError + + ######## + # Replay + ######## + + @handler("CancelReplay") + def cancel_replay( + self, context: RequestContext, replay_name: ReplayName, **kwargs + ) -> CancelReplayResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + replay = self.get_replay(replay_name, store) + replay_service = self._replay_service_store[replay.arn] + replay_service.stop() + response = CancelReplayResponse( + ReplayArn=replay_service.arn, + State=replay_service.state, + # StateReason=replay_service.state_reason, + ) + return response + + @handler("DescribeReplay") + def describe_replay( + self, context: RequestContext, replay_name: ReplayName, **kwargs + ) -> DescribeReplayResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + replay = self.get_replay(replay_name, store) + + response = self._replay_to_describe_replay_response(replay) + return response + + @handler("ListReplays") + def list_replays( + self, + context: RequestContext, + name_prefix: ReplayName = None, + state: ReplayState = None, + event_source_arn: Arn = None, + next_token: NextToken = None, + limit: LimitMax100 = None, + **kwargs, + ) -> ListReplaysResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if event_source_arn: + replays = { + key: replay + for key, replay in store.replays.items() + if replay.event_source_arn == event_source_arn + } + elif name_prefix: + replays = get_filtered_dict(name_prefix, store.replays) + else: + replays = store.replays + limited_replays, next_token = self._get_limited_dict_and_next_token( + replays, next_token, limit + ) + + response = ListReplaysResponse( + Replays=list(self._replay_dict_to_replay_response_list(limited_replays)) + ) + if next_token is not None: + response["NextToken"] = next_token + return response + + @handler("StartReplay") + def start_replay( + self, + context: RequestContext, + replay_name: ReplayName, + event_source_arn: Arn, # Archive Arn + event_start_time: Timestamp, + event_end_time: Timestamp, + destination: ReplayDestination, + description: ReplayDescription = None, + **kwargs, + ) -> StartReplayResponse: + 
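# Flow: validate the replay window, resolve the source archive, fetch its
+        # events, re-format them for the destination bus, run them through
+        # _process_entries, then mark the replay finished. A hedged client-side
+        # sketch (names, ARNs, and times are illustrative only):
+        #
+        #   client.start_replay(
+        #       ReplayName="my-replay",
+        #       EventSourceArn="arn:aws:events:us-east-1:000000000000:archive/my-archive",
+        #       EventStartTime=datetime(2024, 1, 1, tzinfo=timezone.utc),
+        #       EventEndTime=datetime(2024, 1, 2, tzinfo=timezone.utc),
+        #       Destination={"Arn": "arn:aws:events:us-east-1:000000000000:event-bus/default"},
+        #   )
+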
region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + if replay_name in store.replays: + raise ResourceAlreadyExistsException(f"Replay {replay_name} already exists.") + self._validate_replay_time(event_start_time, event_end_time) + if event_source_arn not in self._archive_service_store: + archive_name = event_source_arn.split("/")[-1] + raise ValidationException( + f"Parameter EventSourceArn is not valid. Reason: Archive {archive_name} does not exist." + ) + self._validate_replay_destination(destination, event_source_arn) + replay_service = self.create_replay_service( + replay_name, + region, + account_id, + event_source_arn, + destination, + event_start_time, + event_end_time, + description, + ) + store.replays[replay_service.replay.name] = replay_service.replay + archive_service = self._archive_service_store[event_source_arn] + events_to_replay = archive_service.get_events( + replay_service.event_start_time, replay_service.event_end_time + ) + replay_service.start(events_to_replay) + if events_to_replay: + re_formatted_event_to_replay = replay_service.re_format_events_from_archive( + events_to_replay, replay_name + ) + self._process_entries(context, re_formatted_event_to_replay) + replay_service.finish() + + response = StartReplayResponse( + ReplayArn=replay_service.arn, + State=replay_service.state, + StateReason=replay_service.state_reason, + ReplayStartTime=replay_service.replay_start_time, + ) + return response + + ###### + # Tags + ###### + + @handler("ListTagsForResource") + def list_tags_for_resource( + self, context: RequestContext, resource_arn: Arn, **kwargs + ) -> ListTagsForResourceResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + resource_type = get_resource_type(resource_arn) + self._check_resource_exists(resource_arn, resource_type, store) + tags = store.TAGS.list_tags_for_resource(resource_arn) + return ListTagsForResourceResponse(tags) + + @handler("TagResource") + def tag_resource( + self, context: RequestContext, resource_arn: Arn, tags: TagList, **kwargs + ) -> TagResourceResponse: + # each tag key must be unique + # https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-best-practices + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + resource_type = get_resource_type(resource_arn) + self._check_resource_exists(resource_arn, resource_type, store) + check_unique_tags(tags) + store.TAGS.tag_resource(resource_arn, tags) + + @handler("UntagResource") + def untag_resource( + self, context: RequestContext, resource_arn: Arn, tag_keys: TagKeyList, **kwargs + ) -> UntagResourceResponse: + region = context.region + account_id = context.account_id + store = self.get_store(region, account_id) + resource_type = get_resource_type(resource_arn) + self._check_resource_exists(resource_arn, resource_type, store) + store.TAGS.untag_resource(resource_arn, tag_keys) + + ######### + # Methods + ######### + + def get_store(self, region: str, account_id: str) -> EventsStore: + """Returns the events store for the account and region. 
+ On first call, creates the default event bus for the account region.""" + store = events_stores[account_id][region] + # create default event bus for account region on first call + default_event_bus_name = "default" + if default_event_bus_name not in store.event_buses: + event_bus_service = self.create_event_bus_service( + default_event_bus_name, region, account_id, None, None, None + ) + store.event_buses[event_bus_service.event_bus.name] = event_bus_service.event_bus + return store + + def get_event_bus(self, name: EventBusName, store: EventsStore) -> EventBus: + if event_bus := store.event_buses.get(name): + return event_bus + raise ResourceNotFoundException(f"Event bus {name} does not exist.") + + def get_rule(self, name: RuleName, event_bus: EventBus) -> Rule: + if rule := event_bus.rules.get(name): + return rule + raise ResourceNotFoundException(f"Rule {name} does not exist on EventBus {event_bus.name}.") + + def get_target(self, target_id: TargetId, rule: Rule) -> Target: + if target := rule.targets.get(target_id): + return target + raise ResourceNotFoundException(f"Target {target_id} does not exist on Rule {rule.name}.") + + def get_archive(self, name: ArchiveName, store: EventsStore) -> Archive: + if archive := store.archives.get(name): + return archive + raise ResourceNotFoundException(f"Archive {name} does not exist.") + + def get_replay(self, name: ReplayName, store: EventsStore) -> Replay: + if replay := store.replays.get(name): + return replay + raise ResourceNotFoundException(f"Replay {name} does not exist.") + + def get_connection(self, name: ConnectionName, store: EventsStore) -> Connection: + if connection := store.connections.get(name): + return connection + raise ResourceNotFoundException( + f"Failed to describe the connection(s). Connection '{name}' does not exist." + ) + + def get_api_destination(self, name: ApiDestinationName, store: EventsStore) -> ApiDestination: + if api_destination := store.api_destinations.get(name): + return api_destination + raise ResourceNotFoundException( + f"Failed to describe the api-destination(s). An api-destination '{name}' does not exist." 
+ ) + + def get_rule_service( + self, + region: str, + account_id: str, + rule_name: RuleName, + event_bus_name: EventBusName, + ) -> RuleService: + store = self.get_store(region, account_id) + event_bus_name = extract_event_bus_name(event_bus_name) + event_bus = self.get_event_bus(event_bus_name, store) + rule = self.get_rule(rule_name, event_bus) + return self._rule_services_store[rule.arn] + + def create_event_bus_service( + self, + name: EventBusName, + region: str, + account_id: str, + event_source_name: Optional[EventSourceName], + description: Optional[EventBusDescription], + tags: Optional[TagList], + ) -> EventBusService: + event_bus_service = EventBusService.create_event_bus_service( + name, + region, + account_id, + event_source_name, + description, + tags, + ) + self._event_bus_services_store[event_bus_service.arn] = event_bus_service + return event_bus_service + + def create_rule_service( + self, + name: RuleName, + region: str, + account_id: str, + schedule_expression: Optional[ScheduleExpression], + event_pattern: Optional[EventPattern], + state: Optional[RuleState], + description: Optional[RuleDescription], + role_arn: Optional[RoleArn], + tags: Optional[TagList], + event_bus_name: Optional[EventBusName], + targets: Optional[TargetDict], + ) -> RuleService: + rule_service = RuleService.create_rule_service( + name, + region, + account_id, + schedule_expression, + event_pattern, + state, + description, + role_arn, + tags, + event_bus_name, + targets, + ) + self._rule_services_store[rule_service.arn] = rule_service + return rule_service + + def create_target_sender( + self, target: Target, rule_arn: Arn, rule_name: RuleName, region: str, account_id: str + ) -> TargetSender: + target_sender = TargetSenderFactory( + target, rule_arn, rule_name, region, account_id + ).get_target_sender() + self._target_sender_store[target_sender.unique_id] = target_sender + return target_sender + + def create_archive_service( + self, + archive_name: ArchiveName, + region: str, + account_id: str, + event_source_arn: Arn, + description: ArchiveDescription, + event_pattern: EventPattern, + retention_days: RetentionDays, + ) -> ArchiveService: + archive_service = ArchiveService.create_archive_service( + archive_name, + region, + account_id, + event_source_arn, + description, + event_pattern, + retention_days, + ) + archive_service.register_archive_rule_and_targets() + self._archive_service_store[archive_service.arn] = archive_service + return archive_service + + def create_replay_service( + self, + name: ReplayName, + region: str, + account_id: str, + event_source_arn: Arn, + destination: ReplayDestination, + event_start_time: Timestamp, + event_end_time: Timestamp, + description: ReplayDescription, + ) -> ReplayService: + replay_service = ReplayService( + name, + region, + account_id, + event_source_arn, + destination, + event_start_time, + event_end_time, + description, + ) + self._replay_service_store[replay_service.arn] = replay_service + return replay_service + + def create_connection_service( + self, + name: ConnectionName, + region: str, + account_id: str, + authorization_type: ConnectionAuthorizationType, + auth_parameters: CreateConnectionAuthRequestParameters, + description: ConnectionDescription, + invocation_connectivity_parameters: ConnectivityResourceParameters, + ) -> ConnectionService: + connection_service = ConnectionService( + name, + region, + account_id, + authorization_type, + auth_parameters, + description, + invocation_connectivity_parameters, + ) + 
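# register the new connection service under its ARN so later describe/delete calls can resolve it (see _delete_connection below)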
+        self._connection_service_store[connection_service.arn] = connection_service
+        return connection_service
+
+    def create_api_destinations_service(
+        self,
+        name: ConnectionName,
+        region: str,
+        account_id: str,
+        connection_arn: ConnectionArn,
+        connection: Connection,
+        invocation_endpoint: HttpsEndpoint,
+        http_method: ApiDestinationHttpMethod,
+        invocation_rate_limit_per_second: ApiDestinationInvocationRateLimitPerSecond,
+        description: ApiDestinationDescription,
+    ) -> APIDestinationService:
+        api_destination_service = APIDestinationService(
+            name,
+            region,
+            account_id,
+            connection_arn,
+            connection,
+            invocation_endpoint,
+            http_method,
+            invocation_rate_limit_per_second,
+            description,
+        )
+        self._api_destination_service_store[api_destination_service.arn] = api_destination_service
+        return api_destination_service
+
+    def _delete_connection(self, connection_arn: Arn) -> None:
+        del self._connection_service_store[connection_arn]
+
+    def _delete_rule_services(self, rules: RuleDict | Rule) -> None:
+        """
+        Delete all rule services associated to the input from the store.
+        Accepts a single Rule object or a dict of Rule objects as input.
+        """
+        if isinstance(rules, Rule):
+            rules = {rules.name: rules}
+        for rule in rules.values():
+            del self._rule_services_store[rule.arn]
+
+    def _delete_target_sender(self, ids: TargetIdList, rule: Rule) -> None:
+        for target_id in ids:
+            if target := rule.targets.get(target_id):
+                target_unique_id = f"{rule.arn}-{target_id}"
+                try:
+                    del self._target_sender_store[target_unique_id]
+                except KeyError:
+                    LOG.error("Error deleting target service %s.", target["Arn"])
+
+    def _get_limited_dict_and_next_token(
+        self, input_dict: dict, next_token: NextToken | None, limit: LimitMax100 | None
+    ) -> tuple[dict, NextToken | None]:
+        """Return a slice of the given dictionary, starting at the index encoded in next_token
+        and containing at most limit items, plus the next start index encoded as a next_token for pagination."""
+        input_dict_len = len(input_dict)
+        start_index = decode_next_token(next_token) if next_token is not None else 0
+        end_index = start_index + limit if limit is not None else input_dict_len
+        limited_dict = dict(list(input_dict.items())[start_index:end_index])
+
+        next_token = (
+            encode_next_token(end_index)
+            # return a next_token (encoded integer of next starting index) if not all items are returned
+            if end_index < input_dict_len
+            else None
+        )
+        return limited_dict, next_token
+
+    def _get_limited_list_and_next_token(
+        self, input_list: list, next_token: NextToken | None, limit: LimitMax100 | None
+    ) -> tuple[list, NextToken | None]:
+        """Return a slice of the given list, starting at the index encoded in next_token
+        and containing at most limit items, plus the next start index encoded as a next_token for pagination."""
+        input_list_len = len(input_list)
+        start_index = decode_next_token(next_token) if next_token is not None else 0
+        end_index = start_index + limit if limit is not None else input_list_len
+        limited_list = input_list[start_index:end_index]
+
+        next_token = (
+            encode_next_token(end_index)
+            # return a next_token (encoded integer of next starting index) if not all items are returned
+            if end_index < input_list_len
+            else None
+        )
+        return limited_list, next_token
+
+    def _check_resource_exists(
+        self, resource_arn: Arn, resource_type: ResourceType, store: EventsStore
+    ) -> None:
+        if resource_type == ResourceType.EVENT_BUS:
+            event_bus_name = extract_event_bus_name(resource_arn)
+            self.get_event_bus(event_bus_name, store)
+        if resource_type == ResourceType.RULE:
+            event_bus_name = extract_event_bus_name(resource_arn)
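+            # rule ARNs look like arn:aws:events:<region>:<account>:rule/[<bus-name>/]<rule-name>,
+            # so the last path segment below is the rule name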
+            event_bus = self.get_event_bus(event_bus_name, store)
+            rule_name = resource_arn.split("/")[-1]
+            self.get_rule(rule_name, event_bus)
+
+    def _get_scheduled_rule_job_function(self, account_id, region, rule: Rule) -> Callable:
+        def func(*args, **kwargs):
+            """Create a custom scheduled event and send it to every target of the associated rule via its respective TargetSender."""
+            for target in rule.targets.values():
+                if custom_input := target.get("Input"):
+                    event = json.loads(custom_input)
+                else:
+                    event = {
+                        "version": "0",
+                        "id": long_uid(),
+                        "detail-type": "Scheduled Event",
+                        "source": "aws.events",
+                        "account": account_id,
+                        "time": timestamp(format=TIMESTAMP_FORMAT_TZ),
+                        "region": region,
+                        "resources": [rule.arn],
+                        "detail": {},
+                    }
+                target_unique_id = f"{rule.arn}-{target['Id']}"
+                target_sender = self._target_sender_store[target_unique_id]
+                new_trace_header = (
+                    TraceHeader().ensure_root_exists()
+                )  # scheduled events will always start a new trace
+                try:
+                    target_sender.process_event(event.copy(), trace_header=new_trace_header)
+                except Exception as e:
+                    LOG.info(
+                        "Unable to send event notification %s to target %s: %s",
+                        truncate(event),
+                        target,
+                        e,
+                    )
+
+        return func
+
+    def _check_event_bus_exists(
+        self, event_bus_name_or_arn: EventBusNameOrArn, store: EventsStore
+    ) -> None:
+        event_bus_name = extract_event_bus_name(event_bus_name_or_arn)
+        self.get_event_bus(event_bus_name, store)
+
+    def _validate_replay_time(self, event_start_time: Timestamp, event_end_time: Timestamp) -> None:
+        if event_end_time <= event_start_time:
+            raise ValidationException(
+                "Parameter EventEndTime is not valid. Reason: EventStartTime must be before EventEndTime."
+            )
+
+    def _validate_replay_destination(
+        self, destination: ReplayDestination, event_source_arn: Arn
+    ) -> None:
+        archive_service = self._archive_service_store[event_source_arn]
+        if destination_arn := destination.get("Arn"):
+            if destination_arn != archive_service.archive.event_source_arn:
+                if destination_arn in self._event_bus_services_store:
+                    raise ValidationException(
+                        "Parameter Destination.Arn is not valid. Reason: Cross event bus replay is not permitted."
+ ) + else: + event_bus_name = extract_event_bus_name(destination_arn) + raise ResourceNotFoundException(f"Event bus {event_bus_name} does not exist.") + + # Internal type to API type remappings + + def _event_bust_dict_to_event_bus_response_list( + self, event_buses: EventBusDict + ) -> EventBusList: + """Return a converted dict of EventBus model objects as a list of event buses in API type EventBus format.""" + event_bus_list = [ + self._event_bus_to_api_type_event_bus(event_bus) for event_bus in event_buses.values() + ] + return event_bus_list + + def _event_bus_to_api_type_event_bus(self, event_bus: EventBus) -> ApiTypeEventBus: + event_bus_api_type = { + "Name": event_bus.name, + "Arn": event_bus.arn, + } + if getattr(event_bus, "description", None): + event_bus_api_type["Description"] = event_bus.description + if event_bus.creation_time: + event_bus_api_type["CreationTime"] = event_bus.creation_time + if event_bus.last_modified_time: + event_bus_api_type["LastModifiedTime"] = event_bus.last_modified_time + if event_bus.policy: + event_bus_api_type["Policy"] = json.dumps( + recursive_remove_none_values_from_dict(event_bus.policy) + ) + + return event_bus_api_type + + def _event_to_error_type_event(self, entry: PutEventsRequestEntry) -> str: + detail = ( + json.dumps(json.loads(entry["Detail"]), separators=(", ", ": ")) + if entry.get("Detail") + else "null" + ) + return ( + f"PutEventsRequestEntry(" + f"time={entry.get('Time', 'null')}, " + f"source={entry.get('Source', 'null')}, " + f"resources={entry.get('Resources', 'null')}, " + f"detailType={entry.get('DetailType', 'null')}, " + f"detail={detail}, " + f"eventBusName={entry.get('EventBusName', 'null')}, " + f"traceHeader={entry.get('TraceHeader', 'null')}, " + f"kmsKeyIdentifier={entry.get('kmsKeyIdentifier', 'null')}, " + f"internalMetadata={entry.get('internalMetadata', 'null')}" + f")" + ) + + def _rule_dict_to_rule_response_list(self, rules: RuleDict) -> RuleResponseList: + """Return a converted dict of Rule model objects as a list of rules in API type Rule format.""" + rule_list = [self._rule_to_api_type_rule(rule) for rule in rules.values()] + return rule_list + + def _rule_to_api_type_rule(self, rule: Rule) -> ApiTypeRule: + rule = { + "Name": rule.name, + "Arn": rule.arn, + "EventPattern": rule.event_pattern, + "State": rule.state, + "Description": rule.description, + "ScheduleExpression": rule.schedule_expression, + "RoleArn": rule.role_arn, + "ManagedBy": rule.managed_by, + "EventBusName": rule.event_bus_name, + "CreatedBy": rule.created_by, + } + return {key: value for key, value in rule.items() if value is not None} + + def _archive_dict_to_archive_response_list(self, archives: ArchiveDict) -> ArchiveResponseList: + """Return a converted dict of Archive model objects as a list of archives in API type Archive format.""" + archive_list = [self._archive_to_api_type_archive(archive) for archive in archives.values()] + return archive_list + + def _archive_to_api_type_archive(self, archive: Archive) -> ApiTypeArchive: + archive = { + "ArchiveName": archive.name, + "EventSourceArn": archive.event_source_arn, + "State": archive.state, + # TODO add "StateReason": archive.state_reason, + "RetentionDays": archive.retention_days, + "SizeBytes": archive.size_bytes, + "EventCount": archive.event_count, + "CreationTime": archive.creation_time, + } + return {key: value for key, value in archive.items() if value is not None} + + def _archive_to_describe_archive_response(self, archive: Archive) -> DescribeArchiveResponse: + archive_dict 
= { + "ArchiveArn": archive.arn, + "ArchiveName": archive.name, + "EventSourceArn": archive.event_source_arn, + "State": archive.state, + # TODO add "StateReason": archive.state_reason, + "RetentionDays": archive.retention_days, + "SizeBytes": archive.size_bytes, + "EventCount": archive.event_count, + "CreationTime": archive.creation_time, + "EventPattern": archive.event_pattern, + "Description": archive.description, + } + return {key: value for key, value in archive_dict.items() if value is not None} + + def _replay_dict_to_replay_response_list(self, replays: ReplayDict) -> ReplayList: + """Return a converted dict of Replay model objects as a list of replays in API type Replay format.""" + replay_list = [self._replay_to_api_type_replay(replay) for replay in replays.values()] + return replay_list + + def _replay_to_api_type_replay(self, replay: Replay) -> ApiTypeReplay: + replay = { + "ReplayName": replay.name, + "EventSourceArn": replay.event_source_arn, + "State": replay.state, + # # "StateReason": replay.state_reason, + "EventStartTime": replay.event_start_time, + "EventEndTime": replay.event_end_time, + "EventLastReplayedTime": replay.event_last_replayed_time, + "ReplayStartTime": replay.replay_start_time, + "ReplayEndTime": replay.replay_end_time, + } + return {key: value for key, value in replay.items() if value is not None} + + def _replay_to_describe_replay_response(self, replay: Replay) -> DescribeReplayResponse: + replay_dict = { + "ReplayName": replay.name, + "ReplayArn": replay.arn, + "Description": replay.description, + "State": replay.state, + # # "StateReason": replay.state_reason, + "EventSourceArn": replay.event_source_arn, + "Destination": replay.destination, + "EventStartTime": replay.event_start_time, + "EventEndTime": replay.event_end_time, + "EventLastReplayedTime": replay.event_last_replayed_time, + "ReplayStartTime": replay.replay_start_time, + "ReplayEndTime": replay.replay_end_time, + } + return {key: value for key, value in replay_dict.items() if value is not None} + + def _connection_to_api_type_connection(self, connection: Connection) -> ApiTypeConnection: + connection = { + "ConnectionArn": connection.arn, + "Name": connection.name, + "ConnectionState": connection.state, + # "StateReason": connection.state_reason, # TODO implement state reason + "AuthorizationType": connection.authorization_type, + "AuthParameters": connection.auth_parameters, + "SecretArn": connection.secret_arn, + "CreationTime": connection.creation_time, + "LastModifiedTime": connection.last_modified_time, + "LastAuthorizedTime": connection.last_authorized_time, + } + return {key: value for key, value in connection.items() if value is not None} + + def _connection_dict_to_connection_response_list( + self, connections: ConnectionDict + ) -> ConnectionResponseList: + """Return a converted dict of Connection model objects as a list of connections in API type Connection format.""" + connection_list = [ + self._connection_to_api_type_connection(connection) + for connection in connections.values() + ] + return connection_list + + def _api_destination_to_api_type_api_destination( + self, api_destination: ApiDestination + ) -> ApiTypeApiDestination: + api_destination = { + "ApiDestinationArn": api_destination.arn, + "Name": api_destination.name, + "ConnectionArn": api_destination.connection_arn, + "ApiDestinationState": api_destination.state, + "InvocationEndpoint": api_destination.invocation_endpoint, + "HttpMethod": api_destination.http_method, + "InvocationRateLimitPerSecond": 
api_destination.invocation_rate_limit_per_second,
+            "CreationTime": api_destination.creation_time,
+            "LastModifiedTime": api_destination.last_modified_time,
+            "Description": api_destination.description,
+        }
+        return {key: value for key, value in api_destination.items() if value is not None}
+
+    def _api_destination_dict_to_api_destination_response_list(
+        self, api_destinations: ApiDestinationDict
+    ) -> ApiDestinationResponseList:
+        """Return a converted dict of ApiDestination model objects as a list of api destinations in API type ApiDestination format."""
+        api_destination_list = [
+            self._api_destination_to_api_type_api_destination(api_destination)
+            for api_destination in api_destinations.values()
+        ]
+        return api_destination_list
+
+    def _put_to_archive(
+        self,
+        region: str,
+        account_id: str,
+        archive_target_id: str,
+        event: FormattedEvent,
+    ) -> None:
+        archive_name = ARCHIVE_TARGET_ID_NAME_PATTERN.match(archive_target_id).group("name")
+
+        store = self.get_store(region, account_id)
+        archive = self.get_archive(archive_name, store)
+        archive_service = self._archive_service_store[archive.arn]
+        archive_service.put_events([event])
+
+    def _process_entries(
+        self, context: RequestContext, entries: PutEventsRequestEntryList
+    ) -> tuple[PutEventsResultEntryList, int]:
+        """Main method to process events put to an event bus.
+        Events are validated to contain the proper fields and formatted.
+        Events are matched against all the rules of the respective event bus.
+        For each matching rule the event is either sent to the respective target
+        via the target sender, or put to the configured archive."""
+        processed_entries = []
+        failed_entry_count = {"count": 0}
+        for event in entries:
+            self._process_entry(event, processed_entries, failed_entry_count, context)
+        return processed_entries, failed_entry_count["count"]
+
+    def _process_entry(
+        self,
+        entry: PutEventsRequestEntry,
+        processed_entries: PutEventsResultEntryList,
+        failed_entry_count: dict[str, int],
+        context: RequestContext,
+    ) -> None:
+        event_bus_name_or_arn = entry.get("EventBusName", "default")
+        event_bus_name = extract_event_bus_name(event_bus_name_or_arn)
+        if event_failed_validation := validate_event(entry):
+            processed_entries.append(event_failed_validation)
+            failed_entry_count["count"] += 1
+            LOG.info(json.dumps(event_failed_validation))
+            return
+
+        region, account_id = extract_region_and_account_id(event_bus_name_or_arn, context)
+
+        # TODO check interference with x-ray trace header
+        if encoded_trace_header := get_trace_header_encoded_region_account(
+            entry, context.region, context.account_id, region, account_id
+        ):
+            entry["TraceHeader"] = encoded_trace_header
+
+        event_formatted = format_event(entry, region, account_id, event_bus_name)
+        store = self.get_store(region, account_id)
+
+        try:
+            event_bus = self.get_event_bus(event_bus_name, store)
+        except ResourceNotFoundException:
+            # ignore events for non-existing event buses but add processed event
+            processed_entries.append({"EventId": event_formatted["id"]})
+            LOG.info(
+                json.dumps(
+                    {
+                        "ErrorCode": "ResourceNotFoundException at get_event_bus",
+                        "ErrorMessage": f"Event bus {event_bus_name} does not exist",
+                    }
+                )
+            )
+            return
+
+        trace_header = context.trace_context["aws_trace_header"]
+
+        self._proxy_capture_input_event(event_formatted, trace_header, region, account_id)
+
+        # Always add the successful EventId entry, even if target processing might fail
+        processed_entries.append({"EventId": event_formatted["id"]})
+
+        if configured_rules := 
list(event_bus.rules.values()): + for rule in configured_rules: + if rule.schedule_expression: + # we do not want to execute Scheduled Rules on PutEvents + continue + + self._process_rules(rule, region, account_id, event_formatted, trace_header) + else: + LOG.info( + json.dumps( + { + "InfoCode": "InternalInfoEvents at process_rules", + "InfoMessage": f"No rules attached to event_bus: {event_bus_name}", + } + ) + ) + + def _proxy_capture_input_event( + self, event: FormattedEvent, trace_header: TraceHeader, region: str, account_id: str + ) -> None: + # only required for EventStudio to capture input event if no rule is configured + pass + + def _process_rules( + self, + rule: Rule, + region: str, + account_id: str, + event_formatted: FormattedEvent, + trace_header: TraceHeader, + ) -> None: + """Process rules for an event. Note that we no longer handle entries here as AWS returns success regardless of target failures.""" + event_pattern = rule.event_pattern + + if matches_event(event_pattern, event_formatted): + if not rule.targets: + LOG.info( + json.dumps( + { + "InfoCode": "InternalInfoEvents at iterate over targets", + "InfoMessage": f"No target configured for matched rule: {rule}", + } + ) + ) + return + + for target in rule.targets.values(): + target_id = target["Id"] + if is_archive_arn(target["Arn"]): + self._put_to_archive( + region, + account_id, + archive_target_id=target_id, + event=event_formatted, + ) + else: + target_unique_id = f"{rule.arn}-{target_id}" + target_sender = self._target_sender_store[target_unique_id] + try: + target_sender.process_event(event_formatted.copy(), trace_header) + rule_invocation.labels( + status=InvocationStatus.success, + service=target_sender.service, + ).increment() + + except Exception as error: + rule_invocation.labels( + status=InvocationStatus.error, + service=target_sender.service, + ).increment() + # Log the error but don't modify the response + LOG.info( + json.dumps( + { + "ErrorCode": "TargetDeliveryFailure", + "ErrorMessage": f"Failed to deliver to target {target_id}: {str(error)}", + } + ) + ) + else: + LOG.info( + json.dumps( + { + "InfoCode": "InternalInfoEvents at matches_rule", + "InfoMessage": f"No rules matched for formatted event: {event_formatted}", + } + ) + ) diff --git a/localstack-core/localstack/services/events/replay.py b/localstack-core/localstack/services/events/replay.py new file mode 100644 index 0000000000000..7a58fb3534d05 --- /dev/null +++ b/localstack-core/localstack/services/events/replay.py @@ -0,0 +1,94 @@ +from datetime import datetime, timezone + +from localstack.aws.api.events import ( + Arn, + PutEventsRequestEntry, + ReplayDescription, + ReplayDestination, + ReplayName, + ReplayState, + Timestamp, +) +from localstack.services.events.models import FormattedEventList, Replay +from localstack.services.events.utils import ( + convert_to_timezone_aware_datetime, + extract_event_bus_name, + re_format_event, +) + + +class ReplayService: + name: ReplayName + region: str + account_id: str + event_source_arn: Arn + destination: ReplayDestination + event_start_time: Timestamp + event_end_time: Timestamp + description: ReplayDescription + replay: Replay + + def __init__( + self, + name: ReplayName, + region: str, + account_id: str, + event_source_arn: Arn, + destination: ReplayDestination, + event_start_time: Timestamp, + event_end_time: Timestamp, + description: ReplayDescription, + ): + event_start_time = convert_to_timezone_aware_datetime(event_start_time) + event_end_time = 
convert_to_timezone_aware_datetime(event_end_time)
+        self.replay = Replay(
+            name,
+            region,
+            account_id,
+            event_source_arn,
+            destination,
+            event_start_time,
+            event_end_time,
+            description,
+        )
+        self.set_state(ReplayState.STARTING)
+
+    def __getattr__(self, name):
+        return getattr(self.replay, name)
+
+    def set_state(self, state: ReplayState) -> None:
+        self.replay.state = state
+
+    def start(self, events: FormattedEventList | None) -> None:
+        self.set_state(ReplayState.RUNNING)
+        self.replay.replay_start_time = datetime.now(timezone.utc)
+        if events:
+            self._set_event_last_replayed_time(events)
+
+    def finish(self) -> None:
+        self.set_state(ReplayState.COMPLETED)
+        self.replay.replay_end_time = datetime.now(timezone.utc)
+
+    def stop(self) -> None:
+        self.set_state(ReplayState.CANCELLING)
+        self.replay.event_last_replayed_time = None
+        self.replay.replay_end_time = None
+
+    def re_format_events_from_archive(
+        self, events: FormattedEventList, replay_name: ReplayName
+    ) -> list[PutEventsRequestEntry]:
+        event_bus_name = extract_event_bus_name(
+            self.destination["Arn"]
+        )  # TODO deal with filter arn -> defining rules to replay to
+        re_formatted_events = [re_format_event(event, event_bus_name) for event in events]
+        re_formatted_events_from_archive = [
+            {**event, "ReplayName": replay_name} for event in re_formatted_events
+        ]
+        return re_formatted_events_from_archive
+
+    def _set_event_last_replayed_time(self, events: FormattedEventList) -> None:
+        latest_event_time = max(event["time"] for event in events)
+        self.replay.event_last_replayed_time = latest_event_time
+
+
+ReplayServiceDict = dict[ReplayName, ReplayService]
diff --git a/localstack/services/stepfunctions/asl/component/common/catch/__init__.py b/localstack-core/localstack/services/events/resource_providers/__init__.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/catch/__init__.py
rename to localstack-core/localstack/services/events/resource_providers/__init__.py
diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination.py b/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination.py
new file mode 100644
index 0000000000000..372d45de40dce
--- /dev/null
+++ b/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination.py
@@ -0,0 +1,115 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+    OperationStatus,
+    ProgressEvent,
+    ResourceProvider,
+    ResourceRequest,
+)
+
+
+class EventsApiDestinationProperties(TypedDict):
+    ConnectionArn: Optional[str]
+    HttpMethod: Optional[str]
+    InvocationEndpoint: Optional[str]
+    Arn: Optional[str]
+    Description: Optional[str]
+    InvocationRateLimitPerSecond: Optional[int]
+    Name: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class EventsApiDestinationProvider(ResourceProvider[EventsApiDestinationProperties]):
+    TYPE = "AWS::Events::ApiDestination"  # Autogenerated. Don't change
+    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[EventsApiDestinationProperties],
+    ) -> ProgressEvent[EventsApiDestinationProperties]:
+        """
+        Create a new resource.
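+
+        A hedged sketch of the desired-state properties this handler consumes
+        (values are illustrative, not part of the generated schema):
+
+            {
+                "Name": "my-destination",
+                "ConnectionArn": "arn:aws:events:us-east-1:000000000000:connection/my-conn/abc123",
+                "InvocationEndpoint": "https://example.com/hook",
+                "HttpMethod": "POST",
+            }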
+ + Primary identifier fields: + - /properties/Name + + Required properties: + - ConnectionArn + - InvocationEndpoint + - HttpMethod + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - events:CreateApiDestination + - events:DescribeApiDestination + + """ + model = request.desired_state + events = request.aws_client_factory.events + + response = events.create_api_destination(**model) + model["Arn"] = response["ApiDestinationArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EventsApiDestinationProperties], + ) -> ProgressEvent[EventsApiDestinationProperties]: + """ + Fetch resource information + + IAM permissions required: + - events:DescribeApiDestination + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EventsApiDestinationProperties], + ) -> ProgressEvent[EventsApiDestinationProperties]: + """ + Delete a resource + + IAM permissions required: + - events:DeleteApiDestination + - events:DescribeApiDestination + """ + model = request.desired_state + events = request.aws_client_factory.events + + events.delete_api_destination(Name=model["Name"]) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EventsApiDestinationProperties], + ) -> ProgressEvent[EventsApiDestinationProperties]: + """ + Update a resource + + IAM permissions required: + - events:UpdateApiDestination + - events:DescribeApiDestination + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination.schema.json b/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination.schema.json new file mode 100644 index 0000000000000..f50460b1aea17 --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination.schema.json @@ -0,0 +1,92 @@ +{ + "typeName": "AWS::Events::ApiDestination", + "description": "Resource Type definition for AWS::Events::ApiDestination.", + "properties": { + "Name": { + "description": "Name of the apiDestination.", + "type": "string", + "minLength": 1, + "maxLength": 64 + }, + "Description": { + "type": "string", + "maxLength": 512 + }, + "ConnectionArn": { + "description": "The arn of the connection.", + "type": "string" + }, + "Arn": { + "description": "The arn of the api destination.", + "type": "string" + }, + "InvocationRateLimitPerSecond": { + "type": "integer", + "minimum": 1 + }, + "InvocationEndpoint": { + "description": "Url endpoint to invoke.", + "type": "string" + }, + "HttpMethod": { + "type": "string", + "enum": [ + "GET", + "HEAD", + "POST", + "OPTIONS", + "PUT", + "DELETE", + "PATCH" + ] + } + }, + "additionalProperties": false, + "createOnlyProperties": [ + "/properties/Name" + ], + "readOnlyProperties": [ + "/properties/Arn" + ], + "required": [ + "ConnectionArn", + "InvocationEndpoint", + "HttpMethod" + ], + "primaryIdentifier": [ + "/properties/Name" + ], + "tagging": { + "taggable": false + }, + "handlers": { + "create": { + "permissions": [ + "events:CreateApiDestination", + "events:DescribeApiDestination" + ] + }, + "read": { + "permissions": [ + "events:DescribeApiDestination" + ] + }, + "update": { + "permissions": [ + "events:UpdateApiDestination", + "events:DescribeApiDestination" 
+ ] + }, + "delete": { + "permissions": [ + "events:DeleteApiDestination", + "events:DescribeApiDestination" + ] + }, + "list": { + "permissions": [ + "events:ListApiDestinations" + ] + } + } +} diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination_plugin.py b/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination_plugin.py new file mode 100644 index 0000000000000..0aa7ada08cc50 --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_apidestination_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EventsApiDestinationProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Events::ApiDestination" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.events.resource_providers.aws_events_apidestination import ( + EventsApiDestinationProvider, + ) + + self.factory = EventsApiDestinationProvider diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_connection.py b/localstack-core/localstack/services/events/resource_providers/aws_events_connection.py new file mode 100644 index 0000000000000..a99f8df743aca --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_connection.py @@ -0,0 +1,162 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EventsConnectionProperties(TypedDict): + AuthParameters: Optional[AuthParameters] + AuthorizationType: Optional[str] + Arn: Optional[str] + Description: Optional[str] + Name: Optional[str] + SecretArn: Optional[str] + + +class ApiKeyAuthParameters(TypedDict): + ApiKeyName: Optional[str] + ApiKeyValue: Optional[str] + + +class BasicAuthParameters(TypedDict): + Password: Optional[str] + Username: Optional[str] + + +class ClientParameters(TypedDict): + ClientID: Optional[str] + ClientSecret: Optional[str] + + +class Parameter(TypedDict): + Key: Optional[str] + Value: Optional[str] + IsValueSecret: Optional[bool] + + +class ConnectionHttpParameters(TypedDict): + BodyParameters: Optional[list[Parameter]] + HeaderParameters: Optional[list[Parameter]] + QueryStringParameters: Optional[list[Parameter]] + + +class OAuthParameters(TypedDict): + AuthorizationEndpoint: Optional[str] + ClientParameters: Optional[ClientParameters] + HttpMethod: Optional[str] + OAuthHttpParameters: Optional[ConnectionHttpParameters] + + +class AuthParameters(TypedDict): + ApiKeyAuthParameters: Optional[ApiKeyAuthParameters] + BasicAuthParameters: Optional[BasicAuthParameters] + InvocationHttpParameters: Optional[ConnectionHttpParameters] + OAuthParameters: Optional[OAuthParameters] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EventsConnectionProvider(ResourceProvider[EventsConnectionProperties]): + TYPE = "AWS::Events::Connection" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[EventsConnectionProperties], + ) -> ProgressEvent[EventsConnectionProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Name + + Required properties: + - AuthorizationType + - AuthParameters + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Arn + - /properties/SecretArn + + IAM permissions required: + - events:CreateConnection + - secretsmanager:CreateSecret + - secretsmanager:GetSecretValue + - secretsmanager:PutSecretValue + - iam:CreateServiceLinkedRole + + """ + model = request.desired_state + events = request.aws_client_factory.events + + response = events.create_connection(**model) + model["Arn"] = response["ConnectionArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EventsConnectionProperties], + ) -> ProgressEvent[EventsConnectionProperties]: + """ + Fetch resource information + + IAM permissions required: + - events:DescribeConnection + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EventsConnectionProperties], + ) -> ProgressEvent[EventsConnectionProperties]: + """ + Delete a resource + + IAM permissions required: + - events:DeleteConnection + """ + model = request.desired_state + events = request.aws_client_factory.events + + events.delete_connection(Name=model["Name"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EventsConnectionProperties], + ) -> ProgressEvent[EventsConnectionProperties]: + """ + Update a resource + + IAM permissions required: + - events:UpdateConnection + - events:DescribeConnection + - secretsmanager:CreateSecret + - secretsmanager:UpdateSecret + - secretsmanager:GetSecretValue + - secretsmanager:PutSecretValue + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_connection.schema.json b/localstack-core/localstack/services/events/resource_providers/aws_events_connection.schema.json new file mode 100644 index 0000000000000..efc8539e82273 --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_connection.schema.json @@ -0,0 +1,251 @@ +{ + "typeName": "AWS::Events::Connection", + "description": "Resource Type definition for AWS::Events::Connection.", + "definitions": { + "AuthParameters": { + "type": "object", + "minProperties": 1, + "maxProperties": 2, + "properties": { + "ApiKeyAuthParameters": { + "$ref": "#/definitions/ApiKeyAuthParameters" + }, + "BasicAuthParameters": { + "$ref": "#/definitions/BasicAuthParameters" + }, + "OAuthParameters": { + "$ref": "#/definitions/OAuthParameters" + }, + "InvocationHttpParameters": { + "$ref": "#/definitions/ConnectionHttpParameters" + } + }, + "oneOf": [ + { + "required": [ + "BasicAuthParameters" + ] + }, + { + "required": [ + "OAuthParameters" + ] + }, + { + "required": [ + "ApiKeyAuthParameters" + ] + } + ], + "additionalProperties": false + }, + "BasicAuthParameters": { + "type": "object", + "properties": { + "Username": { + "type": "string" + }, + "Password": { + "type": "string" + } + }, + "required": [ + "Username", + "Password" + ], + "additionalProperties": false + }, + "OAuthParameters": { + "type": "object", + "properties": { + "ClientParameters": { + "$ref": 
"#/definitions/ClientParameters" + }, + "AuthorizationEndpoint": { + "type": "string", + "minLength": 1, + "maxLength": 2048 + }, + "HttpMethod": { + "type": "string", + "enum": [ + "GET", + "POST", + "PUT" + ] + }, + "OAuthHttpParameters": { + "$ref": "#/definitions/ConnectionHttpParameters" + } + }, + "required": [ + "ClientParameters", + "AuthorizationEndpoint", + "HttpMethod" + ], + "additionalProperties": false + }, + "ApiKeyAuthParameters": { + "type": "object", + "properties": { + "ApiKeyName": { + "type": "string" + }, + "ApiKeyValue": { + "type": "string" + } + }, + "required": [ + "ApiKeyName", + "ApiKeyValue" + ], + "additionalProperties": false + }, + "ClientParameters": { + "type": "object", + "properties": { + "ClientID": { + "type": "string" + }, + "ClientSecret": { + "type": "string" + } + }, + "required": [ + "ClientID", + "ClientSecret" + ], + "additionalProperties": false + }, + "ConnectionHttpParameters": { + "type": "object", + "properties": { + "HeaderParameters": { + "type": "array", + "items": { + "$ref": "#/definitions/Parameter" + } + }, + "QueryStringParameters": { + "type": "array", + "items": { + "$ref": "#/definitions/Parameter" + } + }, + "BodyParameters": { + "type": "array", + "items": { + "$ref": "#/definitions/Parameter" + } + } + }, + "additionalProperties": false + }, + "Parameter": { + "type": "object", + "properties": { + "Key": { + "type": "string" + }, + "Value": { + "type": "string" + }, + "IsValueSecret": { + "type": "boolean", + "default": true + } + }, + "required": [ + "Key", + "Value" + ], + "additionalProperties": false + } + }, + "properties": { + "Name": { + "description": "Name of the connection.", + "type": "string", + "minLength": 1, + "maxLength": 64 + }, + "Arn": { + "description": "The arn of the connection resource.", + "type": "string" + }, + "SecretArn": { + "description": "The arn of the secrets manager secret created in the customer account.", + "type": "string" + }, + "Description": { + "description": "Description of the connection.", + "type": "string", + "maxLength": 512 + }, + "AuthorizationType": { + "type": "string", + "enum": [ + "API_KEY", + "BASIC", + "OAUTH_CLIENT_CREDENTIALS" + ] + }, + "AuthParameters": { + "$ref": "#/definitions/AuthParameters" + } + }, + "additionalProperties": false, + "required": [ + "AuthorizationType", + "AuthParameters" + ], + "createOnlyProperties": [ + "/properties/Name" + ], + "readOnlyProperties": [ + "/properties/Arn", + "/properties/SecretArn" + ], + "writeOnlyProperties": [ + "/properties/AuthParameters" + ], + "primaryIdentifier": [ + "/properties/Name" + ], + "handlers": { + "create": { + "permissions": [ + "events:CreateConnection", + "secretsmanager:CreateSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "iam:CreateServiceLinkedRole" + ] + }, + "read": { + "permissions": [ + "events:DescribeConnection" + ] + }, + "update": { + "permissions": [ + "events:UpdateConnection", + "events:DescribeConnection", + "secretsmanager:CreateSecret", + "secretsmanager:UpdateSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue" + ] + }, + "delete": { + "permissions": [ + "events:DeleteConnection" + ] + }, + "list": { + "permissions": [ + "events:ListConnections" + ] + } + } +} diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_connection_plugin.py b/localstack-core/localstack/services/events/resource_providers/aws_events_connection_plugin.py new file mode 100644 index 0000000000000..c8b16c6c961a1 --- /dev/null 
+++ b/localstack-core/localstack/services/events/resource_providers/aws_events_connection_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EventsConnectionProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Events::Connection" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.events.resource_providers.aws_events_connection import ( + EventsConnectionProvider, + ) + + self.factory = EventsConnectionProvider diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus.py b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus.py new file mode 100644 index 0000000000000..5929d42f7252b --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus.py @@ -0,0 +1,126 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class EventsEventBusProperties(TypedDict): + Name: Optional[str] + Arn: Optional[str] + EventSourceName: Optional[str] + Id: Optional[str] + Policy: Optional[str] + Tags: Optional[list[TagEntry]] + + +class TagEntry(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EventsEventBusProvider(ResourceProvider[EventsEventBusProperties]): + TYPE = "AWS::Events::EventBus" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EventsEventBusProperties], + ) -> ProgressEvent[EventsEventBusProperties]: + """ + Create a new resource. 
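+
+        A hedged example of the desired state this handler consumes (illustrative;
+        note the implementation below only forwards Name to create_event_bus):
+
+            {"Name": "my-bus"}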
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - Name + + Create-only properties: + - /properties/Name + - /properties/EventSourceName + + Read-only properties: + - /properties/Id + - /properties/Policy + - /properties/Arn + + """ + model = request.desired_state + events = request.aws_client_factory.events + + response = events.create_event_bus(Name=model["Name"]) + model["Arn"] = response["EventBusArn"] + model["Id"] = model["Name"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EventsEventBusProperties], + ) -> ProgressEvent[EventsEventBusProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EventsEventBusProperties], + ) -> ProgressEvent[EventsEventBusProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + events = request.aws_client_factory.events + + events.delete_event_bus(Name=model["Name"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EventsEventBusProperties], + ) -> ProgressEvent[EventsEventBusProperties]: + """ + Update a resource + + + """ + raise NotImplementedError + + def list( + self, + request: ResourceRequest[EventsEventBusProperties], + ) -> ProgressEvent[EventsEventBusProperties]: + resources = request.aws_client_factory.events.list_event_buses() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + EventsEventBusProperties(Name=resource["Name"]) + for resource in resources["EventBuses"] + ], + ) diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus.schema.json b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus.schema.json new file mode 100644 index 0000000000000..eb5d780188a5f --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus.schema.json @@ -0,0 +1,62 @@ +{ + "typeName": "AWS::Events::EventBus", + "description": "Resource Type definition for AWS::Events::EventBus", + "additionalProperties": false, + "properties": { + "Policy": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "Arn": { + "type": "string" + }, + "EventSourceName": { + "type": "string" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/TagEntry" + } + }, + "Name": { + "type": "string" + } + }, + "definitions": { + "TagEntry": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "required": [ + "Name" + ], + "createOnlyProperties": [ + "/properties/Name", + "/properties/EventSourceName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/Policy", + "/properties/Arn" + ] +} diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus_plugin.py b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus_plugin.py new file mode 100644 index 0000000000000..25f94f1940bb2 --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbus_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from 
localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EventsEventBusProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Events::EventBus" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.events.resource_providers.aws_events_eventbus import ( + EventsEventBusProvider, + ) + + self.factory = EventsEventBusProvider diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy.py b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy.py new file mode 100644 index 0000000000000..9da54ceeff6bf --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy.py @@ -0,0 +1,155 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +from botocore.exceptions import ClientError + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.strings import short_uid + + +class EventsEventBusPolicyProperties(TypedDict): + StatementId: Optional[str] + Action: Optional[str] + Condition: Optional[Condition] + EventBusName: Optional[str] + Id: Optional[str] + Principal: Optional[str] + Statement: Optional[dict] + + +class Condition(TypedDict): + Key: Optional[str] + Type: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class EventsEventBusPolicyProvider(ResourceProvider[EventsEventBusPolicyProperties]): + TYPE = "AWS::Events::EventBusPolicy" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EventsEventBusPolicyProperties], + ) -> ProgressEvent[EventsEventBusPolicyProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - StatementId + + Create-only properties: + - /properties/EventBusName + - /properties/StatementId + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + events = request.aws_client_factory.events + + model["Id"] = f"EventBusPolicy-{short_uid()}" + + # either this field is set or all other fields (Action, Principal, etc.) 
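The comment above refers to the two template shapes that `AWS::Events::EventBusPolicy` accepts, which the branch just below dispatches on; a sketch with illustrative account IDs and ARNs:

```python
# Shape 1: a raw policy statement; wrapped into a policy document and passed
# to put_permission(Policy=...).
statement_based = {
    "StatementId": "AllowCrossAccountPutEvents",
    "Statement": {
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
        "Action": "events:PutEvents",
        "Resource": "arn:aws:events:us-east-1:000000000000:event-bus/default",
    },
}

# Shape 2: the flat fields; forwarded to
# put_permission(StatementId=..., Action=..., Principal=..., [Condition=...]).
field_based = {
    "StatementId": "AllowAccountPutEvents",
    "Action": "events:PutEvents",
    "Principal": "111122223333",
}
```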
+ statement = model.get("Statement") + optional_params = {"EventBusName": model.get("EventBusName")} + + if statement: + policy = { + "Version": "2012-10-17", + "Statement": [{"Sid": model["StatementId"], **statement}], + } + events.put_permission(Policy=json.dumps(policy), **optional_params) + else: + if model.get("Condition"): + optional_params.update({"Condition": model.get("Condition")}) + + events.put_permission( + StatementId=model["StatementId"], + Action=model["Action"], + Principal=model["Principal"], + **optional_params, + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EventsEventBusPolicyProperties], + ) -> ProgressEvent[EventsEventBusPolicyProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EventsEventBusPolicyProperties], + ) -> ProgressEvent[EventsEventBusPolicyProperties]: + """ + Delete a resource + + """ + model = request.desired_state + events = request.aws_client_factory.events + + statement_id = model["StatementId"] + event_bus_name = model.get("EventBusName") + + params = {"StatementId": statement_id, "RemoveAllPermissions": False} + + if event_bus_name: + params["EventBusName"] = event_bus_name + + # We are using try/except since at the moment + # CFN doesn't properly resolve dependency between resources + # so this resource could be deleted if parent resource was deleted first + + try: + events.remove_permission(**params) + except ClientError as err: + is_resource_not_found = err.response["Error"]["Code"] == "ResourceNotFoundException" + + if not is_resource_not_found: + raise + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EventsEventBusPolicyProperties], + ) -> ProgressEvent[EventsEventBusPolicyProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy.schema.json b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy.schema.json new file mode 100644 index 0000000000000..99bd136ddbdcd --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy.schema.json @@ -0,0 +1,58 @@ +{ + "typeName": "AWS::Events::EventBusPolicy", + "description": "Resource Type definition for AWS::Events::EventBusPolicy", + "additionalProperties": false, + "properties": { + "EventBusName": { + "type": "string" + }, + "Condition": { + "$ref": "#/definitions/Condition" + }, + "Action": { + "type": "string" + }, + "StatementId": { + "type": "string" + }, + "Statement": { + "type": "object" + }, + "Id": { + "type": "string" + }, + "Principal": { + "type": "string" + } + }, + "definitions": { + "Condition": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Type": { + "type": "string" + }, + "Key": { + "type": "string" + } + } + } + }, + "required": [ + "StatementId" + ], + "createOnlyProperties": [ + "/properties/EventBusName", + "/properties/StatementId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy_plugin.py 
b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy_plugin.py new file mode 100644 index 0000000000000..5368348690773 --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_eventbuspolicy_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EventsEventBusPolicyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Events::EventBusPolicy" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.events.resource_providers.aws_events_eventbuspolicy import ( + EventsEventBusPolicyProvider, + ) + + self.factory = EventsEventBusPolicyProvider diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_rule.py b/localstack-core/localstack/services/events/resource_providers/aws_events_rule.py new file mode 100644 index 0000000000000..a10d23360a41c --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_rule.py @@ -0,0 +1,323 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils import common + + +class EventsRuleProperties(TypedDict): + Arn: Optional[str] + Description: Optional[str] + EventBusName: Optional[str] + EventPattern: Optional[dict] + Id: Optional[str] + Name: Optional[str] + RoleArn: Optional[str] + ScheduleExpression: Optional[str] + State: Optional[str] + Targets: Optional[list[Target]] + + +class HttpParameters(TypedDict): + HeaderParameters: Optional[dict] + PathParameterValues: Optional[list[str]] + QueryStringParameters: Optional[dict] + + +class DeadLetterConfig(TypedDict): + Arn: Optional[str] + + +class RunCommandTarget(TypedDict): + Key: Optional[str] + Values: Optional[list[str]] + + +class RunCommandParameters(TypedDict): + RunCommandTargets: Optional[list[RunCommandTarget]] + + +class InputTransformer(TypedDict): + InputTemplate: Optional[str] + InputPathsMap: Optional[dict] + + +class KinesisParameters(TypedDict): + PartitionKeyPath: Optional[str] + + +class RedshiftDataParameters(TypedDict): + Database: Optional[str] + Sql: Optional[str] + DbUser: Optional[str] + SecretManagerArn: Optional[str] + StatementName: Optional[str] + WithEvent: Optional[bool] + + +class SqsParameters(TypedDict): + MessageGroupId: Optional[str] + + +class PlacementConstraint(TypedDict): + Expression: Optional[str] + Type: Optional[str] + + +class PlacementStrategy(TypedDict): + Field: Optional[str] + Type: Optional[str] + + +class CapacityProviderStrategyItem(TypedDict): + CapacityProvider: Optional[str] + Base: Optional[int] + Weight: Optional[int] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class AwsVpcConfiguration(TypedDict): + Subnets: Optional[list[str]] + AssignPublicIp: Optional[str] + SecurityGroups: Optional[list[str]] + + +class NetworkConfiguration(TypedDict): + AwsVpcConfiguration: Optional[AwsVpcConfiguration] + + +class EcsParameters(TypedDict): + TaskDefinitionArn: Optional[str] + CapacityProviderStrategy: 
Optional[list[CapacityProviderStrategyItem]] + EnableECSManagedTags: Optional[bool] + EnableExecuteCommand: Optional[bool] + Group: Optional[str] + LaunchType: Optional[str] + NetworkConfiguration: Optional[NetworkConfiguration] + PlacementConstraints: Optional[list[PlacementConstraint]] + PlacementStrategies: Optional[list[PlacementStrategy]] + PlatformVersion: Optional[str] + PropagateTags: Optional[str] + ReferenceId: Optional[str] + TagList: Optional[list[Tag]] + TaskCount: Optional[int] + + +class BatchRetryStrategy(TypedDict): + Attempts: Optional[int] + + +class BatchArrayProperties(TypedDict): + Size: Optional[int] + + +class BatchParameters(TypedDict): + JobDefinition: Optional[str] + JobName: Optional[str] + ArrayProperties: Optional[BatchArrayProperties] + RetryStrategy: Optional[BatchRetryStrategy] + + +class SageMakerPipelineParameter(TypedDict): + Name: Optional[str] + Value: Optional[str] + + +class SageMakerPipelineParameters(TypedDict): + PipelineParameterList: Optional[list[SageMakerPipelineParameter]] + + +class RetryPolicy(TypedDict): + MaximumEventAgeInSeconds: Optional[int] + MaximumRetryAttempts: Optional[int] + + +class Target(TypedDict): + Arn: Optional[str] + Id: Optional[str] + BatchParameters: Optional[BatchParameters] + DeadLetterConfig: Optional[DeadLetterConfig] + EcsParameters: Optional[EcsParameters] + HttpParameters: Optional[HttpParameters] + Input: Optional[str] + InputPath: Optional[str] + InputTransformer: Optional[InputTransformer] + KinesisParameters: Optional[KinesisParameters] + RedshiftDataParameters: Optional[RedshiftDataParameters] + RetryPolicy: Optional[RetryPolicy] + RoleArn: Optional[str] + RunCommandParameters: Optional[RunCommandParameters] + SageMakerPipelineParameters: Optional[SageMakerPipelineParameters] + SqsParameters: Optional[SqsParameters] + + +REPEATED_INVOCATION = "repeated_invocation" + +MATCHING_OPERATIONS = [ + "prefix", + "cidr", + "exists", + "suffix", + "anything-but", + "numeric", + "equals-ignore-case", + "wildcard", +] + + +def extract_rule_name(rule_id: str) -> str: + return rule_id.rsplit("|", maxsplit=1)[-1] + + +class EventsRuleProvider(ResourceProvider[EventsRuleProperties]): + TYPE = "AWS::Events::Rule" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[EventsRuleProperties], + ) -> ProgressEvent[EventsRuleProperties]: + """ + Create a new resource. 
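The composite physical ID convention used by this provider can be illustrated with a small, self-contained sketch of `extract_rule_name` (mirroring the helper defined above):

```python
def extract_rule_name(rule_id: str) -> str:
    # Rules on a custom bus get "<event_bus_name>|<rule_name>" as their physical ID,
    # rules on the default bus just "<rule_name>"; a right-split recovers the name.
    return rule_id.rsplit("|", maxsplit=1)[-1]

assert extract_rule_name("my-bus|my-rule") == "my-rule"
assert extract_rule_name("my-rule") == "my-rule"
```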
+ + Primary identifier fields: + - /properties/Id + + Create-only properties: + - /properties/Name + - /properties/EventBusName + + Read-only properties: + - /properties/Id + - /properties/Arn + + + """ + model = request.desired_state + events = request.aws_client_factory.events + + name = model.get("Name") + if not name: + name = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + if event_bus_name := model.get("EventBusName"): + model["Id"] = "|".join( + [ + event_bus_name, + name, + ] + ) + else: + model["Id"] = name + + attrs = [ + "ScheduleExpression", + "EventPattern", + "State", + "Description", + "Name", + "EventBusName", + ] + + params = util.select_attributes(model, attrs) + + def wrap_in_lists(o, **kwargs): + if isinstance(o, dict): + for k, v in o.items(): + if not isinstance(v, (dict, list)) and k not in MATCHING_OPERATIONS: + o[k] = [v] + return o + + pattern = params.get("EventPattern") + if isinstance(pattern, dict): + wrapped = common.recurse_object(pattern, wrap_in_lists) + params["EventPattern"] = json.dumps(wrapped) + + params["Name"] = name + result = events.put_rule(**params) + model["Arn"] = result["RuleArn"] + + # put targets + event_bus_name = model.get("EventBusName") + targets = model.get("Targets") or [] + + if targets: + put_targets_kwargs = {"Rule": extract_rule_name(model["Id"]), "Targets": targets} + if event_bus_name: + put_targets_kwargs["EventBusName"] = event_bus_name + + put_targets_kwargs = util.convert_request_kwargs( + put_targets_kwargs, + events.meta.service_model.operation_model("PutTargets").input_shape, + ) + + events.put_targets(**put_targets_kwargs) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[EventsRuleProperties], + ) -> ProgressEvent[EventsRuleProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[EventsRuleProperties], + ) -> ProgressEvent[EventsRuleProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + events = request.aws_client_factory.events + + rule_name = extract_rule_name(model["Id"]) + targets = events.list_targets_by_rule(Rule=rule_name)["Targets"] + target_ids = [tgt["Id"] for tgt in targets] + if targets: + events.remove_targets(Rule=rule_name, Ids=target_ids, Force=True) + events.delete_rule(Name=rule_name) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[EventsRuleProperties], + ) -> ProgressEvent[EventsRuleProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_rule.schema.json b/localstack-core/localstack/services/events/resource_providers/aws_events_rule.schema.json new file mode 100644 index 0000000000000..c3a3601ff7b49 --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_rule.schema.json @@ -0,0 +1,495 @@ +{ + "typeName": "AWS::Events::Rule", + "description": "Resource Type definition for AWS::Events::Rule", + "additionalProperties": false, + "properties": { + "EventBusName": { + "type": "string" + }, + "EventPattern": { + "type": "object" + }, + "ScheduleExpression": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "State": 
{ + "type": "string" + }, + "Targets": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/Target" + } + }, + "Id": { + "type": "string" + }, + "Arn": { + "type": "string" + }, + "RoleArn": { + "type": "string" + }, + "Name": { + "type": "string" + } + }, + "definitions": { + "CapacityProviderStrategyItem": { + "type": "object", + "additionalProperties": false, + "properties": { + "Base": { + "type": "integer" + }, + "Weight": { + "type": "integer" + }, + "CapacityProvider": { + "type": "string" + } + }, + "required": [ + "CapacityProvider" + ] + }, + "HttpParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "PathParameterValues": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "HeaderParameters": { + "type": "object", + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "QueryStringParameters": { + "type": "object", + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + } + } + }, + "DeadLetterConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "Arn": { + "type": "string" + } + } + }, + "RunCommandParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "RunCommandTargets": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/RunCommandTarget" + } + } + }, + "required": [ + "RunCommandTargets" + ] + }, + "PlacementStrategy": { + "type": "object", + "additionalProperties": false, + "properties": { + "Field": { + "type": "string" + }, + "Type": { + "type": "string" + } + } + }, + "InputTransformer": { + "type": "object", + "additionalProperties": false, + "properties": { + "InputTemplate": { + "type": "string" + }, + "InputPathsMap": { + "type": "object", + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + } + }, + "required": [ + "InputTemplate" + ] + }, + "KinesisParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "PartitionKeyPath": { + "type": "string" + } + }, + "required": [ + "PartitionKeyPath" + ] + }, + "BatchRetryStrategy": { + "type": "object", + "additionalProperties": false, + "properties": { + "Attempts": { + "type": "integer" + } + } + }, + "RedshiftDataParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "StatementName": { + "type": "string" + }, + "Database": { + "type": "string" + }, + "SecretManagerArn": { + "type": "string" + }, + "DbUser": { + "type": "string" + }, + "Sql": { + "type": "string" + }, + "WithEvent": { + "type": "boolean" + } + }, + "required": [ + "Database", + "Sql" + ] + }, + "Target": { + "type": "object", + "additionalProperties": false, + "properties": { + "InputPath": { + "type": "string" + }, + "HttpParameters": { + "$ref": "#/definitions/HttpParameters" + }, + "DeadLetterConfig": { + "$ref": "#/definitions/DeadLetterConfig" + }, + "RunCommandParameters": { + "$ref": "#/definitions/RunCommandParameters" + }, + "InputTransformer": { + "$ref": "#/definitions/InputTransformer" + }, + "KinesisParameters": { + "$ref": "#/definitions/KinesisParameters" + }, + "RoleArn": { + "type": "string" + }, + "RedshiftDataParameters": { + "$ref": "#/definitions/RedshiftDataParameters" + }, + "Input": { + "type": "string" + }, + "SqsParameters": { + "$ref": "#/definitions/SqsParameters" + }, + "EcsParameters": { + "$ref": "#/definitions/EcsParameters" + }, + "BatchParameters": { + "$ref": "#/definitions/BatchParameters" + }, + "Id": { + 
"type": "string" + }, + "Arn": { + "type": "string" + }, + "SageMakerPipelineParameters": { + "$ref": "#/definitions/SageMakerPipelineParameters" + }, + "RetryPolicy": { + "$ref": "#/definitions/RetryPolicy" + } + }, + "required": [ + "Id", + "Arn" + ] + }, + "PlacementConstraint": { + "type": "object", + "additionalProperties": false, + "properties": { + "Expression": { + "type": "string" + }, + "Type": { + "type": "string" + } + } + }, + "AwsVpcConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "SecurityGroups": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Subnets": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "AssignPublicIp": { + "type": "string" + } + }, + "required": [ + "Subnets" + ] + }, + "SqsParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "MessageGroupId": { + "type": "string" + } + }, + "required": [ + "MessageGroupId" + ] + }, + "RunCommandTarget": { + "type": "object", + "additionalProperties": false, + "properties": { + "Values": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Values", + "Key" + ] + }, + "EcsParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "PlatformVersion": { + "type": "string" + }, + "Group": { + "type": "string" + }, + "EnableECSManagedTags": { + "type": "boolean" + }, + "EnableExecuteCommand": { + "type": "boolean" + }, + "PlacementConstraints": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/PlacementConstraint" + } + }, + "PropagateTags": { + "type": "string" + }, + "TaskCount": { + "type": "integer" + }, + "PlacementStrategies": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/PlacementStrategy" + } + }, + "CapacityProviderStrategy": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/CapacityProviderStrategyItem" + } + }, + "LaunchType": { + "type": "string" + }, + "ReferenceId": { + "type": "string" + }, + "TagList": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "NetworkConfiguration": { + "$ref": "#/definitions/NetworkConfiguration" + }, + "TaskDefinitionArn": { + "type": "string" + } + }, + "required": [ + "TaskDefinitionArn" + ] + }, + "BatchParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "JobName": { + "type": "string" + }, + "RetryStrategy": { + "$ref": "#/definitions/BatchRetryStrategy" + }, + "ArrayProperties": { + "$ref": "#/definitions/BatchArrayProperties" + }, + "JobDefinition": { + "type": "string" + } + }, + "required": [ + "JobName", + "JobDefinition" + ] + }, + "NetworkConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "AwsVpcConfiguration": { + "$ref": "#/definitions/AwsVpcConfiguration" + } + } + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + } + }, + "SageMakerPipelineParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "PipelineParameterList": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/SageMakerPipelineParameter" + } + } + } + }, + "RetryPolicy": { + "type": "object", + "additionalProperties": false, + "properties": { + 
"MaximumEventAgeInSeconds": { + "type": "integer" + }, + "MaximumRetryAttempts": { + "type": "integer" + } + } + }, + "BatchArrayProperties": { + "type": "object", + "additionalProperties": false, + "properties": { + "Size": { + "type": "integer" + } + } + }, + "SageMakerPipelineParameter": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Name": { + "type": "string" + } + }, + "required": [ + "Value", + "Name" + ] + } + }, + "createOnlyProperties": [ + "/properties/Name", + "/properties/EventBusName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/Arn" + ] +} diff --git a/localstack-core/localstack/services/events/resource_providers/aws_events_rule_plugin.py b/localstack-core/localstack/services/events/resource_providers/aws_events_rule_plugin.py new file mode 100644 index 0000000000000..3fa01b6717fdc --- /dev/null +++ b/localstack-core/localstack/services/events/resource_providers/aws_events_rule_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class EventsRuleProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Events::Rule" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.events.resource_providers.aws_events_rule import EventsRuleProvider + + self.factory = EventsRuleProvider diff --git a/localstack-core/localstack/services/events/rule.py b/localstack-core/localstack/services/events/rule.py new file mode 100644 index 0000000000000..576cfc36e781c --- /dev/null +++ b/localstack-core/localstack/services/events/rule.py @@ -0,0 +1,245 @@ +import re +from typing import Callable, Optional + +from localstack.aws.api.events import ( + Arn, + EventBusName, + EventPattern, + LimitExceededException, + ManagedBy, + PutTargetsResultEntryList, + RemoveTargetsResultEntryList, + RoleArn, + RuleDescription, + RuleName, + RuleState, + ScheduleExpression, + TagList, + Target, + TargetIdList, + TargetList, +) +from localstack.services.events.models import Rule, TargetDict, ValidationException +from localstack.services.events.scheduler import JobScheduler, convert_schedule_to_cron + +TARGET_ID_REGEX = re.compile(r"^[\.\-_A-Za-z0-9]+$") +TARGET_ARN_REGEX = re.compile(r"arn:[\d\w:\-/]*") +CRON_REGEX = ( # borrowed from https://regex101.com/r/I80Eu0/1 + r"^(?:cron[(](?:(?:(?:[0-5]?[0-9])|[*])(?:(?:[-](?:(?:[0-5]?[0-9])|[*]))|(?:[/][0-9]+))?" + r"(?:[,](?:(?:[0-5]?[0-9])|[*])(?:(?:[-](?:(?:[0-5]?[0-9])|[*]))|(?:[/][0-9]+))?)*)[ ]+" + r"(?:(?:(?:[0-2]?[0-9])|[*])(?:(?:[-](?:(?:[0-2]?[0-9])|[*]))|(?:[/][0-9]+))?" 
+ r"(?:[,](?:(?:[0-2]?[0-9])|[*])(?:(?:[-](?:(?:[0-2]?[0-9])|[*]))|(?:[/][0-9]+))?)*)[ ]+" + r"(?:(?:[?][ ]+(?:(?:(?:[1]?[0-9])|(?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)|[*])" + r"(?:(?:[-](?:(?:[1]?[0-9])|(?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)|[*])(?:[/][0-9]+)?)|" + r"(?:[/][0-9]+))?(?:[,](?:(?:[1]?[0-9])|(?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)|[*])" + r"(?:(?:[-](?:(?:[1]?[0-9])|(?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)|[*])(?:[/][0-9]+)?)|" + r"(?:[/][0-9]+))?)*)[ ]+(?:(?:(?:[1-7]|(?:SUN|MON|TUE|WED|THU|FRI|SAT))[#][0-5])|" + r"(?:(?:(?:(?:[1-7]|(?:SUN|MON|TUE|WED|THU|FRI|SAT))L?)|[L*])(?:(?:[-](?:(?:(?:[1-7]|" + r"(?:SUN|MON|TUE|WED|THU|FRI|SAT))L?)|[L*]))|(?:[/][0-9]+))?(?:[,](?:(?:(?:[1-7]|" + r"(?:SUN|MON|TUE|WED|THU|FRI|SAT))L?)|[L*])(?:(?:[-](?:(?:(?:[1-7]|(?:SUN|MON|TUE|WED|THU|FRI|SAT))L?)|" + r"[L*]))|(?:[/][0-9]+))?)*)))|(?:(?:(?:(?:(?:[1-3]?[0-9])W?)|LW|[L*])(?:(?:[-](?:(?:(?:[1-3]?[0-9])W?)|" + r"LW|[L*]))|(?:[/][0-9]+))?(?:[,](?:(?:(?:[1-3]?[0-9])W?)|LW|[L*])(?:(?:[-](?:(?:(?:[1-3]?[0-9])W?)|" + r"LW|[L*]))|(?:[/][0-9]+))?)*)[ ]+(?:(?:(?:[1]?[0-9])|(?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)|" + r"[*])(?:(?:[-](?:(?:[1]?[0-9])|(?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)|[*])(?:[/][0-9]+)?)|" + r"(?:[/][0-9]+))?(?:[,](?:(?:[1]?[0-9])|(?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)|[*])" + r"(?:(?:[-](?:(?:[1]?[0-9])|(?:JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)|[*])(?:[/][0-9]+)?)|" + r"(?:[/][0-9]+))?)*)[ ]+[?]))[ ]+(?:(?:(?:[12][0-9]{3})|[*])(?:(?:[-](?:(?:[12][0-9]{3})|[*]))|" + r"(?:[/][0-9]+))?(?:[,](?:(?:[12][0-9]{3})|[*])(?:(?:[-](?:(?:[12][0-9]{3})|[*]))|(?:[/][0-9]+))?)*)[)])$" +) +RULE_SCHEDULE_CRON_REGEX = re.compile(CRON_REGEX) +RULE_SCHEDULE_RATE_REGEX = re.compile(r"^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)") + + +class RuleService: + name: RuleName + region: str + account_id: str + schedule_expression: ScheduleExpression | None + event_pattern: EventPattern | None + description: RuleDescription | None + role_arn: Arn | None + tags: TagList | None + event_bus_name: EventBusName | None + targets: TargetDict | None + managed_by: ManagedBy + rule: Rule + + def __init__(self, rule: Rule): + self.rule = rule + if rule.schedule_expression: + self.schedule_cron = self._get_schedule_cron(rule.schedule_expression) + else: + self.schedule_cron = None + + @classmethod + def create_rule_service( + cls, + name: RuleName, + region: Optional[str] = None, + account_id: Optional[str] = None, + schedule_expression: Optional[ScheduleExpression] = None, + event_pattern: Optional[EventPattern] = None, + state: Optional[RuleState] = None, + description: Optional[RuleDescription] = None, + role_arn: Optional[RoleArn] = None, + tags: Optional[TagList] = None, + event_bus_name: Optional[EventBusName] = None, + targets: Optional[TargetDict] = None, + managed_by: Optional[ManagedBy] = None, + ): + cls._validate_input(event_pattern, schedule_expression, event_bus_name) + # required to keep data and functionality separate for persistence + return cls( + Rule( + name, + region, + account_id, + schedule_expression, + event_pattern, + state, + description, + role_arn, + tags, + event_bus_name, + targets, + managed_by, + ) + ) + + @property + def arn(self) -> Arn: + return self.rule.arn + + @property + def state(self) -> RuleState: + return self.rule.state + + def enable(self) -> None: + self.rule.state = RuleState.ENABLED + + def disable(self) -> None: + self.rule.state = RuleState.DISABLED + + def 
add_targets(self, targets: TargetList) -> PutTargetsResultEntryList: + failed_entries = self.validate_targets_input(targets) + for target in targets: + target_id = target["Id"] + if target_id not in self.rule.targets and self._check_target_limit_reached(): + raise LimitExceededException( + "The requested resource exceeds the maximum number allowed." + ) + target = Target(**target) + self.rule.targets[target_id] = target + return failed_entries + + def remove_targets( + self, target_ids: TargetIdList, force: bool = False + ) -> RemoveTargetsResultEntryList: + delete_errors = [] + for target_id in target_ids: + if target_id in self.rule.targets: + if self.rule.managed_by and not force: + delete_errors.append( + { + "TargetId": target_id, + "ErrorCode": "ManagedRuleException", + "ErrorMessage": f"Rule '{self.rule.name}' is managed by an AWS service and can only be modified if force is True.", + } + ) + else: + del self.rule.targets[target_id] + else: + delete_errors.append( + { + "TargetId": target_id, + "ErrorCode": "ResourceNotFoundException", + "ErrorMessage": f"Rule '{self.rule.name}' does not have a target with the Id '{target_id}'.", + } + ) + return delete_errors + + def create_schedule_job(self, schedule_job_sender_func: Callable) -> None: + cron = self.schedule_cron + state = self.rule.state != "DISABLED" + self.job_id = JobScheduler.instance().add_job(schedule_job_sender_func, cron, state) + + def validate_targets_input(self, targets: TargetList) -> PutTargetsResultEntryList: + validation_errors = [] + for index, target in enumerate(targets): + id = target.get("Id") + arn = target.get("Arn", "") + if not TARGET_ID_REGEX.match(id): + validation_errors.append( + { + "TargetId": id, + "ErrorCode": "ValidationException", + "ErrorMessage": f"Value '{id}' at 'targets.{index + 1}.member.id' failed to satisfy constraint: Member must satisfy regular expression pattern: [\\.\\-_A-Za-z0-9]+", + } + ) + + if len(id) > 64: + validation_errors.append( + { + "TargetId": id, + "ErrorCode": "ValidationException", + "ErrorMessage": f"Value '{id}' at 'targets.{index + 1}.member.id' failed to satisfy constraint: Member must have length less than or equal to 64", + } + ) + + if not TARGET_ARN_REGEX.match(arn): + validation_errors.append( + { + "TargetId": id, + "ErrorCode": "ValidationException", + "ErrorMessage": f"Parameter {arn} is not valid. Reason: Provided Arn is not in correct format.", + } + ) + + if ":sqs:" in arn and arn.endswith(".fifo") and not target.get("SqsParameters"): + validation_errors.append( + { + "TargetId": id, + "ErrorCode": "ValidationException", + "ErrorMessage": f"Parameter(s) SqsParameters must be specified for target: {id}.", + } + ) + + return validation_errors + + @classmethod + def _validate_input( + cls, + event_pattern: Optional[EventPattern], + schedule_expression: Optional[ScheduleExpression], + event_bus_name: Optional[EventBusName] = "default", + ) -> None: + if not event_pattern and not schedule_expression: + raise ValidationException( + "Parameter(s) EventPattern or ScheduleExpression must be specified." + ) + + if schedule_expression: + if event_bus_name != "default": + raise ValidationException( + "ScheduleExpression is supported only on the default event bus."
+ ) + if not ( + RULE_SCHEDULE_CRON_REGEX.match(schedule_expression) + or RULE_SCHEDULE_RATE_REGEX.match(schedule_expression) + ): + raise ValidationException("Parameter ScheduleExpression is not valid.") + + def _check_target_limit_reached(self) -> bool: + if len(self.rule.targets) >= 5: + return True + return False + + def _get_schedule_cron(self, schedule_expression: ScheduleExpression) -> str: + try: + cron = convert_schedule_to_cron(schedule_expression) + return cron + except ValueError as e: + raise ValidationException("Parameter ScheduleExpression is not valid.") from e + + +RuleServiceDict = dict[Arn, RuleService] diff --git a/localstack-core/localstack/services/events/scheduler.py b/localstack-core/localstack/services/events/scheduler.py new file mode 100644 index 0000000000000..c71833f402d0b --- /dev/null +++ b/localstack-core/localstack/services/events/scheduler.py @@ -0,0 +1,136 @@ +import logging +import re +import threading + +from crontab import CronTab + +from localstack.utils.common import short_uid +from localstack.utils.run import FuncThread + +LOG = logging.getLogger(__name__) + +CRON_REGEX = re.compile(r"\s*cron\s*\(([^\)]*)\)\s*") +RATE_REGEX = re.compile(r"\s*rate\s*\(([^\)]*)\)\s*") + + +def convert_schedule_to_cron(schedule): + """Convert Events schedule like "cron(0 20 * * ? *)" or "rate(5 minutes)" """ + cron_match = CRON_REGEX.match(schedule) + if cron_match: + return cron_match.group(1) + + rate_match = RATE_REGEX.match(schedule) + if rate_match: + rate = rate_match.group(1) + rate_value, rate_unit = re.split(r"\s+", rate.strip()) + rate_value = int(rate_value) + + if rate_value < 1: + raise ValueError("Rate value must be larger than 0") + # see https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-rate-expressions.html + if rate_value == 1 and rate_unit.endswith("s"): + raise ValueError("If the value is equal to 1, then the unit must be singular") + if rate_value > 1 and not rate_unit.endswith("s"): + raise ValueError("If the value is greater than 1, the unit must be plural") + + if "minute" in rate_unit: + return f"*/{rate_value} * * * *" + if "hour" in rate_unit: + return f"0 */{rate_value} * * *" + if "day" in rate_unit: + return f"0 0 */{rate_value} * *" + + # TODO: cover via test + # raise ValueError(f"Unable to parse events schedule expression: {schedule}") + + return schedule + + +class Job: + def __init__(self, job_func, schedule, enabled): + self.job_func = job_func + self.schedule = schedule + self.job_id = short_uid() + self.is_enabled = enabled + + def run(self): + try: + if self.should_run_now() and self.is_enabled: + self.do_run() + except Exception as e: + LOG.debug("Unable to run scheduled function %s: %s", self.job_func, e) + + def should_run_now(self): + schedule = CronTab(self.schedule) + delay_secs = schedule.next( + default_utc=True + ) # utc default time format for rule schedule cron + # TODO fix execute on exact cron time + return delay_secs is not None and delay_secs < 60 + + def do_run(self): + FuncThread(self.job_func, name="events-job-run").start() + + +class JobScheduler: + _instance = None + + def __init__(self): + # TODO: introduce RLock for mutating jobs list + self.jobs = [] + self.thread = None + self._stop_event = threading.Event() + + def add_job(self, job_func, schedule, enabled=True): + job = Job(job_func, schedule, enabled=enabled) + self.jobs.append(job) + return job.job_id + + def get_job(self, job_id) -> Job | None: + for job in self.jobs: + if job.job_id == job_id: + return job + return None + + def 
disable_job(self, job_id): + for job in self.jobs: + if job.job_id == job_id: + job.is_enabled = False + break + + def cancel_job(self, job_id): + self.jobs = [job for job in self.jobs if job.job_id != job_id] + + def loop(self, *args): + while not self._stop_event.is_set(): + try: + for job in list(self.jobs): + job.run() + except Exception: + pass + # This is a simple heuristic to cause the loop to run approximately every minute + # TODO: we should keep track of jobs execution times, to avoid duplicate executions + self._stop_event.wait(timeout=59.9) + + def start_loop(self): + self.thread = FuncThread(self.loop, name="events-jobscheduler-loop") + self.thread.start() + + @classmethod + def instance(cls): + if not cls._instance: + cls._instance = JobScheduler() + return cls._instance + + @classmethod + def start(cls): + instance = cls.instance() + if not instance.thread: + instance.start_loop() + return instance + + @classmethod + def shutdown(cls): + instance = cls.instance() + if instance.thread: + instance._stop_event.set() diff --git a/localstack-core/localstack/services/events/target.py b/localstack-core/localstack/services/events/target.py new file mode 100644 index 0000000000000..fe18ce999412c --- /dev/null +++ b/localstack-core/localstack/services/events/target.py @@ -0,0 +1,749 @@ +import datetime +import json +import logging +import re +import uuid +from abc import ABC, abstractmethod +from typing import Any, Dict, Set, Type +from urllib.parse import urlencode + +import requests +from botocore.client import BaseClient + +from localstack import config +from localstack.aws.api.events import ( + Arn, + InputTransformer, + RuleName, + Target, + TargetInputPath, +) +from localstack.aws.connect import connect_to +from localstack.services.events.api_destination import add_api_destination_authorization +from localstack.services.events.models import ( + FormattedEvent, + TransformedEvent, + ValidationException, +) +from localstack.services.events.utils import ( + event_time_to_time_string, + get_trace_header_encoded_region_account, + is_nested_in_string, + to_json_str, +) +from localstack.utils import collections +from localstack.utils.aws.arns import ( + extract_account_id_from_arn, + extract_region_from_arn, + extract_service_from_arn, + firehose_name, + parse_arn, + sqs_queue_url_for_arn, +) +from localstack.utils.aws.client_types import ServicePrincipal +from localstack.utils.aws.message_forwarding import ( + add_target_http_parameters, +) +from localstack.utils.json import extract_jsonpath +from localstack.utils.strings import to_bytes +from localstack.utils.time import now_utc +from localstack.utils.xray.trace_header import TraceHeader + +LOG = logging.getLogger(__name__) + +# https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-transform-target-input.html#eb-transform-input-predefined +AWS_PREDEFINED_PLACEHOLDERS_STRING_VALUES = { + "aws.events.rule-arn", + "aws.events.rule-name", + "aws.events.event.ingestion-time", +} +AWS_PREDEFINED_PLACEHOLDERS_JSON_VALUES = {"aws.events.event", "aws.events.event.json"} + +PREDEFINED_PLACEHOLDERS: Set[str] = AWS_PREDEFINED_PLACEHOLDERS_STRING_VALUES.union( + AWS_PREDEFINED_PLACEHOLDERS_JSON_VALUES +) + +TRANSFORMER_PLACEHOLDER_PATTERN = re.compile(r"<(.*?)>") +TRACE_HEADER_KEY = "X-Amzn-Trace-Id" + + +def transform_event_with_target_input_path( + input_path: TargetInputPath, event: FormattedEvent +) -> TransformedEvent: + formatted_event = extract_jsonpath(event, input_path) + return formatted_event + + +def get_template_replacements( + 
input_transformer: InputTransformer, event: FormattedEvent +) -> dict[str, Any]: + """Extracts values from the event using the input paths map keys and places them in the input template dict.""" + template_replacements = {} + transformer_path_map = input_transformer.get("InputPathsMap", {}) + for placeholder, transformer_path in transformer_path_map.items(): + if placeholder in PREDEFINED_PLACEHOLDERS: + continue + value = extract_jsonpath(event, transformer_path) + if not value: + value = "" # default value is empty string + template_replacements[placeholder] = value + return template_replacements + + +def replace_template_placeholders( + template: str, replacements: dict[str, Any], is_json_template: bool +) -> TransformedEvent: + """Replace placeholders defined by <key> in the template with the values from the replacements dict. + Can handle a single template string or a template dict.""" + + def replace_placeholder(match): + key = match.group(1) + value = replacements.get(key, "") # handle placeholders that are not defined + if isinstance(value, datetime.datetime): + return event_time_to_time_string(value) + if isinstance(value, dict): + json_str = to_json_str(value).replace('\\"', '"') + if is_json_template: + return json_str + return json_str.replace('"', "") + if isinstance(value, list): + if is_json_template: + return json.dumps(value) + return f"[{','.join(value)}]" + if is_nested_in_string(template, match): + return value + if is_json_template: + return json.dumps(value) + return value + + formatted_template = TRANSFORMER_PLACEHOLDER_PATTERN.sub(replace_placeholder, template).replace( + "\\n", "\n" + ) + + if is_json_template: + try: + loaded_json_template = json.loads(formatted_template) + return loaded_json_template + except json.JSONDecodeError: + LOG.info( + json.dumps( + { + "InfoCode": "InternalInfoEvents at transform_event", + "InfoMessage": f"Replaced template is not valid json: {formatted_template}", + } + ) + ) + else: + return formatted_template[1:-1] + + +class TargetSender(ABC): + target: Target + rule_arn: Arn + rule_name: RuleName + service: str + + region: str # region of the event bus + account_id: str # account id of the event bus + target_region: str + target_account_id: str + _client: BaseClient | None + + def __init__( + self, + target: Target, + rule_arn: Arn, + rule_name: RuleName, + service: str, + region: str, + account_id: str, + ): + self.target = target + self.rule_arn = rule_arn + self.rule_name = rule_name + self.service = service + self.region = region + self.account_id = account_id + + self.target_region = extract_region_from_arn(self.target["Arn"]) + self.target_account_id = extract_account_id_from_arn(self.target["Arn"]) + + self._validate_input(target) + self._client: BaseClient | None = None + + @property + def arn(self): + return self.target["Arn"] + + @property + def target_id(self): + return self.target["Id"] + + @property + def unique_id(self): + """Necessary to distinguish between targets with the same ARN but for different rules. + The unique_id is a combination of the rule ARN and the Target Id.
+ This is necessary since input path and input transformer can be different for the same target ARN, + attached to different rules.""" + return f"{self.rule_arn}-{self.target_id}" + + @property + def client(self): + """Lazy initialization of the internal boto client factory.""" + if self._client is None: + self._client = self._initialize_client() + return self._client + + @abstractmethod + def send_event(self, event: FormattedEvent | TransformedEvent, trace_header: TraceHeader): + pass + + def process_event(self, event: FormattedEvent, trace_header: TraceHeader): + """Processes the event and sends it to the target.""" + if input_ := self.target.get("Input"): + event = json.loads(input_) + if isinstance(event, dict): + event.pop("event-bus-name", None) + if not input_: + if input_path := self.target.get("InputPath"): + event = transform_event_with_target_input_path(input_path, event) + if input_transformer := self.target.get("InputTransformer"): + event = self.transform_event_with_target_input_transformer(input_transformer, event) + if event: + self.send_event(event, trace_header) + else: + LOG.info("No event to send to target %s", self.target.get("Id")) + + def transform_event_with_target_input_transformer( + self, input_transformer: InputTransformer, event: FormattedEvent + ) -> TransformedEvent: + input_template = input_transformer["InputTemplate"] + template_replacements = get_template_replacements(input_transformer, event) + predefined_template_replacements = self._get_predefined_template_replacements(event) + template_replacements.update(predefined_template_replacements) + + is_json_template = input_template.strip().startswith(("{")) + populated_template = replace_template_placeholders( + input_template, template_replacements, is_json_template + ) + + return populated_template + + def _validate_input(self, target: Target): + """Provide a default implementation extended for each target based on specifications.""" + # TODO: for Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. + if "InputPath" in target and "InputTransformer" in target: + raise ValidationException( + f"Only one of Input, InputPath, or InputTransformer must be provided for target {target.get('Id')}." + ) + if input_transformer := target.get("InputTransformer"): + self._validate_input_transformer(input_transformer) + + def _initialize_client(self) -> BaseClient: + """Initializes internal boto client. + If a role from a target is provided, the client will be initialized with the assumed role. + If no role is provided, the client will be initialized with the account ID and region.
+ In both cases EventBridge is requested as the service principal.""" + service_principal = ServicePrincipal.events + role_arn = self.target.get("RoleArn") + if role_arn: # required for cross account + # assumed role sessions expire after 6 hours in AWS, currently no expiration in LocalStack + client_factory = connect_to.with_assumed_role( + role_arn=role_arn, + service_principal=service_principal, + region_name=self.region, + ) + else: + client_factory = connect_to(aws_access_key_id=self.account_id, region_name=self.region) + client = client_factory.get_client(self.service) + client = client.request_metadata( + service_principal=service_principal, source_arn=self.rule_arn + ) + self._register_client_hooks(client) + return client + + def _validate_input_transformer(self, input_transformer: InputTransformer): + # TODO: cover via test + # if "InputTemplate" not in input_transformer: + # raise ValueError("InputTemplate is required for InputTransformer") + input_template = input_transformer["InputTemplate"] + input_paths_map = input_transformer.get("InputPathsMap", {}) + placeholders = TRANSFORMER_PLACEHOLDER_PATTERN.findall(input_template) + for placeholder in placeholders: + if placeholder not in input_paths_map and placeholder not in PREDEFINED_PLACEHOLDERS: + raise ValidationException( + f"InputTemplate for target {self.target.get('Id')} contains invalid placeholder {placeholder}." + ) + + def _get_predefined_template_replacements(self, event: FormattedEvent) -> dict[str, Any]: + """Extracts predefined values from the event.""" + predefined_template_replacements = {} + predefined_template_replacements["aws.events.rule-arn"] = self.rule_arn + predefined_template_replacements["aws.events.rule-name"] = self.rule_name + predefined_template_replacements["aws.events.event.ingestion-time"] = event["time"] + predefined_template_replacements["aws.events.event"] = { + "detailType" if k == "detail-type" else k: v # detail-type is returned as detailType + for k, v in event.items() + if k != "detail" # detail is not part of the .event placeholder + } + predefined_template_replacements["aws.events.event.json"] = event + + return predefined_template_replacements + + def _register_client_hooks(self, client: BaseClient): + """Register client hooks to inject the trace header into requests.""" + + def handle_extract_params(params, context, **kwargs): + trace_header = params.pop("TraceHeader", None) + if trace_header is None: + return + context[TRACE_HEADER_KEY] = trace_header.to_header_str() + + def handle_inject_headers(params, context, **kwargs): + if trace_header_str := context.pop(TRACE_HEADER_KEY, None): + params["headers"][TRACE_HEADER_KEY] = trace_header_str + + client.meta.events.register( + f"provide-client-params.{self.service}.*", handle_extract_params + ) + client.meta.events.register(f"before-call.{self.service}.*", handle_inject_headers) + + +TargetSenderDict = dict[str, TargetSender] # rule_arn-target_id as global unique id + +# Target senders are ordered alphabetically by service name + + +class ApiGatewayTargetSender(TargetSender): + """ + ApiGatewayTargetSender is a TargetSender that sends events to an API Gateway target.
+ """ + + PROHIBITED_HEADERS = [ + "authorization", + "connection", + "content-encoding", + "content-length", + "host", + "max-forwards", + "te", + "transfer-encoding", + "trailer", + "upgrade", + "via", + "www-authenticate", + "x-forwarded-for", + ] # https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-api-gateway-target.html + + ALLOWED_HTTP_METHODS = {"GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"} + + def send_event(self, event, trace_header): + # Parse the ARN to extract api_id, stage_name, http_method, and resource path + # Example ARN: arn:{partition}:execute-api:{region}:{account_id}:{api_id}/{stage_name}/{method}/{resource_path} + arn_parts = parse_arn(self.target["Arn"]) + api_gateway_info = arn_parts["resource"] # e.g., 'myapi/dev/POST/pets/*/*' + api_gateway_info_parts = api_gateway_info.split("/") + + api_id = api_gateway_info_parts[0] + stage_name = api_gateway_info_parts[1] + http_method = api_gateway_info_parts[2].upper() + resource_path_parts = api_gateway_info_parts[3:] # may contain wildcards + + if http_method not in self.ALLOWED_HTTP_METHODS: + LOG.error("Unsupported HTTP method: %s", http_method) + return + + # Replace wildcards in resource path with PathParameterValues + path_params_values = self.target.get("HttpParameters", {}).get("PathParameterValues", []) + resource_path_segments = [] + path_param_index = 0 + for part in resource_path_parts: + if part == "*": + if path_param_index < len(path_params_values): + resource_path_segments.append(path_params_values[path_param_index]) + path_param_index += 1 + else: + # Use empty string if no path parameter is provided + resource_path_segments.append("") + else: + resource_path_segments.append(part) + resource_path = "/".join(resource_path_segments) + + # Ensure resource path starts and ends with '/' + resource_path = f"/{resource_path.strip('/')}/" + + # Construct query string parameters + query_params = self.target.get("HttpParameters", {}).get("QueryStringParameters", {}) + query_string = urlencode(query_params) if query_params else "" + + # Construct headers + headers = self.target.get("HttpParameters", {}).get("HeaderParameters", {}) + headers = {k: v for k, v in headers.items() if k.lower() not in self.PROHIBITED_HEADERS} + # Add Host header to ensure proper routing in LocalStack + + host = f"{api_id}.execute-api.localhost.localstack.cloud" + headers["Host"] = host + + # Ensure Content-Type is set + headers.setdefault("Content-Type", "application/json") + + # Construct the full URL + resource_path = f"/{resource_path.strip('/')}/" + + # Construct the full URL using urljoin + from urllib.parse import urljoin + + base_url = config.internal_service_url() + base_path = f"/{stage_name}" + full_path = urljoin(base_path + "/", resource_path.lstrip("/")) + url = urljoin(base_url + "/", full_path.lstrip("/")) + + if query_string: + url += f"?{query_string}" + + # Serialize the event, converting datetime objects to strings + event_json = json.dumps(event, default=str) + + # Add trace header + headers[TRACE_HEADER_KEY] = trace_header.to_header_str() + + # Send the HTTP request + response = requests.request( + method=http_method, url=url, headers=headers, data=event_json, timeout=5 + ) + if not response.ok: + LOG.warning( + "API Gateway target invocation failed with status code %s, response: %s", + response.status_code, + response.text, + ) + + def _validate_input(self, target: Target): + super()._validate_input(target) + # TODO: cover via test + # if not collections.get_safe(target, "$.RoleArn"): + # raise 
ValueError("RoleArn is required for ApiGateway target") + + def _get_predefined_template_replacements(self, event: Dict[str, Any]) -> Dict[str, Any]: + """Extracts predefined values from the event.""" + predefined_template_replacements = {} + predefined_template_replacements["aws.events.rule-arn"] = self.rule_arn + predefined_template_replacements["aws.events.rule-name"] = self.rule_name + predefined_template_replacements["aws.events.event.ingestion-time"] = event.get("time", "") + predefined_template_replacements["aws.events.event"] = { + "detailType" if k == "detail-type" else k: v for k, v in event.items() if k != "detail" + } + predefined_template_replacements["aws.events.event.json"] = event + + return predefined_template_replacements + + +class AppSyncTargetSender(TargetSender): + def send_event(self, event, trace_header): + raise NotImplementedError("AppSync target is not yet implemented") + + +class BatchTargetSender(TargetSender): + def send_event(self, event, trace_header): + raise NotImplementedError("Batch target is not yet implemented") + + def _validate_input(self, target: Target): + # TODO: cover via test and fix (only required if we have BatchParameters) + # if not collections.get_safe(target, "$.BatchParameters.JobDefinition"): + # raise ValueError("BatchParameters.JobDefinition is required for Batch target") + # if not collections.get_safe(target, "$.BatchParameters.JobName"): + # raise ValueError("BatchParameters.JobName is required for Batch target") + pass + + +class ECSTargetSender(TargetSender): + def send_event(self, event, trace_header): + raise NotImplementedError("ECS target is a pro feature, please use LocalStack Pro") + + def _validate_input(self, target: Target): + super()._validate_input(target) + # TODO: cover via test + # if not collections.get_safe(target, "$.EcsParameters.TaskDefinitionArn"): + # raise ValueError("EcsParameters.TaskDefinitionArn is required for ECS target") + + +class EventsTargetSender(TargetSender): + def send_event(self, event, trace_header): + # TODO add validation and tests for eventbridge to eventbridge requires Detail, DetailType, and Source + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/events/client/put_events.html + source = self._get_source(event) + detail_type = self._get_detail_type(event) + detail = event.get("detail", event) + resources = self._get_resources(event) + entries = [ + { + "EventBusName": self.target["Arn"], # use arn for target account and region + "Source": source, + "DetailType": detail_type, + "Detail": json.dumps(detail), + "Resources": resources, + } + ] + if encoded_original_id := get_trace_header_encoded_region_account( + event, self.region, self.account_id, self.target_region, self.target_account_id + ): + entries[0]["TraceHeader"] = encoded_original_id + + self.client.put_events(Entries=entries, TraceHeader=trace_header) + + def _get_source(self, event: FormattedEvent | TransformedEvent) -> str: + if isinstance(event, dict) and (source := event.get("source")): + return source + else: + return self.service or "" + + def _get_detail_type(self, event: FormattedEvent | TransformedEvent) -> str: + if isinstance(event, dict) and (detail_type := event.get("detail-type")): + return detail_type + else: + return "" + + def _get_resources(self, event: FormattedEvent | TransformedEvent) -> list[str]: + if isinstance(event, dict) and (resources := event.get("resources")): + return resources + else: + return [] + + +class EventsApiDestinationTargetSender(TargetSender): + def 
send_event(self, event, trace_header): + """Send an event to an EventBridge API destination + See https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-api-destinations.html""" + target_arn = self.target["Arn"] + target_region = extract_region_from_arn(target_arn) + target_account_id = extract_account_id_from_arn(target_arn) + api_destination_name = target_arn.split(":")[-1].split("/")[1] + + events_client = connect_to( + aws_access_key_id=target_account_id, region_name=target_region + ).events + destination = events_client.describe_api_destination(Name=api_destination_name) + + # get destination endpoint details + method = destination.get("HttpMethod", "GET") + endpoint = destination.get("InvocationEndpoint") + state = destination.get("ApiDestinationState") or "ACTIVE" + + LOG.debug( + 'Calling EventBridge API destination (state "%s"): %s %s', state, method, endpoint + ) + headers = { + # default headers AWS sends with every api destination call + "User-Agent": "Amazon/EventBridge/ApiDestinations", + "Content-Type": "application/json; charset=utf-8", + "Range": "bytes=0-1048575", + "Accept-Encoding": "gzip,deflate", + "Connection": "close", + } + + endpoint = add_api_destination_authorization(destination, headers, event) + if http_parameters := self.target.get("HttpParameters"): + endpoint = add_target_http_parameters(http_parameters, endpoint, headers, event) + + # add trace header + headers[TRACE_HEADER_KEY] = trace_header.to_header_str() + + result = requests.request( + method=method, url=endpoint, data=json.dumps(event or {}), headers=headers + ) + if result.status_code >= 400: + LOG.debug( + "Received code %s forwarding events: %s %s", result.status_code, method, endpoint + ) + if result.status_code == 429 or 500 <= result.status_code <= 600: + pass # TODO: retry logic (only retry on 429 and 5xx response status) + + +class FirehoseTargetSender(TargetSender): + def send_event(self, event, trace_header): + delivery_stream_name = firehose_name(self.target["Arn"]) + + self.client.put_record( + DeliveryStreamName=delivery_stream_name, + Record={"Data": to_bytes(to_json_str(event))}, + ) + + +class KinesisTargetSender(TargetSender): + def send_event(self, event, trace_header): + partition_key_path = collections.get_safe( + self.target, + "$.KinesisParameters.PartitionKeyPath", + default_value="$.id", + ) + stream_name = self.target["Arn"].split("/")[-1] + partition_key = collections.get_safe(event, partition_key_path, event["id"]) + + self.client.put_record( + StreamName=stream_name, + Data=to_bytes(to_json_str(event)), + PartitionKey=partition_key, + ) + + def _validate_input(self, target: Target): + super()._validate_input(target) + # TODO: cover via tests + # if not collections.get_safe(target, "$.RoleArn"): + # raise ValueError("RoleArn is required for Kinesis target") + # if not collections.get_safe(target, "$.KinesisParameters.PartitionKeyPath"): + # raise ValueError("KinesisParameters.PartitionKeyPath is required for Kinesis target") + + +class LambdaTargetSender(TargetSender): + def send_event(self, event, trace_header): + self.client.invoke( + FunctionName=self.target["Arn"], + Payload=to_bytes(to_json_str(event)), + InvocationType="Event", + TraceHeader=trace_header, + ) + + +class LogsTargetSender(TargetSender): + def send_event(self, event, trace_header): + log_group_name = self.target["Arn"].split(":")[6] + log_stream_name = str(uuid.uuid4()) # Unique log stream name + + self.client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name) + 
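# write the formatted event as a single log message into the fresh stream, timestamped with the current time in epoch millis +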
self.client.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + { + "timestamp": now_utc(millis=True), + "message": to_json_str(event), + } + ], + ) + + +class RedshiftTargetSender(TargetSender): + def send_event(self, event, trace_header): + raise NotImplementedError("Redshift target is not yet implemented") + + def _validate_input(self, target: Target): + super()._validate_input(target) + # TODO: cover via test + # if not collections.get_safe(target, "$.RedshiftDataParameters.Database"): + # raise ValueError("RedshiftDataParameters.Database is required for Redshift target") + + +class SagemakerTargetSender(TargetSender): + def send_event(self, event, trace_header): + raise NotImplementedError("Sagemaker target is not yet implemented") + + +class SnsTargetSender(TargetSender): + def send_event(self, event, trace_header): + self.client.publish(TopicArn=self.target["Arn"], Message=to_json_str(event)) + + +class SqsTargetSender(TargetSender): + def send_event(self, event, trace_header): + queue_url = sqs_queue_url_for_arn(self.target["Arn"]) + msg_group_id = self.target.get("SqsParameters", {}).get("MessageGroupId", None) + kwargs = {"MessageGroupId": msg_group_id} if msg_group_id else {} + + self.client.send_message( + QueueUrl=queue_url, + MessageBody=to_json_str(event), + **kwargs, + ) + + +class StatesTargetSender(TargetSender): + """Step Functions Target Sender""" + + def send_event(self, event, trace_header): + self.service = "stepfunctions" + + self.client.start_execution( + stateMachineArn=self.target["Arn"], name=event["id"], input=to_json_str(event) + ) + + def _validate_input(self, target: Target): + super()._validate_input(target) + # TODO: cover via test + # if not collections.get_safe(target, "$.RoleArn"): + # raise ValueError("RoleArn is required for StepFunctions target") + + +class SystemsManagerSender(TargetSender): + """EC2 Run Command Target Sender""" + + def send_event(self, event, trace_header): + raise NotImplementedError("Systems Manager target is not yet implemented") + + def _validate_input(self, target: Target): + super()._validate_input(target) + # TODO: cover via test + # if not collections.get_safe(target, "$.RoleArn"): + # raise ValueError( + # "RoleArn is required for SystemManager target to invoke a EC2 run command" + # ) + # if not collections.get_safe(target, "$.RunCommandParameters.RunCommandTargets"): + # raise ValueError( + # "RunCommandParameters.RunCommandTargets is required for Systems Manager target" + # ) + + +class TargetSenderFactory: + # supported targets: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-targets.html + target: Target + rule_arn: Arn + rule_name: RuleName + region: str + account_id: str + + target_map = { + "apigateway": ApiGatewayTargetSender, + "appsync": AppSyncTargetSender, + "batch": BatchTargetSender, + "ecs": ECSTargetSender, + "events": EventsTargetSender, + "events_api_destination": EventsApiDestinationTargetSender, + "firehose": FirehoseTargetSender, + "kinesis": KinesisTargetSender, + "lambda": LambdaTargetSender, + "logs": LogsTargetSender, + "redshift": RedshiftTargetSender, + "sns": SnsTargetSender, + "sqs": SqsTargetSender, + "sagemaker": SagemakerTargetSender, + "ssm": SystemsManagerSender, + "states": StatesTargetSender, + "execute-api": ApiGatewayTargetSender, + # TODO custom endpoints via http target + } + + def __init__( + self, target: Target, rule_arn: Arn, rule_name: RuleName, region: str, account_id: str + ): + self.target = target + self.rule_arn = 
rule_arn
+        self.rule_name = rule_name
+        self.region = region
+        self.account_id = account_id
+
+    @classmethod
+    def register_target_sender(cls, service_name: str, sender_class: Type[TargetSender]):
+        cls.target_map[service_name] = sender_class
+
+    def get_target_sender(self) -> TargetSender:
+        target_arn = self.target["Arn"]
+        service = extract_service_from_arn(target_arn)
+        if ":api-destination/" in target_arn or ":destination/" in target_arn:
+            service = "events_api_destination"
+        if service in self.target_map:
+            target_sender_class = self.target_map[service]
+        else:
+            raise Exception(f"Unsupported target for Service: {service}")
+        target_sender = target_sender_class(
+            self.target, self.rule_arn, self.rule_name, service, self.region, self.account_id
+        )
+        return target_sender
diff --git a/localstack-core/localstack/services/events/utils.py b/localstack-core/localstack/services/events/utils.py
new file mode 100644
index 0000000000000..5ac8e835b136f
--- /dev/null
+++ b/localstack-core/localstack/services/events/utils.py
@@ -0,0 +1,296 @@
+import json
+import logging
+import re
+from datetime import datetime, timezone
+from typing import Any, Dict, Optional
+
+from botocore.utils import ArnParser
+
+from localstack.aws.api import RequestContext
+from localstack.aws.api.events import (
+    ArchiveName,
+    Arn,
+    ConnectionArn,
+    ConnectionName,
+    EventBusName,
+    EventBusNameOrArn,
+    EventTime,
+    PutEventsRequestEntry,
+    RuleArn,
+    Timestamp,
+)
+from localstack.services.events.models import (
+    FormattedEvent,
+    ResourceType,
+    TransformedEvent,
+    ValidationException,
+)
+from localstack.utils.aws.arns import ARN_PARTITION_REGEX, parse_arn
+from localstack.utils.strings import long_uid
+
+LOG = logging.getLogger(__name__)
+
+RULE_ARN_CUSTOM_EVENT_BUS_PATTERN = re.compile(
+    rf"{ARN_PARTITION_REGEX}:events:[a-z0-9-]+:\d{{12}}:rule/[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+$"
+)
+
+RULE_ARN_ARCHIVE_PATTERN = re.compile(
+    rf"{ARN_PARTITION_REGEX}:events:[a-z0-9-]+:\d{{12}}:archive/[a-zA-Z0-9_-]+$"
+)
+ARCHIVE_NAME_ARN_PATTERN = re.compile(
+    rf"{ARN_PARTITION_REGEX}:events:[a-z0-9-]+:\d{{12}}:archive/(?P<name>.+)$"
+)
+CONNECTION_NAME_ARN_PATTERN = re.compile(
+    rf"{ARN_PARTITION_REGEX}:events:[a-z0-9-]+:\d{{12}}:connection/(?P<name>[^/]+)/(?P<id>[^/]+)$"
+)
+
+TARGET_ID_PATTERN = re.compile(r"[\.\-_A-Za-z0-9]+")
+
+
+class EventJSONEncoder(json.JSONEncoder):
+    """This JSON encoder is used to serialize datetime objects
+    of an EventBridge event to time strings."""
+
+    def default(self, obj):
+        if isinstance(obj, datetime):
+            return event_time_to_time_string(obj)
+        return super().default(obj)
+
+
+def to_json_str(obj: Any, separators: Optional[tuple[str, str]] = (",", ":")) -> str:
+    return json.dumps(obj, cls=EventJSONEncoder, separators=separators)
+
+
+def extract_region_and_account_id(
+    name_or_arn: EventBusNameOrArn, context: RequestContext
+) -> tuple[str, str]:
+    """Returns the region and account id from the arn,
+    or falls back on the region and account id of the context"""
+    account_id = None
+    region = None
+    if ArnParser.is_arn(name_or_arn):
+        parsed_arn = parse_arn(name_or_arn)
+        region = parsed_arn.get("region")
+        account_id = parsed_arn.get("account")
+    if not account_id or not region:
+        region = context.get("region")
+        account_id = context.get("account_id")
+    return region, account_id
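A small illustrative sketch (not part of the diff above) of the fallback behavior of extract_region_and_account_id, with parse_arn stubbed by a plain split; the ARN and context values are made up:

def resolve_region_account(name_or_arn: str, ctx_region: str, ctx_account: str):
    if name_or_arn.startswith("arn:"):
        # arn:<partition>:events:<region>:<account>:event-bus/<name>
        _, _, _, region, account, _ = name_or_arn.split(":", 5)
        if region and account:
            return region, account
    # plain bus name: fall back to the request context
    return ctx_region, ctx_account

assert resolve_region_account(
    "arn:aws:events:eu-central-1:000000000000:event-bus/my-bus", "us-east-1", "111111111111"
) == ("eu-central-1", "000000000000")
assert resolve_region_account("my-bus", "us-east-1", "111111111111") == (
    "us-east-1",
    "111111111111",
)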
+def extract_event_bus_name(
+    resource_arn_or_name: EventBusNameOrArn | RuleArn | None,
+) -> EventBusName:
+    """Return the event bus name. Input can be either an event bus name or ARN."""
+    if not resource_arn_or_name:
+        return "default"
+    if not re.match(f"{ARN_PARTITION_REGEX}:events", resource_arn_or_name):
+        return resource_arn_or_name
+    resource_type = get_resource_type(resource_arn_or_name)
+    if resource_type == ResourceType.EVENT_BUS:
+        return resource_arn_or_name.split("/")[-1]
+    if resource_type == ResourceType.RULE:
+        if bool(RULE_ARN_CUSTOM_EVENT_BUS_PATTERN.match(resource_arn_or_name)):
+            return resource_arn_or_name.split("rule/", 1)[1].split("/", 1)[0]
+        return "default"
+
+
+def extract_connection_name(
+    connection_arn: ConnectionArn,
+) -> ConnectionName:
+    match = CONNECTION_NAME_ARN_PATTERN.match(connection_arn)
+    if not match:
+        raise ValidationException(
+            f"Parameter {connection_arn} is not valid. Reason: Provided Arn is not in correct format."
+        )
+    return match.group("name")
+
+
+def extract_archive_name(arn: Arn) -> ArchiveName:
+    match = ARCHIVE_NAME_ARN_PATTERN.match(arn)
+    if not match:
+        raise ValidationException(
+            f"Parameter {arn} is not valid. Reason: Provided Arn is not in correct format."
+        )
+    return match.group("name")
+
+
+def is_archive_arn(arn: Arn) -> bool:
+    return bool(RULE_ARN_ARCHIVE_PATTERN.match(arn))
+
+
+def get_resource_type(arn: Arn) -> ResourceType:
+    parsed_arn = parse_arn(arn)
+    resource_type = parsed_arn["resource"].split("/", 1)[0]
+    if resource_type == "event-bus":
+        return ResourceType.EVENT_BUS
+    if resource_type == "rule":
+        return ResourceType.RULE
+    raise ValidationException(
+        f"Parameter {arn} is not valid. Reason: Provided Arn is not in correct format."
+    )
+
+
+def get_event_time(event: PutEventsRequestEntry) -> EventTime:
+    event_time = datetime.now(timezone.utc)
+    if event_timestamp := event.get("Time"):
+        try:
+            # use time from event if provided
+            event_time = event_timestamp.replace(tzinfo=timezone.utc)
+        except ValueError:
+            # use current time if event time is invalid
+            LOG.debug(
+                "Could not parse the `Time` parameter, falling back to current time for the following Event: '%s'",
+                event,
+            )
+    return event_time
+
+
+def event_time_to_time_string(event_time: EventTime) -> str:
+    return event_time.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
+def convert_to_timezone_aware_datetime(
+    timestamp: Timestamp,
+) -> Timestamp:
+    if timestamp.tzinfo is None:
+        timestamp = timestamp.replace(tzinfo=timezone.utc)
+    return timestamp
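A quick illustrative check (not part of the diff above) of the time handling: naive timestamps are coerced to UTC and rendered in the second-precision ISO form used for the event envelope:

from datetime import datetime, timezone

ts = datetime(2024, 1, 15, 12, 30, 45)  # naive, no tzinfo
if ts.tzinfo is None:
    ts = ts.replace(tzinfo=timezone.utc)
assert ts.strftime("%Y-%m-%dT%H:%M:%SZ") == "2024-01-15T12:30:45Z"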
+ """ + if not isinstance(d, dict): + return d + + clean_dict = {} + for key, value in d.items(): + if value is None: + continue + if isinstance(value, list): + nested_list = [recursive_remove_none_values_from_dict(item) for item in value] + nested_list = [item for item in nested_list if item] + if nested_list: + clean_dict[key] = nested_list + elif isinstance(value, dict): + nested_dict = recursive_remove_none_values_from_dict(value) + if nested_dict: + clean_dict[key] = nested_dict + else: + clean_dict[key] = value + return clean_dict + + +def format_event( + event: PutEventsRequestEntry, region: str, account_id: str, event_bus_name: EventBusName +) -> FormattedEvent: + # See https://docs.aws.amazon.com/AmazonS3/latest/userguide/ev-events.html + # region_name and account_id of original event is preserved fro cross-region event bus communication + trace_header = event.get("TraceHeader") + message = {} + if trace_header: + try: + message = json.loads(trace_header) + except json.JSONDecodeError: + pass + message_id = message.get("original_id", str(long_uid())) + region = message.get("original_region", region) + account_id = message.get("original_account", account_id) + # Format the datetime to ISO-8601 string + event_time = get_event_time(event) + formatted_time = event_time_to_time_string(event_time) + + formatted_event = { + "version": "0", + "id": message_id, + "detail-type": event.get("DetailType"), + "source": event.get("Source"), + "account": account_id, + "time": formatted_time, + "region": region, + "resources": event.get("Resources", []), + "detail": json.loads(event.get("Detail", "{}")), + "event-bus-name": event_bus_name, # current workaround for EventStudio extension + } + if replay_name := event.get("ReplayName"): + formatted_event["replay-name"] = replay_name # required for replay from archive + + return formatted_event + + +def re_format_event(event: FormattedEvent, event_bus_name: EventBusName) -> PutEventsRequestEntry: + """Transforms the event to the original event structure.""" + re_formatted_event = { + "Source": event["source"], + "DetailType": event[ + "detail-type" + ], # detail_type automatically interpreted as detail-type in typedict + "Detail": json.dumps(event["detail"]), + "Time": event["time"], + } + if event.get("resources"): + re_formatted_event["Resources"] = event["resources"] + if event_bus_name: + re_formatted_event["EventBusName"] = event_bus_name + if event.get("replay-name"): + re_formatted_event["ReplayName"] = event["replay_name"] + return re_formatted_event + + +def get_trace_header_encoded_region_account( + event: PutEventsRequestEntry | FormattedEvent | TransformedEvent, + source_region: str, + source_account_id: str, + target_region: str, + target_account_id: str, +) -> str | None: + """Encode the original region and account_id for cross-region and cross-account + event bus communication in the trace header. For event bus to event bus communication + in a different account the event id is preserved. 
+
+
+def get_trace_header_encoded_region_account(
+    event: PutEventsRequestEntry | FormattedEvent | TransformedEvent,
+    source_region: str,
+    source_account_id: str,
+    target_region: str,
+    target_account_id: str,
+) -> str | None:
+    """Encode the original region and account_id for cross-region and cross-account
+    event bus communication in the trace header. For event bus to event bus communication
+    in a different account the event id is preserved. This is not the case if the region differs."""
+    if event.get("TraceHeader"):
+        return None
+    if source_region != target_region and source_account_id != target_account_id:
+        return json.dumps(
+            {
+                "original_region": source_region,
+                "original_account": source_account_id,
+            }
+        )
+    if source_region != target_region:
+        return json.dumps({"original_region": source_region})
+    if source_account_id != target_account_id:
+        if original_id := event.get("id"):
+            return json.dumps({"original_id": original_id, "original_account": source_account_id})
+        else:
+            return json.dumps({"original_account": source_account_id})
+
+
+def is_nested_in_string(template: str, match: re.Match[str]) -> bool:
+    """
+    Determines if a match (string) is within quotes in the given template.
+
+    Examples:
+    True for "users-service/users/<userId>"  # nested within larger string
+    True for "<userId>"  # simple quoted placeholder
+    True for "Hello <name>"  # nested within larger string
+    False for {"id": <userId>}  # not in quotes at all
+    """
+    start = match.start()
+    end = match.end()
+
+    left_quote = template.rfind('"', 0, start)
+    right_quote = template.find('"', end)
+    next_comma = template.find(",", end)
+    next_brace = template.find("}", end)
+
+    # If no right quote, or if comma/brace comes before right quote, not nested
+    if (
+        right_quote == -1
+        or (next_comma != -1 and next_comma < right_quote)
+        or (next_brace != -1 and next_brace < right_quote)
+    ):
+        return False
+
+    return left_quote != -1
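A short usage sketch (not part of the diff above) for is_nested_in_string; the template and the <userId> placeholder are made-up examples:

import re
# assuming the function is importable from localstack.services.events.utils

template = '{"path": "users/<userId>", "id": <userId>}'
first, second = re.finditer("<userId>", template)
# is_nested_in_string(template, first)  -> True  (quoted, part of a larger string)
# is_nested_in_string(template, second) -> False (bare JSON value)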
diff --git a/localstack/services/stepfunctions/asl/component/common/error_name/__init__.py b/localstack-core/localstack/services/events/v1/__init__.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/error_name/__init__.py
rename to localstack-core/localstack/services/events/v1/__init__.py
diff --git a/localstack/services/events/models.py b/localstack-core/localstack/services/events/v1/models.py
similarity index 100%
rename from localstack/services/events/models.py
rename to localstack-core/localstack/services/events/v1/models.py
diff --git a/localstack-core/localstack/services/events/v1/provider.py b/localstack-core/localstack/services/events/v1/provider.py
new file mode 100644
index 0000000000000..9e3da8e447f6a
--- /dev/null
+++ b/localstack-core/localstack/services/events/v1/provider.py
@@ -0,0 +1,536 @@
+import datetime
+import json
+import logging
+import os
+import re
+import time
+from typing import Any, Dict, Optional
+
+from moto.events import events_backends
+from moto.events.responses import EventsHandler as MotoEventsHandler
+from werkzeug import Request
+from werkzeug.exceptions import NotFound
+
+from localstack import config
+from localstack.aws.api import RequestContext
+from localstack.aws.api.core import CommonServiceException, ServiceException
+from localstack.aws.api.events import (
+    Boolean,
+    ConnectionAuthorizationType,
+    ConnectionDescription,
+    ConnectionName,
+    ConnectivityResourceParameters,
+    CreateConnectionAuthRequestParameters,
+    CreateConnectionResponse,
+    EventBusNameOrArn,
+    EventPattern,
+    EventsApi,
+    KmsKeyIdentifier,
+    PutRuleResponse,
+    PutTargetsResponse,
+    RoleArn,
+    RuleDescription,
+    RuleName,
+    RuleState,
+    ScheduleExpression,
+    String,
+    TagList,
+    TargetList,
+    TestEventPatternResponse,
+)
+from localstack.constants import APPLICATION_AMZ_JSON_1_1
+from localstack.http import route
+from localstack.services.edge import ROUTER
+from localstack.services.events.scheduler import JobScheduler
+from localstack.services.events.v1.models import EventsStore, events_stores
+from localstack.services.moto import call_moto
+from localstack.services.plugins import ServiceLifecycleHook
+from localstack.utils.aws.arns import event_bus_arn, parse_arn
+from localstack.utils.aws.client_types import ServicePrincipal
+from localstack.utils.aws.message_forwarding import send_event_to_target
+from localstack.utils.collections import pick_attributes
+from localstack.utils.common import TMP_FILES, mkdir, save_file, truncate
+from localstack.utils.event_matcher import matches_event
+from localstack.utils.json import extract_jsonpath
+from localstack.utils.strings import long_uid, short_uid
+from localstack.utils.time import TIMESTAMP_FORMAT_TZ, timestamp
+
+LOG = logging.getLogger(__name__)
+
+# list of events used to run assertions during integration testing (not exposed to the user)
+TEST_EVENTS_CACHE = []
+EVENTS_TMP_DIR = "cw_events"
+DEFAULT_EVENT_BUS_NAME = "default"
+CONNECTION_NAME_PATTERN = re.compile("^[\\.\\-_A-Za-z0-9]+$")
+
+
+class ValidationException(ServiceException):
+    code: str = "ValidationException"
+    sender_fault: bool = True
+    status_code: int = 400
+
+
+class EventsProvider(EventsApi, ServiceLifecycleHook):
+    def __init__(self):
+        apply_patches()
+
+    def on_after_init(self):
+        ROUTER.add(self.trigger_scheduled_rule)
+
+    def on_before_start(self):
+        JobScheduler.start()
+
+    def on_before_stop(self):
+        JobScheduler.shutdown()
+
+    @route("/_aws/events/rules/<path:rule_arn>/trigger")
+    def trigger_scheduled_rule(self, request: Request, rule_arn: str):
+        """Developer endpoint to trigger a scheduled rule."""
+        arn_data = parse_arn(rule_arn)
+        account_id = arn_data["account"]
+        region = arn_data["region"]
+        rule_name = arn_data["resource"].split("/", maxsplit=1)[-1]
+
+        job_id = events_stores[account_id][region].rule_scheduled_jobs.get(rule_name)
+        if not job_id:
+            raise NotFound()
+        job = JobScheduler().instance().get_job(job_id)
+        if not job:
+            raise NotFound()
+
+        # TODO: once job scheduler is refactored, we can update the deadline of the task instead of running
+        # it here
+        job.run()
+
+    @staticmethod
+    def get_store(context: RequestContext) -> EventsStore:
+        return events_stores[context.account_id][context.region]
+
+    def test_event_pattern(
+        self, context: RequestContext, event_pattern: EventPattern, event: String, **kwargs
+    ) -> TestEventPatternResponse:
+        """Test event pattern uses EventBridge event pattern matching:
+        https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html
+        """
+        result = matches_event(event_pattern, event)
+        return TestEventPatternResponse(Result=result)
+
+    @staticmethod
+    def get_scheduled_rule_func(
+        store: EventsStore,
+        rule_name: RuleName,
+        event_bus_name_or_arn: Optional[EventBusNameOrArn] = None,
+    ):
+        def func(*args, **kwargs):
+            account_id = store._account_id
+            region = store._region_name
+            moto_backend = events_backends[account_id][region]
+            event_bus_name = get_event_bus_name(event_bus_name_or_arn)
+            event_bus = moto_backend.event_buses[event_bus_name]
+            rule = event_bus.rules.get(rule_name)
+            if not rule:
+                LOG.info("Unable to find rule `%s` for event bus `%s`", rule_name, event_bus_name)
+                return
+            if rule.targets:
+                LOG.debug(
+                    "Notifying %s targets in response to triggered Events rule %s",
+                    len(rule.targets),
+                    rule_name,
+                )
+
+            default_event = {
+                "version": "0",
+                "id": long_uid(),
+                "detail-type": "Scheduled Event",
+                "source": "aws.events",
+                "account": account_id,
+                "time": timestamp(format=TIMESTAMP_FORMAT_TZ),
+                "region": region,
+                "resources": [rule.arn],
+                "detail": {},
+            }
+
+            for target in 
rule.targets: + arn = target.get("Arn") + + if input_ := target.get("Input"): + event = json.loads(input_) + else: + event = default_event + if target.get("InputPath"): + event = filter_event_with_target_input_path(target, event) + if input_transformer := target.get("InputTransformer"): + event = process_event_with_input_transformer(input_transformer, event) + + attr = pick_attributes(target, ["$.SqsParameters", "$.KinesisParameters"]) + + try: + send_event_to_target( + arn, + event, + target_attributes=attr, + role=target.get("RoleArn"), + target=target, + source_arn=rule.arn, + source_service=ServicePrincipal.events, + ) + except Exception as e: + LOG.info( + "Unable to send event notification %s to target %s: %s", + truncate(event), + target, + e, + ) + + return func + + @staticmethod + def convert_schedule_to_cron(schedule): + """Convert Events schedule like "cron(0 20 * * ? *)" or "rate(5 minutes)" """ + cron_regex = r"\s*cron\s*\(([^\)]*)\)\s*" + if re.match(cron_regex, schedule): + cron = re.sub(cron_regex, r"\1", schedule) + return cron + rate_regex = r"\s*rate\s*\(([^\)]*)\)\s*" + if re.match(rate_regex, schedule): + rate = re.sub(rate_regex, r"\1", schedule) + value, unit = re.split(r"\s+", rate.strip()) + + value = int(value) + if value < 1: + raise ValueError("Rate value must be larger than 0") + # see https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-rate-expressions.html + if value == 1 and unit.endswith("s"): + raise ValueError("If the value is equal to 1, then the unit must be singular") + if value > 1 and not unit.endswith("s"): + raise ValueError("If the value is greater than 1, the unit must be plural") + + if "minute" in unit: + return "*/%s * * * *" % value + if "hour" in unit: + return "0 */%s * * *" % value + if "day" in unit: + return "0 0 */%s * *" % value + raise ValueError("Unable to parse events schedule expression: %s" % schedule) + return schedule + + @staticmethod + def put_rule_job_scheduler( + store: EventsStore, + name: Optional[RuleName], + state: Optional[RuleState], + schedule_expression: Optional[ScheduleExpression], + event_bus_name_or_arn: Optional[EventBusNameOrArn] = None, + ): + if not schedule_expression: + return + + try: + cron = EventsProvider.convert_schedule_to_cron(schedule_expression) + except ValueError as e: + LOG.error("Error parsing schedule expression: %s", e) + raise ValidationException("Parameter ScheduleExpression is not valid.") from e + + job_func = EventsProvider.get_scheduled_rule_func( + store, name, event_bus_name_or_arn=event_bus_name_or_arn + ) + LOG.debug("Adding new scheduled Events rule with cron schedule %s", cron) + + enabled = state != "DISABLED" + job_id = JobScheduler.instance().add_job(job_func, cron, enabled) + rule_scheduled_jobs = store.rule_scheduled_jobs + rule_scheduled_jobs[name] = job_id + + def put_rule( + self, + context: RequestContext, + name: RuleName, + schedule_expression: ScheduleExpression = None, + event_pattern: EventPattern = None, + state: RuleState = None, + description: RuleDescription = None, + role_arn: RoleArn = None, + tags: TagList = None, + event_bus_name: EventBusNameOrArn = None, + **kwargs, + ) -> PutRuleResponse: + store = self.get_store(context) + self.put_rule_job_scheduler( + store, name, state, schedule_expression, event_bus_name_or_arn=event_bus_name + ) + return call_moto(context) + + def delete_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn = None, + force: Boolean = None, + **kwargs, + ) -> None: + rule_scheduled_jobs 
= self.get_store(context).rule_scheduled_jobs + job_id = rule_scheduled_jobs.get(name) + if job_id: + LOG.debug("Removing scheduled Events: %s | job_id: %s", name, job_id) + JobScheduler.instance().cancel_job(job_id=job_id) + call_moto(context) + + def disable_rule( + self, + context: RequestContext, + name: RuleName, + event_bus_name: EventBusNameOrArn = None, + **kwargs, + ) -> None: + rule_scheduled_jobs = self.get_store(context).rule_scheduled_jobs + job_id = rule_scheduled_jobs.get(name) + if job_id: + LOG.debug("Disabling Rule: %s | job_id: %s", name, job_id) + JobScheduler.instance().disable_job(job_id=job_id) + call_moto(context) + + def create_connection( + self, + context: RequestContext, + name: ConnectionName, + authorization_type: ConnectionAuthorizationType, + auth_parameters: CreateConnectionAuthRequestParameters, + description: ConnectionDescription = None, + invocation_connectivity_parameters: ConnectivityResourceParameters = None, + kms_key_identifier: KmsKeyIdentifier = None, + **kwargs, + ) -> CreateConnectionResponse: + # TODO add support for kms_key_identifier + errors = [] + + if not CONNECTION_NAME_PATTERN.match(name): + error = f"{name} at 'name' failed to satisfy: Member must satisfy regular expression pattern: [\\.\\-_A-Za-z0-9]+" + errors.append(error) + + if len(name) > 64: + error = f"{name} at 'name' failed to satisfy: Member must have length less than or equal to 64" + errors.append(error) + + if authorization_type not in ["BASIC", "API_KEY", "OAUTH_CLIENT_CREDENTIALS"]: + error = f"{authorization_type} at 'authorizationType' failed to satisfy: Member must satisfy enum value set: [BASIC, OAUTH_CLIENT_CREDENTIALS, API_KEY]" + errors.append(error) + + if len(errors) > 0: + error_description = "; ".join(errors) + error_plural = "errors" if len(errors) > 1 else "error" + errors_amount = len(errors) + message = f"{errors_amount} validation {error_plural} detected: {error_description}" + raise CommonServiceException(message=message, code="ValidationException") + + return call_moto(context) + + def put_targets( + self, + context: RequestContext, + rule: RuleName, + targets: TargetList, + event_bus_name: EventBusNameOrArn = None, + **kwargs, + ) -> PutTargetsResponse: + validation_errors = [] + + id_regex = re.compile(r"^[\.\-_A-Za-z0-9]+$") + for index, target in enumerate(targets): + id = target.get("Id") + if not id_regex.match(id): + validation_errors.append( + f"Value '{id}' at 'targets.{index + 1}.member.id' failed to satisfy constraint: Member must satisfy regular expression pattern: [\\.\\-_A-Za-z0-9]+" + ) + + if len(id) > 64: + validation_errors.append( + f"Value '{id}' at 'targets.{index + 1}.member.id' failed to satisfy constraint: Member must have length less than or equal to 64" + ) + + if validation_errors: + errors_message = "; ".join(validation_errors) + message = f"{len(validation_errors)} validation {'errors' if len(validation_errors) > 1 else 'error'} detected: {errors_message}" + raise CommonServiceException(message=message, code="ValidationException") + + return call_moto(context) + + +def _get_events_tmp_dir(): + return os.path.join(config.dirs.tmp, EVENTS_TMP_DIR) + + +def _create_and_register_temp_dir(): + tmp_dir = _get_events_tmp_dir() + if not os.path.exists(tmp_dir): + mkdir(tmp_dir) + TMP_FILES.append(tmp_dir) + return tmp_dir + + +def _dump_events_to_files(events_with_added_uuid): + try: + _create_and_register_temp_dir() + current_time_millis = int(round(time.time() * 1000)) + for event in events_with_added_uuid: + target = 
os.path.join( + _get_events_tmp_dir(), + "%s_%s" % (current_time_millis, event["uuid"]), + ) + save_file(target, json.dumps(event["event"])) + except Exception as e: + LOG.info("Unable to dump events to tmp dir %s: %s", _get_events_tmp_dir(), e) + + +def filter_event_based_on_event_format( + self, rule_name: str, event_bus_name: str, event: dict[str, Any] +): + rule_information = self.events_backend.describe_rule( + rule_name, event_bus_arn(event_bus_name, self.current_account, self.region) + ) + + if not rule_information: + LOG.info('Unable to find rule "%s" in backend: %s', rule_name, rule_information) + return False + if rule_information.event_pattern._pattern: + event_pattern = rule_information.event_pattern._pattern + if not matches_event(event_pattern, event): + return False + return True + + +def filter_event_with_target_input_path(target: Dict, event: Dict) -> Dict: + input_path = target.get("InputPath") + if input_path: + event = extract_jsonpath(event, input_path) + return event + + +def process_event_with_input_transformer(input_transformer: Dict, event: Dict) -> Dict: + """ + Process the event with the input transformer of the target event, + by replacing the message with the populated InputTemplate. + docs.aws.amazon.com/eventbridge/latest/userguide/eb-transform-target-input.html + """ + try: + input_paths = input_transformer["InputPathsMap"] + input_template = input_transformer["InputTemplate"] + except KeyError as e: + LOG.error("%s key does not exist in input_transformer.", e) + raise e + for key, path in input_paths.items(): + value = extract_jsonpath(event, path) + if not value: + value = "" + input_template = input_template.replace(f"<{key}>", value) + templated_event = re.sub('"', "", input_template) + return templated_event + + +def process_events(event: Dict, targets: list[Dict]): + for target in targets: + arn = target["Arn"] + changed_event = filter_event_with_target_input_path(target, event) + if input_transformer := target.get("InputTransformer"): + changed_event = process_event_with_input_transformer(input_transformer, changed_event) + if target.get("Input"): + changed_event = json.loads(target.get("Input")) + try: + send_event_to_target( + arn, + changed_event, + pick_attributes(target, ["$.SqsParameters", "$.KinesisParameters"]), + role=target.get("RoleArn"), + target=target, + source_service=ServicePrincipal.events, + source_arn=target.get("RuleArn"), + ) + except Exception as e: + LOG.info( + "Unable to send event notification %s to target %s: %s", + truncate(event), + target, + e, + ) + + +def get_event_bus_name(event_bus_name_or_arn: Optional[EventBusNameOrArn] = None) -> str: + event_bus_name_or_arn = event_bus_name_or_arn or DEFAULT_EVENT_BUS_NAME + return event_bus_name_or_arn.split("/")[-1] + + +# specific logic for put_events which forwards matching events to target listeners +def events_handler_put_events(self): + entries = self._get_param("Entries") + + # keep track of events for local integration testing + if config.is_local_test_mode(): + TEST_EVENTS_CACHE.extend(entries) + + events = [{"event": event, "uuid": str(long_uid())} for event in entries] + + _dump_events_to_files(events) + + for event_envelope in events: + event = event_envelope["event"] + event_bus_name = get_event_bus_name(event.get("EventBusName")) + event_bus = self.events_backend.event_buses.get(event_bus_name) + if not event_bus: + continue + + matching_rules = [ + r + for r in event_bus.rules.values() + if r.event_bus_name == event_bus_name and not r.scheduled_expression + ] + if 
not matching_rules: + continue + + event_time = datetime.datetime.utcnow() + if event_timestamp := event.get("Time"): + try: + # if provided, use the time from event + event_time = datetime.datetime.utcfromtimestamp(event_timestamp) + except ValueError: + # if we can't parse it, pass and keep using `utcnow` + LOG.debug( + "Could not parse the `Time` parameter, falling back to `utcnow` for the following Event: '%s'", + event, + ) + + # See https://docs.aws.amazon.com/AmazonS3/latest/userguide/ev-events.html + formatted_event = { + "version": "0", + "id": event_envelope["uuid"], + "detail-type": event.get("DetailType"), + "source": event.get("Source"), + "account": self.current_account, + "time": event_time.strftime("%Y-%m-%dT%H:%M:%SZ"), + "region": self.region, + "resources": event.get("Resources", []), + "detail": json.loads(event.get("Detail", "{}")), + } + + targets = [] + for rule in matching_rules: + if filter_event_based_on_event_format(self, rule.name, event_bus_name, formatted_event): + rule_targets, _ = self.events_backend.list_targets_by_rule( + rule.name, event_bus_arn(event_bus_name, self.current_account, self.region) + ) + targets.extend([{"RuleArn": rule.arn} | target for target in rule_targets]) + # process event + process_events(formatted_event, targets) + + content = { + "FailedEntryCount": 0, # TODO: dynamically set proper value when refactoring + "Entries": [{"EventId": event["uuid"]} for event in events], + } + + self.response_headers.update( + {"Content-Type": APPLICATION_AMZ_JSON_1_1, "x-amzn-RequestId": short_uid()} + ) + + return json.dumps(content), self.response_headers + + +def apply_patches(): + MotoEventsHandler.put_events = events_handler_put_events diff --git a/localstack/services/stepfunctions/asl/component/common/flow/__init__.py b/localstack-core/localstack/services/firehose/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/flow/__init__.py rename to localstack-core/localstack/services/firehose/__init__.py diff --git a/localstack/services/firehose/mappers.py b/localstack-core/localstack/services/firehose/mappers.py similarity index 92% rename from localstack/services/firehose/mappers.py rename to localstack-core/localstack/services/firehose/mappers.py index 5d56d17c10340..f262db136020a 100644 --- a/localstack/services/firehose/mappers.py +++ b/localstack-core/localstack/services/firehose/mappers.py @@ -16,6 +16,8 @@ HttpEndpointDestinationUpdate, KinesisStreamSourceConfiguration, KinesisStreamSourceDescription, + RedshiftDestinationConfiguration, + RedshiftDestinationDescription, S3DestinationConfiguration, S3DestinationDescription, S3DestinationUpdate, @@ -156,3 +158,15 @@ def convert_source_config_to_desc( result = cast(KinesisStreamSourceDescription, configuration) result["DeliveryStartTimestamp"] = datetime.now() return SourceDescription(KinesisStreamSourceDescription=result) + + +def convert_redshift_config_to_desc( + configuration: RedshiftDestinationConfiguration, +) -> RedshiftDestinationDescription: + if configuration is not None: + result = cast(RedshiftDestinationDescription, configuration) + result["S3DestinationDescription"] = convert_s3_config_to_desc( + configuration["S3Configuration"] + ) + result.pop("S3Configuration", None) + return result diff --git a/localstack/services/firehose/models.py b/localstack-core/localstack/services/firehose/models.py similarity index 100% rename from localstack/services/firehose/models.py rename to localstack-core/localstack/services/firehose/models.py diff 
--git a/localstack-core/localstack/services/firehose/provider.py b/localstack-core/localstack/services/firehose/provider.py new file mode 100644 index 0000000000000..c678d0647c076 --- /dev/null +++ b/localstack-core/localstack/services/firehose/provider.py @@ -0,0 +1,959 @@ +import base64 +import functools +import json +import logging +import os +import re +import threading +import time +import uuid +from datetime import datetime +from typing import Dict, List +from urllib.parse import urlparse + +import requests + +from localstack.aws.api import RequestContext +from localstack.aws.api.firehose import ( + AmazonOpenSearchServerlessDestinationConfiguration, + AmazonOpenSearchServerlessDestinationUpdate, + AmazonopensearchserviceDestinationConfiguration, + AmazonopensearchserviceDestinationDescription, + AmazonopensearchserviceDestinationUpdate, + BooleanObject, + CreateDeliveryStreamOutput, + DatabaseSourceConfiguration, + DeleteDeliveryStreamOutput, + DeliveryStreamDescription, + DeliveryStreamEncryptionConfigurationInput, + DeliveryStreamName, + DeliveryStreamStatus, + DeliveryStreamType, + DeliveryStreamVersionId, + DescribeDeliveryStreamInputLimit, + DescribeDeliveryStreamOutput, + DestinationDescription, + DestinationDescriptionList, + DestinationId, + DirectPutSourceConfiguration, + ElasticsearchDestinationConfiguration, + ElasticsearchDestinationDescription, + ElasticsearchDestinationUpdate, + ElasticsearchS3BackupMode, + ExtendedS3DestinationConfiguration, + ExtendedS3DestinationUpdate, + FirehoseApi, + HttpEndpointDestinationConfiguration, + HttpEndpointDestinationUpdate, + IcebergDestinationConfiguration, + IcebergDestinationUpdate, + InvalidArgumentException, + KinesisStreamSourceConfiguration, + ListDeliveryStreamsInputLimit, + ListDeliveryStreamsOutput, + ListTagsForDeliveryStreamInputLimit, + ListTagsForDeliveryStreamOutput, + ListTagsForDeliveryStreamOutputTagList, + MSKSourceConfiguration, + PutRecordBatchOutput, + PutRecordBatchRequestEntryList, + PutRecordBatchResponseEntry, + PutRecordOutput, + Record, + RedshiftDestinationConfiguration, + RedshiftDestinationDescription, + RedshiftDestinationUpdate, + ResourceNotFoundException, + S3DestinationConfiguration, + S3DestinationDescription, + S3DestinationUpdate, + SnowflakeDestinationConfiguration, + SnowflakeDestinationUpdate, + SplunkDestinationConfiguration, + SplunkDestinationUpdate, + TagDeliveryStreamInputTagList, + TagDeliveryStreamOutput, + TagKey, + TagKeyList, + UntagDeliveryStreamOutput, + UpdateDestinationOutput, +) +from localstack.aws.connect import connect_to +from localstack.services.firehose.mappers import ( + convert_es_config_to_desc, + convert_es_update_to_desc, + convert_extended_s3_config_to_desc, + convert_extended_s3_update_to_desc, + convert_http_config_to_desc, + convert_http_update_to_desc, + convert_opensearch_config_to_desc, + convert_opensearch_update_to_desc, + convert_redshift_config_to_desc, + convert_s3_config_to_desc, + convert_s3_update_to_desc, + convert_source_config_to_desc, +) +from localstack.services.firehose.models import FirehoseStore, firehose_stores +from localstack.utils.aws.arns import ( + extract_account_id_from_arn, + extract_region_from_arn, + firehose_stream_arn, + opensearch_domain_name, + s3_bucket_name, +) +from localstack.utils.aws.client_types import ServicePrincipal +from localstack.utils.collections import select_from_typed_dict +from localstack.utils.common import ( + TIMESTAMP_FORMAT_MICROS, + first_char_to_lower, + keys_to_lower, + now_utc, + short_uid, + timestamp, 
+    to_bytes,
+    to_str,
+    truncate,
+)
+from localstack.utils.kinesis import kinesis_connector
+from localstack.utils.kinesis.kinesis_connector import KinesisProcessorThread
+from localstack.utils.run import run_for_max_seconds
+
+LOG = logging.getLogger(__name__)
+
+# global sequence number counter for Firehose records (these are very large long values in AWS)
+SEQUENCE_NUMBER = 49546986683135544286507457936321625675700192471156785154
+SEQUENCE_NUMBER_MUTEX = threading.RLock()
+
+
+def next_sequence_number() -> int:
+    """Increase and return the next global sequence number."""
+    global SEQUENCE_NUMBER
+    with SEQUENCE_NUMBER_MUTEX:
+        SEQUENCE_NUMBER += 1
+        return SEQUENCE_NUMBER
+
+
+def _get_description_or_raise_not_found(
+    context, delivery_stream_name: str
+) -> DeliveryStreamDescription:
+    store = FirehoseProvider.get_store(context.account_id, context.region)
+    delivery_stream_description = store.delivery_streams.get(delivery_stream_name)
+    if not delivery_stream_description:
+        raise ResourceNotFoundException(
+            f"Firehose {delivery_stream_name} under account {context.account_id} not found."
+        )
+    return delivery_stream_description
+
+
+def get_opensearch_endpoint(domain_arn: str) -> str:
+    """
+    Get an OpenSearch cluster endpoint by describing the cluster associated with the domain_arn
+    :param domain_arn: ARN of the cluster.
+    :returns: cluster endpoint
+    :raises: ValueError if the domain_arn is malformed
+    """
+    account_id = extract_account_id_from_arn(domain_arn)
+    region_name = extract_region_from_arn(domain_arn)
+    if region_name is None:
+        raise ValueError("unable to parse region from opensearch domain ARN")
+    opensearch_client = connect_to(aws_access_key_id=account_id, region_name=region_name).opensearch
+    domain_name = opensearch_domain_name(domain_arn)
+    info = opensearch_client.describe_domain(DomainName=domain_name)
+    base_domain = info["DomainStatus"]["Endpoint"]
+    # Add the URL scheme "http" if it's not set yet. https might not be enabled for all instances,
+    # e.g., when the endpoint strategy is PORT or there is a custom opensearch/elasticsearch instance
+    endpoint = base_domain if base_domain.startswith("http") else f"http://{base_domain}"
+    return endpoint
+
+
+def get_search_db_connection(endpoint: str, region_name: str):
+    """
+    Get a connection to an ElasticSearch or OpenSearch DB
+    :param endpoint: cluster endpoint
+    :param region_name: cluster region e.g. us-east-1
+    """
+    from opensearchpy import OpenSearch, RequestsHttpConnection
+    from requests_aws4auth import AWS4Auth
+
+    verify_certs = False
+    use_ssl = False
+    # use ssl?
+ if "https://" in endpoint: + use_ssl = True + # TODO remove this condition once ssl certs are available for .es.localhost.localstack.cloud domains + endpoint_netloc = urlparse(endpoint).netloc + if not re.match(r"^.*(localhost(\.localstack\.cloud)?)(:\d+)?$", endpoint_netloc): + verify_certs = True + + LOG.debug("Creating ES client with endpoint %s", endpoint) + if "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ: + access_key = os.environ.get("AWS_ACCESS_KEY_ID") + secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY") + session_token = os.environ.get("AWS_SESSION_TOKEN") + awsauth = AWS4Auth(access_key, secret_key, region_name, "es", session_token=session_token) + connection_class = RequestsHttpConnection + return OpenSearch( + hosts=[endpoint], + verify_certs=verify_certs, + use_ssl=use_ssl, + connection_class=connection_class, + http_auth=awsauth, + ) + return OpenSearch(hosts=[endpoint], verify_certs=verify_certs, use_ssl=use_ssl) + + +def _drop_keys_in_destination_descriptions_not_in_output_types( + destinations: list, +) -> list[dict]: + """For supported destinations, drops the keys in the description not defined in the respective destination description return type""" + for destination in destinations: + if amazon_open_search_service_destination_description := destination.get( + "AmazonopensearchserviceDestinationDescription" + ): + destination["AmazonopensearchserviceDestinationDescription"] = select_from_typed_dict( + AmazonopensearchserviceDestinationDescription, + amazon_open_search_service_destination_description, + filter=True, + ) + if elasticsearch_destination_description := destination.get( + "ElasticsearchDestinationDescription" + ): + destination["ElasticsearchDestinationDescription"] = select_from_typed_dict( + ElasticsearchDestinationDescription, + elasticsearch_destination_description, + filter=True, + ) + if http_endpoint_destination_description := destination.get( + "HttpEndpointDestinationDescription" + ): + destination["HttpEndpointDestinationDescription"] = select_from_typed_dict( + HttpEndpointDestinationConfiguration, + http_endpoint_destination_description, + filter=True, + ) + if redshift_destination_description := destination.get("RedshiftDestinationDescription"): + destination["RedshiftDestinationDescription"] = select_from_typed_dict( + RedshiftDestinationDescription, + redshift_destination_description, + filter=True, + ) + if s3_destination_description := destination.get("S3DestinationDescription"): + destination["S3DestinationDescription"] = select_from_typed_dict( + S3DestinationDescription, s3_destination_description, filter=True + ) + + return destinations + + +class FirehoseProvider(FirehoseApi): + # maps a delivery_stream_arn to its kinesis thread; the arn encodes account id and region + kinesis_listeners: dict[str, KinesisProcessorThread] + + def __init__(self) -> None: + super().__init__() + self.kinesis_listeners = {} + + @staticmethod + def get_store(account_id: str, region_name: str) -> FirehoseStore: + return firehose_stores[account_id][region_name] + + def create_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + delivery_stream_type: DeliveryStreamType = None, + direct_put_source_configuration: DirectPutSourceConfiguration = None, + kinesis_stream_source_configuration: KinesisStreamSourceConfiguration = None, + delivery_stream_encryption_configuration_input: DeliveryStreamEncryptionConfigurationInput = None, + s3_destination_configuration: S3DestinationConfiguration = 
None,
+        extended_s3_destination_configuration: ExtendedS3DestinationConfiguration = None,
+        redshift_destination_configuration: RedshiftDestinationConfiguration = None,
+        elasticsearch_destination_configuration: ElasticsearchDestinationConfiguration = None,
+        amazonopensearchservice_destination_configuration: AmazonopensearchserviceDestinationConfiguration = None,
+        splunk_destination_configuration: SplunkDestinationConfiguration = None,
+        http_endpoint_destination_configuration: HttpEndpointDestinationConfiguration = None,
+        tags: TagDeliveryStreamInputTagList = None,
+        amazon_open_search_serverless_destination_configuration: AmazonOpenSearchServerlessDestinationConfiguration = None,
+        msk_source_configuration: MSKSourceConfiguration = None,
+        snowflake_destination_configuration: SnowflakeDestinationConfiguration = None,
+        iceberg_destination_configuration: IcebergDestinationConfiguration = None,
+        database_source_configuration: DatabaseSourceConfiguration = None,
+        **kwargs,
+    ) -> CreateDeliveryStreamOutput:
+        # TODO add support for database_source_configuration and direct_put_source_configuration
+        store = self.get_store(context.account_id, context.region)
+
+        destinations: DestinationDescriptionList = []
+        if elasticsearch_destination_configuration:
+            destinations.append(
+                DestinationDescription(
+                    DestinationId=short_uid(),
+                    ElasticsearchDestinationDescription=convert_es_config_to_desc(
+                        elasticsearch_destination_configuration
+                    ),
+                )
+            )
+        if amazonopensearchservice_destination_configuration:
+            db_description = convert_opensearch_config_to_desc(
+                amazonopensearchservice_destination_configuration
+            )
+            destinations.append(
+                DestinationDescription(
+                    DestinationId=short_uid(),
+                    AmazonopensearchserviceDestinationDescription=db_description,
+                )
+            )
+        if s3_destination_configuration or extended_s3_destination_configuration:
+            destinations.append(
+                DestinationDescription(
+                    DestinationId=short_uid(),
+                    S3DestinationDescription=convert_s3_config_to_desc(
+                        s3_destination_configuration
+                    ),
+                    ExtendedS3DestinationDescription=convert_extended_s3_config_to_desc(
+                        extended_s3_destination_configuration
+                    ),
+                )
+            )
+        if http_endpoint_destination_configuration:
+            destinations.append(
+                DestinationDescription(
+                    DestinationId=short_uid(),
+                    HttpEndpointDestinationDescription=convert_http_config_to_desc(
+                        http_endpoint_destination_configuration
+                    ),
+                )
+            )
+        if splunk_destination_configuration:
+            LOG.warning(
+                "Delivery stream contains a splunk destination (which is currently not supported)."
+            )
+        if redshift_destination_configuration:
+            destinations.append(
+                DestinationDescription(
+                    DestinationId=short_uid(),
+                    RedshiftDestinationDescription=convert_redshift_config_to_desc(
+                        redshift_destination_configuration
+                    ),
+                )
+            )
+        if amazon_open_search_serverless_destination_configuration:
+            LOG.warning(
+                "Delivery stream contains an opensearch serverless destination (which is currently not supported)."
+ ) + + stream = DeliveryStreamDescription( + DeliveryStreamName=delivery_stream_name, + DeliveryStreamARN=firehose_stream_arn( + stream_name=delivery_stream_name, + account_id=context.account_id, + region_name=context.region, + ), + DeliveryStreamStatus=DeliveryStreamStatus.ACTIVE, + DeliveryStreamType=delivery_stream_type, + HasMoreDestinations=False, + VersionId="1", + CreateTimestamp=datetime.now(), + Destinations=destinations, + Source=convert_source_config_to_desc(kinesis_stream_source_configuration), + ) + delivery_stream_arn = stream["DeliveryStreamARN"] + store.TAGS.tag_resource(delivery_stream_arn, tags) + store.delivery_streams[delivery_stream_name] = stream + + if delivery_stream_type == DeliveryStreamType.KinesisStreamAsSource: + if not kinesis_stream_source_configuration: + raise InvalidArgumentException("Missing delivery stream configuration") + kinesis_stream_arn = kinesis_stream_source_configuration["KinesisStreamARN"] + kinesis_stream_name = kinesis_stream_arn.split(":stream/")[1] + + def _startup(): + stream["DeliveryStreamStatus"] = DeliveryStreamStatus.CREATING + try: + listener_function = functools.partial( + self._process_records, + context.account_id, + context.region, + delivery_stream_name, + ) + process = kinesis_connector.listen_to_kinesis( + stream_name=kinesis_stream_name, + account_id=context.account_id, + region_name=context.region, + listener_func=listener_function, + wait_until_started=True, + ddb_lease_table_suffix=f"-firehose-{delivery_stream_name}", + ) + + self.kinesis_listeners[delivery_stream_arn] = process + stream["DeliveryStreamStatus"] = DeliveryStreamStatus.ACTIVE + except Exception as e: + LOG.warning( + "Unable to create Firehose delivery stream %s: %s", + delivery_stream_name, + e, + ) + stream["DeliveryStreamStatus"] = DeliveryStreamStatus.CREATING_FAILED + + run_for_max_seconds(25, _startup) + return CreateDeliveryStreamOutput(DeliveryStreamARN=stream["DeliveryStreamARN"]) + + def delete_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + allow_force_delete: BooleanObject = None, + **kwargs, + ) -> DeleteDeliveryStreamOutput: + store = self.get_store(context.account_id, context.region) + delivery_stream_description = store.delivery_streams.pop(delivery_stream_name, {}) + if not delivery_stream_description: + raise ResourceNotFoundException( + f"Firehose {delivery_stream_name} under account {context.account_id} not found." 
+            )
+
+        delivery_stream_arn = firehose_stream_arn(
+            stream_name=delivery_stream_name,
+            account_id=context.account_id,
+            region_name=context.region,
+        )
+        if kinesis_process := self.kinesis_listeners.pop(delivery_stream_arn, None):
+            LOG.debug("Stopping kinesis listener for %s", delivery_stream_name)
+            kinesis_process.stop()
+
+        return DeleteDeliveryStreamOutput()
+
+    def describe_delivery_stream(
+        self,
+        context: RequestContext,
+        delivery_stream_name: DeliveryStreamName,
+        limit: DescribeDeliveryStreamInputLimit = None,
+        exclusive_start_destination_id: DestinationId = None,
+        **kwargs,
+    ) -> DescribeDeliveryStreamOutput:
+        delivery_stream_description = _get_description_or_raise_not_found(
+            context, delivery_stream_name
+        )
+        if destinations := delivery_stream_description.get("Destinations"):
+            delivery_stream_description["Destinations"] = (
+                _drop_keys_in_destination_descriptions_not_in_output_types(destinations)
+            )
+
+        return DescribeDeliveryStreamOutput(DeliveryStreamDescription=delivery_stream_description)
+
+    def list_delivery_streams(
+        self,
+        context: RequestContext,
+        limit: ListDeliveryStreamsInputLimit = None,
+        delivery_stream_type: DeliveryStreamType = None,
+        exclusive_start_delivery_stream_name: DeliveryStreamName = None,
+        **kwargs,
+    ) -> ListDeliveryStreamsOutput:
+        store = self.get_store(context.account_id, context.region)
+        delivery_stream_names = []
+        for name, stream in store.delivery_streams.items():
+            delivery_stream_names.append(stream["DeliveryStreamName"])
+        return ListDeliveryStreamsOutput(
+            DeliveryStreamNames=delivery_stream_names, HasMoreDeliveryStreams=False
+        )
+
+    def put_record(
+        self,
+        context: RequestContext,
+        delivery_stream_name: DeliveryStreamName,
+        record: Record,
+        **kwargs,
+    ) -> PutRecordOutput:
+        record = self._reencode_record(record)
+        return self._put_record(context.account_id, context.region, delivery_stream_name, record)
+
+    def put_record_batch(
+        self,
+        context: RequestContext,
+        delivery_stream_name: DeliveryStreamName,
+        records: PutRecordBatchRequestEntryList,
+        **kwargs,
+    ) -> PutRecordBatchOutput:
+        records = self._reencode_records(records)
+        return PutRecordBatchOutput(
+            FailedPutCount=0,
+            RequestResponses=self._put_records(
+                context.account_id, context.region, delivery_stream_name, records
+            ),
+        )
+
+    def tag_delivery_stream(
+        self,
+        context: RequestContext,
+        delivery_stream_name: DeliveryStreamName,
+        tags: TagDeliveryStreamInputTagList,
+        **kwargs,
+    ) -> TagDeliveryStreamOutput:
+        store = self.get_store(context.account_id, context.region)
+        delivery_stream_description = _get_description_or_raise_not_found(
+            context, delivery_stream_name
+        )
+        store.TAGS.tag_resource(delivery_stream_description["DeliveryStreamARN"], tags)
+        return TagDeliveryStreamOutput()
+
+    def list_tags_for_delivery_stream(
+        self,
+        context: RequestContext,
+        delivery_stream_name: DeliveryStreamName,
+        exclusive_start_tag_key: TagKey = None,
+        limit: ListTagsForDeliveryStreamInputLimit = None,
+        **kwargs,
+    ) -> ListTagsForDeliveryStreamOutput:
+        store = self.get_store(context.account_id, context.region)
+        delivery_stream_description = _get_description_or_raise_not_found(
+            context, delivery_stream_name
+        )
+        # The tagging service returns a dictionary with the given root name
+        tags = store.TAGS.list_tags_for_resource(
+            arn=delivery_stream_description["DeliveryStreamARN"], root_name="root"
+        )
+        # Extract the actual list of tags for the typed response
+        tag_list: ListTagsForDeliveryStreamOutputTagList = tags["root"]
+        return 
ListTagsForDeliveryStreamOutput(Tags=tag_list, HasMoreTags=False) + + def untag_delivery_stream( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + tag_keys: TagKeyList, + **kwargs, + ) -> UntagDeliveryStreamOutput: + store = self.get_store(context.account_id, context.region) + delivery_stream_description = _get_description_or_raise_not_found( + context, delivery_stream_name + ) + # The tagging service returns a dictionary with the given root name + store.TAGS.untag_resource( + arn=delivery_stream_description["DeliveryStreamARN"], tag_names=tag_keys + ) + return UntagDeliveryStreamOutput() + + def update_destination( + self, + context: RequestContext, + delivery_stream_name: DeliveryStreamName, + current_delivery_stream_version_id: DeliveryStreamVersionId, + destination_id: DestinationId, + s3_destination_update: S3DestinationUpdate = None, + extended_s3_destination_update: ExtendedS3DestinationUpdate = None, + redshift_destination_update: RedshiftDestinationUpdate = None, + elasticsearch_destination_update: ElasticsearchDestinationUpdate = None, + amazonopensearchservice_destination_update: AmazonopensearchserviceDestinationUpdate = None, + splunk_destination_update: SplunkDestinationUpdate = None, + http_endpoint_destination_update: HttpEndpointDestinationUpdate = None, + amazon_open_search_serverless_destination_update: AmazonOpenSearchServerlessDestinationUpdate = None, + snowflake_destination_update: SnowflakeDestinationUpdate = None, + iceberg_destination_update: IcebergDestinationUpdate = None, + **kwargs, + ) -> UpdateDestinationOutput: + delivery_stream_description = _get_description_or_raise_not_found( + context, delivery_stream_name + ) + destinations = delivery_stream_description["Destinations"] + try: + destination = next(filter(lambda d: d["DestinationId"] == destination_id, destinations)) + except StopIteration: + destination = DestinationDescription(DestinationId=destination_id) + delivery_stream_description["Destinations"].append(destination) + + if elasticsearch_destination_update: + destination["ElasticsearchDestinationDescription"] = convert_es_update_to_desc( + elasticsearch_destination_update + ) + + if amazonopensearchservice_destination_update: + destination["AmazonopensearchserviceDestinationDescription"] = ( + convert_opensearch_update_to_desc(amazonopensearchservice_destination_update) + ) + + if s3_destination_update: + destination["S3DestinationDescription"] = convert_s3_update_to_desc( + s3_destination_update + ) + + if extended_s3_destination_update: + destination["ExtendedS3DestinationDescription"] = convert_extended_s3_update_to_desc( + extended_s3_destination_update + ) + + if http_endpoint_destination_update: + destination["HttpEndpointDestinationDescription"] = convert_http_update_to_desc( + http_endpoint_destination_update + ) + # TODO: add feature update redshift destination + + return UpdateDestinationOutput() + + def _reencode_record(self, record: Record) -> Record: + """ + The ASF decodes the record's data automatically. But most of the service integrations (kinesis, lambda, http) + are working with the base64 encoded data. 
+ """ + if "Data" in record: + record["Data"] = base64.b64encode(record["Data"]) + return record + + def _reencode_records(self, records: List[Record]) -> List[Record]: + return [self._reencode_record(r) for r in records] + + def _process_records( + self, + account_id: str, + region_name: str, + fh_d_stream: str, + records: List[Record], + ): + """Process the given records from the underlying Kinesis stream""" + return self._put_records(account_id, region_name, fh_d_stream, records) + + def _put_record( + self, + account_id: str, + region_name: str, + delivery_stream_name: str, + record: Record, + ) -> PutRecordOutput: + """Put a record to the firehose stream from a PutRecord API call""" + result = self._put_records(account_id, region_name, delivery_stream_name, [record]) + return PutRecordOutput(RecordId=result[0]["RecordId"]) + + def _put_records( + self, + account_id: str, + region_name: str, + delivery_stream_name: str, + unprocessed_records: List[Record], + ) -> List[PutRecordBatchResponseEntry]: + """Put a list of records to the firehose stream - either directly from a PutRecord API call, or + received from an underlying Kinesis stream (if 'KinesisStreamAsSource' is configured)""" + store = self.get_store(account_id, region_name) + delivery_stream_description = store.delivery_streams.get(delivery_stream_name) + if not delivery_stream_description: + raise ResourceNotFoundException( + f"Firehose {delivery_stream_name} under account {account_id} not found." + ) + + # preprocess records, add any missing attributes + self._add_missing_record_attributes(unprocessed_records) + + for destination in delivery_stream_description.get("Destinations", []): + # apply processing steps to incoming items + proc_config = {} + for child in destination.values(): + proc_config = ( + isinstance(child, dict) and child.get("ProcessingConfiguration") or proc_config + ) + records = list(unprocessed_records) + if proc_config.get("Enabled") is not False: + for processor in proc_config.get("Processors", []): + # TODO: run processors asynchronously, to avoid request timeouts on PutRecord API calls + records = self._preprocess_records(processor, records) + + if "ElasticsearchDestinationDescription" in destination: + self._put_to_search_db( + "ElasticSearch", + destination["ElasticsearchDestinationDescription"], + delivery_stream_name, + records, + unprocessed_records, + region_name, + ) + if "AmazonopensearchserviceDestinationDescription" in destination: + self._put_to_search_db( + "OpenSearch", + destination["AmazonopensearchserviceDestinationDescription"], + delivery_stream_name, + records, + unprocessed_records, + region_name, + ) + if "S3DestinationDescription" in destination: + s3_dest_desc = ( + destination["S3DestinationDescription"] + or destination["ExtendedS3DestinationDescription"] + ) + self._put_records_to_s3_bucket(delivery_stream_name, records, s3_dest_desc) + if "HttpEndpointDestinationDescription" in destination: + http_dest = destination["HttpEndpointDestinationDescription"] + end_point = http_dest["EndpointConfiguration"] + url = end_point["Url"] + record_to_send = { + "requestId": str(uuid.uuid4()), + "timestamp": (int(time.time())), + "records": [], + } + for record in records: + data = record.get("Data") or record.get("data") + record_to_send["records"].append({"data": to_str(data)}) + headers = { + "Content-Type": "application/json", + } + try: + requests.post(url, json=record_to_send, headers=headers) + except Exception as e: + LOG.exception("Unable to put Firehose records to HTTP endpoint 
%s.", url) + raise e + if "RedshiftDestinationDescription" in destination: + s3_dest_desc = destination["RedshiftDestinationDescription"][ + "S3DestinationDescription" + ] + self._put_records_to_s3_bucket(delivery_stream_name, records, s3_dest_desc) + + redshift_dest_desc = destination["RedshiftDestinationDescription"] + self._put_to_redshift(records, redshift_dest_desc) + return [ + PutRecordBatchResponseEntry(RecordId=str(uuid.uuid4())) for _ in unprocessed_records + ] + + def _put_to_search_db( + self, + db_flavor, + db_description, + delivery_stream_name, + records, + unprocessed_records, + region_name, + ): + """ + sends Firehose records to an ElasticSearch or Opensearch database + """ + search_db_index = db_description["IndexName"] + domain_arn = db_description.get("DomainARN") + cluster_endpoint = db_description.get("ClusterEndpoint") + if cluster_endpoint is None: + cluster_endpoint = get_opensearch_endpoint(domain_arn) + + db_connection = get_search_db_connection(cluster_endpoint, region_name) + + if db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.AllDocuments: + s3_dest_desc = db_description.get("S3DestinationDescription") + if s3_dest_desc: + try: + self._put_records_to_s3_bucket( + stream_name=delivery_stream_name, + records=unprocessed_records, + s3_destination_description=s3_dest_desc, + ) + except Exception as e: + LOG.warning("Unable to backup unprocessed records to S3. Error: %s", e) + else: + LOG.warning("Passed S3BackupMode without S3Configuration. Cannot backup...") + elif db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.FailedDocumentsOnly: + # TODO support FailedDocumentsOnly as well + LOG.warning("S3BackupMode FailedDocumentsOnly is set but currently not supported.") + for record in records: + obj_id = uuid.uuid4() + + data = "{}" + # DirectPut + if "Data" in record: + data = base64.b64decode(record["Data"]) + # KinesisAsSource + elif "data" in record: + data = base64.b64decode(record["data"]) + + try: + body = json.loads(data) + except Exception as e: + LOG.warning("%s only allows json input data!", db_flavor) + raise e + + if LOG.isEnabledFor(logging.DEBUG): + LOG.debug( + "Publishing to %s destination. 
Data: %s", + db_flavor, + truncate(data, max_length=300), + ) + try: + db_connection.create(index=search_db_index, id=obj_id, body=body) + except Exception as e: + LOG.exception("Unable to put record to stream %s.", delivery_stream_name) + raise e + + def _add_missing_record_attributes(self, records: List[Dict]) -> None: + def _get_entry(obj, key): + return obj.get(key) or obj.get(first_char_to_lower(key)) + + for record in records: + if not _get_entry(record, "ApproximateArrivalTimestamp"): + record["ApproximateArrivalTimestamp"] = int(now_utc(millis=True)) + if not _get_entry(record, "KinesisRecordMetadata"): + record["kinesisRecordMetadata"] = { + "shardId": "shardId-000000000000", + # not really documented what AWS is using internally - simply using a random UUID here + "partitionKey": str(uuid.uuid4()), + "approximateArrivalTimestamp": timestamp( + float(_get_entry(record, "ApproximateArrivalTimestamp")) / 1000, + format=TIMESTAMP_FORMAT_MICROS, + ), + "sequenceNumber": next_sequence_number(), + "subsequenceNumber": "", + } + + def _preprocess_records(self, processor: Dict, records: List[Record]) -> List[Dict]: + """Preprocess the list of records by calling the given processor (e.g., Lamnda function).""" + proc_type = processor.get("Type") + parameters = processor.get("Parameters", []) + parameters = {p["ParameterName"]: p["ParameterValue"] for p in parameters} + if proc_type == "Lambda": + lambda_arn = parameters.get("LambdaArn") + # TODO: add support for other parameters, e.g., NumberOfRetries, BufferSizeInMBs, BufferIntervalInSeconds, ... + records = keys_to_lower(records) + # Convert the record data to string (for json serialization) + for record in records: + if "data" in record: + record["data"] = to_str(record["data"]) + if "Data" in record: + record["Data"] = to_str(record["Data"]) + event = {"records": records} + event = to_bytes(json.dumps(event)) + + account_id = extract_account_id_from_arn(lambda_arn) + region_name = extract_region_from_arn(lambda_arn) + client = connect_to(aws_access_key_id=account_id, region_name=region_name).lambda_ + + response = client.invoke(FunctionName=lambda_arn, Payload=event) + result = json.load(response["Payload"]) + records = result.get("records", []) if result else [] + else: + LOG.warning("Unsupported Firehose processor type '%s'", proc_type) + return records + + def _put_records_to_s3_bucket( + self, + stream_name: str, + records: List[Dict], + s3_destination_description: S3DestinationDescription, + ): + bucket = s3_bucket_name(s3_destination_description["BucketARN"]) + prefix = s3_destination_description.get("Prefix", "") + file_extension = s3_destination_description.get("FileExtension", "") + + if role_arn := s3_destination_description.get("RoleARN"): + factory = connect_to.with_assumed_role( + role_arn=role_arn, service_principal=ServicePrincipal.firehose + ) + else: + factory = connect_to() + s3 = factory.s3.request_metadata( + source_arn=stream_name, service_principal=ServicePrincipal.firehose + ) + batched_data = b"".join([base64.b64decode(r.get("Data") or r.get("data")) for r in records]) + + obj_path = self._get_s3_object_path(stream_name, prefix, file_extension) + try: + LOG.debug("Publishing to S3 destination: %s. 
Data: %s", bucket, batched_data) + s3.put_object(Bucket=bucket, Key=obj_path, Body=batched_data) + except Exception as e: + LOG.exception( + "Unable to put records %s to s3 bucket.", + records, + ) + raise e + + def _get_s3_object_path(self, stream_name, prefix, file_extension): + # See https://aws.amazon.com/kinesis/data-firehose/faqs/#Data_delivery + # Path prefix pattern: myApp/YYYY/MM/DD/HH/ + # Object name pattern: DeliveryStreamName-DeliveryStreamVersion-YYYY-MM-DD-HH-MM-SS-RandomString + if not prefix.endswith("/") and prefix != "": + prefix = prefix + "/" + pattern = "{pre}%Y/%m/%d/%H/{name}-%Y-%m-%d-%H-%M-%S-{rand}" + path = pattern.format(pre=prefix, name=stream_name, rand=str(uuid.uuid4())) + path = timestamp(format=path) + + if file_extension: + path += file_extension + + return path + + def _put_to_redshift( + self, + records: List[Dict], + redshift_destination_description: RedshiftDestinationDescription, + ): + jdbcurl = redshift_destination_description.get("ClusterJDBCURL") + cluster_id = self._get_cluster_id_from_jdbc_url(jdbcurl) + db_name = jdbcurl.split("/")[-1] + table_name = redshift_destination_description.get("CopyCommand").get("DataTableName") + + rows_to_insert = [self._prepare_records_for_redshift(record) for record in records] + columns_placeholder_str = self._extract_columns(records[0]) + sql_insert_statement = f"INSERT INTO {table_name} VALUES ({columns_placeholder_str})" + + execute_statement = { + "Sql": sql_insert_statement, + "Database": db_name, + "ClusterIdentifier": cluster_id, # cluster_identifier in cluster create + } + + role_arn = redshift_destination_description.get("RoleARN") + account_id = extract_account_id_from_arn(role_arn) + region_name = self._get_region_from_jdbc_url(jdbcurl) + redshift_data = connect_to( + aws_access_key_id=account_id, region_name=region_name + ).redshift_data + + for row_to_insert in rows_to_insert: # redsift_data only allows single row inserts + try: + LOG.debug( + "Publishing to Redshift destination: %s. Data: %s", + jdbcurl, + row_to_insert, + ) + redshift_data.execute_statement(Parameters=row_to_insert, **execute_statement) + except Exception as e: + LOG.exception( + "Unable to put records %s to redshift cluster.", + row_to_insert, + ) + raise e + + def _get_cluster_id_from_jdbc_url(self, jdbc_url: str) -> str: + pattern = r"://(.*?)\." 
+ match = re.search(pattern, jdbc_url) + if match: + return match.group(1) + else: + raise ValueError(f"Unable to extract cluster id from jdbc url: {jdbc_url}") + + def _get_region_from_jdbc_url(self, jdbc_url: str) -> str | None: + match = re.search(r"://(?:[^.]+\.){2}([^.]+)\.", jdbc_url) + if match: + return match.group(1) + else: + LOG.debug("Cannot extract region from JDBC url '%s'", jdbc_url) + return None + + def _decode_record(self, record: Dict) -> Dict: + data = base64.b64decode(record.get("Data") or record.get("data")) + data = to_str(data) + data = json.loads(data) + return data + + def _prepare_records_for_redshift(self, record: Dict) -> List[Dict]: + data = self._decode_record(record) + + parameters = [] + for key, value in data.items(): + if isinstance(value, str): + value = value.replace("\t", " ") + value = value.replace("\n", " ") + elif value is None: + value = "NULL" + else: + value = str(value) + parameters.append({"name": key, "value": value}) + # required to work with execute_statement in community (moto) and ext (localstack native) + + return parameters + + def _extract_columns(self, record: Dict) -> str: + data = self._decode_record(record) + placeholders = [f":{key}" for key in data] + placeholder_str = ", ".join(placeholders) + return placeholder_str diff --git a/localstack/services/stepfunctions/asl/component/common/path/__init__.py b/localstack-core/localstack/services/iam/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/path/__init__.py rename to localstack-core/localstack/services/iam/__init__.py diff --git a/localstack-core/localstack/services/iam/iam_patches.py b/localstack-core/localstack/services/iam/iam_patches.py new file mode 100644 index 0000000000000..bec31419c3c8f --- /dev/null +++ b/localstack-core/localstack/services/iam/iam_patches.py @@ -0,0 +1,164 @@ +import threading +from typing import Dict, List, Optional + +from moto.iam.models import ( + AccessKey, + AWSManagedPolicy, + IAMBackend, + InlinePolicy, + Policy, + User, +) +from moto.iam.models import Role as MotoRole +from moto.iam.policy_validation import VALID_STATEMENT_ELEMENTS + +from localstack import config +from localstack.constants import TAG_KEY_CUSTOM_ID +from localstack.utils.patch import patch + +ADDITIONAL_MANAGED_POLICIES = { + "AWSLambdaExecute": { + "Arn": "arn:aws:iam::aws:policy/AWSLambdaExecute", + "Path": "/", + "CreateDate": "2017-10-20T17:23:10+00:00", + "DefaultVersionId": "v4", + "Document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["logs:*"], + "Resource": "arn:aws:logs:*:*:*", + }, + { + "Effect": "Allow", + "Action": ["s3:GetObject", "s3:PutObject"], + "Resource": "arn:aws:s3:::*", + }, + ], + }, + "UpdateDate": "2019-05-20T18:22:18+00:00", + } +} + +IAM_PATCHED = False +IAM_PATCH_LOCK = threading.RLock() + + +def apply_iam_patches(): + global IAM_PATCHED + + # prevent patching multiple times, as this is called from both STS and IAM (for now) + with IAM_PATCH_LOCK: + if IAM_PATCHED: + return + + IAM_PATCHED = True + + # support service linked roles + moto_role_og_arn_prop = MotoRole.arn + + @property + def moto_role_arn(self): + return getattr(self, "service_linked_role_arn", None) or moto_role_og_arn_prop.__get__(self) + + MotoRole.arn = moto_role_arn + + # Add missing managed polices + # TODO this might not be necessary + @patch(IAMBackend._init_aws_policies) + def _init_aws_policies_extended(_init_aws_policies, self): + loaded_policies = _init_aws_policies(self) + 
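+        # extend moto's built-in AWS-managed policies with the additional ones defined
+        # above (e.g. AWSLambdaExecute), so they resolve like any other managed policy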
loaded_policies.extend( + [ + AWSManagedPolicy.from_data(name, self.account_id, self.region_name, d) + for name, d in ADDITIONAL_MANAGED_POLICIES.items() + ] + ) + return loaded_policies + + if "Principal" not in VALID_STATEMENT_ELEMENTS: + VALID_STATEMENT_ELEMENTS.append("Principal") + + # patch policy __init__ to set document as attribute + + @patch(Policy.__init__) + def policy__init__( + fn, + self, + name, + account_id, + region, + default_version_id=None, + description=None, + document=None, + **kwargs, + ): + fn(self, name, account_id, region, default_version_id, description, document, **kwargs) + self.document = document + if "tags" in kwargs and TAG_KEY_CUSTOM_ID in kwargs["tags"]: + self.id = kwargs["tags"][TAG_KEY_CUSTOM_ID]["Value"] + + @patch(IAMBackend.create_role) + def iam_backend_create_role( + fn, + self, + role_name: str, + assume_role_policy_document: str, + path: str, + permissions_boundary: Optional[str], + description: str, + tags: List[Dict[str, str]], + max_session_duration: Optional[str], + linked_service: Optional[str] = None, + ): + role = fn( + self, + role_name, + assume_role_policy_document, + path, + permissions_boundary, + description, + tags, + max_session_duration, + linked_service, + ) + new_id_tag = [tag for tag in (tags or []) if tag["Key"] == TAG_KEY_CUSTOM_ID] + if new_id_tag: + new_id = new_id_tag[0]["Value"] + old_id = role.id + role.id = new_id + self.roles[new_id] = self.roles.pop(old_id) + return role + + @patch(InlinePolicy.unapply_policy) + def inline_policy_unapply_policy(fn, self, backend): + try: + fn(self, backend) + except Exception: + # Actually role can be deleted before policy being deleted in cloudformation + pass + + @patch(AccessKey.__init__) + def access_key__init__( + fn, + self, + user_name: Optional[str], + prefix: str, + account_id: str, + status: str = "Active", + **kwargs, + ): + if not config.PARITY_AWS_ACCESS_KEY_ID: + prefix = "L" + prefix[1:] + fn(self, user_name, prefix, account_id, status, **kwargs) + + @patch(User.__init__) + def user__init__( + fn, + self, + *args, + **kwargs, + ): + fn(self, *args, **kwargs) + self.service_specific_credentials = [] diff --git a/localstack-core/localstack/services/iam/provider.py b/localstack-core/localstack/services/iam/provider.py new file mode 100644 index 0000000000000..312a2a714aafc --- /dev/null +++ b/localstack-core/localstack/services/iam/provider.py @@ -0,0 +1,764 @@ +import inspect +import json +import logging +import random +import re +import string +import uuid +from datetime import datetime +from typing import Any, Dict, List, TypeVar +from urllib.parse import quote + +from moto.iam.models import ( + IAMBackend, + filter_items_with_path_prefix, + iam_backends, +) +from moto.iam.models import Role as MotoRole +from moto.iam.models import User as MotoUser +from moto.iam.utils import generate_access_key_id_from_account_id + +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.iam import ( + ActionNameListType, + ActionNameType, + AttachedPermissionsBoundary, + ContextEntryListType, + CreateRoleRequest, + CreateRoleResponse, + CreateServiceLinkedRoleResponse, + CreateServiceSpecificCredentialResponse, + CreateUserResponse, + DeleteConflictException, + DeleteServiceLinkedRoleResponse, + DeletionTaskIdType, + DeletionTaskStatusType, + EvaluationResult, + GetServiceLinkedRoleDeletionStatusResponse, + GetUserResponse, + IamApi, + InvalidInputException, + ListInstanceProfileTagsResponse, + ListRolesResponse, + 
ListServiceSpecificCredentialsResponse,
+    MalformedPolicyDocumentException,
+    NoSuchEntityException,
+    PolicyEvaluationDecisionType,
+    ResetServiceSpecificCredentialResponse,
+    ResourceHandlingOptionType,
+    ResourceNameListType,
+    ResourceNameType,
+    Role,
+    ServiceSpecificCredential,
+    ServiceSpecificCredentialMetadata,
+    SimulatePolicyResponse,
+    SimulationPolicyListType,
+    Tag,
+    User,
+    arnType,
+    customSuffixType,
+    existingUserNameType,
+    groupNameType,
+    instanceProfileNameType,
+    markerType,
+    maxItemsType,
+    pathPrefixType,
+    pathType,
+    policyDocumentType,
+    roleDescriptionType,
+    roleNameType,
+    serviceName,
+    serviceSpecificCredentialId,
+    statusType,
+    tagKeyListType,
+    tagListType,
+    userNameType,
+)
+from localstack.aws.connect import connect_to
+from localstack.constants import INTERNAL_AWS_SECRET_ACCESS_KEY
+from localstack.services.iam.iam_patches import apply_iam_patches
+from localstack.services.iam.resources.service_linked_roles import SERVICE_LINKED_ROLES
+from localstack.services.moto import call_moto
+from localstack.utils.aws.request_context import extract_access_key_id_from_auth_header
+
+LOG = logging.getLogger(__name__)
+
+SERVICE_LINKED_ROLE_PATH_PREFIX = "/aws-service-role"
+
+POLICY_ARN_REGEX = re.compile(r"arn:[^:]+:iam::(?:\d{12}|aws):policy/.*")
+
+CREDENTIAL_ID_REGEX = re.compile(r"^\w+$")
+
+T = TypeVar("T")
+
+
+class ValidationError(CommonServiceException):
+    def __init__(self, message: str):
+        super().__init__("ValidationError", message, 400, True)
+
+
+class ValidationListError(ValidationError):
+    def __init__(self, validation_errors: list[str]):
+        message = f"{len(validation_errors)} validation error{'s' if len(validation_errors) > 1 else ''} detected: {'; '.join(validation_errors)}"
+        super().__init__(message)
+
+
+def get_iam_backend(context: RequestContext) -> IAMBackend:
+    return iam_backends[context.account_id][context.partition]
+
+
+def get_policies_from_principal(backend: IAMBackend, principal_arn: str) -> list[dict]:
+    policies = []
+    if ":role" in principal_arn:
+        role_name = principal_arn.split("/")[-1]
+
+        policies.append(backend.get_role(role_name=role_name).assume_role_policy_document)
+
+        policy_names = backend.list_role_policies(role_name=role_name)
+        policies.extend(
+            [
+                backend.get_role_policy(role_name=role_name, policy_name=policy_name)[1]
+                for policy_name in policy_names
+            ]
+        )
+
+        attached_policies, _ = backend.list_attached_role_policies(role_name=role_name)
+        policies.extend([policy.document for policy in attached_policies])
+
+    if ":group" in principal_arn:
+        group_name = principal_arn.split("/")[-1]
+        policy_names = backend.list_group_policies(group_name=group_name)
+        policies.extend(
+            [
+                backend.get_group_policy(group_name=group_name, policy_name=policy_name)[1]
+                for policy_name in policy_names
+            ]
+        )
+
+        attached_policies, _ = backend.list_attached_group_policies(group_name=group_name)
+        policies.extend([policy.document for policy in attached_policies])
+
+    if ":user" in principal_arn:
+        user_name = principal_arn.split("/")[-1]
+        policy_names = backend.list_user_policies(user_name=user_name)
+        policies.extend(
+            [
+                backend.get_user_policy(user_name=user_name, policy_name=policy_name)[1]
+                for policy_name in policy_names
+            ]
+        )
+
+        attached_policies, _ = backend.list_attached_user_policies(user_name=user_name)
+        policies.extend([policy.document for policy in attached_policies])
+
+    return policies
+
+
+class IamProvider(IamApi):
+    def __init__(self):
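+        # applying the moto patches here is safe even if several providers are
+        # instantiated: apply_iam_patches() is guarded by IAM_PATCH_LOCK/IAM_PATCHED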
apply_iam_patches() + + @handler("CreateRole", expand=False) + def create_role( + self, context: RequestContext, request: CreateRoleRequest + ) -> CreateRoleResponse: + try: + json.loads(request["AssumeRolePolicyDocument"]) + except json.JSONDecodeError: + raise MalformedPolicyDocumentException("This policy contains invalid Json") + result = call_moto(context) + + if not request.get("MaxSessionDuration") and result["Role"].get("MaxSessionDuration"): + result["Role"].pop("MaxSessionDuration") + + if "RoleLastUsed" in result["Role"] and not result["Role"]["RoleLastUsed"]: + # not part of the AWS response if it's empty + # FIXME: RoleLastUsed did not seem well supported when this check was added + result["Role"].pop("RoleLastUsed") + + return result + + @staticmethod + def build_evaluation_result( + action_name: ActionNameType, resource_name: ResourceNameType, policy_statements: List[Dict] + ) -> EvaluationResult: + eval_res = EvaluationResult() + eval_res["EvalActionName"] = action_name + eval_res["EvalResourceName"] = resource_name + eval_res["EvalDecision"] = PolicyEvaluationDecisionType.explicitDeny + for statement in policy_statements: + # TODO Implement evaluation logic here + if ( + action_name in statement["Action"] + and resource_name in statement["Resource"] + and statement["Effect"] == "Allow" + ): + eval_res["EvalDecision"] = PolicyEvaluationDecisionType.allowed + eval_res["MatchedStatements"] = [] # TODO: add support for statement compilation. + return eval_res + + def simulate_principal_policy( + self, + context: RequestContext, + policy_source_arn: arnType, + action_names: ActionNameListType, + policy_input_list: SimulationPolicyListType = None, + permissions_boundary_policy_input_list: SimulationPolicyListType = None, + resource_arns: ResourceNameListType = None, + resource_policy: policyDocumentType = None, + resource_owner: ResourceNameType = None, + caller_arn: ResourceNameType = None, + context_entries: ContextEntryListType = None, + resource_handling_option: ResourceHandlingOptionType = None, + max_items: maxItemsType = None, + marker: markerType = None, + **kwargs, + ) -> SimulatePolicyResponse: + backend = get_iam_backend(context) + + policies = get_policies_from_principal(backend, policy_source_arn) + + def _get_statements_from_policy_list(policies: list[str]): + statements = [] + for policy_str in policies: + policy_dict = json.loads(policy_str) + if isinstance(policy_dict["Statement"], list): + statements.extend(policy_dict["Statement"]) + else: + statements.append(policy_dict["Statement"]) + return statements + + policy_statements = _get_statements_from_policy_list(policies) + + evaluations = [ + self.build_evaluation_result(action_name, resource_arn, policy_statements) + for action_name in action_names + for resource_arn in resource_arns + ] + + response = SimulatePolicyResponse() + response["IsTruncated"] = False + response["EvaluationResults"] = evaluations + return response + + def delete_policy(self, context: RequestContext, policy_arn: arnType, **kwargs) -> None: + backend = get_iam_backend(context) + if backend.managed_policies.get(policy_arn): + backend.managed_policies.pop(policy_arn, None) + else: + raise NoSuchEntityException("Policy {0} was not found.".format(policy_arn)) + + def detach_role_policy( + self, context: RequestContext, role_name: roleNameType, policy_arn: arnType, **kwargs + ) -> None: + backend = get_iam_backend(context) + try: + role = backend.get_role(role_name) + policy = role.managed_policies[policy_arn] + policy.detach_from(role) + 
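+            # moto raises a plain KeyError if the policy is not attached to the role;
+            # translated below into the AWS-style NoSuchEntityException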
except KeyError: + raise NoSuchEntityException("Policy {0} was not found.".format(policy_arn)) + + @staticmethod + def moto_role_to_role_type(moto_role: MotoRole) -> Role: + role = Role() + role["Path"] = moto_role.path + role["RoleName"] = moto_role.name + role["RoleId"] = moto_role.id + role["Arn"] = moto_role.arn + role["CreateDate"] = moto_role.create_date + if moto_role.assume_role_policy_document: + role["AssumeRolePolicyDocument"] = moto_role.assume_role_policy_document + if moto_role.description: + role["Description"] = moto_role.description + if moto_role.max_session_duration: + role["MaxSessionDuration"] = moto_role.max_session_duration + if moto_role.permissions_boundary: + role["PermissionsBoundary"] = moto_role.permissions_boundary + if moto_role.tags: + role["Tags"] = [Tag(Key=k, Value=v) for k, v in moto_role.tags.items()] + # role["RoleLastUsed"]: # TODO: add support + return role + + def list_roles( + self, + context: RequestContext, + path_prefix: pathPrefixType = None, + marker: markerType = None, + max_items: maxItemsType = None, + **kwargs, + ) -> ListRolesResponse: + backend = get_iam_backend(context) + moto_roles = backend.roles.values() + if path_prefix: + moto_roles = filter_items_with_path_prefix(path_prefix, moto_roles) + moto_roles = sorted(moto_roles, key=lambda role: role.id) + + response_roles = [] + for moto_role in moto_roles: + response_role = self.moto_role_to_role_type(moto_role) + # Permission boundary should not be a part of the response + response_role.pop("PermissionsBoundary", None) + response_roles.append(response_role) + if path_prefix: # TODO: this is consistent with the patch it migrates, but should add tests for this. + response_role["AssumeRolePolicyDocument"] = quote( + json.dumps(moto_role.assume_role_policy_document or {}) + ) + + return ListRolesResponse(Roles=response_roles, IsTruncated=False) + + def update_group( + self, + context: RequestContext, + group_name: groupNameType, + new_path: pathType = None, + new_group_name: groupNameType = None, + **kwargs, + ) -> None: + new_group_name = new_group_name or group_name + backend = get_iam_backend(context) + group = backend.get_group(group_name) + group.path = new_path + group.name = new_group_name + backend.groups[new_group_name] = backend.groups.pop(group_name) + + def list_instance_profile_tags( + self, + context: RequestContext, + instance_profile_name: instanceProfileNameType, + marker: markerType = None, + max_items: maxItemsType = None, + **kwargs, + ) -> ListInstanceProfileTagsResponse: + backend = get_iam_backend(context) + profile = backend.get_instance_profile(instance_profile_name) + response = ListInstanceProfileTagsResponse() + response["Tags"] = [Tag(Key=k, Value=v) for k, v in profile.tags.items()] + return response + + def tag_instance_profile( + self, + context: RequestContext, + instance_profile_name: instanceProfileNameType, + tags: tagListType, + **kwargs, + ) -> None: + backend = get_iam_backend(context) + profile = backend.get_instance_profile(instance_profile_name) + value_by_key = {tag["Key"]: tag["Value"] for tag in tags} + profile.tags.update(value_by_key) + + def untag_instance_profile( + self, + context: RequestContext, + instance_profile_name: instanceProfileNameType, + tag_keys: tagKeyListType, + **kwargs, + ) -> None: + backend = get_iam_backend(context) + profile = backend.get_instance_profile(instance_profile_name) + for tag in tag_keys: + profile.tags.pop(tag, None) + + def create_service_linked_role( + self, + context: RequestContext, + aws_service_name: 
groupNameType, + description: roleDescriptionType = None, + custom_suffix: customSuffixType = None, + **kwargs, + ) -> CreateServiceLinkedRoleResponse: + policy_doc = json.dumps( + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"Service": aws_service_name}, + "Action": "sts:AssumeRole", + } + ], + } + ) + service_role_data = SERVICE_LINKED_ROLES.get(aws_service_name) + + path = f"{SERVICE_LINKED_ROLE_PATH_PREFIX}/{aws_service_name}/" + if service_role_data: + if custom_suffix and not service_role_data["suffix_allowed"]: + raise InvalidInputException(f"Custom suffix is not allowed for {aws_service_name}") + role_name = service_role_data.get("role_name") + attached_policies = service_role_data["attached_policies"] + else: + role_name = f"AWSServiceRoleFor{aws_service_name.split('.')[0].capitalize()}" + attached_policies = [] + if custom_suffix: + role_name = f"{role_name}_{custom_suffix}" + backend = get_iam_backend(context) + + # check for role duplicates + for role in backend.roles.values(): + if role.name == role_name: + raise InvalidInputException( + f"Service role name {role_name} has been taken in this account, please try a different suffix." + ) + + role = backend.create_role( + role_name=role_name, + assume_role_policy_document=policy_doc, + path=path, + permissions_boundary="", + description=description, + tags={}, + max_session_duration=3600, + linked_service=aws_service_name, + ) + # attach policies + for policy in attached_policies: + try: + backend.attach_role_policy(policy, role_name) + except Exception as e: + LOG.warning( + "Policy %s for service linked role %s does not exist: %s", + policy, + aws_service_name, + e, + ) + + res_role = self.moto_role_to_role_type(role) + return CreateServiceLinkedRoleResponse(Role=res_role) + + def delete_service_linked_role( + self, context: RequestContext, role_name: roleNameType, **kwargs + ) -> DeleteServiceLinkedRoleResponse: + backend = get_iam_backend(context) + role = backend.get_role(role_name=role_name) + role.managed_policies.clear() + backend.delete_role(role_name) + return DeleteServiceLinkedRoleResponse( + DeletionTaskId=f"task{role.path}{role.name}/{uuid.uuid4()}" + ) + + def get_service_linked_role_deletion_status( + self, context: RequestContext, deletion_task_id: DeletionTaskIdType, **kwargs + ) -> GetServiceLinkedRoleDeletionStatusResponse: + # TODO: check if task id is valid + return GetServiceLinkedRoleDeletionStatusResponse(Status=DeletionTaskStatusType.SUCCEEDED) + + def put_user_permissions_boundary( + self, + context: RequestContext, + user_name: userNameType, + permissions_boundary: arnType, + **kwargs, + ) -> None: + if user := get_iam_backend(context).users.get(user_name): + user.permissions_boundary = permissions_boundary + else: + raise NoSuchEntityException() + + def delete_user_permissions_boundary( + self, context: RequestContext, user_name: userNameType, **kwargs + ) -> None: + if user := get_iam_backend(context).users.get(user_name): + if hasattr(user, "permissions_boundary"): + delattr(user, "permissions_boundary") + else: + raise NoSuchEntityException() + + def create_user( + self, + context: RequestContext, + user_name: userNameType, + path: pathType = None, + permissions_boundary: arnType = None, + tags: tagListType = None, + **kwargs, + ) -> CreateUserResponse: + response = call_moto(context=context) + user = get_iam_backend(context).get_user(user_name) + if permissions_boundary: + user.permissions_boundary = permissions_boundary + 
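+            # the boundary is tracked directly on the moto user object and mirrored
+            # into the response in the AttachedPermissionsBoundary shape AWS returns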
response["User"]["PermissionsBoundary"] = AttachedPermissionsBoundary( + PermissionsBoundaryArn=permissions_boundary, + PermissionsBoundaryType="Policy", + ) + return response + + def get_user( + self, context: RequestContext, user_name: existingUserNameType = None, **kwargs + ) -> GetUserResponse: + response = call_moto(context=context) + moto_user_name = response["User"]["UserName"] + moto_user = get_iam_backend(context).users.get(moto_user_name) + # if the user does not exist or is no user + if not moto_user and not user_name: + access_key_id = extract_access_key_id_from_auth_header(context.request.headers) + sts_client = connect_to( + region_name=context.region, + aws_access_key_id=access_key_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + ).sts + caller_identity = sts_client.get_caller_identity() + caller_arn = caller_identity["Arn"] + if caller_arn.endswith(":root"): + return GetUserResponse( + User=User( + UserId=context.account_id, + Arn=caller_arn, + CreateDate=datetime.now(), + PasswordLastUsed=datetime.now(), + ) + ) + else: + raise CommonServiceException( + "ValidationError", + "Must specify userName when calling with non-User credentials", + ) + + if hasattr(moto_user, "permissions_boundary") and moto_user.permissions_boundary: + response["User"]["PermissionsBoundary"] = AttachedPermissionsBoundary( + PermissionsBoundaryArn=moto_user.permissions_boundary, + PermissionsBoundaryType="Policy", + ) + + return response + + def delete_user( + self, context: RequestContext, user_name: existingUserNameType, **kwargs + ) -> None: + moto_user = get_iam_backend(context).users.get(user_name) + if moto_user and moto_user.service_specific_credentials: + LOG.info( + "Cannot delete user '%s' because service specific credentials are still present.", + user_name, + ) + raise DeleteConflictException( + "Cannot delete entity, must remove referenced objects first." + ) + return call_moto(context=context) + + def attach_role_policy( + self, context: RequestContext, role_name: roleNameType, policy_arn: arnType, **kwargs + ) -> None: + if not POLICY_ARN_REGEX.match(policy_arn): + raise ValidationError("Invalid ARN: Could not be parsed!") + return call_moto(context=context) + + def attach_user_policy( + self, context: RequestContext, user_name: userNameType, policy_arn: arnType, **kwargs + ) -> None: + if not POLICY_ARN_REGEX.match(policy_arn): + raise ValidationError("Invalid ARN: Could not be parsed!") + return call_moto(context=context) + + # ------------------------------ Service specific credentials ------------------------------ # + + def _get_user_or_raise_error(self, user_name: str, context: RequestContext) -> MotoUser: + """ + Return the moto user from the store, or raise the proper exception if no user can be found. + + :param user_name: Username to find + :param context: Request context + :return: A moto user object + """ + moto_user = get_iam_backend(context).users.get(user_name) + if not moto_user: + raise NoSuchEntityException(f"The user with name {user_name} cannot be found.") + return moto_user + + def _validate_service_name(self, service_name: str) -> None: + """ + Validate if the service provided is supported. 
+ + :param service_name: Service name to check + """ + if service_name not in ["codecommit.amazonaws.com", "cassandra.amazonaws.com"]: + raise NoSuchEntityException( + f"No such service {service_name} is supported for Service Specific Credentials" + ) + + def _validate_credential_id(self, credential_id: str) -> None: + """ + Validate if the credential id is correctly formed. + + :param credential_id: Credential ID to check + """ + if not CREDENTIAL_ID_REGEX.match(credential_id): + raise ValidationListError( + [ + "Value at 'serviceSpecificCredentialId' failed to satisfy constraint: Member must satisfy regular expression pattern: [\\w]+" + ] + ) + + def _generate_service_password(self): + """ + Generate a new service password for a service specific credential. + + :return: 60 letter password ending in `=` + """ + password_charset = string.ascii_letters + string.digits + "+/" + # password always ends in = for some reason - but it is not base64 + return "".join(random.choices(password_charset, k=59)) + "=" + + def _generate_credential_id(self, context: RequestContext): + """ + Generate a credential ID. + Credentials have a similar structure as access key ids, and also contain the account id encoded in them. + Example: `ACCAQAAAAAAAPBAFQJI5W` for account `000000000000` + + :param context: Request context (to extract account id) + :return: New credential id. + """ + return generate_access_key_id_from_account_id( + context.account_id, prefix="ACCA", total_length=21 + ) + + def _new_service_specific_credential( + self, user_name: str, service_name: str, context: RequestContext + ) -> ServiceSpecificCredential: + """ + Create a new service specific credential for the given username and service. + + :param user_name: Username the credential will be assigned to. + :param service_name: Service the credential will be used for. + :param context: Request context, used to extract the account id. + :return: New ServiceSpecificCredential + """ + password = self._generate_service_password() + credential_id = self._generate_credential_id(context) + return ServiceSpecificCredential( + CreateDate=datetime.now(), + ServiceName=service_name, + ServiceUserName=f"{user_name}-at-{context.account_id}", + ServicePassword=password, + ServiceSpecificCredentialId=credential_id, + UserName=user_name, + Status=statusType.Active, + ) + + def _find_credential_in_user_by_id( + self, user_name: str, credential_id: str, context: RequestContext + ) -> ServiceSpecificCredential: + """ + Find a credential by a given username and id. + Raises errors if the user or credential is not found. + + :param user_name: Username of the user the credential is assigned to. + :param credential_id: Credential ID to check + :param context: Request context (used to determine account and region) + :return: Service specific credential + """ + moto_user = self._get_user_or_raise_error(user_name, context) + self._validate_credential_id(credential_id) + matching_credentials = [ + cred + for cred in moto_user.service_specific_credentials + if cred["ServiceSpecificCredentialId"] == credential_id + ] + if not matching_credentials: + raise NoSuchEntityException(f"No such credential {credential_id} exists") + return matching_credentials[0] + + def _validate_status(self, status: str): + """ + Validate if the status has an accepted value. + Raises a ValidationError if the status is invalid. 
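+        For example, statusType("Active") succeeds, while a value outside the enum
+        (say, "Enabled") raises ValueError and is reported as a validation error.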
+ + :param status: Status to check + """ + try: + statusType(status) + except ValueError: + raise ValidationListError( + [ + "Value at 'status' failed to satisfy constraint: Member must satisfy enum value set" + ] + ) + + def build_dict_with_only_defined_keys( + self, data: dict[str, Any], typed_dict_type: type[T] + ) -> T: + """ + Builds a dict with only the defined keys from a given typed dict. + Filtering is only present on the first level. + + :param data: Dict to filter. + :param typed_dict_type: TypedDict subtype containing the attributes allowed to be present in the return value + :return: shallow copy of the data only containing the keys defined on typed_dict_type + """ + key_set = inspect.get_annotations(typed_dict_type).keys() + return {k: v for k, v in data.items() if k in key_set} + + def create_service_specific_credential( + self, context: RequestContext, user_name: userNameType, service_name: serviceName, **kwargs + ) -> CreateServiceSpecificCredentialResponse: + moto_user = self._get_user_or_raise_error(user_name, context) + self._validate_service_name(service_name) + credential = self._new_service_specific_credential(user_name, service_name, context) + moto_user.service_specific_credentials.append(credential) + return CreateServiceSpecificCredentialResponse(ServiceSpecificCredential=credential) + + def list_service_specific_credentials( + self, + context: RequestContext, + user_name: userNameType = None, + service_name: serviceName = None, + **kwargs, + ) -> ListServiceSpecificCredentialsResponse: + moto_user = self._get_user_or_raise_error(user_name, context) + self._validate_service_name(service_name) + result = [ + self.build_dict_with_only_defined_keys(creds, ServiceSpecificCredentialMetadata) + for creds in moto_user.service_specific_credentials + if creds["ServiceName"] == service_name + ] + return ListServiceSpecificCredentialsResponse(ServiceSpecificCredentials=result) + + def update_service_specific_credential( + self, + context: RequestContext, + service_specific_credential_id: serviceSpecificCredentialId, + status: statusType, + user_name: userNameType = None, + **kwargs, + ) -> None: + self._validate_status(status) + + credential = self._find_credential_in_user_by_id( + user_name, service_specific_credential_id, context + ) + credential["Status"] = status + + def reset_service_specific_credential( + self, + context: RequestContext, + service_specific_credential_id: serviceSpecificCredentialId, + user_name: userNameType = None, + **kwargs, + ) -> ResetServiceSpecificCredentialResponse: + credential = self._find_credential_in_user_by_id( + user_name, service_specific_credential_id, context + ) + credential["ServicePassword"] = self._generate_service_password() + return ResetServiceSpecificCredentialResponse(ServiceSpecificCredential=credential) + + def delete_service_specific_credential( + self, + context: RequestContext, + service_specific_credential_id: serviceSpecificCredentialId, + user_name: userNameType = None, + **kwargs, + ) -> None: + moto_user = self._get_user_or_raise_error(user_name, context) + credentials = self._find_credential_in_user_by_id( + user_name, service_specific_credential_id, context + ) + try: + moto_user.service_specific_credentials.remove(credentials) + # just in case of race conditions + except ValueError: + raise NoSuchEntityException( + f"No such credential {service_specific_credential_id} exists" + ) diff --git a/localstack/services/stepfunctions/asl/component/common/payload/__init__.py 
b/localstack-core/localstack/services/iam/resource_providers/__init__.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/payload/__init__.py
rename to localstack-core/localstack/services/iam/resource_providers/__init__.py
diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey.py
new file mode 100644
index 0000000000000..a945e5af67a47
--- /dev/null
+++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey.py
@@ -0,0 +1,116 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+    OperationStatus,
+    ProgressEvent,
+    ResourceProvider,
+    ResourceRequest,
+)
+
+
+class IAMAccessKeyProperties(TypedDict):
+    UserName: Optional[str]
+    Id: Optional[str]
+    SecretAccessKey: Optional[str]
+    Serial: Optional[int]
+    Status: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class IAMAccessKeyProvider(ResourceProvider[IAMAccessKeyProperties]):
+    TYPE = "AWS::IAM::AccessKey"  # Autogenerated. Don't change
+    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[IAMAccessKeyProperties],
+    ) -> ProgressEvent[IAMAccessKeyProperties]:
+        """
+        Create a new resource.
+
+        Primary identifier fields:
+          - /properties/Id
+
+        Required properties:
+          - UserName
+
+        Create-only properties:
+          - /properties/UserName
+          - /properties/Serial
+
+        Read-only properties:
+          - /properties/SecretAccessKey
+          - /properties/Id
+
+        """
+        # TODO: what values can model['Serial'] take on initial create?
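+        # flow: create the key, record the generated id/secret on the model, then
+        # align Status if the template asked for "Inactive" (new keys start "Active")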
+ model = request.desired_state + iam_client = request.aws_client_factory.iam + + access_key = iam_client.create_access_key(UserName=model["UserName"]) + model["SecretAccessKey"] = access_key["AccessKey"]["SecretAccessKey"] + model["Id"] = access_key["AccessKey"]["AccessKeyId"] + + if model.get("Status") == "Inactive": + # can be "Active" or "Inactive" + # by default the created access key has Status "Active", but if user set Inactive this needs to be adjusted + iam_client.update_access_key( + AccessKeyId=model["Id"], UserName=model["UserName"], Status=model["Status"] + ) + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[IAMAccessKeyProperties], + ) -> ProgressEvent[IAMAccessKeyProperties]: + """ + Fetch resource information + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[IAMAccessKeyProperties], + ) -> ProgressEvent[IAMAccessKeyProperties]: + """ + Delete a resource + """ + iam_client = request.aws_client_factory.iam + model = request.previous_state + iam_client.delete_access_key(AccessKeyId=model["Id"], UserName=model["UserName"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[IAMAccessKeyProperties], + ) -> ProgressEvent[IAMAccessKeyProperties]: + """ + Update a resource + """ + iam_client = request.aws_client_factory.iam + + # FIXME: replacement should be handled in engine before here + user_name_changed = request.desired_state["UserName"] != request.previous_state["UserName"] + serial_changed = request.desired_state["Serial"] != request.previous_state["Serial"] + if user_name_changed or serial_changed: + # recreate the key + self.delete(request) + create_event = self.create(request) + return create_event + + iam_client.update_access_key( + AccessKeyId=request.previous_state["Id"], + UserName=request.previous_state["UserName"], + Status=request.desired_state["Status"], + ) + old_model = request.previous_state + old_model["Status"] = request.desired_state["Status"] + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=old_model) diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey.schema.json new file mode 100644 index 0000000000000..4925db7a9d608 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey.schema.json @@ -0,0 +1,36 @@ +{ + "typeName": "AWS::IAM::AccessKey", + "description": "Resource Type definition for AWS::IAM::AccessKey", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "SecretAccessKey": { + "type": "string" + }, + "Serial": { + "type": "integer" + }, + "Status": { + "type": "string" + }, + "UserName": { + "type": "string" + } + }, + "required": [ + "UserName" + ], + "readOnlyProperties": [ + "/properties/SecretAccessKey", + "/properties/Id" + ], + "createOnlyProperties": [ + "/properties/UserName", + "/properties/Serial" + ], + "primaryIdentifier": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey_plugin.py new file mode 100644 index 0000000000000..a54ee6f94b3db --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_accesskey_plugin.py @@ -0,0 +1,20 @@ +from typing import 
Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMAccessKeyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::AccessKey" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_accesskey import ( + IAMAccessKeyProvider, + ) + + self.factory = IAMAccessKeyProvider diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_group.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_group.py new file mode 100644 index 0000000000000..69c2b15ab1bfe --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_group.py @@ -0,0 +1,152 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class IAMGroupProperties(TypedDict): + Arn: Optional[str] + GroupName: Optional[str] + Id: Optional[str] + ManagedPolicyArns: Optional[list[str]] + Path: Optional[str] + Policies: Optional[list[Policy]] + + +class Policy(TypedDict): + PolicyDocument: Optional[dict] + PolicyName: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class IAMGroupProvider(ResourceProvider[IAMGroupProperties]): + TYPE = "AWS::IAM::Group" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[IAMGroupProperties], + ) -> ProgressEvent[IAMGroupProperties]: + """ + Create a new resource. 
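+        The group is created first; ManagedPolicyArns are then attached and inline
+        Policies put, in that order.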
+ + Primary identifier fields: + - /properties/Id + + Create-only properties: + - /properties/GroupName + + Read-only properties: + - /properties/Arn + - /properties/Id + """ + model = request.desired_state + iam_client = request.aws_client_factory.iam + + group_name = model.get("GroupName") + if not group_name: + group_name = util.generate_default_name(request.stack_name, request.logical_resource_id) + model["GroupName"] = group_name + + create_group_result = iam_client.create_group( + **util.select_attributes(model, ["GroupName", "Path"]) + ) + model["Id"] = create_group_result["Group"][ + "GroupName" + ] # a bit weird that this is not the GroupId + model["Arn"] = create_group_result["Group"]["Arn"] + + for managed_policy in model.get("ManagedPolicyArns", []): + iam_client.attach_group_policy(GroupName=group_name, PolicyArn=managed_policy) + + for inline_policy in model.get("Policies", []): + doc = json.dumps(inline_policy.get("PolicyDocument")) + iam_client.put_group_policy( + GroupName=group_name, + PolicyName=inline_policy.get("PolicyName"), + PolicyDocument=doc, + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[IAMGroupProperties], + ) -> ProgressEvent[IAMGroupProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[IAMGroupProperties], + ) -> ProgressEvent[IAMGroupProperties]: + """ + Delete a resource + """ + model = request.desired_state + iam_client = request.aws_client_factory.iam + + # first we need to detach and delete any attached policies + for managed_policy in model.get("ManagedPolicyArns", []): + iam_client.detach_group_policy(GroupName=model["GroupName"], PolicyArn=managed_policy) + + for inline_policy in model.get("Policies", []): + iam_client.delete_group_policy( + GroupName=model["GroupName"], + PolicyName=inline_policy.get("PolicyName"), + ) + + # now we can delete the actual group + iam_client.delete_group(GroupName=model["GroupName"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={}, + ) + + def update( + self, + request: ResourceRequest[IAMGroupProperties], + ) -> ProgressEvent[IAMGroupProperties]: + """ + Update a resource + """ + # TODO: note: while the resource implemented "update_resource" previously, it didn't actually work + # so leaving it out here for now + # iam.update_group( + # GroupName=props.get("GroupName"), + # NewPath=props.get("NewPath") or "", + # NewGroupName=props.get("NewGroupName") or "", + # ) + raise NotImplementedError + + def list( + self, + request: ResourceRequest[IAMGroupProperties], + ) -> ProgressEvent[IAMGroupProperties]: + resources = request.aws_client_factory.iam.list_groups() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + IAMGroupProperties(Id=resource["GroupName"]) for resource in resources["Groups"] + ], + ) diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_group.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_group.schema.json new file mode 100644 index 0000000000000..e31b0e5594b3f --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_group.schema.json @@ -0,0 +1,61 @@ +{ + "typeName": "AWS::IAM::Group", + "description": "Resource Type definition for AWS::IAM::Group", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "Arn": { 
+ "type": "string" + }, + "GroupName": { + "type": "string" + }, + "ManagedPolicyArns": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Path": { + "type": "string" + }, + "Policies": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/Policy" + } + } + }, + "definitions": { + "Policy": { + "type": "object", + "additionalProperties": false, + "properties": { + "PolicyDocument": { + "type": "object" + }, + "PolicyName": { + "type": "string" + } + }, + "required": [ + "PolicyDocument", + "PolicyName" + ] + } + }, + "readOnlyProperties": [ + "/properties/Arn", + "/properties/Id" + ], + "createOnlyProperties": [ + "/properties/GroupName" + ], + "primaryIdentifier": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_group_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_group_plugin.py new file mode 100644 index 0000000000000..24af55af719b1 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_group_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMGroupProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::Group" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_group import IAMGroupProvider + + self.factory = IAMGroupProvider diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile.py new file mode 100644 index 0000000000000..b65f5f079d0ff --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile.py @@ -0,0 +1,136 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class IAMInstanceProfileProperties(TypedDict): + Roles: Optional[list[str]] + Arn: Optional[str] + InstanceProfileName: Optional[str] + Path: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class IAMInstanceProfileProvider(ResourceProvider[IAMInstanceProfileProperties]): + TYPE = "AWS::IAM::InstanceProfile" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[IAMInstanceProfileProperties], + ) -> ProgressEvent[IAMInstanceProfileProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/InstanceProfileName + + Required properties: + - Roles + + Create-only properties: + - /properties/InstanceProfileName + - /properties/Path + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - iam:CreateInstanceProfile + - iam:PassRole + - iam:AddRoleToInstanceProfile + - iam:GetInstanceProfile + + """ + model = request.desired_state + iam = request.aws_client_factory.iam + + # defaults + role_name = model.get("InstanceProfileName") + if not role_name: + role_name = util.generate_default_name(request.stack_name, request.logical_resource_id) + model["InstanceProfileName"] = role_name + + response = iam.create_instance_profile( + **util.select_attributes( + model, + [ + "InstanceProfileName", + "Path", + ], + ), + ) + for role_name in model.get("Roles", []): + iam.add_role_to_instance_profile( + InstanceProfileName=model["InstanceProfileName"], RoleName=role_name + ) + model["Arn"] = response["InstanceProfile"]["Arn"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[IAMInstanceProfileProperties], + ) -> ProgressEvent[IAMInstanceProfileProperties]: + """ + Fetch resource information + + IAM permissions required: + - iam:GetInstanceProfile + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[IAMInstanceProfileProperties], + ) -> ProgressEvent[IAMInstanceProfileProperties]: + """ + Delete a resource + + IAM permissions required: + - iam:GetInstanceProfile + - iam:RemoveRoleFromInstanceProfile + - iam:DeleteInstanceProfile + """ + iam = request.aws_client_factory.iam + instance_profile = iam.get_instance_profile( + InstanceProfileName=request.previous_state["InstanceProfileName"] + ) + for role in instance_profile["InstanceProfile"]["Roles"]: + iam.remove_role_from_instance_profile( + InstanceProfileName=request.previous_state["InstanceProfileName"], + RoleName=role["RoleName"], + ) + iam.delete_instance_profile( + InstanceProfileName=request.previous_state["InstanceProfileName"] + ) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[IAMInstanceProfileProperties], + ) -> ProgressEvent[IAMInstanceProfileProperties]: + """ + Update a resource + + IAM permissions required: + - iam:PassRole + - iam:RemoveRoleFromInstanceProfile + - iam:AddRoleToInstanceProfile + - iam:GetInstanceProfile + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile.schema.json new file mode 100644 index 0000000000000..f04a6751c1691 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile.schema.json @@ -0,0 +1,77 @@ +{ + "typeName": "AWS::IAM::InstanceProfile", + "description": "Resource Type definition for AWS::IAM::InstanceProfile", + "additionalProperties": false, + "properties": { + "Path": { + "type": "string", + "description": "The path to the instance profile." + }, + "Roles": { + "type": "array", + "description": "The name of the role to associate with the instance profile. 
Only one role can be assigned to an EC2 instance at a time, and all applications on the instance share the same role and permissions.", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "InstanceProfileName": { + "type": "string", + "description": "The name of the instance profile to create." + }, + "Arn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the instance profile." + } + }, + "taggable": false, + "required": [ + "Roles" + ], + "createOnlyProperties": [ + "/properties/InstanceProfileName", + "/properties/Path" + ], + "primaryIdentifier": [ + "/properties/InstanceProfileName" + ], + "readOnlyProperties": [ + "/properties/Arn" + ], + "handlers": { + "create": { + "permissions": [ + "iam:CreateInstanceProfile", + "iam:PassRole", + "iam:AddRoleToInstanceProfile", + "iam:GetInstanceProfile" + ] + }, + "read": { + "permissions": [ + "iam:GetInstanceProfile" + ] + }, + "update": { + "permissions": [ + "iam:PassRole", + "iam:RemoveRoleFromInstanceProfile", + "iam:AddRoleToInstanceProfile", + "iam:GetInstanceProfile" + ] + }, + "delete": { + "permissions": [ + "iam:GetInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile" + ] + }, + "list": { + "permissions": [ + "iam:ListInstanceProfiles" + ] + } + } +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile_plugin.py new file mode 100644 index 0000000000000..875b729a55323 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_instanceprofile_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMInstanceProfileProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::InstanceProfile" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_instanceprofile import ( + IAMInstanceProfileProvider, + ) + + self.factory = IAMInstanceProfileProvider diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy.py new file mode 100644 index 0000000000000..0bca0e5a02169 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy.py @@ -0,0 +1,117 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class IAMManagedPolicyProperties(TypedDict): + PolicyDocument: Optional[dict] + Description: Optional[str] + Groups: Optional[list[str]] + Id: Optional[str] + ManagedPolicyName: Optional[str] + Path: Optional[str] + Roles: Optional[list[str]] + Users: Optional[list[str]] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class IAMManagedPolicyProvider(ResourceProvider[IAMManagedPolicyProperties]): + TYPE = "AWS::IAM::ManagedPolicy" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[IAMManagedPolicyProperties], + ) -> ProgressEvent[IAMManagedPolicyProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - PolicyDocument + + Create-only properties: + - /properties/ManagedPolicyName + - /properties/Description + - /properties/Path + + Read-only properties: + - /properties/Id + + """ + model = request.desired_state + iam_client = request.aws_client_factory.iam + group_name = model.get("ManagedPolicyName") + if not group_name: + group_name = util.generate_default_name(request.stack_name, request.logical_resource_id) + model["ManagedPolicyName"] = group_name + + policy_doc = json.dumps(util.remove_none_values(model["PolicyDocument"])) + policy = iam_client.create_policy( + PolicyName=model["ManagedPolicyName"], PolicyDocument=policy_doc + ) + model["Id"] = policy["Policy"]["Arn"] + policy_arn = policy["Policy"]["Arn"] + for role in model.get("Roles", []): + iam_client.attach_role_policy(RoleName=role, PolicyArn=policy_arn) + for user in model.get("Users", []): + iam_client.attach_user_policy(UserName=user, PolicyArn=policy_arn) + for group in model.get("Groups", []): + iam_client.attach_group_policy(GroupName=group, PolicyArn=policy_arn) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[IAMManagedPolicyProperties], + ) -> ProgressEvent[IAMManagedPolicyProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[IAMManagedPolicyProperties], + ) -> ProgressEvent[IAMManagedPolicyProperties]: + """ + Delete a resource + """ + iam_client = request.aws_client_factory.iam + model = request.previous_state + + for role in model.get("Roles", []): + iam_client.detach_role_policy(RoleName=role, PolicyArn=model["Id"]) + for user in model.get("Users", []): + iam_client.detach_user_policy(UserName=user, PolicyArn=model["Id"]) + for group in model.get("Groups", []): + iam_client.detach_group_policy(GroupName=group, PolicyArn=model["Id"]) + + iam_client.delete_policy(PolicyArn=model["Id"]) + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def update( + self, + request: ResourceRequest[IAMManagedPolicyProperties], + ) -> ProgressEvent[IAMManagedPolicyProperties]: + """ + Update a resource + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy.schema.json new file mode 100644 index 0000000000000..da6d25ca321bf --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy.schema.json @@ -0,0 +1,57 @@ +{ + "typeName": "AWS::IAM::ManagedPolicy", + "description": "Resource Type definition for AWS::IAM::ManagedPolicy", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "Groups": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "ManagedPolicyName": { + "type": "string" + }, + "Path": { + "type": "string" + }, + "PolicyDocument": { + "type": "object" + }, + "Roles": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Users": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + } + }, + "required": [ + 
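
The ManagedPolicy handlers above pair create_policy with the attach_*/detach_* calls for each principal type. A minimal boto3 sketch of the same lifecycle (names and the endpoint are illustrative):

    import json

    import boto3

    iam = boto3.client("iam", endpoint_url="http://localhost:4566")  # assumed LocalStack endpoint

    doc = {
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "*"}],
    }
    policy = iam.create_policy(PolicyName="demo-policy", PolicyDocument=json.dumps(doc))
    arn = policy["Policy"]["Arn"]  # doubles as the resource's Id

    iam.attach_role_policy(RoleName="demo-role", PolicyArn=arn)
    # Teardown mirrors delete(): detach from every principal, then delete the policy.
    iam.detach_role_policy(RoleName="demo-role", PolicyArn=arn)
    iam.delete_policy(PolicyArn=arn)
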
"PolicyDocument" + ], + "createOnlyProperties": [ + "/properties/ManagedPolicyName", + "/properties/Description", + "/properties/Path" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy_plugin.py new file mode 100644 index 0000000000000..d33ce61ef26b5 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_managedpolicy_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMManagedPolicyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::ManagedPolicy" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_managedpolicy import ( + IAMManagedPolicyProvider, + ) + + self.factory = IAMManagedPolicyProvider diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy.py new file mode 100644 index 0000000000000..97fdb19341b57 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy.py @@ -0,0 +1,143 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +import random +import string +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class IAMPolicyProperties(TypedDict): + PolicyDocument: Optional[dict] + PolicyName: Optional[str] + Groups: Optional[list[str]] + Id: Optional[str] + Roles: Optional[list[str]] + Users: Optional[list[str]] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class IAMPolicyProvider(ResourceProvider[IAMPolicyProperties]): + TYPE = "AWS::IAM::Policy" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[IAMPolicyProperties], + ) -> ProgressEvent[IAMPolicyProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - PolicyDocument + - PolicyName + + Read-only properties: + - /properties/Id + + """ + model = request.desired_state + iam_client = request.aws_client_factory.iam + + policy_doc = json.dumps(util.remove_none_values(model["PolicyDocument"])) + policy_name = model["PolicyName"] + + if not any([model.get("Roles"), model.get("Users"), model.get("Groups")]): + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model={}, + error_code="InvalidRequest", + message="At least one of [Groups,Roles,Users] must be non-empty.", + ) + + for role in model.get("Roles", []): + iam_client.put_role_policy( + RoleName=role, PolicyName=policy_name, PolicyDocument=policy_doc + ) + for user in model.get("Users", []): + iam_client.put_user_policy( + UserName=user, PolicyName=policy_name, PolicyDocument=policy_doc + ) + for group in model.get("Groups", []): + iam_client.put_group_policy( + GroupName=group, PolicyName=policy_name, PolicyDocument=policy_doc + ) + + # the physical resource ID here has a bit of a weird format + # e.g. 'stack-fnSe-1OKWZIBB89193' where fnSe are the first 4 characters of the LogicalResourceId (or name?) + suffix = "".join(random.choices(string.ascii_uppercase + string.digits, k=13)) + model["Id"] = f"stack-{model.get('PolicyName', '')[:4]}-{suffix}" + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[IAMPolicyProperties], + ) -> ProgressEvent[IAMPolicyProperties]: + """ + Fetch resource information + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[IAMPolicyProperties], + ) -> ProgressEvent[IAMPolicyProperties]: + """ + Delete a resource + """ + iam = request.aws_client_factory.iam + + model = request.previous_state + policy_name = request.previous_state["PolicyName"] + for role in model.get("Roles", []): + iam.delete_role_policy(RoleName=role, PolicyName=policy_name) + for user in model.get("Users", []): + iam.delete_user_policy(UserName=user, PolicyName=policy_name) + for group in model.get("Groups", []): + iam.delete_group_policy(GroupName=group, PolicyName=policy_name) + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[IAMPolicyProperties], + ) -> ProgressEvent[IAMPolicyProperties]: + """ + Update a resource + """ + iam_client = request.aws_client_factory.iam + model = request.desired_state + # FIXME: this wasn't properly implemented before as well, still needs to be rewritten + policy_doc = json.dumps(util.remove_none_values(model["PolicyDocument"])) + policy_name = model["PolicyName"] + + for role in model.get("Roles", []): + iam_client.put_role_policy( + RoleName=role, PolicyName=policy_name, PolicyDocument=policy_doc + ) + for user in model.get("Users", []): + iam_client.put_user_policy( + UserName=user, PolicyName=policy_name, PolicyDocument=policy_doc + ) + for group in model.get("Groups", []): + iam_client.put_group_policy( + GroupName=group, PolicyName=policy_name, PolicyDocument=policy_doc + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={**request.previous_state, **request.desired_state}, + ) diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy.schema.json new file mode 100644 index 0000000000000..1b6a5fb438e4b --- /dev/null +++ 
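
The physical resource ID format noted in the comment above can be reproduced in isolation; this is a direct restatement of the two lines in create(), not additional provider logic:

    import random
    import string

    def policy_physical_id(policy_name: str) -> str:
        # e.g. 'stack-fnSe-1OKWZIBB89193': first four characters of the
        # policy name plus a random 13-character suffix.
        suffix = "".join(random.choices(string.ascii_uppercase + string.digits, k=13))
        return f"stack-{policy_name[:4]}-{suffix}"
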
b/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy.schema.json @@ -0,0 +1,47 @@ +{ + "typeName": "AWS::IAM::Policy", + "description": "Resource Type definition for AWS::IAM::Policy", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "Groups": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "PolicyDocument": { + "type": "object" + }, + "PolicyName": { + "type": "string" + }, + "Roles": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Users": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + } + }, + "required": [ + "PolicyDocument", + "PolicyName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy_plugin.py new file mode 100644 index 0000000000000..a3fdd7e9c9dc3 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_policy_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMPolicyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::Policy" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_policy import IAMPolicyProvider + + self.factory = IAMPolicyProvider diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_role.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_role.py new file mode 100644 index 0000000000000..f3687337e332d --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_role.py @@ -0,0 +1,269 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.functions import call_safe + + +class IAMRoleProperties(TypedDict): + AssumeRolePolicyDocument: Optional[dict | str] + Arn: Optional[str] + Description: Optional[str] + ManagedPolicyArns: Optional[list[str]] + MaxSessionDuration: Optional[int] + Path: Optional[str] + PermissionsBoundary: Optional[str] + Policies: Optional[list[Policy]] + RoleId: Optional[str] + RoleName: Optional[str] + Tags: Optional[list[Tag]] + + +class Policy(TypedDict): + PolicyDocument: Optional[str | dict] + PolicyName: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + +IAM_POLICY_VERSION = "2012-10-17" + + +class IAMRoleProvider(ResourceProvider[IAMRoleProperties]): + TYPE = "AWS::IAM::Role" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[IAMRoleProperties], + ) -> ProgressEvent[IAMRoleProperties]: + """ + Create a new resource. 
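
AssumeRolePolicyDocument is typed as dict | str above, and create() serializes it with json.dumps before calling create_role. A typical trust-policy value (a standard IAM document, shown here only as an example input):

    ASSUME_ROLE_POLICY = {
        "Version": "2012-10-17",  # matches IAM_POLICY_VERSION above
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {"Service": "lambda.amazonaws.com"},
                "Action": "sts:AssumeRole",
            }
        ],
    }
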
+ + Primary identifier fields: + - /properties/RoleName + + Required properties: + - AssumeRolePolicyDocument + + Create-only properties: + - /properties/Path + - /properties/RoleName + + Read-only properties: + - /properties/Arn + - /properties/RoleId + + IAM permissions required: + - iam:CreateRole + - iam:PutRolePolicy + - iam:AttachRolePolicy + - iam:GetRolePolicy <- not in use right now + + """ + model = request.desired_state + iam = request.aws_client_factory.iam + + # defaults + role_name = model.get("RoleName") + if not role_name: + role_name = util.generate_default_name(request.stack_name, request.logical_resource_id) + model["RoleName"] = role_name + + create_role_response = iam.create_role( + **{ + k: v + for k, v in model.items() + if k not in ["ManagedPolicyArns", "Policies", "AssumeRolePolicyDocument"] + }, + AssumeRolePolicyDocument=json.dumps(model["AssumeRolePolicyDocument"]), + ) + + # attach managed policies + policy_arns = model.get("ManagedPolicyArns", []) + for arn in policy_arns: + iam.attach_role_policy(RoleName=role_name, PolicyArn=arn) + + # add inline policies + inline_policies = model.get("Policies", []) + for policy in inline_policies: + if not isinstance(policy, dict): + request.logger.info( + 'Invalid format of policy for IAM role "%s": %s', + model.get("RoleName"), + policy, + ) + continue + pol_name = policy.get("PolicyName") + + # get policy document - make sure we're resolving references in the policy doc + doc = dict(policy["PolicyDocument"]) + doc = util.remove_none_values(doc) + + doc["Version"] = doc.get("Version") or IAM_POLICY_VERSION + statements = doc["Statement"] + statements = statements if isinstance(statements, list) else [statements] + for statement in statements: + if isinstance(statement.get("Resource"), list): + # filter out empty resource strings + statement["Resource"] = [r for r in statement["Resource"] if r] + doc = json.dumps(doc) + iam.put_role_policy( + RoleName=model["RoleName"], + PolicyName=pol_name, + PolicyDocument=doc, + ) + model["Arn"] = create_role_response["Role"]["Arn"] + model["RoleId"] = create_role_response["Role"]["RoleId"] + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[IAMRoleProperties], + ) -> ProgressEvent[IAMRoleProperties]: + """ + Fetch resource information + + IAM permissions required: + - iam:GetRole + - iam:ListAttachedRolePolicies + - iam:ListRolePolicies + - iam:GetRolePolicy + """ + role_name = request.desired_state["RoleName"] + get_role = request.aws_client_factory.iam.get_role(RoleName=role_name) + + model = {**get_role["Role"]} + model.pop("CreateDate") + model.pop("RoleLastUsed") + + list_managed_policies = request.aws_client_factory.iam.list_attached_role_policies( + RoleName=role_name + ) + model["ManagedPolicyArns"] = [ + policy["PolicyArn"] for policy in list_managed_policies["AttachedPolicies"] + ] + model["Policies"] = [] + + policies = request.aws_client_factory.iam.list_role_policies(RoleName=role_name) + for policy_name in policies["PolicyNames"]: + policy = request.aws_client_factory.iam.get_role_policy( + RoleName=role_name, PolicyName=policy_name + ) + policy.pop("ResponseMetadata") + policy.pop("RoleName") + model["Policies"].append(policy) + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def delete( + self, + request: ResourceRequest[IAMRoleProperties], + ) -> ProgressEvent[IAMRoleProperties]: + """ + Delete a resource + + IAM permissions required: + - iam:DeleteRole + - 
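
The inline-policy loop in create() above normalizes each policy document before put_role_policy. The same steps, condensed into a standalone helper (the write-back of the statement list is made explicit here):

    IAM_POLICY_VERSION = "2012-10-17"

    def normalize_policy_document(doc: dict) -> dict:
        doc = dict(doc)
        doc["Version"] = doc.get("Version") or IAM_POLICY_VERSION
        statements = doc["Statement"]
        # wrap a lone statement in a list, then drop empty Resource entries
        statements = statements if isinstance(statements, list) else [statements]
        for statement in statements:
            if isinstance(statement.get("Resource"), list):
                statement["Resource"] = [r for r in statement["Resource"] if r]
        doc["Statement"] = statements
        return doc
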
iam:DetachRolePolicy + - iam:DeleteRolePolicy + - iam:GetRole + - iam:ListAttachedRolePolicies + - iam:ListRolePolicies + """ + iam_client = request.aws_client_factory.iam + role_name = request.previous_state["RoleName"] + + # detach managed policies + for policy in iam_client.list_attached_role_policies(RoleName=role_name).get( + "AttachedPolicies", [] + ): + call_safe( + iam_client.detach_role_policy, + kwargs={"RoleName": role_name, "PolicyArn": policy["PolicyArn"]}, + ) + + # delete inline policies + for inline_policy_name in iam_client.list_role_policies(RoleName=role_name).get( + "PolicyNames", [] + ): + call_safe( + iam_client.delete_role_policy, + kwargs={"RoleName": role_name, "PolicyName": inline_policy_name}, + ) + + iam_client.delete_role(RoleName=role_name) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[IAMRoleProperties], + ) -> ProgressEvent[IAMRoleProperties]: + """ + Update a resource + + IAM permissions required: + - iam:UpdateRole + - iam:UpdateRoleDescription + - iam:UpdateAssumeRolePolicy + - iam:DetachRolePolicy + - iam:AttachRolePolicy + - iam:DeleteRolePermissionsBoundary + - iam:PutRolePermissionsBoundary + - iam:DeleteRolePolicy + - iam:PutRolePolicy + - iam:TagRole + - iam:UntagRole + """ + props = request.desired_state + _states = request.previous_state + + # note that we're using permissions that are not technically allowed here due to the currently broken change detection + props_policy = props.get("AssumeRolePolicyDocument") + # technically a change to the role name shouldn't even get here since it implies a replacement, not an in-place update + # for now we just go with it though + # determine if the previous name was autogenerated or not + new_role_name = props.get("RoleName") + name_changed = new_role_name and new_role_name != _states["RoleName"] + + # new_role_name = props.get("RoleName", _states.get("RoleName")) + policy_changed = props_policy and props_policy != _states.get( + "AssumeRolePolicyDocument", "" + ) + managed_policy_arns_changed = props.get("ManagedPolicyArns", []) != _states.get( + "ManagedPolicyArns", [] + ) + if name_changed or policy_changed or managed_policy_arns_changed: + # TODO: do a proper update instead of replacement + self.delete(request) + return self.create(request) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=request.previous_state) + # raise Exception("why was a change even detected?") + + def list( + self, + request: ResourceRequest[IAMRoleProperties], + ) -> ProgressEvent[IAMRoleProperties]: + resources = request.aws_client_factory.iam.list_roles() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + IAMRoleProperties(RoleName=resource["RoleName"]) for resource in resources["Roles"] + ], + ) diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_role.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_role.schema.json new file mode 100644 index 0000000000000..a7b8a4489cc59 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_role.schema.json @@ -0,0 +1,183 @@ +{ + "typeName": "AWS::IAM::Role", + "$schema": "https://raw.githubusercontent.com/aws-cloudformation/cloudformation-resource-schema/master/src/main/resources/schema/provider.definition.schema.v1.json", + "description": "Resource Type definition for AWS::IAM::Role", + "sourceUrl": 
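
delete() above wraps the detach/delete calls in call_safe so that one failing cleanup step does not abort the rest. A hypothetical sketch of what localstack.utils.functions.call_safe plausibly does (the real signature and logging may differ):

    import logging

    LOG = logging.getLogger(__name__)

    def call_safe(func, args=None, kwargs=None):
        # Invoke the callable; log and swallow any exception so cleanup continues.
        try:
            return func(*(args or ()), **(kwargs or {}))
        except Exception as e:
            LOG.warning("safe call to %s failed: %s", func, e)
            return None
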
"https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-iam.git", + "definitions": { + "Policy": { + "description": "The inline policy document that is embedded in the specified IAM role.", + "type": "object", + "additionalProperties": false, + "properties": { + "PolicyDocument": { + "description": "The policy document.", + "type": [ + "string", + "object" + ] + }, + "PolicyName": { + "description": "The friendly name (not ARN) identifying the policy.", + "type": "string" + } + }, + "required": [ + "PolicyName", + "PolicyDocument" + ] + }, + "Tag": { + "description": "A key-value pair to associate with a resource.", + "type": "object", + "properties": { + "Key": { + "type": "string", + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -." + }, + "Value": { + "type": "string", + "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -." + } + }, + "required": [ + "Key", + "Value" + ], + "additionalProperties": false + } + }, + "properties": { + "Arn": { + "description": "The Amazon Resource Name (ARN) for the role.", + "type": "string" + }, + "AssumeRolePolicyDocument": { + "description": "The trust policy that is associated with this role.", + "type": [ + "object", + "string" + ] + }, + "Description": { + "description": "A description of the role that you provide.", + "type": "string" + }, + "ManagedPolicyArns": { + "description": "A list of Amazon Resource Names (ARNs) of the IAM managed policies that you want to attach to the role. ", + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "type": "string" + } + }, + "MaxSessionDuration": { + "description": "The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 1 hour to 12 hours. ", + "type": "integer" + }, + "Path": { + "description": "The path to the role.", + "type": "string" + }, + "PermissionsBoundary": { + "description": "The ARN of the policy used to set the permissions boundary for the role.", + "type": "string" + }, + "Policies": { + "description": "Adds or updates an inline policy document that is embedded in the specified IAM role. 
", + "type": "array", + "insertionOrder": false, + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Policy" + } + }, + "RoleId": { + "description": "The stable and unique string identifying the role.", + "type": "string" + }, + "RoleName": { + "description": "A name for the IAM role, up to 64 characters in length.", + "type": "string" + }, + "Tags": { + "description": "A list of tags that are attached to the role.", + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "additionalProperties": false, + "required": [ + "AssumeRolePolicyDocument" + ], + "readOnlyProperties": [ + "/properties/Arn", + "/properties/RoleId" + ], + "createOnlyProperties": [ + "/properties/Path", + "/properties/RoleName" + ], + "primaryIdentifier": [ + "/properties/RoleName" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": false, + "tagProperty": "/properties/Tags" + }, + "handlers": { + "create": { + "permissions": [ + "iam:CreateRole", + "iam:PutRolePolicy", + "iam:AttachRolePolicy", + "iam:GetRolePolicy" + ] + }, + "read": { + "permissions": [ + "iam:GetRole", + "iam:ListAttachedRolePolicies", + "iam:ListRolePolicies", + "iam:GetRolePolicy" + ] + }, + "update": { + "permissions": [ + "iam:UpdateRole", + "iam:UpdateRoleDescription", + "iam:UpdateAssumeRolePolicy", + "iam:DetachRolePolicy", + "iam:AttachRolePolicy", + "iam:DeleteRolePermissionsBoundary", + "iam:PutRolePermissionsBoundary", + "iam:DeleteRolePolicy", + "iam:PutRolePolicy", + "iam:TagRole", + "iam:UntagRole" + ] + }, + "delete": { + "permissions": [ + "iam:DeleteRole", + "iam:DetachRolePolicy", + "iam:DeleteRolePolicy", + "iam:GetRole", + "iam:ListAttachedRolePolicies", + "iam:ListRolePolicies" + ] + }, + "list": { + "permissions": [ + "iam:ListRoles" + ] + } + } +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_role_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_role_plugin.py new file mode 100644 index 0000000000000..d6c7059f611eb --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_role_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMRoleProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::Role" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_role import IAMRoleProvider + + self.factory = IAMRoleProvider diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate.py new file mode 100644 index 0000000000000..233f9554efcc0 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate.py @@ -0,0 +1,133 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class IAMServerCertificateProperties(TypedDict): + Arn: Optional[str] + 
CertificateBody: Optional[str] + CertificateChain: Optional[str] + Path: Optional[str] + PrivateKey: Optional[str] + ServerCertificateName: Optional[str] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class IAMServerCertificateProvider(ResourceProvider[IAMServerCertificateProperties]): + TYPE = "AWS::IAM::ServerCertificate" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[IAMServerCertificateProperties], + ) -> ProgressEvent[IAMServerCertificateProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/ServerCertificateName + + + + Create-only properties: + - /properties/ServerCertificateName + - /properties/PrivateKey + - /properties/CertificateBody + - /properties/CertificateChain + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - iam:UploadServerCertificate + - iam:GetServerCertificate + + """ + model = request.desired_state + if not model.get("ServerCertificateName"): + model["ServerCertificateName"] = util.generate_default_name_without_stack( + request.logical_resource_id + ) + + create_params = util.select_attributes( + model, + [ + "ServerCertificateName", + "PrivateKey", + "CertificateBody", + "CertificateChain", + "Path", + "Tags", + ], + ) + + # Create the resource + certificate = request.aws_client_factory.iam.upload_server_certificate(**create_params) + model["Arn"] = certificate["ServerCertificateMetadata"]["Arn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[IAMServerCertificateProperties], + ) -> ProgressEvent[IAMServerCertificateProperties]: + """ + Fetch resource information + + IAM permissions required: + - iam:GetServerCertificate + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[IAMServerCertificateProperties], + ) -> ProgressEvent[IAMServerCertificateProperties]: + """ + Delete a resource + + IAM permissions required: + - iam:DeleteServerCertificate + """ + model = request.desired_state + request.aws_client_factory.iam.delete_server_certificate( + ServerCertificateName=model["ServerCertificateName"] + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + ) + + def update( + self, + request: ResourceRequest[IAMServerCertificateProperties], + ) -> ProgressEvent[IAMServerCertificateProperties]: + """ + Update a resource + + IAM permissions required: + - iam:TagServerCertificate + - iam:UntagServerCertificate + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate.schema.json new file mode 100644 index 0000000000000..b0af6c74c2da9 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate.schema.json @@ -0,0 +1,129 @@ +{ + "typeName": "AWS::IAM::ServerCertificate", + "description": "Resource Type definition for AWS::IAM::ServerCertificate", + "additionalProperties": false, + "properties": { + "CertificateBody": { + "minLength": 1, + "maxLength": 16384, + "pattern": "[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+", + "type": "string" + }, + "CertificateChain": { + "minLength": 1, + "maxLength": 2097152, + "pattern": 
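
create() above uses util.select_attributes to build the upload_server_certificate kwargs from the model. One plausible implementation of that helper, assuming it also filters out unset keys (the real provider_utils version may differ):

    def select_attributes(model: dict, keys: list[str]) -> dict:
        return {k: model[k] for k in keys if model.get(k) is not None}
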
"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+", + "type": "string" + }, + "ServerCertificateName": { + "minLength": 1, + "maxLength": 128, + "pattern": "[\\w+=,.@-]+", + "type": "string" + }, + "Path": { + "minLength": 1, + "maxLength": 512, + "pattern": "(\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F)", + "type": "string" + }, + "PrivateKey": { + "minLength": 1, + "maxLength": 16384, + "pattern": "[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+", + "type": "string" + }, + "Arn": { + "description": "Amazon Resource Name (ARN) of the server certificate", + "minLength": 1, + "maxLength": 1600, + "type": "string" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "definitions": { + "Tag": { + "description": "A key-value pair to associate with a resource.", + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "type": "string", + "minLength": 1, + "maxLength": 256 + }, + "Key": { + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "type": "string", + "minLength": 1, + "maxLength": 128 + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "createOnlyProperties": [ + "/properties/ServerCertificateName", + "/properties/PrivateKey", + "/properties/CertificateBody", + "/properties/CertificateChain" + ], + "readOnlyProperties": [ + "/properties/Arn" + ], + "writeOnlyProperties": [ + "/properties/PrivateKey", + "/properties/CertificateBody", + "/properties/CertificateChain" + ], + "primaryIdentifier": [ + "/properties/ServerCertificateName" + ], + "handlers": { + "create": { + "permissions": [ + "iam:UploadServerCertificate", + "iam:GetServerCertificate" + ] + }, + "read": { + "permissions": [ + "iam:GetServerCertificate" + ] + }, + "update": { + "permissions": [ + "iam:TagServerCertificate", + "iam:UntagServerCertificate" + ] + }, + "delete": { + "permissions": [ + "iam:DeleteServerCertificate" + ] + }, + "list": { + "permissions": [ + "iam:ListServerCertificates", + "iam:GetServerCertificate" + ] + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": false + } +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate_plugin.py new file mode 100644 index 0000000000000..13723bd73ce2b --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servercertificate_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMServerCertificateProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::ServerCertificate" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_servercertificate import ( + IAMServerCertificateProvider, + ) + 
+ self.factory = IAMServerCertificateProvider diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole.py new file mode 100644 index 0000000000000..2437966df10e7 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole.py @@ -0,0 +1,95 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class IAMServiceLinkedRoleProperties(TypedDict): + AWSServiceName: Optional[str] + CustomSuffix: Optional[str] + Description: Optional[str] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class IAMServiceLinkedRoleProvider(ResourceProvider[IAMServiceLinkedRoleProperties]): + TYPE = "AWS::IAM::ServiceLinkedRole" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[IAMServiceLinkedRoleProperties], + ) -> ProgressEvent[IAMServiceLinkedRoleProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - AWSServiceName + + Create-only properties: + - /properties/CustomSuffix + - /properties/AWSServiceName + + Read-only properties: + - /properties/Id + + """ + model = request.desired_state + response = request.aws_client_factory.iam.create_service_linked_role(**model) + model["Id"] = response["Role"]["RoleName"] # TODO + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[IAMServiceLinkedRoleProperties], + ) -> ProgressEvent[IAMServiceLinkedRoleProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[IAMServiceLinkedRoleProperties], + ) -> ProgressEvent[IAMServiceLinkedRoleProperties]: + """ + Delete a resource + """ + request.aws_client_factory.iam.delete_service_linked_role( + RoleName=request.previous_state["Id"] + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={}, + ) + + def update( + self, + request: ResourceRequest[IAMServiceLinkedRoleProperties], + ) -> ProgressEvent[IAMServiceLinkedRoleProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole.schema.json new file mode 100644 index 0000000000000..4472358b498b1 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole.schema.json @@ -0,0 +1,32 @@ +{ + "typeName": "AWS::IAM::ServiceLinkedRole", + "description": "Resource Type definition for AWS::IAM::ServiceLinkedRole", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "CustomSuffix": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "AWSServiceName": { + "type": "string" + } + }, + "required": [ + "AWSServiceName" + ], + "createOnlyProperties": [ + "/properties/CustomSuffix", + "/properties/AWSServiceName" + ], + 
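
The ServiceLinkedRole handlers above map onto a single IAM call each. A minimal boto3 sketch (the service name and suffix are illustrative; autoscaling.amazonaws.com allows a custom suffix per the SERVICE_LINKED_ROLES table later in this change):

    import boto3

    iam = boto3.client("iam", endpoint_url="http://localhost:4566")  # assumed LocalStack endpoint

    role = iam.create_service_linked_role(
        AWSServiceName="autoscaling.amazonaws.com",
        CustomSuffix="demo",
    )
    role_name = role["Role"]["RoleName"]  # what create() stores as the model's Id
    iam.delete_service_linked_role(RoleName=role_name)
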
"primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole_plugin.py new file mode 100644 index 0000000000000..e81cc105f85c1 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_servicelinkedrole_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMServiceLinkedRoleProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::ServiceLinkedRole" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_servicelinkedrole import ( + IAMServiceLinkedRoleProvider, + ) + + self.factory = IAMServiceLinkedRoleProvider diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_user.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_user.py new file mode 100644 index 0000000000000..8600522013b39 --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_user.py @@ -0,0 +1,158 @@ +# LocalStack Resource Provider Scaffolding v1 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class IAMUserProperties(TypedDict): + Arn: Optional[str] + Groups: Optional[list[str]] + Id: Optional[str] + LoginProfile: Optional[LoginProfile] + ManagedPolicyArns: Optional[list[str]] + Path: Optional[str] + PermissionsBoundary: Optional[str] + Policies: Optional[list[Policy]] + Tags: Optional[list[Tag]] + UserName: Optional[str] + + +class Policy(TypedDict): + PolicyDocument: Optional[dict] + PolicyName: Optional[str] + + +class LoginProfile(TypedDict): + Password: Optional[str] + PasswordResetRequired: Optional[bool] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class IAMUserProvider(ResourceProvider[IAMUserProperties]): + TYPE = "AWS::IAM::User" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[IAMUserProperties], + ) -> ProgressEvent[IAMUserProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Create-only properties: + - /properties/UserName + + Read-only properties: + - /properties/Id + - /properties/Arn + """ + model = request.desired_state + iam_client = request.aws_client_factory.iam + # TODO: validations + # TODO: idempotency + + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + + # Set defaults + if not model.get("UserName"): + model["UserName"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + # actually create the resource + # note: technically we could make this synchronous, but for the sake of this being an example it is intentionally "asynchronous" and returns IN_PROGRESS + + # this example uses a helper utility, check out the module for more helpful utilities and add your own! + iam_client.create_user( + **util.select_attributes(model, ["UserName", "Path", "PermissionsBoundary", "Tags"]) + ) + + # alternatively you can also just do: + # iam_client.create_user( + # UserName=model["UserName"], + # Path=model["Path"], + # PermissionsBoundary=model["PermissionsBoundary"], + # Tags=model["Tags"], + # ) + + # this kind of logic below was previously done in either a result_handler or a custom "_post_create" function + for group in model.get("Groups", []): + iam_client.add_user_to_group(GroupName=group, UserName=model["UserName"]) + + for policy_arn in model.get("ManagedPolicyArns", []): + iam_client.attach_user_policy(UserName=model["UserName"], PolicyArn=policy_arn) + + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + get_response = iam_client.get_user(UserName=model["UserName"]) + model["Id"] = get_response["User"]["UserName"] # this is the ref / physical resource id + model["Arn"] = get_response["User"]["Arn"] + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[IAMUserProperties], + ) -> ProgressEvent[IAMUserProperties]: + """ + Fetch resource information + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[IAMUserProperties], + ) -> ProgressEvent[IAMUserProperties]: + """ + Delete a resource + """ + iam_client = request.aws_client_factory.iam + iam_client.delete_user(UserName=request.desired_state["Id"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=request.previous_state) + + def update( + self, + request: ResourceRequest[IAMUserProperties], + ) -> ProgressEvent[IAMUserProperties]: + """ + Update a resource + """ + # return ProgressEvent(OperationStatus.SUCCESS, request.desired_state) + raise NotImplementedError + + def list( + self, + request: ResourceRequest[IAMUserProperties], + ) -> ProgressEvent[IAMUserProperties]: + resources = request.aws_client_factory.iam.list_users() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + IAMUserProperties(Id=resource["UserName"]) for resource in resources["Users"] + ], + ) diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_user.schema.json b/localstack-core/localstack/services/iam/resource_providers/aws_iam_user.schema.json new file mode 100644 index 0000000000000..aabdb1c81ddbf --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_user.schema.json @@ -0,0 +1,112 @@ +{ + "typeName": "AWS::IAM::User", + "description": "Resource 
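
Unlike the other providers here, the User create() is written as a two-phase handler: the first invocation creates the user and returns IN_PROGRESS with REPEATED_INVOCATION stashed in custom_context; the second reads the user back and returns SUCCESS. A hedged sketch of the loop the CloudFormation engine is assumed to run around it:

    event = provider.create(request)
    while event.status == OperationStatus.IN_PROGRESS:
        # the engine feeds custom_context (carrying REPEATED_INVOCATION) back in
        request.custom_context = event.custom_context
        event = provider.create(request)
    assert event.status == OperationStatus.SUCCESS
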
Type definition for AWS::IAM::User", + "additionalProperties": false, + "properties": { + "Path": { + "type": "string" + }, + "ManagedPolicyArns": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Policies": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Policy" + } + }, + "UserName": { + "type": "string" + }, + "Groups": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Id": { + "type": "string" + }, + "Arn": { + "type": "string" + }, + "LoginProfile": { + "$ref": "#/definitions/LoginProfile" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "PermissionsBoundary": { + "type": "string" + } + }, + "definitions": { + "Policy": { + "type": "object", + "additionalProperties": false, + "properties": { + "PolicyDocument": { + "type": "object" + }, + "PolicyName": { + "type": "string" + } + }, + "required": [ + "PolicyName", + "PolicyDocument" + ] + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "LoginProfile": { + "type": "object", + "additionalProperties": false, + "properties": { + "PasswordResetRequired": { + "type": "boolean" + }, + "Password": { + "type": "string" + } + }, + "required": [ + "Password" + ] + } + }, + "createOnlyProperties": [ + "/properties/UserName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/Arn" + ] +} diff --git a/localstack-core/localstack/services/iam/resource_providers/aws_iam_user_plugin.py b/localstack-core/localstack/services/iam/resource_providers/aws_iam_user_plugin.py new file mode 100644 index 0000000000000..60acd8fc1493c --- /dev/null +++ b/localstack-core/localstack/services/iam/resource_providers/aws_iam_user_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class IAMUserProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::IAM::User" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.iam.resource_providers.aws_iam_user import IAMUserProvider + + self.factory = IAMUserProvider diff --git a/localstack-core/localstack/services/iam/resources/service_linked_roles.py b/localstack-core/localstack/services/iam/resources/service_linked_roles.py new file mode 100644 index 0000000000000..679ec393dcffa --- /dev/null +++ b/localstack-core/localstack/services/iam/resources/service_linked_roles.py @@ -0,0 +1,550 @@ +SERVICE_LINKED_ROLES = { + "accountdiscovery.ssm.amazonaws.com": { + "service": "accountdiscovery.ssm.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonSSM_AccountDiscovery", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSSystemsManagerAccountDiscoveryServicePolicy" + ], + "suffix_allowed": False, + }, + "acm.amazonaws.com": { + "service": "acm.amazonaws.com", + "role_name": "AWSServiceRoleForCertificateManager", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/CertificateManagerServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "appmesh.amazonaws.com": { + "service": "appmesh.amazonaws.com", + "role_name": "AWSServiceRoleForAppMesh", + "attached_policies": [ + 
"arn:aws:iam::aws:policy/aws-service-role/AWSAppMeshServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "autoscaling-plans.amazonaws.com": { + "service": "autoscaling-plans.amazonaws.com", + "role_name": "AWSServiceRoleForAutoScalingPlans_EC2AutoScaling", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSAutoScalingPlansEC2AutoScalingPolicy" + ], + "suffix_allowed": False, + }, + "autoscaling.amazonaws.com": { + "service": "autoscaling.amazonaws.com", + "role_name": "AWSServiceRoleForAutoScaling", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AutoScalingServiceRolePolicy" + ], + "suffix_allowed": True, + }, + "backup.amazonaws.com": { + "service": "backup.amazonaws.com", + "role_name": "AWSServiceRoleForBackup", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSBackupServiceLinkedRolePolicyForBackup" + ], + "suffix_allowed": False, + }, + "batch.amazonaws.com": { + "service": "batch.amazonaws.com", + "role_name": "AWSServiceRoleForBatch", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/BatchServiceRolePolicy"], + "suffix_allowed": False, + }, + "cassandra.application-autoscaling.amazonaws.com": { + "service": "cassandra.application-autoscaling.amazonaws.com", + "role_name": "AWSServiceRoleForApplicationAutoScaling_CassandraTable", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingCassandraTablePolicy" + ], + "suffix_allowed": False, + }, + "cks.kms.amazonaws.com": { + "service": "cks.kms.amazonaws.com", + "role_name": "AWSServiceRoleForKeyManagementServiceCustomKeyStores", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSKeyManagementServiceCustomKeyStoresServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "cloudtrail.amazonaws.com": { + "service": "cloudtrail.amazonaws.com", + "role_name": "AWSServiceRoleForCloudTrail", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/CloudTrailServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "codestar-notifications.amazonaws.com": { + "service": "codestar-notifications.amazonaws.com", + "role_name": "AWSServiceRoleForCodeStarNotifications", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSCodeStarNotificationsServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "config.amazonaws.com": { + "service": "config.amazonaws.com", + "role_name": "AWSServiceRoleForConfig", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSConfigServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "connect.amazonaws.com": { + "service": "connect.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonConnect", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonConnectServiceLinkedRolePolicy" + ], + "suffix_allowed": True, + }, + "dms-fleet-advisor.amazonaws.com": { + "service": "dms-fleet-advisor.amazonaws.com", + "role_name": "AWSServiceRoleForDMSFleetAdvisor", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSDMSFleetAdvisorServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "dms.amazonaws.com": { + "service": "dms.amazonaws.com", + "role_name": "AWSServiceRoleForDMSServerless", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSDMSServerlessServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "docdb-elastic.amazonaws.com": { + "service": "docdb-elastic.amazonaws.com", + "role_name": "AWSServiceRoleForDocDB-Elastic", + "attached_policies": [ + 
"arn:aws:iam::aws:policy/aws-service-role/AmazonDocDB-ElasticServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "ec2-instance-connect.amazonaws.com": { + "service": "ec2-instance-connect.amazonaws.com", + "role_name": "AWSServiceRoleForEc2InstanceConnect", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/Ec2InstanceConnectEndpoint" + ], + "suffix_allowed": False, + }, + "ec2.application-autoscaling.amazonaws.com": { + "service": "ec2.application-autoscaling.amazonaws.com", + "role_name": "AWSServiceRoleForApplicationAutoScaling_EC2SpotFleetRequest", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingEC2SpotFleetRequestPolicy" + ], + "suffix_allowed": False, + }, + "ecr.amazonaws.com": { + "service": "ecr.amazonaws.com", + "role_name": "AWSServiceRoleForECRTemplate", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/ECRTemplateServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "ecs.amazonaws.com": { + "service": "ecs.amazonaws.com", + "role_name": "AWSServiceRoleForECS", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonECSServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "eks-connector.amazonaws.com": { + "service": "eks-connector.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonEKSConnector", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonEKSConnectorServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "eks-fargate.amazonaws.com": { + "service": "eks-fargate.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonEKSForFargate", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonEKSForFargateServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "eks-nodegroup.amazonaws.com": { + "service": "eks-nodegroup.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonEKSNodegroup", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSServiceRoleForAmazonEKSNodegroup" + ], + "suffix_allowed": False, + }, + "eks.amazonaws.com": { + "service": "eks.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonEKS", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonEKSServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "elasticache.amazonaws.com": { + "service": "elasticache.amazonaws.com", + "role_name": "AWSServiceRoleForElastiCache", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/ElastiCacheServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "elasticbeanstalk.amazonaws.com": { + "service": "elasticbeanstalk.amazonaws.com", + "role_name": "AWSServiceRoleForElasticBeanstalk", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSElasticBeanstalkServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "elasticfilesystem.amazonaws.com": { + "service": "elasticfilesystem.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonElasticFileSystem", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonElasticFileSystemServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "elasticloadbalancing.amazonaws.com": { + "service": "elasticloadbalancing.amazonaws.com", + "role_name": "AWSServiceRoleForElasticLoadBalancing", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSElasticLoadBalancingServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "email.cognito-idp.amazonaws.com": { + "service": "email.cognito-idp.amazonaws.com", + "role_name": 
"AWSServiceRoleForAmazonCognitoIdpEmailService", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonCognitoIdpEmailServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "emr-containers.amazonaws.com": { + "service": "emr-containers.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonEMRContainers", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonEMRContainersServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "emrwal.amazonaws.com": { + "service": "emrwal.amazonaws.com", + "role_name": "AWSServiceRoleForEMRWAL", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/EMRDescribeClusterPolicyForEMRWAL" + ], + "suffix_allowed": False, + }, + "fis.amazonaws.com": { + "service": "fis.amazonaws.com", + "role_name": "AWSServiceRoleForFIS", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonFISServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "grafana.amazonaws.com": { + "service": "grafana.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonGrafana", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonGrafanaServiceLinkedRolePolicy" + ], + "suffix_allowed": False, + }, + "imagebuilder.amazonaws.com": { + "service": "imagebuilder.amazonaws.com", + "role_name": "AWSServiceRoleForImageBuilder", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSServiceRoleForImageBuilder" + ], + "suffix_allowed": False, + }, + "iotmanagedintegrations.amazonaws.com": { + "service": "iotmanagedintegrations.amazonaws.com", + "role_name": "AWSServiceRoleForIoTManagedIntegrations", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSIoTManagedIntegrationsRolePolicy" + ], + "suffix_allowed": False, + }, + "kafka.amazonaws.com": { + "service": "kafka.amazonaws.com", + "role_name": "AWSServiceRoleForKafka", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/KafkaServiceRolePolicy"], + "suffix_allowed": False, + }, + "kafkaconnect.amazonaws.com": { + "service": "kafkaconnect.amazonaws.com", + "role_name": "AWSServiceRoleForKafkaConnect", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/KafkaConnectServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "lakeformation.amazonaws.com": { + "service": "lakeformation.amazonaws.com", + "role_name": "AWSServiceRoleForLakeFormationDataAccess", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/LakeFormationDataAccessServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "lex.amazonaws.com": { + "service": "lex.amazonaws.com", + "role_name": "AWSServiceRoleForLexBots", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/AmazonLexBotPolicy"], + "suffix_allowed": False, + }, + "lexv2.amazonaws.com": { + "service": "lexv2.amazonaws.com", + "role_name": "AWSServiceRoleForLexV2Bots", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/AmazonLexV2BotPolicy"], + "suffix_allowed": True, + }, + "lightsail.amazonaws.com": { + "service": "lightsail.amazonaws.com", + "role_name": "AWSServiceRoleForLightsail", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/LightsailExportAccess"], + "suffix_allowed": False, + }, + "m2.amazonaws.com": { + "service": "m2.amazonaws.com", + "role_name": "AWSServiceRoleForAWSM2", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/AWSM2ServicePolicy"], + "suffix_allowed": False, + }, + "memorydb.amazonaws.com": { + "service": "memorydb.amazonaws.com", + "role_name": 
"AWSServiceRoleForMemoryDB", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/MemoryDBServiceRolePolicy"], + "suffix_allowed": False, + }, + "mq.amazonaws.com": { + "service": "mq.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonMQ", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/AmazonMQServiceRolePolicy"], + "suffix_allowed": False, + }, + "mrk.kms.amazonaws.com": { + "service": "mrk.kms.amazonaws.com", + "role_name": "AWSServiceRoleForKeyManagementServiceMultiRegionKeys", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSKeyManagementServiceMultiRegionKeysServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "notifications.amazonaws.com": { + "service": "notifications.amazonaws.com", + "role_name": "AWSServiceRoleForAwsUserNotifications", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSUserNotificationsServiceLinkedRolePolicy" + ], + "suffix_allowed": False, + }, + "observability.aoss.amazonaws.com": { + "service": "observability.aoss.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonOpenSearchServerless", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonOpenSearchServerlessServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "opensearchservice.amazonaws.com": { + "service": "opensearchservice.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonOpenSearchService", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonOpenSearchServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "ops.apigateway.amazonaws.com": { + "service": "ops.apigateway.amazonaws.com", + "role_name": "AWSServiceRoleForAPIGateway", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/APIGatewayServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "ops.emr-serverless.amazonaws.com": { + "service": "ops.emr-serverless.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonEMRServerless", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonEMRServerlessServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "opsdatasync.ssm.amazonaws.com": { + "service": "opsdatasync.ssm.amazonaws.com", + "role_name": "AWSServiceRoleForSystemsManagerOpsDataSync", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSSystemsManagerOpsDataSyncServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "opsinsights.ssm.amazonaws.com": { + "service": "opsinsights.ssm.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonSSM_OpsInsights", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSSSMOpsInsightsServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "pullthroughcache.ecr.amazonaws.com": { + "service": "pullthroughcache.ecr.amazonaws.com", + "role_name": "AWSServiceRoleForECRPullThroughCache", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSECRPullThroughCache_ServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "ram.amazonaws.com": { + "service": "ram.amazonaws.com", + "role_name": "AWSServiceRoleForResourceAccessManager", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSResourceAccessManagerServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "rds.amazonaws.com": { + "service": "rds.amazonaws.com", + "role_name": "AWSServiceRoleForRDS", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonRDSServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "redshift.amazonaws.com": { + "service": "redshift.amazonaws.com", + 
"role_name": "AWSServiceRoleForRedshift", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonRedshiftServiceLinkedRolePolicy" + ], + "suffix_allowed": False, + }, + "replication.cassandra.amazonaws.com": { + "service": "replication.cassandra.amazonaws.com", + "role_name": "AWSServiceRoleForKeyspacesReplication", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/KeyspacesReplicationServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "replication.ecr.amazonaws.com": { + "service": "replication.ecr.amazonaws.com", + "role_name": "AWSServiceRoleForECRReplication", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/ECRReplicationServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "repository.sync.codeconnections.amazonaws.com": { + "service": "repository.sync.codeconnections.amazonaws.com", + "role_name": "AWSServiceRoleForGitSync", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSGitSyncServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "resource-explorer-2.amazonaws.com": { + "service": "resource-explorer-2.amazonaws.com", + "role_name": "AWSServiceRoleForResourceExplorer", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSResourceExplorerServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "rolesanywhere.amazonaws.com": { + "service": "rolesanywhere.amazonaws.com", + "role_name": "AWSServiceRoleForRolesAnywhere", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSRolesAnywhereServicePolicy" + ], + "suffix_allowed": False, + }, + "s3-outposts.amazonaws.com": { + "service": "s3-outposts.amazonaws.com", + "role_name": "AWSServiceRoleForS3OnOutposts", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSS3OnOutpostsServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "ses.amazonaws.com": { + "service": "ses.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonSES", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonSESServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "shield.amazonaws.com": { + "service": "shield.amazonaws.com", + "role_name": "AWSServiceRoleForAWSShield", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSShieldServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "ssm-incidents.amazonaws.com": { + "service": "ssm-incidents.amazonaws.com", + "role_name": "AWSServiceRoleForIncidentManager", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AWSIncidentManagerServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "ssm-quicksetup.amazonaws.com": { + "service": "ssm-quicksetup.amazonaws.com", + "role_name": "AWSServiceRoleForSSMQuickSetup", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/SSMQuickSetupRolePolicy"], + "suffix_allowed": False, + }, + "ssm.amazonaws.com": { + "service": "ssm.amazonaws.com", + "role_name": "AWSServiceRoleForAmazonSSM", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/AmazonSSMServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "sso.amazonaws.com": { + "service": "sso.amazonaws.com", + "role_name": "AWSServiceRoleForSSO", + "attached_policies": ["arn:aws:iam::aws:policy/aws-service-role/AWSSSOServiceRolePolicy"], + "suffix_allowed": False, + }, + "vpcorigin.cloudfront.amazonaws.com": { + "service": "vpcorigin.cloudfront.amazonaws.com", + "role_name": "AWSServiceRoleForCloudFrontVPCOrigin", + "attached_policies": [ + 
"arn:aws:iam::aws:policy/aws-service-role/AWSCloudFrontVPCOriginServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "waf.amazonaws.com": { + "service": "waf.amazonaws.com", + "role_name": "AWSServiceRoleForWAFLogging", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/WAFLoggingServiceRolePolicy" + ], + "suffix_allowed": False, + }, + "wafv2.amazonaws.com": { + "service": "wafv2.amazonaws.com", + "role_name": "AWSServiceRoleForWAFV2Logging", + "attached_policies": [ + "arn:aws:iam::aws:policy/aws-service-role/WAFV2LoggingServiceRolePolicy" + ], + "suffix_allowed": False, + }, +} diff --git a/localstack-core/localstack/services/internal.py b/localstack-core/localstack/services/internal.py new file mode 100644 index 0000000000000..85c4de12ff351 --- /dev/null +++ b/localstack-core/localstack/services/internal.py @@ -0,0 +1,344 @@ +"""Module for localstack internal resources, such as health, graph, or _localstack/cloudformation/deploy.""" + +import logging +import os +import re +import time +from collections import defaultdict +from datetime import datetime + +from plux import PluginManager +from werkzeug.exceptions import NotFound + +from localstack import config, constants +from localstack.deprecations import deprecated_endpoint +from localstack.http import Request, Resource, Response, Router +from localstack.http.dispatcher import handler_dispatcher +from localstack.runtime.legacy import signal_supervisor_restart +from localstack.utils.analytics.metadata import ( + get_client_metadata, + get_localstack_edition, + is_license_activated, +) +from localstack.utils.collections import merge_recursive +from localstack.utils.functions import call_safe +from localstack.utils.numbers import is_number +from localstack.utils.objects import singleton_factory + +LOG = logging.getLogger(__name__) + +HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH"] + + +class DeprecatedResource: + """ + Resource class which wraps a given resource in the deprecated_endpoint (i.e. logs deprecation warnings on every + invocation). + """ + + def __init__(self, resource, previous_path: str, deprecation_version: str, new_path: str): + for http_method in HTTP_METHODS: + fn_name = f"on_{http_method.lower()}" + fn = getattr(resource, fn_name, None) + if fn: + wrapped = deprecated_endpoint( + fn, + previous_path=previous_path, + deprecation_version=deprecation_version, + new_path=new_path, + ) + setattr(self, fn_name, wrapped) + + +class HealthResource: + """ + Resource for the LocalStack /health endpoint. It provides access to the service states and other components of + localstack. We support arbitrary data to be put into the health state to support things like the + run_startup_scripts function in docker-entrypoint.sh which sets the status of the init scripts feature. 
+ """ + + def __init__(self, service_manager) -> None: + super().__init__() + self.service_manager = service_manager + self.state = {} + + def on_post(self, request: Request): + data = request.get_json(True, True) + if not data: + return Response("invalid request", 400) + + # backdoor API to support restarting the instance + if data.get("action") == "restart": + signal_supervisor_restart() + elif data.get("action") == "kill": + from localstack.runtime import get_current_runtime + + get_current_runtime().exit(0) + + return Response("ok", 200) + + def on_get(self, request: Request): + path = request.path + + reload = "reload" in path + + # get service state + if reload: + self.service_manager.check_all() + services = { + service: state.value for service, state in self.service_manager.get_states().items() + } + + # build state dict from internal state and merge into it the service states + result = dict(self.state) + result = merge_recursive({"services": services}, result) + result["edition"] = get_localstack_edition() + result["version"] = constants.VERSION + return result + + def on_head(self, request: Request): + return Response("ok", 200) + + def on_put(self, request: Request): + data = request.get_json(True, True) or {} + + # keys like "features:initScripts" should be interpreted as ['features']['initScripts'] + state = defaultdict(dict) + for k, v in data.items(): + if ":" in k: + path = k.split(":") + else: + path = [k] + + d = state + for p in path[:-1]: + d = state[p] + d[path[-1]] = v + + self.state = merge_recursive(state, self.state, overwrite=True) + return {"status": "OK"} + + +class InfoResource: + """ + Resource that is exposed to /_localstack/info and used to get generalized information about the current + localstack instance. + """ + + def on_get(self, request): + return self.get_info_data() + + @staticmethod + def get_info_data() -> dict: + client_metadata = get_client_metadata() + uptime = int(time.time() - config.load_start_time) + + return { + "version": client_metadata.version, + "edition": get_localstack_edition(), + "is_license_activated": is_license_activated(), + "session_id": client_metadata.session_id, + "machine_id": client_metadata.machine_id, + "system": client_metadata.system, + "is_docker": client_metadata.is_docker, + "server_time_utc": datetime.utcnow().isoformat(timespec="seconds"), + "uptime": uptime, + } + + +class UsageResource: + def on_get(self, request): + from localstack.utils import diagnose + + return call_safe(diagnose.get_usage) or {} + + +class DiagnoseResource: + def on_get(self, request): + from localstack.utils import diagnose + + return { + "version": { + "image-version": call_safe(diagnose.get_docker_image_details), + "localstack-version": call_safe(diagnose.get_localstack_version), + "host": { + "kernel": call_safe(diagnose.get_host_kernel_version), + }, + }, + "info": call_safe(InfoResource.get_info_data), + "services": call_safe(diagnose.get_service_stats), + "config": call_safe(diagnose.get_localstack_config), + "docker-inspect": call_safe(diagnose.inspect_main_container), + "docker-dependent-image-hashes": call_safe(diagnose.get_important_image_hashes), + "file-tree": call_safe(diagnose.get_file_tree), + "important-endpoints": call_safe(diagnose.resolve_endpoints), + "logs": call_safe(diagnose.get_localstack_logs), + "usage": call_safe(diagnose.get_usage), + } + + +class PluginsResource: + """ + Resource to list information about plux plugins. 
+ """ + + plugin_managers: list[PluginManager] = [] + + def __init__(self): + # defer imports here to lazy-load code + from localstack.runtime import hooks, init + from localstack.services.plugins import SERVICE_PLUGINS + + # service providers + PluginsResource.plugin_managers.append(SERVICE_PLUGINS.plugin_manager) + # init script runners + PluginsResource.plugin_managers.append(init.init_script_manager().runner_manager) + # init hooks + PluginsResource.plugin_managers.append(hooks.configure_localstack_container.manager) + PluginsResource.plugin_managers.append(hooks.prepare_host.manager) + PluginsResource.plugin_managers.append(hooks.on_infra_ready.manager) + PluginsResource.plugin_managers.append(hooks.on_infra_start.manager) + PluginsResource.plugin_managers.append(hooks.on_infra_shutdown.manager) + + def on_get(self, request): + return { + manager.namespace: [ + self._get_plugin_details(manager, name) for name in manager.list_names() + ] + for manager in self.plugin_managers + } + + def _get_plugin_details(self, manager: PluginManager, plugin_name: str) -> dict: + container = manager.get_container(plugin_name) + + details = { + "name": plugin_name, + "is_initialized": container.is_init, + "is_loaded": container.is_loaded, + } + + # optionally add requires_license information if the plugin provides it + requires_license = None + if container.plugin: + try: + requires_license = container.plugin.requires_license + except AttributeError: + pass + if requires_license is not None: + details["requires_license"] = requires_license + + return details + + +class InitScriptsResource: + def on_get(self, request): + from localstack.runtime.init import init_script_manager + + manager = init_script_manager() + + return { + "completed": { + stage.name: completed for stage, completed in manager.stage_completed.items() + }, + "scripts": [ + { + "stage": script.stage.name, + "name": os.path.basename(script.path), + "state": script.state.name, + } + for scripts in manager.scripts.values() + for script in scripts + ], + } + + +class InitScriptsStageResource: + def on_get(self, request, stage: str): + from localstack.runtime.init import Stage, init_script_manager + + manager = init_script_manager() + + try: + stage = Stage[stage.upper()] + except KeyError as e: + raise NotFound(f"no such stage {stage}") from e + + return { + "completed": manager.stage_completed.get(stage), + "scripts": [ + { + "stage": script.stage.name, + "name": os.path.basename(script.path), + "state": script.state.name, + } + for script in manager.scripts.get(stage) + ], + } + + +class ConfigResource: + def on_get(self, request): + from localstack.utils import diagnose + + return call_safe(diagnose.get_localstack_config) + + def on_post(self, request: Request): + from localstack.utils.config_listener import update_config_variable + + data = request.get_json(force=True) + variable = data.get("variable", "") + if not re.match(r"^[_a-zA-Z0-9]+$", variable): + return Response("{}", mimetype="application/json", status=400) + new_value = data.get("value") + if is_number(new_value): + new_value = float(new_value) + update_config_variable(variable, new_value) + value = getattr(config, variable, None) + return { + "variable": variable, + "value": value, + } + + +class LocalstackResources(Router): + """ + Router for localstack-internal HTTP resources. 
+ """ + + def __init__(self): + super().__init__(dispatcher=handler_dispatcher()) + self.add_default_routes() + # TODO: load routes as plugins + + def add_default_routes(self): + from localstack.services.plugins import SERVICE_PLUGINS + + health_resource = HealthResource(SERVICE_PLUGINS) + self.add(Resource("/_localstack/health", health_resource)) + self.add(Resource("/_localstack/info", InfoResource())) + self.add(Resource("/_localstack/plugins", PluginsResource())) + self.add(Resource("/_localstack/init", InitScriptsResource())) + self.add(Resource("/_localstack/init/", InitScriptsStageResource())) + + if config.ENABLE_CONFIG_UPDATES: + LOG.warning( + "Enabling config endpoint, " + "please be aware that this can expose sensitive information via your network." + ) + self.add(Resource("/_localstack/config", ConfigResource())) + + if config.DEBUG: + LOG.warning( + "Enabling diagnose endpoint, " + "please be aware that this can expose sensitive information via your network." + ) + self.add(Resource("/_localstack/diagnose", DiagnoseResource())) + self.add(Resource("/_localstack/usage", UsageResource())) + + +@singleton_factory +def get_internal_apis() -> LocalstackResources: + """ + Get the LocalstackResources singleton. + """ + return LocalstackResources() diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/__init__.py b/localstack-core/localstack/services/kinesis/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/__init__.py rename to localstack-core/localstack/services/kinesis/__init__.py diff --git a/localstack-core/localstack/services/kinesis/kinesis_mock_server.py b/localstack-core/localstack/services/kinesis/kinesis_mock_server.py new file mode 100644 index 0000000000000..b9ce394e1415d --- /dev/null +++ b/localstack-core/localstack/services/kinesis/kinesis_mock_server.py @@ -0,0 +1,233 @@ +import logging +import os +import threading +from abc import abstractmethod +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +from localstack import config +from localstack.services.kinesis.packages import ( + KinesisMockEngine, + kinesismock_package, + kinesismock_scala_package, +) +from localstack.utils.common import TMP_THREADS, ShellCommandThread, get_free_tcp_port, mkdir +from localstack.utils.run import FuncThread +from localstack.utils.serving import Server + +LOG = logging.getLogger(__name__) + + +class KinesisMockServer(Server): + """ + Server abstraction for controlling Kinesis Mock in a separate thread + """ + + def __init__( + self, + port: int, + exe_path: Path, + latency: str, + account_id: str, + host: str = "localhost", + log_level: str = "INFO", + data_dir: Optional[str] = None, + ) -> None: + self._account_id = account_id + self._latency = latency + self._data_dir = data_dir + self._data_filename = f"{self._account_id}.json" + self._exe_path = exe_path + self._log_level = log_level + super().__init__(port, host) + + def do_start_thread(self) -> FuncThread: + cmd, env_vars = self._create_shell_command() + LOG.debug("starting kinesis process %s with env vars %s", cmd, env_vars) + t = ShellCommandThread( + cmd, + strip_color=True, + env_vars=env_vars, + log_listener=self._log_listener, + auto_restart=True, + name="kinesis-mock", + ) + TMP_THREADS.append(t) + t.start() + return t + + @property + def _environment_variables(self) -> Dict: + env_vars = { + "KINESIS_MOCK_PLAIN_PORT": self.port, + # Each kinesis-mock instance listens to two ports - secure and 
insecure. + # LocalStack uses only one - the insecure one. Block the secure port to avoid conflicts. + "KINESIS_MOCK_TLS_PORT": get_free_tcp_port(), + "SHARD_LIMIT": config.KINESIS_SHARD_LIMIT, + "ON_DEMAND_STREAM_COUNT_LIMIT": config.KINESIS_ON_DEMAND_STREAM_COUNT_LIMIT, + "AWS_ACCOUNT_ID": self._account_id, + } + + latency_params = [ + "CREATE_STREAM_DURATION", + "DELETE_STREAM_DURATION", + "REGISTER_STREAM_CONSUMER_DURATION", + "START_STREAM_ENCRYPTION_DURATION", + "STOP_STREAM_ENCRYPTION_DURATION", + "DEREGISTER_STREAM_CONSUMER_DURATION", + "MERGE_SHARDS_DURATION", + "SPLIT_SHARD_DURATION", + "UPDATE_SHARD_COUNT_DURATION", + "UPDATE_STREAM_MODE_DURATION", + ] + for param in latency_params: + env_vars[param] = self._latency + + if self._data_dir and config.KINESIS_PERSISTENCE: + env_vars["SHOULD_PERSIST_DATA"] = "true" + env_vars["PERSIST_PATH"] = self._data_dir + env_vars["PERSIST_FILE_NAME"] = self._data_filename + env_vars["PERSIST_INTERVAL"] = config.KINESIS_MOCK_PERSIST_INTERVAL + + env_vars["LOG_LEVEL"] = self._log_level + + return env_vars + + @abstractmethod + def _create_shell_command(self) -> Tuple[List, Dict]: + """ + Helper method for creating kinesis mock invocation command + :return: returns a tuple containing the command list and a dictionary with the environment variables + """ + pass + + def _log_listener(self, line, **_kwargs): + LOG.info(line.rstrip()) + + +class KinesisMockScalaServer(KinesisMockServer): + def _create_shell_command(self) -> Tuple[List, Dict]: + cmd = ["java", "-jar", *self._get_java_vm_options(), str(self._exe_path)] + return cmd, self._environment_variables + + @property + def _environment_variables(self) -> Dict: + default_env_vars = super()._environment_variables + kinesis_mock_installer = kinesismock_scala_package.get_installer() + return { + **default_env_vars, + **kinesis_mock_installer.get_java_env_vars(), + } + + def _get_java_vm_options(self) -> list[str]: + return [ + f"-Xms{config.KINESIS_MOCK_INITIAL_HEAP_SIZE}", + f"-Xmx{config.KINESIS_MOCK_MAXIMUM_HEAP_SIZE}", + "-XX:MaxGCPauseMillis=500", + "-XX:+ExitOnOutOfMemoryError", + ] + + +class KinesisMockNodeServer(KinesisMockServer): + @property + def _environment_variables(self) -> Dict: + node_env_vars = { + # Use the `server.json` packaged next to the main.js + "KINESIS_MOCK_CERT_PATH": str((self._exe_path.parent / "server.json").absolute()), + } + + default_env_vars = super()._environment_variables + return {**node_env_vars, **default_env_vars} + + def _create_shell_command(self) -> Tuple[List, Dict]: + cmd = ["node", self._exe_path] + return cmd, self._environment_variables + + +class KinesisServerManager: + default_startup_timeout = 60 + + def __init__(self): + self._lock = threading.RLock() + self._servers: dict[str, KinesisMockServer] = {} + + def get_server_for_account(self, account_id: str) -> KinesisMockServer: + if account_id in self._servers: + return self._servers[account_id] + + with self._lock: + if account_id in self._servers: + return self._servers[account_id] + + LOG.info("Creating kinesis backend for account %s", account_id) + self._servers[account_id] = self._create_kinesis_mock_server(account_id) + self._servers[account_id].start() + if not self._servers[account_id].wait_is_up(timeout=self.default_startup_timeout): + raise TimeoutError("gave up waiting for kinesis backend to start up") + return self._servers[account_id] + + def shutdown_all(self): + with self._lock: + while self._servers: + account_id, server = self._servers.popitem() + LOG.info("Shutting down kinesis 
backend for account %s", account_id) + server.shutdown() + + def _create_kinesis_mock_server(self, account_id: str) -> KinesisMockServer: + """ + Creates a new Kinesis Mock server instance. Installs Kinesis Mock on the host first if necessary. + Introspects on the host config to determine server configuration: + config.dirs.data -> if set, the server runs with persistence using the path to store data + config.LS_LOG -> configure kinesis mock log level (defaults to INFO) + config.KINESIS_LATENCY -> configure stream latency (in milliseconds) + """ + port = get_free_tcp_port() + + # kinesis-mock stores state in json files .json, so we can dump everything into `kinesis/` + persist_path = os.path.join(config.dirs.data, "kinesis") + mkdir(persist_path) + if config.KINESIS_MOCK_LOG_LEVEL: + log_level = config.KINESIS_MOCK_LOG_LEVEL.upper() + elif config.LS_LOG: + ls_log_level = config.LS_LOG.upper() + if ls_log_level == "WARNING": + log_level = "WARN" + elif ls_log_level == "TRACE-INTERNAL": + log_level = "TRACE" + elif ls_log_level not in ("ERROR", "WARN", "INFO", "DEBUG", "TRACE"): + # to protect from cases where the log level will be rejected from kinesis-mock + log_level = "INFO" + else: + log_level = ls_log_level + else: + log_level = "INFO" + latency = config.KINESIS_LATENCY + "ms" + + # Install the Scala Kinesis Mock build if specified in KINESIS_MOCK_PROVIDER_ENGINE + if KinesisMockEngine(config.KINESIS_MOCK_PROVIDER_ENGINE) == KinesisMockEngine.SCALA: + kinesismock_scala_package.install() + kinesis_mock_path = Path( + kinesismock_scala_package.get_installer().get_executable_path() + ) + + return KinesisMockScalaServer( + port=port, + exe_path=kinesis_mock_path, + log_level=log_level, + latency=latency, + data_dir=persist_path, + account_id=account_id, + ) + + # Otherwise, install the NodeJS version (default) + kinesismock_package.install() + kinesis_mock_path = Path(kinesismock_package.get_installer().get_executable_path()) + + return KinesisMockNodeServer( + port=port, + exe_path=kinesis_mock_path, + log_level=log_level, + latency=latency, + data_dir=persist_path, + account_id=account_id, + ) diff --git a/localstack/services/kinesis/models.py b/localstack-core/localstack/services/kinesis/models.py similarity index 100% rename from localstack/services/kinesis/models.py rename to localstack-core/localstack/services/kinesis/models.py diff --git a/localstack-core/localstack/services/kinesis/packages.py b/localstack-core/localstack/services/kinesis/packages.py new file mode 100644 index 0000000000000..1d64bb4194b63 --- /dev/null +++ b/localstack-core/localstack/services/kinesis/packages.py @@ -0,0 +1,82 @@ +import os +from enum import StrEnum +from functools import lru_cache +from typing import Any, List + +from localstack.packages import InstallTarget, Package +from localstack.packages.core import GitHubReleaseInstaller, NodePackageInstaller +from localstack.packages.java import JavaInstallerMixin, java_package + +_KINESIS_MOCK_VERSION = os.environ.get("KINESIS_MOCK_VERSION") or "0.4.12" + + +class KinesisMockEngine(StrEnum): + NODE = "node" + SCALA = "scala" + + @classmethod + def _missing_(cls, value: str | Any) -> str: + # default to 'node' if invalid enum + if not isinstance(value, str): + return cls(cls.NODE) + return cls.__members__.get(value.upper(), cls.NODE) + + +class KinesisMockNodePackageInstaller(NodePackageInstaller): + def __init__(self, version: str): + super().__init__(package_name="kinesis-local", version=version) + + +class 
KinesisMockScalaPackageInstaller(JavaInstallerMixin, GitHubReleaseInstaller): + def __init__(self, version: str = _KINESIS_MOCK_VERSION): + super().__init__( + name="kinesis-local", tag=f"v{version}", github_slug="etspaceman/kinesis-mock" + ) + + # Kinesis Mock requires JRE 21+ + self.java_version = "21" + + def _get_github_asset_name(self) -> str: + return "kinesis-mock.jar" + + def _prepare_installation(self, target: InstallTarget) -> None: + java_package.get_installer(self.java_version).install(target) + + def get_java_home(self) -> str | None: + """Override to use the specific Java version""" + return java_package.get_installer(self.java_version).get_java_home() + + +class KinesisMockScalaPackage(Package[KinesisMockScalaPackageInstaller]): + def __init__( + self, + default_version: str = _KINESIS_MOCK_VERSION, + ): + super().__init__(name="Kinesis Mock", default_version=default_version) + + @lru_cache + def _get_installer(self, version: str) -> KinesisMockScalaPackageInstaller: + return KinesisMockScalaPackageInstaller(version) + + def get_versions(self) -> List[str]: + return [_KINESIS_MOCK_VERSION] # Only supported on v0.4.12+ + + +class KinesisMockNodePackage(Package[KinesisMockNodePackageInstaller]): + def __init__( + self, + default_version: str = _KINESIS_MOCK_VERSION, + ): + super().__init__(name="Kinesis Mock", default_version=default_version) + + @lru_cache + def _get_installer(self, version: str) -> KinesisMockNodePackageInstaller: + return KinesisMockNodePackageInstaller(version) + + def get_versions(self) -> List[str]: + return [_KINESIS_MOCK_VERSION] + + +# leave as 'kinesismock_package' for backwards compatibility +kinesismock_package = KinesisMockNodePackage() +kinesismock_scala_package = KinesisMockScalaPackage() diff --git a/localstack-core/localstack/services/kinesis/plugins.py b/localstack-core/localstack/services/kinesis/plugins.py new file mode 100644 index 0000000000000..75249c9a2d904 --- /dev/null +++ b/localstack-core/localstack/services/kinesis/plugins.py @@ -0,0 +1,16 @@ +import localstack.config as config +from localstack.packages import Package, package + + +@package(name="kinesis-mock") +def kinesismock_package() -> Package: + from localstack.services.kinesis.packages import ( + KinesisMockEngine, + kinesismock_package, + kinesismock_scala_package, + ) + + if KinesisMockEngine(config.KINESIS_MOCK_PROVIDER_ENGINE) == KinesisMockEngine.SCALA: + return kinesismock_scala_package + + return kinesismock_package diff --git a/localstack-core/localstack/services/kinesis/provider.py b/localstack-core/localstack/services/kinesis/provider.py new file mode 100644 index 0000000000000..7f080e35fc122 --- /dev/null +++ b/localstack-core/localstack/services/kinesis/provider.py @@ -0,0 +1,185 @@ +import logging +import os +import time +from random import random + +from localstack import config +from localstack.aws.api import RequestContext +from localstack.aws.api.kinesis import ( + ConsumerARN, + Data, + HashKey, + KinesisApi, + PartitionKey, + ProvisionedThroughputExceededException, + PutRecordOutput, + PutRecordsOutput, + PutRecordsRequestEntryList, + PutRecordsResultEntry, + SequenceNumber, + ShardId, + StartingPosition, + StreamARN, + StreamName, + SubscribeToShardEvent, + SubscribeToShardEventStream, + SubscribeToShardOutput, +) +from localstack.aws.connect import connect_to +from localstack.constants import LOCALHOST +from localstack.services.kinesis.kinesis_mock_server import KinesisServerManager +from localstack.services.kinesis.models import KinesisStore, 
kinesis_stores +from localstack.services.plugins import ServiceLifecycleHook +from localstack.state import AssetDirectory, StateVisitor +from localstack.utils.aws import arns +from localstack.utils.aws.arns import extract_account_id_from_arn, extract_region_from_arn +from localstack.utils.time import now_utc + +LOG = logging.getLogger(__name__) +MAX_SUBSCRIPTION_SECONDS = 300 +SERVER_STARTUP_TIMEOUT = 120 + + +def find_stream_for_consumer(consumer_arn): + account_id = extract_account_id_from_arn(consumer_arn) + region_name = extract_region_from_arn(consumer_arn) + kinesis = connect_to(aws_access_key_id=account_id, region_name=region_name).kinesis + for stream_name in kinesis.list_streams()["StreamNames"]: + stream_arn = arns.kinesis_stream_arn(stream_name, account_id, region_name) + for cons in kinesis.list_stream_consumers(StreamARN=stream_arn)["Consumers"]: + if cons["ConsumerARN"] == consumer_arn: + return stream_name + raise Exception("Unable to find stream for stream consumer %s" % consumer_arn) + + +class KinesisProvider(KinesisApi, ServiceLifecycleHook): + server_manager: KinesisServerManager + + def __init__(self): + self.server_manager = KinesisServerManager() + + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(kinesis_stores) + visitor.visit(AssetDirectory(self.service, os.path.join(config.dirs.data, "kinesis"))) + + def on_before_state_load(self): + # no need to restart servers, since that happens lazily in `server_manager.get_server_for_account`. + self.server_manager.shutdown_all() + + def on_before_state_reset(self): + self.server_manager.shutdown_all() + + def on_before_stop(self): + self.server_manager.shutdown_all() + + def get_forward_url(self, account_id: str, region_name: str) -> str: + """Return the URL of the backend Kinesis server to forward requests to""" + server = self.server_manager.get_server_for_account(account_id) + return f"http://{LOCALHOST}:{server.port}" + + @staticmethod + def get_store(account_id: str, region_name: str) -> KinesisStore: + return kinesis_stores[account_id][region_name] + + def subscribe_to_shard( + self, + context: RequestContext, + consumer_arn: ConsumerARN, + shard_id: ShardId, + starting_position: StartingPosition, + **kwargs, + ) -> SubscribeToShardOutput: + kinesis = connect_to( + aws_access_key_id=context.account_id, region_name=context.region + ).kinesis + stream_name = find_stream_for_consumer(consumer_arn) + iter_type = starting_position["Type"] + kwargs = {} + starting_sequence_number = starting_position.get("SequenceNumber") or "0" + if iter_type in ["AT_SEQUENCE_NUMBER", "AFTER_SEQUENCE_NUMBER"]: + kwargs["StartingSequenceNumber"] = starting_sequence_number + elif iter_type in ["AT_TIMESTAMP"]: + # or value is just an example timestamp from aws docs + timestamp = starting_position.get("Timestamp") or 1459799926.480 + kwargs["Timestamp"] = timestamp + initial_shard_iterator = kinesis.get_shard_iterator( + StreamName=stream_name, ShardId=shard_id, ShardIteratorType=iter_type, **kwargs + )["ShardIterator"] + + def event_generator(): + shard_iterator = initial_shard_iterator + last_sequence_number = starting_sequence_number + + maximum_duration_subscription_timestamp = now_utc() + MAX_SUBSCRIPTION_SECONDS + + while now_utc() < maximum_duration_subscription_timestamp: + try: + result = kinesis.get_records(ShardIterator=shard_iterator) + except Exception as e: + if "ResourceNotFoundException" in str(e): + LOG.debug( + 'Kinesis stream "%s" has been deleted, closing shard subscriber', + stream_name, + ) + return 
+ raise + shard_iterator = result.get("NextShardIterator") + records = result.get("Records", []) + if not records: + # On AWS there is *at least* 1 event every 5 seconds + # but this is not possible in this structure. + # In order to avoid a 5-second blocking call, we make the compromise of 3 seconds. + time.sleep(3) + + yield SubscribeToShardEventStream( + SubscribeToShardEvent=SubscribeToShardEvent( + Records=records, + ContinuationSequenceNumber=str(last_sequence_number), + MillisBehindLatest=0, + ChildShards=[], + ) + ) + + return SubscribeToShardOutput(EventStream=event_generator()) + + def put_record( + self, + context: RequestContext, + data: Data, + partition_key: PartitionKey, + stream_name: StreamName = None, + explicit_hash_key: HashKey = None, + sequence_number_for_ordering: SequenceNumber = None, + stream_arn: StreamARN = None, + **kwargs, + ) -> PutRecordOutput: + # TODO: Ensure use of `stream_arn` works. Currently kinesis-mock only works with ctx request account ID and region + if random() < config.KINESIS_ERROR_PROBABILITY: + raise ProvisionedThroughputExceededException( + "Rate exceeded for shard X in stream Y under account Z." + ) + # If "we were lucky" and the error probability didn't hit, we raise a NotImplementedError in order to + # trigger the fallback to kinesis-mock + raise NotImplementedError + + def put_records( + self, + context: RequestContext, + records: PutRecordsRequestEntryList, + stream_name: StreamName = None, + stream_arn: StreamARN = None, + **kwargs, + ) -> PutRecordsOutput: + # TODO: Ensure use of `stream_arn` works. Currently kinesis-mock only works with ctx request account ID and region + if random() < config.KINESIS_ERROR_PROBABILITY: + records_count = len(records) if records is not None else 0 + records = [ + PutRecordsResultEntry( + ErrorCode="ProvisionedThroughputExceededException", + ErrorMessage="Rate exceeded for shard X in stream Y under account Z.", + ) + ] * records_count + return PutRecordsOutput(FailedRecordCount=1, Records=records) + # If "we were lucky" and the error probability didn't hit, we raise a NotImplementedError in order to + # trigger the fallback to kinesis-mock + raise NotImplementedError diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadarr/__init__.py b/localstack-core/localstack/services/kinesis/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadarr/__init__.py rename to localstack-core/localstack/services/kinesis/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream.py b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream.py new file mode 100644 index 0000000000000..27d18c1ff3fe3 --- /dev/null +++ b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream.py @@ -0,0 +1,181 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class KinesisStreamProperties(TypedDict): + Arn: Optional[str] + Name: Optional[str] + RetentionPeriodHours: Optional[int] + ShardCount: Optional[int] + StreamEncryption: Optional[StreamEncryption] + StreamModeDetails: 
Optional[StreamModeDetails] + Tags: Optional[list[Tag]] + + +class StreamModeDetails(TypedDict): + StreamMode: Optional[str] + + +class StreamEncryption(TypedDict): + EncryptionType: Optional[str] + KeyId: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class KinesisStreamProvider(ResourceProvider[KinesisStreamProperties]): + TYPE = "AWS::Kinesis::Stream" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[KinesisStreamProperties], + ) -> ProgressEvent[KinesisStreamProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Name + + + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - kinesis:EnableEnhancedMonitoring + - kinesis:DescribeStreamSummary + - kinesis:CreateStream + - kinesis:IncreaseStreamRetentionPeriod + - kinesis:StartStreamEncryption + - kinesis:AddTagsToStream + - kinesis:ListTagsForStream + + """ + model = request.desired_state + kinesis = request.aws_client_factory.kinesis + if not request.custom_context.get(REPEATED_INVOCATION): + if not model.get("Name"): + model["Name"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + if not model.get("ShardCount"): + model["ShardCount"] = 1 + + if not model.get("StreamModeDetails"): + model["StreamModeDetails"] = StreamModeDetails(StreamMode="ON_DEMAND") + + kinesis.create_stream( + StreamName=model["Name"], + ShardCount=model["ShardCount"], + StreamModeDetails=model["StreamModeDetails"], + ) + + stream_data = kinesis.describe_stream(StreamName=model["Name"])["StreamDescription"] + model["Arn"] = stream_data["StreamARN"] + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + stream_data = kinesis.describe_stream(StreamARN=model["Arn"])["StreamDescription"] + if stream_data["StreamStatus"] != "ACTIVE": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[KinesisStreamProperties], + ) -> ProgressEvent[KinesisStreamProperties]: + """ + Fetch resource information + + IAM permissions required: + - kinesis:DescribeStreamSummary + - kinesis:ListTagsForStream + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[KinesisStreamProperties], + ) -> ProgressEvent[KinesisStreamProperties]: + """ + Delete a resource + + IAM permissions required: + - kinesis:DescribeStreamSummary + - kinesis:DeleteStream + - kinesis:RemoveTagsFromStream + """ + model = request.previous_state + client = request.aws_client_factory.kinesis + + if not request.custom_context.get(REPEATED_INVOCATION): + client.delete_stream(StreamARN=model["Arn"], EnforceConsumerDeletion=True) + request.custom_context[REPEATED_INVOCATION] = True + + try: + client.describe_stream(StreamARN=model["Arn"]) + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model={}, + ) + except client.exceptions.ResourceNotFoundException: + return ProgressEvent( + status=OperationStatus.SUCCESS, + 
resource_model={}, + ) + + def update( + self, + request: ResourceRequest[KinesisStreamProperties], + ) -> ProgressEvent[KinesisStreamProperties]: + """ + Update a resource + + IAM permissions required: + - kinesis:EnableEnhancedMonitoring + - kinesis:DisableEnhancedMonitoring + - kinesis:DescribeStreamSummary + - kinesis:UpdateShardCount + - kinesis:UpdateStreamMode + - kinesis:IncreaseStreamRetentionPeriod + - kinesis:DecreaseStreamRetentionPeriod + - kinesis:StartStreamEncryption + - kinesis:StopStreamEncryption + - kinesis:AddTagsToStream + - kinesis:RemoveTagsFromStream + - kinesis:ListTagsForStream + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream.schema.json b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream.schema.json new file mode 100644 index 0000000000000..69b6d10cfd89d --- /dev/null +++ b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream.schema.json @@ -0,0 +1,173 @@ +{ + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-kinesis.git", + "handlers": { + "read": { + "permissions": [ + "kinesis:DescribeStreamSummary", + "kinesis:ListTagsForStream" + ] + }, + "create": { + "permissions": [ + "kinesis:EnableEnhancedMonitoring", + "kinesis:DescribeStreamSummary", + "kinesis:CreateStream", + "kinesis:IncreaseStreamRetentionPeriod", + "kinesis:StartStreamEncryption", + "kinesis:AddTagsToStream", + "kinesis:ListTagsForStream" + ] + }, + "update": { + "permissions": [ + "kinesis:EnableEnhancedMonitoring", + "kinesis:DisableEnhancedMonitoring", + "kinesis:DescribeStreamSummary", + "kinesis:UpdateShardCount", + "kinesis:UpdateStreamMode", + "kinesis:IncreaseStreamRetentionPeriod", + "kinesis:DecreaseStreamRetentionPeriod", + "kinesis:StartStreamEncryption", + "kinesis:StopStreamEncryption", + "kinesis:AddTagsToStream", + "kinesis:RemoveTagsFromStream", + "kinesis:ListTagsForStream" + ], + "timeoutInMinutes": 240 + }, + "list": { + "permissions": [ + "kinesis:ListStreams" + ] + }, + "delete": { + "permissions": [ + "kinesis:DescribeStreamSummary", + "kinesis:DeleteStream", + "kinesis:RemoveTagsFromStream" + ] + } + }, + "typeName": "AWS::Kinesis::Stream", + "readOnlyProperties": [ + "/properties/Arn" + ], + "description": "Resource Type definition for AWS::Kinesis::Stream", + "createOnlyProperties": [ + "/properties/Name" + ], + "additionalProperties": false, + "primaryIdentifier": [ + "/properties/Name" + ], + "definitions": { + "StreamModeDetails": { + "description": "When specified, enables or updates the mode of stream. Default is PROVISIONED.", + "additionalProperties": false, + "type": "object", + "properties": { + "StreamMode": { + "description": "The mode of the stream", + "type": "string", + "enum": [ + "ON_DEMAND", + "PROVISIONED" + ] + } + }, + "required": [ + "StreamMode" + ] + }, + "StreamEncryption": { + "description": "When specified, enables or updates server-side encryption using an AWS KMS key for a specified stream. Removing this property from your stack template and updating your stack disables encryption.", + "additionalProperties": false, + "type": "object", + "properties": { + "EncryptionType": { + "description": "The encryption type to use. The only valid value is KMS. ", + "type": "string", + "enum": [ + "KMS" + ] + }, + "KeyId": { + "minLength": 1, + "description": "The GUID for the customer-managed AWS KMS key to use for encryption. 
This value can be a globally unique identifier, a fully specified Amazon Resource Name (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\". You can also use a master key owned by Kinesis Data Streams by specifying the alias aws/kinesis.", + "type": "string", + "maxLength": 2048 + } + }, + "required": [ + "EncryptionType", + "KeyId" + ] + }, + "Tag": { + "description": "An arbitrary set of tags (key-value pairs) to associate with the Kinesis stream.", + "additionalProperties": false, + "type": "object", + "properties": { + "Value": { + "minLength": 0, + "description": "The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "type": "string", + "maxLength": 255 + }, + "Key": { + "minLength": 1, + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "type": "string", + "maxLength": 128 + } + }, + "required": [ + "Key", + "Value" + ] + } + }, + "properties": { + "StreamModeDetails": { + "default": { + "StreamMode": "PROVISIONED" + }, + "description": "The mode in which the stream is running.", + "$ref": "#/definitions/StreamModeDetails" + }, + "StreamEncryption": { + "description": "When specified, enables or updates server-side encryption using an AWS KMS key for a specified stream.", + "$ref": "#/definitions/StreamEncryption" + }, + "Arn": { + "description": "The Amazon resource name (ARN) of the Kinesis stream", + "type": "string" + }, + "RetentionPeriodHours": { + "description": "The number of hours for the data records that are stored in shards to remain accessible.", + "type": "integer", + "minimum": 24 + }, + "Tags": { + "uniqueItems": false, + "description": "An arbitrary set of tags (key\u2013value pairs) to associate with the Kinesis stream.", + "insertionOrder": false, + "type": "array", + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Name": { + "minLength": 1, + "pattern": "^[a-zA-Z0-9_.-]+$", + "description": "The name of the Kinesis stream.", + "type": "string", + "maxLength": 128 + }, + "ShardCount": { + "description": "The number of shards that the stream uses. 
Required when StreamMode = PROVISIONED is passed.", + "type": "integer", + "minimum": 1 + } + } +} diff --git a/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream_plugin.py b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream_plugin.py new file mode 100644 index 0000000000000..d7e834e7bb0bf --- /dev/null +++ b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_stream_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class KinesisStreamProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Kinesis::Stream" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.kinesis.resource_providers.aws_kinesis_stream import ( + KinesisStreamProvider, + ) + + self.factory = KinesisStreamProvider diff --git a/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer.py b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer.py new file mode 100644 index 0000000000000..3f0faee08ffda --- /dev/null +++ b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer.py @@ -0,0 +1,131 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class KinesisStreamConsumerProperties(TypedDict): + ConsumerName: Optional[str] + StreamARN: Optional[str] + ConsumerARN: Optional[str] + ConsumerCreationTimestamp: Optional[str] + ConsumerStatus: Optional[str] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class KinesisStreamConsumerProvider(ResourceProvider[KinesisStreamConsumerProperties]): + TYPE = "AWS::Kinesis::StreamConsumer" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[KinesisStreamConsumerProperties], + ) -> ProgressEvent[KinesisStreamConsumerProperties]: + """ + Create a new resource. 
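Both Kinesis resource providers in this diff follow the same repeated-invocation contract: the first create() call starts resource creation and returns IN_PROGRESS with REPEATED_INVOCATION recorded in the custom context, and subsequent calls poll until the backend reports the resource ready. A conceptual driver loop, not LocalStack's actual CloudFormation engine, assuming ProgressEvent exposes the fields it is constructed with:

    # hypothetical polling driver for ResourceProvider.create()
    event = provider.create(request)
    while event.status == OperationStatus.IN_PROGRESS:
        request.custom_context = event.custom_context  # carries REPEATED_INVOCATION
        request.desired_state = event.resource_model   # carries Arn / ConsumerARN
        event = provider.create(request)
    assert event.status == OperationStatus.SUCCESS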
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - ConsumerName + - StreamARN + + Create-only properties: + - /properties/ConsumerName + - /properties/StreamARN + + Read-only properties: + - /properties/ConsumerStatus + - /properties/ConsumerARN + - /properties/ConsumerCreationTimestamp + - /properties/Id + + + + """ + model = request.desired_state + kinesis = request.aws_client_factory.kinesis + + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + # TODO: idempotency + + response = kinesis.register_stream_consumer( + StreamARN=model["StreamARN"], ConsumerName=model["ConsumerName"] + ) + model["ConsumerARN"] = response["Consumer"]["ConsumerARN"] + model["ConsumerStatus"] = response["Consumer"]["ConsumerStatus"] + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + response = kinesis.describe_stream_consumer(ConsumerARN=model["ConsumerARN"]) + model["ConsumerStatus"] = response["ConsumerDescription"]["ConsumerStatus"] + if model["ConsumerStatus"] == "CREATING": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[KinesisStreamConsumerProperties], + ) -> ProgressEvent[KinesisStreamConsumerProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[KinesisStreamConsumerProperties], + ) -> ProgressEvent[KinesisStreamConsumerProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + kinesis = request.aws_client_factory.kinesis + kinesis.deregister_stream_consumer(ConsumerARN=model["ConsumerARN"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[KinesisStreamConsumerProperties], + ) -> ProgressEvent[KinesisStreamConsumerProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer.schema.json b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer.schema.json new file mode 100644 index 0000000000000..635fb10017540 --- /dev/null +++ b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer.schema.json @@ -0,0 +1,42 @@ +{ + "typeName": "AWS::Kinesis::StreamConsumer", + "description": "Resource Type definition for AWS::Kinesis::StreamConsumer", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "ConsumerCreationTimestamp": { + "type": "string" + }, + "ConsumerName": { + "type": "string" + }, + "ConsumerARN": { + "type": "string" + }, + "ConsumerStatus": { + "type": "string" + }, + "StreamARN": { + "type": "string" + } + }, + "required": [ + "ConsumerName", + "StreamARN" + ], + "readOnlyProperties": [ + "/properties/ConsumerStatus", + "/properties/ConsumerARN", + "/properties/ConsumerCreationTimestamp", + "/properties/Id" + ], + "createOnlyProperties": [ + "/properties/ConsumerName", + "/properties/StreamARN" + ], + "primaryIdentifier": [ + "/properties/Id" + ] +} diff --git 
a/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer_plugin.py b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer_plugin.py new file mode 100644 index 0000000000000..b1f2cab38423d --- /dev/null +++ b/localstack-core/localstack/services/kinesis/resource_providers/aws_kinesis_streamconsumer_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class KinesisStreamConsumerProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Kinesis::StreamConsumer" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.kinesis.resource_providers.aws_kinesis_streamconsumer import ( + KinesisStreamConsumerProvider, + ) + + self.factory = KinesisStreamConsumerProvider diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadbinding/__init__.py b/localstack-core/localstack/services/kinesisfirehose/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadbinding/__init__.py rename to localstack-core/localstack/services/kinesisfirehose/__init__.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadtmpl/__init__.py b/localstack-core/localstack/services/kinesisfirehose/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadtmpl/__init__.py rename to localstack-core/localstack/services/kinesisfirehose/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream.py b/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream.py new file mode 100644 index 0000000000000..6764a783667f0 --- /dev/null +++ b/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream.py @@ -0,0 +1,496 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class KinesisFirehoseDeliveryStreamProperties(TypedDict): + AmazonOpenSearchServerlessDestinationConfiguration: Optional[ + AmazonOpenSearchServerlessDestinationConfiguration + ] + AmazonopensearchserviceDestinationConfiguration: Optional[ + AmazonopensearchserviceDestinationConfiguration + ] + Arn: Optional[str] + DeliveryStreamEncryptionConfigurationInput: Optional[DeliveryStreamEncryptionConfigurationInput] + DeliveryStreamName: Optional[str] + DeliveryStreamType: Optional[str] + ElasticsearchDestinationConfiguration: Optional[ElasticsearchDestinationConfiguration] + ExtendedS3DestinationConfiguration: Optional[ExtendedS3DestinationConfiguration] + HttpEndpointDestinationConfiguration: Optional[HttpEndpointDestinationConfiguration] + KinesisStreamSourceConfiguration: Optional[KinesisStreamSourceConfiguration] + RedshiftDestinationConfiguration: Optional[RedshiftDestinationConfiguration] + S3DestinationConfiguration: 
Optional[S3DestinationConfiguration] + SplunkDestinationConfiguration: Optional[SplunkDestinationConfiguration] + Tags: Optional[list[Tag]] + + +class DeliveryStreamEncryptionConfigurationInput(TypedDict): + KeyType: Optional[str] + KeyARN: Optional[str] + + +class ElasticsearchBufferingHints(TypedDict): + IntervalInSeconds: Optional[int] + SizeInMBs: Optional[int] + + +class CloudWatchLoggingOptions(TypedDict): + Enabled: Optional[bool] + LogGroupName: Optional[str] + LogStreamName: Optional[str] + + +class ProcessorParameter(TypedDict): + ParameterName: Optional[str] + ParameterValue: Optional[str] + + +class Processor(TypedDict): + Type: Optional[str] + Parameters: Optional[list[ProcessorParameter]] + + +class ProcessingConfiguration(TypedDict): + Enabled: Optional[bool] + Processors: Optional[list[Processor]] + + +class ElasticsearchRetryOptions(TypedDict): + DurationInSeconds: Optional[int] + + +class BufferingHints(TypedDict): + IntervalInSeconds: Optional[int] + SizeInMBs: Optional[int] + + +class KMSEncryptionConfig(TypedDict): + AWSKMSKeyARN: Optional[str] + + +class EncryptionConfiguration(TypedDict): + KMSEncryptionConfig: Optional[KMSEncryptionConfig] + NoEncryptionConfig: Optional[str] + + +class S3DestinationConfiguration(TypedDict): + BucketARN: Optional[str] + RoleARN: Optional[str] + BufferingHints: Optional[BufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + CompressionFormat: Optional[str] + EncryptionConfiguration: Optional[EncryptionConfiguration] + ErrorOutputPrefix: Optional[str] + Prefix: Optional[str] + + +class VpcConfiguration(TypedDict): + RoleARN: Optional[str] + SecurityGroupIds: Optional[list[str]] + SubnetIds: Optional[list[str]] + + +class DocumentIdOptions(TypedDict): + DefaultDocumentIdFormat: Optional[str] + + +class ElasticsearchDestinationConfiguration(TypedDict): + IndexName: Optional[str] + RoleARN: Optional[str] + S3Configuration: Optional[S3DestinationConfiguration] + BufferingHints: Optional[ElasticsearchBufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ClusterEndpoint: Optional[str] + DocumentIdOptions: Optional[DocumentIdOptions] + DomainARN: Optional[str] + IndexRotationPeriod: Optional[str] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RetryOptions: Optional[ElasticsearchRetryOptions] + S3BackupMode: Optional[str] + TypeName: Optional[str] + VpcConfiguration: Optional[VpcConfiguration] + + +class AmazonopensearchserviceBufferingHints(TypedDict): + IntervalInSeconds: Optional[int] + SizeInMBs: Optional[int] + + +class AmazonopensearchserviceRetryOptions(TypedDict): + DurationInSeconds: Optional[int] + + +class AmazonopensearchserviceDestinationConfiguration(TypedDict): + IndexName: Optional[str] + RoleARN: Optional[str] + S3Configuration: Optional[S3DestinationConfiguration] + BufferingHints: Optional[AmazonopensearchserviceBufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ClusterEndpoint: Optional[str] + DocumentIdOptions: Optional[DocumentIdOptions] + DomainARN: Optional[str] + IndexRotationPeriod: Optional[str] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RetryOptions: Optional[AmazonopensearchserviceRetryOptions] + S3BackupMode: Optional[str] + TypeName: Optional[str] + VpcConfiguration: Optional[VpcConfiguration] + + +class AmazonOpenSearchServerlessBufferingHints(TypedDict): + IntervalInSeconds: Optional[int] + SizeInMBs: Optional[int] + + +class AmazonOpenSearchServerlessRetryOptions(TypedDict): + 
DurationInSeconds: Optional[int] + + +class AmazonOpenSearchServerlessDestinationConfiguration(TypedDict): + IndexName: Optional[str] + RoleARN: Optional[str] + S3Configuration: Optional[S3DestinationConfiguration] + BufferingHints: Optional[AmazonOpenSearchServerlessBufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + CollectionEndpoint: Optional[str] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RetryOptions: Optional[AmazonOpenSearchServerlessRetryOptions] + S3BackupMode: Optional[str] + VpcConfiguration: Optional[VpcConfiguration] + + +class HiveJsonSerDe(TypedDict): + TimestampFormats: Optional[list[str]] + + +class OpenXJsonSerDe(TypedDict): + CaseInsensitive: Optional[bool] + ColumnToJsonKeyMappings: Optional[dict] + ConvertDotsInJsonKeysToUnderscores: Optional[bool] + + +class Deserializer(TypedDict): + HiveJsonSerDe: Optional[HiveJsonSerDe] + OpenXJsonSerDe: Optional[OpenXJsonSerDe] + + +class InputFormatConfiguration(TypedDict): + Deserializer: Optional[Deserializer] + + +class OrcSerDe(TypedDict): + BlockSizeBytes: Optional[int] + BloomFilterColumns: Optional[list[str]] + BloomFilterFalsePositiveProbability: Optional[float] + Compression: Optional[str] + DictionaryKeyThreshold: Optional[float] + EnablePadding: Optional[bool] + FormatVersion: Optional[str] + PaddingTolerance: Optional[float] + RowIndexStride: Optional[int] + StripeSizeBytes: Optional[int] + + +class ParquetSerDe(TypedDict): + BlockSizeBytes: Optional[int] + Compression: Optional[str] + EnableDictionaryCompression: Optional[bool] + MaxPaddingBytes: Optional[int] + PageSizeBytes: Optional[int] + WriterVersion: Optional[str] + + +class Serializer(TypedDict): + OrcSerDe: Optional[OrcSerDe] + ParquetSerDe: Optional[ParquetSerDe] + + +class OutputFormatConfiguration(TypedDict): + Serializer: Optional[Serializer] + + +class SchemaConfiguration(TypedDict): + CatalogId: Optional[str] + DatabaseName: Optional[str] + Region: Optional[str] + RoleARN: Optional[str] + TableName: Optional[str] + VersionId: Optional[str] + + +class DataFormatConversionConfiguration(TypedDict): + Enabled: Optional[bool] + InputFormatConfiguration: Optional[InputFormatConfiguration] + OutputFormatConfiguration: Optional[OutputFormatConfiguration] + SchemaConfiguration: Optional[SchemaConfiguration] + + +class RetryOptions(TypedDict): + DurationInSeconds: Optional[int] + + +class DynamicPartitioningConfiguration(TypedDict): + Enabled: Optional[bool] + RetryOptions: Optional[RetryOptions] + + +class ExtendedS3DestinationConfiguration(TypedDict): + BucketARN: Optional[str] + RoleARN: Optional[str] + BufferingHints: Optional[BufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + CompressionFormat: Optional[str] + DataFormatConversionConfiguration: Optional[DataFormatConversionConfiguration] + DynamicPartitioningConfiguration: Optional[DynamicPartitioningConfiguration] + EncryptionConfiguration: Optional[EncryptionConfiguration] + ErrorOutputPrefix: Optional[str] + Prefix: Optional[str] + ProcessingConfiguration: Optional[ProcessingConfiguration] + S3BackupConfiguration: Optional[S3DestinationConfiguration] + S3BackupMode: Optional[str] + + +class KinesisStreamSourceConfiguration(TypedDict): + KinesisStreamARN: Optional[str] + RoleARN: Optional[str] + + +class CopyCommand(TypedDict): + DataTableName: Optional[str] + CopyOptions: Optional[str] + DataTableColumns: Optional[str] + + +class RedshiftRetryOptions(TypedDict): + DurationInSeconds: Optional[int] + + +class 
RedshiftDestinationConfiguration(TypedDict): + ClusterJDBCURL: Optional[str] + CopyCommand: Optional[CopyCommand] + Password: Optional[str] + RoleARN: Optional[str] + S3Configuration: Optional[S3DestinationConfiguration] + Username: Optional[str] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RetryOptions: Optional[RedshiftRetryOptions] + S3BackupConfiguration: Optional[S3DestinationConfiguration] + S3BackupMode: Optional[str] + + +class SplunkRetryOptions(TypedDict): + DurationInSeconds: Optional[int] + + +class SplunkDestinationConfiguration(TypedDict): + HECEndpoint: Optional[str] + HECEndpointType: Optional[str] + HECToken: Optional[str] + S3Configuration: Optional[S3DestinationConfiguration] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + HECAcknowledgmentTimeoutInSeconds: Optional[int] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RetryOptions: Optional[SplunkRetryOptions] + S3BackupMode: Optional[str] + + +class HttpEndpointConfiguration(TypedDict): + Url: Optional[str] + AccessKey: Optional[str] + Name: Optional[str] + + +class HttpEndpointCommonAttribute(TypedDict): + AttributeName: Optional[str] + AttributeValue: Optional[str] + + +class HttpEndpointRequestConfiguration(TypedDict): + CommonAttributes: Optional[list[HttpEndpointCommonAttribute]] + ContentEncoding: Optional[str] + + +class HttpEndpointDestinationConfiguration(TypedDict): + EndpointConfiguration: Optional[HttpEndpointConfiguration] + S3Configuration: Optional[S3DestinationConfiguration] + BufferingHints: Optional[BufferingHints] + CloudWatchLoggingOptions: Optional[CloudWatchLoggingOptions] + ProcessingConfiguration: Optional[ProcessingConfiguration] + RequestConfiguration: Optional[HttpEndpointRequestConfiguration] + RetryOptions: Optional[RetryOptions] + RoleARN: Optional[str] + S3BackupMode: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class KinesisFirehoseDeliveryStreamProvider( + ResourceProvider[KinesisFirehoseDeliveryStreamProperties] +): + TYPE = "AWS::KinesisFirehose::DeliveryStream" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[KinesisFirehoseDeliveryStreamProperties], + ) -> ProgressEvent[KinesisFirehoseDeliveryStreamProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/DeliveryStreamName + + + + Create-only properties: + - /properties/DeliveryStreamName + - /properties/DeliveryStreamType + - /properties/ElasticsearchDestinationConfiguration/VpcConfiguration + - /properties/AmazonopensearchserviceDestinationConfiguration/VpcConfiguration + - /properties/AmazonOpenSearchServerlessDestinationConfiguration/VpcConfiguration + - /properties/KinesisStreamSourceConfiguration + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - firehose:CreateDeliveryStream + - firehose:DescribeDeliveryStream + - iam:GetRole + - iam:PassRole + - kms:CreateGrant + - kms:DescribeKey + + """ + model = request.desired_state + firehose = request.aws_client_factory.firehose + parameters = [ + "DeliveryStreamName", + "DeliveryStreamType", + "S3DestinationConfiguration", + "ElasticsearchDestinationConfiguration", + "AmazonopensearchserviceDestinationConfiguration", + "DeliveryStreamEncryptionConfigurationInput", + "ExtendedS3DestinationConfiguration", + "HttpEndpointDestinationConfiguration", + "KinesisStreamSourceConfiguration", + "RedshiftDestinationConfiguration", + "SplunkDestinationConfiguration", + "Tags", + ] + attrs = util.select_attributes(model, params=parameters) + if not attrs.get("DeliveryStreamName"): + attrs["DeliveryStreamName"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + if not request.custom_context.get(REPEATED_INVOCATION): + response = firehose.create_delivery_stream(**attrs) + # TODO: defaults + # TODO: idempotency + model["Arn"] = response["DeliveryStreamARN"] + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + # TODO add handler for CREATE FAILED state + stream = firehose.describe_delivery_stream(DeliveryStreamName=model["DeliveryStreamName"]) + if stream["DeliveryStreamDescription"]["DeliveryStreamStatus"] != "ACTIVE": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[KinesisFirehoseDeliveryStreamProperties], + ) -> ProgressEvent[KinesisFirehoseDeliveryStreamProperties]: + """ + Fetch resource information + + IAM permissions required: + - firehose:DescribeDeliveryStream + - firehose:ListTagsForDeliveryStream + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[KinesisFirehoseDeliveryStreamProperties], + ) -> ProgressEvent[KinesisFirehoseDeliveryStreamProperties]: + """ + Delete a resource + + IAM permissions required: + - firehose:DeleteDeliveryStream + - firehose:DescribeDeliveryStream + - kms:RevokeGrant + - kms:DescribeKey + """ + model = request.desired_state + firehose = request.aws_client_factory.firehose + try: + stream = firehose.describe_delivery_stream( + DeliveryStreamName=model["DeliveryStreamName"] + ) + except request.aws_client_factory.firehose.exceptions.ResourceNotFoundException: + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + if stream["DeliveryStreamDescription"]["DeliveryStreamStatus"] != "DELETING": + firehose.delete_delivery_stream(DeliveryStreamName=model["DeliveryStreamName"]) + return ProgressEvent( + 
status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[KinesisFirehoseDeliveryStreamProperties], + ) -> ProgressEvent[KinesisFirehoseDeliveryStreamProperties]: + """ + Update a resource + + IAM permissions required: + - firehose:UpdateDestination + - firehose:DescribeDeliveryStream + - firehose:StartDeliveryStreamEncryption + - firehose:StopDeliveryStreamEncryption + - firehose:ListTagsForDeliveryStream + - firehose:TagDeliveryStream + - firehose:UntagDeliveryStream + - kms:CreateGrant + - kms:RevokeGrant + - kms:DescribeKey + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream.schema.json b/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream.schema.json new file mode 100644 index 0000000000000..939b5c7bd35d2 --- /dev/null +++ b/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream.schema.json @@ -0,0 +1,1205 @@ +{ + "typeName": "AWS::KinesisFirehose::DeliveryStream", + "description": "Resource Type definition for AWS::KinesisFirehose::DeliveryStream", + "additionalProperties": false, + "properties": { + "Arn": { + "type": "string" + }, + "DeliveryStreamEncryptionConfigurationInput": { + "$ref": "#/definitions/DeliveryStreamEncryptionConfigurationInput" + }, + "DeliveryStreamName": { + "type": "string", + "minLength": 1, + "maxLength": 64, + "pattern": "[a-zA-Z0-9._-]+" + }, + "DeliveryStreamType": { + "type": "string", + "enum": [ + "DirectPut", + "KinesisStreamAsSource" + ] + }, + "ElasticsearchDestinationConfiguration": { + "$ref": "#/definitions/ElasticsearchDestinationConfiguration" + }, + "AmazonopensearchserviceDestinationConfiguration": { + "$ref": "#/definitions/AmazonopensearchserviceDestinationConfiguration" + }, + "AmazonOpenSearchServerlessDestinationConfiguration": { + "$ref": "#/definitions/AmazonOpenSearchServerlessDestinationConfiguration" + }, + "ExtendedS3DestinationConfiguration": { + "$ref": "#/definitions/ExtendedS3DestinationConfiguration" + }, + "KinesisStreamSourceConfiguration": { + "$ref": "#/definitions/KinesisStreamSourceConfiguration" + }, + "RedshiftDestinationConfiguration": { + "$ref": "#/definitions/RedshiftDestinationConfiguration" + }, + "S3DestinationConfiguration": { + "$ref": "#/definitions/S3DestinationConfiguration" + }, + "SplunkDestinationConfiguration": { + "$ref": "#/definitions/SplunkDestinationConfiguration" + }, + "HttpEndpointDestinationConfiguration": { + "$ref": "#/definitions/HttpEndpointDestinationConfiguration" + }, + "Tags": { + "type": "array", + "items": { + "$ref": "#/definitions/Tag" + }, + "minItems": 1, + "maxItems": 50 + } + }, + "definitions": { + "DeliveryStreamEncryptionConfigurationInput": { + "type": "object", + "additionalProperties": false, + "properties": { + "KeyARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "KeyType": { + "type": "string", + "enum": [ + "AWS_OWNED_CMK", + "CUSTOMER_MANAGED_CMK" + ] + } + }, + "required": [ + "KeyType" + ] + }, + "SplunkDestinationConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/CloudWatchLoggingOptions" + }, + "HECAcknowledgmentTimeoutInSeconds": { + "type": "integer", + "minimum": 180, + "maximum": 600 + }, + "HECEndpoint": { + "type": 
"string", + "minLength": 0, + "maxLength": 2048 + }, + "HECEndpointType": { + "type": "string", + "enum": [ + "Raw", + "Event" + ] + }, + "HECToken": { + "type": "string", + "minLength": 0, + "maxLength": 2048 + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/SplunkRetryOptions" + }, + "S3BackupMode": { + "type": "string" + }, + "S3Configuration": { + "$ref": "#/definitions/S3DestinationConfiguration" + } + }, + "required": [ + "HECEndpoint", + "S3Configuration", + "HECToken", + "HECEndpointType" + ] + }, + "HttpEndpointDestinationConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "EndpointConfiguration": { + "$ref": "#/definitions/HttpEndpointConfiguration" + }, + "RequestConfiguration": { + "$ref": "#/definitions/HttpEndpointRequestConfiguration" + }, + "BufferingHints": { + "$ref": "#/definitions/BufferingHints" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/CloudWatchLoggingOptions" + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/RetryOptions" + }, + "S3BackupMode": { + "type": "string" + }, + "S3Configuration": { + "$ref": "#/definitions/S3DestinationConfiguration" + } + }, + "required": [ + "EndpointConfiguration", + "S3Configuration" + ] + }, + "KinesisStreamSourceConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "KinesisStreamARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + } + }, + "required": [ + "RoleARN", + "KinesisStreamARN" + ] + }, + "VpcConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "SubnetIds": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string", + "minLength": 1, + "maxLength": 1024 + }, + "minItems": 1, + "maxItems": 16 + }, + "SecurityGroupIds": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string", + "minLength": 1, + "maxLength": 1024 + }, + "minItems": 1, + "maxItems": 5 + } + }, + "required": [ + "RoleARN", + "SubnetIds", + "SecurityGroupIds" + ] + }, + "DocumentIdOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "DefaultDocumentIdFormat": { + "type": "string", + "enum": [ + "FIREHOSE_DEFAULT", + "NO_DOCUMENT_ID" + ] + } + }, + "required": [ + "DefaultDocumentIdFormat" + ] + }, + "ExtendedS3DestinationConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "BucketARN": { + "type": "string", + "minLength": 1, + "maxLength": 2048, + "pattern": "arn:.*" + }, + "BufferingHints": { + "$ref": "#/definitions/BufferingHints" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/CloudWatchLoggingOptions" + }, + "CompressionFormat": { + "type": "string", + "enum": [ + "UNCOMPRESSED", + "GZIP", + "ZIP", + "Snappy", + "HADOOP_SNAPPY" + ] + }, + "DataFormatConversionConfiguration": { + "$ref": "#/definitions/DataFormatConversionConfiguration" + }, + "DynamicPartitioningConfiguration": { + "$ref": "#/definitions/DynamicPartitioningConfiguration" + }, + "EncryptionConfiguration": { + "$ref": 
"#/definitions/EncryptionConfiguration" + }, + "ErrorOutputPrefix": { + "type": "string", + "minLength": 0, + "maxLength": 1024 + }, + "Prefix": { + "type": "string", + "minLength": 0, + "maxLength": 1024 + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/ProcessingConfiguration" + }, + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "S3BackupConfiguration": { + "$ref": "#/definitions/S3DestinationConfiguration" + }, + "S3BackupMode": { + "type": "string", + "enum": [ + "Disabled", + "Enabled" + ] + } + }, + "required": [ + "BucketARN", + "RoleARN" + ] + }, + "S3DestinationConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "BucketARN": { + "type": "string", + "minLength": 1, + "maxLength": 2048, + "pattern": "arn:.*" + }, + "BufferingHints": { + "$ref": "#/definitions/BufferingHints" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/CloudWatchLoggingOptions" + }, + "CompressionFormat": { + "type": "string", + "enum": [ + "UNCOMPRESSED", + "GZIP", + "ZIP", + "Snappy", + "HADOOP_SNAPPY" + ] + }, + "EncryptionConfiguration": { + "$ref": "#/definitions/EncryptionConfiguration" + }, + "ErrorOutputPrefix": { + "type": "string", + "minLength": 0, + "maxLength": 1024 + }, + "Prefix": { + "type": "string", + "minLength": 0, + "maxLength": 1024 + }, + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + } + }, + "required": [ + "BucketARN", + "RoleARN" + ] + }, + "RedshiftDestinationConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/CloudWatchLoggingOptions" + }, + "ClusterJDBCURL": { + "type": "string", + "minLength": 1, + "maxLength": 512 + }, + "CopyCommand": { + "$ref": "#/definitions/CopyCommand" + }, + "Password": { + "type": "string", + "minLength": 6, + "maxLength": 512 + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/RedshiftRetryOptions" + }, + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "S3BackupConfiguration": { + "$ref": "#/definitions/S3DestinationConfiguration" + }, + "S3BackupMode": { + "type": "string", + "enum": [ + "Disabled", + "Enabled" + ] + }, + "S3Configuration": { + "$ref": "#/definitions/S3DestinationConfiguration" + }, + "Username": { + "type": "string", + "minLength": 1, + "maxLength": 512 + } + }, + "required": [ + "S3Configuration", + "Username", + "ClusterJDBCURL", + "CopyCommand", + "RoleARN", + "Password" + ] + }, + "ElasticsearchDestinationConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "BufferingHints": { + "$ref": "#/definitions/ElasticsearchBufferingHints" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/CloudWatchLoggingOptions" + }, + "DomainARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "IndexName": { + "type": "string", + "minLength": 1, + "maxLength": 80 + }, + "IndexRotationPeriod": { + "type": "string", + "enum": [ + "NoRotation", + "OneHour", + "OneDay", + "OneWeek", + "OneMonth" + ] + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/ElasticsearchRetryOptions" + }, + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "S3BackupMode": { 
+ "type": "string", + "enum": [ + "FailedDocumentsOnly", + "AllDocuments" + ] + }, + "S3Configuration": { + "$ref": "#/definitions/S3DestinationConfiguration" + }, + "ClusterEndpoint": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "https:.*" + }, + "TypeName": { + "type": "string", + "minLength": 0, + "maxLength": 100 + }, + "VpcConfiguration": { + "$ref": "#/definitions/VpcConfiguration" + }, + "DocumentIdOptions": { + "$ref": "#/definitions/DocumentIdOptions" + } + }, + "required": [ + "IndexName", + "S3Configuration", + "RoleARN" + ] + }, + "AmazonopensearchserviceDestinationConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "BufferingHints": { + "$ref": "#/definitions/AmazonopensearchserviceBufferingHints" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/CloudWatchLoggingOptions" + }, + "DomainARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "IndexName": { + "type": "string", + "minLength": 1, + "maxLength": 80 + }, + "IndexRotationPeriod": { + "type": "string", + "enum": [ + "NoRotation", + "OneHour", + "OneDay", + "OneWeek", + "OneMonth" + ] + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/AmazonopensearchserviceRetryOptions" + }, + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "S3BackupMode": { + "type": "string", + "enum": [ + "FailedDocumentsOnly", + "AllDocuments" + ] + }, + "S3Configuration": { + "$ref": "#/definitions/S3DestinationConfiguration" + }, + "ClusterEndpoint": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "https:.*" + }, + "TypeName": { + "type": "string", + "minLength": 0, + "maxLength": 100 + }, + "VpcConfiguration": { + "$ref": "#/definitions/VpcConfiguration" + }, + "DocumentIdOptions": { + "$ref": "#/definitions/DocumentIdOptions" + } + }, + "required": [ + "IndexName", + "S3Configuration", + "RoleARN" + ] + }, + "AmazonOpenSearchServerlessDestinationConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "BufferingHints": { + "$ref": "#/definitions/AmazonOpenSearchServerlessBufferingHints" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/CloudWatchLoggingOptions" + }, + "IndexName": { + "type": "string", + "minLength": 1, + "maxLength": 80 + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/AmazonOpenSearchServerlessRetryOptions" + }, + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "S3BackupMode": { + "type": "string", + "enum": [ + "FailedDocumentsOnly", + "AllDocuments" + ] + }, + "S3Configuration": { + "$ref": "#/definitions/S3DestinationConfiguration" + }, + "CollectionEndpoint": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "https:.*" + }, + "VpcConfiguration": { + "$ref": "#/definitions/VpcConfiguration" + } + }, + "required": [ + "IndexName", + "S3Configuration", + "RoleARN" + ] + }, + "BufferingHints": { + "type": "object", + "additionalProperties": false, + "properties": { + "IntervalInSeconds": { + "type": "integer" + }, + "SizeInMBs": { + "type": "integer" + } + } + }, + "ProcessingConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "Processors": { + "type": "array", + 
"uniqueItems": true, + "items": { + "$ref": "#/definitions/Processor" + } + } + } + }, + "SplunkRetryOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "type": "integer" + } + } + }, + "ElasticsearchRetryOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "type": "integer" + } + } + }, + "AmazonopensearchserviceRetryOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "type": "integer" + } + } + }, + "AmazonOpenSearchServerlessRetryOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "type": "integer" + } + } + }, + "RedshiftRetryOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "type": "integer" + } + } + }, + "RetryOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "type": "integer" + } + } + }, + "DataFormatConversionConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "InputFormatConfiguration": { + "$ref": "#/definitions/InputFormatConfiguration" + }, + "OutputFormatConfiguration": { + "$ref": "#/definitions/OutputFormatConfiguration" + }, + "SchemaConfiguration": { + "$ref": "#/definitions/SchemaConfiguration" + } + } + }, + "DynamicPartitioningConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "RetryOptions": { + "$ref": "#/definitions/RetryOptions" + } + } + }, + "CopyCommand": { + "type": "object", + "additionalProperties": false, + "properties": { + "CopyOptions": { + "type": "string", + "minLength": 0, + "maxLength": 204800 + }, + "DataTableColumns": { + "type": "string", + "minLength": 0, + "maxLength": 204800 + }, + "DataTableName": { + "type": "string", + "minLength": 1, + "maxLength": 512 + } + }, + "required": [ + "DataTableName" + ] + }, + "EncryptionConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "KMSEncryptionConfig": { + "$ref": "#/definitions/KMSEncryptionConfig" + }, + "NoEncryptionConfig": { + "type": "string", + "enum": [ + "NoEncryption" + ] + } + } + }, + "ElasticsearchBufferingHints": { + "type": "object", + "additionalProperties": false, + "properties": { + "IntervalInSeconds": { + "type": "integer" + }, + "SizeInMBs": { + "type": "integer" + } + } + }, + "AmazonopensearchserviceBufferingHints": { + "type": "object", + "additionalProperties": false, + "properties": { + "IntervalInSeconds": { + "type": "integer" + }, + "SizeInMBs": { + "type": "integer" + } + } + }, + "AmazonOpenSearchServerlessBufferingHints": { + "type": "object", + "additionalProperties": false, + "properties": { + "IntervalInSeconds": { + "type": "integer" + }, + "SizeInMBs": { + "type": "integer" + } + } + }, + "CloudWatchLoggingOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "LogGroupName": { + "type": "string" + }, + "LogStreamName": { + "type": "string" + } + } + }, + "OutputFormatConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Serializer": { + "$ref": "#/definitions/Serializer" + } + } + }, + "Processor": { + "type": "object", + "additionalProperties": false, + "properties": { + "Parameters": { + "type": "array", + 
"uniqueItems": true, + "items": { + "$ref": "#/definitions/ProcessorParameter" + } + }, + "Type": { + "type": "string", + "enum": [ + "RecordDeAggregation", + "Lambda", + "MetadataExtraction", + "AppendDelimiterToRecord" + ] + } + }, + "required": [ + "Type" + ] + }, + "KMSEncryptionConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "AWSKMSKeyARN": { + "type": "string" + } + }, + "required": [ + "AWSKMSKeyARN" + ] + }, + "InputFormatConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Deserializer": { + "$ref": "#/definitions/Deserializer" + } + } + }, + "SchemaConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "CatalogId": { + "type": "string" + }, + "DatabaseName": { + "type": "string" + }, + "Region": { + "type": "string" + }, + "RoleARN": { + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "arn:.*" + }, + "TableName": { + "type": "string" + }, + "VersionId": { + "type": "string" + } + } + }, + "Serializer": { + "type": "object", + "additionalProperties": false, + "properties": { + "OrcSerDe": { + "$ref": "#/definitions/OrcSerDe" + }, + "ParquetSerDe": { + "$ref": "#/definitions/ParquetSerDe" + } + } + }, + "ProcessorParameter": { + "type": "object", + "additionalProperties": false, + "properties": { + "ParameterName": { + "type": "string" + }, + "ParameterValue": { + "type": "string" + } + }, + "required": [ + "ParameterValue", + "ParameterName" + ] + }, + "Deserializer": { + "type": "object", + "additionalProperties": false, + "properties": { + "HiveJsonSerDe": { + "$ref": "#/definitions/HiveJsonSerDe" + }, + "OpenXJsonSerDe": { + "$ref": "#/definitions/OpenXJsonSerDe" + } + } + }, + "HiveJsonSerDe": { + "type": "object", + "additionalProperties": false, + "properties": { + "TimestampFormats": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + } + } + }, + "OrcSerDe": { + "type": "object", + "additionalProperties": false, + "properties": { + "BlockSizeBytes": { + "type": "integer" + }, + "BloomFilterColumns": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "BloomFilterFalsePositiveProbability": { + "type": "number" + }, + "Compression": { + "type": "string" + }, + "DictionaryKeyThreshold": { + "type": "number" + }, + "EnablePadding": { + "type": "boolean" + }, + "FormatVersion": { + "type": "string" + }, + "PaddingTolerance": { + "type": "number" + }, + "RowIndexStride": { + "type": "integer" + }, + "StripeSizeBytes": { + "type": "integer" + } + } + }, + "ParquetSerDe": { + "type": "object", + "additionalProperties": false, + "properties": { + "BlockSizeBytes": { + "type": "integer" + }, + "Compression": { + "type": "string" + }, + "EnableDictionaryCompression": { + "type": "boolean" + }, + "MaxPaddingBytes": { + "type": "integer" + }, + "PageSizeBytes": { + "type": "integer" + }, + "WriterVersion": { + "type": "string" + } + } + }, + "OpenXJsonSerDe": { + "type": "object", + "additionalProperties": false, + "properties": { + "CaseInsensitive": { + "type": "boolean" + }, + "ColumnToJsonKeyMappings": { + "type": "object", + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "ConvertDotsInJsonKeysToUnderscores": { + "type": "boolean" + } + } + }, + "HttpEndpointRequestConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "ContentEncoding": { + "type": "string", + "enum": [ + "NONE", + "GZIP" + ] + }, + "CommonAttributes": { + 
"type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/HttpEndpointCommonAttribute" + }, + "minItems": 0, + "maxItems": 50 + } + } + }, + "HttpEndpointCommonAttribute": { + "type": "object", + "additionalProperties": false, + "properties": { + "AttributeName": { + "type": "string", + "minLength": 1, + "maxLength": 256 + }, + "AttributeValue": { + "type": "string", + "minLength": 0, + "maxLength": 1024 + } + }, + "required": [ + "AttributeName", + "AttributeValue" + ] + }, + "HttpEndpointConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Url": { + "type": "string", + "minLength": 1, + "maxLength": 1000 + }, + "AccessKey": { + "type": "string", + "minLength": 0, + "maxLength": 4096 + }, + "Name": { + "type": "string", + "minLength": 1, + "maxLength": 256 + } + }, + "required": [ + "Url" + ] + }, + "Tag": { + "type": "object", + "properties": { + "Key": { + "type": "string", + "pattern": "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*$", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "pattern": "^[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*$", + "minLength": 0, + "maxLength": 256 + } + }, + "required": [ + "Key" + ] + } + }, + "handlers": { + "create": { + "permissions": [ + "firehose:CreateDeliveryStream", + "firehose:DescribeDeliveryStream", + "iam:GetRole", + "iam:PassRole", + "kms:CreateGrant", + "kms:DescribeKey" + ] + }, + "read": { + "permissions": [ + "firehose:DescribeDeliveryStream", + "firehose:ListTagsForDeliveryStream" + ] + }, + "update": { + "permissions": [ + "firehose:UpdateDestination", + "firehose:DescribeDeliveryStream", + "firehose:StartDeliveryStreamEncryption", + "firehose:StopDeliveryStreamEncryption", + "firehose:ListTagsForDeliveryStream", + "firehose:TagDeliveryStream", + "firehose:UntagDeliveryStream", + "kms:CreateGrant", + "kms:RevokeGrant", + "kms:DescribeKey" + ] + }, + "delete": { + "permissions": [ + "firehose:DeleteDeliveryStream", + "firehose:DescribeDeliveryStream", + "kms:RevokeGrant", + "kms:DescribeKey" + ] + }, + "list": { + "permissions": [ + "firehose:ListDeliveryStreams" + ] + } + }, + "readOnlyProperties": [ + "/properties/Arn" + ], + "createOnlyProperties": [ + "/properties/DeliveryStreamName", + "/properties/DeliveryStreamType", + "/properties/ElasticsearchDestinationConfiguration/VpcConfiguration", + "/properties/AmazonopensearchserviceDestinationConfiguration/VpcConfiguration", + "/properties/AmazonOpenSearchServerlessDestinationConfiguration/VpcConfiguration", + "/properties/KinesisStreamSourceConfiguration" + ], + "primaryIdentifier": [ + "/properties/DeliveryStreamName" + ] +} diff --git a/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream_plugin.py b/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream_plugin.py new file mode 100644 index 0000000000000..772007e6ce18d --- /dev/null +++ b/localstack-core/localstack/services/kinesisfirehose/resource_providers/aws_kinesisfirehose_deliverystream_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class KinesisFirehoseDeliveryStreamProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::KinesisFirehose::DeliveryStream" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from 
localstack.services.kinesisfirehose.resource_providers.aws_kinesisfirehose_deliverystream import ( + KinesisFirehoseDeliveryStreamProvider, + ) + + self.factory = KinesisFirehoseDeliveryStreamProvider diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/__init__.py b/localstack-core/localstack/services/kms/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/__init__.py rename to localstack-core/localstack/services/kms/__init__.py diff --git a/localstack-core/localstack/services/kms/exceptions.py b/localstack-core/localstack/services/kms/exceptions.py new file mode 100644 index 0000000000000..ad157c5d85c4a --- /dev/null +++ b/localstack-core/localstack/services/kms/exceptions.py @@ -0,0 +1,16 @@ +from localstack.aws.api import CommonServiceException + + +class ValidationException(CommonServiceException): + def __init__(self, message: str): + super().__init__("ValidationException", message, 400, True) + + +class AccessDeniedException(CommonServiceException): + def __init__(self, message: str): + super().__init__("AccessDeniedException", message, 400, True) + + +class TagException(CommonServiceException): + def __init__(self, message=None): + super().__init__("TagException", status_code=400, message=message) diff --git a/localstack-core/localstack/services/kms/models.py b/localstack-core/localstack/services/kms/models.py new file mode 100644 index 0000000000000..3479e309d4903 --- /dev/null +++ b/localstack-core/localstack/services/kms/models.py @@ -0,0 +1,851 @@ +import base64 +import datetime +import io +import json +import logging +import os +import random +import re +import struct +import uuid +from collections import namedtuple +from dataclasses import dataclass +from typing import Dict, Optional, Tuple + +from cryptography.exceptions import InvalidSignature, InvalidTag, UnsupportedAlgorithm +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import hashes, hmac +from cryptography.hazmat.primitives import serialization as crypto_serialization +from cryptography.hazmat.primitives.asymmetric import ec, padding, rsa, utils +from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateKey +from cryptography.hazmat.primitives.asymmetric.padding import PSS, PKCS1v15 +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey +from cryptography.hazmat.primitives.asymmetric.utils import Prehashed +from cryptography.hazmat.primitives.kdf.hkdf import HKDF +from cryptography.hazmat.primitives.serialization import load_der_public_key + +from localstack.aws.api.kms import ( + CreateAliasRequest, + CreateGrantRequest, + CreateKeyRequest, + EncryptionContextType, + InvalidCiphertextException, + InvalidKeyUsageException, + KeyMetadata, + KeySpec, + KeyState, + KeyUsageType, + KMSInvalidMacException, + KMSInvalidSignatureException, + LimitExceededException, + MacAlgorithmSpec, + MessageType, + MultiRegionConfiguration, + MultiRegionKey, + MultiRegionKeyType, + OriginType, + ReplicateKeyRequest, + SigningAlgorithmSpec, + TagList, + UnsupportedOperationException, +) +from localstack.constants import TAG_KEY_CUSTOM_ID +from localstack.services.kms.exceptions import TagException, ValidationException +from localstack.services.kms.utils import is_valid_key_arn, validate_tag +from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute +from localstack.utils.aws.arns import 
get_partition, kms_alias_arn, kms_key_arn +from localstack.utils.crypto import decrypt, encrypt +from localstack.utils.strings import long_uid, to_bytes, to_str + +LOG = logging.getLogger(__name__) + +PATTERN_UUID = re.compile( + r"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$" +) +MULTI_REGION_PATTERN = re.compile(r"^mrk-[a-fA-F0-9]{32}$") + +SYMMETRIC_DEFAULT_MATERIAL_LENGTH = 32 + +RSA_CRYPTO_KEY_LENGTHS = { + "RSA_2048": 2048, + "RSA_3072": 3072, + "RSA_4096": 4096, +} + +ECC_CURVES = { + "ECC_NIST_P256": ec.SECP256R1(), + "ECC_NIST_P384": ec.SECP384R1(), + "ECC_NIST_P521": ec.SECP521R1(), + "ECC_SECG_P256K1": ec.SECP256K1(), +} + +HMAC_RANGE_KEY_LENGTHS = { + "HMAC_224": (28, 64), + "HMAC_256": (32, 64), + "HMAC_384": (48, 128), + "HMAC_512": (64, 128), +} + +ON_DEMAND_ROTATION_LIMIT = 10 +KEY_ID_LEN = 36 +# Moto uses IV_LEN of 12, as it is fine for GCM encryption mode, but we use CBC, so have to set it to 16. +IV_LEN = 16 +TAG_LEN = 16 +CIPHERTEXT_HEADER_FORMAT = ">{key_id_len}s{iv_len}s{tag_len}s".format( + key_id_len=KEY_ID_LEN, iv_len=IV_LEN, tag_len=TAG_LEN +) +HEADER_LEN = KEY_ID_LEN + IV_LEN + TAG_LEN +Ciphertext = namedtuple("Ciphertext", ("key_id", "iv", "ciphertext", "tag")) + +RESERVED_ALIASES = [ + "alias/aws/acm", + "alias/aws/dynamodb", + "alias/aws/ebs", + "alias/aws/elasticfilesystem", + "alias/aws/es", + "alias/aws/glue", + "alias/aws/kinesisvideo", + "alias/aws/lambda", + "alias/aws/rds", + "alias/aws/redshift", + "alias/aws/s3", + "alias/aws/secretsmanager", + "alias/aws/ssm", + "alias/aws/xray", +] + +# list of key names that should be skipped when serializing the encryption context +IGNORED_CONTEXT_KEYS = ["aws-crypto-public-key"] + +# special tag name to allow specifying a custom key material for created keys +TAG_KEY_CUSTOM_KEY_MATERIAL = "_custom_key_material_" + + +def _serialize_ciphertext_blob(ciphertext: Ciphertext) -> bytes: + header = struct.pack( + CIPHERTEXT_HEADER_FORMAT, + ciphertext.key_id.encode("utf-8"), + ciphertext.iv, + ciphertext.tag, + ) + return header + ciphertext.ciphertext + + +def deserialize_ciphertext_blob(ciphertext_blob: bytes) -> Ciphertext: + header = ciphertext_blob[:HEADER_LEN] + ciphertext = ciphertext_blob[HEADER_LEN:] + key_id, iv, tag = struct.unpack(CIPHERTEXT_HEADER_FORMAT, header) + return Ciphertext(key_id=key_id.decode("utf-8"), iv=iv, ciphertext=ciphertext, tag=tag) + + +def _serialize_encryption_context(encryption_context: Optional[EncryptionContextType]) -> bytes: + if encryption_context: + aad = io.BytesIO() + for key, value in sorted(encryption_context.items(), key=lambda x: x[0]): + # remove the reserved key-value pair from additional authentication data + if key not in IGNORED_CONTEXT_KEYS: + aad.write(key.encode("utf-8")) + aad.write(value.encode("utf-8")) + return aad.getvalue() + else: + return b"" + + +# Confusion alert! +# In KMS, there are two things that can be called "keys": +# 1. A cryptographic key, i.e. a string of characters, a private/public/symmetrical key for cryptographic encoding +# and decoding etc. It is modeled here by KmsCryptoKey class. +# 2. An AWS object that stores both a cryptographic key and some relevant metadata, e.g. creation time, a unique ID, +# some state. It is modeled by KmsKey class. +# +# While KmsKeys always contain KmsCryptoKeys, sometimes KmsCryptoKeys exist without corresponding KmsKeys, +# e.g. 
GenerateDataKeyPair API call returns contents of a new KmsCryptoKey that is not associated with any KmsKey,
+# but is partially encrypted by some pre-existing KmsKey.
+
+
+class KmsCryptoKey:
+    """
+    KmsCryptoKeys are used to model the two cases in which AWS generates keys:
+    1. Keys that are created to be used inside of AWS. For such a key, its key material / private key are not to
+    leave AWS unencrypted. If they have to leave AWS, a different KmsCryptoKey is used to encrypt the data first.
+    2. Keys that AWS creates for customers for some external use. Such a key might be returned to a customer with its
+    key material or public key unencrypted - see KMS GenerateDataKey / GenerateDataKeyPair. But such a key is neither
+    stored nor used by AWS.
+    """
+
+    public_key: Optional[bytes]
+    private_key: Optional[bytes]
+    key_material: bytes
+    key_spec: str
+
+    @staticmethod
+    def assert_valid(key_spec: str):
+        """
+        Validates that the given ``key_spec`` is supported in the current context.
+
+        :param key_spec: The key specification to validate.
+        :type key_spec: str
+        :raises ValidationException: If ``key_spec`` is not a known valid spec.
+        :raises UnsupportedOperationException: If ``key_spec`` is entirely unsupported.
+        """
+
+        def raise_validation():
+            raise ValidationException(
+                f"1 validation error detected: Value '{key_spec}' at 'keySpec' "
+                f"failed to satisfy constraint: Member must satisfy enum value set: "
+                f"[RSA_2048, ECC_NIST_P384, ECC_NIST_P256, ECC_NIST_P521, HMAC_384, RSA_3072, "
+                f"ECC_SECG_P256K1, RSA_4096, SYMMETRIC_DEFAULT, HMAC_256, HMAC_224, HMAC_512]"
+            )
+
+        if key_spec == "SYMMETRIC_DEFAULT":
+            return
+
+        if key_spec.startswith("RSA"):
+            if key_spec not in RSA_CRYPTO_KEY_LENGTHS:
+                raise_validation()
+            return
+
+        if key_spec.startswith("ECC"):
+            if key_spec not in ECC_CURVES:
+                raise_validation()
+            return
+
+        if key_spec.startswith("HMAC"):
+            if key_spec not in HMAC_RANGE_KEY_LENGTHS:
+                raise_validation()
+            return
+
+        raise UnsupportedOperationException(f"KeySpec {key_spec} is not supported")
+
+    def __init__(self, key_spec: str, key_material: Optional[bytes] = None):
+        self.private_key = None
+        self.public_key = None
+        # Technically, key_material, being a symmetric encryption key, is only relevant for
+        # key_spec == SYMMETRIC_DEFAULT.
+        # But LocalStack uses symmetric encryption with this key_material even for other specs. Asymmetric keys are
+        # generated, but are not actually used for encryption. Signing is different.
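+        # A minimal illustration of that effect, using only names defined in this class:
+        #     ck = KmsCryptoKey("RSA_2048")
+        #     ck.public_key / ck.private_key  ->  DER-encoded RSA key pair, used for sign/verify
+        #     ck.key_material                 ->  32 random bytes, still used for encrypt/decrypt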
+        self.key_material = key_material or os.urandom(SYMMETRIC_DEFAULT_MATERIAL_LENGTH)
+        self.key_spec = key_spec
+
+        KmsCryptoKey.assert_valid(key_spec)
+
+        if key_spec == "SYMMETRIC_DEFAULT":
+            return
+
+        if key_spec.startswith("RSA"):
+            key_size = RSA_CRYPTO_KEY_LENGTHS.get(key_spec)
+            key = rsa.generate_private_key(public_exponent=65537, key_size=key_size)
+        elif key_spec.startswith("ECC"):
+            curve = ECC_CURVES.get(key_spec)
+            if key_material:
+                key = crypto_serialization.load_der_private_key(key_material, password=None)
+            else:
+                key = ec.generate_private_key(curve)
+        elif key_spec.startswith("HMAC"):
+            minimum_length, maximum_length = HMAC_RANGE_KEY_LENGTHS.get(key_spec)
+            self.key_material = key_material or os.urandom(
+                random.randint(minimum_length, maximum_length)
+            )
+            return
+
+        self._serialize_key(key)
+
+    def load_key_material(self, material: bytes):
+        if self.key_spec == "SYMMETRIC_DEFAULT":
+            self.key_material = material
+        else:
+            key = crypto_serialization.load_der_private_key(material, password=None)
+            self._serialize_key(key)
+
+    def _serialize_key(self, key: ec.EllipticCurvePrivateKey | rsa.RSAPrivateKey):
+        self.public_key = key.public_key().public_bytes(
+            crypto_serialization.Encoding.DER,
+            crypto_serialization.PublicFormat.SubjectPublicKeyInfo,
+        )
+        self.private_key = key.private_bytes(
+            crypto_serialization.Encoding.DER,
+            crypto_serialization.PrivateFormat.PKCS8,
+            crypto_serialization.NoEncryption(),
+        )
+
+    @property
+    def key(self) -> RSAPrivateKey | EllipticCurvePrivateKey:
+        return crypto_serialization.load_der_private_key(
+            self.private_key,
+            password=None,
+            backend=default_backend(),
+        )
+
+
+class KmsKey:
+    metadata: KeyMetadata
+    crypto_key: KmsCryptoKey
+    tags: Dict[str, str]
+    policy: str
+    is_key_rotation_enabled: bool
+    rotation_period_in_days: int
+    next_rotation_date: datetime.datetime
+    previous_keys: list[str]
+
+    def __init__(
+        self,
+        create_key_request: CreateKeyRequest = None,
+        account_id: str = None,
+        region: str = None,
+    ):
+        create_key_request = create_key_request or CreateKeyRequest()
+        self.previous_keys = []
+
+        # Please keep in mind that, while tags of a key could be present in the creation request, they are not a part
+        # of the metadata. At least in the sense of DescribeKey not returning them with the rest of the metadata.
+        # Instead, tags are more like aliases:
+        # https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html
+        # "DescribeKey does not return the following information: ... Tags on the KMS key."
+        self.tags = {}
+        self.add_tags(create_key_request.get("Tags"))
+        # Same goes for the policy. It is in the request, but not in the metadata.
+        self.policy = create_key_request.get("Policy") or self._get_default_key_policy(
+            account_id, region
+        )
+        # https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html
+        # "Automatic key rotation is disabled by default on customer managed keys but authorized users can enable and
+        # disable it."
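+        # A sketch of the resulting defaults; the account id and region below are placeholder
+        # values, not something this class requires:
+        #     key = KmsKey(CreateKeyRequest(), account_id="000000000000", region="us-east-1")
+        #     key.is_key_rotation_enabled  ->  False
+        #     key.rotation_period_in_days  ->  365
+        #     key.metadata["KeySpec"]      ->  "SYMMETRIC_DEFAULT"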
+ self.is_key_rotation_enabled = False + + self._populate_metadata(create_key_request, account_id, region) + custom_key_material = None + if TAG_KEY_CUSTOM_KEY_MATERIAL in self.tags: + # check if the _custom_key_material_ tag is specified, to use a custom key material for this key + custom_key_material = base64.b64decode(self.tags[TAG_KEY_CUSTOM_KEY_MATERIAL]) + # remove the _custom_key_material_ tag from the tags to not readily expose the custom key material + del self.tags[TAG_KEY_CUSTOM_KEY_MATERIAL] + self.crypto_key = KmsCryptoKey(self.metadata.get("KeySpec"), custom_key_material) + self.rotation_period_in_days = 365 + self.next_rotation_date = None + + def calculate_and_set_arn(self, account_id, region): + self.metadata["Arn"] = kms_key_arn(self.metadata.get("KeyId"), account_id, region) + + def generate_mac(self, msg: bytes, mac_algorithm: MacAlgorithmSpec) -> bytes: + h = self._get_hmac_context(mac_algorithm) + h.update(msg) + return h.finalize() + + def verify_mac(self, msg: bytes, mac: bytes, mac_algorithm: MacAlgorithmSpec) -> bool: + h = self._get_hmac_context(mac_algorithm) + h.update(msg) + try: + h.verify(mac) + return True + except InvalidSignature: + raise KMSInvalidMacException() + + # Encrypt is a method of KmsKey and not of KmsCryptoKey only because it requires KeyId, and KmsCryptoKeys do not + # hold KeyIds. Maybe it would be possible to remodel this better. + def encrypt(self, plaintext: bytes, encryption_context: EncryptionContextType = None) -> bytes: + iv = os.urandom(IV_LEN) + aad = _serialize_encryption_context(encryption_context=encryption_context) + ciphertext, tag = encrypt(self.crypto_key.key_material, plaintext, iv, aad) + return _serialize_ciphertext_blob( + ciphertext=Ciphertext( + key_id=self.metadata.get("KeyId"), iv=iv, ciphertext=ciphertext, tag=tag + ) + ) + + # The ciphertext has to be deserialized before this call. 
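+    # An illustrative round trip with the module-level helper defined above:
+    #     blob = key.encrypt(b"secret", {"purpose": "demo"})
+    #     ciphertext = deserialize_ciphertext_blob(blob)
+    #     key.decrypt(ciphertext, {"purpose": "demo"})  ->  b"secret"
+    # A mismatched encryption context ends in InvalidCiphertextException, since the serialized
+    # context is passed to encrypt/decrypt as additional authenticated data (aad).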
+ def decrypt( + self, ciphertext: Ciphertext, encryption_context: EncryptionContextType = None + ) -> bytes: + aad = _serialize_encryption_context(encryption_context=encryption_context) + keys_to_try = [self.crypto_key.key_material] + self.previous_keys + + for key in keys_to_try: + try: + return decrypt(key, ciphertext.ciphertext, ciphertext.iv, ciphertext.tag, aad) + except (InvalidTag, InvalidSignature): + continue + + raise InvalidCiphertextException() + + def decrypt_rsa(self, encrypted: bytes) -> bytes: + private_key = crypto_serialization.load_der_private_key( + self.crypto_key.private_key, password=None, backend=default_backend() + ) + decrypted = private_key.decrypt( + encrypted, + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA256()), + algorithm=hashes.SHA256(), + label=None, + ), + ) + return decrypted + + def sign( + self, data: bytes, message_type: MessageType, signing_algorithm: SigningAlgorithmSpec + ) -> bytes: + hasher, wrapped_hasher = self._construct_sign_verify_hasher(signing_algorithm, message_type) + try: + if signing_algorithm.startswith("ECDSA"): + return self.crypto_key.key.sign(data, ec.ECDSA(wrapped_hasher)) + else: + padding = self._construct_sign_verify_padding(signing_algorithm, hasher) + return self.crypto_key.key.sign(data, padding, wrapped_hasher) + except ValueError as exc: + raise ValidationException(str(exc)) + + def verify( + self, + data: bytes, + message_type: MessageType, + signing_algorithm: SigningAlgorithmSpec, + signature: bytes, + ) -> bool: + hasher, wrapped_hasher = self._construct_sign_verify_hasher(signing_algorithm, message_type) + try: + if signing_algorithm.startswith("ECDSA"): + self.crypto_key.key.public_key().verify(signature, data, ec.ECDSA(wrapped_hasher)) + else: + padding = self._construct_sign_verify_padding(signing_algorithm, hasher) + self.crypto_key.key.public_key().verify(signature, data, padding, wrapped_hasher) + return True + except ValueError as exc: + raise ValidationException(str(exc)) + except InvalidSignature: + # AWS itself raises this exception without any additional message. + raise KMSInvalidSignatureException() + + def derive_shared_secret(self, public_key: bytes) -> bytes: + key_spec = self.metadata.get("KeySpec") + match key_spec: + case KeySpec.ECC_NIST_P256 | KeySpec.ECC_SECG_P256K1: + algorithm = hashes.SHA256() + case KeySpec.ECC_NIST_P384: + algorithm = hashes.SHA384() + case KeySpec.ECC_NIST_P521: + algorithm = hashes.SHA512() + case _: + raise InvalidKeyUsageException( + f"{self.metadata['Arn']} key usage is {self.metadata['KeyUsage']} which is not valid for DeriveSharedSecret." + ) + + # Deserialize public key from DER encoded data to EllipticCurvePublicKey. + try: + pub_key = load_der_public_key(public_key) + except (UnsupportedAlgorithm, ValueError): + raise ValidationException("") + shared_secret = self.crypto_key.key.exchange(ec.ECDH(), pub_key) + # Perform shared secret derivation. + return HKDF( + algorithm=algorithm, + salt=None, + info=b"", + length=algorithm.digest_size, + backend=default_backend(), + ).derive(shared_secret) + + # This method gets called when a key is replicated to another region. It's meant to populate the required metadata + # fields in a new replica key. 
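+    # A sketch of the intended effect, with us-east-1/eu-west-1 as placeholder regions: for a
+    # primary key replicated to eu-west-1, the replica's Arn is recomputed for eu-west-1 via
+    # calculate_and_set_arn, MultiRegionKeyType flips to REPLICA, PrimaryKey keeps the original
+    # ARN and region, and ReplicaKeys gains the replica's new ARN.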
+ def replicate_metadata( + self, replicate_key_request: ReplicateKeyRequest, account_id: str, replica_region: str + ) -> None: + self.metadata["Description"] = replicate_key_request.get("Description") or "" + primary_key_arn = self.metadata["Arn"] + # Multi region keys have the same key ID for all replicas, but ARNs differ, as they include actual regions of + # replicas. + self.calculate_and_set_arn(account_id, replica_region) + + current_replica_keys = self.metadata.get("MultiRegionConfiguration", {}).get( + "ReplicaKeys", [] + ) + current_replica_keys.append(MultiRegionKey(Arn=self.metadata["Arn"], Region=replica_region)) + primary_key_region = ( + self.metadata.get("MultiRegionConfiguration", {}).get("PrimaryKey", {}).get("Region") + ) + + self.metadata["MultiRegionConfiguration"] = MultiRegionConfiguration( + MultiRegionKeyType=MultiRegionKeyType.REPLICA, + PrimaryKey=MultiRegionKey( + Arn=primary_key_arn, + Region=primary_key_region, + ), + ReplicaKeys=current_replica_keys, + ) + + def _get_hmac_context(self, mac_algorithm: MacAlgorithmSpec) -> hmac.HMAC: + if mac_algorithm == "HMAC_SHA_224": + h = hmac.HMAC(self.crypto_key.key_material, hashes.SHA224()) + elif mac_algorithm == "HMAC_SHA_256": + h = hmac.HMAC(self.crypto_key.key_material, hashes.SHA256()) + elif mac_algorithm == "HMAC_SHA_384": + h = hmac.HMAC(self.crypto_key.key_material, hashes.SHA384()) + elif mac_algorithm == "HMAC_SHA_512": + h = hmac.HMAC(self.crypto_key.key_material, hashes.SHA512()) + else: + raise ValidationException( + f"1 validation error detected: Value '{mac_algorithm}' at 'macAlgorithm' " + f"failed to satisfy constraint: Member must satisfy enum value set: " + f"[HMAC_SHA_384, HMAC_SHA_256, HMAC_SHA_224, HMAC_SHA_512]" + ) + return h + + def _construct_sign_verify_hasher( + self, signing_algorithm: SigningAlgorithmSpec, message_type: MessageType + ) -> ( + Prehashed | hashes.SHA256 | hashes.SHA384 | hashes.SHA512, + Prehashed | hashes.SHA256 | hashes.SHA384 | hashes.SHA512, + ): + if "SHA_256" in signing_algorithm: + hasher = hashes.SHA256() + elif "SHA_384" in signing_algorithm: + hasher = hashes.SHA384() + elif "SHA_512" in signing_algorithm: + hasher = hashes.SHA512() + else: + raise ValidationException( + f"Unsupported hash type in SigningAlgorithm '{signing_algorithm}'" + ) + + wrapped_hasher = hasher + if message_type == MessageType.DIGEST: + wrapped_hasher = utils.Prehashed(hasher) + return hasher, wrapped_hasher + + def _construct_sign_verify_padding( + self, + signing_algorithm: SigningAlgorithmSpec, + hasher: Prehashed | hashes.SHA256 | hashes.SHA384 | hashes.SHA512, + ) -> PKCS1v15 | PSS: + if signing_algorithm.startswith("RSA"): + if "PKCS" in signing_algorithm: + return padding.PKCS1v15() + elif "PSS" in signing_algorithm: + return padding.PSS(mgf=padding.MGF1(hasher), salt_length=padding.PSS.DIGEST_LENGTH) + else: + LOG.warning("Unsupported padding in SigningAlgorithm '%s'", signing_algorithm) + + # Not a comment, rather some possibly relevant links for the future. + # https://docs.aws.amazon.com/kms/latest/developerguide/asymm-create-key.html + # "You cannot create an elliptic curve key pair for encryption and decryption." + # https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#asymmetric-keys-concept + # "You can create asymmetric KMS keys that represent RSA key pairs for public key encryption or signing and + # verification, or elliptic curve key pairs for signing and verification." 
+ # + # A useful link with a cheat-sheet of what operations are supported by what types of keys: + # https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-compare.html + # + # https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys + # "AWS KMS generates the data key. Then it encrypts a copy of the data key under a symmetric encryption KMS key that + # you specify." + # + # Data keys are symmetric, data key pairs are asymmetric. + def _populate_metadata( + self, create_key_request: CreateKeyRequest, account_id: str, region: str + ) -> None: + self.metadata = KeyMetadata() + # Metadata fields coming from a creation request + # + # We do not include tags into the metadata. Tags might be present in a key creation request, but our metadata + # only contains data displayed by DescribeKey. And tags are not there: + # https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html + # "DescribeKey does not return the following information: ... Tags on the KMS key." + + self.metadata["Description"] = create_key_request.get("Description") or "" + self.metadata["MultiRegion"] = create_key_request.get("MultiRegion") or False + self.metadata["Origin"] = create_key_request.get("Origin") or "AWS_KMS" + # https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html#KMS-CreateKey-request-CustomerMasterKeySpec + # CustomerMasterKeySpec has been deprecated, still used for compatibility. Is replaced by KeySpec. + # The meaning is the same, just the name differs. + self.metadata["KeySpec"] = ( + create_key_request.get("KeySpec") + or create_key_request.get("CustomerMasterKeySpec") + or "SYMMETRIC_DEFAULT" + ) + self.metadata["CustomerMasterKeySpec"] = self.metadata.get("KeySpec") + self.metadata["KeyUsage"] = self._get_key_usage( + create_key_request.get("KeyUsage"), self.metadata.get("KeySpec") + ) + + # Metadata fields AWS introduces automatically + self.metadata["AWSAccountId"] = account_id + self.metadata["CreationDate"] = datetime.datetime.now() + self.metadata["Enabled"] = create_key_request.get("Origin") != OriginType.EXTERNAL + self.metadata["KeyManager"] = "CUSTOMER" + self.metadata["KeyState"] = ( + KeyState.Enabled + if create_key_request.get("Origin") != OriginType.EXTERNAL + else KeyState.PendingImport + ) + + if TAG_KEY_CUSTOM_ID in self.tags: + # check if the _custom_id_ tag is specified, to set a user-defined KeyId for this key + self.metadata["KeyId"] = self.tags[TAG_KEY_CUSTOM_ID].strip() + elif self.metadata.get("MultiRegion"): + # https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html + # "Notice that multi-Region keys have a distinctive key ID that begins with mrk-. You can use the mrk- prefix to + # identify MRKs programmatically." + # The ID for MultiRegion keys also do not have dashes. 
+ self.metadata["KeyId"] = "mrk-" + str(uuid.uuid4().hex) + else: + self.metadata["KeyId"] = str(uuid.uuid4()) + self.calculate_and_set_arn(account_id, region) + + self._populate_encryption_algorithms( + self.metadata.get("KeyUsage"), self.metadata.get("KeySpec") + ) + self._populate_signing_algorithms( + self.metadata.get("KeyUsage"), self.metadata.get("KeySpec") + ) + self._populate_mac_algorithms(self.metadata.get("KeyUsage"), self.metadata.get("KeySpec")) + + if self.metadata["MultiRegion"]: + self.metadata["MultiRegionConfiguration"] = MultiRegionConfiguration( + MultiRegionKeyType=MultiRegionKeyType.PRIMARY, + PrimaryKey=MultiRegionKey(Arn=self.metadata["Arn"], Region=region), + ReplicaKeys=[], + ) + + def add_tags(self, tags: TagList) -> None: + # Just in case we get None from somewhere. + if not tags: + return + + unique_tag_keys = {tag["TagKey"] for tag in tags} + if len(unique_tag_keys) < len(tags): + raise TagException("Duplicate tag keys") + + if len(tags) > 50: + raise TagException("Too many tags") + + # Do not care if we overwrite an existing tag: + # https://docs.aws.amazon.com/kms/latest/APIReference/API_TagResource.html + # "To edit a tag, specify an existing tag key and a new tag value." + for i, tag in enumerate(tags, start=1): + validate_tag(i, tag) + self.tags[tag.get("TagKey")] = tag.get("TagValue") + + def schedule_key_deletion(self, pending_window_in_days: int) -> None: + self.metadata["Enabled"] = False + # TODO For MultiRegion keys, the status of replicas get set to "PendingDeletion", while the primary key + # becomes "PendingReplicaDeletion". Here we just set all keys to "PendingDeletion", as we do not have any + # notion of a primary key in LocalStack. Might be useful to improve it. + # https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html#primary-delete + self.metadata["KeyState"] = "PendingDeletion" + self.metadata["DeletionDate"] = datetime.datetime.now() + datetime.timedelta( + days=pending_window_in_days + ) + + def _update_key_rotation_date(self) -> None: + if not self.next_rotation_date or self.next_rotation_date < datetime.datetime.now(): + self.next_rotation_date = datetime.datetime.now() + datetime.timedelta( + days=self.rotation_period_in_days + ) + + # An example of how the whole policy should look like: + # https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-overview.html + # The default statement is here: + # https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-default-allow-root-enable-iam + def _get_default_key_policy(self, account_id: str, region: str) -> str: + return json.dumps( + { + "Version": "2012-10-17", + "Id": "key-default-1", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": {"AWS": f"arn:{get_partition(region)}:iam::{account_id}:root"}, + "Action": "kms:*", + "Resource": "*", + } + ], + } + ) + + def _populate_encryption_algorithms(self, key_usage: str, key_spec: str) -> None: + # The two main usages for KMS keys are encryption/decryption and signing/verification. + # Doesn't make sense to populate fields related to encryption/decryption unless the key is created with that + # goal in mind. 
+ if key_usage != "ENCRYPT_DECRYPT": + return + if key_spec == "SYMMETRIC_DEFAULT": + self.metadata["EncryptionAlgorithms"] = ["SYMMETRIC_DEFAULT"] + else: + self.metadata["EncryptionAlgorithms"] = ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"] + + def _populate_signing_algorithms(self, key_usage: str, key_spec: str) -> None: + # The two main usages for KMS keys are encryption/decryption and signing/verification. + # Doesn't make sense to populate fields related to signing/verification unless the key is created with that + # goal in mind. + if key_usage != "SIGN_VERIFY": + return + if key_spec in ["ECC_NIST_P256", "ECC_SECG_P256K1"]: + self.metadata["SigningAlgorithms"] = ["ECDSA_SHA_256"] + elif key_spec == "ECC_NIST_P384": + self.metadata["SigningAlgorithms"] = ["ECDSA_SHA_384"] + elif key_spec == "ECC_NIST_P521": + self.metadata["SigningAlgorithms"] = ["ECDSA_SHA_512"] + else: + self.metadata["SigningAlgorithms"] = [ + "RSASSA_PKCS1_V1_5_SHA_256", + "RSASSA_PKCS1_V1_5_SHA_384", + "RSASSA_PKCS1_V1_5_SHA_512", + "RSASSA_PSS_SHA_256", + "RSASSA_PSS_SHA_384", + "RSASSA_PSS_SHA_512", + ] + + def _populate_mac_algorithms(self, key_usage: str, key_spec: str) -> None: + if key_usage != "GENERATE_VERIFY_MAC": + return + if key_spec == "HMAC_224": + self.metadata["MacAlgorithms"] = ["HMAC_SHA_224"] + elif key_spec == "HMAC_256": + self.metadata["MacAlgorithms"] = ["HMAC_SHA_256"] + elif key_spec == "HMAC_384": + self.metadata["MacAlgorithms"] = ["HMAC_SHA_384"] + elif key_spec == "HMAC_512": + self.metadata["MacAlgorithms"] = ["HMAC_SHA_512"] + + def _get_key_usage(self, request_key_usage: str, key_spec: str) -> str: + if key_spec in HMAC_RANGE_KEY_LENGTHS: + if request_key_usage is None: + raise ValidationException( + "You must specify a KeyUsage value for all KMS keys except for symmetric encryption keys." + ) + elif request_key_usage != KeyUsageType.GENERATE_VERIFY_MAC: + raise ValidationException( + f"1 validation error detected: Value '{request_key_usage}' at 'keyUsage' " + f"failed to satisfy constraint: Member must satisfy enum value set: " + f"[ENCRYPT_DECRYPT, SIGN_VERIFY, GENERATE_VERIFY_MAC]" + ) + else: + return KeyUsageType.GENERATE_VERIFY_MAC + elif request_key_usage == KeyUsageType.KEY_AGREEMENT: + if key_spec not in [ + KeySpec.ECC_NIST_P256, + KeySpec.ECC_NIST_P384, + KeySpec.ECC_NIST_P521, + KeySpec.ECC_SECG_P256K1, + KeySpec.SM2, + ]: + raise ValidationException( + f"KeyUsage {request_key_usage} is not compatible with KeySpec {key_spec}" + ) + else: + return request_key_usage + else: + return request_key_usage or "ENCRYPT_DECRYPT" + + def rotate_key_on_demand(self): + if len(self.previous_keys) >= ON_DEMAND_ROTATION_LIMIT: + raise LimitExceededException( + f"The on-demand rotations limit has been reached for the given keyId. " + f"No more on-demand rotations can be performed for this key: {self.metadata['Arn']}" + ) + self.previous_keys.append(self.crypto_key.key_material) + self.crypto_key = KmsCryptoKey(KeySpec.SYMMETRIC_DEFAULT) + + +class KmsGrant: + # AWS documentation doesn't seem to mention any metadata object for grants like it does mention KeyMetadata for + # keys. But, based on our understanding of AWS documentation for CreateGrant, ListGrants operations etc, + # AWS has some set of fields for grants like it has for keys. So we are going to call them `metadata` here for + # consistency. 
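
Because `rotate_key_on_demand` above pushes the retired material onto `previous_keys`, and `decrypt` tries those keys in turn, ciphertexts created before a rotation remain decryptable. A hedged boto3 sketch (local endpoint and test credentials are illustrative):

    import boto3

    kms = boto3.client("kms", endpoint_url="http://localhost:4566", region_name="us-east-1",
                       aws_access_key_id="test", aws_secret_access_key="test")
    key_id = kms.create_key()["KeyMetadata"]["KeyId"]
    blob = kms.encrypt(KeyId=key_id, Plaintext=b"secret")["CiphertextBlob"]

    kms.rotate_key_on_demand(KeyId=key_id)
    # decrypt() walks the current material plus previous_keys, so this still succeeds.
    assert kms.decrypt(CiphertextBlob=blob)["Plaintext"] == b"secret"
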
+ metadata: Dict + # Tokens are not a part of metadata, as their use is more limited and specific than for the rest of the + # metadata: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # Tokens are used to refer to a grant in a short period right after the grant gets created. Normally it might + # take KMS up to 5 minutes to make a new grant available. In that time window referring to a grant by its + # GrantId might not work, so tokens are supposed to be used. The tokens could possibly be used even + # afterwards. But since the only way to get a token is through a CreateGrant operation (see below), the chances + # of someone storing a token and using it later are slim. + # + # https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # "CreateGrant is the only operation that returns a grant token. You cannot get a grant token from any other + # AWS KMS operation or from the CloudTrail log event for the CreateGrant operation. The ListGrants and + # ListRetirableGrants operations return the grant ID, but not a grant token." + # + # Usually a grant might have multiple unique tokens. But here we just model it with a single token for + # simplicity. + token: str + + def __init__(self, create_grant_request: CreateGrantRequest, account_id: str, region_name: str): + self.metadata = dict(create_grant_request) + + if is_valid_key_arn(self.metadata["KeyId"]): + self.metadata["KeyArn"] = self.metadata["KeyId"] + else: + self.metadata["KeyArn"] = kms_key_arn(self.metadata["KeyId"], account_id, region_name) + + self.metadata["GrantId"] = long_uid() + self.metadata["CreationDate"] = datetime.datetime.now() + # https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantListEntry.html + # "If a name was provided in the CreateGrant request, that name is returned. Otherwise this value is null." + # According to the examples in AWS docs + # https://docs.aws.amazon.com/kms/latest/APIReference/API_ListGrants.html#API_ListGrants_Examples + # The Name field is present with just an empty string value. + self.metadata.setdefault("Name", "") + + # Encode account ID and region in grant token. + # This way the grant can be located when being retired by grant principal. + # The token consists of account ID, region name and a UUID concatenated with ':' and encoded with base64 + decoded_token = account_id + ":" + region_name + ":" + long_uid() + self.token = to_str(base64.b64encode(to_bytes(decoded_token))) + + +class KmsAlias: + # Like with grants (see comment for KmsGrant), there is no mention of some specific object modeling metadata + # for KMS aliases. But there is data that is some metadata, so we model it in a way similar to KeyMetadata for keys. 
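
The grant token built in `KmsGrant.__init__` above is just base64 over `"<account>:<region>:<uuid>"`, which is what lets RetireGrant locate the owning store from the token alone. The encoding round-tripped in isolation (values invented):

    import base64
    import uuid

    account_id, region = "000000000000", "us-east-1"   # invented values
    token = base64.b64encode(f"{account_id}:{region}:{uuid.uuid4()}".encode()).decode()

    got_account, got_region, _ = base64.b64decode(token).decode().split(":")
    assert (got_account, got_region) == (account_id, region)
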
+ metadata: Dict + + def __init__( + self, + create_alias_request: CreateAliasRequest = None, + account_id: str = None, + region: str = None, + ): + create_alias_request = create_alias_request or CreateAliasRequest() + self.metadata = {} + self.metadata["AliasName"] = create_alias_request.get("AliasName") + self.metadata["TargetKeyId"] = create_alias_request.get("TargetKeyId") + self.update_date_of_last_update() + self.metadata["CreationDate"] = self.metadata["LastUpdateDate"] + self.metadata["AliasArn"] = kms_alias_arn(self.metadata["AliasName"], account_id, region) + + def update_date_of_last_update(self): + self.metadata["LastUpdateDate"] = datetime.datetime.now() + + +@dataclass +class KeyImportState: + key_id: str + import_token: str + wrapping_algo: str + key: KmsKey + + +class KmsStore(BaseStore): + # maps key ids to keys + keys: Dict[str, KmsKey] = LocalAttribute(default=dict) + + # According to AWS documentation on grants https://docs.aws.amazon.com/kms/latest/APIReference/API_RetireGrant.html + # "Cross-account use: Yes. You can retire a grant on a KMS key in a different AWS account." + + # maps grant ids to grants + grants: Dict[str, KmsGrant] = LocalAttribute(default=dict) + + # maps from (grant names (used for idempotency), key id) to grant ids + grant_names: Dict[Tuple[str, str], str] = LocalAttribute(default=dict) + + # maps grant tokens to grant ids + grant_tokens: Dict[str, str] = LocalAttribute(default=dict) + + # maps key alias names to aliases + aliases: Dict[str, KmsAlias] = LocalAttribute(default=dict) + + # maps import tokens to import data + imports: Dict[str, KeyImportState] = LocalAttribute(default=dict) + + +kms_stores = AccountRegionBundle("kms", KmsStore) diff --git a/localstack-core/localstack/services/kms/provider.py b/localstack-core/localstack/services/kms/provider.py new file mode 100644 index 0000000000000..9f29780fa2103 --- /dev/null +++ b/localstack-core/localstack/services/kms/provider.py @@ -0,0 +1,1575 @@ +import base64 +import copy +import datetime +import logging +import os +from typing import Dict, Tuple + +from cryptography.exceptions import InvalidTag +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric import padding + +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.kms import ( + AlgorithmSpec, + AlreadyExistsException, + CancelKeyDeletionRequest, + CancelKeyDeletionResponse, + CiphertextType, + CreateAliasRequest, + CreateGrantRequest, + CreateGrantResponse, + CreateKeyRequest, + CreateKeyResponse, + DataKeyPairSpec, + DateType, + DecryptResponse, + DeleteAliasRequest, + DeriveSharedSecretResponse, + DescribeKeyRequest, + DescribeKeyResponse, + DisabledException, + DisableKeyRequest, + DisableKeyRotationRequest, + EnableKeyRequest, + EnableKeyRotationRequest, + EncryptionAlgorithmSpec, + EncryptionContextType, + EncryptResponse, + ExpirationModelType, + GenerateDataKeyPairResponse, + GenerateDataKeyPairWithoutPlaintextResponse, + GenerateDataKeyRequest, + GenerateDataKeyResponse, + GenerateDataKeyWithoutPlaintextRequest, + GenerateDataKeyWithoutPlaintextResponse, + GenerateMacRequest, + GenerateMacResponse, + GenerateRandomRequest, + GenerateRandomResponse, + GetKeyPolicyRequest, + GetKeyPolicyResponse, + GetKeyRotationStatusRequest, + GetKeyRotationStatusResponse, + GetParametersForImportResponse, + GetPublicKeyResponse, + GrantIdType, + GrantTokenList, + GrantTokenType, + ImportKeyMaterialResponse, + IncorrectKeyException, + 
InvalidCiphertextException, + InvalidGrantIdException, + InvalidKeyUsageException, + KeyAgreementAlgorithmSpec, + KeyIdType, + KeySpec, + KeyState, + KeyUsageType, + KmsApi, + KMSInvalidStateException, + LimitType, + ListAliasesResponse, + ListGrantsRequest, + ListGrantsResponse, + ListKeyPoliciesRequest, + ListKeyPoliciesResponse, + ListKeysRequest, + ListKeysResponse, + ListResourceTagsRequest, + ListResourceTagsResponse, + MacAlgorithmSpec, + MarkerType, + MultiRegionKey, + NotFoundException, + NullableBooleanType, + OriginType, + PlaintextType, + PrincipalIdType, + PublicKeyType, + PutKeyPolicyRequest, + RecipientInfo, + ReEncryptResponse, + ReplicateKeyRequest, + ReplicateKeyResponse, + RotateKeyOnDemandRequest, + RotateKeyOnDemandResponse, + ScheduleKeyDeletionRequest, + ScheduleKeyDeletionResponse, + SignRequest, + SignResponse, + TagResourceRequest, + UnsupportedOperationException, + UntagResourceRequest, + UpdateAliasRequest, + UpdateKeyDescriptionRequest, + VerifyMacRequest, + VerifyMacResponse, + VerifyRequest, + VerifyResponse, + WrappingKeySpec, +) +from localstack.services.kms.exceptions import ValidationException +from localstack.services.kms.models import ( + MULTI_REGION_PATTERN, + PATTERN_UUID, + RESERVED_ALIASES, + KeyImportState, + KmsAlias, + KmsCryptoKey, + KmsGrant, + KmsKey, + KmsStore, + deserialize_ciphertext_blob, + kms_stores, +) +from localstack.services.kms.utils import ( + execute_dry_run_capable, + is_valid_key_arn, + parse_key_arn, + validate_alias_name, +) +from localstack.services.plugins import ServiceLifecycleHook +from localstack.utils.aws.arns import get_partition, kms_alias_arn, parse_arn +from localstack.utils.collections import PaginatedList +from localstack.utils.common import select_attributes +from localstack.utils.strings import short_uid, to_bytes, to_str + +LOG = logging.getLogger(__name__) + +# valid operations +VALID_OPERATIONS = [ + "CreateKey", + "Decrypt", + "Encrypt", + "GenerateDataKey", + "GenerateDataKeyWithoutPlaintext", + "ReEncryptFrom", + "ReEncryptTo", + "Sign", + "Verify", + "GetPublicKey", + "CreateGrant", + "RetireGrant", + "DescribeKey", + "GenerateDataKeyPair", + "GenerateDataKeyPairWithoutPlaintext", +] + + +class ValidationError(CommonServiceException): + """General validation error type (defined in the AWS docs, but not part of the botocore spec)""" + + def __init__(self, message=None): + super().__init__("ValidationError", message=message) + + +# For all operations constraints for states of keys are based on +# https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +class KmsProvider(KmsApi, ServiceLifecycleHook): + """ + The LocalStack Key Management Service (KMS) provider. + + Cross-account access is supported by following operations where key ID belonging + to another account can be used with the key ARN. 
+ - CreateGrant + - DescribeKey + - GetKeyRotationStatus + - GetPublicKey + - ListGrants + - RetireGrant + - RevokeGrant + - Decrypt + - Encrypt + - GenerateDataKey + - GenerateDataKeyPair + - GenerateDataKeyPairWithoutPlaintext + - GenerateDataKeyWithoutPlaintext + - GenerateMac + - ReEncrypt + - Sign + - Verify + - VerifyMac + """ + + # + # Helpers + # + + @staticmethod + def _get_store(account_id: str, region_name: str) -> KmsStore: + return kms_stores[account_id][region_name] + + @staticmethod + def _create_kms_alias(account_id: str, region_name: str, request: CreateAliasRequest): + store = kms_stores[account_id][region_name] + alias = KmsAlias(request, account_id, region_name) + alias_name = request.get("AliasName") + store.aliases[alias_name] = alias + + @staticmethod + def _create_kms_key( + account_id: str, region_name: str, request: CreateKeyRequest = None + ) -> KmsKey: + store = kms_stores[account_id][region_name] + key = KmsKey(request, account_id, region_name) + key_id = key.metadata["KeyId"] + store.keys[key_id] = key + return key + + @staticmethod + def _get_key_id_from_any_id(account_id: str, region_name: str, some_id: str) -> str: + """ + Resolve a KMS key ID by using one of the following identifiers: + - key ID + - key ARN + - key alias + - key alias ARN + """ + alias_name = None + key_id = None + key_arn = None + + if some_id.startswith("arn:"): + if ":alias/" in some_id: + alias_arn = some_id + alias_name = "alias/" + alias_arn.split(":alias/")[1] + elif ":key/" in some_id: + key_arn = some_id + key_id = key_arn.split(":key/")[1] + parsed_arn = parse_arn(key_arn) + if parsed_arn["region"] != region_name: + raise NotFoundException(f"Invalid arn {parsed_arn['region']}") + else: + raise ValueError( + f"Supplied value of {some_id} is an ARN, but neither of a KMS key nor of a KMS key " + f"alias" + ) + elif some_id.startswith("alias/"): + alias_name = some_id + else: + key_id = some_id + + store = kms_stores[account_id][region_name] + + if alias_name: + KmsProvider._create_alias_if_reserved_and_not_exists( + account_id, + region_name, + alias_name, + ) + if alias_name not in store.aliases: + raise NotFoundException(f"Unable to find KMS alias with name {alias_name}") + key_id = store.aliases[alias_name].metadata["TargetKeyId"] + + # regular KeyId are UUID, and MultiRegion keys starts with 'mrk-' and 32 hex chars + if not PATTERN_UUID.match(key_id) and not MULTI_REGION_PATTERN.match(key_id): + raise NotFoundException(f"Invalid keyId '{key_id}'") + + if key_id not in store.keys: + if not key_arn: + key_arn = ( + f"arn:{get_partition(region_name)}:kms:{region_name}:{account_id}:key/{key_id}" + ) + raise NotFoundException(f"Key '{key_arn}' does not exist") + + return key_id + + @staticmethod + def _create_alias_if_reserved_and_not_exists( + account_id: str, region_name: str, alias_name: str + ): + store = kms_stores[account_id][region_name] + if alias_name not in RESERVED_ALIASES or alias_name in store.aliases: + return + create_key_request = {} + key_id = KmsProvider._create_kms_key( + account_id, + region_name, + create_key_request, + ).metadata.get("KeyId") + create_alias_request = CreateAliasRequest(AliasName=alias_name, TargetKeyId=key_id) + KmsProvider._create_kms_alias(account_id, region_name, create_alias_request) + + # While in AWS keys have more than Enabled, Disabled and PendingDeletion states, we currently only model these 3 + # in LocalStack, so this function is limited to them. 
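
`_get_key_id_from_any_id` above accepts four spellings of the same key: key ID, key ARN, alias name, and alias ARN. A hedged boto3 check that they all resolve to one key (local endpoint, test credentials, and the alias name are assumptions):

    import boto3

    kms = boto3.client("kms", endpoint_url="http://localhost:4566", region_name="us-east-1",
                       aws_access_key_id="test", aws_secret_access_key="test")
    key = kms.create_key()["KeyMetadata"]
    kms.create_alias(AliasName="alias/demo", TargetKeyId=key["KeyId"])
    alias_arn = key["Arn"].replace(f"key/{key['KeyId']}", "alias/demo")

    for ident in (key["KeyId"], key["Arn"], "alias/demo", alias_arn):
        assert kms.describe_key(KeyId=ident)["KeyMetadata"]["KeyId"] == key["KeyId"]
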
+ # + # The current default values are based on most of the operations working in AWS with enabled keys, but failing with + # disabled and those pending deletion. + # + # If we decide to use the other states as well, we might want to come up with a better key state validation per + # operation. Can consult this page for what states are supported by various operations: + # https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + @staticmethod + def _get_kms_key( + account_id: str, + region_name: str, + any_type_of_key_id: str, + any_key_state_allowed: bool = False, + enabled_key_allowed: bool = True, + disabled_key_allowed: bool = False, + pending_deletion_key_allowed: bool = False, + ) -> KmsKey: + store = kms_stores[account_id][region_name] + + if any_key_state_allowed: + enabled_key_allowed = True + disabled_key_allowed = True + pending_deletion_key_allowed = True + if not (enabled_key_allowed or disabled_key_allowed or pending_deletion_key_allowed): + raise ValueError("A key is requested, but all possible key states are prohibited") + + key_id = KmsProvider._get_key_id_from_any_id(account_id, region_name, any_type_of_key_id) + key = store.keys[key_id] + + if not disabled_key_allowed and key.metadata.get("KeyState") == "Disabled": + raise DisabledException(f"{key.metadata.get('Arn')} is disabled.") + if not pending_deletion_key_allowed and key.metadata.get("KeyState") == "PendingDeletion": + raise KMSInvalidStateException(f"{key.metadata.get('Arn')} is pending deletion.") + if not enabled_key_allowed and key.metadata.get("KeyState") == "Enabled": + raise KMSInvalidStateException( + f"{key.metadata.get('Arn')} is enabled, but the operation doesn't support " + f"such a state" + ) + return store.keys[key_id] + + @staticmethod + def _get_kms_alias(account_id: str, region_name: str, alias_name_or_arn: str) -> KmsAlias: + store = kms_stores[account_id][region_name] + + if not alias_name_or_arn.startswith("arn:"): + alias_name = alias_name_or_arn + else: + if ":alias/" not in alias_name_or_arn: + raise ValidationException(f"{alias_name_or_arn} is not a valid alias ARN") + alias_name = "alias/" + alias_name_or_arn.split(":alias/")[1] + + validate_alias_name(alias_name) + + if alias_name not in store.aliases: + alias_arn = kms_alias_arn(alias_name, account_id, region_name) + # AWS itself uses AliasArn instead of AliasName in this exception. + raise NotFoundException(f"Alias {alias_arn} is not found.") + + return store.aliases.get(alias_name) + + @staticmethod + def _parse_key_id(key_id_or_arn: str, context: RequestContext) -> Tuple[str, str, str]: + """ + Return locator attributes (account ID, region_name, key ID) of a given KMS key. + + If an ARN is provided, this is extracted from it. Otherwise, context data is used. 
+ + :param key_id_or_arn: KMS key ID or ARN + :param context: request context + :return: Tuple of account ID, region name and key ID + """ + if is_valid_key_arn(key_id_or_arn): + account_id, region_name, key_id = parse_key_arn(key_id_or_arn) + if region_name != context.region: + raise NotFoundException(f"Invalid arn {region_name}") + return account_id, region_name, key_id + + return context.account_id, context.region, key_id_or_arn + + @staticmethod + def _is_rsa_spec(key_spec: str) -> bool: + return key_spec in [KeySpec.RSA_2048, KeySpec.RSA_3072, KeySpec.RSA_4096] + + # + # Operation Handlers + # + + @handler("CreateKey", expand=False) + def create_key( + self, + context: RequestContext, + request: CreateKeyRequest = None, + ) -> CreateKeyResponse: + key = self._create_kms_key(context.account_id, context.region, request) + return CreateKeyResponse(KeyMetadata=key.metadata) + + @handler("ScheduleKeyDeletion", expand=False) + def schedule_key_deletion( + self, context: RequestContext, request: ScheduleKeyDeletionRequest + ) -> ScheduleKeyDeletionResponse: + pending_window = int(request.get("PendingWindowInDays", 30)) + if pending_window < 7 or pending_window > 30: + raise ValidationException( + f"PendingWindowInDays should be between 7 and 30, but it is {pending_window}" + ) + key = self._get_kms_key( + context.account_id, + context.region, + request.get("KeyId"), + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + key.schedule_key_deletion(pending_window) + attrs = ["DeletionDate", "KeyId", "KeyState"] + result = select_attributes(key.metadata, attrs) + result["PendingWindowInDays"] = pending_window + return ScheduleKeyDeletionResponse(**result) + + @handler("CancelKeyDeletion", expand=False) + def cancel_key_deletion( + self, context: RequestContext, request: CancelKeyDeletionRequest + ) -> CancelKeyDeletionResponse: + key = self._get_kms_key( + context.account_id, + context.region, + request.get("KeyId"), + enabled_key_allowed=False, + pending_deletion_key_allowed=True, + ) + key.metadata["KeyState"] = KeyState.Disabled + key.metadata["DeletionDate"] = None + # https://docs.aws.amazon.com/kms/latest/APIReference/API_CancelKeyDeletion.html#API_CancelKeyDeletion_ResponseElements + # "The Amazon Resource Name (key ARN) of the KMS key whose deletion is canceled." + return CancelKeyDeletionResponse(KeyId=key.metadata.get("Arn")) + + @handler("DisableKey", expand=False) + def disable_key(self, context: RequestContext, request: DisableKeyRequest) -> None: + # Technically, AWS allows DisableKey for keys that are already disabled. + key = self._get_kms_key( + context.account_id, + context.region, + request.get("KeyId"), + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + key.metadata["KeyState"] = KeyState.Disabled + key.metadata["Enabled"] = False + + @handler("EnableKey", expand=False) + def enable_key(self, context: RequestContext, request: EnableKeyRequest) -> None: + key = self._get_kms_key( + context.account_id, + context.region, + request.get("KeyId"), + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + key.metadata["KeyState"] = KeyState.Enabled + key.metadata["Enabled"] = True + + @handler("ListKeys", expand=False) + def list_keys(self, context: RequestContext, request: ListKeysRequest) -> ListKeysResponse: + # https://docs.aws.amazon.com/kms/latest/APIReference/API_ListKeys.html#API_ListKeys_ResponseSyntax + # Out of whole KeyMetadata only two fields are present in the response. 
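
As modelled above, cancelling a scheduled deletion leaves the key Disabled rather than Enabled, matching AWS behaviour. A hedged boto3 sketch (local endpoint and test credentials are illustrative):

    import boto3

    kms = boto3.client("kms", endpoint_url="http://localhost:4566", region_name="us-east-1",
                       aws_access_key_id="test", aws_secret_access_key="test")
    key_id = kms.create_key()["KeyMetadata"]["KeyId"]

    kms.schedule_key_deletion(KeyId=key_id, PendingWindowInDays=7)
    kms.cancel_key_deletion(KeyId=key_id)
    # Cancelling does not re-enable the key; an explicit EnableKey is still needed.
    assert kms.describe_key(KeyId=key_id)["KeyMetadata"]["KeyState"] == "Disabled"
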
+        keys_list = PaginatedList(
+            [
+                {"KeyId": key.metadata["KeyId"], "KeyArn": key.metadata["Arn"]}
+                for key in self._get_store(context.account_id, context.region).keys.values()
+            ]
+        )
+        # https://docs.aws.amazon.com/kms/latest/APIReference/API_ListKeys.html#API_ListKeys_RequestParameters
+        # Regarding the default value of Limit: "If you do not include a value, it defaults to 100."
+        page, next_token = keys_list.get_page(
+            lambda key_data: key_data.get("KeyId"),
+            next_token=request.get("Marker"),
+            page_size=request.get("Limit", 100),
+        )
+        kwargs = {"NextMarker": next_token, "Truncated": True} if next_token else {}
+        return ListKeysResponse(Keys=page, **kwargs)
+
+    @handler("DescribeKey", expand=False)
+    def describe_key(
+        self, context: RequestContext, request: DescribeKeyRequest
+    ) -> DescribeKeyResponse:
+        account_id, region_name, key_id = self._parse_key_id(request["KeyId"], context)
+        key = self._get_kms_key(account_id, region_name, key_id, any_key_state_allowed=True)
+        return DescribeKeyResponse(KeyMetadata=key.metadata)
+
+    @handler("ReplicateKey", expand=False)
+    def replicate_key(
+        self, context: RequestContext, request: ReplicateKeyRequest
+    ) -> ReplicateKeyResponse:
+        account_id = context.account_id
+        key = self._get_kms_key(account_id, context.region, request.get("KeyId"))
+        key_id = key.metadata.get("KeyId")
+        if not key.metadata.get("MultiRegion"):
+            raise UnsupportedOperationException(
+                f"Unable to replicate a non-MultiRegion key {key_id}"
+            )
+        replica_region = request.get("ReplicaRegion")
+        replicate_to_store = kms_stores[account_id][replica_region]
+        if key_id in replicate_to_store.keys:
+            raise AlreadyExistsException(
+                f"Unable to replicate key {key_id} to region {replica_region}, as the key "
+                f"already exists there"
+            )
+        replica_key = copy.deepcopy(key)
+        replica_key.replicate_metadata(request, account_id, replica_region)
+        replicate_to_store.keys[key_id] = replica_key
+
+        self.update_primary_key_with_replica_keys(key, replica_key, replica_region)
+
+        return ReplicateKeyResponse(ReplicaKeyMetadata=replica_key.metadata)
+
+    # Adds a new multi-Region replica key to the primary key's metadata.
+    @staticmethod
+    def update_primary_key_with_replica_keys(key: KmsKey, replica_key: KmsKey, region: str):
+        key.metadata["MultiRegionConfiguration"]["ReplicaKeys"].append(
+            MultiRegionKey(
+                Arn=replica_key.metadata["Arn"],
+                Region=region,
+            )
+        )
+
+    @handler("UpdateKeyDescription", expand=False)
+    def update_key_description(
+        self, context: RequestContext, request: UpdateKeyDescriptionRequest
+    ) -> None:
+        key = self._get_kms_key(
+            context.account_id,
+            context.region,
+            request.get("KeyId"),
+            enabled_key_allowed=True,
+            disabled_key_allowed=True,
+        )
+        key.metadata["Description"] = request.get("Description")
+
+    @handler("CreateGrant", expand=False)
+    def create_grant(
+        self, context: RequestContext, request: CreateGrantRequest
+    ) -> CreateGrantResponse:
+        key_account_id, key_region_name, key_id = self._parse_key_id(request["KeyId"], context)
+        key = self._get_kms_key(key_account_id, key_region_name, key_id)
+
+        # KeyId can potentially hold one of multiple different types of key identifiers. Here we find a key no
+        # matter which type of id is used.
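
A hedged boto3 sketch of the replication flow above: the replica keeps the primary's key ID while its ARN carries the replica region (local endpoint, test credentials, and the region names are illustrative):

    import boto3

    kms = boto3.client("kms", endpoint_url="http://localhost:4566", region_name="us-east-1",
                       aws_access_key_id="test", aws_secret_access_key="test")
    primary = kms.create_key(MultiRegion=True)["KeyMetadata"]

    replica = kms.replicate_key(KeyId=primary["KeyId"], ReplicaRegion="eu-west-1")
    meta = replica["ReplicaKeyMetadata"]
    assert meta["KeyId"] == primary["KeyId"]   # same ID across regions
    assert ":eu-west-1:" in meta["Arn"]        # region-specific ARN
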
+ key_id = key.metadata.get("KeyId") + request["KeyId"] = key_id + self._validate_grant_request(request) + grant_name = request.get("Name") + + store = self._get_store(context.account_id, context.region) + if grant_name and (grant_name, key_id) in store.grant_names: + grant = store.grants[store.grant_names[(grant_name, key_id)]] + else: + grant = KmsGrant(request, context.account_id, context.region) + grant_id = grant.metadata["GrantId"] + store.grants[grant_id] = grant + if grant_name: + store.grant_names[(grant_name, key_id)] = grant_id + store.grant_tokens[grant.token] = grant_id + + # At the moment we do not support multiple GrantTokens for grant creation request. Instead, we always use + # the same token. For the reference, AWS documentation says: + # https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateGrant.html#API_CreateGrant_RequestParameters + # "The returned grant token is unique with every CreateGrant request, even when a duplicate GrantId is + # returned". "A duplicate GrantId" refers to the idempotency of grant creation requests - if a request has + # "Name" field, and if such name already belongs to a previously created grant, no new grant gets created + # and the existing grant with the name is returned. + return CreateGrantResponse(GrantId=grant.metadata["GrantId"], GrantToken=grant.token) + + @handler("ListGrants", expand=False) + def list_grants( + self, context: RequestContext, request: ListGrantsRequest + ) -> ListGrantsResponse: + if not request.get("KeyId"): + raise ValidationError("Required input parameter KeyId not specified") + key_account_id, key_region_name, _ = self._parse_key_id(request["KeyId"], context) + # KeyId can potentially hold one of multiple different types of key identifiers. Here we find a key no + # matter which type of id is used. + key = self._get_kms_key( + key_account_id, key_region_name, request.get("KeyId"), any_key_state_allowed=True + ) + key_id = key.metadata.get("KeyId") + + store = self._get_store(context.account_id, context.region) + grant_id = request.get("GrantId") + if grant_id: + if grant_id not in store.grants: + raise InvalidGrantIdException() + return ListGrantsResponse(Grants=[store.grants[grant_id].metadata]) + + matching_grants = [] + grantee_principal = request.get("GranteePrincipal") + for grant in store.grants.values(): + # KeyId is a mandatory field of ListGrants request, so is going to be present. + _, _, grant_key_id = parse_key_arn(grant.metadata["KeyArn"]) + if grant_key_id != key_id: + continue + # GranteePrincipal is a mandatory field for CreateGrant, should be in grants. But it is an optional field + # for ListGrants, so might not be there. 
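
The `Name`-based idempotency described above can be observed directly: two CreateGrant calls with the same name on the same key return the same grant ID. A hedged boto3 sketch (local endpoint, test credentials, and the principal ARN are assumptions):

    import boto3

    kms = boto3.client("kms", endpoint_url="http://localhost:4566", region_name="us-east-1",
                       aws_access_key_id="test", aws_secret_access_key="test")
    key_id = kms.create_key()["KeyMetadata"]["KeyId"]
    principal = "arn:aws:iam::000000000000:role/grantee"   # invented principal

    first = kms.create_grant(KeyId=key_id, GranteePrincipal=principal,
                             Operations=["Decrypt"], Name="my-grant")
    second = kms.create_grant(KeyId=key_id, GranteePrincipal=principal,
                              Operations=["Decrypt"], Name="my-grant")
    assert first["GrantId"] == second["GrantId"]
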
+ if grantee_principal and grant.metadata["GranteePrincipal"] != grantee_principal: + continue + matching_grants.append(grant.metadata) + + grants_list = PaginatedList(matching_grants) + page, next_token = grants_list.get_page( + lambda grant_data: grant_data.get("GrantId"), + next_token=request.get("Marker"), + page_size=request.get("Limit", 50), + ) + kwargs = {"NextMarker": next_token, "Truncated": True} if next_token else {} + + return ListGrantsResponse(Grants=page, **kwargs) + + @staticmethod + def _delete_grant(store: KmsStore, grant_id: str, key_id: str): + grant = store.grants[grant_id] + + _, _, grant_key_id = parse_key_arn(grant.metadata.get("KeyArn")) + if key_id != grant_key_id: + raise ValidationError(f"Invalid KeyId={key_id} specified for grant {grant_id}") + + store.grant_tokens.pop(grant.token) + store.grant_names.pop((grant.metadata.get("Name"), key_id), None) + store.grants.pop(grant_id) + + def revoke_grant( + self, + context: RequestContext, + key_id: KeyIdType, + grant_id: GrantIdType, + dry_run: NullableBooleanType = None, + **kwargs, + ) -> None: + # TODO add support for "dry_run" + key_account_id, key_region_name, key_id = self._parse_key_id(key_id, context) + key = self._get_kms_key(key_account_id, key_region_name, key_id, any_key_state_allowed=True) + key_id = key.metadata.get("KeyId") + + store = self._get_store(context.account_id, context.region) + + if grant_id not in store.grants: + raise InvalidGrantIdException() + + self._delete_grant(store, grant_id, key_id) + + def retire_grant( + self, + context: RequestContext, + grant_token: GrantTokenType = None, + key_id: KeyIdType = None, + grant_id: GrantIdType = None, + dry_run: NullableBooleanType = None, + **kwargs, + ) -> None: + # TODO add support for "dry_run" + if not grant_token and (not grant_id or not key_id): + raise ValidationException("Grant token OR (grant ID, key ID) must be specified") + + if grant_token: + decoded_token = to_str(base64.b64decode(grant_token)) + grant_account_id, grant_region_name, _ = decoded_token.split(":") + grant_store = self._get_store(grant_account_id, grant_region_name) + + if grant_token not in grant_store.grant_tokens: + raise NotFoundException(f"Unable to find grant token {grant_token}") + + grant_id = grant_store.grant_tokens[grant_token] + else: + grant_store = self._get_store(context.account_id, context.region) + + if key_id: + key_account_id, key_region_name, key_id = self._parse_key_id(key_id, context) + key = self._get_kms_key( + key_account_id, key_region_name, key_id, any_key_state_allowed=True + ) + key_id = key.metadata.get("KeyId") + else: + _, _, key_id = parse_key_arn(grant_store.grants[grant_id].metadata.get("KeyArn")) + + self._delete_grant(grant_store, grant_id, key_id) + + def list_retirable_grants( + self, + context: RequestContext, + retiring_principal: PrincipalIdType, + limit: LimitType = None, + marker: MarkerType = None, + **kwargs, + ) -> ListGrantsResponse: + if not retiring_principal: + raise ValidationError("Required input parameter 'RetiringPrincipal' not specified") + + matching_grants = [ + grant.metadata + for grant in self._get_store(context.account_id, context.region).grants.values() + if grant.metadata.get("RetiringPrincipal") == retiring_principal + ] + grants_list = PaginatedList(matching_grants) + limit = limit or 50 + page, next_token = grants_list.get_page( + lambda grant_data: grant_data.get("GrantId"), + next_token=marker, + page_size=limit, + ) + kwargs = {"NextMarker": next_token, "Truncated": True} if next_token else {} + + 
return ListGrantsResponse(Grants=page, **kwargs) + + def get_public_key( + self, + context: RequestContext, + key_id: KeyIdType, + grant_tokens: GrantTokenList = None, + **kwargs, + ) -> GetPublicKeyResponse: + # According to https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html, GetPublicKey is supposed + # to fail for disabled keys. But it actually doesn't fail in AWS. + account_id, region_name, key_id = self._parse_key_id(key_id, context) + key = self._get_kms_key( + account_id, + region_name, + key_id, + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + attrs = [ + "KeySpec", + "KeyUsage", + "EncryptionAlgorithms", + "SigningAlgorithms", + ] + result = select_attributes(key.metadata, attrs) + result["PublicKey"] = key.crypto_key.public_key + result["KeyId"] = key.metadata["Arn"] + return GetPublicKeyResponse(**result) + + def _generate_data_key_pair( + self, + context: RequestContext, + key_id: str, + key_pair_spec: str, + encryption_context: EncryptionContextType = None, + dry_run: NullableBooleanType = None, + ): + account_id, region_name, key_id = self._parse_key_id(key_id, context) + key = self._get_kms_key(account_id, region_name, key_id) + self._validate_key_for_encryption_decryption(context, key) + KmsCryptoKey.assert_valid(key_pair_spec) + return execute_dry_run_capable( + self._build_data_key_pair_response, dry_run, key, key_pair_spec, encryption_context + ) + + def _build_data_key_pair_response( + self, key: KmsKey, key_pair_spec: str, encryption_context: EncryptionContextType = None + ): + crypto_key = KmsCryptoKey(key_pair_spec) + + return { + "KeyId": key.metadata["Arn"], + "KeyPairSpec": key_pair_spec, + "PrivateKeyCiphertextBlob": key.encrypt(crypto_key.private_key, encryption_context), + "PrivateKeyPlaintext": crypto_key.private_key, + "PublicKey": crypto_key.public_key, + } + + @handler("GenerateDataKeyPair") + def generate_data_key_pair( + self, + context: RequestContext, + key_id: KeyIdType, + key_pair_spec: DataKeyPairSpec, + encryption_context: EncryptionContextType = None, + grant_tokens: GrantTokenList = None, + recipient: RecipientInfo = None, + dry_run: NullableBooleanType = None, + **kwargs, + ) -> GenerateDataKeyPairResponse: + result = self._generate_data_key_pair( + context, key_id, key_pair_spec, encryption_context, dry_run + ) + return GenerateDataKeyPairResponse(**result) + + @handler("GenerateRandom", expand=False) + def generate_random( + self, context: RequestContext, request: GenerateRandomRequest + ) -> GenerateRandomResponse: + number_of_bytes = request.get("NumberOfBytes") + if number_of_bytes is None: + raise ValidationException("NumberOfBytes is required.") + if number_of_bytes > 1024: + raise ValidationException( + f"1 validation error detected: Value '{number_of_bytes}' at 'numberOfBytes' failed " + "to satisfy constraint: Member must have value less than or equal to 1024" + ) + if number_of_bytes < 1: + raise ValidationException( + f"1 validation error detected: Value '{number_of_bytes}' at 'numberOfBytes' failed " + "to satisfy constraint: Member must have value greater than or equal to 1" + ) + + byte_string = os.urandom(number_of_bytes) + + return GenerateRandomResponse(Plaintext=byte_string) + + @handler("GenerateDataKeyPairWithoutPlaintext") + def generate_data_key_pair_without_plaintext( + self, + context: RequestContext, + key_id: KeyIdType, + key_pair_spec: DataKeyPairSpec, + encryption_context: EncryptionContextType = None, + grant_tokens: GrantTokenList = None, + dry_run: NullableBooleanType = None, + 
**kwargs, + ) -> GenerateDataKeyPairWithoutPlaintextResponse: + result = self._generate_data_key_pair( + context, key_id, key_pair_spec, encryption_context, dry_run + ) + result.pop("PrivateKeyPlaintext") + return GenerateDataKeyPairResponse(**result) + + # We currently act on neither on KeySpec setting (which is different from and holds values different then + # KeySpec for CreateKey) nor on NumberOfBytes. Instead, we generate a key with a key length that is "standard" in + # LocalStack. + # + def _generate_data_key( + self, context: RequestContext, key_id: str, encryption_context: EncryptionContextType = None + ): + account_id, region_name, key_id = self._parse_key_id(key_id, context) + key = self._get_kms_key(account_id, region_name, key_id) + # TODO Should also have a validation for the key being a symmetric one. + self._validate_key_for_encryption_decryption(context, key) + crypto_key = KmsCryptoKey("SYMMETRIC_DEFAULT") + return { + "KeyId": key.metadata["Arn"], + "Plaintext": crypto_key.key_material, + "CiphertextBlob": key.encrypt(crypto_key.key_material, encryption_context), + } + + @handler("GenerateDataKey", expand=False) + def generate_data_key( + self, context: RequestContext, request: GenerateDataKeyRequest + ) -> GenerateDataKeyResponse: + result = self._generate_data_key( + context, request.get("KeyId"), request.get("EncryptionContext") + ) + return GenerateDataKeyResponse(**result) + + @handler("GenerateDataKeyWithoutPlaintext", expand=False) + def generate_data_key_without_plaintext( + self, context: RequestContext, request: GenerateDataKeyWithoutPlaintextRequest + ) -> GenerateDataKeyWithoutPlaintextResponse: + result = self._generate_data_key( + context, request.get("KeyId"), request.get("EncryptionContext") + ) + result.pop("Plaintext") + return GenerateDataKeyWithoutPlaintextResponse(**result) + + @handler("GenerateMac", expand=False) + def generate_mac( + self, + context: RequestContext, + request: GenerateMacRequest, + ) -> GenerateMacResponse: + msg = request.get("Message") + self._validate_mac_msg_length(msg) + + account_id, region_name, key_id = self._parse_key_id(request["KeyId"], context) + key = self._get_kms_key(account_id, region_name, key_id) + + self._validate_key_for_generate_verify_mac(context, key) + + algorithm = request.get("MacAlgorithm") + self._validate_mac_algorithm(key, algorithm) + + mac = key.generate_mac(msg, algorithm) + + return GenerateMacResponse(Mac=mac, MacAlgorithm=algorithm, KeyId=key.metadata.get("Arn")) + + @handler("VerifyMac", expand=False) + def verify_mac( + self, + context: RequestContext, + request: VerifyMacRequest, + ) -> VerifyMacResponse: + msg = request.get("Message") + self._validate_mac_msg_length(msg) + + account_id, region_name, key_id = self._parse_key_id(request["KeyId"], context) + key = self._get_kms_key(account_id, region_name, key_id) + + self._validate_key_for_generate_verify_mac(context, key) + + algorithm = request.get("MacAlgorithm") + self._validate_mac_algorithm(key, algorithm) + + mac_valid = key.verify_mac(msg, request.get("Mac"), algorithm) + + return VerifyMacResponse( + KeyId=key.metadata.get("Arn"), MacValid=mac_valid, MacAlgorithm=algorithm + ) + + @handler("Sign", expand=False) + def sign(self, context: RequestContext, request: SignRequest) -> SignResponse: + account_id, region_name, key_id = self._parse_key_id(request["KeyId"], context) + key = self._get_kms_key(account_id, region_name, key_id) + + self._validate_key_for_sign_verify(context, key) + + # TODO Add constraints on KeySpec / 
SigningAlgorithm pairs: + # https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-ecc + + signing_algorithm = request.get("SigningAlgorithm") + signature = key.sign(request.get("Message"), request.get("MessageType"), signing_algorithm) + + result = { + "KeyId": key.metadata["Arn"], + "Signature": signature, + "SigningAlgorithm": signing_algorithm, + } + return SignResponse(**result) + + # Currently LocalStack only calculates SHA256 digests no matter what the signing algorithm is. + @handler("Verify", expand=False) + def verify(self, context: RequestContext, request: VerifyRequest) -> VerifyResponse: + account_id, region_name, key_id = self._parse_key_id(request["KeyId"], context) + key = self._get_kms_key(account_id, region_name, key_id) + + self._validate_key_for_sign_verify(context, key) + + signing_algorithm = request.get("SigningAlgorithm") + is_signature_valid = key.verify( + request.get("Message"), + request.get("MessageType"), + signing_algorithm, + request.get("Signature"), + ) + + result = { + "KeyId": key.metadata["Arn"], + "SignatureValid": is_signature_valid, + "SigningAlgorithm": signing_algorithm, + } + return VerifyResponse(**result) + + def re_encrypt( + self, + context: RequestContext, + ciphertext_blob: CiphertextType, + destination_key_id: KeyIdType, + source_encryption_context: EncryptionContextType = None, + source_key_id: KeyIdType = None, + destination_encryption_context: EncryptionContextType = None, + source_encryption_algorithm: EncryptionAlgorithmSpec = None, + destination_encryption_algorithm: EncryptionAlgorithmSpec = None, + grant_tokens: GrantTokenList = None, + dry_run: NullableBooleanType = None, + **kwargs, + ) -> ReEncryptResponse: + # TODO: when implementing, ensure cross-account support for source_key_id and destination_key_id + raise NotImplementedError + + def encrypt( + self, + context: RequestContext, + key_id: KeyIdType, + plaintext: PlaintextType, + encryption_context: EncryptionContextType = None, + grant_tokens: GrantTokenList = None, + encryption_algorithm: EncryptionAlgorithmSpec = None, + dry_run: NullableBooleanType = None, + **kwargs, + ) -> EncryptResponse: + # TODO add support for "dry_run" + account_id, region_name, key_id = self._parse_key_id(key_id, context) + key = self._get_kms_key(account_id, region_name, key_id) + self._validate_plaintext_length(plaintext) + self._validate_plaintext_key_type_based(plaintext, key, encryption_algorithm) + self._validate_key_for_encryption_decryption(context, key) + self._validate_key_state_not_pending_import(key) + + ciphertext_blob = key.encrypt(plaintext, encryption_context) + # For compatibility, we return EncryptionAlgorithm values expected from AWS. But LocalStack currently always + # encrypts with symmetric encryption no matter the key settings. + return EncryptResponse( + CiphertextBlob=ciphertext_blob, + KeyId=key.metadata.get("Arn"), + EncryptionAlgorithm=encryption_algorithm, + ) + + # TODO We currently do not even check encryption_context, while moto does. Should add the corresponding logic later. + def decrypt( + self, + context: RequestContext, + ciphertext_blob: CiphertextType, + encryption_context: EncryptionContextType = None, + grant_tokens: GrantTokenList = None, + key_id: KeyIdType = None, + encryption_algorithm: EncryptionAlgorithmSpec = None, + recipient: RecipientInfo = None, + dry_run: NullableBooleanType = None, + **kwargs, + ) -> DecryptResponse: + # In AWS, key_id is only supplied for data encrypted with an asymmetrical algorithm. 
For symmetrical + # encryption, key_id is taken from the encrypted data itself. + # Since LocalStack doesn't currently do asymmetrical encryption, there is a question of modeling here: we + # currently expect data to be only encrypted with symmetric encryption, so having key_id inside. It might not + # always be what customers expect. + if key_id: + account_id, region_name, key_id = self._parse_key_id(key_id, context) + try: + ciphertext = deserialize_ciphertext_blob(ciphertext_blob=ciphertext_blob) + except Exception: + ciphertext = None + pass + else: + try: + ciphertext = deserialize_ciphertext_blob(ciphertext_blob=ciphertext_blob) + account_id, region_name, key_id = self._parse_key_id(ciphertext.key_id, context) + except Exception: + raise InvalidCiphertextException( + "LocalStack is unable to deserialize the ciphertext blob. Perhaps the " + "blob didn't come from LocalStack" + ) + + key = self._get_kms_key(account_id, region_name, key_id) + if ciphertext and key.metadata["KeyId"] != ciphertext.key_id: + raise IncorrectKeyException( + "The key ID in the request does not identify a CMK that can perform this operation." + ) + + self._validate_key_for_encryption_decryption(context, key) + self._validate_key_state_not_pending_import(key) + + try: + # TODO: Extend the implementation to handle additional encryption/decryption scenarios + # beyond the current support for offline encryption and online decryption using RSA keys if key id exists in + # parameters, where `ciphertext_blob` will not be deserializable. + if self._is_rsa_spec(key.crypto_key.key_spec) and not ciphertext: + plaintext = key.decrypt_rsa(ciphertext_blob) + else: + plaintext = key.decrypt(ciphertext, encryption_context) + except InvalidTag: + raise InvalidCiphertextException() + # For compatibility, we return EncryptionAlgorithm values expected from AWS. But LocalStack currently always + # encrypts with symmetric encryption no matter the key settings. + # + # We return a key ARN instead of KeyId despite the name of the parameter, as this is what AWS does and states + # in its docs. + # TODO add support for "recipient" + # https://docs.aws.amazon.com/kms/latest/APIReference/API_Decrypt.html#API_Decrypt_RequestSyntax + # TODO add support for "dry_run" + return DecryptResponse( + KeyId=key.metadata.get("Arn"), + Plaintext=plaintext, + EncryptionAlgorithm=encryption_algorithm, + ) + + def get_parameters_for_import( + self, + context: RequestContext, + key_id: KeyIdType, + wrapping_algorithm: AlgorithmSpec, + wrapping_key_spec: WrappingKeySpec, + **kwargs, + ) -> GetParametersForImportResponse: + store = self._get_store(context.account_id, context.region) + # KeyId can potentially hold one of multiple different types of key identifiers. get_key finds a key no + # matter which type of id is used. + key_to_import_material_to = self._get_kms_key( + context.account_id, + context.region, + key_id, + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + key_arn = key_to_import_material_to.metadata["Arn"] + key_origin = key_to_import_material_to.metadata.get("Origin") + + if key_origin != "EXTERNAL": + raise UnsupportedOperationException( + f"{key_arn} origin is {key_origin} which is not valid for this operation." 
+ ) + + key_id = key_to_import_material_to.metadata["KeyId"] + + key = KmsKey(CreateKeyRequest(KeySpec=wrapping_key_spec)) + import_token = short_uid() + import_state = KeyImportState( + key_id=key_id, import_token=import_token, wrapping_algo=wrapping_algorithm, key=key + ) + store.imports[import_token] = import_state + # https://docs.aws.amazon.com/kms/latest/APIReference/API_GetParametersForImport.html + # "To import key material, you must use the public key and import token from the same response. These items + # are valid for 24 hours." + expiry_date = datetime.datetime.now() + datetime.timedelta(days=100) + return GetParametersForImportResponse( + KeyId=key_to_import_material_to.metadata["Arn"], + ImportToken=to_bytes(import_state.import_token), + PublicKey=import_state.key.crypto_key.public_key, + ParametersValidTo=expiry_date, + ) + + def import_key_material( + self, + context: RequestContext, + key_id: KeyIdType, + import_token: CiphertextType, + encrypted_key_material: CiphertextType, + valid_to: DateType = None, + expiration_model: ExpirationModelType = None, + **kwargs, + ) -> ImportKeyMaterialResponse: + store = self._get_store(context.account_id, context.region) + import_token = to_str(import_token) + import_state = store.imports.get(import_token) + if not import_state: + raise NotFoundException(f"Unable to find key import token '{import_token}'") + key_to_import_material_to = self._get_kms_key( + context.account_id, + context.region, + key_id, + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + + if import_state.wrapping_algo == AlgorithmSpec.RSAES_PKCS1_V1_5: + decrypt_padding = padding.PKCS1v15() + elif import_state.wrapping_algo == AlgorithmSpec.RSAES_OAEP_SHA_1: + decrypt_padding = padding.OAEP(padding.MGF1(hashes.SHA1()), hashes.SHA1(), None) + elif import_state.wrapping_algo == AlgorithmSpec.RSAES_OAEP_SHA_256: + decrypt_padding = padding.OAEP(padding.MGF1(hashes.SHA256()), hashes.SHA256(), None) + else: + raise KMSInvalidStateException( + f"Unsupported padding, requested wrapping algorithm:'{import_state.wrapping_algo}'" + ) + + # TODO check if there was already a key imported for this kms key + # if so, it has to be identical. 
We cannot change keys by reimporting after deletion/expiry + key_material = import_state.key.crypto_key.key.decrypt( + encrypted_key_material, decrypt_padding + ) + if expiration_model: + key_to_import_material_to.metadata["ExpirationModel"] = expiration_model + else: + key_to_import_material_to.metadata["ExpirationModel"] = ( + ExpirationModelType.KEY_MATERIAL_EXPIRES + ) + if ( + key_to_import_material_to.metadata["ExpirationModel"] + == ExpirationModelType.KEY_MATERIAL_EXPIRES + and not valid_to + ): + raise ValidationException( + "A validTo date must be set if the ExpirationModel is KEY_MATERIAL_EXPIRES" + ) + # TODO actually set validTo and make the key expire + key_to_import_material_to.metadata["Enabled"] = True + key_to_import_material_to.metadata["KeyState"] = KeyState.Enabled + key_to_import_material_to.crypto_key.load_key_material(key_material) + + return ImportKeyMaterialResponse() + + def delete_imported_key_material( + self, context: RequestContext, key_id: KeyIdType, **kwargs + ) -> None: + key = self._get_kms_key( + context.account_id, + context.region, + key_id, + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + key.crypto_key.key_material = None + key.metadata["Enabled"] = False + key.metadata["KeyState"] = KeyState.PendingImport + key.metadata.pop("ExpirationModel", None) + + @handler("CreateAlias", expand=False) + def create_alias(self, context: RequestContext, request: CreateAliasRequest) -> None: + store = self._get_store(context.account_id, context.region) + alias_name = request["AliasName"] + validate_alias_name(alias_name) + if alias_name in store.aliases: + alias_arn = store.aliases.get(alias_name).metadata["AliasArn"] + # AWS itself uses AliasArn instead of AliasName in this exception. + raise AlreadyExistsException(f"An alias with the name {alias_arn} already exists") + # KeyId can potentially hold one of multiple different types of key identifiers. Here we find a key no + # matter which type of id is used. + key = self._get_kms_key( + context.account_id, + context.region, + request.get("TargetKeyId"), + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + request["TargetKeyId"] = key.metadata.get("KeyId") + self._create_kms_alias(context.account_id, context.region, request) + + @handler("DeleteAlias", expand=False) + def delete_alias(self, context: RequestContext, request: DeleteAliasRequest) -> None: + # We do not check the state of the key, as, according to AWS docs, all key states, that are possible in + # LocalStack, are supported by this operation. + store = self._get_store(context.account_id, context.region) + alias_name = request["AliasName"] + if alias_name not in store.aliases: + alias_arn = kms_alias_arn(request["AliasName"], context.account_id, context.region) + # AWS itself uses AliasArn instead of AliasName in this exception. + raise NotFoundException(f"Alias {alias_arn} is not found") + store.aliases.pop(alias_name, None) + + @handler("UpdateAlias", expand=False) + def update_alias(self, context: RequestContext, request: UpdateAliasRequest) -> None: + # https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # "If the source KMS key is pending deletion, the command succeeds. If the destination KMS key is pending + # deletion, the command fails with error: KMSInvalidStateException : is pending deletion." + # Also disabled keys are accepted for this operation (see the table on that page). + # + # As such, we do not care about the state of the source key, but check the destination one. 
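
The import path above unwraps client-supplied key material with the negotiated RSA padding. The same handshake in isolation, here with OAEP-SHA256 (one of the three wrapping algorithms accepted above; key names invented):

    import os
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import padding, rsa

    wrapping_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    key_material = os.urandom(32)

    oaep = padding.OAEP(padding.MGF1(hashes.SHA256()), hashes.SHA256(), None)
    wrapped = wrapping_key.public_key().encrypt(key_material, oaep)   # client side
    assert wrapping_key.decrypt(wrapped, oaep) == key_material        # provider side
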
+ + alias_name = request["AliasName"] + # This API, per AWS docs, accepts only names, not ARNs. + validate_alias_name(alias_name) + alias = self._get_kms_alias(context.account_id, context.region, alias_name) + key_id = request["TargetKeyId"] + # Don't care about the key itself, just want to validate its state. + self._get_kms_key( + context.account_id, + context.region, + key_id, + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + alias.metadata["TargetKeyId"] = key_id + alias.update_date_of_last_update() + + @handler("ListAliases") + def list_aliases( + self, + context: RequestContext, + key_id: KeyIdType = None, + limit: LimitType = None, + marker: MarkerType = None, + **kwargs, + ) -> ListAliasesResponse: + store = self._get_store(context.account_id, context.region) + if key_id: + # KeyId can potentially hold one of multiple different types of key identifiers. Here we find a key no + # matter which type of id is used. + key = self._get_kms_key( + context.account_id, context.region, key_id, any_key_state_allowed=True + ) + key_id = key.metadata.get("KeyId") + + matching_aliases = [] + for alias in store.aliases.values(): + if key_id and alias.metadata["TargetKeyId"] != key_id: + continue + matching_aliases.append(alias.metadata) + aliases_list = PaginatedList(matching_aliases) + limit = limit or 100 + page, next_token = aliases_list.get_page( + lambda alias_metadata: alias_metadata.get("AliasName"), + next_token=marker, + page_size=limit, + ) + kwargs = {"NextMarker": next_token, "Truncated": True} if next_token else {} + return ListAliasesResponse(Aliases=page, **kwargs) + + @handler("GetKeyRotationStatus", expand=False) + def get_key_rotation_status( + self, context: RequestContext, request: GetKeyRotationStatusRequest + ) -> GetKeyRotationStatusResponse: + # https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # "If the KMS key has imported key material or is in a custom key store: UnsupportedOperationException." + # We do not model that here, though. + account_id, region_name, key_id = self._parse_key_id(request["KeyId"], context) + key = self._get_kms_key(account_id, region_name, key_id, any_key_state_allowed=True) + + response = GetKeyRotationStatusResponse( + KeyId=key_id, + KeyRotationEnabled=key.is_key_rotation_enabled, + NextRotationDate=key.next_rotation_date, + ) + if key.is_key_rotation_enabled: + response["RotationPeriodInDays"] = key.rotation_period_in_days + + return response + + @handler("DisableKeyRotation", expand=False) + def disable_key_rotation( + self, context: RequestContext, request: DisableKeyRotationRequest + ) -> None: + # https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # "If the KMS key has imported key material or is in a custom key store: UnsupportedOperationException." + # We do not model that here, though. + key = self._get_kms_key(context.account_id, context.region, request.get("KeyId")) + key.is_key_rotation_enabled = False + + @handler("EnableKeyRotation", expand=False) + def enable_key_rotation( + self, context: RequestContext, request: EnableKeyRotationRequest + ) -> None: + # https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # "If the KMS key has imported key material or is in a custom key store: UnsupportedOperationException." + # We do not model that here, though. 
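+        # Illustrative call (key ID is made up; RotationPeriodInDays is optional and,
+        # per AWS docs, must be between 90 and 2560 days when given):
+        #   kms.enable_key_rotation(
+        #       KeyId="1234abcd-12ab-34cd-56ef-1234567890ab", RotationPeriodInDays=180
+        #   )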
+ key = self._get_kms_key(context.account_id, context.region, request.get("KeyId")) + key.is_key_rotation_enabled = True + if request.get("RotationPeriodInDays"): + key.rotation_period_in_days = request.get("RotationPeriodInDays") + key._update_key_rotation_date() + + @handler("ListKeyPolicies", expand=False) + def list_key_policies( + self, context: RequestContext, request: ListKeyPoliciesRequest + ) -> ListKeyPoliciesResponse: + # We just care if the key exists. The response, by AWS specifications, is the same for all keys, as the only + # supported policy is "default": + # https://docs.aws.amazon.com/kms/latest/APIReference/API_ListKeyPolicies.html#API_ListKeyPolicies_ResponseElements + self._get_kms_key( + context.account_id, context.region, request.get("KeyId"), any_key_state_allowed=True + ) + return ListKeyPoliciesResponse(PolicyNames=["default"], Truncated=False) + + @handler("PutKeyPolicy", expand=False) + def put_key_policy(self, context: RequestContext, request: PutKeyPolicyRequest) -> None: + key = self._get_kms_key( + context.account_id, context.region, request.get("KeyId"), any_key_state_allowed=True + ) + if request.get("PolicyName") != "default": + raise UnsupportedOperationException("Only default policy is supported") + key.policy = request.get("Policy") + + @handler("GetKeyPolicy", expand=False) + def get_key_policy( + self, context: RequestContext, request: GetKeyPolicyRequest + ) -> GetKeyPolicyResponse: + key = self._get_kms_key( + context.account_id, context.region, request.get("KeyId"), any_key_state_allowed=True + ) + if request.get("PolicyName") != "default": + raise NotFoundException("No such policy exists") + return GetKeyPolicyResponse(Policy=key.policy) + + @handler("ListResourceTags", expand=False) + def list_resource_tags( + self, context: RequestContext, request: ListResourceTagsRequest + ) -> ListResourceTagsResponse: + key = self._get_kms_key( + context.account_id, context.region, request.get("KeyId"), any_key_state_allowed=True + ) + keys_list = PaginatedList( + [{"TagKey": tag_key, "TagValue": tag_value} for tag_key, tag_value in key.tags.items()] + ) + page, next_token = keys_list.get_page( + lambda tag: tag.get("TagKey"), + next_token=request.get("Marker"), + page_size=request.get("Limit", 50), + ) + kwargs = {"NextMarker": next_token, "Truncated": True} if next_token else {} + return ListResourceTagsResponse(Tags=page, **kwargs) + + @handler("RotateKeyOnDemand", expand=False) + # TODO: return the key rotations in the ListKeyRotations operation + def rotate_key_on_demand( + self, context: RequestContext, request: RotateKeyOnDemandRequest + ) -> RotateKeyOnDemandResponse: + account_id, region_name, key_id = self._parse_key_id(request["KeyId"], context) + key = self._get_kms_key(account_id, region_name, key_id) + + if key.metadata["KeySpec"] != KeySpec.SYMMETRIC_DEFAULT: + raise UnsupportedOperationException() + if key.metadata["Origin"] == OriginType.EXTERNAL: + raise UnsupportedOperationException( + f"{key.metadata['Arn']} origin is EXTERNAL which is not valid for this operation." 
+ ) + + key.rotate_key_on_demand() + + return RotateKeyOnDemandResponse( + KeyId=key_id, + ) + + @handler("TagResource", expand=False) + def tag_resource(self, context: RequestContext, request: TagResourceRequest) -> None: + key = self._get_kms_key( + context.account_id, + context.region, + request.get("KeyId"), + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + key.add_tags(request.get("Tags")) + + @handler("UntagResource", expand=False) + def untag_resource(self, context: RequestContext, request: UntagResourceRequest) -> None: + key = self._get_kms_key( + context.account_id, + context.region, + request.get("KeyId"), + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + if not request.get("TagKeys"): + return + for tag_key in request.get("TagKeys"): + # AWS doesn't seem to mind removal of a non-existent tag, so we do not raise any exception. + key.tags.pop(tag_key, None) + + def derive_shared_secret( + self, + context: RequestContext, + key_id: KeyIdType, + key_agreement_algorithm: KeyAgreementAlgorithmSpec, + public_key: PublicKeyType, + grant_tokens: GrantTokenList = None, + dry_run: NullableBooleanType = None, + recipient: RecipientInfo = None, + **kwargs, + ) -> DeriveSharedSecretResponse: + key = self._get_kms_key( + context.account_id, + context.region, + key_id, + enabled_key_allowed=True, + disabled_key_allowed=True, + ) + key_usage = key.metadata.get("KeyUsage") + key_origin = key.metadata.get("Origin") + + if key_usage != KeyUsageType.KEY_AGREEMENT: + raise InvalidKeyUsageException( + f"{key.metadata['Arn']} key usage is {key_usage} which is not valid for {context.operation.name}." + ) + + if key_agreement_algorithm != KeyAgreementAlgorithmSpec.ECDH: + raise ValidationException( + f"1 validation error detected: Value '{key_agreement_algorithm}' at 'keyAgreementAlgorithm' " + f"failed to satisfy constraint: Member must satisfy enum value set: [ECDH]" + ) + + # TODO: Verify the actual error raised + if key_origin not in [OriginType.AWS_KMS, OriginType.EXTERNAL]: + raise ValueError(f"Key origin: {key_origin} is not valid for {context.operation.name}.") + + shared_secret = key.derive_shared_secret(public_key) + return DeriveSharedSecretResponse( + KeyId=key_id, + SharedSecret=shared_secret, + KeyAgreementAlgorithm=key_agreement_algorithm, + KeyOrigin=key_origin, + ) + + def _validate_key_state_not_pending_import(self, key: KmsKey): + if key.metadata["KeyState"] == KeyState.PendingImport: + raise KMSInvalidStateException(f"{key.metadata['Arn']} is pending import.") + + def _validate_key_for_encryption_decryption(self, context: RequestContext, key: KmsKey): + key_usage = key.metadata["KeyUsage"] + if key_usage != "ENCRYPT_DECRYPT": + raise InvalidKeyUsageException( + f"{key.metadata['Arn']} key usage is {key_usage} which is not valid for {context.operation.name}." + ) + + def _validate_key_for_sign_verify(self, context: RequestContext, key: KmsKey): + key_usage = key.metadata["KeyUsage"] + if key_usage != "SIGN_VERIFY": + raise InvalidKeyUsageException( + f"{key.metadata['Arn']} key usage is {key_usage} which is not valid for {context.operation.name}." + ) + + def _validate_key_for_generate_verify_mac(self, context: RequestContext, key: KmsKey): + key_usage = key.metadata["KeyUsage"] + if key_usage != "GENERATE_VERIFY_MAC": + raise InvalidKeyUsageException( + f"{key.metadata['Arn']} key usage is {key_usage} which is not valid for {context.operation.name}." 
+            )
+
+    def _validate_mac_msg_length(self, msg: bytes):
+        if len(msg) > 4096:
+            raise ValidationException(
+                "1 validation error detected: Value at 'message' failed to satisfy constraint: "
+                "Member must have length less than or equal to 4096"
+            )
+
+    def _validate_mac_algorithm(self, key: KmsKey, algorithm: str):
+        if not hasattr(MacAlgorithmSpec, algorithm):
+            raise ValidationException(
+                f"1 validation error detected: Value '{algorithm}' at 'macAlgorithm' "
+                f"failed to satisfy constraint: Member must satisfy enum value set: "
+                f"[HMAC_SHA_384, HMAC_SHA_256, HMAC_SHA_224, HMAC_SHA_512]"
+            )
+
+        # An algorithm like HMAC_SHA_256 is only compatible with the matching key spec HMAC_256.
+        key_spec = key.metadata["KeySpec"]
+        parts = algorithm.split("_")
+        if len(parts) == 3 and f"{parts[0]}_{parts[2]}" != key_spec:
+            raise InvalidKeyUsageException(
+                f"Algorithm {algorithm} is incompatible with key spec {key_spec}."
+            )
+
+    def _validate_plaintext_length(self, plaintext: bytes):
+        if len(plaintext) > 4096:
+            raise ValidationException(
+                "1 validation error detected: Value at 'plaintext' failed to satisfy constraint: "
+                "Member must have length less than or equal to 4096"
+            )
+
+    def _validate_grant_request(self, data: Dict):
+        if "KeyId" not in data or "GranteePrincipal" not in data or "Operations" not in data:
+            raise ValidationError("Key ID, grantee principal and operations must be specified")
+
+        for operation in data["Operations"]:
+            if operation not in VALID_OPERATIONS:
+                raise ValidationError(
+                    f"Value {data['Operations']} at 'operations' failed to satisfy constraint: Member must satisfy"
+                    f" constraint: [Member must satisfy enum value set: {VALID_OPERATIONS}]"
+                )
+
+    def _validate_plaintext_key_type_based(
+        self,
+        plaintext: PlaintextType,
+        key: KmsKey,
+        encryption_algorithm: EncryptionAlgorithmSpec = None,
+    ):
+        # Maximum plaintext sizes per RSA key spec and encryption algorithm, extracted from the AWS boto3 docs:
+        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms/client/encrypt.html
+        rsa_max_sizes = {
+            (KeySpec.RSA_2048, EncryptionAlgorithmSpec.RSAES_OAEP_SHA_1): 214,
+            (KeySpec.RSA_2048, EncryptionAlgorithmSpec.RSAES_OAEP_SHA_256): 190,
+            (KeySpec.RSA_3072, EncryptionAlgorithmSpec.RSAES_OAEP_SHA_1): 342,
+            (KeySpec.RSA_3072, EncryptionAlgorithmSpec.RSAES_OAEP_SHA_256): 318,
+            (KeySpec.RSA_4096, EncryptionAlgorithmSpec.RSAES_OAEP_SHA_1): 470,
+            (KeySpec.RSA_4096, EncryptionAlgorithmSpec.RSAES_OAEP_SHA_256): 446,
+        }
+        max_size_bytes = rsa_max_sizes.get(
+            (key.metadata["KeySpec"], encryption_algorithm), 4096  # 4096 is the general maximum
+        )
+
+        if len(plaintext) > max_size_bytes:
+            raise ValidationException(
+                f"Algorithm {encryption_algorithm} and key spec {key.metadata['KeySpec']} cannot encrypt data larger than {max_size_bytes} bytes."
+            )
+
+
+# ---------------
+# UTIL FUNCTIONS
+# ---------------
+
+# Different AWS services have internal integrations with KMS: some create keys that are used to encrypt/decrypt
+# customers' data. Such keys can't be created from the outside for security reasons, so AWS services use internal
+# APIs for that. The functions here are supposed to be used by other LocalStack services to have similar
+# integrations with KMS in LocalStack. As such, they are supposed to behave like proper APIs (as in error and
+# security handling), just with more capabilities than the public ones.
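+# A minimal usage sketch (the caller and key creation are hypothetical; only `set_key_managed`
+# below is real):
+#
+#   key_id = some_service_create_default_key(account_id, region_name)  # hypothetical
+#   set_key_managed(key_id, account_id, region_name)
+#
+# after which DescribeKey reports the key with "KeyManager": "AWS".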
+
+
+def set_key_managed(key_id: str, account_id: str, region_name: str) -> None:
+    key = KmsProvider._get_kms_key(account_id, region_name, key_id)
+    key.metadata["KeyManager"] = "AWS"
diff --git a/localstack/services/stepfunctions/asl/component/common/retry/__init__.py b/localstack-core/localstack/services/kms/resource_providers/__init__.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/retry/__init__.py
rename to localstack-core/localstack/services/kms/resource_providers/__init__.py
diff --git a/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias.py b/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias.py
new file mode 100644
index 0000000000000..81ecef65ca520
--- /dev/null
+++ b/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias.py
@@ -0,0 +1,105 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+    OperationStatus,
+    ProgressEvent,
+    ResourceProvider,
+    ResourceRequest,
+)
+
+
+class KMSAliasProperties(TypedDict):
+    AliasName: Optional[str]
+    TargetKeyId: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class KMSAliasProvider(ResourceProvider[KMSAliasProperties]):
+    TYPE = "AWS::KMS::Alias"  # Autogenerated. Don't change
+    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[KMSAliasProperties],
+    ) -> ProgressEvent[KMSAliasProperties]:
+        """
+        Create a new resource.
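+
+        Illustrative CloudFormation snippet for this resource type (resource and key names
+        are examples only):
+
+            ExampleAlias:
+              Type: AWS::KMS::Alias
+              Properties:
+                AliasName: alias/example
+                TargetKeyId: !Ref ExampleKey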
+ + Primary identifier fields: + - /properties/AliasName + + Required properties: + - AliasName + - TargetKeyId + + Create-only properties: + - /properties/AliasName + + + + IAM permissions required: + - kms:CreateAlias + + """ + model = request.desired_state + kms = request.aws_client_factory.kms + + kms.create_alias(AliasName=model["AliasName"], TargetKeyId=model["TargetKeyId"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[KMSAliasProperties], + ) -> ProgressEvent[KMSAliasProperties]: + """ + Fetch resource information + + IAM permissions required: + - kms:ListAliases + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[KMSAliasProperties], + ) -> ProgressEvent[KMSAliasProperties]: + """ + Delete a resource + + IAM permissions required: + - kms:DeleteAlias + """ + model = request.desired_state + kms = request.aws_client_factory.kms + + kms.delete_alias(AliasName=model["AliasName"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[KMSAliasProperties], + ) -> ProgressEvent[KMSAliasProperties]: + """ + Update a resource + + IAM permissions required: + - kms:UpdateAlias + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias.schema.json b/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias.schema.json new file mode 100644 index 0000000000000..e3eb5a1591f1d --- /dev/null +++ b/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias.schema.json @@ -0,0 +1,61 @@ +{ + "typeName": "AWS::KMS::Alias", + "description": "The AWS::KMS::Alias resource specifies a display name for an AWS KMS key in AWS Key Management Service (AWS KMS). You can use an alias to identify an AWS KMS key in cryptographic operations.", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "properties": { + "AliasName": { + "description": "Specifies the alias name. This value must begin with alias/ followed by a name, such as alias/ExampleAlias. The alias name cannot begin with alias/aws/. The alias/aws/ prefix is reserved for AWS managed keys.", + "type": "string", + "pattern": "^(alias/)[a-zA-Z0-9:/_-]+$", + "minLength": 1, + "maxLength": 256 + }, + "TargetKeyId": { + "description": "Identifies the AWS KMS key to which the alias refers. Specify the key ID or the Amazon Resource Name (ARN) of the AWS KMS key. You cannot specify another alias. 
For help finding the key ID and ARN, see Finding the Key ID and ARN in the AWS Key Management Service Developer Guide.", + "type": "string", + "minLength": 1, + "maxLength": 256 + } + }, + "additionalProperties": false, + "required": [ + "AliasName", + "TargetKeyId" + ], + "createOnlyProperties": [ + "/properties/AliasName" + ], + "primaryIdentifier": [ + "/properties/AliasName" + ], + "tagging": { + "taggable": false + }, + "handlers": { + "create": { + "permissions": [ + "kms:CreateAlias" + ] + }, + "read": { + "permissions": [ + "kms:ListAliases" + ] + }, + "update": { + "permissions": [ + "kms:UpdateAlias" + ] + }, + "delete": { + "permissions": [ + "kms:DeleteAlias" + ] + }, + "list": { + "permissions": [ + "kms:ListAliases" + ] + } + } +} diff --git a/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias_plugin.py b/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias_plugin.py new file mode 100644 index 0000000000000..172d4915576ce --- /dev/null +++ b/localstack-core/localstack/services/kms/resource_providers/aws_kms_alias_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class KMSAliasProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::KMS::Alias" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.kms.resource_providers.aws_kms_alias import KMSAliasProvider + + self.factory = KMSAliasProvider diff --git a/localstack-core/localstack/services/kms/resource_providers/aws_kms_key.py b/localstack-core/localstack/services/kms/resource_providers/aws_kms_key.py new file mode 100644 index 0000000000000..6228292ed2953 --- /dev/null +++ b/localstack-core/localstack/services/kms/resource_providers/aws_kms_key.py @@ -0,0 +1,190 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class KMSKeyProperties(TypedDict): + KeyPolicy: Optional[dict | str] + Arn: Optional[str] + Description: Optional[str] + EnableKeyRotation: Optional[bool] + Enabled: Optional[bool] + KeyId: Optional[str] + KeySpec: Optional[str] + KeyUsage: Optional[str] + MultiRegion: Optional[bool] + PendingWindowInDays: Optional[int] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class KMSKeyProvider(ResourceProvider[KMSKeyProperties]): + TYPE = "AWS::KMS::Key" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[KMSKeyProperties], + ) -> ProgressEvent[KMSKeyProperties]: + """ + Create a new resource. 
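+
+        Illustrative CloudFormation snippet for this resource type (resource name and policy
+        are examples only):
+
+            ExampleKey:
+              Type: AWS::KMS::Key
+              Properties:
+                Description: example symmetric key
+                EnableKeyRotation: true
+                KeyPolicy:
+                  Version: "2012-10-17"
+                  Statement:
+                    - Sid: AllowRoot
+                      Effect: Allow
+                      Principal:
+                        AWS: !Sub "arn:${AWS::Partition}:iam::${AWS::AccountId}:root"
+                      Action: "kms:*"
+                      Resource: "*"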
+ + Primary identifier fields: + - /properties/KeyId + + Required properties: + - KeyPolicy + + + + Read-only properties: + - /properties/Arn + - /properties/KeyId + + IAM permissions required: + - kms:CreateKey + - kms:EnableKeyRotation + - kms:DisableKey + - kms:TagResource + + """ + model = request.desired_state + kms = request.aws_client_factory.kms + + params = util.select_attributes(model, ["Description", "KeySpec", "KeyUsage"]) + + if model.get("KeyPolicy"): + params["Policy"] = json.dumps(model["KeyPolicy"]) + + if model.get("Tags"): + params["Tags"] = [ + {"TagKey": tag["Key"], "TagValue": tag["Value"]} for tag in model.get("Tags", []) + ] + response = kms.create_key(**params) + model["KeyId"] = response["KeyMetadata"]["KeyId"] + model["Arn"] = response["KeyMetadata"]["Arn"] + + # key is created but some fields map to separate api calls + if model.get("EnableKeyRotation", False): + kms.enable_key_rotation(KeyId=model["KeyId"]) + else: + kms.disable_key_rotation(KeyId=model["KeyId"]) + + if model.get("Enabled", True): + kms.enable_key(KeyId=model["KeyId"]) + else: + kms.disable_key(KeyId=model["KeyId"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[KMSKeyProperties], + ) -> ProgressEvent[KMSKeyProperties]: + """ + Fetch resource information + + IAM permissions required: + - kms:DescribeKey + - kms:GetKeyPolicy + - kms:GetKeyRotationStatus + - kms:ListResourceTags + """ + kms = request.aws_client_factory.kms + key_id = request.desired_state["KeyId"] + + key = kms.describe_key(KeyId=key_id) + + policy = kms.get_key_policy(KeyId=key_id, PolicyName="default") + rotation_status = kms.get_key_rotation_status(KeyId=key_id) + tags = kms.list_resource_tags(KeyId=key_id) + + model = util.select_attributes(key["KeyMetadata"], self.SCHEMA["properties"]) + model["KeyPolicy"] = json.loads(policy["Policy"]) + model["EnableKeyRotation"] = rotation_status["KeyRotationEnabled"] + # Super consistent api... 
KMS api does return TagKey/TagValue, but the CC api transforms it to Key/Value
+        # It might be worth checking whether there are more APIs for which CC does the same transformation.
+        model["Tags"] = [{"Key": tag["TagKey"], "Value": tag["TagValue"]} for tag in tags["Tags"]]
+
+        if "Origin" not in model:
+            model["Origin"] = "AWS_KMS"
+
+        return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model)
+
+    def delete(
+        self,
+        request: ResourceRequest[KMSKeyProperties],
+    ) -> ProgressEvent[KMSKeyProperties]:
+        """
+        Delete a resource
+
+        IAM permissions required:
+        - kms:DescribeKey
+        - kms:ScheduleKeyDeletion
+        """
+        model = request.desired_state
+        kms = request.aws_client_factory.kms
+
+        kms.schedule_key_deletion(KeyId=model["KeyId"])
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def update(
+        self,
+        request: ResourceRequest[KMSKeyProperties],
+    ) -> ProgressEvent[KMSKeyProperties]:
+        """
+        Update a resource
+
+        IAM permissions required:
+        - kms:DescribeKey
+        - kms:DisableKey
+        - kms:DisableKeyRotation
+        - kms:EnableKey
+        - kms:EnableKeyRotation
+        - kms:PutKeyPolicy
+        - kms:TagResource
+        - kms:UntagResource
+        - kms:UpdateKeyDescription
+        """
+        raise NotImplementedError
+
+    def list(self, request: ResourceRequest[KMSKeyProperties]) -> ProgressEvent[KMSKeyProperties]:
+        """
+        List a resource
+
+        IAM permissions required:
+        - kms:ListKeys
+        - kms:DescribeKey
+        """
+        kms = request.aws_client_factory.kms
+
+        response = kms.list_keys(Limit=10)
+        models = [{"KeyId": key["KeyId"]} for key in response["Keys"]]
+        return ProgressEvent(status=OperationStatus.SUCCESS, resource_models=models)
diff --git a/localstack-core/localstack/services/kms/resource_providers/aws_kms_key.schema.json b/localstack-core/localstack/services/kms/resource_providers/aws_kms_key.schema.json
new file mode 100644
index 0000000000000..782d35fa134ac
--- /dev/null
+++ b/localstack-core/localstack/services/kms/resource_providers/aws_kms_key.schema.json
@@ -0,0 +1,172 @@
+{
+  "typeName": "AWS::KMS::Key",
+  "description": "The AWS::KMS::Key resource specifies an AWS KMS key in AWS Key Management Service (AWS KMS). Authorized users can use the AWS KMS key to encrypt and decrypt small amounts of data (up to 4096 bytes), but they are more commonly used to generate data keys. You can also use AWS KMS keys to encrypt data stored in AWS services that are integrated with AWS KMS or within their applications.",
+  "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-kms",
+  "definitions": {
+    "Tag": {
+      "description": "A key-value pair to associate with a resource.",
+      "type": "object",
+      "properties": {
+        "Key": {
+          "type": "string",
+          "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.",
+          "minLength": 1,
+          "maxLength": 128
+        },
+        "Value": {
+          "type": "string",
+          "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:.
You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.",
+          "minLength": 0,
+          "maxLength": 256
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "Key",
+        "Value"
+      ]
+    }
+  },
+  "properties": {
+    "Description": {
+      "description": "A description of the AWS KMS key. Use a description that helps you to distinguish this AWS KMS key from others in the account, such as its intended use.",
+      "type": "string",
+      "minLength": 0,
+      "maxLength": 8192
+    },
+    "Enabled": {
+      "description": "Specifies whether the AWS KMS key is enabled. Disabled AWS KMS keys cannot be used in cryptographic operations.",
+      "type": "boolean"
+    },
+    "EnableKeyRotation": {
+      "description": "Enables automatic rotation of the key material for the specified AWS KMS key. By default, automatic key rotation is not enabled.",
+      "type": "boolean"
+    },
+    "KeyPolicy": {
+      "description": "The key policy that authorizes use of the AWS KMS key. The key policy must observe the following rules.",
+      "type": [
+        "object",
+        "string"
+      ]
+    },
+    "KeyUsage": {
+      "description": "Determines the cryptographic operations for which you can use the AWS KMS key. The default value is ENCRYPT_DECRYPT. This property is required only for asymmetric AWS KMS keys. You can't change the KeyUsage value after the AWS KMS key is created.",
+      "type": "string",
+      "default": "ENCRYPT_DECRYPT",
+      "enum": [
+        "ENCRYPT_DECRYPT",
+        "SIGN_VERIFY",
+        "GENERATE_VERIFY_MAC"
+      ]
+    },
+    "KeySpec": {
+      "description": "Specifies the type of AWS KMS key to create. The default value is SYMMETRIC_DEFAULT. This property is required only for asymmetric AWS KMS keys. You can't change the KeySpec value after the AWS KMS key is created.",
+      "type": "string",
+      "default": "SYMMETRIC_DEFAULT",
+      "enum": [
+        "SYMMETRIC_DEFAULT",
+        "RSA_2048",
+        "RSA_3072",
+        "RSA_4096",
+        "ECC_NIST_P256",
+        "ECC_NIST_P384",
+        "ECC_NIST_P521",
+        "ECC_SECG_P256K1",
+        "HMAC_224",
+        "HMAC_256",
+        "HMAC_384",
+        "HMAC_512",
+        "SM2"
+      ]
+    },
+    "MultiRegion": {
+      "description": "Specifies whether the AWS KMS key should be Multi-Region. You can't change the MultiRegion value after the AWS KMS key is created.",
+      "type": "boolean",
+      "default": false
+    },
+    "PendingWindowInDays": {
+      "description": "Specifies the number of days in the waiting period before AWS KMS deletes an AWS KMS key that has been removed from a CloudFormation stack. Enter a value between 7 and 30 days.
The default value is 30 days.", + "type": "integer", + "minimum": 7, + "maximum": 30 + }, + "Tags": { + "description": "An array of key-value pairs to apply to this resource.", + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Arn": { + "type": "string" + }, + "KeyId": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "KeyPolicy" + ], + "readOnlyProperties": [ + "/properties/Arn", + "/properties/KeyId" + ], + "primaryIdentifier": [ + "/properties/KeyId" + ], + "writeOnlyProperties": [ + "/properties/PendingWindowInDays" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": false + }, + "handlers": { + "create": { + "permissions": [ + "kms:CreateKey", + "kms:EnableKeyRotation", + "kms:DisableKey", + "kms:TagResource" + ] + }, + "read": { + "permissions": [ + "kms:DescribeKey", + "kms:GetKeyPolicy", + "kms:GetKeyRotationStatus", + "kms:ListResourceTags" + ] + }, + "update": { + "permissions": [ + "kms:DescribeKey", + "kms:DisableKey", + "kms:DisableKeyRotation", + "kms:EnableKey", + "kms:EnableKeyRotation", + "kms:PutKeyPolicy", + "kms:TagResource", + "kms:UntagResource", + "kms:UpdateKeyDescription" + ] + }, + "delete": { + "permissions": [ + "kms:DescribeKey", + "kms:ScheduleKeyDeletion" + ] + }, + "list": { + "permissions": [ + "kms:ListKeys", + "kms:DescribeKey" + ] + } + } +} diff --git a/localstack-core/localstack/services/kms/resource_providers/aws_kms_key_plugin.py b/localstack-core/localstack/services/kms/resource_providers/aws_kms_key_plugin.py new file mode 100644 index 0000000000000..a03c3c714af8c --- /dev/null +++ b/localstack-core/localstack/services/kms/resource_providers/aws_kms_key_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class KMSKeyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::KMS::Key" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.kms.resource_providers.aws_kms_key import KMSKeyProvider + + self.factory = KMSKeyProvider diff --git a/localstack-core/localstack/services/kms/utils.py b/localstack-core/localstack/services/kms/utils.py new file mode 100644 index 0000000000000..ae9ff4580caa1 --- /dev/null +++ b/localstack-core/localstack/services/kms/utils.py @@ -0,0 +1,87 @@ +import re +from typing import Callable, Tuple, TypeVar + +from localstack.aws.api.kms import DryRunOperationException, Tag, TagException +from localstack.services.kms.exceptions import ValidationException +from localstack.utils.aws.arns import ARN_PARTITION_REGEX + +T = TypeVar("T") + +KMS_KEY_ARN_PATTERN = re.compile( + rf"{ARN_PARTITION_REGEX}:kms:(?P[^:]+):(?P\d{{12}}):key\/(?P[^:]+)$" +) + + +def get_hash_algorithm(signing_algorithm: str) -> str: + """ + Return the hashing algorithm for a given signing algorithm. + eg. "RSASSA_PSS_SHA_512" -> "SHA_512" + """ + return "_".join(signing_algorithm.rsplit(sep="_", maxsplit=-2)[-2:]) + + +def parse_key_arn(key_arn: str) -> Tuple[str, str, str]: + """ + Parse a valid KMS key arn into its constituents. 
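+
+    Illustrative example (IDs are made up):
+        "arn:aws:kms:eu-central-1:000000000000:key/11111111-2222-3333-4444-555555555555"
+        -> ("000000000000", "eu-central-1", "11111111-2222-3333-4444-555555555555")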
+ + :param key_arn: KMS key ARN + :return: Tuple of account ID, region name and key ID + """ + return KMS_KEY_ARN_PATTERN.match(key_arn).group("account_id", "region_name", "key_id") + + +def is_valid_key_arn(key_arn: str) -> bool: + """ + Check if a given string is a valid KMS key ARN. + """ + return KMS_KEY_ARN_PATTERN.match(key_arn) is not None + + +def validate_alias_name(alias_name: str) -> None: + if not alias_name.startswith("alias/"): + raise ValidationException( + 'Alias must start with the prefix "alias/". Please see ' + "https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html" + ) + + +def validate_tag(tag_position: int, tag: Tag) -> None: + tag_key = tag.get("TagKey") + tag_value = tag.get("TagValue") + + if len(tag_key) > 128: + raise ValidationException( + f"1 validation error detected: Value '{tag_key}' at 'tags.{tag_position}.member.tagKey' failed to satisfy constraint: Member must have length less than or equal to 128" + ) + if len(tag_value) > 256: + raise ValidationException( + f"1 validation error detected: Value '{tag_value}' at 'tags.{tag_position}.member.tagValue' failed to satisfy constraint: Member must have length less than or equal to 256" + ) + + if tag_key.lower().startswith("aws:"): + raise TagException("Tags beginning with aws: are reserved") + + +def execute_dry_run_capable(func: Callable[..., T], dry_run: bool, *args, **kwargs) -> T: + """ + Executes a function unless dry run mode is enabled. + + If ``dry_run`` is ``True``, the function is not executed and a + ``DryRunOperationException`` is raised. Otherwise, the provided + function is called with the given positional and keyword arguments. + + :param func: The function to be executed. + :type func: Callable[..., T] + :param dry_run: Flag indicating whether the execution is a dry run. + :type dry_run: bool + :param args: Positional arguments to pass to the function. + :param kwargs: Keyword arguments to pass to the function. + :returns: The result of the function call if ``dry_run`` is ``False``. + :rtype: T + :raises DryRunOperationException: If ``dry_run`` is ``True``. + """ + if dry_run: + raise DryRunOperationException( + "The request would have succeeded, but the DryRun option is set." 
+ ) + return func(*args, **kwargs) diff --git a/localstack/services/stepfunctions/asl/component/intrinsic/__init__.py b/localstack-core/localstack/services/lambda_/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/intrinsic/__init__.py rename to localstack-core/localstack/services/lambda_/__init__.py diff --git a/localstack-core/localstack/services/lambda_/analytics.py b/localstack-core/localstack/services/lambda_/analytics.py new file mode 100644 index 0000000000000..4545f23a7139e --- /dev/null +++ b/localstack-core/localstack/services/lambda_/analytics.py @@ -0,0 +1,53 @@ +from enum import StrEnum + +from localstack.utils.analytics.metrics import Counter + +NAMESPACE = "lambda" + +hotreload_counter = Counter(namespace=NAMESPACE, name="hotreload", labels=["operation"]) + +function_counter = Counter( + namespace=NAMESPACE, + name="function", + labels=[ + "operation", + "status", + "runtime", + "package_type", + # only for operation "invoke" + "invocation_type", + ], +) + + +class FunctionOperation(StrEnum): + invoke = "invoke" + create = "create" + + +class FunctionStatus(StrEnum): + success = "success" + zero_reserved_concurrency_error = "zero_reserved_concurrency_error" + event_age_exceeded_error = "event_age_exceeded_error" + throttle_error = "throttle_error" + system_error = "system_error" + unhandled_state_error = "unhandled_state_error" + failed_state_error = "failed_state_error" + pending_state_error = "pending_state_error" + invalid_payload_error = "invalid_payload_error" + invocation_error = "invocation_error" + + +esm_counter = Counter(namespace=NAMESPACE, name="esm", labels=["source", "status"]) + + +class EsmExecutionStatus(StrEnum): + success = "success" + partial_batch_failure_error = "partial_batch_failure_error" + target_invocation_error = "target_invocation_error" + unhandled_error = "unhandled_error" + source_poller_error = "source_poller_error" + # TODO: Add tracking for filter error. Options: + # a) raise filter exception and track it in the esm_worker + # b) somehow add tracking in the individual pollers + filter_error = "filter_error" diff --git a/localstack-core/localstack/services/lambda_/api_utils.py b/localstack-core/localstack/services/lambda_/api_utils.py new file mode 100644 index 0000000000000..bc573c5e019f6 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/api_utils.py @@ -0,0 +1,764 @@ +"""Utilities related to Lambda API operations such as ARN handling, validations, and output formatting. +Everything related to behavior or implicit functionality goes into `lambda_utils.py`. 
+""" + +import datetime +import random +import re +import string +from typing import TYPE_CHECKING, Any, Optional, Tuple + +from localstack.aws.api import CommonServiceException, RequestContext +from localstack.aws.api import lambda_ as api_spec +from localstack.aws.api.lambda_ import ( + AliasConfiguration, + Architecture, + DeadLetterConfig, + EnvironmentResponse, + EphemeralStorage, + FunctionConfiguration, + FunctionUrlAuthType, + ImageConfig, + ImageConfigResponse, + InvalidParameterValueException, + LayerVersionContentOutput, + PublishLayerVersionResponse, + ResourceNotFoundException, + TracingConfig, + VpcConfigResponse, +) +from localstack.services.lambda_.invocation import AccessDeniedException +from localstack.services.lambda_.runtimes import ALL_RUNTIMES, VALID_LAYER_RUNTIMES, VALID_RUNTIMES +from localstack.utils.aws.arns import ARN_PARTITION_REGEX, get_partition +from localstack.utils.collections import merge_recursive + +if TYPE_CHECKING: + from localstack.services.lambda_.invocation.lambda_models import ( + CodeSigningConfig, + Function, + FunctionUrlConfig, + FunctionVersion, + LayerVersion, + VersionAlias, + ) + from localstack.services.lambda_.invocation.models import LambdaStore + + +# Pattern for a full (both with and without qualifier) lambda function ARN +FULL_FN_ARN_PATTERN = re.compile( + rf"{ARN_PARTITION_REGEX}:lambda:(?P[^:]+):(?P\d{{12}}):function:(?P[^:]+)(:(?P.*))?$" +) + +# Pattern for a full (both with and without qualifier) lambda layer ARN +# TODO: It looks like they added `|(arn:[a-zA-Z0-9-]+:lambda:::awslayer:[a-zA-Z0-9-_]+` in 2024-11 +LAYER_VERSION_ARN_PATTERN = re.compile( + rf"{ARN_PARTITION_REGEX}:lambda:(?P[^:]+):(?P\d{{12}}):layer:(?P[^:]+)(:(?P\d+))?$" +) + + +# Pattern for a valid destination arn +DESTINATION_ARN_PATTERN = re.compile( + r"^$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\-])+:([a-z]{2}(-gov)?-[a-z]+-\d{1})?:(\d{12})?:(.*)" +) + +AWS_FUNCTION_NAME_REGEX = re.compile( + "^(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_.]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" +) + +# Pattern for extracting various attributes from a full or partial ARN or just a function name. +FUNCTION_NAME_REGEX = re.compile( + r"(arn:(aws[a-zA-Z-]*):lambda:)?((?P[a-z]{2}(-gov)?-[a-z]+-\d{1}):)?(?:(?P\d{12}):)?(function:)?(?P[a-zA-Z0-9-_\.]+)(:(?P\$LATEST|[a-zA-Z0-9-_]+))?" +) # also length 1-170 incl. 
+# Pattern for a lambda function handler
+HANDLER_REGEX = re.compile(r"[^\s]+")
+# Pattern for a valid kms key
+KMS_KEY_ARN_REGEX = re.compile(r"(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()")
+# Pattern for a valid IAM role assumed by a lambda function
+ROLE_REGEX = re.compile(r"arn:(aws[a-zA-Z-]*)?:iam::\d{12}:role/?[a-zA-Z_0-9+=,.@\-_/]+")
+# Pattern for a valid AWS account
+AWS_ACCOUNT_REGEX = re.compile(r"\d{12}")
+# Pattern for a signing job arn
+SIGNING_JOB_ARN_REGEX = re.compile(
+    r"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\-])+:([a-z]{2}(-gov)?-[a-z]+-\d{1})?:(\d{12})?:(.*)"
+)
+# Pattern for a signing profile version arn
+SIGNING_PROFILE_VERSION_ARN_REGEX = re.compile(
+    r"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\-])+:([a-z]{2}(-gov)?-[a-z]+-\d{1})?:(\d{12})?:(.*)"
+)
+# Combined pattern for alias and version based on AWS error using "(|[a-zA-Z0-9$_-]+)"
+QUALIFIER_REGEX = re.compile(r"(^[a-zA-Z0-9$_-]+$)")
+# Pattern for a version qualifier
+VERSION_REGEX = re.compile(r"^[0-9]+$")
+# Pattern for an alias qualifier
+# Rules: https://docs.aws.amazon.com/lambda/latest/dg/API_CreateAlias.html#SSS-CreateAlias-request-Name
+# The original regex from AWS misses ^ and $ in the second regex, which allowed for partial substring matches
+ALIAS_REGEX = re.compile(r"(?!^[0-9]+$)(^[a-zA-Z0-9-_]+$)")
+# Permission statement id
+STATEMENT_ID_REGEX = re.compile(r"^[a-zA-Z0-9-_]+$")
+# Pattern for a valid SubnetId
+SUBNET_ID_REGEX = re.compile(r"^subnet-[0-9a-z]*$")
+
+
+URL_CHAR_SET = string.ascii_lowercase + string.digits
+# Date format as returned by the lambda service
+LAMBDA_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f+0000"
+
+# An unordered list of all Lambda CPU architectures supported by LocalStack.
+ARCHITECTURES = [Architecture.arm64, Architecture.x86_64]
+
+# ARN pattern returned in validation exception messages.
+# Some exceptions from AWS return a '\.' in the function name regex
+# pattern; therefore we can substitute this value in when appropriate.
+ARN_NAME_PATTERN_VALIDATION_TEMPLATE = "(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{{2}}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{{1}}:)?(\\d{{12}}:)?(function:)?([a-zA-Z0-9-_{0}]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?"
+
+# AWS response when invalid ARNs are used in Tag operations.
+TAGGABLE_RESOURCE_ARN_PATTERN = "arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|layer:([a-zA-Z0-9-_]+)|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" + + +def validate_function_name(function_name_or_arn: str, operation_type: str): + function_name, *_ = function_locators_from_arn(function_name_or_arn) + arn_name_pattern = ARN_NAME_PATTERN_VALIDATION_TEMPLATE.format("") + max_length = 170 + + match operation_type: + case "GetFunction" | "Invoke": + arn_name_pattern = ARN_NAME_PATTERN_VALIDATION_TEMPLATE.format(r"\.") + case "CreateFunction" if function_name == function_name_or_arn: # only a function name + max_length = 64 + case "CreateFunction" | "DeleteFunction": + max_length = 140 + + validations = [] + if len(function_name_or_arn) > max_length: + constraint = f"Member must have length less than or equal to {max_length}" + validation_msg = f"Value '{function_name_or_arn}' at 'functionName' failed to satisfy constraint: {constraint}" + validations.append(validation_msg) + + if not AWS_FUNCTION_NAME_REGEX.match(function_name_or_arn) or not function_name: + constraint = f"Member must satisfy regular expression pattern: {arn_name_pattern}" + validation_msg = f"Value '{function_name_or_arn}' at 'functionName' failed to satisfy constraint: {constraint}" + validations.append(validation_msg) + + return validations + + +def validate_qualifier(qualifier: str): + validations = [] + + if len(qualifier) > 128: + constraint = "Member must have length less than or equal to 128" + validation_msg = ( + f"Value '{qualifier}' at 'qualifier' failed to satisfy constraint: {constraint}" + ) + validations.append(validation_msg) + + if not QUALIFIER_REGEX.match(qualifier): + constraint = "Member must satisfy regular expression pattern: (|[a-zA-Z0-9$_-]+)" + validation_msg = ( + f"Value '{qualifier}' at 'qualifier' failed to satisfy constraint: {constraint}" + ) + validations.append(validation_msg) + + return validations + + +def construct_validation_exception_message(validation_errors): + if validation_errors: + return f"{len(validation_errors)} validation error{'s' if len(validation_errors) > 1 else ''} detected: {'; '.join(validation_errors)}" + + return None + + +def map_function_url_config(model: "FunctionUrlConfig") -> api_spec.FunctionUrlConfig: + return api_spec.FunctionUrlConfig( + FunctionUrl=model.url, + FunctionArn=model.function_arn, + CreationTime=model.creation_time, + LastModifiedTime=model.last_modified_time, + Cors=model.cors, + AuthType=model.auth_type, + InvokeMode=model.invoke_mode, + ) + + +def map_csc(model: "CodeSigningConfig") -> api_spec.CodeSigningConfig: + return api_spec.CodeSigningConfig( + CodeSigningConfigId=model.csc_id, + CodeSigningConfigArn=model.arn, + Description=model.description, + AllowedPublishers=model.allowed_publishers, + CodeSigningPolicies=model.policies, + LastModified=model.last_modified, + ) + + +def get_config_for_url(store: "LambdaStore", url_id: str) -> "Optional[FunctionUrlConfig]": + """ + Get a config object when resolving a URL + + :param store: Lambda Store + :param url_id: unique url ID (prefixed domain when calling the function) + :return: FunctionUrlConfig that belongs to this ID + + # TODO: quite inefficient: optimize + """ + for fn_name, fn in store.functions.items(): + for qualifier, fn_url_config in fn.function_url_configs.items(): + if fn_url_config.url_id == url_id: + 
return fn_url_config + return None + + +def is_qualifier_expression(qualifier: str) -> bool: + """Checks if a given qualifier is a syntactically accepted expression. + It is not necessarily a valid alias or version. + + :param qualifier: Qualifier to check + :return True if syntactically accepted qualifier expression, false otherwise + """ + return bool(QUALIFIER_REGEX.match(qualifier)) + + +def qualifier_is_version(qualifier: str) -> bool: + """ + Checks if a given qualifier represents a version + + :param qualifier: Qualifier to check + :return: True if it matches a version, false otherwise + """ + return bool(VERSION_REGEX.match(qualifier)) + + +def qualifier_is_alias(qualifier: str) -> bool: + """ + Checks if a given qualifier represents an alias + + :param qualifier: Qualifier to check + :return: True if it matches an alias, false otherwise + """ + return bool(ALIAS_REGEX.match(qualifier)) + + +def get_function_name(function_arn_or_name: str, context: RequestContext) -> str: + """ + Return function name from a given arn. + Will check if the context region matches the arn region in the arn, if an arn is provided. + + :param function_arn_or_name: Function arn or only name + :return: function name + """ + name, _ = get_name_and_qualifier(function_arn_or_name, qualifier=None, context=context) + return name + + +def function_locators_from_arn(arn: str) -> tuple[str | None, str | None, str | None, str | None]: + """ + Takes a full or partial arn, or a name + + :param arn: Given arn (or name) + :return: tuple with (name, qualifier, account, region). Qualifier and region are none if missing + """ + + if matched := FUNCTION_NAME_REGEX.match(arn): + name = matched.group("name") + qualifier = matched.group("qualifier") + account = matched.group("account") + region = matched.group("region") + return (name, qualifier, account, region) + + return None, None, None, None + + +def get_account_and_region(function_arn_or_name: str, context: RequestContext) -> Tuple[str, str]: + """ + Takes a full ARN, partial ARN or a name. Returns account ID and region from ARN if available, else + falls back to context account ID and region. + + Lambda allows cross-account access. This function should be used to resolve the correct Store based on the ARN. + """ + _, _, account_id, region = function_locators_from_arn(function_arn_or_name) + return account_id or context.account_id, region or context.region + + +def get_name_and_qualifier( + function_arn_or_name: str, qualifier: str | None, context: RequestContext +) -> tuple[str, str | None]: + """ + Takes a full or partial arn, or a name and a qualifier. + + :param function_arn_or_name: Given arn (or name) + :param qualifier: A qualifier for the function (or None) + :param context: Request context + :return: tuple with (name, qualifier). 
Qualifier is None if missing.
+    :raises: `ResourceNotFoundException` when the context's region differs from the ARN's region
+    :raises: `AccessDeniedException` when the context's account ID differs from the ARN's account ID
+    :raises: `ValidationException` when a function ARN/name or qualifier fails validation checks
+    :raises: `InvalidParameterValueException` when a qualified arn is provided and the qualifier does not match (but is given)
+    """
+    function_name, arn_qualifier, account, region = function_locators_from_arn(function_arn_or_name)
+    operation_type = context.operation.name
+
+    if operation_type not in _supported_resource_based_operations:
+        if account and account != context.account_id:
+            raise AccessDeniedException(None)
+
+    # TODO: should this only run if operation type is unsupported?
+    if region and region != context.region:
+        raise ResourceNotFoundException(
+            f"Functions from '{region}' are not reachable in this region ('{context.region}')",
+            Type="User",
+        )
+
+    validation_errors = []
+    if function_arn_or_name:
+        validation_errors.extend(validate_function_name(function_arn_or_name, operation_type))
+
+    if qualifier:
+        validation_errors.extend(validate_qualifier(qualifier))
+
+    is_only_function_name = function_arn_or_name == function_name
+    if validation_errors:
+        message = construct_validation_exception_message(validation_errors)
+        # Edge-case where the error type is not ValidationException
+        if (
+            operation_type == "CreateFunction"
+            and is_only_function_name
+            and arn_qualifier is None
+            and region is None
+        ):  # just name OR partial
+            raise InvalidParameterValueException(message=message, Type="User")
+        raise CommonServiceException(message=message, code="ValidationException")
+
+    if qualifier and arn_qualifier and arn_qualifier != qualifier:
+        raise InvalidParameterValueException(
+            "The derived qualifier from the function name does not match the specified qualifier.",
+            Type="User",
+        )
+
+    qualifier = qualifier or arn_qualifier
+    return function_name, qualifier
+
+
+def build_statement(
+    partition: str,
+    resource_arn: str,
+    statement_id: str,
+    action: str,
+    principal: str,
+    source_arn: Optional[str] = None,
+    source_account: Optional[str] = None,
+    principal_org_id: Optional[str] = None,
+    event_source_token: Optional[str] = None,
+    auth_type: Optional[FunctionUrlAuthType] = None,
+) -> dict[str, Any]:
+    statement = {
+        "Sid": statement_id,
+        "Effect": "Allow",
+        "Action": action,
+        "Resource": resource_arn,
+    }
+
+    # See AWS service principals for comprehensive docs:
+    # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html
+    # TODO: validate against actual list of IAM-supported AWS services (e.g., lambda.amazonaws.com)
+    if principal.endswith(".amazonaws.com"):
+        statement["Principal"] = {"Service": principal}
+    elif is_aws_account(principal):
+        statement["Principal"] = {"AWS": f"arn:{partition}:iam::{principal}:root"}
+    # TODO: potentially validate against IAM?
+    elif re.match(f"{ARN_PARTITION_REGEX}:iam:", principal):
+        statement["Principal"] = {"AWS": principal}
+    elif principal == "*":
+        statement["Principal"] = principal
+    # TODO: unclear whether above matching is complete?
+    else:
+        raise InvalidParameterValueException(
+            "The provided principal was invalid.
Please check the principal and try again.", + Type="User", + ) + + condition = dict() + if auth_type: + update = {"StringEquals": {"lambda:FunctionUrlAuthType": auth_type}} + condition = merge_recursive(condition, update) + + if principal_org_id: + update = {"StringEquals": {"aws:PrincipalOrgID": principal_org_id}} + condition = merge_recursive(condition, update) + + if source_account: + update = {"StringEquals": {"AWS:SourceAccount": source_account}} + condition = merge_recursive(condition, update) + + if event_source_token: + update = {"StringEquals": {"lambda:EventSourceToken": event_source_token}} + condition = merge_recursive(condition, update) + + if source_arn: + update = {"ArnLike": {"AWS:SourceArn": source_arn}} + condition = merge_recursive(condition, update) + + if condition: + statement["Condition"] = condition + + return statement + + +def generate_random_url_id() -> str: + """ + 32 characters [0-9a-z] url ID + """ + + return "".join(random.choices(URL_CHAR_SET, k=32)) + + +def unqualified_lambda_arn(function_name: str, account: str, region: str): + """ + Generate an unqualified lambda arn + + :param function_name: Function name (not an arn!) + :param account: Account ID + :param region: Region + :return: Unqualified lambda arn + """ + return f"arn:{get_partition(region)}:lambda:{region}:{account}:function:{function_name}" + + +def qualified_lambda_arn( + function_name: str, qualifier: Optional[str], account: str, region: str +) -> str: + """ + Generate a qualified lambda arn + + :param function_name: Function name (not an arn!) + :param qualifier: qualifier (will be set to $LATEST if not present) + :param account: Account ID + :param region: Region + :return: Qualified lambda arn + """ + qualifier = qualifier or "$LATEST" + return f"{unqualified_lambda_arn(function_name=function_name, account=account, region=region)}:{qualifier}" + + +def lambda_arn(function_name: str, qualifier: Optional[str], account: str, region: str) -> str: + """ + Return the lambda arn for the given parameters, with a qualifier if supplied, without otherwise + + :param function_name: Function name + :param qualifier: Qualifier. 
May be left out; the returned ARN then has no qualifier either.
+    :param account: Account ID
+    :param region: Region of the Lambda
+    :return: Lambda Arn with or without qualifier
+    """
+    if qualifier:
+        return qualified_lambda_arn(
+            function_name=function_name, qualifier=qualifier, account=account, region=region
+        )
+    else:
+        return unqualified_lambda_arn(function_name=function_name, account=account, region=region)
+
+
+def is_role_arn(role_arn: str) -> bool:
+    """
+    Returns true if the provided string is a role arn, false otherwise
+
+    :param role_arn: Potential role arn
+    :return: Boolean indicating if input is a role arn
+    """
+    return bool(ROLE_REGEX.match(role_arn))
+
+
+def is_aws_account(aws_account: str) -> bool:
+    """
+    Returns true if the provided string is an AWS account ID, false otherwise
+
+    :param aws_account: Potential AWS account ID
+    :return: Boolean indicating if input is an AWS account ID
+    """
+    return bool(AWS_ACCOUNT_REGEX.match(aws_account))
+
+
+def format_lambda_date(date_to_format: datetime.datetime) -> str:
+    """Format a given datetime to a string generated with the lambda date format"""
+    return date_to_format.strftime(LAMBDA_DATE_FORMAT)
+
+
+def generate_lambda_date() -> str:
+    """Get the current date as string generated with the lambda date format"""
+    return format_lambda_date(datetime.datetime.now())
+
+
+def map_update_status_config(version: "FunctionVersion") -> dict[str, str]:
+    """Map version model to dict output"""
+    result = {}
+    if version.config.last_update:
+        if version.config.last_update.status:
+            result["LastUpdateStatus"] = version.config.last_update.status
+        if version.config.last_update.code:
+            result["LastUpdateStatusReasonCode"] = version.config.last_update.code
+        if version.config.last_update.reason:
+            result["LastUpdateStatusReason"] = version.config.last_update.reason
+    return result
+
+
+def map_state_config(version: "FunctionVersion") -> dict[str, str]:
+    """Map version state to dict output"""
+    result = {}
+    if version_state := version.config.state:
+        if version_state.state:
+            result["State"] = version_state.state
+        if version_state.reason:
+            result["StateReason"] = version_state.reason
+        if version_state.code:
+            result["StateReasonCode"] = version_state.code
+    return result
+
+
+def map_config_out(
+    version: "FunctionVersion",
+    return_qualified_arn: bool = False,
+    return_update_status: bool = True,
+    alias_name: str | None = None,
+) -> FunctionConfiguration:
+    """map function version to function configuration"""
+
+    # handle optional entries that shouldn't be rendered at all if not present
+    optional_kwargs = {}
+    if return_update_status:
+        optional_kwargs.update(map_update_status_config(version))
+    optional_kwargs.update(map_state_config(version))
+
+    if version.config.architectures:
+        optional_kwargs["Architectures"] = version.config.architectures
+
+    if version.config.dead_letter_arn:
+        optional_kwargs["DeadLetterConfig"] = DeadLetterConfig(
+            TargetArn=version.config.dead_letter_arn
+        )
+
+    if version.config.vpc_config:
+        optional_kwargs["VpcConfig"] = VpcConfigResponse(
+            VpcId=version.config.vpc_config.vpc_id,
+            SubnetIds=version.config.vpc_config.subnet_ids,
+            SecurityGroupIds=version.config.vpc_config.security_group_ids,
+        )
+
+    if version.config.environment is not None:
+        optional_kwargs["Environment"] = EnvironmentResponse(
+            Variables=version.config.environment
+        )  # TODO: Errors key?
+ + if version.config.layers: + optional_kwargs["Layers"] = [ + {"Arn": layer.layer_version_arn, "CodeSize": layer.code.code_size} + for layer in version.config.layers + ] + if version.config.image_config: + image_config = ImageConfig() + if version.config.image_config.command: + image_config["Command"] = version.config.image_config.command + if version.config.image_config.entrypoint: + image_config["EntryPoint"] = version.config.image_config.entrypoint + if version.config.image_config.working_directory: + image_config["WorkingDirectory"] = version.config.image_config.working_directory + if image_config: + optional_kwargs["ImageConfigResponse"] = ImageConfigResponse(ImageConfig=image_config) + if version.config.code: + optional_kwargs["CodeSize"] = version.config.code.code_size + optional_kwargs["CodeSha256"] = version.config.code.code_sha256 + elif version.config.image: + optional_kwargs["CodeSize"] = 0 + optional_kwargs["CodeSha256"] = version.config.image.code_sha256 + + # output for an alias qualifier is completely the same except for the returned ARN + if alias_name: + function_arn = f"{':'.join(version.id.qualified_arn().split(':')[:-1])}:{alias_name}" + else: + function_arn = ( + version.id.qualified_arn() if return_qualified_arn else version.id.unqualified_arn() + ) + + func_conf = FunctionConfiguration( + RevisionId=version.config.revision_id, + FunctionName=version.id.function_name, + FunctionArn=function_arn, + LastModified=version.config.last_modified, + Version=version.id.qualifier, + Description=version.config.description, + Role=version.config.role, + Timeout=version.config.timeout, + Runtime=version.config.runtime, + Handler=version.config.handler, + MemorySize=version.config.memory_size, + PackageType=version.config.package_type, + TracingConfig=TracingConfig(Mode=version.config.tracing_config_mode), + EphemeralStorage=EphemeralStorage(Size=version.config.ephemeral_storage.size), + SnapStart=version.config.snap_start, + RuntimeVersionConfig=version.config.runtime_version_config, + LoggingConfig=version.config.logging_config, + **optional_kwargs, + ) + return func_conf + + +def map_to_list_response(config: FunctionConfiguration) -> FunctionConfiguration: + """remove values not usually presented in list operations from function config output""" + shallow_copy = config.copy() + for k in [ + "State", + "StateReason", + "StateReasonCode", + "LastUpdateStatus", + "LastUpdateStatusReason", + "LastUpdateStatusReasonCode", + "RuntimeVersionConfig", + ]: + shallow_copy.pop(k, None) + return shallow_copy + + +def map_alias_out(alias: "VersionAlias", function: "Function") -> AliasConfiguration: + """map alias model to alias configuration output""" + alias_arn = f"{function.latest().id.unqualified_arn()}:{alias.name}" + optional_kwargs = {} + if alias.routing_configuration: + optional_kwargs |= { + "RoutingConfig": { + "AdditionalVersionWeights": alias.routing_configuration.version_weights + } + } + return AliasConfiguration( + AliasArn=alias_arn, + Description=alias.description, + FunctionVersion=alias.function_version, + Name=alias.name, + RevisionId=alias.revision_id, + **optional_kwargs, + ) + + +def validate_and_set_batch_size(service: str, batch_size: Optional[int] = None) -> int: + min_batch_size = 1 + + BATCH_SIZE_RANGES = { + "kafka": (100, 10_000), + "kinesis": (100, 10_000), + "dynamodb": (100, 10_000), + "sqs-fifo": (10, 10), + "sqs": (10, 10_000), + "mq": (100, 10_000), + } + svc_range = BATCH_SIZE_RANGES.get(service) + + if svc_range: + default_batch_size, max_batch_size 
= svc_range + + if batch_size is None: + batch_size = default_batch_size + + if batch_size < min_batch_size or batch_size > max_batch_size: + raise InvalidParameterValueException("out of bounds todo", Type="User") # TODO: test + + return batch_size + + +def map_layer_out(layer_version: "LayerVersion") -> PublishLayerVersionResponse: + return PublishLayerVersionResponse( + Content=LayerVersionContentOutput( + Location=layer_version.code.generate_presigned_url(), + CodeSha256=layer_version.code.code_sha256, + CodeSize=layer_version.code.code_size, + # SigningProfileVersionArn="", # same as in function configuration + # SigningJobArn="" # same as in function configuration + ), + LicenseInfo=layer_version.license_info, + Description=layer_version.description, + CompatibleArchitectures=layer_version.compatible_architectures, + CompatibleRuntimes=layer_version.compatible_runtimes, + CreatedDate=layer_version.created, + LayerArn=layer_version.layer_arn, + LayerVersionArn=layer_version.layer_version_arn, + Version=layer_version.version, + ) + + +def layer_arn(layer_name: str, account: str, region: str): + return f"arn:{get_partition(region)}:lambda:{region}:{account}:layer:{layer_name}" + + +def layer_version_arn(layer_name: str, account: str, region: str, version: str): + return f"arn:{get_partition(region)}:lambda:{region}:{account}:layer:{layer_name}:{version}" + + +def parse_layer_arn(layer_version_arn: str) -> Tuple[str, str, str, str]: + return LAYER_VERSION_ARN_PATTERN.match(layer_version_arn).group( + "region_name", "account_id", "layer_name", "layer_version" + ) + + +def validate_layer_runtime(compatible_runtime: str) -> str | None: + if compatible_runtime is not None and compatible_runtime not in ALL_RUNTIMES: + return f"Value '{compatible_runtime}' at 'compatibleRuntime' failed to satisfy constraint: Member must satisfy enum value set: {VALID_LAYER_RUNTIMES}" + return None + + +def validate_layer_architecture(compatible_architecture: str) -> str | None: + if compatible_architecture is not None and compatible_architecture not in ARCHITECTURES: + return f"Value '{compatible_architecture}' at 'compatibleArchitecture' failed to satisfy constraint: Member must satisfy enum value set: [x86_64, arm64]" + return None + + +def validate_layer_runtimes_and_architectures( + compatible_runtimes: list[str], compatible_architectures: list[str] +): + validations = [] + + if compatible_runtimes and set(compatible_runtimes).difference(ALL_RUNTIMES): + constraint = f"Member must satisfy enum value set: {VALID_RUNTIMES}" + validation_msg = f"Value '[{', '.join([s for s in compatible_runtimes])}]' at 'compatibleRuntimes' failed to satisfy constraint: {constraint}" + validations.append(validation_msg) + + if compatible_architectures and set(compatible_architectures).difference(ARCHITECTURES): + constraint = "[Member must satisfy enum value set: [x86_64, arm64]]" + validation_msg = f"Value '[{', '.join([s for s in compatible_architectures])}]' at 'compatibleArchitectures' failed to satisfy constraint: Member must satisfy constraint: {constraint}" + validations.append(validation_msg) + + return validations + + +def is_layer_arn(layer_name: str) -> bool: + return LAYER_VERSION_ARN_PATTERN.match(layer_name) is not None + + +# See Lambda API actions that support resource-based IAM policies +# https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-api +_supported_resource_based_operations = { + "CreateAlias", + "DeleteAlias", + "DeleteFunction", + 
"DeleteFunctionConcurrency", + "DeleteFunctionEventInvokeConfig", + "DeleteProvisionedConcurrencyConfig", + "GetAlias", + "GetFunction", + "GetFunctionConcurrency", + "GetFunctionConfiguration", + "GetFunctionEventInvokeConfig", + "GetPolicy", + "GetProvisionedConcurrencyConfig", + "Invoke", + "ListAliases", + "ListFunctionEventInvokeConfigs", + "ListProvisionedConcurrencyConfigs", + "ListTags", + "ListVersionsByFunction", + "PublishVersion", + "PutFunctionConcurrency", + "PutFunctionEventInvokeConfig", + "PutProvisionedConcurrencyConfig", + "TagResource", + "UntagResource", + "UpdateAlias", + "UpdateFunctionCode", + "UpdateFunctionEventInvokeConfig", +} diff --git a/localstack-core/localstack/services/lambda_/custom_endpoints.py b/localstack-core/localstack/services/lambda_/custom_endpoints.py new file mode 100644 index 0000000000000..b8267f1e7d06b --- /dev/null +++ b/localstack-core/localstack/services/lambda_/custom_endpoints.py @@ -0,0 +1,49 @@ +import urllib.parse +from typing import List, TypedDict + +from rolo import Request, route + +from localstack.aws.api.lambda_ import Runtime +from localstack.http import Response +from localstack.services.lambda_.packages import get_runtime_client_path +from localstack.services.lambda_.runtimes import ( + ALL_RUNTIMES, + DEPRECATED_RUNTIMES, + SUPPORTED_RUNTIMES, +) + + +class LambdaRuntimesResponse(TypedDict, total=False): + Runtimes: List[Runtime] + + +class LambdaCustomEndpoints: + @route("/_aws/lambda/runtimes", methods=["GET"]) + def runtimes(self, request: Request) -> LambdaRuntimesResponse: + """This metadata endpoint needs to be loaded before the Lambda provider. + It can be used by the Webapp to query supported Lambda runtimes of an unknown LocalStack version.""" + query_params = urllib.parse.parse_qs(request.environ["QUERY_STRING"]) + # Query parameter values are all lists. Example: { "filter": ["all"] } + filter_params = query_params.get("filter", []) + runtimes = set() + if "all" in filter_params: + runtimes.update(ALL_RUNTIMES) + if "deprecated" in filter_params: + runtimes.update(DEPRECATED_RUNTIMES) + # By default (i.e., without any filter param), we return the supported runtimes because that is most useful. + if "supported" in filter_params or len(runtimes) == 0: + runtimes.update(SUPPORTED_RUNTIMES) + + return LambdaRuntimesResponse(Runtimes=list(runtimes)) + + @route("/_aws/lambda/init", methods=["GET"]) + def init(self, request: Request) -> Response: + """ + This internal endpoint exposes the init binary over an http API + :param request: The HTTP request object. + :return: Response containing the init binary. 
+ """ + runtime_client_path = get_runtime_client_path() / "var" / "rapid" / "init" + runtime_init_binary = runtime_client_path.read_bytes() + + return Response(runtime_init_binary, mimetype="application/octet-stream") diff --git a/localstack/services/stepfunctions/asl/component/program/__init__.py b/localstack-core/localstack/services/lambda_/event_source_mapping/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/program/__init__.py rename to localstack-core/localstack/services/lambda_/event_source_mapping/__init__.py diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/esm_config_factory.py b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_config_factory.py new file mode 100644 index 0000000000000..aea1aeb33bb65 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_config_factory.py @@ -0,0 +1,118 @@ +import datetime + +from localstack.aws.api.lambda_ import ( + CreateEventSourceMappingRequest, + DestinationConfig, + EventSourceMappingConfiguration, + EventSourcePosition, + RequestContext, +) +from localstack.services.lambda_ import hooks as lambda_hooks +from localstack.services.lambda_.event_source_mapping.esm_worker import EsmState, EsmStateReason +from localstack.services.lambda_.event_source_mapping.pipe_utils import ( + get_standardized_service_name, +) +from localstack.utils.aws.arns import lambda_event_source_mapping_arn, parse_arn +from localstack.utils.collections import merge_recursive +from localstack.utils.strings import long_uid + + +class EsmConfigFactory: + request: CreateEventSourceMappingRequest + context: RequestContext + function_arn: str + + def __init__( + self, request: CreateEventSourceMappingRequest, context: RequestContext, function_arn: str + ): + self.request = request + self.function_arn = function_arn + self.context = context + + def get_esm_config(self) -> EventSourceMappingConfiguration: + """Creates an Event Source Mapping (ESM) configuration based on a create ESM request. + * CreateEventSourceMapping API: https://docs.aws.amazon.com/lambda/latest/api/API_CreateEventSourceMapping.html + * CreatePipe API: https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_CreatePipe.html + The CreatePipe API covers largely the same parameters, but is better structured using hierarchical parameters. 
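+        Example create request (hypothetical values) that this factory derives defaults for:
+        {"FunctionName": "my-function", "EventSourceArn": "arn:aws:sqs:us-east-1:000000000000:my-queue"}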
+ """ + service = "" + if source_arn := self.request.get("EventSourceArn"): + parsed_arn = parse_arn(source_arn) + service = get_standardized_service_name(parsed_arn["service"]) + + uuid = long_uid() + + default_source_parameters = {} + default_source_parameters["UUID"] = uuid + default_source_parameters["EventSourceMappingArn"] = lambda_event_source_mapping_arn( + uuid, self.context.account_id, self.context.region + ) + default_source_parameters["StateTransitionReason"] = EsmStateReason.USER_ACTION + + if service == "sqs": + default_source_parameters["BatchSize"] = 10 + default_source_parameters["MaximumBatchingWindowInSeconds"] = 0 + default_source_parameters["StateTransitionReason"] = EsmStateReason.USER_INITIATED + elif service == "kinesis": + # TODO: test all defaults + default_source_parameters["BatchSize"] = 100 + default_source_parameters["DestinationConfig"] = DestinationConfig(OnFailure={}) + default_source_parameters["BisectBatchOnFunctionError"] = False + default_source_parameters["MaximumBatchingWindowInSeconds"] = 0 + default_source_parameters["MaximumRecordAgeInSeconds"] = -1 + default_source_parameters["MaximumRetryAttempts"] = -1 + default_source_parameters["ParallelizationFactor"] = 1 + default_source_parameters["StartingPosition"] = EventSourcePosition.TRIM_HORIZON + default_source_parameters["TumblingWindowInSeconds"] = 0 + default_source_parameters["LastProcessingResult"] = EsmStateReason.NO_RECORDS_PROCESSED + elif service == "dynamodbstreams": + # TODO: test all defaults + default_source_parameters["BatchSize"] = 100 + default_source_parameters["DestinationConfig"] = DestinationConfig(OnFailure={}) + default_source_parameters["BisectBatchOnFunctionError"] = False + default_source_parameters["MaximumBatchingWindowInSeconds"] = 0 + default_source_parameters["MaximumRecordAgeInSeconds"] = -1 + default_source_parameters["MaximumRetryAttempts"] = -1 + default_source_parameters["ParallelizationFactor"] = 1 + default_source_parameters["StartingPosition"] = EventSourcePosition.TRIM_HORIZON + default_source_parameters["TumblingWindowInSeconds"] = 0 + default_source_parameters["LastProcessingResult"] = EsmStateReason.NO_RECORDS_PROCESSED + else: + lambda_hooks.set_event_source_config_defaults.run( + default_source_parameters, self.request, service + ) + + if not default_source_parameters: + raise Exception( + f"Default Lambda Event Source Mapping parameters not implemented for service {service}. req={self.request} dict={default_source_parameters}" + ) + + # TODO: test whether merging actually happens recursively. Examples: + # a) What happens if only one of the parameters for DocumentDBEventSourceConfig change? + # b) Does a change of AmazonManagedKafkaEventSourceConfig.ConsumerGroupId affect flat parameters such as BatchSize and MaximumBatchingWindowInSeconds)? + # c) Are FilterCriteria.Filters merged or replaced upon update? + # TODO: can we ignore extra parameters from the request (e.g., Kinesis params for SQS source)? + derived_source_parameters = merge_recursive(default_source_parameters, self.request) + + # TODO What happens when FunctionResponseTypes value or target service is invalid? 
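+        # Illustrative sketch of the intended merge semantics (unverified, see TODOs above): given
+        #   default_source_parameters = {"BatchSize": 10, "MaximumBatchingWindowInSeconds": 0}
+        #   request = {"FunctionName": "my-function", "BatchSize": 5}
+        # the derived parameters should keep BatchSize=5 from the request and only fill in the
+        # missing MaximumBatchingWindowInSeconds=0 default.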
+ if service in ["sqs", "kinesis", "dynamodbstreams"]: + derived_source_parameters["FunctionResponseTypes"] = derived_source_parameters.get( + "FunctionResponseTypes", [] + ) + + state = EsmState.CREATING if self.request.get("Enabled", True) else EsmState.DISABLED + esm_config = EventSourceMappingConfiguration( + **derived_source_parameters, + FunctionArn=self.function_arn, + # TODO: last modified => does state transition affect this? + LastModified=datetime.datetime.now(), + State=state, + # TODO: complete missing fields + ) + # TODO: check whether we need to remove any more fields that are present in the request but should not be in the + # esm_config + esm_config.pop("Enabled", "") + esm_config.pop("FunctionName", "") + if not esm_config.get("FilterCriteria", {}).get("Filters", []): + esm_config.pop("FilterCriteria", "") + return esm_config diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/esm_event_processor.py b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_event_processor.py new file mode 100644 index 0000000000000..b2e85a04ea26c --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_event_processor.py @@ -0,0 +1,178 @@ +import json +import logging +import uuid + +from localstack.aws.api.pipes import LogLevel +from localstack.services.lambda_.analytics import EsmExecutionStatus, esm_counter +from localstack.services.lambda_.event_source_mapping.event_processor import ( + BatchFailureError, + EventProcessor, + PartialBatchFailureError, +) +from localstack.services.lambda_.event_source_mapping.pipe_loggers.pipe_logger import PipeLogger +from localstack.services.lambda_.event_source_mapping.pipe_utils import to_json_str +from localstack.services.lambda_.event_source_mapping.senders.sender import ( + PartialFailureSenderError, + Sender, + SenderError, +) + +LOG = logging.getLogger(__name__) + + +class EsmEventProcessor(EventProcessor): + sender: Sender + logger: PipeLogger + + def __init__(self, sender, logger): + self.sender = sender + self.logger = logger + + def process_events_batch(self, input_events: list[dict] | dict) -> None: + # analytics + if isinstance(input_events, list) and input_events: + first_event = input_events[0] + elif input_events: + first_event = input_events + else: + first_event = {} + event_source = first_event.get("eventSource") + + execution_id = uuid.uuid4() + # Create a copy of the original input events + events = input_events.copy() + try: + self.logger.set_fields(executionId=str(execution_id)) + self.logger.log( + messageType="ExecutionStarted", + logLevel=LogLevel.INFO, + payload=to_json_str(events), + ) + # An execution is only triggered upon successful polling. Therefore, `PollingStageStarted` never occurs. 
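+            # Message sequence for a fully successful batch, per the log calls in this block and in
+            # process_target_stage below: ExecutionStarted -> PollingStageSucceeded -> TargetStageEntered
+            # -> TargetInvocationStarted -> TargetInvocationSucceeded -> TargetStageSucceeded -> ExecutionSucceeded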
+ self.logger.log( + messageType="PollingStageSucceeded", + logLevel=LogLevel.TRACE, + ) + # Target Stage + self.process_target_stage(events) + self.logger.log( + messageType="ExecutionSucceeded", + logLevel=LogLevel.INFO, + ) + esm_counter.labels(source=event_source, status=EsmExecutionStatus.success).increment() + except PartialFailureSenderError as e: + self.logger.log( + messageType="ExecutionFailed", + logLevel=LogLevel.ERROR, + error=e.error, + ) + esm_counter.labels( + source=event_source, status=EsmExecutionStatus.partial_batch_failure_error + ).increment() + # TODO: check whether partial batch item failures is enabled by default or need to be explicitly enabled + # using --function-response-types "ReportBatchItemFailures" + # https://docs.aws.amazon.com/lambda/latest/dg/services-sqs-errorhandling.html + raise PartialBatchFailureError( + partial_failure_payload=e.partial_failure_payload, error=e.error + ) from e + except SenderError as e: + self.logger.log( + messageType="ExecutionFailed", + logLevel=LogLevel.ERROR, + error=e.error, + ) + esm_counter.labels( + source=event_source, status=EsmExecutionStatus.target_invocation_error + ).increment() + raise BatchFailureError(error=e.error) from e + except Exception as e: + LOG.error( + "Unhandled exception while processing Lambda event source mapping (ESM) events %s for ESM with execution id %s", + events, + execution_id, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + esm_counter.labels( + source=event_source, status=EsmExecutionStatus.unhandled_error + ).increment() + raise e + + def process_target_stage(self, events: list[dict]) -> None: + try: + self.logger.log( + messageType="TargetStageEntered", + logLevel=LogLevel.INFO, + ) + # 2) Deliver to target in batches + try: + self.logger.log( + messageType="TargetInvocationStarted", + logLevel=LogLevel.TRACE, + ) + # TODO: handle and log target invocation + stage skipped (when no records present) + payload = self.sender.send_events(events) + if payload: + # TODO: test unserializable content (e.g., byte strings) + payload = json.dumps(payload) + else: + payload = "" + self.logger.log( + messageType="TargetInvocationSucceeded", + logLevel=LogLevel.TRACE, + ) + except PartialFailureSenderError as e: + self.logger.log( + messageType="TargetInvocationPartiallyFailed", + logLevel=LogLevel.ERROR, + error=e.error, + ) + raise e + except SenderError as e: + self.logger.log( + messageType="TargetInvocationFailed", + logLevel=LogLevel.ERROR, + error=e.error, + ) + raise e + self.logger.log( + messageType="TargetStageSucceeded", + logLevel=LogLevel.INFO, + payload=payload, + ) + except PartialFailureSenderError as e: + self.logger.log( + messageType="TargetStagePartiallyFailed", + logLevel=LogLevel.ERROR, + error=e.error, + ) + raise e + except SenderError as e: + self.logger.log( + messageType="TargetStageFailed", + logLevel=LogLevel.ERROR, + error=e.error, + ) + raise e + + def generate_event_failure_context(self, abort_condition: str, **kwargs) -> dict: + error_payload: dict = kwargs.get("error") + if not error_payload: + return {} + # TODO: Should 'requestContext' and 'responseContext' be defined as models? + # TODO: Allow for generating failure context where there is no responseContext i.e + # if a RecordAgeExceeded condition is triggered. 
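+        # Example (hypothetical values) of an error payload consumed below:
+        #   {"requestId": "<invocation request id>", "httpStatusCode": 200,
+        #    "executedVersion": "$LATEST", "functionError": "Unhandled"}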
+ context = { + "requestContext": { + "requestId": error_payload.get("requestId"), + "functionArn": self.sender.target_arn, # get the target ARN from the sender (always LambdaSender) + "condition": abort_condition, + "approximateInvokeCount": kwargs.get("attempts_count"), + }, + "responseContext": { + "statusCode": error_payload.get("httpStatusCode"), + "executedVersion": error_payload.get("executedVersion"), + "functionError": error_payload.get("functionError"), + }, + } + + return context diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker.py b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker.py new file mode 100644 index 0000000000000..05f38bcf5ddbf --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker.py @@ -0,0 +1,233 @@ +import logging +import threading +from enum import StrEnum + +from localstack.aws.api.lambda_ import ( + EventSourceMappingConfiguration, +) +from localstack.config import ( + LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_EMPTY_POLL_SEC, + LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_ERROR_SEC, + LAMBDA_EVENT_SOURCE_MAPPING_POLL_INTERVAL_SEC, +) +from localstack.services.lambda_.analytics import EsmExecutionStatus, esm_counter +from localstack.services.lambda_.event_source_mapping.pollers.poller import ( + EmptyPollResultsException, + Poller, +) +from localstack.services.lambda_.invocation.models import LambdaStore, lambda_stores +from localstack.services.lambda_.provider_utils import get_function_version_from_arn +from localstack.utils.aws.arns import parse_arn +from localstack.utils.backoff import ExponentialBackoff +from localstack.utils.threads import FuncThread + +LOG = logging.getLogger(__name__) + + +class EsmState(StrEnum): + # https://docs.aws.amazon.com/lambda/latest/api/API_CreateEventSourceMapping.html#lambda-CreateEventSourceMapping-response-State + CREATING = "Creating" + ENABLING = "Enabling" + ENABLED = "Enabled" + DISABLING = "Disabling" + DISABLED = "Disabled" + UPDATING = "Updating" + DELETING = "Deleting" + + +class EsmStateReason(StrEnum): + # Used for Kinesis and DynamoDB + USER_ACTION = "User action" + # Used for SQS + USER_INITIATED = "USER_INITIATED" + NO_RECORDS_PROCESSED = "No records processed" + # TODO: add others? 
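+
+# Lifecycle sketch derived from the EsmWorker methods below:
+#   create() with enabled=True  -> Creating  -> Enabled (once poller_loop starts)
+#   create() with enabled=False -> Disabled
+#   stop()                      -> Disabling -> Disabled (after poller_loop exits)
+#   delete()                    -> Deleting  -> ESM removed from the store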
+ + +class EsmWorker: + esm_config: EventSourceMappingConfiguration + enabled: bool + current_state: EsmState + state_transition_reason: EsmStateReason + # Either USER_ACTION or USER_INITIATED (SQS) depending on the event source + user_state_reason: EsmStateReason + # TODO: test + last_processing_result: str + + poller: Poller + + _state: LambdaStore + _state_lock: threading.RLock + _shutdown_event: threading.Event + _poller_thread: FuncThread | None + + def __init__( + self, + esm_config: EventSourceMappingConfiguration, + poller: Poller, + enabled: bool = True, + user_state_reason: EsmStateReason = EsmStateReason.USER_ACTION, + ): + self.esm_config = esm_config + self.enabled = enabled + self.current_state = EsmState.CREATING + self.user_state_reason = user_state_reason + self.state_transition_reason = self.user_state_reason + + self.poller = poller + + # TODO: implement lifecycle locking + self._state_lock = threading.RLock() + self._shutdown_event = threading.Event() + self._poller_thread = None + + function_version = get_function_version_from_arn(self.esm_config["FunctionArn"]) + self._state = lambda_stores[function_version.id.account][function_version.id.region] + + # HACK: Flag used to check if a graceful shutdown was triggered. + self._graceful_shutdown_triggered = False + + @property + def uuid(self) -> str: + return self.esm_config["UUID"] + + def stop_for_shutdown(self): + # Signal the worker's poller_loop thread to gracefully shutdown + # TODO: Once ESM state is de-coupled from lambda store, re-think this approach. + self._shutdown_event.set() + self._graceful_shutdown_triggered = True + + def create(self): + if self.enabled: + with self._state_lock: + self.current_state = EsmState.CREATING + self.state_transition_reason = self.user_state_reason + self.start() + else: + # TODO: validate with tests + with self._state_lock: + self.current_state = EsmState.DISABLED + self.state_transition_reason = self.user_state_reason + self.update_esm_state_in_store(EsmState.DISABLED) + + def start(self): + with self._state_lock: + self.enabled = True + # CREATING state takes precedence over ENABLING + if self.current_state != EsmState.CREATING: + self.current_state = EsmState.ENABLING + self.state_transition_reason = self.user_state_reason + # Reset the shutdown event such that we don't stop immediately after a restart + self._shutdown_event.clear() + self._poller_thread = FuncThread( + self.poller_loop, + name=f"event-source-mapping-poller-{self.uuid}", + ) + self._poller_thread.start() + + def stop(self): + with self._state_lock: + self.enabled = False + self.current_state = EsmState.DISABLING + self.update_esm_state_in_store(EsmState.DISABLING) + self.state_transition_reason = self.user_state_reason + self._shutdown_event.set() + + def delete(self): + with self._state_lock: + self.current_state = EsmState.DELETING + self.update_esm_state_in_store(EsmState.DELETING) + self.state_transition_reason = self.user_state_reason + self._shutdown_event.set() + + def poller_loop(self, *args, **kwargs): + with self._state_lock: + self.current_state = EsmState.ENABLED + self.update_esm_state_in_store(EsmState.ENABLED) + self.state_transition_reason = self.user_state_reason + + error_boff = ExponentialBackoff( + initial_interval=2, max_interval=LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_ERROR_SEC + ) + empty_boff = ExponentialBackoff( + initial_interval=1, + max_interval=LAMBDA_EVENT_SOURCE_MAPPING_MAX_BACKOFF_ON_EMPTY_POLL_SEC, + ) + + poll_interval_duration = 
LAMBDA_EVENT_SOURCE_MAPPING_POLL_INTERVAL_SEC + + while not self._shutdown_event.is_set(): + try: + # TODO: update state transition reason? + self.poller.poll_events() + + # If no exception encountered, reset the backoff + error_boff.reset() + empty_boff.reset() + + # Set the poll frequency back to the default + poll_interval_duration = LAMBDA_EVENT_SOURCE_MAPPING_POLL_INTERVAL_SEC + except EmptyPollResultsException as miss_ex: + # If the event source is empty, backoff + poll_interval_duration = empty_boff.next_backoff() + LOG.debug( + "The event source %s is empty. Backing off for %.2f seconds until next request.", + miss_ex.source_arn, + poll_interval_duration, + ) + except Exception as e: + LOG.error( + "Error while polling messages for event source %s: %s", + self.esm_config.get("EventSourceArn") + or self.esm_config.get("SelfManagedEventSource"), + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + event_source = parse_arn(self.esm_config.get("EventSourceArn")).get("service") + esm_counter.labels( + source=event_source, status=EsmExecutionStatus.source_poller_error + ).increment() + # Wait some time between retries to avoid running into the problem right again + poll_interval_duration = error_boff.next_backoff() + finally: + self._shutdown_event.wait(poll_interval_duration) + + # Optionally closes internal components of Poller. This is a no-op for unimplemented pollers. + self.poller.close() + + try: + # Update state in store after async stop or delete + if self.enabled and self.current_state == EsmState.DELETING: + # TODO: we also need to remove the ESM worker reference from the Lambda provider to esm_worker + # TODO: proper locking for store updates + self.delete_esm_in_store() + elif not self.enabled and self.current_state == EsmState.DISABLING: + with self._state_lock: + self.current_state = EsmState.DISABLED + self.state_transition_reason = self.user_state_reason + self.update_esm_state_in_store(EsmState.DISABLED) + elif not self._graceful_shutdown_triggered: + # HACK: If we reach this state and a graceful shutdown was not triggered, log a warning to indicate + # an unexpected state. + LOG.warning( + "Invalid state %s for event source mapping %s.", + self.current_state, + self.esm_config["UUID"], + ) + except Exception as e: + LOG.warning( + "Failed to update state %s for event source mapping %s. Exception: %s ", + self.current_state, + self.esm_config["UUID"], + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + def delete_esm_in_store(self): + self._state.event_source_mappings.pop(self.esm_config["UUID"], None) + + # TODO: how can we handle async state updates better? Async deletion or disabling needs to update the model state. 
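+    # Example (hypothetical call): update_esm_state_in_store(EsmState.DISABLED) rewrites only the
+    # "State" field of the stored EventSourceMappingConfiguration for this worker's UUID.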
+ def update_esm_state_in_store(self, new_state: EsmState): + esm_update = {"State": new_state} + # TODO: add proper locking for store updates + self._state.event_source_mappings[self.esm_config["UUID"]].update(esm_update) diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker_factory.py b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker_factory.py new file mode 100644 index 0000000000000..0bf30dfb15d79 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/esm_worker_factory.py @@ -0,0 +1,241 @@ +from typing import Callable + +import botocore.config + +from localstack.aws.api.lambda_ import ( + EventSourceMappingConfiguration, + FunctionResponseType, +) +from localstack.aws.api.pipes import ( + DynamoDBStreamStartPosition, + KinesisStreamStartPosition, + PipeSourceDynamoDBStreamParameters, + PipeSourceKinesisStreamParameters, + PipeSourceParameters, + PipeSourceSqsQueueParameters, + PipeTargetInvocationType, + PipeTargetLambdaFunctionParameters, + PipeTargetParameters, +) +from localstack.services.lambda_ import hooks as lambda_hooks +from localstack.services.lambda_.event_source_mapping.esm_event_processor import ( + EsmEventProcessor, +) +from localstack.services.lambda_.event_source_mapping.esm_worker import EsmStateReason, EsmWorker +from localstack.services.lambda_.event_source_mapping.pipe_loggers.noops_pipe_logger import ( + NoOpsPipeLogger, +) +from localstack.services.lambda_.event_source_mapping.pipe_utils import ( + get_internal_client, + get_standardized_service_name, +) +from localstack.services.lambda_.event_source_mapping.pollers.dynamodb_poller import DynamoDBPoller +from localstack.services.lambda_.event_source_mapping.pollers.kinesis_poller import KinesisPoller +from localstack.services.lambda_.event_source_mapping.pollers.poller import Poller +from localstack.services.lambda_.event_source_mapping.pollers.sqs_poller import ( + DEFAULT_MAX_WAIT_TIME_SECONDS, + SqsPoller, +) +from localstack.services.lambda_.event_source_mapping.senders.lambda_sender import LambdaSender +from localstack.utils.aws.arns import parse_arn +from localstack.utils.aws.client_types import ServicePrincipal +from localstack.utils.lambda_debug_mode.lambda_debug_mode import ( + DEFAULT_LAMBDA_DEBUG_MODE_TIMEOUT_SECONDS, + is_lambda_debug_mode, +) + + +class PollerHolder: + """Holds a `Callable` function `create_poller_fn` used to create a Poller. Useful when creating Pollers downstream via hooks.""" + + create_poller_fn: Callable[..., Poller] | None = None + + +class EsmWorkerFactory: + esm_config: EventSourceMappingConfiguration + function_role_arn: str + enabled: bool + + def __init__(self, esm_config, function_role, enabled): + self.esm_config = esm_config + self.function_role_arn = function_role + self.enabled = enabled + + def get_esm_worker(self) -> EsmWorker: + # Sender (always Lambda) + function_arn = self.esm_config["FunctionArn"] + + if is_lambda_debug_mode(): + timeout_seconds = DEFAULT_LAMBDA_DEBUG_MODE_TIMEOUT_SECONDS + else: + # 900s is the maximum amount of time a Lambda can run for. 
+ lambda_max_timeout_seconds = 900 + invoke_timeout_buffer_seconds = 5 + timeout_seconds = lambda_max_timeout_seconds + invoke_timeout_buffer_seconds + + lambda_client = get_internal_client( + arn=function_arn, # Only the function_arn is necessary since the Lambda should be able to invoke itself + client_config=botocore.config.Config( + retries={ + "total_max_attempts": 1 + }, # Disable retries, to prevent re-invoking the Lambda + read_timeout=timeout_seconds, + tcp_keepalive=True, + ), + ) + sender = LambdaSender( + target_arn=function_arn, + target_parameters=PipeTargetParameters( + LambdaFunctionParameters=PipeTargetLambdaFunctionParameters( + InvocationType=PipeTargetInvocationType.REQUEST_RESPONSE + ) + ), + target_client=lambda_client, + payload_dict=True, # TODO: This should be handled better since not all payloads in ESM are in the form { "Records" : List[Dict]} + report_batch_item_failures=self.esm_config.get("FunctionResponseTypes") + == [FunctionResponseType.ReportBatchItemFailures], + ) + + # Logger + logger = NoOpsPipeLogger() + + # Event Source Mapping processor + esm_processor = EsmEventProcessor(sender=sender, logger=logger) + + # Poller + source_service = "" + source_client = None + source_arn = self.esm_config.get("EventSourceArn", "") + if source_arn: + parsed_source_arn = parse_arn(source_arn) + source_service = get_standardized_service_name(parsed_source_arn["service"]) + source_client = get_internal_client( + arn=source_arn, + role_arn=self.function_role_arn, + service_principal=ServicePrincipal.lambda_, + source_arn=self.esm_config["FunctionArn"], + client_config=botocore.config.Config( + retries={"total_max_attempts": 1}, # Disable retries + read_timeout=max( + self.esm_config.get( + "MaximumBatchingWindowInSeconds", DEFAULT_MAX_WAIT_TIME_SECONDS + ), + 60, + ) + + 5, # Extend read timeout (with 5s buffer) for long-polling + # Setting tcp_keepalive to true allows the boto client to keep + # a long-running TCP connection when making calls to the gateway. + # This ensures long-poll calls do not prematurely have their socket + # connection marked as stale if no data is transferred for a given + # period of time hence preventing premature drops or resets of the + # connection. 
+ # See https://aws.amazon.com/blogs/networking-and-content-delivery/implementing-long-running-tcp-connections-within-vpc-networking/ + tcp_keepalive=True, + ), + ) + + filter_criteria = self.esm_config.get("FilterCriteria", {"Filters": []}) + user_state_reason = EsmStateReason.USER_ACTION + if source_service == "sqs": + user_state_reason = EsmStateReason.USER_INITIATED + source_parameters = PipeSourceParameters( + FilterCriteria=filter_criteria, + SqsQueueParameters=PipeSourceSqsQueueParameters( + BatchSize=self.esm_config["BatchSize"], + MaximumBatchingWindowInSeconds=self.esm_config[ + "MaximumBatchingWindowInSeconds" + ], + ), + ) + poller = SqsPoller( + source_arn=source_arn, + source_parameters=source_parameters, + source_client=source_client, + processor=esm_processor, + ) + elif source_service == "kinesis": + # TODO: map all supported ESM to Pipe parameters + optional_params = {} + dead_letter_config_arn = ( + self.esm_config.get("DestinationConfig", {}).get("OnFailure", {}).get("Destination") + ) + if dead_letter_config_arn: + optional_params["DeadLetterConfig"] = {"Arn": dead_letter_config_arn} + source_parameters = PipeSourceParameters( + FilterCriteria=filter_criteria, + KinesisStreamParameters=PipeSourceKinesisStreamParameters( + StartingPosition=KinesisStreamStartPosition[ + self.esm_config["StartingPosition"] + ], + BatchSize=self.esm_config["BatchSize"], + MaximumBatchingWindowInSeconds=self.esm_config[ + "MaximumBatchingWindowInSeconds" + ], + MaximumRetryAttempts=self.esm_config["MaximumRetryAttempts"], + MaximumRecordAgeInSeconds=self.esm_config["MaximumRecordAgeInSeconds"], + **optional_params, + ), + ) + poller = KinesisPoller( + esm_uuid=self.esm_config["UUID"], + source_arn=source_arn, + source_parameters=source_parameters, + source_client=source_client, + processor=esm_processor, + invoke_identity_arn=self.function_role_arn, + kinesis_namespace=True, + ) + elif source_service == "dynamodbstreams": + # TODO: map all supported ESM to Pipe parameters + optional_params = {} + dead_letter_config_arn = ( + self.esm_config.get("DestinationConfig", {}).get("OnFailure", {}).get("Destination") + ) + if dead_letter_config_arn: + optional_params["DeadLetterConfig"] = {"Arn": dead_letter_config_arn} + source_parameters = PipeSourceParameters( + FilterCriteria=filter_criteria, + DynamoDBStreamParameters=PipeSourceDynamoDBStreamParameters( + StartingPosition=DynamoDBStreamStartPosition[ + self.esm_config["StartingPosition"] + ], + BatchSize=self.esm_config["BatchSize"], + MaximumBatchingWindowInSeconds=self.esm_config[ + "MaximumBatchingWindowInSeconds" + ], + MaximumRetryAttempts=self.esm_config["MaximumRetryAttempts"], + MaximumRecordAgeInSeconds=self.esm_config["MaximumRecordAgeInSeconds"], + **optional_params, + ), + ) + poller = DynamoDBPoller( + esm_uuid=self.esm_config["UUID"], + source_arn=source_arn, + source_parameters=source_parameters, + source_client=source_client, + processor=esm_processor, + ) + else: + poller_holder = PollerHolder() + lambda_hooks.create_event_source_poller.run( + poller_holder, source_service, self.esm_config + ) + + if not poller_holder.create_poller_fn: + raise Exception( + f"Unsupported event source mapping source service {source_service}. Please upvote or create a feature request." 
+ ) + + poller: Poller = poller_holder.create_poller_fn( + arn=source_arn, + client=source_client, + processor=esm_processor, + ) + + esm_worker = EsmWorker( + self.esm_config, + poller=poller, + enabled=self.enabled, + user_state_reason=user_state_reason, + ) + return esm_worker diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/event_processor.py b/localstack-core/localstack/services/lambda_/event_source_mapping/event_processor.py new file mode 100644 index 0000000000000..cccd02e843aec --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/event_processor.py @@ -0,0 +1,73 @@ +from abc import ABC, abstractmethod +from typing import TypedDict + + +class EventProcessorError(Exception): + pass + + +class PipeInternalError(EventProcessorError): + """Errors caused by an internal event processor implementation such as Pipes or Lambda ESM. + Examples: connection error to target service, transient availability issue, implementation error + https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-error-troubleshooting.html#eb-pipes-error-invoke + """ + + pass + + +class CustomerInvocationError(EventProcessorError): + """Errors caused by customers due to configuration or code errors. + Examples: insufficient permissions, logic error in synchronously invoked Lambda target. + https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-error-troubleshooting.html#eb-pipes-error-invoke + """ + + pass + + +class BatchFailureError(EventProcessorError): + """The entire batch failed.""" + + def __init__(self, error=None) -> None: + self.error = error + + +class PartialFailurePayload(TypedDict, total=False): + """Following the partial failure payload structure defined by AWS: + https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-batching-concurrency.html + Special cases: https://repost.aws/knowledge-center/lambda-sqs-report-batch-item-failures + """ + + batchItemFailures: list[dict[str, str]] + + +class PartialBatchFailureError(EventProcessorError): + """A part of the batch failed.""" + + def __init__( + self, + partial_failure_payload: PartialFailurePayload | None = None, + error=None, + ) -> None: + self.error = error + self.partial_failure_payload = partial_failure_payload + + +class EventProcessor(ABC): + """Interface for event processors such as Event Source Mapping or Pipes that process batches of events.""" + + @abstractmethod + def process_events_batch(self, input_events: list[dict]) -> None: + """Processes a batch of `input_events`. + Throws an error upon full or partial batch failure. + """ + + @abstractmethod + def generate_event_failure_context(self, abort_condition: str, **kwargs) -> dict: + """ + Generates a context object for a failed event processing invocation. + + This method is used to create a standardized failure context for both + event source mapping and pipes processing scenarios. The resulting + context will be passed to a Dead Letter Queue (DLQ). 
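+        Example shape (as produced by EsmEventProcessor.generate_event_failure_context):
+        {
+            "requestContext": {"requestId": ..., "functionArn": ..., "condition": ..., "approximateInvokeCount": ...},
+            "responseContext": {"statusCode": ..., "executedVersion": ..., "functionError": ...},
+        }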
+ """ + pass diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/noops_event_processor.py b/localstack-core/localstack/services/lambda_/event_source_mapping/noops_event_processor.py new file mode 100644 index 0000000000000..88d89fa41d026 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/noops_event_processor.py @@ -0,0 +1,11 @@ +import logging + +from localstack.services.lambda_.event_source_mapping.event_processor import EventProcessor + +LOG = logging.getLogger(__name__) + + +class NoOpsEventProcessor(EventProcessor): + def process_events_batch(self, input_events: list[dict]) -> None: + """Intentionally do nothing""" + LOG.debug("Process input events %s using NoOpsEventProcessor", input_events) diff --git a/localstack/services/stepfunctions/asl/component/state/__init__.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_loggers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/__init__.py rename to localstack-core/localstack/services/lambda_/event_source_mapping/pipe_loggers/__init__.py diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_loggers/noops_pipe_logger.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_loggers/noops_pipe_logger.py new file mode 100644 index 0000000000000..4743ef9a7339b --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_loggers/noops_pipe_logger.py @@ -0,0 +1,14 @@ +from localstack.services.lambda_.event_source_mapping.pipe_loggers.pipe_logger import PipeLogger + + +class NoOpsPipeLogger(PipeLogger): + def __init__(self): + super().__init__(log_configuration={}) + + def log_msg(self, message: dict) -> None: + # intentionally logs nothing + pass + + def log(self, logLevel: str, **kwargs): + # intentionally logs nothing + pass diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_loggers/pipe_logger.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_loggers/pipe_logger.py new file mode 100644 index 0000000000000..1dda8cd7a25f4 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_loggers/pipe_logger.py @@ -0,0 +1,113 @@ +import logging +import time +from abc import ABC, abstractmethod + +from localstack.aws.api.pipes import IncludeExecutionDataOption, LogLevel + +LOG = logging.getLogger(__name__) + + +class PipeLogger(ABC): + """Logger interface designed for EventBridge pipes logging: + https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html + """ + + log_configuration: dict + extra_fields: dict + + def __init__(self, log_configuration): + self.log_configuration = log_configuration + self.extra_fields = {} + + @abstractmethod + def log_msg(self, message: dict) -> None: + pass + + @property + def include_execution_data(self) -> list[str] | None: + return self.log_configuration.get("IncludeExecutionData") + + def set_fields(self, **kwargs): + self.extra_fields.update(kwargs) + + def log(self, logLevel: str, **kwargs): + if self.is_enabled_for(logLevel): + message = { + **self.extra_fields, + "timestamp": int(time.time() * 1000), + "logLevel": logLevel, + **kwargs, + } + filtered_message = self.filter_message(message) + LOG.debug(filtered_message) + self.log_msg(filtered_message) + + def is_enabled_for(self, level: str): + return log_levels().index(level) <= log_levels().index(self.get_effective_level()) + + def 
get_effective_level(self):
+        return self.log_configuration["Level"]
+
+    def filter_message(self, message: dict) -> dict:
+        """
+        Filters a message payload to ensure it is formatted correctly for EventBridge Pipes Logging (see [AWS docs example](https://aws.amazon.com/blogs/compute/introducing-logging-support-for-amazon-eventbridge-pipes/)):
+        ```python
+        {
+            "resourceArn": str,
+            "timestamp": str,
+            "executionId": str,
+            "messageType": str,
+            "logLevel": str,
+            "error": {
+                "message": str,
+                "httpStatusCode": int,
+                "awsService": str,
+                "requestId": str,
+                "exceptionType": str,
+                "resourceArn": str
+            },  # Optional
+            "awsRequest": str,  # Optional
+            "awsResponse": str  # Optional
+        }
+        ```
+        """
+        # https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html#eb-pipes-logs-execution-data
+        execution_data_fields = {
+            "payload",
+            "awsRequest",
+            "awsResponse",
+        }
+        fields_to_include = {
+            "resourceArn",
+            "timestamp",
+            "executionId",
+            "messageType",
+            "logLevel",
+        }
+        error_fields_to_include = {
+            "message",
+            "httpStatusCode",
+            "awsService",
+            "requestId",
+            "exceptionType",
+            "resourceArn",
+        }
+
+        if self.include_execution_data == [IncludeExecutionDataOption.ALL]:
+            fields_to_include.update(execution_data_fields)
+
+        filtered_message = {
+            key: value for key, value in message.items() if key in fields_to_include
+        }
+
+        if error := message.get("error"):
+            filtered_error = {
+                key: value for key, value in error.items() if key in error_fields_to_include
+            }
+            filtered_message["error"] = filtered_error
+
+        return filtered_message
+
+
+def log_levels():
+    return [LogLevel.OFF, LogLevel.ERROR, LogLevel.INFO, LogLevel.TRACE]
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_utils.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_utils.py
new file mode 100644
index 0000000000000..644e99c264035
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pipe_utils.py
@@ -0,0 +1,77 @@
+import json
+from datetime import datetime, timezone
+
+import botocore
+from botocore.client import BaseClient
+
+from localstack.aws.connect import connect_to
+from localstack.utils.aws.arns import parse_arn
+from localstack.utils.json import BytesEncoder
+
+
+def get_internal_client(
+    arn: str,
+    client_config: botocore.config.Config = None,
+    role_arn: str = None,
+    service_principal: str = None,
+    source_arn: str = None,
+    service: str = None,
+    session_name: str = None,
+) -> BaseClient:
+    """Return a botocore client for a given arn. 
Supports: + * assume role if `role_arn` is provided + * request metadata if `source_arn` is provided + """ + parsed_arn = parse_arn(arn) + parsed_arn["service"] = get_standardized_service_name(parsed_arn["service"]) + service = service or parsed_arn["service"] + if role_arn: + client = connect_to.with_assumed_role( + role_arn=role_arn, + service_principal=service_principal, + session_name=session_name, + region_name=parsed_arn["region"], + config=client_config, + ).get_client(service) + else: + client = connect_to( + aws_access_key_id=parsed_arn["account"], + region_name=parsed_arn["region"], + config=client_config, + ).get_client(service) + + if source_arn: + client = client.request_metadata(source_arn=source_arn, service_principal=service_principal) + + return client + + +def get_standardized_service_name(service_name: str) -> str: + """Convert ARN service namespace to standardized service name used for boto clients.""" + if service_name == "states": + return "stepfunctions" + elif service_name == "dynamodb": + return "dynamodbstreams" + else: + return service_name + + +def get_current_time() -> datetime: + return datetime.now(tz=timezone.utc) + + +def get_datetime_from_timestamp(timestamp: float) -> datetime: + return datetime.utcfromtimestamp(timestamp) + # TODO: fixed deprecated API (timestamp snapshots fail with the below) + # return datetime.fromtimestamp(timestamp, tz=timezone.utc) + + +def to_json_str(obj: any) -> str: + """Custom JSON encoding for events with potentially unserializable fields (e.g., byte string). + JSON encoders in LocalStack: + * localstack.utils.json.CustomEncoder + * localstack.utils.json.BytesEncoder + * localstack.services.events.utils.EventJSONEncoder + * localstack.services.stepfunctions.asl.utils.encoding._DateTimeEncoder + """ + return json.dumps(obj, cls=BytesEncoder) diff --git a/localstack/services/stepfunctions/asl/component/state/state_choice/__init__.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_choice/__init__.py rename to localstack-core/localstack/services/lambda_/event_source_mapping/pollers/__init__.py diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py new file mode 100644 index 0000000000000..d8b1af71b1b71 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/dynamodb_poller.py @@ -0,0 +1,119 @@ +import logging +from datetime import datetime + +from botocore.client import BaseClient + +from localstack.aws.api.dynamodbstreams import StreamStatus +from localstack.services.lambda_.event_source_mapping.event_processor import ( + EventProcessor, +) +from localstack.services.lambda_.event_source_mapping.pipe_utils import get_current_time +from localstack.services.lambda_.event_source_mapping.pollers.stream_poller import StreamPoller + +LOG = logging.getLogger(__name__) + + +class DynamoDBPoller(StreamPoller): + def __init__( + self, + source_arn: str, + source_parameters: dict | None = None, + source_client: BaseClient | None = None, + processor: EventProcessor | None = None, + partner_resource_arn: str | None = None, + esm_uuid: str | None = None, + shards: dict[str, str] | None = None, + ): + super().__init__( + source_arn, + source_parameters, + source_client, + processor, + esm_uuid=esm_uuid, + 
partner_resource_arn=partner_resource_arn, + shards=shards, + ) + + @property + def stream_parameters(self) -> dict: + return self.source_parameters["DynamoDBStreamParameters"] + + def initialize_shards(self): + # TODO: update upon re-sharding, maybe using a cache and call every time?! + stream_info = self.source_client.describe_stream(StreamArn=self.source_arn) + stream_status = stream_info["StreamDescription"]["StreamStatus"] + if stream_status != StreamStatus.ENABLED: + LOG.warning( + "DynamoDB stream %s is not enabled. Current status: %s", + self.source_arn, + stream_status, + ) + return {} + + # NOTICE: re-sharding might require updating this periodically (unknown how Pipes does it!?) + # Mapping of shard id => shard iterator + shards = {} + for shard in stream_info["StreamDescription"]["Shards"]: + shard_id = shard["ShardId"] + starting_position = self.stream_parameters["StartingPosition"] + kwargs = {} + get_shard_iterator_response = self.source_client.get_shard_iterator( + StreamArn=self.source_arn, + ShardId=shard_id, + ShardIteratorType=starting_position, + **kwargs, + ) + shards[shard_id] = get_shard_iterator_response["ShardIterator"] + + LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards)) + return shards + + def stream_arn_param(self) -> dict: + # Not supported for GetRecords: + # https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_GetRecords.html + return {} + + def event_source(self) -> str: + return "aws:dynamodb" + + def extra_metadata(self) -> dict: + return { + "eventVersion": "1.1", + } + + def transform_into_events(self, records: list[dict], shard_id) -> list[dict]: + events = [] + for record in records: + # TODO: consolidate with DynamoDB event source listener: + # localstack.services.lambda_.event_source_listeners.dynamodb_event_source_listener.DynamoDBEventSourceListener._create_lambda_event_payload + dynamodb = record["dynamodb"] + + if creation_time := dynamodb.get("ApproximateCreationDateTime"): + # Float conversion validated by TestDynamoDBEventSourceMapping.test_dynamodb_event_filter + dynamodb["ApproximateCreationDateTime"] = float(creation_time.timestamp()) + event = { + # TODO: add this metadata after filtering (these are based on the original record!) + # This requires some design adjustment because the eventId and eventName depend on the record. + "eventID": record["eventID"], + "eventName": record["eventName"], + # record content + "dynamodb": dynamodb, + } + events.append(event) + return events + + def failure_payload_details_field_name(self) -> str: + return "DDBStreamBatchInfo" + + def get_approximate_arrival_time(self, record: dict) -> float: + # TODO: validate whether the default should be now + # Optional according to AWS docs: + # https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_StreamRecord.html + # TODO: parse float properly if present from ApproximateCreationDateTime -> now works, compare via debug! 
+ return record["dynamodb"].get("todo", get_current_time().timestamp()) + + def format_datetime(self, time: datetime) -> str: + return f"{time.isoformat(timespec='seconds')}Z" + + def get_sequence_number(self, record: dict) -> str: + return record["dynamodb"]["SequenceNumber"] diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py new file mode 100644 index 0000000000000..defe87a6a6dee --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/kinesis_poller.py @@ -0,0 +1,212 @@ +import base64 +import json +import logging +from copy import deepcopy +from datetime import datetime + +from botocore.client import BaseClient + +from localstack.aws.api.kinesis import StreamStatus +from localstack.aws.api.pipes import ( + KinesisStreamStartPosition, +) +from localstack.services.lambda_.event_source_mapping.event_processor import ( + EventProcessor, +) +from localstack.services.lambda_.event_source_mapping.pollers.stream_poller import StreamPoller +from localstack.utils.strings import to_str + +LOG = logging.getLogger(__name__) + + +class KinesisPoller(StreamPoller): + # The role ARN of the processor (e.g., role ARN of the Pipe) + invoke_identity_arn: str | None + # Flag to enable nested kinesis namespace when formatting events to support the nested `kinesis` field structure + # used for Lambda ESM: https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-event-example + # EventBridge Pipes uses no nesting: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kinesis.html + kinesis_namespace: bool + + def __init__( + self, + source_arn: str, + source_parameters: dict | None = None, + source_client: BaseClient | None = None, + processor: EventProcessor | None = None, + partner_resource_arn: str | None = None, + invoke_identity_arn: str | None = None, + kinesis_namespace: bool = False, + esm_uuid: str | None = None, + shards: dict[str, str] | None = None, + ): + super().__init__( + source_arn, + source_parameters, + source_client, + processor, + esm_uuid=esm_uuid, + partner_resource_arn=partner_resource_arn, + shards=shards, + ) + self.invoke_identity_arn = invoke_identity_arn + self.kinesis_namespace = kinesis_namespace + + @property + def stream_parameters(self) -> dict: + return self.source_parameters["KinesisStreamParameters"] + + def initialize_shards(self) -> dict[str, str]: + # TODO: cache this and update/re-try upon failures + stream_info = self.source_client.describe_stream(StreamARN=self.source_arn) + stream_status = stream_info["StreamDescription"]["StreamStatus"] + if stream_status != StreamStatus.ACTIVE: + LOG.warning( + "Stream %s is not active. Current status: %s", + self.source_arn, + stream_status, + ) + return {} + + # NOTICE: re-sharding might require updating this periodically (unknown how Pipes does it!?) + # Mapping of shard id => shard iterator + shards = {} + for shard in stream_info["StreamDescription"]["Shards"]: + shard_id = shard["ShardId"] + starting_position = self.stream_parameters["StartingPosition"] + kwargs = {} + # TODO: test StartingPosition=AT_TIMESTAMP (only supported for Kinesis!) 
+            if starting_position == KinesisStreamStartPosition.AT_TIMESTAMP:
+                # GetShardIterator expects the `Timestamp` parameter for AT_TIMESTAMP
+                # (StartingSequenceNumber is only valid for *_SEQUENCE_NUMBER iterator types)
+                kwargs["Timestamp"] = self.stream_parameters["StartingPositionTimestamp"]
+            get_shard_iterator_response = self.source_client.get_shard_iterator(
+                StreamARN=self.source_arn,
+                ShardId=shard_id,
+                ShardIteratorType=starting_position,
+                **kwargs,
+            )
+            shards[shard_id] = get_shard_iterator_response["ShardIterator"]
+
+        LOG.debug("Event source %s has %d shards.", self.source_arn, len(self.shards))
+        return shards
+
+    def stream_arn_param(self) -> dict:
+        return {"StreamARN": self.source_arn}
+
+    def event_source(self) -> str:
+        return "aws:kinesis"
+
+    def extra_metadata(self) -> dict:
+        return {
+            "eventVersion": "1.0",
+            "eventName": "aws:kinesis:record",
+            "invokeIdentityArn": self.invoke_identity_arn,
+        }
+
+    def transform_into_events(self, records: list[dict], shard_id) -> list[dict]:
+        events = []
+        for record in records:
+            # TODO: consolidate with Kinesis event source listener:
+            #  localstack.services.lambda_.event_source_listeners.kinesis_event_source_listener.KinesisEventSourceListener._create_lambda_event_payload
+            #  check `encryptionType` leading to serialization errors by Dotnet Lambdas
+            sequence_number = record["SequenceNumber"]
+            event = {
+                # TODO: add this metadata after filtering.
+                #  This requires some design adjustment because the sequence number depends on the record.
+                "eventID": f"{shard_id}:{sequence_number}",
+            }
+            kinesis_fields = {
+                "kinesisSchemaVersion": "1.0",
+                "partitionKey": record["PartitionKey"],
+                "sequenceNumber": sequence_number,
+                # TODO: implement heuristic based on content type: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html#pipes-filter-sqs
+                # boto3 automatically decodes records in get_records(), so we must re-encode
+                "data": to_str(base64.b64encode(record["Data"])),
+                "approximateArrivalTimestamp": record["ApproximateArrivalTimestamp"].timestamp(),
+            }
+            if self.kinesis_namespace:
+                event["kinesis"] = kinesis_fields
+            else:
+                event.update(kinesis_fields)
+            events.append(event)
+        return events
+
+    def failure_payload_details_field_name(self) -> str:
+        return "KinesisBatchInfo"
+
+    def get_approximate_arrival_time(self, record: dict) -> float:
+        if self.kinesis_namespace:
+            return record["kinesis"]["approximateArrivalTimestamp"]
+        else:
+            return record["approximateArrivalTimestamp"]
+
+    def format_datetime(self, time: datetime) -> str:
+        return f"{time.isoformat(timespec='milliseconds')}Z"
+
+    def get_sequence_number(self, record: dict) -> str:
+        if self.kinesis_namespace:
+            return record["kinesis"]["sequenceNumber"]
+        else:
+            return record["sequenceNumber"]
+
+    def pre_filter(self, events: list[dict]) -> list[dict]:
+        # TODO: test what happens with a mixture of data and non-data filters?
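+        # Example (hypothetical) data filter pattern that takes the branch below:
+        #   {"data": {"temperature": [{"numeric": [">", 30]}]}}
+        # It matches records whose base64-decoded JSON payload has "temperature" greater than 30.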
+ if has_data_filter_criteria_parsed(self.filter_patterns): + parsed_events = [] + for event in events: + raw_data = self.get_data(event) + try: + data = self.parse_data(raw_data) + # TODO: test "data" key remapping + # Filtering remaps "kinesis.data" in ESM to "data (idempotent for Pipes using "data" directly) + # ESM: https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis-filtering.html + # Pipes: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kinesis.html + # Pipes filtering: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html + parsed_event = deepcopy(event) + parsed_event["data"] = data + + parsed_events.append(parsed_event) + except json.JSONDecodeError: + LOG.warning( + "Unable to convert event data '%s' to json... Record will be dropped.", + raw_data, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + return parsed_events + else: + return events + + def post_filter(self, events: list[dict]) -> list[dict]: + if has_data_filter_criteria_parsed(self.filter_patterns): + # convert them back (HACK for fixing parity with v1 and getting regression tests passing) + for event in events: + parsed_data = event.pop("data") + encoded_data = self.encode_data(parsed_data) + self.set_data(event, encoded_data) + return events + + def get_data(self, event: dict) -> str: + if self.kinesis_namespace: + return event["kinesis"]["data"] + else: + return event["data"] + + def set_data(self, event: dict, data: bytes) -> None: + if self.kinesis_namespace: + event["kinesis"]["data"] = data + else: + event["data"] = data + + def parse_data(self, raw_data: str) -> dict | str: + decoded_data = base64.b64decode(raw_data) + return json.loads(decoded_data) + + def encode_data(self, parsed_data: dict) -> str: + return base64.b64encode(json.dumps(parsed_data).encode()).decode() + + +def has_data_filter_criteria_parsed(parsed_filters: list[dict]) -> bool: + for filter in parsed_filters: + if "data" in filter: + return True + return False diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/poller.py new file mode 100644 index 0000000000000..3f8fdd88f0305 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/poller.py @@ -0,0 +1,208 @@ +import json +import logging +from abc import ABC, abstractmethod +from typing import Any + +from botocore.client import BaseClient + +from localstack.aws.api.pipes import PipeStateReason +from localstack.services.lambda_.event_source_mapping.event_processor import EventProcessor +from localstack.services.lambda_.event_source_mapping.noops_event_processor import ( + NoOpsEventProcessor, +) +from localstack.services.lambda_.event_source_mapping.pipe_utils import get_internal_client +from localstack.utils.aws.arns import parse_arn +from localstack.utils.event_matcher import matches_event + + +class EmptyPollResultsException(Exception): + service: str + source_arn: str + + def __init__(self, service: str = "", source_arn: str = ""): + self.service = service + self.source_arn = source_arn + + +class PipeStateReasonValues(PipeStateReason): + USER_INITIATED = "USER_INITIATED" + NO_RECORDS_PROCESSED = "No records processed" + # TODO: add others (e.g., failure) + + +LOG = logging.getLogger(__name__) + + +class Poller(ABC): + source_arn: str | None + aws_region: str | None + source_parameters: dict + filter_patterns: list[dict[str, Any]] + source_client: BaseClient + + # Target processor 
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/poller.py
new file mode 100644
index 0000000000000..3f8fdd88f0305
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/poller.py
@@ -0,0 +1,208 @@
+import json
+import logging
+from abc import ABC, abstractmethod
+from typing import Any
+
+from botocore.client import BaseClient
+
+from localstack.aws.api.pipes import PipeStateReason
+from localstack.services.lambda_.event_source_mapping.event_processor import EventProcessor
+from localstack.services.lambda_.event_source_mapping.noops_event_processor import (
+    NoOpsEventProcessor,
+)
+from localstack.services.lambda_.event_source_mapping.pipe_utils import get_internal_client
+from localstack.utils.aws.arns import parse_arn
+from localstack.utils.event_matcher import matches_event
+
+
+class EmptyPollResultsException(Exception):
+    service: str
+    source_arn: str
+
+    def __init__(self, service: str = "", source_arn: str = ""):
+        self.service = service
+        self.source_arn = source_arn
+
+
+class PipeStateReasonValues(PipeStateReason):
+    USER_INITIATED = "USER_INITIATED"
+    NO_RECORDS_PROCESSED = "No records processed"
+    # TODO: add others (e.g., failure)
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Poller(ABC):
+    source_arn: str | None
+    aws_region: str | None
+    source_parameters: dict
+    filter_patterns: list[dict[str, Any]]
+    source_client: BaseClient
+
+    # Target processor (e.g., Pipe, EventSourceMapping)
+    processor: EventProcessor
+
+    def __init__(
+        self,
+        source_arn: str | None = None,
+        source_parameters: dict | None = None,
+        source_client: BaseClient | None = None,
+        processor: EventProcessor | None = None,
+    ):
+        # TODO: handle pollers without an ARN (e.g., Apache Kafka)
+        if source_arn:
+            self.source_arn = source_arn
+            self.aws_region = parse_arn(source_arn)["region"]
+            self.source_client = source_client or get_internal_client(source_arn)
+
+        self.source_parameters = source_parameters or {}
+        filters = self.source_parameters.get("FilterCriteria", {}).get("Filters", [])
+        self.filter_patterns = [json.loads(event_filter["Pattern"]) for event_filter in filters]
+
+        # Target processor
+        self.processor = processor or NoOpsEventProcessor()
+
+    @abstractmethod
+    def event_source(self) -> str:
+        """Return the event source metadata (e.g., aws:sqs)"""
+        pass
+
+    # TODO: create an abstract fetch_records method that all children should implement. This will unify how pollers
+    #  internally retrieve data from an event source and make for much easier error handling.
+    @abstractmethod
+    def poll_events(self) -> None:
+        """Poll events from the event source, keep those matching at least one filter criterion, and invoke the target processor."""
+        pass
+
+    def close(self) -> None:
+        """Closes a target poller alongside all associated internal polling/consuming clients.
+        Only implemented for supported pollers. Therefore, the default implementation is empty."""
+        pass
+
+    def send_events_to_dlq(self, events, context) -> None:
+        """Send failed events to a DLQ configured on the source.
+        Only implemented for supported pollers. Therefore, the default implementation is empty."""
+        pass
+
+    def filter_events(self, events: list[dict]) -> list[dict]:
+        """Filter events using the EventBridge event patterns:
+        https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html"""
+        if len(self.filter_patterns) == 0:
+            return events
+
+        filtered_events = []
+        for event in events:
+            # TODO: add try/catch with default discard and error log for extra resilience
+            if any(matches_event(pattern, event) for pattern in self.filter_patterns):
+                filtered_events.append(event)
+        return filtered_events
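+
+    # Example filter pass (editor's sketch; the pattern and events below are
+    # made up for illustration and are not fixtures from this change):
+    #   self.filter_patterns = [{"body": {"status": ["ready"]}}]
+    #   {"body": {"status": "ready"}}   -> kept
+    #   {"body": {"status": "failed"}}  -> discarded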
+
+    def add_source_metadata(self, events: list[dict], extra_metadata=None) -> list[dict]:
+        """Add event source metadata to each event for eventSource, eventSourceARN, and awsRegion.
+        This metadata is added after filtering: https://repost.aws/knowledge-center/eventbridge-filter-events-with-pipes
+        See "The following fields can't be used in event patterns":
+        https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html
+        """
+        for event in events:
+            event["eventSourceARN"] = self.source_arn
+            event["eventSource"] = self.event_source()
+            event["awsRegion"] = self.aws_region
+            event.update(self.extra_metadata())
+        return events
+
+    def extra_metadata(self) -> dict:
+        """Default implementation that subclasses can override to customize"""
+        return {}
+
+
+def has_batch_item_failures(
+    result: dict | str | None, valid_item_ids: set[str] | None = None
+) -> bool:
+    """Returns False if no batch item failures are present and True otherwise (i.e., including parse exceptions)."""
+    # TODO: validate correct behavior upon exceptions
+    try:
+        failed_items_ids = parse_batch_item_failures(result, valid_item_ids)
+        return len(failed_items_ids) > 0
+    except (KeyError, ValueError):
+        return True
+
+
+def get_batch_item_failures(
+    result: dict | str | None, valid_item_ids: set[str] | None = None
+) -> list[str] | None:
+    """
+    Returns a list of failed batch item IDs. If an empty list is returned, then the batch should be considered a complete success.
+
+    If `None` is returned, the batch should be considered a complete failure.
+    """
+    try:
+        failed_items_ids = parse_batch_item_failures(result, valid_item_ids)
+        return failed_items_ids
+    except (KeyError, ValueError):
+        return None
+
+
+def parse_batch_item_failures(
+    result: dict | str | None, valid_item_ids: set[str] | None = None
+) -> list[str]:
+    """
+    Parses a partial batch failure response that looks like this: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-batching-concurrency.html
+
+    {
+      "batchItemFailures": [
+        {
+          "itemIdentifier": "id2"
+        },
+        {
+          "itemIdentifier": "id4"
+        }
+      ]
+    }
+
+    If the response returns an empty list, then the batch should be considered a complete success. If an exception
+    is raised, the batch should be considered a complete failure.
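+    For example (illustrative call, not a fixture from this change):
+    parse_batch_item_failures({"batchItemFailures": [{"itemIdentifier": "id2"}]}, valid_item_ids={"id1", "id2"})
+    returns ["id2"], whereas an unknown itemIdentifier raises KeyError and an empty one raises ValueError.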
+ + Pipes partial batch failure: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-batching-concurrency.html + Lambda ESM with SQS: https://docs.aws.amazon.com/lambda/latest/dg/services-sqs-errorhandling.html + Special cases: https://repost.aws/knowledge-center/lambda-sqs-report-batch-item-failures + Kinesis: https://docs.aws.amazon.com/lambda/latest/dg/services-kinesis-batchfailurereporting.html + + :param result: the process status (e.g., invocation result from Lambda) + :param valid_item_ids: the set of valid item ids in the batch + :raises KeyError: if the itemIdentifier value is missing or not in the batch + :raises Exception: any other exception related to parsing (e.g., JSON parser error) + :return: a list of item IDs that failed + """ + if not result: + return [] + + if isinstance(result, dict): + partial_batch_failure = result + else: + partial_batch_failure = json.loads(result) + + if not partial_batch_failure: + return [] + + batch_item_failures = partial_batch_failure.get("batchItemFailures") + + if not batch_item_failures: + return [] + + failed_items = [] + for item in batch_item_failures: + if "itemIdentifier" not in item: + raise KeyError(f"missing itemIdentifier in batchItemFailure record {item}") + + item_identifier = item["itemIdentifier"] + if not item_identifier: + raise ValueError("itemIdentifier cannot be empty or null") + + # Optionally validate whether the item_identifier is part of the batch + if valid_item_ids and item_identifier not in valid_item_ids: + raise KeyError(f"itemIdentifier '{item_identifier}' not in the batch") + + failed_items.append(item_identifier) + + return failed_items diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/sqs_poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/sqs_poller.py new file mode 100644 index 0000000000000..d39805dce9113 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/sqs_poller.py @@ -0,0 +1,342 @@ +import functools +import json +import logging +from collections import defaultdict +from functools import cached_property + +from botocore.client import BaseClient + +from localstack.aws.api.pipes import PipeSourceSqsQueueParameters +from localstack.aws.api.sqs import MessageSystemAttributeName +from localstack.aws.connect import connect_to +from localstack.services.lambda_.event_source_mapping.event_processor import ( + EventProcessor, + PartialBatchFailureError, +) +from localstack.services.lambda_.event_source_mapping.pollers.poller import ( + EmptyPollResultsException, + Poller, + parse_batch_item_failures, +) +from localstack.services.lambda_.event_source_mapping.senders.sender_utils import ( + batched, +) +from localstack.services.sqs.constants import ( + HEADER_LOCALSTACK_SQS_OVERRIDE_MESSAGE_COUNT, + HEADER_LOCALSTACK_SQS_OVERRIDE_WAIT_TIME_SECONDS, +) +from localstack.utils.aws.arns import parse_arn +from localstack.utils.strings import first_char_to_lower + +LOG = logging.getLogger(__name__) + +DEFAULT_MAX_RECEIVE_COUNT = 10 +# See https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-short-and-long-polling.html +DEFAULT_MAX_WAIT_TIME_SECONDS = 20 + + +class SqsPoller(Poller): + queue_url: str + + batch_size: int + maximum_batching_window: int + + def __init__( + self, + source_arn: str, + source_parameters: dict | None = None, + source_client: BaseClient | None = None, + processor: EventProcessor | None = None, + ): + super().__init__(source_arn, source_parameters, 
source_client, processor)
+        self.queue_url = get_queue_url(self.source_arn)
+
+        self.batch_size = self.sqs_queue_parameters.get("BatchSize", DEFAULT_MAX_RECEIVE_COUNT)
+        # HACK: When MaximumBatchingWindowInSeconds is not set, default to short-polling.
+        # While ESM sets this param (via the config factory), setting it as a default in Pipes causes
+        # parity issues with a retrieved config since no default value is returned.
+        self.maximum_batching_window = self.sqs_queue_parameters.get(
+            "MaximumBatchingWindowInSeconds", 0
+        )
+
+        self._register_client_hooks()
+
+    @property
+    def sqs_queue_parameters(self) -> PipeSourceSqsQueueParameters:
+        # TODO: De-couple Poller configuration params from ESM/Pipes-specific config (i.e., PipeSourceSqsQueueParameters)
+        return self.source_parameters["SqsQueueParameters"]
+
+    @cached_property
+    def is_fifo_queue(self) -> bool:
+        # Alternative heuristic: self.queue_url.endswith(".fifo"), but we need the call to get_queue_attributes for IAM
+        return self.get_queue_attributes().get("FifoQueue", "false").lower() == "true"
+
+    def _register_client_hooks(self):
+        event_system = self.source_client.meta.events
+
+        def handle_message_count_override(params, context, **kwargs):
+            requested_count = params.pop("sqs_override_max_message_count", None)
+            if not requested_count or requested_count <= DEFAULT_MAX_RECEIVE_COUNT:
+                return
+
+            context[HEADER_LOCALSTACK_SQS_OVERRIDE_MESSAGE_COUNT] = str(requested_count)
+
+        def handle_message_wait_time_seconds_override(params, context, **kwargs):
+            requested_wait = params.pop("sqs_override_wait_time_seconds", None)
+            if not requested_wait or requested_wait <= DEFAULT_MAX_WAIT_TIME_SECONDS:
+                return
+
+            context[HEADER_LOCALSTACK_SQS_OVERRIDE_WAIT_TIME_SECONDS] = str(requested_wait)
+
+        def handle_inject_headers(params, context, **kwargs):
+            if override_message_count := context.pop(
+                HEADER_LOCALSTACK_SQS_OVERRIDE_MESSAGE_COUNT, None
+            ):
+                params["headers"][HEADER_LOCALSTACK_SQS_OVERRIDE_MESSAGE_COUNT] = (
+                    override_message_count
+                )
+
+            if override_wait_time := context.pop(
+                HEADER_LOCALSTACK_SQS_OVERRIDE_WAIT_TIME_SECONDS, None
+            ):
+                params["headers"][HEADER_LOCALSTACK_SQS_OVERRIDE_WAIT_TIME_SECONDS] = (
+                    override_wait_time
+                )
+
+        event_system.register(
+            "provide-client-params.sqs.ReceiveMessage", handle_message_count_override
+        )
+        event_system.register(
+            "provide-client-params.sqs.ReceiveMessage", handle_message_wait_time_seconds_override
+        )
+        # Since we delete SQS messages after processing, this allows us to remove up to 10K entries at a time.
+        event_system.register(
+            "provide-client-params.sqs.DeleteMessageBatch", handle_message_count_override
+        )
+
+        event_system.register("before-call.sqs.ReceiveMessage", handle_inject_headers)
+        event_system.register("before-call.sqs.DeleteMessageBatch", handle_inject_headers)
+
+    def get_queue_attributes(self) -> dict:
+        """The API call to sqs:GetQueueAttributes is required for IAM policy streaming."""
+        get_queue_attributes_response = self.source_client.get_queue_attributes(
+            QueueUrl=self.queue_url,
+            AttributeNames=["FifoQueue"],
+        )
+        return get_queue_attributes_response.get("Attributes", {})
+
+    def event_source(self) -> str:
+        return "aws:sqs"
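+
+    # How the override kwargs travel (editor's sketch; the header constants come
+    # from localstack.services.sqs.constants): a call such as
+    #   self.source_client.receive_message(..., sqs_override_max_message_count=1000)
+    # has the non-boto kwarg popped by handle_message_count_override, stashed on
+    # the request context, and re-emitted as an HTTP header by handle_inject_headers
+    # so the LocalStack SQS provider can exceed the AWS cap of 10 messages per call.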
+
+    def poll_events(self) -> None:
+        # In order to improve performance, we've adopted long-polling for the SQS poll operation `ReceiveMessage` [1].
+        # * Our LS-internal optimizations leverage custom boto headers to set larger batch sizes and longer wait times than what the AWS API allows [2].
+        # * Higher batch collection durations and numbers of records retrieved per request mean fewer calls to the LocalStack gateway [3] when polling an event source [4].
+        # * LocalStack shutdown works because the LocalStack gateway shuts down and terminates the open connection.
+        # * Provider lifecycle hooks have been added to ensure blocking long-poll calls are gracefully interrupted and returned.
+        #
+        # Pros (+) / Cons (-):
+        # + Alleviates pressure on the gateway since each `ReceiveMessage` call only returns once we reach the desired `BatchSize` or the `WaitTimeSeconds` elapses.
+        # + Matches the AWS behavior, which also uses long-polling.
+        # - Blocks a LocalStack gateway thread (default 1k) for every open connection, which could lead to resource contention if used at scale.
+        #
+        # Refs / Notes:
+        # [1] Amazon SQS short and long polling: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-short-and-long-polling.html
+        # [2] PR (2025-02): https://github.com/localstack/localstack/pull/12002
+        # [3] Note: Under high volumes of requests, the LocalStack gateway becomes a major performance bottleneck.
+        # [4] ESM blog mentioning long-polling: https://aws.amazon.com/de/blogs/aws/aws-lambda-adds-amazon-simple-queue-service-to-supported-event-sources/
+
+        # TODO: Handle exceptions differently, i.e., QueueNotExist or ConnectionFailed should retry with backoff
+        response = self.source_client.receive_message(
+            QueueUrl=self.queue_url,
+            MaxNumberOfMessages=min(self.batch_size, DEFAULT_MAX_RECEIVE_COUNT),
+            WaitTimeSeconds=min(self.maximum_batching_window, DEFAULT_MAX_WAIT_TIME_SECONDS),
+            MessageAttributeNames=["All"],
+            MessageSystemAttributeNames=[MessageSystemAttributeName.All],
+            # Override how many messages we can receive per call
+            sqs_override_max_message_count=self.batch_size,
+            # Override how long to wait until batching conditions are met
+            sqs_override_wait_time_seconds=self.maximum_batching_window,
+        )
+
+        messages = response.get("Messages", [])
+        if not messages:
+            raise EmptyPollResultsException(service="sqs", source_arn=self.source_arn)
+
+        LOG.debug("Polled %d events from %s", len(messages), self.source_arn)
+        # TODO: implement invocation payload size quota
+        # NOTE: Split up a batch into mini-batches of up to 2.5K records each. This is to prevent exceeding the 6 MB size limit
+        # imposed on payloads sent to a Lambda as well as LocalStack Lambdas failing to handle large payloads efficiently.
+        # See https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#invocation-eventsourcemapping-batching
+        for message_batch in batched(messages, 2500):
+            if len(message_batch) < len(messages):
+                LOG.debug(
+                    "Splitting events from %s into mini-batch (%d/%d)",
+                    self.source_arn,
+                    len(message_batch),
+                    len(messages),
+                )
+            try:
+                if self.is_fifo_queue:
+                    # TODO: think about starvation behavior because one failing message could block other groups
+                    fifo_groups = split_by_message_group_id(message_batch)
+                    for fifo_group_messages in fifo_groups.values():
+                        self.handle_messages(fifo_group_messages)
+                else:
+                    self.handle_messages(message_batch)
+
+            # TODO: unify exception handling across pollers: should we catch and raise?
+            except Exception as e:
+                # TODO: improve error messages (produce same failure and design better error messages)
+                LOG.warning(
+                    "Polling or batch processing failed: %s",
+                    e,
+                    exc_info=LOG.isEnabledFor(logging.DEBUG),
+                )
+
+    def handle_messages(self, messages):
+        polled_events = transform_into_events(messages)
+        # Filtering: matching vs. discarded (i.e., not matching filter criteria)
+        # TODO: implement format detection behavior (e.g., for JSON body):
+        #  https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html#pipes-filter-sqs
+        #  Check whether we need poller-specific filter-preprocessing here without modifying the actual event!
+        # convert to json for filtering (HACK for fixing parity with v1 and getting regression tests passing)
+        for event in polled_events:
+            try:
+                event["body"] = json.loads(event["body"])
+            except json.JSONDecodeError:
+                LOG.debug(
+                    "Unable to convert event body '%s' to json... Event might be dropped.",
+                    event["body"],
+                )
+        matching_events = self.filter_events(polled_events)
+        # convert them back (HACK for fixing parity with v1 and getting regression tests passing)
+        for event in matching_events:
+            event["body"] = (
+                json.dumps(event["body"]) if not isinstance(event["body"], str) else event["body"]
+            )
+
+        all_message_ids = {message["MessageId"] for message in messages}
+        matching_message_ids = {event["messageId"] for event in matching_events}
+        discarded_message_ids = all_message_ids.difference(matching_message_ids)
+        # Delete discarded events immediately:
+        # https://lucvandonkersgoed.com/2022/01/20/the-9-ways-an-sqs-message-can-be-deleted/#7-event-source-mappings-with-filters
+        self.delete_messages(messages, discarded_message_ids)
+
+        # Don't trigger upon empty events
+        if len(matching_events) == 0:
+            return
+        # Enrich events with metadata after filtering
+        enriched_events = self.add_source_metadata(matching_events)
+
+        # Invoke the processor (e.g., Pipe, ESM) and handle partial batch failures
+        try:
+            self.processor.process_events_batch(enriched_events)
+            successful_message_ids = all_message_ids
+        except PartialBatchFailureError as e:
+            failed_message_ids = parse_batch_item_failures(
+                e.partial_failure_payload, matching_message_ids
+            )
+            successful_message_ids = matching_message_ids.difference(failed_message_ids)
+
+        # Only delete messages that are processed successfully, as described here:
+        # https://docs.aws.amazon.com/en_gb/lambda/latest/dg/with-sqs.html
+        # When Lambda reads a batch, the messages stay in the queue but are hidden for the length of the queue's
+        # visibility timeout. If your function successfully processes the batch, Lambda deletes the messages
+        # from the queue. By default, if your function encounters an error while processing a batch,
+        # all messages in that batch become visible in the queue again. For this reason, your function code must
+        # be able to process the same message multiple times without unintended side effects.
+        # Troubleshooting: https://repost.aws/knowledge-center/lambda-sqs-report-batch-item-failures
+        # For FIFO queues, AWS also deletes successfully sent messages. Therefore, the AWS docs recommend:
+        # "If you're using this feature with a FIFO queue, your function should stop processing messages after the first
+        # failure and return all failed and unprocessed messages in batchItemFailures. This helps preserve the ordering
+        # of messages in your queue."
+ # Following this recommendation could result in the unsolved side effect that valid messages are continuously + # placed in the same batch as failing messages: + # * https://stackoverflow.com/questions/78694079/how-to-stop-fifo-sqs-messages-from-being-placed-in-a-batch-with-failing-messages + # * https://stackoverflow.com/questions/76912394/can-i-report-only-messages-from-failing-group-id-in-reportbatchitemfailures-resp + + # TODO: Test blocking failure behavior for FIFO queues to guarantee strict ordering + # -> might require some checkpointing or retry control on the poller side?! + # The poller should only proceed processing FIFO queues after having retried failing messages: + # "If your pipe returns an error, the pipe attempts all retries on the affected messages before EventBridge + # receives additional messages from the same group." + # https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-sqs.html + self.delete_messages(messages, successful_message_ids) + + def delete_messages(self, messages: list[dict], message_ids_to_delete: set): + """Delete SQS `messages` from the source queue that match a MessageId within `message_ids_to_delete`""" + # TODO: unclear how (partial) failures for deleting are handled, retry or fail batch? Hard to test against AWS + if len(message_ids_to_delete) > 0: + entries = [ + {"Id": str(count), "ReceiptHandle": message["ReceiptHandle"]} + for count, message in enumerate(messages) + if message["MessageId"] in message_ids_to_delete + ] + + self.source_client.delete_message_batch( + QueueUrl=self.queue_url, + Entries=entries, + # Override how many messages can be deleted at once + sqs_override_max_message_count=self.batch_size, + ) + + +def split_by_message_group_id(messages) -> defaultdict[str, list[dict]]: + """Splitting SQS messages by MessageGroupId to ensure strict ordering for FIFO queues""" + fifo_groups = defaultdict(list) + for message in messages: + message_group_id = message["Attributes"]["MessageGroupId"] + fifo_groups[message_group_id].append(message) + return fifo_groups + + +def transform_into_events(messages: list[dict]) -> list[dict]: + events = [] + for message in messages: + # TODO: consolidate with SQS event source listener: + # localstack.services.lambda_.event_source_listeners.sqs_event_source_listener.SQSEventSourceListener._send_event_to_lambda + message_attrs = message_attributes_to_lower(message.get("MessageAttributes")) + event = { + # Original SQS message attributes + "messageId": message["MessageId"], + "receiptHandle": message["ReceiptHandle"], + # TODO: test with empty body + # TODO: implement heuristic based on content type: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html#pipes-filter-sqs + "body": message.get("Body", "MessageBody"), + "attributes": message.get("Attributes", {}), + "messageAttributes": message_attrs, + # TODO: test with empty body + "md5OfBody": message.get("MD5OfBody") or message.get("MD5OfMessageBody"), + } + # TODO: test Pipe with message attributes (only covered by Lambda ESM SQS test so far) + if md5_of_message_attributes := message.get("MD5OfMessageAttributes"): + event["md5OfMessageAttributes"] = md5_of_message_attributes + events.append(event) + return events + + +@functools.cache +def get_queue_url(queue_arn: str) -> str: + parsed_arn = parse_arn(queue_arn) + + queue_name = parsed_arn["resource"] + account_id = parsed_arn["account"] + region = parsed_arn["region"] + + sqs_client = connect_to(region_name=region).sqs + queue_url = 
sqs_client.get_queue_url(QueueName=queue_name, QueueOwnerAWSAccountId=account_id)[ + "QueueUrl" + ] + return queue_url + + +def message_attributes_to_lower(message_attrs): + """Convert message attribute details (first characters) to lower case (e.g., stringValue, dataType).""" + message_attrs = message_attrs or {} + for _, attr in message_attrs.items(): + if not isinstance(attr, dict): + continue + for key, value in dict(attr).items(): + attr[first_char_to_lower(key)] = attr.pop(key) + return message_attrs diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py new file mode 100644 index 0000000000000..07ef9a7d9cca5 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py @@ -0,0 +1,521 @@ +import json +import logging +import threading +from abc import abstractmethod +from bisect import bisect_left +from collections import defaultdict +from datetime import datetime +from typing import Iterator + +from botocore.client import BaseClient +from botocore.exceptions import ClientError + +from localstack.aws.api.pipes import ( + OnPartialBatchItemFailureStreams, +) +from localstack.services.lambda_.event_source_mapping.event_processor import ( + BatchFailureError, + CustomerInvocationError, + EventProcessor, + PartialBatchFailureError, + PipeInternalError, +) +from localstack.services.lambda_.event_source_mapping.pipe_utils import ( + get_current_time, + get_datetime_from_timestamp, + get_internal_client, +) +from localstack.services.lambda_.event_source_mapping.pollers.poller import ( + EmptyPollResultsException, + Poller, + get_batch_item_failures, +) +from localstack.services.lambda_.event_source_mapping.pollers.sqs_poller import get_queue_url +from localstack.services.lambda_.event_source_mapping.senders.sender_utils import ( + batched, +) +from localstack.utils.aws.arns import parse_arn, s3_bucket_name +from localstack.utils.backoff import ExponentialBackoff +from localstack.utils.batch_policy import Batcher +from localstack.utils.strings import long_uid + +LOG = logging.getLogger(__name__) + + +# TODO: fix this poller to support resharding +# https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-resharding.html +class StreamPoller(Poller): + # Mapping of shard id => shard iterator + # TODO: This mapping approach needs to be re-worked to instead store last processed sequence number. + shards: dict[str, str] + # Iterator for round-robin polling from different shards because a batch cannot contain events from different shards + # This is a workaround for not handling shards in parallel. 
+ iterator_over_shards: Iterator[tuple[str, str]] | None + # ESM UUID is needed in failure processing to form s3 failure destination object key + esm_uuid: str | None + + # The ARN of the processor (e.g., Pipe ARN) + partner_resource_arn: str | None + + # Used for backing-off between retries and breaking the retry loop + _is_shutdown: threading.Event + + # Collects and flushes a batch of records based on a batching policy + shard_batcher: dict[str, Batcher[dict]] + + def __init__( + self, + source_arn: str, + source_parameters: dict | None = None, + source_client: BaseClient | None = None, + processor: EventProcessor | None = None, + partner_resource_arn: str | None = None, + esm_uuid: str | None = None, + shards: dict[str, str] | None = None, + ): + super().__init__(source_arn, source_parameters, source_client, processor) + self.partner_resource_arn = partner_resource_arn + self.esm_uuid = esm_uuid + self.shards = shards if shards is not None else {} + self.iterator_over_shards = None + + self._is_shutdown = threading.Event() + + self.shard_batcher = defaultdict( + lambda: Batcher( + max_count=self.stream_parameters.get("BatchSize", 100), + max_window=self.stream_parameters.get("MaximumBatchingWindowInSeconds", 0), + ) + ) + + @abstractmethod + def transform_into_events(self, records: list[dict], shard_id) -> list[dict]: + pass + + @property + @abstractmethod + def stream_parameters(self) -> dict: + pass + + @abstractmethod + def initialize_shards(self) -> dict[str, str]: + """Returns a shard dict mapping from shard id -> shard iterator + The implementations for Kinesis and DynamoDB are similar but differ in various ways: + * Kinesis uses "StreamARN" and DynamoDB uses "StreamArn" as source parameter + * Kinesis uses "StreamStatus.ACTIVE" and DynamoDB uses "StreamStatus.ENABLED" + * Only Kinesis supports the additional StartingPosition called "AT_TIMESTAMP" using "StartingPositionTimestamp" + """ + pass + + @abstractmethod + def stream_arn_param(self) -> dict: + """Returns a dict of the correct key/value pair for the stream arn used in GetRecords. + Either StreamARN for Kinesis or {} for DynamoDB (unsupported)""" + pass + + @abstractmethod + def failure_payload_details_field_name(self) -> str: + pass + + @abstractmethod + def get_approximate_arrival_time(self, record: dict) -> float: + pass + + @abstractmethod + def format_datetime(self, time: datetime) -> str: + """Formats a datetime in the correct format for DynamoDB (with ms) or Kinesis (without ms)""" + pass + + @abstractmethod + def get_sequence_number(self, record: dict) -> str: + pass + + def close(self): + self._is_shutdown.set() + + def pre_filter(self, events: list[dict]) -> list[dict]: + return events + + def post_filter(self, events: list[dict]) -> list[dict]: + return events + + def poll_events(self): + """Generalized poller for streams such as Kinesis or DynamoDB + Examples of Kinesis consumers: + * StackOverflow: https://stackoverflow.com/a/22403036/6875981 + * AWS Sample: https://github.com/aws-samples/kinesis-poster-worker/blob/master/worker.py + Examples of DynamoDB consumers: + * Blogpost: https://www.tecracer.com/blog/2022/05/getting-a-near-real-time-view-of-a-dynamodb-stream-with-python.html + """ + # TODO: consider potential shard iterator timeout after 300 seconds (likely not relevant with short-polling): + # https://docs.aws.amazon.com/streams/latest/dev/troubleshooting-consumers.html#shard-iterator-expires-unexpectedly + # Does this happen if no records are received for 300 seconds? 
+ if not self.shards: + self.shards = self.initialize_shards() + + if not self.shards: + LOG.debug("No shards found for %s.", self.source_arn) + raise EmptyPollResultsException(service=self.event_source(), source_arn=self.source_arn) + else: + # Remove all shard batchers without corresponding shards + for shard_id in self.shard_batcher.keys() - self.shards.keys(): + self.shard_batcher.pop(shard_id, None) + + # TODO: improve efficiency because this currently limits the throughput to at most batch size per poll interval + # Handle shards round-robin. Re-initialize current shard iterator once all shards are handled. + if self.iterator_over_shards is None: + self.iterator_over_shards = iter(self.shards.items()) + + current_shard_tuple = next(self.iterator_over_shards, None) + if not current_shard_tuple: + self.iterator_over_shards = iter(self.shards.items()) + current_shard_tuple = next(self.iterator_over_shards, None) + + # TODO Better handling when shards are initialised and the iterator returns nothing + if not current_shard_tuple: + raise PipeInternalError( + "Failed to retrieve any shards for stream polling despite initialization." + ) + + try: + self.poll_events_from_shard(*current_shard_tuple) + except PipeInternalError: + # TODO: standardize logging + # Ignore and wait for the next polling interval, which will do retry + pass + + def poll_events_from_shard(self, shard_id: str, shard_iterator: str): + get_records_response = self.get_records(shard_iterator) + records: list[dict] = get_records_response.get("Records", []) + if not (next_shard_iterator := get_records_response.get("NextShardIterator")): + # If the next shard iterator is None, we can assume the shard is closed or + # has expired on the DynamoDB Local server, hence we should re-initialize. + self.shards = self.initialize_shards() + else: + # We should always be storing the next_shard_iterator value, otherwise we risk an iterator expiring + # and all records being re-processed. + self.shards[shard_id] = next_shard_iterator + + # We cannot reliably back-off when no records found since an iterator + # may have to move multiple times until records are returned. + # See https://docs.aws.amazon.com/streams/latest/dev/troubleshooting-consumers.html#getrecords-returns-empty + # However, we still need to check if batcher should be triggered due to time-based batching. + should_flush = self.shard_batcher[shard_id].add(records) + if not should_flush: + return + + # Retrieve and drain all events in batcher + collected_records = self.shard_batcher[shard_id].flush() + # If there is overflow (i.e 1k BatchSize and 1.2K returned in flush), further split up the batch. + for batch in batched(collected_records, self.stream_parameters.get("BatchSize")): + # This could potentially lead to data loss if forward_events_to_target raises an exception after a flush + # which would otherwise be solved with checkpointing. + # TODO: Implement checkpointing, leasing, etc. from https://docs.aws.amazon.com/streams/latest/dev/kcl-concepts.html + self.forward_events_to_target(shard_id, batch) + + def forward_events_to_target(self, shard_id, records): + polled_events = self.transform_into_events(records, shard_id) + abort_condition = None + # TODO: implement format detection behavior (e.g., for JSON body): + # https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html + # Check whether we need poller-specific filter-preprocessing here without modifying the actual event! 
+        # convert to json for filtering (HACK for fixing parity with v1 and getting regression tests passing)
+        # localstack.services.lambda_.event_source_listeners.kinesis_event_source_listener.KinesisEventSourceListener._filter_records
+        # TODO: explore better abstraction for the entire filtering, including the set_data and get_data remapping.
+        #  We need to clarify which transformations happen before and after filtering -> fix missing test coverage
+        parsed_events = self.pre_filter(polled_events)
+        # TODO: advance iterator past matching events!
+        #  We need to checkpoint the sequence number for each shard and then advance the shard iterator using
+        #  GetShardIterator with a given sequence number
+        #  https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html
+        #  Failing to do so effectively blocks the stream, resulting in very high latency.
+        matching_events = self.filter_events(parsed_events)
+        matching_events_post_filter = self.post_filter(matching_events)
+
+        # TODO: implement MaximumBatchingWindowInSeconds flush condition (before or after filter?)
+        # Don't trigger upon empty events
+        if len(matching_events_post_filter) == 0:
+            return
+
+        events = self.add_source_metadata(matching_events_post_filter)
+        LOG.debug("Polled %d events from %s in shard %s", len(events), self.source_arn, shard_id)
+        # -> This could be tested by setting a high retry number, using a long pipe execution, and a relatively
+        #  short record expiration age at the source. Check what happens if the record expires at the source.
+        #  A potential implementation could use checkpointing based on the iterator position (within shard scope)
+        # TODO: handle partial batch failure (see poller.py:parse_batch_item_failures)
+        # TODO: think about how to avoid starvation of other shards if one shard runs into infinite retries
+        attempts = 0
+        discarded_events_for_dlq = []
+        error_payload = {}
+
+        max_retries = self.stream_parameters.get("MaximumRetryAttempts", -1)
+        max_record_age = max(
+            self.stream_parameters.get("MaximumRecordAgeInSeconds", -1), 0
+        )  # Disable check if -1
+        # NOTE: max_retries == 0 means exponential backoff is disabled
+        boff = ExponentialBackoff(max_retries=max_retries)
+        while not abort_condition and events and not self._is_shutdown.is_set():
+            if self.max_retries_exceeded(attempts):
+                abort_condition = "RetryAttemptsExhausted"
+                break
+
+            if max_record_age:
+                events, expired_events = self.bisect_events_by_record_age(max_record_age, events)
+                if expired_events:
+                    discarded_events_for_dlq.extend(expired_events)
+                    continue
+
+            try:
+                if attempts > 0:
+                    # TODO: Should we always back off (with jitter) before processing, since we may not want multiple pollers
+                    #  all starting up and polling simultaneously?
+                    #  For example: 500 persisted ESMs starting up and requesting concurrently could flood the gateway.
+                    self._is_shutdown.wait(boff.next_backoff())
+
+                self.processor.process_events_batch(events)
+                boff.reset()
+                # We may still need to send data to a DLQ, so break the processing loop and proceed if the invocation was successful.
+                break
+            except PartialBatchFailureError as ex:
+                # TODO: add tests for partial batch failure scenarios
+                if (
+                    self.stream_parameters.get("OnPartialBatchItemFailure")
+                    == OnPartialBatchItemFailureStreams.AUTOMATIC_BISECT
+                ):
+                    # TODO: implement and test splitting batches in half until batch size 1
+                    #  https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_PipeSourceKinesisStreamParameters.html
+                    LOG.warning(
+                        "AUTOMATIC_BISECT upon partial batch item failure is not yet implemented. Retrying the entire batch."
+                    )
+                error_payload = ex.error
+
+                # Extract all sequence numbers from events in the batch. This allows us to fail the whole batch if
+                # an unknown itemIdentifier is returned.
+                batch_sequence_numbers = {
+                    self.get_sequence_number(event) for event in matching_events
+                }
+
+                # If the batchItemFailures array contains multiple items, Lambda uses the record with the lowest sequence number as the checkpoint.
+                # Lambda then retries all records starting from that checkpoint.
+                failed_sequence_ids: list[str] | None = get_batch_item_failures(
+                    ex.partial_failure_payload, batch_sequence_numbers
+                )
+
+                # If None is returned, consider the entire batch a failure.
+                if failed_sequence_ids is None:
+                    continue
+
+                # This shouldn't be possible since a PartialBatchFailureError was raised
+                if len(failed_sequence_ids) == 0:
+                    assert failed_sequence_ids, (
+                        "Invalid state encountered: PartialBatchFailureError raised but no batch item failures found."
+                    )
+
+                lowest_sequence_id: str = min(failed_sequence_ids, key=int)
+
+                # Discard all successful events and re-process from the sequence number of the failed event
+                _, events = self.bisect_events(lowest_sequence_id, events)
+            except BatchFailureError as ex:
+                error_payload = ex.error
+
+                # FIXME: partner_resource_arn is not defined in ESM
+                LOG.debug(
+                    "Attempt %d failed while processing %s with events: %s",
+                    attempts,
+                    self.partner_resource_arn or self.source_arn,
+                    events,
+                    exc_info=LOG.isEnabledFor(logging.DEBUG),
+                )
+            except Exception:
+                # FIXME: partner_resource_arn is not defined in ESM
+                LOG.error(
+                    "Attempt %d failed with unexpected error while processing %s with events: %s",
+                    attempts,
+                    self.partner_resource_arn or self.source_arn,
+                    events,
+                    exc_info=LOG.isEnabledFor(logging.DEBUG),
+                )
+            finally:
+                # Retry polling until the record expires at the source
+                attempts += 1
+
+        if discarded_events_for_dlq:
+            abort_condition = "RecordAgeExceeded"
+            error_payload = {}
+            events = discarded_events_for_dlq
+
+        # Send failed events to potential DLQ
+        if abort_condition:
+            failure_context = self.processor.generate_event_failure_context(
+                abort_condition=abort_condition,
+                error=error_payload,
+                attempts_count=attempts,
+                partner_resource_arn=self.partner_resource_arn,
+            )
+            self.send_events_to_dlq(shard_id, events, context=failure_context)
+
+    def get_records(self, shard_iterator: str) -> dict:
+        """Returns a GetRecordsOutput from the GetRecords endpoint of streaming services such as Kinesis or DynamoDB"""
+        try:
+            get_records_response = self.source_client.get_records(
+                # TODO: add test for cross-account scenario
+                # Differs for Kinesis and DynamoDB but required for cross-account scenario
+                **self.stream_arn_param(),
+                ShardIterator=shard_iterator,
+                Limit=self.stream_parameters["BatchSize"],
+            )
+            return get_records_response
+        # TODO: test iterator expired with conditional error scenario (requires failure destinations)
+        except self.source_client.exceptions.ExpiredIteratorException as e:
+            LOG.debug(
+                "Shard iterator %s expired for stream %s, re-initializing shards",
+                shard_iterator,
+                self.source_arn,
+            )
+            # TODO: test TRIM_HORIZON and AT_TIMESTAMP scenarios for this case. We don't want to start from scratch and
+            #  might need to think about checkpointing here.
+            self.shards = self.initialize_shards()
+            raise PipeInternalError from e
+        except ClientError as e:
+            if "AccessDeniedException" in str(e):
+                LOG.warning(
+                    "Insufficient permissions to get records from stream %s: %s",
+                    self.source_arn,
+                    e,
+                )
+                raise CustomerInvocationError from e
+            elif "ResourceNotFoundException" in str(e):
+                # FIXME: The 'Invalid ShardId in ShardIterator' error is returned by DynamoDB-local. Unsure when/why this is returned.
+                if "Invalid ShardId in ShardIterator" in str(e):
+                    LOG.warning(
+                        "Invalid ShardId in ShardIterator for %s. Re-initializing shards.",
+                        self.source_arn,
+                    )
+                    self.shards = self.initialize_shards()
+                else:
+                    LOG.warning(
+                        "Source stream %s does not exist: %s",
+                        self.source_arn,
+                        e,
+                    )
+                    raise CustomerInvocationError from e
+            elif "TrimmedDataAccessException" in str(e):
+                LOG.debug(
+                    "Attempted to iterate over trimmed record or expired shard iterator %s for stream %s, re-initializing shards",
+                    shard_iterator,
+                    self.source_arn,
+                )
+                self.shards = self.initialize_shards()
+            else:
+                LOG.debug("ClientError during get_records for stream %s: %s", self.source_arn, e)
+                raise PipeInternalError from e
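+
+    # Shape of the DLQ record assembled below (editor's sketch for a Kinesis
+    # source; field values are illustrative, and the details key comes from
+    # failure_payload_details_field_name(), e.g. "KinesisBatchInfo" on AWS):
+    #   {..failure context.., "KinesisBatchInfo": {"shardId": "shardId-000000000000",
+    #    "batchSize": 2, ...}, "timestamp": "2024-01-01T00:00:00.000Z", "version": "1.0"}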
+    def send_events_to_dlq(self, shard_id, events, context) -> None:
+        dlq_arn = self.stream_parameters.get("DeadLetterConfig", {}).get("Arn")
+        if dlq_arn:
+            failure_timestamp = get_current_time()
+            dlq_event = self.create_dlq_event(shard_id, events, context, failure_timestamp)
+            # Send DLQ event to DLQ target
+            parsed_arn = parse_arn(dlq_arn)
+            service = parsed_arn["service"]
+            # TODO: use a sender instance here, likely inject via DI into poller (what if it updates?)
+            if service == "sqs":
+                # TODO: inject and cache SQS client using proper IAM role (supports cross-account operations)
+                sqs_client = get_internal_client(dlq_arn)
+                # TODO: check if the DLQ exists
+                dlq_url = get_queue_url(dlq_arn)
+                # TODO: validate no FIFO queue because they are unsupported
+                sqs_client.send_message(QueueUrl=dlq_url, MessageBody=json.dumps(dlq_event))
+            elif service == "sns":
+                sns_client = get_internal_client(dlq_arn)
+                sns_client.publish(TopicArn=dlq_arn, Message=json.dumps(dlq_event))
+            elif service == "s3":
+                s3_client = get_internal_client(dlq_arn)
+                dlq_event_with_payload = {
+                    **dlq_event,
+                    "payload": {
+                        "Records": events,
+                    },
+                }
+                s3_client.put_object(
+                    Bucket=s3_bucket_name(dlq_arn),
+                    Key=get_failure_s3_object_key(self.esm_uuid, shard_id, failure_timestamp),
+                    Body=json.dumps(dlq_event_with_payload),
+                )
+            else:
+                LOG.warning("Unsupported DLQ service %s", service)
+
+    def create_dlq_event(
+        self, shard_id: str, events: list[dict], context: dict, failure_timestamp: datetime
+    ) -> dict:
+        first_record = events[0]
+        first_record_arrival = get_datetime_from_timestamp(
+            self.get_approximate_arrival_time(first_record)
+        )
+
+        last_record = events[-1]
+        last_record_arrival = get_datetime_from_timestamp(
+            self.get_approximate_arrival_time(last_record)
+        )
+        return {
+            **context,
+            self.failure_payload_details_field_name(): {
+                "approximateArrivalOfFirstRecord": self.format_datetime(first_record_arrival),
+                "approximateArrivalOfLastRecord": self.format_datetime(last_record_arrival),
+                "batchSize": len(events),
+                "endSequenceNumber": self.get_sequence_number(last_record),
+                "shardId": shard_id,
+                "startSequenceNumber": self.get_sequence_number(first_record),
+                "streamArn": self.source_arn,
+            },
+            "timestamp": failure_timestamp.isoformat(timespec="milliseconds").replace(
+                "+00:00", "Z"
+            ),
+            "version": "1.0",
+        }
+
+    def max_retries_exceeded(self, attempts: int) -> bool:
+        maximum_retry_attempts = self.stream_parameters.get("MaximumRetryAttempts", -1)
+        # Infinite retries until the source expires
+        if maximum_retry_attempts == -1:
+            return False
+        return attempts > maximum_retry_attempts
+
+    def bisect_events(
+        self, sequence_number: str, events: list[dict]
+    ) -> tuple[list[dict], list[dict]]:
+        """Splits the list of events in two at the event whose sequence number equals the passed `sequence_number`.
+        This is used for:
+        - `ReportBatchItemFailures`: Discarding events in a batch following a failure when this setting is enabled.
+        - `BisectBatchOnFunctionError`: Used to split a failed batch in two when doing a retry (not implemented)."""
+        for i, event in enumerate(events):
+            if self.get_sequence_number(event) == sequence_number:
+                return events[:i], events[i:]
+
+        return events, []
+
+    def bisect_events_by_record_age(
+        self, maximum_record_age: int, events: list[dict]
+    ) -> tuple[list[dict], list[dict]]:
+        """Splits events into [valid_events], [expired_events] based on record age.
+        Where:
+        - Events with age < maximum_record_age are valid.
+        - Events with age >= maximum_record_age are expired."""
+        cutoff_timestamp = get_current_time().timestamp() - maximum_record_age
+        index = bisect_left(events, cutoff_timestamp, key=self.get_approximate_arrival_time)
+        return events[index:], events[:index]
+
+
+def get_failure_s3_object_key(esm_uuid: str, shard_id: str, failure_datetime: datetime) -> str:
+    """
+    From https://docs.aws.amazon.com/lambda/latest/dg/kinesis-on-failure-destination.html:
+
+    The S3 object containing the invocation record uses the following naming convention
+    (placeholders reconstructed from the format string below):
+    aws/lambda/<ESM-UUID>/<shard-ID>/YYYY/MM/DD/YYYY-MM-DDTHH.MM.SS-<random-UUID>
+
+    :return: Key for the S3 object that the invocation failure record will be put to
+    """
+    timestamp = failure_datetime.strftime("%Y-%m-%dT%H.%M.%S")
+    year_month_day = failure_datetime.strftime("%Y/%m/%d")
+    random_uuid = long_uid()
+    return f"aws/lambda/{esm_uuid}/{shard_id}/{year_month_day}/{timestamp}-{random_uuid}"
diff --git a/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/__init__.py b/localstack-core/localstack/services/lambda_/event_source_mapping/senders/__init__.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/state/state_choice/comparison/__init__.py
rename to localstack-core/localstack/services/lambda_/event_source_mapping/senders/__init__.py
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/senders/lambda_sender.py b/localstack-core/localstack/services/lambda_/event_source_mapping/senders/lambda_sender.py
new file mode 100644
index 0000000000000..71911f545a600
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/senders/lambda_sender.py
@@ -0,0 +1,112 @@
+import json
+import logging
+
+from localstack.aws.api.lambda_ import InvocationType
+from localstack.aws.api.pipes import PipeTargetInvocationType
+from localstack.services.lambda_.api_utils import function_locators_from_arn
+from localstack.services.lambda_.event_source_mapping.pipe_utils import to_json_str
+from localstack.services.lambda_.event_source_mapping.pollers.poller import has_batch_item_failures
+from localstack.services.lambda_.event_source_mapping.senders.sender import (
+    PartialFailureSenderError,
+    Sender,
+    SenderError,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class LambdaSender(Sender):
+    # Flag to enable the payload dict using the "Records" key used for Lambda event source mapping
+    payload_dict: bool
+
+    # Flag to enable partial successes/failures when processing batched events through a Lambda event source mapping
+    report_batch_item_failures: bool
+
+    def __init__(
+        self,
+        target_arn,
+        target_parameters=None,
+        target_client=None,
+        payload_dict=False,
+        report_batch_item_failures=False,
+    ):
+        super().__init__(target_arn, target_parameters, target_client)
+        self.payload_dict = payload_dict
+        self.report_batch_item_failures = report_batch_item_failures
+
+    def event_target(self) -> str:
+        return "aws:lambda"
+
+    def send_events(self, events: list[dict] | dict) -> dict:
+        if self.payload_dict:
+            events = {"Records": events}
+        # TODO: test qualified + unqualified Lambda invoke
+        # According to Pipe trace logs, the internal awsRequest contains a qualifier, even if "null"
+        _, qualifier, _, _ = function_locators_from_arn(self.target_arn)
+        optional_qualifier = {}
+        if qualifier is not None:
+            optional_qualifier["Qualifier"] = qualifier
+        invocation_type = InvocationType.RequestResponse
+        if (
+            self.target_parameters.get("LambdaFunctionParameters", {}).get("InvocationType")
+            == PipeTargetInvocationType.FIRE_AND_FORGET
+        ):
+            invocation_type = InvocationType.Event
+
+        # TODO: test special payloads (e.g., None, str, empty str, bytes)
+        #  see "to_bytes(json.dumps(payload or {}, cls=BytesEncoder))" in the legacy invoke adapter:
+        #  localstack.services.lambda_.event_source_listeners.adapters.EventSourceAsfAdapter.invoke_with_statuscode
+        invoke_result = self.target_client.invoke(
+            FunctionName=self.target_arn,
+            Payload=to_json_str(events),
+            InvocationType=invocation_type,
+            **optional_qualifier,
+        )
+
+        try:
+            payload = json.load(invoke_result["Payload"])
+        except json.JSONDecodeError:
+            payload = None
+            LOG.debug(
+                "Payload from Lambda invocation '%s' is invalid json. Setting this to 'None'",
+                invoke_result["Payload"],
+            )
+
+        if function_error := invoke_result.get("FunctionError"):
+            LOG.debug(
+                "Pipe target function %s failed with FunctionError %s. Payload: %s",
+                self.target_arn,
+                function_error,
+                payload,
+            )
+            error = {
+                "message": f"Target {self.target_arn} encountered an error while processing event(s).",
+                "httpStatusCode": invoke_result["StatusCode"],
+                "awsService": "lambda",
+                "requestId": invoke_result["ResponseMetadata"]["RequestId"],
+                # TODO: fix hardcoded value by figuring out what other exception types exist
+                "exceptionType": "BadRequest",  # Currently only used in Pipes
+                "resourceArn": self.target_arn,
+                "functionError": function_error,
+                "executedVersion": invoke_result.get("ExecutedVersion", "$LATEST"),
+            }
+            raise SenderError(
+                f"Error during sending events {events} due to FunctionError {function_error}.",
+                error=error,
+            )
+
+        # The payload can contain the key "batchItemFailures" with a list of partial batch failures:
+        # https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-batching-concurrency.html
+        if self.report_batch_item_failures and has_batch_item_failures(payload):
+            error = {
+                "message": "Target invocation failed partially.",
+                "httpStatusCode": invoke_result["StatusCode"],
+                "awsService": "lambda",
+                "requestId": invoke_result["ResponseMetadata"]["RequestId"],
+                "exceptionType": "BadRequest",
+                "resourceArn": self.target_arn,
+                "executedVersion": invoke_result.get("ExecutedVersion", "$LATEST"),
+            }
+            raise PartialFailureSenderError(error=error, partial_failure_payload=payload)
+
+        return payload
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/senders/sender.py b/localstack-core/localstack/services/lambda_/event_source_mapping/senders/sender.py
new file mode 100644
index 0000000000000..58196bc3d6b02
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/senders/sender.py
@@ -0,0 +1,50 @@
+from abc import ABC, abstractmethod
+
+from botocore.client import BaseClient
+
+from localstack.services.lambda_.event_source_mapping.pipe_utils import get_internal_client
+
+
+class SenderError(Exception):
+    def __init__(self, message=None, error=None) -> None:
+        self.message = message or "Error during sending events"
+        self.error = error
+
+
+class PartialFailureSenderError(SenderError):
+    def __init__(self, message=None, error=None, partial_failure_payload=None) -> None:
+        self.message = message or "Target invocation failed partially."
+        self.error = error
+        # Following the partial failure payload structure:
+        # https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-batching-concurrency.html
+        self.partial_failure_payload = partial_failure_payload
+
+
+class Sender(ABC):
+    target_arn: str
+    target_parameters: dict
+    target_client: BaseClient
+
+    def __init__(
+        self,
+        target_arn: str,
+        target_parameters: dict | None = None,
+        target_client: BaseClient | None = None,
+    ) -> None:
+        self.target_arn = target_arn
+        self.target_parameters = target_parameters or {}
+        self.target_client = target_client or get_internal_client(target_arn)
+
+    # TODO: Can an event also be of type `bytes`?
+    @abstractmethod
+    def send_events(self, events: list[dict | str]) -> dict | None:
+        """Send the given `events` to the target.
+        Returns an optional payload with a list of "batchItemFailures" if only part of the batch succeeds.
+        """
+        pass
+
+    @abstractmethod
+    def event_target(self) -> str:
+        """Return the event target metadata (e.g., aws:sqs).
+        Format analogous to event_source of pollers."""
+        pass
diff --git a/localstack-core/localstack/services/lambda_/event_source_mapping/senders/sender_utils.py b/localstack-core/localstack/services/lambda_/event_source_mapping/senders/sender_utils.py
new file mode 100644
index 0000000000000..ab1180adbdd1d
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/event_source_mapping/senders/sender_utils.py
@@ -0,0 +1,43 @@
+import sys
+from itertools import islice
+from typing import Any, Iterable, Iterator
+
+
+def batched(iterable, n):
+    # TODO: replace this function with the native version once we support Python 3.12:
+    #  https://docs.python.org/3.12/library/itertools.html#itertools.batched
+    # batched('ABCDEFG', 3) --> ABC DEF G
+    if n < 1:
+        raise ValueError("n must be at least one")
+    it = iter(iterable)
+    while batch := tuple(islice(it, n)):
+        yield batch
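+
+# Example (editor's doctest-style sketch):
+#   list(batched("ABCDEFG", 3)) == [("A", "B", "C"), ("D", "E", "F"), ("G",)]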
+ """ + if max_bytes < 1: + raise ValueError("max_bytes must be at least one") + + it = iter(iterable) + while True: + batch = [] + current_size = 0 + try: + while current_size < max_bytes: + item = next(it) + item_size = sys.getsizeof(item) + if current_size + item_size > max_bytes: + # If adding this item exceeds max_bytes, push it back onto the iterator and stop this batch + it = iter([item] + list(it)) + break + batch.append(item) + current_size += item_size + except StopIteration: + pass + + if not batch: + break + yield tuple(batch) diff --git a/localstack-core/localstack/services/lambda_/hooks.py b/localstack-core/localstack/services/lambda_/hooks.py new file mode 100644 index 0000000000000..16195ae538bca --- /dev/null +++ b/localstack-core/localstack/services/lambda_/hooks.py @@ -0,0 +1,20 @@ +"""Definition of Plux extension points (i.e., hooks) for Lambda.""" + +from localstack.runtime.hooks import hook_spec + +HOOKS_LAMBDA_START_DOCKER_EXECUTOR = "localstack.hooks.lambda_start_docker_executor" +HOOKS_LAMBDA_PREPARE_DOCKER_EXECUTOR = "localstack.hooks.lambda_prepare_docker_executors" +HOOKS_LAMBDA_INJECT_LAYER_FETCHER = "localstack.hooks.lambda_inject_layer_fetcher" +HOOKS_LAMBDA_PREBUILD_ENVIRONMENT_IMAGE = "localstack.hooks.lambda_prebuild_environment_image" +HOOKS_LAMBDA_CREATE_EVENT_SOURCE_POLLER = "localstack.hooks.lambda_create_event_source_poller" +HOOKS_LAMBDA_SET_EVENT_SOURCE_CONFIG_DEFAULTS = ( + "localstack.hooks.lambda_set_event_source_config_defaults" +) + + +start_docker_executor = hook_spec(HOOKS_LAMBDA_START_DOCKER_EXECUTOR) +prepare_docker_executor = hook_spec(HOOKS_LAMBDA_PREPARE_DOCKER_EXECUTOR) +inject_layer_fetcher = hook_spec(HOOKS_LAMBDA_INJECT_LAYER_FETCHER) +prebuild_environment_image = hook_spec(HOOKS_LAMBDA_PREBUILD_ENVIRONMENT_IMAGE) +create_event_source_poller = hook_spec(HOOKS_LAMBDA_CREATE_EVENT_SOURCE_POLLER) +set_event_source_config_defaults = hook_spec(HOOKS_LAMBDA_SET_EVENT_SOURCE_CONFIG_DEFAULTS) diff --git a/localstack/services/awslambda/invocation/__init__.py b/localstack-core/localstack/services/lambda_/invocation/__init__.py similarity index 100% rename from localstack/services/awslambda/invocation/__init__.py rename to localstack-core/localstack/services/lambda_/invocation/__init__.py diff --git a/localstack-core/localstack/services/lambda_/invocation/assignment.py b/localstack-core/localstack/services/lambda_/invocation/assignment.py new file mode 100644 index 0000000000000..39f4d04383e26 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/assignment.py @@ -0,0 +1,191 @@ +import contextlib +import logging +from collections import defaultdict +from concurrent.futures import Future, ThreadPoolExecutor +from typing import ContextManager + +from localstack.services.lambda_.invocation.execution_environment import ( + EnvironmentStartupTimeoutException, + ExecutionEnvironment, + InvalidStatusException, +) +from localstack.services.lambda_.invocation.executor_endpoint import StatusErrorException +from localstack.services.lambda_.invocation.lambda_models import ( + FunctionVersion, + InitializationType, + OtherServiceEndpoint, +) +from localstack.utils.lambda_debug_mode.lambda_debug_mode import ( + is_lambda_debug_enabled_for, + is_lambda_debug_timeout_enabled_for, +) + +LOG = logging.getLogger(__name__) + + +class AssignmentException(Exception): + pass + + +class AssignmentService(OtherServiceEndpoint): + """ + scope: LocalStack global + """ + + # function_version manager id => runtime_environment_id => runtime_environment 
+ environments: dict[str, dict[str, ExecutionEnvironment]] + + # Global pool for spawning and killing provisioned Lambda runtime environments + provisioning_pool: ThreadPoolExecutor + + def __init__(self): + self.environments = defaultdict(dict) + self.provisioning_pool = ThreadPoolExecutor(thread_name_prefix="lambda-provisioning-pool") + + @contextlib.contextmanager + def get_environment( + self, + version_manager_id: str, + function_version: FunctionVersion, + provisioning_type: InitializationType, + ) -> ContextManager[ExecutionEnvironment]: + applicable_envs = ( + env + for env in self.environments[version_manager_id].values() + if env.initialization_type == provisioning_type + ) + execution_environment = None + for environment in applicable_envs: + try: + environment.reserve() + execution_environment = environment + break + except InvalidStatusException: + pass + + if execution_environment is None: + if provisioning_type == "provisioned-concurrency": + raise AssignmentException( + "No provisioned concurrency environment available despite lease." + ) + elif provisioning_type == "on-demand": + execution_environment = self.start_environment(version_manager_id, function_version) + self.environments[version_manager_id][execution_environment.id] = ( + execution_environment + ) + execution_environment.reserve() + else: + raise ValueError(f"Invalid provisioning type {provisioning_type}") + + try: + yield execution_environment + if is_lambda_debug_timeout_enabled_for(lambda_arn=function_version.qualified_arn): + self.stop_environment(execution_environment) + else: + execution_environment.release() + except InvalidStatusException as invalid_e: + LOG.error("InvalidStatusException: %s", invalid_e) + except Exception as e: + LOG.error( + "Failed invocation <%s>: %s", type(e), e, exc_info=LOG.isEnabledFor(logging.DEBUG) + ) + self.stop_environment(execution_environment) + raise e + + def start_environment( + self, version_manager_id: str, function_version: FunctionVersion + ) -> ExecutionEnvironment: + LOG.debug("Starting new environment") + execution_environment = ExecutionEnvironment( + function_version=function_version, + initialization_type="on-demand", + on_timeout=self.on_timeout, + version_manager_id=version_manager_id, + ) + try: + execution_environment.start() + except StatusErrorException: + raise + except EnvironmentStartupTimeoutException: + raise + except Exception as e: + message = f"Could not start new environment: {type(e).__name__}:{e}" + raise AssignmentException(message) from e + return execution_environment + + def on_timeout(self, version_manager_id: str, environment_id: str) -> None: + """Callback for deleting environment after function times out""" + del self.environments[version_manager_id][environment_id] + + def stop_environment(self, environment: ExecutionEnvironment) -> None: + version_manager_id = environment.version_manager_id + try: + environment.stop() + self.environments.get(version_manager_id).pop(environment.id) + except Exception as e: + LOG.debug( + "Error while stopping environment for lambda %s, manager id %s, environment: %s, error: %s", + environment.function_version.qualified_arn, + version_manager_id, + environment.id, + e, + ) + + def stop_environments_for_version(self, version_manager_id: str): + # We have to materialize the list before iterating due to concurrency + environments_to_stop = list(self.environments.get(version_manager_id, {}).values()) + for env in environments_to_stop: + self.stop_environment(env) + + def scale_provisioned_concurrency( + 
self, + version_manager_id: str, + function_version: FunctionVersion, + target_provisioned_environments: int, + ) -> list[Future[None]]: + # Enforce a single environment per lambda version if this is a target + # of an active Lambda Debug Mode. + qualified_lambda_version_arn = function_version.qualified_arn + if ( + is_lambda_debug_enabled_for(qualified_lambda_version_arn) + and target_provisioned_environments > 0 + ): + LOG.warning( + "Environments for '%s' enforced to '1' by Lambda Debug Mode, " + "configurations will continue to report the set value '%s'", + qualified_lambda_version_arn, + target_provisioned_environments, + ) + target_provisioned_environments = 1 + + current_provisioned_environments = [ + e + for e in self.environments[version_manager_id].values() + if e.initialization_type == "provisioned-concurrency" + ] + # TODO: refine scaling loop to re-use existing environments instead of re-creating all + # current_provisioned_environments_count = len(current_provisioned_environments) + # diff = target_provisioned_environments - current_provisioned_environments_count + + # TODO: handle case where no provisioned environment is available during scaling + # Most simple scaling implementation for now: + futures = [] + # 1) Re-create new target + for _ in range(target_provisioned_environments): + execution_environment = ExecutionEnvironment( + function_version=function_version, + initialization_type="provisioned-concurrency", + on_timeout=self.on_timeout, + version_manager_id=version_manager_id, + ) + self.environments[version_manager_id][execution_environment.id] = execution_environment + futures.append(self.provisioning_pool.submit(execution_environment.start)) + # 2) Kill all existing + for env in current_provisioned_environments: + # TODO: think about concurrent updates while deleting a function + futures.append(self.provisioning_pool.submit(self.stop_environment, env)) + + return futures + + def stop(self): + self.provisioning_pool.shutdown(cancel_futures=True) diff --git a/localstack-core/localstack/services/lambda_/invocation/counting_service.py b/localstack-core/localstack/services/lambda_/invocation/counting_service.py new file mode 100644 index 0000000000000..3c7024288a305 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/counting_service.py @@ -0,0 +1,266 @@ +import contextlib +import logging +from collections import defaultdict +from threading import RLock + +from localstack import config +from localstack.aws.api.lambda_ import TooManyRequestsException +from localstack.services.lambda_.invocation.lambda_models import ( + Function, + FunctionVersion, + InitializationType, +) +from localstack.services.lambda_.invocation.models import lambda_stores +from localstack.utils.lambda_debug_mode.lambda_debug_mode import ( + is_lambda_debug_enabled_for, +) + +LOG = logging.getLogger(__name__) + + +class ConcurrencyTracker: + """Keeps track of the number of concurrent executions per lock scope (e.g., per function or function version). 
+ The lock scope depends on the provisioning type (i.e., on-demand or provisioned): + * on-demand concurrency per function: unqualified arn ending with my-function + * provisioned concurrency per function version: qualified arn ending with my-function:1 + """ + + # Lock scope => concurrent executions counter + concurrent_executions: dict[str, int] + # Lock for safely updating the concurrent executions counter + lock: RLock + + def __init__(self): + self.concurrent_executions = defaultdict(int) + self.lock = RLock() + + def increment(self, scope: str) -> None: + self.concurrent_executions[scope] += 1 + + def atomic_decrement(self, scope: str): + with self.lock: + self.decrement(scope) + + def decrement(self, scope: str) -> None: + self.concurrent_executions[scope] -= 1 + + +def calculate_provisioned_concurrency_sum(function: Function) -> int: + """Returns the total provisioned concurrency for a given function, including all versions.""" + provisioned_concurrency_sum_for_fn = sum( + [ + provisioned_configs.provisioned_concurrent_executions + for provisioned_configs in function.provisioned_concurrency_configs.values() + ] + ) + return provisioned_concurrency_sum_for_fn + + +class CountingService: + """ + The CountingService enforces quota limits per region and account in get_invocation_lease() + for every Lambda invocation. It uses separate ConcurrencyTrackers for on-demand and provisioned concurrency + to keep track of the number of concurrent invocations. + + Concurrency limits are per region and account: + https://repost.aws/knowledge-center/lambda-concurrency-limit-increase + https://docs.aws.amazon.com/lambda/latest/dg/lambda-concurrency.htm + https://docs.aws.amazon.com/lambda/latest/dg/monitoring-concurrency.html + """ + + # (account, region) => ConcurrencyTracker (unqualified arn) => concurrent executions + on_demand_concurrency_trackers: dict[(str, str), ConcurrencyTracker] + # Lock for safely initializing new on-demand concurrency trackers + on_demand_init_lock: RLock + + # (account, region) => ConcurrencyTracker (qualified arn) => concurrent executions + provisioned_concurrency_trackers: dict[(str, str), ConcurrencyTracker] + # Lock for safely initializing new provisioned concurrency trackers + provisioned_concurrency_init_lock: RLock + + def __init__(self): + self.on_demand_concurrency_trackers = {} + self.on_demand_init_lock = RLock() + self.provisioned_concurrency_trackers = {} + self.provisioned_concurrency_init_lock = RLock() + + @contextlib.contextmanager + def get_invocation_lease( + self, function: Function | None, function_version: FunctionVersion + ) -> InitializationType: + """An invocation lease reserves the right to schedule an invocation. + The returned lease type can either be on-demand or provisioned. + Scheduling preference: + 1) Check for free provisioned concurrency => provisioned + 2) Check for reserved concurrency => on-demand + 3) Check for unreserved concurrency => on-demand + + HACK: We allow the function to be None for Lambda@Edge to skip provisioned and reserved concurrency. 
+ """ + account = function_version.id.account + region = function_version.id.region + scope_tuple = (account, region) + on_demand_tracker = self.on_demand_concurrency_trackers.get(scope_tuple) + # Double-checked locking pattern to initialize an on-demand concurrency tracker if it does not exist + if not on_demand_tracker: + with self.on_demand_init_lock: + on_demand_tracker = self.on_demand_concurrency_trackers.get(scope_tuple) + if not on_demand_tracker: + on_demand_tracker = self.on_demand_concurrency_trackers[scope_tuple] = ( + ConcurrencyTracker() + ) + + provisioned_tracker = self.provisioned_concurrency_trackers.get(scope_tuple) + # Double-checked locking pattern to initialize a provisioned concurrency tracker if it does not exist + if not provisioned_tracker: + with self.provisioned_concurrency_init_lock: + provisioned_tracker = self.provisioned_concurrency_trackers.get(scope_tuple) + if not provisioned_tracker: + provisioned_tracker = self.provisioned_concurrency_trackers[scope_tuple] = ( + ConcurrencyTracker() + ) + + # TODO: check that we don't give a lease while updating provisioned concurrency + # Potential challenge if an update happens in between reserving the lease here and actually assigning + # * Increase provisioned: It could happen that we give a lease for provisioned-concurrency although + # brand new provisioned environments are not yet initialized. + # * Decrease provisioned: It could happen that we have running invocations that should still be counted + # against the limit but they are not because we already updated the concurrency config to fewer envs. + + unqualified_function_arn = function_version.id.unqualified_arn() + qualified_arn = function_version.id.qualified_arn() + + # Enforce one lease per ARN if the global flag is set + if is_lambda_debug_enabled_for(qualified_arn): + with provisioned_tracker.lock, on_demand_tracker.lock: + on_demand_executions: int = on_demand_tracker.concurrent_executions[ + unqualified_function_arn + ] + provisioned_executions = provisioned_tracker.concurrent_executions[qualified_arn] + if on_demand_executions or provisioned_executions: + LOG.warning( + "Concurrent lambda invocations disabled for '%s' by Lambda Debug Mode", + qualified_arn, + ) + raise TooManyRequestsException( + "Rate Exceeded.", + Reason="SingleLeaseEnforcement", + Type="User", + ) + + lease_type = None + # HACK: skip reserved and provisioned concurrency if function not available (e.g., in Lambda@Edge) + if function is not None: + with provisioned_tracker.lock: + # 1) Check for free provisioned concurrency + provisioned_concurrency_config = function.provisioned_concurrency_configs.get( + function_version.id.qualifier + ) + if not provisioned_concurrency_config: + # check if any aliases point to the current version, and check the provisioned concurrency config + # for them. There can be only one config for a version, not matter if defined on the alias or version itself. 
+ for alias in function.aliases.values(): + if alias.function_version == function_version.id.qualifier: + provisioned_concurrency_config = ( + function.provisioned_concurrency_configs.get(alias.name) + ) + break + if provisioned_concurrency_config: + available_provisioned_concurrency = ( + provisioned_concurrency_config.provisioned_concurrent_executions + - provisioned_tracker.concurrent_executions[qualified_arn] + ) + if available_provisioned_concurrency > 0: + provisioned_tracker.increment(qualified_arn) + lease_type = "provisioned-concurrency" + + if not lease_type: + with on_demand_tracker.lock: + # 2) If reserved concurrency is set AND no provisioned concurrency available: + # => Check if enough reserved concurrency is available for the specific function. + # HACK: skip reserved if function not available (e.g., in Lambda@Edge) + if function and function.reserved_concurrent_executions is not None: + on_demand_running_invocation_count = on_demand_tracker.concurrent_executions[ + unqualified_function_arn + ] + available_reserved_concurrency = ( + function.reserved_concurrent_executions + - calculate_provisioned_concurrency_sum(function) + - on_demand_running_invocation_count + ) + if available_reserved_concurrency > 0: + on_demand_tracker.increment(unqualified_function_arn) + lease_type = "on-demand" + else: + extras = { + "available_reserved_concurrency": available_reserved_concurrency, + "reserved_concurrent_executions": function.reserved_concurrent_executions, + "provisioned_concurrency_sum": calculate_provisioned_concurrency_sum( + function + ), + "on_demand_running_invocation_count": on_demand_running_invocation_count, + } + LOG.debug("Insufficient reserved concurrency available: %s", extras) + raise TooManyRequestsException( + "Rate Exceeded.", + Reason="ReservedFunctionConcurrentInvocationLimitExceeded", + Type="User", + ) + # 3) If no reserved concurrency is set AND no provisioned concurrency available. + # => Check the entire state within the scope of account and region. 
+ else: + # TODO: Consider a dedicated counter for unavailable concurrency with locks for updates on + # reserved and provisioned concurrency if this is too slow + # The total concurrency allocated or used (i.e., unavailable concurrency) per account and region + total_used_concurrency = 0 + store = lambda_stores[account][region] + for fn in store.functions.values(): + if fn.reserved_concurrent_executions is not None: + total_used_concurrency += fn.reserved_concurrent_executions + else: + fn_provisioned_concurrency = calculate_provisioned_concurrency_sum(fn) + total_used_concurrency += fn_provisioned_concurrency + fn_on_demand_concurrent_executions = ( + on_demand_tracker.concurrent_executions[ + fn.latest().id.unqualified_arn() + ] + ) + total_used_concurrency += fn_on_demand_concurrent_executions + + available_unreserved_concurrency = ( + config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS - total_used_concurrency + ) + if available_unreserved_concurrency > 0: + on_demand_tracker.increment(unqualified_function_arn) + lease_type = "on-demand" + else: + if available_unreserved_concurrency < 0: + LOG.error( + "Invalid function concurrency state detected for function: %s | available unreserved concurrency: %d", + unqualified_function_arn, + available_unreserved_concurrency, + ) + extras = { + "available_unreserved_concurrency": available_unreserved_concurrency, + "lambda_limits_concurrent_executions": config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS, + "total_used_concurrency": total_used_concurrency, + } + LOG.debug("Insufficient unreserved concurrency available: %s", extras) + raise TooManyRequestsException( + "Rate Exceeded.", + Reason="ReservedFunctionConcurrentInvocationLimitExceeded", + Type="User", + ) + try: + yield lease_type + finally: + if lease_type == "provisioned-concurrency": + provisioned_tracker.atomic_decrement(qualified_arn) + elif lease_type == "on-demand": + on_demand_tracker.atomic_decrement(unqualified_function_arn) + else: + LOG.error( + "Invalid lease type detected for function: %s: %s", + unqualified_function_arn, + lease_type, + ) diff --git a/localstack-core/localstack/services/lambda_/invocation/docker_runtime_executor.py b/localstack-core/localstack/services/lambda_/invocation/docker_runtime_executor.py new file mode 100644 index 0000000000000..c67f39addb414 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/docker_runtime_executor.py @@ -0,0 +1,514 @@ +import dataclasses +import json +import logging +import shutil +import tempfile +import threading +from collections import defaultdict +from pathlib import Path +from typing import Callable, Dict, Literal, Optional + +from localstack import config +from localstack.aws.api.lambda_ import Architecture, PackageType, Runtime +from localstack.dns import server as dns_server +from localstack.services.lambda_ import hooks as lambda_hooks +from localstack.services.lambda_.invocation.executor_endpoint import ( + INVOCATION_PORT, + ExecutorEndpoint, +) +from localstack.services.lambda_.invocation.lambda_models import FunctionVersion +from localstack.services.lambda_.invocation.runtime_executor import ( + ChmodPath, + LambdaPrebuildContext, + LambdaRuntimeException, + RuntimeExecutor, +) +from localstack.services.lambda_.lambda_utils import HINT_LOG +from localstack.services.lambda_.networking import ( + get_all_container_networks_for_lambda, + get_main_endpoint_from_container, +) +from localstack.services.lambda_.packages import get_runtime_client_path +from localstack.services.lambda_.runtimes import IMAGE_MAPPING 
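Before the Docker executor module continues, a worked example of the lease accounting above, with purely hypothetical numbers; 1000 stands in for config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS, whose actual default is not visible in this diff:

```python
LIMIT = 1000  # assumed stand-in for config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS

# 2) Reserved concurrency check, scoped to one function (unqualified ARN):
reserved_concurrent_executions = 100  # function.reserved_concurrent_executions
provisioned_sum = 20                  # calculate_provisioned_concurrency_sum(function)
on_demand_running = 75                # on-demand tracker count for this function
available_reserved = reserved_concurrent_executions - provisioned_sum - on_demand_running
assert available_reserved == 5        # > 0, so an on-demand lease is granted

# 3) Unreserved concurrency check, scoped to the whole account/region: functions
#    with a reservation count it in full; every other function contributes its
#    provisioned sum plus its currently running on-demand invocations.
total_used_concurrency = 100 + 300 + 550
available_unreserved = LIMIT - total_used_concurrency
assert available_unreserved == 50     # > 0 grants a lease, else TooManyRequestsException
```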
+from localstack.utils.container_networking import get_main_container_name +from localstack.utils.container_utils.container_client import ( + BindMount, + ContainerConfiguration, + DockerNotAvailable, + DockerPlatform, + NoSuchContainer, + NoSuchImage, + PortMappings, + VolumeMappings, +) +from localstack.utils.docker_utils import DOCKER_CLIENT as CONTAINER_CLIENT +from localstack.utils.files import chmod_r, rm_rf +from localstack.utils.lambda_debug_mode.lambda_debug_mode import lambda_debug_port_for +from localstack.utils.net import get_free_tcp_port +from localstack.utils.strings import short_uid, truncate + +LOG = logging.getLogger(__name__) + +IMAGE_PREFIX = "public.ecr.aws/lambda/" +# IMAGE_PREFIX = "amazon/aws-lambda-" + +RAPID_ENTRYPOINT = "/var/rapid/init" + +InitializationType = Literal["on-demand", "provisioned-concurrency"] + +LAMBDA_DOCKERFILE = """FROM {base_img} +COPY init {rapid_entrypoint} +COPY code/ /var/task +""" + +PULLED_IMAGES: set[(str, DockerPlatform)] = set() +PULL_LOCKS: dict[(str, DockerPlatform), threading.RLock] = defaultdict(threading.RLock) + +HOT_RELOADING_ENV_VARIABLE = "LOCALSTACK_HOT_RELOADING_PATHS" + + +"""Map AWS Lambda architecture to Docker platform flags. Example: arm64 => linux/arm64""" +ARCHITECTURE_PLATFORM_MAPPING: dict[Architecture, DockerPlatform] = { + Architecture.x86_64: DockerPlatform.linux_amd64, + Architecture.arm64: DockerPlatform.linux_arm64, +} + + +def docker_platform(lambda_architecture: Architecture) -> DockerPlatform | None: + """ + Convert an AWS Lambda architecture into a Docker platform flag. Examples: + * docker_platform("x86_64") == "linux/amd64" + * docker_platform("arm64") == "linux/arm64" + + :param lambda_architecture: the instruction set that the function supports + :return: Docker platform in the format ``os[/arch[/variant]]`` or None if configured to ignore the architecture + """ + if config.LAMBDA_IGNORE_ARCHITECTURE: + return None + return ARCHITECTURE_PLATFORM_MAPPING[lambda_architecture] + + +def get_image_name_for_function(function_version: FunctionVersion) -> str: + return f"localstack/prebuild-lambda-{function_version.id.qualified_arn().replace(':', '_').replace('$', '_').lower()}" + + +def get_default_image_for_runtime(runtime: Runtime) -> str: + postfix = IMAGE_MAPPING.get(runtime) + if not postfix: + raise ValueError(f"Unsupported runtime {runtime}!") + return f"{IMAGE_PREFIX}{postfix}" + + +def _ensure_runtime_image_present(image: str, platform: DockerPlatform) -> None: + # Pull image for a given platform upon function creation such that invocations do not time out. + if (image, platform) in PULLED_IMAGES: + return + # use a lock to avoid concurrent pulling of the same image + with PULL_LOCKS[(image, platform)]: + if (image, platform) in PULLED_IMAGES: + return + try: + CONTAINER_CLIENT.pull_image(image, platform) + PULLED_IMAGES.add((image, platform)) + except NoSuchImage as e: + LOG.debug("Unable to pull image %s for runtime executor preparation.", image) + raise e + except DockerNotAvailable as e: + HINT_LOG.error( + "Failed to pull Docker image because Docker is not available in the LocalStack container " + "but required to run Lambda functions. Please add the Docker volume mount " + '"/var/run/docker.sock:/var/run/docker.sock" to your LocalStack startup. 
'
+                "https://docs.localstack.cloud/user-guide/aws/lambda/#docker-not-available"
+            )
+            raise e
+
+
+class RuntimeImageResolver:
+    """
+    Resolves Lambda runtimes to corresponding docker images
+    The default behavior resolves based on a prefix (including the repository) and a suffix (per runtime).
+
+    This can be customized via the LAMBDA_RUNTIME_IMAGE_MAPPING config in 2 distinct ways:
+
+    Option A: use a pattern string for the config variable that includes the "<runtime>" string
+    e.g. "myrepo/lambda:<runtime>-custom" would resolve the runtime "python3.9" to "myrepo/lambda:python3.9-custom"
+
+    Option B: use a JSON dict string for the config variable, mapping the runtime to the full image name & tag
+    e.g. {"python3.9": "myrepo/lambda:python3.9-custom", "python3.8": "myotherrepo/pylambda:3.8"}
+
+    Note that with Option B this will only apply to the runtimes included in the dict.
+    All other (non-included) runtimes will fall back to the default behavior.
+    """
+
+    _mapping: dict[Runtime, str]
+    _default_resolve_fn: Callable[[Runtime], str]
+
+    def __init__(
+        self, default_resolve_fn: Callable[[Runtime], str] = get_default_image_for_runtime
+    ):
+        self._mapping = dict()
+        self._default_resolve_fn = default_resolve_fn
+
+    def _resolve(self, runtime: Runtime, custom_image_mapping: str = "") -> str:
+        if runtime not in IMAGE_MAPPING:
+            raise ValueError(f"Unsupported runtime {runtime}")
+
+        if not custom_image_mapping:
+            return self._default_resolve_fn(runtime)
+
+        # Option A (pattern string that includes "<runtime>" to replace)
+        if "<runtime>" in custom_image_mapping:
+            return custom_image_mapping.replace("<runtime>", runtime)
+
+        # Option B (json dict mapping with fallback)
+        try:
+            mapping: dict = json.loads(custom_image_mapping)
+            # at this point we're loading the whole dict to avoid parsing multiple times
+            for k, v in mapping.items():
+                if k not in IMAGE_MAPPING:
+                    raise ValueError(
+                        f"Unsupported runtime ({runtime}) provided in LAMBDA_RUNTIME_IMAGE_MAPPING"
+                    )
+                self._mapping[k] = v
+
+            if runtime in self._mapping:
+                return self._mapping[runtime]
+
+            # fall back to default behavior if the runtime was not present in the custom config
+            return self._default_resolve_fn(runtime)
+
+        except Exception:
+            LOG.error(
+                "Failed to load config from LAMBDA_RUNTIME_IMAGE_MAPPING=%s",
+                custom_image_mapping,
+            )
+            raise  # TODO: validate config at start and prevent startup
+
+    def get_image_for_runtime(self, runtime: Runtime) -> str:
+        if runtime not in self._mapping:
+            resolved_image = self._resolve(runtime, config.LAMBDA_RUNTIME_IMAGE_MAPPING)
+            self._mapping[runtime] = resolved_image
+
+        return self._mapping[runtime]
+
+
+resolver = RuntimeImageResolver()
+
+
+def prepare_image(function_version: FunctionVersion, platform: DockerPlatform) -> None:
+    if not function_version.config.runtime:
+        raise NotImplementedError(
+            "Custom images are currently not supported with image prebuilding"
+        )
+
+    # create dockerfile
+    docker_file = LAMBDA_DOCKERFILE.format(
+        base_img=resolver.get_image_for_runtime(function_version.config.runtime),
+        rapid_entrypoint=RAPID_ENTRYPOINT,
+    )
+
+    code_path = function_version.config.code.get_unzipped_code_location()
+    context_path = Path(
+        f"{tempfile.gettempdir()}/lambda/prebuild_tmp/{function_version.id.function_name}-{short_uid()}"
+    )
+    context_path.mkdir(parents=True)
+    prebuild_context = LambdaPrebuildContext(
+        docker_file_content=docker_file,
+        context_path=context_path,
+        function_version=function_version,
+    )
+    lambda_hooks.prebuild_environment_image.run(prebuild_context)
+    LOG.debug(
+        "Prebuilding image for function %s from context %s and Dockerfile %s",
+        function_version.qualified_arn,
+        str(prebuild_context.context_path),
+        prebuild_context.docker_file_content,
+    )
+    # save dockerfile
+    docker_file_path = prebuild_context.context_path / "Dockerfile"
+    with docker_file_path.open(mode="w") as f:
+        f.write(prebuild_context.docker_file_content)
+
+    # copy init file
+    init_destination_path = prebuild_context.context_path / "init"
+    src_init = f"{get_runtime_client_path()}/var/rapid/init"
+    shutil.copy(src_init, init_destination_path)
+    init_destination_path.chmod(0o755)
+
+    # copy function code
+    context_code_path = prebuild_context.context_path / "code"
+    shutil.copytree(
+        f"{str(code_path)}/",
+        str(context_code_path),
+        dirs_exist_ok=True,
+    )
+    # if layers are present, permissions should be 0755
+    if prebuild_context.function_version.config.layers:
+        chmod_r(str(context_code_path), 0o755)
+
+    try:
+        image_name = get_image_name_for_function(function_version)
+        CONTAINER_CLIENT.build_image(
+            dockerfile_path=str(docker_file_path),
+            image_name=image_name,
+            platform=platform,
+        )
+    except Exception as e:
+        if LOG.isEnabledFor(logging.DEBUG):
+            LOG.exception(
+                "Error while building prebuilt lambda image for '%s'",
+                function_version.qualified_arn,
+            )
+        else:
+            LOG.error(
+                "Error while building prebuilt lambda image for '%s', Error: %s",
+                function_version.qualified_arn,
+                e,
+            )
+    finally:
+        rm_rf(str(prebuild_context.context_path))
+
+
+@dataclasses.dataclass
+class LambdaContainerConfiguration(ContainerConfiguration):
+    copy_folders: list[tuple[str, str]] = dataclasses.field(default_factory=list)
+
+
+class DockerRuntimeExecutor(RuntimeExecutor):
+    ip: Optional[str]
+    executor_endpoint: Optional[ExecutorEndpoint]
+    container_name: str
+
+    def __init__(self, id: str, function_version: FunctionVersion) -> None:
+        super(DockerRuntimeExecutor, self).__init__(id=id, function_version=function_version)
+        self.ip = None
+        self.executor_endpoint = ExecutorEndpoint(self.id)
+        self.container_name = self._generate_container_name()
+        LOG.debug("Assigning container name of %s to executor %s", self.container_name, self.id)
+
+    def get_image(self) -> str:
+        if not self.function_version.config.runtime:
+            raise NotImplementedError("Container images are a Pro feature.")
+        return (
+            get_image_name_for_function(self.function_version)
+            if config.LAMBDA_PREBUILD_IMAGES
+            else resolver.get_image_for_runtime(self.function_version.config.runtime)
+        )
+
+    def _generate_container_name(self):
+        """
+        Format <main-container-name>-lambda-<function-name>-<executor-id>
+        TODO: make the format configurable
+        """
+        container_name = "-".join(
+            [
+                get_main_container_name() or "localstack",
+                "lambda",
+                self.function_version.id.function_name.lower(),
+            ]
+        ).replace("_", "-")
+        return f"{container_name}-{self.id}"
+
+    def start(self, env_vars: dict[str, str]) -> None:
+        self.executor_endpoint.start()
+        main_network, *additional_networks = self._get_networks_for_executor()
+        container_config = LambdaContainerConfiguration(
+            image_name=None,
+            name=self.container_name,
+            env_vars=env_vars,
+            network=main_network,
+            entrypoint=RAPID_ENTRYPOINT,
+            platform=docker_platform(self.function_version.config.architectures[0]),
+            additional_flags=config.LAMBDA_DOCKER_FLAGS,
+        )
+        debug_port = lambda_debug_port_for(self.function_version.qualified_arn)
+        if debug_port is not None:
+            container_config.ports.add(debug_port, debug_port)
+
+        if self.function_version.config.package_type == PackageType.Zip:
+            if self.function_version.config.code.is_hot_reloading():
container_config.env_vars[HOT_RELOADING_ENV_VARIABLE] = "/var/task" + if container_config.volumes is None: + container_config.volumes = VolumeMappings() + container_config.volumes.add( + BindMount( + str(self.function_version.config.code.get_unzipped_code_location()), + "/var/task", + read_only=True, + ) + ) + else: + container_config.copy_folders.append( + ( + f"{str(self.function_version.config.code.get_unzipped_code_location())}/.", + "/var/task", + ) + ) + + # always chmod /tmp to 700 + chmod_paths = [ChmodPath(path="/tmp", mode="0700")] + + # set the dns server of the lambda container to the LocalStack container IP + # the dns server will automatically respond with the right target for transparent endpoint injection + if config.LAMBDA_DOCKER_DNS: + # Don't overwrite DNS container config if it is already set (e.g., using LAMBDA_DOCKER_DNS) + LOG.warning( + "Container DNS overridden to %s, connection to names pointing to LocalStack, like 'localhost.localstack.cloud' will need additional configuration.", + config.LAMBDA_DOCKER_DNS, + ) + container_config.dns = config.LAMBDA_DOCKER_DNS + else: + if dns_server.is_server_running(): + # Set the container DNS to LocalStack to resolve localhost.localstack.cloud and + # enable transparent endpoint injection (Pro image only). + container_config.dns = self.get_endpoint_from_executor() + + lambda_hooks.start_docker_executor.run(container_config, self.function_version) + + if not container_config.image_name: + container_config.image_name = self.get_image() + if config.LAMBDA_DEV_PORT_EXPOSE: + self.executor_endpoint.container_port = get_free_tcp_port() + if container_config.ports is None: + container_config.ports = PortMappings() + container_config.ports.add(self.executor_endpoint.container_port, INVOCATION_PORT) + + if config.LAMBDA_INIT_DEBUG: + container_config.entrypoint = "/debug-bootstrap.sh" + if not container_config.ports: + container_config.ports = PortMappings() + container_config.ports.add(config.LAMBDA_INIT_DELVE_PORT, config.LAMBDA_INIT_DELVE_PORT) + + if ( + self.function_version.config.layers + and not config.LAMBDA_PREBUILD_IMAGES + and self.function_version.config.package_type == PackageType.Zip + ): + # avoid chmod on mounted code paths + hot_reloading_env = container_config.env_vars.get(HOT_RELOADING_ENV_VARIABLE, "") + if "/opt" not in hot_reloading_env: + chmod_paths.append(ChmodPath(path="/opt", mode="0755")) + if "/var/task" not in hot_reloading_env: + chmod_paths.append(ChmodPath(path="/var/task", mode="0755")) + container_config.env_vars["LOCALSTACK_CHMOD_PATHS"] = json.dumps(chmod_paths) + + CONTAINER_CLIENT.create_container_from_config(container_config) + if ( + not config.LAMBDA_PREBUILD_IMAGES + or self.function_version.config.package_type != PackageType.Zip + ): + CONTAINER_CLIENT.copy_into_container( + self.container_name, f"{str(get_runtime_client_path())}/.", "/" + ) + # tiny bit inefficient since we actually overwrite the init, but otherwise the path might not exist + if config.LAMBDA_INIT_BIN_PATH: + CONTAINER_CLIENT.copy_into_container( + self.container_name, config.LAMBDA_INIT_BIN_PATH, "/var/rapid/init" + ) + if config.LAMBDA_INIT_DEBUG: + CONTAINER_CLIENT.copy_into_container( + self.container_name, config.LAMBDA_INIT_DELVE_PATH, "/var/rapid/dlv" + ) + CONTAINER_CLIENT.copy_into_container( + self.container_name, config.LAMBDA_INIT_BOOTSTRAP_PATH, "/debug-bootstrap.sh" + ) + + if not config.LAMBDA_PREBUILD_IMAGES: + # copy_folders should be empty here if package type is not zip + for source, target in 
container_config.copy_folders: + CONTAINER_CLIENT.copy_into_container(self.container_name, source, target) + + if additional_networks: + for additional_network in additional_networks: + CONTAINER_CLIENT.connect_container_to_network( + additional_network, self.container_name + ) + + CONTAINER_CLIENT.start_container(self.container_name) + # still using main network as main entrypoint + self.ip = CONTAINER_CLIENT.get_container_ipv4_for_network( + container_name_or_id=self.container_name, container_network=main_network + ) + if config.LAMBDA_DEV_PORT_EXPOSE: + self.ip = "127.0.0.1" + self.executor_endpoint.container_address = self.ip + + self.executor_endpoint.wait_for_startup() + + def stop(self) -> None: + CONTAINER_CLIENT.stop_container(container_name=self.container_name, timeout=5) + if config.LAMBDA_REMOVE_CONTAINERS: + CONTAINER_CLIENT.remove_container(container_name=self.container_name) + try: + self.executor_endpoint.shutdown() + except Exception as e: + LOG.debug( + "Error while stopping executor endpoint for lambda %s, error: %s", + self.function_version.qualified_arn, + e, + ) + + def get_address(self) -> str: + if not self.ip: + raise LambdaRuntimeException(f"IP address of executor '{self.id}' unknown") + return self.ip + + def get_endpoint_from_executor(self) -> str: + return get_main_endpoint_from_container() + + def _get_networks_for_executor(self) -> list[str]: + return get_all_container_networks_for_lambda() + + def invoke(self, payload: Dict[str, str]): + LOG.debug( + "Sending invoke-payload '%s' to executor '%s'", + truncate(json.dumps(payload), config.LAMBDA_TRUNCATE_STDOUT), + self.id, + ) + return self.executor_endpoint.invoke(payload) + + def get_logs(self) -> str: + try: + return CONTAINER_CLIENT.get_container_logs(container_name_or_id=self.container_name) + except NoSuchContainer: + return "Container was not created" + + @classmethod + def prepare_version(cls, function_version: FunctionVersion) -> None: + lambda_hooks.prepare_docker_executor.run(function_version) + # Trigger the installation of the Lambda runtime-init binary before invocation and + # cache the result to save time upon every invocation. + get_runtime_client_path() + if function_version.config.code: + function_version.config.code.prepare_for_execution() + image_name = resolver.get_image_for_runtime(function_version.config.runtime) + platform = docker_platform(function_version.config.architectures[0]) + _ensure_runtime_image_present(image_name, platform) + if config.LAMBDA_PREBUILD_IMAGES: + prepare_image(function_version, platform) + + @classmethod + def cleanup_version(cls, function_version: FunctionVersion) -> None: + if config.LAMBDA_PREBUILD_IMAGES: + # TODO re-enable image cleanup. + # Enabling it currently deletes image after updates as well + # It also creates issues when cleanup is concurrently with build + # probably due to intermediate layers being deleted + # image_name = get_image_name_for_function(function_version) + # LOG.debug("Removing image %s after version deletion", image_name) + # CONTAINER_CLIENT.remove_image(image_name) + pass + + def get_runtime_endpoint(self) -> str: + return f"http://{self.get_endpoint_from_executor()}:{config.GATEWAY_LISTEN[0].port}{self.executor_endpoint.get_endpoint_prefix()}" + + @classmethod + def validate_environment(cls) -> bool: + if not CONTAINER_CLIENT.has_docker(): + LOG.warning( + "WARNING: Docker not available in the LocalStack container but required to run Lambda " + 'functions. 
Please add the Docker volume mount "/var/run/docker.sock:/var/run/docker.sock" to your ' + "LocalStack startup. https://docs.localstack.cloud/user-guide/aws/lambda/#docker-not-available" + ) + return False + return True diff --git a/localstack-core/localstack/services/lambda_/invocation/event_manager.py b/localstack-core/localstack/services/lambda_/invocation/event_manager.py new file mode 100644 index 0000000000000..a433460543b7b --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/event_manager.py @@ -0,0 +1,572 @@ +import base64 +import dataclasses +import json +import logging +import threading +import time +from concurrent.futures import ThreadPoolExecutor +from datetime import datetime +from math import ceil + +from botocore.config import Config + +from localstack import config +from localstack.aws.api.lambda_ import InvocationType, TooManyRequestsException +from localstack.services.lambda_.analytics import ( + FunctionOperation, + FunctionStatus, + function_counter, +) +from localstack.services.lambda_.invocation.internal_sqs_queue import get_fake_sqs_client +from localstack.services.lambda_.invocation.lambda_models import ( + EventInvokeConfig, + FunctionVersion, + Invocation, + InvocationResult, +) +from localstack.services.lambda_.invocation.version_manager import LambdaVersionManager +from localstack.utils.aws import dead_letter_queue +from localstack.utils.aws.message_forwarding import send_event_to_target +from localstack.utils.strings import md5, to_str +from localstack.utils.threads import FuncThread +from localstack.utils.time import timestamp_millis +from localstack.utils.xray.trace_header import TraceHeader + +LOG = logging.getLogger(__name__) + + +def get_sqs_client(function_version: FunctionVersion, client_config=None): + return get_fake_sqs_client() + + +# TODO: remove once DLQ handling is refactored following the removal of the legacy lambda provider +class LegacyInvocationException(Exception): + def __init__(self, message, log_output=None, result=None): + super(LegacyInvocationException, self).__init__(message) + self.log_output = log_output + self.result = result + + +@dataclasses.dataclass +class SQSInvocation: + invocation: Invocation + retries: int = 0 + exception_retries: int = 0 + + def encode(self) -> str: + # Encode TraceHeader as string + aws_trace_header = self.invocation.trace_context.get("aws_trace_header") + aws_trace_header_str = aws_trace_header.to_header_str() + self.invocation.trace_context["aws_trace_header"] = aws_trace_header_str + return json.dumps( + { + "payload": to_str(base64.b64encode(self.invocation.payload)), + "invoked_arn": self.invocation.invoked_arn, + "client_context": self.invocation.client_context, + "invocation_type": self.invocation.invocation_type, + "invoke_time": self.invocation.invoke_time.isoformat(), + # = invocation_id + "request_id": self.invocation.request_id, + "retries": self.retries, + "exception_retries": self.exception_retries, + "trace_context": self.invocation.trace_context, + } + ) + + @classmethod + def decode(cls, message: str) -> "SQSInvocation": + invocation_dict = json.loads(message) + invocation = Invocation( + payload=base64.b64decode(invocation_dict["payload"]), + invoked_arn=invocation_dict["invoked_arn"], + client_context=invocation_dict["client_context"], + invocation_type=invocation_dict["invocation_type"], + invoke_time=datetime.fromisoformat(invocation_dict["invoke_time"]), + request_id=invocation_dict["request_id"], + trace_context=invocation_dict.get("trace_context"), + ) + # 
Decode TraceHeader + aws_trace_header_str = invocation_dict.get("trace_context", {}).get("aws_trace_header") + invocation_dict["trace_context"]["aws_trace_header"] = TraceHeader.from_header_str( + aws_trace_header_str + ) + return cls( + invocation=invocation, + retries=invocation_dict["retries"], + exception_retries=invocation_dict["exception_retries"], + ) + + +def has_enough_time_for_retry( + sqs_invocation: SQSInvocation, event_invoke_config: EventInvokeConfig +) -> bool: + time_passed = datetime.now() - sqs_invocation.invocation.invoke_time + delay_queue_invoke_seconds = ( + sqs_invocation.retries + 1 + ) * config.LAMBDA_RETRY_BASE_DELAY_SECONDS + # 6 hours is the default based on these AWS sources: + # https://repost.aws/questions/QUd214DdOQRkKWr7D8IuSMIw/why-is-aws-lambda-eventinvokeconfig-s-limit-for-maximumretryattempts-2 + # https://aws.amazon.com/blogs/compute/introducing-new-asynchronous-invocation-metrics-for-aws-lambda/ + # https://aws.amazon.com/about-aws/whats-new/2019/11/aws-lambda-supports-max-retry-attempts-event-age-asynchronous-invocations/ + maximum_event_age_in_seconds = 6 * 60 * 60 + if event_invoke_config and event_invoke_config.maximum_event_age_in_seconds is not None: + maximum_event_age_in_seconds = event_invoke_config.maximum_event_age_in_seconds + return ( + maximum_event_age_in_seconds + and ceil(time_passed.total_seconds()) + delay_queue_invoke_seconds + <= maximum_event_age_in_seconds + ) + + +# TODO: optimize this client configuration. Do we need to consider client caching here? +CLIENT_CONFIG = Config( + connect_timeout=5, + read_timeout=10, + retries={"max_attempts": 0}, +) + + +class Poller: + version_manager: LambdaVersionManager + event_queue_url: str + _shutdown_event: threading.Event + invoker_pool: ThreadPoolExecutor + + def __init__(self, version_manager: LambdaVersionManager, event_queue_url: str): + self.version_manager = version_manager + self.event_queue_url = event_queue_url + self._shutdown_event = threading.Event() + function_id = self.version_manager.function_version.id + # TODO: think about scaling, test it, make it configurable?! 
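Before the Poller wiring continues, note that has_enough_time_for_retry() above reduces to a lookahead comparison: the event must still be younger than the maximum age at the moment the next retry would fire. A small worked sketch, assuming LAMBDA_RETRY_BASE_DELAY_SECONDS is set to 60; that value is an assumption, the shipped default is not visible in this diff:

```python
from datetime import datetime, timedelta
from math import ceil

# Hypothetical event: first delivered 50 minutes ago, one retry already attempted.
invoke_time = datetime.now() - timedelta(minutes=50)
retries = 1
retry_base_delay = 60  # assumed config.LAMBDA_RETRY_BASE_DELAY_SECONDS

time_passed = datetime.now() - invoke_time  # roughly 3000 seconds
delay_queue_invoke_seconds = (retries + 1) * retry_base_delay  # next retry in ~120s
maximum_event_age_in_seconds = 60 * 60  # 1h via EventInvokeConfig (default would be 6h)

# Retry only if the event is still within the maximum age when re-invoked:
can_retry = (
    ceil(time_passed.total_seconds()) + delay_queue_invoke_seconds
    <= maximum_event_age_in_seconds
)
print(can_retry)  # True here: ~3000 + 120 <= 3600
```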
+ self.invoker_pool = ThreadPoolExecutor( + thread_name_prefix=f"lambda-invoker-{function_id.function_name}:{function_id.qualifier}" + ) + + def run(self, *args, **kwargs): + sqs_client = get_sqs_client( + self.version_manager.function_version, client_config=CLIENT_CONFIG + ) + function_timeout = self.version_manager.function_version.config.timeout + while not self._shutdown_event.is_set(): + try: + response = sqs_client.receive_message( + QueueUrl=self.event_queue_url, + # TODO: consider replacing with short polling instead of long polling to prevent keeping connections open + # however, we had some serious performance issues when tried out, so those have to be investigated first + WaitTimeSeconds=2, + # Related: SQS event source mapping batches up to 10 messages: + # https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html + MaxNumberOfMessages=10, + VisibilityTimeout=function_timeout + 60, + ) + if not response.get("Messages"): + continue + LOG.debug("[%s] Got %d messages", self.event_queue_url, len(response["Messages"])) + # Guard against shutdown event arriving while polling SQS for messages + if not self._shutdown_event.is_set(): + for message in response["Messages"]: + # NOTE: queueing within the thread pool executor could lead to double executions + # due to the visibility timeout + self.invoker_pool.submit(self.handle_message, message) + + except Exception as e: + # TODO: if the gateway shuts down before the shutdown event even is set, + # we might still get an error message + # after shutdown of LS, we might expectedly get errors, if other components shut down. + # In any case, after the event manager is shut down, we do not need to spam error logs in case + # some resource is already missing + if self._shutdown_event.is_set(): + return + LOG.error( + "Error while polling lambda events for function %s: %s", + self.version_manager.function_version.qualified_arn, + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + # some time between retries to avoid running into the problem right again + time.sleep(1) + + def stop(self): + LOG.debug( + "Stopping event poller %s %s", + self.version_manager.function_version.qualified_arn, + id(self), + ) + self._shutdown_event.set() + self.invoker_pool.shutdown(cancel_futures=True, wait=False) + + def handle_message(self, message: dict) -> None: + failure_cause = None + qualifier = self.version_manager.function_version.id.qualifier + event_invoke_config = self.version_manager.function.event_invoke_configs.get(qualifier) + runtime = None + status = None + try: + sqs_invocation = SQSInvocation.decode(message["Body"]) + invocation = sqs_invocation.invocation + try: + invocation_result = self.version_manager.invoke(invocation=invocation) + function_config = self.version_manager.function_version.config + function_counter.labels( + operation=FunctionOperation.invoke, + runtime=function_config.runtime or "n/a", + status=FunctionStatus.success, + invocation_type=InvocationType.Event, + package_type=function_config.package_type, + ).increment() + except Exception as e: + # Reserved concurrency == 0 + if self.version_manager.function.reserved_concurrent_executions == 0: + failure_cause = "ZeroReservedConcurrency" + status = FunctionStatus.zero_reserved_concurrency_error + # Maximum event age expired (lookahead for next retry) + elif not has_enough_time_for_retry(sqs_invocation, event_invoke_config): + failure_cause = "EventAgeExceeded" + status = FunctionStatus.event_age_exceeded_error + if failure_cause: + invocation_result = InvocationResult( + 
is_error=True, request_id=invocation.request_id, payload=None, logs=None + ) + self.process_failure_destination( + sqs_invocation, invocation_result, event_invoke_config, failure_cause + ) + self.process_dead_letter_queue(sqs_invocation, invocation_result) + return + # 3) Otherwise, retry without increasing counter + status = self.process_throttles_and_system_errors(sqs_invocation, e) + return + finally: + sqs_client = get_sqs_client(self.version_manager.function_version) + sqs_client.delete_message( + QueueUrl=self.event_queue_url, ReceiptHandle=message["ReceiptHandle"] + ) + # status MUST be set before returning + package_type = self.version_manager.function_version.config.package_type + function_counter.labels( + operation=FunctionOperation.invoke, + runtime=runtime or "n/a", + status=status, + invocation_type=InvocationType.Event, + package_type=package_type, + ).increment() + + # Good summary blogpost: https://haithai91.medium.com/aws-lambdas-retry-behaviors-edff90e1cf1b + # Asynchronous invocation handling: https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html + # https://aws.amazon.com/blogs/compute/introducing-new-asynchronous-invocation-metrics-for-aws-lambda/ + max_retry_attempts = 2 + if event_invoke_config and event_invoke_config.maximum_retry_attempts is not None: + max_retry_attempts = event_invoke_config.maximum_retry_attempts + + # An invocation error either leads to a terminal failure or to a scheduled retry + if invocation_result.is_error: # invocation error + failure_cause = None + # Reserved concurrency == 0 + if self.version_manager.function.reserved_concurrent_executions == 0: + failure_cause = "ZeroReservedConcurrency" + # Maximum retries exhausted + elif sqs_invocation.retries >= max_retry_attempts: + failure_cause = "RetriesExhausted" + # TODO: test what happens if max event age expired before it gets scheduled the first time?! + # Maximum event age expired (lookahead for next retry) + elif not has_enough_time_for_retry(sqs_invocation, event_invoke_config): + failure_cause = "EventAgeExceeded" + + if failure_cause: # handle failure destination and DLQ + self.process_failure_destination( + sqs_invocation, invocation_result, event_invoke_config, failure_cause + ) + self.process_dead_letter_queue(sqs_invocation, invocation_result) + return + else: # schedule retry + sqs_invocation.retries += 1 + # Assumption: We assume that the internal exception retries counter is reset after + # an invocation that does not throw an exception + sqs_invocation.exception_retries = 0 + # LAMBDA_RETRY_BASE_DELAY_SECONDS has a limit of 300s because the maximum SQS DelaySeconds + # is 15 minutes (900s) and the maximum retry count is 3. SQS quota for "Message timer": + # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html + delay_seconds = sqs_invocation.retries * config.LAMBDA_RETRY_BASE_DELAY_SECONDS + # TODO: max SQS message size limit could break parity with AWS because + # our SQSInvocation contains additional fields! 
256kb is max for both Lambda payload + SQS + # TODO: write test with max SQS message size + sqs_client.send_message( + QueueUrl=self.event_queue_url, + MessageBody=sqs_invocation.encode(), + DelaySeconds=delay_seconds, + ) + return + else: # invocation success + self.process_success_destination( + sqs_invocation, invocation_result, event_invoke_config + ) + except Exception as e: + LOG.error( + "Error handling lambda invoke %s", e, exc_info=LOG.isEnabledFor(logging.DEBUG) + ) + + def process_throttles_and_system_errors( + self, sqs_invocation: SQSInvocation, error: Exception + ) -> str: + # If the function doesn't have enough concurrency available to process all events, additional + # requests are throttled. For throttling errors (429) and system errors (500-series), Lambda returns + # the event to the queue and attempts to run the function again for up to 6 hours. The retry interval + # increases exponentially from 1 second after the first attempt to a maximum of 5 minutes. If the + # queue contains many entries, Lambda increases the retry interval and reduces the rate at which it + # reads events from the queue. Source: + # https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html + # Difference depending on error cause: + # https://aws.amazon.com/blogs/compute/introducing-new-asynchronous-invocation-metrics-for-aws-lambda/ + # Troubleshooting 500 errors: + # https://repost.aws/knowledge-center/lambda-troubleshoot-invoke-error-502-500 + if isinstance(error, TooManyRequestsException): # Throttles 429 + LOG.debug("Throttled lambda %s: %s", self.version_manager.function_arn, error) + status = FunctionStatus.throttle_error + else: # System errors 5xx + LOG.debug( + "Service exception in lambda %s: %s", self.version_manager.function_arn, error + ) + status = FunctionStatus.system_error + maximum_exception_retry_delay_seconds = 5 * 60 + delay_seconds = min( + 2**sqs_invocation.exception_retries, maximum_exception_retry_delay_seconds + ) + # TODO: calculate delay seconds into max event age handling + sqs_client = get_sqs_client(self.version_manager.function_version) + sqs_client.send_message( + QueueUrl=self.event_queue_url, + MessageBody=sqs_invocation.encode(), + DelaySeconds=delay_seconds, + ) + return status + + def process_success_destination( + self, + sqs_invocation: SQSInvocation, + invocation_result: InvocationResult, + event_invoke_config: EventInvokeConfig | None, + ) -> None: + if event_invoke_config is None: + return + success_destination = event_invoke_config.destination_config.get("OnSuccess", {}).get( + "Destination" + ) + if success_destination is None: + return + LOG.debug("Handling success destination for %s", self.version_manager.function_arn) + + original_payload = sqs_invocation.invocation.payload + destination_payload = { + "version": "1.0", + "timestamp": timestamp_millis(), + "requestContext": { + "requestId": invocation_result.request_id, + "functionArn": self.version_manager.function_version.qualified_arn, + "condition": "Success", + "approximateInvokeCount": sqs_invocation.retries + 1, + }, + "requestPayload": json.loads(to_str(original_payload)), + "responseContext": { + "statusCode": 200, + "executedVersion": self.version_manager.function_version.id.qualifier, + }, + "responsePayload": json.loads(to_str(invocation_result.payload or {})), + } + + target_arn = event_invoke_config.destination_config["OnSuccess"]["Destination"] + try: + send_event_to_target( + target_arn=target_arn, + event=destination_payload, + 
role=self.version_manager.function_version.config.role, + source_arn=self.version_manager.function_version.id.unqualified_arn(), + source_service="lambda", + events_source="lambda", + events_detail_type="Lambda Function Invocation Result - Success", + ) + except Exception as e: + LOG.warning("Error sending invocation result to %s: %s", target_arn, e) + + def process_failure_destination( + self, + sqs_invocation: SQSInvocation, + invocation_result: InvocationResult, + event_invoke_config: EventInvokeConfig | None, + failure_cause: str, + ): + if event_invoke_config is None: + return + failure_destination = event_invoke_config.destination_config.get("OnFailure", {}).get( + "Destination" + ) + if failure_destination is None: + return + LOG.debug("Handling failure destination for %s", self.version_manager.function_arn) + + original_payload = sqs_invocation.invocation.payload + if failure_cause == "ZeroReservedConcurrency": + approximate_invoke_count = sqs_invocation.retries + else: + approximate_invoke_count = sqs_invocation.retries + 1 + destination_payload = { + "version": "1.0", + "timestamp": timestamp_millis(), + "requestContext": { + "requestId": invocation_result.request_id, + "functionArn": self.version_manager.function_version.qualified_arn, + "condition": failure_cause, + "approximateInvokeCount": approximate_invoke_count, + }, + "requestPayload": json.loads(to_str(original_payload)), + } + if failure_cause != "ZeroReservedConcurrency": + destination_payload["responseContext"] = { + "statusCode": 200, + "executedVersion": self.version_manager.function_version.id.qualifier, + "functionError": "Unhandled", + } + destination_payload["responsePayload"] = json.loads(to_str(invocation_result.payload)) + + target_arn = event_invoke_config.destination_config["OnFailure"]["Destination"] + try: + send_event_to_target( + target_arn=target_arn, + event=destination_payload, + role=self.version_manager.function_version.config.role, + source_arn=self.version_manager.function_version.id.unqualified_arn(), + source_service="lambda", + events_source="lambda", + events_detail_type="Lambda Function Invocation Result - Failure", + ) + except Exception as e: + LOG.warning("Error sending invocation result to %s: %s", target_arn, e) + + def process_dead_letter_queue( + self, + sqs_invocation: SQSInvocation, + invocation_result: InvocationResult, + ): + LOG.debug("Handling dead letter queue for %s", self.version_manager.function_arn) + try: + dead_letter_queue._send_to_dead_letter_queue( + source_arn=self.version_manager.function_arn, + dlq_arn=self.version_manager.function_version.config.dead_letter_arn, + event=json.loads(to_str(sqs_invocation.invocation.payload)), + # TODO: Refactor DLQ handling by removing the invocation exception from the legacy lambda provider + # TODO: Check message. Possibly remove because it is not used in the DLQ message?! 
+                error=LegacyInvocationException(
+                    message="hi", result=to_str(invocation_result.payload)
+                ),
+                role=self.version_manager.function_version.config.role,
+            )
+        except Exception as e:
+            LOG.warning(
+                "Error sending invocation result to DLQ %s: %s",
+                self.version_manager.function_version.config.dead_letter_arn,
+                e,
+            )
+
+
+class LambdaEventManager:
+    version_manager: LambdaVersionManager
+    poller: Poller | None
+    poller_thread: FuncThread | None
+    event_queue_url: str | None
+    lifecycle_lock: threading.RLock
+    stopped: threading.Event
+
+    def __init__(self, version_manager: LambdaVersionManager):
+        self.version_manager = version_manager
+        self.poller = None
+        self.poller_thread = None
+        self.event_queue_url = None
+        self.lifecycle_lock = threading.RLock()
+        self.stopped = threading.Event()
+
+    def enqueue_event(self, invocation: Invocation) -> None:
+        message_body = SQSInvocation(invocation).encode()
+        sqs_client = get_sqs_client(self.version_manager.function_version)
+        try:
+            sqs_client.send_message(QueueUrl=self.event_queue_url, MessageBody=message_body)
+        except Exception:
+            LOG.error(
+                "Failed to enqueue Lambda event into queue %s. Invocation: request_id=%s, invoked_arn=%s",
+                self.event_queue_url,
+                invocation.request_id,
+                invocation.invoked_arn,
+            )
+            raise
+
+    def start(self) -> None:
+        LOG.debug(
+            "Starting event manager %s id %s",
+            self.version_manager.function_version.id.qualified_arn(),
+            id(self),
+        )
+        with self.lifecycle_lock:
+            if self.stopped.is_set():
+                LOG.debug("Event manager already stopped before started.")
+                return
+            sqs_client = get_sqs_client(self.version_manager.function_version)
+            function_id = self.version_manager.function_version.id
+            # Truncate function name to ensure queue name limit of max 80 characters
+            function_name_short = function_id.function_name[:47]
+            # The instance id MUST be unique to the function and a given LocalStack instance
+            queue_namespace = (
+                f"{function_id.qualified_arn()}-{self.version_manager.function.instance_id}"
+            )
+            queue_name = f"{function_name_short}-{md5(queue_namespace)}"
+            create_queue_response = sqs_client.create_queue(QueueName=queue_name)
+            self.event_queue_url = create_queue_response["QueueUrl"]
+            # We don't need to purge the queue for persistence or cloud pods because the instance id MUST be unique
+
+            self.poller = Poller(self.version_manager, self.event_queue_url)
+            self.poller_thread = FuncThread(
+                self.poller.run,
+                name=f"lambda-poller-{function_id.function_name}:{function_id.qualifier}",
+            )
+            self.poller_thread.start()
+
+    def stop_for_update(self) -> None:
+        LOG.debug(
+            "Stopping event manager but keeping queue %s id %s",
+            self.version_manager.function_version.qualified_arn,
+            id(self),
+        )
+        with self.lifecycle_lock:
+            if self.stopped.is_set():
+                LOG.debug("Event manager already stopped!")
+                return
+            self.stopped.set()
+            if self.poller:
+                self.poller.stop()
+                self.poller_thread.join(timeout=3)
+                LOG.debug("Waited for poller thread %s", self.poller_thread)
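An aside on the queue naming in start() above: 47 characters of truncated function name, one hyphen, and a 32-character md5 hex digest add up to exactly the 80-character SQS queue-name limit. A self-contained check, where hashlib.md5 stands in for the localstack md5 helper and all sample values are hypothetical:

```python
import hashlib

function_name = "an-extremely-long-lambda-function-name-that-keeps-on-going"
qualified_arn = f"arn:aws:lambda:us-east-1:000000000000:function:{function_name}:1"
instance_id = "b2c4e6a81f0d"  # unique per function and LocalStack instance

function_name_short = function_name[:47]
queue_namespace = f"{qualified_arn}-{instance_id}"
digest = hashlib.md5(queue_namespace.encode()).hexdigest()  # 32 hex characters
queue_name = f"{function_name_short}-{digest}"

assert len(queue_name) <= 80  # worst case: 47 + 1 + 32 == 80
print(queue_name)
```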
+ if self.poller_thread.is_alive(): + LOG.error("Poller did not shutdown %s", self.poller_thread) + self.poller = None + if self.event_queue_url: + sqs_client = get_sqs_client( + self.version_manager.function_version, client_config=CLIENT_CONFIG + ) + sqs_client.delete_queue(QueueUrl=self.event_queue_url) + self.event_queue_url = None diff --git a/localstack-core/localstack/services/lambda_/invocation/execution_environment.py b/localstack-core/localstack/services/lambda_/invocation/execution_environment.py new file mode 100644 index 0000000000000..139ec4d877fbe --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/execution_environment.py @@ -0,0 +1,405 @@ +import logging +import random +import string +import time +from datetime import date, datetime +from enum import Enum, auto +from threading import RLock, Timer +from typing import Callable, Dict, Optional + +from localstack import config +from localstack.aws.connect import connect_to +from localstack.services.lambda_.invocation.lambda_models import ( + Credentials, + FunctionVersion, + InitializationType, + Invocation, + InvocationResult, +) +from localstack.services.lambda_.invocation.runtime_executor import ( + RuntimeExecutor, + get_runtime_executor, +) +from localstack.utils.lambda_debug_mode.lambda_debug_mode import ( + DEFAULT_LAMBDA_DEBUG_MODE_TIMEOUT_SECONDS, + is_lambda_debug_timeout_enabled_for, +) +from localstack.utils.strings import to_str +from localstack.utils.xray.trace_header import TraceHeader + +STARTUP_TIMEOUT_SEC = config.LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT +HEX_CHARS = [str(num) for num in range(10)] + ["a", "b", "c", "d", "e", "f"] + +LOG = logging.getLogger(__name__) + + +class RuntimeStatus(Enum): + INACTIVE = auto() + STARTING = auto() + READY = auto() + INVOKING = auto() + STARTUP_FAILED = auto() + STARTUP_TIMED_OUT = auto() + STOPPED = auto() + TIMING_OUT = auto() + + +class InvalidStatusException(Exception): + def __init__(self, message: str): + super().__init__(message) + + +class EnvironmentStartupTimeoutException(Exception): + def __init__(self, message: str): + super().__init__(message) + + +def generate_runtime_id() -> str: + return "".join(random.choices(string.hexdigits[:16], k=32)).lower() + + +# TODO: add status callback +class ExecutionEnvironment: + runtime_executor: RuntimeExecutor + status_lock: RLock + status: RuntimeStatus + initialization_type: InitializationType + last_returned: datetime + startup_timer: Optional[Timer] + keepalive_timer: Optional[Timer] + on_timeout: Callable[[str, str], None] + + def __init__( + self, + function_version: FunctionVersion, + initialization_type: InitializationType, + on_timeout: Callable[[str, str], None], + version_manager_id: str, + ): + self.id = generate_runtime_id() + self.status = RuntimeStatus.INACTIVE + # Lock for updating the runtime status + self.status_lock = RLock() + self.function_version = function_version + self.initialization_type = initialization_type + self.runtime_executor = get_runtime_executor()(self.id, function_version) + self.last_returned = datetime.min + self.startup_timer = None + self.keepalive_timer = Timer(0, lambda *args, **kwargs: None) + self.on_timeout = on_timeout + self.version_manager_id = version_manager_id + + def get_log_group_name(self) -> str: + return f"/aws/lambda/{self.function_version.id.function_name}" + + def get_log_stream_name(self) -> str: + return f"{date.today():%Y/%m/%d}/[{self.function_version.id.qualifier}]{self.id}" + + def get_environment_variables(self) -> Dict[str, str]: + """ + 
+        Returns the environment variable set for the runtime container
+        :return: Dict of environment variables
+        """
+        credentials = self.get_credentials()
+        env_vars = {
+            # 1) Public AWS defined runtime environment variables (in same order):
+            # https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html
+            # a) Reserved environment variables
+            # _HANDLER conditionally added below
+            # TODO: _X_AMZN_TRACE_ID
+            "AWS_DEFAULT_REGION": self.function_version.id.region,
+            "AWS_REGION": self.function_version.id.region,
+            # AWS_EXECUTION_ENV conditionally added below
+            "AWS_LAMBDA_FUNCTION_NAME": self.function_version.id.function_name,
+            "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": str(self.function_version.config.memory_size),
+            "AWS_LAMBDA_FUNCTION_VERSION": self.function_version.id.qualifier,
+            "AWS_LAMBDA_INITIALIZATION_TYPE": self.initialization_type,
+            "AWS_LAMBDA_LOG_GROUP_NAME": self.get_log_group_name(),
+            "AWS_LAMBDA_LOG_STREAM_NAME": self.get_log_stream_name(),
+            # Access IDs for role
+            "AWS_ACCESS_KEY_ID": credentials["AccessKeyId"],
+            "AWS_SECRET_ACCESS_KEY": credentials["SecretAccessKey"],
+            "AWS_SESSION_TOKEN": credentials["SessionToken"],
+            # AWS_LAMBDA_RUNTIME_API is set in the runtime interface emulator (RIE)
+            "LAMBDA_TASK_ROOT": "/var/task",
+            "LAMBDA_RUNTIME_DIR": "/var/runtime",
+            # b) Unreserved environment variables
+            # LANG
+            # LD_LIBRARY_PATH
+            # NODE_PATH
+            # PYTHONPATH
+            # GEM_PATH
+            "AWS_XRAY_CONTEXT_MISSING": "LOG_ERROR",
+            # TODO: allow configuration of xray address
+            "AWS_XRAY_DAEMON_ADDRESS": "127.0.0.1:2000",
+            # not 100% sure who sets these two
+            # extensions are not supposed to have them in their envs => TODO: test if init removes them
+            "_AWS_XRAY_DAEMON_PORT": "2000",
+            "_AWS_XRAY_DAEMON_ADDRESS": "127.0.0.1",
+            # AWS_LAMBDA_DOTNET_PREJIT
+            "TZ": ":UTC",
+            # 2) Public AWS RIE interface: https://github.com/aws/aws-lambda-runtime-interface-emulator
+            "AWS_LAMBDA_FUNCTION_TIMEOUT": str(self._get_execution_timeout_seconds()),
+            # 3) Public LocalStack endpoint
+            "LOCALSTACK_HOSTNAME": self.runtime_executor.get_endpoint_from_executor(),
+            "EDGE_PORT": str(config.GATEWAY_LISTEN[0].port),
+            # AWS_ENDPOINT_URL conditionally added below
+            # 4) Internal LocalStack runtime API
+            "LOCALSTACK_RUNTIME_ID": self.id,
+            "LOCALSTACK_RUNTIME_ENDPOINT": self.runtime_executor.get_runtime_endpoint(),
+            # 5) Account of the function (necessary for extensions API)
+            "LOCALSTACK_FUNCTION_ACCOUNT_ID": self.function_version.id.account,
+            # used by the init to spawn the x-ray daemon
+            # LOCALSTACK_USER conditionally added below
+        }
+        # Conditionally added environment variables
+        if not config.LAMBDA_DISABLE_AWS_ENDPOINT_URL:
+            env_vars["AWS_ENDPOINT_URL"] = (
+                f"http://{self.runtime_executor.get_endpoint_from_executor()}:{config.GATEWAY_LISTEN[0].port}"
+            )
+        # config.handler is None for image lambdas and will be populated at runtime (e.g., by RIE)
+        if self.function_version.config.handler:
+            env_vars["_HANDLER"] = self.function_version.config.handler
+        # Will be overridden by the runtime itself unless it is a provided runtime
+        if self.function_version.config.runtime:
+            env_vars["AWS_EXECUTION_ENV"] = "AWS_Lambda_rapid"
+        if self.function_version.config.environment:
+            env_vars.update(self.function_version.config.environment)
+        if config.LAMBDA_INIT_DEBUG:
+            # Disable dropping privileges because it breaks debugging
+            env_vars["LOCALSTACK_USER"] = "root"
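+        # Illustration (hypothetical values): LAMBDA_INIT_DEBUG=1 makes the container run as
+        # root; a LAMBDA_INIT_USER set below takes precedence because it is applied afterwards.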
+        # Forcefully overwriting the user might break debugging!
+        if config.LAMBDA_INIT_USER is not None:
+            env_vars["LOCALSTACK_USER"] = config.LAMBDA_INIT_USER
+        if config.LS_LOG in config.TRACE_LOG_LEVELS:
+            env_vars["LOCALSTACK_INIT_LOG_LEVEL"] = "info"
+        if config.LAMBDA_INIT_POST_INVOKE_WAIT_MS:
+            env_vars["LOCALSTACK_POST_INVOKE_WAIT_MS"] = str(int(config.LAMBDA_INIT_POST_INVOKE_WAIT_MS))
+        if config.LAMBDA_LIMITS_MAX_FUNCTION_PAYLOAD_SIZE_BYTES:
+            env_vars["LOCALSTACK_MAX_PAYLOAD_SIZE"] = str(
+                int(config.LAMBDA_LIMITS_MAX_FUNCTION_PAYLOAD_SIZE_BYTES)
+            )
+        return env_vars
+
+    # Lifecycle methods
+    def start(self) -> None:
+        """
+        Starting the runtime environment
+        """
+        with self.status_lock:
+            if self.status != RuntimeStatus.INACTIVE:
+                raise InvalidStatusException(
+                    f"Execution environment {self.id} can only be started when inactive. Current status: {self.status}"
+                )
+            self.status = RuntimeStatus.STARTING
+
+        startup_time_seconds: int = self._get_startup_timeout_seconds()
+        self.startup_timer = Timer(startup_time_seconds, self.timed_out)
+        self.startup_timer.start()
+
+        try:
+            time_before = time.perf_counter()
+            self.runtime_executor.start(self.get_environment_variables())
+            LOG.debug(
+                "Start of execution environment %s for function %s took %0.2fms",
+                self.id,
+                self.function_version.qualified_arn,
+                (time.perf_counter() - time_before) * 1000,
+            )
+
+            with self.status_lock:
+                self.status = RuntimeStatus.READY
+        # TODO: Distinguish between expected errors (e.g., timeout, cancellation due to deletion update) and
+        #  other unexpected exceptions. Improve control flow after implementing error reporting in Go init.
+        except Exception as e:
+            if self.status == RuntimeStatus.STARTUP_TIMED_OUT:
+                raise EnvironmentStartupTimeoutException(
+                    "Execution environment timed out during startup."
+                ) from e
+            else:
+                LOG.warning(
+                    "Failed to start execution environment %s: %s",
+                    self.id,
+                    e,
+                )
+                self.errored()
+                raise
+        finally:
+            if self.startup_timer:
+                self.startup_timer.cancel()
+                self.startup_timer = None
+
+    def stop(self) -> None:
+        """
+        Stopping the runtime environment
+        """
+        with self.status_lock:
+            if self.status in [RuntimeStatus.INACTIVE, RuntimeStatus.STOPPED]:
+                raise InvalidStatusException(
+                    f"Execution environment {self.id} cannot be stopped when inactive or already stopped."
+                    f" Current status: {self.status}"
+                )
+            self.status = RuntimeStatus.STOPPED
+        self.runtime_executor.stop()
+        self.keepalive_timer.cancel()
+
+    # Status methods
+    def release(self) -> None:
+        self.last_returned = datetime.now()
+        with self.status_lock:
+            if self.status != RuntimeStatus.INVOKING:
+                raise InvalidStatusException(
+                    f"Execution environment {self.id} can only be set to status ready while running."
+                    f" Current status: {self.status}"
+                )
+            self.status = RuntimeStatus.READY
+
+        if self.initialization_type == "on-demand":
+            self.keepalive_timer = Timer(config.LAMBDA_KEEPALIVE_MS / 1000, self.keepalive_passed)
+            self.keepalive_timer.start()
+
+    def reserve(self) -> None:
+        with self.status_lock:
+            if self.status != RuntimeStatus.READY:
+                raise InvalidStatusException(
+                    f"Execution environment {self.id} can only be reserved if ready."
+                    f" Current status: {self.status}"
+                )
+            self.status = RuntimeStatus.INVOKING
+
+        self.keepalive_timer.cancel()
+
+    def keepalive_passed(self) -> None:
+        LOG.debug(
Stopping.", + self.id, + self.function_version.qualified_arn, + ) + # The stop() method allows to interrupt invocations (on purpose), which might cancel running invocations + # which we should not do when the keepalive timer passed. + # The new TIMING_OUT state prevents this race condition + with self.status_lock: + if self.status != RuntimeStatus.READY: + LOG.debug( + "Keepalive timer passed, but current runtime status is %s. Aborting keepalive stop.", + self.status, + ) + return + self.status = RuntimeStatus.TIMING_OUT + self.stop() + # Notify assignment service via callback to remove from environments list + self.on_timeout(self.version_manager_id, self.id) + + def timed_out(self) -> None: + """Handle status updates if the startup of an execution environment times out. + Invoked asynchronously by the startup timer in a separate thread.""" + # TODO: De-emphasize the error part after fixing control flow and tests for test_lambda_runtime_exit + LOG.warning( + "Execution environment %s for function %s timed out during startup." + " Check for errors during the startup of your Lambda function and" + " consider increasing the startup timeout via LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT.", + self.id, + self.function_version.qualified_arn, + ) + if LOG.isEnabledFor(logging.DEBUG): + LOG.debug( + "Logs from the execution environment %s after startup timeout:\n%s", + self.id, + self.get_prefixed_logs(), + ) + with self.status_lock: + if self.status != RuntimeStatus.STARTING: + raise InvalidStatusException( + f"Execution environment {self.id} can only time out while starting. Current status: {self.status}" + ) + self.status = RuntimeStatus.STARTUP_TIMED_OUT + try: + self.runtime_executor.stop() + except Exception as e: + LOG.debug("Unable to shutdown execution environment %s after timeout: %s", self.id, e) + + def errored(self) -> None: + """Handle status updates if the startup of an execution environment fails. + Invoked synchronously when an unexpected error occurs during startup.""" + LOG.warning( + "Execution environment %s for function %s failed during startup." + " Check for errors during the startup of your Lambda function.", + self.id, + self.function_version.qualified_arn, + ) + if LOG.isEnabledFor(logging.DEBUG): + LOG.debug( + "Logs from the execution environment %s after startup error:\n%s", + self.id, + self.get_prefixed_logs(), + ) + with self.status_lock: + if self.status != RuntimeStatus.STARTING: + raise InvalidStatusException( + f"Execution environment {self.id} can only error while starting. Current status: {self.status}" + ) + self.status = RuntimeStatus.STARTUP_FAILED + try: + self.runtime_executor.stop() + except Exception as e: + LOG.debug("Unable to shutdown execution environment %s after error: %s", self.id, e) + + def get_prefixed_logs(self) -> str: + """Returns prefixed lambda containers logs""" + logs = self.runtime_executor.get_logs() + prefix = f"[lambda {self.id}] " + prefixed_logs = logs.replace("\n", f"\n{prefix}") + return f"{prefix}{prefixed_logs}" + + def invoke(self, invocation: Invocation) -> InvocationResult: + assert self.status == RuntimeStatus.INVOKING + # Async/event invokes might miss an aws_trace_header, then we need to create a new root trace id. + aws_trace_header = ( + invocation.trace_context.get("aws_trace_header") or TraceHeader().ensure_root_exists() + ) + # The Lambda RIE requires a full tracing header including Root, Parent, and Samples. 
+        # Otherwise, tracing fails with the warning "Subsegment ## handler discarded due to Lambda worker still initializing"
+        aws_trace_header.ensure_sampled_exists()
+        # TODO: replace this random parent id with actual parent segment created within the Lambda provider using X-Ray
+        aws_trace_header.ensure_parent_exists()
+        # TODO: test and implement Active and PassThrough tracing and sampling decisions.
+        # TODO: implement Lambda lineage: https://docs.aws.amazon.com/lambda/latest/dg/invocation-recursion.html
+        invoke_payload = {
+            "invoke-id": invocation.request_id,  # TODO: rename to request-id (requires change in lambda-init)
+            "invoked-function-arn": invocation.invoked_arn,
+            "payload": to_str(invocation.payload),
+            "trace-id": aws_trace_header.to_header_str(),
+        }
+        return self.runtime_executor.invoke(payload=invoke_payload)
+
+    def get_credentials(self) -> Credentials:
+        sts_client = connect_to(region_name=self.function_version.id.region).sts.request_metadata(
+            service_principal="lambda"
+        )
+        role_session_name = self.function_version.id.function_name
+
+        # To handle single character function names #9016
+        if len(role_session_name) == 1:
+            role_session_name += "@lambda_function"
+        # TODO we should probably set a maximum alive duration for environments, due to the session expiration
+        return sts_client.assume_role(
+            RoleArn=self.function_version.config.role,
+            RoleSessionName=role_session_name,
+            DurationSeconds=43200,
+        )["Credentials"]
+
+    def _get_execution_timeout_seconds(self) -> int:
+        # Returns the timeout value in seconds to be enforced during the execution of the
+        # lambda function. This is the configured value or the DEBUG MODE default if this
+        # is enabled.
+        if is_lambda_debug_timeout_enabled_for(self.function_version.qualified_arn):
+            return DEFAULT_LAMBDA_DEBUG_MODE_TIMEOUT_SECONDS
+        return self.function_version.config.timeout
+
+    def _get_startup_timeout_seconds(self) -> int:
+        # Returns the timeout value in seconds to be enforced during lambda container startups.
+        # This is the value defined through LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT or the LAMBDA
+        # DEBUG MODE default if this is enabled.
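+        # Illustrative values (assumed): with LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT=20 and debug
+        # mode off, this returns 20; with Lambda Debug Mode on, the debug default is returned.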
+        if is_lambda_debug_timeout_enabled_for(self.function_version.qualified_arn):
+            return DEFAULT_LAMBDA_DEBUG_MODE_TIMEOUT_SECONDS
+        return STARTUP_TIMEOUT_SEC
diff --git a/localstack-core/localstack/services/lambda_/invocation/executor_endpoint.py b/localstack-core/localstack/services/lambda_/invocation/executor_endpoint.py
new file mode 100644
index 0000000000000..eea6e0c77ebaa
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/invocation/executor_endpoint.py
@@ -0,0 +1,282 @@
+import abc
+import logging
+import time
+from concurrent.futures import CancelledError, Future
+from http import HTTPStatus
+from typing import Any, Dict, Optional
+
+import requests
+from werkzeug import Request
+
+from localstack.http import Response, route
+from localstack.services.edge import ROUTER
+from localstack.services.lambda_.invocation.lambda_models import InvocationResult
+from localstack.utils.backoff import ExponentialBackoff
+from localstack.utils.lambda_debug_mode.lambda_debug_mode import (
+    DEFAULT_LAMBDA_DEBUG_MODE_TIMEOUT_SECONDS,
+    is_lambda_debug_mode,
+)
+from localstack.utils.objects import singleton_factory
+from localstack.utils.strings import to_str
+
+LOG = logging.getLogger(__name__)
+INVOCATION_PORT = 9563
+
+NAMESPACE = "/_localstack_lambda"
+
+
+class InvokeSendError(Exception):
+    def __init__(self, message):
+        super().__init__(message)
+
+
+class StatusErrorException(Exception):
+    payload: bytes
+
+    def __init__(self, message, payload: bytes):
+        super().__init__(message)
+        self.payload = payload
+
+
+class ShutdownDuringStartup(Exception):
+    def __init__(self, message):
+        super().__init__(message)
+
+
+class Endpoint(abc.ABC):
+    @abc.abstractmethod
+    def invocation_response(self, request: Request, req_id: str) -> Response:
+        pass
+
+    @abc.abstractmethod
+    def invocation_error(self, request: Request, req_id: str) -> Response:
+        pass
+
+    @abc.abstractmethod
+    def invocation_logs(self, request: Request, invoke_id: str) -> Response:
+        pass
+
+    @abc.abstractmethod
+    def status_ready(self, request: Request, executor_id: str) -> Response:
+        pass
+
+    @abc.abstractmethod
+    def status_error(self, request: Request, executor_id: str) -> Response:
+        pass
+
+
+class ExecutorRouter:
+    endpoints: dict[str, Endpoint]
+
+    def __init__(self):
+        self.endpoints = {}
+
+    def register_endpoint(self, executor_id: str, endpoint: Endpoint):
+        self.endpoints[executor_id] = endpoint
+
+    def unregister_endpoint(self, executor_id: str):
+        self.endpoints.pop(executor_id)
+
+    @route(f"{NAMESPACE}/<executor_id>/invocations/<req_id>/response", methods=["POST"])
+    def invocation_response(self, request: Request, executor_id: str, req_id: str) -> Response:
+        endpoint = self.endpoints[executor_id]
+        return endpoint.invocation_response(request, req_id)
+
+    @route(f"{NAMESPACE}/<executor_id>/invocations/<req_id>/error", methods=["POST"])
+    def invocation_error(self, request: Request, executor_id: str, req_id: str) -> Response:
+        endpoint = self.endpoints[executor_id]
+        return endpoint.invocation_error(request, req_id)
+
+    @route(f"{NAMESPACE}/<executor_id>/invocations/<invoke_id>/logs", methods=["POST"])
+    def invocation_logs(self, request: Request, executor_id: str, invoke_id: str) -> Response:
+        endpoint = self.endpoints[executor_id]
+        return endpoint.invocation_logs(request, invoke_id)
+
+    @route(f"{NAMESPACE}/<env_id>/status/<executor_id>/ready", methods=["POST"])
+    def status_ready(self, request: Request, env_id: str, executor_id: str) -> Response:
+        endpoint = self.endpoints[executor_id]
+        return endpoint.status_ready(request, executor_id)
+
+    @route(f"{NAMESPACE}/<env_id>/status/<executor_id>/error", methods=["POST"])
+    def status_error(self, request: Request, env_id: str, executor_id: str) -> Response:
+        endpoint = self.endpoints[executor_id]
+        return endpoint.status_error(request, executor_id)
+
+
+@singleton_factory
+def executor_router():
+    router = ExecutorRouter()
+    ROUTER.add(router)
+    return router
+
+
+class ExecutorEndpoint(Endpoint):
+    container_address: str
+    container_port: int
+    executor_id: str
+    startup_future: Future[bool] | None
+    invocation_future: Future[InvocationResult] | None
+    logs: str | None
+
+    def __init__(
+        self,
+        executor_id: str,
+        container_address: Optional[str] = None,
+        container_port: Optional[int] = INVOCATION_PORT,
+    ) -> None:
+        self.container_address = container_address
+        self.container_port = container_port
+        self.executor_id = executor_id
+        self.startup_future = None
+        self.invocation_future = None
+        self.logs = None
+
+    def invocation_response(self, request: Request, req_id: str) -> Response:
+        result = InvocationResult(req_id, request.data, is_error=False, logs=self.logs)
+        self.invocation_future.set_result(result)
+        return Response(status=HTTPStatus.ACCEPTED)
+
+    def invocation_error(self, request: Request, req_id: str) -> Response:
+        result = InvocationResult(req_id, request.data, is_error=True, logs=self.logs)
+        self.invocation_future.set_result(result)
+        return Response(status=HTTPStatus.ACCEPTED)
+
+    def invocation_logs(self, request: Request, invoke_id: str) -> Response:
+        logs = request.json
+        if isinstance(logs, Dict):
+            self.logs = logs["logs"]
+        else:
+            LOG.error("Invalid logs from init! Logs: %s", logs)
+        return Response(status=HTTPStatus.ACCEPTED)
+
+    def status_ready(self, request: Request, executor_id: str) -> Response:
+        self.startup_future.set_result(True)
+        return Response(status=HTTPStatus.ACCEPTED)
+
+    def status_error(self, request: Request, executor_id: str) -> Response:
+        LOG.warning("Execution environment startup failed: %s", to_str(request.data))
+        # TODO: debug Lambda runtime init to not send `runtime/init/error` twice
+        if self.startup_future.done():
+            return Response(status=HTTPStatus.BAD_REQUEST)
+        self.startup_future.set_exception(
+            StatusErrorException("Environment startup failed", payload=request.data)
+        )
+        return Response(status=HTTPStatus.ACCEPTED)
+
+    def start(self) -> None:
+        executor_router().register_endpoint(self.executor_id, self)
+        self.startup_future = Future()
+
+    def wait_for_startup(self):
+        try:
+            self.startup_future.result()
+        except CancelledError as e:
+            # Only happens if we shutdown the container during execution environment startup
+            # Note: potential problem if we have a shutdown while we start the container (e.g., timeout),
+            # but wait_for_startup is not yet called
+            raise ShutdownDuringStartup(
+                "Executor environment shutdown during container startup"
+            ) from e
+
+    def get_endpoint_prefix(self):
+        return f"{NAMESPACE}/{self.executor_id}"
+
+    def shutdown(self) -> None:
+        executor_router().unregister_endpoint(self.executor_id)
+        self.startup_future.cancel()
+        if self.invocation_future:
+            self.invocation_future.cancel()
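+
+    # Illustrative flow for invoke() below: the payload is POSTed to the container's /invoke
+    # endpoint, and the result arrives asynchronously via invocation_response()/invocation_error(),
+    # which resolve the invocation future awaited at the end of invoke().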
+    def invoke(self, payload: Dict[str, str]) -> InvocationResult:
+        self.invocation_future = Future()
+        self.logs = None
+        if not self.container_address:
+            raise ValueError("Container address not set, but got an invoke.")
+        invocation_url = f"http://{self.container_address}:{self.container_port}/invoke"
+        # disable proxies for internal requests
+        proxies = {"http": "", "https": ""}
+        response = self._perform_invoke(
+            invocation_url=invocation_url, proxies=proxies, payload=payload
+        )
+        if not response.ok:
+            raise InvokeSendError(
+                f"Error while sending invocation {payload} to {invocation_url}. Error Code: {response.status_code}"
+            )
+
+        # Set a limit on how long to await the result future, to ensure this process eventually ends,
+        # with timeout errors being handled by the lambda evaluator.
+        # The following logic selects which maximum waiting time to consider depending
+        # on whether the application is being debugged or not.
+        # Note that if timeouts are enforced for the lambda function invoked at this endpoint
+        # (this needs to be configured in the Lambda Debug Mode Config file), the lambda
+        # function will continue to enforce the expected timeouts.
+        if is_lambda_debug_mode():
+            # The value is set to a default high value to ensure eventual termination.
+            timeout_seconds = DEFAULT_LAMBDA_DEBUG_MODE_TIMEOUT_SECONDS
+        else:
+            # Do not wait longer for an invoke than the maximum lambda timeout plus a buffer
+            lambda_max_timeout_seconds = 900
+            invoke_timeout_buffer_seconds = 5
+            timeout_seconds = lambda_max_timeout_seconds + invoke_timeout_buffer_seconds
+        return self.invocation_future.result(timeout=timeout_seconds)
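+
+    # Backoff schedule sketch (derived from the parameters below: initial_interval=0.1,
+    # multiplier=2.0, max_interval=1, no randomization): retries sleep ~0.1s, 0.2s, 0.4s,
+    # 0.8s, and then 1.0s (capped), for up to 5 retries after the initial attempt.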
+    @staticmethod
+    def _perform_invoke(
+        invocation_url: str,
+        proxies: dict[str, str],
+        payload: dict[str, Any],
+    ) -> requests.Response:
+        """
+        Dispatches a Lambda invocation request to the specified container endpoint, with automatic
+        retries in case of connection errors, using exponential backoff.
+
+        The first attempt is made immediately. If it fails, exponential backoff is applied with
+        retry intervals starting at 100ms, doubling each time for up to 5 total retries.
+
+        Parameters:
+            invocation_url (str): The full URL of the container's invocation endpoint.
+            proxies (dict[str, str]): Proxy settings to be used for the HTTP request.
+            payload (dict[str, Any]): The JSON payload to send to the container.
+
+        Returns:
+            Response: The successful HTTP response from the container.
+
+        Raises:
+            requests.exceptions.ConnectionError: If all retry attempts fail to connect.
+        """
+        backoff = None
+        last_exception = None
+        max_retry_on_connection_error = 5
+
+        for attempt_count in range(max_retry_on_connection_error + 1):  # 1 initial + n retries
+            try:
+                response = requests.post(url=invocation_url, json=payload, proxies=proxies)
+                return response
+            except requests.exceptions.ConnectionError as connection_error:
+                last_exception = connection_error
+
+                if backoff is None:
+                    LOG.debug(
+                        "Initial connection attempt failed: %s. Starting backoff retries.",
+                        connection_error,
+                    )
+                    backoff = ExponentialBackoff(
+                        max_retries=max_retry_on_connection_error,
+                        initial_interval=0.1,
+                        multiplier=2.0,
+                        randomization_factor=0.0,
+                        max_interval=1,
+                        max_time_elapsed=-1,
+                    )
+
+                delay = backoff.next_backoff()
+                if delay > 0:
+                    LOG.debug(
+                        "Connection error on invoke attempt #%d: %s. Retrying in %.2f seconds",
+                        attempt_count,
+                        connection_error,
+                        delay,
+                    )
+                    time.sleep(delay)
+
+        LOG.debug("Connection error after all attempts exhausted: %s", last_exception)
+        raise last_exception
diff --git a/localstack-core/localstack/services/lambda_/invocation/internal_sqs_queue.py b/localstack-core/localstack/services/lambda_/invocation/internal_sqs_queue.py
new file mode 100644
index 0000000000000..41da58b681701
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/invocation/internal_sqs_queue.py
@@ -0,0 +1,215 @@
+import logging
+import threading
+from typing import Iterable
+
+from localstack import config
+from localstack.aws.api.sqs import (
+    AttributeNameList,
+    CreateQueueResult,
+    GetQueueAttributesResult,
+    Message,
+    MessageAttributeNameList,
+    MessageBodyAttributeMap,
+    MessageBodySystemAttributeMap,
+    MessageSystemAttributeName,
+    NullableInteger,
+    QueueAttributeMap,
+    ReceiveMessageResult,
+    SendMessageResult,
+    String,
+    TagMap,
+)
+from localstack.services.sqs.models import SqsQueue, StandardQueue
+from localstack.services.sqs.provider import (
+    QueueUpdateWorker,
+    _create_message_attribute_hash,
+    to_sqs_api_message,
+)
+from localstack.services.sqs.utils import generate_message_id
+from localstack.utils.objects import singleton_factory
+from localstack.utils.strings import md5
+from localstack.utils.time import now
+
+LOG = logging.getLogger(__name__)
+
+
+class EventQueueUpdateWorker(QueueUpdateWorker):
+    """
+    Regularly re-queues inflight and delayed messages whose visibility timeout has expired or delay
+    deadline has been reached.
+    """
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.queues = []
+
+    def add_queue(self, queue: SqsQueue):
+        self.queues.append(queue)
+
+    def remove_queue(self, queue: SqsQueue):
+        self.queues.remove(queue)
+
+    def iter_queues(self) -> Iterable[SqsQueue]:
+        return iter(self.queues)
+
+
+class QueueManager:
+    queues: dict[str, StandardQueue]
+    queue_lock: threading.RLock
+    queue_update_worker: EventQueueUpdateWorker
+
+    def __init__(self):
+        self.queues = {}
+        # lock for handling queue lifecycle and avoiding duplicates
+        self.queue_lock = threading.RLock()
+        self.queue_update_worker = EventQueueUpdateWorker()
+
+    def start(self):
+        self.queue_update_worker.start()
+
+    def stop(self):
+        self.queue_update_worker.stop()
+
+    def get_queue(self, queue_name: str):
+        if queue_name not in self.queues:
+            raise ValueError("Queue not available")
+        return self.queues[queue_name]
+
+    def create_queue(self, queue_name: str) -> SqsQueue:
+        """
+        Creates a queue.
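+        Hypothetical usage (illustrative only; creation is idempotent per name)::
+
+            manager = QueueManager()
+            manager.start()
+            queue = manager.create_queue("example-lambda-events")
+            same_queue = manager.create_queue("example-lambda-events")  # returns the existing queue
+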
+
+        :param queue_name: Queue name, has to be unique
+        :return: Queue Object
+        """
+        with self.queue_lock:
+            if queue_name in self.queues:
+                return self.queues[queue_name]
+
+            queue = StandardQueue(
+                name=queue_name,
+                region="us-east-1",
+                account_id=config.INTERNAL_RESOURCE_ACCOUNT,
+            )
+            self.queues[queue_name] = queue
+            self.queue_update_worker.add_queue(queue)
+            return queue
+
+    def delete_queue(self, queue_name: str) -> None:
+        with self.queue_lock:
+            if queue_name not in self.queues:
+                raise ValueError(f"Queue '{queue_name}' not available")
+
+            queue = self.queues.pop(queue_name)
+            self.queue_update_worker.remove_queue(queue)
+
+
+class FakeSqsClient:
+    def __init__(self, queue_manager: QueueManager):
+        self.queue_manager = queue_manager
+
+    def create_queue(
+        self, QueueName: String, Attributes: QueueAttributeMap = None, tags: TagMap = None
+    ) -> CreateQueueResult:
+        self.queue_manager.create_queue(queue_name=QueueName)
+        return {"QueueUrl": QueueName}
+
+    def delete_queue(self, QueueUrl: String) -> None:
+        self.queue_manager.delete_queue(queue_name=QueueUrl)
+
+    def get_queue_attributes(
+        self, QueueUrl: String, AttributeNames: AttributeNameList = None
+    ) -> GetQueueAttributesResult:
+        queue = self.queue_manager.get_queue(queue_name=QueueUrl)
+        result = queue.get_queue_attributes(AttributeNames)
+        return {"Attributes": result}
+
+    def purge_queue(self, QueueUrl: String) -> None:
+        queue = self.queue_manager.get_queue(queue_name=QueueUrl)
+        queue.clear()
+
+    def receive_message(
+        self,
+        QueueUrl: String,
+        AttributeNames: AttributeNameList = None,
+        MessageAttributeNames: MessageAttributeNameList = None,
+        MaxNumberOfMessages: NullableInteger = None,
+        VisibilityTimeout: NullableInteger = None,
+        WaitTimeSeconds: NullableInteger = None,
+        ReceiveRequestAttemptId: String = None,
+    ) -> ReceiveMessageResult:
+        queue = self.queue_manager.get_queue(queue_name=QueueUrl)
+        num = MaxNumberOfMessages or 1
+        result = queue.receive(
+            num_messages=num,
+            visibility_timeout=VisibilityTimeout,
+            wait_time_seconds=WaitTimeSeconds,
+        )
+
+        messages = []
+        for i, standard_message in enumerate(result.successful):
+            message = to_sqs_api_message(standard_message, AttributeNames, MessageAttributeNames)
+            message["ReceiptHandle"] = result.receipt_handles[i]
+            messages.append(message)
+
+        return {"Messages": messages if messages else None}
+
+    def delete_message(self, QueueUrl: String, ReceiptHandle: String) -> None:
+        queue = self.queue_manager.get_queue(queue_name=QueueUrl)
+        queue.remove(ReceiptHandle)
+
+    def _create_message_attributes(
+        self,
+        message_system_attributes: MessageBodySystemAttributeMap = None,
+    ) -> dict[str, str]:
+        result = {
+            MessageSystemAttributeName.SenderId: config.INTERNAL_RESOURCE_ACCOUNT,  # not the account ID in AWS
+            MessageSystemAttributeName.SentTimestamp: str(now(millis=True)),
+        }
+
+        if message_system_attributes is not None:
+            for attr in message_system_attributes:
+                result[attr] = message_system_attributes[attr]["StringValue"]
+
+        return result
+
+    def send_message(
+        self,
+        QueueUrl: String,
+        MessageBody: String,
+        DelaySeconds: NullableInteger = None,
+        MessageAttributes: MessageBodyAttributeMap = None,
+        MessageSystemAttributes: MessageBodySystemAttributeMap = None,
+        MessageDeduplicationId: String = None,
+        MessageGroupId: String = None,
+    ) -> SendMessageResult:
+        queue = self.queue_manager.get_queue(queue_name=QueueUrl)
+
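+        # Sketch (hypothetical arguments): send_message(QueueUrl="example-events",
+        # MessageBody='{"k": "v"}') builds the SQS API message below and enqueues it in-memory.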
+        message = Message(
+            MessageId=generate_message_id(),
+            MD5OfBody=md5(MessageBody),
+            Body=MessageBody,
+            Attributes=self._create_message_attributes(MessageSystemAttributes),
+            MD5OfMessageAttributes=_create_message_attribute_hash(MessageAttributes),
+            MessageAttributes=MessageAttributes,
+        )
+        queue_item = queue.put(
+            message=message,
+            message_deduplication_id=MessageDeduplicationId,
+            message_group_id=MessageGroupId,
+            delay_seconds=int(DelaySeconds) if DelaySeconds is not None else None,
+        )
+        message = queue_item.message
+        return {
+            "MessageId": message["MessageId"],
+            "MD5OfMessageBody": message["MD5OfBody"],
+            "MD5OfMessageAttributes": message.get("MD5OfMessageAttributes"),
+            "SequenceNumber": queue_item.sequence_number,
+            "MD5OfMessageSystemAttributes": _create_message_attribute_hash(MessageSystemAttributes),
+        }
+
+
+@singleton_factory
+def get_fake_sqs_client():
+    queue_manager = QueueManager()
+    queue_manager.start()
+    return FakeSqsClient(queue_manager)
diff --git a/localstack-core/localstack/services/lambda_/invocation/lambda_models.py b/localstack-core/localstack/services/lambda_/invocation/lambda_models.py
new file mode 100644
index 0000000000000..0ce171cff6cc6
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/invocation/lambda_models.py
@@ -0,0 +1,650 @@
+"""Lambda models for internal use and persistence.
+The LambdaProviderPro in localstack-pro imports this model and configures persistence.
+The actual function code is stored in S3 (see S3Code).
+"""
+
+import dataclasses
+import logging
+import os.path
+import shutil
+import tempfile
+import threading
+from abc import ABCMeta, abstractmethod
+from datetime import datetime
+from pathlib import Path
+from typing import IO, Dict, Literal, Optional, TypedDict
+
+from botocore.exceptions import ClientError
+
+from localstack import config
+from localstack.aws.api import CommonServiceException
+from localstack.aws.api.lambda_ import (
+    AllowedPublishers,
+    Architecture,
+    CodeSigningPolicies,
+    Cors,
+    DestinationConfig,
+    FunctionUrlAuthType,
+    InvocationType,
+    InvokeMode,
+    LastUpdateStatus,
+    LoggingConfig,
+    PackageType,
+    ProvisionedConcurrencyStatusEnum,
+    RecursiveLoop,
+    Runtime,
+    RuntimeVersionConfig,
+    SnapStartResponse,
+    State,
+    StateReasonCode,
+    TracingMode,
+)
+from localstack.aws.connect import connect_to
+from localstack.constants import AWS_REGION_US_EAST_1
+from localstack.services.lambda_.api_utils import qualified_lambda_arn, unqualified_lambda_arn
+from localstack.utils.archives import unzip
+from localstack.utils.strings import long_uid, short_uid
+
+LOG = logging.getLogger(__name__)
+
+
+# TODO: maybe we should make this more "transient" by always initializing to Pending and *not* persisting it?
+@dataclasses.dataclass(frozen=True)
+class VersionState:
+    state: State
+    code: Optional[StateReasonCode] = None
+    reason: Optional[str] = None
+
+
+@dataclasses.dataclass
+class Invocation:
+    payload: bytes
+    invoked_arn: str
+    client_context: str | None
+    invocation_type: InvocationType
+    invoke_time: datetime
+    # = invocation_id
+    request_id: str
+    trace_context: dict
+
+
+InitializationType = Literal["on-demand", "provisioned-concurrency"]
+
+
+class ArchiveCode(metaclass=ABCMeta):
+    @abstractmethod
+    def generate_presigned_url(self, endpoint_url: str | None = None):
+        """
+        Generates a presigned url pointing to the code archive
+        """
+        pass
+
+    @abstractmethod
+    def is_hot_reloading(self):
+        """
+        Whether this code archive is for hot reloading.
+        This means it should mount the location from the host, and should instruct the runtimes to listen for changes
+
+        :return: True if this object represents hot reloading, False otherwise
+        """
+        pass
+
+    @abstractmethod
+    def get_unzipped_code_location(self):
+        """
+        Get the location of the unzipped archive on disk
+        """
+        pass
+
+    @abstractmethod
+    def prepare_for_execution(self):
+        """
+        Unzips the code archive to the proper destination on disk, if not already present
+        """
+        pass
+
+    @abstractmethod
+    def destroy_cached(self):
+        """
+        Destroys the code object on disk, if it was saved on disk before
+        """
+        pass
+
+    @abstractmethod
+    def destroy(self):
+        """
+        Deletes the code object from S3 and the unzipped version from disk
+        """
+        pass
+
+
+@dataclasses.dataclass(frozen=True)
+class S3Code(ArchiveCode):
+    """
+    Objects representing a code archive stored in an internal S3 bucket.
+
+    S3 Store:
+    Code archives represented by this method are stored in a bucket awslambda-{region_name}-tasks,
+    (e.g. awslambda-us-east-1-tasks), when correctly created using create_lambda_archive.
+    The "awslambda" prefix matches the behavior at real AWS.
+
+    This class will then provide different properties / methods to be operated on the stored code,
+    like the ability to create presigned-urls, checking the code hash etc.
+
+    A call to destroy() of this class will delete the code object from both the S3 store and the local cache
+
+    Unzipped Cache:
+    After a call to prepare_for_execution, an unzipped version of the represented code will be stored on disk,
+    ready to mount/copy.
+
+    It will be present at the location returned by get_unzipped_code_location,
+    namely /tmp/lambda/{bucket_name}/{id}/code
+
+    The cache on disk will be deleted after a call to destroy_cached (or destroy)
+    """
+
+    id: str
+    account_id: str
+    s3_bucket: str
+    s3_key: str
+    s3_object_version: str | None
+    code_sha256: str
+    code_size: int
+    _disk_lock: threading.RLock = dataclasses.field(default_factory=threading.RLock)
+
+    def _download_archive_to_file(self, target_file: IO) -> None:
+        """
+        Download the code archive into a given file
+
+        :param target_file: File the code archive should be downloaded into (IO object)
+        """
+        s3_client = connect_to(
+            region_name=AWS_REGION_US_EAST_1,
+            aws_access_key_id=config.INTERNAL_RESOURCE_ACCOUNT,
+        ).s3
+        extra_args = {"VersionId": self.s3_object_version} if self.s3_object_version else {}
+        s3_client.download_fileobj(
+            Bucket=self.s3_bucket, Key=self.s3_key, Fileobj=target_file, ExtraArgs=extra_args
+        )
+        target_file.flush()
+
+    def generate_presigned_url(self, endpoint_url: str | None = None) -> str:
+        """
+        Generates a presigned url pointing to the code archive
+        """
+        s3_client = connect_to(
+            region_name=AWS_REGION_US_EAST_1,
+            aws_access_key_id=config.INTERNAL_RESOURCE_ACCOUNT,
+            endpoint_url=endpoint_url,
+        ).s3
+        params = {"Bucket": self.s3_bucket, "Key": self.s3_key}
+        if self.s3_object_version:
+            params["VersionId"] = self.s3_object_version
+        return s3_client.generate_presigned_url("get_object", Params=params)
+
+    def is_hot_reloading(self) -> bool:
+        """
+        Whether this code archive is hot reloading
+
+        :return: True if it represents hot reloading, False otherwise
+        """
+        return False
+
+    def get_unzipped_code_location(self) -> Path:
+        """
+        Get the location of the unzipped archive on disk
+        """
+        return Path(f"{tempfile.gettempdir()}/lambda/{self.s3_bucket}/{self.id}/code")
+
+    def prepare_for_execution(self) -> None:
+        """
+        Unzips the code archive to the proper destination on disk, if not already present
+        """
+        target_path = self.get_unzipped_code_location()
+        with self._disk_lock:
+            if target_path.exists():
+                return
+            LOG.debug("Saving code %s to disk", self.id)
+            target_path.mkdir(parents=True, exist_ok=True)
+            with tempfile.NamedTemporaryFile() as file:
+                self._download_archive_to_file(file)
+                unzip(file.name, str(target_path))
+
+    def destroy_cached(self) -> None:
+        """
+        Destroys the code object on disk, if it was saved on disk before
+        """
+        # delete parent folder to delete the whole code location
+        code_path = self.get_unzipped_code_location().parent
+        if not code_path.exists():
+            return
+        try:
+            shutil.rmtree(code_path)
+        except OSError as e:
+            LOG.debug(
+                "Could not cleanup function code path %s due to error %s while deleting file %s",
+                code_path,
+                e.strerror,
+                e.filename,
+            )
+
+    def destroy(self) -> None:
+        """
+        Deletes the code object from S3 and the unzipped version from disk
+        """
+        LOG.debug("Final code destruction for %s", self.id)
+        self.destroy_cached()
+        s3_client = connect_to(
+            region_name=AWS_REGION_US_EAST_1,
+            aws_access_key_id=config.INTERNAL_RESOURCE_ACCOUNT,
+        ).s3
+        kwargs = {"VersionId": self.s3_object_version} if self.s3_object_version else {}
+        try:
+            s3_client.delete_object(Bucket=self.s3_bucket, Key=self.s3_key, **kwargs)
+        except ClientError as e:
+            LOG.debug(
+                "Cannot delete lambda archive %s in bucket %s: %s", self.s3_key, self.s3_bucket, e
+            )
+
+
+@dataclasses.dataclass(frozen=True)
+class HotReloadingCode(ArchiveCode):
+    """
+    Objects representing code which is mounted from a given directory from the host, for hot reloading
+    """
+
+    host_path: str
+    code_sha256: str = "hot-reloading-hash-not-available"
+    code_size: int = 0
+
+    def generate_presigned_url(self, endpoint_url: str | None = None) -> str:
+        return f"Code location: {self.host_path}"
+
+    def get_unzipped_code_location(self) -> Path:
+        path = os.path.expandvars(self.host_path)
+        return Path(path)
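+
+    # Example (hypothetical path): HotReloadingCode(host_path="$HOME/my-fn") expands environment
+    # variables on lookup, so get_unzipped_code_location() resolves to e.g. /home/user/my-fn.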
+
+    def is_hot_reloading(self) -> bool:
+        """
+        Whether this code archive is for hot reloading.
+        This means it should mount the location from the host, and should instruct the runtimes to listen for changes
+
+        :return: True if it represents hot reloading, False otherwise
+        """
+        return True
+
+    def prepare_for_execution(self) -> None:
+        pass
+
+    def destroy_cached(self) -> None:
+        """
+        Destroys the code object on disk, if it was saved on disk before
+        """
+        pass
+
+    def destroy(self) -> None:
+        """
+        Deletes the code object from S3 and the unzipped version from disk
+        """
+        pass
+
+
+@dataclasses.dataclass(frozen=True)
+class ImageCode:
+    image_uri: str
+    repository_type: str
+    code_sha256: str
+
+    @property
+    def resolved_image_uri(self):
+        return f"{self.image_uri.rpartition(':')[0]}@sha256:{self.code_sha256}"
+
+
+@dataclasses.dataclass
+class DeadLetterConfig:
+    target_arn: str
+
+
+@dataclasses.dataclass
+class FileSystemConfig:
+    arn: str
+    local_mount_path: str
+
+
+@dataclasses.dataclass(frozen=True)
+class ImageConfig:
+    working_directory: str
+    command: list[str] = dataclasses.field(default_factory=list)
+    entrypoint: list[str] = dataclasses.field(default_factory=list)
+
+
+@dataclasses.dataclass
+class VpcConfig:
+    vpc_id: str
+    security_group_ids: list[str] = dataclasses.field(default_factory=list)
+    subnet_ids: list[str] = dataclasses.field(default_factory=list)
+
+
+@dataclasses.dataclass(frozen=True)
+class UpdateStatus:
+    status: LastUpdateStatus | None
+    code: str | None = None  # TODO: probably not a string
+    reason: str | None = None
+
+
+@dataclasses.dataclass
+class LambdaEphemeralStorage:
+    size: int
+
+
+@dataclasses.dataclass
+class FunctionUrlConfig:
+    """
+    * HTTP(s)
+    * You can apply function URLs to any function alias, or to the $LATEST unpublished function version.
+      You can't add a function URL to any other function version.
+    * Once you create a function URL, its URL endpoint never changes
+    """
+
+    function_arn: str  # fully qualified ARN
+    function_name: str  # resolved name
+    cors: Cors
+    url_id: str  # Custom URL (via tag), or generated unique subdomain id e.g. pfn5bdb2dl5mzkbn6eb2oi3xfe0nthdn
+    url: str  # full URL (e.g. "https://pfn5bdb2dl5mzkbn6eb2oi3xfe0nthdn.lambda-url.eu-west-3.on.aws/")
"https://pfn5bdb2dl5mzkbn6eb2oi3xfe0nthdn.lambda-url.eu-west-3.on.aws/") + auth_type: FunctionUrlAuthType + creation_time: str # time + last_modified_time: Optional[str] = ( + None # TODO: check if this is the creation time when initially creating + ) + function_qualifier: Optional[str] = "$LATEST" # only $LATEST or alias name + invoke_mode: Optional[InvokeMode] = None + + +@dataclasses.dataclass +class ProvisionedConcurrencyConfiguration: + provisioned_concurrent_executions: int + last_modified: str # date + + +@dataclasses.dataclass +class ProvisionedConcurrencyState: + """transient items""" + + allocated: int = 0 + available: int = 0 + status: ProvisionedConcurrencyStatusEnum = dataclasses.field( + default=ProvisionedConcurrencyStatusEnum.IN_PROGRESS + ) + status_reason: Optional[str] = None + + +@dataclasses.dataclass +class AliasRoutingConfig: + version_weights: Dict[str, float] + + +@dataclasses.dataclass(frozen=True) +class VersionIdentifier: + function_name: str + qualifier: str + region: str + account: str + + def qualified_arn(self): + return qualified_lambda_arn( + function_name=self.function_name, + qualifier=self.qualifier, + region=self.region, + account=self.account, + ) + + def unqualified_arn(self): + return unqualified_lambda_arn( + function_name=self.function_name, + region=self.region, + account=self.account, + ) + + +@dataclasses.dataclass(frozen=True) +class VersionAlias: + function_version: str + name: str + description: str | None + routing_configuration: AliasRoutingConfig | None = None + revision_id: str = dataclasses.field(init=False, default_factory=long_uid) + + +@dataclasses.dataclass +class ResourcePolicy: + Version: str + Id: str + Statement: list[dict] + + +@dataclasses.dataclass +class FunctionResourcePolicy: + policy: ResourcePolicy + + +@dataclasses.dataclass +class EventInvokeConfig: + function_name: str + qualifier: str + + last_modified: Optional[str] = dataclasses.field(compare=False) + destination_config: Optional[DestinationConfig] = None + maximum_retry_attempts: Optional[int] = None + maximum_event_age_in_seconds: Optional[int] = None + + +# Result Models +@dataclasses.dataclass +class InvocationResult: + request_id: str + payload: bytes | None + is_error: bool + logs: str | None + executed_version: str | None = None + + +@dataclasses.dataclass +class InvocationLogs: + request_id: str + logs: str + + +class Credentials(TypedDict): + AccessKeyId: str + SecretAccessKey: str + SessionToken: str + Expiration: datetime + + +class OtherServiceEndpoint: + def status_ready(self, executor_id: str) -> None: + """ + Processes a status ready report by RAPID + :param executor_id: Executor ID this ready report is for + """ + raise NotImplementedError() + + def status_error(self, executor_id: str) -> None: + """ + Processes a status error report by RAPID + :param executor_id: Executor ID this error report is for + """ + raise NotImplementedError() + + +@dataclasses.dataclass(frozen=True) +class CodeSigningConfig: + csc_id: str + arn: str + + allowed_publishers: AllowedPublishers + policies: CodeSigningPolicies + last_modified: str + description: Optional[str] = None + + +@dataclasses.dataclass +class LayerPolicyStatement: + sid: str + action: str + principal: str + organization_id: Optional[str] + + +@dataclasses.dataclass +class LayerPolicy: + revision_id: str = dataclasses.field(init=False, default_factory=long_uid) + id: str = "default" # static + version: str = "2012-10-17" # static + statements: dict[str, LayerPolicyStatement] = dataclasses.field( + 
+        default_factory=dict
+    )  # statement ID => statement
+
+
+@dataclasses.dataclass
+class LayerVersion:
+    layer_version_arn: str
+    layer_arn: str
+
+    version: int
+    code: ArchiveCode
+    license_info: str
+    compatible_runtimes: list[Runtime]
+    compatible_architectures: list[Architecture]
+    created: str  # date
+    description: str = ""
+
+    policy: LayerPolicy = None
+
+
+@dataclasses.dataclass
+class Layer:
+    arn: str
+    next_version: int = 1
+    next_version_lock: threading.RLock = dataclasses.field(default_factory=threading.RLock)
+    layer_versions: dict[str, LayerVersion] = dataclasses.field(default_factory=dict)
+
+
+@dataclasses.dataclass(frozen=True)
+class VersionFunctionConfiguration:
+    # fields
+    description: str
+    role: str
+    timeout: int
+    runtime: Runtime
+    memory_size: int
+    handler: str
+    package_type: PackageType
+    environment: dict[str, str]
+    architectures: list[Architecture]
+    # internal revision is updated when runtime restart is necessary
+    internal_revision: str
+    ephemeral_storage: LambdaEphemeralStorage
+    snap_start: SnapStartResponse
+
+    tracing_config_mode: TracingMode
+    code: ArchiveCode
+    last_modified: str  # ISO string
+    state: VersionState
+
+    image: Optional[ImageCode] = None
+    image_config: Optional[ImageConfig] = None
+    runtime_version_config: Optional[RuntimeVersionConfig] = None
+    last_update: Optional[UpdateStatus] = None
+    revision_id: str = dataclasses.field(init=False, default_factory=long_uid)
+    layers: list[LayerVersion] = dataclasses.field(default_factory=list)
+
+    dead_letter_arn: Optional[str] = None
+
+    # kms_key_arn: str
+    # file_system_configs: FileSystemConfig
+    vpc_config: Optional[VpcConfig] = None
+
+    logging_config: LoggingConfig = dataclasses.field(default_factory=dict)
+
+
+@dataclasses.dataclass(frozen=True)
+class FunctionVersion:
+    id: VersionIdentifier
+    config: VersionFunctionConfiguration
+
+    @property
+    def qualified_arn(self) -> str:
+        return self.id.qualified_arn()
+
+
+@dataclasses.dataclass
+class Function:
+    function_name: str
+    code_signing_config_arn: Optional[str] = None
+    aliases: dict[str, VersionAlias] = dataclasses.field(default_factory=dict)
+    versions: dict[str, FunctionVersion] = dataclasses.field(default_factory=dict)
+    function_url_configs: dict[str, FunctionUrlConfig] = dataclasses.field(
+        default_factory=dict
+    )  # key is $LATEST, version, or alias
+    permissions: dict[str, FunctionResourcePolicy] = dataclasses.field(
+        default_factory=dict
+    )  # key is $LATEST, version or alias
+    event_invoke_configs: dict[str, EventInvokeConfig] = dataclasses.field(
+        default_factory=dict
+    )  # key is $LATEST(?), version or alias
+    reserved_concurrent_executions: Optional[int] = None
+    recursive_loop: RecursiveLoop = RecursiveLoop.Terminate
+    provisioned_concurrency_configs: dict[str, ProvisionedConcurrencyConfiguration] = (
+        dataclasses.field(default_factory=dict)
+    )
+
+    lock: threading.RLock = dataclasses.field(default_factory=threading.RLock)
+    next_version: int = 1
+
+    def latest(self) -> FunctionVersion:
+        return self.versions["$LATEST"]
+
+    # HACK to model a volatile variable that should be ignored for persistence
+    def __post_init__(self):
+        # Identifier unique to this function and LocalStack instance.
+        # A LocalStack restart or persistence load should create a new instance id.
+        # Used for retaining invoke queues across version updates for $LATEST, but separate unrelated instances.
+        self.instance_id = short_uid()
+
+    def __getstate__(self):
+        """Ignore certain volatile fields for pickling.
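+        For example (hypothetical round trip): pickle.loads(pickle.dumps(fn)) yields a Function
+        whose instance_id was freshly assigned by __setstate__ via __post_init__.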
+        # https://docs.python.org/3/library/pickle.html#handling-stateful-objects
+        """
+        # Copy the object's state from self.__dict__ which contains
+        # all our instance attributes. Always use the dict.copy()
+        # method to avoid modifying the original state.
+        state = self.__dict__.copy()
+        # Remove the volatile entries.
+        del state["instance_id"]
+        return state
+
+    def __setstate__(self, state):
+        # Inject persistent state
+        self.__dict__.update(state)
+        # Create new instance id
+        self.__post_init__()
+
+
+class ValidationException(CommonServiceException):
+    def __init__(self, message: str):
+        super().__init__(code="ValidationException", status_code=400, message=message)
+
+
+class RequestEntityTooLargeException(CommonServiceException):
+    def __init__(self, message: str):
+        super().__init__(code="RequestEntityTooLargeException", status_code=413, message=message)
+
+
+# note: we might at some point want to generalize these limits across all services and fetch them from there
+
+
+@dataclasses.dataclass
+class AccountSettings:
+    total_code_size: int = config.LAMBDA_LIMITS_TOTAL_CODE_SIZE
+    code_size_zipped: int = config.LAMBDA_LIMITS_CODE_SIZE_ZIPPED
+    code_size_unzipped: int = config.LAMBDA_LIMITS_CODE_SIZE_UNZIPPED
+    concurrent_executions: int = config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS
diff --git a/localstack-core/localstack/services/lambda_/invocation/lambda_service.py b/localstack-core/localstack/services/lambda_/invocation/lambda_service.py
new file mode 100644
index 0000000000000..837d766444c5d
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/invocation/lambda_service.py
@@ -0,0 +1,680 @@
+import base64
+import concurrent.futures
+import dataclasses
+import io
+import logging
+import os.path
+import random
+import uuid
+from concurrent.futures import Executor, Future, ThreadPoolExecutor
+from datetime import datetime
+from hashlib import sha256
+from pathlib import PurePosixPath, PureWindowsPath
+from threading import RLock
+from typing import TYPE_CHECKING, Optional
+
+from localstack import config
+from localstack.aws.api.lambda_ import (
+    InvalidParameterValueException,
+    InvalidRequestContentException,
+    InvocationType,
+    LastUpdateStatus,
+    ResourceConflictException,
+    ResourceNotFoundException,
+    State,
+)
+from localstack.aws.connect import connect_to
+from localstack.constants import AWS_REGION_US_EAST_1
+from localstack.services.lambda_.analytics import (
+    FunctionOperation,
+    FunctionStatus,
+    function_counter,
+    hotreload_counter,
+)
+from localstack.services.lambda_.api_utils import (
+    lambda_arn,
+    qualified_lambda_arn,
+    qualifier_is_alias,
+)
+from localstack.services.lambda_.invocation.assignment import AssignmentService
+from localstack.services.lambda_.invocation.counting_service import CountingService
+from localstack.services.lambda_.invocation.event_manager import LambdaEventManager
+from localstack.services.lambda_.invocation.lambda_models import (
+    ArchiveCode,
+    Function,
+    FunctionVersion,
+    HotReloadingCode,
+    ImageCode,
+    Invocation,
+    InvocationResult,
+    S3Code,
+    UpdateStatus,
+    VersionAlias,
+    VersionState,
+)
+from localstack.services.lambda_.invocation.models import lambda_stores
+from localstack.services.lambda_.invocation.version_manager import LambdaVersionManager
+from localstack.services.lambda_.lambda_utils import HINT_LOG
+from localstack.utils.archives import get_unzipped_size, is_zip_file
+from localstack.utils.container_utils.container_client import ContainerException
+from localstack.utils.docker_utils import DOCKER_CLIENT as CONTAINER_CLIENT
+from localstack.utils.strings import short_uid, to_str
+
+if TYPE_CHECKING:
+    from mypy_boto3_s3 import S3Client
+
+LOG = logging.getLogger(__name__)
+
+LAMBDA_DEFAULT_TIMEOUT_SECONDS = 3
+LAMBDA_DEFAULT_MEMORY_SIZE = 128
+
+
+class LambdaService:
+    # mapping from qualified ARN to version manager
+    lambda_running_versions: dict[str, LambdaVersionManager]
+    lambda_starting_versions: dict[str, LambdaVersionManager]
+    # mapping from qualified ARN to event manager
+    event_managers: dict[str, LambdaEventManager]
+    lambda_version_manager_lock: RLock
+    task_executor: Executor
+
+    assignment_service: AssignmentService
+    counting_service: CountingService
+
+    def __init__(self) -> None:
+        self.lambda_running_versions = {}
+        self.lambda_starting_versions = {}
+        self.event_managers = {}
+        self.lambda_version_manager_lock = RLock()
+        self.task_executor = ThreadPoolExecutor(thread_name_prefix="lambda-service-task")
+        self.assignment_service = AssignmentService()
+        self.counting_service = CountingService()
+
+    def stop(self) -> None:
+        """
+        Stop the whole lambda service
+        """
+        shutdown_futures = []
+        for event_manager in self.event_managers.values():
+            shutdown_futures.append(self.task_executor.submit(event_manager.stop))
+        # TODO: switch shutdown order? Shutting down starting versions before the running versions would make more sense
+        for version_manager in self.lambda_running_versions.values():
+            shutdown_futures.append(self.task_executor.submit(version_manager.stop))
+        for version_manager in self.lambda_starting_versions.values():
+            shutdown_futures.append(self.task_executor.submit(version_manager.stop))
+            shutdown_futures.append(
+                self.task_executor.submit(
+                    version_manager.function_version.config.code.destroy_cached
+                )
+            )
+        _, not_done = concurrent.futures.wait(shutdown_futures, timeout=5)
+        if not_done:
+            LOG.debug("Shutdown not complete, missing threads: %s", not_done)
+        self.task_executor.shutdown(cancel_futures=True)
+        self.assignment_service.stop()
+
+    def stop_version(self, qualified_arn: str) -> None:
+        """
+        Stops a specific lambda service version
+        :param qualified_arn: Qualified arn for the version to stop
+        """
+        LOG.debug("Stopping version %s", qualified_arn)
+        event_manager = self.event_managers.pop(qualified_arn, None)
+        if not event_manager:
+            LOG.debug("Could not find event manager to stop for function %s...", qualified_arn)
+        else:
+            self.task_executor.submit(event_manager.stop)
+        version_manager = self.lambda_running_versions.pop(
+            qualified_arn, self.lambda_starting_versions.pop(qualified_arn, None)
+        )
+        if not version_manager:
+            raise ValueError(f"Unable to find version manager for {qualified_arn}")
+        self.task_executor.submit(version_manager.stop)
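+
+    # Illustrative lookup (ARN value assumed):
+    #   vm = lambda_service.get_lambda_version_manager(
+    #       "arn:aws:lambda:us-east-1:000000000000:function:my-fn:1")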
+    def get_lambda_version_manager(self, function_arn: str) -> LambdaVersionManager:
+        """
+        Get the lambda version for the given arn
+        :param function_arn: qualified arn for the lambda version
+        :return: LambdaVersionManager for the arn
+        """
+        version_manager = self.lambda_running_versions.get(function_arn)
+        if not version_manager:
+            raise ValueError(f"Could not find version '{function_arn}'. Is it created?")
+
+        return version_manager
+
+    def get_lambda_event_manager(self, function_arn: str) -> LambdaEventManager:
+        """
+        Get the lambda event manager for the given arn
+        :param function_arn: qualified arn for the lambda version
+        :return: LambdaEventManager for the arn
+        """
+        event_manager = self.event_managers.get(function_arn)
+        if not event_manager:
+            raise ValueError(f"Could not find event manager '{function_arn}'. Is it created?")
+
+        return event_manager
+
+    def _start_lambda_version(self, version_manager: LambdaVersionManager) -> None:
+        new_state = version_manager.start()
+        self.update_version_state(
+            function_version=version_manager.function_version, new_state=new_state
+        )
+
+    def create_function_version(self, function_version: FunctionVersion) -> Future[None]:
+        """
+        Creates a new function version (manager), and puts it in the startup dict
+
+        :param function_version: Function Version to create
+        """
+        with self.lambda_version_manager_lock:
+            qualified_arn = function_version.id.qualified_arn()
+            version_manager = self.lambda_starting_versions.get(qualified_arn)
+            if version_manager:
+                raise ResourceConflictException(
+                    f"The operation cannot be performed at this time. An update is in progress for resource: {function_version.id.unqualified_arn()}",
+                    Type="User",
+                )
+            state = lambda_stores[function_version.id.account][function_version.id.region]
+            fn = state.functions.get(function_version.id.function_name)
+            version_manager = LambdaVersionManager(
+                function_arn=qualified_arn,
+                function_version=function_version,
+                function=fn,
+                counting_service=self.counting_service,
+                assignment_service=self.assignment_service,
+            )
+            self.lambda_starting_versions[qualified_arn] = version_manager
+        return self.task_executor.submit(self._start_lambda_version, version_manager)
+
+    def publish_version(self, function_version: FunctionVersion):
+        """
+        Synchronously create a function version (manager).
+        Should only be called on publishing new versions, which basically clone an existing one.
+        The new version needs to be added to the lambda store before invoking this.
+        After successful completion of this method, the lambda version stored will be modified to be active, with a new revision id.
+        It will then be active for execution, and should be retrieved again from the store before returning the data over the API.
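+
+        Illustrative call sequence (variable names assumed, not part of this API)::
+
+            fn = lambda_stores[account_id][region].functions["my-fn"]
+            fn.versions["1"] = new_function_version  # add to the store first
+            lambda_service.publish_version(new_function_version)  # then start it synchronously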
+
+        :param function_version: Function Version to create
+        """
+        with self.lambda_version_manager_lock:
+            qualified_arn = function_version.id.qualified_arn()
+            version_manager = self.lambda_starting_versions.get(qualified_arn)
+            if version_manager:
+                raise Exception(
+                    f"Version '{qualified_arn}' already starting up and in state {version_manager.state}"
+                )
+            state = lambda_stores[function_version.id.account][function_version.id.region]
+            fn = state.functions.get(function_version.id.function_name)
+            version_manager = LambdaVersionManager(
+                function_arn=qualified_arn,
+                function_version=function_version,
+                function=fn,
+                counting_service=self.counting_service,
+                assignment_service=self.assignment_service,
+            )
+            self.lambda_starting_versions[qualified_arn] = version_manager
+        self._start_lambda_version(version_manager)
+
+    # Commands
+    def invoke(
+        self,
+        function_name: str,
+        qualifier: str,
+        region: str,
+        account_id: str,
+        invocation_type: InvocationType | None,
+        client_context: str | None,
+        request_id: str,
+        payload: bytes | None,
+        trace_context: dict | None = None,
+    ) -> InvocationResult | None:
+        """
+        Invokes a specific version of a lambda
+
+        :param request_id: context request ID
+        :param function_name: Function name
+        :param qualifier: Function version qualifier
+        :param region: Region of the function
+        :param account_id: Account id of the function
+        :param invocation_type: Invocation Type
+        :param client_context: Client Context, if applicable
+        :param trace_context: tracing information such as X-Ray header
+        :param payload: Invocation payload
+        :return: The invocation result
+        """
+        # NOTE: consider making the trace_context mandatory once we update all usages (should be easier after v4.0)
+        trace_context = trace_context or {}
+        # Invoked arn (for lambda context) does not have qualifier if not supplied
+        invoked_arn = lambda_arn(
+            function_name=function_name,
+            qualifier=qualifier,
+            account=account_id,
+            region=region,
+        )
+        qualifier = qualifier or "$LATEST"
+        state = lambda_stores[account_id][region]
+        function = state.functions.get(function_name)
+
+        if function is None:
+            raise ResourceNotFoundException(f"Function not found: {invoked_arn}", Type="User")
+
+        if qualifier_is_alias(qualifier):
+            alias = function.aliases.get(qualifier)
+            if not alias:
+                raise ResourceNotFoundException(f"Function not found: {invoked_arn}", Type="User")
+            version_qualifier = alias.function_version
+            if alias.routing_configuration:
+                version, probability = next(
+                    iter(alias.routing_configuration.version_weights.items())
+                )
+                if random.random() < probability:
+                    version_qualifier = version
+        else:
+            version_qualifier = qualifier
+
+        # Need the qualified arn to exactly get the target lambda
+        qualified_arn = qualified_lambda_arn(function_name, version_qualifier, account_id, region)
+        version = function.versions.get(version_qualifier)
+        runtime = version.config.runtime or "n/a"
+        package_type = version.config.package_type
+        try:
+            version_manager = self.get_lambda_version_manager(qualified_arn)
+            event_manager = self.get_lambda_event_manager(qualified_arn)
+        except ValueError as e:
+            state = version and version.config.state.state
+            if state == State.Failed:
+                status = FunctionStatus.failed_state_error
+                HINT_LOG.error(
+                    f"Failed to create the runtime executor for the function {function_name}. "
+                    "Please ensure that Docker is available in the LocalStack container by adding the volume mount "
' + "Check out https://docs.localstack.cloud/user-guide/aws/lambda/#docker-not-available" + ) + elif state == State.Pending: + status = FunctionStatus.pending_state_error + HINT_LOG.warning( + "Lambda functions are created and updated asynchronously in the new lambda provider like in AWS. " + f"Before invoking {function_name}, please wait until the function transitioned from the state " + "Pending to Active using: " + f'"awslocal lambda wait function-active-v2 --function-name {function_name}" ' + "Check out https://docs.localstack.cloud/user-guide/aws/lambda/#function-in-pending-state" + ) + else: + status = FunctionStatus.unhandled_state_error + LOG.error("Unexpected state %s for Lambda function %s", state, function_name) + function_counter.labels( + operation=FunctionOperation.invoke, + runtime=runtime, + status=status, + invocation_type=invocation_type, + package_type=package_type, + ).increment() + raise ResourceConflictException( + f"The operation cannot be performed at this time. The function is currently in the following state: {state}" + ) from e + # empty payloads have to work as well + if payload is None: + payload = b"{}" + else: + # detect invalid payloads early before creating an execution environment + try: + to_str(payload) + except Exception as e: + function_counter.labels( + operation=FunctionOperation.invoke, + runtime=runtime, + status=FunctionStatus.invalid_payload_error, + invocation_type=invocation_type, + package_type=package_type, + ).increment() + # MAYBE: improve parity of detailed exception message (quite cumbersome) + raise InvalidRequestContentException( + f"Could not parse request body into json: Could not parse payload into json: {e}", + Type="User", + ) + if invocation_type is None: + invocation_type = InvocationType.RequestResponse + if invocation_type == InvocationType.DryRun: + return None + # TODO payload verification An error occurred (InvalidRequestContentException) when calling the Invoke operation: Could not parse request body into json: Could not parse payload into json: Unexpected character (''' (code 39)): expected a valid value (JSON String, Number, Array, Object or token 'null', 'true' or 'false') + # at [Source: (byte[])"'test'"; line: 1, column: 2] + # + if invocation_type == InvocationType.Event: + return event_manager.enqueue_event( + invocation=Invocation( + payload=payload, + invoked_arn=invoked_arn, + client_context=client_context, + invocation_type=invocation_type, + invoke_time=datetime.now(), + request_id=request_id, + trace_context=trace_context, + ) + ) + + invocation_result = version_manager.invoke( + invocation=Invocation( + payload=payload, + invoked_arn=invoked_arn, + client_context=client_context, + invocation_type=invocation_type, + invoke_time=datetime.now(), + request_id=request_id, + trace_context=trace_context, + ) + ) + status = ( + FunctionStatus.invocation_error + if invocation_result.is_error + else FunctionStatus.success + ) + function_counter.labels( + operation=FunctionOperation.invoke, + runtime=runtime, + status=status, + invocation_type=invocation_type, + package_type=package_type, + ).increment() + return invocation_result + + def update_version(self, new_version: FunctionVersion) -> Future[None]: + """ + Updates a given version. 
Will perform a rollover, so the old version will be active until the new one is ready + to be invoked + + :param new_version: New version (with the same qualifier as an older one) + """ + if new_version.qualified_arn not in self.lambda_running_versions: + raise ValueError( + f"Version {new_version.qualified_arn} cannot be updated if an old one is not running" + ) + + return self.create_function_version(function_version=new_version) + + def update_version_state( + self, function_version: FunctionVersion, new_state: VersionState + ) -> None: + """ + Update the version state for the given function version. + + This will perform a rollover to the given function if the new state is active and there is a previously + running version registered. The old version will be shut down and its code deleted. + + If the new state is failed, it will abort the update and mark it as failed. + If an older version is still running, it will keep running. + + :param function_version: Version reporting the state + :param new_state: New state + """ + function_arn = function_version.qualified_arn + try: + old_version = None + old_event_manager = None + with self.lambda_version_manager_lock: + new_version_manager = self.lambda_starting_versions.pop(function_arn, None) + if not new_version_manager: + raise ValueError( + f"Version {function_arn} reporting state {new_state.state} does not exist in the starting versions." + ) + if new_state.state == State.Active: + old_version = self.lambda_running_versions.get(function_arn, None) + old_event_manager = self.event_managers.get(function_arn, None) + self.lambda_running_versions[function_arn] = new_version_manager + self.event_managers[function_arn] = LambdaEventManager( + version_manager=new_version_manager + ) + self.event_managers[function_arn].start() + update_status = UpdateStatus(status=LastUpdateStatus.Successful) + elif new_state.state == State.Failed: + update_status = UpdateStatus(status=LastUpdateStatus.Failed) + self.task_executor.submit(new_version_manager.stop) + else: + # TODO: what should happen once the states Pending or Inactive are supported here? + self.task_executor.submit(new_version_manager.stop) + LOG.error( + "State %s for version %s should not have been reported. New version will be stopped.", + new_state, + function_arn, + ) + return + + # TODO is it necessary to get the version again? Should be locked for modification anyway + # Without updating the new state, the function would not change to active, last_update would be missing, and + # the revision id would not be updated. 
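+ # Illustrative effect of the swap below (assumed example values, added for readability): a successful + # update changes the stored version record roughly as follows, with all other fields carried over + # unchanged via dataclasses.replace: + # state: VersionState(state=State.Pending) -> VersionState(state=State.Active) + # last_update: None -> UpdateStatus(status=LastUpdateStatus.Successful)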
+ state = lambda_stores[function_version.id.account][function_version.id.region] + # FIXME: this will fail if the function is deleted while these lines are executing + function = state.functions.get(function_version.id.function_name) + if old_event_manager: + self.task_executor.submit(old_event_manager.stop_for_update) + if old_version: + # if there is an old version, we assume it is an update, and stop the old one + self.task_executor.submit(old_version.stop) + if function: + self.task_executor.submit( + destroy_code_if_not_used, old_version.function_version.config.code, function + ) + if not function: + LOG.debug("Function %s was deleted during status update", function_arn) + return + current_version = function.versions[function_version.id.qualifier] + new_version_manager.state = new_state + new_version_state = dataclasses.replace( + current_version, + config=dataclasses.replace( + current_version.config, state=new_state, last_update=update_status + ), + ) + state.functions[function_version.id.function_name].versions[ + function_version.id.qualifier + ] = new_version_state + + except Exception: + LOG.exception("Failed to update function version for arn %s", function_arn) + + def update_alias(self, old_alias: VersionAlias, new_alias: VersionAlias, function: Function): + # if the alias pointer changed, provisioned concurrency needs to be moved to the new target version + provisioned_concurrency_config = function.provisioned_concurrency_configs.get( + old_alias.name + ) + if ( + old_alias.function_version != new_alias.function_version + and provisioned_concurrency_config is not None + ): + LOG.warning("Deprovisioning the old target version of alias %s", old_alias.name) + fn_version_old = function.versions.get(old_alias.function_version) + vm_old = self.get_lambda_version_manager(function_arn=fn_version_old.qualified_arn) + fn_version_new = function.versions.get(new_alias.function_version) + vm_new = self.get_lambda_version_manager(function_arn=fn_version_new.qualified_arn) + + # TODO: we might need to pull provisioned concurrency state a bit more out of the version manager for get_provisioned_concurrency_config + # TODO: make this fully async + vm_old.update_provisioned_concurrency_config(0).result(timeout=4) # sync + vm_new.update_provisioned_concurrency_config( + provisioned_concurrency_config.provisioned_concurrent_executions + ) # async again + + def can_assume_role(self, role_arn: str, region: str) -> bool: + """ + Checks whether lambda can assume the given role. + This _should_ only fail if IAM enforcement is enabled. 
+ + :param role_arn: Role to assume + :param region: Region to use for the STS client + :return: True if the role can be assumed by lambda, False otherwise + """ + sts_client = connect_to(region_name=region).sts.request_metadata(service_principal="lambda") + try: + sts_client.assume_role( + RoleArn=role_arn, + RoleSessionName=f"test-assume-{short_uid()}", + DurationSeconds=900, + ) + return True + except Exception as e: + LOG.debug("Cannot assume role %s: %s", role_arn, e) + return False + + +# TODO: Move helper functions out of lambda_service into a separate module + + +def is_code_used(code: S3Code, function: Function) -> bool: + """ + Check if the given code is still used in some version of the function + + :param code: Code object + :param function: function to check + :return: bool whether the code is used in some version of the function + """ + with function.lock: + return any(code == version.config.code for version in function.versions.values()) + + +def destroy_code_if_not_used(code: S3Code, function: Function) -> None: + """ + Destroy the given code if it is not used in some version of the function + Do nothing otherwise + + :param code: Code object + :param function: Function the code belongs to + """ + with function.lock: + if not is_code_used(code, function): + code.destroy() + + +def store_lambda_archive( + archive_file: bytes, function_name: str, region_name: str, account_id: str +) -> S3Code: + """ + Stores the given lambda archive in an internal s3 bucket. + Also checks whether the zip file matches the size specifications + + :param archive_file: Archive file to store + :param function_name: function name the archive should be stored for + :param region_name: region name the archive should be stored for + :param account_id: account id the archive should be stored for + :return: S3 Code object representing the archive stored in S3 + """ + # check if zip file + if not is_zip_file(archive_file): + raise InvalidParameterValueException( + "Could not unzip uploaded file. Please check your file, then try to upload again.", + Type="User", + ) + # check unzipped size + unzipped_size = get_unzipped_size(zip_file=io.BytesIO(archive_file)) + if unzipped_size >= config.LAMBDA_LIMITS_CODE_SIZE_UNZIPPED: + raise InvalidParameterValueException( + f"Unzipped size must be smaller than {config.LAMBDA_LIMITS_CODE_SIZE_UNZIPPED} bytes", + Type="User", + ) + # store all buckets in us-east-1 for now + s3_client = connect_to( + region_name=AWS_REGION_US_EAST_1, aws_access_key_id=config.INTERNAL_RESOURCE_ACCOUNT + ).s3 + bucket_name = f"awslambda-{region_name}-tasks" + # s3 create bucket is idempotent in us-east-1 + s3_client.create_bucket(Bucket=bucket_name) + code_id = f"{function_name}-{uuid.uuid4()}" + key = f"snapshots/{account_id}/{code_id}" + s3_client.upload_fileobj(Fileobj=io.BytesIO(archive_file), Bucket=bucket_name, Key=key) + code_sha256 = to_str(base64.b64encode(sha256(archive_file).digest())) + return S3Code( + id=code_id, + account_id=account_id, + s3_bucket=bucket_name, + s3_key=key, + s3_object_version=None, + code_sha256=code_sha256, + code_size=len(archive_file), + ) + + +def assert_hot_reloading_path_absolute(path: str) -> None: + """ + Check whether a given path, after environment variable substitution, is an absolute path. + Accepts either posix or windows paths, with environment placeholders. + Example placeholders: $ENV_VAR, ${ENV_VAR} + + :param path: Posix or windows path, potentially containing environment variable placeholders. + Example: `$ENV_VAR/lambda/src` with `ENV_VAR=/home/user/test-repo` set. 
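+ With `ENV_VAR=/home/user/test-repo`, that key expands to `/home/user/test-repo/lambda/src` and is + accepted; a relative key such as `lambda/src` raises an InvalidParameterValueException.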
+ """ + # expand variables in path before checking for an absolute path + expanded_path = os.path.expandvars(path) + if ( + not PurePosixPath(expanded_path).is_absolute() + and not PureWindowsPath(expanded_path).is_absolute() + ): + raise InvalidParameterValueException( + f"When using hot reloading, the archive key has to be an absolute path! Your archive key: {path}", + ) + + +def create_hot_reloading_code(path: str) -> HotReloadingCode: + assert_hot_reloading_path_absolute(path) + return HotReloadingCode(host_path=path) + + +def store_s3_bucket_archive( + archive_bucket: str, + archive_key: str, + archive_version: Optional[str], + function_name: str, + region_name: str, + account_id: str, +) -> ArchiveCode: + """ + Takes the lambda archive stored in the given bucket and stores it in an internal s3 bucket + + :param archive_bucket: Bucket the archive is stored in + :param archive_key: Key the archive is stored under + :param archive_version: Version of the archive object in the bucket + :param function_name: function name the archive should be stored for + :param region_name: region name the archive should be stored for + :param account_id: account id the archive should be stored for + :return: S3 Code object representing the archive stored in S3 + """ + if archive_bucket == config.BUCKET_MARKER_LOCAL: + hotreload_counter.labels(operation="create").increment() + return create_hot_reloading_code(path=archive_key) + s3_client: "S3Client" = connect_to().s3 + kwargs = {"VersionId": archive_version} if archive_version else {} + archive_file = s3_client.get_object(Bucket=archive_bucket, Key=archive_key, **kwargs)[ + "Body" + ].read() + return store_lambda_archive( + archive_file, function_name=function_name, region_name=region_name, account_id=account_id + ) + + +def create_image_code(image_uri: str) -> ImageCode: + """ + Creates an image code by inspecting the provided image + + :param image_uri: Image URI of the image to inspect + :return: Image code object + """ + code_sha256 = "" + if CONTAINER_CLIENT.has_docker(): + try: + CONTAINER_CLIENT.pull_image(docker_image=image_uri) + except ContainerException: + LOG.debug("Cannot pull image %s. Maybe only available locally?", image_uri) + try: + code_sha256 = CONTAINER_CLIENT.inspect_image(image_name=image_uri)["RepoDigests"][ + 0 + ].rpartition(":")[2] + except Exception as e: + LOG.debug( + "Cannot inspect image %s. Is this image and/or docker available: %s", image_uri, e + ) + else: + LOG.warning( + "Unable to get image hash for image %s - no docker socket available." 
+ "Image hash returned by Lambda will not be correct.", + image_uri, + ) + return ImageCode(image_uri=image_uri, code_sha256=code_sha256, repository_type="ECR") diff --git a/localstack-core/localstack/services/lambda_/invocation/logs.py b/localstack-core/localstack/services/lambda_/invocation/logs.py new file mode 100644 index 0000000000000..2ff2ab35d951b --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/logs.py @@ -0,0 +1,108 @@ +import dataclasses +import logging +import threading +import time +from queue import Queue +from typing import Optional, Union + +from localstack.aws.connect import connect_to +from localstack.utils.aws.client_types import ServicePrincipal +from localstack.utils.bootstrap import is_api_enabled +from localstack.utils.threads import FuncThread + +LOG = logging.getLogger(__name__) + + +class ShutdownPill: + pass + + +QUEUE_SHUTDOWN = ShutdownPill() + + +@dataclasses.dataclass(frozen=True) +class LogItem: + log_group: str + log_stream: str + logs: str + + +class LogHandler: + log_queue: "Queue[Union[LogItem, ShutdownPill]]" + role_arn: str + _thread: Optional[FuncThread] + _shutdown_event: threading.Event + + def __init__(self, role_arn: str, region: str) -> None: + self.role_arn = role_arn + self.region = region + self.log_queue = Queue() + self._shutdown_event = threading.Event() + self._thread = None + + def run_log_loop(self, *args, **kwargs) -> None: + logs_client = connect_to.with_assumed_role( + region_name=self.region, + role_arn=self.role_arn, + service_principal=ServicePrincipal.lambda_, + ).logs + while not self._shutdown_event.is_set(): + log_item = self.log_queue.get() + if log_item is QUEUE_SHUTDOWN: + return + # we need to split by newline - but keep the newlines in the strings + # strips empty lines, as they are not accepted by cloudwatch + logs = [line + "\n" for line in log_item.logs.split("\n") if line] + # until we have a better way to have timestamps, log events have the same time for a single invocation + log_events = [ + {"timestamp": int(time.time() * 1000), "message": log_line} for log_line in logs + ] + try: + try: + logs_client.put_log_events( + logGroupName=log_item.log_group, + logStreamName=log_item.log_stream, + logEvents=log_events, + ) + except logs_client.exceptions.ResourceNotFoundException: + # create new log group + try: + logs_client.create_log_group(logGroupName=log_item.log_group) + except logs_client.exceptions.ResourceAlreadyExistsException: + pass + logs_client.create_log_stream( + logGroupName=log_item.log_group, logStreamName=log_item.log_stream + ) + logs_client.put_log_events( + logGroupName=log_item.log_group, + logStreamName=log_item.log_stream, + logEvents=log_events, + ) + except Exception as e: + LOG.warning( + "Error saving logs to group %s in region %s: %s", + log_item.log_group, + self.region, + e, + ) + + def start_subscriber(self) -> None: + if not is_api_enabled("logs"): + LOG.debug("Service 'logs' is disabled, not storing any logs for lambda executions") + return + self._thread = FuncThread(self.run_log_loop, name="log_handler") + self._thread.start() + + def add_logs(self, log_item: LogItem) -> None: + if not is_api_enabled("logs"): + return + self.log_queue.put(log_item) + + def stop(self) -> None: + self._shutdown_event.set() + if self._thread: + self.log_queue.put(QUEUE_SHUTDOWN) + self._thread.join(timeout=2) + if self._thread.is_alive(): + LOG.error("Could not stop log subscriber in time") + self._thread = None diff --git 
a/localstack-core/localstack/services/lambda_/invocation/metrics.py b/localstack-core/localstack/services/lambda_/invocation/metrics.py new file mode 100644 index 0000000000000..b9fcefa89f44b --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/metrics.py @@ -0,0 +1,38 @@ +import logging + +from localstack.utils.cloudwatch.cloudwatch_util import publish_lambda_metric + +LOG = logging.getLogger(__name__) + + +def record_cw_metric_invocation(function_name: str, account_id: str, region_name: str): + try: + publish_lambda_metric( + "Invocations", + 1, + {"func_name": function_name}, + region_name=region_name, + account_id=account_id, + ) + except Exception as e: + LOG.debug("Failed to send CloudWatch metric for Lambda invocation: %s", e) + + +def record_cw_metric_error(function_name: str, account_id: str, region_name: str): + try: + publish_lambda_metric( + "Invocations", + 1, + {"func_name": function_name}, + region_name=region_name, + account_id=account_id, + ) + publish_lambda_metric( + "Errors", + 1, + {"func_name": function_name}, + account_id=account_id, + region_name=region_name, + ) + except Exception as e: + LOG.debug("Failed to send CloudWatch metric for Lambda invocation error: %s", e) diff --git a/localstack-core/localstack/services/lambda_/invocation/models.py b/localstack-core/localstack/services/lambda_/invocation/models.py new file mode 100644 index 0000000000000..bc0eef5e7ebf0 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/models.py @@ -0,0 +1,24 @@ +from localstack.aws.api.lambda_ import EventSourceMappingConfiguration +from localstack.services.lambda_.invocation.lambda_models import CodeSigningConfig, Function, Layer +from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute +from localstack.utils.tagging import TaggingService + + +class LambdaStore(BaseStore): + # maps function names to the respective Function + functions: dict[str, Function] = LocalAttribute(default=dict) + + # maps EventSourceMapping UUIDs to the respective EventSourceMapping + event_source_mappings: dict[str, EventSourceMappingConfiguration] = LocalAttribute(default=dict) + + # maps CodeSigningConfig ARNs to the respective CodeSigningConfig + code_signing_configs: dict[str, CodeSigningConfig] = LocalAttribute(default=dict) + + # maps layer names to Layers + layers: dict[str, Layer] = LocalAttribute(default=dict) + + # maps resource ARNs for EventSourceMappings and CodeSigningConfiguration to tags + TAGS = LocalAttribute(default=TaggingService) + + +lambda_stores = AccountRegionBundle("lambda", LambdaStore) diff --git a/localstack-core/localstack/services/lambda_/invocation/plugins.py b/localstack-core/localstack/services/lambda_/invocation/plugins.py new file mode 100644 index 0000000000000..0941b4118a957 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/plugins.py @@ -0,0 +1,16 @@ +from plux import Plugin + + +class RuntimeExecutorPlugin(Plugin): + namespace = "localstack.lambda.runtime_executor" + + +class DockerRuntimeExecutorPlugin(RuntimeExecutorPlugin): + name = "docker" + + def load(self, *args, **kwargs): + from localstack.services.lambda_.invocation.docker_runtime_executor import ( + DockerRuntimeExecutor, + ) + + return DockerRuntimeExecutor diff --git a/localstack/services/awslambda/invocation/runtime_executor.py b/localstack-core/localstack/services/lambda_/invocation/runtime_executor.py similarity index 75% rename from localstack/services/awslambda/invocation/runtime_executor.py rename to 
localstack-core/localstack/services/lambda_/invocation/runtime_executor.py index c2ea97be86c1b..93ed5cc600532 100644 --- a/localstack/services/awslambda/invocation/runtime_executor.py +++ b/localstack-core/localstack/services/lambda_/invocation/runtime_executor.py @@ -1,12 +1,14 @@ +import dataclasses import logging from abc import ABC, abstractmethod -from typing import Type +from pathlib import Path +from typing import Type, TypedDict -from plugin import PluginManager +from plux import PluginManager from localstack import config -from localstack.services.awslambda.invocation.lambda_models import FunctionVersion, ServiceEndpoint -from localstack.services.awslambda.invocation.plugins import RuntimeExecutorPlugin +from localstack.services.lambda_.invocation.lambda_models import FunctionVersion, InvocationResult +from localstack.services.lambda_.invocation.plugins import RuntimeExecutorPlugin LOG = logging.getLogger(__name__) @@ -16,14 +18,15 @@ class RuntimeExecutor(ABC): function_version: FunctionVersion def __init__( - self, id: str, function_version: FunctionVersion, service_endpoint: ServiceEndpoint + self, + id: str, + function_version: FunctionVersion, ) -> None: """ Runtime executor class responsible for executing a runtime in specific environment :param id: ID string of the runtime executor :param function_version: Function version to be executed - :param service_endpoint: Service endpoint for execution related callbacks """ self.id = id self.function_version = function_version @@ -72,7 +75,7 @@ def get_runtime_endpoint(self) -> str: pass @abstractmethod - def invoke(self, payload: dict[str, str]) -> None: + def invoke(self, payload: dict[str, str]) -> InvocationResult: """ Send an invocation to the execution environment @@ -80,6 +83,11 @@ def invoke(self, payload: dict[str, str]) -> None: """ pass + @abstractmethod + def get_logs(self) -> str: + """Get all logs of a given execution environment""" + pass + @classmethod @abstractmethod def prepare_version(cls, function_version: FunctionVersion) -> None: @@ -101,12 +109,30 @@ def cleanup_version(cls, function_version: FunctionVersion): """ pass + @classmethod + def validate_environment(cls) -> bool: + """Validates the setup of the environment and provides an opportunity to log warnings. 
+ Returns False if an invalid environment is detected and True otherwise.""" + return True + class LambdaRuntimeException(Exception): def __init__(self, message: str): super().__init__(message) +@dataclasses.dataclass +class LambdaPrebuildContext: + docker_file_content: str + context_path: Path + function_version: FunctionVersion + + +class ChmodPath(TypedDict): + path: str + mode: str + + EXECUTOR_PLUGIN_MANAGER: PluginManager[Type[RuntimeExecutor]] = PluginManager( RuntimeExecutorPlugin.namespace ) diff --git a/localstack-core/localstack/services/lambda_/invocation/version_manager.py b/localstack-core/localstack/services/lambda_/invocation/version_manager.py new file mode 100644 index 0000000000000..e53049dc82754 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/invocation/version_manager.py @@ -0,0 +1,274 @@ +import concurrent.futures +import logging +import threading +import time +from concurrent.futures import Future + +from localstack import config +from localstack.aws.api.lambda_ import ( + ProvisionedConcurrencyStatusEnum, + ServiceException, + State, + StateReasonCode, +) +from localstack.services.lambda_.invocation.assignment import AssignmentService +from localstack.services.lambda_.invocation.counting_service import CountingService +from localstack.services.lambda_.invocation.execution_environment import ExecutionEnvironment +from localstack.services.lambda_.invocation.executor_endpoint import StatusErrorException +from localstack.services.lambda_.invocation.lambda_models import ( + Function, + FunctionVersion, + Invocation, + InvocationResult, + ProvisionedConcurrencyState, + VersionState, +) +from localstack.services.lambda_.invocation.logs import LogHandler, LogItem +from localstack.services.lambda_.invocation.metrics import ( + record_cw_metric_error, + record_cw_metric_invocation, +) +from localstack.services.lambda_.invocation.runtime_executor import get_runtime_executor +from localstack.utils.strings import long_uid, truncate +from localstack.utils.threads import FuncThread, start_thread + +LOG = logging.getLogger(__name__) + + +class LambdaVersionManager: + # arn this Lambda Version manager manages + function_arn: str + function_version: FunctionVersion + function: Function + + # Scale provisioned concurrency up and down + provisioning_thread: FuncThread | None + # Additional guard to prevent scheduling invocation on version during shutdown + shutdown_event: threading.Event + + state: VersionState | None + provisioned_state: ProvisionedConcurrencyState | None # TODO: remove? 
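+ # log_handler ships this version's logs to CloudWatch Logs; counting_service hands out invocation + # leases for concurrency accounting; assignment_service assigns execution environments (see their modules)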
+ log_handler: LogHandler + counting_service: CountingService + assignment_service: AssignmentService + + def __init__( + self, + function_arn: str, + function_version: FunctionVersion, + # HACK allowing None for Lambda@Edge; only used in invoke for get_invocation_lease + function: Function | None, + counting_service: CountingService, + assignment_service: AssignmentService, + ): + self.id = long_uid() + self.function_arn = function_arn + self.function_version = function_version + self.function = function + self.counting_service = counting_service + self.assignment_service = assignment_service + self.log_handler = LogHandler(function_version.config.role, function_version.id.region) + + # async + self.provisioning_thread = None + self.shutdown_event = threading.Event() + + # async state + self.provisioned_state = None + self.provisioned_state_lock = threading.RLock() + # https://aws.amazon.com/blogs/compute/coming-soon-expansion-of-aws-lambda-states-to-all-functions/ + self.state = VersionState(state=State.Pending) + + def start(self) -> VersionState: + try: + self.log_handler.start_subscriber() + time_before = time.perf_counter() + get_runtime_executor().prepare_version(self.function_version) # TODO: make pluggable? + LOG.debug( + "Version preparation of function %s took %0.2fms", + self.function_version.qualified_arn, + (time.perf_counter() - time_before) * 1000, + ) + + # code and reason not set for success scenario because only failed states provide this field: + # https://docs.aws.amazon.com/lambda/latest/dg/API_GetFunctionConfiguration.html#SSS-GetFunctionConfiguration-response-LastUpdateStatusReasonCode + self.state = VersionState(state=State.Active) + LOG.debug( + "Changing Lambda %s (id %s) to active", + self.function_arn, + self.function_version.config.internal_revision, + ) + except Exception as e: + self.state = VersionState( + state=State.Failed, + code=StateReasonCode.InternalError, + reason=f"Error while creating lambda: {e}", + ) + LOG.debug( + "Changing Lambda %s (id %s) to failed. Reason: %s", + self.function_arn, + self.function_version.config.internal_revision, + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + return self.state + + def stop(self) -> None: + LOG.debug("Stopping lambda version '%s'", self.function_arn) + self.state = VersionState( + state=State.Inactive, code=StateReasonCode.Idle, reason="Shutting down" + ) + self.shutdown_event.set() + self.log_handler.stop() + self.assignment_service.stop_environments_for_version(self.id) + get_runtime_executor().cleanup_version(self.function_version) # TODO: make pluggable? + + def update_provisioned_concurrency_config( + self, provisioned_concurrent_executions: int + ) -> Future[None]: + """ + TODO: implement update while in progress (see test_provisioned_concurrency test) + TODO: loop until diff == 0 and retry to remove/add diff environments + TODO: alias routing & allocated (i.e., the status while updating provisioned concurrency) + TODO: ProvisionedConcurrencyStatusEnum.FAILED + TODO: status reason + + :param provisioned_concurrent_executions: set to 0 to stop all provisioned environments + """ + with self.provisioned_state_lock: + # LocalStack limitation: cannot update provisioned concurrency while another update is in progress + if ( + self.provisioned_state + and self.provisioned_state.status == ProvisionedConcurrencyStatusEnum.IN_PROGRESS + ): + raise ServiceException( + "Updating provisioned concurrency configuration while IN_PROGRESS is not supported yet." 
+ ) + + if not self.provisioned_state: + self.provisioned_state = ProvisionedConcurrencyState() + + def scale_environments(*args, **kwargs) -> None: + futures = self.assignment_service.scale_provisioned_concurrency( + self.id, self.function_version, provisioned_concurrent_executions + ) + + concurrent.futures.wait(futures) + + with self.provisioned_state_lock: + if provisioned_concurrent_executions == 0: + self.provisioned_state = None + else: + self.provisioned_state.available = provisioned_concurrent_executions + self.provisioned_state.allocated = provisioned_concurrent_executions + self.provisioned_state.status = ProvisionedConcurrencyStatusEnum.READY + + self.provisioning_thread = start_thread(scale_environments) + return self.provisioning_thread.result_future + + # Extract environment handling + + def invoke(self, *, invocation: Invocation) -> InvocationResult: + """ + synchronous invoke entrypoint + + 0. check counter, get lease + 1. try to get an inactive (no active invoke) environment + 2.(allgood) send invoke to environment + 3. wait for invocation result + 4. return invocation result & release lease + + 2.(nogood) fail fast fail hard + + """ + LOG.debug( + "Got an invocation for function %s with request_id %s", + self.function_arn, + invocation.request_id, + ) + if self.shutdown_event.is_set(): + message = f"Got an invocation with request_id {invocation.request_id} for a version shutting down" + LOG.warning(message) + raise ServiceException(message) + + with self.counting_service.get_invocation_lease( + self.function, self.function_version + ) as provisioning_type: + # TODO: potential race condition when changing provisioned concurrency after getting the lease but before + # getting an environment + try: + # Blocks and potentially creates a new execution environment for this invocation + with self.assignment_service.get_environment( + self.id, self.function_version, provisioning_type + ) as execution_env: + invocation_result = execution_env.invoke(invocation) + invocation_result.executed_version = self.function_version.id.qualifier + self.store_logs( + invocation_result=invocation_result, execution_env=execution_env + ) + except StatusErrorException as e: + invocation_result = InvocationResult( + request_id="", + payload=e.payload, + is_error=True, + logs="", + executed_version=self.function_version.id.qualifier, + ) + + function_id = self.function_version.id + # Record CloudWatch metrics in separate threads + # MAYBE reuse threads rather than starting new threads upon every invocation + if invocation_result.is_error: + start_thread( + lambda *args, **kwargs: record_cw_metric_error( + function_name=function_id.function_name, + account_id=function_id.account, + region_name=function_id.region, + ), + name=f"record-cloudwatch-metric-error-{function_id.function_name}:{function_id.qualifier}", + ) + else: + start_thread( + lambda *args, **kwargs: record_cw_metric_invocation( + function_name=function_id.function_name, + account_id=function_id.account, + region_name=function_id.region, + ), + name=f"record-cloudwatch-metric-{function_id.function_name}:{function_id.qualifier}", + ) + # TODO: consider using the same prefix logging as in error case for execution environment. + # possibly as separate named logger. 
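+ # Illustrative shape of the debug output produced below (hypothetical function name and request id): + # [my-function-52fdfc07-2182-454f-963f-5f0f9a621d72] START RequestId: 52fdfc07-... Version: 1 + # [my-function-52fdfc07-2182-454f-963f-5f0f9a621d72] END RequestId: 52fdfc07-... + # with every line shortened to config.LAMBDA_TRUNCATE_STDOUT characters via truncate()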
+ if invocation_result.logs is not None: + LOG.debug("Got logs for invocation '%s'", invocation.request_id) + for log_line in invocation_result.logs.splitlines(): + LOG.debug( + "[%s-%s] %s", + function_id.function_name, + invocation.request_id, + truncate(log_line, config.LAMBDA_TRUNCATE_STDOUT), + ) + else: + LOG.warning( + "[%s] Error while printing logs for function '%s': Received no logs from environment.", + invocation.request_id, + function_id.function_name, + ) + return invocation_result + + def store_logs( + self, invocation_result: InvocationResult, execution_env: ExecutionEnvironment + ) -> None: + if invocation_result.logs: + log_item = LogItem( + execution_env.get_log_group_name(), + execution_env.get_log_stream_name(), + invocation_result.logs, + ) + self.log_handler.add_logs(log_item) + else: + LOG.warning( + "Received no logs from invocation with id %s for lambda %s. Execution environment logs: \n%s", + invocation_result.request_id, + self.function_arn, + execution_env.get_prefixed_logs(), + ) diff --git a/localstack-core/localstack/services/lambda_/lambda_utils.py b/localstack-core/localstack/services/lambda_/lambda_utils.py new file mode 100644 index 0000000000000..e66eab9812e58 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/lambda_utils.py @@ -0,0 +1,46 @@ +"""Lambda utilities for behavior and implicit functionality. +Everything related to API operations goes into `api_utils.py`. +""" + +import logging +import os + +from localstack.aws.api.lambda_ import Runtime + +# Custom logger for proactive advice +HINT_LOG = logging.getLogger("localstack.services.lambda_.hints") + + +def get_handler_file_from_name(handler_name: str, runtime: str = None): + # Previously used DEFAULT_LAMBDA_RUNTIME here but that is only relevant for testing and this helper is still used in + # a CloudFormation model in localstack.services.cloudformation.models.lambda_.LambdaFunction.get_lambda_code_param + runtime = runtime or Runtime.python3_12 + + # TODO: consider using localstack/testing/aws/lambda_utils.py:RUNTIMES_AGGREGATED for testing or moving the constant + # RUNTIMES_AGGREGATED to LocalStack core if this helper remains relevant within CloudFormation. 
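+ # A few illustrative mappings (hypothetical handler names, derived from the branches below): + # get_handler_file_from_name("app.lambda_handler") -> "app.py" + # get_handler_file_from_name("index.handler", "nodejs18.x") -> "index.js" + # get_handler_file_from_name("MyAssembly::My.Namespace.Class::Handler", "dotnet6") -> "MyAssembly.dll" + # get_handler_file_from_name("anything", "provided.al2023") -> "bootstrap"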
+ if runtime.startswith(Runtime.provided): + return "bootstrap" + if runtime.startswith("nodejs"): + return format_name_to_path(handler_name, ".", ".js") + if runtime.startswith(Runtime.go1_x): + return handler_name + if runtime.startswith("dotnet"): + return format_name_to_path(handler_name, ":", ".dll") + if runtime.startswith("ruby"): + return format_name_to_path(handler_name, ".", ".rb") + + return format_name_to_path(handler_name, ".", ".py") + + +def format_name_to_path(handler_name: str, delimiter: str, extension: str): + file_path = handler_name.rpartition(delimiter)[0] + if delimiter == ":": + file_path = file_path.split(delimiter)[0] + + if os.path.sep not in file_path: + file_path = file_path.replace(".", os.path.sep) + + if file_path.startswith(f".{os.path.sep}"): + file_path = file_path[2:] + + return f"{file_path}{extension}" diff --git a/localstack/services/stepfunctions/asl/component/state/state_execution/__init__.py b/localstack-core/localstack/services/lambda_/layerfetcher/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_execution/__init__.py rename to localstack-core/localstack/services/lambda_/layerfetcher/__init__.py diff --git a/localstack/services/awslambda/layerfetcher/layer_fetcher.py b/localstack-core/localstack/services/lambda_/layerfetcher/layer_fetcher.py similarity index 87% rename from localstack/services/awslambda/layerfetcher/layer_fetcher.py rename to localstack-core/localstack/services/lambda_/layerfetcher/layer_fetcher.py index ad38c471df88b..4b4c67da860e7 100644 --- a/localstack/services/awslambda/layerfetcher/layer_fetcher.py +++ b/localstack-core/localstack/services/lambda_/layerfetcher/layer_fetcher.py @@ -1,6 +1,6 @@ from abc import abstractmethod -from localstack.services.awslambda.invocation.lambda_models import Layer +from localstack.services.lambda_.invocation.lambda_models import Layer class LayerFetcher: diff --git a/localstack-core/localstack/services/lambda_/networking.py b/localstack-core/localstack/services/lambda_/networking.py new file mode 100644 index 0000000000000..0f47926d79475 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/networking.py @@ -0,0 +1,29 @@ +from localstack import config +from localstack.utils.container_networking import ( + get_endpoint_for_network, + get_main_container_network, +) + +# IP address of main Docker container (lazily initialized) +DOCKER_MAIN_CONTAINER_IP = None +LAMBDA_CONTAINER_NETWORK = None + + +def get_main_endpoint_from_container() -> str: + if config.HOSTNAME_FROM_LAMBDA: + return config.HOSTNAME_FROM_LAMBDA + return get_endpoint_for_network(network=get_main_container_network_for_lambda()) + + +def get_main_container_network_for_lambda() -> str: + global LAMBDA_CONTAINER_NETWORK + if config.LAMBDA_DOCKER_NETWORK: + return config.LAMBDA_DOCKER_NETWORK.split(",")[0] + return get_main_container_network() + + +def get_all_container_networks_for_lambda() -> list[str]: + global LAMBDA_CONTAINER_NETWORK + if config.LAMBDA_DOCKER_NETWORK: + return config.LAMBDA_DOCKER_NETWORK.split(",") + return [get_main_container_network()] diff --git a/localstack-core/localstack/services/lambda_/packages.py b/localstack-core/localstack/services/lambda_/packages.py new file mode 100644 index 0000000000000..fd549c1c7ad34 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/packages.py @@ -0,0 +1,100 @@ +"""Package installers for external Lambda dependencies.""" + +import os +import stat +from functools import cache +from pathlib import 
Path +from typing import List + +from localstack import config +from localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller +from localstack.utils.platform import get_arch + +"""Customized LocalStack version of the AWS Lambda Runtime Interface Emulator (RIE). +https://github.com/localstack/lambda-runtime-init/blob/localstack/README-LOCALSTACK.md +""" +LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.33-pre" +LAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION +LAMBDA_RUNTIME_INIT_URL = "https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}" + +"""Unmaintained Java utilities and JUnit integration for LocalStack released to Maven Central. +https://github.com/localstack/localstack-java-utils +We recommend the Testcontainers LocalStack Java module as an alternative: +https://java.testcontainers.org/modules/localstack/ +""" +LOCALSTACK_MAVEN_VERSION = "0.2.21" +MAVEN_REPO_URL = "https://repo1.maven.org/maven2" +URL_LOCALSTACK_FAT_JAR = ( + "{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar" +) + + +class LambdaRuntimePackage(Package): + """Golang binary containing the lambda-runtime-init.""" + + def __init__(self, default_version: str = LAMBDA_RUNTIME_VERSION): + super().__init__(name="Lambda", default_version=default_version) + + def get_versions(self) -> List[str]: + return [LAMBDA_RUNTIME_VERSION] + + def _get_installer(self, version: str) -> PackageInstaller: + return LambdaRuntimePackageInstaller(name="lambda-runtime", version=version) + + +class LambdaRuntimePackageInstaller(DownloadInstaller): + """Installer for the lambda-runtime-init Golang binary.""" + + # TODO: Architecture should ideally be configurable in the installer for proper cross-architecture support. + # We currently hope the native binary works within emulated containers. + def _get_arch(self): + arch = get_arch() + return "x86_64" if arch == "amd64" else arch + + def _get_download_url(self) -> str: + arch = self._get_arch() + return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch) + + def _get_install_dir(self, target: InstallTarget) -> str: + install_dir = super()._get_install_dir(target) + arch = self._get_arch() + return os.path.join(install_dir, arch) + + def _get_install_marker_path(self, install_dir: str) -> str: + return os.path.join(install_dir, "var", "rapid", "init") + + def _install(self, target: InstallTarget) -> None: + super()._install(target) + install_location = self.get_executable_path() + st = os.stat(install_location) + os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) + + +# TODO: replace usage in LocalStack tests with locally built Java jar and remove this unmaintained dependency. 
+class LambdaJavaPackage(Package): + def __init__(self): + super().__init__("LambdaJavaLibs", "0.2.22") + + def get_versions(self) -> List[str]: + return ["0.2.22", "0.2.21"] + + def _get_installer(self, version: str) -> PackageInstaller: + return LambdaJavaPackageInstaller("lambda-java-libs", version) + + +class LambdaJavaPackageInstaller(DownloadInstaller): + def _get_download_url(self) -> str: + return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL) + + +lambda_runtime_package = LambdaRuntimePackage() +lambda_java_libs_package = LambdaJavaPackage() + + +# TODO: handle architecture-specific installer and caching because we currently assume that the lambda-runtime-init +# Golang binary is cross-architecture compatible. +@cache +def get_runtime_client_path() -> Path: + installer = lambda_runtime_package.get_installer() + installer.install() + return Path(installer.get_installed_dir()) diff --git a/localstack-core/localstack/services/lambda_/plugins.py b/localstack-core/localstack/services/lambda_/plugins.py new file mode 100644 index 0000000000000..646dc170fb9b8 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/plugins.py @@ -0,0 +1,46 @@ +import logging + +from werkzeug.routing import Rule + +from localstack.config import LAMBDA_DOCKER_NETWORK +from localstack.packages import Package, package +from localstack.runtime import hooks +from localstack.services.edge import ROUTER +from localstack.services.lambda_.custom_endpoints import LambdaCustomEndpoints + +LOG = logging.getLogger(__name__) + +CUSTOM_ROUTER_RULES: list[Rule] = [] + + +@package(name="lambda-runtime") +def lambda_runtime_package() -> Package: + from localstack.services.lambda_.packages import lambda_runtime_package + + return lambda_runtime_package + + +@package(name="lambda-java-libs") +def lambda_java_libs() -> Package: + from localstack.services.lambda_.packages import lambda_java_libs_package + + return lambda_java_libs_package + + +@hooks.on_infra_start() +def validate_configuration() -> None: + if LAMBDA_DOCKER_NETWORK == "host": + LOG.warning( + "The configuration LAMBDA_DOCKER_NETWORK=host is currently not supported with the new lambda provider." 
+ ) + + +@hooks.on_infra_start() +def register_custom_endpoints() -> None: + global CUSTOM_ROUTER_RULES + CUSTOM_ROUTER_RULES = ROUTER.add(LambdaCustomEndpoints()) + + +@hooks.on_infra_shutdown() +def remove_custom_endpoints() -> None: + ROUTER.remove(CUSTOM_ROUTER_RULES) diff --git a/localstack-core/localstack/services/lambda_/provider.py b/localstack-core/localstack/services/lambda_/provider.py new file mode 100644 index 0000000000000..516b931723293 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/provider.py @@ -0,0 +1,4297 @@ +import base64 +import dataclasses +import datetime +import itertools +import json +import logging +import re +import threading +import time +from typing import IO, Any, Optional, Tuple + +from botocore.exceptions import ClientError + +from localstack import config +from localstack.aws.api import RequestContext, ServiceException, handler +from localstack.aws.api.lambda_ import ( + AccountLimit, + AccountUsage, + AddLayerVersionPermissionResponse, + AddPermissionRequest, + AddPermissionResponse, + Alias, + AliasConfiguration, + AliasRoutingConfiguration, + AllowedPublishers, + Architecture, + Arn, + Blob, + BlobStream, + CodeSigningConfigArn, + CodeSigningConfigNotFoundException, + CodeSigningPolicies, + CompatibleArchitectures, + CompatibleRuntimes, + Concurrency, + Cors, + CreateCodeSigningConfigResponse, + CreateEventSourceMappingRequest, + CreateFunctionRequest, + CreateFunctionUrlConfigResponse, + DeleteCodeSigningConfigResponse, + Description, + DestinationConfig, + EventSourceMappingConfiguration, + FunctionCodeLocation, + FunctionConfiguration, + FunctionEventInvokeConfig, + FunctionName, + FunctionUrlAuthType, + FunctionUrlQualifier, + GetAccountSettingsResponse, + GetCodeSigningConfigResponse, + GetFunctionCodeSigningConfigResponse, + GetFunctionConcurrencyResponse, + GetFunctionRecursionConfigResponse, + GetFunctionResponse, + GetFunctionUrlConfigResponse, + GetLayerVersionPolicyResponse, + GetLayerVersionResponse, + GetPolicyResponse, + GetProvisionedConcurrencyConfigResponse, + InvalidParameterValueException, + InvocationResponse, + InvocationType, + InvokeAsyncResponse, + InvokeMode, + LambdaApi, + LastUpdateStatus, + LayerName, + LayerPermissionAllowedAction, + LayerPermissionAllowedPrincipal, + LayersListItem, + LayerVersionArn, + LayerVersionContentInput, + LayerVersionNumber, + LicenseInfo, + ListAliasesResponse, + ListCodeSigningConfigsResponse, + ListEventSourceMappingsResponse, + ListFunctionEventInvokeConfigsResponse, + ListFunctionsByCodeSigningConfigResponse, + ListFunctionsResponse, + ListFunctionUrlConfigsResponse, + ListLayersResponse, + ListLayerVersionsResponse, + ListProvisionedConcurrencyConfigsResponse, + ListTagsResponse, + ListVersionsByFunctionResponse, + LogFormat, + LoggingConfig, + LogType, + MasterRegion, + MaxFunctionEventInvokeConfigListItems, + MaximumEventAgeInSeconds, + MaximumRetryAttempts, + MaxItems, + MaxLayerListItems, + MaxListItems, + MaxProvisionedConcurrencyConfigListItems, + NamespacedFunctionName, + NamespacedStatementId, + OnFailure, + OnSuccess, + OrganizationId, + PackageType, + PositiveInteger, + PreconditionFailedException, + ProvisionedConcurrencyConfigListItem, + ProvisionedConcurrencyConfigNotFoundException, + ProvisionedConcurrencyStatusEnum, + PublishLayerVersionResponse, + PutFunctionCodeSigningConfigResponse, + PutFunctionRecursionConfigResponse, + PutProvisionedConcurrencyConfigResponse, + Qualifier, + RecursiveLoop, + ReservedConcurrentExecutions, + ResourceConflictException, + 
ResourceNotFoundException, + Runtime, + RuntimeVersionConfig, + SnapStart, + SnapStartApplyOn, + SnapStartOptimizationStatus, + SnapStartResponse, + State, + StatementId, + StateReasonCode, + String, + TaggableResource, + TagKeyList, + Tags, + TracingMode, + UnqualifiedFunctionName, + UpdateCodeSigningConfigResponse, + UpdateEventSourceMappingRequest, + UpdateFunctionCodeRequest, + UpdateFunctionConfigurationRequest, + UpdateFunctionUrlConfigResponse, + Version, +) +from localstack.aws.api.lambda_ import FunctionVersion as FunctionVersionApi +from localstack.aws.api.lambda_ import ServiceException as LambdaServiceException +from localstack.aws.api.pipes import ( + DynamoDBStreamStartPosition, + KinesisStreamStartPosition, +) +from localstack.aws.connect import connect_to +from localstack.aws.spec import load_service +from localstack.services.edge import ROUTER +from localstack.services.lambda_ import api_utils +from localstack.services.lambda_ import hooks as lambda_hooks +from localstack.services.lambda_.analytics import ( + FunctionOperation, + FunctionStatus, + function_counter, +) +from localstack.services.lambda_.api_utils import ( + ARCHITECTURES, + STATEMENT_ID_REGEX, + SUBNET_ID_REGEX, + function_locators_from_arn, +) +from localstack.services.lambda_.event_source_mapping.esm_config_factory import ( + EsmConfigFactory, +) +from localstack.services.lambda_.event_source_mapping.esm_worker import ( + EsmState, + EsmWorker, +) +from localstack.services.lambda_.event_source_mapping.esm_worker_factory import ( + EsmWorkerFactory, +) +from localstack.services.lambda_.event_source_mapping.pipe_utils import get_internal_client +from localstack.services.lambda_.invocation import AccessDeniedException +from localstack.services.lambda_.invocation.execution_environment import ( + EnvironmentStartupTimeoutException, +) +from localstack.services.lambda_.invocation.lambda_models import ( + AliasRoutingConfig, + CodeSigningConfig, + EventInvokeConfig, + Function, + FunctionResourcePolicy, + FunctionUrlConfig, + FunctionVersion, + ImageConfig, + LambdaEphemeralStorage, + Layer, + LayerPolicy, + LayerPolicyStatement, + LayerVersion, + ProvisionedConcurrencyConfiguration, + RequestEntityTooLargeException, + ResourcePolicy, + UpdateStatus, + ValidationException, + VersionAlias, + VersionFunctionConfiguration, + VersionIdentifier, + VersionState, + VpcConfig, +) +from localstack.services.lambda_.invocation.lambda_service import ( + LambdaService, + create_image_code, + destroy_code_if_not_used, + lambda_stores, + store_lambda_archive, + store_s3_bucket_archive, +) +from localstack.services.lambda_.invocation.models import LambdaStore +from localstack.services.lambda_.invocation.runtime_executor import get_runtime_executor +from localstack.services.lambda_.lambda_utils import HINT_LOG +from localstack.services.lambda_.layerfetcher.layer_fetcher import LayerFetcher +from localstack.services.lambda_.provider_utils import ( + LambdaLayerVersionIdentifier, + get_function_version, + get_function_version_from_arn, +) +from localstack.services.lambda_.runtimes import ( + ALL_RUNTIMES, + DEPRECATED_RUNTIMES, + DEPRECATED_RUNTIMES_UPGRADES, + RUNTIMES_AGGREGATED, + SNAP_START_SUPPORTED_RUNTIMES, + VALID_RUNTIMES, +) +from localstack.services.lambda_.urlrouter import FunctionUrlRouter +from localstack.services.plugins import ServiceLifecycleHook +from localstack.state import StateVisitor +from localstack.utils.aws.arns import ( + ArnData, + extract_resource_from_arn, + extract_service_from_arn, + get_partition, + 
lambda_event_source_mapping_arn, + parse_arn, +) +from localstack.utils.aws.client_types import ServicePrincipal +from localstack.utils.bootstrap import is_api_enabled +from localstack.utils.collections import PaginatedList +from localstack.utils.event_matcher import validate_event_pattern +from localstack.utils.lambda_debug_mode.lambda_debug_mode_session import LambdaDebugModeSession +from localstack.utils.strings import get_random_hex, short_uid, to_bytes, to_str +from localstack.utils.sync import poll_condition +from localstack.utils.urls import localstack_host + +LOG = logging.getLogger(__name__) + +LAMBDA_DEFAULT_TIMEOUT = 3 +LAMBDA_DEFAULT_MEMORY_SIZE = 128 + +LAMBDA_TAG_LIMIT_PER_RESOURCE = 50 +LAMBDA_LAYERS_LIMIT_PER_FUNCTION = 5 + +TAG_KEY_CUSTOM_URL = "_custom_id_" +# Requirements (from RFC3986 & co): not longer than 63, first char must be +# alpha, then alphanumeric or hyphen, except cannot start or end with hyphen +TAG_KEY_CUSTOM_URL_VALIDATOR = re.compile(r"^[A-Za-z]([A-Za-z0-9\-]{0,61}[A-Za-z0-9])?$") + + +class LambdaProvider(LambdaApi, ServiceLifecycleHook): + lambda_service: LambdaService + create_fn_lock: threading.RLock + create_layer_lock: threading.RLock + router: FunctionUrlRouter + esm_workers: dict[str, EsmWorker] + layer_fetcher: LayerFetcher | None + + def __init__(self) -> None: + self.lambda_service = LambdaService() + self.create_fn_lock = threading.RLock() + self.create_layer_lock = threading.RLock() + self.router = FunctionUrlRouter(ROUTER, self.lambda_service) + self.esm_workers = {} + self.layer_fetcher = None + lambda_hooks.inject_layer_fetcher.run(self) + + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(lambda_stores) + + def on_before_start(self): + # Attempt to start the Lambda Debug Mode session object. 
+ try: + lambda_debug_mode_session = LambdaDebugModeSession.get() + lambda_debug_mode_session.ensure_running() + except Exception as ex: + LOG.error( + "Unexpected error encountered when attempting to initialise Lambda Debug Mode '%s'.", + ex, + ) + + def on_before_state_reset(self): + self.lambda_service.stop() + + def on_after_state_reset(self): + self.router.lambda_service = self.lambda_service = LambdaService() + + def on_before_state_load(self): + self.lambda_service.stop() + + def on_after_state_load(self): + self.lambda_service = LambdaService() + self.router.lambda_service = self.lambda_service + + for account_id, account_bundle in lambda_stores.items(): + for region_name, state in account_bundle.items(): + for fn in state.functions.values(): + for fn_version in fn.versions.values(): + # restore the "Pending" state for every function version and start it + try: + new_state = VersionState( + state=State.Pending, + code=StateReasonCode.Creating, + reason="The function is being created.", + ) + new_config = dataclasses.replace(fn_version.config, state=new_state) + new_version = dataclasses.replace(fn_version, config=new_config) + fn.versions[fn_version.id.qualifier] = new_version + self.lambda_service.create_function_version(fn_version).result( + timeout=5 + ) + except Exception: + LOG.warning( + "Failed to restore function version %s", + fn_version.id.qualified_arn(), + exc_info=True, + ) + # restore provisioned concurrency per function considering both versions and aliases + for ( + provisioned_qualifier, + provisioned_config, + ) in fn.provisioned_concurrency_configs.items(): + fn_arn = None + try: + if api_utils.qualifier_is_alias(provisioned_qualifier): + alias = fn.aliases.get(provisioned_qualifier) + resolved_version = fn.versions.get(alias.function_version) + fn_arn = resolved_version.id.qualified_arn() + elif api_utils.qualifier_is_version(provisioned_qualifier): + fn_version = fn.versions.get(provisioned_qualifier) + fn_arn = fn_version.id.qualified_arn() + else: + raise InvalidParameterValueException( + "Invalid qualifier type:" + " Qualifier can only be an alias or a version for provisioned concurrency." + ) + + manager = self.lambda_service.get_lambda_version_manager(fn_arn) + manager.update_provisioned_concurrency_config( + provisioned_config.provisioned_concurrent_executions + ) + except Exception: + LOG.warning( + "Failed to restore provisioned concurrency %s for function %s", + provisioned_config, + fn_arn, + exc_info=True, + ) + + for esm in state.event_source_mappings.values(): + # Restores event source workers + function_arn = esm.get("FunctionArn") + + # TODO: How do we know the event source is up? + # A basic poll to see if the mapped Lambda function is active/failed + if not poll_condition( + lambda: get_function_version_from_arn(function_arn).config.state.state + in [State.Active, State.Failed], + timeout=10, + ): + LOG.warning( + "Creating ESM for Lambda that is not in running state: %s", + function_arn, + ) + + function_version = get_function_version_from_arn(function_arn) + function_role = function_version.config.role + + is_esm_enabled = esm.get("State", EsmState.DISABLED) not in ( + EsmState.DISABLED, + EsmState.DISABLING, + ) + esm_worker = EsmWorkerFactory( + esm, function_role, is_esm_enabled + ).get_esm_worker() + + # Note: a worker is created in the DISABLED state if not enabled + esm_worker.create() + # TODO: assigning the esm_worker to the dict only works after .create(). 
Could it cause a race + # condition if we get a shutdown here and have a worker thread spawned but not accounted for? + self.esm_workers[esm_worker.uuid] = esm_worker + + def on_after_init(self): + self.router.register_routes() + get_runtime_executor().validate_environment() + + def on_before_stop(self) -> None: + for esm_worker in self.esm_workers.values(): + esm_worker.stop_for_shutdown() + + # TODO: should probably unregister routes? + self.lambda_service.stop() + # Attempt to signal to the Lambda Debug Mode session object to stop. + try: + lambda_debug_mode_session = LambdaDebugModeSession.get() + lambda_debug_mode_session.signal_stop() + except Exception as ex: + LOG.error( + "Unexpected error encountered when attempting to signal Lambda Debug Mode to stop '%s'.", + ex, + ) + + @staticmethod + def _get_function(function_name: str, account_id: str, region: str) -> Function: + state = lambda_stores[account_id][region] + function = state.functions.get(function_name) + if not function: + arn = api_utils.unqualified_lambda_arn( + function_name=function_name, + account=account_id, + region=region, + ) + raise ResourceNotFoundException( + f"Function not found: {arn}", + Type="User", + ) + return function + + @staticmethod + def _get_esm(uuid: str, account_id: str, region: str) -> EventSourceMappingConfiguration: + state = lambda_stores[account_id][region] + esm = state.event_source_mappings.get(uuid) + if not esm: + arn = lambda_event_source_mapping_arn(uuid, account_id, region) + raise ResourceNotFoundException( + f"Event source mapping not found: {arn}", + Type="User", + ) + return esm + + @staticmethod + def _validate_qualifier_expression(qualifier: str) -> None: + if error_messages := api_utils.validate_qualifier(qualifier): + raise ValidationException( + message=api_utils.construct_validation_exception_message(error_messages) + ) + + @staticmethod + def _resolve_fn_qualifier(resolved_fn: Function, qualifier: str | None) -> tuple[str, str]: + """Attempts to resolve a given qualifier and returns a qualifier that exists or + raises an appropriate ResourceNotFoundException. 
+ + :param resolved_fn: The resolved lambda function + :param qualifier: The qualifier to be resolved or None + :return: Tuple of (resolved qualifier, function arn either qualified or unqualified)""" + function_name = resolved_fn.function_name + # assuming function versions need to live in the same account and region + account_id = resolved_fn.latest().id.account + region = resolved_fn.latest().id.region + fn_arn = api_utils.unqualified_lambda_arn(function_name, account_id, region) + if qualifier is not None: + fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region) + if api_utils.qualifier_is_alias(qualifier): + if qualifier not in resolved_fn.aliases: + raise ResourceNotFoundException(f"Cannot find alias arn: {fn_arn}", Type="User") + elif api_utils.qualifier_is_version(qualifier) or qualifier == "$LATEST": + if qualifier not in resolved_fn.versions: + raise ResourceNotFoundException(f"Function not found: {fn_arn}", Type="User") + else: + # matches qualifier pattern but invalid alias or version + raise ResourceNotFoundException(f"Function not found: {fn_arn}", Type="User") + resolved_qualifier = qualifier or "$LATEST" + return resolved_qualifier, fn_arn + + @staticmethod + def _function_revision_id(resolved_fn: Function, resolved_qualifier: str) -> str: + if api_utils.qualifier_is_alias(resolved_qualifier): + return resolved_fn.aliases[resolved_qualifier].revision_id + # Assumes that a non-alias is a version + else: + return resolved_fn.versions[resolved_qualifier].config.revision_id + + def _resolve_vpc_id(self, account_id: str, region_name: str, subnet_id: str) -> str: + ec2_client = connect_to(aws_access_key_id=account_id, region_name=region_name).ec2 + try: + return ec2_client.describe_subnets(SubnetIds=[subnet_id])["Subnets"][0]["VpcId"] + except ec2_client.exceptions.ClientError as e: + code = e.response["Error"]["Code"] + message = e.response["Error"]["Message"] + raise InvalidParameterValueException( + f"Error occurred while DescribeSubnets. EC2 Error Code: {code}. EC2 Error Message: {message}", + Type="User", + ) + + def _build_vpc_config( + self, + account_id: str, + region_name: str, + vpc_config: Optional[dict] = None, + ) -> VpcConfig | None: + if not vpc_config or not is_api_enabled("ec2"): + return None + + subnet_ids = vpc_config.get("SubnetIds", []) + if subnet_ids is not None and len(subnet_ids) == 0: + return VpcConfig(vpc_id="", security_group_ids=[], subnet_ids=[]) + + subnet_id = subnet_ids[0] + if not bool(SUBNET_ID_REGEX.match(subnet_id)): + raise ValidationException( + f"1 validation error detected: Value '[{subnet_id}]' at 'vpcConfig.subnetIds' failed to satisfy constraint: Member must satisfy constraint: [Member must have length less than or equal to 1024, Member must have length greater than or equal to 0, Member must satisfy regular expression pattern: ^subnet-[0-9a-z]*$]" + ) + + return VpcConfig( + vpc_id=self._resolve_vpc_id(account_id, region_name, subnet_id), + security_group_ids=vpc_config.get("SecurityGroupIds", []), + subnet_ids=subnet_ids, + ) + + def _create_version_model( + self, + function_name: str, + region: str, + account_id: str, + description: str | None = None, + revision_id: str | None = None, + code_sha256: str | None = None, + ) -> tuple[FunctionVersion, bool]: + """ + Release a new version to the model if all restrictions are met. 
+ Restrictions:
+ - CodeSha256, if provided, must equal the code hash of the current latest version
+ - RevisionId, if provided, must equal the revision id of the current latest version
+ - The latest version must have changed since the last publish
+ Returns a tuple of the version and a flag: True if a new version was published, False if the latest available version was returned instead.
+ The latter happens if the latest version has not changed since the last publish; in that case the last published version is returned.
+
+ :param function_name: Function name to be published
+ :param region: Region of the function
+ :param account_id: Account of the function
+ :param description: new description of the version (defaults to the description of the function if missing)
+ :param revision_id: Revision id; raises an error if it does not match the latest revision id
+ :param code_sha256: Code sha256; raises an error if it does not match the latest code hash
+ :return: Tuple of (version, True if a new version was published, False if the unchanged last version was returned)
+ """
+ current_latest_version = get_function_version(
+ function_name=function_name, qualifier="$LATEST", account_id=account_id, region=region
+ )
+ if revision_id and current_latest_version.config.revision_id != revision_id:
+ raise PreconditionFailedException(
+ "The Revision Id provided does not match the latest Revision Id. Call the GetFunction/GetAlias API to retrieve the latest Revision Id",
+ Type="User",
+ )
+
+ # check if code hashes match if they are specified
+ current_hash = (
+ current_latest_version.config.code.code_sha256
+ if current_latest_version.config.package_type == PackageType.Zip
+ else current_latest_version.config.image.code_sha256
+ )
+ # if the code is a zip package and hot reloaded (hot reloading is currently only supported for zip package types)
+ # we cannot enforce the CodeSha256 check
+ is_hot_reloaded_zip_package = (
+ current_latest_version.config.package_type == PackageType.Zip
+ and current_latest_version.config.code.is_hot_reloading()
+ )
+ if code_sha256 and current_hash != code_sha256 and not is_hot_reloaded_zip_package:
+ raise InvalidParameterValueException(
+ f"CodeSHA256 ({code_sha256}) is different from current CodeSHA256 in $LATEST ({current_hash}).
Please try again with the CodeSHA256 in $LATEST.", + Type="User", + ) + + state = lambda_stores[account_id][region] + function = state.functions.get(function_name) + changes = {} + if description is not None: + changes["description"] = description + # TODO copy environment instead of restarting one, get rid of all the "Pending"s + + with function.lock: + if function.next_version > 1 and ( + prev_version := function.versions.get(str(function.next_version - 1)) + ): + if ( + prev_version.config.internal_revision + == current_latest_version.config.internal_revision + ): + return prev_version, False + # TODO check if there was a change since last version + next_version = str(function.next_version) + function.next_version += 1 + new_id = VersionIdentifier( + function_name=function_name, + qualifier=next_version, + region=region, + account=account_id, + ) + apply_on = current_latest_version.config.snap_start["ApplyOn"] + optimization_status = SnapStartOptimizationStatus.Off + if apply_on == SnapStartApplyOn.PublishedVersions: + optimization_status = SnapStartOptimizationStatus.On + snap_start = SnapStartResponse( + ApplyOn=apply_on, + OptimizationStatus=optimization_status, + ) + new_version = dataclasses.replace( + current_latest_version, + config=dataclasses.replace( + current_latest_version.config, + last_update=None, # versions never have a last update status + state=VersionState( + state=State.Pending, + code=StateReasonCode.Creating, + reason="The function is being created.", + ), + snap_start=snap_start, + **changes, + ), + id=new_id, + ) + function.versions[next_version] = new_version + return new_version, True + + def _publish_version_from_existing_version( + self, + function_name: str, + region: str, + account_id: str, + description: str | None = None, + revision_id: str | None = None, + code_sha256: str | None = None, + ) -> FunctionVersion: + """ + Publish version from an existing, already initialized LATEST + + :param function_name: Function name + :param region: region + :param account_id: account id + :param description: description + :param revision_id: revision id (check if current version matches) + :param code_sha256: code sha (check if current code matches) + :return: new version + """ + new_version, changed = self._create_version_model( + function_name=function_name, + region=region, + account_id=account_id, + description=description, + revision_id=revision_id, + code_sha256=code_sha256, + ) + if not changed: + return new_version + self.lambda_service.publish_version(new_version) + state = lambda_stores[account_id][region] + function = state.functions.get(function_name) + # TODO: re-evaluate data model to prevent this dirty hack just for bumping the revision id + latest_version = function.versions["$LATEST"] + function.versions["$LATEST"] = dataclasses.replace( + latest_version, config=dataclasses.replace(latest_version.config) + ) + return function.versions.get(new_version.id.qualifier) + + def _publish_version_with_changes( + self, + function_name: str, + region: str, + account_id: str, + description: str | None = None, + revision_id: str | None = None, + code_sha256: str | None = None, + ) -> FunctionVersion: + """ + Publish version together with a new latest version (publish on create / update) + + :param function_name: Function name + :param region: region + :param account_id: account id + :param description: description + :param revision_id: revision id (check if current version matches) + :param code_sha256: code sha (check if current code matches) + :return: new 
version + """ + new_version, changed = self._create_version_model( + function_name=function_name, + region=region, + account_id=account_id, + description=description, + revision_id=revision_id, + code_sha256=code_sha256, + ) + if not changed: + return new_version + self.lambda_service.create_function_version(new_version) + return new_version + + @staticmethod + def _verify_env_variables(env_vars: dict[str, str]): + dumped_env_vars = json.dumps(env_vars, separators=(",", ":")) + if ( + len(dumped_env_vars.encode("utf-8")) + > config.LAMBDA_LIMITS_MAX_FUNCTION_ENVVAR_SIZE_BYTES + ): + raise InvalidParameterValueException( + f"Lambda was unable to configure your environment variables because the environment variables you have provided exceeded the 4KB limit. String measured: {dumped_env_vars}", + Type="User", + ) + + @staticmethod + def _validate_snapstart(snap_start: SnapStart, runtime: Runtime): + apply_on = snap_start.get("ApplyOn") + if apply_on not in [ + SnapStartApplyOn.PublishedVersions, + SnapStartApplyOn.None_, + ]: + raise ValidationException( + f"1 validation error detected: Value '{apply_on}' at 'snapStart.applyOn' failed to satisfy constraint: Member must satisfy enum value set: [PublishedVersions, None]" + ) + + if runtime not in SNAP_START_SUPPORTED_RUNTIMES: + raise InvalidParameterValueException( + f"{runtime} is not supported for SnapStart enabled functions.", Type="User" + ) + + def _validate_layers(self, new_layers: list[str], region: str, account_id: str): + if len(new_layers) > LAMBDA_LAYERS_LIMIT_PER_FUNCTION: + raise InvalidParameterValueException( + "Cannot reference more than 5 layers.", Type="User" + ) + + visited_layers = dict() + for layer_version_arn in new_layers: + ( + layer_region, + layer_account_id, + layer_name, + layer_version_str, + ) = api_utils.parse_layer_arn(layer_version_arn) + if layer_version_str is None: + raise ValidationException( + f"1 validation error detected: Value '[{layer_version_arn}]'" + + r" at 'layers' failed to satisfy constraint: Member must satisfy constraint: [Member must have length less than or equal to 140, Member must have length greater than or equal to 1, Member must satisfy regular expression pattern: (arn:[a-zA-Z0-9-]+:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\d{1}:\d{12}:layer:[a-zA-Z0-9-_]+:[0-9]+)|(arn:[a-zA-Z0-9-]+:lambda:::awslayer:[a-zA-Z0-9-_]+), Member must not be null]", + ) + + state = lambda_stores[layer_account_id][layer_region] + layer = state.layers.get(layer_name) + layer_version = None + if layer is not None: + layer_version = layer.layer_versions.get(layer_version_str) + if layer_account_id == account_id: + if region and layer_region != region: + raise InvalidParameterValueException( + f"Layers are not in the same region as the function. 
" + f"Layers are expected to be in region {region}.", + Type="User", + ) + if layer is None or layer.layer_versions.get(layer_version_str) is None: + raise InvalidParameterValueException( + f"Layer version {layer_version_arn} does not exist.", Type="User" + ) + else: # External layer from other account + # TODO: validate IAM layer policy here, allowing access by default for now and only checking region + if region and layer_region != region: + # TODO: detect user or role from context when IAM users are implemented + user = "user/localstack-testing" + raise AccessDeniedException( + f"User: arn:{get_partition(region)}:iam::{account_id}:{user} is not authorized to perform: lambda:GetLayerVersion on resource: {layer_version_arn} because no resource-based policy allows the lambda:GetLayerVersion action" + ) + if layer is None or layer_version is None: + # Limitation: cannot fetch external layers when using the same account id as the target layer + # because we do not want to trigger the layer fetcher for every non-existing layer. + if self.layer_fetcher is None: + raise NotImplementedError( + "Fetching shared layers from AWS is a pro feature." + ) + + layer = self.layer_fetcher.fetch_layer(layer_version_arn) + if layer is None: + # TODO: detect user or role from context when IAM users are implemented + user = "user/localstack-testing" + raise AccessDeniedException( + f"User: arn:{get_partition(region)}:iam::{account_id}:{user} is not authorized to perform: lambda:GetLayerVersion on resource: {layer_version_arn} because no resource-based policy allows the lambda:GetLayerVersion action" + ) + + # Distinguish between new layer and new layer version + if layer_version is None: + # Create whole layer from scratch + state.layers[layer_name] = layer + else: + # Create layer version if another version of the same layer already exists + state.layers[layer_name].layer_versions[layer_version_str] = ( + layer.layer_versions.get(layer_version_str) + ) + + # only the first two matches in the array are considered for the error message + layer_arn = ":".join(layer_version_arn.split(":")[:-1]) + if layer_arn in visited_layers: + conflict_layer_version_arn = visited_layers[layer_arn] + raise InvalidParameterValueException( + f"Two different versions of the same layer are not allowed to be referenced in the same function. 
{conflict_layer_version_arn} and {layer_version_arn} are versions of the same layer.", + Type="User", + ) + visited_layers[layer_arn] = layer_version_arn + + @staticmethod + def map_layers(new_layers: list[str]) -> list[LayerVersion]: + layers = [] + for layer_version_arn in new_layers: + region_name, account_id, layer_name, layer_version = api_utils.parse_layer_arn( + layer_version_arn + ) + layer = lambda_stores[account_id][region_name].layers.get(layer_name) + layer_version = layer.layer_versions.get(layer_version) + layers.append(layer_version) + return layers + + def get_function_recursion_config( + self, + context: RequestContext, + function_name: UnqualifiedFunctionName, + **kwargs, + ) -> GetFunctionRecursionConfigResponse: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = api_utils.get_function_name(function_name, context) + fn = self._get_function(function_name=function_name, region=region, account_id=account_id) + return GetFunctionRecursionConfigResponse(RecursiveLoop=fn.recursive_loop) + + def put_function_recursion_config( + self, + context: RequestContext, + function_name: UnqualifiedFunctionName, + recursive_loop: RecursiveLoop, + **kwargs, + ) -> PutFunctionRecursionConfigResponse: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = api_utils.get_function_name(function_name, context) + + fn = self._get_function(function_name=function_name, region=region, account_id=account_id) + + allowed_values = list(RecursiveLoop.__members__.values()) + if recursive_loop not in allowed_values: + raise ValidationException( + f"1 validation error detected: Value '{recursive_loop}' at 'recursiveLoop' failed to satisfy constraint: " + f"Member must satisfy enum value set: [Terminate, Allow]" + ) + + fn.recursive_loop = recursive_loop + return PutFunctionRecursionConfigResponse(RecursiveLoop=fn.recursive_loop) + + @handler(operation="CreateFunction", expand=False) + def create_function( + self, + context: RequestContext, + request: CreateFunctionRequest, + ) -> FunctionConfiguration: + context_region = context.region + context_account_id = context.account_id + + zip_file = request.get("Code", {}).get("ZipFile") + if zip_file and len(zip_file) > config.LAMBDA_LIMITS_CODE_SIZE_ZIPPED: + raise RequestEntityTooLargeException( + f"Zipped size must be smaller than {config.LAMBDA_LIMITS_CODE_SIZE_ZIPPED} bytes" + ) + + if context.request.content_length > config.LAMBDA_LIMITS_CREATE_FUNCTION_REQUEST_SIZE: + raise RequestEntityTooLargeException( + f"Request must be smaller than {config.LAMBDA_LIMITS_CREATE_FUNCTION_REQUEST_SIZE} bytes for the CreateFunction operation" + ) + + if architectures := request.get("Architectures"): + if len(architectures) != 1: + raise ValidationException( + f"1 validation error detected: Value '[{', '.join(architectures)}]' at 'architectures' failed to " + f"satisfy constraint: Member must have length less than or equal to 1", + ) + if architectures[0] not in ARCHITECTURES: + raise ValidationException( + f"1 validation error detected: Value '[{', '.join(architectures)}]' at 'architectures' failed to " + f"satisfy constraint: Member must satisfy constraint: [Member must satisfy enum value set: " + f"[x86_64, arm64], Member must not be null]", + ) + + if env_vars := request.get("Environment", {}).get("Variables"): + self._verify_env_variables(env_vars) + + if layers := request.get("Layers", []): + self._validate_layers(layers, region=context_region, account_id=context_account_id) + 
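+ # The checks above and below mirror, roughly in order, the validations applied
+ # on CreateFunction: code/request size, architectures, environment variable
+ # size, layers, then role and runtime. A minimal client call that passes them
+ # might look like this (illustrative sketch only; assumes a hypothetical boto3
+ # client `lambda_client` pointed at LocalStack):
+ #
+ #   lambda_client.create_function(
+ #       FunctionName="my-function",
+ #       Runtime="python3.12",
+ #       Role="arn:aws:iam::000000000000:role/lambda-role",
+ #       Handler="index.handler",
+ #       Code={"ZipFile": zipped_code_bytes},
+ #   )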
+ if not api_utils.is_role_arn(request.get("Role")):
+ raise ValidationException(
+ f"1 validation error detected: Value '{request.get('Role')}'"
+ + " at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+"
+ )
+ if not self.lambda_service.can_assume_role(request.get("Role"), context.region):
+ raise InvalidParameterValueException(
+ "The role defined for the function cannot be assumed by Lambda.", Type="User"
+ )
+ package_type = request.get("PackageType", PackageType.Zip)
+ runtime = request.get("Runtime")
+ self._validate_runtime(package_type, runtime)
+
+ request_function_name = request.get("FunctionName")
+
+ function_name, *_ = api_utils.get_name_and_qualifier(
+ function_arn_or_name=request_function_name,
+ qualifier=None,
+ context=context,
+ )
+
+ if runtime in DEPRECATED_RUNTIMES:
+ LOG.warning(
+ "The Lambda runtime %s is deprecated. "
+ "Please upgrade the runtime for the function %s: "
+ "https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html",
+ runtime,
+ function_name,
+ )
+ if snap_start := request.get("SnapStart"):
+ self._validate_snapstart(snap_start, runtime)
+ state = lambda_stores[context_account_id][context_region]
+
+ with self.create_fn_lock:
+ if function_name in state.functions:
+ raise ResourceConflictException(f"Function already exist: {function_name}")
+ fn = Function(function_name=function_name)
+ arn = VersionIdentifier(
+ function_name=function_name,
+ qualifier="$LATEST",
+ region=context_region,
+ account=context_account_id,
+ )
+ # save function code to s3
+ code = None
+ image = None
+ image_config = None
+ runtime_version_config = RuntimeVersionConfig(
+ # Limitation: the runtime id (presumably sha256 of image) is currently hardcoded
+ # Potential implementation: provide (cached) sha256 hash of used Docker image
+ RuntimeVersionArn=f"arn:{context.partition}:lambda:{context_region}::runtime:8eeff65f6809a3ce81507fe733fe09b835899b99481ba22fd75b5a7338290ec1"
+ )
+ request_code = request.get("Code")
+ if package_type == PackageType.Zip:
+ # TODO verify if correct combination of code is set
+ if zip_file := request_code.get("ZipFile"):
+ code = store_lambda_archive(
+ archive_file=zip_file,
+ function_name=function_name,
+ region_name=context_region,
+ account_id=context_account_id,
+ )
+ elif s3_bucket := request_code.get("S3Bucket"):
+ s3_key = request_code["S3Key"]
+ s3_object_version = request_code.get("S3ObjectVersion")
+ code = store_s3_bucket_archive(
+ archive_bucket=s3_bucket,
+ archive_key=s3_key,
+ archive_version=s3_object_version,
+ function_name=function_name,
+ region_name=context_region,
+ account_id=context_account_id,
+ )
+ else:
+ raise LambdaServiceException("Gotta have s3 bucket or zip file")
+ elif package_type == PackageType.Image:
+ image = request_code.get("ImageUri")
+ if not image:
+ raise LambdaServiceException("Gotta have an image when package type is image")
+ image = create_image_code(image_uri=image)
+
+ image_config_req = request.get("ImageConfig", {})
+ image_config = ImageConfig(
+ command=image_config_req.get("Command"),
+ entrypoint=image_config_req.get("EntryPoint"),
+ working_directory=image_config_req.get("WorkingDirectory"),
+ )
+ # Runtime management controls are not available when providing a custom image
+ runtime_version_config = None
+ if "LoggingConfig" in request:
+ logging_config = request["LoggingConfig"]
+ LOG.warning(
+ "Advanced Lambda Logging Configuration is currently mocked "
+ "and will not
impact the logging behavior. " + "Please create a feature request if needed." + ) + + # when switching to JSON, app and system level log is auto set to INFO + if logging_config.get("LogFormat", None) == LogFormat.JSON: + logging_config = { + "ApplicationLogLevel": "INFO", + "SystemLogLevel": "INFO", + "LogGroup": f"/aws/lambda/{function_name}", + } | logging_config + else: + logging_config = ( + LoggingConfig( + LogFormat=LogFormat.Text, LogGroup=f"/aws/lambda/{function_name}" + ) + | logging_config + ) + + else: + logging_config = LoggingConfig( + LogFormat=LogFormat.Text, LogGroup=f"/aws/lambda/{function_name}" + ) + + version = FunctionVersion( + id=arn, + config=VersionFunctionConfiguration( + last_modified=api_utils.format_lambda_date(datetime.datetime.now()), + description=request.get("Description", ""), + role=request["Role"], + timeout=request.get("Timeout", LAMBDA_DEFAULT_TIMEOUT), + runtime=request.get("Runtime"), + memory_size=request.get("MemorySize", LAMBDA_DEFAULT_MEMORY_SIZE), + handler=request.get("Handler"), + package_type=package_type, + environment=env_vars, + architectures=request.get("Architectures") or [Architecture.x86_64], + tracing_config_mode=request.get("TracingConfig", {}).get( + "Mode", TracingMode.PassThrough + ), + image=image, + image_config=image_config, + code=code, + layers=self.map_layers(layers), + internal_revision=short_uid(), + ephemeral_storage=LambdaEphemeralStorage( + size=request.get("EphemeralStorage", {}).get("Size", 512) + ), + snap_start=SnapStartResponse( + ApplyOn=request.get("SnapStart", {}).get("ApplyOn", SnapStartApplyOn.None_), + OptimizationStatus=SnapStartOptimizationStatus.Off, + ), + runtime_version_config=runtime_version_config, + dead_letter_arn=request.get("DeadLetterConfig", {}).get("TargetArn"), + vpc_config=self._build_vpc_config( + context_account_id, context_region, request.get("VpcConfig") + ), + state=VersionState( + state=State.Pending, + code=StateReasonCode.Creating, + reason="The function is being created.", + ), + logging_config=logging_config, + ), + ) + fn.versions["$LATEST"] = version + state.functions[function_name] = fn + function_counter.labels( + operation=FunctionOperation.create, + runtime=runtime or "n/a", + status=FunctionStatus.success, + invocation_type="n/a", + package_type=package_type, + ) + self.lambda_service.create_function_version(version) + + if tags := request.get("Tags"): + # This will check whether the function exists. 
+ self._store_tags(arn.unqualified_arn(), tags)
+
+ if request.get("Publish"):
+ version = self._publish_version_with_changes(
+ function_name=function_name, region=context_region, account_id=context_account_id
+ )
+
+ if config.LAMBDA_SYNCHRONOUS_CREATE:
+ # block via retrying until "terminal" condition reached before returning
+ if not poll_condition(
+ lambda: get_function_version(
+ function_name, version.id.qualifier, version.id.account, version.id.region
+ ).config.state.state
+ in [State.Active, State.Failed],
+ timeout=10,
+ ):
+ LOG.warning(
+ "LAMBDA_SYNCHRONOUS_CREATE is active, but waiting for %s reached timeout.",
+ function_name,
+ )
+
+ return api_utils.map_config_out(
+ version, return_qualified_arn=False, return_update_status=False
+ )
+
+ def _validate_runtime(self, package_type, runtime):
+ runtimes = ALL_RUNTIMES
+ if config.LAMBDA_RUNTIME_VALIDATION:
+ # flatten the aggregated runtime lists into a single list of runtime names
+ runtimes = list(itertools.chain(*RUNTIMES_AGGREGATED.values()))
+
+ if package_type == PackageType.Zip and runtime not in runtimes:
+ # deprecated runtimes have a different error
+ if runtime in DEPRECATED_RUNTIMES:
+ HINT_LOG.info(
+ "Set env variable LAMBDA_RUNTIME_VALIDATION to 0"
+ " in order to allow usage of deprecated runtimes"
+ )
+ self._check_for_recommended_migration_target(runtime)
+
+ raise InvalidParameterValueException(
+ f"Value {runtime} at 'runtime' failed to satisfy constraint: Member must satisfy enum value set: {VALID_RUNTIMES} or be a valid ARN",
+ Type="User",
+ )
+
+ def _check_for_recommended_migration_target(self, deprecated_runtime):
+ # AWS offers a recommended migration runtime for "newly" deprecated runtimes;
+ # in order to preserve parity with its error messages we need the code below
+ latest_runtime = DEPRECATED_RUNTIMES_UPGRADES.get(deprecated_runtime)
+
+ if latest_runtime is not None:
+ LOG.debug(
+ "The Lambda runtime %s is deprecated. Please upgrade to a supported Lambda runtime such as %s.",
+ deprecated_runtime,
+ latest_runtime,
+ )
+ raise InvalidParameterValueException(
+ f"The runtime parameter of {deprecated_runtime} is no longer supported for creating or updating AWS Lambda functions. We recommend you use a supported runtime while creating or updating functions.",
+ Type="User",
+ )
+
+ @handler(operation="UpdateFunctionConfiguration", expand=False)
+ def update_function_configuration(
+ self, context: RequestContext, request: UpdateFunctionConfigurationRequest
+ ) -> FunctionConfiguration:
+ """updates the $LATEST version of the function"""
+ function_name = request.get("FunctionName")
+
+ # in case we got an ARN or a partial ARN
+ account_id, region = api_utils.get_account_and_region(function_name, context)
+ function_name, qualifier = api_utils.get_name_and_qualifier(function_name, None, context)
+ state = lambda_stores[account_id][region]
+
+ if function_name not in state.functions:
+ raise ResourceNotFoundException(
+ f"Function not found: {api_utils.unqualified_lambda_arn(function_name=function_name, region=region, account=account_id)}",
+ Type="User",
+ )
+ function = state.functions[function_name]
+
+ # TODO: lock modification of latest version
+ # TODO: notify service for changes relevant to re-provisioning of $LATEST
+ latest_version = function.latest()
+ latest_version_config = latest_version.config
+
+ revision_id = request.get("RevisionId")
+ if revision_id and revision_id != latest_version.config.revision_id:
+ raise PreconditionFailedException(
+ "The Revision Id provided does not match the latest Revision Id.
" + "Call the GetFunction/GetAlias API to retrieve the latest Revision Id", + Type="User", + ) + + replace_kwargs = {} + if "EphemeralStorage" in request: + replace_kwargs["ephemeral_storage"] = LambdaEphemeralStorage( + request.get("EphemeralStorage", {}).get("Size", 512) + ) # TODO: do defaults here apply as well? + + if "Role" in request: + if not api_utils.is_role_arn(request["Role"]): + raise ValidationException( + f"1 validation error detected: Value '{request.get('Role')}'" + + " at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" + ) + replace_kwargs["role"] = request["Role"] + + if "Description" in request: + replace_kwargs["description"] = request["Description"] + + if "Timeout" in request: + replace_kwargs["timeout"] = request["Timeout"] + + if "MemorySize" in request: + replace_kwargs["memory_size"] = request["MemorySize"] + + if "DeadLetterConfig" in request: + replace_kwargs["dead_letter_arn"] = request.get("DeadLetterConfig", {}).get("TargetArn") + + if vpc_config := request.get("VpcConfig"): + replace_kwargs["vpc_config"] = self._build_vpc_config(account_id, region, vpc_config) + + if "Handler" in request: + replace_kwargs["handler"] = request["Handler"] + + if "Runtime" in request: + runtime = request["Runtime"] + + if runtime not in ALL_RUNTIMES: + raise InvalidParameterValueException( + f"Value {runtime} at 'runtime' failed to satisfy constraint: Member must satisfy enum value set: {VALID_RUNTIMES} or be a valid ARN", + Type="User", + ) + if runtime in DEPRECATED_RUNTIMES: + LOG.warning( + "The Lambda runtime %s is deprecated. " + "Please upgrade the runtime for the function %s: " + "https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html", + runtime, + function_name, + ) + replace_kwargs["runtime"] = request["Runtime"] + + if snap_start := request.get("SnapStart"): + runtime = replace_kwargs.get("runtime") or latest_version_config.runtime + self._validate_snapstart(snap_start, runtime) + replace_kwargs["snap_start"] = SnapStartResponse( + ApplyOn=snap_start.get("ApplyOn", SnapStartApplyOn.None_), + OptimizationStatus=SnapStartOptimizationStatus.Off, + ) + + if "Environment" in request: + if env_vars := request.get("Environment", {}).get("Variables", {}): + self._verify_env_variables(env_vars) + replace_kwargs["environment"] = env_vars + + if "Layers" in request: + new_layers = request["Layers"] + if new_layers: + self._validate_layers(new_layers, region=region, account_id=account_id) + replace_kwargs["layers"] = self.map_layers(new_layers) + + if "ImageConfig" in request: + new_image_config = request["ImageConfig"] + replace_kwargs["image_config"] = ImageConfig( + command=new_image_config.get("Command"), + entrypoint=new_image_config.get("EntryPoint"), + working_directory=new_image_config.get("WorkingDirectory"), + ) + + if "LoggingConfig" in request: + logging_config = request["LoggingConfig"] + LOG.warning( + "Advanced Lambda Logging Configuration is currently mocked " + "and will not impact the logging behavior. " + "Please create a feature request if needed." 
+ ) + + # when switching to JSON, app and system level log is auto set to INFO + if logging_config.get("LogFormat", None) == LogFormat.JSON: + logging_config = { + "ApplicationLogLevel": "INFO", + "SystemLogLevel": "INFO", + } | logging_config + + last_config = latest_version_config.logging_config + + # add partial update + new_logging_config = last_config | logging_config + + # in case we switched from JSON to Text we need to remove LogLevel keys + if ( + new_logging_config.get("LogFormat") == LogFormat.Text + and last_config.get("LogFormat") == LogFormat.JSON + ): + new_logging_config.pop("ApplicationLogLevel", None) + new_logging_config.pop("SystemLogLevel", None) + + replace_kwargs["logging_config"] = new_logging_config + + if "TracingConfig" in request: + new_mode = request.get("TracingConfig", {}).get("Mode") + if new_mode: + replace_kwargs["tracing_config_mode"] = new_mode + + new_latest_version = dataclasses.replace( + latest_version, + config=dataclasses.replace( + latest_version_config, + last_modified=api_utils.generate_lambda_date(), + internal_revision=short_uid(), + last_update=UpdateStatus( + status=LastUpdateStatus.InProgress, + code="Creating", + reason="The function is being created.", + ), + **replace_kwargs, + ), + ) + function.versions["$LATEST"] = new_latest_version # TODO: notify + self.lambda_service.update_version(new_version=new_latest_version) + + return api_utils.map_config_out(new_latest_version) + + @handler(operation="UpdateFunctionCode", expand=False) + def update_function_code( + self, context: RequestContext, request: UpdateFunctionCodeRequest + ) -> FunctionConfiguration: + """updates the $LATEST version of the function""" + # only supports normal zip packaging atm + # if request.get("Publish"): + # self.lambda_service.create_function_version() + + function_name = request.get("FunctionName") + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name, qualifier = api_utils.get_name_and_qualifier(function_name, None, context) + + store = lambda_stores[account_id][region] + if function_name not in store.functions: + raise ResourceNotFoundException( + f"Function not found: {api_utils.unqualified_lambda_arn(function_name=function_name, region=region, account=account_id)}", + Type="User", + ) + function = store.functions[function_name] + + revision_id = request.get("RevisionId") + if revision_id and revision_id != function.latest().config.revision_id: + raise PreconditionFailedException( + "The Revision Id provided does not match the latest Revision Id. 
" + "Call the GetFunction/GetAlias API to retrieve the latest Revision Id", + Type="User", + ) + + # TODO verify if correct combination of code is set + image = None + if ( + request.get("ZipFile") or request.get("S3Bucket") + ) and function.latest().config.package_type == PackageType.Image: + raise InvalidParameterValueException( + "Please provide ImageUri when updating a function with packageType Image.", + Type="User", + ) + elif request.get("ImageUri") and function.latest().config.package_type == PackageType.Zip: + raise InvalidParameterValueException( + "Please don't provide ImageUri when updating a function with packageType Zip.", + Type="User", + ) + + if zip_file := request.get("ZipFile"): + code = store_lambda_archive( + archive_file=zip_file, + function_name=function_name, + region_name=region, + account_id=account_id, + ) + elif s3_bucket := request.get("S3Bucket"): + s3_key = request["S3Key"] + s3_object_version = request.get("S3ObjectVersion") + code = store_s3_bucket_archive( + archive_bucket=s3_bucket, + archive_key=s3_key, + archive_version=s3_object_version, + function_name=function_name, + region_name=region, + account_id=account_id, + ) + elif image := request.get("ImageUri"): + code = None + image = create_image_code(image_uri=image) + else: + raise LambdaServiceException("Gotta have s3 bucket or zip file or image") + + old_function_version = function.versions.get("$LATEST") + replace_kwargs = {"code": code} if code else {"image": image} + + if architectures := request.get("Architectures"): + if len(architectures) != 1: + raise ValidationException( + f"1 validation error detected: Value '[{', '.join(architectures)}]' at 'architectures' failed to " + f"satisfy constraint: Member must have length less than or equal to 1", + ) + # An empty list of architectures is also forbidden. Further exceptions are tested here for create_function: + # tests.aws.services.lambda_.test_lambda_api.TestLambdaFunction.test_create_lambda_exceptions + if architectures[0] not in ARCHITECTURES: + raise ValidationException( + f"1 validation error detected: Value '[{', '.join(architectures)}]' at 'architectures' failed to " + f"satisfy constraint: Member must satisfy constraint: [Member must satisfy enum value set: " + f"[x86_64, arm64], Member must not be null]", + ) + replace_kwargs["architectures"] = architectures + + config = dataclasses.replace( + old_function_version.config, + internal_revision=short_uid(), + last_modified=api_utils.generate_lambda_date(), + last_update=UpdateStatus( + status=LastUpdateStatus.InProgress, + code="Creating", + reason="The function is being created.", + ), + **replace_kwargs, + ) + function_version = dataclasses.replace(old_function_version, config=config) + function.versions["$LATEST"] = function_version + + self.lambda_service.update_version(new_version=function_version) + if request.get("Publish"): + function_version = self._publish_version_with_changes( + function_name=function_name, region=region, account_id=account_id + ) + return api_utils.map_config_out( + function_version, return_qualified_arn=bool(request.get("Publish")) + ) + + # TODO: does deleting the latest published version affect the next versions number? + # TODO: what happens when we call this with a qualifier and a fully qualified ARN? (+ conflicts?) + # TODO: test different ARN patterns (shorthand ARN?) + # TODO: test deleting across regions? 
+ # TODO: test mismatch between context region and region in ARN + # TODO: test qualifier $LATEST, alias-name and version + def delete_function( + self, + context: RequestContext, + function_name: FunctionName, + qualifier: Qualifier = None, + **kwargs, + ) -> None: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name, qualifier = api_utils.get_name_and_qualifier( + function_name, qualifier, context + ) + + if qualifier and api_utils.qualifier_is_alias(qualifier): + raise InvalidParameterValueException( + "Deletion of aliases is not currently supported.", + Type="User", + ) + + store = lambda_stores[account_id][region] + if qualifier == "$LATEST": + raise InvalidParameterValueException( + "$LATEST version cannot be deleted without deleting the function.", Type="User" + ) + + if function_name not in store.functions: + e = ResourceNotFoundException( + f"Function not found: {api_utils.unqualified_lambda_arn(function_name=function_name, region=region, account=account_id)}", + Type="User", + ) + raise e + function = store.functions.get(function_name) + + if qualifier: + # delete a version of the function + version = function.versions.pop(qualifier, None) + if version: + self.lambda_service.stop_version(version.id.qualified_arn()) + destroy_code_if_not_used(code=version.config.code, function=function) + else: + # delete the whole function + # TODO: introduce locking for safe deletion: We could create a new version at the API layer before + # the old version gets cleaned up in the internal lambda service. + function = store.functions.pop(function_name) + for version in function.versions.values(): + self.lambda_service.stop_version(qualified_arn=version.id.qualified_arn()) + # we can safely destroy the code here + if version.config.code: + version.config.code.destroy() + + def list_functions( + self, + context: RequestContext, + master_region: MasterRegion = None, # (only relevant for lambda@edge) + function_version: FunctionVersionApi = None, + marker: String = None, + max_items: MaxListItems = None, + **kwargs, + ) -> ListFunctionsResponse: + state = lambda_stores[context.account_id][context.region] + + if function_version and function_version != FunctionVersionApi.ALL: + raise ValidationException( + f"1 validation error detected: Value '{function_version}'" + + " at 'functionVersion' failed to satisfy constraint: Member must satisfy enum value set: [ALL]" + ) + + if function_version == FunctionVersionApi.ALL: + # include all versions for all function + versions = [v for f in state.functions.values() for v in f.versions.values()] + return_qualified_arn = True + else: + versions = [f.latest() for f in state.functions.values()] + return_qualified_arn = False + + versions = [ + api_utils.map_to_list_response( + api_utils.map_config_out(fc, return_qualified_arn=return_qualified_arn) + ) + for fc in versions + ] + versions = PaginatedList(versions) + page, token = versions.get_page( + lambda version: version["FunctionArn"], + marker, + max_items, + ) + return ListFunctionsResponse(Functions=page, NextMarker=token) + + def get_function( + self, + context: RequestContext, + function_name: NamespacedFunctionName, + qualifier: Qualifier = None, + **kwargs, + ) -> GetFunctionResponse: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name, qualifier = api_utils.get_name_and_qualifier( + function_name, qualifier, context + ) + + fn = lambda_stores[account_id][region].functions.get(function_name) + if fn is None: + if 
qualifier is None: + raise ResourceNotFoundException( + f"Function not found: {api_utils.unqualified_lambda_arn(function_name, account_id, region)}", + Type="User", + ) + else: + raise ResourceNotFoundException( + f"Function not found: {api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region)}", + Type="User", + ) + alias_name = None + if qualifier and api_utils.qualifier_is_alias(qualifier): + if qualifier not in fn.aliases: + alias_arn = api_utils.qualified_lambda_arn( + function_name, qualifier, account_id, region + ) + raise ResourceNotFoundException(f"Function not found: {alias_arn}", Type="User") + alias_name = qualifier + qualifier = fn.aliases[alias_name].function_version + + version = get_function_version( + function_name=function_name, + qualifier=qualifier, + account_id=account_id, + region=region, + ) + tags = self._get_tags(api_utils.unqualified_lambda_arn(function_name, account_id, region)) + additional_fields = {} + if tags: + additional_fields["Tags"] = tags + code_location = None + if code := version.config.code: + code_location = FunctionCodeLocation( + Location=code.generate_presigned_url(), RepositoryType="S3" + ) + elif image := version.config.image: + code_location = FunctionCodeLocation( + ImageUri=image.image_uri, + RepositoryType=image.repository_type, + ResolvedImageUri=image.resolved_image_uri, + ) + concurrency = None + if fn.reserved_concurrent_executions: + concurrency = Concurrency( + ReservedConcurrentExecutions=fn.reserved_concurrent_executions + ) + + return GetFunctionResponse( + Configuration=api_utils.map_config_out( + version, return_qualified_arn=bool(qualifier), alias_name=alias_name + ), + Code=code_location, # TODO + Concurrency=concurrency, + **additional_fields, + ) + + def get_function_configuration( + self, + context: RequestContext, + function_name: NamespacedFunctionName, + qualifier: Qualifier = None, + **kwargs, + ) -> FunctionConfiguration: + account_id, region = api_utils.get_account_and_region(function_name, context) + # CAVE: THIS RETURN VALUE IS *NOT* THE SAME AS IN get_function (!) but seems to be only configuration part? 
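+ # Illustration of the difference (sketch, assuming a hypothetical boto3
+ # client `lambda_client`): get_function returns a nested response such as
+ #   {"Configuration": {...}, "Code": {...}, "Tags": {...}}
+ # while get_function_configuration returns only the flat configuration part:
+ #   cfg = lambda_client.get_function_configuration(
+ #       FunctionName="my-function", Qualifier="1"
+ #   )
+ #   assert "FunctionArn" in cfg and "Code" not in cfg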
+ function_name, qualifier = api_utils.get_name_and_qualifier( + function_name, qualifier, context + ) + version = get_function_version( + function_name=function_name, + qualifier=qualifier, + account_id=account_id, + region=region, + ) + return api_utils.map_config_out(version, return_qualified_arn=bool(qualifier)) + + def invoke( + self, + context: RequestContext, + function_name: NamespacedFunctionName, + invocation_type: InvocationType = None, + log_type: LogType = None, + client_context: String = None, + payload: IO[Blob] = None, + qualifier: Qualifier = None, + **kwargs, + ) -> InvocationResponse: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name, qualifier = api_utils.get_name_and_qualifier( + function_name, qualifier, context + ) + + time_before = time.perf_counter() + try: + invocation_result = self.lambda_service.invoke( + function_name=function_name, + qualifier=qualifier, + region=region, + account_id=account_id, + invocation_type=invocation_type, + client_context=client_context, + request_id=context.request_id, + trace_context=context.trace_context, + payload=payload.read() if payload else None, + ) + except ServiceException: + raise + except EnvironmentStartupTimeoutException as e: + raise LambdaServiceException( + f"[{context.request_id}] Timeout while starting up lambda environment for function {function_name}:{qualifier}" + ) from e + except Exception as e: + LOG.error( + "[%s] Error while invoking lambda %s", + context.request_id, + function_name, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + raise LambdaServiceException( + f"[{context.request_id}] Internal error while executing lambda {function_name}:{qualifier}. Caused by {type(e).__name__}: {e}" + ) from e + + if invocation_type == InvocationType.Event: + # This happens when invocation type is event + return InvocationResponse(StatusCode=202) + if invocation_type == InvocationType.DryRun: + # This happens when invocation type is dryrun + return InvocationResponse(StatusCode=204) + LOG.debug("Lambda invocation duration: %0.2fms", (time.perf_counter() - time_before) * 1000) + + response = InvocationResponse( + StatusCode=200, + Payload=invocation_result.payload, + ExecutedVersion=invocation_result.executed_version, + ) + + if invocation_result.is_error: + response["FunctionError"] = "Unhandled" + + if log_type == LogType.Tail: + response["LogResult"] = to_str( + base64.b64encode(to_bytes(invocation_result.logs)[-4096:]) + ) + + return response + + # Version operations + def publish_version( + self, + context: RequestContext, + function_name: FunctionName, + code_sha256: String = None, + description: Description = None, + revision_id: String = None, + **kwargs, + ) -> FunctionConfiguration: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = api_utils.get_function_name(function_name, context) + new_version = self._publish_version_from_existing_version( + function_name=function_name, + description=description, + account_id=account_id, + region=region, + revision_id=revision_id, + code_sha256=code_sha256, + ) + return api_utils.map_config_out(new_version, return_qualified_arn=True) + + def list_versions_by_function( + self, + context: RequestContext, + function_name: NamespacedFunctionName, + marker: String = None, + max_items: MaxListItems = None, + **kwargs, + ) -> ListVersionsByFunctionResponse: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = 
api_utils.get_function_name(function_name, context) + function = self._get_function( + function_name=function_name, region=region, account_id=account_id + ) + versions = [ + api_utils.map_to_list_response( + api_utils.map_config_out(version=version, return_qualified_arn=True) + ) + for version in function.versions.values() + ] + items = PaginatedList(versions) + page, token = items.get_page( + lambda item: item, + marker, + max_items, + ) + return ListVersionsByFunctionResponse(Versions=page, NextMarker=token) + + # Alias + + def _create_routing_config_model( + self, routing_config_dict: dict[str, float], function_version: FunctionVersion + ): + if len(routing_config_dict) > 1: + raise InvalidParameterValueException( + "Number of items in AdditionalVersionWeights cannot be greater than 1", + Type="User", + ) + # should be exactly one item here, still iterating, might be supported in the future + for key, value in routing_config_dict.items(): + if value < 0.0 or value >= 1.0: + raise ValidationException( + f"1 validation error detected: Value '{{{key}={value}}}' at 'routingConfig.additionalVersionWeights' failed to satisfy constraint: Map value must satisfy constraint: [Member must have value less than or equal to 1.0, Member must have value greater than or equal to 0.0, Member must not be null]" + ) + if key == function_version.id.qualifier: + raise InvalidParameterValueException( + f"Invalid function version {function_version.id.qualifier}. Function version {function_version.id.qualifier} is already included in routing configuration.", + Type="User", + ) + # check if version target is latest, then no routing config is allowed + if function_version.id.qualifier == "$LATEST": + raise InvalidParameterValueException( + "$LATEST is not supported for an alias pointing to more than 1 version" + ) + if not api_utils.qualifier_is_version(key): + raise ValidationException( + f"1 validation error detected: Value '{{{key}={value}}}' at 'routingConfig.additionalVersionWeights' failed to satisfy constraint: Map keys must satisfy constraint: [Member must have length less than or equal to 1024, Member must have length greater than or equal to 1, Member must satisfy regular expression pattern: [0-9]+, Member must not be null]" + ) + + # checking if the version in the config exists + get_function_version( + function_name=function_version.id.function_name, + qualifier=key, + region=function_version.id.region, + account_id=function_version.id.account, + ) + return AliasRoutingConfig(version_weights=routing_config_dict) + + def create_alias( + self, + context: RequestContext, + function_name: FunctionName, + name: Alias, + function_version: Version, + description: Description = None, + routing_config: AliasRoutingConfiguration = None, + **kwargs, + ) -> AliasConfiguration: + if not api_utils.qualifier_is_alias(name): + raise ValidationException( + f"1 validation error detected: Value '{name}' at 'name' failed to satisfy constraint: Member must satisfy regular expression pattern: (?!^[0-9]+$)([a-zA-Z0-9-_]+)" + ) + + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = api_utils.get_function_name(function_name, context) + target_version = get_function_version( + function_name=function_name, + qualifier=function_version, + region=region, + account_id=account_id, + ) + function = self._get_function( + function_name=function_name, region=region, account_id=account_id + ) + # description is always present, if not specified it's an empty string + description = description 
or "" + with function.lock: + if existing_alias := function.aliases.get(name): + raise ResourceConflictException( + f"Alias already exists: {api_utils.map_alias_out(alias=existing_alias, function=function)['AliasArn']}", + Type="User", + ) + # checking if the version exists + routing_configuration = None + if routing_config and ( + routing_config_dict := routing_config.get("AdditionalVersionWeights") + ): + routing_configuration = self._create_routing_config_model( + routing_config_dict, target_version + ) + + alias = VersionAlias( + name=name, + function_version=function_version, + description=description, + routing_configuration=routing_configuration, + ) + function.aliases[name] = alias + return api_utils.map_alias_out(alias=alias, function=function) + + def list_aliases( + self, + context: RequestContext, + function_name: FunctionName, + function_version: Version = None, + marker: String = None, + max_items: MaxListItems = None, + **kwargs, + ) -> ListAliasesResponse: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = api_utils.get_function_name(function_name, context) + function = self._get_function( + function_name=function_name, region=region, account_id=account_id + ) + aliases = [ + api_utils.map_alias_out(alias, function) + for alias in function.aliases.values() + if function_version is None or alias.function_version == function_version + ] + + aliases = PaginatedList(aliases) + page, token = aliases.get_page( + lambda alias: alias["AliasArn"], + marker, + max_items, + ) + + return ListAliasesResponse(Aliases=page, NextMarker=token) + + def delete_alias( + self, context: RequestContext, function_name: FunctionName, name: Alias, **kwargs + ) -> None: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = api_utils.get_function_name(function_name, context) + function = self._get_function( + function_name=function_name, region=region, account_id=account_id + ) + version_alias = function.aliases.pop(name, None) + + # cleanup related resources + if name in function.provisioned_concurrency_configs: + function.provisioned_concurrency_configs.pop(name) + + # TODO: Allow for deactivating/unregistering specific Lambda URLs + if version_alias and name in function.function_url_configs: + url_config = function.function_url_configs.pop(name) + LOG.debug( + "Stopping aliased Lambda Function URL %s for %s", + url_config.url, + url_config.function_name, + ) + + def get_alias( + self, context: RequestContext, function_name: FunctionName, name: Alias, **kwargs + ) -> AliasConfiguration: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = api_utils.get_function_name(function_name, context) + function = self._get_function( + function_name=function_name, region=region, account_id=account_id + ) + if not (alias := function.aliases.get(name)): + raise ResourceNotFoundException( + f"Cannot find alias arn: {api_utils.qualified_lambda_arn(function_name=function_name, qualifier=name, region=region, account=account_id)}", + Type="User", + ) + return api_utils.map_alias_out(alias=alias, function=function) + + def update_alias( + self, + context: RequestContext, + function_name: FunctionName, + name: Alias, + function_version: Version = None, + description: Description = None, + routing_config: AliasRoutingConfiguration = None, + revision_id: String = None, + **kwargs, + ) -> AliasConfiguration: + account_id, region = api_utils.get_account_and_region(function_name, context) + 
function_name = api_utils.get_function_name(function_name, context)
+ function = self._get_function(
+ function_name=function_name, region=region, account_id=account_id
+ )
+ if not (alias := function.aliases.get(name)):
+ fn_arn = api_utils.qualified_lambda_arn(function_name, name, account_id, region)
+ raise ResourceNotFoundException(
+ f"Alias not found: {fn_arn}",
+ Type="User",
+ )
+ if revision_id and alias.revision_id != revision_id:
+ raise PreconditionFailedException(
+ "The Revision Id provided does not match the latest Revision Id. "
+ "Call the GetFunction/GetAlias API to retrieve the latest Revision Id",
+ Type="User",
+ )
+ changes = {}
+ if function_version is not None:
+ changes |= {"function_version": function_version}
+ if description is not None:
+ changes |= {"description": description}
+ if routing_config is not None:
+ # if it is an empty dict or AdditionalVersionWeights is empty, set routing config to None
+ new_routing_config = None
+ if routing_config_dict := routing_config.get("AdditionalVersionWeights"):
+ # resolve the version the alias points to (or will point to after this
+ # update) so the routing config can be validated against it
+ target_version = get_function_version(
+ function_name=function_name,
+ qualifier=function_version or alias.function_version,
+ region=region,
+ account_id=account_id,
+ )
+ new_routing_config = self._create_routing_config_model(
+ routing_config_dict, target_version
+ )
+ changes |= {"routing_configuration": new_routing_config}
+ # even if no changes are done, we have to update the revision id for some reason
+ old_alias = alias
+ alias = dataclasses.replace(alias, **changes)
+ function.aliases[name] = alias
+
+ # TODO: signal lambda service that pointer potentially changed
+ self.lambda_service.update_alias(old_alias=old_alias, new_alias=alias, function=function)
+
+ return api_utils.map_alias_out(alias=alias, function=function)
+
+ # =======================================
+ # ======= EVENT SOURCE MAPPINGS =========
+ # =======================================
+ def check_service_resource_exists(
+ self, service: str, resource_arn: str, function_arn: str, function_role_arn: str
+ ):
+ """
+ Check if the service resource exists and if the function has access to it.
+
+ Raises:
+ InvalidParameterValueException: If the service resource does not exist or the function does not have access to it.
+ """
+ arn = parse_arn(resource_arn)
+ source_client = get_internal_client(
+ arn=resource_arn,
+ role_arn=function_role_arn,
+ service_principal=ServicePrincipal.lambda_,
+ source_arn=function_arn,
+ )
+ if service in ["sqs", "sqs-fifo"]:
+ try:
+ # AWS uses `GetQueueAttributes` internally to verify the queue existence, but we need the `QueueUrl`
+ # which is not given directly. We build out a dummy `QueueUrl` which can be parsed by SQS to return
+ # the right value
+ queue_name = arn["resource"].split("/")[-1]
+ queue_url = f"http://sqs.{arn['region']}.domain/{arn['account']}/{queue_name}"
+ source_client.get_queue_attributes(QueueUrl=queue_url)
+ except ClientError as e:
+ error_code = e.response["Error"]["Code"]
+ if error_code == "AWS.SimpleQueueService.NonExistentQueue":
+ raise InvalidParameterValueException(
+ f"Error occurred while ReceiveMessage. SQS Error Code: {error_code}.
SQS Error Message: {e.response['Error']['Message']}", + Type="User", + ) + raise e + elif service in ["kinesis"]: + try: + source_client.describe_stream(StreamARN=resource_arn) + except ClientError as e: + if e.response["Error"]["Code"] == "ResourceNotFoundException": + raise InvalidParameterValueException( + f"Stream not found: {resource_arn}", + Type="User", + ) + raise e + elif service in ["dynamodb"]: + try: + source_client.describe_stream(StreamArn=resource_arn) + except ClientError as e: + if e.response["Error"]["Code"] == "ResourceNotFoundException": + raise InvalidParameterValueException( + f"Stream not found: {resource_arn}", + Type="User", + ) + raise e + + @handler("CreateEventSourceMapping", expand=False) + def create_event_source_mapping( + self, + context: RequestContext, + request: CreateEventSourceMappingRequest, + ) -> EventSourceMappingConfiguration: + return self.create_event_source_mapping_v2(context, request) + + def create_event_source_mapping_v2( + self, + context: RequestContext, + request: CreateEventSourceMappingRequest, + ) -> EventSourceMappingConfiguration: + # Validations + function_arn, function_name, state, function_version, function_role = ( + self.validate_event_source_mapping(context, request) + ) + + esm_config = EsmConfigFactory(request, context, function_arn).get_esm_config() + + # Copy esm_config to avoid a race condition with potential async update in the store + state.event_source_mappings[esm_config["UUID"]] = esm_config.copy() + enabled = request.get("Enabled", True) + # TODO: check for potential async race condition update -> think about locking + esm_worker = EsmWorkerFactory(esm_config, function_role, enabled).get_esm_worker() + self.esm_workers[esm_worker.uuid] = esm_worker + # TODO: check StateTransitionReason, LastModified, LastProcessingResult (concurrent updates requires locking!) + if tags := request.get("Tags"): + self._store_tags(esm_config.get("EventSourceMappingArn"), tags) + esm_worker.create() + return esm_config + + def validate_event_source_mapping(self, context, request): + # TODO: test whether stream ARNs are valid sources for Pipes or ESM or whether only DynamoDB table ARNs work + # TODO: Validate MaxRecordAgeInSeconds (i.e cannot subceed 60s but can be -1) and MaxRetryAttempts parameters. 
+        is_create_esm_request = context.operation.name == self.create_event_source_mapping.operation
+
+        if destination_config := request.get("DestinationConfig"):
+            if "OnSuccess" in destination_config:
+                raise InvalidParameterValueException(
+                    "Unsupported DestinationConfig parameter for given event source mapping type.",
+                    Type="User",
+                )
+
+        service = None
+        if "SelfManagedEventSource" in request:
+            service = "kafka"
+            if "SourceAccessConfigurations" not in request:
+                raise InvalidParameterValueException(
+                    "Required 'sourceAccessConfigurations' parameter is missing.", Type="User"
+                )
+        if service is None and "EventSourceArn" not in request:
+            raise InvalidParameterValueException("Unrecognized event source.", Type="User")
+        if service is None:
+            service = extract_service_from_arn(request["EventSourceArn"])
+
+        batch_size = api_utils.validate_and_set_batch_size(service, request.get("BatchSize"))
+        if service in ["dynamodb", "kinesis"]:
+            starting_position = request.get("StartingPosition")
+            if not starting_position:
+                raise InvalidParameterValueException(
+                    "1 validation error detected: Value null at 'startingPosition' failed to satisfy constraint: Member must not be null.",
+                    Type="User",
+                )
+
+            if starting_position not in KinesisStreamStartPosition.__members__:
+                raise ValidationException(
+                    f"1 validation error detected: Value '{starting_position}' at 'startingPosition' failed to satisfy constraint: Member must satisfy enum value set: [LATEST, AT_TIMESTAMP, TRIM_HORIZON]"
+                )
+            # AT_TIMESTAMP is not allowed for DynamoDB Streams
+            elif (
+                service == "dynamodb"
+                and starting_position not in DynamoDBStreamStartPosition.__members__
+            ):
+                raise InvalidParameterValueException(
+                    f"Unsupported starting position for arn type: {request['EventSourceArn']}",
+                    Type="User",
+                )
+
+        if service in ["sqs", "sqs-fifo"]:
+            if batch_size > 10 and request.get("MaximumBatchingWindowInSeconds", 0) == 0:
+                raise InvalidParameterValueException(
+                    "Maximum batch window in seconds must be greater than 0 if maximum batch size is greater than 10",
+                    Type="User",
+                )
+
+        if (filter_criteria := request.get("FilterCriteria")) is not None:
+            for filter_ in filter_criteria.get("Filters", []):
+                pattern_str = filter_.get("Pattern")
+                if not pattern_str or not isinstance(pattern_str, str):
+                    raise InvalidParameterValueException(
+                        "Invalid filter pattern definition.", Type="User"
+                    )
+
+                if not validate_event_pattern(pattern_str):
+                    raise InvalidParameterValueException(
+                        "Invalid filter pattern definition.", Type="User"
+                    )
+
+        # Can either have a FunctionName (i.e. a CreateEventSourceMapping request) or
+        # an internal EventSourceMappingConfiguration representation
+        request_function_name = request.get("FunctionName") or request.get("FunctionArn")
+        # can be either a partial ARN or a full ARN for the version/alias
+        function_name, qualifier, account, region = function_locators_from_arn(
+            request_function_name
+        )
+        # TODO: validate `context.region` vs. `region(request["FunctionName"])` vs. `region(request["EventSourceArn"])`
+        account = account or context.account_id
+        region = region or context.region
+        state = lambda_stores[account][region]
+        fn = state.functions.get(function_name)
+        if not fn:
+            raise InvalidParameterValueException("Function does not exist", Type="User")
+
+        if qualifier:
+            # make sure the function version/alias exists
+            if api_utils.qualifier_is_alias(qualifier):
+                fn_alias = fn.aliases.get(qualifier)
+                if not fn_alias:
+                    raise Exception("unknown alias")  # TODO: cover via test
+            elif api_utils.qualifier_is_version(qualifier):
+                fn_version = fn.versions.get(qualifier)
+                if not fn_version:
+                    raise Exception("unknown version")  # TODO: cover via test
+            elif qualifier == "$LATEST":
+                pass
+            else:
+                raise Exception("invalid functionname")  # TODO: cover via test
+            fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account, region)
+
+        else:
+            fn_arn = api_utils.unqualified_lambda_arn(function_name, account, region)
+
+        function_version = get_function_version_from_arn(fn_arn)
+        function_role = function_version.config.role
+
+        if source_arn := request.get("EventSourceArn"):
+            self.check_service_resource_exists(service, source_arn, fn_arn, function_role)
+        # Check whether we are validating a CreateEventSourceMapping request
+        if is_create_esm_request:
+
+            def _get_mapping_sources(mapping: dict[str, Any]) -> list[str]:
+                if event_source_arn := mapping.get("EventSourceArn"):
+                    return [event_source_arn]
+                return (
+                    mapping.get("SelfManagedEventSource", {})
+                    .get("Endpoints", {})
+                    .get("KAFKA_BOOTSTRAP_SERVERS", [])
+                )
+
+            # check for event source duplicates
+            # TODO: currently validated for sqs, kinesis, and dynamodb
+            service_id = load_service(service).service_id
+            for uuid, mapping in state.event_source_mappings.items():
+                mapping_sources = _get_mapping_sources(mapping)
+                request_sources = _get_mapping_sources(request)
+                if mapping["FunctionArn"] == fn_arn and (
+                    set(mapping_sources).intersection(request_sources)
+                ):
+                    if service == "sqs":
+                        # *shakes fist at SQS*
+                        raise ResourceConflictException(
+                            f'An event source mapping with {service_id} arn (" {mapping["EventSourceArn"]} ") '
+                            f'and function (" {function_name} ") already exists. Please update or delete the '
+                            f"existing mapping with UUID {uuid}",
+                            Type="User",
+                        )
+                    elif service == "kafka":
+                        if set(mapping["Topics"]).intersection(request["Topics"]):
+                            raise ResourceConflictException(
+                                f'An event source mapping with event source ("{",".join(request_sources)}"), '
+                                f'function ("{fn_arn}"), '
+                                f'topics ("{",".join(request["Topics"])}") already exists. Please update or delete the '
+                                f"existing mapping with UUID {uuid}",
+                                Type="User",
+                            )
+                    else:
+                        raise ResourceConflictException(
+                            f'The event source arn (" {mapping["EventSourceArn"]} ") and function '
+                            f'(" {function_name} ") provided mapping already exists. Please update or delete the '
+                            f"existing mapping with UUID {uuid}",
+                            Type="User",
+                        )
+        return fn_arn, function_name, state, function_version, function_role
+
+    @handler("UpdateEventSourceMapping", expand=False)
+    def update_event_source_mapping(
+        self,
+        context: RequestContext,
+        request: UpdateEventSourceMappingRequest,
+    ) -> EventSourceMappingConfiguration:
+        return self.update_event_source_mapping_v2(context, request)
+
+    def update_event_source_mapping_v2(
+        self,
+        context: RequestContext,
+        request: UpdateEventSourceMappingRequest,
+    ) -> EventSourceMappingConfiguration:
+        # TODO: test and implement this properly (quite complex with many validations and limitations!)
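+        # Illustrative boto3 call served by this handler (all values are example placeholders):
+        #   boto3.client("lambda").update_event_source_mapping(
+        #       UUID=esm_uuid, BatchSize=5, Enabled=False
+        #   )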
+        LOG.warning(
+            "Updating Lambda Event Source Mapping is in experimental state and not yet fully tested."
+        )
+        state = lambda_stores[context.account_id][context.region]
+        request_data = {**request}
+        uuid = request_data.pop("UUID", None)
+        if not uuid:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+        old_event_source_mapping = state.event_source_mappings.get(uuid)
+        esm_worker = self.esm_workers.get(uuid)
+        if old_event_source_mapping is None or esm_worker is None:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )  # TODO: test?
+
+        # normalize values to overwrite
+        event_source_mapping = old_event_source_mapping | request_data
+
+        temp_params = {}  # values only set for the returned response, not saved internally (e.g. transient state)
+
+        # Validate the newly updated ESM object. We ignore the output here since we only care whether an exception is raised.
+        function_arn, _, _, function_version, function_role = self.validate_event_source_mapping(
+            context, event_source_mapping
+        )
+
+        # remove the FunctionName field
+        event_source_mapping.pop("FunctionName", None)
+
+        if function_arn:
+            event_source_mapping["FunctionArn"] = function_arn
+
+        # Only apply the update if the desired state differs
+        enabled = request.get("Enabled")
+        if enabled is not None:
+            if enabled and old_event_source_mapping["State"] != EsmState.ENABLED:
+                event_source_mapping["State"] = EsmState.ENABLING
+            # TODO: What happens when trying to update during an update or failed state?!
+            elif not enabled and old_event_source_mapping["State"] == EsmState.ENABLED:
+                event_source_mapping["State"] = EsmState.DISABLING
+        else:
+            event_source_mapping["State"] = EsmState.UPDATING
+
+        # To ensure parity, certain responses need to be returned immediately
+        temp_params["State"] = event_source_mapping["State"]
+
+        state.event_source_mappings[uuid] = event_source_mapping
+
+        # TODO: Currently, we re-create the entire ESM worker. Look into an approach with better performance.
+        worker_factory = EsmWorkerFactory(
+            event_source_mapping, function_role, request.get("Enabled", esm_worker.enabled)
+        )
+
+        # Get a new ESM worker object but do not activate it yet, since the factory holds all logic for creating a new worker from a configuration.
+        updated_esm_worker = worker_factory.get_esm_worker()
+        self.esm_workers[uuid] = updated_esm_worker
+
+        # We should stop() the worker since delete() would remove the ESM from the state mapping.
+        esm_worker.stop()
+        # This will create an EsmWorker in the CREATING state if enabled; otherwise, the DISABLING state is set.
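+        # Rough lifecycle as suggested by the EsmState values used in this module (not an
+        # authoritative state machine): CREATING/UPDATING -> ENABLED, DISABLING -> DISABLED,
+        # and DELETING once delete_event_source_mapping() is called.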
+        updated_esm_worker.create()
+
+        return {**event_source_mapping, **temp_params}
+
+    def delete_event_source_mapping(
+        self, context: RequestContext, uuid: String, **kwargs
+    ) -> EventSourceMappingConfiguration:
+        state = lambda_stores[context.account_id][context.region]
+        event_source_mapping = state.event_source_mappings.get(uuid)
+        if not event_source_mapping:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+        esm = state.event_source_mappings[uuid]
+        # TODO: add proper locking
+        esm_worker = self.esm_workers.pop(uuid, None)
+        # Asynchronous delete in v2
+        if not esm_worker:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+        esm_worker.delete()
+        return {**esm, "State": EsmState.DELETING}
+
+    def get_event_source_mapping(
+        self, context: RequestContext, uuid: String, **kwargs
+    ) -> EventSourceMappingConfiguration:
+        state = lambda_stores[context.account_id][context.region]
+        event_source_mapping = state.event_source_mappings.get(uuid)
+        if not event_source_mapping:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+        esm_worker = self.esm_workers.get(uuid)
+        if not esm_worker:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+        event_source_mapping["State"] = esm_worker.current_state
+        event_source_mapping["StateTransitionReason"] = esm_worker.state_transition_reason
+        return event_source_mapping
+
+    def list_event_source_mappings(
+        self,
+        context: RequestContext,
+        event_source_arn: Arn = None,
+        function_name: FunctionName = None,
+        marker: String = None,
+        max_items: MaxListItems = None,
+        **kwargs,
+    ) -> ListEventSourceMappingsResponse:
+        state = lambda_stores[context.account_id][context.region]
+
+        esms = state.event_source_mappings.values()
+        # TODO: update and test State and StateTransitionReason for ESM v2
+
+        if event_source_arn:  # TODO: validate pattern
+            esms = [e for e in esms if e.get("EventSourceArn") == event_source_arn]
+
+        if function_name:
+            esms = [e for e in esms if function_name in e["FunctionArn"]]
+
+        esms = PaginatedList(esms)
+        page, token = esms.get_page(
+            lambda x: x["UUID"],
+            marker,
+            max_items,
+        )
+        return ListEventSourceMappingsResponse(EventSourceMappings=page, NextMarker=token)
+
+    def get_source_type_from_request(self, request: dict[str, Any]) -> str:
+        if event_source_arn := request.get("EventSourceArn", ""):
+            service = extract_service_from_arn(event_source_arn)
+            if service == "sqs" and "fifo" in event_source_arn:
+                service = "sqs-fifo"
+            return service
+        elif request.get("SelfManagedEventSource"):
+            return "kafka"
+
+    # =======================================
+    # ============ FUNCTION URLS ============
+    # =======================================
+
+    @staticmethod
+    def _validate_qualifier(qualifier: str) -> None:
+        if qualifier == "$LATEST" or (qualifier and api_utils.qualifier_is_version(qualifier)):
+            raise ValidationException(
+                f"1 validation error detected: Value '{qualifier}' at 'qualifier' failed to satisfy constraint: Member must satisfy regular expression pattern: ((?!^\\d+$)^[0-9a-zA-Z-_]+$)"
+            )
+
+    @staticmethod
+    def _validate_invoke_mode(invoke_mode: str) -> None:
+        if invoke_mode and invoke_mode not in [InvokeMode.BUFFERED, InvokeMode.RESPONSE_STREAM]:
+            raise ValidationException(
+                f"1 validation error detected: Value '{invoke_mode}' at 'invokeMode' failed to satisfy constraint: Member must satisfy enum value set: [RESPONSE_STREAM, BUFFERED]"
+            )
+        if invoke_mode == InvokeMode.RESPONSE_STREAM:
+            # TODO: should we actually fail when setting RESPONSE_STREAM?
+            # It should trigger InvokeWithResponseStream, which is not implemented.
+            LOG.warning(
+                "The invokeMode 'RESPONSE_STREAM' is not yet supported on LocalStack. The property is only mocked; the execution will still be 'BUFFERED'."
+            )
+
+    # TODO: what happens if the function state is not active?
+    def create_function_url_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        auth_type: FunctionUrlAuthType,
+        qualifier: FunctionUrlQualifier = None,
+        cors: Cors = None,
+        invoke_mode: InvokeMode = None,
+        **kwargs,
+    ) -> CreateFunctionUrlConfigResponse:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            function_name, qualifier, context
+        )
+        state = lambda_stores[account_id][region]
+        self._validate_qualifier(qualifier)
+        self._validate_invoke_mode(invoke_mode)
+
+        fn = state.functions.get(function_name)
+        if fn is None:
+            raise ResourceNotFoundException("Function does not exist", Type="User")
+
+        url_config = fn.function_url_configs.get(qualifier or "$LATEST")
+        if url_config:
+            raise ResourceConflictException(
+                f"Failed to create function url config for [functionArn = {url_config.function_arn}]. Error message: FunctionUrlConfig exists for this Lambda function",
+                Type="User",
+            )
+
+        if qualifier and qualifier != "$LATEST" and qualifier not in fn.aliases:
+            raise ResourceNotFoundException("Function does not exist", Type="User")
+
+        normalized_qualifier = qualifier or "$LATEST"
+
+        function_arn = (
+            api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region)
+            if qualifier
+            else api_utils.unqualified_lambda_arn(function_name, account_id, region)
+        )
+
+        custom_id: str | None = None
+
+        tags = self._get_tags(api_utils.unqualified_lambda_arn(function_name, account_id, region))
+        if TAG_KEY_CUSTOM_URL in tags:
+            # Note: I really wanted to add verification here that the
+            # url_id is unique, so we could surface that to the user ASAP.
+            # However, it seems like that information isn't available yet,
+            # since (as far as I can tell) we call
+            # self.router.register_routes() once, in a single shot, for all
+            # of the routes -- and we need to verify that it's unique not
+            # just for this particular lambda function, but for the entire
+            # lambda provider. Therefore... that idea proved non-trivial!
+            custom_id_tag_value = (
+                f"{tags[TAG_KEY_CUSTOM_URL]}-{qualifier}" if qualifier else tags[TAG_KEY_CUSTOM_URL]
+            )
+            if TAG_KEY_CUSTOM_URL_VALIDATOR.match(custom_id_tag_value):
+                custom_id = custom_id_tag_value
+
+            else:
+                # Note: we're logging here instead of raising to prioritize
+                # strict parity with AWS over the localstack-only custom_id
+                LOG.warning(
+                    "Invalid custom ID tag value for lambda URL (%s=%s). "
+                    "Replaced with default (random id)",
+                    TAG_KEY_CUSTOM_URL,
+                    custom_id_tag_value,
+                )
+
+        # The url_id is the subdomain used for the URL we're creating. This
+        # is either created randomly (as in AWS), or can be passed as a tag
+        # to the lambda itself (localstack-only).
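+        # Illustrative only: a client could opt into a deterministic subdomain by tagging the
+        # function with the TAG_KEY_CUSTOM_URL key before creating the URL config, e.g.
+        #   boto3.client("lambda").tag_resource(
+        #       Resource=unqualified_function_arn, Tags={TAG_KEY_CUSTOM_URL: "my-url-id"}
+        #   )
+        # where "my-url-id" (a made-up example value) then replaces the random url_id below.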
+        url_id: str
+        if custom_id is None:
+            url_id = api_utils.generate_random_url_id()
+        else:
+            url_id = custom_id
+
+        host_definition = localstack_host(custom_port=config.GATEWAY_LISTEN[0].port)
+        fn.function_url_configs[normalized_qualifier] = FunctionUrlConfig(
+            function_arn=function_arn,
+            function_name=function_name,
+            cors=cors,
+            url_id=url_id,
+            url=f"http://{url_id}.lambda-url.{context.region}.{host_definition.host_and_port()}/",  # TODO: https support
+            auth_type=auth_type,
+            creation_time=api_utils.generate_lambda_date(),
+            last_modified_time=api_utils.generate_lambda_date(),
+            invoke_mode=invoke_mode,
+        )
+
+        # persist and start URL
+        # TODO: implement URL invoke
+        api_url_config = api_utils.map_function_url_config(
+            fn.function_url_configs[normalized_qualifier]
+        )
+
+        return CreateFunctionUrlConfigResponse(
+            FunctionUrl=api_url_config["FunctionUrl"],
+            FunctionArn=api_url_config["FunctionArn"],
+            AuthType=api_url_config["AuthType"],
+            Cors=api_url_config["Cors"],
+            CreationTime=api_url_config["CreationTime"],
+            InvokeMode=api_url_config["InvokeMode"],
+        )
+
+    def get_function_url_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: FunctionUrlQualifier = None,
+        **kwargs,
+    ) -> GetFunctionUrlConfigResponse:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+
+        fn_name, qualifier = api_utils.get_name_and_qualifier(function_name, qualifier, context)
+
+        self._validate_qualifier(qualifier)
+
+        resolved_fn = state.functions.get(fn_name)
+        if not resolved_fn:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+
+        qualifier = qualifier or "$LATEST"
+        url_config = resolved_fn.function_url_configs.get(qualifier)
+        if not url_config:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+
+        return api_utils.map_function_url_config(url_config)
+
+    def update_function_url_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: FunctionUrlQualifier = None,
+        auth_type: FunctionUrlAuthType = None,
+        cors: Cors = None,
+        invoke_mode: InvokeMode = None,
+        **kwargs,
+    ) -> UpdateFunctionUrlConfigResponse:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            function_name, qualifier, context
+        )
+        self._validate_qualifier(qualifier)
+        self._validate_invoke_mode(invoke_mode)
+
+        fn = state.functions.get(function_name)
+        if not fn:
+            raise ResourceNotFoundException("Function does not exist", Type="User")
+
+        normalized_qualifier = qualifier or "$LATEST"
+
+        if (
+            api_utils.qualifier_is_alias(normalized_qualifier)
+            and normalized_qualifier not in fn.aliases
+        ):
+            raise ResourceNotFoundException("Function does not exist", Type="User")
+
+        url_config = fn.function_url_configs.get(normalized_qualifier)
+        if not url_config:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+
+        changes = {
+            "last_modified_time": api_utils.generate_lambda_date(),
+            **({"cors": cors} if cors is not None else {}),
+            **({"auth_type": auth_type} if auth_type is not None else {}),
+        }
+
+        if invoke_mode:
+            changes["invoke_mode"] = invoke_mode
+
+        new_url_config = dataclasses.replace(url_config, **changes)
+        fn.function_url_configs[normalized_qualifier] = new_url_config
+
+        return UpdateFunctionUrlConfigResponse(
+            FunctionUrl=new_url_config.url,
+            FunctionArn=new_url_config.function_arn,
+            AuthType=new_url_config.auth_type,
+            Cors=new_url_config.cors,
+            CreationTime=new_url_config.creation_time,
+            LastModifiedTime=new_url_config.last_modified_time,
+            InvokeMode=new_url_config.invoke_mode,
+        )
+
+    def delete_function_url_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: FunctionUrlQualifier = None,
+        **kwargs,
+    ) -> None:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            function_name, qualifier, context
+        )
+        self._validate_qualifier(qualifier)
+
+        resolved_fn = state.functions.get(function_name)
+        if not resolved_fn:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+
+        qualifier = qualifier or "$LATEST"
+        url_config = resolved_fn.function_url_configs.get(qualifier)
+        if not url_config:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+
+        del resolved_fn.function_url_configs[qualifier]
+
+    def list_function_url_configs(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        marker: String = None,
+        max_items: MaxItems = None,
+        **kwargs,
+    ) -> ListFunctionUrlConfigsResponse:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+
+        fn_name = api_utils.get_function_name(function_name, context)
+        resolved_fn = state.functions.get(fn_name)
+        if not resolved_fn:
+            raise ResourceNotFoundException("Function does not exist", Type="User")
+
+        url_configs = [
+            api_utils.map_function_url_config(fn_conf)
+            for fn_conf in resolved_fn.function_url_configs.values()
+        ]
+        url_configs = PaginatedList(url_configs)
+        page, token = url_configs.get_page(
+            lambda url_config: url_config["FunctionArn"],
+            marker,
+            max_items,
+        )
+        url_configs = page
+        return ListFunctionUrlConfigsResponse(FunctionUrlConfigs=url_configs, NextMarker=token)
+
+    # =======================================
+    # ============ Permissions ============
+    # =======================================
+
+    @handler("AddPermission", expand=False)
+    def add_permission(
+        self,
+        context: RequestContext,
+        request: AddPermissionRequest,
+    ) -> AddPermissionResponse:
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            request.get("FunctionName"), request.get("Qualifier"), context
+        )
+
+        # validate qualifier
+        if qualifier is not None:
+            self._validate_qualifier_expression(qualifier)
+            if qualifier == "$LATEST":
+                raise InvalidParameterValueException(
+                    "We currently do not support adding policies for $LATEST.", Type="User"
+                )
+        account_id, region = api_utils.get_account_and_region(request.get("FunctionName"), context)
+
+        resolved_fn = self._get_function(function_name, account_id, region)
+        resolved_qualifier, fn_arn = self._resolve_fn_qualifier(resolved_fn, qualifier)
+
+        revision_id = request.get("RevisionId")
+        if revision_id:
+            fn_revision_id = self._function_revision_id(resolved_fn, resolved_qualifier)
+            if revision_id != fn_revision_id:
+                raise PreconditionFailedException(
+                    "The Revision Id provided does not match the latest Revision Id. "
+                    "Call the GetFunction/GetAlias API to retrieve the latest Revision Id",
+                    Type="User",
+                )
+
+        request_sid = request["StatementId"]
+        if not bool(STATEMENT_ID_REGEX.match(request_sid)):
+            raise ValidationException(
+                f"1 validation error detected: Value '{request_sid}' at 'statementId' failed to satisfy constraint: Member must satisfy regular expression pattern: ([a-zA-Z0-9-_]+)"
+            )
+        # check for an already existing policy and any conflicts in existing statements
+        existing_policy = resolved_fn.permissions.get(resolved_qualifier)
+        if existing_policy:
+            if request_sid in [s["Sid"] for s in existing_policy.policy.Statement]:
+                # uniqueness scope: statement id needs to be unique per qualified function ($LATEST, version, or alias)
+                # Counterexample: the same sid can exist within $LATEST, version, and alias
+                raise ResourceConflictException(
+                    f"The statement id ({request_sid}) provided already exists. Please provide a new statement id, or remove the existing statement.",
+                    Type="User",
+                )
+
+        permission_statement = api_utils.build_statement(
+            partition=context.partition,
+            resource_arn=fn_arn,
+            statement_id=request["StatementId"],
+            action=request["Action"],
+            principal=request["Principal"],
+            source_arn=request.get("SourceArn"),
+            source_account=request.get("SourceAccount"),
+            principal_org_id=request.get("PrincipalOrgID"),
+            event_source_token=request.get("EventSourceToken"),
+            auth_type=request.get("FunctionUrlAuthType"),
+        )
+        new_policy = existing_policy
+        if not existing_policy:
+            new_policy = FunctionResourcePolicy(
+                policy=ResourcePolicy(Version="2012-10-17", Id="default", Statement=[])
+            )
+        new_policy.policy.Statement.append(permission_statement)
+        if not existing_policy:
+            resolved_fn.permissions[resolved_qualifier] = new_policy
+
+        # Update revision id of alias or version
+        # TODO: re-evaluate the data model to prevent this dirty hack just for bumping the revision id
+        # TODO: does that need a `with function.lock` for atomic updates of the policy + revision_id?
+        if api_utils.qualifier_is_alias(resolved_qualifier):
+            resolved_alias = resolved_fn.aliases[resolved_qualifier]
+            resolved_fn.aliases[resolved_qualifier] = dataclasses.replace(resolved_alias)
+        # Assumes that a non-alias is a version
+        else:
+            resolved_version = resolved_fn.versions[resolved_qualifier]
+            resolved_fn.versions[resolved_qualifier] = dataclasses.replace(
+                resolved_version, config=dataclasses.replace(resolved_version.config)
+            )
+        return AddPermissionResponse(Statement=json.dumps(permission_statement))
+
+    def remove_permission(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        statement_id: NamespacedStatementId,
+        qualifier: Qualifier = None,
+        revision_id: String = None,
+        **kwargs,
+    ) -> None:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            function_name, qualifier, context
+        )
+        if qualifier is not None:
+            self._validate_qualifier_expression(qualifier)
+
+        state = lambda_stores[account_id][region]
+        resolved_fn = state.functions.get(function_name)
+        if resolved_fn is None:
+            fn_arn = api_utils.unqualified_lambda_arn(function_name, account_id, region)
+            raise ResourceNotFoundException(f"No policy found for: {fn_arn}", Type="User")
+
+        resolved_qualifier, _ = self._resolve_fn_qualifier(resolved_fn, qualifier)
+        function_permission = resolved_fn.permissions.get(resolved_qualifier)
+        if not function_permission:
+            raise ResourceNotFoundException(
+                "No policy is associated with the given resource.", Type="User"
+            )
+
+        # try to find the statement in the policy and delete it
+        statement = None
+        for s in function_permission.policy.Statement:
+            if s["Sid"] == statement_id:
+                statement = s
+                break
+
+        if not statement:
+            raise ResourceNotFoundException(
+                f"Statement {statement_id} is not found in resource policy.", Type="User"
+            )
+        fn_revision_id = self._function_revision_id(resolved_fn, resolved_qualifier)
+        if revision_id and revision_id != fn_revision_id:
+            raise PreconditionFailedException(
+                "The Revision Id provided does not match the latest Revision Id. "
+                "Call the GetFunction/GetAlias API to retrieve the latest Revision Id",
+                Type="User",
+            )
+        function_permission.policy.Statement.remove(statement)
+
+        # Update revision id for alias or version
+        # TODO: re-evaluate the data model to prevent this dirty hack just for bumping the revision id
+        # TODO: does that need a `with function.lock` for atomic updates of the policy + revision_id?
+        if api_utils.qualifier_is_alias(resolved_qualifier):
+            resolved_alias = resolved_fn.aliases[resolved_qualifier]
+            resolved_fn.aliases[resolved_qualifier] = dataclasses.replace(resolved_alias)
+        # Assumes that a non-alias is a version
+        else:
+            resolved_version = resolved_fn.versions[resolved_qualifier]
+            resolved_fn.versions[resolved_qualifier] = dataclasses.replace(
+                resolved_version, config=dataclasses.replace(resolved_version.config)
+            )
+
+        # remove the policy as a whole when there's no statement left in it
+        if len(function_permission.policy.Statement) == 0:
+            del resolved_fn.permissions[resolved_qualifier]
+
+    def get_policy(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        qualifier: Qualifier = None,
+        **kwargs,
+    ) -> GetPolicyResponse:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            function_name, qualifier, context
+        )
+
+        if qualifier is not None:
+            self._validate_qualifier_expression(qualifier)
+
+        resolved_fn = self._get_function(function_name, account_id, region)
+
+        resolved_qualifier = qualifier or "$LATEST"
+        function_permission = resolved_fn.permissions.get(resolved_qualifier)
+        if not function_permission:
+            raise ResourceNotFoundException(
+                "The resource you requested does not exist.", Type="User"
+            )
+
+        fn_revision_id = None
+        if api_utils.qualifier_is_alias(resolved_qualifier):
+            resolved_alias = resolved_fn.aliases[resolved_qualifier]
+            fn_revision_id = resolved_alias.revision_id
+        # Assumes that a non-alias is a version
+        else:
+            resolved_version = resolved_fn.versions[resolved_qualifier]
+            fn_revision_id = resolved_version.config.revision_id
+
+        return GetPolicyResponse(
+            Policy=json.dumps(dataclasses.asdict(function_permission.policy)),
+            RevisionId=fn_revision_id,
+        )
+
+    # =======================================
+    # ======== Code signing config ========
+    # =======================================
+
+    def create_code_signing_config(
+        self,
+        context: RequestContext,
+        allowed_publishers: AllowedPublishers,
+        description: Description = None,
+        code_signing_policies: CodeSigningPolicies = None,
+        tags: Tags = None,
+        **kwargs,
+    ) -> CreateCodeSigningConfigResponse:
+        account = context.account_id
+        region = context.region
+
+        state = lambda_stores[account][region]
+        # TODO: can there be duplicates?
+        csc_id = f"csc-{get_random_hex(17)}"  # e.g. 'csc-077c33b4c19e26036'
+        csc_arn = f"arn:{context.partition}:lambda:{region}:{account}:code-signing-config:{csc_id}"
+        csc = CodeSigningConfig(
+            csc_id=csc_id,
+            arn=csc_arn,
+            allowed_publishers=allowed_publishers,
+            policies=code_signing_policies,
+            last_modified=api_utils.generate_lambda_date(),
+            description=description,
+        )
+        state.code_signing_configs[csc_arn] = csc
+        return CreateCodeSigningConfigResponse(CodeSigningConfig=api_utils.map_csc(csc))
+
+    def put_function_code_signing_config(
+        self,
+        context: RequestContext,
+        code_signing_config_arn: CodeSigningConfigArn,
+        function_name: FunctionName,
+        **kwargs,
+    ) -> PutFunctionCodeSigningConfigResponse:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+        function_name = api_utils.get_function_name(function_name, context)
+
+        csc = state.code_signing_configs.get(code_signing_config_arn)
+        if not csc:
+            raise CodeSigningConfigNotFoundException(
+                f"The code signing configuration cannot be found. Check that the provided configuration is not deleted: {code_signing_config_arn}.",
+                Type="User",
+            )
+
+        fn = state.functions.get(function_name)
+        fn_arn = api_utils.unqualified_lambda_arn(function_name, account_id, region)
+        if not fn:
+            raise ResourceNotFoundException(f"Function not found: {fn_arn}", Type="User")
+
+        fn.code_signing_config_arn = code_signing_config_arn
+        return PutFunctionCodeSigningConfigResponse(
+            CodeSigningConfigArn=code_signing_config_arn, FunctionName=function_name
+        )
+
+    def update_code_signing_config(
+        self,
+        context: RequestContext,
+        code_signing_config_arn: CodeSigningConfigArn,
+        description: Description = None,
+        allowed_publishers: AllowedPublishers = None,
+        code_signing_policies: CodeSigningPolicies = None,
+        **kwargs,
+    ) -> UpdateCodeSigningConfigResponse:
+        state = lambda_stores[context.account_id][context.region]
+        csc = state.code_signing_configs.get(code_signing_config_arn)
+        if not csc:
+            raise ResourceNotFoundException(
+                f"The Lambda code signing configuration {code_signing_config_arn} can not be found."
+            )
+
+        changes = {
+            **(
+                {"allowed_publishers": allowed_publishers} if allowed_publishers is not None else {}
+            ),
+            **({"policies": code_signing_policies} if code_signing_policies is not None else {}),
+            **({"description": description} if description is not None else {}),
+        }
+        new_csc = dataclasses.replace(
+            csc, last_modified=api_utils.generate_lambda_date(), **changes
+        )
+        state.code_signing_configs[code_signing_config_arn] = new_csc
+
+        return UpdateCodeSigningConfigResponse(CodeSigningConfig=api_utils.map_csc(new_csc))
+
+    def get_code_signing_config(
+        self, context: RequestContext, code_signing_config_arn: CodeSigningConfigArn, **kwargs
+    ) -> GetCodeSigningConfigResponse:
+        state = lambda_stores[context.account_id][context.region]
+        csc = state.code_signing_configs.get(code_signing_config_arn)
+        if not csc:
+            raise ResourceNotFoundException(
+                f"The Lambda code signing configuration {code_signing_config_arn} can not be found."
+            )
+
+        return GetCodeSigningConfigResponse(CodeSigningConfig=api_utils.map_csc(csc))
+
+    def get_function_code_signing_config(
+        self, context: RequestContext, function_name: FunctionName, **kwargs
+    ) -> GetFunctionCodeSigningConfigResponse:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+        function_name = api_utils.get_function_name(function_name, context)
+        fn = state.functions.get(function_name)
+        fn_arn = api_utils.unqualified_lambda_arn(function_name, account_id, region)
+        if not fn:
+            raise ResourceNotFoundException(f"Function not found: {fn_arn}", Type="User")
+
+        if fn.code_signing_config_arn:
+            return GetFunctionCodeSigningConfigResponse(
+                CodeSigningConfigArn=fn.code_signing_config_arn, FunctionName=function_name
+            )
+
+        return GetFunctionCodeSigningConfigResponse()
+
+    def delete_function_code_signing_config(
+        self, context: RequestContext, function_name: FunctionName, **kwargs
+    ) -> None:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+        function_name = api_utils.get_function_name(function_name, context)
+        fn = state.functions.get(function_name)
+        fn_arn = api_utils.unqualified_lambda_arn(function_name, account_id, region)
+        if not fn:
+            raise ResourceNotFoundException(f"Function not found: {fn_arn}", Type="User")
+
+        fn.code_signing_config_arn = None
+
+    def delete_code_signing_config(
+        self, context: RequestContext, code_signing_config_arn: CodeSigningConfigArn, **kwargs
+    ) -> DeleteCodeSigningConfigResponse:
+        state = lambda_stores[context.account_id][context.region]
+
+        csc = state.code_signing_configs.get(code_signing_config_arn)
+        if not csc:
+            raise ResourceNotFoundException(
+                f"The Lambda code signing configuration {code_signing_config_arn} can not be found."
+            )
+
+        del state.code_signing_configs[code_signing_config_arn]
+
+        return DeleteCodeSigningConfigResponse()
+
+    def list_code_signing_configs(
+        self,
+        context: RequestContext,
+        marker: String = None,
+        max_items: MaxListItems = None,
+        **kwargs,
+    ) -> ListCodeSigningConfigsResponse:
+        state = lambda_stores[context.account_id][context.region]
+
+        cscs = [api_utils.map_csc(csc) for csc in state.code_signing_configs.values()]
+        cscs = PaginatedList(cscs)
+        page, token = cscs.get_page(
+            lambda csc: csc["CodeSigningConfigId"],
+            marker,
+            max_items,
+        )
+        return ListCodeSigningConfigsResponse(CodeSigningConfigs=page, NextMarker=token)
+
+    def list_functions_by_code_signing_config(
+        self,
+        context: RequestContext,
+        code_signing_config_arn: CodeSigningConfigArn,
+        marker: String = None,
+        max_items: MaxListItems = None,
+        **kwargs,
+    ) -> ListFunctionsByCodeSigningConfigResponse:
+        account = context.account_id
+        region = context.region
+
+        state = lambda_stores[account][region]
+
+        if code_signing_config_arn not in state.code_signing_configs:
+            raise ResourceNotFoundException(
+                f"The Lambda code signing configuration {code_signing_config_arn} can not be found."
+            )
+
+        fn_arns = [
+            api_utils.unqualified_lambda_arn(fn.function_name, account, region)
+            for fn in state.functions.values()
+            if fn.code_signing_config_arn == code_signing_config_arn
+        ]
+
+        cscs = PaginatedList(fn_arns)
+        page, token = cscs.get_page(
+            lambda x: x,
+            marker,
+            max_items,
+        )
+        return ListFunctionsByCodeSigningConfigResponse(FunctionArns=page, NextMarker=token)
+
+    # =======================================
+    # ========= Account Settings =========
+    # =======================================
+
+    # CAVE: these settings & usages are *per* region!
+    # Lambda quotas: https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-limits.html
+    def get_account_settings(self, context: RequestContext, **kwargs) -> GetAccountSettingsResponse:
+        state = lambda_stores[context.account_id][context.region]
+
+        fn_count = 0
+        code_size_sum = 0
+        reserved_concurrency_sum = 0
+        for fn in state.functions.values():
+            fn_count += 1
+            for fn_version in fn.versions.values():
+                # Image-based Lambdas do not have a code attribute and count against the ECR quotas instead
+                if fn_version.config.package_type == PackageType.Zip:
+                    code_size_sum += fn_version.config.code.code_size
+            if fn.reserved_concurrent_executions is not None:
+                reserved_concurrency_sum += fn.reserved_concurrent_executions
+            for c in fn.provisioned_concurrency_configs.values():
+                reserved_concurrency_sum += c.provisioned_concurrent_executions
+        for layer in state.layers.values():
+            for layer_version in layer.layer_versions.values():
+                code_size_sum += layer_version.code.code_size
+        return GetAccountSettingsResponse(
+            AccountLimit=AccountLimit(
+                TotalCodeSize=config.LAMBDA_LIMITS_TOTAL_CODE_SIZE,
+                CodeSizeZipped=config.LAMBDA_LIMITS_CODE_SIZE_ZIPPED,
+                CodeSizeUnzipped=config.LAMBDA_LIMITS_CODE_SIZE_UNZIPPED,
+                ConcurrentExecutions=config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS,
+                UnreservedConcurrentExecutions=config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS
+                - reserved_concurrency_sum,
+            ),
+            AccountUsage=AccountUsage(
+                TotalCodeSize=code_size_sum,
+                FunctionCount=fn_count,
+            ),
+        )
+
+    # =======================================
+    # == Provisioned Concurrency Config ==
+    # =======================================
+
+    def _get_provisioned_config(
+        self, context: RequestContext, function_name: str, qualifier: str
+    ) -> ProvisionedConcurrencyConfiguration | None:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+        function_name = api_utils.get_function_name(function_name, context)
+        fn = state.functions.get(function_name)
+        if api_utils.qualifier_is_alias(qualifier):
+            fn_alias = None
+            if fn:
+                fn_alias = fn.aliases.get(qualifier)
+            if fn_alias is None:
+                raise ResourceNotFoundException(
+                    f"Cannot find alias arn: {api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region)}",
+                    Type="User",
+                )
+        elif api_utils.qualifier_is_version(qualifier):
+            fn_version = None
+            if fn:
+                fn_version = fn.versions.get(qualifier)
+            if fn_version is None:
+                raise ResourceNotFoundException(
+                    f"Function not found: {api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region)}",
+                    Type="User",
+                )
+
+        return fn.provisioned_concurrency_configs.get(qualifier)
+
+    def put_provisioned_concurrency_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: Qualifier,
+        provisioned_concurrent_executions: PositiveInteger,
+        **kwargs,
+    ) -> PutProvisionedConcurrencyConfigResponse:
+        if provisioned_concurrent_executions <= 0:
+            raise ValidationException(
+                f"1 validation error detected: Value '{provisioned_concurrent_executions}' at 'provisionedConcurrentExecutions' failed to satisfy constraint: Member must have value greater than or equal to 1"
+            )
+
+        if qualifier == "$LATEST":
+            raise InvalidParameterValueException(
+                "Provisioned Concurrency Configs cannot be applied to unpublished function versions.",
+                Type="User",
+            )
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            function_name, qualifier, context
+        )
+        state = lambda_stores[account_id][region]
+        fn = state.functions.get(function_name)
+
+        provisioned_config = self._get_provisioned_config(context, function_name, qualifier)
+
+        if provisioned_config:  # TODO: merge?
+            # TODO: add a test for partial updates (if possible)
+            LOG.warning(
+                "Partial update of provisioned concurrency config is currently not supported."
+            )
+
+        other_provisioned_sum = sum(
+            [
+                provisioned_configs.provisioned_concurrent_executions
+                for provisioned_qualifier, provisioned_configs in fn.provisioned_concurrency_configs.items()
+                if provisioned_qualifier != qualifier
+            ]
+        )
+
+        if (
+            fn.reserved_concurrent_executions is not None
+            and fn.reserved_concurrent_executions
+            < other_provisioned_sum + provisioned_concurrent_executions
+        ):
+            raise InvalidParameterValueException(
+                "Requested Provisioned Concurrency should not be greater than the reservedConcurrentExecution for function",
+                Type="User",
+            )
+
+        if provisioned_concurrent_executions > config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS:
+            raise InvalidParameterValueException(
+                f"Specified ConcurrentExecutions for function is greater than account's unreserved concurrency"
+                f" [{config.LAMBDA_LIMITS_CONCURRENT_EXECUTIONS}]."
+            )
+
+        settings = self.get_account_settings(context)
+        unreserved_concurrent_executions = settings["AccountLimit"][
+            "UnreservedConcurrentExecutions"
+        ]
+        if (
+            unreserved_concurrent_executions - provisioned_concurrent_executions
+            < config.LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY
+        ):
+            raise InvalidParameterValueException(
+                f"Specified ConcurrentExecutions for function decreases account's UnreservedConcurrentExecution below"
+                f" its minimum value of [{config.LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY}]."
+            )
+
+        provisioned_config = ProvisionedConcurrencyConfiguration(
+            provisioned_concurrent_executions, api_utils.generate_lambda_date()
+        )
+        fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region)
+
+        if api_utils.qualifier_is_alias(qualifier):
+            alias = fn.aliases.get(qualifier)
+            resolved_version = fn.versions.get(alias.function_version)
+
+            if (
+                resolved_version
+                and fn.provisioned_concurrency_configs.get(alias.function_version) is not None
+            ):
+                raise ResourceConflictException(
+                    "Alias can't be used for Provisioned Concurrency configuration on an already Provisioned version",
+                    Type="User",
+                )
+            fn_arn = resolved_version.id.qualified_arn()
+        elif api_utils.qualifier_is_version(qualifier):
+            fn_version = fn.versions.get(qualifier)
+
+            # TODO: might be useful in other places, utilize
+            pointing_aliases = []
+            for alias in fn.aliases.values():
+                if (
+                    alias.function_version == qualifier
+                    and fn.provisioned_concurrency_configs.get(alias.name) is not None
+                ):
+                    pointing_aliases.append(alias.name)
+            if pointing_aliases:
+                raise ResourceConflictException(
+                    "Version is pointed by a Provisioned Concurrency alias", Type="User"
+                )
+
+            fn_arn = fn_version.id.qualified_arn()
+
+        manager = self.lambda_service.get_lambda_version_manager(fn_arn)
+
+        fn.provisioned_concurrency_configs[qualifier] = provisioned_config
+
+        manager.update_provisioned_concurrency_config(
+            provisioned_config.provisioned_concurrent_executions
+        )
+
+        return PutProvisionedConcurrencyConfigResponse(
+            RequestedProvisionedConcurrentExecutions=provisioned_config.provisioned_concurrent_executions,
+            AvailableProvisionedConcurrentExecutions=0,
+            AllocatedProvisionedConcurrentExecutions=0,
+            Status=ProvisionedConcurrencyStatusEnum.IN_PROGRESS,
+            # StatusReason=manager.provisioned_state.status_reason,
+            LastModified=provisioned_config.last_modified,  # TODO: does this change only with the configuration or also with state changes?
+        )
+
+    def get_provisioned_concurrency_config(
+        self, context: RequestContext, function_name: FunctionName, qualifier: Qualifier, **kwargs
+    ) -> GetProvisionedConcurrencyConfigResponse:
+        if qualifier == "$LATEST":
+            raise InvalidParameterValueException(
+                "The function resource provided must be an alias or a published version.",
+                Type="User",
+            )
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            function_name, qualifier, context
+        )
+
+        provisioned_config = self._get_provisioned_config(context, function_name, qualifier)
+        if not provisioned_config:
+            raise ProvisionedConcurrencyConfigNotFoundException(
+                "No Provisioned Concurrency Config found for this function", Type="User"
+            )
+
+        # TODO: make this compatible with alias pointer migration on update
+        if api_utils.qualifier_is_alias(qualifier):
+            state = lambda_stores[account_id][region]
+            fn = state.functions.get(function_name)
+            alias = fn.aliases.get(qualifier)
+            fn_arn = api_utils.qualified_lambda_arn(
+                function_name, alias.function_version, account_id, region
+            )
+        else:
+            fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region)
+
+        ver_manager = self.lambda_service.get_lambda_version_manager(fn_arn)
+
+        return GetProvisionedConcurrencyConfigResponse(
+            RequestedProvisionedConcurrentExecutions=provisioned_config.provisioned_concurrent_executions,
+            LastModified=provisioned_config.last_modified,
+            AvailableProvisionedConcurrentExecutions=ver_manager.provisioned_state.available,
+            AllocatedProvisionedConcurrentExecutions=ver_manager.provisioned_state.allocated,
+            Status=ver_manager.provisioned_state.status,
+            StatusReason=ver_manager.provisioned_state.status_reason,
+        )
+
+    def list_provisioned_concurrency_configs(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        marker: String = None,
+        max_items: MaxProvisionedConcurrencyConfigListItems = None,
+        **kwargs,
+    ) -> ListProvisionedConcurrencyConfigsResponse:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        state = lambda_stores[account_id][region]
+
+        function_name = api_utils.get_function_name(function_name, context)
+        fn = state.functions.get(function_name)
+        if fn is None:
+            raise ResourceNotFoundException(
+                f"Function not found: {api_utils.unqualified_lambda_arn(function_name, account_id, region)}",
+                Type="User",
+            )
+
+        configs = []
+        for qualifier, pc_config in fn.provisioned_concurrency_configs.items():
+            if api_utils.qualifier_is_alias(qualifier):
+                alias = fn.aliases.get(qualifier)
+                fn_arn = api_utils.qualified_lambda_arn(
+                    function_name, alias.function_version, account_id, region
+                )
+            else:
+                fn_arn = api_utils.qualified_lambda_arn(
+                    function_name, qualifier, account_id, region
+                )
+
+            manager = self.lambda_service.get_lambda_version_manager(fn_arn)
+
+            configs.append(
+                ProvisionedConcurrencyConfigListItem(
+                    FunctionArn=api_utils.qualified_lambda_arn(
+                        function_name, qualifier, account_id, region
+                    ),
+                    RequestedProvisionedConcurrentExecutions=pc_config.provisioned_concurrent_executions,
+                    AvailableProvisionedConcurrentExecutions=manager.provisioned_state.available,
+                    AllocatedProvisionedConcurrentExecutions=manager.provisioned_state.allocated,
+                    Status=manager.provisioned_state.status,
+                    StatusReason=manager.provisioned_state.status_reason,
+                    LastModified=pc_config.last_modified,
+                )
+            )
+
+        provisioned_concurrency_configs = configs
+        provisioned_concurrency_configs = PaginatedList(provisioned_concurrency_configs)
+        page, token = provisioned_concurrency_configs.get_page(
+            lambda x: x,
+            marker,
+            max_items,
+        )
+        return ListProvisionedConcurrencyConfigsResponse(
+            ProvisionedConcurrencyConfigs=page, NextMarker=token
+        )
+
+    def delete_provisioned_concurrency_config(
+        self, context: RequestContext, function_name: FunctionName, qualifier: Qualifier, **kwargs
+    ) -> None:
+        if qualifier == "$LATEST":
+            raise InvalidParameterValueException(
+                "The function resource provided must be an alias or a published version.",
+                Type="User",
+            )
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        function_name, qualifier = api_utils.get_name_and_qualifier(
+            function_name, qualifier, context
+        )
+        state = lambda_stores[account_id][region]
+        fn = state.functions.get(function_name)
+
+        provisioned_config = self._get_provisioned_config(context, function_name, qualifier)
+        # delete is idempotent and doesn't actually care about the provisioned concurrency config not existing
+        if provisioned_config:
+            fn.provisioned_concurrency_configs.pop(qualifier)
+            fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region)
+            manager = self.lambda_service.get_lambda_version_manager(fn_arn)
+            manager.update_provisioned_concurrency_config(0)
+
+    # =======================================
+    # ======= Event Invoke Config ========
+    # =======================================
+
+    # "1 validation error detected: Value 'arn:aws:_-/!lambda::111111111111:function:' at 'destinationConfig.onFailure.destination' failed to satisfy constraint: Member must satisfy regular expression pattern: ^$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)"
+    # "1 validation error detected: Value 'arn:aws:_-/!lambda::111111111111:function:' at 'destinationConfig.onFailure.destination' failed to satisfy constraint: Member must satisfy regular expression pattern: ^$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]2((-gov)|(-iso(b?)))?-[a-z]+-\\d1)?:(\\d12)?:(.*)" ... (expected → actual)
+
+    def _validate_destination_config(
+        self, store: LambdaStore, function_name: str, destination_config: DestinationConfig
+    ):
+        def _validate_destination_arn(destination_arn) -> bool:
+            if not api_utils.DESTINATION_ARN_PATTERN.match(destination_arn):
+                # technically we shouldn't handle this in the provider
+                raise ValidationException(
+                    "1 validation error detected: Value '"
+                    + destination_arn
+                    + r"' at 'destinationConfig.onFailure.destination' failed to satisfy constraint: Member must satisfy regular expression pattern: ^$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\-])+:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\d{1})?:(\d{12})?:(.*)"
+                )
+
+            match destination_arn.split(":")[2]:
+                case "lambda":
+                    fn_parts = api_utils.FULL_FN_ARN_PATTERN.search(destination_arn).groupdict()
+                    if fn_parts:
+                        # check if it exists
+                        fn = store.functions.get(fn_parts["function_name"])
+                        if not fn:
+                            raise InvalidParameterValueException(
+                                f"The destination ARN {destination_arn} is invalid.", Type="User"
+                            )
+                        if fn_parts["function_name"] == function_name:
+                            raise InvalidParameterValueException(
+                                "You can't specify the function as a destination for itself.",
+                                Type="User",
+                            )
+                case "sns" | "sqs" | "events":
+                    pass
+                case _:
+                    return False
+            return True
+
+        validation_err = False
+
+        failure_destination = destination_config.get("OnFailure", {}).get("Destination")
+        if failure_destination:
+            validation_err = validation_err or not _validate_destination_arn(failure_destination)
+
+        success_destination = destination_config.get("OnSuccess", {}).get("Destination")
+        if success_destination:
+            validation_err = validation_err or not _validate_destination_arn(success_destination)
+
+        if validation_err:
+            on_success_part = (
+                f"OnSuccess(destination={success_destination})" if success_destination else "null"
+            )
+            on_failure_part = (
+                f"OnFailure(destination={failure_destination})" if failure_destination else "null"
+            )
+            raise InvalidParameterValueException(
+                f"The provided destination config DestinationConfig(onSuccess={on_success_part}, onFailure={on_failure_part}) is invalid.",
+                Type="User",
+            )
+
+    def put_function_event_invoke_config(
+        self,
+        context: RequestContext,
+        function_name: FunctionName,
+        qualifier: Qualifier = None,
+        maximum_retry_attempts: MaximumRetryAttempts = None,
+        maximum_event_age_in_seconds: MaximumEventAgeInSeconds = None,
+        destination_config: DestinationConfig = None,
+        **kwargs,
+    ) -> FunctionEventInvokeConfig:
+        """
+        Destination ARNs can be:
+        * SQS ARN
+        * SNS ARN
+        * Lambda ARN
+        * EventBridge ARN
+
+        Differences between put_ and update_:
+        * put overwrites any existing config
+        * update allows changing only single values while keeping the rest of the existing ones
+        * update fails on non-existing configs
+
+        Differences between destination and DLQ:
+        * "However, a dead-letter queue is part of a function's version-specific configuration, so it is locked in when you publish a version."
+        * "On-failure destinations also support additional targets and include details about the function's response in the invocation record."
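+
+        Illustrative call (all values are example placeholders)::
+
+            lambda_client.put_function_event_invoke_config(
+                FunctionName="my-function",
+                MaximumRetryAttempts=1,
+                DestinationConfig={"OnFailure": {"Destination": queue_arn}},
+            )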
+ + """ + if ( + maximum_event_age_in_seconds is None + and maximum_retry_attempts is None + and destination_config is None + ): + raise InvalidParameterValueException( + "You must specify at least one of error handling or destination setting.", + Type="User", + ) + account_id, region = api_utils.get_account_and_region(function_name, context) + state = lambda_stores[account_id][region] + function_name, qualifier = api_utils.get_name_and_qualifier( + function_name, qualifier, context + ) + fn = state.functions.get(function_name) + if not fn or (qualifier and not (qualifier in fn.aliases or qualifier in fn.versions)): + raise ResourceNotFoundException("The function doesn't exist.", Type="User") + + qualifier = qualifier or "$LATEST" + + # validate and normalize destination config + if destination_config: + self._validate_destination_config(state, function_name, destination_config) + + destination_config = DestinationConfig( + OnSuccess=OnSuccess( + Destination=(destination_config or {}).get("OnSuccess", {}).get("Destination") + ), + OnFailure=OnFailure( + Destination=(destination_config or {}).get("OnFailure", {}).get("Destination") + ), + ) + + config = EventInvokeConfig( + function_name=function_name, + qualifier=qualifier, + maximum_event_age_in_seconds=maximum_event_age_in_seconds, + maximum_retry_attempts=maximum_retry_attempts, + last_modified=api_utils.generate_lambda_date(), + destination_config=destination_config, + ) + fn.event_invoke_configs[qualifier] = config + + return FunctionEventInvokeConfig( + LastModified=datetime.datetime.strptime( + config.last_modified, api_utils.LAMBDA_DATE_FORMAT + ), + FunctionArn=api_utils.qualified_lambda_arn( + function_name, qualifier or "$LATEST", account_id, region + ), + DestinationConfig=destination_config, + MaximumEventAgeInSeconds=maximum_event_age_in_seconds, + MaximumRetryAttempts=maximum_retry_attempts, + ) + + def get_function_event_invoke_config( + self, + context: RequestContext, + function_name: FunctionName, + qualifier: Qualifier = None, + **kwargs, + ) -> FunctionEventInvokeConfig: + account_id, region = api_utils.get_account_and_region(function_name, context) + state = lambda_stores[account_id][region] + function_name, qualifier = api_utils.get_name_and_qualifier( + function_name, qualifier, context + ) + + qualifier = qualifier or "$LATEST" + fn = state.functions.get(function_name) + if not fn: + fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region) + raise ResourceNotFoundException( + f"The function {fn_arn} doesn't have an EventInvokeConfig", Type="User" + ) + + config = fn.event_invoke_configs.get(qualifier) + if not config: + fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region) + raise ResourceNotFoundException( + f"The function {fn_arn} doesn't have an EventInvokeConfig", Type="User" + ) + + return FunctionEventInvokeConfig( + LastModified=datetime.datetime.strptime( + config.last_modified, api_utils.LAMBDA_DATE_FORMAT + ), + FunctionArn=api_utils.qualified_lambda_arn( + function_name, qualifier, account_id, region + ), + DestinationConfig=config.destination_config, + MaximumEventAgeInSeconds=config.maximum_event_age_in_seconds, + MaximumRetryAttempts=config.maximum_retry_attempts, + ) + + def list_function_event_invoke_configs( + self, + context: RequestContext, + function_name: FunctionName, + marker: String = None, + max_items: MaxFunctionEventInvokeConfigListItems = None, + **kwargs, + ) -> ListFunctionEventInvokeConfigsResponse: + account_id, region = 
api_utils.get_account_and_region(function_name, context) + state = lambda_stores[account_id][region] + fn = state.functions.get(function_name) + if not fn: + raise ResourceNotFoundException("The function doesn't exist.", Type="User") + + event_invoke_configs = [ + FunctionEventInvokeConfig( + LastModified=c.last_modified, + FunctionArn=api_utils.qualified_lambda_arn( + function_name, c.qualifier, account_id, region + ), + MaximumEventAgeInSeconds=c.maximum_event_age_in_seconds, + MaximumRetryAttempts=c.maximum_retry_attempts, + DestinationConfig=c.destination_config, + ) + for c in fn.event_invoke_configs.values() + ] + + event_invoke_configs = PaginatedList(event_invoke_configs) + page, token = event_invoke_configs.get_page( + lambda x: x["FunctionArn"], + marker, + max_items, + ) + return ListFunctionEventInvokeConfigsResponse( + FunctionEventInvokeConfigs=page, NextMarker=token + ) + + def delete_function_event_invoke_config( + self, + context: RequestContext, + function_name: FunctionName, + qualifier: Qualifier = None, + **kwargs, + ) -> None: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name, qualifier = api_utils.get_name_and_qualifier( + function_name, qualifier, context + ) + state = lambda_stores[account_id][region] + fn = state.functions.get(function_name) + resolved_qualifier = qualifier or "$LATEST" + fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region) + if not fn: + raise ResourceNotFoundException( + f"The function {fn_arn} doesn't have an EventInvokeConfig", Type="User" + ) + + config = fn.event_invoke_configs.get(resolved_qualifier) + if not config: + raise ResourceNotFoundException( + f"The function {fn_arn} doesn't have an EventInvokeConfig", Type="User" + ) + + del fn.event_invoke_configs[resolved_qualifier] + + def update_function_event_invoke_config( + self, + context: RequestContext, + function_name: FunctionName, + qualifier: Qualifier = None, + maximum_retry_attempts: MaximumRetryAttempts = None, + maximum_event_age_in_seconds: MaximumEventAgeInSeconds = None, + destination_config: DestinationConfig = None, + **kwargs, + ) -> FunctionEventInvokeConfig: + # like put but only update single fields via replace + account_id, region = api_utils.get_account_and_region(function_name, context) + state = lambda_stores[account_id][region] + function_name, qualifier = api_utils.get_name_and_qualifier( + function_name, qualifier, context + ) + + if ( + maximum_event_age_in_seconds is None + and maximum_retry_attempts is None + and destination_config is None + ): + raise InvalidParameterValueException( + "You must specify at least one of error handling or destination setting.", + Type="User", + ) + + fn = state.functions.get(function_name) + if not fn or (qualifier and not (qualifier in fn.aliases or qualifier in fn.versions)): + raise ResourceNotFoundException("The function doesn't exist.", Type="User") + + qualifier = qualifier or "$LATEST" + + config = fn.event_invoke_configs.get(qualifier) + if not config: + fn_arn = api_utils.qualified_lambda_arn(function_name, qualifier, account_id, region) + raise ResourceNotFoundException( + f"The function {fn_arn} doesn't have an EventInvokeConfig", Type="User" + ) + + if destination_config: + self._validate_destination_config(state, function_name, destination_config) + + optional_kwargs = { + k: v + for k, v in { + "destination_config": destination_config, + "maximum_retry_attempts": maximum_retry_attempts, + "maximum_event_age_in_seconds": 
maximum_event_age_in_seconds, + }.items() + if v is not None + } + + new_config = dataclasses.replace( + config, last_modified=api_utils.generate_lambda_date(), **optional_kwargs + ) + fn.event_invoke_configs[qualifier] = new_config + + return FunctionEventInvokeConfig( + LastModified=datetime.datetime.strptime( + new_config.last_modified, api_utils.LAMBDA_DATE_FORMAT + ), + FunctionArn=api_utils.qualified_lambda_arn( + function_name, qualifier or "$LATEST", account_id, region + ), + DestinationConfig=new_config.destination_config, + MaximumEventAgeInSeconds=new_config.maximum_event_age_in_seconds, + MaximumRetryAttempts=new_config.maximum_retry_attempts, + ) + + # ======================================= + # ====== Layer & Layer Versions ======= + # ======================================= + + @staticmethod + def _resolve_layer( + layer_name_or_arn: str, context: RequestContext + ) -> Tuple[str, str, str, Optional[str]]: + """ + Return locator attributes for a given Lambda layer. + + :param layer_name_or_arn: Layer name or ARN + :param context: Request context + :return: Tuple of region, account ID, layer name, layer version + """ + if api_utils.is_layer_arn(layer_name_or_arn): + return api_utils.parse_layer_arn(layer_name_or_arn) + + return context.region, context.account_id, layer_name_or_arn, None + + def publish_layer_version( + self, + context: RequestContext, + layer_name: LayerName, + content: LayerVersionContentInput, + description: Description = None, + compatible_runtimes: CompatibleRuntimes = None, + license_info: LicenseInfo = None, + compatible_architectures: CompatibleArchitectures = None, + **kwargs, + ) -> PublishLayerVersionResponse: + """ + On first use of a LayerName a new layer is created and for each subsequent call with the same LayerName a new version is created. + Note that there are no $LATEST versions with layers! + + """ + account = context.account_id + region = context.region + + validation_errors = api_utils.validate_layer_runtimes_and_architectures( + compatible_runtimes, compatible_architectures + ) + if validation_errors: + raise ValidationException( + f"{len(validation_errors)} validation error{'s' if len(validation_errors) > 1 else ''} detected: {'; '.join(validation_errors)}" + ) + + state = lambda_stores[account][region] + with self.create_layer_lock: + if layer_name not in state.layers: + # we don't have a version so create new layer object + # lock is required to avoid creating two v1 objects for the same name + layer = Layer( + arn=api_utils.layer_arn(layer_name=layer_name, account=account, region=region) + ) + state.layers[layer_name] = layer + + layer = state.layers[layer_name] + with layer.next_version_lock: + next_version = LambdaLayerVersionIdentifier( + account_id=account, region=region, layer_name=layer_name + ).generate(next_version=layer.next_version) + # When creating a layer with user defined layer version, it is possible that we + # create layer versions out of order. + # ie. a user could replicate layer v2 then layer v1. 
It is important to always keep the maximum possible + # value for next layer to avoid overwriting existing versions + if layer.next_version <= next_version: + # We don't need to update layer.next_version if the created version is lower than the "next in line" + layer.next_version = max(next_version, layer.next_version) + 1 + + # creating a new layer + if content.get("ZipFile"): + code = store_lambda_archive( + archive_file=content["ZipFile"], + function_name=layer_name, + region_name=region, + account_id=account, + ) + else: + code = store_s3_bucket_archive( + archive_bucket=content["S3Bucket"], + archive_key=content["S3Key"], + archive_version=content.get("S3ObjectVersion"), + function_name=layer_name, + region_name=region, + account_id=account, + ) + + new_layer_version = LayerVersion( + layer_version_arn=api_utils.layer_version_arn( + layer_name=layer_name, + account=account, + region=region, + version=str(next_version), + ), + layer_arn=layer.arn, + version=next_version, + description=description or "", + license_info=license_info, + compatible_runtimes=compatible_runtimes, + compatible_architectures=compatible_architectures, + created=api_utils.generate_lambda_date(), + code=code, + ) + + layer.layer_versions[str(next_version)] = new_layer_version + + return api_utils.map_layer_out(new_layer_version) + + def get_layer_version( + self, + context: RequestContext, + layer_name: LayerName, + version_number: LayerVersionNumber, + **kwargs, + ) -> GetLayerVersionResponse: + # TODO: handle layer_name as an ARN + + region_name, account_id, layer_name, _ = LambdaProvider._resolve_layer(layer_name, context) + state = lambda_stores[account_id][region_name] + + layer = state.layers.get(layer_name) + if version_number < 1: + raise InvalidParameterValueException("Layer Version Cannot be less than 1", Type="User") + if layer is None: + raise ResourceNotFoundException( + "The resource you requested does not exist.", Type="User" + ) + layer_version = layer.layer_versions.get(str(version_number)) + if layer_version is None: + raise ResourceNotFoundException( + "The resource you requested does not exist.", Type="User" + ) + return api_utils.map_layer_out(layer_version) + + def get_layer_version_by_arn( + self, context: RequestContext, arn: LayerVersionArn, **kwargs + ) -> GetLayerVersionResponse: + region_name, account_id, layer_name, layer_version = LambdaProvider._resolve_layer( + arn, context + ) + + if not layer_version: + raise ValidationException( + f"1 validation error detected: Value '{arn}' at 'arn' failed to satisfy constraint: Member must satisfy regular expression pattern: " + + "(arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:layer:[a-zA-Z0-9-_]+:[0-9]+)|(arn:[a-zA-Z0-9-]+:lambda:::awslayer:[a-zA-Z0-9-_]+)" + ) + + store = lambda_stores[account_id][region_name] + if not (layers := store.layers.get(layer_name)): + raise ResourceNotFoundException( + "The resource you requested does not exist.", Type="User" + ) + + layer_version = layers.layer_versions.get(layer_version) + + if not layer_version: + raise ResourceNotFoundException( + "The resource you requested does not exist.", Type="User" + ) + + return api_utils.map_layer_out(layer_version) + + def list_layers( + self, + context: RequestContext, + compatible_runtime: Runtime = None, + marker: String = None, + max_items: MaxLayerListItems = None, + compatible_architecture: Architecture = None, + **kwargs, + ) -> ListLayersResponse: + validation_errors = [] + + validation_error_arch = 
api_utils.validate_layer_architecture(compatible_architecture)
+        if validation_error_arch:
+            validation_errors.append(validation_error_arch)
+
+        validation_error_runtime = api_utils.validate_layer_runtime(compatible_runtime)
+        if validation_error_runtime:
+            validation_errors.append(validation_error_runtime)
+
+        if validation_errors:
+            raise ValidationException(
+                f"{len(validation_errors)} validation error{'s' if len(validation_errors) > 1 else ''} detected: {';'.join(validation_errors)}"
+            )
+        # TODO: handle filter: compatible_runtime
+        # TODO: handle filter: compatible_architecture
+
+        state = lambda_stores[context.account_id][context.region]
+        layers = state.layers
+
+        # TODO: test how the filters interact with this latest-version-only listing, i.e. whether AWS returns the latest *matching* version or skips layers whose newest version doesn't match
+
+        responses: list[LayersListItem] = []
+        for layer_name, layer in layers.items():
+            # fetch the latest version; sort in place (a bare sorted(...) call would discard its result and leave the list unsorted)
+            layer_versions = list(layer.layer_versions.values())
+            layer_versions.sort(key=lambda x: x.version)
+            latest_layer_version = layer_versions[-1]
+            responses.append(
+                LayersListItem(
+                    LayerName=layer_name,
+                    LayerArn=layer.arn,
+                    LatestMatchingVersion=api_utils.map_layer_out(latest_layer_version),
+                )
+            )
+
+        responses = PaginatedList(responses)
+        page, token = responses.get_page(
+            lambda version: version,
+            marker,
+            max_items,
+        )
+
+        return ListLayersResponse(NextMarker=token, Layers=page)
+
+    def list_layer_versions(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        compatible_runtime: Runtime = None,
+        marker: String = None,
+        max_items: MaxLayerListItems = None,
+        compatible_architecture: Architecture = None,
+        **kwargs,
+    ) -> ListLayerVersionsResponse:
+        validation_errors = api_utils.validate_layer_runtimes_and_architectures(
+            [compatible_runtime] if compatible_runtime else [],
+            [compatible_architecture] if compatible_architecture else [],
+        )
+        if validation_errors:
+            raise ValidationException(
+                f"{len(validation_errors)} validation error{'s' if len(validation_errors) > 1 else ''} detected: {';'.join(validation_errors)}"
+            )
+
+        region_name, account_id, layer_name, layer_version = LambdaProvider._resolve_layer(
+            layer_name, context
+        )
+        state = lambda_stores[account_id][region_name]
+
+        # TODO: Test & handle filter: compatible_runtime
+        # TODO: Test & handle filter: compatible_architecture
+        all_layer_versions = []
+        layer = state.layers.get(layer_name)
+        if layer is not None:
+            for layer_version in layer.layer_versions.values():
+                all_layer_versions.append(api_utils.map_layer_out(layer_version))
+
+        all_layer_versions.sort(key=lambda x: x["Version"], reverse=True)
+        all_layer_versions = PaginatedList(all_layer_versions)
+        page, token = all_layer_versions.get_page(
+            lambda version: version["LayerVersionArn"],
+            marker,
+            max_items,
+        )
+        return ListLayerVersionsResponse(NextMarker=token, LayerVersions=page)
+
+    def delete_layer_version(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        version_number: LayerVersionNumber,
+        **kwargs,
+    ) -> None:
+        if version_number < 1:
+            raise InvalidParameterValueException("Layer Version Cannot be less than 1", Type="User")
+
+        region_name, account_id, layer_name, layer_version = LambdaProvider._resolve_layer(
+            layer_name, context
+        )
+
+        store = lambda_stores[account_id][region_name]
+        layer = store.layers.get(layer_name, {})
+        if layer:
+            layer.layer_versions.pop(str(version_number), None)
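Taken together with the version-counter logic in publish_layer_version further up, each PublishLayerVersion call on the same layer name yields a monotonically increasing version, and ListLayerVersions returns versions newest-first. A hedged usage sketch; the layer name, endpoint URL, and zip payload are illustrative assumptions:

    import io
    import zipfile

    import boto3

    client = boto3.client("lambda", endpoint_url="http://localhost:4566", region_name="us-east-1")

    # build a tiny zip archive in memory to use as layer content
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("python/lib.py", "VALUE = 42\n")

    # publishing twice under the same name creates versions 1 and 2
    for _ in range(2):
        client.publish_layer_version(LayerName="demo-layer", Content={"ZipFile": buf.getvalue()})

    versions = client.list_layer_versions(LayerName="demo-layer")["LayerVersions"]
    assert [v["Version"] for v in versions] == [2, 1]  # newest first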
+    # =======================================
+    # ===== Layer Version Permissions =====
+    # =======================================
+    # TODO: lock updates that change revision IDs
+
+    def add_layer_version_permission(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        version_number: LayerVersionNumber,
+        statement_id: StatementId,
+        action: LayerPermissionAllowedAction,
+        principal: LayerPermissionAllowedPrincipal,
+        organization_id: OrganizationId = None,
+        revision_id: String = None,
+        **kwargs,
+    ) -> AddLayerVersionPermissionResponse:
+        # `layer_name` can either be the layer name or an ARN; it is used to generate error messages.
+        # `layer_n` contains the plain layer name.
+        region_name, account_id, layer_n, _ = LambdaProvider._resolve_layer(layer_name, context)
+
+        if action != "lambda:GetLayerVersion":
+            raise ValidationException(
+                f"1 validation error detected: Value '{action}' at 'action' failed to satisfy constraint: Member must satisfy regular expression pattern: lambda:GetLayerVersion"
+            )
+
+        store = lambda_stores[account_id][region_name]
+        layer = store.layers.get(layer_n)
+
+        layer_version_arn = api_utils.layer_version_arn(
+            layer_name, account_id, region_name, str(version_number)
+        )
+
+        if layer is None:
+            raise ResourceNotFoundException(
+                f"Layer version {layer_version_arn} does not exist.", Type="User"
+            )
+        layer_version = layer.layer_versions.get(str(version_number))
+        if layer_version is None:
+            raise ResourceNotFoundException(
+                f"Layer version {layer_version_arn} does not exist.", Type="User"
+            )
+        # if no policy is attached yet, initialize an empty one
+        if layer_version.policy is None:
+            layer_version.policy = LayerPolicy()
+
+        if statement_id in layer_version.policy.statements:
+            raise ResourceConflictException(
+                f"The statement id ({statement_id}) provided already exists. Please provide a new statement id, or remove the existing statement.",
+                Type="User",
+            )
+
+        if revision_id and layer_version.policy.revision_id != revision_id:
+            raise PreconditionFailedException(
+                "The Revision Id provided does not match the latest Revision Id. "
+                "Call the GetLayerPolicy API to retrieve the latest Revision Id",
+                Type="User",
+            )
+
+        statement = LayerPolicyStatement(
+            sid=statement_id, action=action, principal=principal, organization_id=organization_id
+        )
+
+        old_statements = layer_version.policy.statements
+        layer_version.policy = dataclasses.replace(
+            layer_version.policy, statements={**old_statements, statement_id: statement}
+        )
+
+        return AddLayerVersionPermissionResponse(
+            Statement=json.dumps(
+                {
+                    "Sid": statement.sid,
+                    "Effect": "Allow",
+                    "Principal": statement.principal,
+                    "Action": statement.action,
+                    "Resource": layer_version.layer_version_arn,
+                }
+            ),
+            RevisionId=layer_version.policy.revision_id,
+        )
+
+    def remove_layer_version_permission(
+        self,
+        context: RequestContext,
+        layer_name: LayerName,
+        version_number: LayerVersionNumber,
+        statement_id: StatementId,
+        revision_id: String = None,
+        **kwargs,
+    ) -> None:
+        # `layer_name` can either be the layer name or an ARN; it is used to generate error messages.
+        # `layer_n` contains the plain layer name.
+ region_name, account_id, layer_n, layer_version = LambdaProvider._resolve_layer( + layer_name, context + ) + + layer_version_arn = api_utils.layer_version_arn( + layer_name, account_id, region_name, str(version_number) + ) + + state = lambda_stores[account_id][region_name] + layer = state.layers.get(layer_n) + if layer is None: + raise ResourceNotFoundException( + f"Layer version {layer_version_arn} does not exist.", Type="User" + ) + layer_version = layer.layer_versions.get(str(version_number)) + if layer_version is None: + raise ResourceNotFoundException( + f"Layer version {layer_version_arn} does not exist.", Type="User" + ) + + if revision_id and layer_version.policy.revision_id != revision_id: + raise PreconditionFailedException( + "The Revision Id provided does not match the latest Revision Id. " + "Call the GetLayerPolicy API to retrieve the latest Revision Id", + Type="User", + ) + + if statement_id not in layer_version.policy.statements: + raise ResourceNotFoundException( + f"Statement {statement_id} is not found in resource policy.", Type="User" + ) + + old_statements = layer_version.policy.statements + layer_version.policy = dataclasses.replace( + layer_version.policy, + statements={k: v for k, v in old_statements.items() if k != statement_id}, + ) + + def get_layer_version_policy( + self, + context: RequestContext, + layer_name: LayerName, + version_number: LayerVersionNumber, + **kwargs, + ) -> GetLayerVersionPolicyResponse: + # `layer_name` can either be layer name or ARN. It is used to generate error messages. + # `layer_n` contains the layer name. + region_name, account_id, layer_n, _ = LambdaProvider._resolve_layer(layer_name, context) + + layer_version_arn = api_utils.layer_version_arn( + layer_name, account_id, region_name, str(version_number) + ) + + store = lambda_stores[account_id][region_name] + layer = store.layers.get(layer_n) + + if layer is None: + raise ResourceNotFoundException( + f"Layer version {layer_version_arn} does not exist.", Type="User" + ) + + layer_version = layer.layer_versions.get(str(version_number)) + if layer_version is None: + raise ResourceNotFoundException( + f"Layer version {layer_version_arn} does not exist.", Type="User" + ) + + if layer_version.policy is None: + raise ResourceNotFoundException( + "No policy is associated with the given resource.", Type="User" + ) + + return GetLayerVersionPolicyResponse( + Policy=json.dumps( + { + "Version": layer_version.policy.version, + "Id": layer_version.policy.id, + "Statement": [ + { + "Sid": ps.sid, + "Effect": "Allow", + "Principal": ps.principal, + "Action": ps.action, + "Resource": layer_version.layer_version_arn, + } + for ps in layer_version.policy.statements.values() + ], + } + ), + RevisionId=layer_version.policy.revision_id, + ) + + # ======================================= + # ======= Function Concurrency ======== + # ======================================= + # (Reserved) function concurrency is scoped to the whole function + + def get_function_concurrency( + self, context: RequestContext, function_name: FunctionName, **kwargs + ) -> GetFunctionConcurrencyResponse: + account_id, region = api_utils.get_account_and_region(function_name, context) + function_name = api_utils.get_function_name(function_name, context) + fn = self._get_function(function_name=function_name, region=region, account_id=account_id) + return GetFunctionConcurrencyResponse( + ReservedConcurrentExecutions=fn.reserved_concurrent_executions + ) + + def put_function_concurrency( + self, + context: RequestContext, + 
+        function_name: FunctionName,
+        reserved_concurrent_executions: ReservedConcurrentExecutions,
+        **kwargs,
+    ) -> Concurrency:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+
+        function_name, qualifier = api_utils.get_name_and_qualifier(function_name, None, context)
+        if qualifier:
+            raise InvalidParameterValueException(
+                "This operation is permitted on Lambda functions only. Aliases and versions do not support this operation. Please specify either a function name or an unqualified function ARN.",
+                Type="User",
+            )
+
+        store = lambda_stores[account_id][region]
+        fn = store.functions.get(function_name)
+        if not fn:
+            fn_arn = api_utils.qualified_lambda_arn(
+                function_name,
+                qualifier="$LATEST",
+                account=account_id,
+                region=region,
+            )
+            raise ResourceNotFoundException(f"Function not found: {fn_arn}", Type="User")
+
+        settings = self.get_account_settings(context)
+        unreserved_concurrent_executions = settings["AccountLimit"][
+            "UnreservedConcurrentExecutions"
+        ]
+
+        # The existing reserved concurrent executions for the same function are already deducted from
+        # unreserved_concurrent_executions, but they must not count here because the new value replaces the existing one.
+        # Joel tested this behavior manually against AWS (2023-11-28).
+        existing_reserved_concurrent_executions = (
+            fn.reserved_concurrent_executions if fn.reserved_concurrent_executions else 0
+        )
+        if (
+            unreserved_concurrent_executions
+            - reserved_concurrent_executions
+            + existing_reserved_concurrent_executions
+        ) < config.LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY:
+            raise InvalidParameterValueException(
+                f"Specified ReservedConcurrentExecutions for function decreases account's UnreservedConcurrentExecution below its minimum value of [{config.LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY}]."
+            )
+
+        total_provisioned_concurrency = sum(
+            [
+                provisioned_configs.provisioned_concurrent_executions
+                for provisioned_configs in fn.provisioned_concurrency_configs.values()
+            ]
+        )
+        if total_provisioned_concurrency > reserved_concurrent_executions:
+            raise InvalidParameterValueException(
+                f" ReservedConcurrentExecutions {reserved_concurrent_executions} should not be lower than function's total provisioned concurrency [{total_provisioned_concurrency}]."
+            )
+
+        fn.reserved_concurrent_executions = reserved_concurrent_executions
+
+        return Concurrency(ReservedConcurrentExecutions=fn.reserved_concurrent_executions)
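The two guards above enforce the AWS rules that a reservation may neither push the account's unreserved concurrency pool below a configured floor (config.LAMBDA_LIMITS_MINIMUM_UNRESERVED_CONCURRENCY) nor undercut the function's total provisioned concurrency. A hedged client-side sketch of the happy path; the endpoint URL and function name are illustrative assumptions:

    import boto3

    client = boto3.client("lambda", endpoint_url="http://localhost:4566", region_name="us-east-1")

    # headroom left in the account before the new reservation is applied
    headroom = client.get_account_settings()["AccountLimit"]["UnreservedConcurrentExecutions"]
    assert headroom >= 5

    # a small reservation well under the headroom succeeds; a value large enough to push
    # the unreserved pool below the minimum raises InvalidParameterValueException
    client.put_function_concurrency(FunctionName="my-func", ReservedConcurrentExecutions=5)
    reserved = client.get_function_concurrency(FunctionName="my-func")
    assert reserved["ReservedConcurrentExecutions"] == 5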
+    def delete_function_concurrency(
+        self, context: RequestContext, function_name: FunctionName, **kwargs
+    ) -> None:
+        account_id, region = api_utils.get_account_and_region(function_name, context)
+        function_name, qualifier = api_utils.get_name_and_qualifier(function_name, None, context)
+        store = lambda_stores[account_id][region]
+        fn = store.functions.get(function_name)
+        if not fn:
+            # guard against a missing function so this doesn't raise an AttributeError on None
+            fn_arn = api_utils.unqualified_lambda_arn(function_name, account_id, region)
+            raise ResourceNotFoundException(f"Function not found: {fn_arn}", Type="User")
+        fn.reserved_concurrent_executions = None
+
+    # =======================================
+    # =============== TAGS ===============
+    # =======================================
+    # only Function, Event Source Mapping, and Code Signing Config (not currently supported by LocalStack) ARNs are available for tagging in AWS
+
+    def _get_tags(self, resource: TaggableResource) -> dict[str, str]:
+        state = self.fetch_lambda_store_for_tagging(resource)
+        lambda_adapted_tags = {
+            tag["Key"]: tag["Value"]
+            for tag in state.TAGS.list_tags_for_resource(resource).get("Tags")
+        }
+        return lambda_adapted_tags
+
+    def _store_tags(self, resource: TaggableResource, tags: dict[str, str]):
+        state = self.fetch_lambda_store_for_tagging(resource)
+        if len(state.TAGS.tags.get(resource, {}) | tags) > LAMBDA_TAG_LIMIT_PER_RESOURCE:
+            raise InvalidParameterValueException(
+                "Number of tags exceeds resource tag limit.", Type="User"
+            )
+
+        tag_svc_adapted_tags = [{"Key": key, "Value": value} for key, value in tags.items()]
+        state.TAGS.tag_resource(resource, tag_svc_adapted_tags)
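Note that _store_tags enforces LAMBDA_TAG_LIMIT_PER_RESOURCE against the union of existing and incoming tags, so a request can fail even when the individual call stays under the limit. A client-side sketch; the function name and endpoint URL are illustrative assumptions:

    import boto3

    client = boto3.client("lambda", endpoint_url="http://localhost:4566", region_name="us-east-1")

    fn_arn = client.get_function(FunctionName="my-func")["Configuration"]["FunctionArn"]

    # only unqualified function ARNs (plus ESM and CSC ARNs) are taggable;
    # a qualified ARN such as f"{fn_arn}:1" is rejected (see the qualifier check below)
    client.tag_resource(Resource=fn_arn, Tags={"team": "platform"})
    assert client.list_tags(Resource=fn_arn)["Tags"]["team"] == "platform"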
+    def fetch_lambda_store_for_tagging(self, resource: TaggableResource) -> LambdaStore:
+        """
+        Takes a resource ARN for a TaggableResource (Lambda Function, Event Source Mapping, or Code Signing
+        Config) and returns the corresponding LambdaStore for its region and account.
+
+        In addition, this function validates that the ARN is a valid TaggableResource type and that the
+        TaggableResource exists.
+
+        Raises:
+            ValidationException: If the resource ARN is not a full ARN for a TaggableResource.
+            ResourceNotFoundException: If the specified resource does not exist.
+            InvalidParameterValueException: If the resource ARN is a qualified Lambda Function.
+        """
+
+        def _raise_validation_exception():
+            raise ValidationException(
+                f"1 validation error detected: Value '{resource}' at 'resource' failed to satisfy constraint: Member must satisfy regular expression pattern: {api_utils.TAGGABLE_RESOURCE_ARN_PATTERN}"
+            )
+
+        # Check whether the ARN we have been passed is correctly formatted
+        parsed_resource_arn: ArnData = None
+        try:
+            parsed_resource_arn = parse_arn(resource)
+        except Exception:
+            _raise_validation_exception()
+
+        # TODO: Should we be checking whether this is a full ARN?
+        region, account_id, resource_type = map(
+            parsed_resource_arn.get, ("region", "account", "resource")
+        )
+
+        if not all((region, account_id, resource_type)):
+            _raise_validation_exception()
+
+        if not (parts := resource_type.split(":")):
+            _raise_validation_exception()
+
+        resource_type, resource_identifier, *qualifier = parts
+        if resource_type not in {"event-source-mapping", "code-signing-config", "function"}:
+            _raise_validation_exception()
+
+        if qualifier:
+            if resource_type == "function":
+                raise InvalidParameterValueException(
+                    "Tags on function aliases and versions are not supported. Please specify a function ARN.",
+                    Type="User",
+                )
+            _raise_validation_exception()
+
+        match resource_type:
+            case "event-source-mapping":
+                self._get_esm(resource_identifier, account_id, region)
+            case "code-signing-config":
+                raise NotImplementedError("Resource tagging on CSC not yet implemented.")
+            case "function":
+                self._get_function(
+                    function_name=resource_identifier, account_id=account_id, region=region
+                )
+
+        # If no exceptions were raised, the ARN and the referenced resource are valid for tag operations
+        return lambda_stores[account_id][region]
+
+    def tag_resource(
+        self, context: RequestContext, resource: TaggableResource, tags: Tags, **kwargs
+    ) -> None:
+        if not tags:
+            raise InvalidParameterValueException(
+                "An error occurred and the request cannot be processed.", Type="User"
+            )
+        self._store_tags(resource, tags)
+
+        if (resource_id := extract_resource_from_arn(resource)) and resource_id.startswith(
+            "function"
+        ):
+            name, _, account, region = function_locators_from_arn(resource)
+            function = self._get_function(name, account, region)
+            with function.lock:
+                # dirty hack for changed revision id, should reevaluate model to prevent this:
+                latest_version = function.versions["$LATEST"]
+                function.versions["$LATEST"] = dataclasses.replace(
+                    latest_version, config=dataclasses.replace(latest_version.config)
+                )
+
+    def list_tags(
+        self, context: RequestContext, resource: TaggableResource, **kwargs
+    ) -> ListTagsResponse:
+        tags = self._get_tags(resource)
+        return ListTagsResponse(Tags=tags)
+
+    def untag_resource(
+        self, context: RequestContext, resource: TaggableResource, tag_keys: TagKeyList, **kwargs
+    ) -> None:
+        if not tag_keys:
+            raise ValidationException(
+                "1 validation error detected: Value null at 'tagKeys' failed to satisfy constraint: Member must not be null"
+            )  # should probably be generalized a bit
+
+        state = self.fetch_lambda_store_for_tagging(resource)
+        state.TAGS.untag_resource(resource, tag_keys)
+
+        if (resource_id := extract_resource_from_arn(resource)) and resource_id.startswith(
+            "function"
+        ):
+            name, _, account, region = function_locators_from_arn(resource)
+            function = self._get_function(name, account, region)
+            # TODO: Potential race condition
+            with function.lock:
+                # dirty hack for changed revision id, should reevaluate model to prevent this:
+                latest_version = function.versions["$LATEST"]
+                function.versions["$LATEST"] = dataclasses.replace(
+                    latest_version, config=dataclasses.replace(latest_version.config)
+                )
+
+    # =======================================
+    # ======= LEGACY / DEPRECATED ========
+    # =======================================
+
+    def invoke_async(
+        self,
+        context: RequestContext,
+        function_name: NamespacedFunctionName,
+        invoke_args: IO[BlobStream],
+        **kwargs,
+    ) -> InvokeAsyncResponse:
+        """LEGACY API endpoint.
Even AWS heavily discourages its usage.""" + raise NotImplementedError diff --git a/localstack-core/localstack/services/lambda_/provider_utils.py b/localstack-core/localstack/services/lambda_/provider_utils.py new file mode 100644 index 0000000000000..4c0c4e7e1bc8b --- /dev/null +++ b/localstack-core/localstack/services/lambda_/provider_utils.py @@ -0,0 +1,92 @@ +from typing import TYPE_CHECKING + +from localstack.aws.api.lambda_ import ResourceNotFoundException +from localstack.services.lambda_.api_utils import ( + function_locators_from_arn, + lambda_arn, + qualified_lambda_arn, + qualifier_is_alias, + unqualified_lambda_arn, +) +from localstack.services.lambda_.invocation.models import lambda_stores +from localstack.utils.id_generator import ExistingIds, ResourceIdentifier, Tags, localstack_id + +if TYPE_CHECKING: + from localstack.services.lambda_.invocation.lambda_models import ( + FunctionVersion, + ) + + +def get_function_version_from_arn(function_arn: str) -> "FunctionVersion": + function_name, qualifier, account_id, region = function_locators_from_arn(function_arn) + fn = lambda_stores[account_id][region].functions.get(function_name) + if fn is None: + if qualifier is None: + raise ResourceNotFoundException( + f"Function not found: {unqualified_lambda_arn(function_name, account_id, region)}", + Type="User", + ) + else: + raise ResourceNotFoundException( + f"Function not found: {qualified_lambda_arn(function_name, qualifier, account_id, region)}", + Type="User", + ) + if qualifier and qualifier_is_alias(qualifier): + if qualifier not in fn.aliases: + alias_arn = qualified_lambda_arn(function_name, qualifier, account_id, region) + raise ResourceNotFoundException(f"Function not found: {alias_arn}", Type="User") + alias_name = qualifier + qualifier = fn.aliases[alias_name].function_version + + version = get_function_version( + function_name=function_name, + qualifier=qualifier, + account_id=account_id, + region=region, + ) + return version + + +def get_function_version( + function_name: str, qualifier: str | None, account_id: str, region: str +) -> "FunctionVersion": + state = lambda_stores[account_id][region] + function = state.functions.get(function_name) + qualifier_or_latest = qualifier or "$LATEST" + version = function and function.versions.get(qualifier_or_latest) + if not function or not version: + arn = lambda_arn( + function_name=function_name, + qualifier=qualifier, + account=account_id, + region=region, + ) + raise ResourceNotFoundException( + f"Function not found: {arn}", + Type="User", + ) + # TODO what if version is missing? 
+ return version + + +class LambdaLayerVersionIdentifier(ResourceIdentifier): + service = "lambda" + resource = "layer-version" + + def __init__(self, account_id: str, region: str, layer_name: str): + super(LambdaLayerVersionIdentifier, self).__init__(account_id, region, layer_name) + + def generate( + self, existing_ids: ExistingIds = None, tags: Tags = None, next_version: int = None + ) -> int: + return int(generate_layer_version(self, next_version=next_version)) + + +@localstack_id +def generate_layer_version( + resource_identifier: ResourceIdentifier, + existing_ids: ExistingIds = None, + tags: Tags = None, + next_version: int = 0, +): + return next_version diff --git a/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/__init__.py b/localstack-core/localstack/services/lambda_/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_execution/state_map/__init__.py rename to localstack-core/localstack/services/lambda_/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig.py new file mode 100644 index 0000000000000..8a23156e4ab13 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig.py @@ -0,0 +1,118 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LambdaCodeSigningConfigProperties(TypedDict): + AllowedPublishers: Optional[AllowedPublishers] + CodeSigningConfigArn: Optional[str] + CodeSigningConfigId: Optional[str] + CodeSigningPolicies: Optional[CodeSigningPolicies] + Description: Optional[str] + + +class AllowedPublishers(TypedDict): + SigningProfileVersionArns: Optional[list[str]] + + +class CodeSigningPolicies(TypedDict): + UntrustedArtifactOnDeployment: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LambdaCodeSigningConfigProvider(ResourceProvider[LambdaCodeSigningConfigProperties]): + TYPE = "AWS::Lambda::CodeSigningConfig" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaCodeSigningConfigProperties], + ) -> ProgressEvent[LambdaCodeSigningConfigProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/CodeSigningConfigArn + + Required properties: + - AllowedPublishers + + + + Read-only properties: + - /properties/CodeSigningConfigId + - /properties/CodeSigningConfigArn + + IAM permissions required: + - lambda:CreateCodeSigningConfig + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + response = lambda_client.create_code_signing_config(**model) + model["CodeSigningConfigArn"] = response["CodeSigningConfig"]["CodeSigningConfigArn"] + model["CodeSigningConfigId"] = response["CodeSigningConfig"]["CodeSigningConfigId"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LambdaCodeSigningConfigProperties], + ) -> ProgressEvent[LambdaCodeSigningConfigProperties]: + """ + Fetch resource information + + IAM permissions required: + - lambda:GetCodeSigningConfig + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LambdaCodeSigningConfigProperties], + ) -> ProgressEvent[LambdaCodeSigningConfigProperties]: + """ + Delete a resource + + IAM permissions required: + - lambda:DeleteCodeSigningConfig + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + lambda_client.delete_code_signing_config(CodeSigningConfigArn=model["CodeSigningConfigArn"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LambdaCodeSigningConfigProperties], + ) -> ProgressEvent[LambdaCodeSigningConfigProperties]: + """ + Update a resource + + IAM permissions required: + - lambda:UpdateCodeSigningConfig + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig.schema.json new file mode 100644 index 0000000000000..75c28a58fa3b1 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig.schema.json @@ -0,0 +1,111 @@ +{ + "typeName": "AWS::Lambda::CodeSigningConfig", + "description": "Resource Type definition for AWS::Lambda::CodeSigningConfig.", + "additionalProperties": false, + "properties": { + "Description": { + "description": "A description of the CodeSigningConfig", + "type": "string", + "minLength": 0, + "maxLength": 256 + }, + "AllowedPublishers": { + "description": "When the CodeSigningConfig is later on attached to a function, the function code will be expected to be signed by profiles from this list", + "$ref": "#/definitions/AllowedPublishers" + }, + "CodeSigningPolicies": { + "description": "Policies to control how to act if a signature is invalid", + "$ref": "#/definitions/CodeSigningPolicies" + }, + "CodeSigningConfigId": { + "description": "A unique identifier for CodeSigningConfig resource", + "type": "string", + "pattern": "csc-[a-zA-Z0-9-_\\.]{17}" + }, + "CodeSigningConfigArn": { + "description": "A unique Arn for CodeSigningConfig resource", + "type": "string", + "pattern": "arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:code-signing-config:csc-[a-z0-9]{17}" + } + }, + "definitions": { + "AllowedPublishers": { + "type": "object", + "description": "When the CodeSigningConfig is later on attached to a function, the function code will be 
expected to be signed by profiles from this list", + "additionalProperties": false, + "properties": { + "SigningProfileVersionArns": { + "type": "array", + "description": "List of Signing profile version Arns", + "minItems": 1, + "maxItems": 20, + "items": { + "type": "string", + "pattern": "arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)", + "minLength": 12, + "maxLength": 1024 + } + } + }, + "required": [ + "SigningProfileVersionArns" + ] + }, + "CodeSigningPolicies": { + "type": "object", + "description": "Policies to control how to act if a signature is invalid", + "additionalProperties": false, + "properties": { + "UntrustedArtifactOnDeployment": { + "type": "string", + "description": "Indicates how Lambda operations involve updating the code artifact will operate. Default to Warn if not provided", + "default": "Warn", + "enum": [ + "Warn", + "Enforce" + ] + } + }, + "required": [ + "UntrustedArtifactOnDeployment" + ] + } + }, + "required": [ + "AllowedPublishers" + ], + "readOnlyProperties": [ + "/properties/CodeSigningConfigId", + "/properties/CodeSigningConfigArn" + ], + "primaryIdentifier": [ + "/properties/CodeSigningConfigArn" + ], + "handlers": { + "create": { + "permissions": [ + "lambda:CreateCodeSigningConfig" + ] + }, + "read": { + "permissions": [ + "lambda:GetCodeSigningConfig" + ] + }, + "update": { + "permissions": [ + "lambda:UpdateCodeSigningConfig" + ] + }, + "delete": { + "permissions": [ + "lambda:DeleteCodeSigningConfig" + ] + }, + "list": { + "permissions": [ + "lambda:ListCodeSigningConfigs" + ] + } + } +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig_plugin.py new file mode 100644 index 0000000000000..b165c1253e910 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_codesigningconfig_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaCodeSigningConfigProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::CodeSigningConfig" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.aws_lambda_codesigningconfig import ( + LambdaCodeSigningConfigProvider, + ) + + self.factory = LambdaCodeSigningConfigProvider diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig.py new file mode 100644 index 0000000000000..60dec55595e95 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig.py @@ -0,0 +1,124 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import uuid +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LambdaEventInvokeConfigProperties(TypedDict): + FunctionName: Optional[str] + Qualifier: Optional[str] + DestinationConfig: Optional[DestinationConfig] + Id: Optional[str] + 
MaximumEventAgeInSeconds: Optional[int] + MaximumRetryAttempts: Optional[int] + + +class OnSuccess(TypedDict): + Destination: Optional[str] + + +class OnFailure(TypedDict): + Destination: Optional[str] + + +class DestinationConfig(TypedDict): + OnFailure: Optional[OnFailure] + OnSuccess: Optional[OnSuccess] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LambdaEventInvokeConfigProvider(ResourceProvider[LambdaEventInvokeConfigProperties]): + TYPE = "AWS::Lambda::EventInvokeConfig" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaEventInvokeConfigProperties], + ) -> ProgressEvent[LambdaEventInvokeConfigProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - FunctionName + - Qualifier + + Create-only properties: + - /properties/FunctionName + - /properties/Qualifier + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + lambda_client.put_function_event_invoke_config(**model) + model["Id"] = str(uuid.uuid4()) # TODO: not actually a UUIDv4 + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LambdaEventInvokeConfigProperties], + ) -> ProgressEvent[LambdaEventInvokeConfigProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LambdaEventInvokeConfigProperties], + ) -> ProgressEvent[LambdaEventInvokeConfigProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + lambda_client.delete_function_event_invoke_config( + FunctionName=model["FunctionName"], Qualifier=model["Qualifier"] + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LambdaEventInvokeConfigProperties], + ) -> ProgressEvent[LambdaEventInvokeConfigProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig.schema.json new file mode 100644 index 0000000000000..f188bcbfdaca4 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig.schema.json @@ -0,0 +1,77 @@ +{ + "typeName": "AWS::Lambda::EventInvokeConfig", + "description": "Resource Type definition for AWS::Lambda::EventInvokeConfig", + "additionalProperties": false, + "properties": { + "FunctionName": { + "type": "string" + }, + "MaximumRetryAttempts": { + "type": "integer" + }, + "Qualifier": { + "type": "string" + }, + "DestinationConfig": { + "$ref": "#/definitions/DestinationConfig" + }, + "Id": { + "type": "string" + }, + "MaximumEventAgeInSeconds": { + "type": "integer" + } + }, + "definitions": { + "DestinationConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "OnSuccess": { + "$ref": "#/definitions/OnSuccess" + }, + "OnFailure": { + "$ref": "#/definitions/OnFailure" + } + } + }, + "OnSuccess": { + "type": "object", + "additionalProperties": false, + "properties": { + 
"Destination": { + "type": "string" + } + }, + "required": [ + "Destination" + ] + }, + "OnFailure": { + "type": "object", + "additionalProperties": false, + "properties": { + "Destination": { + "type": "string" + } + }, + "required": [ + "Destination" + ] + } + }, + "required": [ + "FunctionName", + "Qualifier" + ], + "createOnlyProperties": [ + "/properties/FunctionName", + "/properties/Qualifier" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig_plugin.py new file mode 100644 index 0000000000000..6ebda8450ef65 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventinvokeconfig_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaEventInvokeConfigProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::EventInvokeConfig" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.aws_lambda_eventinvokeconfig import ( + LambdaEventInvokeConfigProvider, + ) + + self.factory = LambdaEventInvokeConfigProvider diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping.py new file mode 100644 index 0000000000000..1f82478526dd8 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping.py @@ -0,0 +1,224 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import copy +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LambdaEventSourceMappingProperties(TypedDict): + FunctionName: Optional[str] + AmazonManagedKafkaEventSourceConfig: Optional[AmazonManagedKafkaEventSourceConfig] + BatchSize: Optional[int] + BisectBatchOnFunctionError: Optional[bool] + DestinationConfig: Optional[DestinationConfig] + DocumentDBEventSourceConfig: Optional[DocumentDBEventSourceConfig] + Enabled: Optional[bool] + EventSourceArn: Optional[str] + FilterCriteria: Optional[FilterCriteria] + FunctionResponseTypes: Optional[list[str]] + Id: Optional[str] + MaximumBatchingWindowInSeconds: Optional[int] + MaximumRecordAgeInSeconds: Optional[int] + MaximumRetryAttempts: Optional[int] + ParallelizationFactor: Optional[int] + Queues: Optional[list[str]] + ScalingConfig: Optional[ScalingConfig] + SelfManagedEventSource: Optional[SelfManagedEventSource] + SelfManagedKafkaEventSourceConfig: Optional[SelfManagedKafkaEventSourceConfig] + SourceAccessConfigurations: Optional[list[SourceAccessConfiguration]] + StartingPosition: Optional[str] + StartingPositionTimestamp: Optional[float] + Topics: Optional[list[str]] + TumblingWindowInSeconds: Optional[int] + + +class OnFailure(TypedDict): + Destination: Optional[str] + + +class DestinationConfig(TypedDict): + OnFailure: Optional[OnFailure] + + +class 
Filter(TypedDict): + Pattern: Optional[str] + + +class FilterCriteria(TypedDict): + Filters: Optional[list[Filter]] + + +class SourceAccessConfiguration(TypedDict): + Type: Optional[str] + URI: Optional[str] + + +class Endpoints(TypedDict): + KafkaBootstrapServers: Optional[list[str]] + + +class SelfManagedEventSource(TypedDict): + Endpoints: Optional[Endpoints] + + +class AmazonManagedKafkaEventSourceConfig(TypedDict): + ConsumerGroupId: Optional[str] + + +class SelfManagedKafkaEventSourceConfig(TypedDict): + ConsumerGroupId: Optional[str] + + +class ScalingConfig(TypedDict): + MaximumConcurrency: Optional[int] + + +class DocumentDBEventSourceConfig(TypedDict): + CollectionName: Optional[str] + DatabaseName: Optional[str] + FullDocument: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LambdaEventSourceMappingProvider(ResourceProvider[LambdaEventSourceMappingProperties]): + TYPE = "AWS::Lambda::EventSourceMapping" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaEventSourceMappingProperties], + ) -> ProgressEvent[LambdaEventSourceMappingProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - FunctionName + + Create-only properties: + - /properties/EventSourceArn + - /properties/StartingPosition + - /properties/StartingPositionTimestamp + - /properties/SelfManagedEventSource + - /properties/AmazonManagedKafkaEventSourceConfig + - /properties/SelfManagedKafkaEventSourceConfig + + Read-only properties: + - /properties/Id + + IAM permissions required: + - lambda:CreateEventSourceMapping + - lambda:GetEventSourceMapping + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + params = copy.deepcopy(model) + if tags := params.get("Tags"): + transformed_tags = {} + for tag_definition in tags: + transformed_tags[tag_definition["Key"]] = tag_definition["Value"] + params["Tags"] = transformed_tags + + response = lambda_client.create_event_source_mapping(**params) + model["Id"] = response["UUID"] + model["EventSourceMappingArn"] = response["EventSourceMappingArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LambdaEventSourceMappingProperties], + ) -> ProgressEvent[LambdaEventSourceMappingProperties]: + """ + Fetch resource information + + IAM permissions required: + - lambda:GetEventSourceMapping + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LambdaEventSourceMappingProperties], + ) -> ProgressEvent[LambdaEventSourceMappingProperties]: + """ + Delete a resource + + IAM permissions required: + - lambda:DeleteEventSourceMapping + - lambda:GetEventSourceMapping + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + lambda_client.delete_event_source_mapping(UUID=model["Id"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LambdaEventSourceMappingProperties], + ) -> ProgressEvent[LambdaEventSourceMappingProperties]: + """ + Update a resource + + IAM permissions required: + - lambda:UpdateEventSourceMapping + - lambda:GetEventSourceMapping + """ + current_state = request.previous_state + model = request.desired_state + 
lambda_client = request.aws_client_factory.lambda_ + + params = util.select_attributes( + model=model, + params=[ + "FunctionName", + "Enabled", + "BatchSize", + "FilterCriteria", + "MaximumBatchingWindowInSeconds", + "DestinationConfig", + "MaximumRecordAgeInSeconds", + "BisectBatchOnFunctionError", + "MaximumRetryAttempts", + "ParallelizationFactor", + "SourceAccessConfigurations", + "TumblingWindowInSeconds", + "FunctionResponseTypes", + "ScalingConfig", + "DocumentDBEventSourceConfig", + ], + ) + lambda_client.update_event_source_mapping(UUID=current_state["Id"], **params) + + model["Id"] = current_state["Id"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping.schema.json new file mode 100644 index 0000000000000..2071c3dad1d10 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping.schema.json @@ -0,0 +1,398 @@ +{ + "typeName": "AWS::Lambda::EventSourceMapping", + "description": "Resource Type definition for AWS::Lambda::EventSourceMapping", + "additionalProperties": false, + "properties": { + "Id": { + "description": "Event Source Mapping Identifier UUID.", + "type": "string", + "pattern": "[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}", + "minLength": 36, + "maxLength": 36 + }, + "BatchSize": { + "description": "The maximum number of items to retrieve in a single batch.", + "type": "integer", + "minimum": 1, + "maximum": 10000 + }, + "BisectBatchOnFunctionError": { + "description": "(Streams) If the function returns an error, split the batch in two and retry.", + "type": "boolean" + }, + "DestinationConfig": { + "description": "(Streams) An Amazon SQS queue or Amazon SNS topic destination for discarded records.", + "$ref": "#/definitions/DestinationConfig" + }, + "Enabled": { + "description": "Disables the event source mapping to pause polling and invocation.", + "type": "boolean" + }, + "EventSourceArn": { + "description": "The Amazon Resource Name (ARN) of the event source.", + "type": "string", + "pattern": "arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)", + "minLength": 12, + "maxLength": 1024 + }, + "FilterCriteria": { + "description": "The filter criteria to control event filtering.", + "$ref": "#/definitions/FilterCriteria" + }, + "FunctionName": { + "description": "The name of the Lambda function.", + "type": "string", + "pattern": "(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?", + "minLength": 1, + "maxLength": 140 + }, + "MaximumBatchingWindowInSeconds": { + "description": "(Streams) The maximum amount of time to gather records before invoking the function, in seconds.", + "type": "integer", + "minimum": 0, + "maximum": 300 + }, + "MaximumRecordAgeInSeconds": { + "description": "(Streams) The maximum age of a record that Lambda sends to a function for processing.", + "type": "integer", + "minimum": -1, + "maximum": 604800 + }, + "MaximumRetryAttempts": { + "description": "(Streams) The maximum number of times to retry when the function returns an error.", + "type": "integer", + "minimum": -1, + "maximum": 10000 + }, + "ParallelizationFactor": { + "description": "(Streams) 
The number of batches to process from each shard concurrently.", + "type": "integer", + "minimum": 1, + "maximum": 10 + }, + "StartingPosition": { + "description": "The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Streams sources.", + "type": "string", + "pattern": "(LATEST|TRIM_HORIZON|AT_TIMESTAMP)+", + "minLength": 6, + "maxLength": 12 + }, + "StartingPositionTimestamp": { + "description": "With StartingPosition set to AT_TIMESTAMP, the time from which to start reading, in Unix time seconds.", + "type": "number" + }, + "Topics": { + "description": "(Kafka) A list of Kafka topics.", + "type": "array", + "uniqueItems": true, + "items": { + "type": "string", + "pattern": "^[^.]([a-zA-Z0-9\\-_.]+)", + "minLength": 1, + "maxLength": 249 + }, + "minItems": 1, + "maxItems": 1 + }, + "Queues": { + "description": "(ActiveMQ) A list of ActiveMQ queues.", + "type": "array", + "uniqueItems": true, + "items": { + "type": "string", + "pattern": "[\\s\\S]*", + "minLength": 1, + "maxLength": 1000 + }, + "minItems": 1, + "maxItems": 1 + }, + "SourceAccessConfigurations": { + "description": "A list of SourceAccessConfiguration.", + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/SourceAccessConfiguration" + }, + "minItems": 1, + "maxItems": 22 + }, + "TumblingWindowInSeconds": { + "description": "(Streams) Tumbling window (non-overlapping time window) duration to perform aggregations.", + "type": "integer", + "minimum": 0, + "maximum": 900 + }, + "FunctionResponseTypes": { + "description": "(Streams) A list of response types supported by the function.", + "type": "array", + "uniqueItems": true, + "items": { + "type": "string", + "enum": [ + "ReportBatchItemFailures" + ] + }, + "minLength": 0, + "maxLength": 1 + }, + "SelfManagedEventSource": { + "description": "Self-managed event source endpoints.", + "$ref": "#/definitions/SelfManagedEventSource" + }, + "AmazonManagedKafkaEventSourceConfig": { + "description": "Specific configuration settings for an MSK event source.", + "$ref": "#/definitions/AmazonManagedKafkaEventSourceConfig" + }, + "SelfManagedKafkaEventSourceConfig": { + "description": "Specific configuration settings for a Self-Managed Apache Kafka event source.", + "$ref": "#/definitions/SelfManagedKafkaEventSourceConfig" + }, + "ScalingConfig": { + "description": "The scaling configuration for the event source.", + "$ref": "#/definitions/ScalingConfig" + }, + "DocumentDBEventSourceConfig": { + "description": "Document db event source config.", + "$ref": "#/definitions/DocumentDBEventSourceConfig" + } + }, + "definitions": { + "DestinationConfig": { + "type": "object", + "additionalProperties": false, + "description": "(Streams) An Amazon SQS queue or Amazon SNS topic destination for discarded records.", + "properties": { + "OnFailure": { + "description": "The destination configuration for failed invocations.", + "$ref": "#/definitions/OnFailure" + } + } + }, + "FilterCriteria": { + "type": "object", + "description": "The filter criteria to control event filtering.", + "additionalProperties": false, + "properties": { + "Filters": { + "description": "List of filters of this FilterCriteria", + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/Filter" + }, + "minItems": 1, + "maxItems": 20 + } + } + }, + "Filter": { + "type": "object", + "description": "The filter object that defines parameters for ESM filtering.", + "additionalProperties": false, + "properties": { + "Pattern": { + "type": 
"string", + "description": "The filter pattern that defines which events should be passed for invocations.", + "pattern": ".*", + "minLength": 0, + "maxLength": 4096 + } + } + }, + "OnFailure": { + "type": "object", + "description": "A destination for events that failed processing.", + "additionalProperties": false, + "properties": { + "Destination": { + "description": "The Amazon Resource Name (ARN) of the destination resource.", + "type": "string", + "pattern": "arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)", + "minLength": 12, + "maxLength": 1024 + } + } + }, + "SourceAccessConfiguration": { + "type": "object", + "additionalProperties": false, + "description": "The configuration used by AWS Lambda to access event source", + "properties": { + "Type": { + "description": "The type of source access configuration.", + "enum": [ + "BASIC_AUTH", + "VPC_SUBNET", + "VPC_SECURITY_GROUP", + "SASL_SCRAM_512_AUTH", + "SASL_SCRAM_256_AUTH", + "VIRTUAL_HOST", + "CLIENT_CERTIFICATE_TLS_AUTH", + "SERVER_ROOT_CA_CERTIFICATE" + ], + "type": "string" + }, + "URI": { + "description": "The URI for the source access configuration resource.", + "type": "string", + "pattern": "[a-zA-Z0-9-\\/*:_+=.@-]*", + "minLength": 1, + "maxLength": 200 + } + } + }, + "SelfManagedEventSource": { + "type": "object", + "additionalProperties": false, + "description": "The configuration used by AWS Lambda to access a self-managed event source.", + "properties": { + "Endpoints": { + "description": "The endpoints for a self-managed event source.", + "$ref": "#/definitions/Endpoints" + } + } + }, + "Endpoints": { + "type": "object", + "additionalProperties": false, + "description": "The endpoints used by AWS Lambda to access a self-managed event source.", + "properties": { + "KafkaBootstrapServers": { + "type": "array", + "description": "A list of Kafka server endpoints.", + "uniqueItems": true, + "items": { + "type": "string", + "description": "The URL of a Kafka server.", + "pattern": "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}", + "minLength": 1, + "maxLength": 300 + }, + "minItems": 1, + "maxItems": 10 + } + } + }, + "ConsumerGroupId": { + "description": "The identifier for the Kafka Consumer Group to join.", + "type": "string", + "pattern": "[a-zA-Z0-9-\\/*:_+=.@-]*", + "minLength": 1, + "maxLength": 200 + }, + "AmazonManagedKafkaEventSourceConfig": { + "description": "Specific configuration settings for an MSK event source.", + "type": "object", + "additionalProperties": false, + "properties": { + "ConsumerGroupId": { + "description": "The identifier for the Kafka Consumer Group to join.", + "$ref": "#/definitions/ConsumerGroupId" + } + } + }, + "SelfManagedKafkaEventSourceConfig": { + "description": "Specific configuration settings for a Self-Managed Apache Kafka event source.", + "type": "object", + "additionalProperties": false, + "properties": { + "ConsumerGroupId": { + "description": "The identifier for the Kafka Consumer Group to join.", + "$ref": "#/definitions/ConsumerGroupId" + } + } + }, + "MaximumConcurrency": { + "description": "The maximum number of concurrent functions that an event source can invoke.", + "type": "integer", + "minimum": 2, + "maximum": 1000 + }, + "ScalingConfig": { + "description": "The scaling configuration for the event source.", + "type": "object", + "additionalProperties": false, + "properties": { + "MaximumConcurrency": { + "description": "The maximum number of 
concurrent functions that the event source can invoke.", + "$ref": "#/definitions/MaximumConcurrency" + } + } + }, + "DocumentDBEventSourceConfig": { + "description": "Document db event source config.", + "type": "object", + "additionalProperties": false, + "properties": { + "DatabaseName": { + "description": "The database name to connect to.", + "type": "string", + "minLength": 1, + "maxLength": 63 + }, + "CollectionName": { + "description": "The collection name to connect to.", + "type": "string", + "minLength": 1, + "maxLength": 57 + }, + "FullDocument": { + "description": "Include full document in change stream response. The default option will only send the changes made to documents to Lambda. If you want the complete document sent to Lambda, set this to UpdateLookup.", + "type": "string", + "enum": [ + "UpdateLookup", + "Default" + ] + } + } + } + }, + "required": [ + "FunctionName" + ], + "createOnlyProperties": [ + "/properties/EventSourceArn", + "/properties/StartingPosition", + "/properties/StartingPositionTimestamp", + "/properties/SelfManagedEventSource", + "/properties/AmazonManagedKafkaEventSourceConfig", + "/properties/SelfManagedKafkaEventSourceConfig" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "handlers": { + "create": { + "permissions": [ + "lambda:CreateEventSourceMapping", + "lambda:GetEventSourceMapping" + ] + }, + "delete": { + "permissions": [ + "lambda:DeleteEventSourceMapping", + "lambda:GetEventSourceMapping" + ] + }, + "list": { + "permissions": [ + "lambda:ListEventSourceMappings" + ] + }, + "read": { + "permissions": [ + "lambda:GetEventSourceMapping" + ] + }, + "update": { + "permissions": [ + "lambda:UpdateEventSourceMapping", + "lambda:GetEventSourceMapping" + ] + } + } +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping_plugin.py new file mode 100644 index 0000000000000..f4dd5b69a5423 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_eventsourcemapping_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaEventSourceMappingProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::EventSourceMapping" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.aws_lambda_eventsourcemapping import ( + LambdaEventSourceMappingProvider, + ) + + self.factory = LambdaEventSourceMappingProvider diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.py new file mode 100644 index 0000000000000..bbcc61e335934 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.py @@ -0,0 +1,585 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import os +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from 
localstack.services.lambda_.lambda_utils import get_handler_file_from_name +from localstack.utils.archives import is_zip_file +from localstack.utils.files import mkdir, new_tmp_dir, rm_rf, save_file +from localstack.utils.strings import is_base64, to_bytes +from localstack.utils.testutil import create_zip_file + + +class LambdaFunctionProperties(TypedDict): + Code: Optional[Code] + Role: Optional[str] + Architectures: Optional[list[str]] + Arn: Optional[str] + CodeSigningConfigArn: Optional[str] + DeadLetterConfig: Optional[DeadLetterConfig] + Description: Optional[str] + Environment: Optional[Environment] + EphemeralStorage: Optional[EphemeralStorage] + FileSystemConfigs: Optional[list[FileSystemConfig]] + FunctionName: Optional[str] + Handler: Optional[str] + ImageConfig: Optional[ImageConfig] + KmsKeyArn: Optional[str] + Layers: Optional[list[str]] + MemorySize: Optional[int] + PackageType: Optional[str] + ReservedConcurrentExecutions: Optional[int] + Runtime: Optional[str] + RuntimeManagementConfig: Optional[RuntimeManagementConfig] + SnapStart: Optional[SnapStart] + SnapStartResponse: Optional[SnapStartResponse] + Tags: Optional[list[Tag]] + Timeout: Optional[int] + TracingConfig: Optional[TracingConfig] + VpcConfig: Optional[VpcConfig] + + +class TracingConfig(TypedDict): + Mode: Optional[str] + + +class VpcConfig(TypedDict): + SecurityGroupIds: Optional[list[str]] + SubnetIds: Optional[list[str]] + + +class RuntimeManagementConfig(TypedDict): + UpdateRuntimeOn: Optional[str] + RuntimeVersionArn: Optional[str] + + +class SnapStart(TypedDict): + ApplyOn: Optional[str] + + +class FileSystemConfig(TypedDict): + Arn: Optional[str] + LocalMountPath: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class ImageConfig(TypedDict): + Command: Optional[list[str]] + EntryPoint: Optional[list[str]] + WorkingDirectory: Optional[str] + + +class DeadLetterConfig(TypedDict): + TargetArn: Optional[str] + + +class SnapStartResponse(TypedDict): + ApplyOn: Optional[str] + OptimizationStatus: Optional[str] + + +class Code(TypedDict): + ImageUri: Optional[str] + S3Bucket: Optional[str] + S3Key: Optional[str] + S3ObjectVersion: Optional[str] + ZipFile: Optional[str] + + +class LoggingConfig(TypedDict): + ApplicationLogLevel: Optional[str] + LogFormat: Optional[str] + LogGroup: Optional[str] + SystemLogLevel: Optional[str] + + +class Environment(TypedDict): + Variables: Optional[dict] + + +class EphemeralStorage(TypedDict): + Size: Optional[int] + + +REPEATED_INVOCATION = "repeated_invocation" + +# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html +PYTHON_CFN_RESPONSE_CONTENT = """ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +from __future__ import print_function +import urllib3 +import json + +SUCCESS = "SUCCESS" +FAILED = "FAILED" + +http = urllib3.PoolManager() + + +def send(event, context, responseStatus, responseData, physicalResourceId=None, noEcho=False, reason=None): + responseUrl = event['ResponseURL'] + + print(responseUrl) + + responseBody = { + 'Status' : responseStatus, + 'Reason' : reason or "See the details in CloudWatch Log Stream: {}".format(context.log_stream_name), + 'PhysicalResourceId' : physicalResourceId or context.log_stream_name, + 'StackId' : event['StackId'], + 'RequestId' : event['RequestId'], + 'LogicalResourceId' : event['LogicalResourceId'], + 'NoEcho' : noEcho, + 'Data' : responseData + } + + json_responseBody = json.dumps(responseBody) + + print("Response body:") + print(json_responseBody) + + headers = { + 'content-type' : '', + 'content-length' : str(len(json_responseBody)) + } + + try: + response = http.request('PUT', responseUrl, headers=headers, body=json_responseBody) + print("Status code:", response.status) + + + except Exception as e: + + print("send(..) failed executing http.request(..):", e) +""" + +# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html +NODEJS_CFN_RESPONSE_CONTENT = r""" +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT-0 + +exports.SUCCESS = "SUCCESS"; +exports.FAILED = "FAILED"; + +exports.send = function(event, context, responseStatus, responseData, physicalResourceId, noEcho) { + + var responseBody = JSON.stringify({ + Status: responseStatus, + Reason: "See the details in CloudWatch Log Stream: " + context.logStreamName, + PhysicalResourceId: physicalResourceId || context.logStreamName, + StackId: event.StackId, + RequestId: event.RequestId, + LogicalResourceId: event.LogicalResourceId, + NoEcho: noEcho || false, + Data: responseData + }); + + console.log("Response body:\n", responseBody); + + var https = require("https"); + var url = require("url"); + + var parsedUrl = url.parse(event.ResponseURL); + var options = { + hostname: parsedUrl.hostname, + port: parsedUrl.port, // Modified line: LS uses port 4566 for https; hard coded 443 causes error + path: parsedUrl.path, + method: "PUT", + headers: { + "content-type": "", + "content-length": responseBody.length + } + }; + + var request = https.request(options, function(response) { + console.log("Status code: " + parseInt(response.statusCode)); + context.done(); + }); + + request.on("error", function(error) { + console.log("send(..) failed executing https.request(..): " + error); + context.done(); + }); + + request.write(responseBody); + request.end(); +} +""" + + +def _runtime_supports_inline_code(runtime: str) -> bool: + return runtime.startswith("python") or runtime.startswith("node") + + +def _get_lambda_code_param( + properties: LambdaFunctionProperties, + _include_arch=False, +): + # code here is mostly taken directly from legacy implementation + code = properties.get("Code", {}).copy() + + # TODO: verify only one of "ImageUri" | "S3Bucket" | "ZipFile" is set + zip_file = code.get("ZipFile") + if zip_file and not _runtime_supports_inline_code(properties["Runtime"]): + raise Exception( + f"Runtime {properties['Runtime']} doesn't support inlining code via the 'ZipFile' property." 
+ ) # TODO: message not validated + if zip_file and not is_base64(zip_file) and not is_zip_file(to_bytes(zip_file)): + tmp_dir = new_tmp_dir() + try: + handler_file = get_handler_file_from_name( + properties["Handler"], runtime=properties["Runtime"] + ) + tmp_file = os.path.join(tmp_dir, handler_file) + save_file(tmp_file, zip_file) + + # CloudFormation only includes cfn-response libs if an import is detected + # TODO: add snapshots for this behavior + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html + if properties["Runtime"].lower().startswith("node") and ( + "require('cfn-response')" in zip_file or 'require("cfn-response")' in zip_file + ): + # the check if cfn-response is used is pretty simplistic and apparently based on simple string matching + # having the import commented out will also lead to cfn-response.js being injected + # this is added under both cfn-response.js and node_modules/cfn-response.js + cfn_response_mod_dir = os.path.join(tmp_dir, "node_modules") + mkdir(cfn_response_mod_dir) + save_file( + os.path.join(cfn_response_mod_dir, "cfn-response.js"), + NODEJS_CFN_RESPONSE_CONTENT, + ) + save_file(os.path.join(tmp_dir, "cfn-response.js"), NODEJS_CFN_RESPONSE_CONTENT) + elif ( + properties["Runtime"].lower().startswith("python") + and "import cfnresponse" in zip_file + ): + save_file(os.path.join(tmp_dir, "cfnresponse.py"), PYTHON_CFN_RESPONSE_CONTENT) + + # create zip file + zip_file = create_zip_file(tmp_dir, get_content=True) + code["ZipFile"] = zip_file + finally: + rm_rf(tmp_dir) + if _include_arch and "Architectures" in properties: + code["Architectures"] = properties.get("Architectures") + return code + + +def _transform_function_to_model(function): + model_properties = [ + "MemorySize", + "Description", + "TracingConfig", + "Timeout", + "Handler", + "SnapStartResponse", + "Role", + "FileSystemConfigs", + "FunctionName", + "Runtime", + "PackageType", + "LoggingConfig", + "Environment", + "Arn", + "EphemeralStorage", + "Architectures", + ] + response_model = util.select_attributes(function, model_properties) + response_model["Arn"] = function["FunctionArn"] + return response_model + + +class LambdaFunctionProvider(ResourceProvider[LambdaFunctionProperties]): + TYPE = "AWS::Lambda::Function" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaFunctionProperties], + ) -> ProgressEvent[LambdaFunctionProperties]: + """ + Create a new resource. 
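+
+ As an illustrative sketch (resource name, role ARN, runtime, and handler code are placeholders, not taken from this codebase), a minimal template fragment handled here could look like:
+
+ {
+   "MyFunction": {
+     "Type": "AWS::Lambda::Function",
+     "Properties": {
+       "Runtime": "python3.12",
+       "Handler": "index.handler",
+       "Role": "arn:aws:iam::000000000000:role/lambda-role",
+       "Code": {"ZipFile": "def handler(event, context):\n    return {}"}
+     }
+   }
+ }
+
+ If FunctionName is omitted, a default name is generated from the stack name and logical resource ID; inline ZipFile code is only accepted for runtimes whose name starts with "python" or "node".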
+ + Primary identifier fields: + - /properties/FunctionName + + Required properties: + - Code + - Role + + Create-only properties: + - /properties/FunctionName + + Read-only properties: + - /properties/Arn + - /properties/SnapStartResponse + - /properties/SnapStartResponse/ApplyOn + - /properties/SnapStartResponse/OptimizationStatus + + IAM permissions required: + - lambda:CreateFunction + - lambda:GetFunction + - lambda:PutFunctionConcurrency + - iam:PassRole + - s3:GetObject + - s3:GetObjectVersion + - ec2:DescribeSecurityGroups + - ec2:DescribeSubnets + - ec2:DescribeVpcs + - elasticfilesystem:DescribeMountTargets + - kms:CreateGrant + - kms:Decrypt + - kms:Encrypt + - kms:GenerateDataKey + - lambda:GetCodeSigningConfig + - lambda:GetFunctionCodeSigningConfig + - lambda:GetLayerVersion + - lambda:GetRuntimeManagementConfig + - lambda:PutRuntimeManagementConfig + - lambda:TagResource + - lambda:GetPolicy + - lambda:AddPermission + - lambda:RemovePermission + - lambda:GetResourcePolicy + - lambda:PutResourcePolicy + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + if not request.custom_context.get(REPEATED_INVOCATION): + request.custom_context[REPEATED_INVOCATION] = True + + name = model.get("FunctionName") + if not name: + name = util.generate_default_name(request.stack_name, request.logical_resource_id) + model["FunctionName"] = name + + kwargs = util.select_attributes( + model, + [ + "Architectures", + "DeadLetterConfig", + "Description", + "FunctionName", + "Handler", + "ImageConfig", + "PackageType", + "Layers", + "MemorySize", + "Runtime", + "Role", + "Timeout", + "TracingConfig", + "VpcConfig", + "LoggingConfig", + ], + ) + if "Timeout" in kwargs: + kwargs["Timeout"] = int(kwargs["Timeout"]) + if "MemorySize" in kwargs: + kwargs["MemorySize"] = int(kwargs["MemorySize"]) + if model_tags := model.get("Tags"): + tags = {} + for tag in model_tags: + tags[tag["Key"]] = tag["Value"] + kwargs["Tags"] = tags + + # botocore/data/lambda/2015-03-31/service-2.json:1161 (EnvironmentVariableValue) + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-environment.html + if "Environment" in model: + environment_variables = model["Environment"].get("Variables", {}) + kwargs["Environment"] = { + "Variables": {k: str(v) for k, v in environment_variables.items()} + } + + kwargs["Code"] = _get_lambda_code_param(model) + create_response = lambda_client.create_function(**kwargs) + model["Arn"] = create_response["FunctionArn"] + + get_fn_response = lambda_client.get_function(FunctionName=model["Arn"]) + match get_fn_response["Configuration"]["State"]: + case "Pending": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + case "Active": + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + case "Inactive": + # This might happen when setting LAMBDA_KEEPALIVE_MS=0 + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + case "Failed": + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + error_code=get_fn_response["Configuration"].get("StateReasonCode", "unknown"), + message=get_fn_response["Configuration"].get("StateReason", "unknown"), + ) + case unknown_state: # invalid state, should technically never happen + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + error_code="InternalException", + message=f"Invalid state returned: 
{unknown_state}", + ) + + def read( + self, + request: ResourceRequest[LambdaFunctionProperties], + ) -> ProgressEvent[LambdaFunctionProperties]: + """ + Fetch resource information + + IAM permissions required: + - lambda:GetFunction + - lambda:GetFunctionCodeSigningConfig + """ + function_name = request.desired_state["FunctionName"] + lambda_client = request.aws_client_factory.lambda_ + get_fn_response = lambda_client.get_function(FunctionName=function_name) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=_transform_function_to_model(get_fn_response["Configuration"]), + ) + + def delete( + self, + request: ResourceRequest[LambdaFunctionProperties], + ) -> ProgressEvent[LambdaFunctionProperties]: + """ + Delete a resource + + IAM permissions required: + - lambda:DeleteFunction + - ec2:DescribeNetworkInterfaces + """ + try: + lambda_client = request.aws_client_factory.lambda_ + lambda_client.delete_function(FunctionName=request.previous_state["FunctionName"]) + except request.aws_client_factory.lambda_.exceptions.ResourceNotFoundException: + pass + # any other exception will be propagated + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[LambdaFunctionProperties], + ) -> ProgressEvent[LambdaFunctionProperties]: + """ + Update a resource + + IAM permissions required: + - lambda:DeleteFunctionConcurrency + - lambda:GetFunction + - lambda:PutFunctionConcurrency + - lambda:ListTags + - lambda:TagResource + - lambda:UntagResource + - lambda:UpdateFunctionConfiguration + - lambda:UpdateFunctionCode + - iam:PassRole + - s3:GetObject + - s3:GetObjectVersion + - ec2:DescribeSecurityGroups + - ec2:DescribeSubnets + - ec2:DescribeVpcs + - elasticfilesystem:DescribeMountTargets + - kms:CreateGrant + - kms:Decrypt + - kms:GenerateDataKey + - lambda:GetRuntimeManagementConfig + - lambda:PutRuntimeManagementConfig + - lambda:PutFunctionCodeSigningConfig + - lambda:DeleteFunctionCodeSigningConfig + - lambda:GetCodeSigningConfig + - lambda:GetFunctionCodeSigningConfig + - lambda:GetPolicy + - lambda:AddPermission + - lambda:RemovePermission + - lambda:GetResourcePolicy + - lambda:PutResourcePolicy + - lambda:DeleteResourcePolicy + """ + client = request.aws_client_factory.lambda_ + + # TODO: handle defaults properly + old_name = request.previous_state["FunctionName"] + new_name = request.desired_state.get("FunctionName") + if new_name and old_name != new_name: + # replacement (!) 
=> shouldn't be handled here but in the engine + self.delete(request) + return self.create(request) + + config_keys = [ + "Description", + "DeadLetterConfig", + "Environment", + "Handler", + "ImageConfig", + "Layers", + "MemorySize", + "Role", + "Runtime", + "Timeout", + "TracingConfig", + "VpcConfig", + "LoggingConfig", + ] + update_config_props = util.select_attributes(request.desired_state, config_keys) + function_name = request.previous_state["FunctionName"] + update_config_props["FunctionName"] = function_name + + if "Timeout" in update_config_props: + update_config_props["Timeout"] = int(update_config_props["Timeout"]) + if "MemorySize" in update_config_props: + update_config_props["MemorySize"] = int(update_config_props["MemorySize"]) + if "Code" in request.desired_state: + code = request.desired_state["Code"] or {} + if not code.get("ZipFile"): + request.logger.debug( + 'Updating code for Lambda "%s" from location: %s', function_name, code + ) + code = _get_lambda_code_param( + request.desired_state, + _include_arch=True, + ) + client.update_function_code(FunctionName=function_name, **code) + client.get_waiter("function_updated_v2").wait(FunctionName=function_name) + if "Environment" in update_config_props: + environment_variables = update_config_props["Environment"].get("Variables", {}) + update_config_props["Environment"]["Variables"] = { + k: str(v) for k, v in environment_variables.items() + } + client.update_function_configuration(**update_config_props) + client.get_waiter("function_updated_v2").wait(FunctionName=function_name) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={**request.previous_state, **request.desired_state}, + ) + + def list( + self, + request: ResourceRequest[LambdaFunctionProperties], + ) -> ProgressEvent[LambdaFunctionProperties]: + functions = request.aws_client_factory.lambda_.list_functions() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[_transform_function_to_model(fn) for fn in functions["Functions"]], + ) diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.schema.json new file mode 100644 index 0000000000000..b1d128047b150 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_function.schema.json @@ -0,0 +1,566 @@ +{ + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "tagProperty": "/properties/Tags", + "cloudFormationSystemTags": true + }, + "handlers": { + "read": { + "permissions": [ + "lambda:GetFunction", + "lambda:GetFunctionCodeSigningConfig" + ] + }, + "create": { + "permissions": [ + "lambda:CreateFunction", + "lambda:GetFunction", + "lambda:PutFunctionConcurrency", + "iam:PassRole", + "s3:GetObject", + "s3:GetObjectVersion", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "elasticfilesystem:DescribeMountTargets", + "kms:CreateGrant", + "kms:Decrypt", + "kms:Encrypt", + "kms:GenerateDataKey", + "lambda:GetCodeSigningConfig", + "lambda:GetFunctionCodeSigningConfig", + "lambda:GetLayerVersion", + "lambda:GetRuntimeManagementConfig", + "lambda:PutRuntimeManagementConfig", + "lambda:TagResource", + "lambda:GetPolicy", + "lambda:AddPermission", + "lambda:RemovePermission", + "lambda:GetResourcePolicy", + "lambda:PutResourcePolicy" + ] + }, + "update": { + "permissions": [ + "lambda:DeleteFunctionConcurrency", + "lambda:GetFunction", + 
"lambda:PutFunctionConcurrency", + "lambda:ListTags", + "lambda:TagResource", + "lambda:UntagResource", + "lambda:UpdateFunctionConfiguration", + "lambda:UpdateFunctionCode", + "iam:PassRole", + "s3:GetObject", + "s3:GetObjectVersion", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "elasticfilesystem:DescribeMountTargets", + "kms:CreateGrant", + "kms:Decrypt", + "kms:GenerateDataKey", + "lambda:GetRuntimeManagementConfig", + "lambda:PutRuntimeManagementConfig", + "lambda:PutFunctionCodeSigningConfig", + "lambda:DeleteFunctionCodeSigningConfig", + "lambda:GetCodeSigningConfig", + "lambda:GetFunctionCodeSigningConfig", + "lambda:GetPolicy", + "lambda:AddPermission", + "lambda:RemovePermission", + "lambda:GetResourcePolicy", + "lambda:PutResourcePolicy", + "lambda:DeleteResourcePolicy" + ] + }, + "list": { + "permissions": [ + "lambda:ListFunctions" + ] + }, + "delete": { + "permissions": [ + "lambda:DeleteFunction", + "ec2:DescribeNetworkInterfaces" + ] + } + }, + "typeName": "AWS::Lambda::Function", + "readOnlyProperties": [ + "/properties/SnapStartResponse", + "/properties/SnapStartResponse/ApplyOn", + "/properties/SnapStartResponse/OptimizationStatus", + "/properties/Arn" + ], + "description": "Resource Type definition for AWS::Lambda::Function in region", + "writeOnlyProperties": [ + "/properties/SnapStart", + "/properties/SnapStart/ApplyOn", + "/properties/Code", + "/properties/Code/ImageUri", + "/properties/Code/S3Bucket", + "/properties/Code/S3Key", + "/properties/Code/S3ObjectVersion", + "/properties/Code/ZipFile" + ], + "createOnlyProperties": [ + "/properties/FunctionName" + ], + "additionalProperties": false, + "primaryIdentifier": [ + "/properties/FunctionName" + ], + "definitions": { + "ImageConfig": { + "additionalProperties": false, + "type": "object", + "properties": { + "WorkingDirectory": { + "description": "WorkingDirectory.", + "type": "string" + }, + "Command": { + "maxItems": 1500, + "uniqueItems": true, + "description": "Command.", + "type": "array", + "items": { + "type": "string" + } + }, + "EntryPoint": { + "maxItems": 1500, + "uniqueItems": true, + "description": "EntryPoint.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "TracingConfig": { + "description": "The function's AWS X-Ray tracing configuration. To sample and record incoming requests, set Mode to Active.", + "additionalProperties": false, + "type": "object", + "properties": { + "Mode": { + "description": "The tracing mode.", + "type": "string", + "enum": [ + "Active", + "PassThrough" + ] + } + } + }, + "VpcConfig": { + "description": "The VPC security groups and subnets that are attached to a Lambda function. When you connect a function to a VPC, Lambda creates an elastic network interface for each combination of security group and subnet in the function's VPC configuration. 
The function can only access resources and the internet through that VPC.", + "additionalProperties": false, + "type": "object", + "properties": { + "Ipv6AllowedForDualStack": { + "description": "A boolean indicating whether IPv6 protocols will be allowed for dual stack subnets", + "type": "boolean" + }, + "SecurityGroupIds": { + "maxItems": 5, + "uniqueItems": false, + "description": "A list of VPC security groups IDs.", + "type": "array", + "items": { + "type": "string" + } + }, + "SubnetIds": { + "maxItems": 16, + "uniqueItems": false, + "description": "A list of VPC subnet IDs.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "DeadLetterConfig": { + "description": "The dead-letter queue for failed asynchronous invocations.", + "additionalProperties": false, + "type": "object", + "properties": { + "TargetArn": { + "pattern": "^(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()$", + "description": "The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.", + "type": "string" + } + } + }, + "RuntimeManagementConfig": { + "additionalProperties": false, + "type": "object", + "properties": { + "UpdateRuntimeOn": { + "description": "Trigger for runtime update", + "type": "string", + "enum": [ + "Auto", + "FunctionUpdate", + "Manual" + ] + }, + "RuntimeVersionArn": { + "description": "Unique identifier for a runtime version arn", + "type": "string" + } + }, + "required": [ + "UpdateRuntimeOn" + ] + }, + "SnapStart": { + "description": "The function's SnapStart setting. When set to PublishedVersions, Lambda creates a snapshot of the execution environment when you publish a function version.", + "additionalProperties": false, + "type": "object", + "properties": { + "ApplyOn": { + "description": "Applying SnapStart setting on function resource type.", + "type": "string", + "enum": [ + "PublishedVersions", + "None" + ] + } + }, + "required": [ + "ApplyOn" + ] + }, + "SnapStartResponse": { + "description": "The function's SnapStart Response. When set to PublishedVersions, Lambda creates a snapshot of the execution environment when you publish a function version.", + "additionalProperties": false, + "type": "object", + "properties": { + "OptimizationStatus": { + "description": "Indicates whether SnapStart is activated for the specified function version.", + "type": "string", + "enum": [ + "On", + "Off" + ] + }, + "ApplyOn": { + "description": "Applying SnapStart setting on function resource type.", + "type": "string", + "enum": [ + "PublishedVersions", + "None" + ] + } + } + }, + "Code": { + "additionalProperties": false, + "type": "object", + "properties": { + "S3ObjectVersion": { + "minLength": 1, + "description": "For versioned objects, the version of the deployment package object to use.", + "type": "string", + "maxLength": 1024 + }, + "S3Bucket": { + "minLength": 3, + "pattern": "^[0-9A-Za-z\\.\\-_]*(? ProgressEvent[LambdaLayerVersionProperties]: + """ + Create a new resource. 
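+
+ An illustrative sketch (bucket and key are placeholders): per the accompanying schema, Content is the only required property, and when LayerName is omitted the handler falls back to a generated "layer-<uid>" name:
+
+ {
+   "MyLayer": {
+     "Type": "AWS::Lambda::LayerVersion",
+     "Properties": {
+       "Content": {"S3Bucket": "my-bucket", "S3Key": "my-layer.zip"},
+       "CompatibleRuntimes": ["python3.12"]
+     }
+   }
+ }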
+ + Primary identifier fields: + - /properties/LayerVersionArn + + Required properties: + - Content + + Create-only properties: + - /properties/CompatibleRuntimes + - /properties/LicenseInfo + - /properties/CompatibleArchitectures + - /properties/LayerName + - /properties/Description + - /properties/Content + + Read-only properties: + - /properties/LayerVersionArn + + IAM permissions required: + - lambda:PublishLayerVersion + - s3:GetObject + - s3:GetObjectVersion + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + if not model.get("LayerName"): + model["LayerName"] = f"layer-{short_uid()}" + response = lambda_client.publish_layer_version(**model) + model["LayerVersionArn"] = response["LayerVersionArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LambdaLayerVersionProperties], + ) -> ProgressEvent[LambdaLayerVersionProperties]: + """ + Fetch resource information + + IAM permissions required: + - lambda:GetLayerVersion + """ + lambda_client = request.aws_client_factory.lambda_ + layer_version_arn = request.desired_state.get("LayerVersionArn") + + try: + _, _, layer_name, version = parse_layer_arn(layer_version_arn) + except AttributeError as e: + LOG.info( + "Invalid Arn: '%s', %s", + layer_version_arn, + e, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + return ProgressEvent( + status=OperationStatus.FAILED, + message="Caught unexpected syntax violation. Consider using ARN.fromString().", + error_code="InternalFailure", + ) + + if not version: + return ProgressEvent( + status=OperationStatus.FAILED, + message="Invalid request provided: Layer Version ARN contains invalid layer name or version", + error_code="InvalidRequest", + ) + + try: + response = lambda_client.get_layer_version_by_arn(Arn=layer_version_arn) + except lambda_client.exceptions.ResourceNotFoundException as e: + return ProgressEvent( + status=OperationStatus.FAILED, + message="The resource you requested does not exist. 
" + f"(Service: Lambda, Status Code: 404, Request ID: {e.response['ResponseMetadata']['RequestId']})", + error_code="NotFound", + ) + layer = util.select_attributes( + response, + [ + "CompatibleRuntimes", + "Description", + "LayerVersionArn", + "CompatibleArchitectures", + ], + ) + layer.setdefault("CompatibleRuntimes", []) + layer.setdefault("CompatibleArchitectures", []) + layer.setdefault("LayerName", layer_name) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=layer, + custom_context=request.custom_context, + ) + + def delete( + self, + request: ResourceRequest[LambdaLayerVersionProperties], + ) -> ProgressEvent[LambdaLayerVersionProperties]: + """ + Delete a resource + + IAM permissions required: + - lambda:GetLayerVersion + - lambda:DeleteLayerVersion + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + version = int(model["LayerVersionArn"].split(":")[-1]) + + lambda_client.delete_layer_version(LayerName=model["LayerName"], VersionNumber=version) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LambdaLayerVersionProperties], + ) -> ProgressEvent[LambdaLayerVersionProperties]: + """ + Update a resource + + + """ + raise NotImplementedError + + def list(self, request: ResourceRequest[Properties]) -> ProgressEvent[Properties]: + """ + List resources + + IAM permissions required: + - lambda:ListLayerVersions + """ + + lambda_client = request.aws_client_factory.lambda_ + + lambda_layer = request.desired_state.get("LayerName") + if not lambda_layer: + return ProgressEvent( + status=OperationStatus.FAILED, + message="Layer Name cannot be empty", + error_code="InvalidRequest", + ) + + layer_versions = lambda_client.list_layer_versions(LayerName=lambda_layer) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + LambdaLayerVersionProperties(LayerVersionArn=layer_version["LayerVersionArn"]) + for layer_version in layer_versions["LayerVersions"] + ], + custom_context=request.custom_context, + ) diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversion.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversion.schema.json new file mode 100644 index 0000000000000..7bc8e494ecd93 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversion.schema.json @@ -0,0 +1,120 @@ +{ + "typeName": "AWS::Lambda::LayerVersion", + "description": "Resource Type definition for AWS::Lambda::LayerVersion", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-lambda.git", + "definitions": { + "Content": { + "type": "object", + "additionalProperties": false, + "properties": { + "S3ObjectVersion": { + "description": "For versioned objects, the version of the layer archive object to use.", + "type": "string" + }, + "S3Bucket": { + "description": "The Amazon S3 bucket of the layer archive.", + "type": "string" + }, + "S3Key": { + "description": "The Amazon S3 key of the layer archive.", + "type": "string" + } + }, + "required": [ + "S3Bucket", + "S3Key" + ] + } + }, + "properties": { + "CompatibleRuntimes": { + "description": "A list of compatible function runtimes. 
Used for filtering with ListLayers and ListLayerVersions.", + "type": "array", + "insertionOrder": false, + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "LicenseInfo": { + "description": "The layer's software license.", + "type": "string" + }, + "Description": { + "description": "The description of the version.", + "type": "string" + }, + "LayerName": { + "description": "The name or Amazon Resource Name (ARN) of the layer.", + "type": "string" + }, + "Content": { + "description": "The function layer archive.", + "$ref": "#/definitions/Content" + }, + "LayerVersionArn": { + "type": "string" + }, + "CompatibleArchitectures": { + "description": "A list of compatible instruction set architectures.", + "type": "array", + "insertionOrder": false, + "uniqueItems": false, + "items": { + "type": "string" + } + } + }, + "additionalProperties": false, + "required": [ + "Content" + ], + "createOnlyProperties": [ + "/properties/CompatibleRuntimes", + "/properties/LicenseInfo", + "/properties/CompatibleArchitectures", + "/properties/LayerName", + "/properties/Description", + "/properties/Content" + ], + "readOnlyProperties": [ + "/properties/LayerVersionArn" + ], + "writeOnlyProperties": [ + "/properties/Content" + ], + "primaryIdentifier": [ + "/properties/LayerVersionArn" + ], + "tagging": { + "taggable": false, + "tagOnCreate": false, + "tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "handlers": { + "create": { + "permissions": [ + "lambda:PublishLayerVersion", + "s3:GetObject", + "s3:GetObjectVersion" + ] + }, + "read": { + "permissions": [ + "lambda:GetLayerVersion" + ] + }, + "delete": { + "permissions": [ + "lambda:GetLayerVersion", + "lambda:DeleteLayerVersion" + ] + }, + "list": { + "permissions": [ + "lambda:ListLayerVersions" + ] + } + } +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversion_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversion_plugin.py new file mode 100644 index 0000000000000..7ebe11a9c7647 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversion_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaLayerVersionProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::LayerVersion" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.aws_lambda_layerversion import ( + LambdaLayerVersionProvider, + ) + + self.factory = LambdaLayerVersionProvider diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission.py new file mode 100644 index 0000000000000..e6622141a165c --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission.py @@ -0,0 +1,134 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class 
LambdaLayerVersionPermissionProperties(TypedDict): + Action: Optional[str] + LayerVersionArn: Optional[str] + Principal: Optional[str] + Id: Optional[str] + OrganizationId: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LambdaLayerVersionPermissionProvider( + ResourceProvider[LambdaLayerVersionPermissionProperties] +): + TYPE = "AWS::Lambda::LayerVersionPermission" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaLayerVersionPermissionProperties], + ) -> ProgressEvent[LambdaLayerVersionPermissionProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - LayerVersionArn + - Action + - Principal + + Create-only properties: + - /properties/OrganizationId + - /properties/Principal + - /properties/Action + - /properties/LayerVersionArn + + Read-only properties: + - /properties/Id + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + model["Id"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + layer_name, version_number = self.layer_name_and_version(model) + + params = util.select_attributes(model, ["Action", "Principal"]) + params["StatementId"] = model["Id"].split("-")[-1] + params["LayerName"] = layer_name + params["VersionNumber"] = version_number + + lambda_client.add_layer_version_permission(**params) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + @staticmethod + def layer_name_and_version(params): + layer_arn = params.get("LayerVersionArn", "") + parts = layer_arn.split(":") + layer_name = parts[6] if ":" in layer_arn else layer_arn + version_number = int(parts[7] if len(parts) > 7 else 1) # TODO fetch latest version number + return layer_name, version_number + + def read( + self, + request: ResourceRequest[LambdaLayerVersionPermissionProperties], + ) -> ProgressEvent[LambdaLayerVersionPermissionProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LambdaLayerVersionPermissionProperties], + ) -> ProgressEvent[LambdaLayerVersionPermissionProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + layer_name, version_number = self.layer_name_and_version(model) + params = { + "StatementId": model["Id"].split("-")[-1], + "LayerName": layer_name, + "VersionNumber": version_number, + } + + lambda_client.remove_layer_version_permission(**params) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LambdaLayerVersionPermissionProperties], + ) -> ProgressEvent[LambdaLayerVersionPermissionProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission.schema.json new file mode 100644 index 0000000000000..423d497ab5e36 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission.schema.json @@ -0,0 +1,39 @@ +{ + "typeName": 
"AWS::Lambda::LayerVersionPermission", + "description": "Resource Type definition for AWS::Lambda::LayerVersionPermission", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "Action": { + "type": "string" + }, + "LayerVersionArn": { + "type": "string" + }, + "OrganizationId": { + "type": "string" + }, + "Principal": { + "type": "string" + } + }, + "required": [ + "LayerVersionArn", + "Action", + "Principal" + ], + "createOnlyProperties": [ + "/properties/OrganizationId", + "/properties/Principal", + "/properties/Action", + "/properties/LayerVersionArn" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission_plugin.py new file mode 100644 index 0000000000000..339765439b293 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_layerversionpermission_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaLayerVersionPermissionProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::LayerVersionPermission" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.aws_lambda_layerversionpermission import ( + LambdaLayerVersionPermissionProvider, + ) + + self.factory = LambdaLayerVersionPermissionProvider diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission.py new file mode 100644 index 0000000000000..315e5a015502e --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission.py @@ -0,0 +1,155 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LambdaPermissionProperties(TypedDict): + Action: Optional[str] + FunctionName: Optional[str] + Principal: Optional[str] + EventSourceToken: Optional[str] + FunctionUrlAuthType: Optional[str] + Id: Optional[str] + PrincipalOrgID: Optional[str] + SourceAccount: Optional[str] + SourceArn: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LambdaPermissionProvider(ResourceProvider[LambdaPermissionProperties]): + TYPE = "AWS::Lambda::Permission" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaPermissionProperties], + ) -> ProgressEvent[LambdaPermissionProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - FunctionName + - Action + - Principal + + Create-only properties: + - /properties/SourceAccount + - /properties/FunctionUrlAuthType + - /properties/SourceArn + - /properties/Principal + - /properties/Action + - /properties/FunctionName + - /properties/EventSourceToken + - /properties/PrincipalOrgID + + Read-only properties: + - /properties/Id + + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + params = util.select_attributes( + model=model, params=["FunctionName", "Action", "Principal", "SourceArn"] + ) + + params["StatementId"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + response = lambda_client.add_permission(**params) + + parsed_statement = json.loads(response["Statement"]) + model["Id"] = parsed_statement["Sid"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LambdaPermissionProperties], + ) -> ProgressEvent[LambdaPermissionProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LambdaPermissionProperties], + ) -> ProgressEvent[LambdaPermissionProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + try: + lambda_client.remove_permission( + FunctionName=model.get("FunctionName"), StatementId=model["Id"] + ) + except lambda_client.exceptions.ResourceNotFoundException: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LambdaPermissionProperties], + ) -> ProgressEvent[LambdaPermissionProperties]: + """ + Update a resource + + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + if not model.get("Id"): + model["Id"] = request.previous_state["Id"] + + params = util.select_attributes( + model=model, params=["FunctionName", "Action", "Principal", "SourceArn"] + ) + + try: + lambda_client.remove_permission( + FunctionName=model.get("FunctionName"), StatementId=model["Id"] + ) + except lambda_client.exceptions.ResourceNotFoundException: + pass + + lambda_client.add_permission(StatementId=model["Id"], **params) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission.schema.json new file mode 100644 index 0000000000000..15f7f30168b41 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission.schema.json @@ -0,0 +1,55 @@ +{ + "typeName": "AWS::Lambda::Permission", + "description": "Resource Type definition for AWS::Lambda::Permission", + "additionalProperties": false, + "properties": { + "FunctionName": { + "type": "string" + }, + "Action": { + "type": "string" + }, + "EventSourceToken": { + "type": "string" + }, + "FunctionUrlAuthType": { + "type": "string" + }, + "SourceArn": { + "type": "string" + }, + "SourceAccount": { + "type": "string" + }, + "PrincipalOrgID": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "Principal": { + "type": "string" 
+ } + }, + "required": [ + "FunctionName", + "Action", + "Principal" + ], + "createOnlyProperties": [ + "/properties/SourceAccount", + "/properties/FunctionUrlAuthType", + "/properties/SourceArn", + "/properties/Principal", + "/properties/Action", + "/properties/FunctionName", + "/properties/EventSourceToken", + "/properties/PrincipalOrgID" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission_plugin.py new file mode 100644 index 0000000000000..4a06f49c62ee4 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_permission_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaPermissionProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::Permission" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.aws_lambda_permission import ( + LambdaPermissionProvider, + ) + + self.factory = LambdaPermissionProvider diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url.py new file mode 100644 index 0000000000000..c9b157dd26a89 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url.py @@ -0,0 +1,131 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LambdaUrlProperties(TypedDict): + AuthType: Optional[str] + TargetFunctionArn: Optional[str] + Cors: Optional[Cors] + FunctionArn: Optional[str] + FunctionUrl: Optional[str] + InvokeMode: Optional[str] + Qualifier: Optional[str] + + +class Cors(TypedDict): + AllowCredentials: Optional[bool] + AllowHeaders: Optional[list[str]] + AllowMethods: Optional[list[str]] + AllowOrigins: Optional[list[str]] + ExposeHeaders: Optional[list[str]] + MaxAge: Optional[int] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LambdaUrlProvider(ResourceProvider[LambdaUrlProperties]): + TYPE = "AWS::Lambda::Url" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaUrlProperties], + ) -> ProgressEvent[LambdaUrlProperties]: + """ + Create a new resource. 
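+
+ An illustrative sketch (the target ARN is a placeholder): per the schema, TargetFunctionArn and AuthType are the only required properties, and the handler passes TargetFunctionArn as the FunctionName parameter of create_function_url_config:
+
+ {
+   "MyUrl": {
+     "Type": "AWS::Lambda::Url",
+     "Properties": {
+       "TargetFunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:my-function",
+       "AuthType": "NONE"
+     }
+   }
+ }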
+ + Primary identifier fields: + - /properties/FunctionArn + + Required properties: + - TargetFunctionArn + - AuthType + + Create-only properties: + - /properties/TargetFunctionArn + - /properties/Qualifier + + Read-only properties: + - /properties/FunctionUrl + - /properties/FunctionArn + + IAM permissions required: + - lambda:CreateFunctionUrlConfig + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + params = util.select_attributes(model, ["Qualifier", "Cors", "AuthType"]) + params["FunctionName"] = model["TargetFunctionArn"] + + response = lambda_client.create_function_url_config(**params) + + model["FunctionArn"] = response["FunctionArn"] + model["FunctionUrl"] = response["FunctionUrl"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LambdaUrlProperties], + ) -> ProgressEvent[LambdaUrlProperties]: + """ + Fetch resource information + + IAM permissions required: + - lambda:GetFunctionUrlConfig + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LambdaUrlProperties], + ) -> ProgressEvent[LambdaUrlProperties]: + """ + Delete a resource + + IAM permissions required: + - lambda:DeleteFunctionUrlConfig + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + params = {"FunctionName": model["TargetFunctionArn"]} + + if qualifier := model.get("Qualifier"): + params["Qualifier"] = qualifier + + lambda_client.delete_function_url_config(**params) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LambdaUrlProperties], + ) -> ProgressEvent[LambdaUrlProperties]: + """ + Update a resource + + IAM permissions required: + - lambda:UpdateFunctionUrlConfig + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url.schema.json new file mode 100644 index 0000000000000..de715b7e1506b --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url.schema.json @@ -0,0 +1,180 @@ +{ + "typeName": "AWS::Lambda::Url", + "description": "Resource Type definition for AWS::Lambda::Url", + "additionalProperties": false, + "tagging": { + "taggable": false + }, + "properties": { + "TargetFunctionArn": { + "description": "The Amazon Resource Name (ARN) of the function associated with the Function URL.", + "type": "string", + "pattern": "^(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:((?!\\d+)[0-9a-zA-Z-_]+))?$" + }, + "Qualifier": { + "description": "The alias qualifier for the target function. If TargetFunctionArn is unqualified then Qualifier must be passed.", + "type": "string", + "minLength": 1, + "maxLength": 128, + "pattern": "((?!^[0-9]+$)([a-zA-Z0-9-_]+))" + }, + "AuthType": { + "description": "Can be either AWS_IAM if the requests are authorized via IAM, or NONE if no authorization is configured on the Function URL.", + "type": "string", + "enum": [ + "AWS_IAM", + "NONE" + ] + }, + "InvokeMode": { + "description": "The invocation mode for the function\u2019s URL. Set to BUFFERED if you want to buffer responses before returning them to the client. 
Set to RESPONSE_STREAM if you want to stream responses, allowing faster time to first byte and larger response payload sizes. If not set, defaults to BUFFERED.", + "type": "string", + "enum": [ + "BUFFERED", + "RESPONSE_STREAM" + ] + }, + "FunctionArn": { + "description": "The full Amazon Resource Name (ARN) of the function associated with the Function URL.", + "type": "string", + "pattern": "^(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:((?!\\d+)[0-9a-zA-Z-_]+))?$" + }, + "FunctionUrl": { + "description": "The generated url for this resource.", + "type": "string" + }, + "Cors": { + "$ref": "#/definitions/Cors" + } + }, + "definitions": { + "AllowHeaders": { + "items": { + "type": "string", + "minLength": 1, + "maxLength": 1024 + }, + "type": "array", + "minItems": 1, + "maxItems": 100, + "insertionOrder": true + }, + "AllowMethods": { + "items": { + "type": "string", + "enum": [ + "GET", + "PUT", + "HEAD", + "POST", + "PATCH", + "DELETE", + "*" + ] + }, + "type": "array", + "minItems": 1, + "maxItems": 6, + "insertionOrder": true + }, + "AllowOrigins": { + "items": { + "type": "string", + "minLength": 1, + "maxLength": 253 + }, + "type": "array", + "minItems": 1, + "maxItems": 100, + "insertionOrder": true + }, + "ExposeHeaders": { + "items": { + "type": "string", + "minLength": 1, + "maxLength": 1024 + }, + "type": "array", + "minItems": 1, + "maxItems": 100, + "insertionOrder": true + }, + "Cors": { + "additionalProperties": false, + "properties": { + "AllowCredentials": { + "description": "Specifies whether credentials are included in the CORS request.", + "type": "boolean" + }, + "AllowHeaders": { + "description": "Represents a collection of allowed headers.", + "$ref": "#/definitions/AllowHeaders" + }, + "AllowMethods": { + "description": "Represents a collection of allowed HTTP methods.", + "$ref": "#/definitions/AllowMethods" + }, + "AllowOrigins": { + "description": "Represents a collection of allowed origins.", + "$ref": "#/definitions/AllowOrigins" + }, + "ExposeHeaders": { + "description": "Represents a collection of exposed headers.", + "$ref": "#/definitions/ExposeHeaders" + }, + "MaxAge": { + "type": "integer", + "minimum": 0, + "maximum": 86400 + } + }, + "type": "object" + } + }, + "required": [ + "TargetFunctionArn", + "AuthType" + ], + "createOnlyProperties": [ + "/properties/TargetFunctionArn", + "/properties/Qualifier" + ], + "readOnlyProperties": [ + "/properties/FunctionUrl", + "/properties/FunctionArn" + ], + "writeOnlyProperties": [ + "/properties/TargetFunctionArn", + "/properties/Qualifier" + ], + "primaryIdentifier": [ + "/properties/FunctionArn" + ], + "handlers": { + "create": { + "permissions": [ + "lambda:CreateFunctionUrlConfig" + ] + }, + "read": { + "permissions": [ + "lambda:GetFunctionUrlConfig" + ] + }, + "update": { + "permissions": [ + "lambda:UpdateFunctionUrlConfig" + ] + }, + "list": { + "permissions": [ + "lambda:ListFunctionUrlConfigs" + ] + }, + "delete": { + "permissions": [ + "lambda:DeleteFunctionUrlConfig" + ] + } + } +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url_plugin.py new file mode 100644 index 0000000000000..afa2d4a2b92b5 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_url_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from 
localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaUrlProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::Url" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.aws_lambda_url import LambdaUrlProvider + + self.factory = LambdaUrlProvider diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version.py new file mode 100644 index 0000000000000..adc04756a59c5 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version.py @@ -0,0 +1,171 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LambdaVersionProperties(TypedDict): + FunctionName: Optional[str] + CodeSha256: Optional[str] + Description: Optional[str] + Id: Optional[str] + ProvisionedConcurrencyConfig: Optional[ProvisionedConcurrencyConfiguration] + Version: Optional[str] + + +class ProvisionedConcurrencyConfiguration(TypedDict): + ProvisionedConcurrentExecutions: Optional[int] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LambdaVersionProvider(ResourceProvider[LambdaVersionProperties]): + TYPE = "AWS::Lambda::Version" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaVersionProperties], + ) -> ProgressEvent[LambdaVersionProperties]: + """ + Create a new resource. 
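+
+        The CloudFormation engine invokes this handler repeatedly: the first
+        call publishes the new version, and follow-up calls poll until the
+        version (and any requested provisioned concurrency) reports ready.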
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - FunctionName + + Create-only properties: + - /properties/FunctionName + + Read-only properties: + - /properties/Id + - /properties/Version + + + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + ctx = request.custom_context + + params = util.select_attributes(model, ["FunctionName", "CodeSha256", "Description"]) + + if not ctx.get(REPEATED_INVOCATION): + response = lambda_client.publish_version(**params) + model["Version"] = response["Version"] + model["Id"] = response["FunctionArn"] + if model.get("ProvisionedConcurrencyConfig"): + lambda_client.put_provisioned_concurrency_config( + FunctionName=model["FunctionName"], + Qualifier=model["Version"], + ProvisionedConcurrentExecutions=model["ProvisionedConcurrencyConfig"][ + "ProvisionedConcurrentExecutions" + ], + ) + ctx[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + if model.get("ProvisionedConcurrencyConfig"): + # Assumption: Ready provisioned concurrency implies the function version is ready + provisioned_concurrency_config = lambda_client.get_provisioned_concurrency_config( + FunctionName=model["FunctionName"], + Qualifier=model["Version"], + ) + if provisioned_concurrency_config["Status"] == "IN_PROGRESS": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + elif provisioned_concurrency_config["Status"] == "READY": + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + else: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + message="", + error_code="VersionStateFailure", # TODO: not parity tested + ) + else: + version = lambda_client.get_function(FunctionName=model["Id"]) + if version["Configuration"]["State"] == "Pending": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + elif version["Configuration"]["State"] == "Active": + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + else: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + message="", + error_code="VersionStateFailure", # TODO: not parity tested + ) + + def read( + self, + request: ResourceRequest[LambdaVersionProperties], + ) -> ProgressEvent[LambdaVersionProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LambdaVersionProperties], + ) -> ProgressEvent[LambdaVersionProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + lambda_client = request.aws_client_factory.lambda_ + + # without qualifier entire function is deleted instead of just version + # provisioned concurrency is automatically deleted upon deleting a function or function version + lambda_client.delete_function(FunctionName=model["Id"], Qualifier=model["Version"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LambdaVersionProperties], + ) -> ProgressEvent[LambdaVersionProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version.schema.json 
b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version.schema.json new file mode 100644 index 0000000000000..f4a0320af6231 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version.schema.json @@ -0,0 +1,52 @@ +{ + "typeName": "AWS::Lambda::Version", + "description": "Resource Type definition for AWS::Lambda::Version", + "additionalProperties": false, + "properties": { + "FunctionName": { + "type": "string" + }, + "ProvisionedConcurrencyConfig": { + "$ref": "#/definitions/ProvisionedConcurrencyConfiguration" + }, + "Description": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "CodeSha256": { + "type": "string" + }, + "Version": { + "type": "string" + } + }, + "definitions": { + "ProvisionedConcurrencyConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "ProvisionedConcurrentExecutions": { + "type": "integer" + } + }, + "required": [ + "ProvisionedConcurrentExecutions" + ] + } + }, + "required": [ + "FunctionName" + ], + "createOnlyProperties": [ + "/properties/FunctionName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/Version" + ] +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version_plugin.py new file mode 100644 index 0000000000000..ad6e92edb426d --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/aws_lambda_version_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaVersionProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::Version" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.aws_lambda_version import ( + LambdaVersionProvider, + ) + + self.factory = LambdaVersionProvider diff --git a/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias.py b/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias.py new file mode 100644 index 0000000000000..044eeed162845 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias.py @@ -0,0 +1,174 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LambdaAliasProperties(TypedDict): + FunctionName: Optional[str] + FunctionVersion: Optional[str] + Name: Optional[str] + Description: Optional[str] + Id: Optional[str] + ProvisionedConcurrencyConfig: Optional[ProvisionedConcurrencyConfiguration] + RoutingConfig: Optional[AliasRoutingConfiguration] + + +class ProvisionedConcurrencyConfiguration(TypedDict): + ProvisionedConcurrentExecutions: Optional[int] + + +class VersionWeight(TypedDict): + FunctionVersion: Optional[str] + FunctionWeight: Optional[float] + + +class AliasRoutingConfiguration(TypedDict): + AdditionalVersionWeights: Optional[list[VersionWeight]] + + +REPEATED_INVOCATION = "repeated_invocation" + + 
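+# Illustrative sketch, not part of the provider: a RoutingConfig that shifts
+# 10% of alias traffic to version "2" while the alias' primary FunctionVersion
+# serves the remaining 90% (the version numbers here are hypothetical):
+#
+#   routing_config: AliasRoutingConfiguration = {
+#       "AdditionalVersionWeights": [{"FunctionVersion": "2", "FunctionWeight": 0.1}]
+#   }
+
+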
+class LambdaAliasProvider(ResourceProvider[LambdaAliasProperties]): + TYPE = "AWS::Lambda::Alias" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LambdaAliasProperties], + ) -> ProgressEvent[LambdaAliasProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - FunctionName + - FunctionVersion + - Name + + Create-only properties: + - /properties/Name + - /properties/FunctionName + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + lambda_ = request.aws_client_factory.lambda_ + + create_params = util.select_attributes( + model, ["FunctionName", "FunctionVersion", "Name", "Description", "RoutingConfig"] + ) + + ctx = request.custom_context + if not ctx.get(REPEATED_INVOCATION): + result = lambda_.create_alias(**create_params) + model["Id"] = result["AliasArn"] + ctx[REPEATED_INVOCATION] = True + + if model.get("ProvisionedConcurrencyConfig"): + lambda_.put_provisioned_concurrency_config( + FunctionName=model["FunctionName"], + Qualifier=model["Name"], + ProvisionedConcurrentExecutions=model["ProvisionedConcurrencyConfig"][ + "ProvisionedConcurrentExecutions" + ], + ) + + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=ctx, + ) + + if ctx.get(REPEATED_INVOCATION) and model.get("ProvisionedConcurrencyConfig"): + # get provisioned config status + result = lambda_.get_provisioned_concurrency_config( + FunctionName=model["FunctionName"], + Qualifier=model["Name"], + ) + if result["Status"] == "IN_PROGRESS": + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + ) + elif result["Status"] == "READY": + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + else: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + message="", + error_code="VersionStateFailure", # TODO: not parity tested + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[LambdaAliasProperties], + ) -> ProgressEvent[LambdaAliasProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LambdaAliasProperties], + ) -> ProgressEvent[LambdaAliasProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + lambda_ = request.aws_client_factory.lambda_ + + try: + # provisioned concurrency is automatically deleted upon deleting a function alias + lambda_.delete_alias( + FunctionName=model["FunctionName"], + Name=model["Name"], + ) + except lambda_.exceptions.ResourceNotFoundException: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=request.previous_state, + ) + + def update( + self, + request: ResourceRequest[LambdaAliasProperties], + ) -> ProgressEvent[LambdaAliasProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias.schema.json b/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias.schema.json new file mode 100644 index 0000000000000..05686e6432be3 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias.schema.json @@ -0,0 +1,89 @@ +{ + "typeName": "AWS::Lambda::Alias", + "description": "Resource 
Type definition for AWS::Lambda::Alias", + "additionalProperties": false, + "properties": { + "FunctionName": { + "type": "string" + }, + "ProvisionedConcurrencyConfig": { + "$ref": "#/definitions/ProvisionedConcurrencyConfiguration" + }, + "Description": { + "type": "string" + }, + "FunctionVersion": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "RoutingConfig": { + "$ref": "#/definitions/AliasRoutingConfiguration" + }, + "Name": { + "type": "string" + } + }, + "definitions": { + "ProvisionedConcurrencyConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "ProvisionedConcurrentExecutions": { + "type": "integer" + } + }, + "required": [ + "ProvisionedConcurrentExecutions" + ] + }, + "VersionWeight": { + "type": "object", + "additionalProperties": false, + "properties": { + "FunctionWeight": { + "type": "number" + }, + "FunctionVersion": { + "type": "string" + } + }, + "required": [ + "FunctionVersion", + "FunctionWeight" + ] + }, + "AliasRoutingConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "AdditionalVersionWeights": { + "type": "array", + "uniqueItems": true, + "items": { + "$ref": "#/definitions/VersionWeight" + } + } + }, + "required": [ + "AdditionalVersionWeights" + ] + } + }, + "required": [ + "FunctionName", + "FunctionVersion", + "Name" + ], + "createOnlyProperties": [ + "/properties/Name", + "/properties/FunctionName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias_plugin.py b/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias_plugin.py new file mode 100644 index 0000000000000..406b05deddd45 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/resource_providers/lambda_alias_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LambdaAliasProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Lambda::Alias" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.lambda_.resource_providers.lambda_alias import LambdaAliasProvider + + self.factory = LambdaAliasProvider diff --git a/localstack-core/localstack/services/lambda_/runtimes.py b/localstack-core/localstack/services/lambda_/runtimes.py new file mode 100644 index 0000000000000..3fa96216257f6 --- /dev/null +++ b/localstack-core/localstack/services/lambda_/runtimes.py @@ -0,0 +1,168 @@ +"""This Lambda Runtimes reference defines everything around Lambda runtimes to facilitate adding new runtimes.""" + +from typing import Optional + +from localstack.aws.api.lambda_ import Runtime + +# LocalStack Lambda runtimes support policy +# We support all Lambda runtimes currently actively supported at AWS. +# Further, we aim to provide best-effort support for deprecated runtimes at least until function updates are blocked, +# ideally a bit longer to help users migrate their Lambda runtimes. However, we do not actively test them anymore. + +# HOWTO add a new Lambda runtime: +# 1. Update botocore and generate the Lambda API stubs using `python3 -m localstack.aws.scaffold upgrade` +# => This usually happens automatically through the Github Action "Update ASF APIs" +# 2. 
Add the new runtime to these variables below: +# a) `IMAGE_MAPPING` +# b) `RUNTIMES_AGGREGATED` +# c) `SNAP_START_SUPPORTED_RUNTIMES` if supported (currently only new Java runtimes) +# 3. Re-create snapshots for Lambda tests with the marker @markers.lambda_runtime_update +# => Filter the tests using pytest -m lambda_runtime_update (i.e., additional arguments in PyCharm) +# Depending on the runtime, `test_lambda_runtimes.py` might require further snapshot updates. +# 4. Add the new runtime to these variables below: +# a) `VALID_RUNTIMES` matching the order of the snapshots +# b) `VALID_LAYER_RUNTIMES` matching the order of the snapshots +# 5. Run the unit test to check the runtime setup: +# tests.unit.services.lambda_.test_api_utils.TestApiUtils.test_check_runtime +# 6. Review special tests including: +# a) [ext] tests.aws.services.lambda_.test_lambda_endpoint_injection +# 7. Before merging, run the ext integration tests to cover transparent endpoint injection testing. +# 8. Add the new runtime to the K8 image build: https://github.com/localstack/lambda-images +# 9. Inform the web team to update the resource browser (consider offering an endpoint in the future) + +# Mapping from a) AWS Lambda runtime identifier => b) official AWS image on Amazon ECR Public +# a) AWS Lambda runtimes: https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html +# b) Amazon ECR Lambda images: https://gallery.ecr.aws/lambda +# => Synchronize the order with the "Supported runtimes" under "AWS Lambda runtimes" (a) +# => Add comments for deprecated runtimes using => => +IMAGE_MAPPING: dict[Runtime, str] = { + Runtime.nodejs22_x: "nodejs:22", + Runtime.nodejs20_x: "nodejs:20", + Runtime.nodejs18_x: "nodejs:18", + Runtime.nodejs16_x: "nodejs:16", + Runtime.nodejs14_x: "nodejs:14", # deprecated Dec 4, 2023 => Jan 9, 2024 => Feb 8, 2024 + Runtime.nodejs12_x: "nodejs:12", # deprecated Mar 31, 2023 => Mar 31, 2023 => Apr 30, 2023 + Runtime.python3_13: "python:3.13", + Runtime.python3_12: "python:3.12", + Runtime.python3_11: "python:3.11", + Runtime.python3_10: "python:3.10", + Runtime.python3_9: "python:3.9", + Runtime.python3_8: "python:3.8", + Runtime.python3_7: "python:3.7", # deprecated Dec 4, 2023 => Jan 9, 2024 => Feb 8, 2024 + Runtime.java21: "java:21", + Runtime.java17: "java:17", + Runtime.java11: "java:11", + Runtime.java8_al2: "java:8.al2", + Runtime.java8: "java:8", # deprecated Jan 8, 2024 => Feb 8, 2024 => Mar 12, 2024 + Runtime.dotnet8: "dotnet:8", + # dotnet7 (container only) + Runtime.dotnet6: "dotnet:6", + Runtime.dotnetcore3_1: "dotnet:core3.1", # deprecated Apr 3, 2023 => Apr 3, 2023 => May 3, 2023 + Runtime.go1_x: "go:1", # deprecated Jan 8, 2024 => Feb 8, 2024 => Mar 12, 2024 + Runtime.ruby3_4: "ruby:3.4", + Runtime.ruby3_3: "ruby:3.3", + Runtime.ruby3_2: "ruby:3.2", + Runtime.ruby2_7: "ruby:2.7", # deprecated Dec 7, 2023 => Jan 9, 2024 => Feb 8, 2024 + Runtime.provided_al2023: "provided:al2023", + Runtime.provided_al2: "provided:al2", + Runtime.provided: "provided:alami", # deprecated Jan 8, 2024 => Feb 8, 2024 => Mar 12, 2024 +} + + +# A list of all deprecated Lambda runtimes, with upgrade recommendations +# ideally ordered by deprecation date (following the AWS docs). +# LocalStack can still provide best-effort support. + +# TODO: Consider removing these as AWS is not using them anymore and they likely get outdated. +# We currently use them in LocalStack logs as bonus recommendation (DevX). 
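+#
+#  A minimal sketch of how the mapping below could drive such a log line
+#  (the logging call is illustrative, not the actual LocalStack output):
+#
+#      if runtime in DEPRECATED_RUNTIMES_UPGRADES:
+#          LOG.warning("Runtime %s is deprecated; consider %s instead",
+#                      runtime, DEPRECATED_RUNTIMES_UPGRADES[runtime])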
+# When updating the recommendation, +# please regenerate all tests with @markers.lambda_runtime_update +DEPRECATED_RUNTIMES_UPGRADES: dict[Runtime, Optional[Runtime]] = { + # deprecated Jan 8, 2024 => Feb 8, 2024 => Mar 12, 2024 + Runtime.java8: Runtime.java21, + # deprecated Jan 8, 2024 => Feb 8, 2024 => Mar 12, 2024 + Runtime.go1_x: Runtime.provided_al2023, + # deprecated Jan 8, 2024 => Feb 8, 2024 => Mar 12, 2024 + Runtime.provided: Runtime.provided_al2023, + # deprecated Dec 7, 2023 => Jan 9, 2024 => Feb 8, 2024 + Runtime.ruby2_7: Runtime.ruby3_2, + # deprecated Dec 4, 2023 => Jan 9, 2024 => Feb 8, 2024 + Runtime.nodejs14_x: Runtime.nodejs20_x, + # deprecated Dec 4, 2023 => Jan 9, 2024 => Feb 8, 2024 + Runtime.python3_7: Runtime.python3_12, + # deprecated Apr 3, 2023 => Apr 3, 2023 => May 3, 2023 + Runtime.dotnetcore3_1: Runtime.dotnet6, + # deprecated Mar 31, 2023 => Mar 31, 2023 => Apr 30, 2023 + Runtime.nodejs12_x: Runtime.nodejs18_x, +} + + +DEPRECATED_RUNTIMES: list[Runtime] = list(DEPRECATED_RUNTIMES_UPGRADES.keys()) + +# An unordered list of all AWS-supported runtimes. +SUPPORTED_RUNTIMES: list[Runtime] = list(set(IMAGE_MAPPING.keys()) - set(DEPRECATED_RUNTIMES)) + +# A temporary list of missing runtimes not yet supported in LocalStack. Used for modular updates. +MISSING_RUNTIMES = [] + +# An unordered list of all Lambda runtimes supported by LocalStack. +ALL_RUNTIMES: list[Runtime] = list(IMAGE_MAPPING.keys()) + +# Grouped supported runtimes by language for testing. Moved here from `lambda_utils` for easier runtime updates. +# => Remove deprecated runtimes from this testing list +RUNTIMES_AGGREGATED = { + "nodejs": [ + Runtime.nodejs22_x, + Runtime.nodejs20_x, + Runtime.nodejs18_x, + Runtime.nodejs16_x, + ], + "python": [ + Runtime.python3_13, + Runtime.python3_12, + Runtime.python3_11, + Runtime.python3_10, + Runtime.python3_9, + Runtime.python3_8, + ], + "java": [ + Runtime.java21, + Runtime.java17, + Runtime.java11, + Runtime.java8_al2, + ], + "ruby": [ + Runtime.ruby3_2, + Runtime.ruby3_3, + Runtime.ruby3_4, + ], + "dotnet": [ + Runtime.dotnet6, + Runtime.dotnet8, + ], + "provided": [ + Runtime.provided_al2023, + Runtime.provided_al2, + ], +} + +# An unordered list of all tested runtimes listed in `RUNTIMES_AGGREGATED` +TESTED_RUNTIMES: list[Runtime] = [ + runtime for runtime_group in RUNTIMES_AGGREGATED.values() for runtime in runtime_group +] + +# An unordered list of snapstart-enabled runtimes. Related to snapshots in test_snapstart_exceptions +# https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html +SNAP_START_SUPPORTED_RUNTIMES = [ + Runtime.java11, + Runtime.java17, + Runtime.java21, + Runtime.python3_12, + Runtime.python3_13, + Runtime.dotnet8, +] + +# An ordered list of all Lambda runtimes considered valid by AWS. Matching snapshots in test_create_lambda_exceptions +VALID_RUNTIMES: str = "[nodejs20.x, provided.al2023, python3.12, python3.13, nodejs22.x, java17, nodejs16.x, dotnet8, python3.10, java11, python3.11, dotnet6, java21, nodejs18.x, provided.al2, ruby3.3, ruby3.4, java8.al2, ruby3.2, python3.8, python3.9]" +# An ordered list of all Lambda runtimes for layers considered valid by AWS. 
Matching snapshots in test_layer_exceptions
+VALID_LAYER_RUNTIMES: str = "[ruby2.6, dotnetcore1.0, python3.7, nodejs8.10, nasa, ruby2.7, python2.7-greengrass, dotnetcore2.0, python3.8, java21, dotnet6, dotnetcore2.1, python3.9, java11, nodejs6.10, provided, dotnetcore3.1, dotnet8, java25, java17, nodejs, nodejs4.3, java8.al2, go1.x, dotnet10, nodejs20.x, go1.9, byol, nodejs10.x, provided.al2023, nodejs22.x, python3.10, java8, nodejs12.x, python3.11, nodejs24.x, nodejs8.x, python3.12, nodejs14.x, nodejs8.9, python3.13, python3.14, nodejs16.x, provided.al2, nodejs4.3-edge, nodejs18.x, ruby3.2, python3.4, ruby3.3, ruby3.4, ruby2.5, python3.6, python2.7]"
diff --git a/localstack-core/localstack/services/lambda_/urlrouter.py b/localstack-core/localstack/services/lambda_/urlrouter.py
new file mode 100644
index 0000000000000..992909d0e57a2
--- /dev/null
+++ b/localstack-core/localstack/services/lambda_/urlrouter.py
@@ -0,0 +1,224 @@
+"""Routing for Lambda function URLs: https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html"""
+
+import base64
+import json
+import logging
+from datetime import datetime
+from http import HTTPStatus
+
+from rolo.request import restore_payload
+
+from localstack.aws.api.lambda_ import InvocationType
+from localstack.aws.protocol.serializer import gen_amzn_requestid
+from localstack.http import Request, Response, Router
+from localstack.http.dispatcher import Handler
+from localstack.services.lambda_.api_utils import FULL_FN_ARN_PATTERN
+from localstack.services.lambda_.invocation.lambda_models import InvocationResult
+from localstack.services.lambda_.invocation.lambda_service import LambdaService
+from localstack.services.lambda_.invocation.models import lambda_stores
+from localstack.utils.aws.request_context import AWS_REGION_REGEX
+from localstack.utils.strings import long_uid, to_bytes, to_str
+from localstack.utils.time import TIMESTAMP_READABLE_FORMAT, mktime, timestamp
+from localstack.utils.urls import localstack_host
+
+LOG = logging.getLogger(__name__)
+
+
+class FunctionUrlRouter:
+    router: Router[Handler]
+    lambda_service: LambdaService
+
+    def __init__(self, router: Router[Handler], lambda_service: LambdaService):
+        self.router = router
+        self.registered = False
+        self.lambda_service = lambda_service
+
+    def register_routes(self) -> None:
+        if self.registered:
+            LOG.debug("Skipped Lambda URL route registration (routes already registered).")
+            return
+        self.registered = True
+
+        LOG.debug("Registering parameterized Lambda routes.")
+
+        self.router.add(
+            "/",
+            host=f"<api_id>.lambda-url.<regex('{AWS_REGION_REGEX}'):region>.<regex('.*'):server>",
+            endpoint=self.handle_lambda_url_invocation,
+            defaults={"path": ""},
+        )
+        self.router.add(
+            "/<path:path>",
+            host=f"<api_id>.lambda-url.<regex('{AWS_REGION_REGEX}'):region>.<regex('.*'):server>",
+            endpoint=self.handle_lambda_url_invocation,
+        )
+
+    def handle_lambda_url_invocation(
+        self,
+        request: Request,
+        api_id: str,
+        region: str,
+        **url_params: str,
+    ) -> Response:
+        response = Response()
+        response.mimetype = "application/json"
+
+        lambda_url_config = None
+
+        for account_id in lambda_stores.keys():
+            store = lambda_stores[account_id][region]
+            for fn in store.functions.values():
+                for url_config in fn.function_url_configs.values():
+                    # AWS tags are case sensitive, but domains are not.
+                    # So we normalize them here to maximize both AWS and RFC
+                    # conformance
+                    if url_config.url_id.lower() == api_id.lower():
+                        lambda_url_config = url_config
+
+        # TODO: check if errors are different when the URL has existed previously
+        if lambda_url_config is None:
+            LOG.info("Lambda URL %s does not exist", request.url)
+            response.data = '{"Message":null}'
+            response.status = 403
+            response.headers["x-amzn-ErrorType"] = "AccessDeniedException"
+            # TODO: x-amzn-requestid
+            return response
+
+        event = event_for_lambda_url(api_id, request)
+
+        match = FULL_FN_ARN_PATTERN.search(lambda_url_config.function_arn).groupdict()
+
+        result = self.lambda_service.invoke(
+            function_name=match.get("function_name"),
+            qualifier=match.get("qualifier"),
+            account_id=match.get("account_id"),
+            region=match.get("region_name"),
+            invocation_type=InvocationType.RequestResponse,
+            client_context="{}",  # TODO: test
+            payload=to_bytes(json.dumps(event)),
+            request_id=gen_amzn_requestid(),
+        )
+        if result.is_error:
+            response = Response("Internal Server Error", HTTPStatus.BAD_GATEWAY)
+        else:
+            response = lambda_result_to_response(result)
+        return response
+
+
+def event_for_lambda_url(api_id: str, request: Request) -> dict:
+    partitioned_uri = request.full_path.partition("?")
+    raw_path = partitioned_uri[0]
+    raw_query_string = partitioned_uri[2]
+
+    query_string_parameters = {k: ",".join(request.args.getlist(k)) for k in request.args.keys()}
+
+    now = datetime.utcnow()
+    readable = timestamp(time=now, format=TIMESTAMP_READABLE_FORMAT)
+    if not any(char in readable for char in ["+", "-"]):
+        readable += "+0000"
+
+    data = restore_payload(request)
+    headers = request.headers
+    source_ip = headers.get("Remote-Addr", "")
+    request_context = {
+        "accountId": "anonymous",
+        "apiId": api_id,
+        "domainName": headers.get("Host", ""),
+        "domainPrefix": api_id,
+        "http": {
+            "method": request.method,
+            "path": raw_path,
+            "protocol": "HTTP/1.1",
+            "sourceIp": source_ip,
+            "userAgent": headers.get("User-Agent", ""),
+        },
+        "requestId": long_uid(),
+        "routeKey": "$default",
+        "stage": "$default",
+        "time": readable,
+        "timeEpoch": mktime(ts=now, millis=True),
+    }
+
+    content_type = headers.get("Content-Type", "").lower()
+    content_type_is_text = any(text_type in content_type for text_type in ["text", "json", "xml"])
+
+    is_base64_encoded = not (data.isascii() and content_type_is_text) if data else False
+    body = base64.b64encode(data).decode() if is_base64_encoded else data
+    if isinstance(body, bytes):
+        body = to_str(body)
+
+    ignored_headers = ["connection", "x-localstack-tgt-api", "x-localstack-request-url"]
+    event_headers = {k.lower(): v for k, v in headers.items() if k.lower() not in ignored_headers}
+
+    event_headers.update(
+        {
+            "x-amzn-tls-cipher-suite": "ECDHE-RSA-AES128-GCM-SHA256",
+            "x-amzn-tls-version": "TLSv1.2",
+            "x-forwarded-proto": "http",
+            "x-forwarded-for": source_ip,
+            "x-forwarded-port": str(localstack_host().port),
+        }
+    )
+
+    event = {
+        "version": "2.0",
+        "routeKey": "$default",
+        "rawPath": raw_path,
+        "rawQueryString": raw_query_string,
+        "headers": event_headers,
+        "queryStringParameters": query_string_parameters,
+        "requestContext": request_context,
+        "body": body,
+        "isBase64Encoded": is_base64_encoded,
+    }
+
+    if not data:
+        event.pop("body")
+
+    return event
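+
+
+# For orientation, a trimmed sketch of the event produced above for a plain
+# GET request (all values are illustrative):
+#
+#   {
+#       "version": "2.0",
+#       "routeKey": "$default",
+#       "rawPath": "/hello",
+#       "rawQueryString": "name=world",
+#       "queryStringParameters": {"name": "world"},
+#       "requestContext": {"http": {"method": "GET", "path": "/hello", ...}, ...},
+#       "isBase64Encoded": False,
+#   }
+
+
+def lambda_result_to_response(result: InvocationResult):
+    response = Response()
+
+    # Set default headers
+    response.headers.update(
+        {
+            "Content-Type": "application/json",
+            "Connection": "keep-alive",
+            "x-amzn-requestid":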
result.request_id, + "x-amzn-trace-id": long_uid(), # TODO: get the proper trace id here + } + ) + + original_payload = to_str(result.payload) + parsed_result = json.loads(original_payload) + + # patch to fix whitespaces + # TODO: check if this is a downstream issue of invocation result serialization + original_payload = json.dumps(parsed_result, separators=(",", ":")) + + if isinstance(parsed_result, str): + # a string is a special case here and is returned as-is + response.data = parsed_result + elif isinstance(parsed_result, dict): + # if it's a dict it might be a proper response + if isinstance(parsed_result.get("headers"), dict): + response.headers.update(parsed_result.get("headers")) + if "statusCode" in parsed_result: + response.status_code = int(parsed_result["statusCode"]) + if "body" not in parsed_result: + # TODO: test if providing a status code but no body actually works + response.data = original_payload + elif isinstance(parsed_result.get("body"), dict): + response.data = json.dumps(parsed_result.get("body")) + elif parsed_result.get("isBase64Encoded", False): + body_bytes = to_bytes(to_str(parsed_result.get("body", ""))) + decoded_body_bytes = base64.b64decode(body_bytes) + response.data = decoded_body_bytes + else: + response.data = parsed_result.get("body") + else: + response.data = original_payload + + return response diff --git a/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/itemprocessor/__init__.py b/localstack-core/localstack/services/logs/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_execution/state_map/itemprocessor/__init__.py rename to localstack-core/localstack/services/logs/__init__.py diff --git a/localstack-core/localstack/services/logs/models.py b/localstack-core/localstack/services/logs/models.py new file mode 100644 index 0000000000000..5e2ba973cab93 --- /dev/null +++ b/localstack-core/localstack/services/logs/models.py @@ -0,0 +1,18 @@ +from typing import Dict + +from moto.logs.models import LogsBackend as MotoLogsBackend +from moto.logs.models import logs_backends as moto_logs_backend + +from localstack.services.stores import AccountRegionBundle, BaseStore, CrossRegionAttribute + + +def get_moto_logs_backend(account_id: str, region_name: str) -> MotoLogsBackend: + return moto_logs_backend[account_id][region_name] + + +class LogsStore(BaseStore): + # maps resource ARN to tags + TAGS: Dict[str, Dict[str, str]] = CrossRegionAttribute(default=dict) + + +logs_stores = AccountRegionBundle("logs", LogsStore) diff --git a/localstack-core/localstack/services/logs/provider.py b/localstack-core/localstack/services/logs/provider.py new file mode 100644 index 0000000000000..2ded5f5d31f0d --- /dev/null +++ b/localstack-core/localstack/services/logs/provider.py @@ -0,0 +1,467 @@ +import base64 +import copy +import io +import json +import logging +from gzip import GzipFile +from typing import Callable, Dict + +from moto.core.utils import unix_time_millis +from moto.logs.models import LogEvent, LogsBackend +from moto.logs.models import LogGroup as MotoLogGroup +from moto.logs.models import LogStream as MotoLogStream + +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.logs import ( + AmazonResourceName, + DescribeLogGroupsRequest, + DescribeLogGroupsResponse, + DescribeLogStreamsRequest, + DescribeLogStreamsResponse, + Entity, + InputLogEvents, + InvalidParameterException, + KmsKeyId, + ListTagsForResourceResponse, + 
    ListTagsLogGroupResponse,
+    LogGroupClass,
+    LogGroupName,
+    LogsApi,
+    LogStreamName,
+    PutLogEventsResponse,
+    ResourceNotFoundException,
+    SequenceToken,
+    TagKeyList,
+    TagList,
+    Tags,
+)
+from localstack.aws.connect import connect_to
+from localstack.services import moto
+from localstack.services.logs.models import get_moto_logs_backend, logs_stores
+from localstack.services.moto import call_moto
+from localstack.services.plugins import ServiceLifecycleHook
+from localstack.utils.aws import arns
+from localstack.utils.aws.client_types import ServicePrincipal
+from localstack.utils.bootstrap import is_api_enabled
+from localstack.utils.common import is_number
+from localstack.utils.patch import patch
+
+LOG = logging.getLogger(__name__)
+
+
+class LogsProvider(LogsApi, ServiceLifecycleHook):
+    def __init__(self):
+        super().__init__()
+        self.cw_client = connect_to().cloudwatch
+
+    def put_log_events(
+        self,
+        context: RequestContext,
+        log_group_name: LogGroupName,
+        log_stream_name: LogStreamName,
+        log_events: InputLogEvents,
+        sequence_token: SequenceToken = None,
+        entity: Entity = None,
+        **kwargs,
+    ) -> PutLogEventsResponse:
+        logs_backend = get_moto_logs_backend(context.account_id, context.region)
+        metric_filters = logs_backend.filters.metric_filters if is_api_enabled("cloudwatch") else []
+        for metric_filter in metric_filters:
+            pattern = metric_filter.get("filterPattern", "")
+            transformations = metric_filter.get("metricTransformations", [])
+            matches = get_pattern_matcher(pattern)
+            for log_event in log_events:
+                if matches(pattern, log_event):
+                    for tf in transformations:
+                        value = tf.get("metricValue") or "1"
+                        if "$size" in value:
+                            LOG.info(
+                                "Expression not yet supported for log filter metricValue: %s", value
+                            )
+                        value = float(value) if is_number(value) else 1
+                        data = [{"MetricName": tf["metricName"], "Value": value}]
+                        try:
+                            client = connect_to(
+                                aws_access_key_id=context.account_id, region_name=context.region
+                            ).cloudwatch
+                            client.put_metric_data(Namespace=tf["metricNamespace"], MetricData=data)
+                        except Exception as e:
+                            LOG.info(
+                                "Unable to put metric data for matching CloudWatch log events: %s", e
+                            )
+        return call_moto(context)
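+
+    # For illustration only: a metric filter in the shape the loop above
+    # consumes (field values are hypothetical):
+    #
+    #   {
+    #       "filterPattern": "ERROR",
+    #       "metricTransformations": [
+    #           {"metricName": "ErrorCount", "metricNamespace": "MyApp", "metricValue": "1"}
+    #       ],
+    #   }
+
+    @handler("DescribeLogGroups", expand=False)
+    def describe_log_groups(
+        self, context: RequestContext, request: DescribeLogGroupsRequest
+    ) -> DescribeLogGroupsResponse:
+        region_backend = get_moto_logs_backend(context.account_id, context.region)
+
+        prefix: str = request.get("logGroupNamePrefix", "")
+        pattern: str = request.get("logGroupNamePattern", "")
+
+        if pattern and prefix:
+            raise InvalidParameterException(
+                "LogGroup name prefix and LogGroup name pattern are mutually exclusive parameters."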
+ ) + + copy_groups = copy.deepcopy(region_backend.groups) + + groups = [ + group.to_describe_dict() + for name, group in copy_groups.items() + if not (prefix or pattern) + or (prefix and name.startswith(prefix)) + or (pattern and pattern in name) + ] + + groups = sorted(groups, key=lambda x: x["logGroupName"]) + return DescribeLogGroupsResponse(logGroups=groups) + + @handler("DescribeLogStreams", expand=False) + def describe_log_streams( + self, context: RequestContext, request: DescribeLogStreamsRequest + ) -> DescribeLogStreamsResponse: + log_group_name: str = request.get("logGroupName") + log_group_identifier: str = request.get("logGroupIdentifier") + + if log_group_identifier and log_group_name: + raise CommonServiceException( + "ValidationException", + "LogGroup name and LogGroup ARN are mutually exclusive parameters.", + ) + request_copy = copy.deepcopy(request) + if log_group_identifier: + request_copy.pop("logGroupIdentifier") + # identifier can be arn or name + request_copy["logGroupName"] = log_group_identifier.split(":")[-1] + + return moto.call_moto_with_request(context, request_copy) + + def create_log_group( + self, + context: RequestContext, + log_group_name: LogGroupName, + kms_key_id: KmsKeyId = None, + tags: Tags = None, + log_group_class: LogGroupClass = None, + **kwargs, + ) -> None: + call_moto(context) + if tags: + resource_arn = arns.log_group_arn( + group_name=log_group_name, account_id=context.account_id, region_name=context.region + ) + store = logs_stores[context.account_id][context.region] + store.TAGS.setdefault(resource_arn, {}).update(tags) + + def list_tags_for_resource( + self, context: RequestContext, resource_arn: AmazonResourceName, **kwargs + ) -> ListTagsForResourceResponse: + self._check_resource_arn_tagging(resource_arn) + store = logs_stores[context.account_id][context.region] + tags = store.TAGS.get(resource_arn, {}) + return ListTagsForResourceResponse(tags=tags) + + def list_tags_log_group( + self, context: RequestContext, log_group_name: LogGroupName, **kwargs + ) -> ListTagsLogGroupResponse: + # deprecated implementation, new one: list_tags_for_resource + self._verify_log_group_exists( + group_name=log_group_name, account_id=context.account_id, region_name=context.region + ) + resource_arn = arns.log_group_arn( + group_name=log_group_name, account_id=context.account_id, region_name=context.region + ) + store = logs_stores[context.account_id][context.region] + tags = store.TAGS.get(resource_arn, {}) + return ListTagsLogGroupResponse(tags=tags) + + def untag_resource( + self, + context: RequestContext, + resource_arn: AmazonResourceName, + tag_keys: TagKeyList, + **kwargs, + ) -> None: + self._check_resource_arn_tagging(resource_arn) + store = logs_stores[context.account_id][context.region] + tags_stored = store.TAGS.get(resource_arn, {}) + for tag in tag_keys: + tags_stored.pop(tag, None) + + def untag_log_group( + self, context: RequestContext, log_group_name: LogGroupName, tags: TagList, **kwargs + ) -> None: + # deprecated implementation -> new one: untag_resource + self._verify_log_group_exists( + group_name=log_group_name, account_id=context.account_id, region_name=context.region + ) + resource_arn = arns.log_group_arn( + group_name=log_group_name, account_id=context.account_id, region_name=context.region + ) + store = logs_stores[context.account_id][context.region] + tags_stored = store.TAGS.get(resource_arn, {}) + for tag in tags: + tags_stored.pop(tag, None) + + def tag_resource( + self, context: RequestContext, resource_arn: 
AmazonResourceName, tags: Tags, **kwargs + ) -> None: + self._check_resource_arn_tagging(resource_arn) + store = logs_stores[context.account_id][context.region] + store.TAGS.get(resource_arn, {}).update(tags or {}) + + def tag_log_group( + self, context: RequestContext, log_group_name: LogGroupName, tags: Tags, **kwargs + ) -> None: + # deprecated implementation -> new one: tag_resource + self._verify_log_group_exists( + group_name=log_group_name, account_id=context.account_id, region_name=context.region + ) + resource_arn = arns.log_group_arn( + group_name=log_group_name, account_id=context.account_id, region_name=context.region + ) + store = logs_stores[context.account_id][context.region] + store.TAGS.get(resource_arn, {}).update(tags or {}) + + def _verify_log_group_exists(self, group_name: LogGroupName, account_id: str, region_name: str): + store = get_moto_logs_backend(account_id, region_name) + if group_name not in store.groups: + raise ResourceNotFoundException() + + def _check_resource_arn_tagging(self, resource_arn): + service = arns.extract_service_from_arn(resource_arn) + region = arns.extract_region_from_arn(resource_arn) + account = arns.extract_account_id_from_arn(resource_arn) + + # AWS currently only supports tagging for Log Group and Destinations + # LS: we only verify if log group exists, and create tags for other resources + if service.lower().startswith("log-group:"): + self._verify_log_group_exists( + service.split(":")[-1], account_id=account, region_name=region + ) + + +def get_pattern_matcher(pattern: str) -> Callable[[str, Dict], bool]: + """Returns a pattern matcher. Can be patched by plugins to return a more sophisticated pattern matcher.""" + return lambda _pattern, _log_event: True + + +@patch(LogsBackend.put_subscription_filter) +def moto_put_subscription_filter(fn, self, *args, **kwargs): + log_group_name = args[0] + filter_name = args[1] + filter_pattern = args[2] + destination_arn = args[3] + role_arn = args[4] + + log_group = self.groups.get(log_group_name) + log_group_arn = arns.log_group_arn(log_group_name, self.account_id, self.region_name) + + if not log_group: + raise ResourceNotFoundException("The specified log group does not exist.") + + arn_data = arns.parse_arn(destination_arn) + + if role_arn: + factory = connect_to.with_assumed_role( + role_arn=role_arn, + service_principal=ServicePrincipal.logs, + region_name=arn_data["region"], + ) + else: + factory = connect_to(aws_access_key_id=arn_data["account"], region_name=arn_data["region"]) + + if ":lambda:" in destination_arn: + client = factory.lambda_.request_metadata( + source_arn=log_group_arn, service_principal=ServicePrincipal.logs + ) + try: + client.get_function(FunctionName=destination_arn) + except Exception: + raise InvalidParameterException( + "destinationArn for vendor lambda cannot be used with roleArn" + ) + + elif ":kinesis:" in destination_arn: + client = factory.kinesis.request_metadata( + source_arn=log_group_arn, service_principal=ServicePrincipal.logs + ) + stream_name = arns.kinesis_stream_name(destination_arn) + try: + # Kinesis-Local DescribeStream does not support StreamArn param, so use StreamName instead + client.describe_stream(StreamName=stream_name) + except Exception: + raise InvalidParameterException( + "Could not deliver message to specified Kinesis stream. " + "Ensure that the Kinesis stream exists and is ACTIVE." 
+ ) + + elif ":firehose:" in destination_arn: + client = factory.firehose.request_metadata( + source_arn=log_group_arn, service_principal=ServicePrincipal.logs + ) + firehose_name = arns.firehose_name(destination_arn) + try: + client.describe_delivery_stream(DeliveryStreamName=firehose_name) + except Exception: + raise InvalidParameterException( + "Could not deliver message to specified Firehose stream. " + "Ensure that the Firehose stream exists and is ACTIVE." + ) + + else: + raise InvalidParameterException( + f"PutSubscriptionFilter operation cannot work with destinationArn for vendor {arn_data['service']}" + ) + + if filter_pattern: + for stream in log_group.streams.values(): + stream.filter_pattern = filter_pattern + + log_group.put_subscription_filter(filter_name, filter_pattern, destination_arn, role_arn) + + +@patch(MotoLogStream.put_log_events, pass_target=False) +def moto_put_log_events(self: "MotoLogStream", log_events): + # TODO: call/patch upstream method here, instead of duplicating the code! + self.last_ingestion_time = int(unix_time_millis()) + self.stored_bytes += sum([len(log_event["message"]) for log_event in log_events]) + events = [LogEvent(self.last_ingestion_time, log_event) for log_event in log_events] + self.events += events + self.upload_sequence_token += 1 + + # apply filter_pattern -> only forward what matches the pattern + for subscription_filter in self.log_group.subscription_filters.values(): + if subscription_filter.filter_pattern: + # TODO only patched in pro + matches = get_pattern_matcher(subscription_filter.filter_pattern) + events = [ + LogEvent(self.last_ingestion_time, event) + for event in log_events + if matches(subscription_filter.filter_pattern, event) + ] + + if events and subscription_filter.destination_arn: + destination_arn = subscription_filter.destination_arn + log_events = [ + { + "id": str(event.event_id), + "timestamp": event.timestamp, + "message": event.message, + } + for event in events + ] + + data = { + "messageType": "DATA_MESSAGE", + "owner": self.account_id, # AWS Account ID of the originating log data + "logGroup": self.log_group.name, + "logStream": self.log_stream_name, + "subscriptionFilters": [subscription_filter.name], + "logEvents": log_events, + } + + output = io.BytesIO() + with GzipFile(fileobj=output, mode="w") as f: + f.write(json.dumps(data, separators=(",", ":")).encode("utf-8")) + payload_gz_encoded = output.getvalue() + event = {"awslogs": {"data": base64.b64encode(output.getvalue()).decode("utf-8")}} + + log_group_arn = arns.log_group_arn(self.log_group.name, self.account_id, self.region) + arn_data = arns.parse_arn(destination_arn) + + if subscription_filter.role_arn: + factory = connect_to.with_assumed_role( + role_arn=subscription_filter.role_arn, + service_principal=ServicePrincipal.logs, + region_name=arn_data["region"], + ) + else: + factory = connect_to( + aws_access_key_id=arn_data["account"], region_name=arn_data["region"] + ) + + if ":lambda:" in destination_arn: + client = factory.lambda_.request_metadata( + source_arn=log_group_arn, service_principal=ServicePrincipal.logs + ) + client.invoke(FunctionName=destination_arn, Payload=json.dumps(event)) + + if ":kinesis:" in destination_arn: + client = factory.kinesis.request_metadata( + source_arn=log_group_arn, service_principal=ServicePrincipal.logs + ) + stream_name = arns.kinesis_stream_name(destination_arn) + client.put_record( + StreamName=stream_name, + Data=payload_gz_encoded, + PartitionKey=self.log_group.name, + ) + + if ":firehose:" in 
destination_arn: + client = factory.firehose.request_metadata( + source_arn=log_group_arn, service_principal=ServicePrincipal.logs + ) + firehose_name = arns.firehose_name(destination_arn) + client.put_record( + DeliveryStreamName=firehose_name, + Record={"Data": payload_gz_encoded}, + ) + + return "{:056d}".format(self.upload_sequence_token) + + +@patch(MotoLogStream.filter_log_events) +def moto_filter_log_events( + filter_log_events, self, start_time, end_time, filter_pattern, *args, **kwargs +): + # moto currently raises an exception if filter_patterns is None, so we skip it + events = filter_log_events( + self, *args, start_time=start_time, end_time=end_time, filter_pattern=None, **kwargs + ) + + if not filter_pattern: + return events + + matches = get_pattern_matcher(filter_pattern) + return [event for event in events if matches(filter_pattern, event)] + + +@patch(MotoLogGroup.create_log_stream) +def moto_create_log_stream(target, self, log_stream_name): + target(self, log_stream_name) + stream = self.streams[log_stream_name] + filters = self.describe_subscription_filters() + stream.filter_pattern = filters[0]["filterPattern"] if filters else None + + +@patch(MotoLogGroup.to_describe_dict) +def moto_to_describe_dict(target, self): + # reported race condition in https://github.com/localstack/localstack/issues/8011 + # making copy of "streams" dict here to avoid issues while summing up storedBytes + copy_streams = copy.deepcopy(self.streams) + # parity tests shows that the arn ends with ":*" + arn = self.arn if self.arn.endswith(":*") else f"{self.arn}:*" + log_group = { + "arn": arn, + "creationTime": self.creation_time, + "logGroupName": self.name, + "metricFilterCount": 0, + "storedBytes": sum(s.stored_bytes for s in copy_streams.values()), + } + if self.retention_in_days: + log_group["retentionInDays"] = self.retention_in_days + if self.kms_key_id: + log_group["kmsKeyId"] = self.kms_key_id + return log_group + + +@patch(MotoLogGroup.get_log_events) +def moto_get_log_events( + target, self, log_stream_name, start_time, end_time, limit, next_token, start_from_head +): + if log_stream_name not in self.streams: + raise ResourceNotFoundException("The specified log stream does not exist.") + return target(self, log_stream_name, start_time, end_time, limit, next_token, start_from_head) diff --git a/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/__init__.py b/localstack-core/localstack/services/logs/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/__init__.py rename to localstack-core/localstack/services/logs/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup.py b/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup.py new file mode 100644 index 0000000000000..6dd6b66190bf3 --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup.py @@ -0,0 +1,144 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LogsLogGroupProperties(TypedDict): + Arn: Optional[str] + DataProtectionPolicy: Optional[dict] + 
KmsKeyId: Optional[str] + LogGroupName: Optional[str] + RetentionInDays: Optional[int] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LogsLogGroupProvider(ResourceProvider[LogsLogGroupProperties]): + TYPE = "AWS::Logs::LogGroup" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LogsLogGroupProperties], + ) -> ProgressEvent[LogsLogGroupProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/LogGroupName + + Create-only properties: + - /properties/LogGroupName + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - logs:DescribeLogGroups + - logs:CreateLogGroup + - logs:PutRetentionPolicy + - logs:TagLogGroup + - logs:GetDataProtectionPolicy + - logs:PutDataProtectionPolicy + - logs:CreateLogDelivery + - s3:REST.PUT.OBJECT + - firehose:TagDeliveryStream + - logs:PutResourcePolicy + - logs:DescribeResourcePolicies + + """ + model = request.desired_state + logs = request.aws_client_factory.logs + + if not model.get("LogGroupName"): + model["LogGroupName"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + logs.create_log_group(logGroupName=model["LogGroupName"]) + + describe_result = logs.describe_log_groups(logGroupNamePrefix=model["LogGroupName"]) + model["Arn"] = describe_result["logGroups"][0]["arn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LogsLogGroupProperties], + ) -> ProgressEvent[LogsLogGroupProperties]: + """ + Fetch resource information + + IAM permissions required: + - logs:DescribeLogGroups + - logs:ListTagsLogGroup + - logs:GetDataProtectionPolicy + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LogsLogGroupProperties], + ) -> ProgressEvent[LogsLogGroupProperties]: + """ + Delete a resource + + IAM permissions required: + - logs:DescribeLogGroups + - logs:DeleteLogGroup + - logs:DeleteDataProtectionPolicy + """ + model = request.desired_state + logs = request.aws_client_factory.logs + + logs.delete_log_group(logGroupName=model["LogGroupName"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LogsLogGroupProperties], + ) -> ProgressEvent[LogsLogGroupProperties]: + """ + Update a resource + + IAM permissions required: + - logs:DescribeLogGroups + - logs:AssociateKmsKey + - logs:DisassociateKmsKey + - logs:PutRetentionPolicy + - logs:DeleteRetentionPolicy + - logs:TagLogGroup + - logs:UntagLogGroup + - logs:GetDataProtectionPolicy + - logs:PutDataProtectionPolicy + - logs:CreateLogDelivery + - s3:REST.PUT.OBJECT + - firehose:TagDeliveryStream + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup.schema.json b/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup.schema.json new file mode 100644 index 0000000000000..5e0c8f041671b --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup.schema.json @@ -0,0 +1,168 @@ +{ + "typeName": "AWS::Logs::LogGroup", + "description": "Resource schema for 
AWS::Logs::LogGroup", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-logs.git", + "definitions": { + "Tag": { + "description": "A key-value pair to associate with a resource.", + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string", + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., :, /, =, +, - and @.", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., :, /, =, +, - and @.", + "minLength": 0, + "maxLength": 256 + } + }, + "required": [ + "Key", + "Value" + ] + } + }, + "properties": { + "LogGroupName": { + "description": "The name of the log group. If you don't specify a name, AWS CloudFormation generates a unique ID for the log group.", + "type": "string", + "minLength": 1, + "maxLength": 512, + "pattern": "^[.\\-_/#A-Za-z0-9]{1,512}\\Z" + }, + "KmsKeyId": { + "description": "The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.", + "type": "string", + "maxLength": 256, + "pattern": "^arn:[a-z0-9-]+:kms:[a-z0-9-]+:\\d{12}:(key|alias)/.+\\Z" + }, + "DataProtectionPolicy": { + "description": "The body of the policy document you want to use for this topic.\n\nYou can only add one policy per topic.\n\nThe policy must be in JSON string format.\n\nLength Constraints: Maximum length of 30720", + "type": "object" + }, + "RetentionInDays": { + "description": "The number of days to retain the log events in the specified log group. 
Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, and 3653.", + "type": "integer", + "enum": [ + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653 + ] + }, + "Tags": { + "description": "An array of key-value pairs to apply to this resource.", + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Arn": { + "description": "The CloudWatch log group ARN.", + "type": "string" + } + }, + "handlers": { + "create": { + "permissions": [ + "logs:DescribeLogGroups", + "logs:CreateLogGroup", + "logs:PutRetentionPolicy", + "logs:TagLogGroup", + "logs:GetDataProtectionPolicy", + "logs:PutDataProtectionPolicy", + "logs:CreateLogDelivery", + "s3:REST.PUT.OBJECT", + "firehose:TagDeliveryStream", + "logs:PutResourcePolicy", + "logs:DescribeResourcePolicies" + ] + }, + "read": { + "permissions": [ + "logs:DescribeLogGroups", + "logs:ListTagsLogGroup", + "logs:GetDataProtectionPolicy" + ] + }, + "update": { + "permissions": [ + "logs:DescribeLogGroups", + "logs:AssociateKmsKey", + "logs:DisassociateKmsKey", + "logs:PutRetentionPolicy", + "logs:DeleteRetentionPolicy", + "logs:TagLogGroup", + "logs:UntagLogGroup", + "logs:GetDataProtectionPolicy", + "logs:PutDataProtectionPolicy", + "logs:CreateLogDelivery", + "s3:REST.PUT.OBJECT", + "firehose:TagDeliveryStream" + ] + }, + "delete": { + "permissions": [ + "logs:DescribeLogGroups", + "logs:DeleteLogGroup", + "logs:DeleteDataProtectionPolicy" + ] + }, + "list": { + "permissions": [ + "logs:DescribeLogGroups", + "logs:ListTagsLogGroup" + ], + "handlerSchema": { + "properties": { + "LogGroupName": { + "$ref": "resource-schema.json#/properties/LogGroupName" + } + }, + "required": [] + } + } + }, + "createOnlyProperties": [ + "/properties/LogGroupName" + ], + "readOnlyProperties": [ + "/properties/Arn" + ], + "primaryIdentifier": [ + "/properties/LogGroupName" + ], + "additionalProperties": false, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + } +} diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup_plugin.py b/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup_plugin.py new file mode 100644 index 0000000000000..5dd8087a87561 --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_loggroup_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LogsLogGroupProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Logs::LogGroup" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.logs.resource_providers.aws_logs_loggroup import ( + LogsLogGroupProvider, + ) + + self.factory = LogsLogGroupProvider diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream.py b/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream.py new file mode 100644 index 0000000000000..4cb21339b6e77 --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream.py @@ -0,0 +1,108 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import 
annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LogsLogStreamProperties(TypedDict): + LogGroupName: Optional[str] + Id: Optional[str] + LogStreamName: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LogsLogStreamProvider(ResourceProvider[LogsLogStreamProperties]): + TYPE = "AWS::Logs::LogStream" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LogsLogStreamProperties], + ) -> ProgressEvent[LogsLogStreamProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - LogGroupName + + Create-only properties: + - /properties/LogGroupName + - /properties/LogStreamName + + Read-only properties: + - /properties/Id + + """ + model = request.desired_state + logs = request.aws_client_factory.logs + + if not model.get("LogStreamName"): + model["LogStreamName"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + logs.create_log_stream( + logGroupName=model["LogGroupName"], logStreamName=model["LogStreamName"] + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LogsLogStreamProperties], + ) -> ProgressEvent[LogsLogStreamProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LogsLogStreamProperties], + ) -> ProgressEvent[LogsLogStreamProperties]: + """ + Delete a resource + """ + model = request.desired_state + logs = request.aws_client_factory.logs + + logs.delete_log_stream( + logGroupName=model["LogGroupName"], logStreamName=model["LogStreamName"] + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LogsLogStreamProperties], + ) -> ProgressEvent[LogsLogStreamProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream.schema.json b/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream.schema.json new file mode 100644 index 0000000000000..1f42acd0f2f4f --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream.schema.json @@ -0,0 +1,29 @@ +{ + "typeName": "AWS::Logs::LogStream", + "description": "Resource Type definition for AWS::Logs::LogStream", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "LogGroupName": { + "type": "string" + }, + "LogStreamName": { + "type": "string" + } + }, + "required": [ + "LogGroupName" + ], + "createOnlyProperties": [ + "/properties/LogGroupName", + "/properties/LogStreamName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream_plugin.py b/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream_plugin.py new file mode 100644 index 
0000000000000..578e23c4ae628 --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_logstream_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LogsLogStreamProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Logs::LogStream" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.logs.resource_providers.aws_logs_logstream import ( + LogsLogStreamProvider, + ) + + self.factory = LogsLogStreamProvider diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter.py b/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter.py new file mode 100644 index 0000000000000..26f204e52e78e --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter.py @@ -0,0 +1,123 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class LogsSubscriptionFilterProperties(TypedDict): + DestinationArn: Optional[str] + FilterPattern: Optional[str] + LogGroupName: Optional[str] + Distribution: Optional[str] + FilterName: Optional[str] + RoleArn: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class LogsSubscriptionFilterProvider(ResourceProvider[LogsSubscriptionFilterProperties]): + TYPE = "AWS::Logs::SubscriptionFilter" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[LogsSubscriptionFilterProperties], + ) -> ProgressEvent[LogsSubscriptionFilterProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/FilterName + - /properties/LogGroupName + + Required properties: + - DestinationArn + - FilterPattern + - LogGroupName + + Create-only properties: + - /properties/FilterName + - /properties/LogGroupName + + + + IAM permissions required: + - iam:PassRole + - logs:PutSubscriptionFilter + - logs:DescribeSubscriptionFilters + + """ + model = request.desired_state + logs = request.aws_client_factory.logs + + logs.put_subscription_filter( + logGroupName=model["LogGroupName"], + filterName=model.get("FilterName") or model["LogGroupName"], + filterPattern=model["FilterPattern"], + destinationArn=model["DestinationArn"], + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[LogsSubscriptionFilterProperties], + ) -> ProgressEvent[LogsSubscriptionFilterProperties]: + """ + Fetch resource information + + IAM permissions required: + - logs:DescribeSubscriptionFilters + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[LogsSubscriptionFilterProperties], + ) -> ProgressEvent[LogsSubscriptionFilterProperties]: + """ + Delete a resource + + IAM permissions required: + - logs:DeleteSubscriptionFilter + """ + model = request.desired_state + logs = request.aws_client_factory.logs + + logs.delete_subscription_filter( + logGroupName=model["LogGroupName"], + filterName=model.get("FilterName") or model["LogGroupName"], + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[LogsSubscriptionFilterProperties], + ) -> ProgressEvent[LogsSubscriptionFilterProperties]: + """ + Update a resource + + IAM permissions required: + - logs:PutSubscriptionFilter + - logs:DescribeSubscriptionFilters + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter.schema.json b/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter.schema.json new file mode 100644 index 0000000000000..2198cbc7aca94 --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter.schema.json @@ -0,0 +1,97 @@ +{ + "typeName": "AWS::Logs::SubscriptionFilter", + "$schema": "https://raw.githubusercontent.com/aws-cloudformation/cloudformation-cli/master/src/rpdk/core/data/schema/provider.definition.schema.v1.json", + "description": "Subscription filters allow you to subscribe to a real-time stream of log events and have them delivered to a specific destination.", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-logs", + "tagging": { + "taggable": false, + "tagOnCreate": false, + "tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "replacementStrategy": "delete_then_create", + "properties": { + "FilterName": { + "description": "The name of the filter generated by the resource.", + "type": "string" + }, + "DestinationArn": { + "description": "The Amazon Resource Name (ARN) of the destination.", + "type": "string" + }, + "FilterPattern": { + "description": "The filtering expressions that restrict what gets delivered to the destination AWS resource.", + "type": "string" + }, + "LogGroupName": { + "description": "Existing log group that you want to associate with this filter.", + "type": "string" + }, + "RoleArn": { + "description": "The ARN of an IAM role that grants CloudWatch Logs
permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.", + "type": "string" + }, + "Distribution": { + "description": "The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to random for a more even distribution. This property is only applicable when the destination is an Amazon Kinesis stream.", + "type": "string", + "enum": [ + "Random", + "ByLogStream" + ] + } + }, + "handlers": { + "create": { + "permissions": [ + "iam:PassRole", + "logs:PutSubscriptionFilter", + "logs:DescribeSubscriptionFilters" + ] + }, + "read": { + "permissions": [ + "logs:DescribeSubscriptionFilters" + ] + }, + "update": { + "permissions": [ + "logs:PutSubscriptionFilter", + "logs:DescribeSubscriptionFilters" + ] + }, + "delete": { + "permissions": [ + "logs:DeleteSubscriptionFilter" + ] + }, + "list": { + "permissions": [ + "logs:DescribeSubscriptionFilters" + ], + "handlerSchema": { + "properties": { + "LogGroupName": { + "$ref": "resource-schema.json#/properties/LogGroupName" + } + }, + "required": [ + "LogGroupName" + ] + } + } + }, + "required": [ + "DestinationArn", + "FilterPattern", + "LogGroupName" + ], + "createOnlyProperties": [ + "/properties/FilterName", + "/properties/LogGroupName" + ], + "primaryIdentifier": [ + "/properties/FilterName", + "/properties/LogGroupName" + ], + "additionalProperties": false +} diff --git a/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter_plugin.py b/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter_plugin.py new file mode 100644 index 0000000000000..def55ff386045 --- /dev/null +++ b/localstack-core/localstack/services/logs/resource_providers/aws_logs_subscriptionfilter_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class LogsSubscriptionFilterProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Logs::SubscriptionFilter" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.logs.resource_providers.aws_logs_subscriptionfilter import ( + LogsSubscriptionFilterProvider, + ) + + self.factory = LogsSubscriptionFilterProvider diff --git a/localstack/services/moto.py b/localstack-core/localstack/services/moto.py similarity index 77% rename from localstack/services/moto.py rename to localstack-core/localstack/services/moto.py index 795bb7ca73249..c98989c39a967 100644 --- a/localstack/services/moto.py +++ b/localstack-core/localstack/services/moto.py @@ -1,22 +1,22 @@ """ This module provides tools to call moto using moto and botocore internals without going through the moto HTTP server. 
""" + +import copy import sys from functools import lru_cache from typing import Callable, Optional, Union import moto.backends as moto_backends -from moto.core import BackendDict +from moto.core.base_backend import BackendDict from moto.core.exceptions import RESTError -from moto.moto_server.utilities import RegexConverter +from rolo.router import RegexConverter from werkzeug.exceptions import NotFound from werkzeug.routing import Map, Rule -from localstack import __version__ as localstack_version -from localstack import config +from localstack import constants from localstack.aws.api import ( CommonServiceException, - HttpRequest, RequestContext, ServiceRequest, ServiceResponse, @@ -28,11 +28,13 @@ ) from localstack.aws.skeleton import DispatchTable from localstack.constants import DEFAULT_AWS_ACCOUNT_ID +from localstack.constants import VERSION as LOCALSTACK_VERSION from localstack.http import Response +from localstack.http.request import Request, get_full_raw_path, get_raw_current_url -MotoDispatcher = Callable[[HttpRequest, str, dict], Response] +MotoDispatcher = Callable[[Request, str, dict], Response] -user_agent = f"Localstack/{localstack_version} Python/{sys.version.split(' ')[0]}" +user_agent = f"Localstack/{LOCALSTACK_VERSION} Python/{sys.version.split(' ')[0]}" def call_moto(context: RequestContext, include_response_metadata=False) -> ServiceResponse: @@ -64,8 +66,10 @@ def call_moto_with_request( parameters=service_request, region=context.region, ) - - local_context.request.headers.update(context.request.headers) + # we keep the headers from the original request, but override them with the ones created from the `service_request` + headers = copy.deepcopy(context.request.headers) + headers.update(local_context.request.headers) + local_context.request.headers = headers return call_moto(local_context) @@ -104,11 +108,19 @@ def dispatch_to_moto(context: RequestContext) -> Response: service = context.service request = context.request + # Werkzeug might have an issue (to be determined where the responsibility lies) with proxied requests where the + # HTTP location is a full URI and not only a path. + # We need to use the full_raw_url as moto does some path decoding (in S3 for example) + full_raw_path = get_full_raw_path(request) + # remove the query string from the full path to do the matching of the request + raw_path_only = full_raw_path.split("?")[0] # this is where we skip the HTTP roundtrip between the moto server and the boto client - dispatch = get_dispatcher(service.service_name, request.path) - + dispatch = get_dispatcher(service.service_name, raw_path_only) try: - response = dispatch(request, request.url, request.headers) + raw_url = get_raw_current_url( + request.scheme, request.host, request.root_path, full_raw_path + ) + response = dispatch(request, raw_url, request.headers) if not response: # some operations are only partially implemented by moto # e.g. 
the request will be resolved, but then the request method is not handled @@ -132,7 +144,7 @@ def get_dispatcher(service: str, path: str) -> MotoDispatcher: rule = next(url_map.iter_rules()) return rule.endpoint - matcher = url_map.bind(config.LOCALSTACK_HOSTNAME) + matcher = url_map.bind(constants.LOCALHOST) try: endpoint, _ = matcher.match(path_info=path) except NotFound as e: @@ -169,7 +181,7 @@ def load_moto_routing_table(service: str) -> Map: backend = backend_dict["global"] url_map = Map() - url_map.converters["regex"] = RegexConverter + url_map.converters["regex"] = _PartIsolatingRegexConverter for url_path, handler in backend.flask_paths.items(): # Some URL patterns in moto have optional trailing slashes, for example the route53 pattern: @@ -184,3 +196,16 @@ url_map.add(Rule(url_path, endpoint=endpoint, strict_slashes=strict_slashes)) return url_map + + +class _PartIsolatingRegexConverter(RegexConverter): + """ + Werkzeug converter with disabled part isolation. + This converter is equivalent to moto.moto_server.utilities.RegexConverter. + It is duplicated here to avoid a transitive import of flask. + """ + + part_isolating = False + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) diff --git a/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/__init__.py b/localstack-core/localstack/services/opensearch/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_execution/state_task/__init__.py rename to localstack-core/localstack/services/opensearch/__init__.py diff --git a/localstack/services/opensearch/cluster.py b/localstack-core/localstack/services/opensearch/cluster.py similarity index 85% rename from localstack/services/opensearch/cluster.py rename to localstack-core/localstack/services/opensearch/cluster.py index 10fe5352ae65b..cae1916c90b09 100644 --- a/localstack/services/opensearch/cluster.py +++ b/localstack-core/localstack/services/opensearch/cluster.py @@ -17,9 +17,9 @@ from localstack.http.client import SimpleRequestsClient from localstack.http.proxy import ProxyHandler from localstack.services.edge import ROUTER -from localstack.services.infra import DEFAULT_BACKEND_HOST from localstack.services.opensearch import versions from localstack.services.opensearch.packages import elasticsearch_package, opensearch_package +from localstack.utils.aws.arns import parse_arn from localstack.utils.common import ( ShellCommandThread, chmod_r, @@ -31,9 +31,11 @@ from localstack.utils.run import FuncThread from localstack.utils.serving import Server from localstack.utils.sync import poll_condition +from localstack.utils.urls import localstack_host LOG = logging.getLogger(__name__) INTERNAL_USER_AUTH = ("localstack-internal", "localstack-internal") +DEFAULT_BACKEND_HOST = "127.0.0.1" CommandSettings = Dict[str, str] @@ -46,13 +48,18 @@ class Directories(NamedTuple): backup: str -def get_cluster_health_status(url: str, auth: Tuple[str, str] | None) -> Optional[str]: +def get_cluster_health_status( + url: str, auth: Tuple[str, str] | None, host: str | None = None +) -> Optional[str]: """ Queries the health endpoint of OpenSearch/Elasticsearch and returns either the status ('green', 'yellow', ...) or None if the response returned a non-200 response. Authentication needs to be set in case the security plugin is enabled.
""" - resp = requests.get(url + "/_cluster/health", verify=False, auth=auth) + headers = {} + if host: + headers["Host"] = host + resp = requests.get(url + "/_cluster/health", headers=headers, verify=False, auth=auth) if resp and resp.ok: opensearch_status = resp.json() @@ -82,7 +89,7 @@ def init_directories(dirs: Directories): chmod_r(dirs.backup, 0o777) # clear potentially existing lock files (which cause problems since ES 7.10) - for d, dirs, files in os.walk(dirs.data, True): + for d, _, files in os.walk(dirs.data, True): for f in files: if f.endswith(".lock"): rm_rf(os.path.join(d, f)) @@ -126,7 +133,7 @@ def build_cluster_run_command(cluster_bin: str, settings: CommandSettings) -> Li :param settings: dictionary where each item will be set as a command arguments :return: list of strings for the command with the settings to be executed as a shell command """ - cmd_settings = [f"-E {k}={v}" for k, v, in settings.items()] + cmd_settings = [f"-E {k}={v}" for k, v in settings.items()] return [cluster_bin] + cmd_settings @@ -236,51 +243,49 @@ def register_cluster( strategy = config.OPENSEARCH_ENDPOINT_STRATEGY # custom endpoints override any endpoint strategy if custom_endpoint and custom_endpoint.enabled: - LOG.debug(f"Registering route from {host}{path} to {endpoint.proxy.forward_base_url}") - assert not ( - host == config.LOCALSTACK_HOSTNAME and (not path or path == "/") - ), "trying to register an illegal catch all route" + LOG.debug("Registering route from %s%s to %s", host, path, endpoint.proxy.forward_base_url) + assert not (host == localstack_host().host and (not path or path == "/")), ( + "trying to register an illegal catch all route" + ) rules.append( ROUTER.add( path=path, endpoint=endpoint, - host=f'{host}', + host=f"{host}", ) ) rules.append( ROUTER.add( f"{path}/", endpoint=endpoint, - host=f'{host}', + host=f"{host}", ) ) elif strategy == "domain": - LOG.debug(f"Registering route from {host} to {endpoint.proxy.forward_base_url}") - assert ( - not host == config.LOCALSTACK_HOSTNAME - ), "trying to register an illegal catch all route" + LOG.debug("Registering route from %s to %s", host, endpoint.proxy.forward_base_url) + assert not host == localstack_host().host, "trying to register an illegal catch all route" rules.append( ROUTER.add( "/", endpoint=endpoint, - host=f"{host}", + host=f"{host}", ) ) rules.append( ROUTER.add( "/", endpoint=endpoint, - host=f"{host}", + host=f"{host}", ) ) elif strategy == "path": - LOG.debug(f"Registering route from {path} to {endpoint.proxy.forward_base_url}") + LOG.debug("Registering route from %s to %s", path, endpoint.proxy.forward_base_url) assert path and not path == "/", "trying to register an illegal catch all route" rules.append(ROUTER.add(path, endpoint=endpoint)) rules.append(ROUTER.add(f"{path}/", endpoint=endpoint)) elif strategy != "port": - LOG.warning(f"Attempted to register route for cluster with invalid strategy '{strategy}'") + LOG.warning("Attempted to register route for cluster with invalid strategy '%s'", strategy) return rules @@ -303,6 +308,10 @@ def __init__( self.is_security_enabled = self.security_options and self.security_options.enabled self.auth = security_options.auth if self.is_security_enabled else None + parsed_arn = parse_arn(arn) + self.account_id = parsed_arn["account"] + self.region_name = parsed_arn["region"] + @property def default_version(self) -> str: return constants.OPENSEARCH_DEFAULT_VERSION @@ -411,7 +420,7 @@ def _base_settings(self, dirs) -> CommandSettings: "http.publish_port": self.port, 
"transport.port": "0", "network.host": self.host, - "http.compression": "false", + "http.compression": "true", "path.data": f'"{dirs.data}"', "path.repo": f'"{dirs.backup}"', "discovery.type": "single-node", @@ -435,9 +444,9 @@ def _base_settings(self, dirs) -> CommandSettings: settings["plugins.security.ssl.http.pemcert_filepath"] = "cert.crt" settings["plugins.security.ssl.http.pemtrustedcas_filepath"] = "cert.crt" settings["plugins.security.allow_default_init_securityindex"] = "true" - settings[ - "plugins.security.restapi.roles_enabled" - ] = "all_access,security_rest_api_access" + settings["plugins.security.restapi.roles_enabled"] = ( + "all_access,security_rest_api_access" + ) return settings @@ -456,11 +465,23 @@ def _create_run_command( return cmd def _create_env_vars(self, directories: Directories) -> Dict: - return { + env_vars = { + "JAVA_HOME": os.path.join(directories.install, "jdk"), "OPENSEARCH_JAVA_OPTS": os.environ.get("OPENSEARCH_JAVA_OPTS", "-Xms200m -Xmx600m"), "OPENSEARCH_TMPDIR": directories.tmp, } + # if the "opensearch-knn" plugin exists and has a "lib" directory, add it to the LD_LIBRARY_PATH + # see https://forum.opensearch.org/t/issue-with-opensearch-knn/12633 + knn_lib_dir = os.path.join(directories.install, "plugins", "opensearch-knn", "lib") + if os.path.isdir(knn_lib_dir): + prefix = ( + f"{os.environ.get('LD_LIBRARY_PATH')}:" if "LD_LIBRARY_PATH" in os.environ else "" + ) + env_vars["LD_LIBRARY_PATH"] = prefix + f"{knn_lib_dir}" + + return env_vars + def _log_listener(self, line, **_kwargs): # logging the port before each line to be able to connect logs to specific instances LOG.info("[%s] %s", self.port, line.rstrip()) @@ -484,8 +505,7 @@ def register(self): ) def unregister(self): - for rule in self.routing_rules: - ROUTER.remove_rule(rule) + ROUTER.remove(self.routing_rules) self.routing_rules.clear() @@ -550,6 +570,10 @@ def __init__( self.cluster_port = None self.proxy = None + parsed_arn = parse_arn(arn) + self.account_id = parsed_arn["account"] + self.region_name = parsed_arn["region"] + @property def version(self): return self._version @@ -574,7 +598,15 @@ def is_up(self): def health(self): """calls the health endpoint of cluster through the proxy, making sure implicitly that both are running""" - return get_cluster_health_status(self.url, self.auth) + + # The user may have customised `LOCALSTACK_HOST`, so we need to rewrite the health + # check endpoint to always make a request against localhost.localstack.cloud (since we + # are always running in the same container), but in order to match the registered HTTP + # route, we must set the host header to the original URL used by this cluster. 
+ url = self.url.replace(config.LOCALSTACK_HOST.host, constants.LOCALHOST_HOSTNAME) + url = url.replace(str(config.LOCALSTACK_HOST.port), str(config.GATEWAY_LISTEN[0].port)) + host = self._url.hostname + return get_cluster_health_status(url, self.auth, host=host) def _backend_cluster(self) -> OpensearchCluster: return OpensearchCluster( @@ -643,14 +675,25 @@ def _base_settings(self, dirs) -> CommandSettings: settings = { "http.port": self.port, "http.publish_port": self.port, - "transport.port": "0", "network.host": self.host, "http.compression": "false", "path.data": f'"{dirs.data}"', "path.repo": f'"{dirs.backup}"', - "discovery.type": "single-node", } + # This config option was renamed between 6.7 and 6.8, yet not documented as a breaking change + # See https://github.com/elastic/elasticsearch/blob/f220abaf/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java#L1349-L1353 + if self.version.startswith("Elasticsearch_5.") or ( + self.version.startswith("Elasticsearch_6.") and self.version != "Elasticsearch_6.8" + ): + settings["transport.tcp.port"] = "0" + else: + settings["transport.port"] = "0" + + # `discovery.type` had a different meaning in 5.x + if not self.version.startswith("Elasticsearch_5."): + settings["discovery.type"] = "single-node" + if os.path.exists(os.path.join(dirs.mods, "x-pack-ml")): settings["xpack.ml.enabled"] = "false" @@ -658,6 +701,7 @@ def _base_settings(self, dirs) -> CommandSettings: def _create_env_vars(self, directories: Directories) -> Dict: return { + **elasticsearch_package.get_installer(self.version).get_java_env_vars(), "ES_JAVA_OPTS": os.environ.get("ES_JAVA_OPTS", "-Xms200m -Xmx600m"), "ES_TMPDIR": directories.tmp, } diff --git a/localstack/services/opensearch/cluster_manager.py b/localstack-core/localstack/services/opensearch/cluster_manager.py similarity index 93% rename from localstack/services/opensearch/cluster_manager.py rename to localstack-core/localstack/services/opensearch/cluster_manager.py index 19630d3b78cfe..8a286daf661fc 100644 --- a/localstack/services/opensearch/cluster_manager.py +++ b/localstack-core/localstack/services/opensearch/cluster_manager.py @@ -7,8 +7,7 @@ from localstack import config from localstack.aws.api.opensearch import DomainEndpointOptions, EngineType -from localstack.config import EDGE_BIND_HOST -from localstack.constants import LOCALHOST, LOCALHOST_HOSTNAME +from localstack.constants import LOCALHOST from localstack.services.opensearch import versions from localstack.services.opensearch.cluster import ( CustomEndpoint, @@ -20,6 +19,7 @@ OpensearchCluster, SecurityOptions, ) +from localstack.utils.aws.arns import get_partition from localstack.utils.common import ( PortNotAvailableException, call_safe, @@ -28,6 +28,7 @@ start_thread, ) from localstack.utils.serving import Server +from localstack.utils.urls import localstack_host LOG = logging.getLogger(__name__) @@ -62,7 +63,7 @@ class DomainKey: @property def arn(self): - return f"arn:aws:es:{self.region}:{self.account}:domain/{self.domain_name}" + return f"arn:{get_partition(self.region)}:es:{self.region}:{self.account}:domain/{self.domain_name}" @staticmethod def from_arn(arn: str) -> "DomainKey": @@ -110,16 +111,22 @@ def build_cluster_endpoint( assigned_port = external_service_ports.reserve_port(preferred_port) except PortNotAvailableException: LOG.warning( - f"Preferred port {preferred_port} is not available, trying to reserve another port." 
+ "Preferred port %s is not available, trying to reserve another port.", + preferred_port, ) assigned_port = external_service_ports.reserve_port() else: assigned_port = external_service_ports.reserve_port() - return f"{config.LOCALSTACK_HOSTNAME}:{assigned_port}" + + host_definition = localstack_host(custom_port=assigned_port) + return host_definition.host_and_port() if config.OPENSEARCH_ENDPOINT_STRATEGY == "path": - return f"{config.LOCALSTACK_HOSTNAME}:{config.EDGE_PORT}/{engine_domain}/{domain_key.region}/{domain_key.domain_name}" + host_definition = localstack_host() + return f"{host_definition.host_and_port()}/{engine_domain}/{domain_key.region}/{domain_key.domain_name}" + # or through a subdomain (domain-name.region.opensearch.localhost.localstack.cloud) - return f"{domain_key.domain_name}.{domain_key.region}.{engine_domain}.{LOCALHOST_HOSTNAME}:{config.EDGE_PORT}" + host_definition = localstack_host() + return f"{domain_key.domain_name}.{domain_key.region}.{engine_domain}.{host_definition.host_and_port()}" def determine_custom_endpoint( @@ -339,10 +346,12 @@ def _create_cluster( else: port = _get_port_from_url(url) if engine_type == EngineType.OpenSearch: - return OpensearchCluster(port=port, host=EDGE_BIND_HOST, arn=arn, version=version) + return OpensearchCluster( + port=port, host=config.GATEWAY_LISTEN[0].host, arn=arn, version=version + ) else: return ElasticsearchCluster( - port=port, host=EDGE_BIND_HOST, arn=arn, version=version + port=port, host=config.GATEWAY_LISTEN[0].host, arn=arn, version=version ) @@ -390,7 +399,7 @@ def _create_cluster( if engine_type == EngineType.OpenSearch: self.cluster = OpensearchCluster( port=port, - host=EDGE_BIND_HOST, + host=config.GATEWAY_LISTEN[0].host, version=version, arn=arn, security_options=security_options, diff --git a/localstack/services/opensearch/models.py b/localstack-core/localstack/services/opensearch/models.py similarity index 100% rename from localstack/services/opensearch/models.py rename to localstack-core/localstack/services/opensearch/models.py diff --git a/localstack-core/localstack/services/opensearch/packages.py b/localstack-core/localstack/services/opensearch/packages.py new file mode 100644 index 0000000000000..35a7fd933ea91 --- /dev/null +++ b/localstack-core/localstack/services/opensearch/packages.py @@ -0,0 +1,371 @@ +import glob +import logging +import os +import re +import shutil +import textwrap +import threading +from typing import List + +import semver + +from localstack import config +from localstack.constants import ( + ELASTICSEARCH_DEFAULT_VERSION, + ELASTICSEARCH_DELETE_MODULES, + ELASTICSEARCH_PLUGIN_LIST, + OPENSEARCH_DEFAULT_VERSION, + OPENSEARCH_PLUGIN_LIST, +) +from localstack.packages import InstallTarget, Package, PackageInstaller +from localstack.packages.java import java_package +from localstack.services.opensearch import versions +from localstack.utils.archives import download_and_extract_with_retry +from localstack.utils.files import chmod_r, load_file, mkdir, rm_rf, save_file +from localstack.utils.java import ( + java_system_properties_proxy, + java_system_properties_ssl, + system_properties_to_cli_args, +) +from localstack.utils.run import run +from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available +from localstack.utils.sync import SynchronizedDefaultDict, retry + +LOG = logging.getLogger(__name__) + + +_OPENSEARCH_INSTALL_LOCKS = SynchronizedDefaultDict(threading.RLock) + + +class OpensearchPackage(Package): + def __init__(self, default_version: str = 
OPENSEARCH_DEFAULT_VERSION): + super().__init__(name="OpenSearch", default_version=default_version) + + def _get_installer(self, version: str) -> PackageInstaller: + if version in versions._prefixed_elasticsearch_install_versions: + if version.startswith("Elasticsearch_5.") or version.startswith("Elasticsearch_6."): + return ElasticsearchLegacyPackageInstaller(version) + return ElasticsearchPackageInstaller(version) + else: + return OpensearchPackageInstaller(version) + + def get_versions(self) -> List[str]: + return list(versions.install_versions.keys()) + + +class OpensearchPackageInstaller(PackageInstaller): + def __init__(self, version: str): + super().__init__("opensearch", version) + + def _install(self, target: InstallTarget): + # locally import to avoid having a dependency on ASF when starting the CLI + from localstack.aws.api.opensearch import EngineType + from localstack.services.opensearch import versions + + version = self._get_opensearch_install_version() + install_dir = self._get_install_dir(target) + with _OPENSEARCH_INSTALL_LOCKS[version]: + if not os.path.exists(install_dir): + opensearch_url = versions.get_download_url(version, EngineType.OpenSearch) + install_dir_parent = os.path.dirname(install_dir) + mkdir(install_dir_parent) + # download and extract archive + tmp_archive = os.path.join( + config.dirs.cache, f"localstack.{os.path.basename(opensearch_url)}" + ) + download_and_extract_with_retry(opensearch_url, tmp_archive, install_dir_parent) + opensearch_dir = glob.glob(os.path.join(install_dir_parent, "opensearch*")) + if not opensearch_dir: + raise Exception(f"Unable to find OpenSearch folder in {install_dir_parent}") + shutil.move(opensearch_dir[0], install_dir) + + for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"): + dir_path = os.path.join(install_dir, dir_name) + mkdir(dir_path) + chmod_r(dir_path, 0o777) + + parsed_version = semver.VersionInfo.parse(version) + + # setup security based on the version + self._setup_security(install_dir, parsed_version) + + # install other default plugins for opensearch 1.1+ + # https://forum.opensearch.org/t/ingest-attachment-cannot-be-installed/6494/12 + if parsed_version >= "1.1.0": + # Determine network configuration to use for plugin downloads + sys_props = { + **java_system_properties_proxy(), + **java_system_properties_ssl( + os.path.join(install_dir, "jdk", "bin", "keytool"), + {"JAVA_HOME": os.path.join(install_dir, "jdk")}, + ), + } + java_opts = system_properties_to_cli_args(sys_props) + + for plugin in OPENSEARCH_PLUGIN_LIST: + plugin_binary = os.path.join(install_dir, "bin", "opensearch-plugin") + plugin_dir = os.path.join(install_dir, "plugins", plugin) + if not os.path.exists(plugin_dir): + LOG.info("Installing OpenSearch plugin %s", plugin) + + def try_install(): + output = run( + [plugin_binary, "install", "-b", plugin], + env_vars={"OPENSEARCH_JAVA_OPTS": " ".join(java_opts)}, + ) + LOG.debug("Plugin installation output: %s", output) + + # We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries + download_attempts = 3 + try: + retry(try_install, retries=download_attempts - 1, sleep=2) + except Exception: + LOG.warning( + "Unable to download OpenSearch plugin '%s' after %s attempts", + plugin, + download_attempts, + ) + if not os.environ.get("IGNORE_OS_DOWNLOAD_ERRORS"): + raise + + def _setup_security(self, install_dir: str, parsed_version: semver.VersionInfo): + """ + Prepares the usage of the SecurityPlugin for the different versions of OpenSearch. 
+ :param install_dir: root installation directory for OpenSearch which should be configured + :param parsed_version: parsed semantic version of the OpenSearch installation which should be configured + """ + # create & copy SSL certs to opensearch config dir + install_predefined_cert_if_available() + config_path = os.path.join(install_dir, "config") + _, cert_file_name, key_file_name = create_ssl_cert() + shutil.copyfile(cert_file_name, os.path.join(config_path, "cert.crt")) + shutil.copyfile(key_file_name, os.path.join(config_path, "cert.key")) + + # configure the default roles, roles_mappings, and internal_users + if parsed_version >= "2.0.0": + # with version 2 of opensearch and the security plugin, the config moved to the root config folder + security_config_folder = os.path.join(install_dir, "config", "opensearch-security") + else: + security_config_folder = os.path.join( + install_dir, "plugins", "opensearch-security", "securityconfig" + ) + + # no non-default roles (not even the demo roles) should be set up + roles_path = os.path.join(security_config_folder, "roles.yml") + save_file( + file=roles_path, + permissions=0o666, + content=textwrap.dedent( + """\ + _meta: + type: "roles" + config_version: 2 + """ + ), + ) + + # create the internal user which allows localstack to manage the running instance + internal_users_path = os.path.join(security_config_folder, "internal_users.yml") + save_file( + file=internal_users_path, + permissions=0o666, + content=textwrap.dedent( + """\ + _meta: + type: "internalusers" + config_version: 2 + + # Define your internal users here + localstack-internal: + hash: "$2y$12$ZvpKLI2nsdGj1ResAmlLne7ki5o45XpBppyg9nXF2RLNfmwjbFY22" + reserved: true + hidden: true + backend_roles: [] + attributes: {} + opendistro_security_roles: [] + static: false + """ + ), + ) + + # define the necessary roles mappings for the internal user + roles_mapping_path = os.path.join(security_config_folder, "roles_mapping.yml") + save_file( + file=roles_mapping_path, + permissions=0o666, + content=textwrap.dedent( + """\ + _meta: + type: "rolesmapping" + config_version: 2 + + security_manager: + hosts: [] + users: + - localstack-internal + reserved: false + hidden: false + backend_roles: [] + and_backend_roles: [] + + all_access: + hosts: [] + users: + - localstack-internal + reserved: false + hidden: false + backend_roles: [] + and_backend_roles: [] + """ + ), + ) + + def _get_install_marker_path(self, install_dir: str) -> str: + return os.path.join(install_dir, "bin", "opensearch") + + def _get_opensearch_install_version(self) -> str: + from localstack.services.opensearch import versions + + if config.SKIP_INFRA_DOWNLOADS: + self.version = OPENSEARCH_DEFAULT_VERSION + + return versions.get_install_version(self.version) + + +class ElasticsearchPackageInstaller(PackageInstaller): + def __init__(self, version: str): + super().__init__("elasticsearch", version) + + def get_java_env_vars(self) -> dict[str, str]: + install_dir = self.get_installed_dir() + return { + "JAVA_HOME": os.path.join(install_dir, "jdk"), + } + + def _install(self, target: InstallTarget): + # locally import to avoid having a dependency on ASF when starting the CLI + from localstack.aws.api.opensearch import EngineType + from localstack.services.opensearch import versions + + version = self.get_elasticsearch_install_version() + install_dir = self._get_install_dir(target) + installed_executable = os.path.join(install_dir, "bin", "elasticsearch") + if not os.path.exists(installed_executable): + es_url = 
versions.get_download_url(version, EngineType.Elasticsearch) + install_dir_parent = os.path.dirname(install_dir) + mkdir(install_dir_parent) + # download and extract archive + tmp_archive = os.path.join(config.dirs.cache, f"localstack.{os.path.basename(es_url)}") + download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent) + elasticsearch_dir = glob.glob(os.path.join(install_dir_parent, "elasticsearch*")) + if not elasticsearch_dir: + raise Exception(f"Unable to find Elasticsearch folder in {install_dir_parent}") + shutil.move(elasticsearch_dir[0], install_dir) + + for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"): + dir_path = os.path.join(install_dir, dir_name) + mkdir(dir_path) + chmod_r(dir_path, 0o777) + + # Determine network configuration to use for plugin downloads + sys_props = { + **java_system_properties_proxy(), + **java_system_properties_ssl( + os.path.join(install_dir, "jdk", "bin", "keytool"), + self.get_java_env_vars(), + ), + } + java_opts = system_properties_to_cli_args(sys_props) + + # install default plugins + for plugin in ELASTICSEARCH_PLUGIN_LIST: + plugin_binary = os.path.join(install_dir, "bin", "elasticsearch-plugin") + plugin_dir = os.path.join(install_dir, "plugins", plugin) + if not os.path.exists(plugin_dir): + LOG.info("Installing Elasticsearch plugin %s", plugin) + + def try_install(): + output = run( + [plugin_binary, "install", "-b", plugin], + env_vars={"ES_JAVA_OPTS": " ".join(java_opts)}, + ) + LOG.debug("Plugin installation output: %s", output) + + # We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries + download_attempts = 3 + try: + retry(try_install, retries=download_attempts - 1, sleep=2) + except Exception: + LOG.warning( + "Unable to download Elasticsearch plugin '%s' after %s attempts", + plugin, + download_attempts, + ) + if not os.environ.get("IGNORE_ES_DOWNLOAD_ERRORS"): + raise + + # delete some plugins to free up space + for plugin in ELASTICSEARCH_DELETE_MODULES: + module_dir = os.path.join(install_dir, "modules", plugin) + rm_rf(module_dir) + + # disable x-pack-ml plugin (not working on Alpine) + xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform") + rm_rf(xpack_dir) + + # patch JVM options file - replace hardcoded heap size settings + jvm_options_file = os.path.join(install_dir, "config", "jvm.options") + if os.path.exists(jvm_options_file): + jvm_options = load_file(jvm_options_file) + jvm_options_replaced = re.sub( + r"(^-Xm[sx][a-zA-Z0-9.]+$)", r"# \1", jvm_options, flags=re.MULTILINE + ) + if jvm_options != jvm_options_replaced: + save_file(jvm_options_file, jvm_options_replaced) + + def _get_install_marker_path(self, install_dir: str) -> str: + return os.path.join(install_dir, "bin", "elasticsearch") + + def get_elasticsearch_install_version(self) -> str: + from localstack.services.opensearch import versions + + if config.SKIP_INFRA_DOWNLOADS: + return ELASTICSEARCH_DEFAULT_VERSION + + return versions.get_install_version(self.version) + + +class ElasticsearchLegacyPackageInstaller(ElasticsearchPackageInstaller): + """ +
Specialised package installer for Elasticsearch 5.x and 6.x + + It installs Java during setup because these releases of ES do not have a bundled JDK. + This should be removed after these versions are dropped in line with AWS EOL, scheduled for Nov 2026. + https://docs.aws.amazon.com/opensearch-service/latest/developerguide/what-is.html#choosing-version + """ + + # ES 5.x and 6.x require Java 8 + # See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/zip-targz.html + JAVA_VERSION = "8" + + def _prepare_installation(self, target: InstallTarget) -> None: + java_package.get_installer(self.JAVA_VERSION).install(target) + + def get_java_env_vars(self) -> dict[str, str]: + return { + "JAVA_HOME": java_package.get_installer(self.JAVA_VERSION).get_java_home(), + } + + +opensearch_package = OpensearchPackage(default_version=OPENSEARCH_DEFAULT_VERSION) +elasticsearch_package = OpensearchPackage(default_version=ELASTICSEARCH_DEFAULT_VERSION) diff --git a/localstack/services/opensearch/plugins.py b/localstack-core/localstack/services/opensearch/plugins.py similarity index 100% rename from localstack/services/opensearch/plugins.py rename to localstack-core/localstack/services/opensearch/plugins.py diff --git a/localstack-core/localstack/services/opensearch/provider.py b/localstack-core/localstack/services/opensearch/provider.py new file mode 100644 index 0000000000000..b56a835ae6c64 --- /dev/null +++ b/localstack-core/localstack/services/opensearch/provider.py @@ -0,0 +1,699 @@ +import logging +import os +import re +import threading +from datetime import datetime, timezone +from random import randint +from typing import Dict, Optional +from urllib.parse import urlparse + +from localstack import config +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.opensearch import ( + ARN, + AccessPoliciesStatus, + AdvancedOptions, + AdvancedOptionsStatus, + AdvancedSecurityOptions, + AdvancedSecurityOptionsInput, + AdvancedSecurityOptionsStatus, + AIMLOptionsInput, + AutoTuneDesiredState, + AutoTuneOptions, + AutoTuneOptionsInput, + AutoTuneOptionsOutput, + AutoTuneOptionsStatus, + AutoTuneState, + AutoTuneStatus, + ClusterConfig, + ClusterConfigStatus, + CognitoOptions, + CognitoOptionsStatus, + ColdStorageOptions, + CreateDomainResponse, + DeleteDomainResponse, + DeploymentStatus, + DescribeDomainConfigResponse, + DescribeDomainResponse, + DescribeDomainsResponse, + DomainConfig, + DomainEndpointOptions, + DomainEndpointOptionsStatus, + DomainInfo, + DomainName, + DomainNameList, + DomainStatus, + EBSOptions, + EBSOptionsStatus, + EncryptionAtRestOptions, + EncryptionAtRestOptionsStatus, + EngineType, + GetCompatibleVersionsResponse, + IdentityCenterOptionsInput, + IPAddressType, + ListDomainNamesResponse, + ListTagsResponse, + ListVersionsResponse, + LogPublishingOptions, + LogPublishingOptionsStatus, + MaxResults, + NextToken, + NodeToNodeEncryptionOptions, + NodeToNodeEncryptionOptionsStatus, + OffPeakWindowOptions, + OpensearchApi, + OpenSearchPartitionInstanceType, + OptionState, + OptionStatus, + PolicyDocument, + ResourceAlreadyExistsException, + ResourceNotFoundException, + RollbackOnDisable, + ServiceSoftwareOptions, + SnapshotOptions, + SnapshotOptionsStatus, + SoftwareUpdateOptions, + StringList, + TagList, + TLSSecurityPolicy, + UpdateDomainConfigRequest, + UpdateDomainConfigResponse, + ValidationException, + VersionStatus, + VersionString, + VolumeType, + VPCDerivedInfoStatus, + VPCOptions, +) +from localstack.constants import OPENSEARCH_DEFAULT_VERSION
+from localstack.services.opensearch import versions +from localstack.services.opensearch.cluster import SecurityOptions +from localstack.services.opensearch.cluster_manager import ( + ClusterManager, + DomainKey, + create_cluster_manager, +) +from localstack.services.opensearch.models import OpenSearchStore, opensearch_stores +from localstack.services.plugins import ServiceLifecycleHook +from localstack.state import AssetDirectory, StateVisitor +from localstack.utils.aws.arns import parse_arn +from localstack.utils.collections import PaginatedList, remove_none_values_from_dict +from localstack.utils.serving import Server +from localstack.utils.urls import localstack_host + +LOG = logging.getLogger(__name__) + +# The singleton for the ClusterManager instance. +# The singleton is implemented this way only to be able to overwrite its value during tests. +__CLUSTER_MANAGER = None + +# mutex for modifying domains +_domain_mutex = threading.RLock() + +DEFAULT_OPENSEARCH_CLUSTER_CONFIG = ClusterConfig( + InstanceType=OpenSearchPartitionInstanceType.m3_medium_search, + InstanceCount=1, + DedicatedMasterEnabled=True, + ZoneAwarenessEnabled=False, + DedicatedMasterType=OpenSearchPartitionInstanceType.m3_medium_search, + DedicatedMasterCount=1, +) + +DEFAULT_OPENSEARCH_DOMAIN_ENDPOINT_OPTIONS = DomainEndpointOptions( + EnforceHTTPS=False, + TLSSecurityPolicy=TLSSecurityPolicy.Policy_Min_TLS_1_0_2019_07, + CustomEndpointEnabled=False, +) + + +def cluster_manager() -> ClusterManager: + global __CLUSTER_MANAGER + if __CLUSTER_MANAGER is None: + __CLUSTER_MANAGER = create_cluster_manager() + return __CLUSTER_MANAGER + + +def _run_cluster_startup_monitor(cluster: Server, domain_name: str, region: str): + LOG.debug("running cluster startup monitor for cluster %s", cluster) + + # wait until the cluster is started + # NOTE: does not work when DNS rebind protection is active for localhost.localstack.cloud + is_up = cluster.wait_is_up() + + LOG.debug("cluster state polling for %s returned! status = %s", domain_name, is_up) + with _domain_mutex: + store = OpensearchProvider.get_store(cluster.account_id, cluster.region_name) + status = store.opensearch_domains.get(domain_name) + if status is not None: + status["Processing"] = False + + +def create_cluster( + domain_key: DomainKey, + engine_version: str, + domain_endpoint_options: Optional[DomainEndpointOptions], + security_options: Optional[SecurityOptions], + preferred_port: Optional[int] = None, +): + """ + Uses the ClusterManager to create a new cluster for the given domain key. NOT thread safe, needs to be called + around _domain_mutex. + If the preferred_port is given, this port will be preferred (if OPENSEARCH_ENDPOINT_STRATEGY == "port"). + """ + store = opensearch_stores[domain_key.account][domain_key.region] + + manager = cluster_manager() + engine_version = engine_version or OPENSEARCH_DEFAULT_VERSION + cluster = manager.create( + arn=domain_key.arn, + version=engine_version, + endpoint_options=domain_endpoint_options, + security_options=security_options, + preferred_port=preferred_port, + ) + + # FIXME: in AWS, the Endpoint is set once the cluster is running, not before (like here), but our tests and + # in particular cloudformation currently relies on the assumption that it is set when the domain is created. 
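+    # Hypothetical example of the endpoint derivation below: a cluster reachable at
+    #   http://0.0.0.0:4566/opensearch/us-east-1/my-domain
+    # is stored as the endpoint
+    #   localhost.localstack.cloud:4566/opensearch/us-east-1/my-domain
+    # (scheme stripped, 0.0.0.0 swapped for the resolvable localstack host).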
+ status = store.opensearch_domains[domain_key.domain_name] + # Replacing only 0.0.0.0 here as usage of this bind address mostly means running in docker which is used locally + # If another bind address is used we want to keep it in the endpoint as this is a conscious user decision to + # access from another device on the network. + status["Endpoint"] = cluster.url.split("://")[-1].replace("0.0.0.0", localstack_host().host) + status["EngineVersion"] = engine_version + + if cluster.is_up(): + status["Processing"] = False + else: + # run a background thread that will update all domains that use this cluster to set + # the cluster state once it is started, or the CLUSTER_STARTUP_TIMEOUT is reached + threading.Thread( + target=_run_cluster_startup_monitor, + args=(cluster, domain_key.domain_name, domain_key.region), + daemon=True, + ).start() + + +def _remove_cluster(domain_key: DomainKey): + parsed_arn = parse_arn(domain_key.arn) + store = OpensearchProvider.get_store(parsed_arn["account"], parsed_arn["region"]) + cluster_manager().remove(domain_key.arn) + del store.opensearch_domains[domain_key.domain_name] + + +def get_domain_config(domain_key) -> DomainConfig: + status = get_domain_status(domain_key) + return _status_to_config(status) + + +def _status_to_config(status: DomainStatus) -> DomainConfig: + cluster_cfg = status.get("ClusterConfig") or {} + default_cfg = DEFAULT_OPENSEARCH_CLUSTER_CONFIG + config_status = get_domain_config_status() + return DomainConfig( + AccessPolicies=AccessPoliciesStatus( + Options=PolicyDocument(""), + Status=config_status, + ), + AdvancedOptions=AdvancedOptionsStatus( + Options={ + "override_main_response_version": "false", + "rest.action.multi.allow_explicit_index": "true", + }, + Status=config_status, + ), + EBSOptions=EBSOptionsStatus( + Options=EBSOptions( + EBSEnabled=True, + VolumeSize=100, + VolumeType=VolumeType.gp2, + ), + Status=config_status, + ), + ClusterConfig=ClusterConfigStatus( + Options=ClusterConfig( + DedicatedMasterCount=cluster_cfg.get( + "DedicatedMasterCount", default_cfg["DedicatedMasterCount"] + ), + DedicatedMasterEnabled=cluster_cfg.get( + "DedicatedMasterEnabled", default_cfg["DedicatedMasterEnabled"] + ), + DedicatedMasterType=cluster_cfg.get( + "DedicatedMasterType", default_cfg["DedicatedMasterType"] + ), + InstanceCount=cluster_cfg.get("InstanceCount", default_cfg["InstanceCount"]), + InstanceType=cluster_cfg.get("InstanceType", default_cfg["InstanceType"]), + ZoneAwarenessEnabled=cluster_cfg.get( + "ZoneAwarenessEnabled", default_cfg["ZoneAwarenessEnabled"] + ), + ), + Status=config_status, + ), + CognitoOptions=CognitoOptionsStatus( + Options=CognitoOptions(Enabled=False), Status=config_status + ), + EngineVersion=VersionStatus(Options=status.get("EngineVersion"), Status=config_status), + EncryptionAtRestOptions=EncryptionAtRestOptionsStatus( + Options=EncryptionAtRestOptions(Enabled=False), + Status=config_status, + ), + LogPublishingOptions=LogPublishingOptionsStatus( + Options={}, + Status=config_status, + ), + SnapshotOptions=SnapshotOptionsStatus( + Options=SnapshotOptions(AutomatedSnapshotStartHour=randint(0, 23)), + Status=config_status, + ), + VPCOptions=VPCDerivedInfoStatus( + Options={}, + Status=config_status, + ), + DomainEndpointOptions=DomainEndpointOptionsStatus( + Options=status.get("DomainEndpointOptions", {}), + Status=config_status, + ), + NodeToNodeEncryptionOptions=NodeToNodeEncryptionOptionsStatus( + Options=NodeToNodeEncryptionOptions(Enabled=False), + Status=config_status, + ), + 
AdvancedSecurityOptions=AdvancedSecurityOptionsStatus( + Options=status.get("AdvancedSecurityOptions", {}), Status=config_status + ), + AutoTuneOptions=AutoTuneOptionsStatus( + Options=AutoTuneOptions( + DesiredState=AutoTuneDesiredState.ENABLED, + RollbackOnDisable=RollbackOnDisable.NO_ROLLBACK, + MaintenanceSchedules=[], + ), + Status=AutoTuneStatus( + CreationDate=config_status.get("CreationDate"), + UpdateDate=config_status.get("UpdateDate"), + UpdateVersion=config_status.get("UpdateVersion"), + State=AutoTuneState.ENABLED, + PendingDeletion=config_status.get("PendingDeletion"), + ), + ), + ) + + +def get_domain_config_status() -> OptionStatus: + return OptionStatus( + CreationDate=datetime.now(), + PendingDeletion=False, + State=OptionState.Active, + UpdateDate=datetime.now(), + UpdateVersion=randint(1, 100), + ) + + +def get_domain_status(domain_key: DomainKey, deleted=False) -> DomainStatus: + parsed_arn = parse_arn(domain_key.arn) + store = OpensearchProvider.get_store(parsed_arn["account"], parsed_arn["region"]) + stored_status: DomainStatus = ( + store.opensearch_domains.get(domain_key.domain_name) or DomainStatus() + ) + cluster_cfg = stored_status.get("ClusterConfig") or {} + default_cfg = DEFAULT_OPENSEARCH_CLUSTER_CONFIG + + new_status = DomainStatus( + ARN=domain_key.arn, + Created=True, + Deleted=deleted, + Processing=stored_status.get("Processing", True), + DomainId=f"{domain_key.account}/{domain_key.domain_name}", + DomainName=domain_key.domain_name, + ClusterConfig=ClusterConfig( + DedicatedMasterCount=cluster_cfg.get( + "DedicatedMasterCount", default_cfg["DedicatedMasterCount"] + ), + DedicatedMasterEnabled=cluster_cfg.get( + "DedicatedMasterEnabled", default_cfg["DedicatedMasterEnabled"] + ), + DedicatedMasterType=cluster_cfg.get( + "DedicatedMasterType", default_cfg["DedicatedMasterType"] + ), + InstanceCount=cluster_cfg.get("InstanceCount", default_cfg["InstanceCount"]), + InstanceType=cluster_cfg.get("InstanceType", default_cfg["InstanceType"]), + ZoneAwarenessEnabled=cluster_cfg.get( + "ZoneAwarenessEnabled", default_cfg["ZoneAwarenessEnabled"] + ), + WarmEnabled=False, + ColdStorageOptions=ColdStorageOptions(Enabled=False), + ), + EngineVersion=stored_status.get("EngineVersion") or OPENSEARCH_DEFAULT_VERSION, + Endpoint=stored_status.get("Endpoint", None), + EBSOptions=EBSOptions(EBSEnabled=True, VolumeType=VolumeType.gp2, VolumeSize=10, Iops=0), + CognitoOptions=CognitoOptions(Enabled=False), + UpgradeProcessing=False, + AccessPolicies="", + SnapshotOptions=SnapshotOptions(AutomatedSnapshotStartHour=0), + EncryptionAtRestOptions=EncryptionAtRestOptions(Enabled=False), + NodeToNodeEncryptionOptions=NodeToNodeEncryptionOptions(Enabled=False), + AdvancedOptions={ + "override_main_response_version": "false", + "rest.action.multi.allow_explicit_index": "true", + }, + ServiceSoftwareOptions=ServiceSoftwareOptions( + CurrentVersion="", + NewVersion="", + UpdateAvailable=False, + Cancellable=False, + UpdateStatus=DeploymentStatus.COMPLETED, + Description="There is no software update available for this domain.", + AutomatedUpdateDate=datetime.fromtimestamp(0, tz=timezone.utc), + OptionalDeployment=True, + ), + DomainEndpointOptions=stored_status.get("DomainEndpointOptions") + or DEFAULT_OPENSEARCH_DOMAIN_ENDPOINT_OPTIONS, + AdvancedSecurityOptions=AdvancedSecurityOptions( + Enabled=False, InternalUserDatabaseEnabled=False + ), + AutoTuneOptions=AutoTuneOptionsOutput(State=AutoTuneState.ENABLE_IN_PROGRESS), + ) + return new_status + + +def _ensure_domain_exists(arn: ARN) 
-> None: + """ + Checks if the domain for the given ARN exists. Otherwise, a ValidationException is raised. + + :param arn: ARN string to lookup the domain for + :return: None if the domain exists, otherwise raises an exception + :raises: ValidationException if the domain for the given ARN cannot be found + """ + parsed_arn = parse_arn(arn) + store = OpensearchProvider.get_store(parsed_arn["account"], parsed_arn["region"]) + domain_key = DomainKey.from_arn(arn) + domain_status = store.opensearch_domains.get(domain_key.domain_name) + if domain_status is None: + raise ValidationException("Invalid ARN. Domain not found.") + + +def _update_domain_config_request_to_status(request: UpdateDomainConfigRequest) -> DomainStatus: + request: Dict + request.pop("DryRun", None) + request.pop("DomainName", None) + return request + + +_domain_name_pattern = re.compile(r"[a-z][a-z0-9\\-]{3,28}") + + +def is_valid_domain_name(name: str) -> bool: + return True if _domain_name_pattern.match(name) else False + + +def validate_endpoint_options(endpoint_options: DomainEndpointOptions): + custom_endpoint = endpoint_options.get("CustomEndpoint", "") + custom_endpoint_enabled = endpoint_options.get("CustomEndpointEnabled", False) + + if custom_endpoint and not custom_endpoint_enabled: + raise ValidationException( + "CustomEndpointEnabled flag should be set in order to use CustomEndpoint." + ) + if custom_endpoint_enabled and not custom_endpoint: + raise ValidationException( + "Please provide CustomEndpoint field to create a custom endpoint." + ) + + +class OpensearchProvider(OpensearchApi, ServiceLifecycleHook): + @staticmethod + def get_store(account_id: str, region_name: str) -> OpenSearchStore: + return opensearch_stores[account_id][region_name] + + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(opensearch_stores) + visitor.visit(AssetDirectory(self.service, os.path.join(config.dirs.data, "opensearch"))) + visitor.visit(AssetDirectory(self.service, os.path.join(config.dirs.data, "elasticsearch"))) + + def on_after_state_load(self): + """Starts clusters whose metadata has been restored.""" + for account_id, region, store in opensearch_stores.iter_stores(): + for domain_name, domain_status in store.opensearch_domains.items(): + domain_key = DomainKey(domain_name, region, account_id) + if cluster_manager().get(domain_key.arn): + # cluster already restored in previous call to on_after_state_load + continue + + LOG.info("Restoring domain %s in region %s.", domain_name, region) + try: + preferred_port = None + if config.OPENSEARCH_ENDPOINT_STRATEGY == "port": + # try to parse the previous port to re-use it for the re-created cluster + if "Endpoint" in domain_status: + preferred_port = urlparse(f"http://{domain_status['Endpoint']}").port + + engine_version = domain_status.get("EngineVersion") + domain_endpoint_options = domain_status.get("DomainEndpointOptions", {}) + security_options = SecurityOptions.from_input( + domain_status.get("AdvancedSecurityOptions") + ) + + create_cluster( + domain_key=domain_key, + engine_version=engine_version, + domain_endpoint_options=domain_endpoint_options, + security_options=security_options, + preferred_port=preferred_port, + ) + except Exception: + LOG.exception( + "Could not restore domain %s in region %s.", + domain_name, + region, + ) + + def on_before_state_reset(self): + self._stop_clusters() + + def on_before_stop(self): + self._stop_clusters() + + def _stop_clusters(self): + for account_id, region, store in opensearch_stores.iter_stores(): + for 
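+            # A minimal client-side sketch of this create flow (hypothetical endpoint value):
+            #   client = boto3.client("opensearch", endpoint_url="http://localhost:4566")
+            #   client.create_domain(DomainName="my-domain")
+            #   client.describe_domain(DomainName="my-domain")["DomainStatus"]["Processing"]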
domain_name in store.opensearch_domains.keys(): + cluster_manager().remove(DomainKey(domain_name, region, account_id).arn) + + def create_domain( + self, + context: RequestContext, + domain_name: DomainName, + engine_version: VersionString = None, + cluster_config: ClusterConfig = None, + ebs_options: EBSOptions = None, + access_policies: PolicyDocument = None, + ip_address_type: IPAddressType = None, + snapshot_options: SnapshotOptions = None, + vpc_options: VPCOptions = None, + cognito_options: CognitoOptions = None, + encryption_at_rest_options: EncryptionAtRestOptions = None, + node_to_node_encryption_options: NodeToNodeEncryptionOptions = None, + advanced_options: AdvancedOptions = None, + log_publishing_options: LogPublishingOptions = None, + domain_endpoint_options: DomainEndpointOptions = None, + advanced_security_options: AdvancedSecurityOptionsInput = None, + identity_center_options: IdentityCenterOptionsInput = None, + tag_list: TagList = None, + auto_tune_options: AutoTuneOptionsInput = None, + off_peak_window_options: OffPeakWindowOptions = None, + software_update_options: SoftwareUpdateOptions = None, + aiml_options: AIMLOptionsInput = None, + **kwargs, + ) -> CreateDomainResponse: + store = self.get_store(context.account_id, context.region) + + if not is_valid_domain_name(domain_name): + # TODO: this should use the server-side validation framework at some point. + raise ValidationException( + "Member must satisfy regular expression pattern: [a-z][a-z0-9\\-]+" + ) + + if domain_endpoint_options: + validate_endpoint_options(domain_endpoint_options) + + with _domain_mutex: + if domain_name in store.opensearch_domains: + raise ResourceAlreadyExistsException( + f"domain {domain_name} already exists in region {context.region}" + ) + domain_key = DomainKey( + domain_name=domain_name, + region=context.region, + account=context.account_id, + ) + security_options = SecurityOptions.from_input(advanced_security_options) + + # "create" domain data + store.opensearch_domains[domain_name] = get_domain_status(domain_key) + if domain_endpoint_options: + store.opensearch_domains[domain_name]["DomainEndpointOptions"] = ( + DEFAULT_OPENSEARCH_DOMAIN_ENDPOINT_OPTIONS | domain_endpoint_options + ) + + # lazy-init the cluster (sets the Endpoint and Processing flag of the domain status) + # TODO handle additional parameters (cluster config,...) 
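Aside: a minimal sketch of exercising this `create_domain` path from the outside via boto3. The endpoint URL, region, and dummy credentials are assumptions for a default local setup, not part of this patch:

```python
import time

import boto3

# hypothetical client configuration for a locally running instance
client = boto3.client(
    "opensearch",
    endpoint_url="http://localhost:4566",  # assumed default edge port
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

client.create_domain(DomainName="my-domain", EngineVersion="OpenSearch_2.13")

# the cluster is created lazily (see the comment above), so the Processing
# flag stays True until the underlying cluster has come up
while client.describe_domain(DomainName="my-domain")["DomainStatus"]["Processing"]:
    time.sleep(2)
```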
+ create_cluster(domain_key, engine_version, domain_endpoint_options, security_options) + + # set the tags + self.add_tags(context, domain_key.arn, tag_list) + + # get the (updated) status + status = get_domain_status(domain_key) + + return CreateDomainResponse(DomainStatus=status) + + def delete_domain( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DeleteDomainResponse: + domain_key = DomainKey( + domain_name=domain_name, + region=context.region, + account=context.account_id, + ) + store = self.get_store(context.account_id, context.region) + with _domain_mutex: + if domain_name not in store.opensearch_domains: + raise ResourceNotFoundException(f"Domain not found: {domain_name}") + + status = get_domain_status(domain_key, deleted=True) + _remove_cluster(domain_key) + + return DeleteDomainResponse(DomainStatus=status) + + def describe_domain( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeDomainResponse: + store = self.get_store(context.account_id, context.region) + domain_key = DomainKey( + domain_name=domain_name, + region=context.region, + account=context.account_id, + ) + with _domain_mutex: + if domain_name not in store.opensearch_domains: + raise ResourceNotFoundException(f"Domain not found: {domain_name}") + + status = get_domain_status(domain_key) + return DescribeDomainResponse(DomainStatus=status) + + @handler("UpdateDomainConfig", expand=False) + def update_domain_config( + self, context: RequestContext, payload: UpdateDomainConfigRequest + ) -> UpdateDomainConfigResponse: + domain_key = DomainKey( + domain_name=payload["DomainName"], + region=context.region, + account=context.account_id, + ) + store = self.get_store(context.account_id, context.region) + with _domain_mutex: + domain_status = store.opensearch_domains.get(domain_key.domain_name, None) + if domain_status is None: + raise ResourceNotFoundException(f"Domain not found: {domain_key.domain_name}") + + status_update: Dict = _update_domain_config_request_to_status(payload) + domain_status.update(status_update) + + return UpdateDomainConfigResponse(DomainConfig=_status_to_config(domain_status)) + + def describe_domains( + self, context: RequestContext, domain_names: DomainNameList, **kwargs + ) -> DescribeDomainsResponse: + status_list = [] + with _domain_mutex: + for domain_name in domain_names: + try: + domain_status = self.describe_domain(context, domain_name)["DomainStatus"] + status_list.append(domain_status) + except ResourceNotFoundException: + # ResourceNotFoundExceptions are ignored, we just look for the next domain. + # If no domain can be found, the result will just be empty. 
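Because `ResourceNotFoundException` is swallowed here, a batch lookup silently drops unknown names rather than failing. A short sketch, reusing the hypothetical `client` from the previous example:

```python
resp = client.describe_domains(DomainNames=["my-domain", "does-not-exist"])
# only statuses for existing domains are returned; the unknown name is absent
found = [status["DomainName"] for status in resp["DomainStatusList"]]
assert found == ["my-domain"]
```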
+ pass + return DescribeDomainsResponse(DomainStatusList=status_list) + + def list_domain_names( + self, context: RequestContext, engine_type: EngineType = None, **kwargs + ) -> ListDomainNamesResponse: + store = self.get_store(context.account_id, context.region) + domain_names = [ + DomainInfo( + DomainName=DomainName(domain_name), + EngineType=versions.get_engine_type(domain["EngineVersion"]), + ) + for domain_name, domain in store.opensearch_domains.items() + if engine_type is None + or versions.get_engine_type(domain["EngineVersion"]) == engine_type + ] + return ListDomainNamesResponse(DomainNames=domain_names) + + def list_versions( + self, + context: RequestContext, + max_results: MaxResults = None, + next_token: NextToken = None, + **kwargs, + ) -> ListVersionsResponse: + version_list = PaginatedList(versions.install_versions.keys()) + page, nxt = version_list.get_page( + lambda x: x, + next_token=next_token, + page_size=max_results, + ) + response = ListVersionsResponse(Versions=page, NextToken=nxt) + return remove_none_values_from_dict(response) + + def get_compatible_versions( + self, context: RequestContext, domain_name: DomainName = None, **kwargs + ) -> GetCompatibleVersionsResponse: + version_filter = None + if domain_name: + store = self.get_store(context.account_id, context.region) + with _domain_mutex: + domain = store.opensearch_domains.get(domain_name) + if not domain: + raise ResourceNotFoundException(f"Domain not found: {domain_name}") + version_filter = domain.get("EngineVersion") + compatible_versions = list(versions.compatible_versions) + if version_filter is not None: + compatible_versions = [ + comp + for comp in versions.compatible_versions + if comp["SourceVersion"] == version_filter + ] + return GetCompatibleVersionsResponse(CompatibleVersions=compatible_versions) + + def describe_domain_config( + self, context: RequestContext, domain_name: DomainName, **kwargs + ) -> DescribeDomainConfigResponse: + domain_key = DomainKey( + domain_name=domain_name, + region=context.region, + account=context.account_id, + ) + store = self.get_store(context.account_id, context.region) + with _domain_mutex: + if domain_name not in store.opensearch_domains: + raise ResourceNotFoundException(f"Domain not found: {domain_name}") + domain_config = get_domain_config(domain_key) + return DescribeDomainConfigResponse(DomainConfig=domain_config) + + def add_tags(self, context: RequestContext, arn: ARN, tag_list: TagList, **kwargs) -> None: + _ensure_domain_exists(arn) + self.get_store(context.account_id, context.region).TAGS.tag_resource(arn, tag_list) + + def list_tags(self, context: RequestContext, arn: ARN, **kwargs) -> ListTagsResponse: + _ensure_domain_exists(arn) + + # The tagging service returns a dictionary with the given root name + store = self.get_store(context.account_id, context.region) + tags = store.TAGS.list_tags_for_resource(arn=arn, root_name="root") + # Extract the actual list of tags for the typed response + tag_list: TagList = tags["root"] + return ListTagsResponse(TagList=tag_list) + + def remove_tags( + self, context: RequestContext, arn: ARN, tag_keys: StringList, **kwargs + ) -> None: + _ensure_domain_exists(arn) + self.get_store(context.account_id, context.region).TAGS.untag_resource(arn, tag_keys) diff --git a/localstack/services/stepfunctions/asl/component/state/state_fail/__init__.py b/localstack-core/localstack/services/opensearch/resource_providers/__init__.py similarity index 100% rename from 
localstack/services/stepfunctions/asl/component/state/state_fail/__init__.py rename to localstack-core/localstack/services/opensearch/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain.py b/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain.py new file mode 100644 index 0000000000000..4de950b722ce9 --- /dev/null +++ b/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain.py @@ -0,0 +1,227 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import copy +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.aws.api.es import CreateElasticsearchDomainRequest +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.collections import convert_to_typed_dict + + +class ElasticsearchDomainProperties(TypedDict): + AccessPolicies: Optional[dict] + AdvancedOptions: Optional[dict] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptionsInput] + Arn: Optional[str] + CognitoOptions: Optional[CognitoOptions] + DomainArn: Optional[str] + DomainEndpoint: Optional[str] + DomainEndpointOptions: Optional[DomainEndpointOptions] + DomainName: Optional[str] + EBSOptions: Optional[EBSOptions] + ElasticsearchClusterConfig: Optional[ElasticsearchClusterConfig] + ElasticsearchVersion: Optional[str] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptions] + Id: Optional[str] + LogPublishingOptions: Optional[dict] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptions] + SnapshotOptions: Optional[SnapshotOptions] + Tags: Optional[list[Tag]] + VPCOptions: Optional[VPCOptions] + + +class ZoneAwarenessConfig(TypedDict): + AvailabilityZoneCount: Optional[int] + + +class ColdStorageOptions(TypedDict): + Enabled: Optional[bool] + + +class ElasticsearchClusterConfig(TypedDict): + ColdStorageOptions: Optional[ColdStorageOptions] + DedicatedMasterCount: Optional[int] + DedicatedMasterEnabled: Optional[bool] + DedicatedMasterType: Optional[str] + InstanceCount: Optional[int] + InstanceType: Optional[str] + WarmCount: Optional[int] + WarmEnabled: Optional[bool] + WarmType: Optional[str] + ZoneAwarenessConfig: Optional[ZoneAwarenessConfig] + ZoneAwarenessEnabled: Optional[bool] + + +class SnapshotOptions(TypedDict): + AutomatedSnapshotStartHour: Optional[int] + + +class VPCOptions(TypedDict): + SecurityGroupIds: Optional[list[str]] + SubnetIds: Optional[list[str]] + + +class NodeToNodeEncryptionOptions(TypedDict): + Enabled: Optional[bool] + + +class DomainEndpointOptions(TypedDict): + CustomEndpoint: Optional[str] + CustomEndpointCertificateArn: Optional[str] + CustomEndpointEnabled: Optional[bool] + EnforceHTTPS: Optional[bool] + TLSSecurityPolicy: Optional[str] + + +class CognitoOptions(TypedDict): + Enabled: Optional[bool] + IdentityPoolId: Optional[str] + RoleArn: Optional[str] + UserPoolId: Optional[str] + + +class MasterUserOptions(TypedDict): + MasterUserARN: Optional[str] + MasterUserName: Optional[str] + MasterUserPassword: Optional[str] + + +class AdvancedSecurityOptionsInput(TypedDict): + AnonymousAuthEnabled: Optional[bool] + Enabled: Optional[bool] + InternalUserDatabaseEnabled: Optional[bool] + MasterUserOptions: Optional[MasterUserOptions] + + +class EBSOptions(TypedDict): + EBSEnabled: 
Optional[bool] + Iops: Optional[int] + VolumeSize: Optional[int] + VolumeType: Optional[str] + + +class EncryptionAtRestOptions(TypedDict): + Enabled: Optional[bool] + KmsKeyId: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ElasticsearchDomainProvider(ResourceProvider[ElasticsearchDomainProperties]): + TYPE = "AWS::Elasticsearch::Domain" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ElasticsearchDomainProperties], + ) -> ProgressEvent[ElasticsearchDomainProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + + + Create-only properties: + - /properties/DomainName + + Read-only properties: + - /properties/Id + - /properties/DomainArn + - /properties/DomainEndpoint + - /properties/Arn + + + + """ + model = request.desired_state + + client = request.aws_client_factory.es + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + request.custom_context[REPEATED_INVOCATION] = True + + # defaults + domain_name = model.get("DomainName") + if not domain_name: + model["DomainName"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + params = copy.deepcopy(model) + params = convert_to_typed_dict(CreateElasticsearchDomainRequest, params) + params = util.remove_none_values(params) + cluster_config = params.get("ElasticsearchClusterConfig") + if isinstance(cluster_config, dict): + # set defaults required for boto3 calls + cluster_config.setdefault("DedicatedMasterType", "m3.medium.elasticsearch") + cluster_config.setdefault("WarmType", "ultrawarm1.medium.elasticsearch") + + client.create_elasticsearch_domain(**params) + + domain = client.describe_elasticsearch_domain(DomainName=model["DomainName"]) + if domain["DomainStatus"]["Created"]: + # set data + model["Arn"] = domain["DomainStatus"]["ARN"] + model["Id"] = model["DomainName"] + model["DomainArn"] = domain["DomainStatus"]["ARN"] + model["DomainEndpoint"] = domain["DomainStatus"].get("Endpoint") + + if tags := model.get("Tags", []): + client.add_tags(ARN=model["Arn"], TagList=tags) + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + else: + return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model) + + def read( + self, + request: ResourceRequest[ElasticsearchDomainProperties], + ) -> ProgressEvent[ElasticsearchDomainProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ElasticsearchDomainProperties], + ) -> ProgressEvent[ElasticsearchDomainProperties]: + """ + Delete a resource + + + """ + client = request.aws_client_factory.es + # TODO the delete is currently synchronous; + # if this changes, we should also reflect the OperationStatus here + client.delete_elasticsearch_domain(DomainName=request.previous_state["DomainName"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[ElasticsearchDomainProperties], + ) -> ProgressEvent[ElasticsearchDomainProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain.schema.json 
b/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain.schema.json new file mode 100644 index 0000000000000..691137e956431 --- /dev/null +++ b/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain.schema.json @@ -0,0 +1,317 @@ +{ + "typeName": "AWS::Elasticsearch::Domain", + "description": "Resource Type definition for AWS::Elasticsearch::Domain", + "additionalProperties": false, + "properties": { + "ElasticsearchClusterConfig": { + "$ref": "#/definitions/ElasticsearchClusterConfig" + }, + "DomainName": { + "type": "string" + }, + "ElasticsearchVersion": { + "type": "string" + }, + "LogPublishingOptions": { + "type": "object", + "patternProperties": { + "[a-zA-Z0-9]+": { + "$ref": "#/definitions/LogPublishingOption" + } + } + }, + "SnapshotOptions": { + "$ref": "#/definitions/SnapshotOptions" + }, + "VPCOptions": { + "$ref": "#/definitions/VPCOptions" + }, + "NodeToNodeEncryptionOptions": { + "$ref": "#/definitions/NodeToNodeEncryptionOptions" + }, + "AccessPolicies": { + "type": "object" + }, + "DomainEndpointOptions": { + "$ref": "#/definitions/DomainEndpointOptions" + }, + "DomainArn": { + "type": "string" + }, + "CognitoOptions": { + "$ref": "#/definitions/CognitoOptions" + }, + "AdvancedOptions": { + "type": "object", + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "AdvancedSecurityOptions": { + "$ref": "#/definitions/AdvancedSecurityOptionsInput" + }, + "DomainEndpoint": { + "type": "string" + }, + "EBSOptions": { + "$ref": "#/definitions/EBSOptions" + }, + "Id": { + "type": "string" + }, + "Arn": { + "type": "string" + }, + "EncryptionAtRestOptions": { + "$ref": "#/definitions/EncryptionAtRestOptions" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "definitions": { + "LogPublishingOption": { + "type": "object", + "additionalProperties": false, + "properties": { + "CloudWatchLogsLogGroupArn": { + "type": "string" + }, + "Enabled": { + "type": "boolean" + } + } + }, + "ElasticsearchClusterConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "InstanceCount": { + "type": "integer" + }, + "WarmEnabled": { + "type": "boolean" + }, + "WarmCount": { + "type": "integer" + }, + "DedicatedMasterEnabled": { + "type": "boolean" + }, + "ZoneAwarenessConfig": { + "$ref": "#/definitions/ZoneAwarenessConfig" + }, + "ColdStorageOptions": { + "$ref": "#/definitions/ColdStorageOptions" + }, + "DedicatedMasterCount": { + "type": "integer" + }, + "InstanceType": { + "type": "string" + }, + "WarmType": { + "type": "string" + }, + "ZoneAwarenessEnabled": { + "type": "boolean" + }, + "DedicatedMasterType": { + "type": "string" + } + } + }, + "VPCOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "SecurityGroupIds": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "SubnetIds": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + } + } + }, + "SnapshotOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "AutomatedSnapshotStartHour": { + "type": "integer" + } + } + }, + "ZoneAwarenessConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "AvailabilityZoneCount": { + "type": "integer" + } + } + }, + "NodeToNodeEncryptionOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + } + } + }, 
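The `create()` handler above follows CloudFormation's repeated-invocation model: the first call issues the API request and returns `IN_PROGRESS`, and subsequent calls re-check the domain until it reports `Created`. A hypothetical driver loop to make that contract concrete (the real deployment engine adds scheduling, timeouts, and error events):

```python
from localstack.services.cloudformation.resource_provider import OperationStatus


def drive_create(provider, request, max_rounds: int = 60):
    # re-invoke create() until the provider reports a terminal status; the
    # REPEATED_INVOCATION marker in request.custom_context is mutated in place,
    # which is how the next round knows not to re-send the API request
    for _ in range(max_rounds):
        event = provider.create(request)
        if event.status == OperationStatus.SUCCESS:
            return event.resource_model
        request.desired_state = event.resource_model
    raise TimeoutError("resource did not stabilize within the allotted rounds")
```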
+ "ColdStorageOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + } + } + }, + "DomainEndpointOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "CustomEndpointCertificateArn": { + "type": "string" + }, + "CustomEndpointEnabled": { + "type": "boolean" + }, + "EnforceHTTPS": { + "type": "boolean" + }, + "CustomEndpoint": { + "type": "string" + }, + "TLSSecurityPolicy": { + "type": "string" + } + } + }, + "CognitoOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "IdentityPoolId": { + "type": "string" + }, + "UserPoolId": { + "type": "string" + }, + "RoleArn": { + "type": "string" + } + } + }, + "EBSOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "EBSEnabled": { + "type": "boolean" + }, + "VolumeType": { + "type": "string" + }, + "Iops": { + "type": "integer" + }, + "VolumeSize": { + "type": "integer" + } + } + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "EncryptionAtRestOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "type": "string" + }, + "Enabled": { + "type": "boolean" + } + } + }, + "MasterUserOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "MasterUserPassword": { + "type": "string" + }, + "MasterUserName": { + "type": "string" + }, + "MasterUserARN": { + "type": "string" + } + } + }, + "AdvancedSecurityOptionsInput": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "MasterUserOptions": { + "$ref": "#/definitions/MasterUserOptions" + }, + "AnonymousAuthEnabled": { + "type": "boolean" + }, + "InternalUserDatabaseEnabled": { + "type": "boolean" + } + } + } + }, + "createOnlyProperties": [ + "/properties/DomainName" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/DomainArn", + "/properties/DomainEndpoint", + "/properties/Arn" + ] +} diff --git a/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain_plugin.py b/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain_plugin.py new file mode 100644 index 0000000000000..c5f22fa0b816e --- /dev/null +++ b/localstack-core/localstack/services/opensearch/resource_providers/aws_elasticsearch_domain_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ElasticsearchDomainProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Elasticsearch::Domain" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.opensearch.resource_providers.aws_elasticsearch_domain import ( + ElasticsearchDomainProvider, + ) + + self.factory = ElasticsearchDomainProvider diff --git a/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain.py b/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain.py new file mode 100644 index 0000000000000..96b8c60ec0b2b --- /dev/null +++ 
b/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain.py @@ -0,0 +1,312 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class OpenSearchServiceDomainProperties(TypedDict): + AccessPolicies: Optional[dict] + AdvancedOptions: Optional[dict] + AdvancedSecurityOptions: Optional[AdvancedSecurityOptionsInput] + Arn: Optional[str] + ClusterConfig: Optional[ClusterConfig] + CognitoOptions: Optional[CognitoOptions] + DomainArn: Optional[str] + DomainEndpoint: Optional[str] + DomainEndpointOptions: Optional[DomainEndpointOptions] + DomainEndpoints: Optional[dict] + DomainName: Optional[str] + EBSOptions: Optional[EBSOptions] + EncryptionAtRestOptions: Optional[EncryptionAtRestOptions] + EngineVersion: Optional[str] + Id: Optional[str] + LogPublishingOptions: Optional[dict] + NodeToNodeEncryptionOptions: Optional[NodeToNodeEncryptionOptions] + OffPeakWindowOptions: Optional[OffPeakWindowOptions] + ServiceSoftwareOptions: Optional[ServiceSoftwareOptions] + SnapshotOptions: Optional[SnapshotOptions] + SoftwareUpdateOptions: Optional[SoftwareUpdateOptions] + Tags: Optional[list[Tag]] + VPCOptions: Optional[VPCOptions] + + +class ZoneAwarenessConfig(TypedDict): + AvailabilityZoneCount: Optional[int] + + +class ClusterConfig(TypedDict): + DedicatedMasterCount: Optional[int] + DedicatedMasterEnabled: Optional[bool] + DedicatedMasterType: Optional[str] + InstanceCount: Optional[int] + InstanceType: Optional[str] + WarmCount: Optional[int] + WarmEnabled: Optional[bool] + WarmType: Optional[str] + ZoneAwarenessConfig: Optional[ZoneAwarenessConfig] + ZoneAwarenessEnabled: Optional[bool] + + +class SnapshotOptions(TypedDict): + AutomatedSnapshotStartHour: Optional[int] + + +class VPCOptions(TypedDict): + SecurityGroupIds: Optional[list[str]] + SubnetIds: Optional[list[str]] + + +class NodeToNodeEncryptionOptions(TypedDict): + Enabled: Optional[bool] + + +class DomainEndpointOptions(TypedDict): + CustomEndpoint: Optional[str] + CustomEndpointCertificateArn: Optional[str] + CustomEndpointEnabled: Optional[bool] + EnforceHTTPS: Optional[bool] + TLSSecurityPolicy: Optional[str] + + +class CognitoOptions(TypedDict): + Enabled: Optional[bool] + IdentityPoolId: Optional[str] + RoleArn: Optional[str] + UserPoolId: Optional[str] + + +class MasterUserOptions(TypedDict): + MasterUserARN: Optional[str] + MasterUserName: Optional[str] + MasterUserPassword: Optional[str] + + +class Idp(TypedDict): + EntityId: Optional[str] + MetadataContent: Optional[str] + + +class SAMLOptions(TypedDict): + Enabled: Optional[bool] + Idp: Optional[Idp] + MasterBackendRole: Optional[str] + MasterUserName: Optional[str] + RolesKey: Optional[str] + SessionTimeoutMinutes: Optional[int] + SubjectKey: Optional[str] + + +class AdvancedSecurityOptionsInput(TypedDict): + AnonymousAuthDisableDate: Optional[str] + AnonymousAuthEnabled: Optional[bool] + Enabled: Optional[bool] + InternalUserDatabaseEnabled: Optional[bool] + MasterUserOptions: Optional[MasterUserOptions] + SAMLOptions: Optional[SAMLOptions] + + +class EBSOptions(TypedDict): + EBSEnabled: Optional[bool] + Iops: Optional[int] + Throughput: Optional[int] + VolumeSize: Optional[int] + VolumeType: Optional[str] + + 
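Since these TypedDicts are plain dicts at runtime, a template's `Properties` block maps directly onto `OpenSearchServiceDomainProperties`. An illustrative desired state (all values hypothetical); note that CloudFormation may deliver numeric fields as strings, which the `create()` handler below coerces:

```python
props = {
    "DomainName": "my-domain",
    "EngineVersion": "OpenSearch_2.11",
    "ClusterConfig": {"InstanceType": "t3.small.search", "InstanceCount": "1"},
    "EBSOptions": {"EBSEnabled": True, "VolumeType": "gp2", "VolumeSize": "10"},
    "Tags": [{"Key": "env", "Value": "test"}],
}
```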
+class EncryptionAtRestOptions(TypedDict): + Enabled: Optional[bool] + KmsKeyId: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class ServiceSoftwareOptions(TypedDict): + AutomatedUpdateDate: Optional[str] + Cancellable: Optional[bool] + CurrentVersion: Optional[str] + Description: Optional[str] + NewVersion: Optional[str] + OptionalDeployment: Optional[bool] + UpdateAvailable: Optional[bool] + UpdateStatus: Optional[str] + + +class WindowStartTime(TypedDict): + Hours: Optional[int] + Minutes: Optional[int] + + +class OffPeakWindow(TypedDict): + WindowStartTime: Optional[WindowStartTime] + + +class OffPeakWindowOptions(TypedDict): + Enabled: Optional[bool] + OffPeakWindow: Optional[OffPeakWindow] + + +class SoftwareUpdateOptions(TypedDict): + AutoSoftwareUpdateEnabled: Optional[bool] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class OpenSearchServiceDomainProvider(ResourceProvider[OpenSearchServiceDomainProperties]): + TYPE = "AWS::OpenSearchService::Domain" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[OpenSearchServiceDomainProperties], + ) -> ProgressEvent[OpenSearchServiceDomainProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/DomainName + + + + Create-only properties: + - /properties/DomainName + + Read-only properties: + - /properties/Id + - /properties/Arn + - /properties/DomainArn + - /properties/DomainEndpoint + - /properties/DomainEndpoints + - /properties/ServiceSoftwareOptions + - /properties/AdvancedSecurityOptions/AnonymousAuthDisableDate + + IAM permissions required: + - es:CreateDomain + - es:DescribeDomain + - es:AddTags + - es:ListTags + + """ + model = request.desired_state + opensearch_client = request.aws_client_factory.opensearch + if not request.custom_context.get(REPEATED_INVOCATION): + # resource is not ready + # this is the first time this callback is invoked + request.custom_context[REPEATED_INVOCATION] = True + + # defaults + domain_name = model.get("DomainName") + if not domain_name: + domain_name = util.generate_default_name( + request.stack_name, request.logical_resource_id + ).lower()[0:28] + model["DomainName"] = domain_name + + properties = util.remove_none_values(model) + cluster_config = properties.get("ClusterConfig") + if isinstance(cluster_config, dict): + # set defaults required for boto3 calls + cluster_config.setdefault("DedicatedMasterType", "m3.medium.search") + cluster_config.setdefault("WarmType", "ultrawarm1.medium.search") + + for key in ["DedicatedMasterCount", "InstanceCount", "WarmCount"]: + if key in cluster_config and isinstance(cluster_config[key], str): + cluster_config[key] = int(cluster_config[key]) + + if properties.get("AccessPolicies"): + properties["AccessPolicies"] = json.dumps(properties["AccessPolicies"]) + + if ebs_options := properties.get("EBSOptions"): + for key in ["Iops", "Throughput", "VolumeSize"]: + if key in ebs_options and isinstance(ebs_options[key], str): + ebs_options[key] = int(ebs_options[key]) + + create_kwargs = {**util.deselect_attributes(properties, ["Tags"])} + if tags := properties.get("Tags"): + create_kwargs["TagList"] = tags + opensearch_client.create_domain(**create_kwargs) + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + opensearch_domain = opensearch_client.describe_domain(DomainName=model["DomainName"]) + if 
opensearch_domain["DomainStatus"]["Processing"] is False: + # set data + model["Arn"] = opensearch_domain["DomainStatus"]["ARN"] + model["Id"] = opensearch_domain["DomainStatus"]["DomainId"] + model["DomainArn"] = opensearch_domain["DomainStatus"]["ARN"] + model["DomainEndpoint"] = opensearch_domain["DomainStatus"].get("Endpoint") + model["DomainEndpoints"] = opensearch_domain["DomainStatus"].get("Endpoints") + model["ServiceSoftwareOptions"] = opensearch_domain["DomainStatus"].get( + "ServiceSoftwareOptions" + ) + model.setdefault("AdvancedSecurityOptions", {})["AnonymousAuthDisableDate"] = ( + opensearch_domain["DomainStatus"] + .get("AdvancedSecurityOptions", {}) + .get("AnonymousAuthDisableDate") + ) + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + else: + return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model) + + def read( + self, + request: ResourceRequest[OpenSearchServiceDomainProperties], + ) -> ProgressEvent[OpenSearchServiceDomainProperties]: + """ + Fetch resource information + + IAM permissions required: + - es:DescribeDomain + - es:ListTags + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[OpenSearchServiceDomainProperties], + ) -> ProgressEvent[OpenSearchServiceDomainProperties]: + """ + Delete a resource + + IAM permissions required: + - es:DeleteDomain + - es:DescribeDomain + """ + opensearch_client = request.aws_client_factory.opensearch + # TODO the delete is currently synchronous; + # if this changes, we should also reflect the OperationStatus here + opensearch_client.delete_domain(DomainName=request.previous_state["DomainName"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[OpenSearchServiceDomainProperties], + ) -> ProgressEvent[OpenSearchServiceDomainProperties]: + """ + Update a resource + + IAM permissions required: + - es:UpdateDomain + - es:UpgradeDomain + - es:DescribeDomain + - es:AddTags + - es:RemoveTags + - es:ListTags + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain.schema.json b/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain.schema.json new file mode 100644 index 0000000000000..1e75c1642c1ae --- /dev/null +++ b/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain.schema.json @@ -0,0 +1,511 @@ +{ + "typeName": "AWS::OpenSearchService::Domain", + "description": "An example resource schema demonstrating some basic constructs and validation rules.", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "definitions": { + "ZoneAwarenessConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "AvailabilityZoneCount": { + "type": "integer" + } + } + }, + "ClusterConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "InstanceCount": { + "type": "integer" + }, + "WarmEnabled": { + "type": "boolean" + }, + "WarmCount": { + "type": "integer" + }, + "DedicatedMasterEnabled": { + "type": "boolean" + }, + "ZoneAwarenessConfig": { + "$ref": "#/definitions/ZoneAwarenessConfig" + }, + "DedicatedMasterCount": { + "type": "integer" + }, + "InstanceType": { + "type": "string" + }, + "WarmType": { + "type": "string" + }, + "ZoneAwarenessEnabled": { + "type": "boolean" + }, + "DedicatedMasterType": { + "type": "string" + } + } + }, + 
"LogPublishingOption": { + "type": "object", + "additionalProperties": false, + "properties": { + "CloudWatchLogsLogGroupArn": { + "type": "string" + }, + "Enabled": { + "type": "boolean" + } + } + }, + "SnapshotOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "AutomatedSnapshotStartHour": { + "type": "integer" + } + } + }, + "VPCOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "SecurityGroupIds": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + }, + "SubnetIds": { + "type": "array", + "uniqueItems": true, + "items": { + "type": "string" + } + } + } + }, + "NodeToNodeEncryptionOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + } + } + }, + "DomainEndpointOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "CustomEndpointCertificateArn": { + "type": "string" + }, + "CustomEndpointEnabled": { + "type": "boolean" + }, + "EnforceHTTPS": { + "type": "boolean" + }, + "CustomEndpoint": { + "type": "string" + }, + "TLSSecurityPolicy": { + "type": "string" + } + } + }, + "CognitoOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "IdentityPoolId": { + "type": "string" + }, + "UserPoolId": { + "type": "string" + }, + "RoleArn": { + "type": "string" + } + } + }, + "MasterUserOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "MasterUserPassword": { + "type": "string" + }, + "MasterUserName": { + "type": "string" + }, + "MasterUserARN": { + "type": "string" + } + } + }, + "Idp": { + "type": "object", + "additionalProperties": false, + "properties": { + "MetadataContent": { + "type": "string", + "maxLength": 20480, + "minLength": 1 + }, + "EntityId": { + "type": "string" + } + }, + "required": [ + "MetadataContent", + "EntityId" + ] + }, + "SAMLOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "Idp": { + "$ref": "#/definitions/Idp" + }, + "MasterUserName": { + "type": "string" + }, + "MasterBackendRole": { + "type": "string" + }, + "SubjectKey": { + "type": "string" + }, + "RolesKey": { + "type": "string" + }, + "SessionTimeoutMinutes": { + "type": "integer" + } + } + }, + "AdvancedSecurityOptionsInput": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "MasterUserOptions": { + "$ref": "#/definitions/MasterUserOptions" + }, + "InternalUserDatabaseEnabled": { + "type": "boolean" + }, + "AnonymousAuthEnabled": { + "type": "boolean" + }, + "SAMLOptions": { + "$ref": "#/definitions/SAMLOptions" + }, + "AnonymousAuthDisableDate": { + "type": "string" + } + } + }, + "EBSOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "EBSEnabled": { + "type": "boolean" + }, + "VolumeType": { + "type": "string" + }, + "Iops": { + "type": "integer" + }, + "VolumeSize": { + "type": "integer" + }, + "Throughput": { + "type": "integer" + } + } + }, + "EncryptionAtRestOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "type": "string" + }, + "Enabled": { + "type": "boolean" + } + } + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "description": "The key of the tag.", + "type": "string", + "minLength": 0, + "maxLength": 256 + }, + "Key": { + 
"description": "The value of the tag.", + "type": "string", + "minLength": 0, + "maxLength": 128 + } + }, + "required": [ + "Value", + "Key" + ] + }, + "ServiceSoftwareOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "CurrentVersion": { + "type": "string" + }, + "NewVersion": { + "type": "string" + }, + "UpdateAvailable": { + "type": "boolean" + }, + "Cancellable": { + "type": "boolean" + }, + "UpdateStatus": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "AutomatedUpdateDate": { + "type": "string" + }, + "OptionalDeployment": { + "type": "boolean" + } + } + }, + "WindowStartTime": { + "type": "object", + "additionalProperties": false, + "properties": { + "Hours": { + "type": "integer", + "minimum": 0, + "maximum": 23 + }, + "Minutes": { + "type": "integer", + "minimum": 0, + "maximum": 59 + } + }, + "required": [ + "Hours", + "Minutes" + ] + }, + "OffPeakWindow": { + "type": "object", + "additionalProperties": false, + "properties": { + "WindowStartTime": { + "$ref": "#/definitions/WindowStartTime" + } + } + }, + "OffPeakWindowOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + }, + "OffPeakWindow": { + "$ref": "#/definitions/OffPeakWindow" + } + } + }, + "SoftwareUpdateOptions": { + "type": "object", + "additionalProperties": false, + "properties": { + "AutoSoftwareUpdateEnabled": { + "type": "boolean" + } + } + } + }, + "properties": { + "ClusterConfig": { + "$ref": "#/definitions/ClusterConfig" + }, + "DomainName": { + "type": "string" + }, + "AccessPolicies": { + "type": "object" + }, + "EngineVersion": { + "type": "string" + }, + "AdvancedOptions": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "type": "string" + } + } + }, + "LogPublishingOptions": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "[a-zA-Z0-9]+": { + "$ref": "#/definitions/LogPublishingOption" + } + } + }, + "SnapshotOptions": { + "$ref": "#/definitions/SnapshotOptions" + }, + "VPCOptions": { + "$ref": "#/definitions/VPCOptions" + }, + "NodeToNodeEncryptionOptions": { + "$ref": "#/definitions/NodeToNodeEncryptionOptions" + }, + "DomainEndpointOptions": { + "$ref": "#/definitions/DomainEndpointOptions" + }, + "CognitoOptions": { + "$ref": "#/definitions/CognitoOptions" + }, + "AdvancedSecurityOptions": { + "$ref": "#/definitions/AdvancedSecurityOptionsInput" + }, + "DomainEndpoint": { + "type": "string" + }, + "DomainEndpoints": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.*$": { + "type": "string" + } + } + }, + "EBSOptions": { + "$ref": "#/definitions/EBSOptions" + }, + "Id": { + "type": "string" + }, + "Arn": { + "type": "string" + }, + "DomainArn": { + "type": "string" + }, + "EncryptionAtRestOptions": { + "$ref": "#/definitions/EncryptionAtRestOptions" + }, + "Tags": { + "description": "An arbitrary set of tags (key-value pairs) for this Domain.", + "items": { + "$ref": "#/definitions/Tag" + }, + "type": "array", + "uniqueItems": true + }, + "ServiceSoftwareOptions": { + "$ref": "#/definitions/ServiceSoftwareOptions" + }, + "OffPeakWindowOptions": { + "$ref": "#/definitions/OffPeakWindowOptions" + }, + "SoftwareUpdateOptions": { + "$ref": "#/definitions/SoftwareUpdateOptions" + } + }, + "additionalProperties": false, + "createOnlyProperties": [ + "/properties/DomainName" + ], + "conditionalCreateOnlyProperties": [ + 
"/properties/EncryptionAtRestOptions/properties", + "/properties/AdvancedSecurityOptions/properties/Enabled" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/Arn", + "/properties/DomainArn", + "/properties/DomainEndpoint", + "/properties/DomainEndpoints", + "/properties/ServiceSoftwareOptions", + "/properties/AdvancedSecurityOptions/AnonymousAuthDisableDate" + ], + "writeOnlyProperties": [ + "/properties/AdvancedSecurityOptions/MasterUserOptions", + "/properties/AdvancedSecurityOptions/SAMLOptions/MasterUserName", + "/properties/AdvancedSecurityOptions/SAMLOptions/MasterBackendRole" + ], + "primaryIdentifier": [ + "/properties/DomainName" + ], + "handlers": { + "create": { + "permissions": [ + "es:CreateDomain", + "es:DescribeDomain", + "es:AddTags", + "es:ListTags" + ] + }, + "read": { + "permissions": [ + "es:DescribeDomain", + "es:ListTags" + ] + }, + "update": { + "permissions": [ + "es:UpdateDomain", + "es:UpgradeDomain", + "es:DescribeDomain", + "es:AddTags", + "es:RemoveTags", + "es:ListTags" + ] + }, + "delete": { + "permissions": [ + "es:DeleteDomain", + "es:DescribeDomain" + ] + } + } +} diff --git a/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain_plugin.py b/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain_plugin.py new file mode 100644 index 0000000000000..029076b1aefa8 --- /dev/null +++ b/localstack-core/localstack/services/opensearch/resource_providers/aws_opensearchservice_domain_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class OpenSearchServiceDomainProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::OpenSearchService::Domain" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.opensearch.resource_providers.aws_opensearchservice_domain import ( + OpenSearchServiceDomainProvider, + ) + + self.factory = OpenSearchServiceDomainProvider diff --git a/localstack/services/opensearch/versions.py b/localstack-core/localstack/services/opensearch/versions.py similarity index 88% rename from localstack/services/opensearch/versions.py rename to localstack-core/localstack/services/opensearch/versions.py index 2bb9c30b96186..205b9b33d5202 100644 --- a/localstack/services/opensearch/versions.py +++ b/localstack-core/localstack/services/opensearch/versions.py @@ -5,6 +5,7 @@ python -m localstack.services.opensearch.versions """ + from typing import Dict import semver @@ -14,11 +15,16 @@ # Internal representation of the OpenSearch versions (without the "OpenSearch_" prefix) _opensearch_install_versions = { - "1.0": "1.0.0", - "1.1": "1.1.0", - "1.2": "1.2.4", - "1.3": "1.3.6", + "2.13": "2.13.0", + "2.11": "2.11.1", + "2.9": "2.9.0", + "2.7": "2.7.0", + "2.5": "2.5.0", "2.3": "2.3.0", + "1.3": "1.3.12", + "1.2": "1.2.4", + "1.1": "1.1.0", + "1.0": "1.0.0", } # Internal representation of the Elasticsearch versions (without the "Elasticsearch_" prefix) _elasticsearch_install_versions = { @@ -210,7 +216,40 @@ ), CompatibleVersionsMap( SourceVersion="OpenSearch_1.3", - TargetVersions=["OpenSearch_2.3"], + TargetVersions=[ + "OpenSearch_2.3", + "OpenSearch_2.5", + "OpenSearch_2.7", + "OpenSearch_2.9", + "OpenSearch_2.11", + "OpenSearch_2.13", + ], + ), + CompatibleVersionsMap( + SourceVersion="OpenSearch_2.3", + TargetVersions=[ + 
"OpenSearch_2.5", + "OpenSearch_2.7", + "OpenSearch_2.9", + "OpenSearch_2.11", + "OpenSearch_2.13", + ], + ), + CompatibleVersionsMap( + SourceVersion="OpenSearch_2.5", + TargetVersions=["OpenSearch_2.7", "OpenSearch_2.9", "OpenSearch_2.11", "OpenSearch_2.13"], + ), + CompatibleVersionsMap( + SourceVersion="OpenSearch_2.7", + TargetVersions=["OpenSearch_2.9", "OpenSearch_2.11", "OpenSearch_2.13"], + ), + CompatibleVersionsMap( + SourceVersion="OpenSearch_2.9", + TargetVersions=["OpenSearch_2.11", "OpenSearch_2.13"], + ), + CompatibleVersionsMap( + SourceVersion="OpenSearch_2.11", + TargetVersions=["OpenSearch_2.13"], ), ] @@ -318,4 +357,4 @@ def fetch_latest_versions() -> Dict[str, str]: # pragma: no cover if __name__ == "__main__": # pragma: no cover from pprint import pprint - pprint(fetch_latest_versions()) + pprint(fetch_latest_versions(), sort_dicts=False) diff --git a/localstack-core/localstack/services/plugins.py b/localstack-core/localstack/services/plugins.py new file mode 100644 index 0000000000000..fbd75a53f0ca7 --- /dev/null +++ b/localstack-core/localstack/services/plugins.py @@ -0,0 +1,710 @@ +import abc +import functools +import logging +import threading +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor +from enum import Enum +from typing import Callable, Dict, List, Optional, Protocol, Tuple + +from plux import Plugin, PluginLifecycleListener, PluginManager, PluginSpec + +from localstack import config +from localstack.aws.skeleton import DispatchTable, Skeleton +from localstack.aws.spec import load_service +from localstack.config import ServiceProviderConfig +from localstack.runtime import hooks +from localstack.state import StateLifecycleHook, StateVisitable, StateVisitor +from localstack.utils.bootstrap import get_enabled_apis, is_api_enabled, log_duration +from localstack.utils.functions import call_safe +from localstack.utils.sync import SynchronizedDefaultDict, poll_condition + +# set up logger +LOG = logging.getLogger(__name__) + +# namespace for AWS provider plugins +PLUGIN_NAMESPACE = "localstack.aws.provider" + +_default = object() # sentinel object indicating a default value + + +# ----------------- +# PLUGIN UTILITIES +# ----------------- + + +class ServiceException(Exception): + pass + + +class ServiceDisabled(ServiceException): + pass + + +class ServiceStateException(ServiceException): + pass + + +class ServiceLifecycleHook(StateLifecycleHook): + def on_after_init(self): + pass + + def on_before_start(self): + pass + + def on_before_stop(self): + pass + + def on_exception(self): + pass + + +class ServiceProvider(Protocol): + service: str + + +class Service: + """ + FIXME: this has become frankenstein's monster, and it has to go. once we've rid ourselves of the legacy edge + proxy, we can get rid of the ``listener`` concept. we should then do one iteration over all the + ``start_dynamodb``, ``start_``, ``check_``, etc. methods, to make all of those integral part + of the service provider. the assumption that every service provider starts a backend server is outdated, and then + we can get rid of ``start``, and ``check``. 
+ """ + + def __init__( + self, + name, + start=_default, + check=None, + skeleton=None, + active=False, + stop=None, + lifecycle_hook: ServiceLifecycleHook = None, + ): + self.plugin_name = name + self.start_function = start + self.skeleton = skeleton + self.check_function = check + self.default_active = active + self.stop_function = stop + self.lifecycle_hook = lifecycle_hook or ServiceLifecycleHook() + self._provider = None + call_safe(self.lifecycle_hook.on_after_init) + + def start(self, asynchronous): + call_safe(self.lifecycle_hook.on_before_start) + + if not self.start_function: + return + + if self.start_function is _default: + return + + kwargs = {"asynchronous": asynchronous} + if self.skeleton: + kwargs["update_listener"] = self.skeleton + return self.start_function(**kwargs) + + def stop(self): + call_safe(self.lifecycle_hook.on_before_stop) + if not self.stop_function: + return + return self.stop_function() + + def check(self, expect_shutdown=False, print_error=False): + if not self.check_function: + return + return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error) + + def name(self): + return self.plugin_name + + def is_enabled(self): + return is_api_enabled(self.plugin_name) + + def accept_state_visitor(self, visitor: StateVisitor): + """ + Passes the StateVisitor to the ASF provider if it is set and implements the StateVisitable. Otherwise, it uses + the ReflectionStateLocator to visit the service state. + + :param visitor: the visitor + """ + if self._provider and isinstance(self._provider, StateVisitable): + self._provider.accept_state_visitor(visitor) + return + + from localstack.state.inspect import ReflectionStateLocator + + ReflectionStateLocator(service=self.name()).accept_state_visitor(visitor) + + @staticmethod + def for_provider( + provider: ServiceProvider, + dispatch_table_factory: Callable[[ServiceProvider], DispatchTable] = None, + service_lifecycle_hook: ServiceLifecycleHook = None, + ) -> "Service": + """ + Factory method for creating services for providers. This method hides a bunch of legacy code and + band-aids/adapters to make persistence visitors work, while providing compatibility with the legacy edge proxy. + + :param provider: the service provider, i.e., the implementation of the generated ASF service API. + :param dispatch_table_factory: a `MotoFallbackDispatcher` or something similar that uses the provider to + create a dispatch table. this one's a bit clumsy. + :param service_lifecycle_hook: if left empty, the factory checks whether the provider is a ServiceLifecycleHook. + :return: a service instance + """ + # determine the service_lifecycle_hook + if service_lifecycle_hook is None: + if isinstance(provider, ServiceLifecycleHook): + service_lifecycle_hook = provider + + # determine the delegate for injecting into the skeleton + delegate = dispatch_table_factory(provider) if dispatch_table_factory else provider + service = Service( + name=provider.service, + skeleton=Skeleton(load_service(provider.service), delegate), + lifecycle_hook=service_lifecycle_hook, + ) + service._provider = provider + + return service + + +class ServiceState(Enum): + UNKNOWN = "unknown" + AVAILABLE = "available" + DISABLED = "disabled" + STARTING = "starting" + RUNNING = "running" + STOPPING = "stopping" + STOPPED = "stopped" + ERROR = "error" + + +class ServiceContainer: + """ + Holds a service, its state, and exposes lifecycle methods of the service. 
+ """ + + service: Service + state: ServiceState + lock: threading.RLock + errors: List[Exception] + + def __init__(self, service: Service, state=ServiceState.UNKNOWN): + self.service = service + self.state = state + self.lock = threading.RLock() + self.errors = [] + + def get(self) -> Service: + return self.service + + def start(self) -> bool: + try: + self.state = ServiceState.STARTING + self.service.start(asynchronous=True) + except Exception as e: + self.state = ServiceState.ERROR + self.errors.append(e) + LOG.error("error while starting service %s: %s", self.service.name(), e) + return False + return self.check() + + def check(self) -> bool: + try: + self.service.check(print_error=True) + self.state = ServiceState.RUNNING + return True + except Exception as e: + self.state = ServiceState.ERROR + self.errors.append(e) + LOG.error("error while checking service %s: %s", self.service.name(), e) + return False + + def stop(self): + try: + self.state = ServiceState.STOPPING + self.service.stop() + self.state = ServiceState.STOPPED + except Exception as e: + self.state = ServiceState.ERROR + self.errors.append(e) + + +class ServiceManager: + def __init__(self) -> None: + super().__init__() + self._services: Dict[str, ServiceContainer] = {} + self._mutex = threading.RLock() + + def get_service_container(self, name: str) -> Optional[ServiceContainer]: + return self._services.get(name) + + def get_service(self, name: str) -> Optional[Service]: + container = self.get_service_container(name) + return container.service if container else None + + def add_service(self, service: Service) -> bool: + state = ServiceState.AVAILABLE if service.is_enabled() else ServiceState.DISABLED + self._services[service.name()] = ServiceContainer(service, state) + + return True + + def list_available(self) -> List[str]: + return list(self._services.keys()) + + def exists(self, name: str) -> bool: + return name in self._services + + def is_running(self, name: str) -> bool: + return self.get_state(name) == ServiceState.RUNNING + + def check(self, name: str) -> bool: + if self.get_state(name) in [ServiceState.RUNNING, ServiceState.ERROR]: + return self.get_service_container(name).check() + + def check_all(self): + return any(self.check(service_name) for service_name in self.list_available()) + + def get_state(self, name: str) -> Optional[ServiceState]: + container = self.get_service_container(name) + return container.state if container else None + + def get_states(self) -> Dict[str, ServiceState]: + return {name: self.get_state(name) for name in self.list_available()} + + @log_duration() + def require(self, name: str) -> Service: + """ + High level function that always returns a running service, or raises an error. If the service is in a state + that it could be transitioned into a running state, then invoking this function will attempt that transition, + e.g., by starting the service if it is available. 
+ """ + container = self.get_service_container(name) + + if not container: + raise ValueError("no such service %s" % name) + + if container.state == ServiceState.STARTING: + if not poll_condition(lambda: container.state != ServiceState.STARTING, timeout=30): + raise TimeoutError("gave up waiting for service %s to start" % name) + + if container.state == ServiceState.STOPPING: + if not poll_condition(lambda: container.state == ServiceState.STOPPED, timeout=30): + raise TimeoutError("gave up waiting for service %s to stop" % name) + + with container.lock: + if container.state == ServiceState.DISABLED: + raise ServiceDisabled("service %s is disabled" % name) + + if container.state == ServiceState.RUNNING: + return container.service + + if container.state == ServiceState.ERROR: + # raise any capture error + raise container.errors[-1] + + if container.state == ServiceState.AVAILABLE or container.state == ServiceState.STOPPED: + if container.start(): + return container.service + else: + raise container.errors[-1] + + raise ServiceStateException( + "service %s is not ready (%s) and could not be started" % (name, container.state) + ) + + # legacy map compatibility + + def items(self): + return { + container.service.name(): container.service for container in self._services.values() + }.items() + + def keys(self): + return self._services.keys() + + def values(self): + return [container.service for container in self._services.values()] + + def get(self, key): + return self.get_service(key) + + def __iter__(self): + return self._services + + +class ServicePlugin(Plugin): + service: Service + api: str + + @abc.abstractmethod + def create_service(self) -> Service: + raise NotImplementedError + + def load(self): + self.service = self.create_service() + return self.service + + +class ServicePluginAdapter(ServicePlugin): + def __init__( + self, + api: str, + create_service: Callable[[], Service], + should_load: Callable[[], bool] = None, + ) -> None: + super().__init__() + self.api = api + self._create_service = create_service + self._should_load = should_load + + def should_load(self) -> bool: + if self._should_load: + return self._should_load() + return True + + def create_service(self) -> Service: + return self._create_service() + + +def aws_provider(api: str = None, name="default", should_load: Callable[[], bool] = None): + """ + Decorator for marking methods that create a Service instance as a ServicePlugin. Methods marked with this + decorator are discoverable as a PluginSpec within the namespace "localstack.aws.provider", with the name + ":". If api is not explicitly specified, then the method name is used as api name. + """ + + def wrapper(fn): + # sugar for being able to name the function like the api + _api = api or fn.__name__ + + # this causes the plugin framework into pointing the entrypoint to the original function rather than the + # nested factory function + @functools.wraps(fn) + def factory() -> ServicePluginAdapter: + return ServicePluginAdapter(api=_api, should_load=should_load, create_service=fn) + + return PluginSpec(PLUGIN_NAMESPACE, f"{_api}:{name}", factory=factory) + + return wrapper + + +class ServicePluginErrorCollector(PluginLifecycleListener): + """ + A PluginLifecycleListener that collects errors related to service plugins. 
+ """ + + errors: Dict[Tuple[str, str], Exception] # keys are: (api, provider) + + def __init__(self, errors: Dict[str, Exception] = None) -> None: + super().__init__() + self.errors = errors or {} + + def get_key(self, plugin_name) -> Tuple[str, str]: + # the convention is :, currently we don't really expose the provider + # TODO: faulty plugin names would break this + return tuple(plugin_name.split(":", maxsplit=1)) + + def on_resolve_exception(self, namespace: str, entrypoint, exception: Exception): + self.errors[self.get_key(entrypoint.name)] = exception + + def on_init_exception(self, plugin_spec: PluginSpec, exception: Exception): + self.errors[self.get_key(plugin_spec.name)] = exception + + def on_load_exception(self, plugin_spec: PluginSpec, plugin: Plugin, exception: Exception): + self.errors[self.get_key(plugin_spec.name)] = exception + + def has_errors(self, api: str, provider: str = None) -> bool: + for e_api, e_provider in self.errors.keys(): + if api == e_api: + if not provider: + return True + else: + return e_provider == provider + + return False + + +class ServicePluginManager(ServiceManager): + plugin_manager: PluginManager[ServicePlugin] + plugin_errors: ServicePluginErrorCollector + + def __init__( + self, + plugin_manager: PluginManager[ServicePlugin] = None, + provider_config: ServiceProviderConfig = None, + ) -> None: + super().__init__() + self.plugin_errors = ServicePluginErrorCollector() + self.plugin_manager = plugin_manager or PluginManager( + PLUGIN_NAMESPACE, listener=self.plugin_errors + ) + self._api_provider_specs = None + self.provider_config = provider_config or config.SERVICE_PROVIDER_CONFIG + + # locks used to make sure plugin loading is thread safe - will be cleared after single use + self._plugin_load_locks: Dict[str, threading.RLock] = SynchronizedDefaultDict( + threading.RLock + ) + + def get_active_provider(self, service: str) -> str: + """ + Get configured provider for a given service + + :param service: Service name + :return: configured provider + """ + return self.provider_config.get_provider(service) + + def get_default_provider(self) -> str: + """ + Get the default provider + + :return: default provider + """ + return self.provider_config.default_value + + # TODO make the abstraction clearer, to provide better information if service is available versus discoverable + # especially important when considering pro services + def list_available(self) -> List[str]: + """ + List all available services, which have an available, configured provider + + :return: List of service names + """ + return [ + service + for service, providers in self.api_provider_specs.items() + if self.get_active_provider(service) in providers + ] + + def _get_loaded_service_containers( + self, services: Optional[List[str]] = None + ) -> List[ServiceContainer]: + """ + Returns all the available service containers. + :param services: the list of services to restrict the search to. If empty or NULL then service containers for + all available services are queried. + :return: a list of all the available service containers. 
+ """ + services = services or self.list_available() + return [ + c for s in services if (c := super(ServicePluginManager, self).get_service_container(s)) + ] + + def list_loaded_services(self) -> List[str]: + """ + Lists all the services which have a provider that has been initialized + + :return: a list of service names + """ + return [ + service_container.service.name() + for service_container in self._get_loaded_service_containers() + ] + + def list_active_services(self) -> List[str]: + """ + Lists all services that have an initialised provider and are currently running. + + :return: the list of active service names. + """ + return [ + service_container.service.name() + for service_container in self._get_loaded_service_containers() + if service_container.state == ServiceState.RUNNING + ] + + def exists(self, name: str) -> bool: + return name in self.list_available() + + def get_state(self, name: str) -> Optional[ServiceState]: + if name in self._services: + # ServiceContainer exists, which means the plugin has been loaded + return super().get_state(name) + + if not self.exists(name): + # there's definitely no service with this name + return None + + # if a PluginSpec exists, then we can get the container and check whether there was an error loading the plugin + provider = self.get_active_provider(name) + if self.plugin_errors.has_errors(name, provider): + return ServiceState.ERROR + + return ServiceState.AVAILABLE if is_api_enabled(name) else ServiceState.DISABLED + + def get_service_container(self, name: str) -> Optional[ServiceContainer]: + if container := self._services.get(name): + return container + + if not self.exists(name): + return None + + load_lock = self._plugin_load_locks[name] + with load_lock: + # check once again to avoid race conditions + if container := self._services.get(name): + return container + + # this is where we start lazy loading. we now know the PluginSpec for the API exists, + # but the ServiceContainer has not been created. + # this control path will be executed once per service + plugin = self._load_service_plugin(name) + if not plugin or not plugin.service: + return None + + with self._mutex: + super().add_service(plugin.service) + + del self._plugin_load_locks[name] # we only needed the service lock once + + return self._services.get(name) + + @property + def api_provider_specs(self) -> Dict[str, List[str]]: + """ + Returns all provider names within the service plugin namespace and parses their name according to the convention, + that is ":". The result is a dictionary that maps api => List[str (name of a provider)]. + """ + if self._api_provider_specs is not None: + return self._api_provider_specs + + with self._mutex: + if self._api_provider_specs is None: + self._api_provider_specs = self._resolve_api_provider_specs() + return self._api_provider_specs + + @log_duration() + def _load_service_plugin(self, name: str) -> Optional[ServicePlugin]: + providers = self.api_provider_specs.get(name) + if not providers: + # no providers for this api + return None + + preferred_provider = self.get_active_provider(name) + if preferred_provider in providers: + provider = preferred_provider + else: + default = self.get_default_provider() + LOG.warning( + "Configured provider (%s) does not exist for service (%s). Available options are: %s. " + "Falling back to default provider '%s'. 
This can impact the availability of Pro functionality, " + "please fix this configuration issue as soon as possible.", + preferred_provider, + name, + providers, + default, + ) + provider = default + + plugin_name = f"{name}:{provider}" + plugin = self.plugin_manager.load(plugin_name) + plugin.name = plugin_name + + return plugin + + @log_duration() + def _resolve_api_provider_specs(self) -> Dict[str, List[str]]: + result = defaultdict(list) + + for spec in self.plugin_manager.list_plugin_specs(): + api, provider = spec.name.split( + ":" + ) # TODO: error handling, faulty plugins could break the runtime + result[api].append(provider) + + return result + + def apis_with_provider(self, provider: str) -> List[str]: + """ + Lists all apis where a given provider exists for. + :param provider: Name of the provider + :return: List of apis the given provider provides + """ + apis = [] + for api, providers in self.api_provider_specs.items(): + if provider in providers: + apis.append(api) + return apis + + def _stop_services(self, service_containers: List[ServiceContainer]) -> None: + """ + Atomically attempts to stop all given 'ServiceState.STARTING' and 'ServiceState.RUNNING' services. + :param service_containers: the list of service containers to be stopped. + """ + target_service_states = {ServiceState.STARTING, ServiceState.RUNNING} + with self._mutex: + for service_container in service_containers: + if service_container.state in target_service_states: + service_container.stop() + + def stop_services(self, services: List[str] = None): + """ + Stops services for this service manager, if they are currently active. + Will not stop services not already started or in and error state. + + :param services: Service names to stop. If not provided, all services for this manager will be stopped. + """ + target_service_containers = self._get_loaded_service_containers(services=services) + self._stop_services(target_service_containers) + + def stop_all_services(self) -> None: + """ + Stops all services for this service manager, if they are currently active. + Will not stop services not already started or in and error state. 
+ """ + target_service_containers = self._get_loaded_service_containers() + self._stop_services(target_service_containers) + + +# map of service plugins, mapping from service name to plugin details +SERVICE_PLUGINS: ServicePluginManager = ServicePluginManager() + + +# ----------------------------- +# INFRASTRUCTURE HEALTH CHECKS +# ----------------------------- + + +def wait_for_infra_shutdown(): + apis = get_enabled_apis() + + names = [name for name, plugin in SERVICE_PLUGINS.items() if name in apis] + + def check(name): + check_service_health(api=name, expect_shutdown=True) + LOG.debug("[shutdown] api %s has shut down", name) + + # no special significance to 10 workers, seems like a reasonable number given the number of services we have + with ThreadPoolExecutor(max_workers=10) as executor: + executor.map(check, names) + + +def check_service_health(api, expect_shutdown=False): + status = SERVICE_PLUGINS.check(api) + if status == expect_shutdown: + if not expect_shutdown: + LOG.warning('Service "%s" not yet available, retrying...', api) + else: + LOG.warning('Service "%s" still shutting down, retrying...', api) + raise Exception("Service check failed for api: %s" % api) + + +@hooks.on_infra_start(should_load=lambda: config.EAGER_SERVICE_LOADING) +def eager_load_services(): + from localstack.utils.bootstrap import get_preloaded_services + + preloaded_apis = get_preloaded_services() + LOG.debug("Eager loading services: %s", sorted(preloaded_apis)) + + for api in preloaded_apis: + try: + SERVICE_PLUGINS.require(api) + except ServiceDisabled as e: + LOG.debug("%s", e) + except Exception: + LOG.exception("could not load service plugin %s", api) diff --git a/localstack-core/localstack/services/providers.py b/localstack-core/localstack/services/providers.py new file mode 100644 index 0000000000000..810c7fd097b16 --- /dev/null +++ b/localstack-core/localstack/services/providers.py @@ -0,0 +1,438 @@ +from localstack.aws.forwarder import HttpFallbackDispatcher +from localstack.services.plugins import ( + Service, + aws_provider, +) + + +@aws_provider() +def acm(): + from localstack.services.acm.provider import AcmProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = AcmProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def apigateway(): + from localstack.services.apigateway.next_gen.provider import ApigatewayNextGenProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = ApigatewayNextGenProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider(api="apigateway", name="next_gen") +def apigateway_next_gen(): + from localstack.services.apigateway.next_gen.provider import ApigatewayNextGenProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = ApigatewayNextGenProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider(api="apigateway", name="legacy") +def apigateway_legacy(): + from localstack.services.apigateway.legacy.provider import ApigatewayProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = ApigatewayProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def cloudformation(): + from localstack.services.cloudformation.provider import CloudformationProvider + + provider = CloudformationProvider() + return 
Service.for_provider(provider) + + +@aws_provider(api="cloudformation", name="engine-v2") +def cloudformation_v2(): + from localstack.services.cloudformation.v2.provider import CloudformationProviderV2 + + provider = CloudformationProviderV2() + return Service.for_provider(provider) + + +@aws_provider(api="config") +def awsconfig(): + from localstack.services.configservice.provider import ConfigProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = ConfigProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider(api="cloudwatch", name="default") +def cloudwatch(): + from localstack.services.cloudwatch.provider_v2 import CloudwatchProvider + + provider = CloudwatchProvider() + return Service.for_provider(provider) + + +@aws_provider(api="cloudwatch", name="v1") +def cloudwatch_v1(): + from localstack.services.cloudwatch.provider import CloudwatchProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = CloudwatchProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider(api="cloudwatch", name="v2") +def cloudwatch_v2(): + from localstack.services.cloudwatch.provider_v2 import CloudwatchProvider + + provider = CloudwatchProvider() + return Service.for_provider(provider) + + +@aws_provider() +def dynamodb(): + from localstack.services.dynamodb.provider import DynamoDBProvider + + provider = DynamoDBProvider() + return Service.for_provider( + provider, + dispatch_table_factory=lambda _provider: HttpFallbackDispatcher( + _provider, _provider.get_forward_url + ), + ) + + +@aws_provider(api="dynamodbstreams", name="v2") +def dynamodbstreams_v2(): + from localstack.services.dynamodbstreams.v2.provider import DynamoDBStreamsProvider + + provider = DynamoDBStreamsProvider() + return Service.for_provider(provider) + + +@aws_provider(api="dynamodb", name="v2") +def dynamodb_v2(): + from localstack.services.dynamodb.v2.provider import DynamoDBProvider + + provider = DynamoDBProvider() + return Service.for_provider( + provider, + dispatch_table_factory=lambda _provider: HttpFallbackDispatcher( + _provider, _provider.get_forward_url + ), + ) + + +@aws_provider() +def dynamodbstreams(): + from localstack.services.dynamodbstreams.provider import DynamoDBStreamsProvider + + provider = DynamoDBStreamsProvider() + return Service.for_provider(provider) + + +@aws_provider() +def ec2(): + from localstack.services.ec2.provider import Ec2Provider + from localstack.services.moto import MotoFallbackDispatcher + + provider = Ec2Provider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def es(): + from localstack.services.es.provider import EsProvider + + provider = EsProvider() + return Service.for_provider(provider) + + +@aws_provider() +def firehose(): + from localstack.services.firehose.provider import FirehoseProvider + + provider = FirehoseProvider() + return Service.for_provider(provider) + + +@aws_provider() +def iam(): + from localstack.services.iam.provider import IamProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = IamProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def sts(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.sts.provider import StsProvider + + provider = StsProvider() + return Service.for_provider(provider, 
dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def kinesis(): + from localstack.services.kinesis.provider import KinesisProvider + + provider = KinesisProvider() + return Service.for_provider( + provider, + dispatch_table_factory=lambda _provider: HttpFallbackDispatcher( + _provider, _provider.get_forward_url + ), + ) + + +@aws_provider() +def kms(): + from localstack.services.kms.provider import KmsProvider + + provider = KmsProvider() + return Service.for_provider(provider) + + +@aws_provider(api="lambda") +def lambda_(): + from localstack.services.lambda_.provider import LambdaProvider + + provider = LambdaProvider() + return Service.for_provider(provider) + + +@aws_provider(api="lambda", name="asf") +def lambda_asf(): + from localstack.services.lambda_.provider import LambdaProvider + + provider = LambdaProvider() + return Service.for_provider(provider) + + +@aws_provider(api="lambda", name="v2") +def lambda_v2(): + from localstack.services.lambda_.provider import LambdaProvider + + provider = LambdaProvider() + return Service.for_provider(provider) + + +@aws_provider() +def logs(): + from localstack.services.logs.provider import LogsProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = LogsProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def opensearch(): + from localstack.services.opensearch.provider import OpensearchProvider + + provider = OpensearchProvider() + return Service.for_provider(provider) + + +@aws_provider() +def redshift(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.redshift.provider import RedshiftProvider + + provider = RedshiftProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def route53(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.route53.provider import Route53Provider + + provider = Route53Provider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def route53resolver(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.route53resolver.provider import Route53ResolverProvider + + provider = Route53ResolverProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def s3(): + from localstack.services.s3.provider import S3Provider + + provider = S3Provider() + return Service.for_provider(provider) + + +@aws_provider() +def s3control(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.s3control.provider import S3ControlProvider + + provider = S3ControlProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def scheduler(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.scheduler.provider import SchedulerProvider + + provider = SchedulerProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def secretsmanager(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.secretsmanager.provider import SecretsmanagerProvider + + provider = SecretsmanagerProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def ses(): + from 
localstack.services.moto import MotoFallbackDispatcher + from localstack.services.ses.provider import SesProvider + + provider = SesProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def sns(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.sns.provider import SnsProvider + + provider = SnsProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def sqs(): + from localstack.services import edge + from localstack.services.sqs import query_api + from localstack.services.sqs.provider import SqsProvider + + query_api.register(edge.ROUTER) + + provider = SqsProvider() + return Service.for_provider(provider) + + +@aws_provider() +def ssm(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.ssm.provider import SsmProvider + + provider = SsmProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider(api="events", name="default") +def events(): + from localstack.services.events.provider import EventsProvider + + provider = EventsProvider() + return Service.for_provider(provider) + + +@aws_provider(api="events", name="v2") +def events_v2(): + from localstack.services.events.provider import EventsProvider + + provider = EventsProvider() + return Service.for_provider(provider) + + +@aws_provider(api="events", name="v1") +def events_v1(): + from localstack.services.events.v1.provider import EventsProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = EventsProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider(api="events", name="legacy") +def events_legacy(): + from localstack.services.events.v1.provider import EventsProvider + from localstack.services.moto import MotoFallbackDispatcher + + provider = EventsProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def stepfunctions(): + from localstack.services.stepfunctions.provider import StepFunctionsProvider + + provider = StepFunctionsProvider() + return Service.for_provider(provider) + + +# TODO: remove with 4.1.0 to allow smooth deprecation path for users that have v2 set manually +@aws_provider(api="stepfunctions", name="v2") +def stepfunctions_v2(): + # provider for people still manually using `v2` + from localstack.services.stepfunctions.provider import StepFunctionsProvider + + provider = StepFunctionsProvider() + return Service.for_provider(provider) + + +@aws_provider() +def swf(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.swf.provider import SWFProvider + + provider = SWFProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def resourcegroupstaggingapi(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.resourcegroupstaggingapi.provider import ( + ResourcegroupstaggingapiProvider, + ) + + provider = ResourcegroupstaggingapiProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider(api="resource-groups") +def resource_groups(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.resource_groups.provider import ResourceGroupsProvider + + provider = ResourceGroupsProvider() + 
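
As a side note on the two fallback patterns used throughout this providers module, here is a rough sketch of how a provider might wire either dispatcher (the API name, provider class, and module path below are placeholders, not part of this PR):

```python
# Two wiring patterns from this module, shown with placeholder names.
from localstack.aws.forwarder import HttpFallbackDispatcher
from localstack.services.plugins import Service, aws_provider


@aws_provider(api="exampleapi")  # placeholder API name
def exampleapi_moto_backed():
    # operations without a native implementation fall through to moto
    from localstack.services.moto import MotoFallbackDispatcher
    from example.provider import ExampleProvider  # hypothetical

    return Service.for_provider(ExampleProvider(), dispatch_table_factory=MotoFallbackDispatcher)


@aws_provider(api="exampleapi", name="external")
def exampleapi_http_backed():
    # unimplemented operations are forwarded to a backend process over HTTP,
    # as done for kinesis and dynamodb above
    from example.provider import ExampleProvider  # hypothetical

    provider = ExampleProvider()
    return Service.for_provider(
        provider,
        dispatch_table_factory=lambda _p: HttpFallbackDispatcher(_p, _p.get_forward_url),
    )
```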
return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def support(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.support.provider import SupportProvider + + provider = SupportProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) + + +@aws_provider() +def transcribe(): + from localstack.services.moto import MotoFallbackDispatcher + from localstack.services.transcribe.provider import TranscribeProvider + + provider = TranscribeProvider() + return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher) diff --git a/localstack/services/stepfunctions/asl/component/state/state_pass/__init__.py b/localstack-core/localstack/services/redshift/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_pass/__init__.py rename to localstack-core/localstack/services/redshift/__init__.py diff --git a/localstack-core/localstack/services/redshift/provider.py b/localstack-core/localstack/services/redshift/provider.py new file mode 100644 index 0000000000000..4f432e3a1aef5 --- /dev/null +++ b/localstack-core/localstack/services/redshift/provider.py @@ -0,0 +1,57 @@ +import os + +from moto.redshift import responses as redshift_responses +from moto.redshift.models import redshift_backends + +from localstack import config +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.redshift import ( + ClusterSecurityGroupMessage, + DescribeClusterSecurityGroupsMessage, + RedshiftApi, +) +from localstack.services.moto import call_moto +from localstack.state import AssetDirectory, StateVisitor +from localstack.utils.common import recurse_object +from localstack.utils.patch import patch + + +@patch(redshift_responses.itemize) +def itemize(fn, data, parent_key=None, *args, **kwargs): + # TODO: potentially add additional required tags here! 
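
To illustrate what the patched `itemize` corrects, a small before/after sketch of the key rewrite (the payload is invented, but mirrors moto's `{"item": [...]}` XML wrapping):

```python
# What fix_keys does to moto's itemized output (invented payload):
response = {"ClusterSubnetGroups": {"item": [{"ClusterSubnetGroupName": "subnet-group-1"}]}}

# after recurse_object(response, fix_keys), the "item" wrapper is renamed to the
# singular form of the parent tag ("ClusterSubnetGroups" -> "ClusterSubnetGroup"):
expected = {
    "ClusterSubnetGroups": {"ClusterSubnetGroup": [{"ClusterSubnetGroupName": "subnet-group-1"}]}
}
```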
+ list_parent_tags = ["ClusterSubnetGroups"] + + def fix_keys(o, **kwargs): + if isinstance(o, dict): + for k, v in o.items(): + if k in list_parent_tags: + if isinstance(v, dict) and "item" in v: + v[k[:-1]] = v.pop("item") + return o + + result = fn(data, *args, **kwargs) + recurse_object(result, fix_keys) + return result + + +class RedshiftProvider(RedshiftApi): + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(redshift_backends) + visitor.visit(AssetDirectory(self.service, os.path.join(config.dirs.data, "redshift"))) + + @handler("DescribeClusterSecurityGroups", expand=False) + def describe_cluster_security_groups( + self, + context: RequestContext, + request: DescribeClusterSecurityGroupsMessage, + ) -> ClusterSecurityGroupMessage: + result = call_moto(context) + backend = redshift_backends[context.account_id][context.region] + for group in result.get("ClusterSecurityGroups", []): + if group.get("IPRanges"): + continue + sgroup = backend.security_groups.get(group["ClusterSecurityGroupName"]) + group["IPRanges"] = [ + {"Status": "authorized", "CIDRIP": ip} for ip in sgroup.ingress_rules + ] + return result diff --git a/localstack/services/stepfunctions/asl/component/state/state_succeed/__init__.py b/localstack-core/localstack/services/redshift/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_succeed/__init__.py rename to localstack-core/localstack/services/redshift/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster.py b/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster.py new file mode 100644 index 0000000000000..629a7ca7a5b2e --- /dev/null +++ b/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster.py @@ -0,0 +1,262 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class RedshiftClusterProperties(TypedDict): + ClusterType: Optional[str] + DBName: Optional[str] + MasterUserPassword: Optional[str] + MasterUsername: Optional[str] + NodeType: Optional[str] + AllowVersionUpgrade: Optional[bool] + AquaConfigurationStatus: Optional[str] + AutomatedSnapshotRetentionPeriod: Optional[int] + AvailabilityZone: Optional[str] + AvailabilityZoneRelocation: Optional[bool] + AvailabilityZoneRelocationStatus: Optional[str] + Classic: Optional[bool] + ClusterIdentifier: Optional[str] + ClusterParameterGroupName: Optional[str] + ClusterSecurityGroups: Optional[list[str]] + ClusterSubnetGroupName: Optional[str] + ClusterVersion: Optional[str] + DeferMaintenance: Optional[bool] + DeferMaintenanceDuration: Optional[int] + DeferMaintenanceEndTime: Optional[str] + DeferMaintenanceIdentifier: Optional[str] + DeferMaintenanceStartTime: Optional[str] + DestinationRegion: Optional[str] + ElasticIp: Optional[str] + Encrypted: Optional[bool] + Endpoint: Optional[Endpoint] + EnhancedVpcRouting: Optional[bool] + HsmClientCertificateIdentifier: Optional[str] + HsmConfigurationIdentifier: Optional[str] + IamRoles: Optional[list[str]] + Id: Optional[str] + KmsKeyId: Optional[str] + LoggingProperties: Optional[LoggingProperties] + MaintenanceTrackName: Optional[str] 
+ ManualSnapshotRetentionPeriod: Optional[int] + NumberOfNodes: Optional[int] + OwnerAccount: Optional[str] + Port: Optional[int] + PreferredMaintenanceWindow: Optional[str] + PubliclyAccessible: Optional[bool] + ResourceAction: Optional[str] + RevisionTarget: Optional[str] + RotateEncryptionKey: Optional[bool] + SnapshotClusterIdentifier: Optional[str] + SnapshotCopyGrantName: Optional[str] + SnapshotCopyManual: Optional[bool] + SnapshotCopyRetentionPeriod: Optional[int] + SnapshotIdentifier: Optional[str] + Tags: Optional[list[Tag]] + VpcSecurityGroupIds: Optional[list[str]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class LoggingProperties(TypedDict): + BucketName: Optional[str] + S3KeyPrefix: Optional[str] + + +class Endpoint(TypedDict): + Address: Optional[str] + Port: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class RedshiftClusterProvider(ResourceProvider[RedshiftClusterProperties]): + TYPE = "AWS::Redshift::Cluster" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[RedshiftClusterProperties], + ) -> ProgressEvent[RedshiftClusterProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/ClusterIdentifier + + Required properties: + - MasterUserPassword + - NodeType + - MasterUsername + - DBName + - ClusterType + + Create-only properties: + - /properties/ClusterIdentifier + - /properties/OwnerAccount + - /properties/SnapshotIdentifier + - /properties/DBName + - /properties/SnapshotClusterIdentifier + - /properties/ClusterSubnetGroupName + - /properties/MasterUsername + + Read-only properties: + - /properties/Id + - /properties/DeferMaintenanceIdentifier + - /properties/Endpoint/Port + - /properties/Endpoint/Address + + IAM permissions required: + - redshift:DescribeClusters + - redshift:CreateCluster + - redshift:RestoreFromClusterSnapshot + - redshift:EnableLogging + + """ + model = request.desired_state + redshift = request.aws_client_factory.redshift + + if not request.custom_context.get(REPEATED_INVOCATION): + request.custom_context[REPEATED_INVOCATION] = True + + if not model.get("ClusterIdentifier"): + model["ClusterIdentifier"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + result = redshift.create_cluster(**model) + model["Id"] = result["Cluster"]["ClusterIdentifier"] + + try: + cluster = redshift.describe_clusters(ClusterIdentifier=model["ClusterIdentifier"])[ + "Clusters" + ][0] + match cluster["ClusterStatus"]: + case "available": + model.setdefault("Endpoint", {}) + model["Endpoint"]["Address"] = cluster["Endpoint"]["Address"] + model["Endpoint"]["Port"] = str(cluster["Endpoint"]["Port"]) + # getting "Attribute 'DeferMaintenanceIdentifier' does not exist." on AWS + # model["DeferMaintenanceIdentifier"] = "?" 
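
The create handler here follows the resource-provider framework's repeated-invocation contract: each call re-inspects the cluster and returns `IN_PROGRESS` until a terminal state is reached, with `custom_context` carried between calls. A schematic sketch of that contract (the `deploy` driver below is a simplification for illustration, not the framework's actual loop):

```python
# Schematic of the repeated-invocation contract used by resource providers.
from localstack.services.cloudformation.resource_provider import OperationStatus


def deploy(provider, request, max_attempts=60):
    # the engine re-invokes create() until it stops returning IN_PROGRESS,
    # threading custom_context (e.g. the REPEATED_INVOCATION flag) between calls
    for _ in range(max_attempts):
        event = provider.create(request)
        if event.status != OperationStatus.IN_PROGRESS:
            return event
        request.custom_context = event.custom_context
    raise TimeoutError("resource did not stabilize")
```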
+ + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + case failed_state: + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + custom_context=request.custom_context, + message=f"Cluster in failed state: {failed_state}", + ) + + except redshift.exceptions.ClusterNotFoundFault: + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[RedshiftClusterProperties], + ) -> ProgressEvent[RedshiftClusterProperties]: + """ + Fetch resource information + + IAM permissions required: + - redshift:DescribeClusters + - redshift:DescribeLoggingStatus + - redshift:DescribeSnapshotCopyGrant + - redshift:DescribeClusterDbRevisions + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[RedshiftClusterProperties], + ) -> ProgressEvent[RedshiftClusterProperties]: + """ + Delete a resource + + IAM permissions required: + - redshift:DescribeClusters + - redshift:DeleteCluster + """ + model = request.desired_state + redshift = request.aws_client_factory.redshift + + if not request.custom_context.get(REPEATED_INVOCATION): + request.custom_context[REPEATED_INVOCATION] = True + redshift.delete_cluster(ClusterIdentifier=model["ClusterIdentifier"]) + + try: + cluster = redshift.describe_clusters(ClusterIdentifier=model["ClusterIdentifier"])[ + "Clusters" + ][0] + match cluster["ClusterStatus"]: + case "creating" | "modifying": + return ProgressEvent( + status=OperationStatus.FAILED, + resource_model=model, + custom_context=request.custom_context, + message=f"Redshift cluster in unexpected status: {cluster['ClusterStatus']}", + ) + case _: + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + except redshift.exceptions.ClusterNotFoundFault: + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={}, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[RedshiftClusterProperties], + ) -> ProgressEvent[RedshiftClusterProperties]: + """ + Update a resource + + IAM permissions required: + - redshift:DescribeClusters + - redshift:ModifyCluster + - redshift:ModifyClusterIamRoles + - redshift:EnableLogging + - redshift:CreateTags + - redshift:DeleteTags + - redshift:DisableLogging + - redshift:RebootCluster + - redshift:EnableSnapshotCopy + - redshift:DisableSnapshotCopy + - redshift:ModifySnapshotCopyRetentionPeriod + - redshift:ModifyAquaConfiguration + - redshift:ResizeCluster + - redshift:ModifyClusterMaintenance + - redshift:DescribeClusterDbRevisions + - redshift:ModifyClusterDbRevisions + - redshift:PauseCluster + - redshift:ResumeCluster + - redshift:RotateEncryptionKey + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster.schema.json b/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster.schema.json new file mode 100644 index 0000000000000..89feee84e0f67 --- /dev/null +++ b/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster.schema.json @@ -0,0 +1,367 @@ +{ + "typeName": "AWS::Redshift::Cluster", + "description": "An example resource schema demonstrating some basic constructs and validation rules.", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + 
"definitions": { + "Tag": { + "description": "A key-value pair to associate with a resource.", + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string", + "description": "The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "minLength": 1, + "maxLength": 127 + }, + "Value": { + "type": "string", + "description": "The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "minLength": 1, + "maxLength": 255 + } + }, + "required": [ + "Value", + "Key" + ] + }, + "LoggingProperties": { + "type": "object", + "additionalProperties": false, + "properties": { + "BucketName": { + "type": "string" + }, + "S3KeyPrefix": { + "type": "string" + } + }, + "required": [ + "BucketName" + ] + }, + "Endpoint": { + "type": "object", + "additionalProperties": false, + "properties": { + "Port": { + "type": "string" + }, + "Address": { + "type": "string" + } + } + } + }, + "properties": { + "ClusterIdentifier": { + "description": "A unique identifier for the cluster. You use this identifier to refer to the cluster for any subsequent cluster operations such as deleting or modifying. All alphabetical characters must be lower case, no hypens at the end, no two consecutive hyphens. Cluster name should be unique for all clusters within an AWS account", + "type": "string", + "maxLength": 63 + }, + "MasterUsername": { + "description": "The user name associated with the master user account for the cluster that is being created. The user name can't be PUBLIC and first character must be a letter.", + "type": "string", + "maxLength": 128 + }, + "MasterUserPassword": { + "description": "The password associated with the master user account for the cluster that is being created. Password must be between 8 and 64 characters in length, should have at least one uppercase letter.Must contain at least one lowercase letter.Must contain one number.Can be any printable ASCII character.", + "type": "string", + "maxLength": 64 + }, + "NodeType": { + "description": "The node type to be provisioned for the cluster.Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge", + "type": "string" + }, + "AllowVersionUpgrade": { + "description": "Major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default value is True", + "type": "boolean" + }, + "AutomatedSnapshotRetentionPeriod": { + "description": "The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Default value is 1", + "type": "integer" + }, + "AvailabilityZone": { + "description": "The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. Default: A random, system-chosen Availability Zone in the region that is specified by the endpoint", + "type": "string" + }, + "ClusterParameterGroupName": { + "description": "The name of the parameter group to be associated with this cluster.", + "type": "string", + "maxLength": 255 + }, + "ClusterType": { + "description": "The type of the cluster. 
When cluster type is specified as single-node, the NumberOfNodes parameter is not required and if multi-node, the NumberOfNodes parameter is required", + "type": "string" + }, + "ClusterVersion": { + "description": "The version of the Amazon Redshift engine software that you want to deploy on the cluster.The version selected runs on all the nodes in the cluster.", + "type": "string" + }, + "ClusterSubnetGroupName": { + "description": "The name of a cluster subnet group to be associated with this cluster.", + "type": "string" + }, + "DBName": { + "description": "The name of the first database to be created when the cluster is created. To create additional databases after the cluster is created, connect to the cluster with a SQL client and use SQL commands to create a database.", + "type": "string" + }, + "ElasticIp": { + "description": "The Elastic IP (EIP) address for the cluster.", + "type": "string" + }, + "Encrypted": { + "description": "If true, the data in the cluster is encrypted at rest.", + "type": "boolean" + }, + "HsmClientCertificateIdentifier": { + "description": "Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM", + "type": "string" + }, + "HsmConfigurationIdentifier": { + "description": "Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.", + "type": "string" + }, + "KmsKeyId": { + "description": "The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.", + "type": "string" + }, + "NumberOfNodes": { + "description": "The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node.", + "type": "integer" + }, + "Port": { + "description": "The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings", + "type": "integer" + }, + "PreferredMaintenanceWindow": { + "description": "The weekly time range (in UTC) during which automated cluster maintenance can occur.", + "type": "string" + }, + "PubliclyAccessible": { + "description": "If true, the cluster can be accessed from a public network.", + "type": "boolean" + }, + "ClusterSecurityGroups": { + "description": "A list of security groups to be associated with this cluster.", + "type": "array", + "insertionOrder": false, + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "IamRoles": { + "description": "A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 50 IAM roles in a single request", + "type": "array", + "insertionOrder": false, + "maxItems": 50, + "items": { + "type": "string" + } + }, + "Tags": { + "description": "The list of tags for the cluster parameter group.", + "type": "array", + "insertionOrder": false, + "maxItems": 50, + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "VpcSecurityGroupIds": { + "description": "A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.", + "type": "array", + "insertionOrder": false, + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "SnapshotClusterIdentifier": { + "description": "The name of the cluster the source snapshot was created from. 
This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.", + "type": "string" + }, + "SnapshotIdentifier": { + "description": "The name of the snapshot from which to create the new cluster. This parameter isn't case sensitive.", + "type": "string" + }, + "Id": { + "type": "string" + }, + "OwnerAccount": { + "type": "string" + }, + "LoggingProperties": { + "$ref": "#/definitions/LoggingProperties" + }, + "Endpoint": { + "$ref": "#/definitions/Endpoint" + }, + "DestinationRegion": { + "description": "The destination AWS Region that you want to copy snapshots to. Constraints: Must be the name of a valid AWS Region. For more information, see Regions and Endpoints in the Amazon Web Services [https://docs.aws.amazon.com/general/latest/gr/rande.html#redshift_region] General Reference", + "type": "string" + }, + "SnapshotCopyRetentionPeriod": { + "description": "The number of days to retain automated snapshots in the destination region after they are copied from the source region. \n\n Default is 7. \n\n Constraints: Must be at least 1 and no more than 35.", + "type": "integer" + }, + "SnapshotCopyGrantName": { + "description": "The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region.", + "type": "string" + }, + "ManualSnapshotRetentionPeriod": { + "description": "The number of days to retain newly copied snapshots in the destination AWS Region after they are copied from the source AWS Region. If the value is -1, the manual snapshot is retained indefinitely.\n\nThe value must be either -1 or an integer between 1 and 3,653.", + "type": "integer" + }, + "SnapshotCopyManual": { + "description": "Indicates whether to apply the snapshot retention period to newly copied manual snapshots instead of automated snapshots.", + "type": "boolean" + }, + "AvailabilityZoneRelocation": { + "description": "The option to enable relocation for an Amazon Redshift cluster between Availability Zones after the cluster modification is complete.", + "type": "boolean" + }, + "AvailabilityZoneRelocationStatus": { + "description": "The availability zone relocation status of the cluster", + "type": "string" + }, + "AquaConfigurationStatus": { + "description": "The value represents how the cluster is configured to use AQUA (Advanced Query Accelerator) after the cluster is restored. Possible values include the following.\n\nenabled - Use AQUA if it is available for the current Region and Amazon Redshift node type.\ndisabled - Don't use AQUA.\nauto - Amazon Redshift determines whether to use AQUA.\n", + "type": "string" + }, + "Classic": { + "description": "A boolean value indicating whether the resize operation is using the classic resize process. If you don't provide this parameter or set the value to false , the resize type is elastic.", + "type": "boolean" + }, + "EnhancedVpcRouting": { + "description": "An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.\n\nIf this option is true , enhanced VPC routing is enabled.\n\nDefault: false", + "type": "boolean" + }, + "MaintenanceTrackName": { + "description": "The name for the maintenance track that you want to assign for the cluster. This name change is asynchronous. 
The new track name stays in the PendingModifiedValues for the cluster until the next maintenance window. When the maintenance track changes, the cluster is switched to the latest cluster release available for the maintenance track. At this point, the maintenance track name is applied.", + "type": "string" + }, + "DeferMaintenance": { + "description": "A boolean indicating whether to enable the deferred maintenance window.", + "type": "boolean" + }, + "DeferMaintenanceIdentifier": { + "description": "A unique identifier for the deferred maintenance window.", + "type": "string" + }, + "DeferMaintenanceStartTime": { + "description": "A timestamp indicating the start time for the deferred maintenance window.", + "type": "string" + }, + "DeferMaintenanceEndTime": { + "description": "A timestamp indicating end time for the deferred maintenance window. If you specify an end time, you can't specify a duration.", + "type": "string" + }, + "DeferMaintenanceDuration": { + "description": "An integer indicating the duration of the maintenance window in days. If you specify a duration, you can't specify an end time. The duration must be 45 days or less.", + "type": "integer" + }, + "RevisionTarget": { + "description": "The identifier of the database revision. You can retrieve this value from the response to the DescribeClusterDbRevisions request.", + "type": "string" + }, + "ResourceAction": { + "description": "The Redshift operation to be performed. Resource Action supports pause-cluster, resume-cluster APIs", + "type": "string" + }, + "RotateEncryptionKey": { + "description": "A boolean indicating if we want to rotate Encryption Keys.", + "type": "boolean" + } + }, + "additionalProperties": false, + "required": [ + "MasterUserPassword", + "NodeType", + "MasterUsername", + "DBName", + "ClusterType" + ], + "primaryIdentifier": [ + "/properties/ClusterIdentifier" + ], + "readOnlyProperties": [ + "/properties/Id", + "/properties/DeferMaintenanceIdentifier", + "/properties/Endpoint/Port", + "/properties/Endpoint/Address" + ], + "createOnlyProperties": [ + "/properties/ClusterIdentifier", + "/properties/OwnerAccount", + "/properties/SnapshotIdentifier", + "/properties/DBName", + "/properties/SnapshotClusterIdentifier", + "/properties/ClusterSubnetGroupName", + "/properties/MasterUsername" + ], + "writeOnlyProperties": [ + "/properties/MasterUserPassword" + ], + "tagging": { + "taggable": true + }, + "handlers": { + "create": { + "permissions": [ + "redshift:DescribeClusters", + "redshift:CreateCluster", + "redshift:RestoreFromClusterSnapshot", + "redshift:EnableLogging" + ], + "timeoutInMinutes": 2160 + }, + "read": { + "permissions": [ + "redshift:DescribeClusters", + "redshift:DescribeLoggingStatus", + "redshift:DescribeSnapshotCopyGrant", + "redshift:DescribeClusterDbRevisions" + ] + }, + "update": { + "permissions": [ + "redshift:DescribeClusters", + "redshift:ModifyCluster", + "redshift:ModifyClusterIamRoles", + "redshift:EnableLogging", + "redshift:CreateTags", + "redshift:DeleteTags", + "redshift:DisableLogging", + "redshift:RebootCluster", + "redshift:EnableSnapshotCopy", + "redshift:DisableSnapshotCopy", + "redshift:ModifySnapshotCopyRetentionPeriod", + "redshift:ModifyAquaConfiguration", + "redshift:ResizeCluster", + "redshift:ModifyClusterMaintenance", + "redshift:DescribeClusterDbRevisions", + "redshift:ModifyClusterDbRevisions", + "redshift:PauseCluster", + "redshift:ResumeCluster", + "redshift:RotateEncryptionKey" + ], + "timeoutInMinutes": 2160 + }, + "delete": { + "permissions": [ + 
"redshift:DescribeClusters", + "redshift:DeleteCluster" + ], + "timeoutInMinutes": 2160 + }, + "list": { + "permissions": [ + "redshift:DescribeClusters" + ] + } + } +} diff --git a/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster_plugin.py b/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster_plugin.py new file mode 100644 index 0000000000000..742fa8c2c1c39 --- /dev/null +++ b/localstack-core/localstack/services/redshift/resource_providers/aws_redshift_cluster_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class RedshiftClusterProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Redshift::Cluster" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.redshift.resource_providers.aws_redshift_cluster import ( + RedshiftClusterProvider, + ) + + self.factory = RedshiftClusterProvider diff --git a/localstack/services/stepfunctions/asl/component/state/state_wait/__init__.py b/localstack-core/localstack/services/resource_groups/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_wait/__init__.py rename to localstack-core/localstack/services/resource_groups/__init__.py diff --git a/localstack/services/resourcegroups/provider.py b/localstack-core/localstack/services/resource_groups/provider.py similarity index 100% rename from localstack/services/resourcegroups/provider.py rename to localstack-core/localstack/services/resource_groups/provider.py diff --git a/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/__init__.py b/localstack-core/localstack/services/resource_groups/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/__init__.py rename to localstack-core/localstack/services/resource_groups/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group.py b/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group.py new file mode 100644 index 0000000000000..0105de3b2233f --- /dev/null +++ b/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group.py @@ -0,0 +1,172 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class ResourceGroupsGroupProperties(TypedDict): + Name: Optional[str] + Arn: Optional[str] + Configuration: Optional[list[ConfigurationItem]] + Description: Optional[str] + ResourceQuery: Optional[ResourceQuery] + Resources: Optional[list[str]] + Tags: Optional[list[Tag]] + + +class TagFilter(TypedDict): + Key: Optional[str] + Values: Optional[list[str]] + + +class Query(TypedDict): + ResourceTypeFilters: Optional[list[str]] + StackIdentifier: Optional[str] + TagFilters: Optional[list[TagFilter]] + + +class ResourceQuery(TypedDict): + Query: Optional[Query] + Type: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] 
+ Value: Optional[str] + + +class ConfigurationParameter(TypedDict): + Name: Optional[str] + Values: Optional[list[str]] + + +class ConfigurationItem(TypedDict): + Parameters: Optional[list[ConfigurationParameter]] + Type: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class ResourceGroupsGroupProvider(ResourceProvider[ResourceGroupsGroupProperties]): + TYPE = "AWS::ResourceGroups::Group" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[ResourceGroupsGroupProperties], + ) -> ProgressEvent[ResourceGroupsGroupProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Name + + Required properties: + - Name + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - resource-groups:CreateGroup + - resource-groups:Tag + - cloudformation:DescribeStacks + - cloudformation:ListStackResources + - resource-groups:ListGroupResources + - resource-groups:GroupResources + + """ + model = request.desired_state + client = request.aws_client_factory.resource_groups + + if not model.get("Name"): + raise ValueError("Name is a required property") + + # Default query + resource_query = model.get("ResourceQuery", {}) + if ( + not resource_query.get("Query") + and resource_query.get("Type") == "CLOUDFORMATION_STACK_1_0" + ): + resource_query["Query"] = json.dumps( + {"ResourceTypeFilters": ["AWS::AllSupported"], "StackIdentifier": request.stack_id} + ) + + params = util.select_attributes( + model, ["Name", "Description", "ResourceQuery", "Configuration"] + ) + + if tags := model.get("Tags"): + params["Tags"] = util.transform_list_to_dict(tags) + + result = client.create_group(**params) + model["Arn"] = result["Group"]["GroupArn"] + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def read( + self, + request: ResourceRequest[ResourceGroupsGroupProperties], + ) -> ProgressEvent[ResourceGroupsGroupProperties]: + """ + Fetch resource information + + IAM permissions required: + - resource-groups:GetGroup + - resource-groups:GetGroupQuery + - resource-groups:GetTags + - resource-groups:GetGroupConfiguration + - resource-groups:ListGroupResources + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[ResourceGroupsGroupProperties], + ) -> ProgressEvent[ResourceGroupsGroupProperties]: + """ + Delete a resource + + IAM permissions required: + - resource-groups:DeleteGroup + - resource-groups:UnGroupResources + """ + client = request.aws_client_factory.resource_groups + client.delete_group(GroupName=request.desired_state["Name"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[ResourceGroupsGroupProperties], + ) -> ProgressEvent[ResourceGroupsGroupProperties]: + """ + Update a resource + + IAM permissions required: + - resource-groups:UpdateGroup + - resource-groups:GetTags + - resource-groups:GetGroupQuery + - resource-groups:UpdateGroupQuery + - resource-groups:Tag + - resource-groups:Untag + - resource-groups:PutGroupConfiguration + - resource-groups:GetGroupConfiguration + - resource-groups:ListGroupResources + - resource-groups:GroupResources + - resource-groups:UnGroupResources + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group.schema.json 
b/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group.schema.json new file mode 100644 index 0000000000000..1902baa33a0cc --- /dev/null +++ b/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group.schema.json @@ -0,0 +1,209 @@ +{ + "typeName": "AWS::ResourceGroups::Group", + "description": "Schema for ResourceGroups::Group", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "definitions": { + "ResourceQuery": { + "type": "object", + "properties": { + "Type": { + "type": "string", + "enum": [ + "TAG_FILTERS_1_0", + "CLOUDFORMATION_STACK_1_0" + ] + }, + "Query": { + "$ref": "#/definitions/Query" + } + }, + "additionalProperties": false + }, + "Query": { + "type": "object", + "properties": { + "ResourceTypeFilters": { + "type": "array", + "items": { + "type": "string" + } + }, + "StackIdentifier": { + "type": "string" + }, + "TagFilters": { + "type": "array", + "items": { + "$ref": "#/definitions/TagFilter" + } + } + }, + "additionalProperties": false + }, + "TagFilter": { + "type": "object", + "properties": { + "Key": { + "type": "string" + }, + "Values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "Tag": { + "type": "object", + "properties": { + "Key": { + "type": "string", + "pattern": "^(?!aws:).+" + }, + "Value": { + "type": "string" + } + }, + "additionalProperties": false + }, + "Configuration": { + "type": "array", + "items": { + "$ref": "#/definitions/ConfigurationItem" + } + }, + "ConfigurationItem": { + "type": "object", + "properties": { + "Type": { + "type": "string" + }, + "Parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/ConfigurationParameter" + } + } + }, + "additionalProperties": false + }, + "ConfigurationParameter": { + "type": "object", + "properties": { + "Name": { + "type": "string" + }, + "Values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + } + }, + "properties": { + "Name": { + "description": "The name of the resource group", + "type": "string", + "maxLength": 128 + }, + "Description": { + "description": "The description of the resource group", + "type": "string", + "maxLength": 512 + }, + "ResourceQuery": { + "$ref": "#/definitions/ResourceQuery" + }, + "Tags": { + "type": "array", + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Arn": { + "description": "The Resource Group ARN.", + "type": "string" + }, + "Configuration": { + "$ref": "#/definitions/Configuration" + }, + "Resources": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false, + "required": [ + "Name" + ], + "createOnlyProperties": [ + "/properties/Name" + ], + "readOnlyProperties": [ + "/properties/Arn" + ], + "primaryIdentifier": [ + "/properties/Name" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "handlers": { + "create": { + "permissions": [ + "resource-groups:CreateGroup", + "resource-groups:Tag", + "cloudformation:DescribeStacks", + "cloudformation:ListStackResources", + "resource-groups:ListGroupResources", + "resource-groups:GroupResources" + ] + }, + "read": { + "permissions": [ + "resource-groups:GetGroup", + "resource-groups:GetGroupQuery", + "resource-groups:GetTags", + "resource-groups:GetGroupConfiguration", + "resource-groups:ListGroupResources" + ] + }, + "update": { 
+ "permissions": [ + "resource-groups:UpdateGroup", + "resource-groups:GetTags", + "resource-groups:GetGroupQuery", + "resource-groups:UpdateGroupQuery", + "resource-groups:Tag", + "resource-groups:Untag", + "resource-groups:PutGroupConfiguration", + "resource-groups:GetGroupConfiguration", + "resource-groups:ListGroupResources", + "resource-groups:GroupResources", + "resource-groups:UnGroupResources" + ] + }, + "delete": { + "permissions": [ + "resource-groups:DeleteGroup", + "resource-groups:UnGroupResources" + ] + }, + "list": { + "permissions": [ + "resource-groups:ListGroups" + ] + } + } +} diff --git a/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group_plugin.py b/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group_plugin.py new file mode 100644 index 0000000000000..99e589abd9722 --- /dev/null +++ b/localstack-core/localstack/services/resource_groups/resource_providers/aws_resourcegroups_group_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class ResourceGroupsGroupProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::ResourceGroups::Group" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.resource_groups.resource_providers.aws_resourcegroups_group import ( + ResourceGroupsGroupProvider, + ) + + self.factory = ResourceGroupsGroupProvider diff --git a/localstack/services/stepfunctions/asl/eval/__init__.py b/localstack-core/localstack/services/resourcegroupstaggingapi/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/eval/__init__.py rename to localstack-core/localstack/services/resourcegroupstaggingapi/__init__.py diff --git a/localstack/services/resourcegroupstaggingapi/provider.py b/localstack-core/localstack/services/resourcegroupstaggingapi/provider.py similarity index 100% rename from localstack/services/resourcegroupstaggingapi/provider.py rename to localstack-core/localstack/services/resourcegroupstaggingapi/provider.py diff --git a/localstack/services/stepfunctions/asl/eval/contextobject/__init__.py b/localstack-core/localstack/services/route53/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/eval/contextobject/__init__.py rename to localstack-core/localstack/services/route53/__init__.py diff --git a/localstack/services/route53/models.py b/localstack-core/localstack/services/route53/models.py similarity index 100% rename from localstack/services/route53/models.py rename to localstack-core/localstack/services/route53/models.py diff --git a/localstack-core/localstack/services/route53/provider.py b/localstack-core/localstack/services/route53/provider.py new file mode 100644 index 0000000000000..cdd3650adf274 --- /dev/null +++ b/localstack-core/localstack/services/route53/provider.py @@ -0,0 +1,123 @@ +from datetime import datetime +from typing import Optional + +import moto.route53.models as route53_models +from botocore.exceptions import ClientError +from moto.route53.models import route53_backends + +from localstack.aws.api import RequestContext +from localstack.aws.api.route53 import ( + VPC, + ChangeInfo, + ChangeStatus, + CreateHostedZoneResponse, + DeleteHealthCheckResponse, + DNSName, + GetChangeResponse, + GetHealthCheckResponse, + HealthCheck, + HealthCheckId, + 
HostedZoneConfig, + InvalidVPCId, + Nonce, + NoSuchHealthCheck, + ResourceId, + Route53Api, +) +from localstack.aws.connect import connect_to +from localstack.services.moto import call_moto +from localstack.services.plugins import ServiceLifecycleHook + + +class Route53Provider(Route53Api, ServiceLifecycleHook): + def create_hosted_zone( + self, + context: RequestContext, + name: DNSName, + caller_reference: Nonce, + vpc: VPC = None, + hosted_zone_config: HostedZoneConfig = None, + delegation_set_id: ResourceId = None, + **kwargs, + ) -> CreateHostedZoneResponse: + # private hosted zones cannot be created in a VPC that does not exist + # check that the VPC exists + if vpc: + vpc_id = vpc.get("VPCId") + vpc_region = vpc.get("VPCRegion") + if not vpc_id or not vpc_region: + raise Exception( + "VPCId and VPCRegion must be specified when creating a private hosted zone" + ) + try: + connect_to( + aws_access_key_id=context.account_id, region_name=vpc_region + ).ec2.describe_vpcs(VpcIds=[vpc_id]) + except ClientError as e: + if e.response.get("Error", {}).get("Code") == "InvalidVpcID.NotFound": + raise InvalidVPCId("The VPC ID is invalid.", sender_fault=True) from e + raise e + + response = call_moto(context) + + # moto does not populate the VPC struct of the response if creating a private hosted zone + if ( + hosted_zone_config + and hosted_zone_config.get("PrivateZone", False) + and "VPC" in response + and vpc + ): + response["VPC"]["VPCId"] = response["VPC"]["VPCId"] or vpc.get("VPCId", "") + response["VPC"]["VPCRegion"] = response["VPC"]["VPCRegion"] or vpc.get("VPCRegion", "") + + return response + + def get_change(self, context: RequestContext, id: ResourceId, **kwargs) -> GetChangeResponse: + change_info = ChangeInfo(Id=id, Status=ChangeStatus.INSYNC, SubmittedAt=datetime.now()) + return GetChangeResponse(ChangeInfo=change_info) + + def get_health_check( + self, context: RequestContext, health_check_id: HealthCheckId, **kwargs + ) -> GetHealthCheckResponse: + health_check: Optional[route53_models.HealthCheck] = route53_backends[context.account_id][ + context.partition + ].health_checks.get(health_check_id, None) + if not health_check: + raise NoSuchHealthCheck( + f"No health check exists with the specified ID {health_check_id}" + ) + health_check_config = { + "Disabled": health_check.disabled, + "EnableSNI": health_check.enable_sni, + "FailureThreshold": health_check.failure_threshold, + "FullyQualifiedDomainName": health_check.fqdn, + "HealthThreshold": health_check.health_threshold, + "Inverted": health_check.inverted, + "IPAddress": health_check.ip_address, + "MeasureLatency": health_check.measure_latency, + "Port": health_check.port, + "RequestInterval": health_check.request_interval, + "ResourcePath": health_check.resource_path, + "Type": health_check.type_, + } + return GetHealthCheckResponse( + HealthCheck=HealthCheck( + Id=health_check.id, + CallerReference=health_check.caller_reference, + HealthCheckConfig=health_check_config, + ) + ) + + def delete_health_check( + self, context: RequestContext, health_check_id: HealthCheckId, **kwargs + ) -> DeleteHealthCheckResponse: + if ( + health_check_id + not in route53_backends[context.account_id][context.partition].health_checks + ): + raise NoSuchHealthCheck( + f"No health check exists with the specified ID {health_check_id}" + ) + + route53_backends[context.account_id][context.partition].delete_health_check(health_check_id) + return {} diff --git a/localstack/services/stepfunctions/asl/eval/event/__init__.py 
b/localstack-core/localstack/services/route53/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/eval/event/__init__.py rename to localstack-core/localstack/services/route53/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck.py b/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck.py new file mode 100644 index 0000000000000..ddd156b7e638f --- /dev/null +++ b/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck.py @@ -0,0 +1,118 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class Route53HealthCheckProperties(TypedDict): + HealthCheckConfig: Optional[dict] + HealthCheckId: Optional[str] + HealthCheckTags: Optional[list[HealthCheckTag]] + + +class HealthCheckTag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class Route53HealthCheckProvider(ResourceProvider[Route53HealthCheckProperties]): + TYPE = "AWS::Route53::HealthCheck" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[Route53HealthCheckProperties], + ) -> ProgressEvent[Route53HealthCheckProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/HealthCheckId + + Required properties: + - HealthCheckConfig + + Create-only properties: + - /properties/HealthCheckConfig/Type + - /properties/HealthCheckConfig/MeasureLatency + - /properties/HealthCheckConfig/RequestInterval + + Read-only properties: + - /properties/HealthCheckId + + IAM permissions required: + - route53:CreateHealthCheck + - route53:ChangeTagsForResource + - cloudwatch:DescribeAlarms + - route53-recovery-control-config:DescribeRoutingControl + + """ + model = request.desired_state + create_params = util.select_attributes(model, ["HealthCheckConfig", "CallerReference"]) + if not create_params.get("CallerReference"): + create_params["CallerReference"] = util.generate_default_name_without_stack( + request.logical_resource_id + ) + result = request.aws_client_factory.route53.create_health_check(**create_params) + model["HealthCheckId"] = result["HealthCheck"]["Id"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[Route53HealthCheckProperties], + ) -> ProgressEvent[Route53HealthCheckProperties]: + """ + Fetch resource information + + IAM permissions required: + - route53:GetHealthCheck + - route53:ListTagsForResource + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[Route53HealthCheckProperties], + ) -> ProgressEvent[Route53HealthCheckProperties]: + """ + Delete a resource + + IAM permissions required: + - route53:DeleteHealthCheck + """ + model = request.desired_state + request.aws_client_factory.route53.delete_health_check(HealthCheckId=model["HealthCheckId"]) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={}, + ) + + def update( + self, + request: ResourceRequest[Route53HealthCheckProperties], + ) -> 
ProgressEvent[Route53HealthCheckProperties]: + """ + Update a resource + + IAM permissions required: + - route53:UpdateHealthCheck + - route53:ChangeTagsForResource + - route53:ListTagsForResource + - cloudwatch:DescribeAlarms + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck.schema.json b/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck.schema.json new file mode 100644 index 0000000000000..2033fc1ca1a15 --- /dev/null +++ b/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck.schema.json @@ -0,0 +1,215 @@ +{ + "typeName": "AWS::Route53::HealthCheck", + "description": "Resource schema for AWS::Route53::HealthCheck.", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-route53.git", + "definitions": { + "AlarmIdentifier": { + "description": "A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether the specified health check is healthy.", + "type": "object", + "additionalProperties": false, + "properties": { + "Name": { + "description": "The name of the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy.", + "type": "string", + "minLength": 1, + "maxLength": 256 + }, + "Region": { + "description": "For the CloudWatch alarm that you want Route 53 health checkers to use to determine whether this health check is healthy, the region that the alarm was created in.", + "type": "string" + } + }, + "required": [ + "Name", + "Region" + ] + }, + "HealthCheckTag": { + "description": "A key-value pair to associate with a resource.", + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string", + "description": "The key name of the tag.", + "maxLength": 128 + }, + "Value": { + "type": "string", + "description": "The value for the tag.", + "maxLength": 256 + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "properties": { + "HealthCheckId": { + "type": "string" + }, + "HealthCheckConfig": { + "description": "A complex type that contains information about the health check.", + "type": "object", + "properties": { + "AlarmIdentifier": { + "$ref": "#/definitions/AlarmIdentifier" + }, + "ChildHealthChecks": { + "type": "array", + "items": { + "type": "string" + }, + "maxItems": 256, + "insertionOrder": false + }, + "EnableSNI": { + "type": "boolean" + }, + "FailureThreshold": { + "type": "integer", + "minimum": 1, + "maximum": 10 + }, + "FullyQualifiedDomainName": { + "type": "string", + "maxLength": 255 + }, + "HealthThreshold": { + "type": "integer", + "minimum": 0, + "maximum": 256 + }, + "InsufficientDataHealthStatus": { + "type": "string", + "enum": [ + "Healthy", + "LastKnownStatus", + "Unhealthy" + ] + }, + "Inverted": { + "type": "boolean" + }, + "IPAddress": { + "type": "string", + "maxLength": 45, + "pattern": 
"^((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))$|^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$" + }, + "MeasureLatency": { + "type": "boolean" + }, + "Port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + }, + "Regions": { + "type": "array", + "items": { + "type": "string" + }, + "maxItems": 64, + "insertionOrder": false + }, + "RequestInterval": { + "type": "integer", + "minimum": 10, + "maximum": 30 + }, + "ResourcePath": { + "type": "string", + "maxLength": 255 + }, + "SearchString": { + "type": "string", + "maxLength": 255 + }, + "RoutingControlArn": { + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "Type": { + "type": "string", + "enum": [ + "CALCULATED", + "CLOUDWATCH_METRIC", + "HTTP", + "HTTP_STR_MATCH", + "HTTPS", + "HTTPS_STR_MATCH", + "TCP", + "RECOVERY_CONTROL" + ] + } + }, + "required": [ + "Type" + ], + "additionalProperties": false + }, + "HealthCheckTags": { + "type": "array", + "uniqueItems": true, + "insertionOrder": false, + "description": "An array of key-value pairs to apply to this resource.", + "items": { + "$ref": "#/definitions/HealthCheckTag" + } + } + }, + "additionalProperties": false, + "required": [ + "HealthCheckConfig" + ], + "createOnlyProperties": [ + "/properties/HealthCheckConfig/Type", + "/properties/HealthCheckConfig/MeasureLatency", + "/properties/HealthCheckConfig/RequestInterval" + ], + "readOnlyProperties": [ + "/properties/HealthCheckId" + ], + "primaryIdentifier": [ + "/properties/HealthCheckId" + ], + "handlers": { + "create": { + "permissions": [ + "route53:CreateHealthCheck", + "route53:ChangeTagsForResource", + "cloudwatch:DescribeAlarms", + "route53-recovery-control-config:DescribeRoutingControl" + ] + }, + "read": { + "permissions": [ + "route53:GetHealthCheck", + "route53:ListTagsForResource" + ] + }, + "update": { + "permissions": [ + "route53:UpdateHealthCheck", + "route53:ChangeTagsForResource", + "route53:ListTagsForResource", + "cloudwatch:DescribeAlarms" + ] + }, + "delete": { + "permissions": [ + "route53:DeleteHealthCheck" + ] + }, + "list": { + "permissions": [ + "route53:ListHealthChecks", + "route53:ListTagsForResource" + ] + } + }, + "taggable": true +} diff --git a/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck_plugin.py b/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck_plugin.py new file mode 100644 index 0000000000000..7a8e244561cf8 --- /dev/null +++ b/localstack-core/localstack/services/route53/resource_providers/aws_route53_healthcheck_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class Route53HealthCheckProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Route53::HealthCheck" + + 
def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.route53.resource_providers.aws_route53_healthcheck import ( + Route53HealthCheckProvider, + ) + + self.factory = Route53HealthCheckProvider diff --git a/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset.py b/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset.py new file mode 100644 index 0000000000000..c3d0e3866e14c --- /dev/null +++ b/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset.py @@ -0,0 +1,213 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Optional, TypedDict + +if TYPE_CHECKING: + from mypy_boto3_route53 import Route53Client + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class Route53RecordSetProperties(TypedDict): + Name: Optional[str] + Type: Optional[str] + AliasTarget: Optional[AliasTarget] + CidrRoutingConfig: Optional[CidrRoutingConfig] + Comment: Optional[str] + Failover: Optional[str] + GeoLocation: Optional[GeoLocation] + HealthCheckId: Optional[str] + HostedZoneId: Optional[str] + HostedZoneName: Optional[str] + Id: Optional[str] + MultiValueAnswer: Optional[bool] + Region: Optional[str] + ResourceRecords: Optional[list[str]] + SetIdentifier: Optional[str] + TTL: Optional[str] + Weight: Optional[int] + + +class AliasTarget(TypedDict): + DNSName: Optional[str] + HostedZoneId: Optional[str] + EvaluateTargetHealth: Optional[bool] + + +class CidrRoutingConfig(TypedDict): + CollectionId: Optional[str] + LocationName: Optional[str] + + +class GeoLocation(TypedDict): + ContinentCode: Optional[str] + CountryCode: Optional[str] + SubdivisionCode: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class Route53RecordSetProvider(ResourceProvider[Route53RecordSetProperties]): + TYPE = "AWS::Route53::RecordSet" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[Route53RecordSetProperties], + ) -> ProgressEvent[Route53RecordSetProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - Type + - Name + + Create-only properties: + - /properties/HostedZoneName + - /properties/Name + - /properties/HostedZoneId + + Read-only properties: + - /properties/Id + """ + model = request.desired_state + route53 = request.aws_client_factory.route53 + + if not model.get("HostedZoneId"): + # if only name was provided for hosted zone + hosted_zone_name = model.get("HostedZoneName") + hosted_zone_id = self.get_hosted_zone_id_from_name(hosted_zone_name, route53) + model["HostedZoneId"] = hosted_zone_id + + attr_names = [ + "Name", + "Type", + "SetIdentifier", + "Weight", + "Region", + "GeoLocation", + "Failover", + "MultiValueAnswer", + "TTL", + "ResourceRecords", + "AliasTarget", + "HealthCheckId", + ] + attrs = util.select_attributes(model, attr_names) + + if "AliasTarget" in attrs: + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-route53-recordset-aliastarget.html + if "EvaluateTargetHealth" not in attrs["AliasTarget"]: + attrs["AliasTarget"]["EvaluateTargetHealth"] = False + else: + # TODO: CNAME & SOA only allow 1 record type. should we check that here? + attrs["ResourceRecords"] = [{"Value": record} for record in attrs["ResourceRecords"]] + + if "TTL" in attrs: + if isinstance(attrs["TTL"], str): + attrs["TTL"] = int(attrs["TTL"]) + + route53.change_resource_record_sets( + HostedZoneId=model["HostedZoneId"], + ChangeBatch={ + "Changes": [ + { + "Action": "UPSERT", + "ResourceRecordSet": attrs, + }, + ] + }, + ) + # TODO: not 100% sure this behaves the same between alias and non-alias records + model["Id"] = model["Name"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def get_hosted_zone_id_from_name(self, hosted_zone_name: str, client: "Route53Client"): + if not hosted_zone_name: + raise Exception("Either HostedZoneId or HostedZoneName must be present.") + + zones = client.list_hosted_zones_by_name(DNSName=hosted_zone_name)["HostedZones"] + if len(zones) != 1: + raise Exception(f"Ambiguous HostedZoneName {hosted_zone_name} provided.") + + hosted_zone_id = zones[0]["Id"] + return hosted_zone_id + + def read( + self, + request: ResourceRequest[Route53RecordSetProperties], + ) -> ProgressEvent[Route53RecordSetProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[Route53RecordSetProperties], + ) -> ProgressEvent[Route53RecordSetProperties]: + """ + Delete a resource + + + """ + model = request.previous_state + route53 = request.aws_client_factory.route53 + rrset_kwargs = { + "Name": model["Name"], + "Type": model["Type"], + } + + if "AliasTarget" in model: + rrset_kwargs["AliasTarget"] = model["AliasTarget"] + if "ResourceRecords" in model: + rrset_kwargs["ResourceRecords"] = [ + {"Value": record} for record in model["ResourceRecords"] + ] + if "TTL" in model: + rrset_kwargs["TTL"] = int(model["TTL"]) + + route53.change_resource_record_sets( + HostedZoneId=model["HostedZoneId"], + ChangeBatch={ + "Changes": [ + { + "Action": "DELETE", + "ResourceRecordSet": rrset_kwargs, + }, + ] + }, + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def update( + self, + request: ResourceRequest[Route53RecordSetProperties], + ) -> ProgressEvent[Route53RecordSetProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git 
a/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset.schema.json b/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset.schema.json new file mode 100644 index 0000000000000..300099a7f978c --- /dev/null +++ b/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset.schema.json @@ -0,0 +1,129 @@ +{ + "typeName": "AWS::Route53::RecordSet", + "description": "Resource Type definition for AWS::Route53::RecordSet", + "additionalProperties": false, + "properties": { + "HealthCheckId": { + "type": "string" + }, + "AliasTarget": { + "$ref": "#/definitions/AliasTarget" + }, + "Comment": { + "type": "string" + }, + "HostedZoneName": { + "type": "string" + }, + "ResourceRecords": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "HostedZoneId": { + "type": "string" + }, + "SetIdentifier": { + "type": "string" + }, + "TTL": { + "type": "string" + }, + "Weight": { + "type": "integer" + }, + "Name": { + "type": "string" + }, + "Type": { + "type": "string" + }, + "CidrRoutingConfig": { + "$ref": "#/definitions/CidrRoutingConfig" + }, + "Failover": { + "type": "string" + }, + "Region": { + "type": "string" + }, + "GeoLocation": { + "$ref": "#/definitions/GeoLocation" + }, + "Id": { + "type": "string" + }, + "MultiValueAnswer": { + "type": "boolean" + } + }, + "definitions": { + "AliasTarget": { + "type": "object", + "additionalProperties": false, + "properties": { + "DNSName": { + "type": "string" + }, + "HostedZoneId": { + "type": "string" + }, + "EvaluateTargetHealth": { + "type": "boolean" + } + }, + "required": [ + "HostedZoneId", + "DNSName" + ] + }, + "CidrRoutingConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "CollectionId": { + "type": "string" + }, + "LocationName": { + "type": "string" + } + }, + "required": [ + "CollectionId", + "LocationName" + ] + }, + "GeoLocation": { + "type": "object", + "additionalProperties": false, + "properties": { + "ContinentCode": { + "type": "string" + }, + "CountryCode": { + "type": "string" + }, + "SubdivisionCode": { + "type": "string" + } + } + } + }, + "required": [ + "Type", + "Name" + ], + "createOnlyProperties": [ + "/properties/HostedZoneName", + "/properties/Name", + "/properties/HostedZoneId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset_plugin.py b/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset_plugin.py new file mode 100644 index 0000000000000..bdecf6996b78d --- /dev/null +++ b/localstack-core/localstack/services/route53/resource_providers/aws_route53_recordset_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class Route53RecordSetProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Route53::RecordSet" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.route53.resource_providers.aws_route53_recordset import ( + Route53RecordSetProvider, + ) + + self.factory = Route53RecordSetProvider diff --git a/localstack/services/stepfunctions/asl/eval/programstate/__init__.py b/localstack-core/localstack/services/route53resolver/__init__.py similarity index 100% rename 
from localstack/services/stepfunctions/asl/eval/programstate/__init__.py rename to localstack-core/localstack/services/route53resolver/__init__.py
diff --git a/localstack-core/localstack/services/route53resolver/models.py b/localstack-core/localstack/services/route53resolver/models.py
new file mode 100644
index 0000000000000..57c5436bab568
--- /dev/null
+++ b/localstack-core/localstack/services/route53resolver/models.py
@@ -0,0 +1,180 @@
+from typing import Dict
+
+import localstack.services.route53resolver.utils
+from localstack.aws.api.route53resolver import (
+    FirewallConfig,
+    FirewallDomainList,
+    FirewallDomains,
+    FirewallRule,
+    FirewallRuleGroup,
+    FirewallRuleGroupAssociation,
+    ResolverQueryLogConfig,
+    ResolverQueryLogConfigAssociation,
+    ResolverQueryLogConfigStatus,
+    ResourceNotFoundException,
+)
+from localstack.services.route53resolver.utils import get_firewall_config_id, validate_vpc
+from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute
+
+
+class Route53ResolverStore(BaseStore):
+    firewall_configs: Dict[str, FirewallConfig] = LocalAttribute(default=dict)
+    firewall_domain_lists: Dict[str, FirewallDomainList] = LocalAttribute(default=dict)
+    firewall_domains: Dict[str, FirewallDomains] = LocalAttribute(default=dict)
+    firewall_rules: Dict[str, FirewallRule] = LocalAttribute(default=dict)
+    firewall_rule_groups: Dict[str, FirewallRuleGroup] = LocalAttribute(default=dict)
+    firewall_rule_group_associations: Dict[str, FirewallRuleGroupAssociation] = LocalAttribute(
+        default=dict
+    )
+    resolver_query_log_configs: Dict[str, ResolverQueryLogConfig] = LocalAttribute(default=dict)
+    resolver_query_log_config_associations: Dict[str, ResolverQueryLogConfigAssociation] = (
+        LocalAttribute(default=dict)
+    )
+
+    def get_firewall_rule_group(self, id):
+        """returns firewall rule group with the given id if it exists"""
+
+        firewall_rule_group = self.firewall_rule_groups.get(id)
+        if not firewall_rule_group:
+            raise ResourceNotFoundException(
+                f"Can't find the resource with ID '{id}'. Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'"
+            )
+        return firewall_rule_group
+
+    def delete_firewall_rule_group(self, id):
+        """deletes the firewall rule group with the given id"""
+        # raises ResourceNotFoundException if the rule group doesn't exist
+
+        firewall_rule_group = self.get_firewall_rule_group(id)
+        self.firewall_rule_groups.pop(id)
+        return firewall_rule_group
+
+    def get_firewall_rule_group_association(self, id):
+        """returns firewall rule group association with the given id if it exists"""
+
+        firewall_rule_group_association = self.firewall_rule_group_associations.get(id)
+        if not firewall_rule_group_association:
+            raise ResourceNotFoundException(
+                f"[RSLVR-02025] Can't find the resource with ID '{id}'. 
Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'"
+            )
+        return firewall_rule_group_association
+
+    def delete_firewall_rule_group_association(self, id):
+        """deletes the firewall rule group association with the given id"""
+        # raises ResourceNotFoundException if the association doesn't exist
+
+        firewall_rule_group_association = self.get_firewall_rule_group_association(id)
+        self.firewall_rule_group_associations.pop(id)
+        return firewall_rule_group_association
+
+    def get_firewall_domain(self, id):
+        """returns the firewall domains for the given list id, or None if no domains were added yet"""
+
+        firewall_domain = self.firewall_domains.get(id)
+        return firewall_domain
+
+    def get_firewall_domain_list(self, id):
+        """returns firewall domain list with the given id if it exists"""
+
+        firewall_domain_list = self.firewall_domain_lists.get(id)
+        if not firewall_domain_list:
+            raise ResourceNotFoundException(
+                f"Can't find the resource with ID '{id}'. Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'"
+            )
+        return firewall_domain_list
+
+    def delete_firewall_domain_list(self, id):
+        """deletes the firewall domain list with the given id"""
+        # raises ResourceNotFoundException if the domain list doesn't exist
+
+        firewall_domain_list = self.get_firewall_domain_list(id)
+        self.firewall_domain_lists.pop(id)
+        return firewall_domain_list
+
+    def get_firewall_rule(self, firewall_rule_group_id, firewall_domain_list_id):
+        """returns firewall rule with the given id if it exists"""
+
+        firewall_rule = self.firewall_rules.get(firewall_rule_group_id, {}).get(
+            firewall_domain_list_id
+        )
+        if not firewall_rule:
+            raise ResourceNotFoundException(
+                f"Can't find the resource with ID '{firewall_rule_group_id}'. Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'"
+            )
+        return firewall_rule
+
+    def delete_firewall_rule(self, firewall_rule_group_id, firewall_domain_list_id):
+        """deletes the firewall rule with the given id"""
+        # raises ResourceNotFoundException if the rule doesn't exist
+
+        firewall_rule = self.get_firewall_rule(firewall_rule_group_id, firewall_domain_list_id)
+        self.firewall_rules.get(firewall_rule_group_id, {}).pop(firewall_domain_list_id)
+        return firewall_rule
+
+    def get_resolver_query_log_config(self, id):
+        """returns resolver query log config with the given id if it exists"""
+
+        resolver_query_log_config = self.resolver_query_log_configs.get(id)
+        if not resolver_query_log_config:
+            raise ResourceNotFoundException(
+                f"[RSLVR-01601] The specified query logging configuration doesn't exist. Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'"
+            )
+        return resolver_query_log_config
+
+    def delete_resolver_query_log_config(self, id):
+        """deletes the resolver query log config with the given id"""
+
+        self.get_resolver_query_log_config(id)
+        resolver_query_log_config = self.resolver_query_log_configs.pop(id)
+        resolver_query_log_config["Status"] = ResolverQueryLogConfigStatus.DELETING
+        return resolver_query_log_config
+
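+    # note: the getter below looks up an association by its own ID, while
+    # delete_resolver_query_log_config_associations matches on the
+    # (ResolverQueryLogConfigId, ResourceId) pair instead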
+    def get_resolver_query_log_config_associations(self, id):
+        """returns resolver query log config association with the given id if it exists"""
+
+        resolver_query_log_config_association = self.resolver_query_log_config_associations.get(id)
+        if not resolver_query_log_config_association:
+            raise ResourceNotFoundException(
+                f"[RSLVR-01601] The specified query logging configuration doesn't exist. Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'"
+            )
+        return resolver_query_log_config_association
+
+    def delete_resolver_query_log_config_associations(
+        self, resolver_query_log_config_id, resource_id
+    ):
+        """deletes the resolver query log config association with the given config id and resource id"""
+
+        # look for the association matching both the config id and the resource id;
+        # raising on the first non-matching entry would fail as soon as any
+        # unrelated association exists, and pop(None) would crash on an empty store
+        association_id = None
+        for association in self.resolver_query_log_config_associations.values():
+            if (
+                association.get("ResolverQueryLogConfigId") == resolver_query_log_config_id
+                and association.get("ResourceId") == resource_id
+            ):
+                association["Status"] = "DELETING"
+                association_id = association.get("Id")
+                break
+        if association_id is None:
+            raise ResourceNotFoundException(
+                f"[RSLVR-01602] The specified query logging configuration association doesn't exist. Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'"
+            )
+        return self.resolver_query_log_config_associations.pop(association_id)
+
+    def get_or_create_firewall_config(self, resource_id: str, region: str, account_id: str):
+        """returns the firewall config with the given id if it exists or creates a new one"""
+
+        validate_vpc(resource_id, region, account_id)
+        firewall_config: FirewallConfig
+        if self.firewall_configs.get(resource_id):
+            firewall_config = self.firewall_configs[resource_id]
+        else:
+            id = get_firewall_config_id()
+            firewall_config = FirewallConfig(
+                Id=id,
+                ResourceId=resource_id,
+                OwnerId=account_id,
+                FirewallFailOpen="DISABLED",
+            )
+            self.firewall_configs[resource_id] = firewall_config
+        return firewall_config
+
+
+route53resolver_stores = AccountRegionBundle("route53resolver", Route53ResolverStore)
diff --git a/localstack-core/localstack/services/route53resolver/provider.py b/localstack-core/localstack/services/route53resolver/provider.py
new file mode 100644
index 0000000000000..e002748d9aa17
--- /dev/null
+++ b/localstack-core/localstack/services/route53resolver/provider.py
@@ -0,0 +1,844 @@
+from datetime import datetime, timezone
+
+from moto.route53resolver.models import Route53ResolverBackend as MotoRoute53ResolverBackend
+from moto.route53resolver.models import route53resolver_backends
+
+import localstack.services.route53resolver.utils
+from localstack.aws.api import RequestContext
+from localstack.aws.api.route53resolver import (
+    Action,
+    AssociateFirewallRuleGroupResponse,
+    AssociateResolverQueryLogConfigResponse,
+    BlockOverrideDnsType,
+    BlockOverrideDomain,
+    BlockOverrideTtl,
+    BlockResponse,
+    ConfidenceThreshold,
+    CreateFirewallDomainListResponse,
+    CreateFirewallRuleGroupResponse,
+    CreateFirewallRuleResponse,
+    CreateResolverEndpointResponse,
+    CreateResolverQueryLogConfigResponse,
+    CreatorRequestId,
+    DeleteFirewallDomainListResponse,
+    DeleteFirewallRuleGroupResponse,
+    DeleteFirewallRuleResponse,
+    DeleteResolverQueryLogConfigResponse,
+    DestinationArn,
+    DisassociateFirewallRuleGroupResponse,
+    DisassociateResolverQueryLogConfigResponse,
+    DnsThreatProtection,
+    Filters,
+    FirewallConfig,
+    FirewallDomainList,
+    FirewallDomainListMetadata,
+    FirewallDomainName,
+    FirewallDomainRedirectionAction,
+    FirewallDomains,
+    FirewallDomainUpdateOperation,
+    FirewallFailOpenStatus,
+    FirewallRule,
+    FirewallRuleGroup,
+    FirewallRuleGroupAssociation,
+    FirewallRuleGroupMetadata,
+    GetFirewallConfigResponse,
+    GetFirewallDomainListResponse,
+    GetFirewallRuleGroupAssociationResponse,
+    GetFirewallRuleGroupResponse,
+    GetResolverQueryLogConfigAssociationResponse,
+    GetResolverQueryLogConfigResponse,
+    InvalidParameterException,
+    InvalidRequestException,
+    IpAddressesRequest,
+
ListDomainMaxResults, + ListFirewallConfigsMaxResult, + ListFirewallConfigsResponse, + ListFirewallDomainListsResponse, + ListFirewallDomainsResponse, + ListFirewallRuleGroupsResponse, + ListFirewallRulesResponse, + ListResolverQueryLogConfigAssociationsResponse, + ListResolverQueryLogConfigsResponse, + MaxResults, + MutationProtectionStatus, + Name, + NextToken, + OutpostArn, + OutpostInstanceType, + Priority, + ProtocolList, + Qtype, + ResolverEndpointDirection, + ResolverEndpointType, + ResolverQueryLogConfig, + ResolverQueryLogConfigAssociation, + ResolverQueryLogConfigName, + ResolverQueryLogConfigStatus, + ResourceId, + ResourceNotFoundException, + Route53ResolverApi, + SecurityGroupIds, + SortByKey, + SortOrder, + TagList, + UpdateFirewallConfigResponse, + UpdateFirewallDomainsResponse, + UpdateFirewallRuleGroupAssociationResponse, + UpdateFirewallRuleResponse, + ValidationException, +) +from localstack.services.ec2.models import get_ec2_backend +from localstack.services.moto import call_moto +from localstack.services.route53resolver.models import Route53ResolverStore, route53resolver_stores +from localstack.services.route53resolver.utils import ( + get_resolver_query_log_config_id, + get_route53_resolver_firewall_domain_list_id, + get_route53_resolver_firewall_rule_group_association_id, + get_route53_resolver_firewall_rule_group_id, + get_route53_resolver_query_log_config_association_id, + validate_destination_arn, + validate_mutation_protection, + validate_priority, +) +from localstack.utils.aws import arns +from localstack.utils.aws.arns import extract_account_id_from_arn, extract_region_from_arn +from localstack.utils.collections import select_from_typed_dict +from localstack.utils.patch import patch + + +class Route53ResolverProvider(Route53ResolverApi): + @staticmethod + def get_store(account_id: str, region: str) -> Route53ResolverStore: + return route53resolver_stores[account_id][region] + + def create_firewall_rule_group( + self, + context: RequestContext, + creator_request_id: CreatorRequestId, + name: Name, + tags: TagList = None, + **kwargs, + ) -> CreateFirewallRuleGroupResponse: + """Create a Firewall Rule Group.""" + store = self.get_store(context.account_id, context.region) + firewall_rule_group_id = get_route53_resolver_firewall_rule_group_id() + arn = arns.route53_resolver_firewall_rule_group_arn( + firewall_rule_group_id, context.account_id, context.region + ) + firewall_rule_group = FirewallRuleGroup( + Id=firewall_rule_group_id, + Arn=arn, + Name=name, + RuleCount=0, + Status="COMPLETE", + OwnerId=context.account_id, + ShareStatus="NOT_SHARED", + StatusMessage="Created Firewall Rule Group", + CreatorRequestId=creator_request_id, + CreationTime=datetime.now(timezone.utc).isoformat(), + ModificationTime=datetime.now(timezone.utc).isoformat(), + ) + store.firewall_rule_groups[firewall_rule_group_id] = firewall_rule_group + store.firewall_rules[firewall_rule_group_id] = {} + route53resolver_backends[context.account_id][context.region].tagger.tag_resource( + arn, tags or [] + ) + return CreateFirewallRuleGroupResponse(FirewallRuleGroup=firewall_rule_group) + + def delete_firewall_rule_group( + self, context: RequestContext, firewall_rule_group_id: ResourceId, **kwargs + ) -> DeleteFirewallRuleGroupResponse: + """Delete a Firewall Rule Group.""" + store = self.get_store(context.account_id, context.region) + firewall_rule_group: FirewallRuleGroup = store.delete_firewall_rule_group( + firewall_rule_group_id + ) + return 
DeleteFirewallRuleGroupResponse(FirewallRuleGroup=firewall_rule_group) + + def get_firewall_rule_group( + self, context: RequestContext, firewall_rule_group_id: ResourceId, **kwargs + ) -> GetFirewallRuleGroupResponse: + """Get the details of a Firewall Rule Group.""" + store = self.get_store(context.account_id, context.region) + firewall_rule_group: FirewallRuleGroup = store.get_firewall_rule_group( + firewall_rule_group_id + ) + return GetFirewallRuleGroupResponse(FirewallRuleGroup=firewall_rule_group) + + def list_firewall_rule_groups( + self, + context: RequestContext, + max_results: MaxResults = None, + next_token: NextToken = None, + **kwargs, + ) -> ListFirewallRuleGroupsResponse: + """List Firewall Rule Groups.""" + store = self.get_store(context.account_id, context.region) + firewall_rule_groups = [] + for firewall_rule_group in store.firewall_rule_groups.values(): + firewall_rule_groups.append( + select_from_typed_dict(FirewallRuleGroupMetadata, firewall_rule_group) + ) + return ListFirewallRuleGroupsResponse(FirewallRuleGroups=firewall_rule_groups) + + def create_firewall_domain_list( + self, + context: RequestContext, + creator_request_id: CreatorRequestId, + name: Name, + tags: TagList = None, + **kwargs, + ) -> CreateFirewallDomainListResponse: + """Create a Firewall Domain List.""" + store = self.get_store(context.account_id, context.region) + id = get_route53_resolver_firewall_domain_list_id() + arn = arns.route53_resolver_firewall_domain_list_arn(id, context.account_id, context.region) + firewall_domain_list = FirewallDomainList( + Id=id, + Arn=arn, + Name=name, + DomainCount=0, + Status="COMPLETE", + StatusMessage="Created Firewall Domain List", + ManagedOwnerName=context.account_id, + CreatorRequestId=creator_request_id, + CreationTime=datetime.now(timezone.utc).isoformat(), + ModificationTime=datetime.now(timezone.utc).isoformat(), + ) + store.firewall_domain_lists[id] = firewall_domain_list + route53resolver_backends[context.account_id][context.region].tagger.tag_resource( + arn, tags or [] + ) + return CreateFirewallDomainListResponse(FirewallDomainList=firewall_domain_list) + + def delete_firewall_domain_list( + self, context: RequestContext, firewall_domain_list_id: ResourceId, **kwargs + ) -> DeleteFirewallDomainListResponse: + """Delete a Firewall Domain List.""" + store = self.get_store(context.account_id, context.region) + firewall_domain_list: FirewallDomainList = store.delete_firewall_domain_list( + firewall_domain_list_id + ) + return DeleteFirewallDomainListResponse(FirewallDomainList=firewall_domain_list) + + def get_firewall_domain_list( + self, context: RequestContext, firewall_domain_list_id: ResourceId, **kwargs + ) -> GetFirewallDomainListResponse: + """Get the details of a Firewall Domain List.""" + store = self.get_store(context.account_id, context.region) + firewall_domain_list: FirewallDomainList = store.get_firewall_domain_list( + firewall_domain_list_id + ) + return GetFirewallDomainListResponse(FirewallDomainList=firewall_domain_list) + + def list_firewall_domain_lists( + self, + context: RequestContext, + max_results: MaxResults = None, + next_token: NextToken = None, + **kwargs, + ) -> ListFirewallDomainListsResponse: + """List all Firewall Domain Lists.""" + store = self.get_store(context.account_id, context.region) + firewall_domain_lists = [] + for firewall_domain_list in store.firewall_domain_lists.values(): + firewall_domain_lists.append( + select_from_typed_dict(FirewallDomainListMetadata, firewall_domain_list) + ) + return 
ListFirewallDomainListsResponse(FirewallDomainLists=firewall_domain_lists)
+
+    def update_firewall_domains(
+        self,
+        context: RequestContext,
+        firewall_domain_list_id: ResourceId,
+        operation: FirewallDomainUpdateOperation,
+        domains: FirewallDomains,
+        **kwargs,
+    ) -> UpdateFirewallDomainsResponse:
+        """Update the domains in a Firewall Domain List."""
+        store = self.get_store(context.account_id, context.region)
+
+        firewall_domain_list: FirewallDomainList = store.get_firewall_domain_list(
+            firewall_domain_list_id
+        )
+        firewall_domains = store.get_firewall_domain(firewall_domain_list_id)
+
+        if operation == FirewallDomainUpdateOperation.ADD:
+            if not firewall_domains:
+                store.firewall_domains[firewall_domain_list_id] = domains
+            else:
+                # extend (not append) so the store keeps a flat list of domain names
+                store.firewall_domains[firewall_domain_list_id].extend(domains)
+
+        if operation == FirewallDomainUpdateOperation.REMOVE:
+            if firewall_domains:
+                for domain in domains:
+                    if domain in firewall_domains:
+                        firewall_domains.remove(domain)
+                    else:
+                        raise ValidationException(
+                            f"[RSLVR-02502] The following domains don't exist in the DNS Firewall domain list '{firewall_domain_list_id}'. You can't delete a domain that isn't in a domain list. Example unknown domain: '{domain}'. Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'"
+                        )
+
+        if operation == FirewallDomainUpdateOperation.REPLACE:
+            store.firewall_domains[firewall_domain_list_id] = domains
+
+        firewall_domain_list["StatusMessage"] = "Finished domain list update"
+        firewall_domain_list["ModificationTime"] = datetime.now(timezone.utc).isoformat()
+        return UpdateFirewallDomainsResponse(
+            Id=firewall_domain_list.get("Id"),
+            Name=firewall_domain_list.get("Name"),
+            Status=firewall_domain_list.get("Status"),
+            StatusMessage=firewall_domain_list.get("StatusMessage"),
+        )
+
+    def list_firewall_domains(
+        self,
+        context: RequestContext,
+        firewall_domain_list_id: ResourceId,
+        max_results: ListDomainMaxResults = None,
+        next_token: NextToken = None,
+        **kwargs,
+    ) -> ListFirewallDomainsResponse:
+        """List the domains in a DNS Firewall domain list."""
+        store = self.get_store(context.account_id, context.region)
+        firewall_domains: FirewallDomains[FirewallDomainName] = []
+        if store.firewall_domains.get(firewall_domain_list_id):
+            for firewall_domain in store.firewall_domains.get(firewall_domain_list_id):
+                firewall_domains.append(FirewallDomainName(firewall_domain))
+        return ListFirewallDomainsResponse(Domains=firewall_domains)
+
+    def create_firewall_rule(
+        self,
+        context: RequestContext,
+        creator_request_id: CreatorRequestId,
+        firewall_rule_group_id: ResourceId,
+        priority: Priority,
+        action: Action,
+        name: Name,
+        firewall_domain_list_id: ResourceId = None,
+        block_response: BlockResponse = None,
+        block_override_domain: BlockOverrideDomain = None,
+        block_override_dns_type: BlockOverrideDnsType = None,
+        block_override_ttl: BlockOverrideTtl = None,
+        firewall_domain_redirection_action: FirewallDomainRedirectionAction = None,
+        qtype: Qtype = None,
+        dns_threat_protection: DnsThreatProtection = None,
+        confidence_threshold: ConfidenceThreshold = None,
+        **kwargs,
+    ) -> CreateFirewallRuleResponse:
+        """Create a new firewall rule"""
+        # TODO add support for firewall_domain_list_id, dns_threat_protection, and confidence_threshold
+        store = self.get_store(context.account_id, context.region)
+        firewall_rule = FirewallRule(
+            FirewallRuleGroupId=firewall_rule_group_id,
+            FirewallDomainListId=firewall_domain_list_id,
+            Name=name,
+            Priority=priority,
+            Action=action,
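+            # per the AWS API, BlockResponse only takes effect when Action is
+            # "BLOCK", and the BlockOverride* fields only when BlockResponse is
+            # "OVERRIDE"; LocalStack stores the values as provided
+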
BlockResponse=block_response, + BlockOverrideDomain=block_override_domain, + BlockOverrideDnsType=block_override_dns_type, + BlockOverrideTtl=block_override_ttl, + CreatorRequestId=creator_request_id, + CreationTime=datetime.now(timezone.utc).isoformat(), + ModificationTime=datetime.now(timezone.utc).isoformat(), + FirewallDomainRedirectionAction=firewall_domain_redirection_action, + Qtype=qtype, + ) + if firewall_rule_group_id in store.firewall_rules: + store.firewall_rules[firewall_rule_group_id][firewall_domain_list_id] = firewall_rule + # TODO: handle missing firewall-rule-group-id + return CreateFirewallRuleResponse(FirewallRule=firewall_rule) + + def delete_firewall_rule( + self, + context: RequestContext, + firewall_rule_group_id: ResourceId, + firewall_domain_list_id: ResourceId = None, + firewall_threat_protection_id: ResourceId = None, + qtype: Qtype = None, + **kwargs, + ) -> DeleteFirewallRuleResponse: + """Delete a firewall rule""" + store = self.get_store(context.account_id, context.region) + firewall_rule: FirewallRule = store.delete_firewall_rule( + firewall_rule_group_id, firewall_domain_list_id + ) + return DeleteFirewallRuleResponse( + FirewallRule=firewall_rule, + ) + + def list_firewall_rules( + self, + context: RequestContext, + firewall_rule_group_id: ResourceId, + priority: Priority = None, + action: Action = None, + max_results: MaxResults = None, + next_token: NextToken = None, + **kwargs, + ) -> ListFirewallRulesResponse: + """List firewall rules in a firewall rule group. + + Rules will be filtered by priority and action if values for these params are provided. + + Raises: + ResourceNotFound: If a firewall group by the provided id does not exist. + """ + store = self.get_store(context.account_id, context.region) + firewall_rule_group = store.firewall_rules.get(firewall_rule_group_id) + if firewall_rule_group is None: + raise ResourceNotFoundException( + f"Can't find the resource with ID '{firewall_rule_group_id}'. 
Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'" + ) + + firewall_rules = [ + FirewallRule(rule) + for rule in firewall_rule_group.values() + if (action is None or action == rule["Action"]) + and (priority is None or priority == rule["Priority"]) + ] + + # TODO: implement max_results filtering and next_token handling + return ListFirewallRulesResponse(FirewallRules=firewall_rules) + + def update_firewall_rule( + self, + context: RequestContext, + firewall_rule_group_id: ResourceId, + firewall_domain_list_id: ResourceId = None, + firewall_threat_protection_id: ResourceId = None, + priority: Priority = None, + action: Action = None, + block_response: BlockResponse = None, + block_override_domain: BlockOverrideDomain = None, + block_override_dns_type: BlockOverrideDnsType = None, + block_override_ttl: BlockOverrideTtl = None, + name: Name = None, + firewall_domain_redirection_action: FirewallDomainRedirectionAction = None, + qtype: Qtype = None, + dns_threat_protection: DnsThreatProtection = None, + confidence_threshold: ConfidenceThreshold = None, + **kwargs, + ) -> UpdateFirewallRuleResponse: + """Updates a firewall rule""" + store = self.get_store(context.account_id, context.region) + firewall_rule: FirewallRule = store.get_firewall_rule( + firewall_rule_group_id, firewall_domain_list_id + ) + + if priority: + firewall_rule["Priority"] = priority + if action: + firewall_rule["Action"] = action + if block_response: + firewall_rule["BlockResponse"] = block_response + if block_override_domain: + firewall_rule["BlockOverrideDomain"] = block_override_domain + if block_override_dns_type: + firewall_rule["BlockOverrideDnsType"] = block_override_dns_type + if block_override_ttl: + firewall_rule["BlockOverrideTtl"] = block_override_ttl + if name: + firewall_rule["Name"] = name + if firewall_domain_redirection_action: + firewall_rule["FirewallDomainRedirectionAction"] = firewall_domain_redirection_action + if qtype: + firewall_rule["Qtype"] = qtype + return UpdateFirewallRuleResponse( + FirewallRule=firewall_rule, + ) + + def associate_firewall_rule_group( + self, + context: RequestContext, + creator_request_id: CreatorRequestId, + firewall_rule_group_id: ResourceId, + vpc_id: ResourceId, + priority: Priority, + name: Name, + mutation_protection: MutationProtectionStatus = None, + tags: TagList = None, + **kwargs, + ) -> AssociateFirewallRuleGroupResponse: + """Associate a firewall rule group with a VPC.""" + store = self.get_store(context.account_id, context.region) + validate_priority(priority=priority) + validate_mutation_protection(mutation_protection=mutation_protection) + + for firewall_rule_group_association in store.firewall_rule_group_associations.values(): + if ( + firewall_rule_group_association.get("VpcId") == vpc_id + and firewall_rule_group_association.get("FirewallRuleGroupId") + == firewall_rule_group_id + ): + raise ValidationException( + f"[RSLVR-02302] This DNS Firewall rule group can't be associated to a VPC: '{vpc_id}'. It is already associated to VPC '{firewall_rule_group_id}'. Try again with another VPC or DNS Firewall rule group. 
Trace Id: '{localstack.services.route53resolver.utils.get_trace_id()}'" + ) + + id = get_route53_resolver_firewall_rule_group_association_id() + arn = arns.route53_resolver_firewall_rule_group_associations_arn( + id, context.account_id, context.region + ) + + firewall_rule_group_association = FirewallRuleGroupAssociation( + Id=id, + Arn=arn, + FirewallRuleGroupId=firewall_rule_group_id, + VpcId=vpc_id, + Name=name, + Priority=priority, + MutationProtection=mutation_protection or "DISABLED", + Status="COMPLETE", + StatusMessage="Creating Firewall Rule Group Association", + CreatorRequestId=creator_request_id, + CreationTime=datetime.now(timezone.utc).isoformat(), + ModificationTime=datetime.now(timezone.utc).isoformat(), + ) + store.firewall_rule_group_associations[id] = firewall_rule_group_association + route53resolver_backends[context.account_id][context.region].tagger.tag_resource( + arn, tags or [] + ) + return AssociateFirewallRuleGroupResponse( + FirewallRuleGroupAssociation=firewall_rule_group_association + ) + + def disassociate_firewall_rule_group( + self, context: RequestContext, firewall_rule_group_association_id: ResourceId, **kwargs + ) -> DisassociateFirewallRuleGroupResponse: + """Disassociate a DNS Firewall rule group from a VPC.""" + store = self.get_store(context.account_id, context.region) + firewall_rule_group_association: FirewallRuleGroupAssociation = ( + store.delete_firewall_rule_group_association(firewall_rule_group_association_id) + ) + return DisassociateFirewallRuleGroupResponse( + FirewallRuleGroupAssociation=firewall_rule_group_association + ) + + def get_firewall_rule_group_association( + self, context: RequestContext, firewall_rule_group_association_id: ResourceId, **kwargs + ) -> GetFirewallRuleGroupAssociationResponse: + """Returns the Firewall Rule Group Association that you specified.""" + store = self.get_store(context.account_id, context.region) + firewall_rule_group_association: FirewallRuleGroupAssociation = ( + store.get_firewall_rule_group_association(firewall_rule_group_association_id) + ) + return GetFirewallRuleGroupAssociationResponse( + FirewallRuleGroupAssociation=firewall_rule_group_association + ) + + def update_firewall_rule_group_association( + self, + context: RequestContext, + firewall_rule_group_association_id: ResourceId, + priority: Priority = None, + mutation_protection: MutationProtectionStatus = None, + name: Name = None, + **kwargs, + ) -> UpdateFirewallRuleGroupAssociationResponse: + """Updates the specified Firewall Rule Group Association.""" + store = self.get_store(context.account_id, context.region) + validate_priority(priority=priority) + validate_mutation_protection(mutation_protection=mutation_protection) + + firewall_rule_group_association: FirewallRuleGroupAssociation = ( + store.get_firewall_rule_group_association(firewall_rule_group_association_id) + ) + + if priority: + firewall_rule_group_association["Priority"] = priority + if mutation_protection: + firewall_rule_group_association["MutationProtection"] = mutation_protection + if name: + firewall_rule_group_association["Name"] = name + + return UpdateFirewallRuleGroupAssociationResponse( + FirewallRuleGroupAssociation=firewall_rule_group_association + ) + + def create_resolver_query_log_config( + self, + context: RequestContext, + name: ResolverQueryLogConfigName, + destination_arn: DestinationArn, + creator_request_id: CreatorRequestId, + tags: TagList = None, + **kwargs, + ) -> CreateResolverQueryLogConfigResponse: + store = self.get_store(context.account_id, 
context.region) + validate_destination_arn(destination_arn) + id = get_resolver_query_log_config_id() + arn = arns.route53_resolver_query_log_config_arn(id, context.account_id, context.region) + resolver_query_log_config: ResolverQueryLogConfig = ResolverQueryLogConfig( + Id=id, + Arn=arn, + Name=name, + AssociationCount=0, + Status="CREATED", + OwnerId=context.account_id, + ShareStatus="NOT_SHARED", + DestinationArn=destination_arn, + CreatorRequestId=creator_request_id, + CreationTime=datetime.now(timezone.utc).isoformat(), + ) + store.resolver_query_log_configs[id] = resolver_query_log_config + route53resolver_backends[context.account_id][context.region].tagger.tag_resource( + arn, tags or [] + ) + return CreateResolverQueryLogConfigResponse( + ResolverQueryLogConfig=resolver_query_log_config + ) + + def create_resolver_endpoint( + self, + context: RequestContext, + creator_request_id: CreatorRequestId, + security_group_ids: SecurityGroupIds, + direction: ResolverEndpointDirection, + ip_addresses: IpAddressesRequest, + name: Name = None, + outpost_arn: OutpostArn = None, + preferred_instance_type: OutpostInstanceType = None, + tags: TagList = None, + resolver_endpoint_type: ResolverEndpointType = None, + protocols: ProtocolList = None, + **kwargs, + ) -> CreateResolverEndpointResponse: + create_resolver_endpoint_resp = call_moto(context) + create_resolver_endpoint_resp["ResolverEndpoint"]["Status"] = ( + ResolverQueryLogConfigStatus.CREATING + ) + return create_resolver_endpoint_resp + + def get_resolver_query_log_config( + self, context: RequestContext, resolver_query_log_config_id: ResourceId, **kwargs + ) -> GetResolverQueryLogConfigResponse: + store = self.get_store(context.account_id, context.region) + resolver_query_log_config: ResolverQueryLogConfig = store.get_resolver_query_log_config( + resolver_query_log_config_id + ) + return GetResolverQueryLogConfigResponse(ResolverQueryLogConfig=resolver_query_log_config) + + def delete_resolver_query_log_config( + self, context: RequestContext, resolver_query_log_config_id: ResourceId, **kwargs + ) -> DeleteResolverQueryLogConfigResponse: + store = self.get_store(context.account_id, context.region) + resolver_query_log_config: ResolverQueryLogConfig = store.delete_resolver_query_log_config( + resolver_query_log_config_id + ) + return DeleteResolverQueryLogConfigResponse( + ResolverQueryLogConfig=resolver_query_log_config + ) + + def list_resolver_query_log_configs( + self, + context: RequestContext, + max_results: MaxResults = None, + next_token: NextToken = None, + filters: Filters = None, + sort_by: SortByKey = None, + sort_order: SortOrder = None, + **kwargs, + ) -> ListResolverQueryLogConfigsResponse: + store = self.get_store(context.account_id, context.region) + resolver_query_log_configs = [] + for resolver_query_log_config in store.resolver_query_log_configs.values(): + resolver_query_log_configs.append(ResolverQueryLogConfig(resolver_query_log_config)) + return ListResolverQueryLogConfigsResponse( + ResolverQueryLogConfigs=resolver_query_log_configs, + TotalCount=len(resolver_query_log_configs), + ) + + def associate_resolver_query_log_config( + self, + context: RequestContext, + resolver_query_log_config_id: ResourceId, + resource_id: ResourceId, + **kwargs, + ) -> AssociateResolverQueryLogConfigResponse: + store = self.get_store(context.account_id, context.region) + id = get_route53_resolver_query_log_config_association_id() + + resolver_query_log_config_association: ResolverQueryLogConfigAssociation = ( + 
ResolverQueryLogConfigAssociation( + Id=id, + ResolverQueryLogConfigId=resolver_query_log_config_id, + ResourceId=resource_id, + Status="ACTIVE", + Error="NONE", + ErrorMessage="", + CreationTime=datetime.now(timezone.utc).isoformat(), + ) + ) + + store.resolver_query_log_config_associations[id] = resolver_query_log_config_association + + return AssociateResolverQueryLogConfigResponse( + ResolverQueryLogConfigAssociation=resolver_query_log_config_association + ) + + def disassociate_resolver_query_log_config( + self, + context: RequestContext, + resolver_query_log_config_id: ResourceId, + resource_id: ResourceId, + **kwargs, + ) -> DisassociateResolverQueryLogConfigResponse: + store = self.get_store(context.account_id, context.region) + resolver_query_log_config_association = store.delete_resolver_query_log_config_associations( + resolver_query_log_config_id, resource_id + ) + + return DisassociateResolverQueryLogConfigResponse( + ResolverQueryLogConfigAssociation=resolver_query_log_config_association + ) + + def get_resolver_query_log_config_association( + self, + context: RequestContext, + resolver_query_log_config_association_id: ResourceId, + **kwargs, + ) -> GetResolverQueryLogConfigAssociationResponse: + store = self.get_store(context.account_id, context.region) + resolver_query_log_config_association: ResolverQueryLogConfigAssociation = ( + store.get_resolver_query_log_config_associations( + resolver_query_log_config_association_id + ) + ) + return GetResolverQueryLogConfigAssociationResponse( + ResolverQueryLogConfigAssociation=resolver_query_log_config_association + ) + + def list_resolver_query_log_config_associations( + self, + context: RequestContext, + max_results: MaxResults = None, + next_token: NextToken = None, + filters: Filters = None, + sort_by: SortByKey = None, + sort_order: SortOrder = None, + **kwargs, + ) -> ListResolverQueryLogConfigAssociationsResponse: + store = self.get_store(context.account_id, context.region) + resolver_query_log_config_associations = [] + for ( + resolver_query_log_config_association + ) in store.resolver_query_log_config_associations.values(): + resolver_query_log_config_associations.append( + ResolverQueryLogConfigAssociation(resolver_query_log_config_association) + ) + return ListResolverQueryLogConfigAssociationsResponse( + TotalCount=len(resolver_query_log_config_associations), + ResolverQueryLogConfigAssociations=resolver_query_log_config_associations, + ) + + def get_firewall_config( + self, context: RequestContext, resource_id: ResourceId, **kwargs + ) -> GetFirewallConfigResponse: + store = self.get_store(context.account_id, context.region) + firewall_config = store.get_or_create_firewall_config( + resource_id, context.region, context.account_id + ) + return GetFirewallConfigResponse(FirewallConfig=firewall_config) + + def list_firewall_configs( + self, + context: RequestContext, + max_results: ListFirewallConfigsMaxResult = None, + next_token: NextToken = None, + **kwargs, + ) -> ListFirewallConfigsResponse: + store = self.get_store(context.account_id, context.region) + firewall_configs = [] + backend = get_ec2_backend(context.account_id, context.region) + for vpc in backend.vpcs: + if vpc not in store.firewall_configs: + store.get_or_create_firewall_config(vpc, context.region, context.account_id) + for firewall_config in store.firewall_configs.values(): + firewall_configs.append(select_from_typed_dict(FirewallConfig, firewall_config)) + return ListFirewallConfigsResponse(FirewallConfigs=firewall_configs) + + def 
update_firewall_config( + self, + context: RequestContext, + resource_id: ResourceId, + firewall_fail_open: FirewallFailOpenStatus, + **kwargs, + ) -> UpdateFirewallConfigResponse: + store = self.get_store(context.account_id, context.region) + backend = get_ec2_backend(context.account_id, context.region) + for resource_id in backend.vpcs: + if resource_id not in store.firewall_configs: + firewall_config = store.get_or_create_firewall_config( + resource_id, context.region, context.account_id + ) + firewall_config["FirewallFailOpen"] = firewall_fail_open + else: + firewall_config = store.firewall_configs[resource_id] + firewall_config["FirewallFailOpen"] = firewall_fail_open + return UpdateFirewallConfigResponse(FirewallConfig=firewall_config) + + +@patch(MotoRoute53ResolverBackend._matched_arn) +def Route53ResolverBackend_matched_arn(fn, self, resource_arn): + """Given ARN, raise exception if there is no corresponding resource.""" + account_id = extract_account_id_from_arn(resource_arn) + region_name = extract_region_from_arn(resource_arn) + store = Route53ResolverProvider.get_store(account_id, region_name) + + for firewall_rule_group in store.firewall_rule_groups.values(): + if firewall_rule_group.get("Arn") == resource_arn: + return + for firewall_domain_list in store.firewall_domain_lists.values(): + if firewall_domain_list.get("Arn") == resource_arn: + return + for firewall_rule_group_association in store.firewall_rule_group_associations.values(): + if firewall_rule_group_association.get("Arn") == resource_arn: + return + for resolver_query_log_config in store.resolver_query_log_configs.values(): + if resolver_query_log_config.get("Arn") == resource_arn: + return + fn(self, resource_arn) + + +@patch(MotoRoute53ResolverBackend.disassociate_resolver_rule) +def moto_disassociate_resolver_rule(fn, self, resolver_rule_id, vpc_id): + if resolver_rule_id not in self.resolver_rules: + raise ResourceNotFoundException( + f'[RSLVR-00703] Resolver rule with ID "{resolver_rule_id}" does not exist.' + ) + return fn(self, resolver_rule_id, vpc_id) + + +@patch(MotoRoute53ResolverBackend.create_resolver_endpoint) +def moto_create_resolver_endpoint(fn, self, *args, **kwargs): + for group_id in kwargs.get("security_group_ids"): + if not group_id.startswith("sg-"): + raise InvalidParameterException( + f'[RSLVR-00408] Malformed security group ID: "Invalid id: "{group_id}" ' + f'(expecting "sg-...")".' + ) + return fn(self, *args, **kwargs) + + +@patch(MotoRoute53ResolverBackend.delete_resolver_rule) +def moto_delete_resolver_endpoint(fn, self, resolver_rule_id): + if resolver_rule_id not in self.resolver_rules: + raise ResourceNotFoundException( + f'[RSLVR-00703] Resolver rule with ID "{resolver_rule_id}" does not exist.' + ) + return fn(self, resolver_rule_id) + + +@patch(MotoRoute53ResolverBackend.create_resolver_rule) +def moto_create_resolver_rule(fn, self, *args, **kwargs): + direction = [ + x.direction + for x in self.resolver_endpoints.values() + if x.id == kwargs.get("resolver_endpoint_id") + ] + if direction and direction[0] == ResolverEndpointDirection.INBOUND: + raise InvalidRequestException( + "[RSLVR-00700] Resolver rules can only be associated to OUTBOUND resolver endpoints." 
+ ) + return fn(self, *args, **kwargs) diff --git a/localstack-core/localstack/services/route53resolver/utils.py b/localstack-core/localstack/services/route53resolver/utils.py new file mode 100644 index 0000000000000..bcc7357ae5a31 --- /dev/null +++ b/localstack-core/localstack/services/route53resolver/utils.py @@ -0,0 +1,68 @@ +import re + +from localstack.aws.api.route53resolver import ResourceNotFoundException, ValidationException +from localstack.services.ec2.models import get_ec2_backend +from localstack.utils.aws.arns import ARN_PARTITION_REGEX +from localstack.utils.strings import get_random_hex + + +def get_route53_resolver_firewall_rule_group_id(): + return f"rslvr-frg-{get_random_hex(17)}" + + +def get_route53_resolver_firewall_domain_list_id(): + return f"rslvr-fdl-{get_random_hex(17)}" + + +def get_route53_resolver_firewall_rule_group_association_id(): + return f"rslvr-frgassoc-{get_random_hex(17)}" + + +def get_resolver_query_log_config_id(): + return f"rslvr-rqlc-{get_random_hex(17)}" + + +def get_route53_resolver_query_log_config_association_id(): + return f"rslvr-qlcassoc-{get_random_hex(17)}" + + +def get_firewall_config_id(): + return f"rslvr-fc-{get_random_hex(17)}" + + +def validate_priority(priority): + # value of priority can be null in case of update + if priority: + if priority not in range(100, 9900): + raise ValidationException( + f"[RSLVR-02017] The priority value you provided is reserved. Provide a number between '100' and '9900'. Trace Id: '{get_trace_id()}'" + ) + + +def validate_mutation_protection(mutation_protection): + if mutation_protection: + if mutation_protection not in ["ENABLED", "DISABLED"]: + raise ValidationException( + f"[RSLVR-02018] The mutation protection value you provided is reserved. Provide a value of 'ENABLED' or 'DISABLED'. Trace Id: '{get_trace_id()}'" + ) + + +def validate_destination_arn(destination_arn): + arn_pattern = rf"{ARN_PARTITION_REGEX}:(kinesis|logs|s3):?(.*)" + if not re.match(arn_pattern, destination_arn): + raise ResourceNotFoundException( + f"[RSLVR-01014] An Amazon Resource Name (ARN) for the destination is required. Trace Id: '{get_trace_id()}'" + ) + + +def validate_vpc(vpc_id: str, region: str, account_id: str): + backend = get_ec2_backend(account_id, region) + + if vpc_id not in backend.vpcs: + raise ValidationException( + f"[RSLVR-02025] Can't find the resource with ID : '{vpc_id}'. Trace Id: '{get_trace_id()}'" + ) + + +def get_trace_id(): + return f"1-{get_random_hex(8)}-{get_random_hex(24)}" diff --git a/localstack/services/stepfunctions/asl/parse/__init__.py b/localstack-core/localstack/services/s3/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/parse/__init__.py rename to localstack-core/localstack/services/s3/__init__.py diff --git a/localstack-core/localstack/services/s3/checksums.py b/localstack-core/localstack/services/s3/checksums.py new file mode 100644 index 0000000000000..a3cc9ae0f8f77 --- /dev/null +++ b/localstack-core/localstack/services/s3/checksums.py @@ -0,0 +1,169 @@ +# Code ported/inspired from https://github.com/aliyun/aliyun-oss-python-sdk/blob/master/oss2/crc64_combine.py +# This code implements checksum combinations: the ability to get the full checksum of an object with the checksums of +# its parts. 
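+#
+# The math behind it: CRC is linear over GF(2), so CRC(A || B) can be derived from CRC(A), CRC(B) and len(B) alone,
+# by "shifting" CRC(A) through len(B) zero bytes via GF(2) matrix exponentiation (see `_combine` below).
+#
+# A rough usage sketch (illustrative only; `combine_crc32` is defined at the bottom of this module and should be
+# compatible with standard zlib CRC-32 values):
+#
+#   import zlib
+#   part1, part2 = b"hello ", b"world"
+#   combined = combine_crc32(zlib.crc32(part1), zlib.crc32(part2), len(part2))
+#   assert combined == zlib.crc32(part1 + part2).to_bytes(4, "big")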
+import sys
+
+_CRC64NVME_POLYNOMIAL = 0xAD93D23594C93659
+_CRC32_POLYNOMIAL = 0x104C11DB7
+_CRC32C_POLYNOMIAL = 0x1EDC6F41
+_CRC64_XOR_OUT = 0xFFFFFFFFFFFFFFFF
+_CRC32_XOR_OUT = 0xFFFFFFFF
+_GF2_DIM_64 = 64
+_GF2_DIM_32 = 32
+
+
+def gf2_matrix_square(square, mat):
+    for n in range(len(mat)):
+        square[n] = gf2_matrix_times(mat, mat[n])
+
+
+def gf2_matrix_times(mat, vec):
+    summary = 0
+    mat_index = 0
+
+    while vec:
+        if vec & 1:
+            summary ^= mat[mat_index]
+
+        vec >>= 1
+        mat_index += 1
+
+    return summary
+
+
+def _combine(
+    poly: int,
+    size_bits: int,
+    init_crc: int,
+    rev: bool,
+    xor_out: int,
+    crc1: int,
+    crc2: int,
+    len2: int,
+) -> bytes:
+    if len2 == 0:
+        return _encode_to_bytes(crc1, size_bits)
+
+    even = [0] * size_bits
+    odd = [0] * size_bits
+
+    crc1 ^= init_crc ^ xor_out
+
+    if rev:
+        # put operator for one zero bit in odd
+        odd[0] = poly  # the CRC polynomial
+        row = 1
+        for n in range(1, size_bits):
+            odd[n] = row
+            row <<= 1
+    else:
+        row = 2
+        for n in range(0, size_bits - 1):
+            odd[n] = row
+            row <<= 1
+        odd[size_bits - 1] = poly
+
+    gf2_matrix_square(even, odd)
+
+    gf2_matrix_square(odd, even)
+
+    while True:
+        gf2_matrix_square(even, odd)
+        if len2 & 1:
+            crc1 = gf2_matrix_times(even, crc1)
+        len2 >>= 1
+        if len2 == 0:
+            break
+
+        gf2_matrix_square(odd, even)
+        if len2 & 1:
+            crc1 = gf2_matrix_times(odd, crc1)
+        len2 >>= 1
+
+        if len2 == 0:
+            break
+
+    crc1 ^= crc2
+
+    return _encode_to_bytes(crc1, size_bits)
+
+
+def _encode_to_bytes(crc: int, size_bits: int) -> bytes:
+    if size_bits == 64:
+        return crc.to_bytes(8, byteorder="big")
+    elif size_bits == 32:
+        return crc.to_bytes(4, byteorder="big")
+    else:
+        raise ValueError("size_bits must be 32 or 64")
+
+
+def _bitrev(x: int, n: int):
+    # Bit reverse the input value.
+    x = int(x)
+    y = 0
+    for i in range(n):
+        y = (y << 1) | (x & 1)
+        x = x >> 1
+    if ((1 << n) - 1) <= sys.maxsize:
+        return int(y)
+    return y
+
+
+def _verify_params(size_bits: int, init_crc: int, xor_out: int):
+    """
+    The following function validates the parameters of the CRC, namely the poly and the initial/final XOR values.
+    It returns the size of the CRC (in bits), and "sanitized" initial/final XOR values.
+    """
+    mask = (1 << size_bits) - 1
+
+    # Adjust the initial CRC to the correct data type (unsigned value).
+    init_crc = int(init_crc) & mask
+    if mask <= sys.maxsize:
+        init_crc = int(init_crc)
+
+    # Similar for XOR-out value.
+    xor_out = int(xor_out) & mask
+    if mask <= sys.maxsize:
+        xor_out = int(xor_out)
+
+    return size_bits, init_crc, xor_out
+
+
+def create_combine_function(poly: int, size_bits: int, init_crc=~0, rev=True, xor_out=0):
+    """
+    Returns the proper combine function for the desired checksum algorithm.
+    For example, for the CRC64NVME function, you need to pass the proper polynomial, its size (64), and the proper
+    XOR_OUT (taken from the botocore/httpchecksums.py file).
+    :param poly: the CRC polynomial used (each algorithm has its own; for example, CRC32C is called Castagnoli)
+    :param size_bits: the size of the algorithm, 32 for CRC32 and 64 for CRC64
+    :param init_crc: the init_crc, always 0 in our case
+    :param rev: reversing the polynomial, true in our case as well
+    :param xor_out: value used to initialize the register, as we don't specify init_crc
+    :return: a combine function taking (crc1, crc2, len2)
+    """
+    size_bits, init_crc, xor_out = _verify_params(size_bits, init_crc, xor_out)
+
+    mask = (1 << size_bits) - 1
+    if rev:
+        poly = _bitrev(poly & mask, size_bits)
+    else:
+        poly = poly & mask
+
+    def combine_func(crc1: bytes | int, crc2: bytes | int, len2: int):
+        if isinstance(crc1, bytes):
+            crc1 = int.from_bytes(crc1, byteorder="big")
+        if isinstance(crc2, bytes):
+            crc2 = int.from_bytes(crc2, byteorder="big")
+        return _combine(poly, size_bits, init_crc ^ xor_out, rev, xor_out, crc1, crc2, len2)
+
+    return combine_func
+
+
+combine_crc64_nvme = create_combine_function(
+    _CRC64NVME_POLYNOMIAL, 64, init_crc=0, xor_out=_CRC64_XOR_OUT
+)
+combine_crc32 = create_combine_function(_CRC32_POLYNOMIAL, 32, init_crc=0, xor_out=_CRC32_XOR_OUT)
+combine_crc32c = create_combine_function(_CRC32C_POLYNOMIAL, 32, init_crc=0, xor_out=_CRC32_XOR_OUT)
+
+
+__all__ = ["combine_crc32", "combine_crc32c", "combine_crc64_nvme"]
diff --git a/localstack-core/localstack/services/s3/codec.py b/localstack-core/localstack/services/s3/codec.py
new file mode 100644
index 0000000000000..9d1b3167ccda8
--- /dev/null
+++ b/localstack-core/localstack/services/s3/codec.py
@@ -0,0 +1,126 @@
+import io
+from typing import IO, Any, Optional
+
+
+class AwsChunkedDecoder(io.RawIOBase):
+    """
+    This helper class takes an IO[bytes] stream and decodes it on the fly, so that S3 can directly access the stream
+    without worrying about implementation details of `aws-chunked`.
+    It needs to expose the trailing headers, which will be available once the stream is fully read.
+    You can also directly pass the S3 Object, so that the decoder sets the checksum value on it once it's done.
+    See the `aws-chunked` format here: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+    """
+
+    def readable(self):
+        return True
+
+    def __init__(
+        self, stream: IO[bytes], decoded_content_length: int, s3_object: Optional[Any] = None
+    ):
+        self._stream = stream
+
+        self._decoded_length = decoded_content_length  # length of the decoded content
+        self._new_chunk = True
+        self._end_chunk = False
+        self._trailing_set = False
+        self._chunk_size = 0
+        self._trailing_headers = {}
+        self.s3_object = s3_object
+
+    @property
+    def trailing_headers(self):
+        if not self._trailing_set:
+            raise AttributeError(
+                "The stream has not been fully read yet, the trailing headers are not available."
+            )
+        return self._trailing_headers
+
+    def seekable(self):
+        return self._stream.seekable()
+
+    def readinto(self, b):
+        with memoryview(b) as view, view.cast("B") as byte_view:
+            data = self.read(len(byte_view))
+            byte_view[: len(data)] = data
+            return len(data)
+
+    def read(self, size=-1):
+        """
+        Read from the underlying stream, and return at most `size` decoded bytes.
+        If a chunk is smaller than `size`, we will return less than asked, but we will always return data if there
+        are chunks left.
+        :param size: amount to read, please note that it can return less than asked
+        :return: bytes from the underlying stream
+        """
+        if size < 0:
+            return self.readall()
+
+        if not size:
+            return b""
+
+        if self._end_chunk:
+            # if it's the end of a chunk, we need to strip the newline at the end of the chunk
+            # before jumping to the new one
+            self._strip_chunk_new_lines()
+            self._new_chunk = True
+            self._end_chunk = False
+
+        if self._new_chunk:
+            # If the _new_chunk flag is set, we have to jump to the next chunk, if there's one
+            self._get_next_chunk_length()
+            self._new_chunk = False
+
+        if self._chunk_size == 0 and self._decoded_length <= 0:
+            # If the next chunk is 0, and we decoded everything, try to get the trailing headers
+            self._get_trailing_headers()
+            if self.s3_object:
+                self._set_checksum_value()
+            return b""
+
+        # take the minimum amount between the requested size and the remaining chunk size
+        # (so we don't over-read from the chunk)
+        amount = min(self._chunk_size, size)
+        data = self._stream.read(amount)
+
+        if data == b"":
+            raise EOFError("Encoded file ended before the end-of-stream marker was reached")
+
+        read = len(data)
+        self._chunk_size -= read
+        if self._chunk_size <= 0:
+            self._end_chunk = True
+
+        self._decoded_length -= read
+
+        return data
+
+    def _strip_chunk_new_lines(self):
+        self._stream.read(2)
+
+    def _get_next_chunk_length(self):
+        line = self._stream.readline()
+        chunk_length = int(line.split(b";")[0], 16)
+        self._chunk_size = chunk_length
+
+    def _get_trailing_headers(self):
+        """
+        Once the stream content is read, we try to parse the trailing headers.
+        """
+        # try to get all trailing headers until the end of the stream
+        while line := self._stream.readline():
+            if trailing_header := line.strip():
+                header_key, header_value = trailing_header.decode("utf-8").split(":", maxsplit=1)
+                self._trailing_headers[header_key.lower()] = header_value.strip()
+        self._trailing_set = True
+
+    def _set_checksum_value(self):
+        """
+        If an S3 object was passed, we check the presence of the `checksum_algorithm` field, so that we can properly
+        get the right checksum header value and set it directly on the object. This allows us to transparently access
+        the checksum value provided by the client in the S3 logic.
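+        For example (illustrative): with `checksum_algorithm == "CRC32"`, a trailing header line of the form
+        `x-amz-checksum-crc32: <base64-value>` sets `s3_object.checksum_value` to that base64 string.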
+ """ + if checksum_algorithm := getattr(self.s3_object, "checksum_algorithm", None): + if checksum_value := self._trailing_headers.get( + f"x-amz-checksum-{checksum_algorithm.lower()}" + ): + self.s3_object.checksum_value = checksum_value diff --git a/localstack-core/localstack/services/s3/constants.py b/localstack-core/localstack/services/s3/constants.py new file mode 100644 index 0000000000000..510494d048d47 --- /dev/null +++ b/localstack-core/localstack/services/s3/constants.py @@ -0,0 +1,124 @@ +from localstack.aws.api.s3 import ( + ChecksumAlgorithm, + Grantee, + Permission, + PublicAccessBlockConfiguration, + ServerSideEncryption, + ServerSideEncryptionByDefault, + ServerSideEncryptionRule, + StorageClass, +) +from localstack.aws.api.s3 import Type as GranteeType + +S3_VIRTUAL_HOST_FORWARDED_HEADER = "x-s3-vhost-forwarded-for" + +S3_UPLOAD_PART_MIN_SIZE = 5242880 +""" +This is minimum size allowed by S3 when uploading more than one part for a Multipart Upload, except for the last part +""" + +# These 2 values have been the historical hardcoded values for S3 credentials if needing to validate S3 pre-signed URLs +DEFAULT_PRE_SIGNED_ACCESS_KEY_ID = "test" +DEFAULT_PRE_SIGNED_SECRET_ACCESS_KEY = "test" + +AUTHENTICATED_USERS_ACL_GROUP = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" +ALL_USERS_ACL_GROUP = "http://acs.amazonaws.com/groups/global/AllUsers" +LOG_DELIVERY_ACL_GROUP = "http://acs.amazonaws.com/groups/s3/LogDelivery" + +VALID_ACL_PREDEFINED_GROUPS = { + AUTHENTICATED_USERS_ACL_GROUP, + ALL_USERS_ACL_GROUP, + LOG_DELIVERY_ACL_GROUP, +} + +VALID_GRANTEE_PERMISSIONS = { + Permission.FULL_CONTROL, + Permission.READ, + Permission.READ_ACP, + Permission.WRITE, + Permission.WRITE_ACP, +} + +VALID_STORAGE_CLASSES = [ + StorageClass.STANDARD, + StorageClass.STANDARD_IA, + StorageClass.GLACIER, + StorageClass.GLACIER_IR, + StorageClass.REDUCED_REDUNDANCY, + StorageClass.ONEZONE_IA, + StorageClass.INTELLIGENT_TIERING, + StorageClass.DEEP_ARCHIVE, +] + +ARCHIVES_STORAGE_CLASSES = [ + StorageClass.GLACIER, + StorageClass.DEEP_ARCHIVE, +] + +CHECKSUM_ALGORITHMS: list[ChecksumAlgorithm] = [ + ChecksumAlgorithm.SHA1, + ChecksumAlgorithm.SHA256, + ChecksumAlgorithm.CRC32, + ChecksumAlgorithm.CRC32C, + ChecksumAlgorithm.CRC64NVME, +] + +# response header overrides the client may request +ALLOWED_HEADER_OVERRIDES = { + "ResponseContentType": "ContentType", + "ResponseContentLanguage": "ContentLanguage", + "ResponseExpires": "Expires", + "ResponseCacheControl": "CacheControl", + "ResponseContentDisposition": "ContentDisposition", + "ResponseContentEncoding": "ContentEncoding", +} + +# Whether to enable S3 bucket policy enforcement in moto - currently disabled, as some recent CDK versions +# are creating bucket policies that enforce aws:SecureTransport, which makes the CDK deployment fail. +# TODO: potentially look into making configurable +ENABLE_MOTO_BUCKET_POLICY_ENFORCEMENT = False + + +SYSTEM_METADATA_SETTABLE_HEADERS = [ + "CacheControl", + "ContentDisposition", + "ContentEncoding", + "ContentLanguage", + "ContentType", +] + +# params are required in presigned url +SIGNATURE_V2_PARAMS = ["Signature", "Expires", "AWSAccessKeyId"] + +SIGNATURE_V4_PARAMS = [ + "X-Amz-Algorithm", + "X-Amz-Credential", + "X-Amz-Date", + "X-Amz-Expires", + "X-Amz-SignedHeaders", + "X-Amz-Signature", +] + +# The chunk size to use when iterating over and writing to S3 streams. 
+# chosen as middle ground between memory usage and amount of iterations over the S3 object body
+# This is the AWS-recommended size when uploading chunks
+# https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+S3_CHUNK_SIZE = 65536
+
+DEFAULT_BUCKET_ENCRYPTION = ServerSideEncryptionRule(
+    ApplyServerSideEncryptionByDefault=ServerSideEncryptionByDefault(
+        SSEAlgorithm=ServerSideEncryption.AES256,
+    ),
+    BucketKeyEnabled=False,
+)
+
+DEFAULT_PUBLIC_BLOCK_ACCESS = PublicAccessBlockConfiguration(
+    BlockPublicAcls=True,
+    BlockPublicPolicy=True,
+    RestrictPublicBuckets=True,
+    IgnorePublicAcls=True,
+)
+
+AUTHENTICATED_USERS_ACL_GRANTEE = Grantee(URI=AUTHENTICATED_USERS_ACL_GROUP, Type=GranteeType.Group)
+ALL_USERS_ACL_GRANTEE = Grantee(URI=ALL_USERS_ACL_GROUP, Type=GranteeType.Group)
+LOG_DELIVERY_ACL_GRANTEE = Grantee(URI=LOG_DELIVERY_ACL_GROUP, Type=GranteeType.Group)
diff --git a/localstack-core/localstack/services/s3/cors.py b/localstack-core/localstack/services/s3/cors.py
new file mode 100644
index 0000000000000..325393e724a92
--- /dev/null
+++ b/localstack-core/localstack/services/s3/cors.py
@@ -0,0 +1,307 @@
+import logging
+import re
+from typing import Optional, Protocol, Tuple
+
+from werkzeug.datastructures import Headers
+
+from localstack import config
+from localstack.aws.api import RequestContext
+from localstack.aws.api.s3 import (
+    AccessForbidden,
+    BadRequest,
+    CORSConfiguration,
+    CORSRule,
+    CORSRules,
+)
+from localstack.aws.chain import Handler, HandlerChain
+
+# TODO: refactor those to expose the needed methods
+from localstack.aws.handlers.cors import CorsEnforcer, CorsResponseEnricher
+from localstack.aws.protocol.op_router import RestServiceOperationRouter
+from localstack.aws.spec import get_service_catalog
+from localstack.config import S3_VIRTUAL_HOSTNAME
+from localstack.http import Request, Response
+from localstack.services.s3.utils import S3_VIRTUAL_HOSTNAME_REGEX
+
+# TODO: add more logging statements
+LOG = logging.getLogger(__name__)
+
+_s3_virtual_host_regex = re.compile(S3_VIRTUAL_HOSTNAME_REGEX)
+FAKE_HOST_ID = "9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg="
+
+# TODO: refactor those to expose the needed methods maybe in another way that both can import
+add_default_headers = CorsResponseEnricher.add_cors_headers
+is_origin_allowed_default = CorsEnforcer.is_cors_origin_allowed
+
+
+class BucketCorsIndex(Protocol):
+    @property
+    def cors(self) -> dict[str, CORSConfiguration]:
+        raise NotImplementedError
+
+    @property
+    def buckets(self) -> set[str]:
+        raise NotImplementedError
+
+    def invalidate(self):
+        raise NotImplementedError
+
+
+class S3CorsHandler(Handler):
+    bucket_cors_index: BucketCorsIndex
+
+    def __init__(self, bucket_cors_index: BucketCorsIndex):
+        self.bucket_cors_index = bucket_cors_index
+        self._service = get_service_catalog().get("s3")
+        self._s3_op_router = RestServiceOperationRouter(self._service)
+
+    def __call__(self, chain: HandlerChain, context: RequestContext, response: Response):
+        self.handle_cors(chain, context, response)
+
+    def pre_parse_s3_request(self, request: Request) -> Tuple[bool, Optional[str]]:
+        """
+        Parse the request to try to determine if it's directed towards S3. It tries to match on the host first, then
+        checks whether the targeted bucket exists. If we could not determine from the host that it is an S3 request,
+        but the first element in the path is an existing bucket, we assume it is S3.
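+        For example (illustrative): a request to `http://my-bucket.s3.localhost.localstack.cloud:4566/my-key` is
+        recognized as S3 from the host alone, while a path-style `http://localhost:4566/my-bucket/my-key` is only
+        treated as S3 if the bucket `my-bucket` actually exists.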
+ :param request: Request from the context + :return: is_s3, whether we're certain it's a s3 request, and bucket_name if the bucket exists + """ + is_s3: bool + bucket_name: str + + path = request.path + host = request.host + + # first, try to figure out best-effort whether the request is an s3 request + if host.startswith(S3_VIRTUAL_HOSTNAME): + is_s3 = True + bucket_name = path.split("/")[1] + # try to extract the bucket from the hostname (the "in" check is a minor optimization) + elif ".s3" in host and (match := _s3_virtual_host_regex.match(host)): + is_s3 = True + bucket_name = match.group("bucket") + # otherwise we're not sure, and whether it's s3 depends on whether the bucket exists. check later + else: + is_s3 = False + bucket_name = path.split("/")[1] + + existing_buckets = self.bucket_cors_index.buckets + if bucket_name not in existing_buckets: + return is_s3, None + + return True, bucket_name + + def handle_cors(self, chain: HandlerChain, context: RequestContext, response: Response): + """ + Handle CORS for S3 requests. S3 CORS rules can be configured. + https://docs.aws.amazon.com/AmazonS3/latest/userguide/cors.html + https://docs.aws.amazon.com/AmazonS3/latest/userguide/ManageCorsUsing.html + """ + + # this is used with the new ASF S3 provider + # although, we could use it to pre-parse the request and set the context to move the service name parser + if config.DISABLE_CUSTOM_CORS_S3: + return + + request = context.request + is_s3, bucket_name = self.pre_parse_s3_request(context.request) + + if not is_s3: + # continue the chain, let the default CORS handler take care of the request + return + + # set the service so that the regular CORS enforcer knows it needs to ignore this request + context.service = self._service + + is_options_request = request.method == "OPTIONS" + + def stop_options_chain(): + """ + Stops the chain to avoid the OPTIONS request being parsed. The request is ready to be returned to the + client. We also need to add specific headers normally added by the serializer for regular requests. + """ + request_id = context.request_id + response.headers["x-amz-request-id"] = request_id + response.headers["x-amz-id-2"] = ( + f"MzRISOwyjmnup{request_id}7/JypPGXLh0OVFGcJaaO3KW/hRAqKOpIEEp" + ) + + response.set_response(b"") + response.headers.pop("Content-Type", None) + chain.stop() + + # check the presence of the Origin header. If not there, it means the request is not concerned about CORS + if not (origin := request.headers.get("Origin")): + if is_options_request: + context.operation = self._get_op_from_request(request) + raise BadRequest( + "Insufficient information. Origin request header needed.", HostId=FAKE_HOST_ID + ) + else: + # If the header is missing, Amazon S3 doesn't treat the request as a cross-origin request, + # and doesn't send CORS response headers in the response. 
+                return
+
+        is_origin_allowed_by_default = is_origin_allowed_default(request.headers)
+
+        # The bucket does not exist or does not have CORS configured
+        # might apply default LS CORS or raise AWS specific errors
+        if not bucket_name or bucket_name not in self.bucket_cors_index.cors:
+            # if the origin is allowed by localstack per default, add the default LS CORS headers
+            if is_origin_allowed_by_default:
+                add_default_headers(
+                    response_headers=response.headers, request_headers=request.headers
+                )
+                if is_options_request:
+                    stop_options_chain()
+                return
+            # if the origin is not allowed, raise a specific S3 error in case of OPTIONS
+            # if it's a regular request, simply return without adding CORS
+            else:
+                if is_options_request:
+                    if not bucket_name:
+                        message = "CORSResponse: Bucket not found"
+                    else:
+                        message = "CORSResponse: CORS is not enabled for this bucket."
+
+                    context.operation = self._get_op_from_request(request)
+                    raise AccessForbidden(
+                        message,
+                        HostId=FAKE_HOST_ID,
+                        Method=request.headers.get("Access-Control-Request-Method", "OPTIONS"),
+                        ResourceType="BUCKET",
+                    )
+
+                # we return without adding any CORS headers, we could even block the request with 403 here
+                return
+
+        rules = self.bucket_cors_index.cors[bucket_name]["CORSRules"]
+
+        if not (rule := self.match_rules(request, rules)):
+            if is_options_request:
+                context.operation = self._get_op_from_request(request)
+                raise AccessForbidden(
+                    "CORSResponse: This CORS request is not allowed. This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted by the resource's CORS spec.",
+                    HostId=FAKE_HOST_ID,
+                    Method=request.headers.get("Access-Control-Request-Method"),
+                    ResourceType="OBJECT",
+                )

+            if is_options_request:
+                stop_options_chain()
+            return
+
+        is_wildcard = "*" in rule["AllowedOrigins"]
+        # this is contrary to CORS specs. The Access-Control-Allow-Origin should always return the request Origin
+        response.headers["Access-Control-Allow-Origin"] = origin if not is_wildcard else "*"
+        if not is_wildcard:
+            response.headers["Access-Control-Allow-Credentials"] = "true"
+
+        response.headers["Vary"] = (
+            "Origin, Access-Control-Request-Headers, Access-Control-Request-Method"
+        )
+
+        response.headers["Access-Control-Allow-Methods"] = ", ".join(rule["AllowedMethods"])
+
+        if requested_headers := request.headers.get("Access-Control-Request-Headers"):
+            # if the rule matched, it means all Requested Headers are allowed
+            response.headers["Access-Control-Allow-Headers"] = requested_headers.lower()
+
+        if expose_headers := rule.get("ExposeHeaders"):
+            response.headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers)
+
+        if max_age := rule.get("MaxAgeSeconds"):
+            response.headers["Access-Control-Max-Age"] = str(max_age)
+
+        if is_options_request:
+            stop_options_chain()
+
+    def invalidate_cache(self):
+        self.bucket_cors_index.invalidate()
+
+    def match_rules(self, request: Request, rules: CORSRules) -> Optional[CORSRule]:
+        """
+        Try to match the request to the bucket rules. How to match rules:
+        - The request's Origin header must match one of the AllowedOrigin elements.
+        - The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
+          header in case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.
+        - Every header specified in the Access-Control-Request-Headers request header of a pre-flight request
+          must match an AllowedHeader element.
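+        For example (illustrative): a rule with `AllowedOrigins: ["https://*.example.com"]` and
+        `AllowedMethods: ["GET"]` matches a GET request carrying `Origin: https://app.example.com`, since wildcard
+        origins are compiled to regexes (see `_match_rule` below).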
+        :param request: the incoming Request
+        :param rules: CORSRules: the bucket CORS rules
+        :return: a CORSRule if it finds a match, or None
+        """
+        headers = request.headers
+        method = request.method
+        for rule in rules:
+            if matched_rule := self._match_rule(rule, method, headers):
+                return matched_rule
+
+    @staticmethod
+    def _match_rule(rule: CORSRule, method: str, headers: Headers) -> Optional[CORSRule]:
+        """
+        Check if the request method and headers match the given CORS rule.
+        :param rule: CORSRule: a CORS Rule from the bucket
+        :param method: HTTP method of the request
+        :param headers: Headers of the request
+        :return: CORSRule if the rule matches, or None
+        """
+        # AWS treats any method as an OPTIONS if it has the specific OPTIONS CORS headers
+        request_method = headers.get("Access-Control-Request-Method") or method
+        origin = headers.get("Origin")
+        if request_method not in rule["AllowedMethods"]:
+            return
+
+        if "*" not in rule["AllowedOrigins"] and not any(
+            # Escapes any characters that need escaping and replaces * with .+
+            # Transforms http://*.localhost:1234 to http://.+\\.localhost:1234
+            re.match(re.escape(allowed_origin).replace("\\*", ".+") + "$", origin)
+            for allowed_origin in rule["AllowedOrigins"]
+        ):
+            return
+
+        if request_headers := headers.get("Access-Control-Request-Headers"):
+            if not (allowed_headers := rule.get("AllowedHeaders")):
+                return
+
+            lower_case_allowed_headers = {header.lower() for header in allowed_headers}
+            if "*" not in allowed_headers and not all(
+                header in lower_case_allowed_headers
+                for header in request_headers.lower().split(", ")
+            ):
+                return
+
+        return rule
+
+    def _get_op_from_request(self, request: Request):
+        try:
+            op, _ = self._s3_op_router.match(request)
+            return op
+        except Exception:
+            # if we can't parse the request, just set GetObject
+            return self._service.operation_model("GetObject")
+
+
+def s3_cors_request_handler(chain: HandlerChain, context: RequestContext, response: Response):
+    """
+    Handler to add default CORS headers to S3 operations not concerned with CORS configuration
+    """
+    # if DISABLE_CUSTOM_CORS_S3 is true, the default CORS handling will take place, so we won't need to do it here
+    if config.DISABLE_CUSTOM_CORS_S3:
+        return
+
+    if not context.service or context.service.service_name != "s3":
+        return
+
+    if not context.operation or context.operation.name not in ("ListBuckets", "CreateBucket"):
+        return
+
+    if not config.DISABLE_CORS_CHECKS and not is_origin_allowed_default(context.request.headers):
+        LOG.info(
+            "Blocked CORS request from forbidden origin %s",
+            context.request.headers.get("origin") or context.request.headers.get("referer"),
+        )
+        response.status_code = 403
+        chain.terminate()
+
+    add_default_headers(response_headers=response.headers, request_headers=context.request.headers)
diff --git a/localstack-core/localstack/services/s3/exceptions.py b/localstack-core/localstack/services/s3/exceptions.py
new file mode 100644
index 0000000000000..4e00d8dce33a2
--- /dev/null
+++ b/localstack-core/localstack/services/s3/exceptions.py
@@ -0,0 +1,48 @@
+from localstack.aws.api import CommonServiceException
+
+
+class MalformedXML(CommonServiceException):
+    def __init__(self, message=None):
+        if not message:
+            message = "The XML you provided was not well-formed or did not validate against our published schema"
+        super().__init__("MalformedXML", status_code=400, message=message)
+
+
+class MalformedACLError(CommonServiceException):
+    def __init__(self, message=None):
super().__init__("MalformedACLError", status_code=400, message=message) + + +class InvalidRequest(CommonServiceException): + def __init__(self, message=None): + super().__init__("InvalidRequest", status_code=400, message=message) + + +class UnexpectedContent(CommonServiceException): + def __init__(self, message=None): + super().__init__("UnexpectedContent", status_code=400, message=message) + + +class NoSuchConfiguration(CommonServiceException): + def __init__(self, message=None): + super().__init__("NoSuchConfiguration", status_code=404, message=message) + + +class InvalidBucketState(CommonServiceException): + def __init__(self, message=None): + super().__init__("InvalidBucketState", status_code=409, message=message) + + +class NoSuchObjectLockConfiguration(CommonServiceException): + def __init__(self, message=None): + super().__init__("NoSuchObjectLockConfiguration", status_code=404, message=message) + + +class MalformedPolicy(CommonServiceException): + def __init__(self, message=None): + super().__init__("MalformedPolicy", status_code=400, message=message) + + +class InvalidBucketOwnerAWSAccountID(CommonServiceException): + def __init__(self, message=None) -> None: + super().__init__("InvalidBucketOwnerAWSAccountID", status_code=400, message=message) diff --git a/localstack-core/localstack/services/s3/models.py b/localstack-core/localstack/services/s3/models.py new file mode 100644 index 0000000000000..6d96b55b83521 --- /dev/null +++ b/localstack-core/localstack/services/s3/models.py @@ -0,0 +1,788 @@ +import base64 +import hashlib +import logging +from collections import defaultdict +from datetime import datetime +from secrets import token_urlsafe +from typing import Literal, NamedTuple, Optional, Union +from zoneinfo import ZoneInfo + +from localstack.aws.api import CommonServiceException +from localstack.aws.api.s3 import ( + AccessControlPolicy, + AccountId, + AnalyticsConfiguration, + AnalyticsId, + BadDigest, + BucketAccelerateStatus, + BucketKeyEnabled, + BucketName, + BucketRegion, + BucketVersioningStatus, + ChecksumAlgorithm, + ChecksumType, + CompletedPartList, + CORSConfiguration, + DefaultRetention, + EntityTooSmall, + ETag, + Expiration, + IntelligentTieringConfiguration, + IntelligentTieringId, + InvalidArgument, + InvalidPart, + InventoryConfiguration, + InventoryId, + LifecycleRules, + LoggingEnabled, + Metadata, + MethodNotAllowed, + MultipartUploadId, + NoSuchKey, + NoSuchVersion, + NotificationConfiguration, + ObjectKey, + ObjectLockLegalHoldStatus, + ObjectLockMode, + ObjectLockRetainUntilDate, + ObjectLockRetentionMode, + ObjectOwnership, + ObjectStorageClass, + ObjectVersionId, + Owner, + PartNumber, + Payer, + Policy, + PublicAccessBlockConfiguration, + ReplicationConfiguration, + Restore, + ServerSideEncryption, + ServerSideEncryptionRule, + Size, + SSECustomerKeyMD5, + SSEKMSKeyId, + StorageClass, + TransitionDefaultMinimumObjectSize, + WebsiteConfiguration, + WebsiteRedirectLocation, +) +from localstack.constants import AWS_REGION_US_EAST_1 +from localstack.services.s3.constants import ( + DEFAULT_BUCKET_ENCRYPTION, + DEFAULT_PUBLIC_BLOCK_ACCESS, + S3_UPLOAD_PART_MIN_SIZE, +) +from localstack.services.s3.exceptions import InvalidRequest +from localstack.services.s3.utils import CombinedCrcHash, get_s3_checksum, rfc_1123_datetime +from localstack.services.stores import ( + AccountRegionBundle, + BaseStore, + CrossAccountAttribute, + CrossRegionAttribute, + LocalAttribute, +) +from localstack.utils.aws import arns +from localstack.utils.tagging import 
TaggingService + +LOG = logging.getLogger(__name__) + +_gmt_zone_info = ZoneInfo("GMT") + + +# note: not really a need to use a dataclass here, as it has a lot of fields, but only a few are set at creation +class S3Bucket: + name: BucketName + bucket_account_id: AccountId + bucket_region: BucketRegion + creation_date: datetime + multiparts: dict[MultipartUploadId, "S3Multipart"] + objects: Union["KeyStore", "VersionedKeyStore"] + versioning_status: BucketVersioningStatus | None + lifecycle_rules: Optional[LifecycleRules] + transition_default_minimum_object_size: Optional[TransitionDefaultMinimumObjectSize] + policy: Optional[Policy] + website_configuration: Optional[WebsiteConfiguration] + acl: AccessControlPolicy + cors_rules: Optional[CORSConfiguration] + logging: LoggingEnabled + notification_configuration: NotificationConfiguration + payer: Payer + encryption_rule: Optional[ServerSideEncryptionRule] + public_access_block: Optional[PublicAccessBlockConfiguration] + accelerate_status: Optional[BucketAccelerateStatus] + object_lock_enabled: bool + object_ownership: ObjectOwnership + intelligent_tiering_configurations: dict[IntelligentTieringId, IntelligentTieringConfiguration] + analytics_configurations: dict[AnalyticsId, AnalyticsConfiguration] + inventory_configurations: dict[InventoryId, InventoryConfiguration] + object_lock_default_retention: Optional[DefaultRetention] + replication: ReplicationConfiguration + owner: Owner + + # set all buckets parameters here + def __init__( + self, + name: BucketName, + account_id: AccountId, + bucket_region: BucketRegion, + owner: Owner, + acl: AccessControlPolicy = None, + object_ownership: ObjectOwnership = None, + object_lock_enabled_for_bucket: bool = None, + ): + self.name = name + self.bucket_account_id = account_id + self.bucket_region = bucket_region + # If ObjectLock is enabled, it forces the bucket to be versioned as well + self.versioning_status = None if not object_lock_enabled_for_bucket else "Enabled" + self.objects = KeyStore() if not object_lock_enabled_for_bucket else VersionedKeyStore() + self.object_ownership = object_ownership or ObjectOwnership.BucketOwnerEnforced + self.object_lock_enabled = object_lock_enabled_for_bucket + self.encryption_rule = DEFAULT_BUCKET_ENCRYPTION + self.creation_date = datetime.now(tz=_gmt_zone_info) + self.payer = Payer.BucketOwner + self.public_access_block = DEFAULT_PUBLIC_BLOCK_ACCESS + self.multiparts = {} + self.notification_configuration = {} + self.logging = {} + self.cors_rules = None + self.lifecycle_rules = None + self.transition_default_minimum_object_size = None + self.website_configuration = None + self.policy = None + self.accelerate_status = None + self.intelligent_tiering_configurations = {} + self.analytics_configurations = {} + self.inventory_configurations = {} + self.object_lock_default_retention = {} + self.replication = None + self.acl = acl + # see https://docs.aws.amazon.com/AmazonS3/latest/API/API_Owner.html + self.owner = owner + self.bucket_arn = arns.s3_bucket_arn(self.name, region=bucket_region) + + def get_object( + self, + key: ObjectKey, + version_id: ObjectVersionId = None, + http_method: Literal["GET", "PUT", "HEAD", "DELETE"] = "GET", + ) -> "S3Object": + """ + :param key: the Object Key + :param version_id: optional, the versionId of the object + :param http_method: the HTTP method of the original call. 
This is necessary for the exception if the bucket is + versioned or suspended + see: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html + :return: the S3Object from the bucket + :raises NoSuchKey if the object key does not exist at all, or if the object is a DeleteMarker + :raises MethodNotAllowed if the object is a DeleteMarker and the operation is not allowed against it + """ + + if self.versioning_status is None: + if version_id and version_id != "null": + raise InvalidArgument( + "Invalid version id specified", + ArgumentName="versionId", + ArgumentValue=version_id, + ) + + s3_object = self.objects.get(key) + + if not s3_object: + raise NoSuchKey("The specified key does not exist.", Key=key) + + else: + self.objects: VersionedKeyStore + if version_id: + s3_object_version = self.objects.get(key, version_id) + if not s3_object_version: + raise NoSuchVersion( + "The specified version does not exist.", + Key=key, + VersionId=version_id, + ) + elif isinstance(s3_object_version, S3DeleteMarker): + if http_method == "HEAD": + raise CommonServiceException( + code="405", + message="Method Not Allowed", + status_code=405, + ) + + raise MethodNotAllowed( + "The specified method is not allowed against this resource.", + Method=http_method, + ResourceType="DeleteMarker", + DeleteMarker=True, + Allow="DELETE", + VersionId=s3_object_version.version_id, + ) + return s3_object_version + + s3_object = self.objects.get(key) + + if not s3_object: + raise NoSuchKey("The specified key does not exist.", Key=key) + + elif isinstance(s3_object, S3DeleteMarker): + if http_method not in ("HEAD", "GET"): + raise MethodNotAllowed( + "The specified method is not allowed against this resource.", + Method=http_method, + ResourceType="DeleteMarker", + DeleteMarker=True, + Allow="DELETE", + VersionId=s3_object.version_id, + ) + + raise NoSuchKey( + "The specified key does not exist.", + Key=key, + DeleteMarker=True, + VersionId=s3_object.version_id, + ) + + return s3_object + + +class S3Object: + key: ObjectKey + version_id: Optional[ObjectVersionId] + bucket: BucketName + owner: Optional[Owner] + size: Optional[Size] + etag: Optional[ETag] + user_metadata: Metadata + system_metadata: Metadata + last_modified: datetime + expires: Optional[datetime] + expiration: Optional[Expiration] # right now, this is stored in the provider cache + storage_class: StorageClass | ObjectStorageClass + encryption: Optional[ServerSideEncryption] # inherit bucket + kms_key_id: Optional[SSEKMSKeyId] # inherit bucket + bucket_key_enabled: Optional[bool] # inherit bucket + sse_key_hash: Optional[SSECustomerKeyMD5] + checksum_algorithm: ChecksumAlgorithm + checksum_value: str + checksum_type: ChecksumType + lock_mode: Optional[ObjectLockMode | ObjectLockRetentionMode] + lock_legal_status: Optional[ObjectLockLegalHoldStatus] + lock_until: Optional[datetime] + website_redirect_location: Optional[WebsiteRedirectLocation] + acl: Optional[AccessControlPolicy] + is_current: bool + parts: Optional[dict[int, tuple[int, int]]] + restore: Optional[Restore] + internal_last_modified: int + + def __init__( + self, + key: ObjectKey, + etag: Optional[ETag] = None, + size: Optional[int] = None, + version_id: Optional[ObjectVersionId] = None, + user_metadata: Optional[Metadata] = None, + system_metadata: Optional[Metadata] = None, + storage_class: StorageClass = StorageClass.STANDARD, + expires: Optional[datetime] = None, + expiration: Optional[Expiration] = None, + checksum_algorithm: Optional[ChecksumAlgorithm] = None, + checksum_value: 
Optional[str] = None, + checksum_type: Optional[ChecksumType] = ChecksumType.FULL_OBJECT, + encryption: Optional[ServerSideEncryption] = None, + kms_key_id: Optional[SSEKMSKeyId] = None, + sse_key_hash: Optional[SSECustomerKeyMD5] = None, + bucket_key_enabled: bool = False, + lock_mode: Optional[ObjectLockMode | ObjectLockRetentionMode] = None, + lock_legal_status: Optional[ObjectLockLegalHoldStatus] = None, + lock_until: Optional[datetime] = None, + website_redirect_location: Optional[WebsiteRedirectLocation] = None, + acl: Optional[AccessControlPolicy] = None, # TODO + owner: Optional[Owner] = None, + ): + self.key = key + self.user_metadata = ( + {k.lower(): v for k, v in user_metadata.items()} if user_metadata else {} + ) + self.system_metadata = system_metadata or {} + self.version_id = version_id + self.storage_class = storage_class or StorageClass.STANDARD + self.etag = etag + self.size = size + self.expires = expires + self.checksum_algorithm = checksum_algorithm or ChecksumAlgorithm.CRC64NVME + self.checksum_value = checksum_value + self.checksum_type = checksum_type + self.encryption = encryption + self.kms_key_id = kms_key_id + self.bucket_key_enabled = bucket_key_enabled + self.sse_key_hash = sse_key_hash + self.lock_mode = lock_mode + self.lock_legal_status = lock_legal_status + self.lock_until = lock_until + self.acl = acl + self.expiration = expiration + self.website_redirect_location = website_redirect_location + self.is_current = True + self.last_modified = datetime.now(tz=_gmt_zone_info) + self.parts = {} + self.restore = None + self.owner = owner + self.internal_last_modified = 0 + + def get_system_metadata_fields(self) -> dict: + headers = { + "LastModified": self.last_modified_rfc1123, + "ContentLength": str(self.size), + "ETag": self.quoted_etag, + } + if self.expires: + headers["Expires"] = self.expires_rfc1123 + + for metadata_key, metadata_value in self.system_metadata.items(): + headers[metadata_key] = metadata_value + + if self.storage_class != StorageClass.STANDARD: + headers["StorageClass"] = self.storage_class + + return headers + + @property + def last_modified_rfc1123(self) -> str: + # TODO: verify if we need them with proper snapshot testing, for now it's copied from moto + # Different datetime formats depending on how the key is obtained + # https://github.com/boto/boto/issues/466 + return rfc_1123_datetime(self.last_modified) + + @property + def expires_rfc1123(self) -> str: + return rfc_1123_datetime(self.expires) + + @property + def quoted_etag(self) -> str: + return f'"{self.etag}"' + + def is_locked(self, bypass_governance: bool = False) -> bool: + if self.lock_legal_status == "ON": + return True + + if bypass_governance and self.lock_mode == ObjectLockMode.GOVERNANCE: + return False + + if self.lock_until: + return self.lock_until > datetime.now(tz=_gmt_zone_info) + + return False + + +# TODO: could use dataclass, validate after models are set +class S3DeleteMarker: + key: ObjectKey + version_id: str + last_modified: datetime + is_current: bool + + def __init__(self, key: ObjectKey, version_id: ObjectVersionId): + self.key = key + self.version_id = version_id + self.last_modified = datetime.now(tz=_gmt_zone_info) + self.is_current = True + + @staticmethod + def is_locked(*args, **kwargs) -> bool: + # an S3DeleteMarker cannot be lock protected + return False + + +# TODO: could use dataclass, validate after models are set +class S3Part: + part_number: PartNumber + etag: Optional[ETag] + last_modified: datetime + size: Optional[int] + 
checksum_algorithm: Optional[ChecksumAlgorithm] + checksum_value: Optional[str] + + def __init__( + self, + part_number: PartNumber, + size: int = None, + etag: ETag = None, + checksum_algorithm: Optional[ChecksumAlgorithm] = None, + checksum_value: Optional[str] = None, + ): + self.last_modified = datetime.now(tz=_gmt_zone_info) + self.part_number = part_number + self.size = size + self.etag = etag + self.checksum_algorithm = checksum_algorithm + self.checksum_value = checksum_value + + @property + def quoted_etag(self) -> str: + return f'"{self.etag}"' + + +class S3Multipart: + parts: dict[PartNumber, S3Part] + object: S3Object + upload_id: MultipartUploadId + checksum_value: Optional[str] + checksum_type: Optional[ChecksumType] + checksum_algorithm: ChecksumAlgorithm + initiated: datetime + precondition: bool + + def __init__( + self, + key: ObjectKey, + storage_class: StorageClass | ObjectStorageClass = StorageClass.STANDARD, + expires: Optional[datetime] = None, + expiration: Optional[datetime] = None, # come from lifecycle + checksum_algorithm: Optional[ChecksumAlgorithm] = None, + checksum_type: Optional[ChecksumType] = None, + encryption: Optional[ServerSideEncryption] = None, # inherit bucket + kms_key_id: Optional[SSEKMSKeyId] = None, # inherit bucket + bucket_key_enabled: bool = False, # inherit bucket + sse_key_hash: Optional[SSECustomerKeyMD5] = None, + lock_mode: Optional[ObjectLockMode] = None, + lock_legal_status: Optional[ObjectLockLegalHoldStatus] = None, + lock_until: Optional[datetime] = None, + website_redirect_location: Optional[WebsiteRedirectLocation] = None, + acl: Optional[AccessControlPolicy] = None, # TODO + user_metadata: Optional[Metadata] = None, + system_metadata: Optional[Metadata] = None, + initiator: Optional[Owner] = None, + tagging: Optional[dict[str, str]] = None, + owner: Optional[Owner] = None, + precondition: Optional[bool] = None, + ): + self.id = token_urlsafe(96) # MultipartUploadId is 128 characters long + self.initiated = datetime.now(tz=_gmt_zone_info) + self.parts = {} + self.initiator = initiator + self.tagging = tagging + self.checksum_value = None + self.checksum_type = checksum_type + self.checksum_algorithm = checksum_algorithm + self.precondition = precondition + self.object = S3Object( + key=key, + user_metadata=user_metadata, + system_metadata=system_metadata, + storage_class=storage_class or StorageClass.STANDARD, + expires=expires, + expiration=expiration, + checksum_algorithm=checksum_algorithm, + checksum_type=checksum_type, + encryption=encryption, + kms_key_id=kms_key_id, + bucket_key_enabled=bucket_key_enabled, + sse_key_hash=sse_key_hash, + lock_mode=lock_mode, + lock_legal_status=lock_legal_status, + lock_until=lock_until, + website_redirect_location=website_redirect_location, + acl=acl, + owner=owner, + ) + + def complete_multipart( + self, parts: CompletedPartList, mpu_size: int = None, validation_checksum: str = None + ): + last_part_index = len(parts) - 1 + object_etag = hashlib.md5(usedforsecurity=False) + has_checksum = self.checksum_algorithm is not None + checksum_hash = None + if has_checksum: + if self.checksum_type == ChecksumType.COMPOSITE: + checksum_hash = get_s3_checksum(self.checksum_algorithm) + else: + checksum_hash = CombinedCrcHash(self.checksum_algorithm) + + pos = 0 + parts_map = {} + for index, part in enumerate(parts): + part_number = part["PartNumber"] + part_etag = part["ETag"].strip('"') + + s3_part = self.parts.get(part_number) + if ( + not s3_part + or s3_part.etag != part_etag + or (not 
has_checksum and any(k.startswith("Checksum") for k in part)) + ): + raise InvalidPart( + "One or more of the specified parts could not be found. " + "The part may not have been uploaded, " + "or the specified entity tag may not match the part's entity tag.", + ETag=part_etag, + PartNumber=part_number, + UploadId=self.id, + ) + + if has_checksum: + checksum_key = f"Checksum{self.checksum_algorithm.upper()}" + if not (part_checksum := part.get(checksum_key)): + if self.checksum_type == ChecksumType.COMPOSITE: + # weird case, they still try to validate a different checksum type than the multipart + for field in part: + if field.startswith("Checksum"): + algo = field.removeprefix("Checksum").lower() + raise BadDigest( + f"The {algo} you specified for part {part_number} did not match what we received." + ) + + raise InvalidRequest( + f"The upload was created using a {self.checksum_algorithm.lower()} checksum. " + f"The complete request must include the checksum for each part. " + f"It was missing for part {part_number} in the request." + ) + elif part_checksum != s3_part.checksum_value: + raise InvalidPart( + "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", + ETag=part_etag, + PartNumber=part_number, + UploadId=self.id, + ) + + part_checksum_value = base64.b64decode(s3_part.checksum_value) + if self.checksum_type == ChecksumType.COMPOSITE: + checksum_hash.update(part_checksum_value) + else: + checksum_hash.combine(part_checksum_value, s3_part.size) + + elif any(k.startswith("Checksum") for k in part): + raise InvalidPart( + "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", + ETag=part_etag, + PartNumber=part_number, + UploadId=self.id, + ) + + if index != last_part_index and s3_part.size < S3_UPLOAD_PART_MIN_SIZE: + raise EntityTooSmall( + "Your proposed upload is smaller than the minimum allowed size", + ETag=part_etag, + PartNumber=part_number, + MinSizeAllowed=S3_UPLOAD_PART_MIN_SIZE, + ProposedSize=s3_part.size, + ) + + object_etag.update(bytes.fromhex(s3_part.etag)) + # keep track of the parts size, as it can be queried afterward on the object as a Range + parts_map[part_number] = (pos, s3_part.size) + pos += s3_part.size + + if mpu_size and mpu_size != pos: + raise InvalidRequest( + f"The provided 'x-amz-mp-object-size' header value {mpu_size} " + f"does not match what was computed: {pos}" + ) + + if has_checksum: + checksum_value = base64.b64encode(checksum_hash.digest()).decode() + if self.checksum_type == ChecksumType.COMPOSITE: + checksum_value = f"{checksum_value}-{len(parts)}" + + elif self.checksum_type == ChecksumType.FULL_OBJECT: + if validation_checksum and validation_checksum != checksum_value: + raise BadDigest( + f"The {self.object.checksum_algorithm.lower()} you specified did not match the calculated checksum." + ) + + self.checksum_value = checksum_value + self.object.checksum_value = checksum_value + + multipart_etag = f"{object_etag.hexdigest()}-{len(parts)}" + self.object.etag = multipart_etag + self.object.parts = parts_map + + +class KeyStore: + """ + Object representing an S3 Un-versioned Bucket's Key Store. An object is mapped by a key, and you can simply + retrieve the object from that key. 
+ """ + + def __init__(self): + self._store = {} + + def get(self, object_key: ObjectKey) -> S3Object | None: + return self._store.get(object_key) + + def set(self, object_key: ObjectKey, s3_object: S3Object): + self._store[object_key] = s3_object + + def pop(self, object_key: ObjectKey, default=None) -> S3Object | None: + return self._store.pop(object_key, default) + + def values(self, *_, **__) -> list[S3Object | S3DeleteMarker]: + # we create a shallow copy with dict to avoid size changed during iteration + return [value for value in dict(self._store).values()] + + def is_empty(self) -> bool: + return not self._store + + def __contains__(self, item): + return item in self._store + + +class VersionedKeyStore: + """ + Object representing an S3 Versioned Bucket's Key Store. An object is mapped by a key, and adding an object to the + same key will create a new version of it. When deleting the object, a S3DeleteMarker is created and put on top + of the version stack, to signal the object has been "deleted". + This object allows easy retrieval and saving of new object versions. + See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/versioning-workflows.html + """ + + def __init__(self): + self._store = defaultdict(dict) + + @classmethod + def from_key_store(cls, keystore: KeyStore) -> "VersionedKeyStore": + new_versioned_keystore = cls() + for s3_object in keystore.values(): + # TODO: maybe do the object mutation inside the provider instead? but would need to iterate twice + # or do this whole operation inside the provider instead, when actually working on versioning + s3_object.version_id = "null" + new_versioned_keystore.set(object_key=s3_object.key, s3_object=s3_object) + + return new_versioned_keystore + + def get( + self, object_key: ObjectKey, version_id: ObjectVersionId = None + ) -> S3Object | S3DeleteMarker | None: + """ + :param object_key: the key of the Object we need to retrieve + :param version_id: Optional, if not specified, return the current version (last one inserted) + :return: an S3Object or S3DeleteMarker + """ + if not version_id and (versions := self._store.get(object_key)): + for version_id in reversed(versions): + return versions.get(version_id) + + return self._store.get(object_key, {}).get(version_id) + + def set(self, object_key: ObjectKey, s3_object: S3Object | S3DeleteMarker): + """ + Set an S3 object, using its already set VersionId. + If the bucket versioning is `Enabled`, then we're just inserting a new Version. + If the bucket versioning is `Suspended`, the current object version will be set to `null`, so if setting a new + object at the same key, we will override it at the `null` versionId entry. 
+ :param object_key: the key of the Object we are setting + :param s3_object: the S3 object or S3DeleteMarker to set + :return: None + """ + existing_s3_object = self.get(object_key) + if existing_s3_object: + existing_s3_object.is_current = False + + self._store[object_key][s3_object.version_id] = s3_object + + def pop( + self, object_key: ObjectKey, version_id: ObjectVersionId = None, default=None + ) -> S3Object | S3DeleteMarker | None: + versions = self._store.get(object_key) + if not versions: + return None + + object_version = versions.pop(version_id, default) + if not versions: + self._store.pop(object_key) + else: + existing_s3_object = self.get(object_key) + existing_s3_object.is_current = True + + return object_version + + def values(self, with_versions: bool = False) -> list[S3Object | S3DeleteMarker]: + if with_versions: + # we create a shallow copy with dict to avoid size changed during iteration + return [ + object_version + for values in dict(self._store).values() + for object_version in dict(values).values() + ] + + # if `with_versions` is False, then we need to return only the current version if it's not a DeleteMarker + objects = [] + for object_key, versions in dict(self._store).items(): + # we're getting the last set object in the versions dictionary + for version_id in reversed(versions): + current_object = versions[version_id] + if isinstance(current_object, S3DeleteMarker): + break + + objects.append(versions[version_id]) + break + + return objects + + def is_empty(self) -> bool: + return not self._store + + def __contains__(self, item): + return item in self._store + + +class S3Store(BaseStore): + buckets: dict[BucketName, S3Bucket] = CrossRegionAttribute(default=dict) + global_bucket_map: dict[BucketName, AccountId] = CrossAccountAttribute(default=dict) + aws_managed_kms_key_id: SSEKMSKeyId = LocalAttribute(default=str) + + # static tagging service instance + TAGS: TaggingService = CrossAccountAttribute(default=TaggingService) + + +class BucketCorsIndex: + def __init__(self): + self._cors_index_cache = None + self._bucket_index_cache = None + + @property + def cors(self) -> dict[str, CORSConfiguration]: + if self._cors_index_cache is None: + self._bucket_index_cache, self._cors_index_cache = self._build_index() + return self._cors_index_cache + + @property + def buckets(self) -> set[str]: + if self._bucket_index_cache is None: + self._bucket_index_cache, self._cors_index_cache = self._build_index() + return self._bucket_index_cache + + def invalidate(self): + self._cors_index_cache = None + self._bucket_index_cache = None + + @staticmethod + def _build_index() -> tuple[set[BucketName], dict[BucketName, CORSConfiguration]]: + buckets = set() + cors_index = {} + # we create a shallow copy with dict to avoid size changed during iteration, as the store could have new account + # or region create from any other requests + for account_id, regions in dict(s3_stores).items(): + for bucket_name, bucket in dict(regions[AWS_REGION_US_EAST_1].buckets).items(): + bucket: S3Bucket + buckets.add(bucket_name) + if bucket.cors_rules is not None: + cors_index[bucket_name] = bucket.cors_rules + + return buckets, cors_index + + +class EncryptionParameters(NamedTuple): + encryption: ServerSideEncryption + kms_key_id: SSEKMSKeyId + bucket_key_enabled: BucketKeyEnabled + + +class ObjectLockParameters(NamedTuple): + lock_until: ObjectLockRetainUntilDate + lock_legal_status: ObjectLockLegalHoldStatus + lock_mode: ObjectLockMode | ObjectLockRetentionMode + + +s3_stores = 
AccountRegionBundle[S3Store]("s3", S3Store) diff --git a/localstack-core/localstack/services/s3/notifications.py b/localstack-core/localstack/services/s3/notifications.py new file mode 100644 index 0000000000000..48ece2ab9e788 --- /dev/null +++ b/localstack-core/localstack/services/s3/notifications.py @@ -0,0 +1,786 @@ +from __future__ import annotations + +import datetime +import json +import logging +import re +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, TypedDict, Union +from urllib.parse import quote + +from botocore.exceptions import ClientError + +from localstack.aws.api import RequestContext +from localstack.aws.api.events import PutEventsRequestEntry +from localstack.aws.api.lambda_ import InvocationType +from localstack.aws.api.s3 import ( + AccountId, + BucketName, + BucketRegion, + Event, + EventBridgeConfiguration, + EventList, + LambdaFunctionArn, + LambdaFunctionConfiguration, + NotificationConfiguration, + NotificationConfigurationFilter, + NotificationId, + ObjectKey, + QueueArn, + QueueConfiguration, + StorageClass, + TopicArn, + TopicConfiguration, +) +from localstack.aws.connect import connect_to +from localstack.services.s3.models import S3Bucket, S3DeleteMarker, S3Object +from localstack.services.s3.utils import _create_invalid_argument_exc +from localstack.utils.aws import arns +from localstack.utils.aws.arns import ARN_PARTITION_REGEX, get_partition, parse_arn, s3_bucket_arn +from localstack.utils.aws.client_types import ServicePrincipal +from localstack.utils.bootstrap import is_api_enabled +from localstack.utils.strings import short_uid +from localstack.utils.time import parse_timestamp, timestamp_millis + +LOG = logging.getLogger(__name__) + +EVENT_OPERATION_MAP = { + "PutObject": Event.s3_ObjectCreated_Put, + "CopyObject": Event.s3_ObjectCreated_Copy, + "CompleteMultipartUpload": Event.s3_ObjectCreated_CompleteMultipartUpload, + "PostObject": Event.s3_ObjectCreated_Post, + "PutObjectTagging": Event.s3_ObjectTagging_Put, + "DeleteObjectTagging": Event.s3_ObjectTagging_Delete, + "DeleteObject": Event.s3_ObjectRemoved_Delete, + "DeleteObjects": Event.s3_ObjectRemoved_Delete, + "PutObjectAcl": Event.s3_ObjectAcl_Put, + "RestoreObject": Event.s3_ObjectRestore_Post, +} + +HEADER_AMZN_XRAY = "X-Amzn-Trace-Id" + + +class S3NotificationContent(TypedDict): + s3SchemaVersion: str + configurationId: NotificationId + bucket: Dict[str, str] # todo + object: Dict[str, Union[str, int]] # todo + + +class EventRecord(TypedDict): + eventVersion: str + eventSource: str + awsRegion: str + eventTime: str + eventName: str + userIdentity: Dict[str, str] + requestParameters: Dict[str, str] + responseElements: Dict[str, str] + s3: S3NotificationContent + + +class Notification(TypedDict): + Records: List[EventRecord] + + +@dataclass +class S3EventNotificationContext: + request_id: str + event_type: str + event_time: datetime.datetime + account_id: str + region: str + bucket_name: BucketName + key_name: ObjectKey + xray: str + bucket_location: BucketRegion + bucket_account_id: AccountId + caller: AccountId + key_size: int + key_etag: str + key_version_id: str + key_expiry: datetime.datetime + key_storage_class: Optional[StorageClass] + + @classmethod + def from_request_context_native( + cls, + request_context: RequestContext, + s3_bucket: S3Bucket, + s3_object: S3Object | S3DeleteMarker, + ) -> "S3EventNotificationContext": + """ + Create an S3EventNotificationContext from a RequestContext. 
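+        Illustrative usage (a sketch; names as used in this module):
+        `S3EventNotificationContext.from_request_context_native(context, s3_bucket=s3_bucket, s3_object=s3_object)`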
+ The key is not always present in the request context depending on the event type. In that case, we can use + a provided one. + :param request_context: RequestContext + :param s3_bucket: S3Bucket + :param s3_object: S3Object passed directly to the context + :return: S3EventNotificationContext + """ + bucket_name = request_context.service_request["Bucket"] + event_type = EVENT_OPERATION_MAP.get(request_context.operation.wire_name, "") + + if isinstance(s3_object, S3DeleteMarker): + # AWS sets the etag of a DeleteMarker to the etag of an empty object + etag = "d41d8cd98f00b204e9800998ecf8427e" + key_size = 0 + key_expiry = None + storage_class = "" + else: + etag = s3_object.etag.strip('"') + key_size = s3_object.size + key_expiry = s3_object.expires + storage_class = s3_object.storage_class + + return cls( + request_id=request_context.request_id, + event_type=event_type, + event_time=datetime.datetime.now(), + account_id=request_context.account_id, + region=request_context.region, + caller=request_context.account_id, # TODO: use it for `userIdentity` + bucket_name=bucket_name, + bucket_location=s3_bucket.bucket_region, + bucket_account_id=s3_bucket.bucket_account_id, # TODO: use it for bucket owner identity + key_name=quote(s3_object.key), + key_etag=etag, + key_size=key_size, + key_expiry=key_expiry, + key_storage_class=storage_class, + key_version_id=s3_object.version_id + if s3_bucket.versioning_status + else None, # todo: check this? + xray=request_context.request.headers.get(HEADER_AMZN_XRAY), + ) + + +@dataclass +class BucketVerificationContext: + """ + Context object for data required for sending a `s3:TestEvent` like message. + """ + + request_id: str + bucket_name: str + region: str + configuration: Dict + skip_destination_validation: bool + + +def _matching_event(events: EventList, event_name: str) -> bool: + """ + See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-event-types-and-destinations.html + Checks if the event is part of the NotificationConfiguration, and returns if the event should be notified for + this configuration + :param events: the list of events of the NotificationConfiguration + :param event_name: the event type, like s3:ObjectCreated:* or s3:ObjectRemoved:Delete + :return: boolean indicating if the event should be sent to the notifiers + """ + if event_name in events: + return True + wildcard_pattern = f"{event_name[0 : event_name.rindex(':')]}:*" + return wildcard_pattern in events + + +def _matching_filter( + notification_filter: Optional[NotificationConfigurationFilter], key_name: str +) -> bool: + """ + See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html + S3 allows filtering notification events with rules about the key name. 
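+    For example (illustrative), the rule `{"Name": "prefix", "Value": "logs/"}` matches the key `logs/app.log`,
+    but not `images/cat.png`.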
+    If the key has no filter rule, or if it matches the rules, the function returns True (the event should be sent)
+    :param notification_filter: the Filter structure from NotificationConfiguration
+    :param key_name: the key name of the key concerned by the event
+    :return: boolean indicating if the key name matches the rules and the event should be sent
+    """
+    # TODO: implement wildcard filtering
+    if not notification_filter or not notification_filter.get("Key", {}).get("FilterRules"):
+        return True
+    filter_rules = notification_filter.get("Key").get("FilterRules")
+    for rule in filter_rules:
+        name = rule.get("Name", "").lower()
+        value = rule.get("Value", "")
+        if name == "prefix" and not key_name.startswith(value):
+            return False
+        elif name == "suffix" and not key_name.endswith(value):
+            return False
+
+    return True
+
+
+class BaseNotifier:
+    service_name: str
+
+    def notify(self, ctx: S3EventNotificationContext, config: Dict):
+        raise NotImplementedError
+
+    @staticmethod
+    def should_notify(ctx: S3EventNotificationContext, config: Dict) -> bool:
+        """
+        Helper method checking whether the event matches the given notifier configuration (event type and key filter)
+        :param ctx: S3EventNotificationContext
+        :param config: the notification config
+        :return: whether the notifier should send the event
+        """
+        return _matching_event(config["Events"], ctx.event_type) and _matching_filter(
+            config.get("Filter"), ctx.key_name
+        )
+
+    @staticmethod
+    def _get_arn_value_and_name(notifier_configuration: Dict) -> Tuple[str, str]:
+        raise NotImplementedError
+
+    def validate_configuration_for_notifier(
+        self,
+        configurations: List[Dict],
+        skip_destination_validation: bool,
+        context: RequestContext,
+        bucket_name: str,
+    ):
+        for configuration in configurations:
+            self._validate_notification(
+                BucketVerificationContext(
+                    configuration=configuration,
+                    bucket_name=bucket_name,
+                    region=context.region,
+                    request_id=context.request_id,
+                    skip_destination_validation=skip_destination_validation,
+                )
+            )
+
+    def _verify_target(self, target_arn: str, verification_ctx: BucketVerificationContext) -> None:
+        raise NotImplementedError
+
+    def _validate_notification(self, verification_ctx: BucketVerificationContext):
+        """
+        Validates the notification configuration:
+        - sets a default ID if not provided
+        - validates the ARN pattern
+        - validates the Rule names (and normalizes them to capitalized)
+        - checks that the filter value is not empty
+        :param verification_ctx: the verification context containing necessary data for validation
+        :raises InvalidArgument: if the rule is not valid; details are set in the ArgumentName and ArgumentValue members
+        :return:
+        """
+        configuration = verification_ctx.configuration
+        # IDs can be set in the request, but need to be auto-generated if they are not provided
+        if not configuration.get("Id"):
+            configuration["Id"] = short_uid()
+
+        arn, argument_name = self._get_arn_value_and_name(configuration)
+
+        if not re.match(f"{ARN_PARTITION_REGEX}:{self.service_name}:", arn):
+            raise _create_invalid_argument_exc(
+                "The ARN could not be parsed", name=argument_name, value=arn
+            )
+        if not verification_ctx.skip_destination_validation:
+            self._verify_target(arn, verification_ctx)
+
+        if filter_rules := configuration.get("Filter", {}).get("Key", {}).get("FilterRules"):
+            for rule in filter_rules:
+                rule["Name"] = rule["Name"].capitalize()
+                if rule["Name"] not in ["Suffix", "Prefix"]:
+                    raise _create_invalid_argument_exc(
+                        "filter rule name must be either prefix or suffix",
+                        rule["Name"],
rule["Value"], + ) + if not rule["Value"]: + raise _create_invalid_argument_exc( + "filter value cannot be empty", rule["Name"], rule["Value"] + ) + + @staticmethod + def _get_test_payload(verification_ctx: BucketVerificationContext): + return { + "Service": "Amazon S3", + "Event": "s3:TestEvent", + "Time": timestamp_millis(), + "Bucket": verification_ctx.bucket_name, + "RequestId": verification_ctx.request_id, + "HostId": "eftixk72aD6Ap51TnqcoF8eFidJG9Z/2", + } + + @staticmethod + def _get_event_payload( + ctx: S3EventNotificationContext, config_id: NotificationId + ) -> Notification: + # Based on: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html + # TODO: think about caching or generating the payload only once, because only the config_id changes + # except if it is EventBridge. Check that. + partition = get_partition(ctx.region) + record = EventRecord( + eventVersion="2.1", + eventSource="aws:s3", + awsRegion=ctx.bucket_location, + eventTime=timestamp_millis(ctx.event_time), + eventName=ctx.event_type.removeprefix("s3:"), + userIdentity={"principalId": "AIDAJDPLRKLG7UEXAMPLE"}, # TODO: use the real one? + requestParameters={ + "sourceIPAddress": "127.0.0.1" + }, # TODO sourceIPAddress was previously extracted from headers ("X-Forwarded-For") + responseElements={ + "x-amz-request-id": short_uid(), + # todo this one is tricky, as it's generated by the response serializer... + "x-amz-id-2": "eftixk72aD6Ap51TnqcoF8eFidJG9Z/2", # Amazon S3 host that processed the request + }, + s3=S3NotificationContent( + s3SchemaVersion="1.0", + configurationId=config_id, + bucket={ + "name": ctx.bucket_name, + "ownerIdentity": { + "principalId": "A3NL1KOZZKExample" + }, # TODO: use proper principal? + "arn": f"arn:{partition}:s3:::{ctx.bucket_name}", + }, + object={ + "key": ctx.key_name, + "sequencer": "0055AED6DCD90281E5", + }, + ), + ) + + if ctx.key_version_id and ctx.key_version_id != "null": + # object version if bucket is versioning-enabled, otherwise null + record["s3"]["object"]["versionId"] = ctx.key_version_id + + event_type = ctx.event_type.lower() + if any(e in event_type for e in ("created", "restore")): + record["s3"]["object"]["eTag"] = ctx.key_etag + # if we created a DeleteMarker, AWS does not set the `size` field + if "deletemarker" not in event_type: + record["s3"]["object"]["size"] = ctx.key_size + + if "ObjectTagging" in ctx.event_type or "ObjectAcl" in ctx.event_type: + record["eventVersion"] = "2.3" + record["s3"]["object"]["eTag"] = ctx.key_etag + record["s3"]["object"].pop("sequencer") + + if "objectrestore:completed" in event_type: + record["glacierEventData"] = { + "restoreEventData": { + "lifecycleRestorationExpiryTime": timestamp_millis(ctx.key_expiry), + "lifecycleRestoreStorageClass": ctx.key_storage_class, + } + } + record["userIdentity"]["principalId"] = ( + "AmazonCustomer:A3NL1KOZZKExample" # TODO: use proper principal? 
+ ) + # a bit hacky, it is to ensure the eventTime is a bit after the `Post` event, as its instant in LS + # the best would be to delay the publishing of the event + event_time = parse_timestamp(record["eventTime"]) + datetime.timedelta(milliseconds=500) + record["eventTime"] = timestamp_millis(event_time) + + return {"Records": [record]} + + +class SqsNotifier(BaseNotifier): + service_name = "sqs" + + @staticmethod + def _get_arn_value_and_name(queue_configuration: QueueConfiguration) -> Tuple[QueueArn, str]: + return queue_configuration.get("QueueArn", ""), "QueueArn" + + def _verify_target(self, target_arn: str, verification_ctx: BucketVerificationContext) -> None: + if not is_api_enabled("sqs"): + LOG.warning( + "Service 'sqs' is not enabled: skipping validation of the following destination: '%s' " + "Please check your 'SERVICES' configuration variable.", + target_arn, + ) + return + + arn_data = parse_arn(target_arn) + sqs_client = connect_to( + aws_access_key_id=arn_data["account"], region_name=arn_data["region"] + ).sqs + # test if the destination exists (done on AWS side, no permission required) + try: + queue_url = sqs_client.get_queue_url( + QueueName=arn_data["resource"], QueueOwnerAWSAccountId=arn_data["account"] + )["QueueUrl"] + except ClientError: + LOG.exception("Could not validate the notification destination %s", target_arn) + raise _create_invalid_argument_exc( + "Unable to validate the following destination configurations", + name=target_arn, + value="The destination queue does not exist", + ) + # send test event with the request metadata for permissions + # https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-event-types-and-destinations.html#supported-notification-event-types + sqs_client = connect_to(region_name=arn_data["region"]).sqs.request_metadata( + source_arn=s3_bucket_arn(verification_ctx.bucket_name, region=verification_ctx.region), + service_principal=ServicePrincipal.s3, + ) + test_payload = self._get_test_payload(verification_ctx) + try: + sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(test_payload)) + except ClientError as e: + LOG.error( + 'Unable to send test notification for S3 bucket "%s" to SQS queue "%s"', + verification_ctx.bucket_name, + target_arn, + ) + raise _create_invalid_argument_exc( + "Unable to validate the following destination configurations", + name=target_arn, + value="Permissions on the destination queue do not allow S3 to publish notifications from this bucket", + ) from e + + def notify(self, ctx: S3EventNotificationContext, config: QueueConfiguration): + event_payload = self._get_event_payload(ctx, config.get("Id")) + message = json.dumps(event_payload) + queue_arn = config["QueueArn"] + + parsed_arn = parse_arn(queue_arn) + sqs_client = connect_to(region_name=parsed_arn["region"]).sqs.request_metadata( + source_arn=s3_bucket_arn(ctx.bucket_name, region=ctx.region), + service_principal=ServicePrincipal.s3, + ) + try: + queue_url = arns.sqs_queue_url_for_arn(queue_arn) + system_attributes = {} + if ctx.xray: + system_attributes["AWSTraceHeader"] = { + "DataType": "String", + "StringValue": ctx.xray, + } + sqs_client.send_message( + QueueUrl=queue_url, + MessageBody=message, + MessageSystemAttributes=system_attributes, + ) + except Exception: + LOG.exception( + 'Unable to send notification for S3 bucket "%s" to SQS queue "%s"', + ctx.bucket_name, + parsed_arn["resource"], + ) + + +class SnsNotifier(BaseNotifier): + service_name = "sns" + + @staticmethod + def 
_get_arn_value_and_name(topic_configuration: TopicConfiguration) -> [TopicArn, str]: + return topic_configuration.get("TopicArn", ""), "TopicArn" + + def _verify_target(self, target_arn: str, verification_ctx: BucketVerificationContext) -> None: + if not is_api_enabled("sns"): + LOG.warning( + "Service 'sns' is not enabled: skipping validation of the following destination: '%s' " + "Please check your 'SERVICES' configuration variable.", + target_arn, + ) + return + arn_data = parse_arn(target_arn) + sns_client = connect_to( + aws_access_key_id=arn_data["account"], region_name=arn_data["region"] + ).sns + try: + sns_client.get_topic_attributes(TopicArn=target_arn) + except ClientError: + raise _create_invalid_argument_exc( + "Unable to validate the following destination configurations", + name=target_arn, + value="The destination topic does not exist", + ) + + sns_client = connect_to(region_name=arn_data["region"]).sns.request_metadata( + source_arn=s3_bucket_arn(verification_ctx.bucket_name, region=verification_ctx.region), + service_principal=ServicePrincipal.s3, + ) + test_payload = self._get_test_payload(verification_ctx) + try: + sns_client.publish( + TopicArn=target_arn, + Message=json.dumps(test_payload), + Subject="Amazon S3 Notification", + ) + except ClientError as e: + LOG.error( + 'Unable to send test notification for S3 bucket "%s" to SNS topic "%s"', + verification_ctx.bucket_name, + target_arn, + ) + raise _create_invalid_argument_exc( + "Unable to validate the following destination configurations", + name=target_arn, + value="Permissions on the destination topic do not allow S3 to publish notifications from this bucket", + ) from e + + def notify(self, ctx: S3EventNotificationContext, config: TopicConfiguration): + LOG.debug( + "Task received by a worker for notification to %s for bucket %s, key %s, action %s", + self.service_name, + ctx.bucket_name, + ctx.key_name, + ctx.event_type, + ) + event_payload = self._get_event_payload(ctx, config.get("Id")) + message = json.dumps(event_payload) + topic_arn = config["TopicArn"] + + arn_data = parse_arn(topic_arn) + sns_client = connect_to(region_name=arn_data["region"]).sns.request_metadata( + source_arn=s3_bucket_arn(ctx.bucket_name, region=ctx.region), + service_principal=ServicePrincipal.s3, + ) + try: + sns_client.publish( + TopicArn=topic_arn, + Message=message, + Subject="Amazon S3 Notification", + ) + except Exception: + LOG.exception( + 'Unable to send notification for S3 bucket "%s" to SNS topic "%s"', + ctx.bucket_name, + topic_arn, + ) + + +class LambdaNotifier(BaseNotifier): + service_name = "lambda" + + @staticmethod + def _get_arn_value_and_name( + lambda_configuration: LambdaFunctionConfiguration, + ) -> Tuple[LambdaFunctionArn, str]: + return lambda_configuration.get("LambdaFunctionArn", ""), "LambdaFunctionArn" + + def _verify_target(self, target_arn: str, verification_ctx: BucketVerificationContext) -> None: + if not is_api_enabled("lambda"): + LOG.warning( + "Service 'lambda' is not enabled: skipping validation of the following destination: '%s' " + "Please check your 'SERVICES' configuration variable.", + target_arn, + ) + return + arn_data = parse_arn(arn=target_arn) + lambda_client = connect_to( + aws_access_key_id=arn_data["account"], region_name=arn_data["region"] + ).lambda_ + try: + lambda_client.get_function(FunctionName=target_arn) + except ClientError: + raise _create_invalid_argument_exc( + "Unable to validate the following destination configurations", + name=target_arn, + value="The destination Lambda 
does not exist", + ) + lambda_client = connect_to(region_name=arn_data["region"]).lambda_.request_metadata( + source_arn=s3_bucket_arn(verification_ctx.bucket_name, region=verification_ctx.region), + service_principal=ServicePrincipal.s3, + ) + try: + lambda_client.invoke(FunctionName=target_arn, InvocationType=InvocationType.DryRun) + except ClientError as e: + raise _create_invalid_argument_exc( + "Unable to validate the following destination configurations", + name=f"{target_arn}, null", + value=f"Not authorized to invoke function [{target_arn}]", + ) from e + + def notify(self, ctx: S3EventNotificationContext, config: LambdaFunctionConfiguration): + event_payload = self._get_event_payload(ctx, config.get("Id")) + payload = json.dumps(event_payload) + lambda_arn = config["LambdaFunctionArn"] + + arn_data = parse_arn(lambda_arn) + + lambda_client = connect_to(region_name=arn_data["region"]).lambda_.request_metadata( + source_arn=s3_bucket_arn(ctx.bucket_name, region=ctx.region), + service_principal=ServicePrincipal.s3, + ) + + try: + lambda_client.invoke( + FunctionName=lambda_arn, + InvocationType="Event", + Payload=payload, + ) + except Exception: + LOG.exception( + 'Unable to send notification for S3 bucket "%s" to Lambda function "%s".', + ctx.bucket_name, + lambda_arn, + ) + + +class EventBridgeNotifier(BaseNotifier): + service_name = "events" + + @staticmethod + def _get_event_payload( + ctx: S3EventNotificationContext, config_id: NotificationId = None + ) -> PutEventsRequestEntry: + # see https://docs.aws.amazon.com/AmazonS3/latest/userguide/EventBridge.html + # see also https://docs.aws.amazon.com/AmazonS3/latest/userguide/ev-events.html + partition = get_partition(ctx.region) + entry: PutEventsRequestEntry = { + "Source": "aws.s3", + "Resources": [f"arn:{partition}:s3:::{ctx.bucket_name}"], + "Time": ctx.event_time, + } + + if ctx.xray: + entry["TraceHeader"] = ctx.xray + + event_details = { + "version": "0", + "bucket": {"name": ctx.bucket_name}, + "object": { + "key": ctx.key_name, + "size": ctx.key_size, + "etag": ctx.key_etag, + "sequencer": "0062E99A88DC407460", + }, + "request-id": ctx.request_id, + "requester": "074255357339", + "source-ip-address": "127.0.0.1", + # TODO previously headers.get("X-Forwarded-For", "127.0.0.1").split(",")[0] + } + if ctx.key_version_id and ctx.key_version_id != "null": + event_details["object"]["version-id"] = ctx.key_version_id + + if "ObjectCreated" in ctx.event_type: + entry["DetailType"] = "Object Created" + event_type = ctx.event_type + event_action = event_type[event_type.rindex(":") + 1 :] + if event_action in ["Put", "Post", "Copy"]: + event_type = f"{event_action}Object" + # TODO: what about CompleteMultiformUpload?? 
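+            # illustrative mapping for the "reason" field below (derived from the branch above):
+            #   "s3:ObjectCreated:Put"  -> "PutObject"
+            #   "s3:ObjectCreated:Copy" -> "CopyObject"
+            #   "s3:ObjectCreated:CompleteMultipartUpload" -> kept as-is (see TODO above)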
+            event_details["reason"] = event_type
+
+        elif "ObjectRemoved" in ctx.event_type:
+            entry["DetailType"] = "Object Deleted"
+            event_details["reason"] = "DeleteObject"
+            if "DeleteMarkerCreated" in ctx.event_type:
+                delete_type = "Delete Marker Created"
+            else:
+                delete_type = "Permanently Deleted"
+                event_details["object"].pop("etag")
+
+            event_details["deletion-type"] = delete_type
+            event_details["object"].pop("size")
+
+        elif "ObjectTagging" in ctx.event_type:
+            entry["DetailType"] = (
+                "Object Tags Added" if "Put" in ctx.event_type else "Object Tags Deleted"
+            )
+
+        elif "ObjectAcl" in ctx.event_type:
+            entry["DetailType"] = "Object ACL Updated"
+            event_details["object"].pop("size")
+            event_details["object"].pop("sequencer")
+
+        elif "ObjectRestore" in ctx.event_type:
+            entry["DetailType"] = (
+                "Object Restore Initiated"
+                if "Post" in ctx.event_type
+                else "Object Restore Completed"
+            )
+            event_details["source-storage-class"] = ctx.key_storage_class
+            event_details["object"].pop("sequencer", None)
+            if ctx.event_type.endswith("Completed"):
+                event_details["restore-expiry-time"] = timestamp_millis(ctx.key_expiry)
+                event_details.pop("source-ip-address", None)
+                # a bit hacky: this ensures the eventTime is a bit after the `Post` event, as it's instant in
+                # LocalStack. The best would be to delay the publishing of the event. We need at least 1s as it's
+                # the precision of the event
+                entry["Time"] = entry["Time"] + datetime.timedelta(seconds=1)
+
+        entry["Detail"] = json.dumps(event_details)
+        return entry
+
+    @staticmethod
+    def should_notify(ctx: S3EventNotificationContext, config: Dict) -> bool:
+        # Events are always passed to EventBridge; the routing happens in EventBridge itself
+        # See https://docs.aws.amazon.com/AmazonS3/latest/userguide/EventBridge.html
+        return True
+
+    def validate_configuration_for_notifier(
+        self,
+        configurations: List[Dict],
+        skip_destination_validation: bool,
+        context: RequestContext,
+        bucket_name: str,
+    ):
+        # There is no configuration for EventBridge; simply passing an empty dict enables notifications
+        return
+
+    def _verify_target(self, target_arn: str, verification_ctx: BucketVerificationContext) -> None:
+        # There is no target to verify for EventBridge
+        return
+
+    def notify(self, ctx: S3EventNotificationContext, config: EventBridgeConfiguration):
+        # does not require permissions
+        # https://docs.aws.amazon.com/AmazonS3/latest/userguide/ev-permissions.html
+        # the account_id should be the bucket owner
+        # - account — The 12-digit AWS account ID of the bucket owner.
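+        # illustrative shape of the entry built by _get_event_payload (values are examples only):
+        #   {"Source": "aws.s3", "DetailType": "Object Created", "Time": <event time>,
+        #    "Resources": ["arn:aws:s3:::my-bucket"],
+        #    "Detail": '{"version": "0", "bucket": {"name": "my-bucket"}, "object": {...}, ...}'}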
+ events_client = connect_to( + aws_access_key_id=ctx.bucket_account_id, region_name=ctx.bucket_location + ).events + entry = self._get_event_payload(ctx) + try: + events_client.put_events(Entries=[entry]) + except Exception: + LOG.exception( + 'Unable to send notification for S3 bucket "%s" to EventBridge', ctx.bucket_name + ) + + +class NotificationDispatcher: + notifiers = { + "QueueConfigurations": SqsNotifier(), + "TopicConfigurations": SnsNotifier(), + "LambdaFunctionConfigurations": LambdaNotifier(), + "EventBridgeConfiguration": EventBridgeNotifier(), + } + + def __init__(self, num_thread: int = 3): + self.executor = ThreadPoolExecutor(num_thread, thread_name_prefix="s3_ev") + + def shutdown(self): + self.executor.shutdown(wait=False) + + def send_notifications( + self, ctx: S3EventNotificationContext, notification_config: NotificationConfiguration + ): + for configuration_key, configurations in notification_config.items(): + notifier = self.notifiers[configuration_key] + if not is_api_enabled(notifier.service_name): + LOG.warning( + "Service '%s' is not enabled: skip sending notification. " + "Please check your 'SERVICES' configuration variable.", + notifier.service_name, + ) + continue + # there is not really a configuration for EventBridge, it is an empty dict + configurations = ( + configurations if isinstance(configurations, list) else [configurations] + ) + for config in configurations: + if notifier.should_notify(ctx, config): # we check before sending it to the thread + LOG.debug("Submitting task to the executor for notifier %s", notifier) + self._submit_notification(notifier, ctx, config) + + def _submit_notification(self, notifier, ctx, config): + "Required for patching submit with local thread context for EventStudio" + self.executor.submit(notifier.notify, ctx, config) + + def verify_configuration( + self, + notification_configurations: NotificationConfiguration, + skip_destination_validation, + context: RequestContext, + bucket_name: str, + ): + for notifier_type, notification_configuration in notification_configurations.items(): + self.notifiers[notifier_type].validate_configuration_for_notifier( + notification_configuration, skip_destination_validation, context, bucket_name + ) diff --git a/localstack-core/localstack/services/s3/presigned_url.py b/localstack-core/localstack/services/s3/presigned_url.py new file mode 100644 index 0000000000000..e696e82e2c2dc --- /dev/null +++ b/localstack-core/localstack/services/s3/presigned_url.py @@ -0,0 +1,943 @@ +import base64 +import copy +import datetime +import json +import logging +import re +import time +from collections import namedtuple +from functools import cache, cached_property +from typing import Mapping, Optional, TypedDict +from urllib import parse as urlparse + +from botocore.auth import HmacV1QueryAuth, S3SigV4QueryAuth +from botocore.awsrequest import AWSRequest, create_request_object +from botocore.compat import HTTPHeaders, urlsplit +from botocore.credentials import Credentials, ReadOnlyCredentials +from botocore.exceptions import NoCredentialsError +from botocore.model import ServiceModel +from botocore.utils import percent_encode_sequence +from werkzeug.datastructures import Headers, ImmutableMultiDict + +from localstack import config +from localstack.aws.accounts import get_account_id_from_access_key_id +from localstack.aws.api import CommonServiceException, RequestContext +from localstack.aws.api.s3 import ( + AccessDenied, + AuthorizationQueryParametersError, + EntityTooLarge, + EntityTooSmall, + 
InvalidArgument, + InvalidBucketName, + SignatureDoesNotMatch, +) +from localstack.aws.chain import HandlerChain +from localstack.aws.protocol.op_router import RestServiceOperationRouter +from localstack.aws.spec import get_service_catalog +from localstack.http import Request, Response +from localstack.http.request import get_raw_path +from localstack.services.s3.constants import ( + DEFAULT_PRE_SIGNED_ACCESS_KEY_ID, + DEFAULT_PRE_SIGNED_SECRET_ACCESS_KEY, + SIGNATURE_V2_PARAMS, + SIGNATURE_V4_PARAMS, +) +from localstack.services.s3.utils import ( + S3_VIRTUAL_HOST_FORWARDED_HEADER, + _create_invalid_argument_exc, + capitalize_header_name_from_snake_case, + extract_bucket_name_and_key_from_headers_and_path, + forwarded_from_virtual_host_addressed_request, + is_bucket_name_valid, + is_presigned_url_request, + uses_host_addressing, +) +from localstack.utils.aws.arns import get_partition +from localstack.utils.strings import to_bytes + +LOG = logging.getLogger(__name__) + + +SIGNATURE_V2_POST_FIELDS = [ + "signature", + "awsaccesskeyid", +] + +SIGNATURE_V4_POST_FIELDS = [ + "x-amz-signature", + "x-amz-algorithm", + "x-amz-credential", + "x-amz-date", +] + +# Boto3 has some issues with some headers that it disregards and does not validate or adds to the signature +# we need to manually define them +# see https://github.com/boto/boto3/issues/4367 +SIGNATURE_V4_BOTO_IGNORED_PARAMS = [ + "if-none-match", + "if-match", +] + +# headers to blacklist from request_dict.signed_headers +BLACKLISTED_HEADERS = ["X-Amz-Security-Token"] + +IGNORED_SIGV4_HEADERS = [ + "x-amz-content-sha256", +] + +FAKE_HOST_ID = "9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=" + +HOST_COMBINATION_REGEX = r"^(.*)(:[\d]{0,6})" +PORT_REPLACEMENT = [":80", ":443", f":{config.GATEWAY_LISTEN[0].port}", ""] + +# STS policy expiration date format +POLICY_EXPIRATION_FORMAT1 = "%Y-%m-%dT%H:%M:%SZ" +POLICY_EXPIRATION_FORMAT2 = "%Y-%m-%dT%H:%M:%S.%fZ" + +PreSignedCredentials = namedtuple( + "PreSignedCredentials", ["access_key_id", "secret_access_key", "security_token"] +) + + +class NotValidSigV4SignatureContext(TypedDict): + signature_provided: str + string_to_sign: str + canonical_request: str + + +FindSigV4Result = tuple["S3SigV4SignatureContext", Optional[NotValidSigV4SignatureContext]] + + +class HmacV1QueryAuthValidation(HmacV1QueryAuth): + """ + Override _get_date for signature calculation, to use received date instead of adding a fixed Expired time + """ + + post_signature_headers = [ + header.lower() + for header in SIGNATURE_V2_PARAMS + BLACKLISTED_HEADERS + HmacV1QueryAuth.QSAOfInterest + ] + QSAOfInterest_low = [qs.lower() for qs in HmacV1QueryAuth.QSAOfInterest] + + def _get_date(self): + return str(int(self._expires)) # noqa + + def get_signature(self, method, split, headers: HTTPHeaders, expires=None, auth_path=None): + if self.credentials.token: + del headers["x-amz-security-token"] + headers["x-amz-security-token"] = self.credentials.token + string_to_sign = self.canonical_string(method, split, headers, auth_path=auth_path) + return self.sign_string(string_to_sign), string_to_sign + + +class S3SigV4QueryAuthValidation(S3SigV4QueryAuth): + """ + Override the timestamp for signature calculation, to use received timestamp instead of adding a fixed Expired time + """ + + def add_auth(self, request) -> tuple[bytes, str, str]: + if self.credentials is None: # noqa + raise NoCredentialsError() + canonical_request = self.canonical_request(request) + string_to_sign = self.string_to_sign(request, 
canonical_request)
+        signature = self.signature(string_to_sign, request)
+
+        return signature, canonical_request, string_to_sign
+
+
+# we are taking advantage of the fact that non-attached members are not returned
+# those exceptions are polymorphic, they can have multiple shapes under the same name
+
+
+def create_signature_does_not_match_sig_v2(
+    request_signature: str, string_to_sign: str, access_key_id: str
+) -> SignatureDoesNotMatch:
+    ex = SignatureDoesNotMatch(
+        "The request signature we calculated does not match the signature you provided. Check your key and signing method."
+    )
+    ex.AWSAccessKeyId = access_key_id
+    ex.HostId = FAKE_HOST_ID
+    ex.SignatureProvided = request_signature
+    ex.StringToSign = string_to_sign
+    ex.StringToSignBytes = to_bytes(string_to_sign).hex(sep=" ", bytes_per_sep=2).upper()
+    return ex
+
+
+def create_signature_does_not_match_sig_v4(
+    not_valid_sig_v4: NotValidSigV4SignatureContext, access_key_id: str
+) -> SignatureDoesNotMatch:
+    ex = create_signature_does_not_match_sig_v2(
+        request_signature=not_valid_sig_v4["signature_provided"],
+        string_to_sign=not_valid_sig_v4["string_to_sign"],
+        access_key_id=access_key_id,
+    )
+    ex.CanonicalRequest = not_valid_sig_v4["canonical_request"]
+    ex.CanonicalRequestBytes = to_bytes(ex.CanonicalRequest).hex(sep=" ", bytes_per_sep=2).upper()
+    return ex
+
+
+class S3PreSignedURLRequestHandler:
+    @cached_property
+    def _service(self) -> ServiceModel:
+        return get_service_catalog().get("s3")
+
+    @cached_property
+    def _s3_op_router(self) -> RestServiceOperationRouter:
+        return RestServiceOperationRouter(self._service)
+
+    def __call__(self, _: HandlerChain, context: RequestContext, __: Response):
+        """
+        Handler to validate S3 pre-signed URLs. Checks the validity of the request signature, and raises an error on
+        failure, unless `S3_SKIP_SIGNATURE_VALIDATION` is enabled
+        """
+        if not context.service or context.service.service_name != "s3":
+            return
+        try:
+            if not is_presigned_url_request(context):
+                # validate headers, as some can raise ValueError in Moto
+                _validate_headers_for_moto(context.request.headers)
+                return
+            # will raise exceptions if the url is not valid, except if S3_SKIP_SIGNATURE_VALIDATION is True
+            # will still try to validate it and log if there's an error
+
+            # We save the query args as a set to speed up lookups during validation
+            query_arg_set = set(context.request.args)
+
+            if is_valid_sig_v2(query_arg_set):
+                validate_presigned_url_s3(context)
+
+            elif is_valid_sig_v4(query_arg_set):
+                validate_presigned_url_s3v4(context)
+
+            _validate_headers_for_moto(context.request.headers)
+            LOG.debug("Valid presign url.")
+            # TODO: set the Authorization with the data from the pre-signed query string
+
+        except Exception:
+            # as we are raising before the ServiceRequestParser, we need to set the service and operation
+            # on the context ourselves
+            context.service = self._service
+            context.operation = self._get_op_from_request(context.request)
+            raise
+
+    def _get_op_from_request(self, request: Request):
+        try:
+            op, _ = self._s3_op_router.match(request)
+            return op
+        except Exception:
+            # if we can't parse the request, just set GetObject
+            return self._service.operation_model("GetObject")
+
+
+def get_credentials_from_parameters(parameters: dict, region: str) -> PreSignedCredentials:
+    """
+    Extracts the credentials from the passed signed request parameters (from the query string, or from the form for
+    POST requests)
+    :param parameters: the signed request parameters
+    :param region: the region extracted from the credentials
+    :return: a PreSignedCredentials tuple
+    """
+    # This is V2 signature AccessKeyId
+    if not (access_key_id := parameters.get("AWSAccessKeyId")):
+        # 
If not present, then it is a V4 signature (casing differs between QS parameters and form) + credential_value = parameters.get( + "X-Amz-Credential", parameters.get("x-amz-credential", "") + ).split("/") + if credential_value: + access_key_id = credential_value[0] + + if not access_key_id: + # fallback to the hardcoded value + access_key_id = DEFAULT_PRE_SIGNED_ACCESS_KEY_ID + + if not (secret_access_key := get_secret_access_key_from_access_key_id(access_key_id, region)): + # if we could not retrieve the secret access key, it means the access key was not registered in LocalStack, + # fallback to hardcoded necessary secret access key + secret_access_key = DEFAULT_PRE_SIGNED_SECRET_ACCESS_KEY + + security_token = parameters.get("X-Amz-Security-Token", None) + return PreSignedCredentials(access_key_id, secret_access_key, security_token) + + +@cache +def get_secret_access_key_from_access_key_id(access_key_id: str, region: str) -> Optional[str]: + """ + We need to retrieve the internal secret access key in order to also sign the request on our side to validate it + For now, we need to access Moto internals, as they are no public APIs to retrieve it for obvious reasons. + If the AccessKey is not registered, use the default `test` value that was historically used for pre-signed URLs, in + order to support default use cases + :param access_key_id: the provided AccessKeyID in the Credentials parameter + :param region: the region from the credentials + :return: the linked secret_access_key to the access_key + """ + try: + from moto.iam.models import AccessKey, iam_backends + except ImportError: + return + + account_id = get_account_id_from_access_key_id(access_key_id) + moto_access_key: AccessKey = iam_backends[account_id][get_partition(region)].access_keys.get( + access_key_id + ) + if not moto_access_key: + return + + return moto_access_key.secret_access_key + + +def is_expired(expiry_datetime: datetime.datetime): + now_datetime = datetime.datetime.now(tz=expiry_datetime.tzinfo) + return now_datetime > expiry_datetime + + +def is_valid_sig_v2(query_args: set) -> bool: + """ + :param query_args: a Set representing the query parameters from the presign URL request + :raises AccessDenied: if the query contains parts of the required parameters but not all + :return: True if the request is a valid SigV2 request, or False if no parameters are found to be related to SigV2 + """ + if any(p in query_args for p in SIGNATURE_V2_PARAMS): + if not all(p in query_args for p in SIGNATURE_V2_PARAMS): + LOG.info("Presign signature calculation failed") + raise AccessDenied( + "Query-string authentication requires the Signature, Expires and AWSAccessKeyId parameters", + HostId=FAKE_HOST_ID, + ) + + return True + return False + + +def is_valid_sig_v4(query_args: set) -> bool: + """ + :param query_args: a Set representing the query parameters from the presign URL request + :raises AuthorizationQueryParametersError: if the query contains parts of the required parameters but not all + :return: True if the request is a valid SigV4 request, or False if no parameters are found to be related to SigV4 + """ + if any(p in query_args for p in SIGNATURE_V4_PARAMS): + if not all(p in query_args for p in SIGNATURE_V4_PARAMS): + LOG.info("Presign signature calculation failed") + raise AuthorizationQueryParametersError( + "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + HostId=FAKE_HOST_ID, + ) + + return True + 
return False + + +def validate_presigned_url_s3(context: RequestContext) -> None: + """ + Validate the presigned URL signed with SigV2. + :param context: RequestContext + """ + query_parameters = context.request.args + method = context.request.method + credentials = get_credentials_from_parameters(query_parameters, "us-east-1") + signing_credentials = Credentials( + access_key=credentials.access_key_id, + secret_key=credentials.secret_access_key, + token=credentials.security_token, + ) + try: + expires = int(query_parameters["Expires"]) + except (ValueError, TypeError): + # TODO: test this in AWS?? + raise SignatureDoesNotMatch("Expires error?") + + # Checking whether the url is expired or not + if expires < time.time(): + if config.S3_SKIP_SIGNATURE_VALIDATION: + LOG.warning( + "Signature is expired, but not raising an error, as S3_SKIP_SIGNATURE_VALIDATION=1" + ) + else: + raise AccessDenied( + "Request has expired", HostId=FAKE_HOST_ID, Expires=expires, ServerTime=time.time() + ) + + auth_signer = HmacV1QueryAuthValidation(credentials=signing_credentials, expires=expires) + + split_url, headers = _reverse_inject_signature_hmac_v1_query(context.request) + + signature, string_to_sign = auth_signer.get_signature( + method, split_url, headers, auth_path=None + ) + # after passing through the virtual host to path proxy, the signature is parsed and `+` are replaced by space + req_signature = context.request.args.get("Signature").replace(" ", "+") + + if not signature == req_signature: + if config.S3_SKIP_SIGNATURE_VALIDATION: + LOG.warning( + "Signatures do not match, but not raising an error, as S3_SKIP_SIGNATURE_VALIDATION=1" + ) + else: + ex: SignatureDoesNotMatch = create_signature_does_not_match_sig_v2( + request_signature=req_signature, + string_to_sign=string_to_sign, + access_key_id=credentials.access_key_id, + ) + raise ex + + add_headers_to_original_request(context, headers) + + +def _reverse_inject_signature_hmac_v1_query( + request: Request, +) -> tuple[urlparse.SplitResult, HTTPHeaders]: + """ + Reverses what does HmacV1QueryAuth._inject_signature while injecting the signature in the request. + Transforms the query string parameters in headers to recalculate the signature + see botocore.auth.HmacV1QueryAuth._inject_signature + :param request: the original request + :return: tuple of a split result from the reversed request, and the reversed headers + """ + new_headers = {} + new_query_string_dict = {} + + for header, value in request.args.items(): + header_low = header.lower() + if header_low not in HmacV1QueryAuthValidation.post_signature_headers: + new_headers[header] = value + elif header_low in HmacV1QueryAuthValidation.QSAOfInterest_low: + new_query_string_dict[header] = value + + # there should not be any headers here. 
If there are, it means they have been added by the client + # We should verify them, they will fail the signature except if they were part of the original request + for header, value in request.headers.items(): + header_low = header.lower() + if header_low.startswith("x-amz-") or header_low in ["content-type", "date", "content-md5"]: + new_headers[header_low] = value + + # rebuild the query string + new_query_string = percent_encode_sequence(new_query_string_dict) + + if bucket_name := uses_host_addressing(request.headers): + # if the request is host addressed, we need to remove the bucket from the host and set it in the path + path = f"/{bucket_name}{request.path}" + host = request.host.removeprefix(f"{bucket_name}.") + else: + path = request.path + host = request.host + + # we need to URL encode the path, as the key needs to be urlencoded for the signature to match + encoded_path = urlparse.quote(path) + + reversed_url = f"{request.scheme}://{host}{encoded_path}?{new_query_string}" + + reversed_headers = HTTPHeaders() + for key, value in new_headers.items(): + reversed_headers[key] = value + + return urlsplit(reversed_url), reversed_headers + + +def validate_presigned_url_s3v4(context: RequestContext) -> None: + """ + Validate the presigned URL signed with SigV4. + :param context: RequestContext + :return: + """ + + sigv4_context, exception = _find_valid_signature_through_ports(context) + add_headers_to_original_request(context, sigv4_context.headers_in_qs) + + if sigv4_context.missing_signed_headers: + if config.S3_SKIP_SIGNATURE_VALIDATION: + LOG.warning( + "There were headers present in the request which were not signed (%s), " + "but not raising an error, as S3_SKIP_SIGNATURE_VALIDATION=1", + ", ".join(sigv4_context.missing_signed_headers), + ) + else: + raise AccessDenied( + "There were headers present in the request which were not signed", + HostId=FAKE_HOST_ID, + HeadersNotSigned=", ".join(sigv4_context.missing_signed_headers), + ) + + if exception: + if config.S3_SKIP_SIGNATURE_VALIDATION: + LOG.warning( + "Signatures do not match, but not raising an error, as S3_SKIP_SIGNATURE_VALIDATION=1" + ) + else: + ex: SignatureDoesNotMatch = create_signature_does_not_match_sig_v4( + exception, sigv4_context.credentials.access_key_id + ) + raise ex + + # Checking whether the url is expired or not + query_parameters = context.request.args + # TODO: should maybe try/except here -> create auth params validation before checking signature, above!! + x_amz_date = datetime.datetime.strptime(query_parameters["X-Amz-Date"], "%Y%m%dT%H%M%SZ") + x_amz_expires = int(query_parameters["X-Amz-Expires"]) + x_amz_expires_dt = datetime.timedelta(seconds=x_amz_expires) + expiration_time = x_amz_date + x_amz_expires_dt + expiration_time = expiration_time.replace(tzinfo=datetime.timezone.utc) + + if is_expired(expiration_time): + if config.S3_SKIP_SIGNATURE_VALIDATION: + LOG.warning( + "Signature is expired, but not raising an error, as S3_SKIP_SIGNATURE_VALIDATION=1" + ) + else: + raise AccessDenied( + "Request has expired", + HostId=FAKE_HOST_ID, + Expires=expiration_time.timestamp(), + ServerTime=time.time(), + X_Amz_Expires=x_amz_expires, + ) + + +def _find_valid_signature_through_ports(context: RequestContext) -> FindSigV4Result: + """ + Tries to validate the signature of the received request. If it fails, it will iterate through known LocalStack + ports to try to find a match (the host is used for the calculation). 
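+    For example (illustrative), a URL signed against `localhost:4566` can reach LocalStack with a different host
+    port after passing through a proxy, so the known ports from `PORT_REPLACEMENT` (80, 443, the gateway port, or
+    no port at all) are substituted one by one until the signatures match.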
+ If it fails to find a valid match, it will return NotValidSigV4Signature context data + :param context: + :return: FindSigV4Result: contains a tuple with the signature if found, or NotValidSigV4Signature context + """ + request_sig = context.request.args["X-Amz-Signature"] + + sigv4_context = S3SigV4SignatureContext(context=context) + # get the port of the request + match = re.match(HOST_COMBINATION_REGEX, sigv4_context.host) + request_port = match.group(2) if match else None + + # get the signature from the request + signature, canonical_request, string_to_sign = sigv4_context.get_signature_data() + if signature == request_sig: + return sigv4_context, None + + # if the signature does not match, save the data for the exception + exception_context = NotValidSigV4SignatureContext( + signature_provided=request_sig, + string_to_sign=string_to_sign, + canonical_request=canonical_request, + ) + + # we try to iterate through possible ports, to match the signature + for port in PORT_REPLACEMENT: + if request_port: + # the request has already been tested before the loop, skip + if request_port == port: + continue + sigv4_context.update_host_port(new_host_port=port, original_host_port=request_port) + + else: + sigv4_context.update_host_port(new_host_port=port) + + # we ignore the additional data because we want the exception raised to match the original request + signature, _, _ = sigv4_context.get_signature_data() + if signature == request_sig: + return sigv4_context, None + + # Return the exception data from the original request after trying to loop through ports + return sigv4_context, exception_context + + +class S3SigV4SignatureContext: + def __init__(self, context: RequestContext): + self.request = context.request + self._query_parameters = context.request.args + self._headers = context.request.headers + self._bucket, _ = extract_bucket_name_and_key_from_headers_and_path( + context.request.headers, get_raw_path(context.request) + ) + self._bucket = urlparse.unquote(self._bucket) + self._request_method = context.request.method + self.missing_signed_headers = [] + + region = self._get_region_from_x_amz_credential(self._query_parameters["X-Amz-Credential"]) + credentials = get_credentials_from_parameters(self._query_parameters, region) + signing_credentials = ReadOnlyCredentials( + credentials.access_key_id, + credentials.secret_access_key, + credentials.security_token, + ) + self.credentials = credentials + expires = int(self._query_parameters["X-Amz-Expires"]) + self.signature_date = self._query_parameters["X-Amz-Date"] + + self.signer = S3SigV4QueryAuthValidation(signing_credentials, "s3", region, expires=expires) + sig_headers, qs, headers_in_qs = self._get_signed_headers_and_filtered_query_string() + self.signed_headers = sig_headers + self.request_query_string = qs + self.headers_in_qs = headers_in_qs | sig_headers + self.headers_in_qs["Authorization"] = self._get_authorization_header_from_qs( + self._query_parameters + ) + + if forwarded_from_virtual_host_addressed_request(self._headers): + # FIXME: maybe move this so it happens earlier in the chain when using virtual host? 
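+            # illustrative example: a virtual-host style request for bucket "my-bucket" and key "my-key" arrives
+            # here with path "/my-bucket/my-key" after the router rewrite; we keep only "/my-key" below and take
+            # the host from the forwarded header, so the originally signed URL can be reconstructed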
+ if not is_bucket_name_valid(self._bucket): + raise InvalidBucketName(BucketName=self._bucket) + netloc = self._headers.get(S3_VIRTUAL_HOST_FORWARDED_HEADER) + self.host = netloc + self._original_host = netloc + self.signed_headers["host"] = netloc + # the request comes from the Virtual Host router, we need to remove the bucket from the path + splitted_path = self.request.path.split("/", maxsplit=2) + self.path = f"/{splitted_path[-1]}" + + else: + netloc = urlparse.urlparse(self.request.url).netloc + self.host = netloc + self._original_host = netloc + if (host_addressed := uses_host_addressing(self._headers)) and not is_bucket_name_valid( + self._bucket + ): + raise InvalidBucketName(BucketName=self._bucket) + + if not host_addressed and not self.request.path.startswith(f"/{self._bucket}"): + # if in path style, check that the path starts with the bucket + # our path has been sanitized, we should use the un-sanitized one + splitted_path = self.request.path.split("/", maxsplit=2) + self.path = f"/{self._bucket}/{splitted_path[-1]}" + else: + self.path = self.request.path + + # we need to URL encode the path, as the key needs to be urlencoded for the signature to match + self.path = urlparse.quote(self.path) + self.aws_request = self._get_aws_request() + + def update_host_port(self, new_host_port: str, original_host_port: str = None): + """ + Update the host port of the context with the provided one, format `:{port}` + :param new_host_port: + :param original_host_port: + :return: + """ + if original_host_port: + updated_netloc = self._original_host.replace(original_host_port, new_host_port) + else: + updated_netloc = f"{self._original_host}{new_host_port}" + self.host = updated_netloc + self.signed_headers["host"] = updated_netloc + self.aws_request = self._get_aws_request() + + @property + def request_url(self) -> str: + return f"{self.request.scheme}://{self.host}{self.path}?{self.request_query_string}" + + def get_signature_data(self) -> tuple[bytes, str, str]: + """ + Uses the signer to return the signature and the data used to calculate it + :return: signature, canonical_request and string_to_sign + """ + return self.signer.add_auth(self.aws_request) + + def _get_signed_headers_and_filtered_query_string( + self, + ) -> tuple[dict[str, str], str, dict[str, str]]: + """ + Transforms the original headers and query parameters to the headers and query string used to sign the + original request. 
+ Allows us to recreate the original request, and also retrieve query string parameters that should be headers + :raises AccessDenied if the request contains headers that were not in X-Amz-SignedHeaders and started with x-amz + :return: the headers used to sign the request and the query string without X-Amz-Signature, and the query string + parameters which should be put back in the headers + """ + headers = copy.copy(self._headers) + # set automatically by the handler chain, we don't want that + headers.pop("Authorization", None) + signed_headers = self._query_parameters.get("X-Amz-SignedHeaders") + + new_query_args = {} + query_args_to_headers = {} + for qs_parameter, qs_value in self._query_parameters.items(): + # skip the signature + if qs_parameter == "X-Amz-Signature": + continue + + qs_param_low = qs_parameter.lower() + if ( + qs_parameter not in SIGNATURE_V4_PARAMS + and ( + qs_param_low.startswith("x-amz-") + or qs_param_low in SIGNATURE_V4_BOTO_IGNORED_PARAMS + ) + and qs_param_low not in headers + ): + if qs_param_low in signed_headers: + # AWS JS SDK does not behave like boto, and will add some parameters as query string when signing + # when boto would not. this difference in behaviour would lead to pre-signed URLs generated by the + # JS SDK to be invalid for the boto signer. + # This fixes the behaviour by manually adding the parameter to the headers like boto would, if the + # SDK put them in the SignedHeaders + # this is especially valid for headers starting with x-amz-server-side-encryption, treated + # specially in the old JS SDK v2 + headers.add(qs_param_low, qs_value) + else: + # The JS SDK is adding the `x-amz-checksum-crc32` header to query parameters, even though it cannot + # know in advance the actual checksum. Those are ignored by AWS, if they're not put in the + # SignedHeaders + if not qs_param_low.startswith("x-amz-checksum-"): + query_args_to_headers[qs_param_low] = qs_value + + new_query_args[qs_parameter] = qs_value + + signature_headers = {} + for header, value in headers.items(): + header_low = header.lower() + if header_low.startswith("x-amz-") and header_low not in signed_headers.lower(): + if header_low in IGNORED_SIGV4_HEADERS: + continue + self.missing_signed_headers.append(header_low) + if header_low in signed_headers: + signature_headers[header_low] = value + + new_query_string = percent_encode_sequence(new_query_args) + return signature_headers, new_query_string, query_args_to_headers + + def _get_aws_request(self) -> AWSRequest: + """ + Creates and returns the AWSRequest needed for S3SigV4QueryAuth signer + :return: AWSRequest + """ + request_dict = { + "method": self._request_method, + "url": self.request_url, + "body": b"", + "headers": self.signed_headers, + "context": { + "is_presign_request": True, + "use_global_endpoint": True, + "signing": {"bucket": self._bucket}, + "timestamp": self.signature_date, + }, + } + return create_request_object(request_dict) + + @staticmethod + def _get_region_from_x_amz_credential(credential: str) -> str: + if not (split_creds := credential.split("/")) or len(split_creds) != 5: + raise AuthorizationQueryParametersError( + 'Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting "/YYYYMMDD/REGION/SERVICE/aws4_request".', + HostId=FAKE_HOST_ID, + ) + + return split_creds[2] + + @staticmethod + def _get_authorization_header_from_qs(parameters: dict) -> str: + # See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html + # Recreating the 
Authorization header from the query string parameters of a pre-signed request + authorization_keys = ["X-Amz-Credential", "X-Amz-SignedHeaders", "X-Amz-Signature"] + values = [ + f"{param.removeprefix('X-Amz-')}={parameters[param]}" for param in authorization_keys + ] + + authorization = f"{parameters['X-Amz-Algorithm']}{','.join(values)}" + return authorization + + +def add_headers_to_original_request(context: RequestContext, headers: Mapping[str, str]): + for header, value in headers.items(): + context.request.headers.add(header, value) + + +def _validate_headers_for_moto(headers: Headers) -> None: + """ + The headers can contain values that do not have the right type, and it will throw Exception when passed to Moto + Validate them before it get passed + :param headers: request headers + """ + if headers.get("x-amz-content-sha256", None) == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD": + # this is sign that this is a SigV4 request, with payload encoded + # we do not support payload encoding yet + # moto parses it to an int, it would raise a 500 + content_length = headers.get("x-amz-decoded-content-length") + if not content_length: + raise SignatureDoesNotMatch('"X-Amz-Decoded-Content-Length" header is missing') + try: + int(content_length) + except ValueError: + raise SignatureDoesNotMatch('Wrong "X-Amz-Decoded-Content-Length" header') + + +def validate_post_policy( + request_form: ImmutableMultiDict, additional_policy_metadata: dict +) -> None: + """ + Validate the pre-signed POST with its policy contained + For now, only validates its expiration + SigV2: https://docs.aws.amazon.com/AmazonS3/latest/userguide/HTTPPOSTExamples.html + SigV4: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-authentication-HTTPPOST.html + + :param request_form: the form data contained in the pre-signed POST request + :param additional_policy_metadata: additional metadata needed to validate the policy (bucket name, object size) + :raises AccessDenied, SignatureDoesNotMatch + :return: None + """ + if not request_form.get("key"): + ex: InvalidArgument = _create_invalid_argument_exc( + message="Bucket POST must contain a field named 'key'. 
If it is specified, please check the order of the fields.", + name="key", + value="", + host_id=FAKE_HOST_ID, + ) + raise ex + + form_dict = {k.lower(): v for k, v in request_form.items()} + + policy = form_dict.get("policy") + if not policy: + # A POST request needs a policy except if the bucket is publicly writable + return + + # TODO: this does validation of fields only for now + is_v4 = _is_match_with_signature_fields(form_dict, SIGNATURE_V4_POST_FIELDS) + is_v2 = _is_match_with_signature_fields(form_dict, SIGNATURE_V2_POST_FIELDS) + + if not is_v2 and not is_v4: + ex: AccessDenied = AccessDenied("Access Denied") + ex.HostId = FAKE_HOST_ID + raise ex + + try: + policy_decoded = json.loads(base64.b64decode(policy).decode("utf-8")) + except ValueError: + # this means the policy has been tampered with + signature = form_dict.get("signature") if is_v2 else form_dict.get("x-amz-signature") + credentials = get_credentials_from_parameters(request_form, "us-east-1") + ex: SignatureDoesNotMatch = create_signature_does_not_match_sig_v2( + request_signature=signature, + string_to_sign=policy, + access_key_id=credentials.access_key_id, + ) + raise ex + + if expiration := policy_decoded.get("expiration"): + if is_expired(_parse_policy_expiration_date(expiration)): + ex: AccessDenied = AccessDenied("Invalid according to Policy: Policy expired.") + ex.HostId = FAKE_HOST_ID + raise ex + + # TODO: validate the signature + + # See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html + # for the list of conditions and what matching they support + # TODO: + # 1. only support the kind of matching the field supports: `success_action_status` does not support `starts-with` + # matching + # 2. if there are fields that are not defined in the policy, we should reject it + + # Special case for LEGACY_V2: do not validate the conditions. 
+    if not additional_policy_metadata:
+        return
+
+    conditions = policy_decoded.get("conditions", [])
+    for condition in conditions:
+        if not _verify_condition(condition, form_dict, additional_policy_metadata):
+            str_condition = str(condition).replace("'", '"')
+            raise AccessDenied(
+                f"Invalid according to Policy: Policy Condition failed: {str_condition}",
+                HostId=FAKE_HOST_ID,
+            )
+
+
+def _verify_condition(condition: list | dict, form: dict, additional_policy_metadata: dict) -> bool:
+    if isinstance(condition, dict) and len(condition) > 1:
+        raise CommonServiceException(
+            code="InvalidPolicyDocument",
+            message="Invalid Policy: Invalid Simple-Condition: Simple-Conditions must have exactly one property specified.",
+        )
+
+    match condition:
+        case {**kwargs}:
+            # this is the most performant way to check for a dict with only one key
+            # an alternative version is `key, val = next(iter(condition.items()))`
+            for key, val in kwargs.items():
+                k = key.lower()
+                if k == "bucket":
+                    return additional_policy_metadata.get("bucket") == val
+                else:
+                    return form.get(k) == val
+
+        case ["eq", key, value]:
+            k = key.lower()
+            if k == "$bucket":
+                return additional_policy_metadata.get("bucket") == value
+
+            return k.startswith("$") and form.get(k.lstrip("$")) == value
+
+        case ["starts-with", key, value]:
+            # You can set the `starts-with` value to an empty string to accept anything
+            return key.startswith("$") and (
+                not value or form.get(key.lstrip("$").lower(), "").startswith(value)
+            )
+
+        case ["content-length-range", start, end]:
+            size = additional_policy_metadata.get("content_length", 0)
+            try:
+                start, end = int(start), int(end)
+            except ValueError:
+                return False
+
+            if size < start:
+                raise EntityTooSmall(
+                    "Your proposed upload is smaller than the minimum allowed size",
+                    ProposedSize=size,
+                    MinSizeAllowed=start,
+                )
+            elif size > end:
+                raise EntityTooLarge(
+                    "Your proposed upload exceeds the maximum allowed size",
+                    ProposedSize=size,
+                    MaxSizeAllowed=end,
+                    HostId=FAKE_HOST_ID,
+                )
+            else:
+                return True
+
+
+def _parse_policy_expiration_date(expiration_string: str) -> datetime.datetime:
+    """
+    Parses the policy expiration datetime string
+    :param expiration_string: a policy expiration string, which can be in one of 2 formats:
+    `2007-12-01T12:00:00.000Z` or `2007-12-01T12:00:00Z`
+    :return: a datetime object representing the expiration datetime
+    """
+    try:
+        dt = datetime.datetime.strptime(expiration_string, POLICY_EXPIRATION_FORMAT1)
+    except Exception:
+        dt = datetime.datetime.strptime(expiration_string, POLICY_EXPIRATION_FORMAT2)
+
+    # both date formats assume a UTC timezone ('Z' suffix), but it's not parsed as tzinfo into the datetime object
+    dt = dt.replace(tzinfo=datetime.timezone.utc)
+    return dt
+
+
+def _is_match_with_signature_fields(
+    request_form: dict[str, str], signature_fields: list[str]
+) -> bool:
+    """
+    Checks if the form contains at least one of the required fields passed in `signature_fields`.
+    If it contains at least one field, validates that it contains all of them, or raises InvalidArgument.
+    :param request_form: ImmutableMultiDict: the pre-signed POST request form
+    :param signature_fields: the fields we want to validate against
+    :raises InvalidArgument
+    :return: True if all of the fields are present, False if none of them are
+    """
+    if any(p in request_form for p in signature_fields):
+        for p in signature_fields:
+            if p not in request_form:
+                LOG.info("POST pre-sign missing fields")
+                argument_name = (
+                    capitalize_header_name_from_snake_case(p) if "-" in
p else p.capitalize() + ) + # AWSAccessKeyId is a special case + if argument_name == "Awsaccesskeyid": + argument_name = "AWSAccessKeyId" + + ex: InvalidArgument = _create_invalid_argument_exc( + message=f"Bucket POST must contain a field named '{argument_name}'. If it is specified, please check the order of the fields.", + name=argument_name, + value="", + host_id=FAKE_HOST_ID, + ) + raise ex + + return True + return False diff --git a/localstack-core/localstack/services/s3/provider.py b/localstack-core/localstack/services/s3/provider.py new file mode 100644 index 0000000000000..6bab36e9457ba --- /dev/null +++ b/localstack-core/localstack/services/s3/provider.py @@ -0,0 +1,4670 @@ +import base64 +import copy +import datetime +import json +import logging +import re +from collections import defaultdict +from inspect import signature +from io import BytesIO +from operator import itemgetter +from typing import IO, Optional, Union +from urllib import parse as urlparse +from zoneinfo import ZoneInfo + +from localstack import config +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.s3 import ( + MFA, + AbortMultipartUploadOutput, + AccelerateConfiguration, + AccessControlPolicy, + AccessDenied, + AccountId, + AnalyticsConfiguration, + AnalyticsId, + BadDigest, + Body, + Bucket, + BucketAlreadyExists, + BucketAlreadyOwnedByYou, + BucketCannedACL, + BucketLifecycleConfiguration, + BucketLoggingStatus, + BucketName, + BucketNotEmpty, + BucketRegion, + BucketVersioningStatus, + BypassGovernanceRetention, + ChecksumAlgorithm, + ChecksumCRC32, + ChecksumCRC32C, + ChecksumCRC64NVME, + ChecksumSHA1, + ChecksumSHA256, + ChecksumType, + CommonPrefix, + CompletedMultipartUpload, + CompleteMultipartUploadOutput, + ConditionalRequestConflict, + ConfirmRemoveSelfBucketAccess, + ContentMD5, + CopyObjectOutput, + CopyObjectRequest, + CopyObjectResult, + CopyPartResult, + CORSConfiguration, + CreateBucketOutput, + CreateBucketRequest, + CreateMultipartUploadOutput, + CreateMultipartUploadRequest, + CrossLocationLoggingProhibitted, + Delete, + DeletedObject, + DeleteMarkerEntry, + DeleteObjectOutput, + DeleteObjectsOutput, + DeleteObjectTaggingOutput, + Delimiter, + EncodingType, + Error, + Expiration, + FetchOwner, + GetBucketAccelerateConfigurationOutput, + GetBucketAclOutput, + GetBucketAnalyticsConfigurationOutput, + GetBucketCorsOutput, + GetBucketEncryptionOutput, + GetBucketIntelligentTieringConfigurationOutput, + GetBucketInventoryConfigurationOutput, + GetBucketLifecycleConfigurationOutput, + GetBucketLocationOutput, + GetBucketLoggingOutput, + GetBucketOwnershipControlsOutput, + GetBucketPolicyOutput, + GetBucketPolicyStatusOutput, + GetBucketReplicationOutput, + GetBucketRequestPaymentOutput, + GetBucketTaggingOutput, + GetBucketVersioningOutput, + GetBucketWebsiteOutput, + GetObjectAclOutput, + GetObjectAttributesOutput, + GetObjectAttributesParts, + GetObjectAttributesRequest, + GetObjectLegalHoldOutput, + GetObjectLockConfigurationOutput, + GetObjectOutput, + GetObjectRequest, + GetObjectRetentionOutput, + GetObjectTaggingOutput, + GetObjectTorrentOutput, + GetPublicAccessBlockOutput, + HeadBucketOutput, + HeadObjectOutput, + HeadObjectRequest, + IfMatch, + IfMatchInitiatedTime, + IfMatchLastModifiedTime, + IfMatchSize, + IfNoneMatch, + IntelligentTieringConfiguration, + IntelligentTieringId, + InvalidArgument, + InvalidBucketName, + InvalidDigest, + InvalidLocationConstraint, + InvalidObjectState, + InvalidPartNumber, + InvalidPartOrder, + 
InvalidStorageClass, + InvalidTargetBucketForLogging, + InventoryConfiguration, + InventoryId, + KeyMarker, + LifecycleRules, + ListBucketAnalyticsConfigurationsOutput, + ListBucketIntelligentTieringConfigurationsOutput, + ListBucketInventoryConfigurationsOutput, + ListBucketsOutput, + ListMultipartUploadsOutput, + ListObjectsOutput, + ListObjectsV2Output, + ListObjectVersionsOutput, + ListPartsOutput, + Marker, + MaxBuckets, + MaxKeys, + MaxParts, + MaxUploads, + MethodNotAllowed, + MissingSecurityHeader, + MpuObjectSize, + MultipartUpload, + MultipartUploadId, + NoSuchBucket, + NoSuchBucketPolicy, + NoSuchCORSConfiguration, + NoSuchKey, + NoSuchLifecycleConfiguration, + NoSuchPublicAccessBlockConfiguration, + NoSuchTagSet, + NoSuchUpload, + NoSuchWebsiteConfiguration, + NotificationConfiguration, + Object, + ObjectIdentifier, + ObjectKey, + ObjectLockConfiguration, + ObjectLockConfigurationNotFoundError, + ObjectLockEnabled, + ObjectLockLegalHold, + ObjectLockMode, + ObjectLockRetention, + ObjectLockToken, + ObjectOwnership, + ObjectVersion, + ObjectVersionId, + ObjectVersionStorageClass, + OptionalObjectAttributesList, + Owner, + OwnershipControls, + OwnershipControlsNotFoundError, + Part, + PartNumber, + PartNumberMarker, + Policy, + PostResponse, + PreconditionFailed, + Prefix, + PublicAccessBlockConfiguration, + PutBucketAclRequest, + PutBucketLifecycleConfigurationOutput, + PutObjectAclOutput, + PutObjectAclRequest, + PutObjectLegalHoldOutput, + PutObjectLockConfigurationOutput, + PutObjectOutput, + PutObjectRequest, + PutObjectRetentionOutput, + PutObjectTaggingOutput, + ReplicationConfiguration, + ReplicationConfigurationNotFoundError, + RequestPayer, + RequestPaymentConfiguration, + RestoreObjectOutput, + RestoreRequest, + S3Api, + ServerSideEncryption, + ServerSideEncryptionConfiguration, + SkipValidation, + SSECustomerAlgorithm, + SSECustomerKey, + SSECustomerKeyMD5, + StartAfter, + StorageClass, + Tagging, + Token, + TransitionDefaultMinimumObjectSize, + UploadIdMarker, + UploadPartCopyOutput, + UploadPartCopyRequest, + UploadPartOutput, + UploadPartRequest, + VersionIdMarker, + VersioningConfiguration, + WebsiteConfiguration, +) +from localstack.aws.api.s3 import NotImplemented as NotImplementedException +from localstack.aws.handlers import ( + modify_service_response, + preprocess_request, + serve_custom_service_request_handlers, +) +from localstack.constants import AWS_REGION_US_EAST_1 +from localstack.services.edge import ROUTER +from localstack.services.plugins import ServiceLifecycleHook +from localstack.services.s3.codec import AwsChunkedDecoder +from localstack.services.s3.constants import ( + ALLOWED_HEADER_OVERRIDES, + ARCHIVES_STORAGE_CLASSES, + CHECKSUM_ALGORITHMS, + DEFAULT_BUCKET_ENCRYPTION, +) +from localstack.services.s3.cors import S3CorsHandler, s3_cors_request_handler +from localstack.services.s3.exceptions import ( + InvalidBucketOwnerAWSAccountID, + InvalidBucketState, + InvalidRequest, + MalformedPolicy, + MalformedXML, + NoSuchConfiguration, + NoSuchObjectLockConfiguration, + UnexpectedContent, +) +from localstack.services.s3.models import ( + BucketCorsIndex, + EncryptionParameters, + ObjectLockParameters, + S3Bucket, + S3DeleteMarker, + S3Multipart, + S3Object, + S3Part, + S3Store, + VersionedKeyStore, + s3_stores, +) +from localstack.services.s3.notifications import NotificationDispatcher, S3EventNotificationContext +from localstack.services.s3.presigned_url import validate_post_policy +from localstack.services.s3.storage.core import 
LimitedIterableStream, S3ObjectStore +from localstack.services.s3.storage.ephemeral import EphemeralS3ObjectStore +from localstack.services.s3.utils import ( + ObjectRange, + add_expiration_days_to_datetime, + base_64_content_md5_to_etag, + create_redirect_for_post_request, + create_s3_kms_managed_key_for_region, + etag_to_base_64_content_md5, + extract_bucket_key_version_id_from_copy_source, + generate_safe_version_id, + get_canned_acl, + get_class_attrs_from_spec_class, + get_failed_precondition_copy_source, + get_full_default_bucket_location, + get_kms_key_arn, + get_lifecycle_rule_from_object, + get_owner_for_account_id, + get_permission_from_header, + get_retention_from_now, + get_s3_checksum_algorithm_from_request, + get_s3_checksum_algorithm_from_trailing_headers, + get_system_metadata_from_request, + get_unique_key_id, + is_bucket_name_valid, + is_version_older_than_other, + parse_copy_source_range_header, + parse_post_object_tagging_xml, + parse_range_header, + parse_tagging_header, + s3_response_handler, + serialize_expiration_header, + str_to_rfc_1123_datetime, + validate_dict_fields, + validate_failed_precondition, + validate_kms_key_id, + validate_tag_set, +) +from localstack.services.s3.validation import ( + parse_grants_in_headers, + validate_acl_acp, + validate_bucket_analytics_configuration, + validate_bucket_intelligent_tiering_configuration, + validate_canned_acl, + validate_checksum_value, + validate_cors_configuration, + validate_inventory_configuration, + validate_lifecycle_configuration, + validate_object_key, + validate_sse_c, + validate_website_configuration, +) +from localstack.services.s3.website_hosting import register_website_hosting_routes +from localstack.state import AssetDirectory, StateVisitor +from localstack.utils.aws.arns import s3_bucket_name +from localstack.utils.strings import short_uid, to_bytes, to_str + +LOG = logging.getLogger(__name__) + +STORAGE_CLASSES = get_class_attrs_from_spec_class(StorageClass) +SSE_ALGORITHMS = get_class_attrs_from_spec_class(ServerSideEncryption) +OBJECT_OWNERSHIPS = get_class_attrs_from_spec_class(ObjectOwnership) + +DEFAULT_S3_TMP_DIR = "/tmp/localstack-s3-storage" + + +class S3Provider(S3Api, ServiceLifecycleHook): + def __init__(self, storage_backend: S3ObjectStore = None) -> None: + super().__init__() + self._storage_backend = storage_backend or EphemeralS3ObjectStore(DEFAULT_S3_TMP_DIR) + self._notification_dispatcher = NotificationDispatcher() + self._cors_handler = S3CorsHandler(BucketCorsIndex()) + + # runtime cache of Lifecycle Expiration headers, as they need to be calculated everytime we fetch an object + # in case the rules have changed + self._expiration_cache: dict[BucketName, dict[ObjectKey, Expiration]] = defaultdict(dict) + + def on_after_init(self): + preprocess_request.append(self._cors_handler) + serve_custom_service_request_handlers.append(s3_cors_request_handler) + modify_service_response.append(self.service, s3_response_handler) + register_website_hosting_routes(router=ROUTER) + + def accept_state_visitor(self, visitor: StateVisitor): + visitor.visit(s3_stores) + visitor.visit(AssetDirectory(self.service, self._storage_backend.root_directory)) + + def on_before_state_save(self): + self._storage_backend.flush() + + def on_after_state_reset(self): + self._cors_handler.invalidate_cache() + + def on_after_state_load(self): + self._cors_handler.invalidate_cache() + + def on_before_stop(self): + self._notification_dispatcher.shutdown() + self._storage_backend.close() + + def _notify( + self, + 
context: RequestContext,
+        s3_bucket: S3Bucket,
+        s3_object: S3Object | S3DeleteMarker = None,
+        s3_notif_ctx: S3EventNotificationContext = None,
+    ):
+        """
+        :param context: the RequestContext, to retrieve more information about the incoming notification
+        :param s3_bucket: the S3Bucket object
+        :param s3_object: the S3Object object, if the S3EventNotificationContext is not given
+        :param s3_notif_ctx: the S3EventNotificationContext, in case we need specific data only available in the API call
+        :return:
+        """
+        if s3_bucket.notification_configuration:
+            if not s3_notif_ctx:
+                s3_notif_ctx = S3EventNotificationContext.from_request_context_native(
+                    context,
+                    s3_bucket=s3_bucket,
+                    s3_object=s3_object,
+                )
+
+            self._notification_dispatcher.send_notifications(
+                s3_notif_ctx, s3_bucket.notification_configuration
+            )
+
+    def _verify_notification_configuration(
+        self,
+        notification_configuration: NotificationConfiguration,
+        skip_destination_validation: SkipValidation,
+        context: RequestContext,
+        bucket_name: str,
+    ):
+        self._notification_dispatcher.verify_configuration(
+            notification_configuration, skip_destination_validation, context, bucket_name
+        )
+
+    def _get_expiration_header(
+        self,
+        lifecycle_rules: LifecycleRules,
+        bucket: BucketName,
+        s3_object: S3Object,
+        object_tags: dict[str, str],
+    ) -> Expiration:
+        """
+        This method will check if the key matches a Lifecycle filter, and return the serialized header if that's
+        the case. We're caching it because it can change depending on the rules set on the bucket.
+        We can't use `lru_cache` here, as the parameters need to be hashable.
+        :param lifecycle_rules: the bucket LifecycleRules
+        :param bucket: the bucket name
+        :param s3_object: S3Object
+        :param object_tags: the object tags
+        :return: the Expiration header if there's a rule matching
+        """
+        if cached_exp := self._expiration_cache.get(bucket, {}).get(s3_object.key):
+            return cached_exp
+
+        if lifecycle_rule := get_lifecycle_rule_from_object(
+            lifecycle_rules, s3_object.key, s3_object.size, object_tags
+        ):
+            expiration_header = serialize_expiration_header(
+                lifecycle_rule["ID"],
+                lifecycle_rule["Expiration"],
+                s3_object.last_modified,
+            )
+            self._expiration_cache[bucket][s3_object.key] = expiration_header
+            return expiration_header
+
+    def _get_cross_account_bucket(
+        self,
+        context: RequestContext,
+        bucket_name: BucketName,
+        *,
+        expected_bucket_owner: AccountId = None,
+    ) -> tuple[S3Store, S3Bucket]:
+        if expected_bucket_owner and not re.fullmatch(r"\w{12}", expected_bucket_owner):
+            raise InvalidBucketOwnerAWSAccountID(
+                f"The value of the expected bucket owner parameter must be an AWS Account ID... [{expected_bucket_owner}]",
+            )
+
+        store = self.get_store(context.account_id, context.region)
+        if not (s3_bucket := store.buckets.get(bucket_name)):
+            if not (account_id := store.global_bucket_map.get(bucket_name)):
+                raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket_name)
+
+            store = self.get_store(account_id, context.region)
+            if not (s3_bucket := store.buckets.get(bucket_name)):
+                raise NoSuchBucket("The specified bucket does not exist", BucketName=bucket_name)
+
+        if expected_bucket_owner and s3_bucket.bucket_account_id != expected_bucket_owner:
+            raise AccessDenied("Access Denied")
+
+        return store, s3_bucket
+
+    @staticmethod
+    def get_store(account_id: str, region_name: str) -> S3Store:
+        # Use the default account id for external access? Would need an anonymous one.
+        return s3_stores[account_id][region_name]
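To make the two-level lookup above easier to follow, here is a simplified sketch; the `Store` shape, `find_bucket` helper, and field names are hypothetical stand-ins for the real `S3Store`/`s3_stores` structures:

```python
from collections import defaultdict
from dataclasses import dataclass, field

# Hypothetical, simplified shapes illustrating the lookup in _get_cross_account_bucket:
# stores are keyed by account id, then by region; global_bucket_map resolves a bucket
# name to the owning account, since bucket names are globally unique.
@dataclass
class Store:
    buckets: dict = field(default_factory=dict)             # bucket name -> bucket object
    global_bucket_map: dict = field(default_factory=dict)   # bucket name -> owning account id

stores: dict[str, dict[str, Store]] = defaultdict(lambda: defaultdict(Store))

def find_bucket(account_id: str, region: str, name: str):
    store = stores[account_id][region]
    if bucket := store.buckets.get(name):
        return bucket
    # fall back to the owning account in the same region, mirroring the cross-account path
    if owner := store.global_bucket_map.get(name):
        return stores[owner][region].buckets.get(name)
    return None
```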
+    @handler("CreateBucket", expand=False)
+    def create_bucket(
+        self,
+        context: RequestContext,
+        request: CreateBucketRequest,
+    ) -> CreateBucketOutput:
+        bucket_name = request["Bucket"]
+
+        if not is_bucket_name_valid(bucket_name):
+            raise InvalidBucketName("The specified bucket is not valid.", BucketName=bucket_name)
+
+        # the XML parser returns an empty dict if the body contains an empty
+        # `<CreateBucketConfiguration />` element, but it also returns an empty dict if the body is
+        # fully empty. We need to differentiate the 2 cases by checking if the body is empty or not
+        if context.request.data and (
+            (create_bucket_configuration := request.get("CreateBucketConfiguration")) is not None
+        ):
+            if not (bucket_region := create_bucket_configuration.get("LocationConstraint")):
+                raise MalformedXML()
+
+            if context.region == AWS_REGION_US_EAST_1:
+                if bucket_region == "us-east-1":
+                    raise InvalidLocationConstraint(
+                        "The specified location-constraint is not valid",
+                        LocationConstraint=bucket_region,
+                    )
+            elif context.region != bucket_region:
+                raise CommonServiceException(
+                    code="IllegalLocationConstraintException",
+                    message=f"The {bucket_region} location constraint is incompatible for the region specific endpoint this request was sent to.",
+                )
+        else:
+            bucket_region = AWS_REGION_US_EAST_1
+            if context.region != bucket_region:
+                raise CommonServiceException(
+                    code="IllegalLocationConstraintException",
+                    message="The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.",
+                )
+
+        store = self.get_store(context.account_id, bucket_region)
+
+        if bucket_name in store.global_bucket_map:
+            existing_bucket_owner = store.global_bucket_map[bucket_name]
+            if existing_bucket_owner != context.account_id:
+                raise BucketAlreadyExists()
+
+            # if the existing bucket has the same owner, the behaviour will depend on the region
+            if bucket_region != "us-east-1":
+                raise BucketAlreadyOwnedByYou(
+                    "Your previous request to create the named bucket succeeded and you already own it.",
+                    BucketName=bucket_name,
+                )
+            else:
+                # CreateBucket is idempotent in us-east-1
+                return CreateBucketOutput(Location=f"/{bucket_name}")
+
+        if (
+            object_ownership := request.get("ObjectOwnership")
+        ) is not None and object_ownership not in OBJECT_OWNERSHIPS:
+            raise InvalidArgument(
+                f"Invalid x-amz-object-ownership header: {object_ownership}",
+                ArgumentName="x-amz-object-ownership",
+            )
+        # see https://docs.aws.amazon.com/AmazonS3/latest/API/API_Owner.html
+        owner = get_owner_for_account_id(context.account_id)
+        acl = get_access_control_policy_for_new_resource_request(request, owner=owner)
+        s3_bucket = S3Bucket(
+            name=bucket_name,
+            account_id=context.account_id,
+            bucket_region=bucket_region,
+            owner=owner,
+            acl=acl,
+            object_ownership=request.get("ObjectOwnership"),
+            object_lock_enabled_for_bucket=request.get("ObjectLockEnabledForBucket"),
+        )
+
+        store.buckets[bucket_name] = s3_bucket
+        store.global_bucket_map[bucket_name] = s3_bucket.bucket_account_id
+        self._cors_handler.invalidate_cache()
+        self._storage_backend.create_bucket(bucket_name)
+
+        # Location is always contained in the response -> full URL for LocationConstraint outside us-east-1
+        location = (
+            f"/{bucket_name}"
+            if bucket_region == "us-east-1"
+            else get_full_default_bucket_location(bucket_name)
+        )
+        response = CreateBucketOutput(Location=location)
+        return response
+
+    def delete_bucket(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> None:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        # the bucket still contains objects
+        if not s3_bucket.objects.is_empty():
+            message = "The bucket you tried to delete is not empty"
+            if s3_bucket.versioning_status:
+                message += ". You must delete all versions in the bucket."
+            raise BucketNotEmpty(
+                message,
+                BucketName=bucket,
+            )
+
+        store.buckets.pop(bucket)
+        store.global_bucket_map.pop(bucket)
+        self._cors_handler.invalidate_cache()
+        self._expiration_cache.pop(bucket, None)
+        # clean up the storage backend
+        self._storage_backend.delete_bucket(bucket)
+
+    def list_buckets(
+        self,
+        context: RequestContext,
+        max_buckets: MaxBuckets = None,
+        continuation_token: Token = None,
+        prefix: Prefix = None,
+        bucket_region: BucketRegion = None,
+        **kwargs,
+    ) -> ListBucketsOutput:
+        owner = get_owner_for_account_id(context.account_id)
+        store = self.get_store(context.account_id, context.region)
+
+        decoded_continuation_token = (
+            to_str(base64.urlsafe_b64decode(continuation_token.encode()))
+            if continuation_token
+            else None
+        )
+
+        count = 0
+        buckets: list[Bucket] = []
+        next_continuation_token = None
+
+        # Comparing strings with case sensitivity, since AWS is case-sensitive
+        for bucket in sorted(store.buckets.values(), key=lambda r: r.name):
+            if continuation_token and bucket.name < decoded_continuation_token:
+                continue
+
+            if prefix and not bucket.name.startswith(prefix):
+                continue
+
+            if bucket_region and not bucket.bucket_region == bucket_region:
+                continue
+
+            if max_buckets and count >= max_buckets:
+                next_continuation_token = to_str(base64.urlsafe_b64encode(bucket.name.encode()))
+                break
+
+            output_bucket = Bucket(
+                Name=bucket.name,
+                CreationDate=bucket.creation_date,
+                BucketRegion=bucket.bucket_region,
+            )
+            buckets.append(output_bucket)
+            count += 1
+
+        return ListBucketsOutput(
+            Owner=owner, Buckets=buckets, Prefix=prefix, ContinuationToken=next_continuation_token
+        )
+
+    def head_bucket(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> HeadBucketOutput:
+        store = self.get_store(context.account_id, context.region)
+        if not (s3_bucket := store.buckets.get(bucket)):
+            if not (account_id := store.global_bucket_map.get(bucket)):
+                # just to return the 404 error message
+                raise NoSuchBucket()
+
+            store = self.get_store(account_id, context.region)
+            if not (s3_bucket := store.buckets.get(bucket)):
+                # just to return the 404 error message
+                raise NoSuchBucket()
+
+        # TODO: this call is also used to check if the user has access/authorization for the bucket
+        #  it can return 403
+        return HeadBucketOutput(BucketRegion=s3_bucket.bucket_region)
+
+    def get_bucket_location(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> GetBucketLocationOutput:
+        """
+        This operation is implemented explicitly in the ASF provider because:
+        - The spec defines a root element GetBucketLocationOutput containing a LocationConstraint member, where
+          S3 actually just returns the LocationConstraint on the root level (the only operation so far that we know of).
+        - We circumvent the root level element here by patching the spec such that this operation returns a
+          single "payload" (the XML body response), which causes the serializer to directly take the payload element.
+        - The above "hack" means the serializer fix is not picked up here, as we're passing the XML body as
+          the payload, which is why we need to do this manually by manipulating the string.
+        Botocore implements this hack for parsing the response in `botocore.handlers.py#parse_get_bucket_location`
+        """
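To illustrate the non-standard shape described above, here is what the two response bodies look like and how a client would read them; the XML follows the documented S3 format, but these samples are illustrative rather than captured from AWS:

```python
import xml.etree.ElementTree as ET

# GetBucketLocation returns the LocationConstraint element at the root,
# with an empty value when the bucket lives in us-east-1.
resp_eu = (
    '<?xml version="1.0" encoding="UTF-8"?>\n'
    '<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">eu-central-1</LocationConstraint>'
)
resp_us_east_1 = (
    '<?xml version="1.0" encoding="UTF-8"?>\n'
    '<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>'
)

for body in (resp_eu, resp_us_east_1):
    root = ET.fromstring(body)
    # an empty element means the bucket lives in us-east-1
    print(root.text or "us-east-1")
```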
+ - The above "hack" causes the fix in the serializer to not be picked up here as we're passing the XML body as + the payload, which is why we need to manually do this here by manipulating the string. + Botocore implements this hack for parsing the response in `botocore.handlers.py#parse_get_bucket_location` + """ + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + location_constraint = ( + '\n' + '{{location}}' + ) + + location = s3_bucket.bucket_region if s3_bucket.bucket_region != "us-east-1" else "" + location_constraint = location_constraint.replace("{{location}}", location) + + response = GetBucketLocationOutput(LocationConstraint=location_constraint) + return response + + @handler("PutObject", expand=False) + def put_object( + self, + context: RequestContext, + request: PutObjectRequest, + ) -> PutObjectOutput: + # TODO: validate order of validation + # TODO: still need to handle following parameters + # request_payer: RequestPayer = None, + bucket_name = request["Bucket"] + key = request["Key"] + store, s3_bucket = self._get_cross_account_bucket(context, bucket_name) + + if (storage_class := request.get("StorageClass")) is not None and ( + storage_class not in STORAGE_CLASSES or storage_class == StorageClass.OUTPOSTS + ): + raise InvalidStorageClass( + "The storage class you specified is not valid", StorageClassRequested=storage_class + ) + + if not config.S3_SKIP_KMS_KEY_VALIDATION and (sse_kms_key_id := request.get("SSEKMSKeyId")): + validate_kms_key_id(sse_kms_key_id, s3_bucket) + + validate_object_key(key) + + if_match = request.get("IfMatch") + if (if_none_match := request.get("IfNoneMatch")) and if_match: + raise NotImplementedException( + "A header you provided implies functionality that is not implemented", + Header="If-Match,If-None-Match", + additionalMessage="Multiple conditional request headers present in the request", + ) + + elif (if_none_match and if_none_match != "*") or (if_match and if_match == "*"): + header_name = "If-None-Match" if if_none_match else "If-Match" + raise NotImplementedException( + "A header you provided implies functionality that is not implemented", + Header=header_name, + additionalMessage=f"We don't accept the provided value of {header_name} header for this API", + ) + + system_metadata = get_system_metadata_from_request(request) + if not system_metadata.get("ContentType"): + system_metadata["ContentType"] = "binary/octet-stream" + + version_id = generate_version_id(s3_bucket.versioning_status) + + etag_content_md5 = "" + if content_md5 := request.get("ContentMD5"): + # assert that the received ContentMD5 is a properly b64 encoded value that fits a MD5 hash length + etag_content_md5 = base_64_content_md5_to_etag(content_md5) + if not etag_content_md5: + raise InvalidDigest( + "The Content-MD5 you specified was invalid.", + Content_MD5=content_md5, + ) + + checksum_algorithm = get_s3_checksum_algorithm_from_request(request) + checksum_value = ( + request.get(f"Checksum{checksum_algorithm.upper()}") if checksum_algorithm else None + ) + + # TODO: we're not encrypting the object with the provided key for now + sse_c_key_md5 = request.get("SSECustomerKeyMD5") + validate_sse_c( + algorithm=request.get("SSECustomerAlgorithm"), + encryption_key=request.get("SSECustomerKey"), + encryption_key_md5=sse_c_key_md5, + server_side_encryption=request.get("ServerSideEncryption"), + ) + + encryption_parameters = get_encryption_parameters_from_request_and_bucket( + request, + s3_bucket, + store, + ) + + lock_parameters = 
+        acl = get_access_control_policy_for_new_resource_request(request, owner=s3_bucket.owner)
+
+        if tagging := request.get("Tagging"):
+            tagging = parse_tagging_header(tagging)
+
+        s3_object = S3Object(
+            key=key,
+            version_id=version_id,
+            storage_class=storage_class,
+            expires=request.get("Expires"),
+            user_metadata=request.get("Metadata"),
+            system_metadata=system_metadata,
+            checksum_algorithm=checksum_algorithm,
+            checksum_value=checksum_value,
+            encryption=encryption_parameters.encryption,
+            kms_key_id=encryption_parameters.kms_key_id,
+            bucket_key_enabled=encryption_parameters.bucket_key_enabled,
+            sse_key_hash=sse_c_key_md5,
+            lock_mode=lock_parameters.lock_mode,
+            lock_legal_status=lock_parameters.lock_legal_status,
+            lock_until=lock_parameters.lock_until,
+            website_redirect_location=request.get("WebsiteRedirectLocation"),
+            acl=acl,
+            owner=s3_bucket.owner,  # TODO: for now we only have one owner, but it can depend on bucket settings
+        )
+
+        body = request.get("Body")
+        # check if chunked request
+        headers = context.request.headers
+        is_aws_chunked = headers.get("x-amz-content-sha256", "").startswith(
+            "STREAMING-"
+        ) or "aws-chunked" in headers.get("content-encoding", "")
+        if is_aws_chunked:
+            checksum_algorithm = (
+                checksum_algorithm
+                or get_s3_checksum_algorithm_from_trailing_headers(headers.get("x-amz-trailer", ""))
+            )
+            if checksum_algorithm:
+                s3_object.checksum_algorithm = checksum_algorithm
+
+            decoded_content_length = int(headers.get("x-amz-decoded-content-length", 0))
+            body = AwsChunkedDecoder(body, decoded_content_length, s3_object=s3_object)
+
+            # S3 removes the `aws-chunked` value from ContentEncoding
+            if content_encoding := s3_object.system_metadata.pop("ContentEncoding", None):
+                encodings = [enc for enc in content_encoding.split(",") if enc != "aws-chunked"]
+                if encodings:
+                    s3_object.system_metadata["ContentEncoding"] = ",".join(encodings)
+
+        with self._storage_backend.open(bucket_name, s3_object, mode="w") as s3_stored_object:
+            # as we are inside the lock here, if multiple concurrent requests happen for the same object, the
+            # first one to finish succeeds, and subsequent ones will raise exceptions. Once the first write
+            # finishes, we release the lock and the other requests can check this condition
+            if if_none_match and object_exists_for_precondition_write(s3_bucket, key):
+                raise PreconditionFailed(
+                    "At least one of the pre-conditions you specified did not hold",
+                    Condition="If-None-Match",
+                )
+
+            elif if_match:
+                verify_object_equality_precondition_write(s3_bucket, key, if_match)
+
+            s3_stored_object.write(body)
+
+            if s3_object.checksum_algorithm:
+                if not s3_object.checksum_value:
+                    s3_object.checksum_value = s3_stored_object.checksum
+                elif not validate_checksum_value(s3_object.checksum_value, checksum_algorithm):
+                    self._storage_backend.remove(bucket_name, s3_object)
+                    raise InvalidRequest(
+                        f"Value for x-amz-checksum-{s3_object.checksum_algorithm.lower()} header is invalid."
+                    )
+                elif s3_object.checksum_value != s3_stored_object.checksum:
+                    self._storage_backend.remove(bucket_name, s3_object)
+                    raise BadDigest(
+                        f"The {checksum_algorithm.upper()} you specified did not match the calculated checksum."
+                    )
+
+            # TODO: handle ContentMD5 and ChecksumAlgorithm in a handler for all requests except requests with a
+            #  streaming body. We can use the specs to verify which operations need to have the checksum validated
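The comparison that follows relies on the fact that a plain (non-multipart) ETag is the hex MD5 of the payload, while Content-MD5 is the base64 form of the same digest. A small sketch of that relationship (the `etag_to_base_64_content_md5` and `base_64_content_md5_to_etag` helper names come from the source; the conversions shown here are my own illustration):

```python
import base64
import hashlib

payload = b"hello world"

etag = hashlib.md5(payload).hexdigest()  # hex digest; S3 returns this quoted as the ETag
content_md5 = base64.b64encode(hashlib.md5(payload).digest()).decode()  # Content-MD5 header value

# converting one representation into the other:
assert base64.b64encode(bytes.fromhex(etag)).decode() == content_md5
assert bytes.fromhex(etag) == base64.b64decode(content_md5)
```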
+            if content_md5:
+                calculated_md5 = etag_to_base_64_content_md5(s3_stored_object.etag)
+                if calculated_md5 != content_md5:
+                    self._storage_backend.remove(bucket_name, s3_object)
+                    raise BadDigest(
+                        "The Content-MD5 you specified did not match what we received.",
+                        ExpectedDigest=etag_content_md5,
+                        CalculatedDigest=calculated_md5,
+                    )
+
+            s3_bucket.objects.set(key, s3_object)
+
+        # in case we are overwriting an object, delete the tags entry
+        key_id = get_unique_key_id(bucket_name, key, version_id)
+        store.TAGS.tags.pop(key_id, None)
+        if tagging:
+            store.TAGS.tags[key_id] = tagging
+
+        # RequestCharged: Optional[RequestCharged] # TODO
+        response = PutObjectOutput(
+            ETag=s3_object.quoted_etag,
+        )
+        if s3_bucket.versioning_status == "Enabled":
+            response["VersionId"] = s3_object.version_id
+
+        if s3_object.checksum_algorithm:
+            response[f"Checksum{s3_object.checksum_algorithm}"] = s3_object.checksum_value
+            response["ChecksumType"] = getattr(s3_object, "checksum_type", ChecksumType.FULL_OBJECT)
+
+        if s3_bucket.lifecycle_rules:
+            if expiration_header := self._get_expiration_header(
+                s3_bucket.lifecycle_rules,
+                bucket_name,
+                s3_object,
+                store.TAGS.tags.get(key_id, {}),
+            ):
+                # TODO: we either apply the lifecycle to existing objects when we set the new rules, or we need to
+                #  apply them every time we get/head an object
+                response["Expiration"] = expiration_header
+
+        add_encryption_to_response(response, s3_object=s3_object)
+        if sse_c_key_md5:
+            response["SSECustomerAlgorithm"] = "AES256"
+            response["SSECustomerKeyMD5"] = sse_c_key_md5
+
+        self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object)
+
+        return response
+
+    @handler("GetObject", expand=False)
+    def get_object(
+        self,
+        context: RequestContext,
+        request: GetObjectRequest,
+    ) -> GetObjectOutput:
+        # TODO: missing handling parameters:
+        #  request_payer: RequestPayer = None,
+        #  expected_bucket_owner: AccountId = None,
+
+        bucket_name = request["Bucket"]
+        object_key = request["Key"]
+        version_id = request.get("VersionId")
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket_name)
+
+        s3_object = s3_bucket.get_object(
+            key=object_key,
+            version_id=version_id,
+            http_method="GET",
+        )
+        if s3_object.expires and s3_object.expires < datetime.datetime.now(
+            tz=s3_object.expires.tzinfo
+        ):
+            # TODO: the old behaviour was to delete the key instantly when expired. AWS generally cleans up only
+            #  once a day; you can still HeadObject on it and get the expiry time until the scheduled deletion
+            kwargs = {"Key": object_key}
+            if version_id:
+                kwargs["VersionId"] = version_id
+            raise NoSuchKey("The specified key does not exist.", **kwargs)
+
+        if s3_object.storage_class in ARCHIVES_STORAGE_CLASSES and not s3_object.restore:
+            raise InvalidObjectState(
+                "The operation is not valid for the object's storage class",
+                StorageClass=s3_object.storage_class,
+            )
+
+        if not config.S3_SKIP_KMS_KEY_VALIDATION and s3_object.kms_key_id:
+            validate_kms_key_id(kms_key=s3_object.kms_key_id, bucket=s3_bucket)
+
+        sse_c_key_md5 = request.get("SSECustomerKeyMD5")
+        # we're using getattr access because when restoring, the field might not exist
+        # TODO: cleanup at next major release
+        if sse_key_hash := getattr(s3_object, "sse_key_hash", None):
+            if sse_key_hash and not sse_c_key_md5:
+                raise InvalidRequest(
+                    "The object was stored using a form of Server Side Encryption. "
+                    "The correct parameters must be provided to retrieve the object."
+                )
+            elif sse_key_hash != sse_c_key_md5:
+                raise AccessDenied(
+                    "Requests specifying Server Side Encryption with Customer provided keys must provide the correct secret key."
+                )
+
+        validate_sse_c(
+            algorithm=request.get("SSECustomerAlgorithm"),
+            encryption_key=request.get("SSECustomerKey"),
+            encryption_key_md5=sse_c_key_md5,
+        )
+
+        validate_failed_precondition(request, s3_object.last_modified, s3_object.etag)
+
+        range_header = request.get("Range")
+        part_number = request.get("PartNumber")
+        if range_header and part_number:
+            raise InvalidRequest("Cannot specify both Range header and partNumber query parameter")
+        range_data = None
+        if range_header:
+            range_data = parse_range_header(range_header, s3_object.size)
+        elif part_number:
+            range_data = get_part_range(s3_object, part_number)
+
+        # we deliberately do not call `.close()` on the s3_stored_object, to keep the read lock acquired. When
+        # passing the object to Werkzeug, the handler will call `.close()` after finishing iterating over
+        # `__iter__`. This can however lead to deadlocks if an exception happens between this call and returning
+        # the object. Be careful when adding validation between this call and the `return` of `S3Provider.get_object`
+        s3_stored_object = self._storage_backend.open(bucket_name, s3_object, mode="r")
+
+        # this is a hacky way to verify that the object hasn't been modified between `s3_object = s3_bucket.get_object`
+        # and the storage backend call. If it has been modified, now that we're inside the read lock, we can safely
+        # fetch the object again
+        if s3_stored_object.last_modified != s3_object.internal_last_modified:
+            s3_object = s3_bucket.get_object(
+                key=object_key,
+                version_id=version_id,
+                http_method="GET",
+            )
+
+        response = GetObjectOutput(
+            AcceptRanges="bytes",
+            **s3_object.get_system_metadata_fields(),
+        )
+        if s3_object.user_metadata:
+            response["Metadata"] = s3_object.user_metadata
+
+        if s3_object.parts and request.get("PartNumber"):
+            response["PartsCount"] = len(s3_object.parts)
+
+        if s3_object.version_id:
+            response["VersionId"] = s3_object.version_id
+
+        if s3_object.website_redirect_location:
+            response["WebsiteRedirectLocation"] = s3_object.website_redirect_location
+
+        if s3_object.restore:
+            response["Restore"] = s3_object.restore
+
+        checksum_value = None
+        if checksum_algorithm := s3_object.checksum_algorithm:
+            if (request.get("ChecksumMode") or "").upper() == "ENABLED":
+                checksum_value = s3_object.checksum_value
+
+        if range_data:
+            s3_stored_object.seek(range_data.begin)
+            response["Body"] = LimitedIterableStream(
+                s3_stored_object, max_length=range_data.content_length
+            )
+            response["ContentRange"] = range_data.content_range
+            response["ContentLength"] = range_data.content_length
+            response["StatusCode"] = 206
+            if range_data.content_length == s3_object.size and checksum_value:
+                response[f"Checksum{checksum_algorithm.upper()}"] = checksum_value
+                response["ChecksumType"] = getattr(
+                    s3_object, "checksum_type", ChecksumType.FULL_OBJECT
+                )
+        else:
+            response["Body"] = s3_stored_object
+            if checksum_value:
+                response[f"Checksum{checksum_algorithm.upper()}"] = checksum_value
+                response["ChecksumType"] = getattr(
+                    s3_object, "checksum_type", ChecksumType.FULL_OBJECT
+                )
+
+        add_encryption_to_response(response, s3_object=s3_object)
+
+        if object_tags := store.TAGS.tags.get(
+            get_unique_key_id(bucket_name, object_key, version_id)
+        ):
+            response["TagCount"] = len(object_tags)
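For context on the Expiration header computed next: when an object matches a lifecycle rule, S3 surfaces an `x-amz-expiration` header combining the expiry date and the rule id. A sketch of such a serializer follows; the real helper is `serialize_expiration_header` in `localstack.services.s3.utils`, and this body is only an assumption about its behavior, based on the documented header format:

```python
import datetime
from email.utils import format_datetime

# Illustrative serializer for the x-amz-expiration header (implementation assumed).
def expiration_header(rule_id: str, expiration_days: int, last_modified: datetime.datetime) -> str:
    # S3 rounds the expiry up to midnight UTC of the following day
    expiry = (last_modified + datetime.timedelta(days=expiration_days)).replace(
        hour=0, minute=0, second=0, microsecond=0
    ) + datetime.timedelta(days=1)
    return f'expiry-date="{format_datetime(expiry, usegmt=True)}", rule-id="{rule_id}"'

print(expiration_header("logs-cleanup", 30, datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc)))
# expiry-date="Thu, 01 Feb 2024 00:00:00 GMT", rule-id="logs-cleanup"
```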
+        if s3_object.is_current and s3_bucket.lifecycle_rules:
+            if expiration_header := self._get_expiration_header(
+                s3_bucket.lifecycle_rules,
+                bucket_name,
+                s3_object,
+                object_tags,
+            ):
+                # TODO: we either apply the lifecycle to existing objects when we set the new rules, or we need to
+                #  apply them every time we get/head an object
+                response["Expiration"] = expiration_header
+
+        # TODO: missing returned fields
+        #  RequestCharged: Optional[RequestCharged]
+        #  ReplicationStatus: Optional[ReplicationStatus]
+
+        if s3_object.lock_mode:
+            response["ObjectLockMode"] = s3_object.lock_mode
+        if s3_object.lock_until:
+            response["ObjectLockRetainUntilDate"] = s3_object.lock_until
+        if s3_object.lock_legal_status:
+            response["ObjectLockLegalHoldStatus"] = s3_object.lock_legal_status
+
+        if sse_c_key_md5:
+            response["SSECustomerAlgorithm"] = "AES256"
+            response["SSECustomerKeyMD5"] = sse_c_key_md5
+
+        for request_param, response_param in ALLOWED_HEADER_OVERRIDES.items():
+            if request_param_value := request.get(request_param):
+                response[response_param] = request_param_value
+
+        return response
+
+    @handler("HeadObject", expand=False)
+    def head_object(
+        self,
+        context: RequestContext,
+        request: HeadObjectRequest,
+    ) -> HeadObjectOutput:
+        bucket_name = request["Bucket"]
+        object_key = request["Key"]
+        version_id = request.get("VersionId")
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket_name)
+
+        s3_object = s3_bucket.get_object(
+            key=object_key,
+            version_id=version_id,
+            http_method="HEAD",
+        )
+
+        validate_failed_precondition(request, s3_object.last_modified, s3_object.etag)
+
+        sse_c_key_md5 = request.get("SSECustomerKeyMD5")
+        if s3_object.sse_key_hash:
+            if not sse_c_key_md5:
+                raise InvalidRequest(
+                    "The object was stored using a form of Server Side Encryption. "
+                    "The correct parameters must be provided to retrieve the object."
+                )
+            elif s3_object.sse_key_hash != sse_c_key_md5:
+                raise AccessDenied(
+                    "Requests specifying Server Side Encryption with Customer provided keys must provide the correct secret key."
+                )
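The `sse_key_hash` compared above is the base64 MD5 of the customer-provided key, which is how the SSE-C protocol identifies a key without storing it. A sketch of how a client derives these values (spec-based illustration, not code from this provider):

```python
import base64
import hashlib
import os

key = os.urandom(32)  # SSE-C requires a 256-bit key

sse_customer_key = base64.b64encode(key).decode()
sse_customer_key_md5 = base64.b64encode(hashlib.md5(key).digest()).decode()

# LocalStack persists sse_customer_key_md5 as `sse_key_hash` at PutObject time; a later
# GET/HEAD must send the same x-amz-server-side-encryption-customer-key-MD5 value.
print(sse_customer_key, sse_customer_key_md5)
```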
+        validate_sse_c(
+            algorithm=request.get("SSECustomerAlgorithm"),
+            encryption_key=request.get("SSECustomerKey"),
+            encryption_key_md5=sse_c_key_md5,
+        )
+
+        response = HeadObjectOutput(
+            AcceptRanges="bytes",
+            **s3_object.get_system_metadata_fields(),
+        )
+        if s3_object.user_metadata:
+            response["Metadata"] = s3_object.user_metadata
+
+        if checksum_algorithm := s3_object.checksum_algorithm:
+            if (request.get("ChecksumMode") or "").upper() == "ENABLED":
+                response[f"Checksum{checksum_algorithm.upper()}"] = s3_object.checksum_value
+                response["ChecksumType"] = getattr(
+                    s3_object, "checksum_type", ChecksumType.FULL_OBJECT
+                )
+
+        if s3_object.parts and request.get("PartNumber"):
+            response["PartsCount"] = len(s3_object.parts)
+
+        if s3_object.version_id:
+            response["VersionId"] = s3_object.version_id
+
+        if s3_object.website_redirect_location:
+            response["WebsiteRedirectLocation"] = s3_object.website_redirect_location
+
+        if s3_object.restore:
+            response["Restore"] = s3_object.restore
+
+        range_header = request.get("Range")
+        part_number = request.get("PartNumber")
+        if range_header and part_number:
+            raise InvalidRequest("Cannot specify both Range header and partNumber query parameter")
+        range_data = None
+        if range_header:
+            range_data = parse_range_header(range_header, s3_object.size)
+        elif part_number:
+            range_data = get_part_range(s3_object, part_number)
+
+        if range_data:
+            response["ContentLength"] = range_data.content_length
+            response["ContentRange"] = range_data.content_range
+            response["StatusCode"] = 206
+
+        add_encryption_to_response(response, s3_object=s3_object)
+
+        # if you specify the VersionId, AWS won't return the Expiration header, even if it's the current version
+        if not version_id and s3_bucket.lifecycle_rules:
+            object_tags = store.TAGS.tags.get(
+                get_unique_key_id(bucket_name, object_key, s3_object.version_id)
+            )
+            if expiration_header := self._get_expiration_header(
+                s3_bucket.lifecycle_rules,
+                bucket_name,
+                s3_object,
+                object_tags,
+            ):
+                # TODO: we either apply the lifecycle to existing objects when we set the new rules, or we need to
+                #  apply them every time we get/head an object
+                response["Expiration"] = expiration_header
+
+        if s3_object.lock_mode:
+            response["ObjectLockMode"] = s3_object.lock_mode
+        if s3_object.lock_until:
+            response["ObjectLockRetainUntilDate"] = s3_object.lock_until
+        if s3_object.lock_legal_status:
+            response["ObjectLockLegalHoldStatus"] = s3_object.lock_legal_status
+
+        if sse_c_key_md5:
+            response["SSECustomerAlgorithm"] = "AES256"
+            response["SSECustomerKeyMD5"] = sse_c_key_md5
+
+        # TODO: missing return fields:
+        #  ArchiveStatus: Optional[ArchiveStatus]
+        #  RequestCharged: Optional[RequestCharged]
+        #  ReplicationStatus: Optional[ReplicationStatus]
+
+        return response
+
+    def delete_object(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        key: ObjectKey,
+        mfa: MFA = None,
+        version_id: ObjectVersionId = None,
+        request_payer: RequestPayer = None,
+        bypass_governance_retention: BypassGovernanceRetention = None,
+        expected_bucket_owner: AccountId = None,
+        if_match: IfMatch = None,
+        if_match_last_modified_time: IfMatchLastModifiedTime = None,
+        if_match_size: IfMatchSize = None,
+        **kwargs,
+    ) -> DeleteObjectOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        if bypass_governance_retention is not None and not s3_bucket.object_lock_enabled:
+            raise InvalidArgument(
+                "x-amz-bypass-governance-retention is only applicable to Object Lock enabled buckets.",
ArgumentName="x-amz-bypass-governance-retention", + ) + + if s3_bucket.versioning_status is None: + if version_id and version_id != "null": + raise InvalidArgument( + "Invalid version id specified", + ArgumentName="versionId", + ArgumentValue=version_id, + ) + + found_object = s3_bucket.objects.pop(key, None) + # TODO: RequestCharged + if found_object: + self._storage_backend.remove(bucket, found_object) + self._notify(context, s3_bucket=s3_bucket, s3_object=found_object) + store.TAGS.tags.pop(get_unique_key_id(bucket, key, version_id), None) + + return DeleteObjectOutput() + + if not version_id: + delete_marker_id = generate_version_id(s3_bucket.versioning_status) + delete_marker = S3DeleteMarker(key=key, version_id=delete_marker_id) + s3_bucket.objects.set(key, delete_marker) + s3_notif_ctx = S3EventNotificationContext.from_request_context_native( + context, + s3_bucket=s3_bucket, + s3_object=delete_marker, + ) + s3_notif_ctx.event_type = f"{s3_notif_ctx.event_type}MarkerCreated" + self._notify(context, s3_bucket=s3_bucket, s3_notif_ctx=s3_notif_ctx) + + return DeleteObjectOutput(VersionId=delete_marker.version_id, DeleteMarker=True) + + if key not in s3_bucket.objects: + return DeleteObjectOutput() + + if not (s3_object := s3_bucket.objects.get(key, version_id)): + raise InvalidArgument( + "Invalid version id specified", + ArgumentName="versionId", + ArgumentValue=version_id, + ) + + if s3_object.is_locked(bypass_governance_retention): + raise AccessDenied("Access Denied because object protected by object lock.") + + s3_bucket.objects.pop(object_key=key, version_id=version_id) + response = DeleteObjectOutput(VersionId=s3_object.version_id) + + if isinstance(s3_object, S3DeleteMarker): + response["DeleteMarker"] = True + else: + self._storage_backend.remove(bucket, s3_object) + store.TAGS.tags.pop(get_unique_key_id(bucket, key, version_id), None) + self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object) + + return response + + def delete_objects( + self, + context: RequestContext, + bucket: BucketName, + delete: Delete, + mfa: MFA = None, + request_payer: RequestPayer = None, + bypass_governance_retention: BypassGovernanceRetention = None, + expected_bucket_owner: AccountId = None, + checksum_algorithm: ChecksumAlgorithm = None, + **kwargs, + ) -> DeleteObjectsOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if bypass_governance_retention is not None and not s3_bucket.object_lock_enabled: + raise InvalidArgument( + "x-amz-bypass-governance-retention is only applicable to Object Lock enabled buckets.", + ArgumentName="x-amz-bypass-governance-retention", + ) + + objects: list[ObjectIdentifier] = delete.get("Objects") + if not objects: + raise MalformedXML() + + # TODO: max 1000 delete at once? test against AWS? 
+ + quiet = delete.get("Quiet", False) + deleted = [] + errors = [] + + to_remove = [] + for to_delete_object in objects: + object_key = to_delete_object.get("Key") + version_id = to_delete_object.get("VersionId") + if s3_bucket.versioning_status is None: + if version_id and version_id != "null": + errors.append( + Error( + Code="NoSuchVersion", + Key=object_key, + Message="The specified version does not exist.", + VersionId=version_id, + ) + ) + continue + + found_object = s3_bucket.objects.pop(object_key, None) + if found_object: + to_remove.append(found_object) + self._notify(context, s3_bucket=s3_bucket, s3_object=found_object) + store.TAGS.tags.pop(get_unique_key_id(bucket, object_key, version_id), None) + # small hack to not create a fake object for nothing + elif s3_bucket.notification_configuration: + # DeleteObjects is a bit weird, even if the object didn't exist, S3 will trigger a notification + # for a non-existing object being deleted + self._notify( + context, s3_bucket=s3_bucket, s3_object=S3Object(key=object_key, etag="") + ) + + if not quiet: + deleted.append(DeletedObject(Key=object_key)) + + continue + + if not version_id: + delete_marker_id = generate_version_id(s3_bucket.versioning_status) + delete_marker = S3DeleteMarker(key=object_key, version_id=delete_marker_id) + s3_bucket.objects.set(object_key, delete_marker) + s3_notif_ctx = S3EventNotificationContext.from_request_context_native( + context, + s3_bucket=s3_bucket, + s3_object=delete_marker, + ) + s3_notif_ctx.event_type = f"{s3_notif_ctx.event_type}MarkerCreated" + self._notify(context, s3_bucket=s3_bucket, s3_notif_ctx=s3_notif_ctx) + + if not quiet: + deleted.append( + DeletedObject( + DeleteMarker=True, + DeleteMarkerVersionId=delete_marker_id, + Key=object_key, + ) + ) + continue + + if not ( + found_object := s3_bucket.objects.get(object_key=object_key, version_id=version_id) + ): + errors.append( + Error( + Code="NoSuchVersion", + Key=object_key, + Message="The specified version does not exist.", + VersionId=version_id, + ) + ) + continue + + if found_object.is_locked(bypass_governance_retention): + errors.append( + Error( + Code="AccessDenied", + Key=object_key, + Message="Access Denied because object protected by object lock.", + VersionId=version_id, + ) + ) + continue + + s3_bucket.objects.pop(object_key=object_key, version_id=version_id) + if not quiet: + deleted_object = DeletedObject( + Key=object_key, + VersionId=version_id, + ) + if isinstance(found_object, S3DeleteMarker): + deleted_object["DeleteMarker"] = True + deleted_object["DeleteMarkerVersionId"] = found_object.version_id + + deleted.append(deleted_object) + + if isinstance(found_object, S3Object): + to_remove.append(found_object) + + self._notify(context, s3_bucket=s3_bucket, s3_object=found_object) + store.TAGS.tags.pop(get_unique_key_id(bucket, object_key, version_id), None) + + # TODO: request charged + self._storage_backend.remove(bucket, to_remove) + response: DeleteObjectsOutput = {} + # AWS validated: the list of Deleted objects is unordered, multiple identical calls can return different results + if errors: + response["Errors"] = errors + if not quiet: + response["Deleted"] = deleted + + return response + + @handler("CopyObject", expand=False) + def copy_object( + self, + context: RequestContext, + request: CopyObjectRequest, + ) -> CopyObjectOutput: + # request_payer: RequestPayer = None, # TODO: + dest_bucket = request["Bucket"] + dest_key = request["Key"] + validate_object_key(dest_key) + store, dest_s3_bucket = 
self._get_cross_account_bucket(context, dest_bucket) + + src_bucket, src_key, src_version_id = extract_bucket_key_version_id_from_copy_source( + request.get("CopySource") + ) + _, src_s3_bucket = self._get_cross_account_bucket(context, src_bucket) + + if not config.S3_SKIP_KMS_KEY_VALIDATION and (sse_kms_key_id := request.get("SSEKMSKeyId")): + validate_kms_key_id(sse_kms_key_id, dest_s3_bucket) + + # if the object is a delete marker, get_object will raise NotFound if no versionId, like AWS + try: + src_s3_object = src_s3_bucket.get_object(key=src_key, version_id=src_version_id) + except MethodNotAllowed: + raise InvalidRequest( + "The source of a copy request may not specifically refer to a delete marker by version id." + ) + + if src_s3_object.storage_class in ARCHIVES_STORAGE_CLASSES and not src_s3_object.restore: + raise InvalidObjectState( + "Operation is not valid for the source object's storage class", + StorageClass=src_s3_object.storage_class, + ) + + if failed_condition := get_failed_precondition_copy_source( + request, src_s3_object.last_modified, src_s3_object.etag + ): + raise PreconditionFailed( + "At least one of the pre-conditions you specified did not hold", + Condition=failed_condition, + ) + + source_sse_c_key_md5 = request.get("CopySourceSSECustomerKeyMD5") + if src_s3_object.sse_key_hash: + if not source_sse_c_key_md5: + raise InvalidRequest( + "The object was stored using a form of Server Side Encryption. " + "The correct parameters must be provided to retrieve the object." + ) + elif src_s3_object.sse_key_hash != source_sse_c_key_md5: + raise AccessDenied("Access Denied") + + validate_sse_c( + algorithm=request.get("CopySourceSSECustomerAlgorithm"), + encryption_key=request.get("CopySourceSSECustomerKey"), + encryption_key_md5=source_sse_c_key_md5, + ) + + target_sse_c_key_md5 = request.get("SSECustomerKeyMD5") + server_side_encryption = request.get("ServerSideEncryption") + # validate target SSE-C parameters + validate_sse_c( + algorithm=request.get("SSECustomerAlgorithm"), + encryption_key=request.get("SSECustomerKey"), + encryption_key_md5=target_sse_c_key_md5, + server_side_encryption=server_side_encryption, + ) + + # TODO validate order of validation + storage_class = request.get("StorageClass") + metadata_directive = request.get("MetadataDirective") + website_redirect_location = request.get("WebsiteRedirectLocation") + # we need to check for identity of the object, to see if the default one has been changed + is_default_encryption = ( + dest_s3_bucket.encryption_rule is DEFAULT_BUCKET_ENCRYPTION + and src_s3_object.encryption == "AES256" + ) + if ( + src_bucket == dest_bucket + and src_key == dest_key + and not any( + ( + storage_class, + server_side_encryption, + target_sse_c_key_md5, + metadata_directive == "REPLACE", + website_redirect_location, + dest_s3_bucket.encryption_rule + and not is_default_encryption, # S3 will allow copy in place if the bucket has encryption configured + src_s3_object.restore, + ) + ) + ): + raise InvalidRequest( + "This copy request is illegal because it is trying to copy an object to itself without changing the " + "object's metadata, storage class, website redirect location or encryption attributes." 
+ ) + + if tagging := request.get("Tagging"): + tagging = parse_tagging_header(tagging) + + if metadata_directive == "REPLACE": + user_metadata = request.get("Metadata") + system_metadata = get_system_metadata_from_request(request) + if not system_metadata.get("ContentType"): + system_metadata["ContentType"] = "binary/octet-stream" + else: + user_metadata = src_s3_object.user_metadata + system_metadata = src_s3_object.system_metadata + + dest_version_id = generate_version_id(dest_s3_bucket.versioning_status) + + encryption_parameters = get_encryption_parameters_from_request_and_bucket( + request, + dest_s3_bucket, + store, + ) + lock_parameters = get_object_lock_parameters_from_bucket_and_request( + request, dest_s3_bucket + ) + + acl = get_access_control_policy_for_new_resource_request( + request, owner=dest_s3_bucket.owner + ) + checksum_algorithm = request.get("ChecksumAlgorithm") + + s3_object = S3Object( + key=dest_key, + size=src_s3_object.size, + version_id=dest_version_id, + storage_class=storage_class, + expires=request.get("Expires"), + user_metadata=user_metadata, + system_metadata=system_metadata, + checksum_algorithm=checksum_algorithm or src_s3_object.checksum_algorithm, + encryption=encryption_parameters.encryption, + kms_key_id=encryption_parameters.kms_key_id, + bucket_key_enabled=request.get( + "BucketKeyEnabled" + ), # CopyObject does not inherit from the bucket here + sse_key_hash=target_sse_c_key_md5, + lock_mode=lock_parameters.lock_mode, + lock_legal_status=lock_parameters.lock_legal_status, + lock_until=lock_parameters.lock_until, + website_redirect_location=website_redirect_location, + expiration=None, # TODO, from lifecycle + acl=acl, + owner=dest_s3_bucket.owner, + ) + + with self._storage_backend.copy( + src_bucket=src_bucket, + src_object=src_s3_object, + dest_bucket=dest_bucket, + dest_object=s3_object, + ) as s3_stored_object: + s3_object.checksum_value = s3_stored_object.checksum or src_s3_object.checksum_value + s3_object.etag = s3_stored_object.etag or src_s3_object.etag + + dest_s3_bucket.objects.set(dest_key, s3_object) + + dest_key_id = get_unique_key_id(dest_bucket, dest_key, dest_version_id) + + if (request.get("TaggingDirective")) == "REPLACE": + store.TAGS.tags[dest_key_id] = tagging or {} + else: + src_key_id = get_unique_key_id(src_bucket, src_key, src_s3_object.version_id) + src_tags = store.TAGS.tags.get(src_key_id, {}) + store.TAGS.tags[dest_key_id] = copy.copy(src_tags) + + copy_object_result = CopyObjectResult( + ETag=s3_object.quoted_etag, + LastModified=s3_object.last_modified, + ) + if s3_object.checksum_algorithm: + copy_object_result[f"Checksum{s3_object.checksum_algorithm.upper()}"] = ( + s3_object.checksum_value + ) + + response = CopyObjectOutput( + CopyObjectResult=copy_object_result, + ) + + if s3_object.version_id: + response["VersionId"] = s3_object.version_id + + if s3_object.expiration: + response["Expiration"] = s3_object.expiration # TODO: properly parse the datetime + + add_encryption_to_response(response, s3_object=s3_object) + if target_sse_c_key_md5: + response["SSECustomerAlgorithm"] = "AES256" + response["SSECustomerKeyMD5"] = target_sse_c_key_md5 + + if ( + src_s3_bucket.versioning_status + and src_s3_object.version_id + and src_s3_object.version_id != "null" + ): + response["CopySourceVersionId"] = src_s3_object.version_id + + # RequestCharged: Optional[RequestCharged] # TODO + self._notify(context, s3_bucket=dest_s3_bucket, s3_object=s3_object) + + return response + + def list_objects( + self, + context: 
RequestContext, + bucket: BucketName, + delimiter: Delimiter = None, + encoding_type: EncodingType = None, + marker: Marker = None, + max_keys: MaxKeys = None, + prefix: Prefix = None, + request_payer: RequestPayer = None, + expected_bucket_owner: AccountId = None, + optional_object_attributes: OptionalObjectAttributesList = None, + **kwargs, + ) -> ListObjectsOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + common_prefixes = set() + count = 0 + is_truncated = False + next_key_marker = None + max_keys = max_keys or 1000 + prefix = prefix or "" + delimiter = delimiter or "" + if encoding_type: + prefix = urlparse.quote(prefix) + delimiter = urlparse.quote(delimiter) + + s3_objects: list[Object] = [] + + all_keys = sorted(s3_bucket.objects.values(), key=lambda r: r.key) + last_key = all_keys[-1] if all_keys else None + + # sort by key + for s3_object in all_keys: + key = urlparse.quote(s3_object.key) if encoding_type else s3_object.key + # skip all keys that alphabetically come before key_marker + if marker: + if key <= marker: + continue + + # Filter for keys that start with prefix + if prefix and not key.startswith(prefix): + continue + + # see ListObjectsV2 for the logic comments (shared logic here) + prefix_including_delimiter = None + if delimiter and delimiter in (key_no_prefix := key.removeprefix(prefix)): + pre_delimiter, _, _ = key_no_prefix.partition(delimiter) + prefix_including_delimiter = f"{prefix}{pre_delimiter}{delimiter}" + + if prefix_including_delimiter in common_prefixes or ( + marker and marker.startswith(prefix_including_delimiter) + ): + continue + + if prefix_including_delimiter: + common_prefixes.add(prefix_including_delimiter) + else: + # TODO: add RestoreStatus if present + object_data = Object( + Key=key, + ETag=s3_object.quoted_etag, + Owner=s3_bucket.owner, # TODO: verify reality + Size=s3_object.size, + LastModified=s3_object.last_modified, + StorageClass=s3_object.storage_class, + ) + + if s3_object.checksum_algorithm: + object_data["ChecksumAlgorithm"] = [s3_object.checksum_algorithm] + object_data["ChecksumType"] = getattr( + s3_object, "checksum_type", ChecksumType.FULL_OBJECT + ) + + s3_objects.append(object_data) + + # we just added a CommonPrefix or an Object, increase the counter + count += 1 + if count >= max_keys and last_key.key != s3_object.key: + is_truncated = True + if prefix_including_delimiter: + next_key_marker = prefix_including_delimiter + elif s3_objects: + next_key_marker = s3_objects[-1]["Key"] + break + + common_prefixes = [CommonPrefix(Prefix=prefix) for prefix in sorted(common_prefixes)] + + response = ListObjectsOutput( + IsTruncated=is_truncated, + Name=bucket, + MaxKeys=max_keys, + Prefix=prefix or "", + Marker=marker or "", + ) + if s3_objects: + response["Contents"] = s3_objects + if encoding_type: + response["EncodingType"] = EncodingType.url + if delimiter: + response["Delimiter"] = delimiter + if common_prefixes: + response["CommonPrefixes"] = common_prefixes + if delimiter and next_key_marker: + response["NextMarker"] = next_key_marker + if s3_bucket.bucket_region != "us-east-1": + response["BucketRegion"] = s3_bucket.bucket_region + + # RequestCharged: Optional[RequestCharged] # TODO + return response + + def list_objects_v2( + self, + context: RequestContext, + bucket: BucketName, + delimiter: Delimiter = None, + encoding_type: EncodingType = None, + max_keys: MaxKeys = None, + prefix: Prefix = None, + continuation_token: Token = None, + fetch_owner: FetchOwner = None, + start_after: 
StartAfter = None, + request_payer: RequestPayer = None, + expected_bucket_owner: AccountId = None, + optional_object_attributes: OptionalObjectAttributesList = None, + **kwargs, + ) -> ListObjectsV2Output: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if continuation_token == "": + raise InvalidArgument( + "The continuation token provided is incorrect", + ArgumentName="continuation-token", + ) + + common_prefixes = set() + count = 0 + is_truncated = False + next_continuation_token = None + max_keys = max_keys or 1000 + prefix = prefix or "" + delimiter = delimiter or "" + if encoding_type: + prefix = urlparse.quote(prefix) + delimiter = urlparse.quote(delimiter) + decoded_continuation_token = ( + to_str(base64.urlsafe_b64decode(continuation_token.encode())) + if continuation_token + else None + ) + + s3_objects: list[Object] = [] + + # sort by key + for s3_object in sorted(s3_bucket.objects.values(), key=lambda r: r.key): + key = urlparse.quote(s3_object.key) if encoding_type else s3_object.key + + # skip all keys that alphabetically come before continuation_token + if continuation_token: + if key < decoded_continuation_token: + continue + + elif start_after: + if key <= start_after: + continue + + # Filter for keys that start with prefix + if prefix and not key.startswith(prefix): + continue + + # separate keys that contain the same string between the prefix and the first occurrence of the delimiter + prefix_including_delimiter = None + if delimiter and delimiter in (key_no_prefix := key.removeprefix(prefix)): + pre_delimiter, _, _ = key_no_prefix.partition(delimiter) + prefix_including_delimiter = f"{prefix}{pre_delimiter}{delimiter}" + + # if the CommonPrefix is already in the CommonPrefixes, it doesn't count towards MaxKey, we can skip + # the entry without increasing the counter. 
We need to iterate over all of these entries before + # returning the next continuation marker, to properly start at the next key after this CommonPrefix + if prefix_including_delimiter in common_prefixes: + continue + + # After skipping all entries, verify we're not over the MaxKeys before adding a new entry + if count >= max_keys: + is_truncated = True + next_continuation_token = to_str(base64.urlsafe_b64encode(s3_object.key.encode())) + break + + # if we found a new CommonPrefix, add it to the CommonPrefixes + # else, it means it's a new Object, add it to the Contents + if prefix_including_delimiter: + common_prefixes.add(prefix_including_delimiter) + else: + # TODO: add RestoreStatus if present + object_data = Object( + Key=key, + ETag=s3_object.quoted_etag, + Size=s3_object.size, + LastModified=s3_object.last_modified, + StorageClass=s3_object.storage_class, + ) + + if fetch_owner: + object_data["Owner"] = s3_bucket.owner + + if s3_object.checksum_algorithm: + object_data["ChecksumAlgorithm"] = [s3_object.checksum_algorithm] + object_data["ChecksumType"] = getattr( + s3_object, "checksum_type", ChecksumType.FULL_OBJECT + ) + + s3_objects.append(object_data) + + # we just added either a CommonPrefix or an Object to the List, increase the counter by one + count += 1 + + common_prefixes = [CommonPrefix(Prefix=prefix) for prefix in sorted(common_prefixes)] + + response = ListObjectsV2Output( + IsTruncated=is_truncated, + Name=bucket, + MaxKeys=max_keys, + Prefix=prefix or "", + KeyCount=count, + ) + if s3_objects: + response["Contents"] = s3_objects + if encoding_type: + response["EncodingType"] = EncodingType.url + if delimiter: + response["Delimiter"] = delimiter + if common_prefixes: + response["CommonPrefixes"] = common_prefixes + if next_continuation_token: + response["NextContinuationToken"] = next_continuation_token + + if continuation_token: + response["ContinuationToken"] = continuation_token + elif start_after: + response["StartAfter"] = start_after + + if s3_bucket.bucket_region != "us-east-1": + response["BucketRegion"] = s3_bucket.bucket_region + + # RequestCharged: Optional[RequestCharged] # TODO + return response + + def list_object_versions( + self, + context: RequestContext, + bucket: BucketName, + delimiter: Delimiter = None, + encoding_type: EncodingType = None, + key_marker: KeyMarker = None, + max_keys: MaxKeys = None, + prefix: Prefix = None, + version_id_marker: VersionIdMarker = None, + expected_bucket_owner: AccountId = None, + request_payer: RequestPayer = None, + optional_object_attributes: OptionalObjectAttributesList = None, + **kwargs, + ) -> ListObjectVersionsOutput: + if version_id_marker and not key_marker: + raise InvalidArgument( + "A version-id marker cannot be specified without a key marker.", + ArgumentName="version-id-marker", + ArgumentValue=version_id_marker, + ) + + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + common_prefixes = set() + count = 0 + is_truncated = False + next_key_marker = None + next_version_id_marker = None + max_keys = max_keys or 1000 + prefix = prefix or "" + delimiter = delimiter or "" + if encoding_type: + prefix = urlparse.quote(prefix) + delimiter = urlparse.quote(delimiter) + version_key_marker_found = False + + object_versions: list[ObjectVersion] = [] + delete_markers: list[DeleteMarkerEntry] = [] + + all_versions = s3_bucket.objects.values(with_versions=True) + # sort by key, and last-modified-date, to get the last version first + all_versions.sort(key=lambda r: (r.key, 
-r.last_modified.timestamp()))
+        last_version = all_versions[-1] if all_versions else None
+
+        for version in all_versions:
+            key = urlparse.quote(version.key) if encoding_type else version.key
+            # skip all keys that alphabetically come before key_marker
+            if key_marker:
+                if key < key_marker:
+                    continue
+                elif key == key_marker:
+                    if not version_id_marker:
+                        continue
+                    # as the keys are ordered by time, once we find the key marker, we can return the next ones
+                    if version.version_id == version_id_marker:
+                        version_key_marker_found = True
+                        continue
+
+                    # it is possible that the version_id_marker related object has been deleted, in that case, start
+                    # as soon as the next version id is older than the version id marker (meaning this version was
+                    # next after the now-deleted version)
+                    elif is_version_older_than_other(version.version_id, version_id_marker):
+                        version_key_marker_found = True
+
+                    elif not version_key_marker_found:
+                        # as long as we have not passed the version_key_marker, skip the versions
+                        continue
+
+            # Filter for keys that start with prefix
+            if prefix and not key.startswith(prefix):
+                continue
+
+            # see ListObjectsV2 for the logic comments (shared logic here)
+            prefix_including_delimiter = None
+            if delimiter and delimiter in (key_no_prefix := key.removeprefix(prefix)):
+                pre_delimiter, _, _ = key_no_prefix.partition(delimiter)
+                prefix_including_delimiter = f"{prefix}{pre_delimiter}{delimiter}"
+
+            if prefix_including_delimiter in common_prefixes or (
+                key_marker and key_marker.startswith(prefix_including_delimiter)
+            ):
+                continue
+
+            if prefix_including_delimiter:
+                common_prefixes.add(prefix_including_delimiter)
+
+            elif isinstance(version, S3DeleteMarker):
+                delete_marker = DeleteMarkerEntry(
+                    Key=key,
+                    Owner=s3_bucket.owner,
+                    VersionId=version.version_id,
+                    IsLatest=version.is_current,
+                    LastModified=version.last_modified,
+                )
+                delete_markers.append(delete_marker)
+            else:
+                # TODO: add RestoreStatus if present
+                object_version = ObjectVersion(
+                    Key=key,
+                    ETag=version.quoted_etag,
+                    Owner=s3_bucket.owner,  # TODO: verify reality
+                    Size=version.size,
+                    VersionId=version.version_id or "null",
+                    LastModified=version.last_modified,
+                    IsLatest=version.is_current,
+                    # TODO: verify this, are other classes possible?
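+                    # note: the generated ObjectVersionStorageClass enum currently only defines
+                    # STANDARD, which is presumably why the value is hardcoded below instead of: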
+ # StorageClass=version.storage_class, + StorageClass=ObjectVersionStorageClass.STANDARD, + ) + + if version.checksum_algorithm: + object_version["ChecksumAlgorithm"] = [version.checksum_algorithm] + object_version["ChecksumType"] = getattr( + version, "checksum_type", ChecksumType.FULL_OBJECT + ) + + object_versions.append(object_version) + + # we just added a CommonPrefix, an Object or a DeleteMarker, increase the counter + count += 1 + if count >= max_keys and last_version.version_id != version.version_id: + is_truncated = True + if prefix_including_delimiter: + next_key_marker = prefix_including_delimiter + else: + next_key_marker = version.key + next_version_id_marker = version.version_id + break + + common_prefixes = [CommonPrefix(Prefix=prefix) for prefix in sorted(common_prefixes)] + + response = ListObjectVersionsOutput( + IsTruncated=is_truncated, + Name=bucket, + MaxKeys=max_keys, + Prefix=prefix, + KeyMarker=key_marker or "", + VersionIdMarker=version_id_marker or "", + ) + if object_versions: + response["Versions"] = object_versions + if encoding_type: + response["EncodingType"] = EncodingType.url + if delete_markers: + response["DeleteMarkers"] = delete_markers + if delimiter: + response["Delimiter"] = delimiter + if common_prefixes: + response["CommonPrefixes"] = common_prefixes + if next_key_marker: + response["NextKeyMarker"] = next_key_marker + if next_version_id_marker: + response["NextVersionIdMarker"] = next_version_id_marker + + # RequestCharged: Optional[RequestCharged] # TODO + return response + + @handler("GetObjectAttributes", expand=False) + def get_object_attributes( + self, + context: RequestContext, + request: GetObjectAttributesRequest, + ) -> GetObjectAttributesOutput: + bucket_name = request["Bucket"] + object_key = request["Key"] + store, s3_bucket = self._get_cross_account_bucket(context, bucket_name) + + s3_object = s3_bucket.get_object( + key=object_key, + version_id=request.get("VersionId"), + http_method="GET", + ) + + sse_c_key_md5 = request.get("SSECustomerKeyMD5") + if s3_object.sse_key_hash: + if not sse_c_key_md5: + raise InvalidRequest( + "The object was stored using a form of Server Side Encryption. " + "The correct parameters must be provided to retrieve the object." 
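+                    # i.e. the same SSE-C headers supplied when the object was uploaded must be
+                    # re-sent with this request: x-amz-server-side-encryption-customer-algorithm,
+                    # ...-customer-key and ...-customer-key-MD5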
+                )
+            elif s3_object.sse_key_hash != sse_c_key_md5:
+                raise AccessDenied("Access Denied")
+
+        validate_sse_c(
+            algorithm=request.get("SSECustomerAlgorithm"),
+            encryption_key=request.get("SSECustomerKey"),
+            encryption_key_md5=sse_c_key_md5,
+        )
+
+        object_attrs = request.get("ObjectAttributes", [])
+        response = GetObjectAttributesOutput()
+        if "ETag" in object_attrs:
+            response["ETag"] = s3_object.etag
+        if "StorageClass" in object_attrs:
+            response["StorageClass"] = s3_object.storage_class
+        if "ObjectSize" in object_attrs:
+            response["ObjectSize"] = s3_object.size
+        if "Checksum" in object_attrs and (checksum_algorithm := s3_object.checksum_algorithm):
+            if s3_object.parts:
+                checksum_value = s3_object.checksum_value.split("-")[0]
+            else:
+                checksum_value = s3_object.checksum_value
+            response["Checksum"] = {
+                f"Checksum{checksum_algorithm.upper()}": checksum_value,
+                "ChecksumType": getattr(s3_object, "checksum_type", ChecksumType.FULL_OBJECT),
+            }
+
+        response["LastModified"] = s3_object.last_modified
+
+        if s3_bucket.versioning_status:
+            response["VersionId"] = s3_object.version_id
+
+        if "ObjectParts" in object_attrs and s3_object.parts:
+            # TODO: implement ObjectParts; this is basically a simplified `ListParts` call on the object, we might
+            # need to store more data about the Parts once we implement checksums for them
+            response["ObjectParts"] = GetObjectAttributesParts(TotalPartsCount=len(s3_object.parts))
+
+        return response
+
+    def restore_object(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        key: ObjectKey,
+        version_id: ObjectVersionId = None,
+        restore_request: RestoreRequest = None,
+        request_payer: RequestPayer = None,
+        checksum_algorithm: ChecksumAlgorithm = None,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> RestoreObjectOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        s3_object = s3_bucket.get_object(
+            key=key,
+            version_id=version_id,
+            http_method="GET",  # TODO: verify http method
+        )
+        if s3_object.storage_class not in ARCHIVES_STORAGE_CLASSES:
+            raise InvalidObjectState(StorageClass=s3_object.storage_class)
+
+        # TODO: moto only supported the "Days" parameter from RestoreRequest and ignored the others;
+        # we only implement the same functionality for now
+
+        # if a restore request was already made and the object is available, updating it returns status code 200
+        status_code = 200 if s3_object.restore else 202
+        restore_days = restore_request.get("Days")
+        if not restore_days:
+            LOG.debug("LocalStack does not support restore SELECT requests yet.")
+            return RestoreObjectOutput()
+
+        restore_expiration_date = add_expiration_days_to_datetime(
+            datetime.datetime.now(datetime.UTC), restore_days
+        )
+        # TODO: add a way to transition from ongoing-request=true to false?
for now it is instant + s3_object.restore = f'ongoing-request="false", expiry-date="{restore_expiration_date}"' + + s3_notif_ctx_initiated = S3EventNotificationContext.from_request_context_native( + context, + s3_bucket=s3_bucket, + s3_object=s3_object, + ) + self._notify(context, s3_bucket=s3_bucket, s3_notif_ctx=s3_notif_ctx_initiated) + # But because it's instant in LocalStack, we can directly send the Completed notification as well + # We just need to copy the context so that we don't mutate the first context while it could be sent + # And modify its event type from `ObjectRestore:Post` to `ObjectRestore:Completed` + s3_notif_ctx_completed = copy.copy(s3_notif_ctx_initiated) + s3_notif_ctx_completed.event_type = s3_notif_ctx_completed.event_type.replace( + "Post", "Completed" + ) + self._notify(context, s3_bucket=s3_bucket, s3_notif_ctx=s3_notif_ctx_completed) + + # TODO: request charged + return RestoreObjectOutput(StatusCode=status_code) + + @handler("CreateMultipartUpload", expand=False) + def create_multipart_upload( + self, + context: RequestContext, + request: CreateMultipartUploadRequest, + ) -> CreateMultipartUploadOutput: + # TODO: handle missing parameters: + # request_payer: RequestPayer = None, + bucket_name = request["Bucket"] + store, s3_bucket = self._get_cross_account_bucket(context, bucket_name) + + if (storage_class := request.get("StorageClass")) is not None and ( + storage_class not in STORAGE_CLASSES or storage_class == StorageClass.OUTPOSTS + ): + raise InvalidStorageClass( + "The storage class you specified is not valid", StorageClassRequested=storage_class + ) + + if not config.S3_SKIP_KMS_KEY_VALIDATION and (sse_kms_key_id := request.get("SSEKMSKeyId")): + validate_kms_key_id(sse_kms_key_id, s3_bucket) + + if tagging := request.get("Tagging"): + tagging = parse_tagging_header(tagging_header=tagging) + + key = request["Key"] + + system_metadata = get_system_metadata_from_request(request) + if not system_metadata.get("ContentType"): + system_metadata["ContentType"] = "binary/octet-stream" + + checksum_algorithm = request.get("ChecksumAlgorithm") + if checksum_algorithm and checksum_algorithm not in CHECKSUM_ALGORITHMS: + raise InvalidRequest( + "Checksum algorithm provided is unsupported. Please try again with any of the valid types: [CRC32, CRC32C, SHA1, SHA256]" + ) + + if not (checksum_type := request.get("ChecksumType")) and checksum_algorithm: + if checksum_algorithm == ChecksumAlgorithm.CRC64NVME: + checksum_type = ChecksumType.FULL_OBJECT + else: + checksum_type = ChecksumType.COMPOSITE + elif checksum_type and not checksum_algorithm: + raise InvalidRequest( + "The x-amz-checksum-type header can only be used with the x-amz-checksum-algorithm header." + ) + + if ( + checksum_type == ChecksumType.COMPOSITE + and checksum_algorithm == ChecksumAlgorithm.CRC64NVME + ): + raise InvalidRequest( + "The COMPOSITE checksum type cannot be used with the crc64nvme checksum algorithm." + ) + elif checksum_type == ChecksumType.FULL_OBJECT and checksum_algorithm.upper().startswith( + "SHA" + ): + raise InvalidRequest( + f"The FULL_OBJECT checksum type cannot be used with the {checksum_algorithm.lower()} checksum algorithm." 
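+                # summary of the combinations rejected above (S3 data-integrity rules):
+                #   COMPOSITE   -> CRC32, CRC32C, SHA1, SHA256 (CRC64NVME not allowed)
+                #   FULL_OBJECT -> CRC32, CRC32C, CRC64NVME (SHA1/SHA256 not allowed)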
+ ) + + # TODO: we're not encrypting the object with the provided key for now + sse_c_key_md5 = request.get("SSECustomerKeyMD5") + validate_sse_c( + algorithm=request.get("SSECustomerAlgorithm"), + encryption_key=request.get("SSECustomerKey"), + encryption_key_md5=sse_c_key_md5, + server_side_encryption=request.get("ServerSideEncryption"), + ) + + encryption_parameters = get_encryption_parameters_from_request_and_bucket( + request, + s3_bucket, + store, + ) + lock_parameters = get_object_lock_parameters_from_bucket_and_request(request, s3_bucket) + + acl = get_access_control_policy_for_new_resource_request(request, owner=s3_bucket.owner) + + # validate encryption values + s3_multipart = S3Multipart( + key=key, + storage_class=storage_class, + expires=request.get("Expires"), + user_metadata=request.get("Metadata"), + system_metadata=system_metadata, + checksum_algorithm=checksum_algorithm, + checksum_type=checksum_type, + encryption=encryption_parameters.encryption, + kms_key_id=encryption_parameters.kms_key_id, + bucket_key_enabled=encryption_parameters.bucket_key_enabled, + sse_key_hash=sse_c_key_md5, + lock_mode=lock_parameters.lock_mode, + lock_legal_status=lock_parameters.lock_legal_status, + lock_until=lock_parameters.lock_until, + website_redirect_location=request.get("WebsiteRedirectLocation"), + expiration=None, # TODO, from lifecycle, or should it be updated with config? + acl=acl, + initiator=get_owner_for_account_id(context.account_id), + tagging=tagging, + owner=s3_bucket.owner, + precondition=object_exists_for_precondition_write(s3_bucket, key), + ) + # it seems if there is SSE-C on the multipart, AWS S3 will override the default Checksum behavior (but not on + # PutObject) + if sse_c_key_md5: + s3_multipart.object.checksum_algorithm = None + + s3_bucket.multiparts[s3_multipart.id] = s3_multipart + + response = CreateMultipartUploadOutput( + Bucket=bucket_name, Key=key, UploadId=s3_multipart.id + ) + + if checksum_algorithm: + response["ChecksumAlgorithm"] = checksum_algorithm + response["ChecksumType"] = checksum_type + + add_encryption_to_response(response, s3_object=s3_multipart.object) + if sse_c_key_md5: + response["SSECustomerAlgorithm"] = "AES256" + response["SSECustomerKeyMD5"] = sse_c_key_md5 + + # TODO: missing response fields we're not currently supporting + # - AbortDate: lifecycle related,not currently supported, todo + # - AbortRuleId: lifecycle related, not currently supported, todo + # - RequestCharged: todo + + return response + + @handler("UploadPart", expand=False) + def upload_part( + self, + context: RequestContext, + request: UploadPartRequest, + ) -> UploadPartOutput: + # TODO: missing following parameters: + # content_length: ContentLength = None, ->validate? + # content_md5: ContentMD5 = None, -> validate? + # request_payer: RequestPayer = None, + bucket_name = request["Bucket"] + store, s3_bucket = self._get_cross_account_bucket(context, bucket_name) + + upload_id = request.get("UploadId") + if not ( + s3_multipart := s3_bucket.multiparts.get(upload_id) + ) or s3_multipart.object.key != request.get("Key"): + raise NoSuchUpload( + "The specified upload does not exist. 
" + "The upload ID may be invalid, or the upload may have been aborted or completed.", + UploadId=upload_id, + ) + elif (part_number := request.get("PartNumber", 0)) < 1 or part_number > 10000: + raise InvalidArgument( + "Part number must be an integer between 1 and 10000, inclusive", + ArgumentName="partNumber", + ArgumentValue=part_number, + ) + + if content_md5 := request.get("ContentMD5"): + # assert that the received ContentMD5 is a properly b64 encoded value that fits a MD5 hash length + if not base_64_content_md5_to_etag(content_md5): + raise InvalidDigest( + "The Content-MD5 you specified was invalid.", + Content_MD5=content_md5, + ) + + checksum_algorithm = get_s3_checksum_algorithm_from_request(request) + checksum_value = ( + request.get(f"Checksum{checksum_algorithm.upper()}") if checksum_algorithm else None + ) + + # TODO: we're not encrypting the object with the provided key for now + sse_c_key_md5 = request.get("SSECustomerKeyMD5") + validate_sse_c( + algorithm=request.get("SSECustomerAlgorithm"), + encryption_key=request.get("SSECustomerKey"), + encryption_key_md5=sse_c_key_md5, + ) + + if (s3_multipart.object.sse_key_hash and not sse_c_key_md5) or ( + sse_c_key_md5 and not s3_multipart.object.sse_key_hash + ): + raise InvalidRequest( + "The multipart upload initiate requested encryption. " + "Subsequent part requests must include the appropriate encryption parameters." + ) + elif ( + s3_multipart.object.sse_key_hash + and sse_c_key_md5 + and s3_multipart.object.sse_key_hash != sse_c_key_md5 + ): + raise InvalidRequest( + "The provided encryption parameters did not match the ones used originally." + ) + + s3_part = S3Part( + part_number=part_number, + checksum_algorithm=checksum_algorithm, + checksum_value=checksum_value, + ) + body = request.get("Body") + headers = context.request.headers + is_aws_chunked = headers.get("x-amz-content-sha256", "").startswith( + "STREAMING-" + ) or "aws-chunked" in headers.get("content-encoding", "") + # check if chunked request + if is_aws_chunked: + checksum_algorithm = ( + checksum_algorithm + or get_s3_checksum_algorithm_from_trailing_headers(headers.get("x-amz-trailer", "")) + ) + if checksum_algorithm: + s3_part.checksum_algorithm = checksum_algorithm + + decoded_content_length = int(headers.get("x-amz-decoded-content-length", 0)) + body = AwsChunkedDecoder(body, decoded_content_length, s3_part) + + if ( + s3_multipart.checksum_algorithm + and s3_part.checksum_algorithm != s3_multipart.checksum_algorithm + ): + error_req_checksum = checksum_algorithm.lower() if checksum_algorithm else "null" + error_mp_checksum = ( + s3_multipart.object.checksum_algorithm.lower() + if s3_multipart.object.checksum_algorithm + else "null" + ) + if not error_mp_checksum == "null": + raise InvalidRequest( + f"Checksum Type mismatch occurred, expected checksum Type: {error_mp_checksum}, actual checksum Type: {error_req_checksum}" + ) + + stored_multipart = self._storage_backend.get_multipart(bucket_name, s3_multipart) + with stored_multipart.open(s3_part, mode="w") as stored_s3_part: + try: + stored_s3_part.write(body) + except Exception: + stored_multipart.remove_part(s3_part) + raise + + if checksum_algorithm: + if not validate_checksum_value(s3_part.checksum_value, checksum_algorithm): + stored_multipart.remove_part(s3_part) + raise InvalidRequest( + f"Value for x-amz-checksum-{s3_part.checksum_algorithm.lower()} header is invalid." 
+ ) + elif s3_part.checksum_value != stored_s3_part.checksum: + stored_multipart.remove_part(s3_part) + raise BadDigest( + f"The {checksum_algorithm.upper()} you specified did not match the calculated checksum." + ) + + if content_md5: + calculated_md5 = etag_to_base_64_content_md5(s3_part.etag) + if calculated_md5 != content_md5: + stored_multipart.remove_part(s3_part) + raise BadDigest( + "The Content-MD5 you specified did not match what we received.", + ExpectedDigest=content_md5, + CalculatedDigest=calculated_md5, + ) + + s3_multipart.parts[part_number] = s3_part + + response = UploadPartOutput( + ETag=s3_part.quoted_etag, + ) + + add_encryption_to_response(response, s3_object=s3_multipart.object) + if sse_c_key_md5: + response["SSECustomerAlgorithm"] = "AES256" + response["SSECustomerKeyMD5"] = sse_c_key_md5 + + if s3_part.checksum_algorithm: + response[f"Checksum{s3_part.checksum_algorithm.upper()}"] = s3_part.checksum_value + + # TODO: RequestCharged: Optional[RequestCharged] + return response + + @handler("UploadPartCopy", expand=False) + def upload_part_copy( + self, + context: RequestContext, + request: UploadPartCopyRequest, + ) -> UploadPartCopyOutput: + # TODO: handle following parameters: + # copy_source_if_match: CopySourceIfMatch = None, + # copy_source_if_modified_since: CopySourceIfModifiedSince = None, + # copy_source_if_none_match: CopySourceIfNoneMatch = None, + # copy_source_if_unmodified_since: CopySourceIfUnmodifiedSince = None, + # request_payer: RequestPayer = None, + dest_bucket = request["Bucket"] + dest_key = request["Key"] + store = self.get_store(context.account_id, context.region) + # TODO: validate cross-account UploadPartCopy + if not (dest_s3_bucket := store.buckets.get(dest_bucket)): + raise NoSuchBucket("The specified bucket does not exist", BucketName=dest_bucket) + + src_bucket, src_key, src_version_id = extract_bucket_key_version_id_from_copy_source( + request.get("CopySource") + ) + + if not (src_s3_bucket := store.buckets.get(src_bucket)): + raise NoSuchBucket("The specified bucket does not exist", BucketName=src_bucket) + + # if the object is a delete marker, get_object will raise NotFound if no versionId, like AWS + try: + src_s3_object = src_s3_bucket.get_object(key=src_key, version_id=src_version_id) + except MethodNotAllowed: + raise InvalidRequest( + "The source of a copy request may not specifically refer to a delete marker by version id." + ) + + if src_s3_object.storage_class in ARCHIVES_STORAGE_CLASSES and not src_s3_object.restore: + raise InvalidObjectState( + "Operation is not valid for the source object's storage class", + StorageClass=src_s3_object.storage_class, + ) + + upload_id = request.get("UploadId") + if ( + not (s3_multipart := dest_s3_bucket.multiparts.get(upload_id)) + or s3_multipart.object.key != dest_key + ): + raise NoSuchUpload( + "The specified upload does not exist. 
" + "The upload ID may be invalid, or the upload may have been aborted or completed.", + UploadId=upload_id, + ) + + elif (part_number := request.get("PartNumber", 0)) < 1 or part_number > 10000: + raise InvalidArgument( + "Part number must be an integer between 1 and 10000, inclusive", + ArgumentName="partNumber", + ArgumentValue=part_number, + ) + + source_range = request.get("CopySourceRange") + # TODO implement copy source IF (done in ASF provider) + + range_data: Optional[ObjectRange] = None + if source_range: + range_data = parse_copy_source_range_header(source_range, src_s3_object.size) + + s3_part = S3Part(part_number=part_number) + + stored_multipart = self._storage_backend.get_multipart(dest_bucket, s3_multipart) + stored_multipart.copy_from_object(s3_part, src_bucket, src_s3_object, range_data) + + s3_multipart.parts[part_number] = s3_part + + # TODO: return those fields (checksum not handled currently in moto for parts) + # ChecksumCRC32: Optional[ChecksumCRC32] + # ChecksumCRC32C: Optional[ChecksumCRC32C] + # ChecksumSHA1: Optional[ChecksumSHA1] + # ChecksumSHA256: Optional[ChecksumSHA256] + # RequestCharged: Optional[RequestCharged] + + result = CopyPartResult( + ETag=s3_part.quoted_etag, + LastModified=s3_part.last_modified, + ) + + response = UploadPartCopyOutput( + CopyPartResult=result, + ) + + if src_s3_bucket.versioning_status and src_s3_object.version_id: + response["CopySourceVersionId"] = src_s3_object.version_id + + add_encryption_to_response(response, s3_object=s3_multipart.object) + + return response + + def complete_multipart_upload( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + upload_id: MultipartUploadId, + multipart_upload: CompletedMultipartUpload = None, + checksum_crc32: ChecksumCRC32 = None, + checksum_crc32_c: ChecksumCRC32C = None, + checksum_crc64_nvme: ChecksumCRC64NVME = None, + checksum_sha1: ChecksumSHA1 = None, + checksum_sha256: ChecksumSHA256 = None, + checksum_type: ChecksumType = None, + mpu_object_size: MpuObjectSize = None, + request_payer: RequestPayer = None, + expected_bucket_owner: AccountId = None, + if_match: IfMatch = None, + if_none_match: IfNoneMatch = None, + sse_customer_algorithm: SSECustomerAlgorithm = None, + sse_customer_key: SSECustomerKey = None, + sse_customer_key_md5: SSECustomerKeyMD5 = None, + **kwargs, + ) -> CompleteMultipartUploadOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if ( + not (s3_multipart := s3_bucket.multiparts.get(upload_id)) + or s3_multipart.object.key != key + ): + raise NoSuchUpload( + "The specified upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.",
+                UploadId=upload_id,
+            )
+
+        if if_none_match and if_match:
+            raise NotImplementedException(
+                "A header you provided implies functionality that is not implemented",
+                Header="If-Match,If-None-Match",
+                additionalMessage="Multiple conditional request headers present in the request",
+            )
+
+        elif if_none_match:
+            if if_none_match != "*":
+                raise NotImplementedException(
+                    "A header you provided implies functionality that is not implemented",
+                    Header="If-None-Match",
+                    additionalMessage="We don't accept the provided value of If-None-Match header for this API",
+                )
+            if object_exists_for_precondition_write(s3_bucket, key):
+                raise PreconditionFailed(
+                    "At least one of the pre-conditions you specified did not hold",
+                    Condition="If-None-Match",
+                )
+            elif s3_multipart.precondition:
+                raise ConditionalRequestConflict(
+                    "The conditional request cannot succeed due to a conflicting operation against this resource.",
+                    Condition="If-None-Match",
+                    Key=key,
+                )
+
+        elif if_match:
+            if if_match == "*":
+                raise NotImplementedException(
+                    "A header you provided implies functionality that is not implemented",
+                    Header="If-Match",
+                    additionalMessage="We don't accept the provided value of If-Match header for this API",
+                )
+            verify_object_equality_precondition_write(
+                s3_bucket, key, if_match, initiated=s3_multipart.initiated
+            )
+
+        parts = multipart_upload.get("Parts", [])
+        if not parts:
+            raise InvalidRequest("You must specify at least one part")
+
+        parts_numbers = [part.get("PartNumber") for part in parts]
+        # TODO: it seems that with new S3 data integrity, sorting might not be mandatory depending on checksum type
+        # see https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+        # sorted() is effectively linear on an already-sorted list, which should be the common case
+        if sorted(parts_numbers) != parts_numbers:
+            raise InvalidPartOrder(
+                "The list of parts was not in ascending order. Parts must be ordered by part number.",
+                UploadId=upload_id,
+            )
+
+        mpu_checksum_algorithm = s3_multipart.checksum_algorithm
+        mpu_checksum_type = getattr(s3_multipart, "checksum_type", None)
+
+        if checksum_type and checksum_type != mpu_checksum_type:
+            raise InvalidRequest(
+                f"The upload was created using the {mpu_checksum_type or 'null'} checksum mode. "
+                f"The complete request must use the same checksum mode."
+            )
+
+        # generate the versionId before completing, in case the bucket versioning status has changed between
+        # creation and completion? AWS validates this
+        version_id = generate_version_id(s3_bucket.versioning_status)
+        s3_multipart.object.version_id = version_id
+
+        # we're inspecting the signature of `complete_multipart`, in case the multipart has been restored from
+        # persistence. if the restored instance does not expose the new parameters, do not validate them
+        # TODO: remove for next major version (minor?)
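+        # a minimal sketch of the feature-detection idiom used below (stdlib `inspect` only):
+        #   from inspect import signature
+        #   if "mpu_size" in signature(s3_multipart.complete_multipart).parameters:
+        #       ...  # new-style call, validates mpu_object_size and the provided checksum
+        #   else:
+        #       ...  # legacy instance restored from persistence, takes only `parts`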
+ if signature(s3_multipart.complete_multipart).parameters.get("mpu_size"): + checksum_algorithm = mpu_checksum_algorithm.lower() if mpu_checksum_algorithm else None + checksum_map = { + "crc32": checksum_crc32, + "crc32c": checksum_crc32_c, + "crc64nvme": checksum_crc64_nvme, + "sha1": checksum_sha1, + "sha256": checksum_sha256, + } + checksum_value = checksum_map.get(checksum_algorithm) + s3_multipart.complete_multipart( + parts, mpu_size=mpu_object_size, validation_checksum=checksum_value + ) + if mpu_checksum_algorithm and ( + ( + checksum_value + and mpu_checksum_type == ChecksumType.FULL_OBJECT + and not checksum_type + ) + or any( + checksum_value + for checksum_type, checksum_value in checksum_map.items() + if checksum_type != checksum_algorithm + ) + ): + # this is not ideal, but this validation comes last... after the validation of individual parts + s3_multipart.object.parts.clear() + raise BadDigest( + f"The {mpu_checksum_algorithm.lower()} you specified did not match the calculated checksum." + ) + else: + s3_multipart.complete_multipart(parts) + + stored_multipart = self._storage_backend.get_multipart(bucket, s3_multipart) + stored_multipart.complete_multipart( + [s3_multipart.parts.get(part_number) for part_number in parts_numbers] + ) + if not s3_multipart.checksum_algorithm and s3_multipart.object.checksum_algorithm: + with self._storage_backend.open( + bucket, s3_multipart.object, mode="r" + ) as s3_stored_object: + s3_multipart.object.checksum_value = s3_stored_object.checksum + s3_multipart.object.checksum_type = ChecksumType.FULL_OBJECT + + s3_object = s3_multipart.object + + s3_bucket.objects.set(key, s3_object) + + # remove the multipart now that it's complete + self._storage_backend.remove_multipart(bucket, s3_multipart) + s3_bucket.multiparts.pop(s3_multipart.id, None) + + key_id = get_unique_key_id(bucket, key, version_id) + store.TAGS.tags.pop(key_id, None) + if s3_multipart.tagging: + store.TAGS.tags[key_id] = s3_multipart.tagging + + # RequestCharged: Optional[RequestCharged] TODO + + response = CompleteMultipartUploadOutput( + Bucket=bucket, + Key=key, + ETag=s3_object.quoted_etag, + Location=f"{get_full_default_bucket_location(bucket)}{key}", + ) + + if s3_object.version_id: + response["VersionId"] = s3_object.version_id + + # it seems AWS is not returning checksum related fields if the object has KMS encryption ¯\_(ツ)_/¯ + # but it still generates them, and they can be retrieved with regular GetObject and such operations + if s3_object.checksum_algorithm and not s3_object.kms_key_id: + response[f"Checksum{s3_object.checksum_algorithm.upper()}"] = s3_object.checksum_value + response["ChecksumType"] = s3_object.checksum_type + + if s3_object.expiration: + response["Expiration"] = s3_object.expiration # TODO: properly parse the datetime + + add_encryption_to_response(response, s3_object=s3_object) + + self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object) + + return response + + def abort_multipart_upload( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + upload_id: MultipartUploadId, + request_payer: RequestPayer = None, + expected_bucket_owner: AccountId = None, + if_match_initiated_time: IfMatchInitiatedTime = None, + **kwargs, + ) -> AbortMultipartUploadOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if ( + not (s3_multipart := s3_bucket.multiparts.get(upload_id)) + or s3_multipart.object.key != key + ): + raise NoSuchUpload( + "The specified upload does not exist. 
" + "The upload ID may be invalid, or the upload may have been aborted or completed.", + UploadId=upload_id, + ) + s3_bucket.multiparts.pop(upload_id, None) + + self._storage_backend.remove_multipart(bucket, s3_multipart) + response = AbortMultipartUploadOutput() + # TODO: requestCharged + return response + + def list_parts( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + upload_id: MultipartUploadId, + max_parts: MaxParts = None, + part_number_marker: PartNumberMarker = None, + request_payer: RequestPayer = None, + expected_bucket_owner: AccountId = None, + sse_customer_algorithm: SSECustomerAlgorithm = None, + sse_customer_key: SSECustomerKey = None, + sse_customer_key_md5: SSECustomerKeyMD5 = None, + **kwargs, + ) -> ListPartsOutput: + # TODO: implement MaxParts + # TODO: implements PartNumberMarker + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if ( + not (s3_multipart := s3_bucket.multiparts.get(upload_id)) + or s3_multipart.object.key != key + ): + raise NoSuchUpload( + "The specified upload does not exist. " + "The upload ID may be invalid, or the upload may have been aborted or completed.", + UploadId=upload_id, + ) + + # AbortDate: Optional[AbortDate] TODO: lifecycle + # AbortRuleId: Optional[AbortRuleId] TODO: lifecycle + # RequestCharged: Optional[RequestCharged] + + count = 0 + is_truncated = False + part_number_marker = part_number_marker or 0 + max_parts = max_parts or 1000 + + parts = [] + all_parts = sorted(s3_multipart.parts.items()) + last_part_number = all_parts[-1][0] if all_parts else None + for part_number, part in all_parts: + if part_number <= part_number_marker: + continue + part_item = Part( + ETag=part.quoted_etag, + LastModified=part.last_modified, + PartNumber=part_number, + Size=part.size, + ) + if s3_multipart.checksum_algorithm: + part_item[f"Checksum{part.checksum_algorithm.upper()}"] = part.checksum_value + + parts.append(part_item) + count += 1 + + if count >= max_parts and part.part_number != last_part_number: + is_truncated = True + break + + response = ListPartsOutput( + Bucket=bucket, + Key=key, + UploadId=upload_id, + Initiator=s3_multipart.initiator, + Owner=s3_multipart.initiator, + StorageClass=s3_multipart.object.storage_class, + IsTruncated=is_truncated, + MaxParts=max_parts, + PartNumberMarker=0, + NextPartNumberMarker=0, + ) + if parts: + response["Parts"] = parts + last_part = parts[-1]["PartNumber"] + response["NextPartNumberMarker"] = last_part + + if part_number_marker: + response["PartNumberMarker"] = part_number_marker + if s3_multipart.checksum_algorithm: + response["ChecksumAlgorithm"] = s3_multipart.object.checksum_algorithm + response["ChecksumType"] = getattr(s3_multipart, "checksum_type", None) + + return response + + def list_multipart_uploads( + self, + context: RequestContext, + bucket: BucketName, + delimiter: Delimiter = None, + encoding_type: EncodingType = None, + key_marker: KeyMarker = None, + max_uploads: MaxUploads = None, + prefix: Prefix = None, + upload_id_marker: UploadIdMarker = None, + expected_bucket_owner: AccountId = None, + request_payer: RequestPayer = None, + **kwargs, + ) -> ListMultipartUploadsOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + common_prefixes = set() + count = 0 + is_truncated = False + max_uploads = max_uploads or 1000 + prefix = prefix or "" + delimiter = delimiter or "" + if encoding_type: + prefix = urlparse.quote(prefix) + delimiter = urlparse.quote(delimiter) + upload_id_marker_found = False + + if 
key_marker and upload_id_marker:
+            multipart = s3_bucket.multiparts.get(upload_id_marker)
+            if multipart:
+                key = (
+                    urlparse.quote(multipart.object.key) if encoding_type else multipart.object.key
+                )
+            else:
+                # set key to None so it fails if the multipart is not found
+                key = None
+
+            if key_marker != key:
+                raise InvalidArgument(
+                    "Invalid uploadId marker",
+                    ArgumentName="upload-id-marker",
+                    ArgumentValue=upload_id_marker,
+                )
+
+        uploads = []
+        # sort by key and initiated
+        all_multiparts = sorted(
+            s3_bucket.multiparts.values(), key=lambda r: (r.object.key, r.initiated.timestamp())
+        )
+        last_multipart = all_multiparts[-1] if all_multiparts else None
+
+        for multipart in all_multiparts:
+            key = urlparse.quote(multipart.object.key) if encoding_type else multipart.object.key
+            # skip all keys that alphabetically come before key_marker
+            if key_marker:
+                if key < key_marker:
+                    continue
+                elif key == key_marker:
+                    if not upload_id_marker:
+                        continue
+                    # as the uploads are ordered by time, once we find the upload-id marker, we can return the next ones
+                    if multipart.id == upload_id_marker:
+                        upload_id_marker_found = True
+                        continue
+                    elif not upload_id_marker_found:
+                        # as long as we have not passed the upload_id_marker, skip the uploads
+                        continue
+
+            # Filter for keys that start with prefix
+            if prefix and not key.startswith(prefix):
+                continue
+
+            # see ListObjectsV2 for the logic comments (shared logic here)
+            prefix_including_delimiter = None
+            if delimiter and delimiter in (key_no_prefix := key.removeprefix(prefix)):
+                pre_delimiter, _, _ = key_no_prefix.partition(delimiter)
+                prefix_including_delimiter = f"{prefix}{pre_delimiter}{delimiter}"
+
+            if prefix_including_delimiter in common_prefixes or (
+                key_marker and key_marker.startswith(prefix_including_delimiter)
+            ):
+                continue
+
+            if prefix_including_delimiter:
+                common_prefixes.add(prefix_including_delimiter)
+            else:
+                multipart_upload = MultipartUpload(
+                    UploadId=multipart.id,
+                    Key=multipart.object.key,
+                    Initiated=multipart.initiated,
+                    StorageClass=multipart.object.storage_class,
+                    Owner=multipart.initiator,  # TODO: check the difference
+                    Initiator=multipart.initiator,
+                )
+                if multipart.checksum_algorithm:
+                    multipart_upload["ChecksumAlgorithm"] = multipart.checksum_algorithm
+                    multipart_upload["ChecksumType"] = getattr(multipart, "checksum_type", None)
+
+                uploads.append(multipart_upload)
+
+            count += 1
+            if count >= max_uploads and last_multipart.id != multipart.id:
+                is_truncated = True
+                break
+
+        common_prefixes = [CommonPrefix(Prefix=prefix) for prefix in sorted(common_prefixes)]
+
+        response = ListMultipartUploadsOutput(
+            Bucket=bucket,
+            IsTruncated=is_truncated,
+            MaxUploads=max_uploads or 1000,
+            KeyMarker=key_marker or "",
+            UploadIdMarker=(upload_id_marker or "") if key_marker else "",
+            NextKeyMarker="",
+            NextUploadIdMarker="",
+        )
+        if uploads:
+            response["Uploads"] = uploads
+            last_upload = uploads[-1]
+            response["NextKeyMarker"] = last_upload["Key"]
+            response["NextUploadIdMarker"] = last_upload["UploadId"]
+        if delimiter:
+            response["Delimiter"] = delimiter
+        if prefix:
+            response["Prefix"] = prefix
+        if encoding_type:
+            response["EncodingType"] = EncodingType.url
+        if common_prefixes:
+            response["CommonPrefixes"] = common_prefixes
+
+        return response
+
+    def put_bucket_versioning(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        versioning_configuration: VersioningConfiguration,
+        content_md5: ContentMD5 = None,
+        checksum_algorithm: ChecksumAlgorithm = None,
+        mfa: MFA = None,
+        expected_bucket_owner:
AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + if not (versioning_status := versioning_configuration.get("Status")): + raise CommonServiceException( + code="IllegalVersioningConfigurationException", + message="The Versioning element must be specified", + ) + + if versioning_status not in ("Enabled", "Suspended"): + raise MalformedXML() + + if s3_bucket.object_lock_enabled and versioning_status == "Suspended": + raise InvalidBucketState( + "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed." + ) + + if not s3_bucket.versioning_status: + s3_bucket.objects = VersionedKeyStore.from_key_store(s3_bucket.objects) + + s3_bucket.versioning_status = versioning_status + + def get_bucket_versioning( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketVersioningOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.versioning_status: + return GetBucketVersioningOutput() + + return GetBucketVersioningOutput(Status=s3_bucket.versioning_status) + + def get_bucket_encryption( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketEncryptionOutput: + # AWS now encrypts bucket by default with AES256, see: + # https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-bucket-encryption.html + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.encryption_rule: + return GetBucketEncryptionOutput() + + return GetBucketEncryptionOutput( + ServerSideEncryptionConfiguration={"Rules": [s3_bucket.encryption_rule]} + ) + + def put_bucket_encryption( + self, + context: RequestContext, + bucket: BucketName, + server_side_encryption_configuration: ServerSideEncryptionConfiguration, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not (rules := server_side_encryption_configuration.get("Rules")): + raise MalformedXML() + + if len(rules) != 1 or not ( + encryption := rules[0].get("ApplyServerSideEncryptionByDefault") + ): + raise MalformedXML() + + if not (sse_algorithm := encryption.get("SSEAlgorithm")): + raise MalformedXML() + + if sse_algorithm not in SSE_ALGORITHMS: + raise MalformedXML() + + if sse_algorithm != ServerSideEncryption.aws_kms and "KMSMasterKeyID" in encryption: + raise InvalidArgument( + "a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms or aws:kms:dsse", + ArgumentName="ApplyServerSideEncryptionByDefault", + ) + # elif master_kms_key := encryption.get("KMSMasterKeyID"): + # TODO: validate KMS key? not currently done in moto + # You can pass either the KeyId or the KeyArn. If cross-account, it has to be the ARN. + # It's always saved as the ARN in the bucket configuration. 
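+        # for illustration, both the bare KeyId "1234abcd-12ab-34cd-56ef-1234567890ab" and the
+        # full ARN "arn:aws:kms:us-east-1:000000000000:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+        # would be accepted, but only the ARN form can reference a cross-account key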
+ # kms_key_arn = get_kms_key_arn(master_kms_key, s3_bucket.bucket_account_id) + # encryption["KMSMasterKeyID"] = master_kms_key + + s3_bucket.encryption_rule = rules[0] + + def delete_bucket_encryption( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + s3_bucket.encryption_rule = None + + def put_bucket_notification_configuration( + self, + context: RequestContext, + bucket: BucketName, + notification_configuration: NotificationConfiguration, + expected_bucket_owner: AccountId = None, + skip_destination_validation: SkipValidation = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + self._verify_notification_configuration( + notification_configuration, skip_destination_validation, context, bucket + ) + s3_bucket.notification_configuration = notification_configuration + + def get_bucket_notification_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> NotificationConfiguration: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + return s3_bucket.notification_configuration or NotificationConfiguration() + + def put_bucket_tagging( + self, + context: RequestContext, + bucket: BucketName, + tagging: Tagging, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if "TagSet" not in tagging: + raise MalformedXML() + + validate_tag_set(tagging["TagSet"], type_set="bucket") + + # remove the previous tags before setting the new ones, it overwrites the whole TagSet + store.TAGS.tags.pop(s3_bucket.bucket_arn, None) + store.TAGS.tag_resource(s3_bucket.bucket_arn, tags=tagging["TagSet"]) + + def get_bucket_tagging( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketTaggingOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + tag_set = store.TAGS.list_tags_for_resource(s3_bucket.bucket_arn, root_name="Tags")["Tags"] + if not tag_set: + raise NoSuchTagSet( + "The TagSet does not exist", + BucketName=bucket, + ) + + return GetBucketTaggingOutput(TagSet=tag_set) + + def delete_bucket_tagging( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + store.TAGS.tags.pop(s3_bucket.bucket_arn, None) + + def put_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + tagging: Tagging, + version_id: ObjectVersionId = None, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + expected_bucket_owner: AccountId = None, + request_payer: RequestPayer = None, + **kwargs, + ) -> PutObjectTaggingOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + s3_object = s3_bucket.get_object(key=key, version_id=version_id, http_method="PUT") + + if "TagSet" not in tagging: + raise MalformedXML() + + validate_tag_set(tagging["TagSet"], type_set="object") + + key_id = get_unique_key_id(bucket, key, s3_object.version_id) + # remove the previous tags before setting the new ones, it overwrites the whole TagSet + store.TAGS.tags.pop(key_id, 
None) + store.TAGS.tag_resource(key_id, tags=tagging["TagSet"]) + response = PutObjectTaggingOutput() + if s3_object.version_id: + response["VersionId"] = s3_object.version_id + + self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object) + + return response + + def get_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId = None, + expected_bucket_owner: AccountId = None, + request_payer: RequestPayer = None, + **kwargs, + ) -> GetObjectTaggingOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + try: + s3_object = s3_bucket.get_object(key=key, version_id=version_id) + except NoSuchKey as e: + # it seems GetObjectTagging does not work like all other operations, so we need to raise a different + # exception. As we already need to catch it because of the format of the Key, it is not worth to modify the + # `S3Bucket.get_object` signature for one operation. + if s3_bucket.versioning_status and ( + s3_object_version := s3_bucket.objects.get(key, version_id) + ): + raise MethodNotAllowed( + "The specified method is not allowed against this resource.", + Method="GET", + ResourceType="DeleteMarker", + DeleteMarker=True, + Allow="DELETE", + VersionId=s3_object_version.version_id, + ) + + # There a weird AWS validated bug in S3: the returned key contains the bucket name as well + # follow AWS on this one + e.Key = f"{bucket}/{key}" + raise e + + tag_set = store.TAGS.list_tags_for_resource( + get_unique_key_id(bucket, key, s3_object.version_id) + )["Tags"] + response = GetObjectTaggingOutput(TagSet=tag_set) + if s3_object.version_id: + response["VersionId"] = s3_object.version_id + + return response + + def delete_object_tagging( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> DeleteObjectTaggingOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + s3_object = s3_bucket.get_object(key=key, version_id=version_id, http_method="DELETE") + + store.TAGS.tags.pop(get_unique_key_id(bucket, key, version_id), None) + response = DeleteObjectTaggingOutput() + if s3_object.version_id: + response["VersionId"] = s3_object.version_id + + self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object) + + return response + + def put_bucket_cors( + self, + context: RequestContext, + bucket: BucketName, + cors_configuration: CORSConfiguration, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + validate_cors_configuration(cors_configuration) + s3_bucket.cors_rules = cors_configuration + self._cors_handler.invalidate_cache() + + def get_bucket_cors( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketCorsOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.cors_rules: + raise NoSuchCORSConfiguration( + "The CORS configuration does not exist", + BucketName=bucket, + ) + return GetBucketCorsOutput(CORSRules=s3_bucket.cors_rules["CORSRules"]) + + def delete_bucket_cors( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if 
s3_bucket.cors_rules: + self._cors_handler.invalidate_cache() + s3_bucket.cors_rules = None + + def get_bucket_lifecycle_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketLifecycleConfigurationOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.lifecycle_rules: + raise NoSuchLifecycleConfiguration( + "The lifecycle configuration does not exist", + BucketName=bucket, + ) + + return GetBucketLifecycleConfigurationOutput( + Rules=s3_bucket.lifecycle_rules, + # TODO: remove for next major version, safe access to new attribute + TransitionDefaultMinimumObjectSize=getattr( + s3_bucket, + "transition_default_minimum_object_size", + TransitionDefaultMinimumObjectSize.all_storage_classes_128K, + ), + ) + + def put_bucket_lifecycle_configuration( + self, + context: RequestContext, + bucket: BucketName, + checksum_algorithm: ChecksumAlgorithm = None, + lifecycle_configuration: BucketLifecycleConfiguration = None, + expected_bucket_owner: AccountId = None, + transition_default_minimum_object_size: TransitionDefaultMinimumObjectSize = None, + **kwargs, + ) -> PutBucketLifecycleConfigurationOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + transition_min_obj_size = ( + transition_default_minimum_object_size + or TransitionDefaultMinimumObjectSize.all_storage_classes_128K + ) + + if transition_min_obj_size not in ( + TransitionDefaultMinimumObjectSize.all_storage_classes_128K, + TransitionDefaultMinimumObjectSize.varies_by_storage_class, + ): + raise InvalidRequest( + f"Invalid TransitionDefaultMinimumObjectSize found: {transition_min_obj_size}" + ) + + validate_lifecycle_configuration(lifecycle_configuration) + # TODO: we either apply the lifecycle to existing objects when we set the new rules, or we need to apply them + # everytime we get/head an object + # for now, we keep a cache and get it everytime we fetch an object + s3_bucket.lifecycle_rules = lifecycle_configuration["Rules"] + s3_bucket.transition_default_minimum_object_size = transition_min_obj_size + self._expiration_cache[bucket].clear() + return PutBucketLifecycleConfigurationOutput( + TransitionDefaultMinimumObjectSize=transition_min_obj_size + ) + + def delete_bucket_lifecycle( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + s3_bucket.lifecycle_rules = None + self._expiration_cache[bucket].clear() + + def put_bucket_analytics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: AnalyticsId, + analytics_configuration: AnalyticsConfiguration, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + validate_bucket_analytics_configuration( + id=id, analytics_configuration=analytics_configuration + ) + + s3_bucket.analytics_configurations[id] = analytics_configuration + + def get_bucket_analytics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: AnalyticsId, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketAnalyticsConfigurationOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not (analytic_config := s3_bucket.analytics_configurations.get(id)): + raise NoSuchConfiguration("The specified configuration does not exist.") + + return 
GetBucketAnalyticsConfigurationOutput(AnalyticsConfiguration=analytic_config) + + def list_bucket_analytics_configurations( + self, + context: RequestContext, + bucket: BucketName, + continuation_token: Token = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> ListBucketAnalyticsConfigurationsOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + return ListBucketAnalyticsConfigurationsOutput( + IsTruncated=False, + AnalyticsConfigurationList=sorted( + s3_bucket.analytics_configurations.values(), + key=itemgetter("Id"), + ), + ) + + def delete_bucket_analytics_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: AnalyticsId, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.analytics_configurations.pop(id, None): + raise NoSuchConfiguration("The specified configuration does not exist.") + + def put_bucket_intelligent_tiering_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: IntelligentTieringId, + intelligent_tiering_configuration: IntelligentTieringConfiguration, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + validate_bucket_intelligent_tiering_configuration(id, intelligent_tiering_configuration) + + s3_bucket.intelligent_tiering_configurations[id] = intelligent_tiering_configuration + + def get_bucket_intelligent_tiering_configuration( + self, context: RequestContext, bucket: BucketName, id: IntelligentTieringId, **kwargs + ) -> GetBucketIntelligentTieringConfigurationOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not (itier_config := s3_bucket.intelligent_tiering_configurations.get(id)): + raise NoSuchConfiguration("The specified configuration does not exist.") + + return GetBucketIntelligentTieringConfigurationOutput( + IntelligentTieringConfiguration=itier_config + ) + + def delete_bucket_intelligent_tiering_configuration( + self, context: RequestContext, bucket: BucketName, id: IntelligentTieringId, **kwargs + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.intelligent_tiering_configurations.pop(id, None): + raise NoSuchConfiguration("The specified configuration does not exist.") + + def list_bucket_intelligent_tiering_configurations( + self, + context: RequestContext, + bucket: BucketName, + continuation_token: Token = None, + **kwargs, + ) -> ListBucketIntelligentTieringConfigurationsOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + return ListBucketIntelligentTieringConfigurationsOutput( + IsTruncated=False, + IntelligentTieringConfigurationList=sorted( + s3_bucket.intelligent_tiering_configurations.values(), + key=itemgetter("Id"), + ), + ) + + def put_bucket_inventory_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: InventoryId, + inventory_configuration: InventoryConfiguration, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + validate_inventory_configuration( + config_id=id, inventory_configuration=inventory_configuration + ) + s3_bucket.inventory_configurations[id] = inventory_configuration + + def get_bucket_inventory_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: InventoryId, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> 
GetBucketInventoryConfigurationOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not (inv_config := s3_bucket.inventory_configurations.get(id)): + raise NoSuchConfiguration("The specified configuration does not exist.") + return GetBucketInventoryConfigurationOutput(InventoryConfiguration=inv_config) + + def list_bucket_inventory_configurations( + self, + context: RequestContext, + bucket: BucketName, + continuation_token: Token = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> ListBucketInventoryConfigurationsOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + return ListBucketInventoryConfigurationsOutput( + IsTruncated=False, + InventoryConfigurationList=sorted( + s3_bucket.inventory_configurations.values(), key=itemgetter("Id") + ), + ) + + def delete_bucket_inventory_configuration( + self, + context: RequestContext, + bucket: BucketName, + id: InventoryId, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.inventory_configurations.pop(id, None): + raise NoSuchConfiguration("The specified configuration does not exist.") + + def get_bucket_website( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketWebsiteOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.website_configuration: + raise NoSuchWebsiteConfiguration( + "The specified bucket does not have a website configuration", + BucketName=bucket, + ) + return s3_bucket.website_configuration + + def put_bucket_website( + self, + context: RequestContext, + bucket: BucketName, + website_configuration: WebsiteConfiguration, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + validate_website_configuration(website_configuration) + s3_bucket.website_configuration = website_configuration + + def delete_bucket_website( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + # does not raise error if the bucket did not have a config, will simply return + s3_bucket.website_configuration = None + + def get_object_lock_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetObjectLockConfigurationOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + if not s3_bucket.object_lock_enabled: + raise ObjectLockConfigurationNotFoundError( + "Object Lock configuration does not exist for this bucket", + BucketName=bucket, + ) + + response = GetObjectLockConfigurationOutput( + ObjectLockConfiguration=ObjectLockConfiguration( + ObjectLockEnabled=ObjectLockEnabled.Enabled + ) + ) + if s3_bucket.object_lock_default_retention: + response["ObjectLockConfiguration"]["Rule"] = { + "DefaultRetention": s3_bucket.object_lock_default_retention + } + + return response + + def put_object_lock_configuration( + self, + context: RequestContext, + bucket: BucketName, + object_lock_configuration: ObjectLockConfiguration = None, + request_payer: RequestPayer = None, + token: ObjectLockToken = None, + content_md5: ContentMD5 = None, + checksum_algorithm: 
ChecksumAlgorithm = None,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> PutObjectLockConfigurationOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+        if s3_bucket.versioning_status != "Enabled":
+            raise InvalidBucketState(
+                "Versioning must be 'Enabled' on the bucket to apply a Object Lock configuration"
+            )
+
+        if (
+            not object_lock_configuration
+            or object_lock_configuration.get("ObjectLockEnabled") != "Enabled"
+        ):
+            raise MalformedXML()
+
+        if "Rule" not in object_lock_configuration:
+            s3_bucket.object_lock_default_retention = None
+            if not s3_bucket.object_lock_enabled:
+                s3_bucket.object_lock_enabled = True
+
+            return PutObjectLockConfigurationOutput()
+        elif not (rule := object_lock_configuration["Rule"]) or not (
+            default_retention := rule.get("DefaultRetention")
+        ):
+            raise MalformedXML()
+
+        if "Mode" not in default_retention or (
+            ("Days" in default_retention and "Years" in default_retention)
+            or ("Days" not in default_retention and "Years" not in default_retention)
+        ):
+            raise MalformedXML()
+
+        s3_bucket.object_lock_default_retention = default_retention
+        if not s3_bucket.object_lock_enabled:
+            s3_bucket.object_lock_enabled = True
+
+        return PutObjectLockConfigurationOutput()
+
+    def get_object_legal_hold(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        key: ObjectKey,
+        version_id: ObjectVersionId = None,
+        request_payer: RequestPayer = None,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> GetObjectLegalHoldOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+        if not s3_bucket.object_lock_enabled:
+            raise InvalidRequest("Bucket is missing Object Lock Configuration")
+
+        s3_object = s3_bucket.get_object(
+            key=key,
+            version_id=version_id,
+            http_method="GET",
+        )
+        if not s3_object.lock_legal_status:
+            raise NoSuchObjectLockConfiguration(
+                "The specified object does not have a ObjectLock configuration"
+            )
+
+        return GetObjectLegalHoldOutput(
+            LegalHold=ObjectLockLegalHold(Status=s3_object.lock_legal_status)
+        )
+
+    def put_object_legal_hold(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        key: ObjectKey,
+        legal_hold: ObjectLockLegalHold = None,
+        request_payer: RequestPayer = None,
+        version_id: ObjectVersionId = None,
+        content_md5: ContentMD5 = None,
+        checksum_algorithm: ChecksumAlgorithm = None,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> PutObjectLegalHoldOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        if not legal_hold:
+            raise MalformedXML()
+
+        if not s3_bucket.object_lock_enabled:
+            raise InvalidRequest("Bucket is missing Object Lock Configuration")
+
+        s3_object = s3_bucket.get_object(
+            key=key,
+            version_id=version_id,
+            http_method="PUT",
+        )
+        # TODO: check casing
+        if not (status := legal_hold.get("Status")) or status not in ("ON", "OFF"):
+            raise MalformedXML()
+
+        s3_object.lock_legal_status = status
+
+        # TODO: return RequestCharged
+        return PutObjectLegalHoldOutput()
+
+    def get_object_retention(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        key: ObjectKey,
+        version_id: ObjectVersionId = None,
+        request_payer: RequestPayer = None,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> GetObjectRetentionOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+        if not s3_bucket.object_lock_enabled:
+            raise InvalidRequest("Bucket is missing Object Lock Configuration")
+
+        s3_object = s3_bucket.get_object(
+            key=key,
+            version_id=version_id,
+            http_method="GET",
+        )
+        if not s3_object.lock_mode:
+            raise NoSuchObjectLockConfiguration(
+                "The specified object does not have a ObjectLock configuration"
+            )
+
+        return GetObjectRetentionOutput(
+            Retention=ObjectLockRetention(
+                Mode=s3_object.lock_mode,
+                RetainUntilDate=s3_object.lock_until,
+            )
+        )
+
+    def put_object_retention(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        key: ObjectKey,
+        retention: ObjectLockRetention = None,
+        request_payer: RequestPayer = None,
+        version_id: ObjectVersionId = None,
+        bypass_governance_retention: BypassGovernanceRetention = None,
+        content_md5: ContentMD5 = None,
+        checksum_algorithm: ChecksumAlgorithm = None,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> PutObjectRetentionOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+        if not s3_bucket.object_lock_enabled:
+            raise InvalidRequest("Bucket is missing Object Lock Configuration")
+
+        s3_object = s3_bucket.get_object(
+            key=key,
+            version_id=version_id,
+            http_method="PUT",
+        )
+
+        if retention and not validate_dict_fields(
+            retention, required_fields={"Mode", "RetainUntilDate"}
+        ):
+            raise MalformedXML()
+
+        if retention and retention["RetainUntilDate"] < datetime.datetime.now(datetime.UTC):
+            # weirdly, this date is formatted as follows: Tue Dec 31 16:00:00 PST 2019
+            # it contains the timezone as PST, even if you target a bucket in Europe or Asia
+            pst_datetime = retention["RetainUntilDate"].astimezone(tz=ZoneInfo("US/Pacific"))
+            raise InvalidArgument(
+                "The retain until date must be in the future!",
+                ArgumentName="RetainUntilDate",
+                ArgumentValue=pst_datetime.strftime("%a %b %d %H:%M:%S %Z %Y"),
+            )
+
+        if (
+            not retention
+            or (s3_object.lock_until and s3_object.lock_until > retention["RetainUntilDate"])
+        ) and not (
+            bypass_governance_retention and s3_object.lock_mode == ObjectLockMode.GOVERNANCE
+        ):
+            raise AccessDenied("Access Denied because object protected by object lock.")
+
+        s3_object.lock_mode = retention["Mode"] if retention else None
+        s3_object.lock_until = retention["RetainUntilDate"] if retention else None
+
+        # TODO: return RequestCharged
+        return PutObjectRetentionOutput()
+
+    def put_bucket_request_payment(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        request_payment_configuration: RequestPaymentConfiguration,
+        content_md5: ContentMD5 = None,
+        checksum_algorithm: ChecksumAlgorithm = None,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> None:
+        # TODO: this currently only mocks the operation; its actual effect is not emulated
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        payer = request_payment_configuration.get("Payer")
+        if payer not in ["Requester", "BucketOwner"]:
+            raise MalformedXML()
+
+        s3_bucket.payer = payer
+
+    def get_bucket_request_payment(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> GetBucketRequestPaymentOutput:
+        # TODO: this currently only mocks the operation; its actual effect is not emulated
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        return GetBucketRequestPaymentOutput(Payer=s3_bucket.payer)
+
+    def get_bucket_ownership_controls(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> GetBucketOwnershipControlsOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        if not s3_bucket.object_ownership:
+            raise OwnershipControlsNotFoundError(
+                "The bucket ownership controls were not found",
+                BucketName=bucket,
+            )
+
+        return GetBucketOwnershipControlsOutput(
+            OwnershipControls={"Rules": [{"ObjectOwnership": s3_bucket.object_ownership}]}
+        )
+
+    def put_bucket_ownership_controls(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        ownership_controls: OwnershipControls,
+        content_md5: ContentMD5 | None = None,
+        expected_bucket_owner: AccountId | None = None,
+        checksum_algorithm: ChecksumAlgorithm | None = None,
+        **kwargs,
+    ) -> None:
+        # TODO: this currently only mocks the operation; its actual effect is not emulated
+        # it should, for example, almost completely forbid ACL usage when set to BucketOwnerEnforced
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        if not (rules := ownership_controls.get("Rules")) or len(rules) > 1:
+            raise MalformedXML()
+
+        rule = rules[0]
+        if (object_ownership := rule.get("ObjectOwnership")) not in OBJECT_OWNERSHIPS:
+            raise MalformedXML()
+
+        s3_bucket.object_ownership = object_ownership
+
+    def delete_bucket_ownership_controls(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> None:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        s3_bucket.object_ownership = None
+
+    def get_public_access_block(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> GetPublicAccessBlockOutput:
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        if not s3_bucket.public_access_block:
+            raise NoSuchPublicAccessBlockConfiguration(
+                "The public access block configuration was not found", BucketName=bucket
+            )
+
+        return GetPublicAccessBlockOutput(
+            PublicAccessBlockConfiguration=s3_bucket.public_access_block
+        )
+
+    def put_public_access_block(
+        self,
+        context: RequestContext,
+        bucket: BucketName,
+        public_access_block_configuration: PublicAccessBlockConfiguration,
+        content_md5: ContentMD5 = None,
+        checksum_algorithm: ChecksumAlgorithm = None,
+        expected_bucket_owner: AccountId = None,
+        **kwargs,
+    ) -> None:
+        # TODO: this currently only mocks the operation; its actual effect is not emulated,
+        # as we do not enforce ACL directly. Also, this should take the most restrictive between S3Control and the
+        # bucket configuration.
See s3control + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + public_access_block_fields = { + "BlockPublicAcls", + "BlockPublicPolicy", + "IgnorePublicAcls", + "RestrictPublicBuckets", + } + if not validate_dict_fields( + public_access_block_configuration, + required_fields=set(), + optional_fields=public_access_block_fields, + ): + raise MalformedXML() + + for field in public_access_block_fields: + if public_access_block_configuration.get(field) is None: + public_access_block_configuration[field] = False + + s3_bucket.public_access_block = public_access_block_configuration + + def delete_public_access_block( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + s3_bucket.public_access_block = None + + def get_bucket_policy( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketPolicyOutput: + store, s3_bucket = self._get_cross_account_bucket( + context, bucket, expected_bucket_owner=expected_bucket_owner + ) + if not s3_bucket.policy: + raise NoSuchBucketPolicy( + "The bucket policy does not exist", + BucketName=bucket, + ) + return GetBucketPolicyOutput(Policy=s3_bucket.policy) + + def put_bucket_policy( + self, + context: RequestContext, + bucket: BucketName, + policy: Policy, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + confirm_remove_self_bucket_access: ConfirmRemoveSelfBucketAccess = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket( + context, bucket, expected_bucket_owner=expected_bucket_owner + ) + + if not policy or policy[0] != "{": + raise MalformedPolicy("Policies must be valid JSON and the first byte must be '{'") + try: + json_policy = json.loads(policy) + if not json_policy: + # TODO: add more validation around the policy? + raise MalformedPolicy("Missing required field Statement") + except ValueError: + raise MalformedPolicy("Policies must be valid JSON and the first byte must be '{'") + + s3_bucket.policy = policy + + def delete_bucket_policy( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket( + context, bucket, expected_bucket_owner=expected_bucket_owner + ) + + s3_bucket.policy = None + + def get_bucket_accelerate_configuration( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + request_payer: RequestPayer = None, + **kwargs, + ) -> GetBucketAccelerateConfigurationOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + response = GetBucketAccelerateConfigurationOutput() + if s3_bucket.accelerate_status: + response["Status"] = s3_bucket.accelerate_status + + return response + + def put_bucket_accelerate_configuration( + self, + context: RequestContext, + bucket: BucketName, + accelerate_configuration: AccelerateConfiguration, + expected_bucket_owner: AccountId = None, + checksum_algorithm: ChecksumAlgorithm = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if "." in bucket: + raise InvalidRequest( + "S3 Transfer Acceleration is not supported for buckets with periods (.) 
in their names" + ) + + if not (status := accelerate_configuration.get("Status")) or status not in ( + "Enabled", + "Suspended", + ): + raise MalformedXML() + + s3_bucket.accelerate_status = status + + def put_bucket_logging( + self, + context: RequestContext, + bucket: BucketName, + bucket_logging_status: BucketLoggingStatus, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not (logging_config := bucket_logging_status.get("LoggingEnabled")): + s3_bucket.logging = {} + return + + # the target bucket must be in the same account + if not (target_bucket_name := logging_config.get("TargetBucket")): + raise MalformedXML() + + if not logging_config.get("TargetPrefix"): + logging_config["TargetPrefix"] = "" + + # TODO: validate Grants + + if not (target_s3_bucket := store.buckets.get(target_bucket_name)): + raise InvalidTargetBucketForLogging( + "The target bucket for logging does not exist", + TargetBucket=target_bucket_name, + ) + + source_bucket_region = s3_bucket.bucket_region + if target_s3_bucket.bucket_region != source_bucket_region: + raise ( + CrossLocationLoggingProhibitted( + "Cross S3 location logging not allowed. ", + TargetBucketLocation=target_s3_bucket.bucket_region, + ) + if source_bucket_region == AWS_REGION_US_EAST_1 + else CrossLocationLoggingProhibitted( + "Cross S3 location logging not allowed. ", + SourceBucketLocation=source_bucket_region, + TargetBucketLocation=target_s3_bucket.bucket_region, + ) + ) + + s3_bucket.logging = logging_config + + def get_bucket_logging( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketLoggingOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.logging: + return GetBucketLoggingOutput() + + return GetBucketLoggingOutput(LoggingEnabled=s3_bucket.logging) + + def put_bucket_replication( + self, + context: RequestContext, + bucket: BucketName, + replication_configuration: ReplicationConfiguration, + content_md5: ContentMD5 = None, + checksum_algorithm: ChecksumAlgorithm = None, + token: ObjectLockToken = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + if not s3_bucket.versioning_status == BucketVersioningStatus.Enabled: + raise InvalidRequest( + "Versioning must be 'Enabled' on the bucket to apply a replication configuration" + ) + + if not (rules := replication_configuration.get("Rules")): + raise MalformedXML() + + for rule in rules: + if "ID" not in rule: + rule["ID"] = short_uid() + + dest_bucket_arn = rule.get("Destination", {}).get("Bucket") + dest_bucket_name = s3_bucket_name(dest_bucket_arn) + if ( + not (dest_s3_bucket := store.buckets.get(dest_bucket_name)) + or not dest_s3_bucket.versioning_status == BucketVersioningStatus.Enabled + ): + # according to AWS testing the same exception is raised if the bucket does not exist + # or if versioning was disabled + raise InvalidRequest("Destination bucket must have versioning enabled.") + + # TODO more validation on input + s3_bucket.replication = replication_configuration + + def get_bucket_replication( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketReplicationOutput: + store, s3_bucket = 
self._get_cross_account_bucket(context, bucket) + + if not s3_bucket.replication: + raise ReplicationConfigurationNotFoundError( + "The replication configuration was not found", + BucketName=bucket, + ) + + return GetBucketReplicationOutput(ReplicationConfiguration=s3_bucket.replication) + + def delete_bucket_replication( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> None: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + s3_bucket.replication = None + + @handler("PutBucketAcl", expand=False) + def put_bucket_acl( + self, + context: RequestContext, + request: PutBucketAclRequest, + ) -> None: + bucket = request["Bucket"] + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + acp = get_access_control_policy_from_acl_request( + request=request, owner=s3_bucket.owner, request_body=context.request.data + ) + s3_bucket.acl = acp + + def get_bucket_acl( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketAclOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + return GetBucketAclOutput(Owner=s3_bucket.acl["Owner"], Grants=s3_bucket.acl["Grants"]) + + @handler("PutObjectAcl", expand=False) + def put_object_acl( + self, + context: RequestContext, + request: PutObjectAclRequest, + ) -> PutObjectAclOutput: + bucket = request["Bucket"] + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + s3_object = s3_bucket.get_object( + key=request["Key"], + version_id=request.get("VersionId"), + http_method="PUT", + ) + acp = get_access_control_policy_from_acl_request( + request=request, owner=s3_object.owner, request_body=context.request.data + ) + previous_acl = s3_object.acl + s3_object.acl = acp + + if previous_acl != acp: + self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object) + + # TODO: RequestCharged + return PutObjectAclOutput() + + def get_object_acl( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + version_id: ObjectVersionId = None, + request_payer: RequestPayer = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetObjectAclOutput: + store, s3_bucket = self._get_cross_account_bucket(context, bucket) + + s3_object = s3_bucket.get_object( + key=key, + version_id=version_id, + ) + # TODO: RequestCharged + return GetObjectAclOutput(Owner=s3_object.acl["Owner"], Grants=s3_object.acl["Grants"]) + + def get_bucket_policy_status( + self, + context: RequestContext, + bucket: BucketName, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetBucketPolicyStatusOutput: + raise NotImplementedError + + def get_object_torrent( + self, + context: RequestContext, + bucket: BucketName, + key: ObjectKey, + request_payer: RequestPayer = None, + expected_bucket_owner: AccountId = None, + **kwargs, + ) -> GetObjectTorrentOutput: + raise NotImplementedError + + def post_object( + self, context: RequestContext, bucket: BucketName, body: IO[Body] = None, **kwargs + ) -> PostResponse: + if "multipart/form-data" not in context.request.headers.get("Content-Type", ""): + raise PreconditionFailed( + "At least one of the pre-conditions you specified did not hold", + Condition="Bucket POST must be of the enclosure-type multipart/form-data", + ) + # see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html + # TODO: signature validation is not implemented for pre-signed POST + # policy validation is not implemented either, 
except expiration and mandatory fields
+        # This operation is the only one that uses a form to store the request data. We have to do some manual
+        # parsing here, as no specs exist for it and no client directly implements this operation.
+        store, s3_bucket = self._get_cross_account_bucket(context, bucket)
+
+        form = context.request.form
+        object_key = context.request.form.get("key")
+
+        if "file" in form:
+            # in AWS, you can pass the file content as a string in the form field and not as a file object
+            file_data = to_bytes(form["file"])
+            object_content_length = len(file_data)
+            stream = BytesIO(file_data)
+        else:
+            # this is the default behaviour
+            fileobj = context.request.files["file"]
+            stream = fileobj.stream
+            # stream is a SpooledTemporaryFile, so we can seek the stream to know its length, necessary for policy
+            # validation
+            original_pos = stream.tell()
+            object_content_length = stream.seek(0, 2)
+            # reset the stream and put it back at its original position
+            stream.seek(original_pos, 0)
+
+        if "${filename}" in object_key:
+            # TODO: ${filename} is actually usable in all form fields
+            # See https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/PresignedPost.html
+            # > The string ${filename} is automatically replaced with the name of the file provided by the user and
+            # is recognized by all form fields.
+            object_key = object_key.replace("${filename}", fileobj.filename)
+
+        # TODO: see if we need to pass additional metadata not contained in the policy from the table under
+        # https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions
+        additional_policy_metadata = {
+            "bucket": bucket,
+            "content_length": object_content_length,
+        }
+        validate_post_policy(form, additional_policy_metadata)
+
+        if canned_acl := form.get("acl"):
+            validate_canned_acl(canned_acl)
+            acp = get_canned_acl(canned_acl, owner=s3_bucket.owner)
+        else:
+            acp = get_canned_acl(BucketCannedACL.private, owner=s3_bucket.owner)
+
+        post_system_settable_headers = [
+            "Cache-Control",
+            "Content-Type",
+            "Content-Disposition",
+            "Content-Encoding",
+        ]
+        system_metadata = {}
+        for system_metadata_field in post_system_settable_headers:
+            if field_value := form.get(system_metadata_field):
+                system_metadata[system_metadata_field.replace("-", "")] = field_value
+
+        if not system_metadata.get("ContentType"):
+            system_metadata["ContentType"] = "binary/octet-stream"
+
+        user_metadata = {
+            field.removeprefix("x-amz-meta-").lower(): form.get(field)
+            for field in form
+            if field.startswith("x-amz-meta-")
+        }
+
+        if tagging := form.get("tagging"):
+            # this is weird, as it's raw XML directly in the form, so we need to parse it ourselves
+            tagging = parse_post_object_tagging_xml(tagging)
+
+        if (storage_class := form.get("x-amz-storage-class")) is not None and (
+            storage_class not in STORAGE_CLASSES or storage_class == StorageClass.OUTPOSTS
+        ):
+            raise InvalidStorageClass(
+                "The storage class you specified is not valid", StorageClassRequested=storage_class
+            )
+
+        encryption_request = {
+            "ServerSideEncryption": form.get("x-amz-server-side-encryption"),
+            "SSEKMSKeyId": form.get("x-amz-server-side-encryption-aws-kms-key-id"),
+            "BucketKeyEnabled": form.get("x-amz-server-side-encryption-bucket-key-enabled"),
+        }
+
+        encryption_parameters = get_encryption_parameters_from_request_and_bucket(
+            encryption_request,
+            s3_bucket,
+            store,
+        )
+
+        checksum_algorithm = form.get("x-amz-checksum-algorithm")
+        checksum_value = (
+            form.get(f"x-amz-checksum-{checksum_algorithm.lower()}") if checksum_algorithm else None
+        )
+        expires = (
+            str_to_rfc_1123_datetime(expires_str) if (expires_str := form.get("Expires")) else None
+        )
+
+        version_id = generate_version_id(s3_bucket.versioning_status)
+
+        s3_object = S3Object(
+            key=object_key,
+            version_id=version_id,
+            storage_class=storage_class,
+            expires=expires,
+            user_metadata=user_metadata,
+            system_metadata=system_metadata,
+            checksum_algorithm=checksum_algorithm,
+            checksum_value=checksum_value,
+            encryption=encryption_parameters.encryption,
+            kms_key_id=encryption_parameters.kms_key_id,
+            bucket_key_enabled=encryption_parameters.bucket_key_enabled,
+            website_redirect_location=form.get("x-amz-website-redirect-location"),
+            acl=acp,
+            owner=s3_bucket.owner,  # TODO: for now we only have one owner, but it can depend on Bucket settings
+        )
+
+        with self._storage_backend.open(bucket, s3_object, mode="w") as s3_stored_object:
+            s3_stored_object.write(stream)
+
+            if not s3_object.checksum_value:
+                s3_object.checksum_value = s3_stored_object.checksum
+
+            elif checksum_algorithm and s3_object.checksum_value != s3_stored_object.checksum:
+                self._storage_backend.remove(bucket, s3_object)
+                raise InvalidRequest(
+                    f"Value for x-amz-checksum-{checksum_algorithm.lower()} header is invalid."
+                )
+
+        s3_bucket.objects.set(object_key, s3_object)
+
+        # in case we are overwriting an object, delete the tags entry
+        key_id = get_unique_key_id(bucket, object_key, version_id)
+        store.TAGS.tags.pop(key_id, None)
+        if tagging:
+            store.TAGS.tags[key_id] = tagging
+
+        response = PostResponse()
+        # hacky way to set the etag in the headers as well: two locations for one value
+        response["ETagHeader"] = s3_object.quoted_etag
+
+        if redirect := form.get("success_action_redirect"):
+            # we need to create the redirect, as the parser could not return the moto-calculated one
+            try:
+                redirect = create_redirect_for_post_request(
+                    base_redirect=redirect,
+                    bucket=bucket,
+                    object_key=object_key,
+                    etag=s3_object.quoted_etag,
+                )
+                response["LocationHeader"] = redirect
+                response["StatusCode"] = 303
+            except ValueError:
+                # If S3 cannot interpret the URL, it acts as if the field is not present.
+                response["StatusCode"] = form.get("success_action_status", 204)
+
+        elif status_code := form.get("success_action_status"):
+            response["StatusCode"] = status_code
+        else:
+            response["StatusCode"] = 204
+
+        response["LocationHeader"] = response.get(
+            "LocationHeader", f"{get_full_default_bucket_location(bucket)}{object_key}"
+        )
+
+        if s3_bucket.versioning_status == "Enabled":
+            response["VersionId"] = s3_object.version_id
+
+        if s3_object.checksum_algorithm:
+            response[f"Checksum{s3_object.checksum_algorithm.upper()}"] = s3_object.checksum_value
+            response["ChecksumType"] = ChecksumType.FULL_OBJECT
+
+        if s3_bucket.lifecycle_rules:
+            if expiration_header := self._get_expiration_header(
+                s3_bucket.lifecycle_rules,
+                bucket,
+                s3_object,
+                store.TAGS.tags.get(key_id, {}),
+            ):
+                # TODO: we either apply the lifecycle to existing objects when we set the new rules, or we need to
+                # apply them every time we get/head an object
+                response["Expiration"] = expiration_header
+
+        add_encryption_to_response(response, s3_object=s3_object)
+
+        self._notify(context, s3_bucket=s3_bucket, s3_object=s3_object)
+
+        if response["StatusCode"] == "201":
+            # if the StatusCode is 201, S3 returns an XML body with additional information
+            response["ETag"] = s3_object.quoted_etag
+            response["Bucket"] = bucket
+            response["Key"] = object_key
+            response["Location"] = response["LocationHeader"]
+
+        return response
+
+
+def generate_version_id(bucket_versioning_status: str) -> str | None:
+    if not bucket_versioning_status:
+        return None
+    elif bucket_versioning_status.lower() == "enabled":
+        return generate_safe_version_id()
+    else:
+        return "null"
+
+
+def add_encryption_to_response(response: dict, s3_object: S3Object):
+    if encryption := s3_object.encryption:
+        response["ServerSideEncryption"] = encryption
+        if encryption == ServerSideEncryption.aws_kms:
+            response["SSEKMSKeyId"] = s3_object.kms_key_id
+            if s3_object.bucket_key_enabled:
+                response["BucketKeyEnabled"] = s3_object.bucket_key_enabled
+
+
+def get_encryption_parameters_from_request_and_bucket(
+    request: PutObjectRequest | CopyObjectRequest | CreateMultipartUploadRequest,
+    s3_bucket: S3Bucket,
+    store: S3Store,
+) -> EncryptionParameters:
+    if request.get("SSECustomerKey"):
+        # we return early, because ServerSideEncryption does not apply if the request has SSE-C
+        return EncryptionParameters(None, None, False)
+
+    encryption = request.get("ServerSideEncryption")
+    kms_key_id = request.get("SSEKMSKeyId")
+    bucket_key_enabled = request.get("BucketKeyEnabled")
+    if s3_bucket.encryption_rule:
+        bucket_key_enabled = bucket_key_enabled or s3_bucket.encryption_rule.get("BucketKeyEnabled")
+        encryption = (
+            encryption
+            or s3_bucket.encryption_rule["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"]
+        )
+        if encryption == ServerSideEncryption.aws_kms:
+            key_id = kms_key_id or s3_bucket.encryption_rule[
+                "ApplyServerSideEncryptionByDefault"
+            ].get("KMSMasterKeyID")
+            kms_key_id = get_kms_key_arn(
+                key_id, s3_bucket.bucket_account_id, s3_bucket.bucket_region
+            )
+            if not kms_key_id:
+                # if no key is provided, AWS will use an AWS-managed KMS key
+                # create it if it doesn't already exist, and save it in the store per region
+                if not store.aws_managed_kms_key_id:
+                    managed_kms_key_id = create_s3_kms_managed_key_for_region(
+                        s3_bucket.bucket_account_id, s3_bucket.bucket_region
+                    )
+                    store.aws_managed_kms_key_id = managed_kms_key_id
+
+                kms_key_id = store.aws_managed_kms_key_id
+
+    return EncryptionParameters(encryption, kms_key_id, bucket_key_enabled)
+
+
+def get_object_lock_parameters_from_bucket_and_request( + request: PutObjectRequest | CopyObjectRequest | CreateMultipartUploadRequest, + s3_bucket: S3Bucket, +): + # TODO: also validate here? + lock_mode = request.get("ObjectLockMode") + lock_legal_status = request.get("ObjectLockLegalHoldStatus") + lock_until = request.get("ObjectLockRetainUntilDate") + + if default_retention := s3_bucket.object_lock_default_retention: + lock_mode = lock_mode or default_retention.get("Mode") + if lock_mode and not lock_until: + lock_until = get_retention_from_now( + days=default_retention.get("Days"), + years=default_retention.get("Years"), + ) + + return ObjectLockParameters(lock_until, lock_legal_status, lock_mode) + + +def get_part_range(s3_object: S3Object, part_number: PartNumber) -> ObjectRange: + """ + Calculate the range value from a part Number for an S3 Object + :param s3_object: S3Object + :param part_number: the wanted part from the S3Object + :return: an ObjectRange used to return only a slice of an Object + """ + if not s3_object.parts: + if part_number > 1: + raise InvalidPartNumber( + "The requested partnumber is not satisfiable", + PartNumberRequested=part_number, + ActualPartCount=1, + ) + return ObjectRange( + begin=0, + end=s3_object.size - 1, + content_length=s3_object.size, + content_range=f"bytes 0-{s3_object.size - 1}/{s3_object.size}", + ) + elif not (part_data := s3_object.parts.get(part_number)): + raise InvalidPartNumber( + "The requested partnumber is not satisfiable", + PartNumberRequested=part_number, + ActualPartCount=len(s3_object.parts), + ) + + begin, part_length = part_data + end = begin + part_length - 1 + return ObjectRange( + begin=begin, + end=end, + content_length=part_length, + content_range=f"bytes {begin}-{end}/{s3_object.size}", + ) + + +def get_acl_headers_from_request( + request: Union[ + PutObjectRequest, + CreateMultipartUploadRequest, + CopyObjectRequest, + CreateBucketRequest, + PutBucketAclRequest, + PutObjectAclRequest, + ], +) -> list[tuple[str, str]]: + permission_keys = [ + "GrantFullControl", + "GrantRead", + "GrantReadACP", + "GrantWrite", + "GrantWriteACP", + ] + acl_headers = [ + (permission, grant_header) + for permission in permission_keys + if (grant_header := request.get(permission)) + ] + return acl_headers + + +def get_access_control_policy_from_acl_request( + request: Union[PutBucketAclRequest, PutObjectAclRequest], + owner: Owner, + request_body: bytes, +) -> AccessControlPolicy: + canned_acl = request.get("ACL") + acl_headers = get_acl_headers_from_request(request) + + # FIXME: this is very dirty, but the parser does not differentiate between an empty body and an empty XML node + # errors are different depending on that data, so we need to access the context. 
Modifying the parser for this + # use case seems dangerous + is_acp_in_body = request_body + + if not (canned_acl or acl_headers or is_acp_in_body): + raise MissingSecurityHeader( + "Your request was missing a required header", MissingHeaderName="x-amz-acl" + ) + + elif canned_acl and acl_headers: + raise InvalidRequest("Specifying both Canned ACLs and Header Grants is not allowed") + + elif (canned_acl or acl_headers) and is_acp_in_body: + raise UnexpectedContent("This request does not support content") + + if canned_acl: + validate_canned_acl(canned_acl) + acp = get_canned_acl(canned_acl, owner=owner) + + elif acl_headers: + grants = [] + for permission, grantees_values in acl_headers: + permission = get_permission_from_header(permission) + partial_grants = parse_grants_in_headers(permission, grantees_values) + grants.extend(partial_grants) + + acp = AccessControlPolicy(Owner=owner, Grants=grants) + else: + acp = request.get("AccessControlPolicy") + validate_acl_acp(acp) + if ( + owner.get("DisplayName") + and acp["Grants"] + and "DisplayName" not in acp["Grants"][0]["Grantee"] + ): + acp["Grants"][0]["Grantee"]["DisplayName"] = owner["DisplayName"] + + return acp + + +def get_access_control_policy_for_new_resource_request( + request: Union[ + PutObjectRequest, CreateMultipartUploadRequest, CopyObjectRequest, CreateBucketRequest + ], + owner: Owner, +) -> AccessControlPolicy: + # TODO: this is basic ACL, not taking into account Bucket settings. Revisit once we really implement ACLs. + canned_acl = request.get("ACL") + acl_headers = get_acl_headers_from_request(request) + + if not (canned_acl or acl_headers): + return get_canned_acl(BucketCannedACL.private, owner=owner) + + elif canned_acl and acl_headers: + raise InvalidRequest("Specifying both Canned ACLs and Header Grants is not allowed") + + if canned_acl: + validate_canned_acl(canned_acl) + return get_canned_acl(canned_acl, owner=owner) + + grants = [] + for permission, grantees_values in acl_headers: + permission = get_permission_from_header(permission) + partial_grants = parse_grants_in_headers(permission, grantees_values) + grants.extend(partial_grants) + + return AccessControlPolicy(Owner=owner, Grants=grants) + + +def object_exists_for_precondition_write(s3_bucket: S3Bucket, key: ObjectKey) -> bool: + return (existing := s3_bucket.objects.get(key)) and not isinstance(existing, S3DeleteMarker) + + +def verify_object_equality_precondition_write( + s3_bucket: S3Bucket, + key: ObjectKey, + etag: str, + initiated: datetime.datetime | None = None, +) -> None: + existing = s3_bucket.objects.get(key) + if not existing or isinstance(existing, S3DeleteMarker): + raise NoSuchKey("The specified key does not exist.", Key=key) + + if not existing.etag == etag.strip('"'): + raise PreconditionFailed( + "At least one of the pre-conditions you specified did not hold", + Condition="If-Match", + ) + + if initiated and initiated < existing.last_modified: + raise ConditionalRequestConflict( + "The conditional request cannot succeed due to a conflicting operation against this resource.", + Condition="If-Match", + Key=key, + ) diff --git a/localstack/services/stepfunctions/asl/parse/intrinsic/__init__.py b/localstack-core/localstack/services/s3/resource_providers/__init__.py similarity index 100% rename from localstack/services/stepfunctions/asl/parse/intrinsic/__init__.py rename to localstack-core/localstack/services/s3/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket.py 
b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket.py new file mode 100644 index 0000000000000..de1573274b2b8 --- /dev/null +++ b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket.py @@ -0,0 +1,733 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import re +from pathlib import Path +from typing import Optional, TypedDict + +from botocore.exceptions import ClientError + +import localstack.services.cloudformation.provider_utils as util +from localstack.config import S3_STATIC_WEBSITE_HOSTNAME, S3_VIRTUAL_HOSTNAME +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.services.s3.utils import normalize_bucket_name +from localstack.utils.aws import arns +from localstack.utils.testutil import delete_all_s3_objects +from localstack.utils.urls import localstack_host + + +class S3BucketProperties(TypedDict): + AccelerateConfiguration: Optional[AccelerateConfiguration] + AccessControl: Optional[str] + AnalyticsConfigurations: Optional[list[AnalyticsConfiguration]] + Arn: Optional[str] + BucketEncryption: Optional[BucketEncryption] + BucketName: Optional[str] + CorsConfiguration: Optional[CorsConfiguration] + DomainName: Optional[str] + DualStackDomainName: Optional[str] + IntelligentTieringConfigurations: Optional[list[IntelligentTieringConfiguration]] + InventoryConfigurations: Optional[list[InventoryConfiguration]] + LifecycleConfiguration: Optional[LifecycleConfiguration] + LoggingConfiguration: Optional[LoggingConfiguration] + MetricsConfigurations: Optional[list[MetricsConfiguration]] + NotificationConfiguration: Optional[NotificationConfiguration] + ObjectLockConfiguration: Optional[ObjectLockConfiguration] + ObjectLockEnabled: Optional[bool] + OwnershipControls: Optional[OwnershipControls] + PublicAccessBlockConfiguration: Optional[PublicAccessBlockConfiguration] + RegionalDomainName: Optional[str] + ReplicationConfiguration: Optional[ReplicationConfiguration] + Tags: Optional[list[Tag]] + VersioningConfiguration: Optional[VersioningConfiguration] + WebsiteConfiguration: Optional[WebsiteConfiguration] + WebsiteURL: Optional[str] + + +class AccelerateConfiguration(TypedDict): + AccelerationStatus: Optional[str] + + +class TagFilter(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class Destination(TypedDict): + BucketArn: Optional[str] + Format: Optional[str] + BucketAccountId: Optional[str] + Prefix: Optional[str] + + +class DataExport(TypedDict): + Destination: Optional[Destination] + OutputSchemaVersion: Optional[str] + + +class StorageClassAnalysis(TypedDict): + DataExport: Optional[DataExport] + + +class AnalyticsConfiguration(TypedDict): + Id: Optional[str] + StorageClassAnalysis: Optional[StorageClassAnalysis] + Prefix: Optional[str] + TagFilters: Optional[list[TagFilter]] + + +class ServerSideEncryptionByDefault(TypedDict): + SSEAlgorithm: Optional[str] + KMSMasterKeyID: Optional[str] + + +class ServerSideEncryptionRule(TypedDict): + BucketKeyEnabled: Optional[bool] + ServerSideEncryptionByDefault: Optional[ServerSideEncryptionByDefault] + + +class BucketEncryption(TypedDict): + ServerSideEncryptionConfiguration: Optional[list[ServerSideEncryptionRule]] + + +class CorsRule(TypedDict): + AllowedMethods: Optional[list[str]] + AllowedOrigins: Optional[list[str]] + AllowedHeaders: Optional[list[str]] + ExposedHeaders: Optional[list[str]] + Id: Optional[str] + MaxAge: Optional[int] + + 
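+# For illustration only (hypothetical values, not part of this change): these TypedDicts are
+# plain dicts at runtime, so a CorsRule can be built as
+#   CorsRule(AllowedMethods=["GET"], AllowedOrigins=["*"], MaxAge=3600)
+# Every field is optional at the type level; which fields the S3 API actually requires is
+# handled when the configuration is translated for PutBucketCors (see _transform_cfn_cors below).
+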
+class CorsConfiguration(TypedDict): + CorsRules: Optional[list[CorsRule]] + + +class Tiering(TypedDict): + AccessTier: Optional[str] + Days: Optional[int] + + +class IntelligentTieringConfiguration(TypedDict): + Id: Optional[str] + Status: Optional[str] + Tierings: Optional[list[Tiering]] + Prefix: Optional[str] + TagFilters: Optional[list[TagFilter]] + + +class InventoryConfiguration(TypedDict): + Destination: Optional[Destination] + Enabled: Optional[bool] + Id: Optional[str] + IncludedObjectVersions: Optional[str] + ScheduleFrequency: Optional[str] + OptionalFields: Optional[list[str]] + Prefix: Optional[str] + + +class AbortIncompleteMultipartUpload(TypedDict): + DaysAfterInitiation: Optional[int] + + +class NoncurrentVersionExpiration(TypedDict): + NoncurrentDays: Optional[int] + NewerNoncurrentVersions: Optional[int] + + +class NoncurrentVersionTransition(TypedDict): + StorageClass: Optional[str] + TransitionInDays: Optional[int] + NewerNoncurrentVersions: Optional[int] + + +class Transition(TypedDict): + StorageClass: Optional[str] + TransitionDate: Optional[str] + TransitionInDays: Optional[int] + + +class Rule(TypedDict): + Status: Optional[str] + AbortIncompleteMultipartUpload: Optional[AbortIncompleteMultipartUpload] + ExpirationDate: Optional[str] + ExpirationInDays: Optional[int] + ExpiredObjectDeleteMarker: Optional[bool] + Id: Optional[str] + NoncurrentVersionExpiration: Optional[NoncurrentVersionExpiration] + NoncurrentVersionExpirationInDays: Optional[int] + NoncurrentVersionTransition: Optional[NoncurrentVersionTransition] + NoncurrentVersionTransitions: Optional[list[NoncurrentVersionTransition]] + ObjectSizeGreaterThan: Optional[str] + ObjectSizeLessThan: Optional[str] + Prefix: Optional[str] + TagFilters: Optional[list[TagFilter]] + Transition: Optional[Transition] + Transitions: Optional[list[Transition]] + + +class LifecycleConfiguration(TypedDict): + Rules: Optional[list[Rule]] + + +class LoggingConfiguration(TypedDict): + DestinationBucketName: Optional[str] + LogFilePrefix: Optional[str] + + +class MetricsConfiguration(TypedDict): + Id: Optional[str] + AccessPointArn: Optional[str] + Prefix: Optional[str] + TagFilters: Optional[list[TagFilter]] + + +class EventBridgeConfiguration(TypedDict): + EventBridgeEnabled: Optional[bool] + + +class FilterRule(TypedDict): + Name: Optional[str] + Value: Optional[str] + + +class S3KeyFilter(TypedDict): + Rules: Optional[list[FilterRule]] + + +class NotificationFilter(TypedDict): + S3Key: Optional[S3KeyFilter] + + +class LambdaConfiguration(TypedDict): + Event: Optional[str] + Function: Optional[str] + Filter: Optional[NotificationFilter] + + +class QueueConfiguration(TypedDict): + Event: Optional[str] + Queue: Optional[str] + Filter: Optional[NotificationFilter] + + +class TopicConfiguration(TypedDict): + Event: Optional[str] + Topic: Optional[str] + Filter: Optional[NotificationFilter] + + +class NotificationConfiguration(TypedDict): + EventBridgeConfiguration: Optional[EventBridgeConfiguration] + LambdaConfigurations: Optional[list[LambdaConfiguration]] + QueueConfigurations: Optional[list[QueueConfiguration]] + TopicConfigurations: Optional[list[TopicConfiguration]] + + +class DefaultRetention(TypedDict): + Days: Optional[int] + Mode: Optional[str] + Years: Optional[int] + + +class ObjectLockRule(TypedDict): + DefaultRetention: Optional[DefaultRetention] + + +class ObjectLockConfiguration(TypedDict): + ObjectLockEnabled: Optional[str] + Rule: Optional[ObjectLockRule] + + +class OwnershipControlsRule(TypedDict): + 
ObjectOwnership: Optional[str] + + +class OwnershipControls(TypedDict): + Rules: Optional[list[OwnershipControlsRule]] + + +class PublicAccessBlockConfiguration(TypedDict): + BlockPublicAcls: Optional[bool] + BlockPublicPolicy: Optional[bool] + IgnorePublicAcls: Optional[bool] + RestrictPublicBuckets: Optional[bool] + + +class DeleteMarkerReplication(TypedDict): + Status: Optional[str] + + +class AccessControlTranslation(TypedDict): + Owner: Optional[str] + + +class EncryptionConfiguration(TypedDict): + ReplicaKmsKeyID: Optional[str] + + +class ReplicationTimeValue(TypedDict): + Minutes: Optional[int] + + +class Metrics(TypedDict): + Status: Optional[str] + EventThreshold: Optional[ReplicationTimeValue] + + +class ReplicationTime(TypedDict): + Status: Optional[str] + Time: Optional[ReplicationTimeValue] + + +class ReplicationDestination(TypedDict): + Bucket: Optional[str] + AccessControlTranslation: Optional[AccessControlTranslation] + Account: Optional[str] + EncryptionConfiguration: Optional[EncryptionConfiguration] + Metrics: Optional[Metrics] + ReplicationTime: Optional[ReplicationTime] + StorageClass: Optional[str] + + +class ReplicationRuleAndOperator(TypedDict): + Prefix: Optional[str] + TagFilters: Optional[list[TagFilter]] + + +class ReplicationRuleFilter(TypedDict): + And: Optional[ReplicationRuleAndOperator] + Prefix: Optional[str] + TagFilter: Optional[TagFilter] + + +class ReplicaModifications(TypedDict): + Status: Optional[str] + + +class SseKmsEncryptedObjects(TypedDict): + Status: Optional[str] + + +class SourceSelectionCriteria(TypedDict): + ReplicaModifications: Optional[ReplicaModifications] + SseKmsEncryptedObjects: Optional[SseKmsEncryptedObjects] + + +class ReplicationRule(TypedDict): + Destination: Optional[ReplicationDestination] + Status: Optional[str] + DeleteMarkerReplication: Optional[DeleteMarkerReplication] + Filter: Optional[ReplicationRuleFilter] + Id: Optional[str] + Prefix: Optional[str] + Priority: Optional[int] + SourceSelectionCriteria: Optional[SourceSelectionCriteria] + + +class ReplicationConfiguration(TypedDict): + Role: Optional[str] + Rules: Optional[list[ReplicationRule]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +class VersioningConfiguration(TypedDict): + Status: Optional[str] + + +class RedirectRule(TypedDict): + HostName: Optional[str] + HttpRedirectCode: Optional[str] + Protocol: Optional[str] + ReplaceKeyPrefixWith: Optional[str] + ReplaceKeyWith: Optional[str] + + +class RoutingRuleCondition(TypedDict): + HttpErrorCodeReturnedEquals: Optional[str] + KeyPrefixEquals: Optional[str] + + +class RoutingRule(TypedDict): + RedirectRule: Optional[RedirectRule] + RoutingRuleCondition: Optional[RoutingRuleCondition] + + +class RedirectAllRequestsTo(TypedDict): + HostName: Optional[str] + Protocol: Optional[str] + + +class WebsiteConfiguration(TypedDict): + ErrorDocument: Optional[str] + IndexDocument: Optional[str] + RedirectAllRequestsTo: Optional[RedirectAllRequestsTo] + RoutingRules: Optional[list[RoutingRule]] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class S3BucketProvider(ResourceProvider[S3BucketProperties]): + TYPE = "AWS::S3::Bucket" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[S3BucketProperties], + ) -> ProgressEvent[S3BucketProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/BucketName + + + Create-only properties: + - /properties/BucketName + - /properties/ObjectLockEnabled + + Read-only properties: + - /properties/Arn + - /properties/DomainName + - /properties/DualStackDomainName + - /properties/RegionalDomainName + - /properties/WebsiteURL + + IAM permissions required: + - s3:CreateBucket + - s3:PutBucketTagging + - s3:PutAnalyticsConfiguration + - s3:PutEncryptionConfiguration + - s3:PutBucketCORS + - s3:PutInventoryConfiguration + - s3:PutLifecycleConfiguration + - s3:PutMetricsConfiguration + - s3:PutBucketNotification + - s3:PutBucketReplication + - s3:PutBucketWebsite + - s3:PutAccelerateConfiguration + - s3:PutBucketPublicAccessBlock + - s3:PutReplicationConfiguration + - s3:PutObjectAcl + - s3:PutBucketObjectLockConfiguration + - s3:GetBucketAcl + - s3:ListBucket + - iam:PassRole + - s3:DeleteObject + - s3:PutBucketLogging + - s3:PutBucketVersioning + - s3:PutObjectLockConfiguration + - s3:PutBucketOwnershipControls + - s3:PutBucketIntelligentTieringConfiguration + + """ + model = request.desired_state + s3_client = request.aws_client_factory.s3 + + if not model.get("BucketName"): + model["BucketName"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + model["BucketName"] = normalize_bucket_name(model["BucketName"]) + + self._create_bucket_if_does_not_exist(model, request.region_name, s3_client) + + self._setup_post_creation_attributes(model, request.region_name) + + if put_config := self._get_s3_bucket_notification_config(model): + s3_client.put_bucket_notification_configuration(**put_config) + + if version_conf := model.get("VersioningConfiguration"): + # from the documentation, it seems that `Status` is a required parameter + s3_client.put_bucket_versioning( + Bucket=model["BucketName"], + VersioningConfiguration={ + "Status": version_conf.get("Status", "Suspended"), + }, + ) + + if cors_configuration := self._transform_cfn_cors(model.get("CorsConfiguration")): + s3_client.put_bucket_cors( + Bucket=model["BucketName"], + CORSConfiguration=cors_configuration, + ) + + if object_lock_configuration := model.get("ObjectLockConfiguration"): + s3_client.put_object_lock_configuration( + Bucket=model["BucketName"], + ObjectLockConfiguration=object_lock_configuration, + ) + + if tags := model.get("Tags"): + s3_client.put_bucket_tagging(Bucket=model["BucketName"], Tagging={"TagSet": tags}) + + if website_config := self._transform_website_configuration( + model.get("WebsiteConfiguration") + ): + s3_client.put_bucket_website( + Bucket=model["BucketName"], + WebsiteConfiguration=website_config, + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def _transform_website_configuration(self, website_configuration: dict) -> dict: + if not website_configuration: + return {} + output = {} + if index := website_configuration.get("IndexDocument"): + output["IndexDocument"] = {"Suffix": index} + if error := website_configuration.get("ErrorDocument"): + output["ErrorDocument"] = {"Key": error} + if redirect_all := website_configuration.get("RedirectAllRequestsTo"): + output["RedirectAllRequestsTo"] = redirect_all + + for r in website_configuration.get("RoutingRules", []): + rule = {} + if condition := r.get("RoutingRuleCondition"): + rule["Condition"] = condition + if redirect := r.get("RedirectRule"): + rule["Redirect"] = redirect + output.setdefault("RoutingRules", 
[]).append(rule) + + return output + + def _transform_cfn_cors(self, cors_config): + # See https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html + # https://docs.aws.amazon.com/AmazonS3/latest/API/API_CORSRule.html + # only AllowedMethods and AllowedOrigins are required + if not cors_config: + return {} + + cors_rules = [] + for cfn_rule in cors_config.get("CorsRules", []): + rule = { + "AllowedOrigins": cfn_rule.get("AllowedOrigins"), + "AllowedMethods": cfn_rule.get("AllowedMethods"), + } + # we should not pass those to PutBucketCors if they are None, as S3 will provide default values and + # does not accept None + if (allowed_headers := cfn_rule.get("AllowedHeaders")) is not None: + rule["AllowedHeaders"] = allowed_headers + + if (allowed_headers := cfn_rule.get("ExposedHeaders")) is not None: + rule["ExposeHeaders"] = allowed_headers + + if (allowed_headers := cfn_rule.get("MaxAge")) is not None: + rule["MaxAgeSeconds"] = allowed_headers + + if (allowed_headers := cfn_rule.get("Id")) is not None: + rule["ID"] = allowed_headers + + cors_rules.append(rule) + + return {"CORSRules": cors_rules} + + def _get_s3_bucket_notification_config( + self, + properties: dict, + ) -> dict | None: + notif_config = properties.get("NotificationConfiguration") + if not notif_config: + return None + + lambda_configs = [] + queue_configs = [] + topic_configs = [] + + attr_tuples = ( + ( + "LambdaConfigurations", + lambda_configs, + "LambdaFunctionArn", + "Function", + ), + ("QueueConfigurations", queue_configs, "QueueArn", "Queue"), + ("TopicConfigurations", topic_configs, "TopicArn", "Topic"), + ) + + # prepare lambda/queue/topic notification configs + for attrs in attr_tuples: + for notif_cfg in notif_config.get(attrs[0]) or []: + filter_rules = notif_cfg.get("Filter", {}).get("S3Key", {}).get("Rules") + entry = { + attrs[2]: notif_cfg[attrs[3]], + "Events": [notif_cfg["Event"]], + } + if filter_rules: + entry["Filter"] = {"Key": {"FilterRules": filter_rules}} + attrs[1].append(entry) + + # construct final result + result = { + "Bucket": properties.get("BucketName"), + "NotificationConfiguration": { + "LambdaFunctionConfigurations": lambda_configs, + "QueueConfigurations": queue_configs, + "TopicConfigurations": topic_configs, + }, + } + if notif_config.get("EventBridgeConfiguration", {}).get("EventBridgeEnabled"): + result["NotificationConfiguration"]["EventBridgeConfiguration"] = {} + + return result + + def _setup_post_creation_attributes(self, model, region: str): + model["Arn"] = arns.s3_bucket_arn(model["BucketName"], region=region) + domain_name = f"{model['BucketName']}.{S3_VIRTUAL_HOSTNAME}" + model["DomainName"] = domain_name + model["RegionalDomainName"] = domain_name + # by default (parity) s3 website only supports http + # https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html + # "Amazon S3 website endpoints do not support HTTPS. If you want to use HTTPS, + # you can use Amazon CloudFront [...]" + model["WebsiteURL"] = ( + f"http://{model['BucketName']}.{S3_STATIC_WEBSITE_HOSTNAME}:{localstack_host().port}" + ) + # resource["Properties"]["DualStackDomainName"] = ? 
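+        # For example (hypothetical values, assuming default LocalStack host/port settings),
+        # a bucket named "my-bucket" would resolve to roughly:
+        #   DomainName: "my-bucket.s3.localhost.localstack.cloud"
+        #   WebsiteURL: "http://my-bucket.s3-website.localhost.localstack.cloud:4566"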
+
+    def _create_bucket_if_does_not_exist(self, model, region_name, s3_client):
+        try:
+            s3_client.head_bucket(Bucket=model["BucketName"])
+        except ClientError as e:
+            if e.response["ResponseMetadata"]["HTTPStatusCode"] != 404:
+                return
+
+            params = {
+                "Bucket": model["BucketName"],
+                "ACL": self._convert_acl_cf_to_s3(model.get("AccessControl", "PublicRead")),
+            }
+
+            if model.get("ObjectLockEnabled"):
+                params["ObjectLockEnabledForBucket"] = True
+
+            if region_name != "us-east-1":
+                params["CreateBucketConfiguration"] = {
+                    "LocationConstraint": region_name,
+                }
+
+            s3_client.create_bucket(**params)
+
+    def _convert_acl_cf_to_s3(self, acl):
+        """Convert a CloudFormation ACL string (e.g., 'PublicRead') to an S3 ACL string (e.g., 'public-read')"""
+        return re.sub("(?<!^)(?=[A-Z])", "-", acl).lower()
+
+    def read(
+        self,
+        request: ResourceRequest[S3BucketProperties],
+    ) -> ProgressEvent[S3BucketProperties]:
+        """
+        Fetch resource information
+
+        IAM permissions required:
+        - s3:GetAccelerateConfiguration
+        - s3:GetLifecycleConfiguration
+        - s3:GetBucketPublicAccessBlock
+        - s3:GetAnalyticsConfiguration
+        - s3:GetBucketCORS
+        - s3:GetEncryptionConfiguration
+        - s3:GetInventoryConfiguration
+        - s3:GetBucketLogging
+        - s3:GetMetricsConfiguration
+        - s3:GetBucketNotification
+        - s3:GetBucketVersioning
+        - s3:GetReplicationConfiguration
+        - S3:GetBucketWebsite
+        - s3:GetBucketPublicAccessBlock
+        - s3:GetBucketObjectLockConfiguration
+        - s3:GetBucketTagging
+        - s3:GetBucketOwnershipControls
+        - s3:GetIntelligentTieringConfiguration
+        - s3:ListBucket
+        """
+        raise NotImplementedError
+
+    def delete(
+        self,
+        request: ResourceRequest[S3BucketProperties],
+    ) -> ProgressEvent[S3BucketProperties]:
+        """
+        Delete a resource
+
+        IAM permissions required:
+        - s3:DeleteBucket
+        """
+        model = request.desired_state
+        s3_client = request.aws_client_factory.s3
+
+        # TODO: divergence from how AWS deals with bucket deletes (should throw an error)
+        try:
+            delete_all_s3_objects(s3_client, model["BucketName"])
+        except s3_client.exceptions.ClientError as e:
+            if "NoSuchBucket" not in str(e):
+                raise
+
+        s3_client.delete_bucket(Bucket=model["BucketName"])
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def update(
+        self,
+        request: ResourceRequest[S3BucketProperties],
+    ) -> ProgressEvent[S3BucketProperties]:
+        """
+        Update a resource
+
+        IAM permissions required:
+        - s3:PutBucketAcl
+        - s3:PutBucketTagging
+        - s3:PutAnalyticsConfiguration
+        - s3:PutEncryptionConfiguration
+        - s3:PutBucketCORS
+        - s3:PutInventoryConfiguration
+        - s3:PutLifecycleConfiguration
+        - s3:PutMetricsConfiguration
+        - s3:PutBucketNotification
+        - s3:PutBucketReplication
+        - s3:PutBucketWebsite
+        - s3:PutAccelerateConfiguration
+        - s3:PutBucketPublicAccessBlock
+        - s3:PutReplicationConfiguration
+        - s3:PutBucketOwnershipControls
+        - s3:PutBucketIntelligentTieringConfiguration
+        - s3:DeleteBucketWebsite
+        - s3:PutBucketLogging
+        - s3:PutBucketVersioning
+        - s3:PutObjectLockConfiguration
+        - s3:DeleteBucketAnalyticsConfiguration
+        - s3:DeleteBucketCors
+        - s3:DeleteBucketMetricsConfiguration
+        - s3:DeleteBucketEncryption
+        - s3:DeleteBucketLifecycle
+        - s3:DeleteBucketReplication
+        - iam:PassRole
+        """
+        raise NotImplementedError
+
+    def list(
+        self,
+        request: ResourceRequest[S3BucketProperties],
+    ) -> ProgressEvent[S3BucketProperties]:
+        buckets = request.aws_client_factory.s3.list_buckets()
+        final_buckets = []
+        for bucket in buckets["Buckets"]:
+            final_buckets.append(S3BucketProperties(BucketName=bucket["Name"]))
+        return
ProgressEvent(status=OperationStatus.SUCCESS, resource_models=final_buckets) diff --git a/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket.schema.json b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket.schema.json new file mode 100644 index 0000000000000..88c84aad148f3 --- /dev/null +++ b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket.schema.json @@ -0,0 +1,1611 @@ +{ + "typeName": "AWS::S3::Bucket", + "description": "Resource Type definition for AWS::S3::Bucket", + "additionalProperties": false, + "properties": { + "AccelerateConfiguration": { + "$ref": "#/definitions/AccelerateConfiguration", + "description": "Configuration for the transfer acceleration state." + }, + "AccessControl": { + "description": "A canned access control list (ACL) that grants predefined permissions to the bucket.", + "enum": [ + "AuthenticatedRead", + "AwsExecRead", + "BucketOwnerFullControl", + "BucketOwnerRead", + "LogDeliveryWrite", + "Private", + "PublicRead", + "PublicReadWrite" + ], + "type": "string" + }, + "AnalyticsConfigurations": { + "description": "The configuration and any analyses for the analytics filter of an Amazon S3 bucket.", + "items": { + "$ref": "#/definitions/AnalyticsConfiguration" + }, + "type": "array", + "uniqueItems": true, + "insertionOrder": true + }, + "BucketEncryption": { + "$ref": "#/definitions/BucketEncryption" + }, + "BucketName": { + "description": "A name for the bucket. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the bucket name.", + "maxLength": 63, + "minLength": 3, + "pattern": "^[a-z0-9][a-z0-9//.//-]*[a-z0-9]$", + "type": "string" + }, + "CorsConfiguration": { + "$ref": "#/definitions/CorsConfiguration", + "description": "Rules that define cross-origin resource sharing of objects in this bucket." + }, + "IntelligentTieringConfigurations": { + "description": "Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket.", + "items": { + "$ref": "#/definitions/IntelligentTieringConfiguration" + }, + "type": "array", + "uniqueItems": true, + "insertionOrder": true + }, + "InventoryConfigurations": { + "description": "The inventory configuration for an Amazon S3 bucket.", + "items": { + "$ref": "#/definitions/InventoryConfiguration" + }, + "type": "array", + "uniqueItems": true, + "insertionOrder": true + }, + "LifecycleConfiguration": { + "$ref": "#/definitions/LifecycleConfiguration", + "description": "Rules that define how Amazon S3 manages objects during their lifetime." + }, + "LoggingConfiguration": { + "$ref": "#/definitions/LoggingConfiguration", + "description": "Settings that define where logs are stored." + }, + "MetricsConfigurations": { + "description": "Settings that define a metrics configuration for the CloudWatch request metrics from the bucket.", + "items": { + "$ref": "#/definitions/MetricsConfiguration" + }, + "type": "array", + "uniqueItems": true, + "insertionOrder": true + }, + "NotificationConfiguration": { + "$ref": "#/definitions/NotificationConfiguration", + "description": "Configuration that defines how Amazon S3 handles bucket notifications." + }, + "ObjectLockConfiguration": { + "$ref": "#/definitions/ObjectLockConfiguration", + "description": "Places an Object Lock configuration on the specified bucket." 
+ }, + "ObjectLockEnabled": { + "description": "Indicates whether this bucket has an Object Lock configuration enabled.", + "type": "boolean" + }, + "OwnershipControls": { + "description": "Specifies the container element for object ownership rules.", + "$ref": "#/definitions/OwnershipControls" + }, + "PublicAccessBlockConfiguration": { + "$ref": "#/definitions/PublicAccessBlockConfiguration" + }, + "ReplicationConfiguration": { + "$ref": "#/definitions/ReplicationConfiguration", + "description": "Configuration for replicating objects in an S3 bucket." + }, + "Tags": { + "description": "An arbitrary set of tags (key-value pairs) for this S3 bucket.", + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + }, + "type": "array" + }, + "VersioningConfiguration": { + "$ref": "#/definitions/VersioningConfiguration" + }, + "WebsiteConfiguration": { + "$ref": "#/definitions/WebsiteConfiguration" + }, + "Arn": { + "$ref": "#/definitions/Arn", + "description": "The Amazon Resource Name (ARN) of the specified bucket.", + "examples": [ + "arn:aws:s3:::mybucket" + ] + }, + "DomainName": { + "description": "The IPv4 DNS name of the specified bucket.", + "examples": [ + "mystack-mybucket-kdwwxmddtr2g.s3.amazonaws.com" + ], + "type": "string" + }, + "DualStackDomainName": { + "description": "The IPv6 DNS name of the specified bucket. For more information about dual-stack endpoints, see [Using Amazon S3 Dual-Stack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html).", + "examples": [ + "mystack-mybucket-kdwwxmddtr2g.s3.dualstack.us-east-2.amazonaws.com" + ], + "type": "string" + }, + "RegionalDomainName": { + "description": "Returns the regional domain name of the specified bucket.", + "examples": [ + "mystack-mybucket-kdwwxmddtr2g.s3.us-east-2.amazonaws.com" + ], + "type": "string" + }, + "WebsiteURL": { + "description": "The Amazon S3 website endpoint for the specified bucket.", + "examples": [ + "Example (IPv4): http://mystack-mybucket-kdwwxmddtr2g.s3-website-us-east-2.amazonaws.com/", + "Example (IPv6): http://mystack-mybucket-kdwwxmddtr2g.s3.dualstack.us-east-2.amazonaws.com/" + ], + "format": "uri", + "type": "string" + } + }, + "definitions": { + "TagFilter": { + "description": "Tags to use to identify a subset of objects for an Amazon S3 bucket.", + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "Destination": { + "description": "Specifies information about where to publish analysis or configuration results for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).", + "type": "object", + "additionalProperties": false, + "properties": { + "BucketArn": { + "description": "The Amazon Resource Name (ARN) of the bucket to which data is exported.", + "type": "string" + }, + "BucketAccountId": { + "description": "The account ID that owns the destination S3 bucket. ", + "type": "string" + }, + "Format": { + "description": "Specifies the file format used when exporting data to Amazon S3.", + "type": "string", + "enum": [ + "CSV", + "ORC", + "Parquet" + ] + }, + "Prefix": { + "description": "The prefix to use when exporting data. 
The prefix is prepended to all results.", + "type": "string" + } + }, + "required": [ + "BucketArn", + "Format" + ] + }, + "AccelerateConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "AccelerationStatus": { + "description": "Configures the transfer acceleration state for an Amazon S3 bucket.", + "type": "string", + "enum": [ + "Enabled", + "Suspended" + ] + } + }, + "required": [ + "AccelerationStatus" + ] + }, + "AnalyticsConfiguration": { + "description": "Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.", + "type": "object", + "additionalProperties": false, + "properties": { + "TagFilters": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/TagFilter" + } + }, + "StorageClassAnalysis": { + "$ref": "#/definitions/StorageClassAnalysis" + }, + "Id": { + "description": "The ID that identifies the analytics configuration.", + "type": "string" + }, + "Prefix": { + "description": "The prefix that an object must have to be included in the analytics results.", + "type": "string" + } + }, + "required": [ + "StorageClassAnalysis", + "Id" + ] + }, + "StorageClassAnalysis": { + "description": "Specifies data related to access patterns to be collected and made available to analyze the tradeoffs between different storage classes for an Amazon S3 bucket.", + "type": "object", + "additionalProperties": false, + "properties": { + "DataExport": { + "$ref": "#/definitions/DataExport" + } + } + }, + "DataExport": { + "description": "Specifies how data related to the storage class analysis for an Amazon S3 bucket should be exported.", + "type": "object", + "additionalProperties": false, + "properties": { + "Destination": { + "$ref": "#/definitions/Destination" + }, + "OutputSchemaVersion": { + "description": "The version of the output schema to use when exporting data.", + "type": "string", + "const": "V_1" + } + }, + "required": [ + "Destination", + "OutputSchemaVersion" + ] + }, + "BucketEncryption": { + "description": "Specifies default encryption for a bucket using server-side encryption with either Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS).", + "type": "object", + "additionalProperties": false, + "properties": { + "ServerSideEncryptionConfiguration": { + "description": "Specifies the default server-side-encryption configuration.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/ServerSideEncryptionRule" + } + } + }, + "required": [ + "ServerSideEncryptionConfiguration" + ] + }, + "ServerSideEncryptionRule": { + "description": "Specifies the default server-side encryption configuration.", + "type": "object", + "additionalProperties": false, + "properties": { + "BucketKeyEnabled": { + "description": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.", + "type": "boolean" + }, + "ServerSideEncryptionByDefault": { + "$ref": "#/definitions/ServerSideEncryptionByDefault" + } + } + }, + "ServerSideEncryptionByDefault": { + "description": "Specifies the default server-side encryption to apply to new objects in the bucket. 
If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied.", + "type": "object", + "properties": { + "KMSMasterKeyID": { + "description": "\"KMSMasterKeyID\" can only be used when you set the value of SSEAlgorithm as aws:kms.", + "type": "string" + }, + "SSEAlgorithm": { + "type": "string", + "enum": [ + "aws:kms", + "AES256" + ] + } + }, + "additionalProperties": false, + "required": [ + "SSEAlgorithm" + ] + }, + "CorsConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "CorsRules": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/CorsRule", + "maxLength": 100 + } + } + }, + "required": [ + "CorsRules" + ] + }, + "CorsRule": { + "type": "object", + "description": "A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the configuration.", + "additionalProperties": false, + "properties": { + "AllowedHeaders": { + "description": "Headers that are specified in the Access-Control-Request-Headers header.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "type": "string" + } + }, + "AllowedMethods": { + "description": "An HTTP method that you allow the origin to execute.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "type": "string", + "enum": [ + "GET", + "PUT", + "HEAD", + "POST", + "DELETE" + ] + } + }, + "AllowedOrigins": { + "description": "One or more origins you want customers to be able to access the bucket from.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "type": "string" + } + }, + "ExposedHeaders": { + "description": "One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "type": "string" + } + }, + "Id": { + "description": "A unique identifier for this rule.", + "type": "string", + "maxLength": 255 + }, + "MaxAge": { + "description": "The time in seconds that your browser is to cache the preflight response for the specified resource.", + "type": "integer", + "minimum": 0 + } + }, + "required": [ + "AllowedMethods", + "AllowedOrigins" + ] + }, + "IntelligentTieringConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Id": { + "description": "The ID used to identify the S3 Intelligent-Tiering configuration.", + "type": "string" + }, + "Prefix": { + "description": "An object key name prefix that identifies the subset of objects to which the rule applies.", + "type": "string" + }, + "Status": { + "description": "Specifies the status of the configuration.", + "type": "string", + "enum": [ + "Disabled", + "Enabled" + ] + }, + "TagFilters": { + "description": "A container for a key-value pair.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/TagFilter" + } + }, + "Tierings": { + "description": "Specifies a list of S3 Intelligent-Tiering storage class tiers in the configuration. At least one tier must be defined in the list. 
At most, you can specify two tiers in the list, one for each available AccessTier: ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/Tiering" + } + } + }, + "required": [ + "Id", + "Status", + "Tierings" + ] + }, + "Tiering": { + "type": "object", + "additionalProperties": false, + "properties": { + "AccessTier": { + "description": "S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing frequently and infrequently accessed objects for a list of access tiers in the S3 Intelligent-Tiering storage class.", + "type": "string", + "enum": [ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS" + ] + }, + "Days": { + "description": "The number of consecutive days of no access after which an object will be eligible to be transitioned to the corresponding tier. The minimum number of days specified for Archive Access tier must be at least 90 days and Deep Archive Access tier must be at least 180 days. The maximum can be up to 2 years (730 days).", + "type": "integer" + } + }, + "required": [ + "AccessTier", + "Days" + ] + }, + "InventoryConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Destination": { + "$ref": "#/definitions/Destination" + }, + "Enabled": { + "description": "Specifies whether the inventory is enabled or disabled.", + "type": "boolean" + }, + "Id": { + "description": "The ID used to identify the inventory configuration.", + "type": "string" + }, + "IncludedObjectVersions": { + "description": "Object versions to include in the inventory list.", + "type": "string", + "enum": [ + "All", + "Current" + ] + }, + "OptionalFields": { + "description": "Contains the optional fields that are included in the inventory results.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "type": "string", + "enum": [ + "Size", + "LastModifiedDate", + "StorageClass", + "ETag", + "IsMultipartUploaded", + "ReplicationStatus", + "EncryptionStatus", + "ObjectLockRetainUntilDate", + "ObjectLockMode", + "ObjectLockLegalHoldStatus", + "IntelligentTieringAccessTier", + "BucketKeyStatus" + ] + } + }, + "Prefix": { + "description": "The prefix that is prepended to all inventory results.", + "type": "string" + }, + "ScheduleFrequency": { + "description": "Specifies the schedule for generating inventory results.", + "type": "string", + "enum": [ + "Daily", + "Weekly" + ] + } + }, + "required": [ + "Destination", + "Enabled", + "Id", + "IncludedObjectVersions", + "ScheduleFrequency" + ] + }, + "LifecycleConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Rules": { + "description": "A lifecycle rule for individual objects in an Amazon S3 bucket.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/Rule" + } + } + }, + "required": [ + "Rules" + ] + }, + "Rule": { + "type": "object", + "description": "You must specify at least one of the following properties: AbortIncompleteMultipartUpload, ExpirationDate, ExpirationInDays, NoncurrentVersionExpirationInDays, NoncurrentVersionTransition, NoncurrentVersionTransitions, Transition, or Transitions.", + "additionalProperties": false, + "properties": { + "AbortIncompleteMultipartUpload": { + "$ref": "#/definitions/AbortIncompleteMultipartUpload" + }, + "ExpirationDate": { + "$ref": "#/definitions/iso8601UTC" + }, + "ExpirationInDays": { + "type": "integer" + }, + "ExpiredObjectDeleteMarker": { + 
"type": "boolean" + }, + "Id": { + "type": "string", + "maxLength": 255 + }, + "NoncurrentVersionExpirationInDays": { + "type": "integer" + }, + "NoncurrentVersionExpiration": { + "$ref": "#/definitions/NoncurrentVersionExpiration" + }, + "NoncurrentVersionTransition": { + "$ref": "#/definitions/NoncurrentVersionTransition" + }, + "NoncurrentVersionTransitions": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/NoncurrentVersionTransition" + } + }, + "Prefix": { + "type": "string" + }, + "Status": { + "type": "string", + "enum": [ + "Enabled", + "Disabled" + ] + }, + "TagFilters": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/TagFilter" + } + }, + "ObjectSizeGreaterThan": { + "type": "string", + "maxLength": 20, + "pattern": "[0-9]+" + }, + "ObjectSizeLessThan": { + "type": "string", + "maxLength": 20, + "pattern": "[0-9]+" + }, + "Transition": { + "$ref": "#/definitions/Transition" + }, + "Transitions": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/Transition" + } + } + }, + "required": [ + "Status" + ] + }, + "AbortIncompleteMultipartUpload": { + "description": "Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload.", + "type": "object", + "additionalProperties": false, + "properties": { + "DaysAfterInitiation": { + "description": "Specifies the number of days after which Amazon S3 aborts an incomplete multipart upload.", + "type": "integer", + "minimum": 0 + } + }, + "required": [ + "DaysAfterInitiation" + ] + }, + "iso8601UTC": { + "description": "The date value in ISO 8601 format. The timezone is always UTC. (YYYY-MM-DDThh:mm:ssZ)", + "type": "string", + "pattern": "^([0-2]\\d{3})-(0[0-9]|1[0-2])-([0-2]\\d|3[01])T([01]\\d|2[0-4]):([0-5]\\d):([0-6]\\d)((\\.\\d{3})?)Z$" + }, + "NoncurrentVersionExpiration": { + "type": "object", + "description": "Container for the expiration rule that describes when noncurrent objects are expired. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 expire noncurrent object versions at a specific period in the object's lifetime", + "additionalProperties": false, + "properties": { + "NoncurrentDays": { + "description": "Specified the number of days an object is noncurrent before Amazon S3 can perform the associated action", + "type": "integer" + }, + "NewerNoncurrentVersions": { + "description": "Specified the number of newer noncurrent and current versions that must exists before performing the associated action", + "type": "integer" + } + }, + "required": [ + "NoncurrentDays" + ] + }, + "NoncurrentVersionTransition": { + "type": "object", + "description": "Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class. 
If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's lifetime.", + "additionalProperties": false, + "properties": { + "StorageClass": { + "description": "The class of storage used to store the object.", + "type": "string", + "enum": [ + "DEEP_ARCHIVE", + "GLACIER", + "Glacier", + "GLACIER_IR", + "INTELLIGENT_TIERING", + "ONEZONE_IA", + "STANDARD_IA" + ] + }, + "TransitionInDays": { + "description": "Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action.", + "type": "integer" + }, + "NewerNoncurrentVersions": { + "description": "Specified the number of newer noncurrent and current versions that must exists before performing the associated action", + "type": "integer" + } + }, + "required": [ + "StorageClass", + "TransitionInDays" + ] + }, + "Transition": { + "type": "object", + "properties": { + "StorageClass": { + "type": "string", + "enum": [ + "DEEP_ARCHIVE", + "GLACIER", + "Glacier", + "GLACIER_IR", + "INTELLIGENT_TIERING", + "ONEZONE_IA", + "STANDARD_IA" + ] + }, + "TransitionDate": { + "$ref": "#/definitions/iso8601UTC" + }, + "TransitionInDays": { + "type": "integer" + } + }, + "additionalProperties": false, + "description": "You must specify at least one of \"TransitionDate\" and \"TransitionInDays\"", + "required": [ + "StorageClass" + ] + }, + "LoggingConfiguration": { + "type": "object", + "properties": { + "DestinationBucketName": { + "type": "string", + "description": "The name of an Amazon S3 bucket where Amazon S3 store server access log files. You can store log files in any bucket that you own. By default, logs are stored in the bucket where the LoggingConfiguration property is defined." 
+ }, + "LogFilePrefix": { + "type": "string" + } + }, + "additionalProperties": false + }, + "MetricsConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "AccessPointArn": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "Prefix": { + "type": "string" + }, + "TagFilters": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/TagFilter" + } + } + }, + "required": [ + "Id" + ] + }, + "NotificationConfiguration": { + "description": "Describes the notification configuration for an Amazon S3 bucket.", + "type": "object", + "additionalProperties": false, + "properties": { + "EventBridgeConfiguration": { + "$ref": "#/definitions/EventBridgeConfiguration" + }, + "LambdaConfigurations": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/LambdaConfiguration" + } + }, + "QueueConfigurations": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/QueueConfiguration" + } + }, + "TopicConfigurations": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/TopicConfiguration" + } + } + } + }, + "EventBridgeConfiguration": { + "type": "object", + "description": "Describes the Amazon EventBridge notification configuration for an Amazon S3 bucket.", + "additionalProperties": false, + "properties": { + "EventBridgeEnabled": { + "description": "Specifies whether to send notifications to Amazon EventBridge when events occur in an Amazon S3 bucket.", + "type": "boolean", + "default": "true" + } + }, + "required": [ + "EventBridgeEnabled" + ] + }, + "LambdaConfiguration": { + "type": "object", + "description": "Describes the AWS Lambda functions to invoke and the events for which to invoke them.", + "additionalProperties": false, + "properties": { + "Event": { + "description": "The Amazon S3 bucket event for which to invoke the AWS Lambda function.", + "type": "string" + }, + "Filter": { + "description": "The filtering rules that determine which objects invoke the AWS Lambda function.", + "$ref": "#/definitions/NotificationFilter" + }, + "Function": { + "description": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 invokes when the specified event type occurs.", + "type": "string" + } + }, + "required": [ + "Function", + "Event" + ] + }, + "QueueConfiguration": { + "type": "object", + "description": "The Amazon Simple Queue Service queues to publish messages to and the events for which to publish messages.", + "additionalProperties": false, + "properties": { + "Event": { + "description": "The Amazon S3 bucket event about which you want to publish messages to Amazon SQS.", + "type": "string" + }, + "Filter": { + "description": "The filtering rules that determine which objects trigger notifications.", + "$ref": "#/definitions/NotificationFilter" + }, + "Queue": { + "description": "The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 publishes a message when it detects events of the specified type.", + "type": "string" + } + }, + "required": [ + "Event", + "Queue" + ] + }, + "TopicConfiguration": { + "type": "object", + "description": "The topic to which notifications are sent and the events for which notifications are generated.", + "additionalProperties": false, + "properties": { + "Event": { + "description": "The Amazon S3 bucket event about which to send notifications.", + "type": "string" + }, + 
"Filter": { + "description": "The filtering rules that determine for which objects to send notifications.", + "$ref": "#/definitions/NotificationFilter" + }, + "Topic": { + "description": "The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 publishes a message when it detects events of the specified type.", + "type": "string" + } + }, + "required": [ + "Event", + "Topic" + ] + }, + "NotificationFilter": { + "type": "object", + "description": "Specifies object key name filtering rules.", + "additionalProperties": false, + "properties": { + "S3Key": { + "$ref": "#/definitions/S3KeyFilter" + } + }, + "required": [ + "S3Key" + ] + }, + "S3KeyFilter": { + "type": "object", + "description": "A container for object key name prefix and suffix filtering rules.", + "additionalProperties": false, + "properties": { + "Rules": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/FilterRule" + } + } + }, + "required": [ + "Rules" + ] + }, + "FilterRule": { + "type": "object", + "description": "Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix or prefix of the key name.", + "additionalProperties": false, + "properties": { + "Name": { + "type": "string", + "maxLength": 1024 + }, + "Value": { + "type": "string" + } + }, + "required": [ + "Value", + "Name" + ] + }, + "ObjectLockConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "ObjectLockEnabled": { + "type": "string", + "const": "Enabled" + }, + "Rule": { + "$ref": "#/definitions/ObjectLockRule" + } + } + }, + "ObjectLockRule": { + "type": "object", + "description": "The Object Lock rule in place for the specified object.", + "additionalProperties": false, + "properties": { + "DefaultRetention": { + "$ref": "#/definitions/DefaultRetention" + } + } + }, + "DefaultRetention": { + "type": "object", + "description": "The default retention period that you want to apply to new objects placed in the specified bucket.", + "additionalProperties": false, + "properties": { + "Years": { + "type": "integer" + }, + "Days": { + "type": "integer" + }, + "Mode": { + "type": "string", + "enum": [ + "COMPLIANCE", + "GOVERNANCE" + ] + } + } + }, + "OwnershipControls": { + "type": "object", + "additionalProperties": false, + "properties": { + "Rules": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/OwnershipControlsRule" + } + } + }, + "required": [ + "Rules" + ] + }, + "OwnershipControlsRule": { + "type": "object", + "additionalProperties": false, + "properties": { + "ObjectOwnership": { + "description": "Specifies an object ownership rule.", + "type": "string", + "enum": [ + "ObjectWriter", + "BucketOwnerPreferred", + "BucketOwnerEnforced" + ] + } + } + }, + "PublicAccessBlockConfiguration": { + "description": "Configuration that defines how Amazon S3 handles public access.", + "type": "object", + "additionalProperties": false, + "properties": { + "BlockPublicAcls": { + "type": "boolean", + "description": "Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE causes the following behavior:\n- PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.\n - PUT Object calls fail if the request includes a public ACL.\nEnabling this setting doesn't affect existing policies or ACLs." 
+ }, + "BlockPublicPolicy": { + "type": "boolean", + "description": "Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.\nEnabling this setting doesn't affect existing bucket policies." + }, + "IgnorePublicAcls": { + "type": "boolean", + "description": "Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.\nEnabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set." + }, + "RestrictPublicBuckets": { + "type": "boolean", + "description": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a public policy.\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked." + } + } + }, + "ReplicationConfiguration": { + "type": "object", + "description": "A container for replication rules. You can add up to 1,000 rules. The maximum size of a replication configuration is 2 MB.", + "additionalProperties": false, + "properties": { + "Role": { + "description": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that Amazon S3 assumes when replicating objects.", + "type": "string" + }, + "Rules": { + "description": "A container for one or more replication rules.", + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/ReplicationRule", + "maxLength": 1000, + "minLength": 1 + } + } + }, + "required": [ + "Role", + "Rules" + ] + }, + "ReplicationRule": { + "type": "object", + "description": "Specifies which Amazon S3 objects to replicate and where to store the replicas.", + "additionalProperties": false, + "properties": { + "DeleteMarkerReplication": { + "$ref": "#/definitions/DeleteMarkerReplication" + }, + "Destination": { + "$ref": "#/definitions/ReplicationDestination" + }, + "Filter": { + "$ref": "#/definitions/ReplicationRuleFilter" + }, + "Id": { + "description": "A unique identifier for the rule.", + "type": "string", + "maxLength": 255 + }, + "Prefix": { + "description": "An object key name prefix that identifies the object or objects to which the rule applies.", + "type": "string", + "maxLength": 1024 + }, + "Priority": { + "type": "integer" + }, + "SourceSelectionCriteria": { + "$ref": "#/definitions/SourceSelectionCriteria" + }, + "Status": { + "description": "Specifies whether the rule is enabled.", + "type": "string", + "enum": [ + "Disabled", + "Enabled" + ] + } + }, + "required": [ + "Destination", + "Status" + ] + }, + "DeleteMarkerReplication": { + "type": "object", + "additionalProperties": false, + "properties": { + "Status": { + "type": "string", + "enum": [ + "Disabled", + "Enabled" + ] + } + } + }, + "ReplicationDestination": { + "type": "object", + "description": "Specifies which Amazon S3 bucket to store replicated objects in and their storage class.", + "additionalProperties": false, + "properties": { + "AccessControlTranslation": { + "$ref": "#/definitions/AccessControlTranslation" + }, + 
"Account": { + "type": "string" + }, + "Bucket": { + "type": "string" + }, + "EncryptionConfiguration": { + "$ref": "#/definitions/EncryptionConfiguration" + }, + "Metrics": { + "$ref": "#/definitions/Metrics" + }, + "ReplicationTime": { + "$ref": "#/definitions/ReplicationTime" + }, + "StorageClass": { + "description": "The storage class to use when replicating objects, such as S3 Standard or reduced redundancy.", + "type": "string", + "enum": [ + "DEEP_ARCHIVE", + "GLACIER", + "GLACIER_IR", + "INTELLIGENT_TIERING", + "ONEZONE_IA", + "REDUCED_REDUNDANCY", + "STANDARD", + "STANDARD_IA" + ] + } + }, + "required": [ + "Bucket" + ] + }, + "AccessControlTranslation": { + "type": "object", + "description": "Specify this only in a cross-account scenario (where source and destination bucket owners are not the same), and you want to change replica ownership to the AWS account that owns the destination bucket. If this is not specified in the replication configuration, the replicas are owned by same AWS account that owns the source object.", + "additionalProperties": false, + "properties": { + "Owner": { + "type": "string", + "const": "Destination" + } + }, + "required": [ + "Owner" + ] + }, + "EncryptionConfiguration": { + "type": "object", + "description": "Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects.", + "additionalProperties": false, + "properties": { + "ReplicaKmsKeyID": { + "description": "Specifies the ID (Key ARN or Alias ARN) of the customer managed customer master key (CMK) stored in AWS Key Management Service (KMS) for the destination bucket.", + "type": "string" + } + }, + "required": [ + "ReplicaKmsKeyID" + ] + }, + "Metrics": { + "type": "object", + "additionalProperties": false, + "properties": { + "EventThreshold": { + "$ref": "#/definitions/ReplicationTimeValue" + }, + "Status": { + "type": "string", + "enum": [ + "Disabled", + "Enabled" + ] + } + }, + "required": [ + "Status" + ] + }, + "ReplicationTimeValue": { + "type": "object", + "additionalProperties": false, + "properties": { + "Minutes": { + "type": "integer" + } + }, + "required": [ + "Minutes" + ] + }, + "ReplicationTime": { + "type": "object", + "additionalProperties": false, + "properties": { + "Status": { + "type": "string", + "enum": [ + "Disabled", + "Enabled" + ] + }, + "Time": { + "$ref": "#/definitions/ReplicationTimeValue" + } + }, + "required": [ + "Status", + "Time" + ] + }, + "ReplicationRuleFilter": { + "type": "object", + "additionalProperties": false, + "properties": { + "And": { + "$ref": "#/definitions/ReplicationRuleAndOperator" + }, + "Prefix": { + "type": "string" + }, + "TagFilter": { + "$ref": "#/definitions/TagFilter" + } + } + }, + "ReplicationRuleAndOperator": { + "type": "object", + "additionalProperties": false, + "properties": { + "Prefix": { + "type": "string" + }, + "TagFilters": { + "type": "array", + "uniqueItems": true, + "insertionOrder": true, + "items": { + "$ref": "#/definitions/TagFilter" + } + } + } + }, + "SourceSelectionCriteria": { + "description": "A container that describes additional filters for identifying the source objects that you want to replicate.", + "type": "object", + "additionalProperties": false, + "properties": { + "ReplicaModifications": { + "description": "A filter that you can specify for selection for modifications on replicas.", + "$ref": "#/definitions/ReplicaModifications" + }, + "SseKmsEncryptedObjects": { + "description": "A container for filter information for the selection of Amazon S3 
objects encrypted with AWS KMS.", + "$ref": "#/definitions/SseKmsEncryptedObjects" + } + } + }, + "ReplicaModifications": { + "type": "object", + "additionalProperties": false, + "properties": { + "Status": { + "description": "Specifies whether Amazon S3 replicates modifications on replicas.", + "type": "string", + "enum": [ + "Enabled", + "Disabled" + ] + } + }, + "required": [ + "Status" + ] + }, + "SseKmsEncryptedObjects": { + "type": "object", + "description": "A container for filter information for the selection of S3 objects encrypted with AWS KMS.", + "additionalProperties": false, + "properties": { + "Status": { + "description": "Specifies whether Amazon S3 replicates objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service.", + "type": "string", + "enum": [ + "Disabled", + "Enabled" + ] + } + }, + "required": [ + "Status" + ] + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "minLength": 1, + "maxLength": 256 + } + }, + "required": [ + "Value", + "Key" + ] + }, + "VersioningConfiguration": { + "description": "Describes the versioning state of an Amazon S3 bucket.", + "type": "object", + "additionalProperties": false, + "properties": { + "Status": { + "description": "The versioning state of the bucket.", + "type": "string", + "default": "Suspended", + "enum": [ + "Enabled", + "Suspended" + ] + } + }, + "required": [ + "Status" + ] + }, + "WebsiteConfiguration": { + "type": "object", + "description": "Specifies website configuration parameters for an Amazon S3 bucket.", + "additionalProperties": false, + "properties": { + "ErrorDocument": { + "description": "The name of the error document for the website.", + "type": "string" + }, + "IndexDocument": { + "description": "The name of the index document for the website.", + "type": "string" + }, + "RoutingRules": { + "type": "array", + "insertionOrder": true, + "items": { + "$ref": "#/definitions/RoutingRule" + } + }, + "RedirectAllRequestsTo": { + "$ref": "#/definitions/RedirectAllRequestsTo" + } + } + }, + "RoutingRule": { + "description": "Specifies the redirect behavior and when a redirect is applied.", + "type": "object", + "additionalProperties": false, + "properties": { + "RedirectRule": { + "description": "Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can specify a different error code to return.", + "$ref": "#/definitions/RedirectRule" + }, + "RoutingRuleCondition": { + "$ref": "#/definitions/RoutingRuleCondition" + } + }, + "required": [ + "RedirectRule" + ] + }, + "RedirectRule": { + "type": "object", + "description": "Specifies how requests are redirected. In the event of an error, you can specify a different error code to return.", + "additionalProperties": false, + "properties": { + "HostName": { + "description": "The host name to use in the redirect request.", + "type": "string" + }, + "HttpRedirectCode": { + "description": "The HTTP redirect code to use on the response. Not required if one of the siblings is present.", + "type": "string" + }, + "Protocol": { + "description": "Protocol to use when redirecting requests. 
The default is the protocol that is used in the original request.", + "enum": [ + "http", + "https" + ], + "type": "string" + }, + "ReplaceKeyPrefixWith": { + "description": "The object key prefix to use in the redirect request.", + "type": "string" + }, + "ReplaceKeyWith": { + "description": "The specific object key to use in the redirect request.d", + "type": "string" + } + } + }, + "RoutingRuleCondition": { + "description": "A container for describing a condition that must be met for the specified redirect to apply.You must specify at least one of HttpErrorCodeReturnedEquals and KeyPrefixEquals", + "type": "object", + "additionalProperties": false, + "properties": { + "KeyPrefixEquals": { + "description": "The object key name prefix when the redirect is applied.", + "type": "string" + }, + "HttpErrorCodeReturnedEquals": { + "description": "The HTTP error code when the redirect is applied. ", + "type": "string" + } + } + }, + "RedirectAllRequestsTo": { + "description": "Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket.", + "type": "object", + "additionalProperties": false, + "properties": { + "HostName": { + "description": "Name of the host where requests are redirected.", + "type": "string" + }, + "Protocol": { + "description": "Protocol to use when redirecting requests. The default is the protocol that is used in the original request.", + "type": "string", + "enum": [ + "http", + "https" + ] + } + }, + "required": [ + "HostName" + ] + }, + "Arn": { + "description": "the Amazon Resource Name (ARN) of the specified bucket.", + "type": "string" + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "createOnlyProperties": [ + "/properties/BucketName", + "/properties/ObjectLockEnabled" + ], + "primaryIdentifier": [ + "/properties/BucketName" + ], + "readOnlyProperties": [ + "/properties/Arn", + "/properties/DomainName", + "/properties/DualStackDomainName", + "/properties/RegionalDomainName", + "/properties/WebsiteURL" + ], + "handlers": { + "create": { + "permissions": [ + "s3:CreateBucket", + "s3:PutBucketTagging", + "s3:PutAnalyticsConfiguration", + "s3:PutEncryptionConfiguration", + "s3:PutBucketCORS", + "s3:PutInventoryConfiguration", + "s3:PutLifecycleConfiguration", + "s3:PutMetricsConfiguration", + "s3:PutBucketNotification", + "s3:PutBucketReplication", + "s3:PutBucketWebsite", + "s3:PutAccelerateConfiguration", + "s3:PutBucketPublicAccessBlock", + "s3:PutReplicationConfiguration", + "s3:PutObjectAcl", + "s3:PutBucketObjectLockConfiguration", + "s3:GetBucketAcl", + "s3:ListBucket", + "iam:PassRole", + "s3:DeleteObject", + "s3:PutBucketLogging", + "s3:PutBucketVersioning", + "s3:PutObjectLockConfiguration", + "s3:PutBucketOwnershipControls", + "s3:PutBucketIntelligentTieringConfiguration" + ] + }, + "read": { + "permissions": [ + "s3:GetAccelerateConfiguration", + "s3:GetLifecycleConfiguration", + "s3:GetBucketPublicAccessBlock", + "s3:GetAnalyticsConfiguration", + "s3:GetBucketCORS", + "s3:GetEncryptionConfiguration", + "s3:GetInventoryConfiguration", + "s3:GetBucketLogging", + "s3:GetMetricsConfiguration", + "s3:GetBucketNotification", + "s3:GetBucketVersioning", + "s3:GetReplicationConfiguration", + "S3:GetBucketWebsite", + "s3:GetBucketPublicAccessBlock", + "s3:GetBucketObjectLockConfiguration", + "s3:GetBucketTagging", + "s3:GetBucketOwnershipControls", + "s3:GetIntelligentTieringConfiguration", + "s3:ListBucket" + ] + }, 
+ "update": { + "permissions": [ + "s3:PutBucketAcl", + "s3:PutBucketTagging", + "s3:PutAnalyticsConfiguration", + "s3:PutEncryptionConfiguration", + "s3:PutBucketCORS", + "s3:PutInventoryConfiguration", + "s3:PutLifecycleConfiguration", + "s3:PutMetricsConfiguration", + "s3:PutBucketNotification", + "s3:PutBucketReplication", + "s3:PutBucketWebsite", + "s3:PutAccelerateConfiguration", + "s3:PutBucketPublicAccessBlock", + "s3:PutReplicationConfiguration", + "s3:PutBucketOwnershipControls", + "s3:PutBucketIntelligentTieringConfiguration", + "s3:DeleteBucketWebsite", + "s3:PutBucketLogging", + "s3:PutBucketVersioning", + "s3:PutObjectLockConfiguration", + "s3:DeleteBucketAnalyticsConfiguration", + "s3:DeleteBucketCors", + "s3:DeleteBucketMetricsConfiguration", + "s3:DeleteBucketEncryption", + "s3:DeleteBucketLifecycle", + "s3:DeleteBucketReplication", + "iam:PassRole" + ] + }, + "delete": { + "permissions": [ + "s3:DeleteBucket" + ] + }, + "list": { + "permissions": [ + "s3:ListAllMyBuckets" + ] + } + } +} diff --git a/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket_plugin.py b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket_plugin.py new file mode 100644 index 0000000000000..d79e772ca7a65 --- /dev/null +++ b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucket_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class S3BucketProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::S3::Bucket" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.s3.resource_providers.aws_s3_bucket import S3BucketProvider + + self.factory = S3BucketProvider diff --git a/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy.py b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy.py new file mode 100644 index 0000000000000..78c5db3544efa --- /dev/null +++ b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy.py @@ -0,0 +1,110 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.json import canonical_json +from localstack.utils.strings import md5 + + +class S3BucketPolicyProperties(TypedDict): + Bucket: Optional[str] + PolicyDocument: Optional[dict] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class S3BucketPolicyProvider(ResourceProvider[S3BucketPolicyProperties]): + TYPE = "AWS::S3::BucketPolicy" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[S3BucketPolicyProperties], + ) -> ProgressEvent[S3BucketPolicyProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - Bucket + - PolicyDocument + + Create-only properties: + - /properties/Bucket + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + s3 = request.aws_client_factory.s3 + + s3.put_bucket_policy(Bucket=model["Bucket"], Policy=json.dumps(model["PolicyDocument"])) + model["Id"] = md5(canonical_json(model["PolicyDocument"])) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[S3BucketPolicyProperties], + ) -> ProgressEvent[S3BucketPolicyProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[S3BucketPolicyProperties], + ) -> ProgressEvent[S3BucketPolicyProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + s3 = request.aws_client_factory.s3 + + try: + s3.delete_bucket_policy(Bucket=model["Bucket"]) + except s3.exceptions.NoSuchBucket: + pass + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[S3BucketPolicyProperties], + ) -> ProgressEvent[S3BucketPolicyProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy.schema.json b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy.schema.json new file mode 100644 index 0000000000000..c0e5cca4493da --- /dev/null +++ b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy.schema.json @@ -0,0 +1,29 @@ +{ + "typeName": "AWS::S3::BucketPolicy", + "description": "Resource Type definition for AWS::S3::BucketPolicy", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "Bucket": { + "type": "string" + }, + "PolicyDocument": { + "type": "object" + } + }, + "required": [ + "Bucket", + "PolicyDocument" + ], + "createOnlyProperties": [ + "/properties/Bucket" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy_plugin.py b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy_plugin.py new file mode 100644 index 0000000000000..1589f69b38ad6 --- /dev/null +++ b/localstack-core/localstack/services/s3/resource_providers/aws_s3_bucketpolicy_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class S3BucketPolicyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::S3::BucketPolicy" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.s3.resource_providers.aws_s3_bucketpolicy import ( + S3BucketPolicyProvider, + ) + + self.factory = S3BucketPolicyProvider diff --git a/localstack-core/localstack/services/s3/storage/__init__.py b/localstack-core/localstack/services/s3/storage/__init__.py new file mode 100644 index 0000000000000..1e75dd858f811 --- /dev/null +++ b/localstack-core/localstack/services/s3/storage/__init__.py @@ -0,0 +1,15 @@ +from .core import ( + LimitedIterableStream, + LimitedStream, + S3ObjectStore, 
+ S3StoredMultipart, + S3StoredObject, +) + +__all__ = [ + "LimitedIterableStream", + "LimitedStream", + "S3ObjectStore", + "S3StoredMultipart", + "S3StoredObject", +] diff --git a/localstack-core/localstack/services/s3/storage/core.py b/localstack-core/localstack/services/s3/storage/core.py new file mode 100644 index 0000000000000..d925f3cfc2b7e --- /dev/null +++ b/localstack-core/localstack/services/s3/storage/core.py @@ -0,0 +1,267 @@ +import abc +from io import RawIOBase +from typing import IO, Iterable, Iterator, Literal, Optional + +from localstack.aws.api.s3 import BucketName, PartNumber +from localstack.services.s3.models import S3Multipart, S3Object, S3Part +from localstack.services.s3.utils import ObjectRange + + +class LimitedIterableStream(Iterable[bytes]): + """ + This can limit an Iterable which can return any number of bytes at each iteration, to return a max_length total + amount of bytes + """ + + def __init__(self, iterable: Iterable[bytes], max_length: int): + self.iterable = iterable + self.max_length = max_length + + def __iter__(self): + for chunk in self.iterable: + read = len(chunk) + if self.max_length - read >= 0: + self.max_length -= read + yield chunk + elif self.max_length == 0: + break + else: + yield chunk[: self.max_length] + break + + return + + def close(self): + if hasattr(self.iterable, "close"): + self.iterable.close() + + +class LimitedStream(RawIOBase): + """ + This utility class allows to return a range from the underlying stream representing an S3 Object. + """ + + def __init__(self, base_stream: IO[bytes] | "S3StoredObject", range_data: ObjectRange): + super().__init__() + self.file = base_stream + self._pos = range_data.begin + self._max_length = range_data.content_length + + def read(self, s: int = -1) -> bytes | None: + if s is None or s < 0: + amount = self._max_length + else: + amount = min(self._max_length, s) + + self.file.seek(self._pos) + data = self.file.read(amount) + + if not data: + return b"" + read_amount = len(data) + self._max_length -= read_amount + self._pos += read_amount + + return data + + +class S3StoredObject(abc.ABC, Iterable[bytes]): + """ + This abstract class represents the underlying stored data of an S3 object. Its API mimics one of a typical object + returned by `open`, while allowing easy usage from an S3 perspective. + """ + + s3_object: S3Object + + def __init__(self, s3_object: S3Object | S3Part, mode: Literal["r", "w"] = "r"): + self.s3_object = s3_object + self._mode = mode + self.closed = False + + @abc.abstractmethod + def close(self): + pass + + @abc.abstractmethod + def write(self, s: IO[bytes] | "S3StoredObject") -> int: + pass + + @abc.abstractmethod + def append(self, part: "S3StoredObject") -> int: + pass + + @abc.abstractmethod + def read(self, s: int = -1) -> bytes | None: + pass + + @abc.abstractmethod + def seek(self, offset: int, whence: int = 0) -> int: + pass + + def truncate(self, size: int = None) -> int: + pass + + @property + @abc.abstractmethod + def last_modified(self) -> int: + pass + + @property + @abc.abstractmethod + def checksum(self) -> Optional[str]: + if not self.s3_object.checksum_algorithm: + return None + + @property + @abc.abstractmethod + def etag(self) -> str: + pass + + @abc.abstractmethod + def __iter__(self) -> Iterator[bytes]: + pass + + def __enter__(self): + """Context management protocol. 
Returns self (an instance of S3StoredObject).""" + if self.closed: + raise ValueError("I/O operation on closed S3 Object.") + return self + + def __exit__(self, *args): + """Context management protocol. Calls close()""" + self.close() + + +class S3StoredMultipart(abc.ABC): + """ + This abstract class represents the collection of stored data of an S3 Multipart Upload. It will collect parts, + represented as S3StoredObject, and can at some point be assembled into a single S3StoredObject. + """ + + parts: dict[PartNumber, S3StoredObject] + s3_multipart: S3Multipart + _s3_store: "S3ObjectStore" + + def __init__(self, s3_store: "S3ObjectStore", bucket: BucketName, s3_multipart: S3Multipart): + self.s3_multipart = s3_multipart + self.bucket = bucket + self._s3_store = s3_store + self.parts = {} + + @abc.abstractmethod + def open(self, s3_part: S3Part, mode: Literal["r", "w"] = "r") -> S3StoredObject: + pass + + @abc.abstractmethod + def remove_part(self, s3_part: S3Part): + pass + + @abc.abstractmethod + def complete_multipart(self, parts: list[PartNumber]) -> None: + pass + + @abc.abstractmethod + def close(self): + pass + + @abc.abstractmethod + def copy_from_object( + self, + s3_part: S3Part, + src_bucket: BucketName, + src_s3_object: S3Object, + range_data: Optional[ObjectRange], + ) -> None: + pass + + +class S3ObjectStore(abc.ABC): + """ + This abstract class is the entrypoint of accessing the storage of S3 data. You can easily open and remove S3 Objects + as well as directly retrieving a StoredS3Multipart to directly interact with it. + """ + + @abc.abstractmethod + def open( + self, bucket: BucketName, s3_object: S3Object, mode: Literal["r", "w"] = "r" + ) -> S3StoredObject: + pass + + @abc.abstractmethod + def remove(self, bucket: BucketName, s3_object: S3Object | list[S3Object]): + pass + + @abc.abstractmethod + def copy( + self, + src_bucket: BucketName, + src_object: S3Object, + dest_bucket: BucketName, + dest_object: S3Object, + ) -> S3StoredObject: + pass + + @abc.abstractmethod + def get_multipart(self, bucket: BucketName, upload_id: S3Multipart) -> S3StoredMultipart: + pass + + @abc.abstractmethod + def remove_multipart(self, bucket: BucketName, s3_multipart: S3Multipart): + pass + + def create_bucket(self, bucket: BucketName): + pass + + def delete_bucket(self, bucket: BucketName): + pass + + def flush(self): + """ + Calling `flush()` should force the `S3ObjectStore` to dump its state to disk, depending on the implementation. + """ + pass + + def close(self): + """ + Closing the `S3ObjectStore` allows freeing resources up (like file descriptors for example) when stopping the + linked provider. + """ + pass + + def reset(self): + """ + Resetting the `S3ObjectStore` will delete all the contained resources. + """ + pass + + +def should_copy_in_place( + src_bucket: BucketName, + src_object: S3Object, + dest_bucket: BucketName, + dest_object: S3Object, +) -> bool: + """ + Helper method to determine if we should use the same underlying fileobject to avoid copying in place for no gain. 
+    :param src_bucket: the source bucket
+    :param src_object: the source S3Object
+    :param dest_bucket: the destination bucket
+    :param dest_object: the destination S3Object
+    :return: whether the copy should reuse the same underlying file object instead of copying the data
+    """
+    if src_bucket != dest_bucket:
+        return False
+
+    if src_object.key != dest_object.key:
+        return False
+
+    # if the objects are versioned, we should not copy in place: the new destination
+    # object will be a new version of the source object, with a different version id (both can be fetched)
+    if _is_object_versioned(src_object) or _is_object_versioned(dest_object):
+        return False
+
+    return True
+
+
+def _is_object_versioned(s3_object: S3Object) -> bool:
+    return s3_object.version_id and s3_object.version_id != "null"
diff --git a/localstack-core/localstack/services/s3/storage/ephemeral.py b/localstack-core/localstack/services/s3/storage/ephemeral.py
new file mode 100644
index 0000000000000..6031610aeea62
--- /dev/null
+++ b/localstack-core/localstack/services/s3/storage/ephemeral.py
@@ -0,0 +1,523 @@
+import base64
+import hashlib
+import os
+import threading
+import time
+from collections import defaultdict
+from io import BytesIO, UnsupportedOperation
+from shutil import rmtree
+from tempfile import SpooledTemporaryFile, mkdtemp
+from threading import RLock
+from typing import IO, Iterator, Literal, Optional, TypedDict
+
+from readerwriterlock import rwlock
+
+from localstack.aws.api.s3 import BucketName, MultipartUploadId, PartNumber
+from localstack.services.s3.constants import S3_CHUNK_SIZE
+from localstack.services.s3.models import S3Multipart, S3Object, S3Part
+from localstack.services.s3.utils import ChecksumHash, ObjectRange, get_s3_checksum
+from localstack.utils.files import mkdir
+
+from .core import (
+    LimitedStream,
+    S3ObjectStore,
+    S3StoredMultipart,
+    S3StoredObject,
+    should_copy_in_place,
+)
+
+# max file size for S3 objects kept in memory (512 KiB by default)
+# TODO: make it configurable
+S3_MAX_FILE_SIZE_BYTES = 512 * 1024
+
+
+class LockedFileMixin:
+    """Mixin with 2 locks: one lock used to lock the underlying stream position between seek and read, and a
+    readwrite lock so that no writer can modify the object while readers are iterating over it"""
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # this lock allows us to make `seek` and `read` operation as an atomic one, without an external reader
+        # modifying the internal position of the stream
+        self.position_lock = RLock()
+        # these locks are for the read/write lock issues. No writer should modify the object while a reader is
+        # currently iterating over it.
+        # see:
+        self.readwrite_lock = rwlock.RWLockWrite()
+        self.internal_last_modified = 0
+
+
+class LockedSpooledTemporaryFile(LockedFileMixin, SpooledTemporaryFile):
+    """Creates a SpooledTemporaryFile with locks"""
+
+    def seekable(self) -> bool:
+        return True
+
+
+class EphemeralS3StoredObject(S3StoredObject):
+    """
+    An Ephemeral S3StoredObject, using LockedSpooledTemporaryFile as its underlying file object.
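The two-lock scheme from LockedFileMixin above is what the stored-object wrappers acquire; a condensed sketch of the readerwriterlock usage (illustrative only, not part of the patch):

from readerwriterlock import rwlock

lock = rwlock.RWLockWrite()  # writer-preferring readers-writer lock

reader = lock.gen_rlock()
if reader.acquire():  # any number of readers can hold this concurrently
    try:
        ...  # stream the object out to a client
    finally:
        reader.release()

with lock.gen_wlock():  # blocks until every reader has released
    ...  # safe to overwrite the object's contents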
+ """ + + file: LockedSpooledTemporaryFile + size: int + _pos: int + etag: Optional[str] + checksum_hash: Optional[ChecksumHash] + _checksum: Optional[str] + _lock: rwlock.Lockable + + def __init__( + self, + s3_object: S3Object | S3Part, + file: LockedSpooledTemporaryFile, + mode: Literal["r", "w"] = "r", + ): + super().__init__(s3_object=s3_object, mode=mode) + self.file = file + self.size = 0 + self._etag = None + self.checksum_hash = None + self._checksum = None + self._pos = 0 + self._lock = ( + self.file.readwrite_lock.gen_wlock() + if mode == "w" + else self.file.readwrite_lock.gen_rlock() + ) + self._lock.acquire() + + def read(self, s: int = -1) -> bytes | None: + """Read at most `s` bytes from the underlying fileobject, and keeps the internal position""" + with self.file.position_lock: + self.file.seek(self._pos) + data = self.file.read(s) + + if not data: + return b"" + + read = len(data) + self._pos += read + + return data + + def seek(self, offset: int, whence: int = 0) -> int: + """ + Set the position of the stream at `offset`, starting depending on `whence`. + :param offset: offset from the position depending on the whence + :param whence: can be 0, 1 or 2 - 0 meaning beginning of stream, 1 current position and 2 end of the stream + :return: the position after seeking, from beginning of the stream + """ + with self.file.position_lock: + self._pos = self.file.seek(offset, whence) + + return self._pos + + def truncate(self, size: int = None) -> int: + """ + Resize the stream to the given size in bytes (or the current position if size is not specified). + The current stream position isn’t changed. This resizing can extend or reduce the current file size. + :param size: size to resize the stream to, or position if not given + :return: the new file size + """ + if self._mode != "w": + raise UnsupportedOperation("S3 object is not in write mode") + + with self.file.position_lock: + truncate = self.file.truncate(size) + self.s3_object.internal_last_modified = self.file.internal_last_modified = ( + time.time_ns() + ) + return truncate + + def write(self, stream: IO[bytes] | "EphemeralS3StoredObject" | LimitedStream) -> int: + """ + Read from the `stream` parameter into the underlying fileobject. This will truncate the fileobject before + writing, effectively copying the stream into the fileobject. While iterating, it will also calculate the MD5 + hash of the stream, and its checksum value if the S3Object has a checksum algorithm set. + This method can directly take an EphemeralS3StoredObject as input, and will use its own locking system to + prevent concurrent write access while iterating over the stream input. 
+ :param stream: can be a regular IO[bytes] or an EphemeralS3StoredObject + :return: number of bytes written + """ + if self._mode != "w": + raise UnsupportedOperation("S3 object is not in write mode") + + if stream is None: + stream = BytesIO() + + if self.s3_object.checksum_algorithm: + self.checksum_hash = get_s3_checksum(self.s3_object.checksum_algorithm) + + file = self.file + with file.position_lock: + file.seek(0) + file.truncate() + + etag = hashlib.md5(usedforsecurity=False) + + while data := stream.read(S3_CHUNK_SIZE): + file.write(data) + etag.update(data) + if self.checksum_hash: + self.checksum_hash.update(data) + + etag = etag.hexdigest() + self.size = self.s3_object.size = file.tell() + self._etag = self.s3_object.etag = etag + self.s3_object.internal_last_modified = self.file.internal_last_modified = ( + time.time_ns() + ) + + self._pos = file.seek(0) + + return self.size + + def append(self, part: "EphemeralS3StoredObject") -> int: + """ + Append the EphemeralS3StoredObject data into the underlying fileobject. Used with Multipart Upload to + assemble parts into the final S3StoredObject. + :param part: EphemeralS3StoredObject + :return: number of written bytes + """ + if self._mode != "w": + raise UnsupportedOperation("S3 object is not in write mode") + + read = 0 + while data := part.read(S3_CHUNK_SIZE): + self.file.write(data) + read += len(data) + + self.size += read + self.s3_object.size = self.size + self.s3_object.internal_last_modified = self.file.internal_last_modified = time.time_ns() + return read + + def close(self): + """We only release the lock, because closing the underlying file object will delete it""" + self._lock.release() + self.closed = True + + @property + def last_modified(self) -> int: + return self.file.internal_last_modified + + @property + def checksum(self) -> Optional[str]: + """ + Return the object checksum base64 encoded, if the S3Object has a checksum algorithm. + If the checksum hasn't been calculated, this method will iterate over the file again to recalculate it. + :return: + """ + if not self.s3_object.checksum_algorithm: + return + if not self.checksum_hash: + # we didn't write or yet calculated the checksum, so calculate with what is in the underlying file + self.checksum_hash = get_s3_checksum(self.s3_object.checksum_algorithm) + original_pos = self._pos + self._pos = 0 + while data := self.read(S3_CHUNK_SIZE): + self.checksum_hash.update(data) + + self._pos = original_pos + + if not self._checksum: + self._checksum = base64.b64encode(self.checksum_hash.digest()).decode() + + return self._checksum + + @property + def etag(self) -> str: + if not self._etag: + etag = hashlib.md5(usedforsecurity=False) + original_pos = self._pos + self._pos = 0 + while data := self.read(S3_CHUNK_SIZE): + etag.update(data) + self._pos = original_pos + self._etag = etag.hexdigest() + + return self._etag + + def __iter__(self) -> Iterator[bytes]: + """ + This is mostly used as convenience to directly pass this object to a Werkzeug response object, hiding the + iteration locking logic. + The caller needs to call `close()` once it is done to release the lock. 
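For reference, the checksum and etag properties above differ mainly in encoding: S3 checksum headers carry the base64 of the binary digest, while the ETag is the hex MD5 digest (for non-multipart objects):

import base64
import hashlib

body = b"some object body"
checksum_sha256 = base64.b64encode(hashlib.sha256(body).digest()).decode()  # x-amz-checksum-sha256 style
etag = hashlib.md5(body).hexdigest()  # ETag style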
+ :return: + """ + while data := self.read(S3_CHUNK_SIZE): + if not data: + return b"" + + yield data + + +class EphemeralS3StoredMultipart(S3StoredMultipart): + upload_dir: str + _s3_store: "EphemeralS3ObjectStore" + parts: dict[PartNumber, LockedSpooledTemporaryFile] + + def __init__( + self, + s3_store: "EphemeralS3ObjectStore", + bucket: BucketName, + s3_multipart: S3Multipart, + upload_dir: str, + ): + super().__init__(s3_store=s3_store, bucket=bucket, s3_multipart=s3_multipart) + self.upload_dir = upload_dir + + def open(self, s3_part: S3Part, mode: Literal["r", "w"] = "r") -> EphemeralS3StoredObject: + """ + Returns an EphemeralS3StoredObject for an S3Part, allowing direct access to the object. This will add a part + into the Multipart collection. We can directly store the EphemeralS3Stored Object in the collection, as S3Part + cannot be accessed/read directly from the API. + :param s3_part: S3Part object + :param mode: opening mode, "read" or "write" + :return: EphemeralS3StoredObject, most often to directly `write` into it. + """ + if not (file := self.parts.get(s3_part.part_number)): + file = LockedSpooledTemporaryFile(dir=self.upload_dir, max_size=S3_MAX_FILE_SIZE_BYTES) + self.parts[s3_part.part_number] = file + + return EphemeralS3StoredObject(s3_part, file, mode=mode) + + def remove_part(self, s3_part: S3Part): + """ + Remove a part from the Multipart collection. + :param s3_part: S3Part + :return: + """ + stored_part_file = self.parts.pop(s3_part.part_number, None) + if stored_part_file: + stored_part_file.close() + + def complete_multipart(self, parts: list[S3Part]) -> None: + """ + Takes a list of parts numbers, and will iterate over it to assemble all parts together into a single + EphemeralS3StoredObject containing all those parts. + :param parts: list of PartNumber + :return: the resulting EphemeralS3StoredObject + """ + with self._s3_store.open( + self.bucket, self.s3_multipart.object, mode="w" + ) as s3_stored_object: + # reset the file to overwrite + s3_stored_object.seek(0) + s3_stored_object.truncate() + for s3_part in parts: + with self.open(s3_part, mode="r") as stored_part: + s3_stored_object.append(stored_part) + + def close(self): + """ + Iterates over all parts of the collection to close them and clean them up. Closing a part will delete it. + :return: + """ + for stored_part_file in self.parts.values(): + stored_part_file.close() + self.parts.clear() + + def copy_from_object( + self, + s3_part: S3Part, + src_bucket: BucketName, + src_s3_object: S3Object, + range_data: Optional[ObjectRange], + ) -> None: + """ + Create and add an EphemeralS3StoredObject to the Multipart collection, with an S3Object as input. This will + take a slice of the S3Object to create a part. + :param s3_part: the part which will contain the S3 Object slice + :param src_bucket: the bucket where the source S3Object resides + :param src_s3_object: the source S3Object + :param range_data: the range data from which the S3Part will copy its data. 
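The assembly step in complete_multipart above boils down to a chunked append in part-number order; a self-contained sketch with BytesIO standing in for the stored parts:

from io import BytesIO

parts = {2: BytesIO(b"bbb"), 1: BytesIO(b"aaa"), 3: BytesIO(b"ccc")}
final = BytesIO()

for part_number in sorted(parts):
    part = parts[part_number]
    while data := part.read(65536):  # chunked, like EphemeralS3StoredObject.append
        final.write(data)

assert final.getvalue() == b"aaabbbccc"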
+        :return: None; the part's data is written into the multipart collection
+        """
+        with (
+            self._s3_store.open(src_bucket, src_s3_object, mode="r") as src_stored_object,
+            self.open(s3_part, mode="w") as stored_part,
+        ):
+            if not range_data:
+                stored_part.write(src_stored_object)
+                return
+
+            object_slice = LimitedStream(src_stored_object, range_data=range_data)
+            stored_part.write(object_slice)
+
+
+class BucketTemporaryFileSystem(TypedDict):
+    keys: dict[str, LockedSpooledTemporaryFile]
+    multiparts: dict[MultipartUploadId, EphemeralS3StoredMultipart]
+
+
+class EphemeralS3ObjectStore(S3ObjectStore):
+    """
+    This simulates a filesystem where S3 will store its assets
+    The structure is the following:
+    <bucket-name>/
+      keys/
+      ├─ <hash-of-key> -> fileobj
+      ├─ <hash-of-key> -> fileobj
+      multiparts/
+      ├─ <upload-id>/
+      │  ├─ <part-number> -> fileobj
+      │  ├─ <part-number> -> fileobj
+    """
+
+    root_directory: str
+
+    def __init__(self, root_directory: str = None):
+        self._filesystem: dict[BucketName, BucketTemporaryFileSystem] = defaultdict(
+            lambda: {"keys": {}, "multiparts": {}}
+        )
+        # namespace the EphemeralS3ObjectStore artifacts under a single root directory, under gettempdir() if not
+        # provided
+        if not root_directory:
+            root_directory = mkdtemp()
+
+        self.root_directory = root_directory
+        self._lock_multipart_create = threading.RLock()
+
+    def open(
+        self, bucket: BucketName, s3_object: S3Object, mode: Literal["r", "w"] = "r"
+    ) -> EphemeralS3StoredObject:
+        """
+        Returns an EphemeralS3StoredObject for an S3Object: a wrapper around the underlying file object, exposing
+        higher level actions for the provider to interact with. This allows the provider to store data for an
+        S3Object.
+        :param bucket: the S3Object bucket
+        :param s3_object: an S3Object
+        :param mode: read or write mode for the object to open
+        :return: EphemeralS3StoredObject
+        """
+        key = self._key_from_s3_object(s3_object)
+        if not (file := self._get_object_file(bucket, key)):
+            bucket_tmp_dir = os.path.join(self.root_directory, bucket)
+            file = LockedSpooledTemporaryFile(dir=bucket_tmp_dir, max_size=S3_MAX_FILE_SIZE_BYTES)
+            self._filesystem[bucket]["keys"][key] = file
+
+        return EphemeralS3StoredObject(s3_object=s3_object, file=file, mode=mode)
+
+    def remove(self, bucket: BucketName, s3_object: S3Object | list[S3Object]):
+        """
+        Remove the underlying data of an S3Object.
+        :param bucket: the S3Object bucket
+        :param s3_object: S3Object to remove. This can also take a list of S3Objects
+        :return:
+        """
+        if not isinstance(s3_object, list):
+            s3_object = [s3_object]
+
+        if keys := self._filesystem.get(bucket, {}).get("keys", {}):
+            for obj in s3_object:
+                key = self._key_from_s3_object(obj)
+                file = keys.pop(key, None)
+                if file:
+                    file.close()
+
+    def copy(
+        self,
+        src_bucket: BucketName,
+        src_object: S3Object,
+        dest_bucket: BucketName,
+        dest_object: S3Object,
+    ) -> EphemeralS3StoredObject:
+        """
+        Copy an S3Object into another one. This copies the underlying data of the source object into the destination
+        object.
+        :param src_bucket: the source bucket
+        :param src_object: the source S3Object
+        :param dest_bucket: the destination bucket
+        :param dest_object: the destination S3Object
+        :return: the destination EphemeralS3StoredObject
+        """
+        # If this is an in-place copy, directly return the EphemeralS3StoredObject of the destination S3Object,
+        # no need to copy the underlying data, except if we are in a versioned bucket.
+ if should_copy_in_place(src_bucket, src_object, dest_bucket, dest_object): + return self.open(dest_bucket, dest_object, mode="r") + + with self.open(src_bucket, src_object, mode="r") as src_stored_object: + dest_stored_object = self.open(dest_bucket, dest_object, mode="w") + dest_stored_object.write(src_stored_object) + + return dest_stored_object + + def get_multipart( + self, bucket: BucketName, s3_multipart: S3Multipart + ) -> EphemeralS3StoredMultipart: + # We need to lock this block, because we could have concurrent requests trying to access the same multipart + # and both creating it at the same time, returning 2 different entities and overriding one + with self._lock_multipart_create: + if not (multipart := self._get_multipart(bucket, s3_multipart.id)): + upload_dir = self._create_upload_directory(bucket, s3_multipart.id) + multipart = EphemeralS3StoredMultipart(self, bucket, s3_multipart, upload_dir) + self._filesystem[bucket]["multiparts"][s3_multipart.id] = multipart + + return multipart + + def remove_multipart(self, bucket: BucketName, s3_multipart: S3Multipart): + if multiparts := self._filesystem.get(bucket, {}).get("multiparts", {}): + if multipart := multiparts.pop(s3_multipart.id, None): + multipart.close() + self._delete_upload_directory(bucket, s3_multipart.id) + + def create_bucket(self, bucket: BucketName): + mkdir(os.path.join(self.root_directory, bucket)) + + def delete_bucket(self, bucket: BucketName): + if self._filesystem.pop(bucket, None): + rmtree(os.path.join(self.root_directory, bucket)) + + def close(self): + """ + Close the Store and clean up all underlying objects. This will effectively remove all data from the filesystem + and memory. + :return: + """ + for bucket in self._filesystem.values(): + if keys := bucket.get("keys"): + for file in keys.values(): + file.close() + keys.clear() + + if multiparts := bucket.get("multiparts"): + for multipart in multiparts.values(): + multipart.close() + multiparts.clear() + + def reset(self): + self.close() + + @staticmethod + def _key_from_s3_object(s3_object: S3Object) -> str: + return str(hash(f"{s3_object.key}?{s3_object.version_id or 'null'}")) + + def _get_object_file(self, bucket: BucketName, key: str) -> LockedSpooledTemporaryFile | None: + return self._filesystem.get(bucket, {}).get("keys", {}).get(key) + + def _get_multipart(self, bucket: BucketName, upload_key: str) -> S3StoredMultipart | None: + return self._filesystem.get(bucket, {}).get("multiparts", {}).get(upload_key) + + def _create_upload_directory( + self, bucket_name: BucketName, upload_id: MultipartUploadId + ) -> str: + """ + Create a temporary directory inside a bucket, representing a multipart upload, holding its parts + :param bucket_name: the bucket where the multipart upload resides + :param upload_id: the multipart upload id + :return: the full part of the upload, where the parts will live + """ + upload_tmp_dir = os.path.join(self.root_directory, bucket_name, upload_id) + mkdir(upload_tmp_dir) + return upload_tmp_dir + + def _delete_upload_directory(self, bucket_name: BucketName, upload_id: MultipartUploadId): + """ + Delete the temporary directory representing a multipart upload + :param bucket_name: the multipart upload bucket + :param upload_id: the multipart upload id + :return: + """ + upload_tmp_dir = os.path.join(self.root_directory, bucket_name, upload_id) + if upload_tmp_dir: + rmtree(upload_tmp_dir, ignore_errors=True) diff --git a/localstack-core/localstack/services/s3/utils.py b/localstack-core/localstack/services/s3/utils.py 
new file mode 100644 index 0000000000000..8592de4712594 --- /dev/null +++ b/localstack-core/localstack/services/s3/utils.py @@ -0,0 +1,1066 @@ +import base64 +import codecs +import datetime +import hashlib +import itertools +import logging +import re +import time +import zlib +from enum import StrEnum +from secrets import token_bytes +from typing import Any, Dict, Literal, NamedTuple, Optional, Protocol, Tuple, Union +from urllib import parse as urlparser +from zoneinfo import ZoneInfo + +import xmltodict +from botocore.exceptions import ClientError +from botocore.utils import InvalidArnException + +from localstack import config, constants +from localstack.aws.api import CommonServiceException, RequestContext +from localstack.aws.api.s3 import ( + AccessControlPolicy, + BucketCannedACL, + BucketName, + ChecksumAlgorithm, + ContentMD5, + CopyObjectRequest, + CopySource, + ETag, + GetObjectRequest, + Grant, + Grantee, + HeadObjectRequest, + InvalidArgument, + InvalidRange, + InvalidTag, + LifecycleExpiration, + LifecycleRule, + LifecycleRules, + Metadata, + ObjectCannedACL, + ObjectKey, + ObjectSize, + ObjectVersionId, + Owner, + Permission, + PreconditionFailed, + PutObjectRequest, + SSEKMSKeyId, + TaggingHeader, + TagSet, + UploadPartRequest, +) +from localstack.aws.api.s3 import Type as GranteeType +from localstack.aws.chain import HandlerChain +from localstack.aws.connect import connect_to +from localstack.http import Response +from localstack.services.s3 import checksums +from localstack.services.s3.constants import ( + ALL_USERS_ACL_GRANTEE, + AUTHENTICATED_USERS_ACL_GRANTEE, + CHECKSUM_ALGORITHMS, + LOG_DELIVERY_ACL_GRANTEE, + S3_VIRTUAL_HOST_FORWARDED_HEADER, + SIGNATURE_V2_PARAMS, + SIGNATURE_V4_PARAMS, + SYSTEM_METADATA_SETTABLE_HEADERS, +) +from localstack.services.s3.exceptions import InvalidRequest, MalformedXML +from localstack.utils.aws import arns +from localstack.utils.aws.arns import parse_arn +from localstack.utils.objects import singleton_factory +from localstack.utils.strings import ( + is_base64, + to_bytes, + to_str, +) +from localstack.utils.urls import localstack_host + +LOG = logging.getLogger(__name__) + +BUCKET_NAME_REGEX = ( + r"(?=^.{3,63}$)(?!^(\d+\.)+\d+$)" + + r"(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)" +) + +TAG_REGEX = re.compile(r"^[\w\s.:/=+\-@]*$") + + +S3_VIRTUAL_HOSTNAME_REGEX = ( + r"(?P.*).s3.(?P(?:us-gov|us|ap|ca|cn|eu|sa)-[a-z]+-\d)?.*" +) + +_s3_virtual_host_regex = re.compile(S3_VIRTUAL_HOSTNAME_REGEX) + + +RFC1123 = "%a, %d %b %Y %H:%M:%S GMT" +_gmt_zone_info = ZoneInfo("GMT") + + +def s3_response_handler(chain: HandlerChain, context: RequestContext, response: Response): + """ + This response handler is taking care of removing certain headers from S3 responses. + We cannot handle this in the serializer, because the serializer handler calls `Response.update_from`, which does + not allow you to remove headers, only add them. + This handler can delete headers from the response. + """ + # some requests, for example coming frome extensions, are flagged as S3 requests. 
This check confirms that it is + # indeed truly an S3 request by checking if it parsed properly as an S3 operation + if not context.service_operation: + return + + # if AWS returns 204, it will not return a body, Content-Length and Content-Type + # the web server is already taking care of deleting the body, but it's more explicit to remove it here + if response.status_code == 204: + response.data = b"" + response.headers.pop("Content-Type", None) + response.headers.pop("Content-Length", None) + + elif ( + response.status_code == 200 + and context.request.method == "PUT" + and response.headers.get("Content-Length") in (0, None) + ): + # AWS does not return a Content-Type if the Content-Length is 0 + response.headers.pop("Content-Type", None) + + +def get_owner_for_account_id(account_id: str): + """ + This method returns the S3 Owner from the account id. for now, this is hardcoded as it was in moto, but we can then + extend it to return different values depending on the account ID + See https://docs.aws.amazon.com/AmazonS3/latest/API/API_Owner.html + :param account_id: the owner account id + :return: the Owner object containing the DisplayName and owner ID + """ + return Owner( + DisplayName="webfile", # only in certain regions, see above + ID="75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a", + ) + + +def extract_bucket_key_version_id_from_copy_source( + copy_source: CopySource, +) -> tuple[BucketName, ObjectKey, Optional[ObjectVersionId]]: + """ + Utility to parse bucket name, object key and optionally its versionId. It accepts the CopySource format: + - ?versionId=, used for example in CopySource for CopyObject + :param copy_source: the S3 CopySource to parse + :return: parsed BucketName, ObjectKey and optionally VersionId + """ + copy_source_parsed = urlparser.urlparse(copy_source) + # we need to manually replace `+` character with a space character before URL decoding, because different languages + # don't encode their URL the same way (%20 vs +), and Python doesn't unquote + into a space char + parsed_path = urlparser.unquote(copy_source_parsed.path.replace("+", " ")).lstrip("/") + + if "/" not in parsed_path: + raise InvalidArgument( + "Invalid copy source object key", + ArgumentName="x-amz-copy-source", + ArgumentValue="x-amz-copy-source", + ) + src_bucket, src_key = parsed_path.split("/", 1) + src_version_id = urlparser.parse_qs(copy_source_parsed.query).get("versionId", [None])[0] + + return src_bucket, src_key, src_version_id + + +class ChecksumHash(Protocol): + """ + This Protocol allows proper typing for different kind of hash used by S3 (hashlib.shaX, zlib.crc32 from + S3CRC32Checksum, and botocore CrtCrc32cChecksum). + """ + + def digest(self) -> bytes: ... + + def update(self, value: bytes): ... + + +def get_s3_checksum_algorithm_from_request( + request: PutObjectRequest | UploadPartRequest, +) -> ChecksumAlgorithm | None: + checksum_algorithm: list[ChecksumAlgorithm] = [ + algo for algo in CHECKSUM_ALGORITHMS if request.get(f"Checksum{algo}") + ] + if not checksum_algorithm: + return None + + if len(checksum_algorithm) > 1: + raise InvalidRequest( + "Expecting a single x-amz-checksum- header. Multiple checksum Types are not allowed." 
+    )
+
+    return checksum_algorithm[0]
+
+
+def get_s3_checksum(algorithm) -> ChecksumHash:
+    match algorithm:
+        case ChecksumAlgorithm.CRC32:
+            return S3CRC32Checksum()
+
+        case ChecksumAlgorithm.CRC32C:
+            from botocore.httpchecksum import CrtCrc32cChecksum
+
+            return CrtCrc32cChecksum()
+
+        case ChecksumAlgorithm.CRC64NVME:
+            from botocore.httpchecksum import CrtCrc64NvmeChecksum
+
+            return CrtCrc64NvmeChecksum()
+
+        case ChecksumAlgorithm.SHA1:
+            return hashlib.sha1(usedforsecurity=False)
+
+        case ChecksumAlgorithm.SHA256:
+            return hashlib.sha256(usedforsecurity=False)
+
+        case _:
+            # TODO: check proper error? for now validated client side, need to check server response
+            raise InvalidRequest("The value specified in the x-amz-trailer header is not supported")
+
+
+class S3CRC32Checksum:
+    """Implements a unified way of using zlib.crc32 compatible with hashlib.sha and botocore CrtCrc32cChecksum"""
+
+    __slots__ = ["checksum"]
+
+    def __init__(self):
+        self.checksum = zlib.crc32(b"")
+
+    def update(self, value: bytes):
+        self.checksum = zlib.crc32(value, self.checksum)
+
+    def digest(self) -> bytes:
+        return self.checksum.to_bytes(4, "big")
+
+
+class CombinedCrcHash:
+    def __init__(self, checksum_type: ChecksumAlgorithm):
+        match checksum_type:
+            case ChecksumAlgorithm.CRC32:
+                func = checksums.combine_crc32
+            case ChecksumAlgorithm.CRC32C:
+                func = checksums.combine_crc32c
+            case ChecksumAlgorithm.CRC64NVME:
+                func = checksums.combine_crc64_nvme
+            case _:
+                raise ValueError("You cannot combine SHA based checksums")
+
+        self.combine_function = func
+        self.checksum = b""
+
+    def combine(self, value: bytes, object_len: int):
+        if not self.checksum:
+            self.checksum = value
+            return
+
+        self.checksum = self.combine_function(self.checksum, value, object_len)
+
+    def digest(self):
+        return self.checksum
+
+
+class ObjectRange(NamedTuple):
+    """
+    NamedTuple representing a parsed Range header with the requested S3 object size
+    https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range
+    """
+
+    content_range: str  # the Content-Range value for the response ("bytes begin-end/object_size")
+    content_length: int  # the number of bytes in the range (end - begin + 1)
+    begin: int  # the start of the range (zero-based, inclusive)
+    end: int  # the end of the range (zero-based, inclusive)
+
+
+def parse_range_header(range_header: str, object_size: int) -> ObjectRange | None:
+    """
+    Takes a Range header, and returns a dataclass containing the necessary information to return only a slice of an
+    S3 object. If the range header is invalid, we return None so that the request is treated as a regular request.
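Worked examples for the rules implemented below, with object_size=10 (so the last valid offset is 9):

#   "bytes=2-5"  -> begin=2, end=5, content_length=4, "bytes 2-5/10"
#   "bytes=5-"   -> begin=5, end=9 (open-ended ranges are capped at the last byte)
#   "bytes=-3"   -> begin=7, end=9 (suffix range: the final 3 bytes)
#   "bytes=4-2"  -> None (empty range: treated as a non-range request)
#   "bytes=12-"  -> raises InvalidRange (start is past the end of the object)
r = parse_range_header("bytes=-3", object_size=10)
assert (r.begin, r.end, r.content_length) == (7, 9, 3)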
+    :param range_header: a Range header
+    :param object_size: the requested S3 object total size
+    :return: ObjectRange or None if the Range header is invalid
+    """
+    last = object_size - 1
+    try:
+        _, rspec = range_header.split("=")
+    except ValueError:
+        return None
+    if "," in rspec:
+        return None
+
+    try:
+        begin, end = [int(i) if i else None for i in rspec.split("-")]
+    except ValueError:
+        # if we can't parse the Range header, S3 just treats the request as a non-range request
+        return None
+
+    if (begin is None and end == 0) or (begin is not None and begin > last):
+        raise InvalidRange(
+            "The requested range is not satisfiable",
+            ActualObjectSize=str(object_size),
+            RangeRequested=range_header,
+        )
+
+    if begin is not None:  # byte range
+        end = last if end is None else min(end, last)
+    elif end is not None:  # suffix byte range
+        begin = object_size - min(end, object_size)
+        end = last
+    else:
+        # Treat as non-range request
+        return None
+
+    if begin > min(end, last):
+        # Treat as a non-range request if the range is empty after the logic above is applied
+        return None
+
+    return ObjectRange(
+        content_range=f"bytes {begin}-{end}/{object_size}",
+        content_length=end - begin + 1,
+        begin=begin,
+        end=end,
+    )
+
+
+def parse_copy_source_range_header(copy_source_range: str, object_size: int) -> ObjectRange:
+    """
+    Takes a CopySourceRange parameter, and returns a dataclass containing the necessary information to return only a
+    slice of an S3 object. The validation is much stricter than `parse_range_header`
+    :param copy_source_range: a CopySourceRange parameter for UploadPartCopy
+    :param object_size: the requested S3 object total size
+    :raises InvalidArgument: if the CopySourceRange parameter does not pass validation
+    :return: ObjectRange
+    """
+    last = object_size - 1
+    try:
+        _, rspec = copy_source_range.split("=")
+    except ValueError:
+        raise InvalidArgument(
+            "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
+            ArgumentName="x-amz-copy-source-range",
+            ArgumentValue=copy_source_range,
+        )
+    if "," in rspec:
+        raise InvalidArgument(
+            "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
+            ArgumentName="x-amz-copy-source-range",
+            ArgumentValue=copy_source_range,
+        )
+
+    try:
+        begin, end = [int(i) if i else None for i in rspec.split("-")]
+    except ValueError:
+        # unlike a GET with a Range header, an unparsable copy range rejects the request
+        raise InvalidArgument(
+            "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
+            ArgumentName="x-amz-copy-source-range",
+            ArgumentValue=copy_source_range,
+        )
+
+    if begin is None or end is None or begin > end:
+        raise InvalidArgument(
+            "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
+            ArgumentName="x-amz-copy-source-range",
+            ArgumentValue=copy_source_range,
+        )
+
+    if begin > last:
+        # the range start is past the end of the source object: the copy range is invalid
+        raise InvalidRequest(
+            "The specified copy range is invalid for the source object size",
+        )
+    elif end > last:
+        raise InvalidArgument(
+            f"Range specified is not valid for source object of size: {object_size}",
+            ArgumentName="x-amz-copy-source-range",
+            ArgumentValue=copy_source_range,
+        )
+
+
return ObjectRange( + content_range=f"bytes {begin}-{end}/{object_size}", + content_length=end - begin + 1, + begin=begin, + end=end, + ) + + +def get_full_default_bucket_location(bucket_name: BucketName) -> str: + host_definition = localstack_host() + if host_definition.host != constants.LOCALHOST_HOSTNAME: + # the user has customised their LocalStack hostname, and may not support subdomains. + # Return the location in path form. + return f"{config.get_protocol()}://{host_definition.host_and_port()}/{bucket_name}/" + else: + return f"{config.get_protocol()}://{bucket_name}.s3.{host_definition.host_and_port()}/" + + +def etag_to_base_64_content_md5(etag: ETag) -> str: + """ + Convert an ETag, representing a MD5 hexdigest (might be quoted), to its base64 encoded representation + :param etag: an ETag, might be quoted + :return: the base64 value + """ + # get the bytes digest from the hexdigest + byte_digest = codecs.decode(to_bytes(etag.strip('"')), "hex") + return to_str(base64.b64encode(byte_digest)) + + +def base_64_content_md5_to_etag(content_md5: ContentMD5) -> str | None: + """ + Convert a ContentMD5 header, representing a base64 encoded representation of a MD5 binary digest to its ETag value, + hex encoded + :param content_md5: a ContentMD5 header, base64 encoded + :return: the ETag value, hex coded MD5 digest, or None if the input is not valid b64 or the representation of a MD5 + hash + """ + if not is_base64(content_md5): + return None + # get the hexdigest from the bytes digest + byte_digest = base64.b64decode(content_md5) + hex_digest = to_str(codecs.encode(byte_digest, "hex")) + if len(hex_digest) != 32: + return None + + return hex_digest + + +def is_presigned_url_request(context: RequestContext) -> bool: + """ + Detects pre-signed URL from query string parameters + Return True if any kind of presigned URL query string parameter is encountered + :param context: the request context from the handler chain + """ + # Detecting pre-sign url and checking signature + query_parameters = context.request.args + return any(p in query_parameters for p in SIGNATURE_V2_PARAMS) or any( + p in query_parameters for p in SIGNATURE_V4_PARAMS + ) + + +def is_bucket_name_valid(bucket_name: str) -> bool: + """ + ref. https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html + """ + return True if re.match(BUCKET_NAME_REGEX, bucket_name) else False + + +def get_permission_header_name(permission: Permission) -> str: + return f"x-amz-grant-{permission.replace('_', '-').lower()}" + + +def get_permission_from_header(capitalized_field: str) -> Permission: + headers_parts = [part.upper() for part in re.split(r"([A-Z][a-z]+)", capitalized_field) if part] + return "_".join(headers_parts[1:]) + + +def is_valid_canonical_id(canonical_id: str) -> bool: + """ + Validate that the string is a hex string with 64 char + """ + try: + return int(canonical_id, 16) and len(canonical_id) == 64 + except ValueError: + return False + + +def uses_host_addressing(headers: Dict[str, str]) -> str | None: + """ + Determines if the request is targeting S3 with virtual host addressing + :param headers: the request headers + :return: if the request targets S3 with virtual host addressing, returns the bucket name else None + """ + host = headers.get("host", "") + + # try to extract the bucket from the hostname (the "in" check is a minor optimization, as the regex is very greedy) + if ".s3." 
in host and ( + (match := _s3_virtual_host_regex.match(host)) and (bucket_name := match.group("bucket")) + ): + return bucket_name + + +def get_class_attrs_from_spec_class(spec_class: type[StrEnum]) -> set[str]: + return {str(spec) for spec in spec_class} + + +def get_system_metadata_from_request(request: dict) -> Metadata: + metadata: Metadata = {} + + for system_metadata_field in SYSTEM_METADATA_SETTABLE_HEADERS: + if field_value := request.get(system_metadata_field): + metadata[system_metadata_field] = field_value + + return metadata + + +def forwarded_from_virtual_host_addressed_request(headers: dict[str, str]) -> bool: + """ + Determines if the request was forwarded from a v-host addressing style into a path one + """ + # we can assume that the host header we are receiving here is actually the header we originally received + # from the client (because the edge service is forwarding the request in memory) + return S3_VIRTUAL_HOST_FORWARDED_HEADER in headers + + +def extract_bucket_name_and_key_from_headers_and_path( + headers: dict[str, str], path: str +) -> tuple[Optional[str], Optional[str]]: + """ + Extract the bucket name and the object key from a request headers and path. This works with both virtual host + and path style requests. + :param headers: the request headers, used to get the Host + :param path: the request path + :return: if found, the bucket name and object key + """ + bucket_name = None + object_key = None + host = headers.get("host", "") + if ".s3" in host: + vhost_match = _s3_virtual_host_regex.match(host) + if vhost_match and vhost_match.group("bucket"): + bucket_name = vhost_match.group("bucket") or None + split = path.split("/", maxsplit=1) + if len(split) > 1 and split[1]: + object_key = split[1] + else: + path_without_params = path.partition("?")[0] + split = path_without_params.split("/", maxsplit=2) + bucket_name = split[1] or None + if len(split) > 2: + object_key = split[2] + + return bucket_name, object_key + + +def normalize_bucket_name(bucket_name): + bucket_name = bucket_name or "" + bucket_name = bucket_name.lower() + return bucket_name + + +def get_bucket_and_key_from_s3_uri(s3_uri: str) -> Tuple[str, str]: + """ + Extracts the bucket name and key from s3 uri + """ + output_bucket, _, output_key = s3_uri.removeprefix("s3://").partition("/") + return output_bucket, output_key + + +def get_bucket_and_key_from_presign_url(presign_url: str) -> Tuple[str, str]: + """ + Extracts the bucket name and key from s3 presign url + """ + parsed_url = urlparser.urlparse(presign_url) + bucket = parsed_url.path.split("/")[1] + key = "/".join(parsed_url.path.split("/")[2:]).split("?")[0] + return bucket, key + + +def _create_invalid_argument_exc( + message: Union[str, None], name: str, value: str, host_id: str = None +) -> InvalidArgument: + ex = InvalidArgument(message) + ex.ArgumentName = name + ex.ArgumentValue = value + if host_id: + ex.HostId = host_id + return ex + + +def capitalize_header_name_from_snake_case(header_name: str) -> str: + return "-".join([part.capitalize() for part in header_name.split("-")]) + + +def get_kms_key_arn(kms_key: str, account_id: str, bucket_region: str) -> Optional[str]: + """ + In S3, the KMS key can be passed as a KeyId or a KeyArn. This method allows to always get the KeyArn from either. + It can also validate if the key is in the same region, and raise an exception. 
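As a quick reference for how the two addressing styles resolve through the helpers above (hostnames are illustrative):

# virtual-host style: the bucket comes from the Host header
headers = {"host": "my-bucket.s3.eu-west-1.amazonaws.com"}
assert extract_bucket_name_and_key_from_headers_and_path(headers, "/some/key") == (
    "my-bucket",
    "some/key",
)

# path style: the bucket is the first path segment instead
headers = {"host": "localhost:4566"}
assert extract_bucket_name_and_key_from_headers_and_path(headers, "/my-bucket/some/key") == (
    "my-bucket",
    "some/key",
)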
+ :param kms_key: the KMS key id or ARN + :param account_id: the bucket account id + :param bucket_region: the bucket region + :raise KMS.NotFoundException if the key is not in the same region + :return: the key ARN if found and enabled + """ + if not kms_key: + return None + try: + parsed_arn = parse_arn(kms_key) + key_region = parsed_arn["region"] + # the KMS key should be in the same region as the bucket, we can raise an exception without calling KMS + if bucket_region and key_region != bucket_region: + raise CommonServiceException( + code="KMS.NotFoundException", message=f"Invalid arn {key_region}" + ) + + except InvalidArnException: + # if it fails, the passed ID is a UUID with no region data + key_id = kms_key + # recreate the ARN manually with the bucket region and bucket owner + # if the KMS key is cross-account, user should provide an ARN and not a KeyId + kms_key = arns.kms_key_arn(key_id=key_id, account_id=account_id, region_name=bucket_region) + + return kms_key + + +# TODO: replace Any by a replacement for S3Bucket, some kind of defined type? +def validate_kms_key_id(kms_key: str, bucket: Any) -> None: + """ + Validate that the KMS key used to encrypt the object is valid + :param kms_key: the KMS key id or ARN + :param bucket: the targeted bucket + :raise KMS.DisabledException if the key is disabled + :raise KMS.NotFoundException if the key is not in the same region or does not exist + :return: the key ARN if found and enabled + """ + if hasattr(bucket, "region_name"): + bucket_region = bucket.region_name + else: + bucket_region = bucket.bucket_region + + if hasattr(bucket, "account_id"): + bucket_account_id = bucket.account_id + else: + bucket_account_id = bucket.bucket_account_id + + kms_key_arn = get_kms_key_arn(kms_key, bucket_account_id, bucket_region) + + # the KMS key should be in the same region as the bucket, create the client in the bucket region + kms_client = connect_to(region_name=bucket_region).kms + try: + key = kms_client.describe_key(KeyId=kms_key_arn) + if not key["KeyMetadata"]["Enabled"]: + if key["KeyMetadata"]["KeyState"] == "PendingDeletion": + raise CommonServiceException( + code="KMS.KMSInvalidStateException", + message=f"{key['KeyMetadata']['Arn']} is pending deletion.", + ) + raise CommonServiceException( + code="KMS.DisabledException", message=f"{key['KeyMetadata']['Arn']} is disabled." + ) + + except ClientError as e: + if e.response["Error"]["Code"] == "NotFoundException": + raise CommonServiceException( + code="KMS.NotFoundException", message=e.response["Error"]["Message"] + ) + raise + + +def create_s3_kms_managed_key_for_region(account_id: str, region_name: str) -> SSEKMSKeyId: + kms_client = connect_to(aws_access_key_id=account_id, region_name=region_name).kms + key = kms_client.create_key( + Description="Default key that protects my S3 objects when no other key is defined" + ) + + return key["KeyMetadata"]["Arn"] + + +def rfc_1123_datetime(src: datetime.datetime) -> str: + return src.strftime(RFC1123) + + +def str_to_rfc_1123_datetime(value: str) -> datetime.datetime: + return datetime.datetime.strptime(value, RFC1123).replace(tzinfo=_gmt_zone_info) + + +def add_expiration_days_to_datetime(user_datatime: datetime.datetime, exp_days: int) -> str: + """ + This adds expiration days to a datetime, rounding to the next day at midnight UTC. 
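A worked example of that rounding rule (all times GMT): an object last modified on 2023-11-01 at 15:30 with a lifecycle rule of Days=3 expires at midnight of November 5th, i.e. midnight of Nov 1 plus (3 + 1) days:

import datetime

last_modified = datetime.datetime(2023, 11, 1, 15, 30)
assert add_expiration_days_to_datetime(last_modified, 3) == "Sun, 05 Nov 2023 00:00:00 GMT"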
+    :param user_datatime: datetime object
+    :param exp_days: provided days
+    :return: the expiration datetime rounded to midnight, formatted as an RFC 1123 string
+    """
+    rounded_datetime = user_datatime.replace(
+        hour=0, minute=0, second=0, microsecond=0
+    ) + datetime.timedelta(days=exp_days + 1)
+
+    return rfc_1123_datetime(rounded_datetime)
+
+
+def serialize_expiration_header(
+    rule_id: str, lifecycle_exp: LifecycleExpiration, last_modified: datetime.datetime
+):
+    if exp_days := lifecycle_exp.get("Days"):
+        # AWS rounds to the next day at midnight UTC
+        exp_date = add_expiration_days_to_datetime(last_modified, exp_days)
+    else:
+        exp_date = rfc_1123_datetime(lifecycle_exp["Date"])
+
+    return f'expiry-date="{exp_date}", rule-id="{rule_id}"'
+
+
+def get_lifecycle_rule_from_object(
+    lifecycle_conf_rules: LifecycleRules,
+    object_key: ObjectKey,
+    size: ObjectSize,
+    object_tags: dict[str, str],
+) -> LifecycleRule:
+    for rule in lifecycle_conf_rules:
+        if not (expiration := rule.get("Expiration")) or "ExpiredObjectDeleteMarker" in expiration:
+            continue
+
+        if not (rule_filter := rule.get("Filter")):
+            return rule
+
+        if and_rules := rule_filter.get("And"):
+            if all(
+                _match_lifecycle_filter(key, value, object_key, size, object_tags)
+                for key, value in and_rules.items()
+            ):
+                return rule
+
+        if any(
+            _match_lifecycle_filter(key, value, object_key, size, object_tags)
+            for key, value in rule_filter.items()
+        ):
+            # after validation, we can only have one of `Prefix`, `Tag`, `ObjectSizeGreaterThan` or
+            # `ObjectSizeLessThan` in the dict. Instead of manually checking, we can iterate over the only key
+            # and try to match it
+            return rule
+
+
+def _match_lifecycle_filter(
+    filter_key: str,
+    filter_value: str | int | dict[str, str],
+    object_key: ObjectKey,
+    size: ObjectSize,
+    object_tags: dict[str, str],
+):
+    match filter_key:
+        case "Prefix":
+            return object_key.startswith(filter_value)
+        case "Tag":
+            return object_tags and object_tags.get(filter_value.get("Key")) == filter_value.get(
+                "Value"
+            )
+        case "ObjectSizeGreaterThan":
+            return size > filter_value
+        case "ObjectSizeLessThan":
+            return size < filter_value
+        case "Tags":  # this is inside the `And` field
+            return object_tags and all(
+                object_tags.get(tag.get("Key")) == tag.get("Value") for tag in filter_value
+            )
+
+
+def parse_expiration_header(
+    expiration_header: str,
+) -> tuple[Optional[datetime.datetime], Optional[str]]:
+    try:
+        header_values = dict(
+            (p.strip('"') for p in v.split("=")) for v in expiration_header.split('", ')
+        )
+        expiration_date = str_to_rfc_1123_datetime(header_values["expiry-date"])
+        return expiration_date, header_values["rule-id"]
+
+    except (IndexError, ValueError, KeyError):
+        return None, None
+
+
+def validate_dict_fields(data: dict, required_fields: set, optional_fields: set = None):
+    """
+    Validate whether the `data` dict contains at least the required fields and not more than the union of the required
+    and optional fields
+    TODO: we could pass the TypedDict to also use its required/optional properties, but it could be sensitive to
+     mistake/changes in the specs and not always right
+    :param data: the dict we want to validate
+    :param required_fields: a set containing the required fields
+    :param optional_fields: a set containing the optional fields
+    :return: bool, whether the dict is valid or not
+    """
+    if optional_fields is None:
+        optional_fields = set()
+    return (set_fields := set(data)) >= required_fields and set_fields <= (
+        required_fields | optional_fields
+    )
+
+
+def parse_tagging_header(tagging_header: TaggingHeader) -> dict:
+    try:
+        parsed_tags = urlparser.parse_qs(tagging_header, keep_blank_values=True)
+        tags: dict[str, str] = {}
+        for key, val in parsed_tags.items():
+            if len(val) != 1 or not TAG_REGEX.match(key) or not TAG_REGEX.match(val[0]):
+                raise InvalidArgument(
+                    "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.",
+                    ArgumentName="x-amz-tagging",
+                    ArgumentValue=tagging_header,
+                )
+            elif key.startswith("aws:"):
+                raise InvalidTag(
+                    "Your TagKey cannot be prefixed with aws:",
+                    TagKey=key,
+                )
+            tags[key] = val[0]
+        return tags
+
+    except ValueError:
+        raise InvalidArgument(
+            "The header 'x-amz-tagging' shall be encoded as UTF-8 then URLEncoded URL query parameters without tag name duplicates.",
+            ArgumentName="x-amz-tagging",
+            ArgumentValue=tagging_header,
+        )
+
+
+def validate_tag_set(tag_set: TagSet, type_set: Literal["bucket", "object"] = "bucket"):
+    keys = set()
+    for tag in tag_set:
+        if set(tag) != {"Key", "Value"}:
+            raise MalformedXML()
+
+        key = tag["Key"]
+        if key in keys:
+            raise InvalidTag(
+                "Cannot provide multiple Tags with the same key",
+                TagKey=key,
+            )
+
+        if key.startswith("aws:"):
+            if type_set == "bucket":
+                message = "System tags cannot be added/updated by requester"
+            else:
+                message = "Your TagKey cannot be prefixed with aws:"
+            raise InvalidTag(
+                message,
+                TagKey=key,
+            )
+
+        if not TAG_REGEX.match(key):
+            raise InvalidTag(
+                "The TagKey you have provided is invalid",
+                TagKey=key,
+            )
+        elif not TAG_REGEX.match(tag["Value"]):
+            raise InvalidTag(
+                "The TagValue you have provided is invalid", TagKey=key, TagValue=tag["Value"]
+            )
+
+        keys.add(key)
+
+
+def get_unique_key_id(
+    bucket: BucketName, object_key: ObjectKey, version_id: ObjectVersionId
+) -> str:
+    return f"{bucket}/{object_key}/{version_id or 'null'}"
+
+
+def get_retention_from_now(days: int = None, years: int = None) -> datetime.datetime:
+    """
+    This calculates a retention date from now, adding days or years to it
+    :param days: provided days
+    :param years: provided years, exclusive with days
+    :return: return a datetime object
+    """
+    if not days and not years:
+        raise ValueError("Either 'days' or 'years' needs to be provided")
+    now = datetime.datetime.now(tz=_gmt_zone_info)
+    if days:
+        retention = now + datetime.timedelta(days=days)
+    else:
+        retention = now.replace(year=now.year + years)
+
+    return retention
+
+
+def get_failed_precondition_copy_source(
+    request: CopyObjectRequest, last_modified: datetime.datetime, etag: ETag
+) -> Optional[str]:
+    """
+    Validate if the source object LastModified and ETag match a precondition, and if they do, return the failed
+    precondition
+    # see https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+    :param request: the CopyObjectRequest
+    :param last_modified: source object LastModified
+    :param etag: source object ETag
+    :return str: the failed precondition to raise
+    """
+    if (cs_if_match := request.get("CopySourceIfMatch")) and etag.strip('"') != cs_if_match.strip(
+        '"'
+    ):
+        return "x-amz-copy-source-If-Match"
+
+    elif (
+        cs_if_unmodified_since := request.get("CopySourceIfUnmodifiedSince")
+    ) and last_modified > cs_if_unmodified_since:
+        return "x-amz-copy-source-If-Unmodified-Since"
+
+    elif (cs_if_none_match := request.get("CopySourceIfNoneMatch")) and etag.strip(
+        '"'
+    ) == cs_if_none_match.strip('"'):
+        return "x-amz-copy-source-If-None-Match"
+
+    elif (
+        cs_if_modified_since := request.get("CopySourceIfModifiedSince")
+    ) and last_modified < cs_if_modified_since <
datetime.datetime.now(tz=_gmt_zone_info): + return "x-amz-copy-source-If-Modified-Since" + + +def validate_failed_precondition( + request: GetObjectRequest | HeadObjectRequest, last_modified: datetime.datetime, etag: ETag +) -> None: + """ + Validate if the object LastModified and ETag matches a precondition, and if it does, return the failed + precondition + :param request: the GetObjectRequest or HeadObjectRequest + :param last_modified: S3 object LastModified + :param etag: S3 object ETag + :raises PreconditionFailed + :raises NotModified, 304 with an empty body + """ + precondition_failed = None + # last_modified needs to be rounded to a second so that strict equality can be enforced from a RFC1123 header + last_modified = last_modified.replace(microsecond=0) + if (if_match := request.get("IfMatch")) and etag != if_match.strip('"'): + precondition_failed = "If-Match" + + elif ( + if_unmodified_since := request.get("IfUnmodifiedSince") + ) and last_modified > if_unmodified_since: + precondition_failed = "If-Unmodified-Since" + + if precondition_failed: + raise PreconditionFailed( + "At least one of the pre-conditions you specified did not hold", + Condition=precondition_failed, + ) + + if ((if_none_match := request.get("IfNoneMatch")) and etag == if_none_match.strip('"')) or ( + (if_modified_since := request.get("IfModifiedSince")) + and last_modified <= if_modified_since < datetime.datetime.now(tz=_gmt_zone_info) + ): + raise CommonServiceException( + message="Not Modified", + code="NotModified", + status_code=304, + ) + + +def get_canned_acl( + canned_acl: BucketCannedACL | ObjectCannedACL, owner: Owner +) -> AccessControlPolicy: + """ + Return the proper Owner and Grants from a CannedACL + See https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl + :param canned_acl: an S3 CannedACL + :param owner: the current owner of the bucket or object + :return: an AccessControlPolicy containing the Grants and Owner + """ + owner_grantee = Grantee(**owner, Type=GranteeType.CanonicalUser) + grants = [Grant(Grantee=owner_grantee, Permission=Permission.FULL_CONTROL)] + + match canned_acl: + case ObjectCannedACL.private: + pass # no other permissions + case ObjectCannedACL.public_read: + grants.append(Grant(Grantee=ALL_USERS_ACL_GRANTEE, Permission=Permission.READ)) + + case ObjectCannedACL.public_read_write: + grants.append(Grant(Grantee=ALL_USERS_ACL_GRANTEE, Permission=Permission.READ)) + grants.append(Grant(Grantee=ALL_USERS_ACL_GRANTEE, Permission=Permission.WRITE)) + case ObjectCannedACL.authenticated_read: + grants.append( + Grant(Grantee=AUTHENTICATED_USERS_ACL_GRANTEE, Permission=Permission.READ) + ) + case ObjectCannedACL.bucket_owner_read: + pass # TODO: bucket owner ACL + case ObjectCannedACL.bucket_owner_full_control: + pass # TODO: bucket owner ACL + case ObjectCannedACL.aws_exec_read: + pass # TODO: bucket owner, EC2 Read + case BucketCannedACL.log_delivery_write: + grants.append(Grant(Grantee=LOG_DELIVERY_ACL_GRANTEE, Permission=Permission.READ_ACP)) + grants.append(Grant(Grantee=LOG_DELIVERY_ACL_GRANTEE, Permission=Permission.WRITE)) + + return AccessControlPolicy(Owner=owner, Grants=grants) + + +def create_redirect_for_post_request( + base_redirect: str, bucket: BucketName, object_key: ObjectKey, etag: ETag +): + """ + POST requests can redirect if successful. It will take the URL provided and append query string parameters + (key, bucket and ETag). It needs to be a full URL. 
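An example of the rewrite performed below (values are illustrative):

url = create_redirect_for_post_request(
    base_redirect="https://example.com/done?source=form",
    bucket="my-bucket",
    object_key="uploads/a.txt",
    etag='"d41d8cd98f00b204e9800998ecf8427e"',
)
# -> "https://example.com/done?source=form&key=uploads%2Fa.txt&bucket=my-bucket&etag=%22d41d8cd98f00b204e9800998ecf8427e%22"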
+ :param base_redirect: the URL provided for redirection + :param bucket: bucket name + :param object_key: object key + :param etag: key ETag + :return: the URL provided with the new appended query string parameters + """ + parts = urlparser.urlparse(base_redirect) + if not parts.netloc: + raise ValueError("The provided URL is not valid") + queryargs = urlparser.parse_qs(parts.query) + queryargs["key"] = [object_key] + queryargs["bucket"] = [bucket] + queryargs["etag"] = [etag] + redirect_queryargs = urlparser.urlencode(queryargs, doseq=True) + newparts = ( + parts.scheme, + parts.netloc, + parts.path, + parts.params, + redirect_queryargs, + parts.fragment, + ) + return urlparser.urlunparse(newparts) + + +def parse_post_object_tagging_xml(tagging: str) -> Optional[dict]: + try: + tag_set = {} + tags = xmltodict.parse(tagging) + xml_tags = tags.get("Tagging", {}).get("TagSet", {}).get("Tag", []) + if not xml_tags: + # if the Tagging does not respect the schema, just return + return + if not isinstance(xml_tags, list): + xml_tags = [xml_tags] + for tag in xml_tags: + tag_set[tag["Key"]] = tag["Value"] + + return tag_set + + except Exception: + raise MalformedXML() + + +def generate_safe_version_id() -> str: + """ + Generate a safe version id for XML rendering. + VersionId cannot have `-` in it, as it fails in XML + Combine an ever-increasing part in the 8 first characters, and a random element. + We need the sequence part in order to properly implement pagination around ListObjectVersions. + By prefixing the version-id with a global increasing number, we can sort the versions + :return: an S3 VersionId containing a timestamp part in the first 8 characters + """ + tok = next(global_version_id_sequence()).to_bytes(length=6) + token_bytes(18) + return base64.b64encode(tok, altchars=b"._").rstrip(b"=").decode("ascii") + + +@singleton_factory +def global_version_id_sequence(): + start = int(time.time() * 1000) + # itertools.count is thread safe over the GIL since its getAndIncrement operation is a single python bytecode op + return itertools.count(start) + + +def is_version_older_than_other(version_id: str, other: str): + """ + Compare the sequence part of a VersionId against the sequence part of a VersionIdMarker. 
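The comparison works because the first 6 bytes of every id encode a monotonically increasing sequence number, so byte-wise comparison of the decoded tokens orders versions by creation time. A condensed sketch of the scheme (mirroring generate_safe_version_id above):

import base64
from secrets import token_bytes

def make_version_id(seq: int) -> str:
    tok = seq.to_bytes(6, "big") + token_bytes(18)  # 24 bytes -> base64 needs no '=' padding
    return base64.b64encode(tok, altchars=b"._").rstrip(b"=").decode("ascii")

v1, v2 = make_version_id(1000), make_version_id(1001)
assert base64.b64decode(v1, altchars=b"._") < base64.b64decode(v2, altchars=b"._")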
Used for pagination + See `generate_safe_version_id` + """ + return base64.b64decode(version_id, altchars=b"._") < base64.b64decode(other, altchars=b"._") diff --git a/localstack-core/localstack/services/s3/validation.py b/localstack-core/localstack/services/s3/validation.py new file mode 100644 index 0000000000000..884b9f6cd11ba --- /dev/null +++ b/localstack-core/localstack/services/s3/validation.py @@ -0,0 +1,508 @@ +import base64 +import datetime +import hashlib +from zoneinfo import ZoneInfo + +from botocore.utils import InvalidArnException + +from localstack.aws.api import CommonServiceException +from localstack.aws.api.s3 import ( + AccessControlPolicy, + AnalyticsConfiguration, + AnalyticsId, + BucketCannedACL, + BucketLifecycleConfiguration, + BucketName, + ChecksumAlgorithm, + CORSConfiguration, + Grant, + Grantee, + Grants, + IntelligentTieringConfiguration, + IntelligentTieringId, + InvalidArgument, + InvalidBucketName, + InvalidEncryptionAlgorithmError, + InventoryConfiguration, + InventoryId, + KeyTooLongError, + ObjectCannedACL, + Permission, + ServerSideEncryption, + SSECustomerAlgorithm, + SSECustomerKey, + SSECustomerKeyMD5, + WebsiteConfiguration, +) +from localstack.aws.api.s3 import Type as GranteeType +from localstack.services.s3 import constants as s3_constants +from localstack.services.s3.exceptions import InvalidRequest, MalformedACLError, MalformedXML +from localstack.services.s3.utils import ( + _create_invalid_argument_exc, + get_class_attrs_from_spec_class, + get_permission_header_name, + is_bucket_name_valid, + is_valid_canonical_id, + validate_dict_fields, +) +from localstack.utils.aws import arns +from localstack.utils.strings import to_bytes + +# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl +# bucket-owner-read + bucket-owner-full-control are allowed, but ignored for buckets +VALID_CANNED_ACLS = get_class_attrs_from_spec_class( + BucketCannedACL +) | get_class_attrs_from_spec_class(ObjectCannedACL) + + +def validate_bucket_analytics_configuration( + id: AnalyticsId, analytics_configuration: AnalyticsConfiguration +) -> None: + if id != analytics_configuration.get("Id"): + raise MalformedXML( + "The XML you provided was not well-formed or did not validate against our published schema" + ) + + +def validate_bucket_intelligent_tiering_configuration( + id: IntelligentTieringId, intelligent_tiering_configuration: IntelligentTieringConfiguration +) -> None: + if id != intelligent_tiering_configuration.get("Id"): + raise MalformedXML( + "The XML you provided was not well-formed or did not validate against our published schema" + ) + + +def validate_bucket_name(bucket: BucketName) -> None: + """ + Validate s3 bucket name based on the documentation + ref. 
+def validate_bucket_name(bucket: BucketName) -> None:
+    """
+    Validate the S3 bucket name based on the documentation
+    ref. https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
+    """
+    if not is_bucket_name_valid(bucket_name=bucket):
+        raise InvalidBucketName("The specified bucket is not valid.", BucketName=bucket)
+
+
+def validate_canned_acl(canned_acl: str) -> None:
+    """
+    Validate the canned ACL value, or raise an Exception
+    """
+    if canned_acl and canned_acl not in VALID_CANNED_ACLS:
+        ex = _create_invalid_argument_exc(None, "x-amz-acl", canned_acl)
+        raise ex
+
+
+def parse_grants_in_headers(permission: Permission, grantees: str) -> Grants:
+    serialized_grantees = [grantee.strip() for grantee in grantees.split(",")]
+    grants = []
+    for serialized_grantee in serialized_grantees:
+        grantee_type, grantee_id = serialized_grantee.split("=")
+        grantee_id = grantee_id.strip('"')
+        if grantee_type not in ("uri", "id", "emailAddress"):
+            ex = _create_invalid_argument_exc(
+                "Argument format not recognized",
+                get_permission_header_name(permission),
+                serialized_grantee,
+            )
+            raise ex
+        elif grantee_type == "uri":
+            if grantee_id not in s3_constants.VALID_ACL_PREDEFINED_GROUPS:
+                ex = _create_invalid_argument_exc("Invalid group uri", "uri", grantee_id)
+                raise ex
+            grantee = Grantee(
+                Type=GranteeType.Group,
+                URI=grantee_id,
+            )
+
+        elif grantee_type == "id":
+            if not is_valid_canonical_id(grantee_id):
+                ex = _create_invalid_argument_exc("Invalid id", "id", grantee_id)
+                raise ex
+            grantee = Grantee(
+                Type=GranteeType.CanonicalUser,
+                ID=grantee_id,
+                DisplayName="webfile",  # TODO: only in certain regions
+            )
+
+        else:
+            # TODO: check validation here
+            grantee = Grantee(
+                Type=GranteeType.AmazonCustomerByEmail,
+                EmailAddress=grantee_id,
+            )
+        grants.append(Grant(Permission=permission, Grantee=grantee))
+
+    return grants
+
+
+def validate_acl_acp(acp: AccessControlPolicy) -> None:
+    if acp is None or "Owner" not in acp or "Grants" not in acp:
+        raise MalformedACLError(
+            "The XML you provided was not well-formed or did not validate against our published schema"
+        )
+
+    if not is_valid_canonical_id(owner_id := acp["Owner"].get("ID", "")):
+        ex = _create_invalid_argument_exc("Invalid id", "CanonicalUser/ID", owner_id)
+        raise ex
+
+    for grant in acp["Grants"]:
+        if grant.get("Permission") not in s3_constants.VALID_GRANTEE_PERMISSIONS:
+            raise MalformedACLError(
+                "The XML you provided was not well-formed or did not validate against our published schema"
+            )
+
+        grantee = grant.get("Grantee", {})
+        grant_type = grantee.get("Type")
+        if grant_type not in (
+            GranteeType.Group,
+            GranteeType.CanonicalUser,
+            GranteeType.AmazonCustomerByEmail,
+        ):
+            raise MalformedACLError(
+                "The XML you provided was not well-formed or did not validate against our published schema"
+            )
+        elif (
+            grant_type == GranteeType.Group
+            and (grant_uri := grantee.get("URI", ""))
+            not in s3_constants.VALID_ACL_PREDEFINED_GROUPS
+        ):
+            ex = _create_invalid_argument_exc("Invalid group uri", "Group/URI", grant_uri)
+            raise ex
+
+        elif grant_type == GranteeType.AmazonCustomerByEmail:
+            # TODO: add validation here
+            continue
+
+        elif grant_type == GranteeType.CanonicalUser and not is_valid_canonical_id(
+            (grantee_id := grantee.get("ID", ""))
+        ):
+            ex = _create_invalid_argument_exc("Invalid id", "CanonicalUser/ID", grantee_id)
+            raise ex
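A sketch of the `x-amz-grant-*` header format this parser expects: comma-separated `type=value` pairs (hypothetical values; assumes the AllUsers group URI is registered in `VALID_ACL_PREDEFINED_GROUPS`, as on AWS).

    grants = parse_grants_in_headers(
        permission=Permission.READ,
        grantees='uri="http://acs.amazonaws.com/groups/global/AllUsers"',
    )
    # -> [Grant(Permission=Permission.READ, Grantee=Grantee(Type=GranteeType.Group, URI=...))]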
+def validate_lifecycle_configuration(lifecycle_conf: BucketLifecycleConfiguration) -> None:
+    """
+    Validate the Lifecycle configuration following the AWS docs
+    See https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html
+    https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+    :param lifecycle_conf: the bucket lifecycle configuration given by the client
+    :raises MalformedXML: when the configuration doesn't follow the basic structure/required fields
+    :raises InvalidArgument: if the `Date` passed for the Expiration is not at midnight GMT
+    :raises InvalidRequest: if there are duplicate tag keys in the `Tags` field
+    :return: None
+    """
+    # we only add the `Expiration` header, we don't delete objects yet
+    # We don't really expire or transition objects
+    # TODO: transitions are not supported and not validated, as we don't use them yet
+    if not lifecycle_conf:
+        return
+
+    for rule in lifecycle_conf.get("Rules", []):
+        if any(req_key not in rule for req_key in ("ID", "Filter", "Status")):
+            raise MalformedXML()
+        if (non_current_exp := rule.get("NoncurrentVersionExpiration")) is not None:
+            if all(
+                req_key not in non_current_exp
+                for req_key in ("NewerNoncurrentVersions", "NoncurrentDays")
+            ):
+                raise MalformedXML()
+
+        if rule_filter := rule.get("Filter"):
+            if len(rule_filter) > 1:
+                raise MalformedXML()
+
+        if (expiration := rule.get("Expiration", {})) and "ExpiredObjectDeleteMarker" in expiration:
+            if len(expiration) > 1:
+                raise MalformedXML()
+
+        if exp_date := (expiration.get("Date")):
+            if exp_date.timetz() != datetime.time(
+                hour=0, minute=0, second=0, microsecond=0, tzinfo=ZoneInfo("GMT")
+            ):
+                raise InvalidArgument(
+                    "'Date' must be at midnight GMT",
+                    ArgumentName="Date",
+                    ArgumentValue=exp_date.astimezone(),  # use the locale timezone, that's what AWS does (returns PST?)
+                )
+
+        if tags := (rule_filter.get("And", {}).get("Tags")):
+            tag_keys = set()
+            for tag in tags:
+                if (tag_key := tag.get("Key")) in tag_keys:
+                    raise InvalidRequest("Duplicate Tag Keys are not allowed.")
+                tag_keys.add(tag_key)
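For illustration, a minimal configuration that passes this validation (hypothetical values): each rule carries the required `ID`, `Filter` and `Status`, and the `Filter` holds a single entry.

    validate_lifecycle_configuration(
        {
            "Rules": [
                {
                    "ID": "expire-logs",
                    "Filter": {"Prefix": "logs/"},
                    "Status": "Enabled",
                    "Expiration": {"Days": 30},
                }
            ]
        }
    )  # returns None; duplicate tag keys or a non-midnight Date would raise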
+def validate_website_configuration(website_config: WebsiteConfiguration) -> None:
+    """
+    Validate the website configuration following the AWS docs
+    See https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html
+    :param website_config: the WebsiteConfiguration given by the client
+    :raises InvalidArgument, InvalidRequest, MalformedXML: if the configuration is invalid
+    :return: None
+    """
+    if redirect_all_req := website_config.get("RedirectAllRequestsTo", {}):
+        if len(website_config) > 1:
+            ex = _create_invalid_argument_exc(
+                message="RedirectAllRequestsTo cannot be provided in conjunction with other Routing Rules.",
+                name="RedirectAllRequestsTo",
+                value="not null",
+            )
+            raise ex
+        if "HostName" not in redirect_all_req:
+            raise MalformedXML()
+
+        if (protocol := redirect_all_req.get("Protocol")) and protocol not in ("http", "https"):
+            raise InvalidRequest(
+                "Invalid protocol, protocol can be http or https. If not defined the protocol will be selected automatically."
+            )
+
+        return
+
+    # required
+    # https://docs.aws.amazon.com/AmazonS3/latest/API/API_IndexDocument.html
+    if not (index_configuration := website_config.get("IndexDocument")):
+        ex = _create_invalid_argument_exc(
+            message="A value for IndexDocument Suffix must be provided if RedirectAllRequestsTo is empty",
+            name="IndexDocument",
+            value="null",
+        )
+        raise ex
+
+    if not (index_suffix := index_configuration.get("Suffix")) or "/" in index_suffix:
+        ex = _create_invalid_argument_exc(
+            message="The IndexDocument Suffix is not well formed",
+            name="IndexDocument",
+            value=index_suffix or None,
+        )
+        raise ex
+
+    if "ErrorDocument" in website_config and not website_config.get("ErrorDocument", {}).get("Key"):
+        raise MalformedXML()
+
+    if "RoutingRules" in website_config:
+        routing_rules = website_config.get("RoutingRules", [])
+        if len(routing_rules) == 0:
+            raise MalformedXML()
+        if len(routing_rules) > 50:
+            raise ValueError("Too many routing rules")  # TODO: raise the correct exception
+        for routing_rule in routing_rules:
+            redirect = routing_rule.get("Redirect", {})
+            # todo: this does not raise an error? check what GetWebsiteConfig returns? empty field?
+            # if not (redirect := routing_rule.get("Redirect")):
+            #     raise "Something"
+
+            if "ReplaceKeyPrefixWith" in redirect and "ReplaceKeyWith" in redirect:
+                raise InvalidRequest(
+                    "You can only define ReplaceKeyPrefix or ReplaceKey but not both."
+                )
+
+            if "Condition" in routing_rule and not routing_rule.get("Condition", {}):
+                raise InvalidRequest(
+                    "Condition cannot be empty. To redirect all requests without a condition, the condition element shouldn't be present."
+                )
+
+            if (protocol := redirect.get("Protocol")) and protocol not in ("http", "https"):
+                raise InvalidRequest(
+                    "Invalid protocol, protocol can be http or https. If not defined the protocol will be selected automatically."
+                )
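For illustration, the smallest configuration this validator accepts (hypothetical values): an `IndexDocument` whose suffix contains no slash.

    validate_website_configuration({"IndexDocument": {"Suffix": "index.html"}})
    # a suffix containing "/" raises InvalidArgument, e.g.:
    # validate_website_configuration({"IndexDocument": {"Suffix": "html/index.html"}})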
+def validate_inventory_configuration(
+    config_id: InventoryId, inventory_configuration: InventoryConfiguration
+):
+    """
+    Validate the Inventory Configuration following the AWS docs
+    Validation order is XML, then `Id`, then S3DestinationBucket
+    https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html
+    https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html
+    :param config_id: the Id parameter passed to the provider method
+    :param inventory_configuration: InventoryConfiguration
+    :raises MalformedXML: when the configuration doesn't follow the basic structure/required fields
+    :raises IdMismatch: if the `Id` parameter is different from the `Id` field of the configuration
+    :raises InvalidS3DestinationBucket: if the S3 bucket is not provided as an ARN
+    :return: None
+    """
+    required_root_fields = {"Destination", "Id", "IncludedObjectVersions", "IsEnabled", "Schedule"}
+    optional_root_fields = {"Filter", "OptionalFields"}
+
+    if not validate_dict_fields(
+        inventory_configuration, required_root_fields, optional_root_fields
+    ):
+        raise MalformedXML()
+
+    required_s3_bucket_dest_fields = {"Bucket", "Format"}
+    optional_s3_bucket_dest_fields = {"AccountId", "Encryption", "Prefix"}
+
+    if not (
+        s3_bucket_destination := inventory_configuration["Destination"].get("S3BucketDestination")
+    ) or not validate_dict_fields(
+        s3_bucket_destination, required_s3_bucket_dest_fields, optional_s3_bucket_dest_fields
+    ):
+        raise MalformedXML()
+
+    if inventory_configuration["Destination"]["S3BucketDestination"]["Format"] not in (
+        "CSV",
+        "ORC",
+        "Parquet",
+    ):
+        raise MalformedXML()
+
+    if not (frequency := inventory_configuration["Schedule"].get("Frequency")) or frequency not in (
+        "Daily",
+        "Weekly",
+    ):
+        raise MalformedXML()
+
+    if inventory_configuration["IncludedObjectVersions"] not in ("All", "Current"):
+        raise MalformedXML()
+
+    possible_optional_fields = {
+        "Size",
+        "LastModifiedDate",
+        "StorageClass",
+        "ETag",
+        "IsMultipartUploaded",
+        "ReplicationStatus",
+        "EncryptionStatus",
+        "ObjectLockRetainUntilDate",
+        "ObjectLockMode",
+        "ObjectLockLegalHoldStatus",
+        "IntelligentTieringAccessTier",
+        "BucketKeyStatus",
+        "ChecksumAlgorithm",
+    }
+    if (opt_fields := inventory_configuration.get("OptionalFields")) and set(
+        opt_fields
+    ) - possible_optional_fields:
+        raise MalformedXML()
+
+    if inventory_configuration.get("Id") != config_id:
+        raise CommonServiceException(
+            code="IdMismatch", message="Document ID does not match the specified configuration ID."
+        )
+
+    bucket_arn = inventory_configuration["Destination"]["S3BucketDestination"]["Bucket"]
+    try:
+        arns.parse_arn(bucket_arn)
+    except InvalidArnException:
+        raise CommonServiceException(
+            code="InvalidS3DestinationBucket", message="Invalid bucket ARN."
+        )
+
+
+def validate_cors_configuration(cors_configuration: CORSConfiguration):
+    rules = cors_configuration["CORSRules"]
+
+    if not rules or len(rules) > 100:
+        raise MalformedXML()
+
+    required_rule_fields = {"AllowedMethods", "AllowedOrigins"}
+    optional_rule_fields = {"AllowedHeaders", "ExposeHeaders", "MaxAgeSeconds", "ID"}
+
+    for rule in rules:
+        if not validate_dict_fields(rule, required_rule_fields, optional_rule_fields):
+            raise MalformedXML()
+
+        for method in rule["AllowedMethods"]:
+            if method not in ("GET", "PUT", "HEAD", "POST", "DELETE"):
+                raise InvalidRequest(
+                    f"Found unsupported HTTP method in CORS config. Unsupported method is {method}"
+                )
+def validate_object_key(object_key: str) -> None:
+    """
+    Validate the object key length
+    ref. https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
+    """
+    if (len_key := len(to_bytes(object_key, encoding="UTF-8"))) > 1024:
+        raise KeyTooLongError(
+            "Your key is too long",
+            MaxSizeAllowed="1024",
+            Size=str(len_key),
+        )
+
+
+def validate_sse_c(
+    algorithm: SSECustomerAlgorithm,
+    encryption_key: SSECustomerKey,
+    encryption_key_md5: SSECustomerKeyMD5,
+    server_side_encryption: ServerSideEncryption = None,
+):
+    """
+    This method validates the SSE Customer parameters for different requests.
+    :param algorithm: the SSECustomerAlgorithm parameter of the incoming request, can only be AES256
+    :param encryption_key: the SSECustomerKey of the incoming request, represents the base64 encoded encryption key
+    :param encryption_key_md5: the SSECustomerKeyMD5 of the request, represents the base64 encoded MD5 hash of the
+    encryption key
+    :param server_side_encryption: when the incoming request is a "write" request (PutObject, CopyObject,
+    CreateMultipartUpload), the user can specify the encryption. Customer encryption and AWS SSE can't both be set.
+    :raises: InvalidArgument if the request is invalid
+    :raises: InvalidEncryptionAlgorithmError if the given algorithm is different from AES256
+    """
+    if not encryption_key and not algorithm:
+        return
+    elif server_side_encryption:
+        raise InvalidArgument(
+            "Server Side Encryption with Customer provided key is incompatible with the encryption method specified",
+            ArgumentName="x-amz-server-side-encryption",
+            ArgumentValue=server_side_encryption,
+        )
+
+    if encryption_key and not algorithm:
+        raise InvalidArgument(
+            "Requests specifying Server Side Encryption with Customer provided keys must provide a valid encryption algorithm.",
+            ArgumentName="x-amz-server-side-encryption",
+            ArgumentValue="null",
+        )
+    elif not encryption_key and algorithm:
+        raise InvalidArgument(
+            "Requests specifying Server Side Encryption with Customer provided keys must provide an appropriate secret key.",
+            ArgumentName="x-amz-server-side-encryption",
+            ArgumentValue="null",
+        )
+
+    if algorithm != "AES256":
+        raise InvalidEncryptionAlgorithmError(
+            "The Encryption request you specified is not valid. Supported value: AES256.",
+            ArgumentName="x-amz-server-side-encryption",
+            ArgumentValue=algorithm,
+        )
+
+    sse_customer_key = base64.b64decode(encryption_key)
+    if len(sse_customer_key) != 32:
+        raise InvalidArgument(
+            "The secret key was invalid for the specified algorithm.",
+            ArgumentName="x-amz-server-side-encryption",
+            ArgumentValue="null",
+        )
+
+    sse_customer_key_md5 = base64.b64encode(hashlib.md5(sse_customer_key).digest()).decode("utf-8")
+    if sse_customer_key_md5 != encryption_key_md5:
+        raise InvalidArgument(
+            "The calculated MD5 hash of the key did not match the hash that was provided.",
+            # weirdly, the argument name is wrong, it should be `x-amz-server-side-encryption-customer-key-MD5`
+            ArgumentName="x-amz-server-side-encryption",
+            ArgumentValue="null",
+        )
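A sketch of building a consistent SSE-C parameter set that passes this validation (hypothetical key material):

    import os

    key = os.urandom(32)  # SSE-C keys must be exactly 32 bytes (AES-256)
    validate_sse_c(
        algorithm="AES256",
        encryption_key=base64.b64encode(key).decode(),
        encryption_key_md5=base64.b64encode(hashlib.md5(key).digest()).decode(),
    )  # returns None; a key/MD5 mismatch raises InvalidArgument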
+def validate_checksum_value(checksum_value: str, checksum_algorithm: ChecksumAlgorithm) -> bool:
+    try:
+        checksum = base64.b64decode(checksum_value)
+    except Exception:
+        return False
+
+    match checksum_algorithm:
+        case ChecksumAlgorithm.CRC32 | ChecksumAlgorithm.CRC32C:
+            valid_length = 4
+        case ChecksumAlgorithm.CRC64NVME:
+            valid_length = 8
+        case ChecksumAlgorithm.SHA1:
+            valid_length = 20
+        case ChecksumAlgorithm.SHA256:
+            valid_length = 32
+        case _:
+            valid_length = 0
+
+    return len(checksum) == valid_length
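For illustration (standard library only): a CRC32 checksum decodes to 4 bytes, so a correctly encoded value validates.

    import zlib

    crc = zlib.crc32(b"hello world").to_bytes(4, "big")
    assert validate_checksum_value(base64.b64encode(crc).decode(), ChecksumAlgorithm.CRC32)
    assert not validate_checksum_value("not-base64!", ChecksumAlgorithm.CRC32)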
diff --git a/localstack-core/localstack/services/s3/website_hosting.py b/localstack-core/localstack/services/s3/website_hosting.py
new file mode 100644
index 0000000000000..141dc4e935105
--- /dev/null
+++ b/localstack-core/localstack/services/s3/website_hosting.py
@@ -0,0 +1,409 @@
+import logging
+import re
+from functools import wraps
+from typing import Callable, Dict, Optional, Union
+from urllib.parse import urlparse
+
+from werkzeug.datastructures import Headers
+
+from localstack.aws.api.s3 import (
+    BucketName,
+    ErrorDocument,
+    GetObjectOutput,
+    NoSuchKey,
+    NoSuchWebsiteConfiguration,
+    ObjectKey,
+    RoutingRule,
+    RoutingRules,
+)
+from localstack.aws.connect import connect_to
+from localstack.aws.protocol.serializer import gen_amzn_requestid
+from localstack.http import Request, Response, Router
+from localstack.http.dispatcher import Handler
+
+LOG = logging.getLogger(__name__)
+
+# host matching rule for the router: captures the bucket name and the domain for the handler below
+STATIC_WEBSITE_HOST_REGEX = '<regex("[a-z0-9.-]+"):bucket_name>.s3-website.<regex(".*"):domain>'
+
+_leading_whitespace_re = re.compile("(^[ \t]*)(?:[ \t\n])", re.MULTILINE)
+
+
+class NoSuchKeyFromErrorDocument(NoSuchKey):
+    code: str = "NoSuchKey"
+    sender_fault: bool = False
+    status_code: int = 404
+    Key: Optional[ObjectKey]
+    ErrorDocumentKey: Optional[ObjectKey]
+
+
+class S3WebsiteHostingHandler:
+    def __init__(self):
+        # TODO: once we implement ACLs, maybe revisit the way we use the client/verify the bucket/object's ACL
+        self.s3_client = connect_to().s3
+
+    def __call__(
+        self,
+        request: Request,
+        bucket_name: str,
+        domain: str = None,
+        path: str = None,
+    ) -> Response:
+        """
+        Tries to serve the key, and if an Exception is encountered, returns a generic response.
+        This will make it easy to extend to 403 exceptions later on.
+        :param request: router Request object
+        :param bucket_name: str, bucket name
+        :param domain: str, domain name
+        :param path: the path of the request
+        :return: Response object
+        """
+        if request.method != "GET":
+            return Response(
+                _create_405_error_string(request.method, request_id=gen_amzn_requestid()),
+                status=405,
+            )
+
+        try:
+            return self._serve_object(request, bucket_name, path)
+
+        except (NoSuchKeyFromErrorDocument, NoSuchWebsiteConfiguration) as e:
+            resource_name = e.Key if hasattr(e, "Key") else e.BucketName
+            response_body = _create_404_error_string(
+                code=e.code,
+                message=e.message,
+                resource_name=resource_name,
+                request_id=gen_amzn_requestid(),
+                from_error_document=getattr(e, "ErrorDocumentKey", None),
+            )
+            return Response(response_body, status=e.status_code)
+
+        except self.s3_client.exceptions.ClientError as e:
+            error = e.response["Error"]
+            if error["Code"] not in ("NoSuchKey", "NoSuchBucket", "NoSuchWebsiteConfiguration"):
+                raise
+
+            resource_name = error.get("Key", error.get("BucketName"))
+            response_body = _create_404_error_string(
+                code=error["Code"],
+                message=error["Message"],
+                resource_name=resource_name,
+                request_id=gen_amzn_requestid(),
+                from_error_document=getattr(e, "ErrorDocumentKey", None),
+            )
+            return Response(response_body, status=e.response["ResponseMetadata"]["HTTPStatusCode"])
+
+        except Exception:
+            LOG.exception(
+                "Exception encountered while trying to serve s3-website at %s", request.url
+            )
+            return Response(_create_500_error_string(), status=500)
+    def _serve_object(
+        self, request: Request, bucket_name: BucketName, path: str = None
+    ) -> Response:
+        """
+        Serves the S3 Object as a website handler. It will match routing rules set in the configuration first,
+        and redirect the request if necessary. There are specific cases for handling the configured index and
+        error documents, see the docs:
+        https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html
+        https://docs.aws.amazon.com/AmazonS3/latest/userguide/CustomErrorDocSupport.html
+        https://docs.aws.amazon.com/AmazonS3/latest/userguide/how-to-page-redirect.html
+        :param request: Request object received by the router
+        :param bucket_name: bucket name contained in the host name
+        :param path: path of the request, corresponds to the S3 Object key
+        :return: Response object, either the Object, a redirection or an error
+        """
+
+        website_config = self.s3_client.get_bucket_website(Bucket=bucket_name)
+        headers = {}
+
+        redirection = website_config.get("RedirectAllRequestsTo")
+        if redirection:
+            parsed_url = urlparse(request.url)
+            redirect_to = request.url.replace(parsed_url.netloc, redirection["HostName"])
+            if protocol := redirection.get("Protocol"):
+                redirect_to = redirect_to.replace(parsed_url.scheme, protocol)
+
+            headers["Location"] = redirect_to
+            return Response("", status=301, headers=headers)
+
+        object_key = path
+        routing_rules = website_config.get("RoutingRules")
+        # checks for prefix rules, before trying to get the key
+        if (
+            object_key
+            and routing_rules
+            and (rule := self._find_matching_rule(routing_rules, object_key=object_key))
+        ):
+            redirect_response = self._get_redirect_from_routing_rule(request, rule)
+            return redirect_response
+
+        # if the URL ends with a trailing slash, try getting the index first
+        is_folder = request.url[-1] == "/"
+        if (
+            not object_key or is_folder
+        ):  # the path automatically removes the trailing slash, even with strict_slashes=False
+            index_key = website_config["IndexDocument"]["Suffix"]
+            object_key = f"{object_key}{index_key}" if object_key else index_key
+
+        try:
+            s3_object = self.s3_client.get_object(Bucket=bucket_name, Key=object_key)
+        except self.s3_client.exceptions.NoSuchKey:
+            if not is_folder:
+                # try appending the index suffix in case we're accessing a "folder" without a trailing slash
+                index_key = website_config["IndexDocument"]["Suffix"]
+                try:
+                    self.s3_client.head_object(Bucket=bucket_name, Key=f"{object_key}/{index_key}")
+                    return Response("", status=302, headers={"Location": f"/{object_key}/"})
+                except self.s3_client.exceptions.ClientError:
+                    pass
+
+            # checks for error code (and prefix) rules, after trying to get the key
+            if routing_rules and (
+                rule := self._find_matching_rule(
+                    routing_rules, object_key=object_key, error_code=404
+                )
+            ):
+                redirect_response = self._get_redirect_from_routing_rule(request, rule)
+                return redirect_response
+
+            # tries to get the error document, otherwise raises NoSuchKey
+            if error_document := website_config.get("ErrorDocument"):
+                return self._return_error_document(
+                    error_document=error_document,
+                    bucket=bucket_name,
+                    missing_key=object_key,
+                )
+            else:
+                # if no ErrorDocument is configured, re-raise NoSuchKey
+                raise
+
+        if website_redirect_location := s3_object.get("WebsiteRedirectLocation"):
+            headers["Location"] = website_redirect_location
+            return Response("", status=301, headers=headers)
+
+        if self._check_if_headers(request.headers, s3_object=s3_object):
+            return Response("", status=304)
+
+        headers = self._get_response_headers_from_object(s3_object)
+        return Response(s3_object["Body"], headers=headers)
+    def _return_error_document(
+        self,
+        error_document: ErrorDocument,
+        bucket: BucketName,
+        missing_key: ObjectKey,
+    ) -> Response:
+        """
+        Try to retrieve the configured ErrorDocument and return the response with its body
+        https://docs.aws.amazon.com/AmazonS3/latest/userguide/CustomErrorDocSupport.html
+        :param error_document: the ErrorDocument from the bucket WebsiteConfiguration
+        :param bucket: the bucket name
+        :param missing_key: the missing key not found in the bucket
+        :return: a Response, either a redirection or containing the Body of the ErrorDocument
+        :raises NoSuchKeyFromErrorDocument: if the ErrorDocument is not found
+        """
+        headers = {}
+        error_key = error_document["Key"]
+        try:
+            s3_object = self.s3_client.get_object(Bucket=bucket, Key=error_key)
+            # if the key is found, return the key, or if that key has a redirect, return a redirect
+
+            if website_redirect_location := s3_object.get("WebsiteRedirectLocation"):
+                headers["Location"] = website_redirect_location
+                return Response("", status=301, headers=headers)
+
+            headers = self._get_response_headers_from_object(s3_object)
+            return Response(s3_object["Body"], status=404, headers=headers)
+
+        except self.s3_client.exceptions.NoSuchKey:
+            raise NoSuchKeyFromErrorDocument(
+                "The specified key does not exist.",
+                Key=missing_key,
+                ErrorDocumentKey=error_key,
+            )
+
+    @staticmethod
+    def _get_response_headers_from_object(get_object_response: GetObjectOutput) -> Dict[str, str]:
+        """
+        Only return some headers from the S3 Object
+        :param get_object_response: the response from S3.GetObject
+        :return: headers from the object to be part of the response
+        """
+        response_headers = {}
+        if content_type := get_object_response.get("ContentType"):
+            response_headers["Content-Type"] = content_type
+        if etag := get_object_response.get("ETag"):
+            response_headers["etag"] = etag
+
+        return response_headers
+
+    @staticmethod
+    def _check_if_headers(headers: Headers, s3_object: GetObjectOutput) -> bool:
+        # TODO: add other conditions here: If-Modified-Since, etc.
+        etag = s3_object.get("ETag")
+        # last_modified = s3_object.get("LastModified")  # TODO
+        if "if-none-match" in headers and etag and etag in headers["if-none-match"]:
+            return True
+        return False
+
+    @staticmethod
+    def _find_matching_rule(
+        routing_rules: RoutingRules, object_key: ObjectKey, error_code: int = None
+    ) -> Union[RoutingRule, None]:
+        """
+        Iterate over the routing rules set in the configuration, and return the first one that matches the
+        key name and/or the error code (in the 4XX range).
+        :param routing_rules: RoutingRules part of WebsiteConfiguration
+        :param object_key: ObjectKey
+        :param error_code: error code of the Response in the 4XX range
+        :return: a RoutingRule if matched, or None
+        """
+        # TODO: we could separate the rules depending on whether they have the HttpErrorCodeReturnedEquals field:
+        #  we would not try to match on those early (no need to iterate over them),
+        #  and iterate over them only if an exception is encountered
+        for rule in routing_rules:
+            if condition := rule.get("Condition"):
+                prefix = condition.get("KeyPrefixEquals")
+                return_http_code = condition.get("HttpErrorCodeReturnedEquals")
+                # if both prefix matching and http error matching conditions are set
+                if prefix and return_http_code:
+                    if object_key.startswith(prefix) and error_code == int(return_http_code):
+                        return rule
+                    else:
+                        # it must match both conditions, or the rule does not apply
+                        continue
+                # only prefix is set, but this should have been matched before the error
+                elif prefix and object_key.startswith(prefix):
+                    return rule
+                elif return_http_code and error_code == int(return_http_code):
+                    return rule
+
+            else:
+                # if no Condition is set, the redirect is applied to all requests
+                return rule
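A small sketch of the matching behavior (hypothetical rules): a prefix-only condition matches any key under that prefix, independently of the error code.

    rules = [
        {
            "Condition": {"KeyPrefixEquals": "docs/"},
            "Redirect": {"ReplaceKeyPrefixWith": "documents/"},
        }
    ]
    assert S3WebsiteHostingHandler._find_matching_rule(rules, object_key="docs/index.html") is rules[0]
    assert S3WebsiteHostingHandler._find_matching_rule(rules, object_key="img/logo.png") is None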
+    @staticmethod
+    def _get_redirect_from_routing_rule(request: Request, routing_rule: RoutingRule) -> Response:
+        """
+        Return a redirect Response object created with the different parameters set in the RoutingRule
+        :param request: the original Request object received from the router
+        :param routing_rule: a RoutingRule from the WebsiteConfiguration
+        :return: a redirect Response
+        """
+        parsed_url = urlparse(request.url)
+        redirect_to = request.url
+        redirect = routing_rule["Redirect"]
+        if host_name := redirect.get("HostName"):
+            redirect_to = redirect_to.replace(parsed_url.netloc, host_name)
+        if protocol := redirect.get("Protocol"):
+            redirect_to = redirect_to.replace(parsed_url.scheme, protocol)
+        if redirect_to_key := redirect.get("ReplaceKeyWith"):
+            redirect_to = redirect_to.replace(parsed_url.path, f"/{redirect_to_key}")
+        elif "ReplaceKeyPrefixWith" in redirect:  # the value might be empty, and it's a valid config
+            matched_prefix = routing_rule["Condition"].get("KeyPrefixEquals", "")
+            redirect_to = redirect_to.replace(
+                matched_prefix, redirect.get("ReplaceKeyPrefixWith"), 1
+            )
+
+        return Response(
+            "", headers={"Location": redirect_to}, status=redirect.get("HttpRedirectCode", 301)
+        )
+
+
+def register_website_hosting_routes(
+    router: Router[Handler], handler: S3WebsiteHostingHandler = None
+):
+    """
+    Registers the S3 website hosting handler into the given router.
+    :param handler: an S3WebsiteHostingHandler
+    :param router: the router to add the handlers into.
+    """
+    handler = handler or S3WebsiteHostingHandler()
+    router.add(
+        path="/",
+        host=STATIC_WEBSITE_HOST_REGEX,
+        endpoint=handler,
+    )
+    router.add(
+        path="/<path:path>",
+        host=STATIC_WEBSITE_HOST_REGEX,
+        endpoint=handler,
+    )
+
+
+def _flatten_html_response(fn: Callable[..., str]):
+    @wraps(fn)
+    def wrapper(*args, **kwargs) -> str:
+        r = fn(*args, **kwargs)
+        # remove leading whitespace
+        return re.sub(_leading_whitespace_re, "", r)
+
+    return wrapper
+
+
+@_flatten_html_response
+def _create_404_error_string(
+    code: str, message: str, resource_name: str, request_id: str, from_error_document: str = None
+) -> str:
+    # TODO: the nested error could be permission related
+    #  permissions are not enforced currently
+    resource_key = "Key" if "Key" in code else "BucketName"
+    return f"""<html>
+    <head><title>404 Not Found</title></head>
+    <body>

+        <h1>404 Not Found</h1>
+        <ul>
+            <li>Code: {code}</li>
+            <li>Message: {message}</li>
+            <li>{resource_key}: {resource_name}</li>
+            <li>RequestId: {request_id}</li>
+            <li>HostId: h6t23Wl2Ndijztq+COn9kvx32omFVRLLtwk36D6+2/CIYSey+Uox6kBxRgcnAASsgnGwctU6zzU=</li>
+        </ul>
+        {_create_nested_404_error_string(from_error_document)}
+        <hr/>
+    </body>
+</html>
+"""
+
+
+def _create_nested_404_error_string(error_document_key: str) -> str:
+    if not error_document_key:
+        return ""
+    return f"""<h3>An Error Occurred While Attempting to Retrieve a Custom Error Document</h3>
+        <ul>
+            <li>Code: NoSuchKey</li>
+            <li>Message: The specified key does not exist.</li>
+            <li>Key: {error_document_key}</li>
+        </ul>
+    """
+
+
+@_flatten_html_response
+def _create_405_error_string(method: str, request_id: str) -> str:
+    return f"""<html>
+    <head><title>405 Method Not Allowed</title></head>
+    <body>

+        <h1>405 Method Not Allowed</h1>
+        <ul>
+            <li>Code: MethodNotAllowed</li>
+            <li>Message: The specified method is not allowed against this resource.</li>
+            <li>Method: {method.upper()}</li>
+            <li>ResourceType: OBJECT</li>
+            <li>RequestId: {request_id}</li>
+            <li>HostId: h6t23Wl2Ndijztq+COn9kvx32omFVRLLtwk36D6+2/CIYSey+Uox6kBxRgcnAASsgnGwctU6zzU=</li>
+        </ul>
+        <hr/>
+    </body>
+</html>
+"""
+
+
+@_flatten_html_response
+def _create_500_error_string() -> str:
+    return """<html>
+    <head><title>500 Service Error</title></head>
+    <body>

+        <h1>500 Service Error</h1>
+        <hr/>
+    </body>
+</html>
+"""
diff --git a/localstack/services/stepfunctions/asl/utils/__init__.py b/localstack-core/localstack/services/s3control/__init__.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/utils/__init__.py
rename to localstack-core/localstack/services/s3control/__init__.py
diff --git a/localstack/services/s3control/provider.py b/localstack-core/localstack/services/s3control/provider.py
similarity index 100%
rename from localstack/services/s3control/provider.py
rename to localstack-core/localstack/services/s3control/provider.py
diff --git a/localstack/services/stepfunctions/backend/__init__.py b/localstack-core/localstack/services/scheduler/__init__.py
similarity index 100%
rename from localstack/services/stepfunctions/backend/__init__.py
rename to localstack-core/localstack/services/scheduler/__init__.py
diff --git a/localstack/services/sts/__init__.py b/localstack-core/localstack/services/scheduler/models.py
similarity index 100%
rename from localstack/services/sts/__init__.py
rename to localstack-core/localstack/services/scheduler/models.py
diff --git a/localstack-core/localstack/services/scheduler/provider.py b/localstack-core/localstack/services/scheduler/provider.py
new file mode 100644
index 0000000000000..63177c01fda30
--- /dev/null
+++ b/localstack-core/localstack/services/scheduler/provider.py
@@ -0,0 +1,36 @@
+import logging
+import re
+
+from moto.scheduler.models import EventBridgeSchedulerBackend
+
+from localstack.aws.api.scheduler import SchedulerApi, ValidationException
+from localstack.services.events.rule import RULE_SCHEDULE_CRON_REGEX, RULE_SCHEDULE_RATE_REGEX
+from localstack.services.plugins import ServiceLifecycleHook
+from localstack.utils.patch import patch
+
+LOG = logging.getLogger(__name__)
+
+AT_REGEX = (
+    r"^at[(](19|20)\d\d-(0[1-9]|1[012])-([012]\d|3[01])T([01]\d|2[0-3]):([0-5]\d):([0-5]\d)[)]$"
+)
+RULE_SCHEDULE_AT_REGEX = re.compile(AT_REGEX)
+
+
+class SchedulerProvider(SchedulerApi, ServiceLifecycleHook):
+    pass
+
+
+def _validate_schedule_expression(schedule_expression: str) -> None:
+    if not (
+        RULE_SCHEDULE_CRON_REGEX.match(schedule_expression)
+        or RULE_SCHEDULE_RATE_REGEX.match(schedule_expression)
+        or RULE_SCHEDULE_AT_REGEX.match(schedule_expression)
+    ):
+        raise ValidationException(f"Invalid Schedule Expression {schedule_expression}.")
+
+
+@patch(EventBridgeSchedulerBackend.create_schedule)
+def create_schedule(fn, self, **kwargs):
+    if schedule_expression := kwargs.get("schedule_expression"):
+        _validate_schedule_expression(schedule_expression)
+    return fn(self, **kwargs)
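For illustration, the three expression shapes this accepts (hypothetical values; the rate/cron branches assume the regexes imported from the events service match the standard EventBridge syntax):

    _validate_schedule_expression("at(2030-01-15T09:30:00)")  # one-time schedule, matches AT_REGEX
    _validate_schedule_expression("rate(5 minutes)")          # recurring rate expression
    _validate_schedule_expression("cron(0 12 * * ? *)")       # recurring cron expression
    # anything else raises ValidationException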
diff --git a/localstack/services/support/__init__.py b/localstack-core/localstack/services/scheduler/resource_providers/__init__.py
similarity index 100%
rename from localstack/services/support/__init__.py
rename to localstack-core/localstack/services/scheduler/resource_providers/__init__.py
diff --git a/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule.py b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule.py
new file mode 100644
index 0000000000000..adfc5316062ab
--- /dev/null
+++ b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule.py
@@ -0,0 +1,229 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SchedulerScheduleProperties(TypedDict): + FlexibleTimeWindow: Optional[FlexibleTimeWindow] + ScheduleExpression: Optional[str] + Target: Optional[Target] + Arn: Optional[str] + Description: Optional[str] + EndDate: Optional[str] + GroupName: Optional[str] + KmsKeyArn: Optional[str] + Name: Optional[str] + ScheduleExpressionTimezone: Optional[str] + StartDate: Optional[str] + State: Optional[str] + + +class FlexibleTimeWindow(TypedDict): + Mode: Optional[str] + MaximumWindowInMinutes: Optional[float] + + +class DeadLetterConfig(TypedDict): + Arn: Optional[str] + + +class RetryPolicy(TypedDict): + MaximumEventAgeInSeconds: Optional[float] + MaximumRetryAttempts: Optional[float] + + +class AwsVpcConfiguration(TypedDict): + Subnets: Optional[list[str]] + AssignPublicIp: Optional[str] + SecurityGroups: Optional[list[str]] + + +class NetworkConfiguration(TypedDict): + AwsvpcConfiguration: Optional[AwsVpcConfiguration] + + +class CapacityProviderStrategyItem(TypedDict): + CapacityProvider: Optional[str] + Base: Optional[float] + Weight: Optional[float] + + +class PlacementConstraint(TypedDict): + Expression: Optional[str] + Type: Optional[str] + + +class PlacementStrategy(TypedDict): + Field: Optional[str] + Type: Optional[str] + + +class EcsParameters(TypedDict): + TaskDefinitionArn: Optional[str] + CapacityProviderStrategy: Optional[list[CapacityProviderStrategyItem]] + EnableECSManagedTags: Optional[bool] + EnableExecuteCommand: Optional[bool] + Group: Optional[str] + LaunchType: Optional[str] + NetworkConfiguration: Optional[NetworkConfiguration] + PlacementConstraints: Optional[list[PlacementConstraint]] + PlacementStrategy: Optional[list[PlacementStrategy]] + PlatformVersion: Optional[str] + PropagateTags: Optional[str] + ReferenceId: Optional[str] + Tags: Optional[list[dict]] + TaskCount: Optional[float] + + +class EventBridgeParameters(TypedDict): + DetailType: Optional[str] + Source: Optional[str] + + +class KinesisParameters(TypedDict): + PartitionKey: Optional[str] + + +class SageMakerPipelineParameter(TypedDict): + Name: Optional[str] + Value: Optional[str] + + +class SageMakerPipelineParameters(TypedDict): + PipelineParameterList: Optional[list[SageMakerPipelineParameter]] + + +class SqsParameters(TypedDict): + MessageGroupId: Optional[str] + + +class Target(TypedDict): + Arn: Optional[str] + RoleArn: Optional[str] + DeadLetterConfig: Optional[DeadLetterConfig] + EcsParameters: Optional[EcsParameters] + EventBridgeParameters: Optional[EventBridgeParameters] + Input: Optional[str] + KinesisParameters: Optional[KinesisParameters] + RetryPolicy: Optional[RetryPolicy] + SageMakerPipelineParameters: Optional[SageMakerPipelineParameters] + SqsParameters: Optional[SqsParameters] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SchedulerScheduleProvider(ResourceProvider[SchedulerScheduleProperties]): + TYPE = "AWS::Scheduler::Schedule" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SchedulerScheduleProperties], + ) -> ProgressEvent[SchedulerScheduleProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Name + + Required properties: + - FlexibleTimeWindow + - ScheduleExpression + - Target + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - scheduler:CreateSchedule + - scheduler:GetSchedule + - iam:PassRole + + """ + model = request.desired_state + + if not model.get("Name"): + model["Name"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + create_params = util.select_attributes( + model, + [ + "Description", + "EndDate", + "FlexibleTimeWindow", + "GroupName", + "KmsKeyArn", + "Name", + "ScheduleExpression", + "ScheduleExpressionTimezone", + "StartDate", + "State", + "Target", + ], + ) + + result = request.aws_client_factory.scheduler.create_schedule(**create_params) + model["Arn"] = result["ScheduleArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[SchedulerScheduleProperties], + ) -> ProgressEvent[SchedulerScheduleProperties]: + """ + Fetch resource information + + IAM permissions required: + - scheduler:GetSchedule + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SchedulerScheduleProperties], + ) -> ProgressEvent[SchedulerScheduleProperties]: + """ + Delete a resource + + IAM permissions required: + - scheduler:DeleteSchedule + - scheduler:GetSchedule + """ + + delete_params = util.select_attributes(request.desired_state, ["Name", "GroupName"]) + request.aws_client_factory.scheduler.delete_schedule(**delete_params) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[SchedulerScheduleProperties], + ) -> ProgressEvent[SchedulerScheduleProperties]: + """ + Update a resource + + IAM permissions required: + - scheduler:UpdateSchedule + - scheduler:GetSchedule + - iam:PassRole + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule.schema.json b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule.schema.json new file mode 100644 index 0000000000000..6ea351d62add3 --- /dev/null +++ b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule.schema.json @@ -0,0 +1,591 @@ +{ + "typeName": "AWS::Scheduler::Schedule", + "description": "Definition of AWS::Scheduler::Schedule Resource Type", + "definitions": { + "AssignPublicIp": { + "type": "string", + "description": "Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, + "AwsVpcConfiguration": { + "type": "object", + "description": "This structure specifies the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode.", + "properties": { + "Subnets": { + "type": "array", + "items": { + "type": "string", + "maxLength": 1000, + "minLength": 1, + "description": "Specifies the subnet associated with the task." + }, + "maxItems": 16, + "minItems": 1, + "description": "Specifies the subnets associated with the task. These subnets must all be in the same VPC. 
You can specify as many as 16 subnets.", + "insertionOrder": false + }, + "SecurityGroups": { + "type": "array", + "items": { + "type": "string", + "maxLength": 1000, + "minLength": 1, + "description": "Specifies the security group associated with the task." + }, + "maxItems": 5, + "minItems": 1, + "description": "Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "insertionOrder": false + }, + "AssignPublicIp": { + "$ref": "#/definitions/AssignPublicIp" + } + }, + "required": [ + "Subnets" + ], + "additionalProperties": false + }, + "CapacityProviderStrategyItem": { + "type": "object", + "description": "The details of a capacity provider strategy.", + "properties": { + "CapacityProvider": { + "type": "string", + "maxLength": 255, + "minLength": 1, + "description": "The short name of the capacity provider." + }, + "Weight": { + "type": "number", + "default": 0, + "maximum": 1000, + "minimum": 0, + "description": "The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied." + }, + "Base": { + "type": "number", + "default": 0, + "maximum": 100000, + "minimum": 0, + "description": "The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used." + } + }, + "required": [ + "CapacityProvider" + ], + "additionalProperties": false + }, + "DeadLetterConfig": { + "type": "object", + "description": "A DeadLetterConfig object that contains information about a dead-letter queue configuration.", + "properties": { + "Arn": { + "type": "string", + "maxLength": 1600, + "minLength": 1, + "pattern": "^arn:aws(-[a-z]+)?:sqs:[a-z0-9\\-]+:\\d{12}:[a-zA-Z0-9\\-_]+$", + "description": "The ARN of the SQS queue specified as the target for the dead-letter queue." + } + }, + "additionalProperties": false + }, + "EcsParameters": { + "type": "object", + "description": "The custom parameters to be used when the target is an Amazon ECS task.", + "properties": { + "TaskDefinitionArn": { + "type": "string", + "maxLength": 1600, + "minLength": 1, + "description": "The ARN of the task definition to use if the event target is an Amazon ECS task." + }, + "TaskCount": { + "type": "number", + "maximum": 10, + "minimum": 1, + "description": "The number of tasks to create based on TaskDefinition. The default is 1." + }, + "LaunchType": { + "$ref": "#/definitions/LaunchType" + }, + "NetworkConfiguration": { + "$ref": "#/definitions/NetworkConfiguration" + }, + "PlatformVersion": { + "type": "string", + "maxLength": 64, + "minLength": 1, + "description": "Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0." + }, + "Group": { + "type": "string", + "maxLength": 255, + "minLength": 1, + "description": "Specifies an ECS task group for the task. The maximum length is 255 characters." 
+ }, + "CapacityProviderStrategy": { + "type": "array", + "items": { + "$ref": "#/definitions/CapacityProviderStrategyItem" + }, + "maxItems": 6, + "description": "The capacity provider strategy to use for the task.", + "insertionOrder": false + }, + "EnableECSManagedTags": { + "type": "boolean", + "description": "Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide." + }, + "EnableExecuteCommand": { + "type": "boolean", + "description": "Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task." + }, + "PlacementConstraints": { + "type": "array", + "items": { + "$ref": "#/definitions/PlacementConstraint" + }, + "maxItems": 10, + "description": "An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime).", + "insertionOrder": false + }, + "PlacementStrategy": { + "type": "array", + "items": { + "$ref": "#/definitions/PlacementStrategy" + }, + "maxItems": 5, + "description": "The placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task.", + "insertionOrder": false + }, + "PropagateTags": { + "$ref": "#/definitions/PropagateTags" + }, + "ReferenceId": { + "type": "string", + "maxLength": 1024, + "description": "The reference ID to use for the task." + }, + "Tags": { + "type": "array", + "items": { + "$ref": "#/definitions/TagMap" + }, + "maxItems": 50, + "minItems": 0, + "description": "The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. To learn more, see RunTask in the Amazon ECS API Reference.", + "insertionOrder": false + } + }, + "required": [ + "TaskDefinitionArn" + ], + "additionalProperties": false + }, + "EventBridgeParameters": { + "type": "object", + "description": "EventBridge PutEvent predefined target type.", + "properties": { + "DetailType": { + "type": "string", + "maxLength": 128, + "minLength": 1, + "description": "Free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail." + }, + "Source": { + "type": "string", + "maxLength": 256, + "minLength": 1, + "pattern": "^(?=[/\\.\\-_A-Za-z0-9]+)((?!aws\\.).*)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)$", + "description": "The source of the event." + } + }, + "required": [ + "DetailType", + "Source" + ], + "additionalProperties": false + }, + "FlexibleTimeWindow": { + "type": "object", + "description": "Flexible time window allows configuration of a window within which a schedule can be invoked", + "properties": { + "Mode": { + "$ref": "#/definitions/FlexibleTimeWindowMode" + }, + "MaximumWindowInMinutes": { + "type": "number", + "maximum": 1440, + "minimum": 1, + "description": "The maximum time window during which a schedule can be invoked." 
+ } + }, + "required": [ + "Mode" + ], + "additionalProperties": false + }, + "FlexibleTimeWindowMode": { + "type": "string", + "description": "Determines whether the schedule is executed within a flexible time window.", + "enum": [ + "OFF", + "FLEXIBLE" + ] + }, + "KinesisParameters": { + "type": "object", + "description": "The custom parameter you can use to control the shard to which EventBridge Scheduler sends the event.", + "properties": { + "PartitionKey": { + "type": "string", + "maxLength": 256, + "minLength": 1, + "description": "The custom parameter used as the Kinesis partition key. For more information, see Amazon Kinesis Streams Key Concepts in the Amazon Kinesis Streams Developer Guide." + } + }, + "required": [ + "PartitionKey" + ], + "additionalProperties": false + }, + "LaunchType": { + "type": "string", + "description": "Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where AWS Fargate with Amazon ECS is supported. For more information, see AWS Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.", + "enum": [ + "EC2", + "FARGATE", + "EXTERNAL" + ] + }, + "NetworkConfiguration": { + "type": "object", + "description": "This structure specifies the network configuration for an ECS task.", + "properties": { + "AwsvpcConfiguration": { + "$ref": "#/definitions/AwsVpcConfiguration" + } + }, + "additionalProperties": false + }, + "PlacementConstraint": { + "type": "object", + "description": "An object representing a constraint on task placement.", + "properties": { + "Type": { + "$ref": "#/definitions/PlacementConstraintType" + }, + "Expression": { + "type": "string", + "maxLength": 2000, + "description": "A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide." + } + }, + "additionalProperties": false + }, + "PlacementConstraintType": { + "type": "string", + "description": "The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates.", + "enum": [ + "distinctInstance", + "memberOf" + ] + }, + "PlacementStrategy": { + "type": "object", + "description": "The task placement strategy for a task or service.", + "properties": { + "Type": { + "$ref": "#/definitions/PlacementStrategyType" + }, + "Field": { + "type": "string", + "maxLength": 255, + "description": "The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used." + } + }, + "additionalProperties": false + }, + "PlacementStrategyType": { + "type": "string", + "description": "The type of placement strategy. The random placement strategy randomly places tasks on available candidates. The spread placement strategy spreads placement across available candidates evenly based on the field parameter. 
The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified with the field parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory (but still enough to run the task).", + "enum": [ + "random", + "spread", + "binpack" + ] + }, + "PropagateTags": { + "type": "string", + "description": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.", + "enum": [ + "TASK_DEFINITION" + ] + }, + "RetryPolicy": { + "type": "object", + "description": "A RetryPolicy object that includes information about the retry policy settings.", + "properties": { + "MaximumEventAgeInSeconds": { + "type": "number", + "maximum": 86400, + "minimum": 60, + "description": "The maximum amount of time, in seconds, to continue to make retry attempts." + }, + "MaximumRetryAttempts": { + "type": "number", + "maximum": 185, + "minimum": 0, + "description": "The maximum number of retry attempts to make before the request fails. Retry attempts with exponential backoff continue until either the maximum number of attempts is made or until the duration of the MaximumEventAgeInSeconds is reached." + } + }, + "additionalProperties": false + }, + "SageMakerPipelineParameter": { + "type": "object", + "description": "Name/Value pair of a parameter to start execution of a SageMaker Model Building Pipeline.", + "properties": { + "Name": { + "type": "string", + "maxLength": 256, + "minLength": 1, + "pattern": "^[A-Za-z0-9\\-_]*$", + "description": "Name of parameter to start execution of a SageMaker Model Building Pipeline." + }, + "Value": { + "type": "string", + "maxLength": 1024, + "minLength": 1, + "description": "Value of parameter to start execution of a SageMaker Model Building Pipeline." + } + }, + "required": [ + "Name", + "Value" + ], + "additionalProperties": false + }, + "SageMakerPipelineParameters": { + "type": "object", + "description": "These are custom parameters to use when the target is a SageMaker Model Building Pipeline that starts based on AWS EventBridge Scheduler schedules.", + "properties": { + "PipelineParameterList": { + "type": "array", + "items": { + "$ref": "#/definitions/SageMakerPipelineParameter" + }, + "maxItems": 200, + "minItems": 0, + "description": "List of Parameter names and values for SageMaker Model Building Pipeline execution.", + "insertionOrder": false + } + }, + "additionalProperties": false + }, + "ScheduleState": { + "type": "string", + "description": "Specifies whether the schedule is enabled or disabled.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, + "SqsParameters": { + "type": "object", + "description": "Contains the message group ID to use when the target is a FIFO queue. If you specify an SQS FIFO queue as a target, the queue must have content-based deduplication enabled.", + "properties": { + "MessageGroupId": { + "type": "string", + "maxLength": 128, + "minLength": 1, + "description": "The FIFO message group ID to use as the target." 
+ } + }, + "additionalProperties": false + }, + "TagMap": { + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "maxLength": 256, + "minLength": 1 + } + }, + "additionalProperties": false + }, + "Target": { + "type": "object", + "description": "The schedule target.", + "properties": { + "Arn": { + "type": "string", + "maxLength": 1600, + "minLength": 1, + "description": "The Amazon Resource Name (ARN) of the target." + }, + "RoleArn": { + "type": "string", + "maxLength": 1600, + "minLength": 1, + "pattern": "^arn:aws(-[a-z]+)?:iam::\\d{12}:role\\/[\\w+=,.@\\/-]+$", + "description": "The Amazon Resource Name (ARN) of the IAM role to be used for this target when the schedule is triggered." + }, + "DeadLetterConfig": { + "$ref": "#/definitions/DeadLetterConfig" + }, + "RetryPolicy": { + "$ref": "#/definitions/RetryPolicy" + }, + "Input": { + "type": "string", + "minLength": 1, + "description": "The text, or well-formed JSON, passed to the target. If you are configuring a templated Lambda, AWS Step Functions, or Amazon EventBridge target, the input must be a well-formed JSON. For all other target types, a JSON is not required. If you do not specify anything for this field, EventBridge Scheduler delivers a default notification to the target." + }, + "EcsParameters": { + "$ref": "#/definitions/EcsParameters" + }, + "EventBridgeParameters": { + "$ref": "#/definitions/EventBridgeParameters" + }, + "KinesisParameters": { + "$ref": "#/definitions/KinesisParameters" + }, + "SageMakerPipelineParameters": { + "$ref": "#/definitions/SageMakerPipelineParameters" + }, + "SqsParameters": { + "$ref": "#/definitions/SqsParameters" + } + }, + "required": [ + "Arn", + "RoleArn" + ], + "additionalProperties": false + } + }, + "properties": { + "Arn": { + "type": "string", + "maxLength": 1224, + "minLength": 1, + "pattern": "^arn:aws(-[a-z]+)?:scheduler:[a-z0-9\\-]+:\\d{12}:schedule\\/[0-9a-zA-Z-_.]+\\/[0-9a-zA-Z-_.]+$", + "description": "The Amazon Resource Name (ARN) of the schedule." + }, + "Description": { + "type": "string", + "maxLength": 512, + "minLength": 0, + "description": "The description of the schedule." + }, + "EndDate": { + "type": "string", + "description": "The date, in UTC, before which the schedule can invoke its target. Depending on the schedule's recurrence expression, invocations might stop on, or before, the EndDate you specify.", + "format": "date-time" + }, + "FlexibleTimeWindow": { + "$ref": "#/definitions/FlexibleTimeWindow" + }, + "GroupName": { + "type": "string", + "maxLength": 64, + "minLength": 1, + "pattern": "^[0-9a-zA-Z-_.]+$", + "description": "The name of the schedule group to associate with this schedule. If you omit this, the default schedule group is used." + }, + "KmsKeyArn": { + "type": "string", + "maxLength": 2048, + "minLength": 1, + "pattern": "^arn:aws(-[a-z]+)?:kms:[a-z0-9\\-]+:\\d{12}:(key|alias)\\/[0-9a-zA-Z-_]*$", + "description": "The ARN for a KMS Key that will be used to encrypt customer data." + }, + "Name": { + "type": "string", + "maxLength": 64, + "minLength": 1, + "pattern": "^[0-9a-zA-Z-_.]+$" + }, + "ScheduleExpression": { + "type": "string", + "maxLength": 256, + "minLength": 1, + "description": "The scheduling expression." + }, + "ScheduleExpressionTimezone": { + "type": "string", + "maxLength": 50, + "minLength": 1, + "description": "The timezone in which the scheduling expression is evaluated." 
+ }, + "StartDate": { + "type": "string", + "description": "The date, in UTC, after which the schedule can begin invoking its target. Depending on the schedule's recurrence expression, invocations might occur on, or after, the StartDate you specify.", + "format": "date-time" + }, + "State": { + "$ref": "#/definitions/ScheduleState" + }, + "Target": { + "$ref": "#/definitions/Target" + } + }, + "required": [ + "FlexibleTimeWindow", + "ScheduleExpression", + "Target" + ], + "readOnlyProperties": [ + "/properties/Arn" + ], + "createOnlyProperties": [ + "/properties/Name" + ], + "primaryIdentifier": [ + "/properties/Name" + ], + "handlers": { + "create": { + "permissions": [ + "scheduler:CreateSchedule", + "scheduler:GetSchedule", + "iam:PassRole" + ] + }, + "read": { + "permissions": [ + "scheduler:GetSchedule" + ] + }, + "update": { + "permissions": [ + "scheduler:UpdateSchedule", + "scheduler:GetSchedule", + "iam:PassRole" + ] + }, + "delete": { + "permissions": [ + "scheduler:DeleteSchedule", + "scheduler:GetSchedule" + ] + }, + "list": { + "permissions": [ + "scheduler:ListSchedules" + ] + } + }, + "tagging": { + "taggable": false, + "tagOnCreate": false, + "tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "additionalProperties": false +} diff --git a/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule_plugin.py b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule_plugin.py new file mode 100644 index 0000000000000..b5fc742b5377b --- /dev/null +++ b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedule_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SchedulerScheduleProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Scheduler::Schedule" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.scheduler.resource_providers.aws_scheduler_schedule import ( + SchedulerScheduleProvider, + ) + + self.factory = SchedulerScheduleProvider diff --git a/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup.py b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup.py new file mode 100644 index 0000000000000..913ce73707551 --- /dev/null +++ b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup.py @@ -0,0 +1,123 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SchedulerScheduleGroupProperties(TypedDict): + Arn: Optional[str] + CreationDate: Optional[str] + LastModificationDate: Optional[str] + Name: Optional[str] + State: Optional[str] + Tags: Optional[list[Tag]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SchedulerScheduleGroupProvider(ResourceProvider[SchedulerScheduleGroupProperties]): + TYPE = "AWS::Scheduler::ScheduleGroup" # Autogenerated. 
Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SchedulerScheduleGroupProperties], + ) -> ProgressEvent[SchedulerScheduleGroupProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Name + + + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Arn + - /properties/CreationDate + - /properties/LastModificationDate + - /properties/State + + IAM permissions required: + - scheduler:CreateScheduleGroup + - scheduler:GetScheduleGroup + - scheduler:ListTagsForResource + + """ + model = request.desired_state + + if not model.get("Name"): + model["Name"] = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + + create_params = util.select_attributes(model, ("Name", "Tags")) + + result = request.aws_client_factory.scheduler.create_schedule_group(**create_params) + model["Arn"] = result["ScheduleGroupArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + ) + + def read( + self, + request: ResourceRequest[SchedulerScheduleGroupProperties], + ) -> ProgressEvent[SchedulerScheduleGroupProperties]: + """ + Fetch resource information + + IAM permissions required: + - scheduler:GetScheduleGroup + - scheduler:ListTagsForResource + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SchedulerScheduleGroupProperties], + ) -> ProgressEvent[SchedulerScheduleGroupProperties]: + """ + Delete a resource + + IAM permissions required: + - scheduler:DeleteScheduleGroup + - scheduler:GetScheduleGroup + - scheduler:DeleteSchedule + """ + model = request.desired_state + request.aws_client_factory.scheduler.delete_schedule_group(Name=model["Name"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[SchedulerScheduleGroupProperties], + ) -> ProgressEvent[SchedulerScheduleGroupProperties]: + """ + Update a resource + + IAM permissions required: + - scheduler:TagResource + - scheduler:UntagResource + - scheduler:ListTagsForResource + - scheduler:GetScheduleGroup + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup.schema.json b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup.schema.json new file mode 100644 index 0000000000000..50e5a3fe4f275 --- /dev/null +++ b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup.schema.json @@ -0,0 +1,130 @@ +{ + "typeName": "AWS::Scheduler::ScheduleGroup", + "description": "Definition of AWS::Scheduler::ScheduleGroup Resource Type", + "definitions": { + "ScheduleGroupState": { + "type": "string", + "description": "Specifies the state of the schedule group.", + "enum": [ + "ACTIVE", + "DELETING" + ] + }, + "Tag": { + "type": "object", + "description": "Tag to associate with the resource.", + "properties": { + "Key": { + "type": "string", + "maxLength": 128, + "minLength": 1, + "description": "Key for the tag" + }, + "Value": { + "type": "string", + "maxLength": 256, + "minLength": 1, + "description": "Value for the tag" + } + }, + "required": [ + "Key", + "Value" + ], + "additionalProperties": false + } + }, + "properties": { + "Arn": { + "type": "string", + "maxLength": 1224, + "minLength": 1, + "pattern": 
"^arn:aws(-[a-z]+)?:scheduler:[a-z0-9\\-]+:\\d{12}:schedule-group\\/[0-9a-zA-Z-_.]+$", + "description": "The Amazon Resource Name (ARN) of the schedule group." + }, + "CreationDate": { + "type": "string", + "description": "The time at which the schedule group was created.", + "format": "date-time" + }, + "LastModificationDate": { + "type": "string", + "description": "The time at which the schedule group was last modified.", + "format": "date-time" + }, + "Name": { + "type": "string", + "maxLength": 64, + "minLength": 1, + "pattern": "^[0-9a-zA-Z-_.]+$" + }, + "State": { + "$ref": "#/definitions/ScheduleGroupState" + }, + "Tags": { + "type": "array", + "items": { + "$ref": "#/definitions/Tag" + }, + "maxItems": 200, + "minItems": 0, + "description": "The list of tags to associate with the schedule group.", + "insertionOrder": false + } + }, + "readOnlyProperties": [ + "/properties/Arn", + "/properties/CreationDate", + "/properties/LastModificationDate", + "/properties/State" + ], + "createOnlyProperties": [ + "/properties/Name" + ], + "primaryIdentifier": [ + "/properties/Name" + ], + "handlers": { + "create": { + "permissions": [ + "scheduler:CreateScheduleGroup", + "scheduler:GetScheduleGroup", + "scheduler:ListTagsForResource" + ] + }, + "read": { + "permissions": [ + "scheduler:GetScheduleGroup", + "scheduler:ListTagsForResource" + ] + }, + "update": { + "permissions": [ + "scheduler:TagResource", + "scheduler:UntagResource", + "scheduler:ListTagsForResource", + "scheduler:GetScheduleGroup" + ] + }, + "delete": { + "permissions": [ + "scheduler:DeleteScheduleGroup", + "scheduler:GetScheduleGroup", + "scheduler:DeleteSchedule" + ] + }, + "list": { + "permissions": [ + "scheduler:ListScheduleGroups" + ] + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "additionalProperties": false +} diff --git a/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup_plugin.py b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup_plugin.py new file mode 100644 index 0000000000000..2f76e843976f7 --- /dev/null +++ b/localstack-core/localstack/services/scheduler/resource_providers/aws_scheduler_schedulegroup_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SchedulerScheduleGroupProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::Scheduler::ScheduleGroup" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.scheduler.resource_providers.aws_scheduler_schedulegroup import ( + SchedulerScheduleGroupProvider, + ) + + self.factory = SchedulerScheduleGroupProvider diff --git a/localstack/services/swf/__init__.py b/localstack-core/localstack/services/secretsmanager/__init__.py similarity index 100% rename from localstack/services/swf/__init__.py rename to localstack-core/localstack/services/secretsmanager/__init__.py diff --git a/localstack-core/localstack/services/secretsmanager/provider.py b/localstack-core/localstack/services/secretsmanager/provider.py new file mode 100644 index 0000000000000..5838732f2c4b0 --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/provider.py @@ -0,0 +1,948 @@ +from __future__ import annotations + +import base64 +import json 
+import logging +import re +import time +from typing import Any, Final, Optional, Union + +import moto.secretsmanager.exceptions as moto_exception +from botocore.utils import InvalidArnException +from moto.iam.policy_validation import IAMPolicyDocumentValidator +from moto.secretsmanager import secretsmanager_backends +from moto.secretsmanager.models import FakeSecret, SecretsManagerBackend +from moto.secretsmanager.responses import SecretsManagerResponse + +from localstack.aws.api import CommonServiceException, RequestContext, handler +from localstack.aws.api.secretsmanager import ( + CancelRotateSecretRequest, + CancelRotateSecretResponse, + CreateSecretRequest, + CreateSecretResponse, + DeleteResourcePolicyRequest, + DeleteResourcePolicyResponse, + DeleteSecretRequest, + DeleteSecretResponse, + DescribeSecretRequest, + DescribeSecretResponse, + GetResourcePolicyRequest, + GetResourcePolicyResponse, + GetSecretValueRequest, + GetSecretValueResponse, + InvalidParameterException, + InvalidRequestException, + ListSecretVersionIdsRequest, + ListSecretVersionIdsResponse, + NameType, + PutResourcePolicyRequest, + PutResourcePolicyResponse, + PutSecretValueRequest, + PutSecretValueResponse, + RemoveRegionsFromReplicationRequest, + RemoveRegionsFromReplicationResponse, + ReplicateSecretToRegionsRequest, + ReplicateSecretToRegionsResponse, + ResourceExistsException, + ResourceNotFoundException, + RestoreSecretRequest, + RestoreSecretResponse, + RotateSecretRequest, + RotateSecretResponse, + SecretIdType, + SecretsmanagerApi, + SecretVersionsListEntry, + StopReplicationToReplicaRequest, + StopReplicationToReplicaResponse, + TagResourceRequest, + UntagResourceRequest, + UpdateSecretRequest, + UpdateSecretResponse, + UpdateSecretVersionStageRequest, + UpdateSecretVersionStageResponse, + ValidateResourcePolicyRequest, + ValidateResourcePolicyResponse, +) +from localstack.aws.connect import connect_to +from localstack.services.moto import call_moto +from localstack.utils.aws import arns +from localstack.utils.patch import patch +from localstack.utils.time import today_no_time + +# Constants. +AWSPREVIOUS: Final[str] = "AWSPREVIOUS" +AWSPENDING: Final[str] = "AWSPENDING" +AWSCURRENT: Final[str] = "AWSCURRENT" +# The maximum number of outdated versions that can be stored in the secret. +# see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_PutSecretValue.html +MAX_OUTDATED_SECRET_VERSIONS: Final[int] = 100 +# +# Error Messages. +AWS_INVALID_REQUEST_MESSAGE_CREATE_WITH_SCHEDULED_DELETION: Final[str] = ( + "You can't create this secret because a secret with this name is already scheduled for deletion." 
+) + +LOG = logging.getLogger(__name__) + + +class ValidationException(CommonServiceException): + def __init__(self, message: str): + super().__init__("ValidationException", message, 400, True) + + +class SecretNotFoundException(CommonServiceException): + def __init__(self): + super().__init__( + "ResourceNotFoundException", + "Secrets Manager can't find the specified secret.", + 400, + True, + ) + + +class SecretsmanagerProvider(SecretsmanagerApi): + def __init__(self): + super().__init__() + apply_patches() + + @staticmethod + def get_moto_backend_for_resource( + name_or_arn: str, context: RequestContext + ) -> SecretsManagerBackend: + try: + arn_data = arns.parse_arn(name_or_arn) + backend = secretsmanager_backends[arn_data["account"]][arn_data["region"]] + except InvalidArnException: + backend = secretsmanager_backends[context.account_id][context.region] + return backend + + @staticmethod + def _raise_if_default_kms_key( + secret_id: str, request: RequestContext, backend: SecretsManagerBackend + ): + try: + secret = backend.describe_secret(secret_id) + except moto_exception.SecretNotFoundException: + raise ResourceNotFoundException("Secrets Manager can't find the specified secret.") + if secret.kms_key_id is None and request.account_id != secret.account_id: + raise InvalidRequestException( + "You can't access a secret from a different AWS account if you encrypt the secret with the default KMS service key." + ) + + @staticmethod + def _validate_secret_id(secret_id: SecretIdType) -> bool: + # The secret name can contain ASCII letters, numbers, and the following characters: /_+=.@- + return bool(re.match(r"^[A-Za-z0-9/_+=.@-]+\Z", secret_id)) + + @staticmethod + def _raise_if_invalid_secret_id(secret_id: Union[SecretIdType, NameType]): + # Patches moto's implementation, in which secret_ids are not validated, to raise a ValidationException instead. + # Skips this check if the secret_id provided appears to be an arn (starting with 'arn:'). + if not re.match( + r"^arn:", secret_id + ): # If it appears to be an arn, skip the secret_id check and delegate parsing of the arn to the handlers. + if not SecretsmanagerProvider._validate_secret_id(secret_id): + raise ValidationException( + "Invalid name. Must be a valid name containing alphanumeric " + "characters, or any of the following: -/_+=.@!" + ) + + @staticmethod + def _raise_if_missing_client_req_token( + request: Union[ + CreateSecretRequest, + PutSecretValueRequest, + RotateSecretRequest, + UpdateSecretRequest, + ], + ): + if "ClientRequestToken" not in request: + raise InvalidRequestException( + "You must provide a ClientRequestToken value. We recommend a UUID-type value."
+ ) + + @handler("CancelRotateSecret", expand=False) + def cancel_rotate_secret( + self, context: RequestContext, request: CancelRotateSecretRequest + ) -> CancelRotateSecretResponse: + self._raise_if_invalid_secret_id(request["SecretId"]) + return call_moto(context, request) + + @handler("CreateSecret", expand=False) + def create_secret( + self, context: RequestContext, request: CreateSecretRequest + ) -> CreateSecretResponse: + self._raise_if_missing_client_req_token(request) + # Some providers need to create keys which are not usually creatable by users + if not any( + tag_entry["Key"] == "BYPASS_SECRET_ID_VALIDATION" + for tag_entry in request.get("Tags", []) + ): + self._raise_if_invalid_secret_id(request["Name"]) + else: + request["Tags"] = [ + tag_entry + for tag_entry in request.get("Tags", []) + if tag_entry["Key"] != "BYPASS_SECRET_ID_VALIDATION" + ] + + return call_moto(context, request) + + @handler("DeleteResourcePolicy", expand=False) + def delete_resource_policy( + self, context: RequestContext, request: DeleteResourcePolicyRequest + ) -> DeleteResourcePolicyResponse: + self._raise_if_invalid_secret_id(request["SecretId"]) + return call_moto(context, request) + + @handler("DeleteSecret", expand=False) + def delete_secret( + self, context: RequestContext, request: DeleteSecretRequest + ) -> DeleteSecretResponse: + secret_id: str = request["SecretId"] + self._raise_if_invalid_secret_id(secret_id) + recovery_window_in_days: Optional[int] = request.get("RecoveryWindowInDays") + force_delete_without_recovery: Optional[bool] = request.get("ForceDeleteWithoutRecovery") + + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + try: + arn, name, deletion_date = backend.delete_secret( + secret_id=secret_id, + recovery_window_in_days=recovery_window_in_days, + force_delete_without_recovery=force_delete_without_recovery, + ) + except moto_exception.InvalidParameterException as e: + raise InvalidParameterException(str(e)) + except moto_exception.InvalidRequestException: + raise InvalidRequestException( + "You tried to perform the operation on a secret that's currently marked deleted." 
+ ) + except moto_exception.SecretNotFoundException: + raise SecretNotFoundException() + return DeleteSecretResponse(ARN=arn, Name=name, DeletionDate=deletion_date) + + @handler("DescribeSecret", expand=False) + def describe_secret( + self, context: RequestContext, request: DescribeSecretRequest + ) -> DescribeSecretResponse: + secret_id: str = request["SecretId"] + self._raise_if_invalid_secret_id(secret_id) + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + try: + secret = backend.describe_secret(secret_id) + except moto_exception.SecretNotFoundException: + raise ResourceNotFoundException("Secrets Manager can't find the specified secret.") + return DescribeSecretResponse(**secret.to_dict()) + + @handler("GetResourcePolicy", expand=False) + def get_resource_policy( + self, context: RequestContext, request: GetResourcePolicyRequest + ) -> GetResourcePolicyResponse: + secret_id = request["SecretId"] + self._raise_if_invalid_secret_id(secret_id) + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + policy = backend.get_resource_policy(secret_id) + return GetResourcePolicyResponse(**json.loads(policy)) + + @handler("GetSecretValue", expand=False) + def get_secret_value( + self, context: RequestContext, request: GetSecretValueRequest + ) -> GetSecretValueResponse: + secret_id = request.get("SecretId") + version_id = request.get("VersionId") + version_stage = request.get("VersionStage") + if not version_id and not version_stage: + version_stage = "AWSCURRENT" + self._raise_if_invalid_secret_id(secret_id) + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + self._raise_if_default_kms_key(secret_id, context, backend) + try: + response = backend.get_secret_value(secret_id, version_id, version_stage) + response = decode_secret_binary_from_response(response) + except moto_exception.SecretNotFoundException: + raise ResourceNotFoundException( + f"Secrets Manager can't find the specified secret value for staging label: {version_stage}" + ) + except moto_exception.ResourceNotFoundException: + error_message = ( + f"VersionId: {version_id}" if version_id else f"staging label: {version_stage}" + ) + raise ResourceNotFoundException( + f"Secrets Manager can't find the specified secret value for {error_message}" + ) + except moto_exception.SecretStageVersionMismatchException: + raise InvalidRequestException( + "You provided a VersionStage that is not associated to the provided VersionId." + ) + except moto_exception.SecretHasNoValueException: + raise ResourceNotFoundException( + f"Secrets Manager can't find the specified secret value for staging label: {version_stage}" + ) + except moto_exception.InvalidRequestException: + raise InvalidRequestException( + "You can't perform this operation on the secret because it was marked for deletion." 
+ ) + return GetSecretValueResponse(**response) + + @handler("ListSecretVersionIds", expand=False) + def list_secret_version_ids( + self, context: RequestContext, request: ListSecretVersionIdsRequest + ) -> ListSecretVersionIdsResponse: + secret_id = request["SecretId"] + include_deprecated = request.get("IncludeDeprecated", False) + self._raise_if_invalid_secret_id(secret_id) + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + secrets = backend.list_secret_version_ids(secret_id, include_deprecated=include_deprecated) + return ListSecretVersionIdsResponse(**json.loads(secrets)) + + @handler("PutResourcePolicy", expand=False) + def put_resource_policy( + self, context: RequestContext, request: PutResourcePolicyRequest + ) -> PutResourcePolicyResponse: + secret_id = request["SecretId"] + self._raise_if_invalid_secret_id(secret_id) + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + arn, name = backend.put_resource_policy(secret_id, request["ResourcePolicy"]) + return PutResourcePolicyResponse(ARN=arn, Name=name) + + @handler("PutSecretValue", expand=False) + def put_secret_value( + self, context: RequestContext, request: PutSecretValueRequest + ) -> PutSecretValueResponse: + secret_id = request["SecretId"] + self._raise_if_invalid_secret_id(secret_id) + self._raise_if_missing_client_req_token(request) + client_req_token = request.get("ClientRequestToken") + secret_string = request.get("SecretString") + secret_binary = request.get("SecretBinary") + if not secret_binary and not secret_string: + raise InvalidRequestException("You must provide either SecretString or SecretBinary.") + + version_stages = request.get("VersionStages", ["AWSCURRENT"]) + if not isinstance(version_stages, list): + version_stages = [version_stages] + + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + self._raise_if_default_kms_key(secret_id, context, backend) + + response = backend.put_secret_value( + secret_id=secret_id, + secret_binary=secret_binary, + secret_string=secret_string, + version_stages=version_stages, + client_request_token=client_req_token, + ) + return PutSecretValueResponse(**json.loads(response)) + + @handler("RemoveRegionsFromReplication", expand=False) + def remove_regions_from_replication( + self, context: RequestContext, request: RemoveRegionsFromReplicationRequest + ) -> RemoveRegionsFromReplicationResponse: + self._raise_if_invalid_secret_id(request["SecretId"]) + return call_moto(context, request) + + @handler("ReplicateSecretToRegions", expand=False) + def replicate_secret_to_regions( + self, context: RequestContext, request: ReplicateSecretToRegionsRequest + ) -> ReplicateSecretToRegionsResponse: + self._raise_if_invalid_secret_id(request["SecretId"]) + return call_moto(context, request) + + @handler("RestoreSecret", expand=False) + def restore_secret( + self, context: RequestContext, request: RestoreSecretRequest + ) -> RestoreSecretResponse: + secret_id = request["SecretId"] + self._raise_if_invalid_secret_id(secret_id) + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + try: + arn, name = backend.restore_secret(secret_id) + except moto_exception.SecretNotFoundException: + raise ResourceNotFoundException("Secrets Manager can't find the specified secret.") + return RestoreSecretResponse(ARN=arn, Name=name) + + @handler("RotateSecret", expand=False) + def rotate_secret( + self, context: RequestContext, request: RotateSecretRequest + ) -> 
RotateSecretResponse: + self._raise_if_missing_client_req_token(request) + self._raise_if_invalid_secret_id(request["SecretId"]) + return call_moto(context, request) + + @handler("StopReplicationToReplica", expand=False) + def stop_replication_to_replica( + self, context: RequestContext, request: StopReplicationToReplicaRequest + ) -> StopReplicationToReplicaResponse: + self._raise_if_invalid_secret_id(request["SecretId"]) + return call_moto(context, request) + + @handler("TagResource", expand=False) + def tag_resource(self, context: RequestContext, request: TagResourceRequest) -> None: + secret_id = request["SecretId"] + tags = request["Tags"] + self._raise_if_invalid_secret_id(secret_id) + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + backend.tag_resource(secret_id, tags) + + @handler("UntagResource", expand=False) + def untag_resource(self, context: RequestContext, request: UntagResourceRequest) -> None: + secret_id = request["SecretId"] + tag_keys = request.get("TagKeys") + self._raise_if_invalid_secret_id(secret_id) + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + backend.untag_resource(secret_id=secret_id, tag_keys=tag_keys) + + @handler("UpdateSecret", expand=False) + def update_secret( + self, context: RequestContext, request: UpdateSecretRequest + ) -> UpdateSecretResponse: + # if we're modifying the value of the secret, ClientRequestToken is required + secret_id = request["SecretId"] + secret_string = request.get("SecretString") + secret_binary = request.get("SecretBinary") + description = request.get("Description") + kms_key_id = request.get("KmsKeyId") + client_req_token = request.get("ClientRequestToken") + self._raise_if_invalid_secret_id(secret_id) + self._raise_if_missing_client_req_token(request) + + backend = SecretsmanagerProvider.get_moto_backend_for_resource(secret_id, context) + try: + secret = backend.update_secret( + secret_id, + description=description, + secret_string=secret_string, + secret_binary=secret_binary, + client_request_token=client_req_token, + kms_key_id=kms_key_id, + ) + except moto_exception.SecretNotFoundException: + raise ResourceNotFoundException("Secrets Manager can't find the specified secret.") + except moto_exception.OperationNotPermittedOnReplica: + raise InvalidRequestException( + "Operation not permitted on a replica secret. Call must be made in primary secret's region." + ) + except moto_exception.InvalidRequestException: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the UpdateSecret operation: " + "You can't perform this operation on the secret because it was marked for deletion." + ) + return UpdateSecretResponse(**json.loads(secret)) + + @handler("UpdateSecretVersionStage", expand=False) + def update_secret_version_stage( + self, context: RequestContext, request: UpdateSecretVersionStageRequest + ) -> UpdateSecretVersionStageResponse: + self._raise_if_invalid_secret_id(request["SecretId"]) + return call_moto(context, request) + + @handler("ValidateResourcePolicy", expand=False) + def validate_resource_policy( + self, context: RequestContext, request: ValidateResourcePolicyRequest + ) -> ValidateResourcePolicyResponse: + self._raise_if_invalid_secret_id(request["SecretId"]) + return call_moto(context, request) + + +@patch(FakeSecret.__init__) +def fake_secret__init__(fn, self, *args, **kwargs): + fn(self, *args, **kwargs) + + # Fix time not including millis. 
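+    # (moto stores these dates without sub-second precision; overwriting them with
+    # time.time() restores the milliseconds that AWS includes in its responses.)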
+ time_now = time.time() + if kwargs.get("last_changed_date", None): + self.last_changed_date = time_now + if kwargs.get("created_date", None): + self.created_date = time_now + + # The last date that the secret value was retrieved. + # This value does not include the time. + # This field is omitted if the secret has never been retrieved. + self.last_accessed_date = None + # Results in RotationEnabled being returned only if rotation was ever overwritten, + # in which case this field is non-null, but an integer. + self.auto_rotate_after_days = None + self.rotation_lambda_arn = None + + +@patch(FakeSecret.update) +def fake_secret_update( + fn, self, description=None, tags=None, kms_key_id=None, last_changed_date=None +): + fn(self, description, tags, kms_key_id, last_changed_date) + if last_changed_date is not None: + self.last_changed_date = round(time.time(), 3) + + +@patch(SecretsManagerBackend.get_secret_value) +def moto_smb_get_secret_value(fn, self, secret_id, version_id, version_stage): + res = fn(self, secret_id, version_id, version_stage) + + secret = self.secrets[secret_id] + + # Patch: update last accessed date on get. + secret.last_accessed_date = today_no_time() + + # Patch: update version's last accessed date. + secret_version = secret.versions.get(version_id or secret.default_version_id) + if secret_version: + secret_version["last_accessed_date"] = secret.last_accessed_date + + return res + + +@patch(SecretsManagerBackend.create_secret) +def moto_smb_create_secret(fn, self, name, *args, **kwargs): + # Creating a secret with a SecretId equal to one that is scheduled for + # deletion should raise an 'InvalidRequestException'. + secret: Optional[FakeSecret] = self.secrets.get(name) + if secret is not None and secret.deleted_date is not None: + raise InvalidRequestException(AWS_INVALID_REQUEST_MESSAGE_CREATE_WITH_SCHEDULED_DELETION) + + if name in self.secrets: + raise ResourceExistsException( + f"The operation failed because the secret {name} already exists." + ) + + return fn(self, name, *args, **kwargs) + + +@patch(SecretsManagerBackend.list_secret_version_ids) +def moto_smb_list_secret_version_ids( + _, self, secret_id: str, include_deprecated: bool, *args, **kwargs +): + if secret_id not in self.secrets: + raise SecretNotFoundException() + + if self.secrets[secret_id].is_deleted(): + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the UpdateSecret operation: " + "You can't perform this operation on the secret because it was marked for deletion." + ) + + secret = self.secrets[secret_id] + + # Patch: output format, report exact createdate instead of current time. + versions: list[SecretVersionsListEntry] = list() + for version_id, version in secret.versions.items(): + version_stages = version["version_stages"] + # Patch: include deprecated versions if include_deprecated is True. + # version_stages is empty if the version is deprecated. + # see: https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version + if len(version_stages) > 0 or include_deprecated: + entry = SecretVersionsListEntry( + CreatedDate=version["createdate"], + VersionId=version_id, + ) + + if version_stages: + entry["VersionStages"] = version_stages + + # Patch: bind LastAccessedDate if one exists for this version. + last_accessed_date = version.get("last_accessed_date") + if last_accessed_date: + entry["LastAccessedDate"] = last_accessed_date + + versions.append(entry) + + # Patch: sort versions by date. 
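+    # (reverse=True lists the most recently created version first.)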
+ versions.sort(key=lambda v: v["CreatedDate"], reverse=True) + + response = ListSecretVersionIdsResponse(ARN=secret.arn, Name=secret.name, Versions=versions) + + return json.dumps(response) + + +@patch(FakeSecret.to_dict) +def fake_secret_to_dict(fn, self): + res_dict = fn(self) + if self.last_accessed_date: + res_dict["LastAccessedDate"] = self.last_accessed_date + if not self.description and "Description" in res_dict: + del res_dict["Description"] + if not self.rotation_enabled and "RotationEnabled" in res_dict: + del res_dict["RotationEnabled"] + if self.auto_rotate_after_days is None and "RotationRules" in res_dict: + del res_dict["RotationRules"] + if self.tags is None and "Tags" in res_dict: + del res_dict["Tags"] + for null_field in [key for key, value in res_dict.items() if value is None]: + del res_dict[null_field] + return res_dict + + +@patch(SecretsManagerBackend.update_secret) +def backend_update_secret( + fn, + self, + secret_id, + description=None, + **kwargs, +): + if secret_id not in self.secrets: + raise SecretNotFoundException() + + if self.secrets[secret_id].is_deleted(): + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the UpdateSecret operation: " + "You can't perform this operation on the secret because it was marked for deletion." + ) + + secret = self.secrets[secret_id] + version_id_t0 = secret.default_version_id + + requires_new_version: bool = any( + [kwargs.get("kms_key_id"), kwargs.get("secret_binary"), kwargs.get("secret_string")] + ) + if requires_new_version: + fn(self, secret_id, **kwargs) + + if description is not None: + secret.description = description + + version_id_t1 = secret.default_version_id + + resp: UpdateSecretResponse = UpdateSecretResponse() + resp["ARN"] = secret.arn + resp["Name"] = secret.name + + if version_id_t0 != version_id_t1: + resp["VersionId"] = version_id_t1 + + return json.dumps(resp) + + +@patch(SecretsManagerResponse.update_secret, pass_target=False) +def response_update_secret(self): + secret_id = self._get_param("SecretId") + description = self._get_param("Description") + secret_string = self._get_param("SecretString") + secret_binary = self._get_param("SecretBinary") + client_request_token = self._get_param("ClientRequestToken") + kms_key_id = self._get_param("KmsKeyId") + return self.backend.update_secret( + secret_id=secret_id, + description=description, + secret_string=secret_string, + secret_binary=secret_binary, + client_request_token=client_request_token, + kms_key_id=kms_key_id, + ) + + +@patch(SecretsManagerBackend.update_secret_version_stage) +def backend_update_secret_version_stage( + fn, self, secret_id, version_stage, remove_from_version_id, move_to_version_id +): + fn(self, secret_id, version_stage, remove_from_version_id, move_to_version_id) + + secret = self.secrets[secret_id] + + # Patch: default version is the new AWSCURRENT version + if version_stage == AWSCURRENT: + secret.default_version_id = move_to_version_id + + versions_no_stages = [] + for version_id, version in secret.versions.items(): + version_stages = version["version_stages"] + + # moto appends a new AWSPREVIOUS label to the version AWSCURRENT was removed from, + # but it does not remove the old AWSPREVIOUS label. + # Patch: ensure only one AWSPREVIOUS tagged version is in the pool. 
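+            # Illustrative example: if v1 carries AWSPREVIOUS and v2 carries AWSCURRENT,
+            # then promoting v3 to AWSCURRENT moves AWSPREVIOUS onto v2, and the stale
+            # AWSPREVIOUS label on v1 is stripped by the check below.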
+ if ( + version_stage == AWSCURRENT + and version_id != remove_from_version_id + and AWSPREVIOUS in version_stages + ): + version_stages.remove(AWSPREVIOUS) + + if not version_stages: + versions_no_stages.append(version_id) + + # Patch: remove secret versions with no version stages. + for version_no_stages in versions_no_stages: + del secret.versions[version_no_stages] + + return secret.arn, secret.name + + +@patch(FakeSecret.reset_default_version) +def fake_secret_reset_default_version(fn, self, secret_version, version_id): + fn(self, secret_version, version_id) + + # Remove versions with no version stages, if max limit of outdated versions is exceeded. + versions_no_stages: list[str] = [ + version_id for version_id, version in self.versions.items() if not version["version_stages"] + ] + versions_to_delete: list[str] = [] + + # Patch: remove outdated versions if the max deprecated versions limit is exceeded. + if len(versions_no_stages) >= MAX_OUTDATED_SECRET_VERSIONS: + versions_to_delete = versions_no_stages[ + : len(versions_no_stages) - MAX_OUTDATED_SECRET_VERSIONS + ] + + for version_to_delete in versions_to_delete: + del self.versions[version_to_delete] + + +@patch(FakeSecret.remove_version_stages_from_old_versions) +def fake_secret_remove_version_stages_from_old_versions(fn, self, version_stages): + fn(self, version_stages) + # Remove versions with no version stages. + versions_no_stages = [ + version_id for version_id, version in self.versions.items() if not version["version_stages"] + ] + for version_no_stages in versions_no_stages: + del self.versions[version_no_stages] + + +# Moto does not support rotate_immediately as an API parameter while the AWS API does +@patch(SecretsManagerResponse.rotate_secret, pass_target=False) +def rotate_secret(self) -> str: + client_request_token = self._get_param("ClientRequestToken") + rotation_lambda_arn = self._get_param("RotationLambdaARN") + rotation_rules = self._get_param("RotationRules") + rotate_immediately = self._get_param("RotateImmediately") + secret_id = self._get_param("SecretId") + return self.backend.rotate_secret( + secret_id=secret_id, + client_request_token=client_request_token, + rotation_lambda_arn=rotation_lambda_arn, + rotation_rules=rotation_rules, + rotate_immediately=True if rotate_immediately is None else rotate_immediately, + ) + + +@patch(SecretsManagerBackend.rotate_secret) +def backend_rotate_secret( + _, + self, + secret_id, + client_request_token=None, + rotation_lambda_arn=None, + rotation_rules=None, + rotate_immediately=True, +): + rotation_days = "AutomaticallyAfterDays" + + if not self._is_valid_identifier(secret_id): + raise SecretNotFoundException() + + secret = self.secrets[secret_id] + if secret.is_deleted(): + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \ + perform the operation on a secret that's currently marked deleted." + ) + # Resolve rotation_lambda_arn and fall back to the previous value if it's missing + # from the current request + rotation_lambda_arn = rotation_lambda_arn or secret.rotation_lambda_arn + if not rotation_lambda_arn: + raise InvalidRequestException( + "No Lambda rotation function ARN is associated with this secret." + ) + + if rotation_lambda_arn: + if len(rotation_lambda_arn) > 2048: + msg = "RotationLambdaARN must be <= 2048 characters long."
+ raise InvalidParameterException(msg) + + # In case rotation_period is not provided, resolve auto_rotate_after_days + # and fall back to the previous value if it's missing from the current request. + rotation_period = secret.auto_rotate_after_days or 0 + if rotation_rules: + if rotation_days in rotation_rules: + rotation_period = rotation_rules[rotation_days] + if rotation_period < 1 or rotation_period > 1000: + msg = "RotationRules.AutomaticallyAfterDays must be within 1-1000." + raise InvalidParameterException(msg) + + try: + lm_client = connect_to(region_name=self.region_name).lambda_ + lm_client.get_function(FunctionName=rotation_lambda_arn) + except Exception: + raise ResourceNotFoundException("Lambda does not exist or could not be accessed") + + # The rotation function must end with the versions of the secret in + # one of two states: + # + # - The AWSPENDING and AWSCURRENT staging labels are attached to the + # same version of the secret, or + # - The AWSPENDING staging label is not attached to any version of the secret. + # + # If the AWSPENDING staging label is present but not attached to the same + # version as AWSCURRENT, then any later invocation of RotateSecret assumes + # that a previous rotation request is still in progress and returns an error. + try: + pending_version = None + version = next( + version + for version in secret.versions.values() + if AWSPENDING in version["version_stages"] + ) + if AWSCURRENT not in version["version_stages"]: + msg = "Previous rotation request is still in progress." + # Delay the exception so we can trigger the lambda again + pending_version = [InvalidRequestException(msg), version] + + except StopIteration: + # Pending is not present in any version + pass + + secret.rotation_lambda_arn = rotation_lambda_arn + secret.auto_rotate_after_days = rotation_period + if secret.auto_rotate_after_days > 0: + wait_interval_s = int(rotation_period) * 86400 + secret.next_rotation_date = int(time.time()) + wait_interval_s + secret.rotation_enabled = True + secret.rotation_requested = True + + if rotate_immediately: + if not pending_version: + # Begin the rotation process for the given secret by invoking the lambda function. + # + # We add the new secret version as "pending". The previous version remains + # as "current" for now. Once we've passed the new secret through the lambda + # rotation function (if provided) we can then update the status to "current". + new_version_id = self._from_client_request_token(client_request_token) + + # An initial dummy secret value is necessary, otherwise moto does not add the new + # secret version. + self._add_secret( + secret_id, + "dummy_password", + description=secret.description, + tags=secret.tags, + version_id=new_version_id, + version_stages=[AWSPENDING], + ) + + # AWS secret rotation function templates have checks on existing values so we remove + # the dummy value to force the lambda to generate a new one.
+ del secret.versions[new_version_id]["secret_string"] + else: + new_version_id = pending_version.pop()["version_id"] + + try: + for step in ["create", "set", "test", "finish"]: + resp = lm_client.invoke( + FunctionName=rotation_lambda_arn, + Payload=json.dumps( + { + "Step": step + "Secret", + "SecretId": secret.name, + "ClientRequestToken": new_version_id, + } + ), + ) + if resp.get("FunctionError"): + data = json.loads(resp.get("Payload").read()) + raise Exception(data.get("errorType")) + except Exception as e: + LOG.debug("An exception (%s) has occurred in %s", str(e), rotation_lambda_arn) + if pending_version: + raise pending_version.pop() + # Fall through if there is no previously pending version, so we'll be "stuck" with a new + # secret version in AWSPENDING state. + secret.last_rotation_date = int(time.time()) + return secret.to_short_dict(version_id=new_version_id) + + +@patch(moto_exception.SecretNotFoundException.__init__) +def moto_secret_not_found_exception_init(fn, self): + fn(self) + self.code = 400 + + +@patch(FakeSecret._form_version_ids_to_stages, pass_target=False) +def _form_version_ids_to_stages_modal(self): + version_id_to_stages: dict[str, list] = {} + for key, value in self.versions.items(): + # Patch: include version_stages in the response only if it is not empty. + if len(value["version_stages"]) > 0: + version_id_to_stages[key] = value["version_stages"] + return version_id_to_stages + + +# Patching resource policy support in moto +def get_resource_policy_model(self, secret_id): + if self._is_valid_identifier(secret_id): + result = { + "ARN": self.secrets[secret_id].arn, + "Name": self.secrets[secret_id].secret_id, + } + policy = getattr(self.secrets[secret_id], "policy", None) + if policy: + result["ResourcePolicy"] = policy + return json.dumps(result) + else: + raise SecretNotFoundException() + + +def get_resource_policy_response(self): + secret_id = self._get_param("SecretId") + return self.backend.get_resource_policy(secret_id=secret_id) + + +def decode_secret_binary_from_response(response: dict[str, Any]): + if "SecretBinary" in response: + response["SecretBinary"] = base64.b64decode(response["SecretBinary"]) + + return response + + +def delete_resource_policy_model(self, secret_id): + if self._is_valid_identifier(secret_id): + self.secrets[secret_id].policy = None + return json.dumps( + { + "ARN": self.secrets[secret_id].arn, + "Name": self.secrets[secret_id].secret_id, + } + ) + else: + raise SecretNotFoundException() + + +def delete_resource_policy_response(self): + secret_id = self._get_param("SecretId") + return self.backend.delete_resource_policy(secret_id=secret_id) + + +def put_resource_policy_model(self, secret_id, resource_policy): + policy_validator = IAMPolicyDocumentValidator(resource_policy) + policy_validator._validate_top_elements() + policy_validator._validate_version_syntax() + if self._is_valid_identifier(secret_id): + self.secrets[secret_id].policy = resource_policy + return json.dumps( + { + "ARN": self.secrets[secret_id].arn, + "Name": self.secrets[secret_id].secret_id, + } + ) + else: + raise SecretNotFoundException() + + +def put_resource_policy_response(self): + secret_id = self._get_param("SecretId") + resource_policy = self._get_param("ResourcePolicy") + return self.backend.put_resource_policy( + secret_id=secret_id, resource_policy=json.loads(resource_policy) + ) + + +def apply_patches(): + SecretsManagerBackend.get_resource_policy = get_resource_policy_model + SecretsManagerResponse.get_resource_policy = get_resource_policy_response + + if 
not hasattr(SecretsManagerBackend, "delete_resource_policy"): + SecretsManagerBackend.delete_resource_policy = delete_resource_policy_model + if not hasattr(SecretsManagerResponse, "delete_resource_policy"): + SecretsManagerResponse.delete_resource_policy = delete_resource_policy_response + if not hasattr(SecretsManagerBackend, "put_resource_policy"): + SecretsManagerBackend.put_resource_policy = put_resource_policy_model + if not hasattr(SecretsManagerResponse, "put_resource_policy"): + SecretsManagerResponse.put_resource_policy = put_resource_policy_response diff --git a/localstack/services/transcribe/__init__.py b/localstack-core/localstack/services/secretsmanager/resource_providers/__init__.py similarity index 100% rename from localstack/services/transcribe/__init__.py rename to localstack-core/localstack/services/secretsmanager/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy.py b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy.py new file mode 100644 index 0000000000000..53784023f67f5 --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy.py @@ -0,0 +1,112 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SecretsManagerResourcePolicyProperties(TypedDict): + ResourcePolicy: Optional[dict] + SecretId: Optional[str] + BlockPublicPolicy: Optional[bool] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SecretsManagerResourcePolicyProvider( + ResourceProvider[SecretsManagerResourcePolicyProperties] +): + TYPE = "AWS::SecretsManager::ResourcePolicy" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SecretsManagerResourcePolicyProperties], + ) -> ProgressEvent[SecretsManagerResourcePolicyProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - SecretId + - ResourcePolicy + + Create-only properties: + - /properties/SecretId + + Read-only properties: + - /properties/Id + + """ + model = request.desired_state + secret_manager = request.aws_client_factory.secretsmanager + + params = { + "SecretId": model["SecretId"], + "ResourcePolicy": json.dumps(model["ResourcePolicy"]), + # Default to True only when the property is unset ("or True" would also discard an explicit False). + "BlockPublicPolicy": model.get("BlockPublicPolicy", True), + } + response = secret_manager.put_resource_policy(**params) + + model["Id"] = response["ARN"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SecretsManagerResourcePolicyProperties], + ) -> ProgressEvent[SecretsManagerResourcePolicyProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SecretsManagerResourcePolicyProperties], + ) -> ProgressEvent[SecretsManagerResourcePolicyProperties]: + """ + Delete a resource + + """ + model = request.desired_state + secret_manager = request.aws_client_factory.secretsmanager + + response = secret_manager.delete_resource_policy(SecretId=model["SecretId"]) + + model["Id"] = response["ARN"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SecretsManagerResourcePolicyProperties], + ) -> ProgressEvent[SecretsManagerResourcePolicyProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy.schema.json b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy.schema.json new file mode 100644 index 0000000000000..cb829fc66c01d --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy.schema.json @@ -0,0 +1,32 @@ +{ + "typeName": "AWS::SecretsManager::ResourcePolicy", + "description": "Resource Type definition for AWS::SecretsManager::ResourcePolicy", + "additionalProperties": false, + "properties": { + "ResourcePolicy": { + "type": "object" + }, + "Id": { + "type": "string" + }, + "BlockPublicPolicy": { + "type": "boolean" + }, + "SecretId": { + "type": "string" + } + }, + "required": [ + "SecretId", + "ResourcePolicy" + ], + "createOnlyProperties": [ + "/properties/SecretId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy_plugin.py b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy_plugin.py new file mode 100644 index 0000000000000..1571bbfd89afc --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_resourcepolicy_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SecretsManagerResourcePolicyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SecretsManager::ResourcePolicy" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def 
load(self): + from localstack.services.secretsmanager.resource_providers.aws_secretsmanager_resourcepolicy import ( + SecretsManagerResourcePolicyProvider, + ) + + self.factory = SecretsManagerResourcePolicyProvider diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule.py b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule.py new file mode 100644 index 0000000000000..b838450d24a1d --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule.py @@ -0,0 +1,125 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SecretsManagerRotationScheduleProperties(TypedDict): + SecretId: Optional[str] + HostedRotationLambda: Optional[HostedRotationLambda] + Id: Optional[str] + RotateImmediatelyOnUpdate: Optional[bool] + RotationLambdaARN: Optional[str] + RotationRules: Optional[RotationRules] + + +class RotationRules(TypedDict): + AutomaticallyAfterDays: Optional[int] + Duration: Optional[str] + ScheduleExpression: Optional[str] + + +class HostedRotationLambda(TypedDict): + RotationType: Optional[str] + ExcludeCharacters: Optional[str] + KmsKeyArn: Optional[str] + MasterSecretArn: Optional[str] + MasterSecretKmsKeyArn: Optional[str] + RotationLambdaName: Optional[str] + Runtime: Optional[str] + SuperuserSecretArn: Optional[str] + SuperuserSecretKmsKeyArn: Optional[str] + VpcSecurityGroupIds: Optional[str] + VpcSubnetIds: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SecretsManagerRotationScheduleProvider( + ResourceProvider[SecretsManagerRotationScheduleProperties] +): + TYPE = "AWS::SecretsManager::RotationSchedule" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SecretsManagerRotationScheduleProperties], + ) -> ProgressEvent[SecretsManagerRotationScheduleProperties]: + """ + Create a new resource. 
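+        NOTE: this handler currently only assigns a default Id and reports success;
+        applying the rotation configuration to the secret is not implemented here.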
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - SecretId + + Create-only properties: + - /properties/SecretId + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + if not model.get("Id"): + model["Id"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SecretsManagerRotationScheduleProperties], + ) -> ProgressEvent[SecretsManagerRotationScheduleProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SecretsManagerRotationScheduleProperties], + ) -> ProgressEvent[SecretsManagerRotationScheduleProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SecretsManagerRotationScheduleProperties], + ) -> ProgressEvent[SecretsManagerRotationScheduleProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule.schema.json b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule.schema.json new file mode 100644 index 0000000000000..a99cc063f1106 --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule.schema.json @@ -0,0 +1,96 @@ +{ + "typeName": "AWS::SecretsManager::RotationSchedule", + "description": "Resource Type definition for AWS::SecretsManager::RotationSchedule", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "RotationLambdaARN": { + "type": "string" + }, + "RotationRules": { + "$ref": "#/definitions/RotationRules" + }, + "RotateImmediatelyOnUpdate": { + "type": "boolean" + }, + "SecretId": { + "type": "string" + }, + "HostedRotationLambda": { + "$ref": "#/definitions/HostedRotationLambda" + } + }, + "definitions": { + "HostedRotationLambda": { + "type": "object", + "additionalProperties": false, + "properties": { + "Runtime": { + "type": "string" + }, + "RotationType": { + "type": "string" + }, + "RotationLambdaName": { + "type": "string" + }, + "KmsKeyArn": { + "type": "string" + }, + "MasterSecretArn": { + "type": "string" + }, + "VpcSecurityGroupIds": { + "type": "string" + }, + "ExcludeCharacters": { + "type": "string" + }, + "MasterSecretKmsKeyArn": { + "type": "string" + }, + "SuperuserSecretArn": { + "type": "string" + }, + "SuperuserSecretKmsKeyArn": { + "type": "string" + }, + "VpcSubnetIds": { + "type": "string" + } + }, + "required": [ + "RotationType" + ] + }, + "RotationRules": { + "type": "object", + "additionalProperties": false, + "properties": { + "ScheduleExpression": { + "type": "string" + }, + "Duration": { + "type": "string" + }, + "AutomaticallyAfterDays": { + "type": "integer" + } + } + } + }, + "required": [ + "SecretId" + ], + "createOnlyProperties": [ + "/properties/SecretId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git 
a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule_plugin.py b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule_plugin.py new file mode 100644 index 0000000000000..dd680bd788d1f --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_rotationschedule_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SecretsManagerRotationScheduleProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SecretsManager::RotationSchedule" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.secretsmanager.resource_providers.aws_secretsmanager_rotationschedule import ( + SecretsManagerRotationScheduleProvider, + ) + + self.factory = SecretsManagerRotationScheduleProvider diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret.py b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret.py new file mode 100644 index 0000000000000..d53dbd2e9aefe --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret.py @@ -0,0 +1,272 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +import logging +import random +import string +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + +LOG = logging.getLogger(__name__) + + +class SecretsManagerSecretProperties(TypedDict): + Description: Optional[str] + GenerateSecretString: Optional[GenerateSecretString] + Id: Optional[str] + KmsKeyId: Optional[str] + Name: Optional[str] + ReplicaRegions: Optional[list[ReplicaRegion]] + SecretString: Optional[str] + Tags: Optional[list[Tag]] + + +class GenerateSecretString(TypedDict): + ExcludeCharacters: Optional[str] + ExcludeLowercase: Optional[bool] + ExcludeNumbers: Optional[bool] + ExcludePunctuation: Optional[bool] + ExcludeUppercase: Optional[bool] + GenerateStringKey: Optional[str] + IncludeSpace: Optional[bool] + PasswordLength: Optional[int] + RequireEachIncludedType: Optional[bool] + SecretStringTemplate: Optional[str] + + +class ReplicaRegion(TypedDict): + Region: Optional[str] + KmsKeyId: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SecretsManagerSecretProvider(ResourceProvider[SecretsManagerSecretProperties]): + TYPE = "AWS::SecretsManager::Secret" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SecretsManagerSecretProperties], + ) -> ProgressEvent[SecretsManagerSecretProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Id + + IAM permissions required: + - secretsmanager:DescribeSecret + - secretsmanager:GetRandomPassword + - secretsmanager:CreateSecret + - secretsmanager:TagResource + + """ + model = request.desired_state + secrets_manager = request.aws_client_factory.secretsmanager + + if not model.get("Name"): + # Not entirely correct: given the LogicalResourceId "MySecret", + # an example of the generated name would be "MySecret-krxoxgcznYdq-sQNsqO" + model["Name"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + attributes = ["Name", "Description", "KmsKeyId", "SecretString", "Tags"] + params = util.select_attributes(model, attributes) + + """ + From CFn Docs: + If you omit both GenerateSecretString and SecretString, you create an empty secret. + When you make a change to this property, a new secret version is created. + CDK will generate an empty dict, in which case we also need to generate SecretString + """ + + gen_secret = model.get("GenerateSecretString") + if gen_secret is not None: + secret_value = self._get_secret_value(gen_secret) + template = gen_secret.get("SecretStringTemplate") + if template: + secret_value = self._modify_secret_template(template, secret_value, gen_secret) + params["SecretString"] = secret_value + + response = secrets_manager.create_secret(**params) + model["Id"] = response["ARN"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def _get_secret_value(self, gen_secret): + excl_lower = gen_secret.get("ExcludeLowercase") + excl_upper = gen_secret.get("ExcludeUppercase") + excl_chars = gen_secret.get("ExcludeCharacters") or "" + excl_numbers = gen_secret.get("ExcludeNumbers") + excl_punct = gen_secret.get("ExcludePunctuation") + incl_spaces = gen_secret.get("IncludeSpace") + length = gen_secret.get("PasswordLength") or 32 + req_each = gen_secret.get("RequireEachIncludedType") + return self.generate_secret_value( + length=length, + excl_lower=excl_lower, + excl_upper=excl_upper, + excl_punct=excl_punct, + incl_spaces=incl_spaces, + excl_chars=excl_chars, + excl_numbers=excl_numbers, + req_each=req_each, + ) + + def _modify_secret_template(self, template, secret_value, gen_secret): + gen_key = gen_secret.get("GenerateStringKey") or "secret" + template = json.loads(template) + template[gen_key] = secret_value + return json.dumps(template) + + def generate_secret_value( + self, + length: int, + excl_lower: bool, + excl_upper: bool, + excl_chars: str, + excl_numbers: bool, + excl_punct: bool, + incl_spaces: bool, + req_each: bool, + ) -> str: + """WARN: This is NOT a secure way to generate secrets - use only for testing and not in production use cases!""" + + # TODO: add a couple of unit tests for this function ... 
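+        # Illustrative call (output varies with the random state):
+        # generate_secret_value(length=16, excl_lower=False, excl_upper=False, excl_chars="",
+        #     excl_numbers=False, excl_punct=True, incl_spaces=False, req_each=False)
+        # could yield e.g. "Xk4TqZ9mPa2Wb7Rc" (16 alphanumeric characters, no punctuation).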
+ + punctuation = r"!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~" + alphabet = "" + if not excl_punct: + alphabet += punctuation + if not excl_upper: + alphabet += string.ascii_uppercase + if not excl_lower: + alphabet += string.ascii_lowercase + if not excl_numbers: + alphabet += "".join([str(i) for i in list(range(10))]) + if incl_spaces: + alphabet += " " + if req_each: + LOG.info("Secret generation option 'RequireEachIncludedType' not yet supported") + + for char in excl_chars: + alphabet = alphabet.replace(char, "") + + result = [alphabet[random.randrange(len(alphabet))] for _ in range(length)] + result = "".join(result) + return result + + def read( + self, + request: ResourceRequest[SecretsManagerSecretProperties], + ) -> ProgressEvent[SecretsManagerSecretProperties]: + """ + Fetch resource information + + IAM permissions required: + - secretsmanager:DescribeSecret + - secretsmanager:GetSecretValue + """ + secretsmanager = request.aws_client_factory.secretsmanager + secret_id = request.desired_state["Id"] + + secret = secretsmanager.describe_secret(SecretId=secret_id) + model = SecretsManagerSecretProperties( + **util.select_attributes(secret, self.SCHEMA["properties"]) + ) + model["Id"] = secret["ARN"] + + if "Tags" not in model: + model["Tags"] = [] + + model["ReplicaRegions"] = [ + {"KmsKeyId": replication_region["KmsKeyId"], "Region": replication_region["Region"]} + for replication_region in secret.get("ReplicationStatus", []) + ] + if "ReplicaRegions" not in model: + model["ReplicaRegions"] = [] + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + + def delete( + self, + request: ResourceRequest[SecretsManagerSecretProperties], + ) -> ProgressEvent[SecretsManagerSecretProperties]: + """ + Delete a resource + + IAM permissions required: + - secretsmanager:DeleteSecret + - secretsmanager:DescribeSecret + - secretsmanager:RemoveRegionsFromReplication + """ + model = request.desired_state + secrets_manager = request.aws_client_factory.secretsmanager + + secrets_manager.delete_secret(SecretId=model["Name"], ForceDeleteWithoutRecovery=True) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SecretsManagerSecretProperties], + ) -> ProgressEvent[SecretsManagerSecretProperties]: + """ + Update a resource + + IAM permissions required: + - secretsmanager:UpdateSecret + - secretsmanager:TagResource + - secretsmanager:UntagResource + - secretsmanager:GetRandomPassword + - secretsmanager:GetSecretValue + - secretsmanager:ReplicateSecretToRegions + - secretsmanager:RemoveRegionsFromReplication + """ + raise NotImplementedError + + def list( + self, + request: ResourceRequest[SecretsManagerSecretProperties], + ) -> ProgressEvent[SecretsManagerSecretProperties]: + resources = request.aws_client_factory.secretsmanager.list_secrets() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + SecretsManagerSecretProperties(Id=resource["Name"]) + for resource in resources["SecretList"] + ], + ) diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret.schema.json b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret.schema.json new file mode 100644 index 0000000000000..408bb14bcdfd1 --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret.schema.json @@ -0,0 +1,195 @@ +{ + 
"typeName": "AWS::SecretsManager::Secret", + "$schema": "https://schema.cloudformation.us-east-1.amazonaws.com/provider.definition.schema.v1.json", + "description": "Resource Type definition for AWS::SecretsManager::Secret", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-secretsmanager.git", + "additionalProperties": false, + "properties": { + "Description": { + "type": "string", + "description": "(Optional) Specifies a user-provided description of the secret." + }, + "KmsKeyId": { + "type": "string", + "description": "(Optional) Specifies the ARN, Key ID, or alias of the AWS KMS customer master key (CMK) used to encrypt the SecretString." + }, + "SecretString": { + "type": "string", + "description": "(Optional) Specifies text data that you want to encrypt and store in this new version of the secret." + }, + "GenerateSecretString": { + "$ref": "#/definitions/GenerateSecretString", + "description": "(Optional) Specifies text data that you want to encrypt and store in this new version of the secret." + }, + "ReplicaRegions": { + "type": "array", + "description": "(Optional) A list of ReplicaRegion objects. The ReplicaRegion type consists of a Region (required) and the KmsKeyId which can be an ARN, Key ID, or Alias.", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/ReplicaRegion" + } + }, + "Id": { + "type": "string", + "description": "secret Id, the Arn of the resource." + }, + "Tags": { + "type": "array", + "description": "The list of user-defined tags associated with the secret. Use tags to manage your AWS resources. For additional information about tags, see TagResource.", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Name": { + "type": "string", + "description": "The friendly name of the secret. You can use forward slashes in the name to represent a path hierarchy." + } + }, + "definitions": { + "GenerateSecretString": { + "type": "object", + "additionalProperties": false, + "properties": { + "ExcludeUppercase": { + "type": "boolean", + "description": "Specifies that the generated password should not include uppercase letters. The default behavior is False, and the generated password can include uppercase letters. " + }, + "RequireEachIncludedType": { + "type": "boolean", + "description": "Specifies whether the generated password must include at least one of every allowed character type. By default, Secrets Manager enables this parameter, and the generated password includes at least one of every character type." + }, + "IncludeSpace": { + "type": "boolean", + "description": "Specifies that the generated password can include the space character. By default, Secrets Manager disables this parameter, and the generated password doesn't include space" + }, + "ExcludeCharacters": { + "type": "string", + "description": "A string that excludes characters in the generated password. By default, all characters from the included sets can be used. The string can be a minimum length of 0 characters and a maximum length of 7168 characters. " + }, + "GenerateStringKey": { + "type": "string", + "description": "The JSON key name used to add the generated password to the JSON structure specified by the SecretStringTemplate parameter. If you specify this parameter, then you must also specify SecretStringTemplate. " + }, + "PasswordLength": { + "type": "integer", + "description": "The desired length of the generated password. 
The default value if you do not include this parameter is 32 characters. " + }, + "ExcludePunctuation": { + "type": "boolean", + "description": "Specifies that the generated password should not include punctuation characters. The default if you do not include this switch parameter is that punctuation characters can be included. " + }, + "ExcludeLowercase": { + "type": "boolean", + "description": "Specifies that the generated password should not include lowercase letters. By default, Secrets Manager disables this parameter, and the generated password can include lowercase letters." + }, + "SecretStringTemplate": { + "type": "string", + "description": "A properly structured JSON string that the generated password can be added to. If you specify this parameter, then you must also specify GenerateStringKey." + }, + "ExcludeNumbers": { + "type": "boolean", + "description": "Specifies that the generated password should exclude digits. By default, Secrets Manager does not enable this parameter, and the generated password can include digits." + } + } + }, + "ReplicaRegion": { + "type": "object", + "description": "A custom type that specifies a Region and the KmsKeyId for a replica secret.", + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "type": "string", + "description": "The ARN, key ID, or alias of the KMS key to encrypt the secret. If you don't include this field, Secrets Manager uses aws/secretsmanager." + }, + "Region": { + "type": "string", + "description": "(Optional) A string that represents a Region, for example \"us-east-1\"." + } + }, + "required": [ + "Region" + ] + }, + "Tag": { + "type": "object", + "description": "A list of tags to attach to the secret. Each tag is a key and value pair of strings in a JSON text string.", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string", + "description": "The value for the tag. You can specify a value that's 1 to 256 characters in length." + }, + "Key": { + "type": "string", + "description": "The key name of the tag. You can specify a value that's 1 to 128 Unicode characters in length and can't be prefixed with aws."
+ } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "createOnlyProperties": [ + "/properties/Name" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ], + "writeOnlyProperties": [ + "/properties/SecretString", + "/properties/GenerateSecretString" + ], + "handlers": { + "create": { + "permissions": [ + "secretsmanager:DescribeSecret", + "secretsmanager:GetRandomPassword", + "secretsmanager:CreateSecret", + "secretsmanager:TagResource" + ] + }, + "delete": { + "permissions": [ + "secretsmanager:DeleteSecret", + "secretsmanager:DescribeSecret", + "secretsmanager:RemoveRegionsFromReplication" + ] + }, + "list": { + "permissions": [ + "secretsmanager:ListSecrets" + ] + }, + "read": { + "permissions": [ + "secretsmanager:DescribeSecret", + "secretsmanager:GetSecretValue" + ] + }, + "update": { + "permissions": [ + "secretsmanager:UpdateSecret", + "secretsmanager:TagResource", + "secretsmanager:UntagResource", + "secretsmanager:GetRandomPassword", + "secretsmanager:GetSecretValue", + "secretsmanager:ReplicateSecretToRegions", + "secretsmanager:RemoveRegionsFromReplication" + ] + } + } +} diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret_plugin.py b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret_plugin.py new file mode 100644 index 0000000000000..4c85279d0d81f --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secret_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SecretsManagerSecretProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SecretsManager::Secret" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.secretsmanager.resource_providers.aws_secretsmanager_secret import ( + SecretsManagerSecretProvider, + ) + + self.factory = SecretsManagerSecretProvider diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment.py b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment.py new file mode 100644 index 0000000000000..27f8682c0a51f --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment.py @@ -0,0 +1,104 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SecretsManagerSecretTargetAttachmentProperties(TypedDict): + SecretId: Optional[str] + TargetId: Optional[str] + TargetType: Optional[str] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SecretsManagerSecretTargetAttachmentProvider( + ResourceProvider[SecretsManagerSecretTargetAttachmentProperties] +): + TYPE = "AWS::SecretsManager::SecretTargetAttachment" # Autogenerated. 
Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SecretsManagerSecretTargetAttachmentProperties], + ) -> ProgressEvent[SecretsManagerSecretTargetAttachmentProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - TargetType + - TargetId + - SecretId + + + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + if not model.get("Id"): + model["Id"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SecretsManagerSecretTargetAttachmentProperties], + ) -> ProgressEvent[SecretsManagerSecretTargetAttachmentProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SecretsManagerSecretTargetAttachmentProperties], + ) -> ProgressEvent[SecretsManagerSecretTargetAttachmentProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SecretsManagerSecretTargetAttachmentProperties], + ) -> ProgressEvent[SecretsManagerSecretTargetAttachmentProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment.schema.json b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment.schema.json new file mode 100644 index 0000000000000..38e1b18fcdc07 --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment.schema.json @@ -0,0 +1,30 @@ +{ + "typeName": "AWS::SecretsManager::SecretTargetAttachment", + "description": "Resource Type definition for AWS::SecretsManager::SecretTargetAttachment", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "SecretId": { + "type": "string" + }, + "TargetType": { + "type": "string" + }, + "TargetId": { + "type": "string" + } + }, + "required": [ + "TargetType", + "TargetId", + "SecretId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment_plugin.py b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment_plugin.py new file mode 100644 index 0000000000000..f84e773ee3faf --- /dev/null +++ b/localstack-core/localstack/services/secretsmanager/resource_providers/aws_secretsmanager_secrettargetattachment_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SecretsManagerSecretTargetAttachmentProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SecretsManager::SecretTargetAttachment" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from 
localstack.services.secretsmanager.resource_providers.aws_secretsmanager_secrettargetattachment import ( + SecretsManagerSecretTargetAttachmentProvider, + ) + + self.factory = SecretsManagerSecretTargetAttachmentProvider diff --git a/localstack/testing/__init__.py b/localstack-core/localstack/services/ses/__init__.py similarity index 100% rename from localstack/testing/__init__.py rename to localstack-core/localstack/services/ses/__init__.py diff --git a/localstack-core/localstack/services/ses/models.py b/localstack-core/localstack/services/ses/models.py new file mode 100644 index 0000000000000..2560f872410da --- /dev/null +++ b/localstack-core/localstack/services/ses/models.py @@ -0,0 +1,21 @@ +from typing import TypedDict + +from localstack.aws.api.ses import Address, Destination, Subject, TemplateData, TemplateName + + +class SentEmailBody(TypedDict): + html_part: str | None + text_part: str + + +class SentEmail(TypedDict): + Id: str + Region: str + Timestamp: str + Destination: Destination + RawData: str + Source: Address + Subject: Subject + Template: TemplateName + TemplateData: TemplateData + Body: SentEmailBody diff --git a/localstack-core/localstack/services/ses/provider.py b/localstack-core/localstack/services/ses/provider.py new file mode 100644 index 0000000000000..ca87c457c5818 --- /dev/null +++ b/localstack-core/localstack/services/ses/provider.py @@ -0,0 +1,647 @@ +import dataclasses +import json +import logging +import os +import re +from collections import defaultdict +from datetime import date, datetime, time, timezone +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from botocore.exceptions import ClientError +from moto.ses import ses_backends +from moto.ses.models import SESBackend + +from localstack import config +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.core import CommonServiceException +from localstack.aws.api.ses import ( + Address, + AddressList, + AmazonResourceName, + CloneReceiptRuleSetResponse, + ConfigurationSetDoesNotExistException, + ConfigurationSetName, + CreateConfigurationSetEventDestinationResponse, + DeleteConfigurationSetEventDestinationResponse, + DeleteConfigurationSetResponse, + DeleteTemplateResponse, + Destination, + EventDestination, + EventDestinationDoesNotExistException, + EventDestinationName, + GetIdentityVerificationAttributesResponse, + IdentityList, + IdentityVerificationAttributes, + InvalidSNSDestinationException, + ListTemplatesResponse, + MaxItems, + Message, + MessageId, + MessageRejected, + MessageTagList, + NextToken, + RawMessage, + ReceiptRuleSetName, + SendEmailResponse, + SendRawEmailResponse, + SendTemplatedEmailResponse, + SesApi, + TemplateData, + TemplateName, + VerificationAttributes, + VerificationStatus, +) +from localstack.aws.connect import connect_to +from localstack.constants import INTERNAL_AWS_SECRET_ACCESS_KEY +from localstack.http import Resource, Response +from localstack.services.moto import call_moto +from localstack.services.plugins import ServiceLifecycleHook +from localstack.services.ses.models import SentEmail, SentEmailBody +from localstack.utils.aws import arns +from localstack.utils.files import mkdir +from localstack.utils.strings import long_uid, to_str +from localstack.utils.time import timestamp, timestamp_millis + +if TYPE_CHECKING: + from mypy_boto3_sns import SNSClient + +LOGGER = logging.getLogger(__name__) + +# Keep record of all sent emails +# These can be retrieved via a service endpoint +EMAILS: Dict[MessageId, Dict[str, Any]] = {} + +# 
Endpoint to access all the sent emails + # (relative to LocalStack internal HTTP resources base endpoint) + EMAILS_ENDPOINT = "/_aws/ses" + + _EMAILS_ENDPOINT_REGISTERED = False + + REGEX_TAG_NAME = r"^[A-Za-z0-9_-]*$" + REGEX_TAG_VALUE = r"^[A-Za-z0-9_\-.@]*$" + + ALLOWED_TAG_LEN = 255 + + + def save_for_retrospection(sent_email: SentEmail): + """ + Save a message for retrospection. + + The contents of the email are saved to the filesystem. They can also be accessed via a service endpoint. + """ + message_id = sent_email["Id"] + ses_dir = os.path.join(config.dirs.data or config.dirs.tmp, "ses") + mkdir(ses_dir) + + path = os.path.join(ses_dir, message_id + ".json") + + if not sent_email.get("Timestamp"): + sent_email["Timestamp"] = timestamp() + + EMAILS[message_id] = sent_email + + def _serialize(obj): + """JSON serializer for timestamps.""" + if isinstance(obj, (datetime, date, time)): + return obj.isoformat() + return obj.__dict__ + + with open(path, "w") as f: + f.write(json.dumps(sent_email, default=_serialize)) + + LOGGER.debug("Email saved at: %s", path) + + + def recipients_from_destination(destination: Destination) -> List[str]: + """Get the list of recipient email addresses from a Destination object.""" + return ( + destination.get("ToAddresses", []) + + destination.get("CcAddresses", []) + + destination.get("BccAddresses", []) + ) + + + def get_ses_backend(context: RequestContext) -> SESBackend: + return ses_backends[context.account_id][context.region] + + + class SesServiceApiResource: + """Provides a REST API for retrospective access to emails sent via SES. + + This is registered as a LocalStack internal HTTP resource. + + This endpoint accepts: + - GET param `id`: filter for the `id` field in the SES message + - GET param `email`: filter for the `source` field in the SES message; when the `id` filter is also specified, both filters apply + """ + + def on_get(self, request): + filter_id = request.args.get("id") + filter_source = request.args.get("email") + messages = [] + + for msg in EMAILS.values(): + if filter_id in (msg.get("Id"), None, ""): + if filter_source in (msg.get("Source"), None, ""): + messages.append(msg) + + return { + "messages": messages, + } + + def on_delete(self, request): + filter_id = request.args.get("id") + if filter_id is not None: + del EMAILS[filter_id] + else: + EMAILS.clear() + return Response(status=204) + + + def register_ses_api_resource(): + """Register the email retrospection endpoint as an internal LocalStack endpoint.""" + # Use a global to indicate whether the resource has already been registered + # This is cheaper than iterating over the registered routes in the Router object + global _EMAILS_ENDPOINT_REGISTERED + + if not _EMAILS_ENDPOINT_REGISTERED: + from localstack.services.edge import ROUTER + + ROUTER.add(Resource(EMAILS_ENDPOINT, SesServiceApiResource())) + _EMAILS_ENDPOINT_REGISTERED = True + + + class SesProvider(SesApi, ServiceLifecycleHook): + # + # Lifecycle Hooks + # + + def on_after_init(self): + # Allow sent emails to be retrieved from the SES emails endpoint + register_ses_api_resource() + + # + # Helpers + # + + def get_source_from_raw(self, raw_data: str) -> Optional[str]: + """Given the raw representation of an email, return the source/from field.""" + entities = raw_data.split("\n") + for entity in entities: + if "From:" in entity: + return entity.replace("From:", "").strip() + return None + + # + # Implementations for SES operations + # + + @handler("CreateConfigurationSetEventDestination") + def create_configuration_set_event_destination( + self, + context: 
RequestContext, + configuration_set_name: ConfigurationSetName, + event_destination: EventDestination, + **kwargs, + ) -> CreateConfigurationSetEventDestinationResponse: + # send SES test event if an SNS topic is attached + sns_topic_arn = event_destination.get("SNSDestination", {}).get("TopicARN") + if sns_topic_arn is not None: + emitter = SNSEmitter(context) + emitter.emit_create_configuration_set_event_destination_test_message(sns_topic_arn) + + # only register the event destination if emitting the message worked + try: + result = call_moto(context) + except CommonServiceException as e: + if e.code == "ConfigurationSetDoesNotExist": + raise ConfigurationSetDoesNotExistException( + f"Configuration set <{configuration_set_name}> does not exist." + ) + else: + raise + + return result + + @handler("DeleteConfigurationSet") + def delete_configuration_set( + self, context: RequestContext, configuration_set_name: ConfigurationSetName, **kwargs + ) -> DeleteConfigurationSetResponse: + # not implemented in moto + # TODO: contribute upstream? + backend = get_ses_backend(context) + try: + backend.config_sets.pop(configuration_set_name) + except KeyError: + raise ConfigurationSetDoesNotExistException( + f"Configuration set <{configuration_set_name}> does not exist." + ) + + return DeleteConfigurationSetResponse() + + @handler("DeleteConfigurationSetEventDestination") + def delete_configuration_set_event_destination( + self, + context: RequestContext, + configuration_set_name: ConfigurationSetName, + event_destination_name: EventDestinationName, + **kwargs, + ) -> DeleteConfigurationSetEventDestinationResponse: + # not implemented in moto + # TODO: contribute upstream? + backend = get_ses_backend(context) + + # the configuration set must exist + if configuration_set_name not in backend.config_sets: + raise ConfigurationSetDoesNotExistException( + f"Configuration set <{configuration_set_name}> does not exist."
+ ) + + # the event destination must exist + if configuration_set_name not in backend.config_set_event_destination: + raise EventDestinationDoesNotExistException( + f"No EventDestination found for {configuration_set_name}" + ) + + if event_destination_name in backend.event_destinations: + backend.event_destinations.pop(event_destination_name) + else: + # FIXME: inconsistent state + LOGGER.warning("inconsistent state encountered in ses backend") + + backend.config_set_event_destination.pop(configuration_set_name) + + return DeleteConfigurationSetEventDestinationResponse() + + @handler("ListTemplates") + def list_templates( + self, + context: RequestContext, + next_token: NextToken = None, + max_items: MaxItems = None, + **kwargs, + ) -> ListTemplatesResponse: + backend = get_ses_backend(context) + for template in backend.list_templates(): + if isinstance(template["Timestamp"], (date, datetime)): + template["Timestamp"] = timestamp_millis(template["Timestamp"]) + return call_moto(context) + + @handler("DeleteTemplate") + def delete_template( + self, context: RequestContext, template_name: TemplateName, **kwargs + ) -> DeleteTemplateResponse: + backend = get_ses_backend(context) + if template_name in backend.templates: + del backend.templates[template_name] + return DeleteTemplateResponse() + + @handler("GetIdentityVerificationAttributes") + def get_identity_verification_attributes( + self, context: RequestContext, identities: IdentityList, **kwargs + ) -> GetIdentityVerificationAttributesResponse: + attributes: VerificationAttributes = {} + + for identity in identities: + if "@" in identity: + attributes[identity] = IdentityVerificationAttributes( + VerificationStatus=VerificationStatus.Success, + ) + else: + attributes[identity] = IdentityVerificationAttributes( + VerificationStatus=VerificationStatus.Success, + VerificationToken=long_uid(), + ) + + return GetIdentityVerificationAttributesResponse( + VerificationAttributes=attributes, + ) + + @handler("SendEmail") + def send_email( + self, + context: RequestContext, + source: Address, + destination: Destination, + message: Message, + reply_to_addresses: AddressList = None, + return_path: Address = None, + source_arn: AmazonResourceName = None, + return_path_arn: AmazonResourceName = None, + tags: MessageTagList = None, + configuration_set_name: ConfigurationSetName = None, + **kwargs, + ) -> SendEmailResponse: + if tags: + for tag in tags: + tag_name = tag.get("Name", "") + tag_value = tag.get("Value", "") + if tag_name == "": + raise InvalidParameterValue("The tag name must be specified.") + if tag_value == "": + raise InvalidParameterValue("The tag value must be specified.") + if len(tag_name) > 255: + raise InvalidParameterValue("Tag name cannot exceed 255 characters.") + # The `ses:` prefix is for a special case and disregarded for validation + # see https://docs.aws.amazon.com/ses/latest/dg/monitor-using-event-publishing.html#event-publishing-fine-grained-feedback + if not re.match(REGEX_TAG_NAME, tag_name.removeprefix("ses:")): + raise InvalidParameterValue( + f"Invalid tag name <{tag_name}>: only alphanumeric ASCII characters, '_', '-' are allowed.", + ) + if len(tag_value) > 255: + raise InvalidParameterValue("Tag value cannot exceed 255 characters.") + if not re.match(REGEX_TAG_VALUE, tag_value): + raise InvalidParameterValue( + f"Invalid tag value <{tag_value}>: only alphanumeric ASCII characters, '_', '-' , '.', '@' are allowed.", + ) + + response = call_moto(context) + + backend = get_ses_backend(context) + emitter = 
SNSEmitter(context) + recipients = recipients_from_destination(destination) + + for event_destination in backend.config_set_event_destination.values(): + if not event_destination["Enabled"]: + continue + + sns_destination_arn = event_destination.get("SNSDestination") + if not sns_destination_arn: + continue + + payload = SNSPayload( + message_id=response["MessageId"], + sender_email=source, + destination_addresses=recipients, + tags=tags, + ) + emitter.emit_send_event(payload, sns_destination_arn) + emitter.emit_delivery_event(payload, sns_destination_arn) + + text_part = message["Body"].get("Text", {}).get("Data") + html_part = message["Body"].get("Html", {}).get("Data") + + save_for_retrospection( + SentEmail( + Id=response["MessageId"], + Region=context.region, + Destination=destination, + Source=source, + Subject=message["Subject"].get("Data"), + Body=SentEmailBody(text_part=text_part, html_part=html_part), + ) + ) + + return response + + @handler("SendTemplatedEmail") + def send_templated_email( + self, + context: RequestContext, + source: Address, + destination: Destination, + template: TemplateName, + template_data: TemplateData, + reply_to_addresses: AddressList = None, + return_path: Address = None, + source_arn: AmazonResourceName = None, + return_path_arn: AmazonResourceName = None, + tags: MessageTagList = None, + configuration_set_name: ConfigurationSetName = None, + template_arn: AmazonResourceName = None, + **kwargs, + ) -> SendTemplatedEmailResponse: + response = call_moto(context) + + backend = get_ses_backend(context) + emitter = SNSEmitter(context) + recipients = recipients_from_destination(destination) + + for event_destination in backend.config_set_event_destination.values(): + if not event_destination["Enabled"]: + continue + + sns_destination_arn = event_destination.get("SNSDestination") + if not sns_destination_arn: + continue + + payload = SNSPayload( + message_id=response["MessageId"], + sender_email=source, + destination_addresses=recipients, + tags=tags, + ) + emitter.emit_send_event(payload, sns_destination_arn, emit_source_arn=False) + emitter.emit_delivery_event(payload, sns_destination_arn) + + save_for_retrospection( + SentEmail( + Id=response["MessageId"], + Region=context.region, + Source=source, + Template=template, + TemplateData=template_data, + Destination=destination, + ) + ) + + return response + + @handler("SendRawEmail") + def send_raw_email( + self, + context: RequestContext, + raw_message: RawMessage, + source: Address = None, + destinations: AddressList = None, + from_arn: AmazonResourceName = None, + source_arn: AmazonResourceName = None, + return_path_arn: AmazonResourceName = None, + tags: MessageTagList = None, + configuration_set_name: ConfigurationSetName = None, + **kwargs, + ) -> SendRawEmailResponse: + raw_data = to_str(raw_message["Data"]) + + if source is None or not source.strip(): + LOGGER.debug("Raw email:\n%s\nEOT", raw_data) + + source = self.get_source_from_raw(raw_data) + if not source: + LOGGER.warning("Source not specified. Rejecting message.") + raise MessageRejected() + + # TODO: On AWS, `destinations` is ignored if the `To` field is set in the raw email. 
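The line-based `From:` scan in `get_source_from_raw` above is intentionally simple. As a hedged sketch only (not part of this change, and `headers_from_raw` is a hypothetical helper), the stdlib email parser could extract both headers more robustly, which would also be a starting point for honoring the `To` field mentioned in the TODO:

```python
from email.parser import Parser
from email.utils import getaddresses

def headers_from_raw(raw_data: str) -> tuple[str | None, list[str]]:
    """Hypothetical helper: extract sender and recipients from a raw MIME message."""
    msg = Parser().parsestr(raw_data, headersonly=True)
    source = msg.get("From")  # None if the header is absent
    # getaddresses() splits comma-separated address lists into (name, email) pairs
    recipients = [addr for _, addr in getaddresses(msg.get_all("To") or [])]
    return source, recipients
```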
+ destinations = destinations or [] + + backend = get_ses_backend(context) + message = backend.send_raw_email(source, destinations, raw_data) + + emitter = SNSEmitter(context) + for event_destination in backend.config_set_event_destination.values(): + if not event_destination["Enabled"]: + continue + + sns_destination_arn = event_destination.get("SNSDestination") + if not sns_destination_arn: + continue + + payload = SNSPayload( + message_id=message.id, + sender_email=source, + destination_addresses=destinations, + tags=tags, + ) + emitter.emit_send_event(payload, sns_destination_arn) + emitter.emit_delivery_event(payload, sns_destination_arn) + + save_for_retrospection( + SentEmail( + Id=message.id, + Region=context.region, + Source=source or message.source, + RawData=raw_data, + ) + ) + + return SendRawEmailResponse(MessageId=message.id) + + @handler("CloneReceiptRuleSet") + def clone_receipt_rule_set( + self, + context: RequestContext, + rule_set_name: ReceiptRuleSetName, + original_rule_set_name: ReceiptRuleSetName, + **kwargs, + ) -> CloneReceiptRuleSetResponse: + backend = get_ses_backend(context) + + backend.create_receipt_rule_set(rule_set_name) + original_rule_set = backend.describe_receipt_rule_set(original_rule_set_name) + + for rule in original_rule_set: + backend.create_receipt_rule(rule_set_name, rule) + + return CloneReceiptRuleSetResponse() + + +@dataclasses.dataclass(frozen=True) +class SNSPayload: + message_id: str + sender_email: Address + destination_addresses: AddressList + tags: Optional[MessageTagList] + + +class SNSEmitter: + def __init__( + self, + context: RequestContext, + ): + self.context = context + + def emit_create_configuration_set_event_destination_test_message( + self, sns_topic_arn: str + ) -> None: + client = self._client_for_topic(sns_topic_arn) + # topic must exist + try: + client.get_topic_attributes(TopicArn=sns_topic_arn) + except ClientError as exc: + if "NotFound" in exc.response["Error"]["Code"]: + raise InvalidSNSDestinationException(f"SNS topic <{sns_topic_arn}> not found.") + raise + + client.publish( + TopicArn=sns_topic_arn, + Message="Successfully validated SNS topic for Amazon SES event publishing.", + ) + + def emit_send_event( + self, payload: SNSPayload, sns_topic_arn: str, emit_source_arn: bool = True + ): + now = datetime.now(tz=timezone.utc) + + tags = defaultdict(list) + for every in payload.tags or []: + tags[every["Name"]].append(every["Value"]) + + event_payload = { + "eventType": "Send", + "mail": { + "timestamp": now.isoformat(), + "source": payload.sender_email, + "sendingAccountId": self.context.account_id, + "destination": payload.destination_addresses, + "messageId": payload.message_id, + "tags": tags, + }, + "send": {}, + } + + if emit_source_arn: + event_payload["mail"]["sourceArn"] = ( + f"arn:{self.context.partition}:ses:{self.context.region}:{self.context.account_id}:identity/{payload.sender_email}" + ) + + client = self._client_for_topic(sns_topic_arn) + try: + client.publish( + TopicArn=sns_topic_arn, + Message=json.dumps(event_payload), + Subject="Amazon SES Email Event Notification", + ) + except ClientError: + LOGGER.exception("sending SNS message") + + def emit_delivery_event(self, payload: SNSPayload, sns_topic_arn: str): + now = datetime.now(tz=timezone.utc) + + tags = defaultdict(list) + for every in payload.tags or []: + tags[every["Name"]].append(every["Value"]) + + event_payload = { + "eventType": "Delivery", + "mail": { + "timestamp": now.isoformat(), + "source": payload.sender_email, + "sourceArn": 
f"arn:{self.context.partition}:ses:{self.context.region}:{self.context.account_id}:identity/{payload.sender_email}", + "sendingAccountId": self.context.account_id, + "destination": payload.destination_addresses, + "messageId": payload.message_id, + "tags": tags, + }, + "delivery": { + "recipients": payload.destination_addresses, + "timestamp": now.isoformat(), + }, + } + client = self._client_for_topic(sns_topic_arn) + try: + client.publish( + TopicArn=sns_topic_arn, + Message=json.dumps(event_payload), + Subject="Amazon SES Email Event Notification", + ) + except ClientError: + LOGGER.exception("sending SNS message") + + @staticmethod + def _client_for_topic(topic_arn: str) -> "SNSClient": + arn_parameters = arns.parse_arn(topic_arn) + region = arn_parameters["region"] + access_key_id = arn_parameters["account"] + + return connect_to( + region_name=region, + aws_access_key_id=access_key_id, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + ).sns + + +class InvalidParameterValue(CommonServiceException): + def __init__(self, message=None): + super().__init__( + "InvalidParameterValue", status_code=400, message=message, sender_fault=True + ) diff --git a/localstack/testing/aws/__init__.py b/localstack-core/localstack/services/ses/resource_providers/__init__.py similarity index 100% rename from localstack/testing/aws/__init__.py rename to localstack-core/localstack/services/ses/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity.py b/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity.py new file mode 100644 index 0000000000000..5baeb44cd6a82 --- /dev/null +++ b/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity.py @@ -0,0 +1,166 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SESEmailIdentityProperties(TypedDict): + EmailIdentity: Optional[str] + ConfigurationSetAttributes: Optional[ConfigurationSetAttributes] + DkimAttributes: Optional[DkimAttributes] + DkimDNSTokenName1: Optional[str] + DkimDNSTokenName2: Optional[str] + DkimDNSTokenName3: Optional[str] + DkimDNSTokenValue1: Optional[str] + DkimDNSTokenValue2: Optional[str] + DkimDNSTokenValue3: Optional[str] + DkimSigningAttributes: Optional[DkimSigningAttributes] + FeedbackAttributes: Optional[FeedbackAttributes] + MailFromAttributes: Optional[MailFromAttributes] + + +class ConfigurationSetAttributes(TypedDict): + ConfigurationSetName: Optional[str] + + +class DkimSigningAttributes(TypedDict): + DomainSigningPrivateKey: Optional[str] + DomainSigningSelector: Optional[str] + NextSigningKeyLength: Optional[str] + + +class DkimAttributes(TypedDict): + SigningEnabled: Optional[bool] + + +class MailFromAttributes(TypedDict): + BehaviorOnMxFailure: Optional[str] + MailFromDomain: Optional[str] + + +class FeedbackAttributes(TypedDict): + EmailForwardingEnabled: Optional[bool] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SESEmailIdentityProvider(ResourceProvider[SESEmailIdentityProperties]): + TYPE = "AWS::SES::EmailIdentity" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[SESEmailIdentityProperties], + ) -> ProgressEvent[SESEmailIdentityProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/EmailIdentity + + Required properties: + - EmailIdentity + + Create-only properties: + - /properties/EmailIdentity + + Read-only properties: + - /properties/DkimDNSTokenName1 + - /properties/DkimDNSTokenName2 + - /properties/DkimDNSTokenName3 + - /properties/DkimDNSTokenValue1 + - /properties/DkimDNSTokenValue2 + - /properties/DkimDNSTokenValue3 + + IAM permissions required: + - ses:CreateEmailIdentity + - ses:PutEmailIdentityMailFromAttributes + - ses:PutEmailIdentityFeedbackAttributes + - ses:PutEmailIdentityDkimAttributes + - ses:GetEmailIdentity + + """ + model = request.desired_state + + # TODO: validations + + if not request.custom_context.get(REPEATED_INVOCATION): + # this is the first time this callback is invoked + # TODO: defaults + # TODO: idempotency + # TODO: actually create the resource + request.custom_context[REPEATED_INVOCATION] = True + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + # TODO: check the status of the resource + # - if finished, update the model with all fields and return success event: + # return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model) + # - else + # return ProgressEvent(status=OperationStatus.IN_PROGRESS, resource_model=model) + + raise NotImplementedError + + def read( + self, + request: ResourceRequest[SESEmailIdentityProperties], + ) -> ProgressEvent[SESEmailIdentityProperties]: + """ + Fetch resource information + + IAM permissions required: + - ses:GetEmailIdentity + """ + raise NotImplementedError + + def list( + self, + request: ResourceRequest[SESEmailIdentityProperties], + ) -> ProgressEvent[SESEmailIdentityProperties]: + response = request.aws_client_factory.ses.list_identities()["Identities"] + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[SESEmailIdentityProperties(EmailIdentity=every) for every in response], + ) + + def delete( + self, + request: ResourceRequest[SESEmailIdentityProperties], + ) -> ProgressEvent[SESEmailIdentityProperties]: + """ + Delete a resource + + IAM permissions required: + - ses:DeleteEmailIdentity + """ + raise NotImplementedError + + def update( + self, + request: ResourceRequest[SESEmailIdentityProperties], + ) -> ProgressEvent[SESEmailIdentityProperties]: + """ + Update a resource + + IAM permissions required: + - ses:PutEmailIdentityMailFromAttributes + - ses:PutEmailIdentityFeedbackAttributes + - ses:PutEmailIdentityConfigurationSetAttributes + - ses:PutEmailIdentityDkimSigningAttributes + - ses:PutEmailIdentityDkimAttributes + - ses:GetEmailIdentity + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity.schema.json b/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity.schema.json new file mode 100644 index 0000000000000..8d952ff03a1a9 --- /dev/null +++ b/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity.schema.json @@ -0,0 +1,173 @@ +{ + "typeName": "AWS::SES::EmailIdentity", + "description": "Resource Type definition for AWS::SES::EmailIdentity", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-ses.git", + "additionalProperties": false, + "properties": { + "EmailIdentity": { + 
"type": "string", + "description": "The email address or domain to verify." + }, + "ConfigurationSetAttributes": { + "$ref": "#/definitions/ConfigurationSetAttributes" + }, + "DkimSigningAttributes": { + "$ref": "#/definitions/DkimSigningAttributes" + }, + "DkimAttributes": { + "$ref": "#/definitions/DkimAttributes" + }, + "MailFromAttributes": { + "$ref": "#/definitions/MailFromAttributes" + }, + "FeedbackAttributes": { + "$ref": "#/definitions/FeedbackAttributes" + }, + "DkimDNSTokenName1": { + "type": "string" + }, + "DkimDNSTokenName2": { + "type": "string" + }, + "DkimDNSTokenName3": { + "type": "string" + }, + "DkimDNSTokenValue1": { + "type": "string" + }, + "DkimDNSTokenValue2": { + "type": "string" + }, + "DkimDNSTokenValue3": { + "type": "string" + } + }, + "definitions": { + "DkimSigningAttributes": { + "type": "object", + "additionalProperties": false, + "description": "If your request includes this object, Amazon SES configures the identity to use Bring Your Own DKIM (BYODKIM) for DKIM authentication purposes, or, configures the key length to be used for Easy DKIM.", + "properties": { + "DomainSigningSelector": { + "type": "string", + "description": "[Bring Your Own DKIM] A string that's used to identify a public key in the DNS configuration for a domain." + }, + "DomainSigningPrivateKey": { + "type": "string", + "description": "[Bring Your Own DKIM] A private key that's used to generate a DKIM signature. The private key must use 1024 or 2048-bit RSA encryption, and must be encoded using base64 encoding." + }, + "NextSigningKeyLength": { + "type": "string", + "description": "[Easy DKIM] The key length of the future DKIM key pair to be generated. This can be changed at most once per day.", + "pattern": "RSA_1024_BIT|RSA_2048_BIT" + } + } + }, + "ConfigurationSetAttributes": { + "type": "object", + "additionalProperties": false, + "description": "Used to associate a configuration set with an email identity.", + "properties": { + "ConfigurationSetName": { + "type": "string", + "description": "The configuration set to use by default when sending from this identity. Note that any configuration set defined in the email sending request takes precedence." + } + } + }, + "DkimAttributes": { + "type": "object", + "additionalProperties": false, + "description": "Used to enable or disable DKIM authentication for an email identity.", + "properties": { + "SigningEnabled": { + "type": "boolean", + "description": "Sets the DKIM signing configuration for the identity. When you set this value true, then the messages that are sent from the identity are signed using DKIM. If you set this value to false, your messages are sent without DKIM signing." + } + } + }, + "MailFromAttributes": { + "type": "object", + "additionalProperties": false, + "description": "Used to enable or disable the custom Mail-From domain configuration for an email identity.", + "properties": { + "MailFromDomain": { + "type": "string", + "description": "The custom MAIL FROM domain that you want the verified identity to use" + }, + "BehaviorOnMxFailure": { + "type": "string", + "description": "The action to take if the required MX record isn't found when you send an email. When you set this value to UseDefaultValue , the mail is sent using amazonses.com as the MAIL FROM domain. 
When you set this value to RejectMessage , the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.", + "pattern": "USE_DEFAULT_VALUE|REJECT_MESSAGE" + } + } + }, + "FeedbackAttributes": { + "type": "object", + "additionalProperties": false, + "description": "Used to enable or disable feedback forwarding for an identity.", + "properties": { + "EmailForwardingEnabled": { + "type": "boolean", + "description": "If the value is true, you receive email notifications when bounce or complaint events occur" + } + } + } + }, + "required": [ + "EmailIdentity" + ], + "readOnlyProperties": [ + "/properties/DkimDNSTokenName1", + "/properties/DkimDNSTokenName2", + "/properties/DkimDNSTokenName3", + "/properties/DkimDNSTokenValue1", + "/properties/DkimDNSTokenValue2", + "/properties/DkimDNSTokenValue3" + ], + "createOnlyProperties": [ + "/properties/EmailIdentity" + ], + "primaryIdentifier": [ + "/properties/EmailIdentity" + ], + "writeOnlyProperties": [ + "/properties/DkimSigningAttributes/DomainSigningSelector", + "/properties/DkimSigningAttributes/DomainSigningPrivateKey" + ], + "handlers": { + "create": { + "permissions": [ + "ses:CreateEmailIdentity", + "ses:PutEmailIdentityMailFromAttributes", + "ses:PutEmailIdentityFeedbackAttributes", + "ses:PutEmailIdentityDkimAttributes", + "ses:GetEmailIdentity" + ] + }, + "read": { + "permissions": [ + "ses:GetEmailIdentity" + ] + }, + "update": { + "permissions": [ + "ses:PutEmailIdentityMailFromAttributes", + "ses:PutEmailIdentityFeedbackAttributes", + "ses:PutEmailIdentityConfigurationSetAttributes", + "ses:PutEmailIdentityDkimSigningAttributes", + "ses:PutEmailIdentityDkimAttributes", + "ses:GetEmailIdentity" + ] + }, + "delete": { + "permissions": [ + "ses:DeleteEmailIdentity" + ] + }, + "list": { + "permissions": [ + "ses:ListEmailIdentities" + ] + } + } +} diff --git a/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity_plugin.py b/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity_plugin.py new file mode 100644 index 0000000000000..ca75f6be6c340 --- /dev/null +++ b/localstack-core/localstack/services/ses/resource_providers/aws_ses_emailidentity_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SESEmailIdentityProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SES::EmailIdentity" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ses.resource_providers.aws_ses_emailidentity import ( + SESEmailIdentityProvider, + ) + + self.factory = SESEmailIdentityProvider diff --git a/localstack/testing/pytest/__init__.py b/localstack-core/localstack/services/sns/__init__.py similarity index 100% rename from localstack/testing/pytest/__init__.py rename to localstack-core/localstack/services/sns/__init__.py diff --git a/localstack-core/localstack/services/sns/analytics.py b/localstack-core/localstack/services/sns/analytics.py new file mode 100644 index 0000000000000..c74ed6ad2b141 --- /dev/null +++ b/localstack-core/localstack/services/sns/analytics.py @@ -0,0 +1,9 @@ +""" +Usage analytics for SNS internal endpoints +""" + +from localstack.utils.analytics.metrics import Counter + +# number of times the SNS internal endpoint is called, per resource type + # (e.g.
PlatformMessage invoked 10x times, SMSMessage invoked 3x times, SubscriptionToken...) +internal_api_calls = Counter(namespace="sns", name="internal_api_call", labels=["resource_type"]) diff --git a/localstack-core/localstack/services/sns/certificate.py b/localstack-core/localstack/services/sns/certificate.py new file mode 100644 index 0000000000000..8e7ad5fc21803 --- /dev/null +++ b/localstack-core/localstack/services/sns/certificate.py @@ -0,0 +1,54 @@ +from datetime import datetime, timedelta + +from cryptography import x509 +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey + +SNS_SERVER_PRIVATE_KEY: RSAPrivateKey = rsa.generate_private_key( + public_exponent=65537, key_size=2048 +) + +SNS_SERVER_CERT_ISSUER = x509.Name( + [ + x509.NameAttribute(x509.NameOID.COUNTRY_NAME, "CH"), + x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, "LocalStack"), + x509.NameAttribute( + x509.NameOID.COMMON_NAME, "LocalStack TEST SNS Root Certificate Authority" + ), + ] +) + +SNS_SERVER_CERT: str = ( + ( + x509.CertificateBuilder() + .subject_name(SNS_SERVER_CERT_ISSUER) + .issuer_name(SNS_SERVER_CERT_ISSUER) + .public_key(SNS_SERVER_PRIVATE_KEY.public_key()) + .serial_number(x509.random_serial_number()) + .not_valid_before(datetime.utcnow()) + .not_valid_after(datetime.utcnow() + timedelta(days=365)) + .add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True) + .add_extension( + x509.KeyUsage( + crl_sign=True, + key_cert_sign=True, + digital_signature=True, + content_commitment=False, + key_encipherment=False, + data_encipherment=False, + key_agreement=False, + encipher_only=False, + decipher_only=False, + ), + critical=True, + ) + .add_extension( + x509.SubjectKeyIdentifier.from_public_key(SNS_SERVER_PRIVATE_KEY.public_key()), + critical=False, + ) + .sign(SNS_SERVER_PRIVATE_KEY, hashes.SHA256()) + ) + .public_bytes(serialization.Encoding.PEM) + .decode("utf-8") +) diff --git a/localstack-core/localstack/services/sns/constants.py b/localstack-core/localstack/services/sns/constants.py new file mode 100644 index 0000000000000..04b5f05293818 --- /dev/null +++ b/localstack-core/localstack/services/sns/constants.py @@ -0,0 +1,41 @@ +import re +from string import ascii_letters, digits + +SNS_PROTOCOLS = [ + "http", + "https", + "email", + "email-json", + "sms", + "sqs", + "application", + "lambda", + "firehose", +] + +VALID_SUBSCRIPTION_ATTR_NAME = [ + "DeliveryPolicy", + "FilterPolicy", + "FilterPolicyScope", + "RawMessageDelivery", + "RedrivePolicy", + "SubscriptionRoleArn", +] + +MSG_ATTR_NAME_REGEX = re.compile(r"^(?!\.)(?!.*\.$)(?!.*\.\.)[a-zA-Z0-9_\-.]+$") +ATTR_TYPE_REGEX = re.compile(r"^(String|Number|Binary)\..+$") +VALID_MSG_ATTR_NAME_CHARS = set(ascii_letters + digits + "." 
+ "-" + "_") + + +GCM_URL = "https://fcm.googleapis.com/fcm/send" + +# Endpoint to access all the PlatformEndpoint sent Messages +PLATFORM_ENDPOINT_MSGS_ENDPOINT = "/_aws/sns/platform-endpoint-messages" +SMS_MSGS_ENDPOINT = "/_aws/sns/sms-messages" +SUBSCRIPTION_TOKENS_ENDPOINT = "/_aws/sns/subscription-tokens" + +# we add hex chars to respect the format of AWS with certificate ID, hardcoded for now +# we could parametrize the certificate ID in the future +SNS_CERT_ENDPOINT = "/_aws/sns/SimpleNotificationService-6c6f63616c737461636b69736e696365.pem" + +DUMMY_SUBSCRIPTION_PRINCIPAL = "arn:{partition}:iam::{account_id}:user/DummySNSPrincipal" diff --git a/localstack-core/localstack/services/sns/executor.py b/localstack-core/localstack/services/sns/executor.py new file mode 100644 index 0000000000000..ce4f8850d6e3e --- /dev/null +++ b/localstack-core/localstack/services/sns/executor.py @@ -0,0 +1,114 @@ +import itertools +import logging +import os +import queue +import threading + +LOG = logging.getLogger(__name__) + + +def _worker(work_queue: queue.Queue): + try: + while True: + work_item = work_queue.get(block=True) + if work_item is None: + return + work_item.run() + # delete reference to the work item to avoid it being in memory until the next blocking `queue.get` call returns + del work_item + + except Exception: + LOG.exception("Exception in worker") + + +class _WorkItem: + def __init__(self, fn, args, kwargs): + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + try: + self.fn(*self.args, **self.kwargs) + except Exception: + LOG.exception("Unhandled Exception in while running %s", self.fn.__name__) + + +class TopicPartitionedThreadPoolExecutor: + """ + This topic partition the work between workers based on Topics. + It guarantees that each Topic only has one worker assigned, and thus that the tasks will be executed sequentially. + + Loosely based on ThreadPoolExecutor for stdlib, but does not return Future as SNS does not need it (fire&forget) + Could be extended if needed to fit other needs. + + Currently, we do not re-balance between workers if some of them have more load. This could be investigated. + """ + + # Used to assign unique thread names when thread_name_prefix is not supplied. 
_counter = itertools.count().__next__ + + def __init__(self, max_workers: int = None, thread_name_prefix: str = ""): + if max_workers is None: + max_workers = min(32, (os.cpu_count() or 1) + 4) + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + self._thread_name_prefix = ( + thread_name_prefix or f"TopicThreadPoolExecutor-{self._counter()}" + ) + + # for now, the pool isn't fair and is not redistributed depending on load + self._pool = {} + self._shutdown = False + self._lock = threading.Lock() + self._threads = set() + self._work_queues = [] + self._cycle = itertools.cycle(range(max_workers)) + + def _add_worker(self): + work_queue = queue.SimpleQueue() + self._work_queues.append(work_queue) + thread_name = f"{self._thread_name_prefix}_{len(self._threads)}" + t = threading.Thread(name=thread_name, target=_worker, args=(work_queue,)) + t.daemon = True + t.start() + self._threads.add(t) + + def _get_work_queue(self, topic: str) -> queue.SimpleQueue: + if not (work_queue := self._pool.get(topic)): + if len(self._threads) < self._max_workers: + self._add_worker() + + # we cycle through the possible indexes for a work queue, in order to distribute the load across workers + # once we reach the maximum number of workers, the cycle starts back at 0 + index = next(self._cycle) + work_queue = self._work_queues[index] + + # TODO: the pool is not cleaned up at the moment, think about the clean-up interface + self._pool[topic] = work_queue + return work_queue + + def submit(self, fn, topic, /, *args, **kwargs) -> None: + with self._lock: + work_queue = self._get_work_queue(topic) + + if self._shutdown: + raise RuntimeError("cannot schedule new futures after shutdown") + + w = _WorkItem(fn, args, kwargs) + work_queue.put(w) + + def shutdown(self, wait=True): + with self._lock: + self._shutdown = True + + # Send a wake-up to prevent threads calling + # _work_queue.get(block=True) from permanently blocking. + for work_queue in self._work_queues: + work_queue.put(None) + + if wait: + for t in self._threads: + t.join() diff --git a/localstack-core/localstack/services/sns/filter.py b/localstack-core/localstack/services/sns/filter.py new file mode 100644 index 0000000000000..1a61fcab10552 --- /dev/null +++ b/localstack-core/localstack/services/sns/filter.py @@ -0,0 +1,538 @@ +import ipaddress +import json +import typing as t + +from localstack.aws.api.sns import InvalidParameterException + + +class SubscriptionFilter: + def check_filter_policy_on_message_attributes( + self, filter_policy: dict, message_attributes: dict + ): + if not filter_policy: + return True + + flat_policy_conditions = self.flatten_policy(filter_policy) + + return any( + all( + self._evaluate_filter_policy_conditions_on_attribute( + conditions, + message_attributes.get(criteria), + field_exists=criteria in message_attributes, + ) + for criteria, conditions in flat_policy.items() + ) + for flat_policy in flat_policy_conditions + ) + + def check_filter_policy_on_message_body(self, filter_policy: dict, message_body: str): + try: + body = json.loads(message_body) + if not isinstance(body, dict): + return False + except json.JSONDecodeError: + # Filter policies for the message body assume that the message payload is a well-formed JSON object.
+            # See https://docs.aws.amazon.com/sns/latest/dg/sns-message-filtering.html
+            return False
+
+        return self._evaluate_nested_filter_policy_on_dict(filter_policy, payload=body)
+
+    def _evaluate_nested_filter_policy_on_dict(self, filter_policy, payload: dict) -> bool:
+        """
+        This method evaluates the filter policy against the JSON-decoded payload.
+        Although it's not documented anywhere, AWS allows `.` in the field names of the filter policy and the payload,
+        and will evaluate them. However, it's not JSONPath compatible:
+        Example:
+        Policy: `{"field1.field2": "value1"}`
+        This policy will match both `{"field1.field2": "value1"}` and `{"field1": {"field2": "value1"}}`, unlike
+        JSONPath, for which `.` points to a child node.
+        This suggests that AWS flattens both dictionaries to a single level, for easier matching without recursion.
+        :param filter_policy: a dict, starting at the FilterPolicy
+        :param payload: a dict, starting at the MessageBody
+        :return: True if the payload respects the filter policy, otherwise False
+        """
+        if not filter_policy:
+            return True
+
+        # TODO: maybe save/cache the flattened/expanded policy?
+        flat_policy_conditions = self.flatten_policy(filter_policy)
+        flat_payloads = self.flatten_payload(payload, flat_policy_conditions)
+
+        return any(
+            all(
+                any(
+                    self._evaluate_condition(
+                        flat_payload.get(key), condition, field_exists=key in flat_payload
+                    )
+                    for condition in conditions
+                    for flat_payload in flat_payloads
+                )
+                for key, conditions in flat_policy.items()
+            )
+            for flat_policy in flat_policy_conditions
+        )
+
+    def _evaluate_filter_policy_conditions_on_attribute(
+        self, conditions, attribute, field_exists: bool
+    ):
+        if not isinstance(conditions, list):
+            conditions = [conditions]
+
+        tpe = attribute.get("DataType") or attribute.get("Type") if attribute else None
+        val = attribute.get("StringValue") or attribute.get("Value") if attribute else None
+        if attribute is not None and tpe == "String.Array":
+            try:
+                values = json.loads(val)
+            except ValueError:
+                return False
+            for value in values:
+                for condition in conditions:
+                    if self._evaluate_condition(value, condition, field_exists):
+                        return True
+        else:
+            for condition in conditions:
+                value = val or None
+                if self._evaluate_condition(value, condition, field_exists):
+                    return True
+
+        return False
+
+    def _evaluate_condition(self, value, condition, field_exists: bool):
+        if not isinstance(condition, dict):
+            return field_exists and value == condition
+        elif (must_exist := condition.get("exists")) is not None:
+            # if must_exist is True then field_exists must be True
+            # if must_exist is False then field_exists must be False
+            return must_exist == field_exists
+        elif value is None:
+            # the remaining conditions require the value to not be None
+            return False
+        elif anything_but := condition.get("anything-but"):
+            if isinstance(anything_but, dict):
+                not_prefix = anything_but.get("prefix")
+                return not value.startswith(not_prefix)
+            elif isinstance(anything_but, list):
+                return value not in anything_but
+            else:
+                return value != anything_but
+        elif prefix := condition.get("prefix"):
+            return value.startswith(prefix)
+        elif suffix := condition.get("suffix"):
+            return value.endswith(suffix)
+        elif equal_ignore_case := condition.get("equals-ignore-case"):
+            return equal_ignore_case.lower() == value.lower()
+        elif numeric_condition := condition.get("numeric"):
+            return self._evaluate_numeric_condition(numeric_condition, value)
+        elif cidr := condition.get("cidr"):
+            try:
+                ip = ipaddress.ip_address(value)
+                return ip in ipaddress.ip_network(cidr)
+            except ValueError:
+                return False
+
+        return False
+
+    @staticmethod
+    def _evaluate_numeric_condition(conditions, value):
+        try:
+            # check if the value is numeric
+            value = float(value)
+        except ValueError:
+            # the value is not numeric, the condition is False
+            return False
+
+        for i in range(0, len(conditions), 2):
+            operator = conditions[i]
+            operand = float(conditions[i + 1])
+
+            if operator == "=":
+                if value != operand:
+                    return False
+            elif operator == ">":
+                if value <= operand:
+                    return False
+            elif operator == "<":
+                if value >= operand:
+                    return False
+            elif operator == ">=":
+                if value < operand:
+                    return False
+            elif operator == "<=":
+                if value > operand:
+                    return False
+
+        return True
+
+    @staticmethod
+    def flatten_policy(nested_dict: dict) -> list[dict]:
+        """
+        Takes a dictionary as input and will output the dictionary on a single level.
+        Input:
+        `{"field1": {"field2": {"field3": "val1", "field4": "val2"}}}`
+        Output:
+        `[
+            {
+                "field1.field2.field3": "val1",
+                "field1.field2.field4": "val2"
+            }
+        ]`
+        Input with $or will create multiple outputs:
+        `{"$or": [{"field1": "val1"}, {"field2": "val2"}], "field3": "val3"}`
+        Output:
+        `[
+            {"field1": "val1", "field3": "val3"},
+            {"field2": "val2", "field3": "val3"}
+        ]`
+        :param nested_dict: a (nested) dictionary
+        :return: a list of flattened dictionaries with no nested dict or list inside, flattened to a
+        single level, one list item for every list item encountered
+        """
+
+        def _traverse_policy(obj, array=None, parent_key=None) -> list:
+            if array is None:
+                array = [{}]
+
+            for key, values in obj.items():
+                if key == "$or" and isinstance(values, list) and len(values) > 1:
+                    # $or will create multiple new branches in the array.
+                    # Each current branch will traverse with each choice in $or
+                    array = [
+                        i for value in values for i in _traverse_policy(value, array, parent_key)
+                    ]
+                else:
+                    # We update the parent key so that {"key1": {"key2": ""}} becomes "key1.key2"
+                    _parent_key = f"{parent_key}.{key}" if parent_key else key
+                    if isinstance(values, dict):
+                        # If the current key has a child dict -- key: "key1", child: {"key2": ["val1", "val2"]}
+                        # We only update the parent_key and traverse its children with the current branches
+                        array = _traverse_policy(values, array, _parent_key)
+                    else:
+                        # If the current key has no child, this means we found the values to match -- child: ["val1", "val2"]
+                        # we update the branches with the parent chain and the values -- {"key1.key2": ["val1", "val2"]}
+                        array = [{**item, _parent_key: values} for item in array]
+
+            return array
+
+        return _traverse_policy(nested_dict)
+
+    @staticmethod
+    def flatten_payload(payload: dict, policy_conditions: list[dict]) -> list[dict]:
+        """
+        Takes a dictionary as input and will output the dictionary on a single level.
+        The dictionary can have lists containing other dictionaries, and one root-level entry will be created for every
+        item in a list if it corresponds to the entries of the policy conditions.
+        Input:
+        payload:
+        `{"field1": {
+            "field2": [
+                {"field3": "val1", "field4": "val2"},
+                {"field3": "val3", "field4": "val4"}
+            ]
+        }}`
+        policy_conditions:
+        `[
+            {
+                "field1.field2.field3": ...,
+                "field1.field2.field4": ...
+            }
+        ]`
+        Output:
+        `[
+            {
+                "field1.field2.field3": "val1",
+                "field1.field2.field4": "val2"
+            },
+            {
+                "field1.field2.field3": "val3",
+                "field1.field2.field4": "val4"
+            }
+        ]`
+        :param payload: a (nested) dictionary
+        :param policy_conditions: the flattened policy conditions, used to only build the parts of the payload that
+        are relevant to the policy
+        :return: a list of flattened dictionaries with no nested dict inside, flattened to a single level
+        """
+        policy_keys = {key for keys in policy_conditions for key in keys}
+
+        def _is_key_in_policy(key: str) -> bool:
+            return key is None or any(policy_key.startswith(key) for policy_key in policy_keys)
+
+        def _traverse(_object: dict, array=None, parent_key=None) -> list:
+            if isinstance(_object, dict):
+                for key, values in _object.items():
+                    # We update the parent key so that {"key1": {"key2": ""}} becomes "key1.key2"
+                    _parent_key = f"{parent_key}.{key}" if parent_key else key
+
+                    # we make sure that we are building only the relevant parts of the payload related to the policy
+                    # the payload could be very complex, and the policy only applies to part of it
+                    if _is_key_in_policy(_parent_key):
+                        array = _traverse(values, array, _parent_key)
+
+            elif isinstance(_object, list):
+                if not _object:
+                    return array
+                array = [i for value in _object for i in _traverse(value, array, parent_key)]
+            else:
+                array = [{**item, parent_key: _object} for item in array]
+
+            return array
+
+        return _traverse(payload, array=[{}], parent_key=None)
+
+
+class FilterPolicyValidator:
+    def __init__(self, scope: str, is_subscribe_call: bool):
+        self.scope = scope
+        self.error_prefix = (
+            "Invalid parameter: Attributes Reason: " if is_subscribe_call else "Invalid parameter: "
+        )
+
+    def validate_filter_policy(self, filter_policy: dict[str, t.Any]):
+        # A filter policy can have a maximum of five attribute names. For a nested policy, only parent keys are counted.
+        if len(filter_policy.values()) > 5:
+            raise InvalidParameterException(
+                f"{self.error_prefix}FilterPolicy: Filter policy can not have more than 5 keys"
+            )
+
+        aggregated_rules, combinations = self.aggregate_rules(filter_policy)
+        # For the complexity of the filter policy, the total combination of values must not exceed 150.
+        # https://docs.aws.amazon.com/sns/latest/dg/subscription-filter-policy-constraints.html
+        if combinations > 150:
+            raise InvalidParameterException(
+                f"{self.error_prefix}FilterPolicy: Filter policy is too complex"
+            )
+
+        for rules in aggregated_rules:
+            for rule in rules:
+                self._validate_rule(rule)
+
+    def aggregate_rules(self, filter_policy: dict[str, t.Any]) -> tuple[list[list[t.Any]], int]:
+        """
+        This method evaluates the filter policy recursively and returns a list of lists of rules.
+        It also calculates the number of combinations of rules, which depends on their nesting.
+        Example:
+        nested_filter_policy = {
+            "key_a": {
+                "key_b": {
+                    "key_c": ["value_one", "value_two", "value_three", "value_four"]
+                }
+            },
+            "key_d": {
+                "key_e": ["value_one", "value_two", "value_three"]
+            }
+        }
+        This function then iterates over the values of the top-level keys of the filter policy: ("key_a", "key_d")
+        If the iterated value is not a list, it means it is a nested property. If the scope is `MessageBody`, this is
+        allowed; we call this method on the value, adding a level to the depth to keep track of how deep the key is.
+        If the value is a list, it means it contains rules: we will append this list of rules to _rules, and
+        calculate the combinations it adds.
+        For the example filter policy above, we calculate it this way: the first array has four values under a
+        three-level nested key, and the second has three values under a two-level nested key: (4 x 3) x (3 x 2) = 72
+        The return value would be:
+        [["value_one", "value_two", "value_three", "value_four"], ["value_one", "value_two", "value_three"]]
+        This allows us to later iterate over the list of rules in an easy way, to verify only their conditions.
+
+        :param filter_policy: a dict, starting at the FilterPolicy
+        :return: a tuple with a list of lists of rules and the calculated number of combinations
+        """
+
+        def _inner(
+            policy_elements: dict[str, t.Any], depth: int = 1, combinations: int = 1
+        ) -> tuple[list[list[t.Any]], int]:
+            _rules = []
+            for key, _value in policy_elements.items():
+                if isinstance(_value, dict):
+                    # From AWS docs: "unlike attribute-based policies, payload-based policies support property nesting."
+                    sub_rules, combinations = _inner(
+                        _value, depth=depth + 1, combinations=combinations
+                    )
+                    _rules.extend(sub_rules)
+                elif isinstance(_value, list):
+                    if not _value:
+                        raise InvalidParameterException(
+                            f"{self.error_prefix}FilterPolicy: Empty arrays are not allowed"
+                        )
+
+                    current_combination = 0
+                    if key == "$or":
+                        for val in _value:
+                            sub_rules, or_combinations = _inner(
+                                val, depth=depth, combinations=combinations
+                            )
+                            _rules.extend(sub_rules)
+                            current_combination += or_combinations
+
+                        combinations = current_combination
+                    else:
+                        _rules.append(_value)
+                        combinations = combinations * len(_value) * depth
+                else:
+                    raise InvalidParameterException(
+                        f'{self.error_prefix}FilterPolicy: "{key}" must be an object or an array'
+                    )
+
+            if self.scope == "MessageAttributes" and depth > 1:
+                raise InvalidParameterException(
+                    f"{self.error_prefix}Filter policy scope MessageAttributes does not support nested filter policy"
+                )
+
+            return _rules, combinations
+
+        return _inner(filter_policy)
+
+    def _validate_rule(self, rule: t.Any) -> None:
+        match rule:
+            case None | str() | bool():
+                return
+
+            case int() | float():
+                # TODO: AWS says they support only from -10^9 to 10^9, but seems to accept values outside that
+                # range, so we just return
+                # if rule <= -1000000000 or rule >= 1000000000:
+                #     raise ""
+                return
+
+            case {**kwargs}:
+                if len(kwargs) != 1:
+                    raise InvalidParameterException(
+                        f"{self.error_prefix}FilterPolicy: Only one key allowed in match expression"
+                    )
+
+                operator, value = None, None
+                for k, v in kwargs.items():
+                    operator, value = k, v
+
+                if operator in (
+                    "equals-ignore-case",
+                    "prefix",
+                    "suffix",
+                ):
+                    if not isinstance(value, str):
+                        raise InvalidParameterException(
+                            f"{self.error_prefix}FilterPolicy: {operator} match pattern must be a string"
+                        )
+                    return
+
+                elif operator == "anything-but":
+                    # anything-but can actually contain any kind of simple rule (str, number, and list)
+                    if isinstance(value, list):
+                        for v in value:
+                            self._validate_rule(v)
+
+                        return
+
+                    # or have a nested `prefix` pattern
+                    elif isinstance(value, dict):
+                        for inner_operator in value.keys():
+                            if inner_operator != "prefix":
+                                raise InvalidParameterException(
+                                    f"{self.error_prefix}FilterPolicy: Unsupported anything-but pattern: {inner_operator}"
+                                )
+
+                    self._validate_rule(value)
+                    return
+
+                elif operator == "exists":
+                    if not isinstance(value, bool):
+                        raise InvalidParameterException(
+                            f"{self.error_prefix}FilterPolicy: exists match pattern
must be either true or false." + ) + return + + elif operator == "numeric": + self._validate_numeric_condition(value) + + elif operator == "cidr": + self._validate_cidr_condition(value) + + else: + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Unrecognized match type {operator}" + ) + + case _: + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Match value must be String, number, true, false, or null" + ) + + def _validate_cidr_condition(self, value): + if not isinstance(value, str): + # `cidr` returns the prefix error + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: prefix match pattern must be a string" + ) + splitted = value.split("/") + if len(splitted) != 2: + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Malformed CIDR, one '/' required" + ) + ip_addr, mask = value.split("/") + try: + int(mask) + except ValueError: + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Malformed CIDR, mask bits must be an integer" + ) + try: + ipaddress.ip_network(value) + except ValueError: + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Nonstandard IP address: {ip_addr}" + ) + + def _validate_numeric_condition(self, value): + if not value: + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Invalid member in numeric match: ]" + ) + num_values = value[::-1] + + operator = num_values.pop() + if not isinstance(operator, str): + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Invalid member in numeric match: {operator}" + ) + elif operator not in ("<", "<=", "=", ">", ">="): + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Unrecognized numeric range operator: {operator}" + ) + + value = num_values.pop() if num_values else None + if not isinstance(value, (int, float)): + exc_operator = "equals" if operator == "=" else operator + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Value of {exc_operator} must be numeric" + ) + + if not num_values: + return + + if operator not in (">", ">="): + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Too many elements in numeric expression" + ) + + second_operator = num_values.pop() + if not isinstance(second_operator, str): + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Bad value in numeric range: {second_operator}" + ) + elif second_operator not in ("<", "<="): + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Bad numeric range operator: {second_operator}" + ) + + second_value = num_values.pop() if num_values else None + if not isinstance(second_value, (int, float)): + exc_operator = "equals" if second_operator == "=" else second_operator + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Value of {exc_operator} must be numeric" + ) + + elif second_value <= value: + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Bottom must be less than top" + ) + + elif num_values: + raise InvalidParameterException( + f"{self.error_prefix}FilterPolicy: Too many terms in numeric range expression" + ) diff --git a/localstack-core/localstack/services/sns/models.py b/localstack-core/localstack/services/sns/models.py new file mode 100644 index 0000000000000..00d70586cfa5b --- /dev/null +++ b/localstack-core/localstack/services/sns/models.py @@ -0,0 +1,152 @@ +import itertools +import time +from dataclasses import dataclass, field +from enum import StrEnum 
+from typing import Dict, List, Literal, Optional, TypedDict, Union
+
+from localstack.aws.api.sns import (
+    MessageAttributeMap,
+    PublishBatchRequestEntry,
+    subscriptionARN,
+    topicARN,
+)
+from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute
+from localstack.utils.objects import singleton_factory
+from localstack.utils.strings import long_uid
+
+SnsProtocols = Literal[
+    "http", "https", "email", "email-json", "sms", "sqs", "application", "lambda", "firehose"
+]
+
+SnsApplicationPlatforms = Literal[
+    "APNS", "APNS_SANDBOX", "ADM", "FCM", "Baidu", "GCM", "MPNS", "WNS"
+]
+
+SnsMessageProtocols = Literal[SnsProtocols, SnsApplicationPlatforms]
+
+
+@singleton_factory
+def global_sns_message_sequence():
+    # creates a 20-digit number used as the start for the global sequence; adds 100 for it to be different from SQS's,
+    # mostly for testing purposes, as both global sequences would otherwise be initialized at the same time and be identical
+    start = int(time.time() + 100) << 33
+    # itertools.count is thread-safe under the GIL, since its get-and-increment operation is a single Python bytecode op
+    return itertools.count(start)
+
+
+def get_next_sequence_number():
+    return next(global_sns_message_sequence())
+
+
+class SnsMessageType(StrEnum):
+    Notification = "Notification"
+    SubscriptionConfirmation = "SubscriptionConfirmation"
+    UnsubscribeConfirmation = "UnsubscribeConfirmation"
+
+
+@dataclass
+class SnsMessage:
+    type: SnsMessageType
+    message: Union[
+        str, Dict
+    ]  # can be a Dict after being JSON-decoded for validation, if the message structure is `json`
+    message_attributes: Optional[MessageAttributeMap] = None
+    message_structure: Optional[str] = None
+    subject: Optional[str] = None
+    message_deduplication_id: Optional[str] = None
+    message_group_id: Optional[str] = None
+    token: Optional[str] = None
+    message_id: str = field(default_factory=long_uid)
+    is_fifo: Optional[bool] = False
+    sequencer_number: Optional[str] = None
+
+    def __post_init__(self):
+        if self.message_attributes is None:
+            self.message_attributes = {}
+        if self.is_fifo:
+            self.sequencer_number = str(get_next_sequence_number())
+
+    def message_content(self, protocol: SnsMessageProtocols) -> str:
+        """
+        Helper function to retrieve the message content for the right protocol if the MessageStructure is `json`
+        See https://docs.aws.amazon.com/sns/latest/dg/sns-send-custom-platform-specific-payloads-mobile-devices.html
+        https://docs.aws.amazon.com/sns/latest/dg/example_sns_Publish_section.html
+        :param protocol:
+        :return: message content as string
+        """
+        if self.message_structure == "json":
+            return self.message.get(protocol, self.message.get("default"))
+
+        return self.message
+
+    @classmethod
+    def from_batch_entry(cls, entry: PublishBatchRequestEntry, is_fifo=False) -> "SnsMessage":
+        return cls(
+            type=SnsMessageType.Notification,
+            message=entry["Message"],
+            subject=entry.get("Subject"),
+            message_structure=entry.get("MessageStructure"),
+            message_attributes=entry.get("MessageAttributes"),
+            message_deduplication_id=entry.get("MessageDeduplicationId"),
+            message_group_id=entry.get("MessageGroupId"),
+            is_fifo=is_fifo,
+        )
+
+
+class SnsSubscription(TypedDict, total=False):
+    """
+    In SNS, a Subscription can be represented with only TopicArn, Endpoint, Protocol, SubscriptionArn and Owner, for
+    example in ListSubscriptions. However, when getting a subscription with GetSubscriptionAttributes, it will return
+    the Subscription object merged with its own attributes.
+    This TypedDict represents that merged object, for internal use and in GetSubscriptionAttributes
+    https://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html
+    """
+
+    TopicArn: topicARN
+    Endpoint: str
+    Protocol: SnsProtocols
+    SubscriptionArn: subscriptionARN
+    PendingConfirmation: Literal["true", "false"]
+    Owner: Optional[str]
+    SubscriptionPrincipal: Optional[str]
+    FilterPolicy: Optional[str]
+    FilterPolicyScope: Literal["MessageAttributes", "MessageBody"]
+    RawMessageDelivery: Literal["true", "false"]
+    ConfirmationWasAuthenticated: Literal["true", "false"]
+    SubscriptionRoleArn: Optional[str]
+    DeliveryPolicy: Optional[str]
+
+
+class SnsStore(BaseStore):
+    # maps topic ARN to subscription ARNs
+    topic_subscriptions: Dict[str, List[str]] = LocalAttribute(default=dict)
+
+    # maps subscription ARN to SnsSubscription
+    subscriptions: Dict[str, SnsSubscription] = LocalAttribute(default=dict)
+
+    # maps confirmation token to subscription ARN
+    subscription_tokens: Dict[str, str] = LocalAttribute(default=dict)
+
+    # maps topic ARN to list of tags
+    sns_tags: Dict[str, List[Dict]] = LocalAttribute(default=dict)
+
+    # cache of topic ARN to platform endpoint messages (used primarily for testing)
+    platform_endpoint_messages: Dict[str, List[Dict]] = LocalAttribute(default=dict)
+
+    # list of sent SMS messages
+    sms_messages: List[Dict] = LocalAttribute(default=list)
+
+    # filter policies are stored as JSON strings in subscriptions; store the decoded result Dict here
+    subscription_filter_policy: Dict[subscriptionARN, Dict] = LocalAttribute(default=dict)
+
+    def get_topic_subscriptions(self, topic_arn: str) -> List[SnsSubscription]:
+        topic_subscriptions = self.topic_subscriptions.get(topic_arn, [])
+        subscriptions = [
+            subscription
+            for subscription_arn in topic_subscriptions
+            if (subscription := self.subscriptions.get(subscription_arn))
+        ]
+        return subscriptions
+
+
+sns_stores = AccountRegionBundle("sns", SnsStore)
diff --git a/localstack-core/localstack/services/sns/provider.py b/localstack-core/localstack/services/sns/provider.py
new file mode 100644
index 0000000000000..e5d166ef3c72c
--- /dev/null
+++ b/localstack-core/localstack/services/sns/provider.py
@@ -0,0 +1,1359 @@
+import base64
+import copy
+import functools
+import json
+import logging
+from typing import Dict, List
+from uuid import uuid4
+
+from botocore.utils import InvalidArnException
+from moto.core.utils import camelcase_to_pascal, underscores_to_camelcase
+from moto.sns import sns_backends
+from moto.sns.models import MAXIMUM_MESSAGE_LENGTH, SNSBackend, Topic
+from moto.sns.utils import is_e164
+
+from localstack.aws.api import CommonServiceException, RequestContext
+from localstack.aws.api.sns import (
+    AmazonResourceName,
+    BatchEntryIdsNotDistinctException,
+    ConfirmSubscriptionResponse,
+    CreateEndpointResponse,
+    CreatePlatformApplicationResponse,
+    CreateTopicResponse,
+    EndpointDisabledException,
+    GetSubscriptionAttributesResponse,
+    GetTopicAttributesResponse,
+    InvalidParameterException,
+    InvalidParameterValueException,
+    ListSubscriptionsByTopicResponse,
+    ListSubscriptionsResponse,
+    ListTagsForResourceResponse,
+    MapStringToString,
+    MessageAttributeMap,
+    NotFoundException,
+    PublishBatchRequestEntryList,
+    PublishBatchResponse,
+    PublishBatchResultEntry,
+    PublishResponse,
+    SnsApi,
+    String,
+    SubscribeResponse,
+    Subscription,
+    SubscriptionAttributesMap,
+    TagKeyList,
+    TagList,
+    TagResourceResponse,
+    TooManyEntriesInBatchRequestException,
+    TopicAttributesMap,
+    UntagResourceResponse,
+    attributeName,
+    attributeValue,
+    authenticateOnUnsubscribe,
+    boolean,
+    messageStructure,
+    nextToken,
+    subscriptionARN,
+    topicARN,
+    topicName,
+)
+from localstack.constants import AWS_REGION_US_EAST_1, DEFAULT_AWS_ACCOUNT_ID
+from localstack.http import Request, Response, Router, route
+from localstack.services.edge import ROUTER
+from localstack.services.moto import call_moto
+from localstack.services.plugins import ServiceLifecycleHook
+from localstack.services.sns import constants as sns_constants
+from localstack.services.sns.certificate import SNS_SERVER_CERT
+from localstack.services.sns.filter import FilterPolicyValidator
+from localstack.services.sns.models import (
+    SnsMessage,
+    SnsMessageType,
+    SnsStore,
+    SnsSubscription,
+    sns_stores,
+)
+from localstack.services.sns.publisher import (
+    PublishDispatcher,
+    SnsBatchPublishContext,
+    SnsPublishContext,
+)
+from localstack.utils.aws.arns import (
+    ArnData,
+    extract_account_id_from_arn,
+    extract_region_from_arn,
+    get_partition,
+    parse_arn,
+)
+from localstack.utils.collections import PaginatedList, select_from_typed_dict
+from localstack.utils.strings import short_uid, to_bytes, to_str
+
+from .analytics import internal_api_calls
+
+# set up logger
+LOG = logging.getLogger(__name__)
+
+
+class SnsProvider(SnsApi, ServiceLifecycleHook):
+    """
+    Provider class for AWS Simple Notification Service.
+
+    AWS supports the following operations in a cross-account setup:
+    - GetTopicAttributes
+    - SetTopicAttributes
+    - AddPermission
+    - RemovePermission
+    - Publish
+    - Subscribe
+    - ListSubscriptionsByTopic
+    - DeleteTopic
+    """
+
+    @route(sns_constants.SNS_CERT_ENDPOINT, methods=["GET"])
+    def get_signature_cert_pem_file(self, request: Request):
+        # see http://sns-public-resources.s3.amazonaws.com/SNS_Message_Signing_Release_Note_Jan_25_2011.pdf
+        # see https://docs.aws.amazon.com/sns/latest/dg/sns-verify-signature-of-message.html
+        return Response(self._signature_cert_pem, 200)
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._publisher = PublishDispatcher()
+        self._signature_cert_pem: str = SNS_SERVER_CERT
+
+    def on_before_stop(self):
+        self._publisher.shutdown()
+
+    def on_after_init(self):
+        # Allow sent platform endpoint messages to be retrieved from the SNS endpoint
+        register_sns_api_resource(ROUTER)
+        # add the route to serve the certificate used to validate message signatures
+        ROUTER.add(self.get_signature_cert_pem_file)
+
+    @staticmethod
+    def get_store(account_id: str, region_name: str) -> SnsStore:
+        return sns_stores[account_id][region_name]
+
+    @staticmethod
+    def get_moto_backend(account_id: str, region_name: str) -> SNSBackend:
+        return sns_backends[account_id][region_name]
+
+    @staticmethod
+    def _get_topic(arn: str, context: RequestContext) -> Topic:
+        """
+        :param arn: the Topic ARN
+        :param context: the RequestContext of the request; the topic must be in the same region as the request (e.g.,
+        Publish cannot publish to a topic in a different region than the request)
+        :return: the Moto model Topic
+        """
+        arn_data = parse_and_validate_topic_arn(arn)
+        if context.region != arn_data["region"]:
+            raise InvalidParameterException("Invalid parameter: TopicArn")
+
+        try:
+            return sns_backends[arn_data["account"]][context.region].topics[arn]
+        except KeyError:
+            raise NotFoundException("Topic does not exist")
+
+    def get_topic_attributes(
+        self, context: RequestContext, topic_arn: topicARN, **kwargs
+    ) -> GetTopicAttributesResponse:
+        # get the Topic from moto manually first, because Moto does not gracefully handle the case where the ARN is
+        # malformed (raises ValueError: not enough values to unpack (expected 6, got 1))
+        moto_topic_model = self._get_topic(topic_arn, context)
+        moto_response: GetTopicAttributesResponse = call_moto(context)
+        # TODO: fix some attributes returned by moto, see snapshot
+        # DeliveryPolicy
+        # EffectiveDeliveryPolicy
+        # Policy.Statement..Action -> SNS:Receive is added by moto but not returned in AWS
+        # TODO: very hacky way to get the attributes we need instead of a moto patch
+        # see the attributes we need: https://docs.aws.amazon.com/sns/latest/dg/sns-topic-attributes.html
+        # would need more work to have the proper format out of moto, maybe extract the model to our store
+        attributes = moto_response["Attributes"]
+        for attr in vars(moto_topic_model):
+            if "_feedback" in attr:
+                key = camelcase_to_pascal(underscores_to_camelcase(attr))
+                attributes[key] = getattr(moto_topic_model, attr)
+            elif attr == "signature_version":
+                attributes["SignatureVersion"] = moto_topic_model.signature_version
+            elif attr == "archive_policy":
+                attributes["ArchivePolicy"] = moto_topic_model.archive_policy
+
+        return moto_response
+
+    def set_topic_attributes(
+        self,
+        context: RequestContext,
+        topic_arn: topicARN,
+        attribute_name: attributeName,
+        attribute_value: attributeValue | None = None,
+        **kwargs,
+    ) -> None:
+        # validate the topic first
+        self._get_topic(topic_arn, context)
+        call_moto(context)
+
+    def publish_batch(
+        self,
+        context: RequestContext,
+        topic_arn: topicARN,
+        publish_batch_request_entries: PublishBatchRequestEntryList,
+        **kwargs,
+    ) -> PublishBatchResponse:
+        if len(publish_batch_request_entries) > 10:
+            raise TooManyEntriesInBatchRequestException(
+                "The batch request contains more entries than permissible."
+            )
+
+        parsed_arn = parse_and_validate_topic_arn(topic_arn)
+        store = self.get_store(account_id=parsed_arn["account"], region_name=context.region)
+        moto_topic = self._get_topic(topic_arn, context)
+
+        ids = [entry["Id"] for entry in publish_batch_request_entries]
+        if len(set(ids)) != len(publish_batch_request_entries):
+            raise BatchEntryIdsNotDistinctException(
+                "Two or more batch entries in the request have the same Id."
+            )
+
+        response: PublishBatchResponse = {"Successful": [], "Failed": []}
+
+        # TODO: write AWS validated tests with FilterPolicy and batching
+        # TODO: find a scenario where we can fail to send a message synchronously to be able to report it
+        # right now, it seems that AWS fails the whole publish if something is wrong in the format of one message
+
+        total_batch_size = 0
+        message_contexts = []
+        for entry_index, entry in enumerate(publish_batch_request_entries, start=1):
+            message_payload = entry.get("Message")
+            message_attributes = entry.get("MessageAttributes", {})
+            if message_attributes:
+                # if a message contains invalid message attributes, the call will fail at the first
+                # invalid message encountered and raise ParameterValueInvalid
+                validate_message_attributes(message_attributes, position=entry_index)
+
+            total_batch_size += get_total_publish_size(message_payload, message_attributes)
+
+            # TODO: WRITE AWS VALIDATED
+            if entry.get("MessageStructure") == "json":
+                try:
+                    message = json.loads(message_payload)
+                    # Keys in the JSON object that correspond to supported transport protocols must have
+                    # simple JSON string values.
+                    # Non-string values will cause the key to be ignored.
+                    message = {
+                        key: field for key, field in message.items() if isinstance(field, str)
+                    }
+                    if "default" not in message:
+                        raise InvalidParameterException(
+                            "Invalid parameter: Message Structure - No default entry in JSON message body"
+                        )
+                    entry["Message"] = message  # noqa
+                except json.JSONDecodeError:
+                    raise InvalidParameterException(
+                        "Invalid parameter: Message Structure - JSON message body failed to parse"
+                    )
+
+            if is_fifo := (".fifo" in topic_arn):
+                if not all("MessageGroupId" in entry for entry in publish_batch_request_entries):
+                    raise InvalidParameterException(
+                        "Invalid parameter: The MessageGroupId parameter is required for FIFO topics"
+                    )
+                if moto_topic.content_based_deduplication == "false":
+                    if not all(
+                        "MessageDeduplicationId" in entry for entry in publish_batch_request_entries
+                    ):
+                        raise InvalidParameterException(
+                            "Invalid parameter: The topic should either have ContentBasedDeduplication enabled or MessageDeduplicationId provided explicitly",
+                        )
+
+            msg_ctx = SnsMessage.from_batch_entry(entry, is_fifo=is_fifo)
+            message_contexts.append(msg_ctx)
+            success = PublishBatchResultEntry(
+                Id=entry["Id"],
+                MessageId=msg_ctx.message_id,
+            )
+            if is_fifo:
+                success["SequenceNumber"] = msg_ctx.sequencer_number
+            response["Successful"].append(success)
+
+        if total_batch_size > MAXIMUM_MESSAGE_LENGTH:
+            raise CommonServiceException(
+                code="BatchRequestTooLong",
+                message="The length of all the messages put together is more than the limit.",
+                sender_fault=True,
+            )
+
+        publish_ctx = SnsBatchPublishContext(
+            messages=message_contexts,
+            store=store,
+            request_headers=context.request.headers,
+            topic_attributes=vars(moto_topic),
+        )
+        self._publisher.publish_batch_to_topic(publish_ctx, topic_arn)
+
+        return response
+
+    def set_subscription_attributes(
+        self,
+        context: RequestContext,
+        subscription_arn: subscriptionARN,
+        attribute_name: attributeName,
+        attribute_value: attributeValue = None,
+        **kwargs,
+    ) -> None:
+        store = self.get_store(account_id=context.account_id, region_name=context.region)
+        sub = store.subscriptions.get(subscription_arn)
+        if not sub:
+            raise NotFoundException("Subscription does not exist")
+
+        validate_subscription_attribute(
+            attribute_name=attribute_name,
+            attribute_value=attribute_value,
+            topic_arn=sub["TopicArn"],
+            endpoint=sub["Endpoint"],
+        )
+
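+        # For illustration (hypothetical attribute values): a call like SetSubscriptionAttributes with
+        # AttributeName="FilterPolicy" and AttributeValue='{"event": ["order_created"]}' passes the
+        # validation above, and the decoded policy is cached below for publish-time evaluation.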
+        if attribute_name == "RawMessageDelivery":
+            attribute_value = attribute_value.lower()
+
+        elif attribute_name == "FilterPolicy":
+            filter_policy = json.loads(attribute_value) if attribute_value else None
+            if filter_policy:
+                validator = FilterPolicyValidator(
+                    scope=sub.get("FilterPolicyScope", "MessageAttributes"),
+                    is_subscribe_call=False,
+                )
+                validator.validate_filter_policy(filter_policy)
+
+            store.subscription_filter_policy[subscription_arn] = filter_policy
+
+        sub[attribute_name] = attribute_value
+
+    def confirm_subscription(
+        self,
+        context: RequestContext,
+        topic_arn: topicARN,
+        token: String,
+        authenticate_on_unsubscribe: authenticateOnUnsubscribe = None,
+        **kwargs,
+    ) -> ConfirmSubscriptionResponse:
+        # TODO: validate format on the token (seems to be 288 hex chars)
+        # this request can come from any http client, it might not be signed (we would need to implement
+        # `authenticate_on_unsubscribe` to force a signing client to do this request).
+        # so, the region and account_id might not be in the request. Use the ones from the topic_arn
+        try:
+            parsed_arn = parse_arn(topic_arn)
+        except InvalidArnException:
+            raise InvalidParameterException("Invalid parameter: Topic")
+
+        store = self.get_store(account_id=parsed_arn["account"], region_name=parsed_arn["region"])
+
+        # it seems SNS is able to know what the region of the topic should be, even though a wrong topic is accepted
+        if parsed_arn["region"] != get_region_from_subscription_token(token):
+            raise InvalidParameterException("Invalid parameter: Topic")
+
+        subscription_arn = store.subscription_tokens.get(token)
+        if not subscription_arn:
+            raise InvalidParameterException("Invalid parameter: Token")
+
+        subscription = store.subscriptions.get(subscription_arn)
+        if not subscription:
+            # the subscription could have been deleted in the meantime
+            raise InvalidParameterException("Invalid parameter: Token")
+
+        # ConfirmSubscription is idempotent
+        if subscription.get("PendingConfirmation") == "false":
+            return ConfirmSubscriptionResponse(SubscriptionArn=subscription_arn)
+
+        subscription["PendingConfirmation"] = "false"
+        subscription["ConfirmationWasAuthenticated"] = "true"
+
+        return ConfirmSubscriptionResponse(SubscriptionArn=subscription_arn)
+
+    def untag_resource(
+        self,
+        context: RequestContext,
+        resource_arn: AmazonResourceName,
+        tag_keys: TagKeyList,
+        **kwargs,
+    ) -> UntagResourceResponse:
+        call_moto(context)
+        # TODO: probably get the account_id and region from the `resource_arn`
+        store = self.get_store(context.account_id, context.region)
+        existing_tags = store.sns_tags.setdefault(resource_arn, [])
+        store.sns_tags[resource_arn] = [t for t in existing_tags if t["Key"] not in tag_keys]
+        return UntagResourceResponse()
+
+    def list_tags_for_resource(
+        self, context: RequestContext, resource_arn: AmazonResourceName, **kwargs
+    ) -> ListTagsForResourceResponse:
+        # TODO: probably get the account_id and region from the `resource_arn`
+        store = self.get_store(context.account_id, context.region)
+        tags = store.sns_tags.setdefault(resource_arn, [])
+        return ListTagsForResourceResponse(Tags=tags)
+
+    def create_platform_application(
+        self,
+        context: RequestContext,
+        name: String,
+        platform: String,
+        attributes: MapStringToString,
+        **kwargs,
+    ) -> CreatePlatformApplicationResponse:
+        # TODO: validate platform
+        # see https://docs.aws.amazon.com/cli/latest/reference/sns/create-platform-application.html
+        # list of possible values: ADM, Baidu, APNS, APNS_SANDBOX, GCM, MPNS, WNS
+        # each platform has a specific way to handle credentials
+        # this can also be used for dispatching messages to the right platform
+        return call_moto(context)
+
+    def create_platform_endpoint(
+        self,
+        context: RequestContext,
+        platform_application_arn: String,
+        token: String,
+        custom_user_data: String = None,
+        attributes: MapStringToString = None,
+        **kwargs,
+    ) -> CreateEndpointResponse:
+        # TODO: support mobile app events
+        # see https://docs.aws.amazon.com/sns/latest/dg/application-event-notifications.html
+        return call_moto(context)
+
+    def unsubscribe(
+        self, context: RequestContext, subscription_arn: subscriptionARN, **kwargs
+    ) -> None:
+        count = len(subscription_arn.split(":"))
+        try:
+            parsed_arn = parse_arn(subscription_arn)
+        except InvalidArnException:
+            # TODO: check for invalid SubscriptionGUID
+            raise InvalidParameterException(
+                f"Invalid parameter: SubscriptionArn Reason: An ARN must have at least 6 elements, not {count}"
+            )
+
+        account_id = parsed_arn["account"]
+        region_name = parsed_arn["region"]
+
+        store = self.get_store(account_id=account_id, region_name=region_name)
+        if count == 6 and subscription_arn not in store.subscriptions:
+            raise InvalidParameterException("Invalid parameter: SubscriptionId")
+
+        moto_sns_backend = self.get_moto_backend(account_id, region_name)
+        moto_sns_backend.unsubscribe(subscription_arn)
+
+        # pop the subscription at the end, to avoid a race condition while iterating over the topic subscriptions
+        subscription = store.subscriptions.get(subscription_arn)
+
+        if not subscription:
+            # unsubscribe is idempotent, so unsubscribing from a non-existing subscription does nothing
+            return
+
+        if subscription["Protocol"] in ["http", "https"]:
+            # TODO: actually validate this (re)subscribe behaviour somehow (localhost.run?)
+            # we might need to save the sub token in the store
+            # TODO: AWS only sends the UnsubscribeConfirmation if the call is unauthenticated or the requester is not
+            # the owner
+            subscription_token = encode_subscription_token_with_region(region=context.region)
+            message_ctx = SnsMessage(
+                type=SnsMessageType.UnsubscribeConfirmation,
+                token=subscription_token,
+                message=f"You have chosen to deactivate subscription {subscription_arn}.\nTo cancel this operation and restore the subscription, visit the SubscribeURL included in this message.",
+            )
+            moto_topic = moto_sns_backend.topics.get(subscription["TopicArn"])
+            publish_ctx = SnsPublishContext(
+                message=message_ctx,
+                store=store,
+                request_headers=context.request.headers,
+                topic_attributes=vars(moto_topic),
+            )
+            self._publisher.publish_to_topic_subscriber(
+                publish_ctx,
+                topic_arn=subscription["TopicArn"],
+                subscription_arn=subscription_arn,
+            )
+
+        store.topic_subscriptions[subscription["TopicArn"]].remove(subscription_arn)
+        store.subscription_filter_policy.pop(subscription_arn, None)
+        store.subscriptions.pop(subscription_arn, None)
+
+    def get_subscription_attributes(
+        self, context: RequestContext, subscription_arn: subscriptionARN, **kwargs
+    ) -> GetSubscriptionAttributesResponse:
+        store = self.get_store(account_id=context.account_id, region_name=context.region)
+        sub = store.subscriptions.get(subscription_arn)
+        if not sub:
+            raise NotFoundException("Subscription does not exist")
+        removed_attrs = ["sqs_queue_url"]
+        if "FilterPolicyScope" in sub and not sub.get("FilterPolicy"):
+            removed_attrs.append("FilterPolicyScope")
+            removed_attrs.append("FilterPolicy")
+        elif "FilterPolicy" in sub and "FilterPolicyScope" not in sub:
+            sub["FilterPolicyScope"] = "MessageAttributes"
+
+        attributes = {k: v for k, v in sub.items() if k not in removed_attrs}
+        return GetSubscriptionAttributesResponse(Attributes=attributes)
+
+    def list_subscriptions(
+        self, context: RequestContext, next_token: nextToken = None, **kwargs
+    ) -> ListSubscriptionsResponse:
+        store = self.get_store(context.account_id, context.region)
+        subscriptions = [
+            select_from_typed_dict(Subscription, sub) for sub in list(store.subscriptions.values())
+        ]
+        paginated_subscriptions = PaginatedList(subscriptions)
+        page, next_token = paginated_subscriptions.get_page(
+            token_generator=lambda x: get_next_page_token_from_arn(x["SubscriptionArn"]),
+            page_size=100,
+            next_token=next_token,
+        )
+
+        response = ListSubscriptionsResponse(Subscriptions=page)
+        if next_token:
+            response["NextToken"] = next_token
+        return response
+
+    def list_subscriptions_by_topic(
+        self, context: RequestContext, topic_arn: topicARN, next_token: nextToken = None, **kwargs
+    ) -> ListSubscriptionsByTopicResponse:
+        self._get_topic(topic_arn, context)
+        parsed_topic_arn = parse_and_validate_topic_arn(topic_arn)
+        store = self.get_store(parsed_topic_arn["account"], parsed_topic_arn["region"])
+        sns_subscriptions = store.get_topic_subscriptions(topic_arn)
+        subscriptions = [select_from_typed_dict(Subscription, sub) for sub in sns_subscriptions]
+
+        paginated_subscriptions = PaginatedList(subscriptions)
+        page, next_token = paginated_subscriptions.get_page(
+            token_generator=lambda x: get_next_page_token_from_arn(x["SubscriptionArn"]),
+            page_size=100,
+            next_token=next_token,
+        )
+
+        response = ListSubscriptionsByTopicResponse(Subscriptions=page)
+        if next_token:
+            response["NextToken"] = next_token
+        return response
+
+    def publish(
+        self,
+        context: RequestContext,
+        message: String,
+        topic_arn: topicARN = None,
+        target_arn: String = None,
+        phone_number: String = None,
+        subject: String = None,
+        message_structure: messageStructure = None,
+        message_attributes: MessageAttributeMap = None,
+        message_deduplication_id: String = None,
+        message_group_id: String = None,
+        **kwargs,
+    ) -> PublishResponse:
+        if subject == "":
+            raise InvalidParameterException("Invalid parameter: Subject")
+        if not message or all(not m for m in message):
+            raise InvalidParameterException("Invalid parameter: Empty message")
+
+        # TODO: check for topic + target + phone number at the same time?
+        # TODO: more validation on phone, it might be opted out?
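+        # E.164 numbers consist of a leading "+", a country code, and the subscriber number, with at
+        # most 15 digits in total, e.g. "+14155552671".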
+ if phone_number and not is_e164(phone_number): + raise InvalidParameterException( + f"Invalid parameter: PhoneNumber Reason: {phone_number} is not valid to publish to" + ) + + if message_attributes: + validate_message_attributes(message_attributes) + + if get_total_publish_size(message, message_attributes) > MAXIMUM_MESSAGE_LENGTH: + raise InvalidParameterException("Invalid parameter: Message too long") + + # for compatibility reasons, AWS allows users to use either TargetArn or TopicArn for publishing to a topic + # use any of them for topic validation + topic_or_target_arn = topic_arn or target_arn + topic_model = None + + if is_fifo := (topic_or_target_arn and ".fifo" in topic_or_target_arn): + if not message_group_id: + raise InvalidParameterException( + "Invalid parameter: The MessageGroupId parameter is required for FIFO topics", + ) + topic_model = self._get_topic(topic_or_target_arn, context) + if topic_model.content_based_deduplication == "false": + if not message_deduplication_id: + raise InvalidParameterException( + "Invalid parameter: The topic should either have ContentBasedDeduplication enabled or MessageDeduplicationId provided explicitly", + ) + elif message_deduplication_id: + # this is the first one to raise if both are set while the topic is not fifo + raise InvalidParameterException( + "Invalid parameter: MessageDeduplicationId Reason: The request includes MessageDeduplicationId parameter that is not valid for this topic type" + ) + elif message_group_id: + raise InvalidParameterException( + "Invalid parameter: MessageGroupId Reason: The request includes MessageGroupId parameter that is not valid for this topic type" + ) + is_endpoint_publish = target_arn and ":endpoint/" in target_arn + if message_structure == "json": + try: + message = json.loads(message) + # Keys in the JSON object that correspond to supported transport protocols must have + # simple JSON string values. + # Non-string values will cause the key to be ignored. 
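+                # e.g. (illustrative) {"default": "fallback text", "sqs": "sqs-specific text", "GCM": 42}
+                # keeps "default" and "sqs" but drops "GCM", since its value is not a JSON string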
+                message = {key: field for key, field in message.items() if isinstance(field, str)}
+                # TODO: check no default key for direct TargetArn endpoint publish, need credentials
+                # see example: https://docs.aws.amazon.com/sns/latest/dg/sns-send-custom-platform-specific-payloads-mobile-devices.html
+                if "default" not in message and not is_endpoint_publish:
+                    raise InvalidParameterException(
+                        "Invalid parameter: Message Structure - No default entry in JSON message body"
+                    )
+            except json.JSONDecodeError:
+                raise InvalidParameterException(
+                    "Invalid parameter: Message Structure - JSON message body failed to parse"
+                )
+
+        if not phone_number:
+            # use the account to get the store from the TopicArn (you can only publish in the same region as the topic)
+            parsed_arn = parse_and_validate_topic_arn(topic_or_target_arn)
+            store = self.get_store(account_id=parsed_arn["account"], region_name=context.region)
+            moto_sns_backend = self.get_moto_backend(parsed_arn["account"], context.region)
+            if is_endpoint_publish:
+                if not (platform_endpoint := moto_sns_backend.platform_endpoints.get(target_arn)):
+                    raise InvalidParameterException(
+                        "Invalid parameter: TargetArn Reason: No endpoint found for the target arn specified"
+                    )
+                elif not platform_endpoint.enabled:
+                    raise EndpointDisabledException("Endpoint is disabled")
+            else:
+                topic_model = self._get_topic(topic_or_target_arn, context)
+        else:
+            # use the store from the request context
+            store = self.get_store(account_id=context.account_id, region_name=context.region)
+
+        message_ctx = SnsMessage(
+            type=SnsMessageType.Notification,
+            message=message,
+            message_attributes=message_attributes,
+            message_deduplication_id=message_deduplication_id,
+            message_group_id=message_group_id,
+            message_structure=message_structure,
+            subject=subject,
+            is_fifo=is_fifo,
+        )
+        publish_ctx = SnsPublishContext(
+            message=message_ctx, store=store, request_headers=context.request.headers
+        )
+
+        if is_endpoint_publish:
+            self._publisher.publish_to_application_endpoint(
+                ctx=publish_ctx, endpoint_arn=target_arn
+            )
+        elif phone_number:
+            self._publisher.publish_to_phone_number(ctx=publish_ctx, phone_number=phone_number)
+        else:
+            # beware: if the subscription is FIFO, the order might not be guaranteed.
+            # two quick calls to this method in succession might not be executed in order by the executor?
+            # TODO: test how this behaves in a FIFO context with a lot of threads.
+ publish_ctx.topic_attributes |= vars(topic_model) + self._publisher.publish_to_topic(publish_ctx, topic_or_target_arn) + + if is_fifo: + return PublishResponse( + MessageId=message_ctx.message_id, SequenceNumber=message_ctx.sequencer_number + ) + + return PublishResponse(MessageId=message_ctx.message_id) + + def subscribe( + self, + context: RequestContext, + topic_arn: topicARN, + protocol: String, + endpoint: String = None, + attributes: SubscriptionAttributesMap = None, + return_subscription_arn: boolean = None, + **kwargs, + ) -> SubscribeResponse: + # TODO: check validation ordering + parsed_topic_arn = parse_and_validate_topic_arn(topic_arn) + if context.region != parsed_topic_arn["region"]: + raise InvalidParameterException("Invalid parameter: TopicArn") + + store = self.get_store(account_id=parsed_topic_arn["account"], region_name=context.region) + + if topic_arn not in store.topic_subscriptions: + raise NotFoundException("Topic does not exist") + + if not endpoint: + # TODO: check AWS behaviour (because endpoint is optional) + raise NotFoundException("Endpoint not specified in subscription") + if protocol not in sns_constants.SNS_PROTOCOLS: + raise InvalidParameterException( + f"Invalid parameter: Amazon SNS does not support this protocol string: {protocol}" + ) + elif protocol in ["http", "https"] and not endpoint.startswith(f"{protocol}://"): + raise InvalidParameterException( + "Invalid parameter: Endpoint must match the specified protocol" + ) + elif protocol == "sms" and not is_e164(endpoint): + raise InvalidParameterException(f"Invalid SMS endpoint: {endpoint}") + + elif protocol == "sqs": + try: + parse_arn(endpoint) + except InvalidArnException: + raise InvalidParameterException("Invalid parameter: SQS endpoint ARN") + + elif protocol == "application": + # TODO: this is taken from moto, validate it + moto_backend = self.get_moto_backend( + account_id=parsed_topic_arn["account"], region_name=context.region + ) + if endpoint not in moto_backend.platform_endpoints: + raise NotFoundException("Endpoint does not exist") + + if ".fifo" in endpoint and ".fifo" not in topic_arn: + raise InvalidParameterException( + "Invalid parameter: Invalid parameter: Endpoint Reason: FIFO SQS Queues can not be subscribed to standard SNS topics" + ) + + sub_attributes = copy.deepcopy(attributes) if attributes else None + if sub_attributes: + for attr_name, attr_value in sub_attributes.items(): + validate_subscription_attribute( + attribute_name=attr_name, + attribute_value=attr_value, + topic_arn=topic_arn, + endpoint=endpoint, + is_subscribe_call=True, + ) + if raw_msg_delivery := sub_attributes.get("RawMessageDelivery"): + sub_attributes["RawMessageDelivery"] = raw_msg_delivery.lower() + + # An endpoint may only be subscribed to a topic once. Subsequent + # subscribe calls do nothing (subscribe is idempotent), except if its attributes are different. 
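+        # For illustration: a second Subscribe call with the same endpoint and no conflicting
+        # attributes returns the existing SubscriptionArn; if it passes a different FilterPolicy,
+        # the check below raises InvalidParameterException.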
+ for existing_topic_subscription in store.topic_subscriptions.get(topic_arn, []): + sub = store.subscriptions.get(existing_topic_subscription, {}) + if sub.get("Endpoint") == endpoint: + if sub_attributes: + # validate the subscription attributes aren't different + for attr in sns_constants.VALID_SUBSCRIPTION_ATTR_NAME: + # if a new attribute is present and different from an existent one, raise + if (new_attr := sub_attributes.get(attr)) and sub.get(attr) != new_attr: + raise InvalidParameterException( + "Invalid parameter: Attributes Reason: Subscription already exists with different attributes" + ) + + return SubscribeResponse(SubscriptionArn=sub["SubscriptionArn"]) + + principal = sns_constants.DUMMY_SUBSCRIPTION_PRINCIPAL.format( + partition=get_partition(context.region), account_id=context.account_id + ) + subscription_arn = create_subscription_arn(topic_arn) + subscription = SnsSubscription( + # http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html + TopicArn=topic_arn, + Endpoint=endpoint, + Protocol=protocol, + SubscriptionArn=subscription_arn, + PendingConfirmation="true", + Owner=context.account_id, + RawMessageDelivery="false", # default value, will be overridden if set + FilterPolicyScope="MessageAttributes", # default value, will be overridden if set + SubscriptionPrincipal=principal, # dummy value, could be fetched with a call to STS? + ) + if sub_attributes: + subscription.update(sub_attributes) + if "FilterPolicy" in sub_attributes: + filter_policy = ( + json.loads(sub_attributes["FilterPolicy"]) + if sub_attributes["FilterPolicy"] + else None + ) + if filter_policy: + validator = FilterPolicyValidator( + scope=subscription.get("FilterPolicyScope", "MessageAttributes"), + is_subscribe_call=True, + ) + validator.validate_filter_policy(filter_policy) + + store.subscription_filter_policy[subscription_arn] = filter_policy + + store.subscriptions[subscription_arn] = subscription + + topic_subscriptions = store.topic_subscriptions.setdefault(topic_arn, []) + topic_subscriptions.append(subscription_arn) + + # store the token and subscription arn + # TODO: the token is a 288 hex char string + subscription_token = encode_subscription_token_with_region(region=context.region) + store.subscription_tokens[subscription_token] = subscription_arn + + response_subscription_arn = subscription_arn + # Send out confirmation message for HTTP(S), fix for https://github.com/localstack/localstack/issues/881 + if protocol in ["http", "https"]: + message_ctx = SnsMessage( + type=SnsMessageType.SubscriptionConfirmation, + token=subscription_token, + message=f"You have chosen to subscribe to the topic {topic_arn}.\nTo confirm the subscription, visit the SubscribeURL included in this message.", + ) + publish_ctx = SnsPublishContext( + message=message_ctx, + store=store, + request_headers=context.request.headers, + topic_attributes=vars(self._get_topic(topic_arn, context)), + ) + self._publisher.publish_to_topic_subscriber( + ctx=publish_ctx, + topic_arn=topic_arn, + subscription_arn=subscription_arn, + ) + if not return_subscription_arn: + response_subscription_arn = "pending confirmation" + + elif protocol not in ["email", "email-json"]: + # Only HTTP(S) and email subscriptions are not auto validated + # Except if the endpoint and the topic are not in the same AWS account, then you'd need to manually confirm + # the subscription with the token + # TODO: revisit for multi-account + # TODO: test with AWS for email & email-json confirmation message + # we need to add the 
following check:
+            # if parsed_topic_arn["account"] == endpoint account (depending on the type, SQS, lambda, parse the arn)
+            subscription["PendingConfirmation"] = "false"
+            subscription["ConfirmationWasAuthenticated"] = "true"
+
+        return SubscribeResponse(SubscriptionArn=response_subscription_arn)
+
+    def tag_resource(
+        self, context: RequestContext, resource_arn: AmazonResourceName, tags: TagList, **kwargs
+    ) -> TagResourceResponse:
+        # each tag key must be unique
+        # https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-best-practices
+        unique_tag_keys = {tag["Key"] for tag in tags}
+        if len(unique_tag_keys) < len(tags):
+            raise InvalidParameterException("Invalid parameter: Duplicated keys are not allowed.")
+
+        call_moto(context)
+        store = self.get_store(context.account_id, context.region)
+        existing_tags = store.sns_tags.get(resource_arn, [])
+
+        def existing_tag_index(_item):
+            for idx, tag in enumerate(existing_tags):
+                if _item["Key"] == tag["Key"]:
+                    return idx
+            return None
+
+        for item in tags:
+            existing_index = existing_tag_index(item)
+            if existing_index is None:
+                existing_tags.append(item)
+            else:
+                existing_tags[existing_index] = item
+
+        store.sns_tags[resource_arn] = existing_tags
+        return TagResourceResponse()
+
+    def delete_topic(self, context: RequestContext, topic_arn: topicARN, **kwargs) -> None:
+        parsed_arn = parse_and_validate_topic_arn(topic_arn)
+        if context.region != parsed_arn["region"]:
+            raise InvalidParameterException("Invalid parameter: TopicArn")
+
+        call_moto(context)
+        store = self.get_store(account_id=parsed_arn["account"], region_name=context.region)
+        topic_subscriptions = store.topic_subscriptions.pop(topic_arn, [])
+        for topic_sub in topic_subscriptions:
+            store.subscriptions.pop(topic_sub, None)
+
+        store.sns_tags.pop(topic_arn, None)
+
+    def create_topic(
+        self,
+        context: RequestContext,
+        name: topicName,
+        attributes: TopicAttributesMap = None,
+        tags: TagList = None,
+        data_protection_policy: attributeValue = None,
+        **kwargs,
+    ) -> CreateTopicResponse:
+        moto_response = call_moto(context)
+        store = self.get_store(account_id=context.account_id, region_name=context.region)
+        topic_arn = moto_response["TopicArn"]
+        tag_resource_success = extract_tags(topic_arn, tags, True, store)
+        if not tag_resource_success:
+            raise InvalidParameterException(
+                "Invalid parameter: Tags Reason: Topic already exists with different tags"
+            )
+        if tags:
+            self.tag_resource(context=context, resource_arn=topic_arn, tags=tags)
+        store.topic_subscriptions.setdefault(topic_arn, [])
+        return CreateTopicResponse(TopicArn=topic_arn)
+
+
+def is_raw_message_delivery(subscriber):
+    return subscriber.get("RawMessageDelivery") in ("true", True, "True")
+
+
+def validate_subscription_attribute(
+    attribute_name: str,
+    attribute_value: str,
+    topic_arn: str,
+    endpoint: str,
+    is_subscribe_call: bool = False,
+) -> None:
+    """
+    Validate the subscription attribute to be set. See:
+    See:
+    https://docs.aws.amazon.com/sns/latest/api/API_SetSubscriptionAttributes.html
+    :param attribute_name: the subscription attribute name, must be in VALID_SUBSCRIPTION_ATTR_NAME
+    :param attribute_value: the subscription attribute value
+    :param topic_arn: the topic_arn of the subscription, needed to know if it is FIFO
+    :param endpoint: the subscription endpoint (like an SQS queue ARN)
+    :param is_subscribe_call: the error message is different if called from Subscribe or SetSubscriptionAttributes
+    :raises InvalidParameterException
+    :return:
+    """
+    error_prefix = (
+        "Invalid parameter: Attributes Reason: " if is_subscribe_call else "Invalid parameter: "
+    )
+    if attribute_name not in sns_constants.VALID_SUBSCRIPTION_ATTR_NAME:
+        raise InvalidParameterException(f"{error_prefix}AttributeName")
+
+    if attribute_name == "FilterPolicy":
+        try:
+            json.loads(attribute_value or "{}")
+        except json.JSONDecodeError:
+            raise InvalidParameterException(f"{error_prefix}FilterPolicy: failed to parse JSON.")
+    elif attribute_name == "FilterPolicyScope":
+        if attribute_value not in ("MessageAttributes", "MessageBody"):
+            raise InvalidParameterException(
+                f"{error_prefix}FilterPolicyScope: Invalid value [{attribute_value}]. "
+                f"Please use either MessageBody or MessageAttributes"
+            )
+    elif attribute_name == "RawMessageDelivery":
+        # TODO: only for SQS and http(s) subs, + firehose
+        if attribute_value.lower() not in ("true", "false"):
+            raise InvalidParameterException(
+                f"{error_prefix}RawMessageDelivery: Invalid value [{attribute_value}]. "
+                f"Must be true or false."
+            )
+
+    elif attribute_name == "RedrivePolicy":
+        try:
+            dlq_target_arn = json.loads(attribute_value).get("deadLetterTargetArn", "")
+        except json.JSONDecodeError:
+            raise InvalidParameterException(f"{error_prefix}RedrivePolicy: failed to parse JSON.")
+        try:
+            parsed_arn = parse_arn(dlq_target_arn)
+        except InvalidArnException:
+            raise InvalidParameterException(
+                f"{error_prefix}RedrivePolicy: deadLetterTargetArn is an invalid arn"
+            )
+
+        if topic_arn.endswith(".fifo"):
+            if endpoint.endswith(".fifo") and (
+                not parsed_arn["resource"].endswith(".fifo") or "sqs" not in parsed_arn["service"]
+            ):
+                raise InvalidParameterException(
+                    f"{error_prefix}RedrivePolicy: must use a FIFO queue as DLQ for a FIFO Subscription to a FIFO Topic."
+                )
+
+
+def validate_message_attributes(
+    message_attributes: MessageAttributeMap, position: int | None = None
+) -> None:
+    """
+    Validate the message attributes, and raise an exception if they do not follow AWS validation rules.
+    See: https://docs.aws.amazon.com/sns/latest/dg/sns-message-attributes.html
+    Regex from: https://stackoverflow.com/questions/40718851/regex-that-does-not-allow-consecutive-dots
+    :param message_attributes: the message attributes map for the message
+    :param position: the batch entry position, if coming from `publishBatch`
+    :raises: InvalidParameterValueException
+    :return: None
+    """
+    for attr_name, attr in message_attributes.items():
+        if len(attr_name) > 256:
+            raise InvalidParameterValueException(
+                "Length of message attribute name must be less than 256 bytes."
+ ) + validate_message_attribute_name(attr_name) + # `DataType` is a required field for MessageAttributeValue + if (data_type := attr.get("DataType")) is None: + if position: + at = f"publishBatchRequestEntries.{position}.member.messageAttributes.{attr_name}.member.dataType" + else: + at = f"messageAttributes.{attr_name}.member.dataType" + + raise CommonServiceException( + code="ValidationError", + message=f"1 validation error detected: Value null at '{at}' failed to satisfy constraint: Member must not be null", + sender_fault=True, + ) + + if data_type not in ( + "String", + "Number", + "Binary", + ) and not sns_constants.ATTR_TYPE_REGEX.match(data_type): + raise InvalidParameterValueException( + f"The message attribute '{attr_name}' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String." + ) + if not any(attr_value.endswith("Value") for attr_value in attr): + raise InvalidParameterValueException( + f"The message attribute '{attr_name}' must contain non-empty message attribute value for message attribute type '{data_type}'." + ) + + value_key_data_type = "Binary" if data_type.startswith("Binary") else "String" + value_key = f"{value_key_data_type}Value" + if value_key not in attr: + raise InvalidParameterValueException( + f"The message attribute '{attr_name}' with type '{data_type}' must use field '{value_key_data_type}'." + ) + elif not attr[value_key]: + raise InvalidParameterValueException( + f"The message attribute '{attr_name}' must contain non-empty message attribute value for message attribute type '{data_type}'.", + ) + + +def validate_message_attribute_name(name: str) -> None: + """ + Validate the message attribute name with the specification of AWS. + The message attribute name can contain the following characters: A-Z, a-z, 0-9, underscore(_), hyphen(-), and period (.). The name must not start or end with a period, and it should not have successive periods. + :param name: message attribute name + :raises InvalidParameterValueException: if the name does not conform to the spec + """ + if not sns_constants.MSG_ATTR_NAME_REGEX.match(name): + # find the proper exception + if name[0] == ".": + raise InvalidParameterValueException( + "Invalid message attribute name starting with character '.' was found." + ) + elif name[-1] == ".": + raise InvalidParameterValueException( + "Invalid message attribute name ending with character '.' was found." + ) + + for idx, char in enumerate(name): + if char not in sns_constants.VALID_MSG_ATTR_NAME_CHARS: + # change prefix from 0x to #x, without capitalizing the x + hex_char = "#x" + hex(ord(char)).upper()[2:] + raise InvalidParameterValueException( + f"Invalid non-alphanumeric character '{hex_char}' was found in the message attribute name. Can only include alphanumeric characters, hyphens, underscores, or dots." + ) + # even if we go negative index, it will be covered by starting/ending with dot + if char == "." and name[idx - 1] == ".": + raise InvalidParameterValueException( + "Message attribute name can not have successive '.' character." 
+                )
+
+
+def extract_tags(
+    topic_arn: str, tags: TagList, is_create_topic_request: bool, store: SnsStore
+) -> bool:
+    existing_tags = list(store.sns_tags.get(topic_arn, []))
+    # if the topic does not exist yet, there is nothing to check
+    if topic_arn in store.topic_subscriptions:
+        if tags is None:
+            tags = []
+        for tag in tags:
+            # the topic was already created (possibly with empty tags); when we try to create it
+            # again with a different tag value, it should fail according to the AWS documentation.
+            if is_create_topic_request and existing_tags is not None and tag not in existing_tags:
+                return False
+    return True
+
+
+def parse_and_validate_topic_arn(topic_arn: str | None) -> ArnData:
+    topic_arn = topic_arn or ""
+    try:
+        return parse_arn(topic_arn)
+    except InvalidArnException:
+        count = len(topic_arn.split(":"))
+        raise InvalidParameterException(
+            f"Invalid parameter: TopicArn Reason: An ARN must have at least 6 elements, not {count}"
+        )
+
+
+def create_subscription_arn(topic_arn: str) -> str:
+    # This is the format of a Subscription ARN
+    # arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f
+    return f"{topic_arn}:{uuid4()}"
+
+
+def encode_subscription_token_with_region(region: str) -> str:
+    """
+    Create a 64-character subscription token with the region encoded
+    :param region:
+    :return: a subscription token with the region encoded
+    """
+    return ((region.encode() + b"/").hex() + short_uid() * 8)[:64]
+
+
+def get_region_from_subscription_token(token: str) -> str:
+    """
+    Try to decode and return the region from a subscription token
+    :param token:
+    :return: the region if able to decode it
+    :raises: InvalidParameterException if the token is invalid
+    """
+    try:
+        region = token.split("2f", maxsplit=1)[0]
+        return bytes.fromhex(region).decode("utf-8")
+    except (IndexError, ValueError, TypeError, UnicodeDecodeError):
+        raise InvalidParameterException("Invalid parameter: Token")
+
+
+def get_next_page_token_from_arn(resource_arn: str) -> str:
+    return to_str(base64.b64encode(to_bytes(resource_arn)))
+
+
+def _get_byte_size(payload: str | bytes) -> int:
+    # Calculate the real length of the byte object if the object is a string
+    return len(to_bytes(payload))
+
+
+def get_total_publish_size(
+    message_body: str, message_attributes: MessageAttributeMap | None
+) -> int:
+    size = _get_byte_size(message_body)
+    if message_attributes:
+        # https://docs.aws.amazon.com/sns/latest/dg/sns-message-attributes.html
+        # All parts of the message attribute, including name, type, and value, are included in the message size
+        # restriction, which is 256 KB.
+        # iterate over the keys and attributes, adding the length of the key to the length of all attribute values
+        # (DataType and StringValue or BinaryValue)
+        size += sum(
+            _get_byte_size(key) + sum(_get_byte_size(attr_value) for attr_value in attr.values())
+            for key, attr in message_attributes.items()
+        )
+
+    return size
+
+
+def register_sns_api_resource(router: Router):
+    """Register the retrospection endpoints as internal LocalStack endpoints."""
+    router.add(SNSServicePlatformEndpointMessagesApiResource())
+    router.add(SNSServiceSMSMessagesApiResource())
+    router.add(SNSServiceSubscriptionTokenApiResource())
+
+
+def _format_messages(sent_messages: List[Dict[str, str]], validated_keys: List[str]):
+    """
+    This method formats the messages to be more readable and undoes the format change that was needed for Moto.
+    Should be removed once we refactor SNS.
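+    Example (illustrative): with validated_keys=["Message"], a message sent with
+    MessageStructure="json" has its "Message" dict re-serialized with json.dumps, and any key
+    not listed in validated_keys is dropped from the returned payload.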
+ """ + formatted_messages = [] + for sent_message in sent_messages: + msg = { + key: json.dumps(value) + if key == "Message" and sent_message.get("MessageStructure") == "json" + else value + for key, value in sent_message.items() + if key in validated_keys + } + formatted_messages.append(msg) + + return formatted_messages + + +class SNSInternalResource: + resource_type: str + """Base class with helper to properly track usage of internal endpoints""" + + def count_usage(self): + internal_api_calls.labels(resource_type=self.resource_type).increment() + + +def count_usage(f): + @functools.wraps(f) + def _wrapper(self, *args, **kwargs): + self.count_usage() + return f(self, *args, **kwargs) + + return _wrapper + + +class SNSServicePlatformEndpointMessagesApiResource(SNSInternalResource): + resource_type = "platform-endpoint-message" + """Provides a REST API for retrospective access to platform endpoint messages sent via SNS. + + This is registered as a LocalStack internal HTTP resource. + + This endpoint accepts: + - GET param `accountId`: selector for AWS account. If not specified, return fallback `000000000000` test ID + - GET param `region`: selector for AWS `region`. If not specified, return default "us-east-1" + - GET param `endpointArn`: filter for `endpointArn` resource in SNS + - DELETE param `accountId`: selector for AWS account + - DELETE param `region`: will delete saved messages for `region` + - DELETE param `endpointArn`: will delete saved messages for `endpointArn` + """ + + _PAYLOAD_FIELDS = [ + "TargetArn", + "TopicArn", + "Message", + "MessageAttributes", + "MessageStructure", + "Subject", + "MessageId", + ] + + @route(sns_constants.PLATFORM_ENDPOINT_MSGS_ENDPOINT, methods=["GET"]) + @count_usage + def on_get(self, request: Request): + filter_endpoint_arn = request.args.get("endpointArn") + account_id = ( + request.args.get("accountId", DEFAULT_AWS_ACCOUNT_ID) + if not filter_endpoint_arn + else extract_account_id_from_arn(filter_endpoint_arn) + ) + region = ( + request.args.get("region", AWS_REGION_US_EAST_1) + if not filter_endpoint_arn + else extract_region_from_arn(filter_endpoint_arn) + ) + store: SnsStore = sns_stores[account_id][region] + if filter_endpoint_arn: + messages = store.platform_endpoint_messages.get(filter_endpoint_arn, []) + messages = _format_messages(messages, self._PAYLOAD_FIELDS) + return { + "platform_endpoint_messages": {filter_endpoint_arn: messages}, + "region": region, + } + + platform_endpoint_messages = { + endpoint_arn: _format_messages(messages, self._PAYLOAD_FIELDS) + for endpoint_arn, messages in store.platform_endpoint_messages.items() + } + return { + "platform_endpoint_messages": platform_endpoint_messages, + "region": region, + } + + @route(sns_constants.PLATFORM_ENDPOINT_MSGS_ENDPOINT, methods=["DELETE"]) + @count_usage + def on_delete(self, request: Request) -> Response: + filter_endpoint_arn = request.args.get("endpointArn") + account_id = ( + request.args.get("accountId", DEFAULT_AWS_ACCOUNT_ID) + if not filter_endpoint_arn + else extract_account_id_from_arn(filter_endpoint_arn) + ) + region = ( + request.args.get("region", AWS_REGION_US_EAST_1) + if not filter_endpoint_arn + else extract_region_from_arn(filter_endpoint_arn) + ) + store: SnsStore = sns_stores[account_id][region] + if filter_endpoint_arn: + store.platform_endpoint_messages.pop(filter_endpoint_arn, None) + return Response("", status=204) + + store.platform_endpoint_messages.clear() + return Response("", status=204) + + +class 
SNSServiceSMSMessagesApiResource(SNSInternalResource):
+    resource_type = "sms-message"
+    """Provides a REST API for retrospective access to SMS messages sent via SNS.
+
+    This is registered as a LocalStack internal HTTP resource.
+
+    This endpoint accepts:
+    - GET param `accountId`: selector for AWS account. If not specified, return fallback `000000000000` test ID
+    - GET param `region`: selector for AWS `region`. If not specified, return default "us-east-1"
+    - GET param `phoneNumber`: filter for `phoneNumber` resource in SNS
+    """
+
+    _PAYLOAD_FIELDS = [
+        "PhoneNumber",
+        "TopicArn",
+        "SubscriptionArn",
+        "MessageId",
+        "Message",
+        "MessageAttributes",
+        "MessageStructure",
+        "Subject",
+    ]
+
+    @route(sns_constants.SMS_MSGS_ENDPOINT, methods=["GET"])
+    @count_usage
+    def on_get(self, request: Request):
+        account_id = request.args.get("accountId", DEFAULT_AWS_ACCOUNT_ID)
+        region = request.args.get("region", AWS_REGION_US_EAST_1)
+        filter_phone_number = request.args.get("phoneNumber")
+        store: SnsStore = sns_stores[account_id][region]
+        if filter_phone_number:
+            messages = [
+                m for m in store.sms_messages if m.get("PhoneNumber") == filter_phone_number
+            ]
+            messages = _format_messages(messages, self._PAYLOAD_FIELDS)
+            return {
+                "sms_messages": {filter_phone_number: messages},
+                "region": region,
+            }
+
+        sms_messages = {}
+
+        for m in _format_messages(store.sms_messages, self._PAYLOAD_FIELDS):
+            sms_messages.setdefault(m.get("PhoneNumber"), []).append(m)
+
+        return {
+            "sms_messages": sms_messages,
+            "region": region,
+        }
+
+    @route(sns_constants.SMS_MSGS_ENDPOINT, methods=["DELETE"])
+    @count_usage
+    def on_delete(self, request: Request) -> Response:
+        account_id = request.args.get("accountId", DEFAULT_AWS_ACCOUNT_ID)
+        region = request.args.get("region", AWS_REGION_US_EAST_1)
+        filter_phone_number = request.args.get("phoneNumber")
+        store: SnsStore = sns_stores[account_id][region]
+        if filter_phone_number:
+            store.sms_messages = [
+                m for m in store.sms_messages if m.get("PhoneNumber") != filter_phone_number
+            ]
+            return Response("", status=204)
+
+        store.sms_messages.clear()
+        return Response("", status=204)
+
+
+class SNSServiceSubscriptionTokenApiResource(SNSInternalResource):
+    resource_type = "subscription-token"
+    """Provides a REST API for retrospective access to Subscription Confirmation Tokens to confirm subscriptions.
+    Those are not sent for email subscriptions, and are sometimes inaccessible when working with an external
+    HTTPS endpoint which won't be able to reach your local host.
+
+    This is registered as a LocalStack internal HTTP resource.
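+
+    Example request (illustrative; the path is defined by SUBSCRIPTION_TOKENS_ENDPOINT):
+    GET /_aws/sns/subscription-tokens/<subscription_arn>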
+
+    This endpoint has the following parameter:
+    - GET `subscription_arn`: the `subscriptionArn` resource in SNS for which you want the SubscriptionToken
+    """
+
+    @route(f"{sns_constants.SUBSCRIPTION_TOKENS_ENDPOINT}/<path:subscription_arn>", methods=["GET"])
+    @count_usage
+    def on_get(self, _request: Request, subscription_arn: str):
+        try:
+            parsed_arn = parse_arn(subscription_arn)
+        except InvalidArnException:
+            response = Response("", 400)
+            response.set_json(
+                {
+                    "error": "The provided SubscriptionARN is invalid",
+                    "subscription_arn": subscription_arn,
+                }
+            )
+            return response
+
+        store: SnsStore = sns_stores[parsed_arn["account"]][parsed_arn["region"]]
+
+        for token, sub_arn in store.subscription_tokens.items():
+            if sub_arn == subscription_arn:
+                return {
+                    "subscription_token": token,
+                    "subscription_arn": subscription_arn,
+                }
+
+        response = Response("", 404)
+        response.set_json(
+            {
+                "error": "The provided SubscriptionARN is not found",
+                "subscription_arn": subscription_arn,
+            }
+        )
+        return response
diff --git a/localstack-core/localstack/services/sns/publisher.py b/localstack-core/localstack/services/sns/publisher.py
new file mode 100644
index 0000000000000..9510885f51431
--- /dev/null
+++ b/localstack-core/localstack/services/sns/publisher.py
@@ -0,0 +1,1357 @@
+import abc
+import base64
+import copy
+import datetime
+import hashlib
+import json
+import logging
+import time
+import traceback
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass, field
+from typing import Dict, List, Tuple, Union
+
+import requests
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.asymmetric import padding
+
+from localstack import config
+from localstack.aws.api.lambda_ import InvocationType
+from localstack.aws.api.sns import MessageAttributeMap
+from localstack.aws.connect import connect_to
+from localstack.config import external_service_url
+from localstack.services.sns import constants as sns_constants
+from localstack.services.sns.certificate import SNS_SERVER_PRIVATE_KEY
+from localstack.services.sns.executor import TopicPartitionedThreadPoolExecutor
+from localstack.services.sns.filter import SubscriptionFilter
+from localstack.services.sns.models import (
+    SnsApplicationPlatforms,
+    SnsMessage,
+    SnsMessageType,
+    SnsStore,
+    SnsSubscription,
+)
+from localstack.utils.aws.arns import (
+    PARTITION_NAMES,
+    extract_account_id_from_arn,
+    extract_region_from_arn,
+    extract_resource_from_arn,
+    parse_arn,
+    sqs_queue_url_for_arn,
+)
+from localstack.utils.aws.aws_responses import create_sqs_system_attributes
+from localstack.utils.aws.client_types import ServicePrincipal
+from localstack.utils.aws.dead_letter_queue import sns_error_to_dead_letter_queue
+from localstack.utils.bootstrap import is_api_enabled
+from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
+from localstack.utils.objects import not_none_or
+from localstack.utils.strings import long_uid, md5, to_bytes, to_str
+from localstack.utils.time import timestamp_millis
+
+LOG = logging.getLogger(__name__)
+
+
+@dataclass
+class SnsPublishContext:
+    message: SnsMessage
+    store: SnsStore
+    request_headers: dict[str, str]
+    topic_attributes: dict[str, str] = field(default_factory=dict)
+
+
+@dataclass
+class SnsBatchPublishContext:
+    messages: List[SnsMessage]
+    store: SnsStore
+    request_headers: Dict[str, str]
+    topic_attributes: dict[str, str] = field(default_factory=dict)
+
+
+class TopicPublisher(abc.ABC):
+    """
+    The TopicPublisher is responsible for publishing SNS messages to a topic's subscription.
+    This is the base class implementing the basic logic.
+    Each subclass will need to implement `_publish` using the subscription's protocol logic and client.
+    Subclasses can override `prepare_message` if the format of the message is different.
+    """
+
+    def publish(self, context: SnsPublishContext, subscriber: SnsSubscription):
+        """
+        This function wraps the underlying call to the actual publishing. This allows us to catch any uncaught
+        exception and log it properly. This method is passed to the ThreadPoolExecutor, which would swallow the
+        exception. This is a convenient way of doing it, but not something the abstract class should take care of.
+        Discussion here: https://github.com/localstack/localstack/pull/7267#discussion_r1056873437
+        # TODO: move this out of the base class
+        :param context: the SnsPublishContext created by the caller, containing the necessary data to publish the
+        message
+        :param subscriber: the subscription data
+        :return:
+        """
+        try:
+            self._publish(context=context, subscriber=subscriber)
+        except Exception:
+            LOG.exception(
+                "An internal error occurred while trying to send the SNS message %s",
+                context.message,
+            )
+            return
+
+    def _publish(self, context: SnsPublishContext, subscriber: SnsSubscription):
+        """
+        Base method for publishing the message. It is up to the child class to implement its way to publish the message
+        :param context: the SnsPublishContext created by the caller, containing the necessary data to publish the
+        message
+        :param subscriber: the subscription data
+        :return:
+        """
+        raise NotImplementedError
+
+    def prepare_message(
+        self,
+        message_context: SnsMessage,
+        subscriber: SnsSubscription,
+        topic_attributes: dict[str, str] = None,
+    ) -> str:
+        """
+        Returns the message formatted in the base SNS message format. The base SNS message format is shared amongst
+        SQS, HTTP(S), email-json and Firehose.
+        See https://docs.aws.amazon.com/sns/latest/dg/sns-sqs-as-subscriber.html
+        :param message_context: the SnsMessage containing the message data
+        :param subscriber: the SNS subscription
+        :param topic_attributes: the SNS Topic attributes
+        :return: formatted SNS message body in a JSON string
+        """
+        return create_sns_message_body(message_context, subscriber, topic_attributes)
+
+
+class EndpointPublisher(abc.ABC):
+    """
+    The EndpointPublisher is responsible for publishing SNS messages directly to an endpoint.
+    SNS allows directly publishing to phone numbers and application endpoints.
+    This is the base class implementing the basic logic.
+    Each subclass will need to implement `_publish` and `prepare_message` using the subscription's protocol logic
+    and client.
+    """
+
+    def publish(self, context: SnsPublishContext, endpoint: str):
+        """
+        This function wraps the underlying call to the actual publishing. This allows us to catch any uncaught
+        exception and log it properly. This method is passed to the ThreadPoolExecutor, which would swallow the
+        exception. This is a convenient way of doing it, but not something the abstract class should take care of.
+ Discussion here: https://github.com/localstack/localstack/pull/7267#discussion_r1056873437 + # TODO: move this out of the base class + :param context: the SnsPublishContext created by the caller, containing the necessary data to publish the + message + :param endpoint: the endpoint where the message should be published + :return: + """ + try: + self._publish(context=context, endpoint=endpoint) + except Exception: + LOG.exception( + "An internal error occurred while trying to send the SNS message %s", + context.message, + ) + return + + def _publish(self, context: SnsPublishContext, endpoint: str): + """ + Base method for publishing the message. It is up to the child class to implement its way to publish the message + :param context: the SnsPublishContext created by the caller, containing the necessary data to publish the + message + :param endpoint: the endpoint where the message should be published + :return: + """ + raise NotImplementedError + + def prepare_message(self, message_context: SnsMessage, endpoint: str) -> str: + """ + Base method to format the message. It is up to the child class to implement it. + :param message_context: the SnsMessage containing the message data + :param endpoint: the endpoint where the message should be published + :return: the formatted message + """ + raise NotImplementedError + + +class LambdaTopicPublisher(TopicPublisher): + """ + The Lambda publisher is responsible for invoking a subscribed lambda function to process the SNS message using + `Lambda.invoke` with the formatted message as Payload. + See: https://docs.aws.amazon.com/lambda/latest/dg/with-sns.html + """ + + def _publish(self, context: SnsPublishContext, subscriber: SnsSubscription): + try: + region = extract_region_from_arn(subscriber["Endpoint"]) + lambda_client = connect_to(region_name=region).lambda_.request_metadata( + source_arn=subscriber["TopicArn"], service_principal="sns" + ) + event = self.prepare_message(context.message, subscriber, context.topic_attributes) + inv_result = lambda_client.invoke( + FunctionName=subscriber["Endpoint"], + Payload=to_bytes(event), + InvocationType=InvocationType.Event, + ) + status_code = inv_result.get("StatusCode") + payload = inv_result.get("Payload") + if payload: + delivery = { + "statusCode": status_code, + "providerResponse": json.dumps( + {"lambdaRequestId": inv_result["ResponseMetadata"]["RequestId"]} + ), + } + store_delivery_log( + context.message, + subscriber, + success=True, + topic_attributes=context.topic_attributes, + delivery=delivery, + ) + + except Exception as exc: + LOG.info( + "Unable to run Lambda function on SNS message: %s %s", exc, traceback.format_exc() + ) + store_delivery_log( + context.message, + subscriber, + success=False, + topic_attributes=context.topic_attributes, + ) + message_body = create_sns_message_body( + message_context=context.message, + subscriber=subscriber, + topic_attributes=context.topic_attributes, + ) + sns_error_to_dead_letter_queue(subscriber, message_body, str(exc)) + + def prepare_message( + self, + message_context: SnsMessage, + subscriber: SnsSubscription, + topic_attributes: dict[str, str] = None, + ) -> str: + """ + You can see Lambda SNS Event format here: https://docs.aws.amazon.com/lambda/latest/dg/with-sns.html + :param message_context: the SnsMessage containing the message data + :param subscriber: the SNS subscription + :return: an SNS message body formatted as a lambda Event in a JSON string + """ + external_url = get_cert_base_url() + unsubscribe_url = 
create_unsubscribe_url(external_url, subscriber["SubscriptionArn"])
+        message_attributes = prepare_message_attributes(message_context.message_attributes)
+
+        event_payload = {
+            "Type": message_context.type or SnsMessageType.Notification,
+            "MessageId": message_context.message_id,
+            "Subject": message_context.subject,
+            "TopicArn": subscriber["TopicArn"],
+            "Message": message_context.message_content(subscriber["Protocol"]),
+            "Timestamp": timestamp_millis(),
+            "UnsubscribeUrl": unsubscribe_url,
+            "MessageAttributes": message_attributes,
+        }
+
+        signature_version = (
+            topic_attributes.get("signature_version", "1") if topic_attributes else "1"
+        )
+        canonical_string = compute_canonical_string(event_payload, message_context.type)
+        signature = get_message_signature(canonical_string, signature_version=signature_version)
+
+        event_payload.update(
+            {
+                # this is a bug on the AWS side: it always returns 1, but it should be the actual signature version of the topic
+                "SignatureVersion": "1",
+                "Signature": signature,
+                "SigningCertUrl": f"{external_url}{sns_constants.SNS_CERT_ENDPOINT}",
+            }
+        )
+        event = {
+            "Records": [
+                {
+                    "EventSource": "aws:sns",
+                    "EventVersion": "1.0",
+                    "EventSubscriptionArn": subscriber["SubscriptionArn"],
+                    "Sns": event_payload,
+                }
+            ]
+        }
+        return json.dumps(event)
+
+
+class SqsTopicPublisher(TopicPublisher):
+    """
+    The SQS publisher is responsible for publishing the SNS message to a subscribed SQS queue using `SQS.send_message`.
+    For integrations and the message format, see:
+    https://docs.aws.amazon.com/sns/latest/dg/sns-sqs-as-subscriber.html
+    """
+
+    def _publish(self, context: SnsPublishContext, subscriber: SnsSubscription):
+        message_context = context.message
+        try:
+            message_body = self.prepare_message(
+                message_context, subscriber, topic_attributes=context.topic_attributes
+            )
+            kwargs = self.get_sqs_kwargs(msg_context=message_context, subscriber=subscriber)
+        except Exception:
+            LOG.exception("An internal error occurred while trying to format the message for SQS")
+            return
+        try:
+            queue_url: str = sqs_queue_url_for_arn(subscriber["Endpoint"])
+            region = extract_region_from_arn(subscriber["Endpoint"])
+            sqs_client = connect_to(region_name=region).sqs.request_metadata(
+                source_arn=subscriber["TopicArn"], service_principal="sns"
+            )
+            sqs_client.send_message(
+                QueueUrl=queue_url,
+                MessageBody=message_body,
+                MessageSystemAttributes=create_sqs_system_attributes(context.request_headers),
+                **kwargs,
+            )
+            store_delivery_log(
+                message_context, subscriber, success=True, topic_attributes=context.topic_attributes
+            )
+        except Exception as exc:
+            LOG.info("Unable to forward SNS message to SQS: %s %s", exc, traceback.format_exc())
+            store_delivery_log(
+                message_context,
+                subscriber,
+                success=False,
+                topic_attributes=context.topic_attributes,
+            )
+            sns_error_to_dead_letter_queue(subscriber, message_body, str(exc), **kwargs)
+            if "NonExistentQueue" in str(exc):
+                LOG.debug("The SQS queue endpoint does not exist anymore")
+                # todo: if the queue got deleted, even if we recreate a queue with the same name/url
+                #  AWS won't send to it anymore. Would need to unsub/resub.
+                #  We should mark this subscription as "broken"
+
+    @staticmethod
+    def get_sqs_kwargs(msg_context: SnsMessage, subscriber: SnsSubscription):
+        kwargs = {}
+        if is_raw_message_delivery(subscriber) and msg_context.message_attributes:
+            kwargs["MessageAttributes"] = msg_context.message_attributes
+
+        # SNS now allows regular non-FIFO subscriptions to FIFO topics. Validate that the subscription target is FIFO
+        # before passing the FIFO-only parameters
+        if subscriber["Endpoint"].endswith(".fifo"):
+            if msg_context.message_group_id:
+                kwargs["MessageGroupId"] = msg_context.message_group_id
+            if msg_context.message_deduplication_id:
+                kwargs["MessageDeduplicationId"] = msg_context.message_deduplication_id
+            elif subscriber["TopicArn"].endswith(".fifo"):
+                # Amazon SNS uses the message body provided to generate a unique hash value to use as the deduplication
+                # ID for each message, so you don't need to set a deduplication ID when you send each message.
+                # https://docs.aws.amazon.com/sns/latest/dg/fifo-message-dedup.html
+                content = msg_context.message_content("sqs")
+                kwargs["MessageDeduplicationId"] = hashlib.sha256(
+                    content.encode("utf-8")
+                ).hexdigest()
+
+        # TODO: for message deduplication, we are using the underlying features of the SQS queue.
+        #  However, SQS queues only deduplicate at the queue level, whereas SNS deduplicates at the topic level;
+        #  we will need to implement this
+        return kwargs
+
+
+class SqsBatchTopicPublisher(SqsTopicPublisher):
+    """
+    The SQS Batch publisher is responsible for publishing batched SNS messages to a subscribed SQS queue using
+    `SQS.send_message_batch`. This allows making use of SQS batching capabilities.
+    See https://docs.aws.amazon.com/sns/latest/dg/sns-batch-api-actions.html
+    https://docs.aws.amazon.com/sns/latest/api/API_PublishBatch.html
+    https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html
+    """
+
+    def _publish(self, context: SnsBatchPublishContext, subscriber: SnsSubscription):
+        entries = []
+        sqs_system_attrs = create_sqs_system_attributes(context.request_headers)
+        # TODO: check ID, SNS rules are not the same as SQS, so maybe generate the entries ID
+        failure_map = {}
+        for index, message_ctx in enumerate(context.messages):
+            message_body = self.prepare_message(
+                message_ctx, subscriber, topic_attributes=context.topic_attributes
+            )
+            sqs_kwargs = self.get_sqs_kwargs(message_ctx, subscriber)
+            entry = {"Id": f"sns-batch-{index}", "MessageBody": message_body, **sqs_kwargs}
+            # in case of failure
+            failure_map[entry["Id"]] = {
+                "context": message_ctx,
+                "entry": entry,
+            }
+
+            if sqs_system_attrs:
+                entry["MessageSystemAttributes"] = sqs_system_attrs
+
+            entries.append(entry)
+
+        try:
+            queue_url = sqs_queue_url_for_arn(subscriber["Endpoint"])
+
+            account_id = extract_account_id_from_arn(subscriber["Endpoint"])
+            region = extract_region_from_arn(subscriber["Endpoint"])
+
+            sqs_client = connect_to(
+                aws_access_key_id=account_id, region_name=region
+            ).sqs.request_metadata(source_arn=subscriber["TopicArn"], service_principal="sns")
+            response = sqs_client.send_message_batch(QueueUrl=queue_url, Entries=entries)
+
+            for message_ctx in context.messages:
+                store_delivery_log(
+                    message_ctx, subscriber, success=True, topic_attributes=context.topic_attributes
+                )
+
+            if failed_messages := response.get("Failed"):
+                for failed_msg in failed_messages:
+                    failure_data = failure_map.get(failed_msg["Id"])
+                    LOG.info(
+                        "Unable to forward SNS message to SQS: %s %s",
+                        failed_msg["Code"],
+                        failed_msg["Message"],
+                    )
+                    store_delivery_log(
+                        failure_data["context"],
+                        subscriber,
+                        success=False,
+                        topic_attributes=context.topic_attributes,
+                    )
+                    kwargs = {}
+                    if msg_attrs := failure_data["entry"].get("MessageAttributes"):
+                        kwargs["MessageAttributes"] = msg_attrs
+
+                    if msg_group_id := failure_data["context"].get("MessageGroupId"):
+                        kwargs["MessageGroupId"] = msg_group_id
+
+                    if msg_dedup_id := failure_data["context"].get("MessageDeduplicationId"):
+                        kwargs["MessageDeduplicationId"] = msg_dedup_id
+
+                    sns_error_to_dead_letter_queue(
+                        sns_subscriber=subscriber,
+                        message=failure_data["entry"]["MessageBody"],
+                        error=failed_msg["Code"],
+                        **kwargs,
+                    )
+
+        except Exception as exc:
+            LOG.info("Unable to forward SNS message to SQS: %s %s", exc, traceback.format_exc())
+            for message_ctx in context.messages:
+                store_delivery_log(
+                    message_ctx,
+                    subscriber,
+                    success=False,
+                    topic_attributes=context.topic_attributes,
+                )
+                msg_body = self.prepare_message(
+                    message_ctx, subscriber, topic_attributes=context.topic_attributes
+                )
+                kwargs = self.get_sqs_kwargs(message_ctx, subscriber)
+
+                sns_error_to_dead_letter_queue(
+                    subscriber,
+                    msg_body,
+                    str(exc),
+                    **kwargs,
+                )
+            if "NonExistentQueue" in str(exc):
+                LOG.debug("The SQS queue endpoint does not exist anymore")
+                # todo: if the queue got deleted, even if we recreate a queue with the same name/url
+                #  AWS won't send to it anymore. Would need to unsub/resub.
+                #  We should mark this subscription as "broken"
+
+
+class HttpTopicPublisher(TopicPublisher):
+    """
+    The HTTP(S) publisher is responsible for publishing the SNS message to an external HTTP(S) endpoint that subscribed
+    to the topic. It will create an HTTP POST request to be sent to the endpoint.
+    See https://docs.aws.amazon.com/sns/latest/dg/sns-http-https-endpoint-as-subscriber.html
+    """
+
+    def _publish(self, context: SnsPublishContext, subscriber: SnsSubscription):
+        message_context = context.message
+        message_body = self.prepare_message(
+            message_context, subscriber, topic_attributes=context.topic_attributes
+        )
+        try:
+            message_headers = {
+                "Content-Type": "text/plain; charset=UTF-8",
+                "Accept-Encoding": "gzip,deflate",
+                "User-Agent": "Amazon Simple Notification Service Agent",
+                # AWS headers according to
+                # https://docs.aws.amazon.com/sns/latest/dg/sns-message-and-json-formats.html#http-header
+                "x-amz-sns-message-type": message_context.type,
+                "x-amz-sns-message-id": message_context.message_id,
+                "x-amz-sns-topic-arn": subscriber["TopicArn"],
+            }
+            if message_context.type != SnsMessageType.SubscriptionConfirmation:
+                # while testing, we never received those from AWS, but the docs above state it should be there
+                message_headers["x-amz-sns-subscription-arn"] = subscriber["SubscriptionArn"]
+
+            # When raw message delivery is enabled, x-amz-sns-rawdelivery needs to be set to 'true'
+            # indicating that the message has been published without JSON formatting.
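+            # e.g. (illustrative) with raw delivery, the POST body is the bare message string,
+            # not the JSON envelope built by create_sns_message_body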
+            # https://docs.aws.amazon.com/sns/latest/dg/sns-large-payload-raw-message-delivery.html
+            if message_context.type == SnsMessageType.Notification:
+                if is_raw_message_delivery(subscriber):
+                    message_headers["x-amz-sns-rawdelivery"] = "true"
+                if content_type := self._get_content_type(subscriber, context.topic_attributes):
+                    message_headers["Content-Type"] = content_type
+
+            response = requests.post(
+                subscriber["Endpoint"],
+                headers=message_headers,
+                data=message_body,
+                verify=False,
+            )
+
+            delivery = {
+                "statusCode": response.status_code,
+                "providerResponse": response.content.decode("utf-8"),
+            }
+            store_delivery_log(
+                message_context,
+                subscriber,
+                success=True,
+                delivery=delivery,
+                topic_attributes=context.topic_attributes,
+            )
+
+            response.raise_for_status()
+        except Exception as exc:
+            LOG.info(
+                "Received error on sending SNS message, putting to DLQ (if configured): %s", exc
+            )
+            store_delivery_log(
+                message_context,
+                subscriber,
+                success=False,
+                topic_attributes=context.topic_attributes,
+            )
+            # AWS doesn't send to the DLQ if there's an error trying to deliver an UnsubscribeConfirmation msg
+            if message_context.type != SnsMessageType.UnsubscribeConfirmation:
+                sns_error_to_dead_letter_queue(subscriber, message_body, str(exc))
+
+    @staticmethod
+    def _get_content_type(subscriber: SnsSubscription, topic_attributes: dict) -> str | None:
+        # TODO: we need to load the DeliveryPolicy every time if there's one; we should probably save the loaded
+        #  policy on the subscription and dump it when requested instead, to be much faster.
+        #  Once the logic is implemented in moto, we would only need to fetch EffectiveDeliveryPolicy,
+        #  which would already have the value from the topic
+        if json_sub_delivery_policy := subscriber.get("DeliveryPolicy"):
+            sub_delivery_policy = json.loads(json_sub_delivery_policy)
+            if sub_content_type := sub_delivery_policy.get("requestPolicy", {}).get(
+                "headerContentType"
+            ):
+                return sub_content_type
+
+        if json_topic_delivery_policy := topic_attributes.get("delivery_policy"):
+            topic_delivery_policy = json.loads(json_topic_delivery_policy)
+            if not (
+                topic_content_type := topic_delivery_policy.get(subscriber["Protocol"].lower())
+            ):
+                return
+            if content_type := topic_content_type.get("defaultRequestPolicy", {}).get(
+                "headerContentType"
+            ):
+                return content_type
+
+
+class EmailJsonTopicPublisher(TopicPublisher):
+    """
+    The email-json publisher is responsible for publishing the SNS message to a subscribed email address.
+    The format of the message will be JSON-encoded, and "is meant for applications to programmatically process emails".
+    There is not a lot of AWS documentation on SNS emails.
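+    In LocalStack, delivery is emulated through SES (see `_publish` below), using the configurable
+    SNS_SES_SENDER_ADDRESS as the sender address when set.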
+ See https://docs.aws.amazon.com/sns/latest/dg/sns-email-notifications.html + But it is mentioned several times in the SNS FAQ (especially in #Transports section): + https://aws.amazon.com/sns/faqs/ + """ + + def _publish(self, context: SnsPublishContext, subscriber: SnsSubscription): + account_id = extract_account_id_from_arn(subscriber["Endpoint"]) + region = extract_region_from_arn(subscriber["Endpoint"]) + ses_client = connect_to(aws_access_key_id=account_id, region_name=region).ses + if endpoint := subscriber.get("Endpoint"): + # TODO: legacy value, replace by a more sane value in the future + # no-reply@sns-localstack.cloud or similar + sender = config.SNS_SES_SENDER_ADDRESS or "admin@localstack.com" + ses_client.verify_email_address(EmailAddress=endpoint) + ses_client.verify_email_address(EmailAddress=sender) + message_body = self.prepare_message( + context.message, subscriber, topic_attributes=context.topic_attributes + ) + ses_client.send_email( + Source=sender, + Message={ + "Body": {"Text": {"Data": message_body}}, + "Subject": {"Data": "SNS-Subscriber-Endpoint"}, + }, + Destination={"ToAddresses": [endpoint]}, + ) + + +class EmailTopicPublisher(EmailJsonTopicPublisher): + """ + The email publisher is responsible for publishing the SNS message to a subscribed email address. + The format of the message will be text-based, and "is meant for end-users/consumers and notifications are regular, + text-based messages which are easily readable." + See https://docs.aws.amazon.com/sns/latest/dg/sns-email-notifications.html + """ + + def prepare_message( + self, + message_context: SnsMessage, + subscriber: SnsSubscription, + topic_attributes: dict[str, str] = None, + ) -> str: + return message_context.message_content(subscriber["Protocol"]) + + +class ApplicationTopicPublisher(TopicPublisher): + """ + The application publisher is responsible for publishing the SNS message to a subscribed SNS application endpoint. + The SNS application endpoint represents a mobile app and device. + The application endpoint can be of different types, represented in `SnsApplicationPlatforms`. + This is not directly implemented yet in LocalStack, we save the message to be retrieved later from an internal + endpoint. + The `LEGACY_SNS_GCM_PUBLISHING` flag allows direct publishing to the GCM platform, with some caveats: + - It always publishes if the platform is GCM, and raises an exception if the credentials are wrong. 
+ - the Platform Application should be validated before and not while publishing + See https://docs.aws.amazon.com/sns/latest/dg/sns-mobile-application-as-subscriber.html + """ + + def _publish(self, context: SnsPublishContext, subscriber: SnsSubscription): + endpoint_arn = subscriber["Endpoint"] + message = self.prepare_message( + context.message, subscriber, topic_attributes=context.topic_attributes + ) + cache = context.store.platform_endpoint_messages.setdefault(endpoint_arn, []) + cache.append(message) + + if ( + config.LEGACY_SNS_GCM_PUBLISHING + and get_platform_type_from_endpoint_arn(endpoint_arn) == "GCM" + ): + self._legacy_publish_to_gcm(context, endpoint_arn) + + # TODO: rewrite the platform application publishing logic + # will need to validate credentials when creating platform app earlier, need thorough testing + + store_delivery_log( + context.message, subscriber, success=True, topic_attributes=context.topic_attributes + ) + + def prepare_message( + self, + message_context: SnsMessage, + subscriber: SnsSubscription, + topic_attributes: dict[str, str] = None, + ) -> dict[str, str]: + endpoint_arn = subscriber["Endpoint"] + platform_type = get_platform_type_from_endpoint_arn(endpoint_arn) + return { + "TargetArn": endpoint_arn, + "TopicArn": subscriber["TopicArn"], + "SubscriptionArn": subscriber["SubscriptionArn"], + "Message": message_context.message_content(protocol=platform_type), + "MessageAttributes": message_context.message_attributes, + "MessageStructure": message_context.message_structure, + "Subject": message_context.subject, + } + + @staticmethod + def _legacy_publish_to_gcm(context: SnsPublishContext, endpoint: str): + application_attributes, endpoint_attributes = get_attributes_for_application_endpoint( + endpoint + ) + send_message_to_gcm( + context=context, + app_attributes=application_attributes, + endpoint_attributes=endpoint_attributes, + ) + + +class SmsTopicPublisher(TopicPublisher): + """ + The SMS publisher is responsible for publishing the SNS message to a subscribed phone number. + This is not directly implemented yet in LocalStack, we only save the message. + # TODO: create an internal endpoint to retrieve SMS. 
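+    Saved messages can be inspected through the internal SMS retrospection endpoint
+    (see SNSServiceSMSMessagesApiResource in the provider).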
+ """ + + def _publish(self, context: SnsPublishContext, subscriber: SnsSubscription): + event = self.prepare_message( + context.message, subscriber, topic_attributes=context.topic_attributes + ) + context.store.sms_messages.append(event) + LOG.info( + "Delivering SMS message to %s: %s from topic: %s", + event["PhoneNumber"], + event["Message"], + event["TopicArn"], + ) + + # MOCK DATA + delivery = { + "phoneCarrier": "Mock Carrier", + "mnc": 270, + "priceInUSD": 0.00645, + "smsType": "Transactional", + "mcc": 310, + "providerResponse": "Message has been accepted by phone carrier", + "dwellTimeMsUntilDeviceAck": 200, + } + store_delivery_log(context.message, subscriber, success=True, delivery=delivery) + + def prepare_message( + self, + message_context: SnsMessage, + subscriber: SnsSubscription, + topic_attributes: dict[str, str] = None, + ) -> dict: + return { + "PhoneNumber": subscriber["Endpoint"], + "TopicArn": subscriber["TopicArn"], + "SubscriptionArn": subscriber["SubscriptionArn"], + "MessageId": message_context.message_id, + "Message": message_context.message_content(protocol=subscriber["Protocol"]), + "MessageAttributes": message_context.message_attributes, + "MessageStructure": message_context.message_structure, + "Subject": message_context.subject, + } + + +class FirehoseTopicPublisher(TopicPublisher): + """ + The Firehose publisher is responsible for publishing the SNS message to a subscribed Firehose delivery stream. + This allows you to "fan out Amazon SNS notifications to Amazon Simple Storage Service (Amazon S3), Amazon Redshift, + Amazon OpenSearch Service (OpenSearch Service), and to third-party service providers." + See https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html + """ + + def _publish(self, context: SnsPublishContext, subscriber: SnsSubscription): + message_body = self.prepare_message( + context.message, subscriber, topic_attributes=context.topic_attributes + ) + try: + region = extract_region_from_arn(subscriber["Endpoint"]) + if role_arn := subscriber.get("SubscriptionRoleArn"): + factory = connect_to.with_assumed_role( + role_arn=role_arn, service_principal=ServicePrincipal.sns, region_name=region + ) + else: + account_id = extract_account_id_from_arn(subscriber["Endpoint"]) + factory = connect_to(aws_access_key_id=account_id, region_name=region) + firehose_client = factory.firehose.request_metadata( + source_arn=subscriber["TopicArn"], service_principal=ServicePrincipal.sns + ) + endpoint = subscriber["Endpoint"] + if endpoint: + delivery_stream = extract_resource_from_arn(endpoint).split("/")[1] + firehose_client.put_record( + DeliveryStreamName=delivery_stream, Record={"Data": to_bytes(message_body)} + ) + store_delivery_log( + context.message, + subscriber, + success=True, + topic_attributes=context.topic_attributes, + ) + except Exception as exc: + LOG.info( + "Received error on sending SNS message, putting to DLQ (if configured): %s", exc + ) + # TODO: check delivery log + # TODO check DLQ? + + +class SmsPhoneNumberPublisher(EndpointPublisher): + """ + The SMS publisher is responsible for publishing the SNS message directly to a phone number. + This is not directly implemented yet in LocalStack, we only save the message. 
+ """ + + def _publish(self, context: SnsPublishContext, endpoint: str): + event = self.prepare_message(context.message, endpoint) + context.store.sms_messages.append(event) + LOG.info( + "Delivering SMS message to %s: %s", + event["PhoneNumber"], + event["Message"], + ) + + # TODO: check about delivery logs for individual call, need a real AWS test + # hard to know the format + + def prepare_message(self, message_context: SnsMessage, endpoint: str) -> dict: + return { + "PhoneNumber": endpoint, + "TopicArn": None, + "SubscriptionArn": None, + "MessageId": message_context.message_id, + "Message": message_context.message_content(protocol="sms"), + "MessageAttributes": message_context.message_attributes, + "MessageStructure": message_context.message_structure, + "Subject": message_context.subject, + } + + +class ApplicationEndpointPublisher(EndpointPublisher): + """ + The application publisher is responsible for publishing the SNS message directly to a registered SNS application + endpoint, without it being subscribed to a topic. + See `ApplicationTopicPublisher` for more information about Application Endpoint publishing. + """ + + def _publish(self, context: SnsPublishContext, endpoint: str): + message = self.prepare_message(context.message, endpoint) + cache = context.store.platform_endpoint_messages.setdefault(endpoint, []) + cache.append(message) + + if ( + config.LEGACY_SNS_GCM_PUBLISHING + and get_platform_type_from_endpoint_arn(endpoint) == "GCM" + ): + self._legacy_publish_to_gcm(context, endpoint) + + # TODO: rewrite the platform application publishing logic + # will need to validate credentials when creating platform app earlier, need thorough testing + + # TODO: see about delivery log for individual endpoint message, need credentials for testing + # store_delivery_log(subscriber, context, success=True) + + def prepare_message(self, message_context: SnsMessage, endpoint: str) -> Union[str, Dict]: + platform_type = get_platform_type_from_endpoint_arn(endpoint) + return { + "TargetArn": endpoint, + "TopicArn": "", + "SubscriptionArn": "", + "Message": message_context.message_content(protocol=platform_type), + "MessageAttributes": message_context.message_attributes, + "MessageStructure": message_context.message_structure, + "Subject": message_context.subject, + "MessageId": message_context.message_id, + } + + @staticmethod + def _legacy_publish_to_gcm(context: SnsPublishContext, endpoint: str): + application_attributes, endpoint_attributes = get_attributes_for_application_endpoint( + endpoint + ) + send_message_to_gcm( + context=context, + app_attributes=application_attributes, + endpoint_attributes=endpoint_attributes, + ) + + +def get_platform_type_from_endpoint_arn(endpoint_arn: str) -> SnsApplicationPlatforms: + return endpoint_arn.rsplit("/", maxsplit=3)[1] # noqa + + +def get_application_platform_arn_from_endpoint_arn(endpoint_arn: str) -> str: + """ + Retrieve the application_platform information from the endpoint_arn to build the application platform ARN + The format of the endpoint is: + `arn:aws:sns:{region}:{account_id}:endpoint/{platform_type}/{application_name}/{endpoint_id}` + :param endpoint_arn: str + :return: application_platform_arn: str + """ + parsed_arn = parse_arn(endpoint_arn) + + _, platform_type, app_name, _ = parsed_arn["resource"].split("/") + base_arn = f"arn:aws:sns:{parsed_arn['region']}:{parsed_arn['account']}" + return f"{base_arn}:app/{platform_type}/{app_name}" + + +def get_attributes_for_application_endpoint(endpoint_arn: str) -> Tuple[Dict, Dict]: + 
""" + Retrieve the attributes necessary to send a message directly to the platform (credentials and token) + :param endpoint_arn: + :return: + """ + account_id = extract_account_id_from_arn(endpoint_arn) + region_name = extract_region_from_arn(endpoint_arn) + + sns_client = connect_to(aws_access_key_id=account_id, region_name=region_name).sns + + # TODO: we should access this from the moto store directly + endpoint_attributes = sns_client.get_endpoint_attributes(EndpointArn=endpoint_arn) + + app_platform_arn = get_application_platform_arn_from_endpoint_arn(endpoint_arn) + app = sns_client.get_platform_application_attributes(PlatformApplicationArn=app_platform_arn) + + return app.get("Attributes", {}), endpoint_attributes.get("Attributes", {}) + + +def send_message_to_gcm( + context: SnsPublishContext, app_attributes: Dict[str, str], endpoint_attributes: Dict[str, str] +) -> None: + """ + Send the message directly to GCM, with the credentials used when creating the PlatformApplication and the Endpoint + :param context: SnsPublishContext + :param app_attributes: ApplicationPlatform attributes, contains PlatformCredential for GCM + :param endpoint_attributes: Endpoint attributes, contains Token that represent the mobile endpoint + :return: + """ + server_key = app_attributes.get("PlatformCredential", "") + token = endpoint_attributes.get("Token", "") + # message is supposed to be a JSON string to GCM + json_message = context.message.message_content("GCM") + data = json.loads(json_message) + + data["to"] = token + headers = {"Authorization": f"key={server_key}", "Content-type": "application/json"} + + response = requests.post( + sns_constants.GCM_URL, + headers=headers, + data=json.dumps(data), + ) + if response.status_code != 200: + LOG.warning( + "Platform GCM returned response %s with content %s", + response.status_code, + response.content, + ) + + +def compute_canonical_string(message: dict, notification_type: str) -> str: + """ + The notification message signature is computed using the SHA1withRSA algorithm on a "canonical string" – a UTF-8 + string which observes certain conventions including the sort order of included fields. (Please note that any + deviation in the construction of the message string described below such as excluding a field, including an extra + space or changing sort order will result in a different validation signature which will not match the pre-computed + message signature.) 
+ See https://docs.aws.amazon.com/sns/latest/dg/sns-verify-signature-of-message.html + """ + # create the canonical string + if notification_type == SnsMessageType.Notification: + fields = ["Message", "MessageId", "Subject", "Timestamp", "TopicArn", "Type"] + elif notification_type in ( + SnsMessageType.SubscriptionConfirmation, + SnsMessageType.UnsubscribeConfirmation, + ): + fields = ["Message", "MessageId", "SubscribeURL", "Timestamp", "Token", "TopicArn", "Type"] + else: + return "" + + # create the canonical string + string_to_sign = "".join([f"{f}\n{message[f]}\n" for f in fields if f in message]) + return string_to_sign + + +def get_message_signature(canonical_string: str, signature_version: str) -> str: + chosen_hash = hashes.SHA256() if signature_version == "2" else hashes.SHA1() + message_signature = SNS_SERVER_PRIVATE_KEY.sign( + to_bytes(canonical_string), + padding=padding.PKCS1v15(), + algorithm=chosen_hash, + ) + # base64 encode the signature + encoded_signature = base64.b64encode(message_signature) + return to_str(encoded_signature) + + +def create_sns_message_body( + message_context: SnsMessage, + subscriber: SnsSubscription, + topic_attributes: dict[str, str] = None, +) -> str: + message_type = message_context.type or "Notification" + protocol = subscriber["Protocol"] + message_content = message_context.message_content(protocol) + + if message_type == "Notification" and is_raw_message_delivery(subscriber): + return message_content + + external_url = get_cert_base_url() + + data = { + "Type": message_type, + "MessageId": message_context.message_id, + "TopicArn": subscriber["TopicArn"], + "Message": message_content, + "Timestamp": timestamp_millis(), + } + + if message_type == SnsMessageType.Notification: + unsubscribe_url = create_unsubscribe_url(external_url, subscriber["SubscriptionArn"]) + data["UnsubscribeURL"] = unsubscribe_url + + elif message_type in ( + SnsMessageType.SubscriptionConfirmation, + SnsMessageType.UnsubscribeConfirmation, + ): + data["Token"] = message_context.token + data["SubscribeURL"] = create_subscribe_url( + external_url, subscriber["TopicArn"], message_context.token + ) + + if message_context.subject: + data["Subject"] = message_context.subject + + if message_context.message_attributes: + data["MessageAttributes"] = prepare_message_attributes(message_context.message_attributes) + + # FIFO topics do not add the signature in the message + if not subscriber.get("TopicArn", "").endswith(".fifo"): + signature_version = ( + topic_attributes.get("signature_version", "1") if topic_attributes else "1" + ) + canonical_string = compute_canonical_string(data, message_type) + signature = get_message_signature(canonical_string, signature_version=signature_version) + data.update( + { + "SignatureVersion": signature_version, + "Signature": signature, + "SigningCertURL": f"{external_url}{sns_constants.SNS_CERT_ENDPOINT}", + } + ) + else: + data["SequenceNumber"] = message_context.sequencer_number + + return json.dumps(data) + + +def prepare_message_attributes( + message_attributes: MessageAttributeMap, +) -> Dict[str, Dict[str, str]]: + attributes = {} + if not message_attributes: + return attributes + # TODO: Number type is not supported for Lambda subscriptions, passed as String + # do conversion here + for attr_name, attr in message_attributes.items(): + data_type = attr["DataType"] + if data_type.startswith("Binary"): + # binary payload in base64 encoded by AWS, UTF-8 for JSON + # https://docs.aws.amazon.com/sns/latest/api/API_MessageAttributeValue.html + 
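+                # e.g. (illustrative) a BinaryValue of b"\x01\x02" is delivered as "AQI="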
val = base64.b64encode(attr["BinaryValue"]).decode()
+        else:
+            val = attr.get("StringValue")
+
+        attributes[attr_name] = {
+            "Type": data_type,
+            "Value": val,
+        }
+    return attributes
+
+
+def is_raw_message_delivery(subscriber: SnsSubscription) -> bool:
+    return subscriber.get("RawMessageDelivery") in ("true", True, "True")
+
+
+def is_fifo_topic(subscriber: SnsSubscription) -> bool:
+    return subscriber.get("TopicArn", "").endswith(".fifo")
+
+
+def store_delivery_log(
+    message_context: SnsMessage,
+    subscriber: SnsSubscription,
+    success: bool,
+    topic_attributes: dict[str, str] = None,
+    delivery: dict = None,
+):
+    """
+    Store the delivery logs in CloudWatch, configured as TopicAttributes
+    See: https://docs.aws.amazon.com/sns/latest/dg/sns-topic-attributes.html#msg-status-sdk
+
+    TODO: for Application, you can also configure Platform attributes:
+    See: https://docs.aws.amazon.com/sns/latest/dg/sns-msg-status.html
+    """
+    # TODO: effectively use `SuccessFeedbackSampleRate` to sample delivery logs
+    # TODO: validate format of `delivery` for each Publisher
+    # map Protocol to TopicAttribute
+    available_delivery_logs_services = {
+        "http",
+        "https",
+        "firehose",
+        "lambda",
+        "application",
+        "sqs",
+    }
+    # SMS is a special case: https://docs.aws.amazon.com/sns/latest/dg/sms_stats_cloudwatch.html
+    # it seems you need to configure it on the Console; leave it on by default in LocalStack for now
+    protocol = subscriber.get("Protocol")
+
+    if protocol != "sms":
+        if protocol not in available_delivery_logs_services or not topic_attributes:
+            # this service does not have the DeliveryLogs feature, return
+            return
+
+        # TODO: for now, those attributes are stored as attributes of the moto Topic model in snake case;
+        #  we should see about moving this into our store instead
+        role_type = "success" if success else "failure"
+        topic_attribute = f"{protocol}_{role_type}_feedback_role_arn"
+
+        # check if the topic has the right attribute and a role, otherwise return
+        # TODO: on purpose not using walrus operator to show that we get the RoleArn here for CloudWatch
+        role_arn = topic_attributes.get(topic_attribute)
+        if not role_arn:
+            return
+
+    if not is_api_enabled("logs"):
+        LOG.warning(
+            "Service 'logs' is not enabled: skip storing SNS delivery logs. "
+            "Please check your 'SERVICES' configuration variable."
+        )
+        return
+
+    log_group_name = subscriber.get("TopicArn", "")
+    for partition in PARTITION_NAMES:
+        log_group_name = log_group_name.replace(f"arn:{partition}:", "")
+    log_group_name = log_group_name.replace(":", "/")
+    log_stream_name = long_uid()
+    invocation_time = int(time.time() * 1000)
+
+    delivery = not_none_or(delivery, {})
+    delivery["deliveryId"] = long_uid()
+    delivery["destination"] = subscriber.get("Endpoint", "")
+    delivery["dwellTimeMs"] = 200
+    if not success:
+        delivery["attempts"] = 1
+
+    if (protocol := subscriber["Protocol"]) == "application":
+        protocol = get_platform_type_from_endpoint_arn(subscriber["Endpoint"])
+
+    message = message_context.message_content(protocol)
+    delivery_log = {
+        "notification": {
+            "messageMD5Sum": md5(message),
+            "messageId": message_context.message_id,
+            "topicArn": subscriber.get("TopicArn"),
+            "timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
+        },
+        "delivery": delivery,
+        "status": "SUCCESS" if success else "FAILURE",
+    }
+
+    log_output = json.dumps(delivery_log)
+
+    # TODO: use the account/region from the role in the TopicAttribute instead, as this is what AWS does
+    account_id = extract_account_id_from_arn(subscriber["TopicArn"])
+    region_name = extract_region_from_arn(subscriber["TopicArn"])
+    logs_client = connect_to(aws_access_key_id=account_id, region_name=region_name).logs
+
+    return store_cloudwatch_logs(
+        logs_client, log_group_name, log_stream_name, log_output, invocation_time
+    )
+
+
+def get_cert_base_url() -> str:
+    if config.SNS_CERT_URL_HOST:
+        return f"https://{config.SNS_CERT_URL_HOST}"
+
+    return external_service_url().rstrip("/")
+
+
+def create_subscribe_url(external_url, topic_arn, subscription_token):
+    return f"{external_url}/?Action=ConfirmSubscription&TopicArn={topic_arn}&Token={subscription_token}"
+
+
+def create_unsubscribe_url(external_url, subscription_arn):
+    return f"{external_url}/?Action=Unsubscribe&SubscriptionArn={subscription_arn}"
+
+
+class PublishDispatcher:
+    """
+    The PublishDispatcher is responsible for dispatching the publishing of SNS messages asynchronously to worker
+    threads via a `ThreadPoolExecutor`, depending on the SNS subscriber protocol and filter policy.
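+
+    Standard topics are dispatched via a shared `ThreadPoolExecutor`, while FIFO topics go through a
+    `TopicPartitionedThreadPoolExecutor` so that messages of the same topic are delivered in publish order.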
+ """ + + topic_notifiers = { + "http": HttpTopicPublisher(), + "https": HttpTopicPublisher(), + "email": EmailTopicPublisher(), + "email-json": EmailJsonTopicPublisher(), + "sms": SmsTopicPublisher(), + "sqs": SqsTopicPublisher(), + "application": ApplicationTopicPublisher(), + "lambda": LambdaTopicPublisher(), + "firehose": FirehoseTopicPublisher(), + } + batch_topic_notifiers = {"sqs": SqsBatchTopicPublisher()} + sms_notifier = SmsPhoneNumberPublisher() + application_notifier = ApplicationEndpointPublisher() + + subscription_filter = SubscriptionFilter() + + def __init__(self, num_thread: int = 10): + self.executor = ThreadPoolExecutor(num_thread, thread_name_prefix="sns_pub") + self.topic_partitioned_executor = TopicPartitionedThreadPoolExecutor( + max_workers=num_thread, thread_name_prefix="sns_pub_fifo" + ) + + def shutdown(self): + self.executor.shutdown(wait=False) + self.topic_partitioned_executor.shutdown(wait=False) + + def _should_publish( + self, + subscription_filter_policy: dict[str, dict], + message_ctx: SnsMessage, + subscriber: SnsSubscription, + ): + """ + Validate that the message should be relayed to the subscriber, depending on the filter policy and the + subscription status + """ + # FIXME: for now, send to email even if not confirmed, as we do not send the token to confirm to email + # subscriptions + if ( + not subscriber["PendingConfirmation"] == "false" + and "email" not in subscriber["Protocol"] + ): + return + + subscriber_arn = subscriber["SubscriptionArn"] + filter_policy = subscription_filter_policy.get(subscriber_arn) + if not filter_policy: + return True + # default value is `MessageAttributes` + match subscriber.get("FilterPolicyScope", "MessageAttributes"): + case "MessageAttributes": + return self.subscription_filter.check_filter_policy_on_message_attributes( + filter_policy=filter_policy, message_attributes=message_ctx.message_attributes + ) + case "MessageBody": + return self.subscription_filter.check_filter_policy_on_message_body( + filter_policy=filter_policy, + message_body=message_ctx.message_content(subscriber["Protocol"]), + ) + + def publish_to_topic(self, ctx: SnsPublishContext, topic_arn: str) -> None: + subscriptions = ctx.store.get_topic_subscriptions(topic_arn) + for subscriber in subscriptions: + if self._should_publish(ctx.store.subscription_filter_policy, ctx.message, subscriber): + notifier = self.topic_notifiers[subscriber["Protocol"]] + LOG.debug( + "Topic '%s' publishing '%s' to subscribed '%s' with protocol '%s' (subscription '%s')", + topic_arn, + ctx.message.message_id, + subscriber.get("Endpoint"), + subscriber["Protocol"], + subscriber["SubscriptionArn"], + ) + self._submit_notification(notifier, ctx, subscriber) + + def publish_batch_to_topic(self, ctx: SnsBatchPublishContext, topic_arn: str) -> None: + subscriptions = ctx.store.get_topic_subscriptions(topic_arn) + for subscriber in subscriptions: + protocol = subscriber["Protocol"] + notifier = self.batch_topic_notifiers.get(protocol) + # does the notifier supports batching natively? 
+            # for now, only SQS supports it
+            if notifier:
+                subscriber_ctx = ctx
+                messages_amount_before_filtering = len(ctx.messages)
+                filtered_messages = [
+                    message
+                    for message in ctx.messages
+                    if self._should_publish(
+                        ctx.store.subscription_filter_policy, message, subscriber
+                    )
+                ]
+                if not filtered_messages:
+                    LOG.debug(
+                        "No messages match the filter policy, not publishing batch from topic '%s' to subscription '%s'",
+                        topic_arn,
+                        subscriber["SubscriptionArn"],
+                    )
+                    continue
+
+                messages_amount = len(filtered_messages)
+                if messages_amount != messages_amount_before_filtering:
+                    LOG.debug(
+                        "After applying the subscription filter, %s out of %s message(s) will be sent to '%s'",
+                        messages_amount,
+                        messages_amount_before_filtering,
+                        subscriber["SubscriptionArn"],
+                    )
+                    # We need to copy the context so that we do not overwrite the messages after filtering them;
+                    # otherwise we would filter on the same context for different subscribers
+                    subscriber_ctx = copy.copy(ctx)
+                    subscriber_ctx.messages = filtered_messages
+
+                LOG.debug(
+                    "Topic '%s' batch publishing %s messages to subscribed '%s' with protocol '%s' (subscription '%s')",
+                    topic_arn,
+                    messages_amount,
+                    subscriber.get("Endpoint"),
+                    subscriber["Protocol"],
+                    subscriber["SubscriptionArn"],
+                )
+                self._submit_notification(notifier, subscriber_ctx, subscriber)
+            else:
+                # if the notifier has no batch support, fall back to sending the messages sequentially
+                notifier = self.topic_notifiers[subscriber["Protocol"]]
+                for message in ctx.messages:
+                    if self._should_publish(
+                        ctx.store.subscription_filter_policy, message, subscriber
+                    ):
+                        individual_ctx = SnsPublishContext(
+                            message=message, store=ctx.store, request_headers=ctx.request_headers
+                        )
+                        LOG.debug(
+                            "Topic '%s' batch publishing '%s' to subscribed '%s' with protocol '%s' (subscription '%s')",
+                            topic_arn,
+                            individual_ctx.message.message_id,
+                            subscriber.get("Endpoint"),
+                            subscriber["Protocol"],
+                            subscriber["SubscriptionArn"],
+                        )
+                        self._submit_notification(notifier, individual_ctx, subscriber)
+
+    def _submit_notification(
+        self, notifier, ctx: SnsPublishContext | SnsBatchPublishContext, subscriber: SnsSubscription
+    ):
+        if (topic_arn := subscriber.get("TopicArn", "")).endswith(".fifo"):
+            # TODO: we still need to implement message deduplication on the topic level with `should_publish` for FIFO
+            self.topic_partitioned_executor.submit(
+                notifier.publish, topic_arn, context=ctx, subscriber=subscriber
+            )
+        else:
+            self.executor.submit(notifier.publish, context=ctx, subscriber=subscriber)
+
+    def publish_to_phone_number(self, ctx: SnsPublishContext, phone_number: str) -> None:
+        LOG.debug(
+            "Publishing '%s' to phone number '%s' with protocol 'sms'",
+            ctx.message.message_id,
+            phone_number,
+        )
+        self.executor.submit(self.sms_notifier.publish, context=ctx, endpoint=phone_number)
+
+    def publish_to_application_endpoint(self, ctx: SnsPublishContext, endpoint_arn: str) -> None:
+        LOG.debug(
+            "Publishing '%s' to application endpoint '%s'",
+            ctx.message.message_id,
+            endpoint_arn,
+        )
+        self.executor.submit(self.application_notifier.publish, context=ctx, endpoint=endpoint_arn)
+
+    def publish_to_topic_subscriber(
+        self, ctx: SnsPublishContext, topic_arn: str, subscription_arn: str
+    ) -> None:
+        """
+        This allows us to publish messages specific to certain HTTP(S) endpoints, namely
+        `SubscriptionConfirmation` and `UnsubscribeConfirmation`. Those are "topic" messages in shape, but are sent
+        only to the endpoint that is subscribing or unsubscribing.
+        This is only used internally.
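+        Unlike `publish_to_topic`, no filter-policy evaluation happens here: the message is handed
+        directly to the subscriber's notifier.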
+ Note: might be needed for multi account SQS and Lambda `SubscriptionConfirmation` + :param ctx: SnsPublishContext + :param topic_arn: the topic of the subscriber + :param subscription_arn: the ARN of the subscriber + :return: None + """ + subscriber = ctx.store.subscriptions.get(subscription_arn) + if not subscriber: + return + notifier = self.topic_notifiers[subscriber["Protocol"]] + LOG.debug( + "Topic '%s' publishing '%s' to subscribed '%s' with protocol '%s' (Id='%s', Subscription='%s')", + topic_arn, + ctx.message.type, + subscription_arn, + subscriber["Protocol"], + ctx.message.message_id, + subscriber.get("Endpoint"), + ) + self.executor.submit(notifier.publish, context=ctx, subscriber=subscriber) diff --git a/localstack/utils/__init__.py b/localstack-core/localstack/services/sns/resource_providers/__init__.py similarity index 100% rename from localstack/utils/__init__.py rename to localstack-core/localstack/services/sns/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription.py b/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription.py new file mode 100644 index 0000000000000..650df889dff02 --- /dev/null +++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription.py @@ -0,0 +1,178 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack import config +from localstack.aws.connect import ServiceLevelClientFactory +from localstack.services.cloudformation.resource_provider import ( + ConvertingInternalClientFactory, + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SNSSubscriptionProperties(TypedDict): + Protocol: Optional[str] + TopicArn: Optional[str] + DeliveryPolicy: Optional[dict] + Endpoint: Optional[str] + FilterPolicy: Optional[dict] + FilterPolicyScope: Optional[str] + Id: Optional[str] + RawMessageDelivery: Optional[bool] + RedrivePolicy: Optional[dict] + Region: Optional[str] + ReplayPolicy: Optional[dict] + SubscriptionRoleArn: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SNSSubscriptionProvider(ResourceProvider[SNSSubscriptionProperties]): + TYPE = "AWS::SNS::Subscription" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SNSSubscriptionProperties], + ) -> ProgressEvent[SNSSubscriptionProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - TopicArn + - Protocol + + Create-only properties: + - /properties/Endpoint + - /properties/Protocol + - /properties/TopicArn + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + sns = self._get_client(request).sns + + params = util.select_attributes(model=model, params=["TopicArn", "Protocol", "Endpoint"]) + + attrs = [ + "DeliveryPolicy", + "FilterPolicy", + "FilterPolicyScope", + "RawMessageDelivery", + "RedrivePolicy", + ] + attributes = {a: self.attr_val(model[a]) for a in attrs if a in model} + + if attributes: + params["Attributes"] = attributes + + result = sns.subscribe(**params) + model["Id"] = result["SubscriptionArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SNSSubscriptionProperties], + ) -> ProgressEvent[SNSSubscriptionProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SNSSubscriptionProperties], + ) -> ProgressEvent[SNSSubscriptionProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + sns = request.aws_client_factory.sns + + sns.unsubscribe(SubscriptionArn=model["Id"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SNSSubscriptionProperties], + ) -> ProgressEvent[SNSSubscriptionProperties]: + """ + Update a resource + + """ + model = request.desired_state + model["Id"] = request.previous_state["Id"] + sns = self._get_client(request).sns + + attrs = [ + "DeliveryPolicy", + "FilterPolicy", + "FilterPolicyScope", + "RawMessageDelivery", + "RedrivePolicy", + ] + for a in attrs: + if a in model: + sns.set_subscription_attributes( + SubscriptionArn=model["Id"], + AttributeName=a, + AttributeValue=self.attr_val(model[a]), + ) + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + @staticmethod + def attr_val(val): + return json.dumps(val) if isinstance(val, dict) else str(val) + + @staticmethod + def _get_client( + request: ResourceRequest[SNSSubscriptionProperties], + ) -> ServiceLevelClientFactory: + model = request.desired_state + if subscription_region := model.get("Region"): + # FIXME: this is hacky, maybe we should have access to the original parameters for the `aws_client_factory` + # as we now need to manually use them + # Not all internal CloudFormation requests will be directed to the same region and account + # maybe we could need to expose a proper client factory where we can override some parameters like the + # Region + factory = ConvertingInternalClientFactory(use_ssl=config.DISTRIBUTED_MODE) + client_params = dict(request.aws_client_factory._client_creation_params) + client_params["region_name"] = subscription_region + service_factory = factory(**client_params) + else: + service_factory = request.aws_client_factory + + return service_factory diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription.schema.json b/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription.schema.json new file mode 100644 index 0000000000000..e4c5a9e883b5b --- /dev/null +++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription.schema.json 
@@ -0,0 +1,58 @@ +{ + "typeName": "AWS::SNS::Subscription", + "description": "Resource Type definition for AWS::SNS::Subscription", + "additionalProperties": false, + "properties": { + "ReplayPolicy": { + "type": "object" + }, + "RawMessageDelivery": { + "type": "boolean" + }, + "Endpoint": { + "type": "string" + }, + "FilterPolicy": { + "type": "object" + }, + "TopicArn": { + "type": "string" + }, + "RedrivePolicy": { + "type": "object" + }, + "DeliveryPolicy": { + "type": "object" + }, + "Region": { + "type": "string" + }, + "SubscriptionRoleArn": { + "type": "string" + }, + "FilterPolicyScope": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "Protocol": { + "type": "string" + } + }, + "required": [ + "TopicArn", + "Protocol" + ], + "createOnlyProperties": [ + "/properties/Endpoint", + "/properties/Protocol", + "/properties/TopicArn" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription_plugin.py b/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription_plugin.py new file mode 100644 index 0000000000000..01e23a1f30aed --- /dev/null +++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_subscription_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SNSSubscriptionProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SNS::Subscription" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.sns.resource_providers.aws_sns_subscription import ( + SNSSubscriptionProvider, + ) + + self.factory = SNSSubscriptionProvider diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.py b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.py new file mode 100644 index 0000000000000..00b68044ae750 --- /dev/null +++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.py @@ -0,0 +1,188 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.strings import canonicalize_bool_to_str, short_uid + + +class SNSTopicProperties(TypedDict): + ContentBasedDeduplication: Optional[bool] + DataProtectionPolicy: Optional[dict] + DisplayName: Optional[str] + FifoTopic: Optional[bool] + KmsMasterKeyId: Optional[str] + SignatureVersion: Optional[str] + Subscription: Optional[list[Subscription]] + Tags: Optional[list[Tag]] + TopicArn: Optional[str] + TopicName: Optional[str] + TracingConfig: Optional[str] + + +class Subscription(TypedDict): + Endpoint: Optional[str] + Protocol: Optional[str] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SNSTopicProvider(ResourceProvider[SNSTopicProperties]): + TYPE = "AWS::SNS::Topic" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[SNSTopicProperties],
+    ) -> ProgressEvent[SNSTopicProperties]:
+        """
+        Create a new resource.
+
+        Primary identifier fields:
+          - /properties/TopicArn
+
+
+
+        Create-only properties:
+          - /properties/TopicName
+          - /properties/FifoTopic
+
+        Read-only properties:
+          - /properties/TopicArn
+
+        IAM permissions required:
+          - sns:CreateTopic
+          - sns:TagResource
+          - sns:Subscribe
+          - sns:GetTopicAttributes
+          - sns:PutDataProtectionPolicy
+
+        """
+        model = request.desired_state
+        sns = request.aws_client_factory.sns
+
+        attributes = {
+            k: v
+            for k, v in model.items()
+            if v is not None
+            if k not in ["TopicName", "Subscription", "Tags"]
+        }
+        if (fifo_topic := attributes.get("FifoTopic")) is not None:
+            attributes["FifoTopic"] = canonicalize_bool_to_str(fifo_topic)
+
+        if archive_policy := attributes.get("ArchivePolicy"):
+            archive_policy["MessageRetentionPeriod"] = str(archive_policy["MessageRetentionPeriod"])
+            attributes["ArchivePolicy"] = json.dumps(archive_policy)
+
+        if (content_based_dedup := attributes.get("ContentBasedDeduplication")) is not None:
+            attributes["ContentBasedDeduplication"] = canonicalize_bool_to_str(content_based_dedup)
+
+        # Default name
+        if model.get("TopicName") is None:
+            model["TopicName"] = (
+                f"topic-{short_uid()}.fifo" if fifo_topic else f"topic-{short_uid()}"
+            )
+
+        create_sns_response = sns.create_topic(Name=model["TopicName"], Attributes=attributes)
+        model["TopicArn"] = create_sns_response["TopicArn"]
+
+        # now we add the subscriptions, if they exist
+        for subscription in model.get("Subscription", []):
+            sns.subscribe(
+                TopicArn=model["TopicArn"],
+                Protocol=subscription["Protocol"],
+                Endpoint=subscription["Endpoint"],
+            )
+        if tags := model.get("Tags"):
+            sns.tag_resource(ResourceArn=model["TopicArn"], Tags=tags)
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def read(
+        self,
+        request: ResourceRequest[SNSTopicProperties],
+    ) -> ProgressEvent[SNSTopicProperties]:
+        """
+        Fetch resource information
+
+        IAM permissions required:
+          - sns:GetTopicAttributes
+          - sns:ListTagsForResource
+          - sns:ListSubscriptionsByTopic
+          - sns:GetDataProtectionPolicy
+        """
+        model = request.desired_state
+        topic_arn = model["TopicArn"]
+
+        describe_res = request.aws_client_factory.sns.get_topic_attributes(TopicArn=topic_arn)[
+            "Attributes"
+        ]
+        return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=describe_res)
+
+    def delete(
+        self,
+        request: ResourceRequest[SNSTopicProperties],
+    ) -> ProgressEvent[SNSTopicProperties]:
+        """
+        Delete a resource
+
+        IAM permissions required:
+          - sns:DeleteTopic
+        """
+        # FIXME: This appears to incorrectly assume TopicArn would be provided.
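+        #  a hypothetical, more defensive variant could fall back to the previous state, e.g.:
+        #  topic_arn = model.get("TopicArn") or (request.previous_state or {}).get("TopicArn")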
+ model = request.desired_state + sns = request.aws_client_factory.sns + sns.delete_topic(TopicArn=model["TopicArn"]) + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={}) + + def update( + self, + request: ResourceRequest[SNSTopicProperties], + ) -> ProgressEvent[SNSTopicProperties]: + """ + Update a resource + + IAM permissions required: + - sns:SetTopicAttributes + - sns:TagResource + - sns:UntagResource + - sns:Subscribe + - sns:Unsubscribe + - sns:GetTopicAttributes + - sns:ListTagsForResource + - sns:ListSubscriptionsByTopic + - sns:GetDataProtectionPolicy + - sns:PutDataProtectionPolicy + """ + raise NotImplementedError + + def list( + self, + request: ResourceRequest[SNSTopicProperties], + ) -> ProgressEvent[SNSTopicProperties]: + resources = request.aws_client_factory.sns.list_topics() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + SNSTopicProperties(TopicArn=topic["TopicArn"]) for topic in resources["Topics"] + ], + ) diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.schema.json b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.schema.json new file mode 100644 index 0000000000000..222634c5503a3 --- /dev/null +++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic.schema.json @@ -0,0 +1,156 @@ +{ + "typeName": "AWS::SNS::Topic", + "description": "Resource Type definition for AWS::SNS::Topic", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-sns", + "additionalProperties": false, + "properties": { + "DisplayName": { + "description": "The display name to use for an Amazon SNS topic with SMS subscriptions.", + "type": "string" + }, + "KmsMasterKeyId": { + "description": "The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.\n\nThis property applies only to [server-side-encryption](https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html).", + "type": "string" + }, + "DataProtectionPolicy": { + "description": "The body of the policy document you want to use for this topic.\n\nYou can only add one policy per topic.\n\nThe policy must be in JSON string format.\n\nLength Constraints: Maximum length of 30720", + "type": "object" + }, + "Subscription": { + "description": "The SNS subscriptions (endpoints) for this topic.", + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Subscription" + } + }, + "FifoTopic": { + "description": "Set to true to create a FIFO topic.", + "type": "boolean" + }, + "ContentBasedDeduplication": { + "description": "Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. 
If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action.\n\nWhen you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).\n\n(Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action.\n\n",
+      "type": "boolean"
+    },
+    "Tags": {
+      "type": "array",
+      "uniqueItems": false,
+      "insertionOrder": false,
+      "items": {
+        "$ref": "#/definitions/Tag"
+      }
+    },
+    "TopicName": {
+      "description": "The name of the topic you want to create. Topic names must include only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long. FIFO topic names must end with .fifo.\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the topic name. For more information, see Name Type.",
+      "type": "string"
+    },
+    "TopicArn": {
+      "type": "string"
+    },
+    "SignatureVersion": {
+      "description": "Version of the Amazon SNS signature used. If the SignatureVersion is 1, Signature is a Base64-encoded SHA1withRSA signature of the Message, MessageId, Type, Timestamp, and TopicArn values. If the SignatureVersion is 2, Signature is a Base64-encoded SHA256withRSA signature of the Message, MessageId, Type, Timestamp, and TopicArn values.",
+      "type": "string"
+    },
+    "TracingConfig": {
+      "description": "Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an SNS publisher to its subscriptions. If set to Active, SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. Only supported on standard topics.",
+      "type": "string"
+    }
+  },
+  "definitions": {
+    "Tag": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "Key": {
+          "type": "string",
+          "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_`, `.`, `/`, `=`, `+`, and `-`."
+        },
+        "Value": {
+          "type": "string",
+          "description": "The value for the tag. You can specify a value that is 0 to 256 characters in length."
+ } + }, + "required": [ + "Value", + "Key" + ] + }, + "Subscription": { + "type": "object", + "additionalProperties": false, + "properties": { + "Endpoint": { + "type": "string" + }, + "Protocol": { + "type": "string" + } + }, + "required": [ + "Endpoint", + "Protocol" + ] + } + }, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "createOnlyProperties": [ + "/properties/TopicName", + "/properties/FifoTopic" + ], + "primaryIdentifier": [ + "/properties/TopicArn" + ], + "readOnlyProperties": [ + "/properties/TopicArn" + ], + "handlers": { + "create": { + "permissions": [ + "sns:CreateTopic", + "sns:TagResource", + "sns:Subscribe", + "sns:GetTopicAttributes", + "sns:PutDataProtectionPolicy" + ] + }, + "read": { + "permissions": [ + "sns:GetTopicAttributes", + "sns:ListTagsForResource", + "sns:ListSubscriptionsByTopic", + "sns:GetDataProtectionPolicy" + ] + }, + "update": { + "permissions": [ + "sns:SetTopicAttributes", + "sns:TagResource", + "sns:UntagResource", + "sns:Subscribe", + "sns:Unsubscribe", + "sns:GetTopicAttributes", + "sns:ListTagsForResource", + "sns:ListSubscriptionsByTopic", + "sns:GetDataProtectionPolicy", + "sns:PutDataProtectionPolicy" + ] + }, + "delete": { + "permissions": [ + "sns:DeleteTopic" + ] + }, + "list": { + "permissions": [ + "sns:ListTopics" + ] + } + } +} diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic_plugin.py b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic_plugin.py new file mode 100644 index 0000000000000..de6a26a9482c5 --- /dev/null +++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topic_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SNSTopicProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SNS::Topic" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.sns.resource_providers.aws_sns_topic import SNSTopicProvider + + self.factory = SNSTopicProvider diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy.py b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy.py new file mode 100644 index 0000000000000..f86bee8c16be0 --- /dev/null +++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy.py @@ -0,0 +1,121 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +from botocore.exceptions import ClientError + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SNSTopicPolicyProperties(TypedDict): + PolicyDocument: Optional[dict | str] + Topics: Optional[list[str]] + Id: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SNSTopicPolicyProvider(ResourceProvider[SNSTopicPolicyProperties]): + TYPE = "AWS::SNS::TopicPolicy" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[SNSTopicPolicyProperties], + ) -> ProgressEvent[SNSTopicPolicyProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - PolicyDocument + - Topics + + Read-only properties: + - /properties/Id + + IAM permissions required: + - sns:SetTopicAttributes + + """ + model = request.desired_state + sns_client = request.aws_client_factory.sns + + policy = json.dumps(model["PolicyDocument"]) + for topic_arn in model["Topics"]: + sns_client.set_topic_attributes( + TopicArn=topic_arn, AttributeName="Policy", AttributeValue=policy + ) + + model["Id"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SNSTopicPolicyProperties], + ) -> ProgressEvent[SNSTopicPolicyProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SNSTopicPolicyProperties], + ) -> ProgressEvent[SNSTopicPolicyProperties]: + """ + Delete a resource + + IAM permissions required: + - sns:SetTopicAttributes + """ + model = request.desired_state + sns = request.aws_client_factory.sns + + for topic_arn in model["Topics"]: + try: + sns.set_topic_attributes( + TopicArn=topic_arn, AttributeName="Policy", AttributeValue="" + ) + except ClientError as err: + if "NotFound" not in err.response["Error"]["Code"]: + raise + + return ProgressEvent( + status=OperationStatus.IN_PROGRESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SNSTopicPolicyProperties], + ) -> ProgressEvent[SNSTopicPolicyProperties]: + """ + Update a resource + + IAM permissions required: + - sns:SetTopicAttributes + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy.schema.json b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy.schema.json new file mode 100644 index 0000000000000..64cb3d845ce17 --- /dev/null +++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy.schema.json @@ -0,0 +1,61 @@ +{ + "typeName": "AWS::SNS::TopicPolicy", + "description": "Schema for AWS::SNS::TopicPolicy", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-sns.git", + "additionalProperties": false, + "properties": { + "Id": { + "description": "The provider-assigned unique ID for this managed resource.", + "type": "string" + }, + "PolicyDocument": { + "description": "A policy document that contains permissions to add to the specified SNS topics.", + "type": [ + "object", + "string" + ] + }, + "Topics": { + "description": "The Amazon Resource Names (ARN) of the topics to which you want to add the policy. 
You can use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function to specify an [AWS::SNS::Topic](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sns-topic.html) resource.",
+      "type": "array",
+      "uniqueItems": false,
+      "insertionOrder": false,
+      "items": {
+        "type": "string"
+      }
+    }
+  },
+  "tagging": {
+    "taggable": false,
+    "tagOnCreate": false,
+    "tagUpdatable": false,
+    "cloudFormationSystemTags": false
+  },
+  "required": [
+    "PolicyDocument",
+    "Topics"
+  ],
+  "primaryIdentifier": [
+    "/properties/Id"
+  ],
+  "readOnlyProperties": [
+    "/properties/Id"
+  ],
+  "handlers": {
+    "create": {
+      "permissions": [
+        "sns:SetTopicAttributes"
+      ]
+    },
+    "update": {
+      "permissions": [
+        "sns:SetTopicAttributes"
+      ]
+    },
+    "delete": {
+      "permissions": [
+        "sns:SetTopicAttributes"
+      ]
+    }
+  }
+}
diff --git a/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy_plugin.py b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy_plugin.py
new file mode 100644
index 0000000000000..9fbe0afe2d7e4
--- /dev/null
+++ b/localstack-core/localstack/services/sns/resource_providers/aws_sns_topicpolicy_plugin.py
@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from localstack.services.cloudformation.resource_provider import (
+    CloudFormationResourceProviderPlugin,
+    ResourceProvider,
+)
+
+
+class SNSTopicPolicyProviderPlugin(CloudFormationResourceProviderPlugin):
+    name = "AWS::SNS::TopicPolicy"
+
+    def __init__(self):
+        self.factory: Optional[Type[ResourceProvider]] = None
+
+    def load(self):
+        from localstack.services.sns.resource_providers.aws_sns_topicpolicy import (
+            SNSTopicPolicyProvider,
+        )
+
+        self.factory = SNSTopicPolicyProvider
diff --git a/localstack/utils/aws/__init__.py b/localstack-core/localstack/services/sqs/__init__.py
similarity index 100%
rename from localstack/utils/aws/__init__.py
rename to localstack-core/localstack/services/sqs/__init__.py
diff --git a/localstack-core/localstack/services/sqs/constants.py b/localstack-core/localstack/services/sqs/constants.py
new file mode 100644
index 0000000000000..0cdc49b8eccdb
--- /dev/null
+++ b/localstack-core/localstack/services/sqs/constants.py
@@ -0,0 +1,56 @@
+# Valid unicode values: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF
+# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html
+from localstack.aws.api.sqs import QueueAttributeName
+
+MSG_CONTENT_REGEX = "^[\u0009\u000a\u000d\u0020-\ud7ff\ue000-\ufffd\U00010000-\U0010ffff]*$"
+
+# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html
+# While not documented, umlauts seem to be allowed
+ATTR_NAME_CHAR_REGEX = "^[\u00c0-\u017fa-zA-Z0-9_.-]*$"
+ATTR_NAME_PREFIX_SUFFIX_REGEX = r"^(?!(aws\.|amazon\.|\.)).*(?<!\.)$"
+FIFO_MSG_REGEX = "^[0-9a-zA-Z!\"#$%&'()*+,./:;<=>?@[\\]^_`{|}~-]*$"
+
+DEDUPLICATION_INTERVAL_IN_SEC = 5 * 60
+
+# When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
+# see https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteQueue.html
+RECENTLY_DELETED_TIMEOUT = 60
+
+# the default maximum message size in SQS
+DEFAULT_MAXIMUM_MESSAGE_SIZE = 262144
+INTERNAL_QUEUE_ATTRIBUTES = [
+    # these attributes cannot be changed by set_queue_attributes and should
+    # therefore be ignored when comparing queue attributes for create_queue
+    # 'FifoQueue' is handled on a per_queue basis
+    QueueAttributeName.ApproximateNumberOfMessages,
+    QueueAttributeName.ApproximateNumberOfMessagesDelayed,
+    QueueAttributeName.ApproximateNumberOfMessagesNotVisible,
+    QueueAttributeName.CreatedTimestamp,
+    QueueAttributeName.LastModifiedTimestamp,
+    QueueAttributeName.QueueArn,
+]
+
+INVALID_STANDARD_QUEUE_ATTRIBUTES = [
+    QueueAttributeName.FifoQueue,
+    QueueAttributeName.ContentBasedDeduplication,
+    *INTERNAL_QUEUE_ATTRIBUTES,
+]
+
+# URL regexes for various endpoint strategies
+STANDARD_STRATEGY_URL_REGEX = r"sqs.(?P<region>[a-z0-9-]{1,})\.[^:]+:\d{4,5}\/(?P<account_id>\d{12})\/(?P<name>[a-zA-Z0-9_-]+(.fifo)?)$"
+DOMAIN_STRATEGY_URL_REGEX = r"((?P<region>[a-z0-9-]{1,})\.)?queue\.[^:]+:\d{4,5}\/(?P<account_id>\d{12})\/(?P<name>[a-zA-Z0-9_-]+(.fifo)?)$"
+PATH_STRATEGY_URL_REGEX = r"[^:]+:\d{4,5}\/queue\/(?P<region>[a-z0-9-]{1,})\/(?P<account_id>\d{12})\/(?P<name>[a-zA-Z0-9_-]+(.fifo)?)$"
+LEGACY_STRATEGY_URL_REGEX = (
+    r"[^:]+:\d{4,5}\/(?P<account_id>\d{12})\/(?P<name>[a-zA-Z0-9_-]+(.fifo)?)$"
+)
+
+# HTTP headers used to override internal SQS ReceiveMessage
+HEADER_LOCALSTACK_SQS_OVERRIDE_MESSAGE_COUNT = "x-localstack-sqs-override-message-count"
+HEADER_LOCALSTACK_SQS_OVERRIDE_WAIT_TIME_SECONDS = "x-localstack-sqs-override-wait-time-seconds"
+
+# response includes a default maximum of 1,000 results
+MAX_RESULT_LIMIT = 1000
+
+# SQS string seed value for uuid generation
+SQS_UUID_STRING_SEED = "123e4567-e89b-12d3-a456-426614174000"
diff --git a/localstack-core/localstack/services/sqs/exceptions.py b/localstack-core/localstack/services/sqs/exceptions.py
new file mode 100644
index 0000000000000..4f256648cc145
--- /dev/null
+++ b/localstack-core/localstack/services/sqs/exceptions.py
@@ -0,0 +1,16 @@
+from localstack.aws.api import CommonServiceException
+
+
+class InvalidParameterValueException(CommonServiceException):
+    def __init__(self, message):
+        super().__init__("InvalidParameterValueException", message, 400, True)
+
+
+class InvalidAttributeValue(CommonServiceException):
+    def __init__(self, message):
+        super().__init__("InvalidAttributeValue", message, 400, True)
+
+
+class MissingRequiredParameterException(CommonServiceException):
+    def __init__(self, message):
+        super().__init__("MissingRequiredParameterException", message, 400, True)
diff --git a/localstack-core/localstack/services/sqs/models.py b/localstack-core/localstack/services/sqs/models.py
new file mode 100644
index 0000000000000..8e7352bd28172
--- /dev/null
+++ b/localstack-core/localstack/services/sqs/models.py
@@ -0,0 +1,1331 @@
+import hashlib
+import heapq
+import inspect
+import json
+import logging
+import re
+import threading
+import time
+from datetime import datetime
+from queue import Empty
+from typing import Dict, Optional, Set
+
+from localstack import config
+from localstack.aws.api import RequestContext
+from localstack.aws.api.sqs import (
+    AttributeNameList,
+    InvalidAttributeName,
+    Message,
+    MessageSystemAttributeName,
+    QueueAttributeMap,
+    QueueAttributeName,
+    ReceiptHandleIsInvalid,
+    TagMap,
+)
+from localstack.services.sqs import constants as sqs_constants
+from localstack.services.sqs.exceptions import (
+    InvalidAttributeValue,
InvalidParameterValueException, + MissingRequiredParameterException, +) +from localstack.services.sqs.queue import InterruptiblePriorityQueue, InterruptibleQueue +from localstack.services.sqs.utils import ( + encode_move_task_handle, + encode_receipt_handle, + extract_receipt_handle_info, + global_message_sequence, + guess_endpoint_strategy_and_host, + is_message_deduplication_id_required, +) +from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute +from localstack.utils.aws.arns import get_partition +from localstack.utils.strings import long_uid +from localstack.utils.time import now +from localstack.utils.urls import localstack_host + +LOG = logging.getLogger(__name__) + +ReceiptHandle = str + + +class SqsMessage: + message: Message + created: float + visibility_timeout: int + receive_count: int + delay_seconds: Optional[int] + receipt_handles: Set[str] + last_received: Optional[float] + first_received: Optional[float] + visibility_deadline: Optional[float] + deleted: bool + priority: float + message_deduplication_id: str + message_group_id: str + sequence_number: str + + def __init__( + self, + priority: float, + message: Message, + message_deduplication_id: str = None, + message_group_id: str = None, + sequence_number: str = None, + ) -> None: + self.created = time.time() + self.message = message + self.receive_count = 0 + self.receipt_handles = set() + + self.delay_seconds = None + self.last_received = None + self.first_received = None + self.visibility_deadline = None + self.deleted = False + self.priority = priority + self.sequence_number = sequence_number + + attributes = {} + if message_group_id is not None: + attributes["MessageGroupId"] = message_group_id + if message_deduplication_id is not None: + attributes["MessageDeduplicationId"] = message_deduplication_id + if sequence_number is not None: + attributes["SequenceNumber"] = sequence_number + + if self.message.get("Attributes"): + self.message["Attributes"].update(attributes) + else: + self.message["Attributes"] = attributes + + # set attribute default values if not set + self.message["Attributes"].setdefault( + MessageSystemAttributeName.ApproximateReceiveCount, "0" + ) + + @property + def message_group_id(self) -> Optional[str]: + return self.message["Attributes"].get(MessageSystemAttributeName.MessageGroupId) + + @property + def message_deduplication_id(self) -> Optional[str]: + return self.message["Attributes"].get(MessageSystemAttributeName.MessageDeduplicationId) + + @property + def dead_letter_queue_source_arn(self) -> Optional[str]: + return self.message["Attributes"].get(MessageSystemAttributeName.DeadLetterQueueSourceArn) + + @property + def message_id(self): + return self.message["MessageId"] + + def increment_approximate_receive_count(self): + """ + Increment the message system attribute ``ApproximateReceiveCount``. + """ + # TODO: need better handling of system attributes + cnt = int( + self.message["Attributes"].get(MessageSystemAttributeName.ApproximateReceiveCount, "0") + ) + cnt += 1 + self.message["Attributes"][MessageSystemAttributeName.ApproximateReceiveCount] = str(cnt) + + def set_last_received(self, timestamp: float): + """ + Sets the last received timestamp of the message to the given value, and updates the visibility deadline + accordingly. 
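+        The new deadline is computed as ``timestamp + visibility_timeout``.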
+
+        :param timestamp: the last time the message was received
+        """
+        self.last_received = timestamp
+        self.visibility_deadline = timestamp + self.visibility_timeout
+
+    def update_visibility_timeout(self, timeout: int):
+        """
+        Sets the visibility timeout of the message to the given value, and updates the visibility deadline accordingly.
+
+        :param timeout: the timeout value in seconds
+        """
+        self.visibility_timeout = timeout
+        self.visibility_deadline = time.time() + timeout
+
+    @property
+    def is_visible(self) -> bool:
+        """
+        Returns false if the message has a visibility deadline that is in the future.
+
+        :return: whether the message is visible or not.
+        """
+        if self.visibility_deadline is None:
+            return True
+        if time.time() >= self.visibility_deadline:
+            return True
+
+        return False
+
+    @property
+    def is_delayed(self) -> bool:
+        if self.delay_seconds is None:
+            return False
+        return time.time() <= self.created + self.delay_seconds
+
+    def __gt__(self, other):
+        return self.priority > other.priority
+
+    def __ge__(self, other):
+        return self.priority >= other.priority
+
+    def __lt__(self, other):
+        return self.priority < other.priority
+
+    def __le__(self, other):
+        return self.priority <= other.priority
+
+    def __eq__(self, other):
+        return self.message_id == other.message_id
+
+    def __hash__(self):
+        return self.message_id.__hash__()
+
+    def __repr__(self):
+        return f"SqsMessage(id={self.message_id},group={self.message_group_id})"
+
+
+class ReceiveMessageResult:
+    """
+    Object to communicate the result of a "receive messages" operation between the SqsProvider and
+    the underlying data structure holding the messages.
+    """
+
+    successful: list[SqsMessage]
+    """The messages that were successfully received from the queue"""
+
+    receipt_handles: list[str]
+    """The array index positions in ``successful`` and ``receipt_handles`` need to be the same (this
+    assumption is needed when assembling the result in `SqsProvider.receive_message`)"""
+
+    dead_letter_messages: list[SqsMessage]
+    """All messages that were received more often than the maxReceiveCount of the redrive policy allows (if any)"""
+
+    def __init__(self):
+        self.successful = []
+        self.receipt_handles = []
+        self.dead_letter_messages = []
+
+
+class MessageMoveTaskStatus(str):
+    CREATED = "CREATED"  # not validated, for internal use
+    RUNNING = "RUNNING"
+    COMPLETED = "COMPLETED"
+    CANCELLING = "CANCELLING"
+    CANCELLED = "CANCELLED"
+    FAILED = "FAILED"
+
+
+class MessageMoveTask:
+    """
+    A task created by the ``StartMessageMoveTask`` operation.
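+
+    A message-move task drains messages from the DLQ given by ``source_arn`` back to their source
+    queues, or to ``destination_arn`` if one is specified, optionally rate-limited via
+    ``max_number_of_messages_per_second``.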
+ """ + + # configurable fields + source_arn: str + """The arn of the DLQ the messages are currently in.""" + destination_arn: str | None = None + """If the DestinationArn is not specified, the original source arn will be used as target.""" + max_number_of_messages_per_second: int | None = None + + # dynamic fields + task_id: str + status: str = MessageMoveTaskStatus.CREATED + started_timestamp: datetime | None = None + approximate_number_of_messages_moved: int | None = None + approximate_number_of_messages_to_move: int | None = None + failure_reason: str | None = None + + cancel_event: threading.Event + + def __init__( + self, source_arn: str, destination_arn: str, max_number_of_messages_per_second: int = None + ): + self.task_id = long_uid() + self.source_arn = source_arn + self.destination_arn = destination_arn + self.max_number_of_messages_per_second = max_number_of_messages_per_second + self.cancel_event = threading.Event() + + def mark_started(self): + self.started_timestamp = datetime.utcnow() + self.status = MessageMoveTaskStatus.RUNNING + self.cancel_event.clear() + + @property + def task_handle(self) -> str: + return encode_move_task_handle(self.task_id, self.source_arn) + + +class SqsQueue: + name: str + region: str + account_id: str + + attributes: QueueAttributeMap + tags: TagMap + + purge_in_progress: bool + purge_timestamp: Optional[float] + + delayed: Set[SqsMessage] + inflight: Set[SqsMessage] + receipts: Dict[str, SqsMessage] + + def __init__(self, name: str, region: str, account_id: str, attributes=None, tags=None) -> None: + self.name = name + self.region = region + self.account_id = account_id + + self._assert_queue_name(name) + self.tags = tags or {} + + self.delayed = set() + self.inflight = set() + self.receipts = {} + + self.attributes = self.default_attributes() + if attributes: + self.validate_queue_attributes(attributes) + self.attributes.update(attributes) + + self.purge_in_progress = False + self.purge_timestamp = None + + self.permissions = set() + self.mutex = threading.RLock() + + def shutdown(self): + pass + + def default_attributes(self) -> QueueAttributeMap: + return { + QueueAttributeName.ApproximateNumberOfMessages: lambda: str( + self.approx_number_of_messages + ), + QueueAttributeName.ApproximateNumberOfMessagesNotVisible: lambda: str( + self.approx_number_of_messages_not_visible + ), + QueueAttributeName.ApproximateNumberOfMessagesDelayed: lambda: str( + self.approx_number_of_messages_delayed + ), + QueueAttributeName.CreatedTimestamp: str(now()), + QueueAttributeName.DelaySeconds: "0", + QueueAttributeName.LastModifiedTimestamp: str(now()), + QueueAttributeName.MaximumMessageSize: str(sqs_constants.DEFAULT_MAXIMUM_MESSAGE_SIZE), + QueueAttributeName.MessageRetentionPeriod: "345600", + QueueAttributeName.QueueArn: self.arn, + QueueAttributeName.ReceiveMessageWaitTimeSeconds: "0", + QueueAttributeName.VisibilityTimeout: "30", + QueueAttributeName.SqsManagedSseEnabled: "true", + } + + def update_delay_seconds(self, value: int): + """ + For standard queues, the per-queue delay setting is not retroactive—changing the setting doesn't affect the + delay of messages already in the queue. For FIFO queues, the per-queue delay setting is retroactive—changing + the setting affects the delay of messages already in the queue. 
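+        Note: in this implementation the setting is only stored here; the applicable delay is resolved
+        per message when it is put into the queue (see ``StandardQueue.put``).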
+ + https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-delay-queues.html + + :param value: the number of seconds + """ + self.attributes[QueueAttributeName.DelaySeconds] = str(value) + + def update_last_modified(self, timestamp: int = None): + if timestamp is None: + timestamp = now() + + self.attributes[QueueAttributeName.LastModifiedTimestamp] = str(timestamp) + + @property + def arn(self) -> str: + return f"arn:{get_partition(self.region)}:sqs:{self.region}:{self.account_id}:{self.name}" + + def url(self, context: RequestContext) -> str: + """Return queue URL which depending on the endpoint strategy returns e.g.: + * (standard) http://sqs.eu-west-1.localhost.localstack.cloud:4566/000000000000/myqueue + * (domain) http://eu-west-1.queue.localhost.localstack.cloud:4566/000000000000/myqueue + * (path) http://localhost.localstack.cloud:4566/queue/eu-central-1/000000000000/myqueue + * otherwise: http://localhost.localstack.cloud:4566/000000000000/myqueue + """ + + scheme = config.get_protocol() # TODO: should probably change to context.request.scheme + host_definition = localstack_host() + host_and_port = host_definition.host_and_port() + + endpoint_strategy = config.SQS_ENDPOINT_STRATEGY + + if endpoint_strategy == "dynamic": + scheme = context.request.scheme + # determine the endpoint strategy that should be used, and determine the host dynamically + endpoint_strategy, host_and_port = guess_endpoint_strategy_and_host( + context.request.host + ) + + if endpoint_strategy == "standard": + # Region is always part of the queue URL + # sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/my-queue + scheme = context.request.scheme + host_url = f"{scheme}://sqs.{self.region}.{host_and_port}" + elif endpoint_strategy == "domain": + # Legacy style + # queue.localhost.localstack.cloud:4566/000000000000/my-queue (us-east-1) + # or us-east-2.queue.localhost.localstack.cloud:4566/000000000000/my-queue + region = "" if self.region == "us-east-1" else self.region + "." + host_url = f"{scheme}://{region}queue.{host_and_port}" + elif endpoint_strategy == "path": + # https?://localhost:4566/queue/us-east-1/00000000000/my-queue (us-east-1) + host_url = f"{scheme}://{host_and_port}/queue/{self.region}" + else: + host_url = f"{scheme}://{host_and_port}" + + return "{host}/{account_id}/{name}".format( + host=host_url.rstrip("/"), + account_id=self.account_id, + name=self.name, + ) + + @property + def redrive_policy(self) -> Optional[dict]: + if policy_document := self.attributes.get(QueueAttributeName.RedrivePolicy): + return json.loads(policy_document) + return None + + @property + def max_receive_count(self) -> Optional[int]: + """ + Returns the maxReceiveCount attribute of the redrive policy. If no redrive policy is set, then it + returns None. + """ + if redrive_policy := self.redrive_policy: + return int(redrive_policy["maxReceiveCount"]) + return None + + @property + def visibility_timeout(self) -> int: + return int(self.attributes[QueueAttributeName.VisibilityTimeout]) + + @property + def delay_seconds(self) -> int: + return int(self.attributes[QueueAttributeName.DelaySeconds]) + + @property + def wait_time_seconds(self) -> int: + return int(self.attributes[QueueAttributeName.ReceiveMessageWaitTimeSeconds]) + + @property + def message_retention_period(self) -> int: + """ + ``MessageRetentionPeriod`` -- the length of time, in seconds, for which Amazon SQS retains a message. Valid + values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). 
Default: 345,600 (4 days). + """ + return int(self.attributes[QueueAttributeName.MessageRetentionPeriod]) + + @property + def maximum_message_size(self) -> int: + return int(self.attributes[QueueAttributeName.MaximumMessageSize]) + + @property + def approx_number_of_messages(self) -> int: + raise NotImplementedError + + @property + def approx_number_of_messages_not_visible(self) -> int: + return len(self.inflight) + + @property + def approx_number_of_messages_delayed(self) -> int: + return len(self.delayed) + + def validate_receipt_handle(self, receipt_handle: str): + if self.arn != extract_receipt_handle_info(receipt_handle).queue_arn: + raise ReceiptHandleIsInvalid( + f'The input receipt handle "{receipt_handle}" is not a valid receipt handle.' + ) + + def update_visibility_timeout(self, receipt_handle: str, visibility_timeout: int): + with self.mutex: + self.validate_receipt_handle(receipt_handle) + + if receipt_handle not in self.receipts: + raise InvalidParameterValueException( + f"Value {receipt_handle} for parameter ReceiptHandle is invalid. Reason: Message does not exist " + f"or is not available for visibility timeout change." + ) + + standard_message = self.receipts[receipt_handle] + + if standard_message not in self.inflight: + return + + standard_message.update_visibility_timeout(visibility_timeout) + + if visibility_timeout == 0: + LOG.info( + "terminating the visibility timeout of %s", + standard_message.message["MessageId"], + ) + # Terminating the visibility timeout for a message + # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html#terminating-message-visibility-timeout + self.inflight.remove(standard_message) + self._put_message(standard_message) + + def remove(self, receipt_handle: str): + with self.mutex: + self.validate_receipt_handle(receipt_handle) + + if receipt_handle not in self.receipts: + LOG.debug( + "no in-flight message found for receipt handle %s in queue %s", + receipt_handle, + self.arn, + ) + return + + standard_message = self.receipts[receipt_handle] + self._pre_delete_checks(standard_message, receipt_handle) + standard_message.deleted = True + LOG.debug( + "deleting message %s from queue %s", + standard_message.message["MessageId"], + self.arn, + ) + + # remove all handles associated with this message + for handle in standard_message.receipt_handles: + del self.receipts[handle] + standard_message.receipt_handles.clear() + + self._on_remove_message(standard_message) + + def _on_remove_message(self, message: SqsMessage): + """Hook for queue-specific logic executed when a message is removed.""" + pass + + def put( + self, + message: Message, + visibility_timeout: int = None, + message_deduplication_id: str = None, + message_group_id: str = None, + delay_seconds: int = None, + ) -> SqsMessage: + raise NotImplementedError + + def receive( + self, + num_messages: int = 1, + wait_time_seconds: int = None, + visibility_timeout: int = None, + *, + poll_empty_queue: bool = False, + ) -> ReceiveMessageResult: + """ + Receive ``num_messages`` from the queue, and wait at max ``wait_time_seconds``. If a visibility + timeout is given, also change the visibility timeout of all received messages accordingly. 
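+        Implementations may also collect messages that exceeded the redrive policy's ``maxReceiveCount``
+        into the result's ``dead_letter_messages`` instead of returning them.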
+
+        :param num_messages: the number of messages you want to get from the underlying queue
+        :param wait_time_seconds: the number of seconds you want to wait
+        :param visibility_timeout: an optional new visibility timeout
+        :param poll_empty_queue: whether to keep polling an empty queue until the duration ``wait_time_seconds`` has elapsed
+        :return: a ReceiveMessageResult object that contains the result of the operation
+        """
+        raise NotImplementedError
+
+    def clear(self):
+        """
+        Calls clear on all internal data structures that hold messages and data related to them.
+        """
+        with self.mutex:
+            self.inflight.clear()
+            self.delayed.clear()
+            self.receipts.clear()
+
+    def _put_message(self, message: SqsMessage):
+        """Low-level put operation to put messages into a queue and modify visibilities accordingly."""
+        raise NotImplementedError
+
+    def create_receipt_handle(self, message: SqsMessage) -> str:
+        return encode_receipt_handle(self.arn, message)
+
+    def requeue_inflight_messages(self):
+        if not self.inflight:
+            return
+
+        with self.mutex:
+            messages = [message for message in self.inflight if message.is_visible]
+            for standard_message in messages:
+                LOG.debug(
+                    "re-queueing inflight message %s into queue %s",
+                    standard_message,
+                    self.arn,
+                )
+                self.inflight.remove(standard_message)
+                self._put_message(standard_message)
+
+    def enqueue_delayed_messages(self):
+        if not self.delayed:
+            return
+
+        with self.mutex:
+            messages = [message for message in self.delayed if not message.is_delayed]
+            for standard_message in messages:
+                LOG.debug(
+                    "enqueueing delayed message %s into queue %s",
+                    standard_message.message["MessageId"],
+                    self.arn,
+                )
+                self.delayed.remove(standard_message)
+                self._put_message(standard_message)
+
+    def remove_expired_messages(self):
+        """
+        Removes messages from the queue whose retention period has expired.
+        """
+        raise NotImplementedError
+
+    def _assert_queue_name(self, name):
+        if not re.match(r"^[a-zA-Z0-9_-]{1,80}$", name):
+            raise InvalidParameterValueException(
+                "Can only include alphanumeric characters, hyphens, or underscores. 1 to 80 in length"
+            )
+
+    def validate_queue_attributes(self, attributes):
+        pass
+
+    def add_permission(self, label: str, actions: list[str], account_ids: list[str]) -> None:
+        """
+        Create or append to a policy for usage with the add_permission api call
+
+        :param actions: List of actions to be included in the policy, without the SQS: prefix
+        :param account_ids: List of account ids to be included in the policy
+        :param label: Permission label
+        """
+        statement = {
+            "Sid": label,
+            "Effect": "Allow",
+            "Principal": {
+                "AWS": [
+                    f"arn:{get_partition(self.region)}:iam::{account_id}:root"
+                    for account_id in account_ids
+                ]
+                if len(account_ids) > 1
+                else f"arn:{get_partition(self.region)}:iam::{account_ids[0]}:root"
+            },
+            "Action": [f"SQS:{action}" for action in actions]
+            if len(actions) > 1
+            else f"SQS:{actions[0]}",
+            "Resource": self.arn,
+        }
+        if policy := self.attributes.get(QueueAttributeName.Policy):
+            policy = json.loads(policy)
+            policy.setdefault("Statement", [])
+        else:
+            policy = {
+                "Version": "2008-10-17",
+                "Id": f"{self.arn}/SQSDefaultPolicy",
+                "Statement": [],
+            }
+        existing_statement_ids = [statement.get("Sid") for statement in policy["Statement"]]
+        if label in existing_statement_ids:
+            raise InvalidParameterValueException(
+                f"Value {label} for parameter Label is invalid. Reason: Already exists."
+    def remove_permission(self, label: str) -> None:
+        """
+        Delete a policy statement, for usage with the remove_permission API call
+
+        :param label: Permission label
+        """
+        if policy := self.attributes.get(QueueAttributeName.Policy):
+            policy = json.loads(policy)
+            # this should not be necessary, but we can upload custom policies, so it's better to be safe
+            policy.setdefault("Statement", [])
+        else:
+            policy = {
+                "Version": "2008-10-17",
+                "Id": f"{self.arn}/SQSDefaultPolicy",
+                "Statement": [],
+            }
+        existing_statement_ids = [statement.get("Sid") for statement in policy["Statement"]]
+        if label not in existing_statement_ids:
+            raise InvalidParameterValueException(
+                f"Value {label} for parameter Label is invalid. Reason: can't find label."
+            )
+        policy["Statement"] = [
+            statement for statement in policy["Statement"] if statement.get("Sid") != label
+        ]
+        if policy["Statement"]:
+            self.attributes[QueueAttributeName.Policy] = json.dumps(policy)
+        else:
+            del self.attributes[QueueAttributeName.Policy]
+
+    def get_queue_attributes(self, attribute_names: AttributeNameList = None) -> dict[str, str]:
+        if not attribute_names:
+            return {}
+
+        if QueueAttributeName.All in attribute_names:
+            attribute_names = self.attributes.keys()
+
+        result: Dict[QueueAttributeName, str] = {}
+
+        for attr in attribute_names:
+            try:
+                getattr(QueueAttributeName, attr)
+            except AttributeError:
+                raise InvalidAttributeName(f"Unknown Attribute {attr}.")
+
+            value = self.attributes.get(attr)
+            if callable(value):
+                func = value
+                value = func()
+                if value is not None:
+                    result[attr] = value
+            elif value == "False" or value == "True":
+                result[attr] = value.lower()
+            elif value is not None:
+                result[attr] = value
+        return result
+
+    @staticmethod
+    def remove_expired_messages_from_heap(
+        heap: list[SqsMessage], message_retention_period: int
+    ) -> list[SqsMessage]:
+        """
+        Removes from the given heap of SqsMessages all messages that have expired in the context of the current time
+        and the given message retention period. The method manipulates the heap but retains the heap property.
+
+        :param heap: an array satisfying the heap property
+        :param message_retention_period: the message retention period to use in relation to the current time
+        :return: a list of expired messages that have been removed from the heap
+        """
+        th = time.time() - message_retention_period
+
+        expired = []
+        while heap:
+            # here we're leveraging the heap property "that a[0] is always its smallest element"
+            # and the assumption that message.created == message.priority
+            message = heap[0]
+            if th < message.created:
+                break
+            # remove the expired element
+            expired.append(message)
+            heapq.heappop(heap)
+
+        return expired
+
+    def _pre_delete_checks(self, standard_message: SqsMessage, receipt_handle: str) -> None:
+        """
+        Runs any potential checks if a message that has been successfully identified via a receipt handle
+        is indeed supposed to be deleted.
+        For example, a receipt handle that has expired might not lead to deletion.
+
+        :param standard_message: The message to be deleted
+        :param receipt_handle: The handle associated with the message
+        :return: None. Potential violations raise errors.
+ """ + pass + + +class StandardQueue(SqsQueue): + visible: InterruptiblePriorityQueue[SqsMessage] + inflight: Set[SqsMessage] + + def __init__(self, name: str, region: str, account_id: str, attributes=None, tags=None) -> None: + super().__init__(name, region, account_id, attributes, tags) + self.visible = InterruptiblePriorityQueue() + + def clear(self): + with self.mutex: + super().clear() + self.visible.queue.clear() + + @property + def approx_number_of_messages(self): + return self.visible.qsize() + + def shutdown(self): + self.visible.shutdown() + + def put( + self, + message: Message, + visibility_timeout: int = None, + message_deduplication_id: str = None, + message_group_id: str = None, + delay_seconds: int = None, + ): + if message_deduplication_id: + raise InvalidParameterValueException( + f"Value {message_deduplication_id} for parameter MessageDeduplicationId is invalid. Reason: The " + f"request includes a parameter that is not valid for this queue type." + ) + if isinstance(message_group_id, str): + raise InvalidParameterValueException( + f"Value {message_group_id} for parameter MessageGroupId is invalid. Reason: The request include " + f"parameter that is not valid for this queue type." + ) + + standard_message = SqsMessage(time.time(), message) + + if visibility_timeout is not None: + standard_message.visibility_timeout = visibility_timeout + else: + # use the attribute from the queue + standard_message.visibility_timeout = self.visibility_timeout + + if delay_seconds is not None: + standard_message.delay_seconds = delay_seconds + else: + standard_message.delay_seconds = self.delay_seconds + + if standard_message.is_delayed: + self.delayed.add(standard_message) + else: + self._put_message(standard_message) + + return standard_message + + def _put_message(self, message: SqsMessage): + self.visible.put_nowait(message) + + def remove_expired_messages(self): + with self.mutex: + messages = self.remove_expired_messages_from_heap( + self.visible.queue, self.message_retention_period + ) + + for message in messages: + LOG.debug("Removed expired message %s from queue %s", message.message_id, self.arn) + + def receive( + self, + num_messages: int = 1, + wait_time_seconds: int = None, + visibility_timeout: int = None, + *, + poll_empty_queue: bool = False, + ) -> ReceiveMessageResult: + result = ReceiveMessageResult() + + max_receive_count = self.max_receive_count + visibility_timeout = ( + self.visibility_timeout if visibility_timeout is None else visibility_timeout + ) + + block = True if wait_time_seconds else False + timeout = wait_time_seconds or 0 + start = time.time() + + # collect messages + while True: + try: + message = self.visible.get(block=block, timeout=timeout) + except Empty: + break + # setting block to false guarantees that, if we've already waited before, we don't wait the + # full time again in the next iteration if max_number_of_messages is set but there are no more + # messages in the queue. see https://github.com/localstack/localstack/issues/5824 + if not poll_empty_queue: + block = False + + timeout -= time.time() - start + if timeout < 0: + timeout = 0 + + if message.deleted: + # filter messages that were deleted with an expired receipt handle after they have been + # re-queued. this can only happen due to a race with `remove`. 
+                continue
+
+            # update message attributes
+            message.receive_count += 1
+            message.update_visibility_timeout(visibility_timeout)
+            message.set_last_received(time.time())
+            if message.first_received is None:
+                message.first_received = message.last_received
+
+            LOG.debug("de-queued message %s from %s", message, self.arn)
+            if max_receive_count and message.receive_count > max_receive_count:
+                # the message needs to move to the DLQ
+                LOG.debug(
+                    "message %s has been received %d times, marking it for DLQ",
+                    message,
+                    message.receive_count,
+                )
+                result.dead_letter_messages.append(message)
+            else:
+                result.successful.append(message)
+                message.increment_approximate_receive_count()
+
+                # now we can return
+                if len(result.successful) == num_messages:
+                    break
+
+        # now process the successful result messages: create receipt handles and manage visibility.
+        for message in result.successful:
+            # manage receipt handle
+            receipt_handle = self.create_receipt_handle(message)
+            message.receipt_handles.add(receipt_handle)
+            self.receipts[receipt_handle] = message
+            result.receipt_handles.append(receipt_handle)
+
+            # manage message visibility
+            if message.visibility_timeout == 0:
+                self.visible.put_nowait(message)
+            else:
+                self.inflight.add(message)
+
+        return result
+
+    def _on_remove_message(self, message: SqsMessage):
+        try:
+            self.inflight.remove(message)
+        except KeyError:
+            # this likely means the message was removed with an expired receipt handle. unfortunately, this
+            # means we need to scan the queue for the element, remove it from there, and then re-heapify
+            # the queue
+            try:
+                self.visible.queue.remove(message)
+                heapq.heapify(self.visible.queue)
+            except ValueError:
+                # this may happen if the message no longer exists because it was removed earlier
+                pass
+
+    def validate_queue_attributes(self, attributes):
+        valid = [
+            k[1]
+            for k in inspect.getmembers(
+                QueueAttributeName, lambda x: isinstance(x, str) and not x.startswith("__")
+            )
+            if k[1] not in sqs_constants.INVALID_STANDARD_QUEUE_ATTRIBUTES
+        ]
+
+        for k in attributes.keys():
+            if k in [QueueAttributeName.FifoThroughputLimit, QueueAttributeName.DeduplicationScope]:
+                raise InvalidAttributeName(
+                    f"You can specify the {k} only when FifoQueue is set to true."
+                )
+            if k not in valid:
+                raise InvalidAttributeName(f"Unknown Attribute {k}.")
+
+
+class MessageGroup:
+    message_group_id: str
+    messages: list[SqsMessage]
+
+    def __init__(self, message_group_id: str):
+        self.message_group_id = message_group_id
+        self.messages = []
+
+    def empty(self) -> bool:
+        return not self.messages
+
+    def size(self) -> int:
+        return len(self.messages)
+
+    def pop(self) -> SqsMessage:
+        return heapq.heappop(self.messages)
+
+    def push(self, message: SqsMessage):
+        heapq.heappush(self.messages, message)
+
+    def __eq__(self, other):
+        return self.message_group_id == other.message_group_id
+
+    def __hash__(self):
+        return self.message_group_id.__hash__()
+
+    def __repr__(self):
+        return f"MessageGroup(id={self.message_group_id}, size={len(self.messages)})"
+
+
+class FifoQueue(SqsQueue):
+    """
+    A FIFO queue behaves differently from a standard queue. Most behavior has to be implemented separately.
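+
+    For example (illustrative): two messages sent with the same ``message_group_id`` are always
+    received in send order, and a second ``put`` with the same ``message_deduplication_id`` within
+    the deduplication interval does not create a new message but returns the original message ID.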
+ + See https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html + + TODO: raise exceptions when trying to remove a message with an expired receipt handle + """ + + deduplication: Dict[str, SqsMessage] + message_groups: dict[str, MessageGroup] + inflight_groups: set[MessageGroup] + message_group_queue: InterruptibleQueue + deduplication_scope: str + + def __init__(self, name: str, region: str, account_id: str, attributes=None, tags=None) -> None: + super().__init__(name, region, account_id, attributes, tags) + self.deduplication = {} + + self.message_groups = {} + self.inflight_groups = set() + self.message_group_queue = InterruptibleQueue() + + # SQS does not seem to change the deduplication behaviour of fifo queues if you + # change to/from 'queue'/'messageGroup' scope after creation -> we need to set this on creation + self.deduplication_scope = self.attributes[QueueAttributeName.DeduplicationScope] + + @property + def approx_number_of_messages(self): + n = 0 + for message_group in self.message_groups.values(): + n += len(message_group.messages) + return n + + def shutdown(self): + self.message_group_queue.shutdown() + + def get_message_group(self, message_group_id: str) -> MessageGroup: + """ + Thread safe lazy factory for MessageGroup objects. + + :param message_group_id: the message group ID + :return: a new or existing MessageGroup object + """ + with self.mutex: + if message_group_id not in self.message_groups: + self.message_groups[message_group_id] = MessageGroup(message_group_id) + + return self.message_groups.get(message_group_id) + + def default_attributes(self) -> QueueAttributeMap: + return { + **super().default_attributes(), + QueueAttributeName.ContentBasedDeduplication: "false", + QueueAttributeName.DeduplicationScope: "queue", + QueueAttributeName.FifoThroughputLimit: "perQueue", + } + + def update_delay_seconds(self, value: int): + super(FifoQueue, self).update_delay_seconds(value) + for message in self.delayed: + message.delay_seconds = value + + def _pre_delete_checks(self, message: SqsMessage, receipt_handle: str) -> None: + _, _, _, last_received = extract_receipt_handle_info(receipt_handle) + if time.time() - float(last_received) > message.visibility_timeout: + raise InvalidParameterValueException( + f"Value {receipt_handle} for parameter ReceiptHandle is invalid. Reason: The receipt handle has expired." + ) + + def remove(self, receipt_handle: str): + self.validate_receipt_handle(receipt_handle) + + super().remove(receipt_handle) + + def put( + self, + message: Message, + visibility_timeout: int = None, + message_deduplication_id: str = None, + message_group_id: str = None, + delay_seconds: int = None, + ): + if delay_seconds: + # in fifo queues, delay is only applied on queue level. However, explicitly setting delay_seconds=0 is valid + raise InvalidParameterValueException( + f"Value {delay_seconds} for parameter DelaySeconds is invalid. Reason: The request include parameter " + f"that is not valid for this queue type." + ) + + if not message_group_id: + raise MissingRequiredParameterException( + "The request must contain the parameter MessageGroupId." 
+ ) + dedup_id = message_deduplication_id + content_based_deduplication = not is_message_deduplication_id_required(self) + if not dedup_id and content_based_deduplication: + dedup_id = hashlib.sha256(message.get("Body").encode("utf-8")).hexdigest() + if not dedup_id: + raise InvalidParameterValueException( + "The queue should either have ContentBasedDeduplication enabled or MessageDeduplicationId provided explicitly" + ) + + fifo_message = SqsMessage( + time.time(), + message, + message_deduplication_id=dedup_id, + message_group_id=message_group_id, + sequence_number=str(self.next_sequence_number()), + ) + if visibility_timeout is not None: + fifo_message.visibility_timeout = visibility_timeout + else: + # use the attribute from the queue + fifo_message.visibility_timeout = self.visibility_timeout + + if delay_seconds is not None: + fifo_message.delay_seconds = delay_seconds + else: + fifo_message.delay_seconds = self.delay_seconds + + original_message = self.deduplication.get(dedup_id) + if ( + original_message + and original_message.priority + sqs_constants.DEDUPLICATION_INTERVAL_IN_SEC + > fifo_message.priority + # account for deduplication scope required for (but not restricted to) high-throughput-mode + and ( + not self.deduplication_scope == "messageGroup" + or fifo_message.message_group_id == original_message.message_group_id + ) + ): + message["MessageId"] = original_message.message["MessageId"] + else: + if fifo_message.is_delayed: + self.delayed.add(fifo_message) + else: + self._put_message(fifo_message) + + self.deduplication[dedup_id] = fifo_message + + return fifo_message + + def _put_message(self, message: SqsMessage): + """Once a message becomes visible in a FIFO queue, its message group also becomes visible.""" + message_group = self.get_message_group(message.message_group_id) + + with self.mutex: + previously_empty = message_group.empty() + # put the message into the group + message_group.push(message) + + # new messages should not make groups visible that are currently inflight + if message.receive_count < 1 and message_group in self.inflight_groups: + return + # if an older message becomes visible again in the queue, that message's group becomes visible also. + if message_group in self.inflight_groups: + self.inflight_groups.remove(message_group) + self.message_group_queue.put_nowait(message_group) + # if the group was previously empty, it was not yet added back to the queue + elif previously_empty: + self.message_group_queue.put_nowait(message_group) + + def remove_expired_messages(self): + with self.mutex: + retention_period = self.message_retention_period + for message_group in self.message_groups.values(): + messages = self.remove_expired_messages_from_heap( + message_group.messages, retention_period + ) + + for message in messages: + LOG.debug( + "Removed expired message %s from message group %s in queue %s", + message.message_id, + message.message_group_id, + self.arn, + ) + + def receive( + self, + num_messages: int = 1, + wait_time_seconds: int = None, + visibility_timeout: int = None, + *, + poll_empty_queue: bool = False, + ) -> ReceiveMessageResult: + """ + Receive logic for FIFO queues is different from standard queues. See + https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-understanding-logic.html. + + When receiving messages from a FIFO queue with multiple message group IDs, SQS first attempts to + return as many messages with the same message group ID as possible. 
This allows other consumers to
+        process messages with a different message group ID. When you receive a message with a message group
+        ID, no more messages for the same message group ID are returned unless you delete the message, or it
+        becomes visible.
+        """
+        result = ReceiveMessageResult()
+
+        max_receive_count = self.max_receive_count
+        visibility_timeout = (
+            self.visibility_timeout if visibility_timeout is None else visibility_timeout
+        )
+
+        block = True if wait_time_seconds else False
+        timeout = wait_time_seconds or 0
+        start = time.time()
+
+        received_groups: Set[MessageGroup] = set()
+
+        # collect messages over potentially multiple groups
+        while True:
+            try:
+                group: MessageGroup = self.message_group_queue.get(block=block, timeout=timeout)
+            except Empty:
+                break
+
+            if group.empty():
+                # this can be the case if all messages in the group are still invisible or
+                # if all messages of a group have been processed.
+                # TODO: it should be blocking until at least one message is in the queue, but we don't
+                #  want to block the group
+                # TODO: check behavior in case it happens if all messages were removed from a group due to message
+                #  retention period.
+                timeout -= time.time() - start
+                if timeout < 0:
+                    timeout = 0
+                continue
+
+            self.inflight_groups.add(group)
+
+            received_groups.add(group)
+
+            if not poll_empty_queue:
+                block = False
+
+            # we lock the queue while accessing the groups to not get into races with re-queueing/deleting
+            with self.mutex:
+                # collect messages from the group until a continue/break condition is met
+                while True:
+                    try:
+                        message = group.pop()
+                    except IndexError:
+                        break
+
+                    if message.deleted:
+                        # this means the message was deleted with a receipt handle after its visibility
+                        # timeout expired and the message was re-queued in the meantime.
+                        continue
+
+                    # update message attributes
+                    message.receive_count += 1
+                    message.update_visibility_timeout(visibility_timeout)
+                    message.set_last_received(time.time())
+                    if message.first_received is None:
+                        message.first_received = message.last_received
+
+                    LOG.debug("de-queued message %s from fifo queue %s", message, self.arn)
+                    if max_receive_count and message.receive_count > max_receive_count:
+                        # the message needs to move to the DLQ
+                        LOG.debug(
+                            "message %s has been received %d times, marking it for DLQ",
+                            message,
+                            message.receive_count,
+                        )
+                        result.dead_letter_messages.append(message)
+                    else:
+                        result.successful.append(message)
+                        message.increment_approximate_receive_count()
+
+                        # now we can break the inner loop
+                        if len(result.successful) == num_messages:
+                            break
+
+            # but we also need to check the condition to return from the outer loop
+            if len(result.successful) == num_messages:
+                break
+
+        # now process the successful result messages: create receipt handles and manage visibility.
+        # we use the mutex again because we are modifying the group
+        with self.mutex:
+            for message in result.successful:
+                # manage receipt handle
+                receipt_handle = self.create_receipt_handle(message)
+                message.receipt_handles.add(receipt_handle)
+                self.receipts[receipt_handle] = message
+                result.receipt_handles.append(receipt_handle)
+
+                # manage message visibility
+                if message.visibility_timeout == 0:
+                    self._put_message(message)
+                else:
+                    self.inflight.add(message)
+
+        return result
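+
+    # Illustrative lifecycle of the group locking in receive() (assuming two groups "A" and "B"):
+    #   1. receive() pops group "A" from message_group_queue and adds it to inflight_groups
+    #   2. a concurrent receive() can only get messages from group "B" while "A" is in flight
+    #   3. once the last in-flight message of "A" is deleted or becomes visible again,
+    #      update_message_group_visibility() re-queues "A"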
+
+    def _on_remove_message(self, message: SqsMessage):
+        # if a message is deleted from the queue, the message's group can become visible again
+        message_group = self.get_message_group(message.message_group_id)
+
+        with self.mutex:
+            try:
+                self.inflight.remove(message)
+            except KeyError:
+                # in FIFO queues, this should not happen, as expired receipt handles cannot be used to
+                # delete a message.
+                pass
+            self.update_message_group_visibility(message_group)
+
+    def update_message_group_visibility(self, message_group: MessageGroup):
+        """
+        Check if the passed message group should be made visible again
+        """
+
+        with self.mutex:
+            if message_group in self.inflight_groups:
+                # it becomes visible again only if there are no other in-flight messages in that group
+                for message in self.inflight:
+                    if message.message_group_id == message_group.message_group_id:
+                        return
+
+                self.inflight_groups.remove(message_group)
+                if not message_group.empty():
+                    self.message_group_queue.put_nowait(message_group)
+
+    def _assert_queue_name(self, name):
+        if not name.endswith(".fifo"):
+            raise InvalidParameterValueException(
+                "The name of a FIFO queue can only include alphanumeric characters, hyphens, or underscores, "
+                "must end with .fifo suffix and be 1 to 80 in length"
+            )
+        # The .fifo suffix counts towards the 80-character queue name quota.
+        queue_name = name[:-5] + "_fifo"
+        super()._assert_queue_name(queue_name)
+
+    def validate_queue_attributes(self, attributes):
+        valid = [
+            k[1]
+            for k in inspect.getmembers(
+                QueueAttributeName, lambda x: isinstance(x, str) and not x.startswith("__")
+            )
+            if k[1] not in sqs_constants.INTERNAL_QUEUE_ATTRIBUTES
+        ]
+        for k in attributes.keys():
+            if k not in valid:
+                raise InvalidAttributeName(f"Unknown Attribute {k}.")
+        # Special Cases
+        fifo = attributes.get(QueueAttributeName.FifoQueue)
+        if fifo and fifo.lower() != "true":
+            raise InvalidAttributeValue(
+                "Invalid value for the parameter FifoQueue. Reason: Modifying queue type is not supported."
+            )
+
+    def next_sequence_number(self):
+        return next(global_message_sequence())
+
+    def clear(self):
+        with self.mutex:
+            super().clear()
+            self.message_groups.clear()
+            self.inflight_groups.clear()
+            self.message_group_queue.queue.clear()
+            self.deduplication.clear()
+
+
+class SqsStore(BaseStore):
+    queues: Dict[str, SqsQueue] = LocalAttribute(default=dict)
+
+    deleted: Dict[str, float] = LocalAttribute(default=dict)
+
+    move_tasks: Dict[str, MessageMoveTask] = LocalAttribute(default=dict)
+    """Maps task IDs to their ``MessageMoveTask`` object.
Task IDs can be found by decoding a task handle.""" + + def expire_deleted(self): + for k in list(self.deleted.keys()): + if self.deleted[k] <= (time.time() - sqs_constants.RECENTLY_DELETED_TIMEOUT): + del self.deleted[k] + + +sqs_stores = AccountRegionBundle("sqs", SqsStore) diff --git a/localstack-core/localstack/services/sqs/provider.py b/localstack-core/localstack/services/sqs/provider.py new file mode 100644 index 0000000000000..10988383bd745 --- /dev/null +++ b/localstack-core/localstack/services/sqs/provider.py @@ -0,0 +1,1946 @@ +import copy +import hashlib +import json +import logging +import re +import threading +import time +from concurrent.futures.thread import ThreadPoolExecutor +from itertools import islice +from typing import Dict, Iterable, List, Literal, Optional, Tuple + +from botocore.utils import InvalidArnException +from moto.sqs.models import BINARY_TYPE_FIELD_INDEX, STRING_TYPE_FIELD_INDEX +from moto.sqs.models import Message as MotoMessage +from werkzeug import Request as WerkzeugRequest + +from localstack import config +from localstack.aws.api import CommonServiceException, RequestContext, ServiceException +from localstack.aws.api.sqs import ( + ActionNameList, + AttributeNameList, + AWSAccountIdList, + BatchEntryIdsNotDistinct, + BatchRequestTooLong, + BatchResultErrorEntry, + BoxedInteger, + CancelMessageMoveTaskResult, + ChangeMessageVisibilityBatchRequestEntryList, + ChangeMessageVisibilityBatchResult, + CreateQueueResult, + DeleteMessageBatchRequestEntryList, + DeleteMessageBatchResult, + DeleteMessageBatchResultEntry, + EmptyBatchRequest, + GetQueueAttributesResult, + GetQueueUrlResult, + InvalidAttributeName, + InvalidBatchEntryId, + InvalidMessageContents, + ListDeadLetterSourceQueuesResult, + ListMessageMoveTasksResult, + ListMessageMoveTasksResultEntry, + ListQueuesResult, + ListQueueTagsResult, + Message, + MessageAttributeNameList, + MessageBodyAttributeMap, + MessageBodySystemAttributeMap, + MessageSystemAttributeList, + MessageSystemAttributeName, + NullableInteger, + PurgeQueueInProgress, + QueueAttributeMap, + QueueAttributeName, + QueueDeletedRecently, + QueueDoesNotExist, + QueueNameExists, + ReceiveMessageResult, + ResourceNotFoundException, + SendMessageBatchRequestEntryList, + SendMessageBatchResult, + SendMessageBatchResultEntry, + SendMessageResult, + SqsApi, + StartMessageMoveTaskResult, + String, + TagKeyList, + TagMap, + Token, + TooManyEntriesInBatchRequest, +) +from localstack.aws.protocol.parser import create_parser +from localstack.aws.protocol.serializer import aws_response_serializer +from localstack.aws.spec import load_service +from localstack.config import SQS_DISABLE_MAX_NUMBER_OF_MESSAGE_LIMIT +from localstack.http import Request, route +from localstack.services.edge import ROUTER +from localstack.services.plugins import ServiceLifecycleHook +from localstack.services.sqs import constants as sqs_constants +from localstack.services.sqs.constants import ( + HEADER_LOCALSTACK_SQS_OVERRIDE_MESSAGE_COUNT, + HEADER_LOCALSTACK_SQS_OVERRIDE_WAIT_TIME_SECONDS, + MAX_RESULT_LIMIT, +) +from localstack.services.sqs.exceptions import ( + InvalidParameterValueException, + MissingRequiredParameterException, +) +from localstack.services.sqs.models import ( + FifoQueue, + MessageMoveTask, + MessageMoveTaskStatus, + SqsMessage, + SqsQueue, + SqsStore, + StandardQueue, + sqs_stores, +) +from localstack.services.sqs.utils import ( + decode_move_task_handle, + generate_message_id, + is_fifo_queue, + is_message_deduplication_id_required, + 
parse_queue_url, +) +from localstack.services.stores import AccountRegionBundle +from localstack.utils.aws.arns import parse_arn +from localstack.utils.aws.request_context import extract_region_from_headers +from localstack.utils.bootstrap import is_api_enabled +from localstack.utils.cloudwatch.cloudwatch_util import ( + SqsMetricBatchData, + publish_sqs_metric, + publish_sqs_metric_batch, +) +from localstack.utils.collections import PaginatedList +from localstack.utils.run import FuncThread +from localstack.utils.scheduler import Scheduler +from localstack.utils.strings import md5, token_generator +from localstack.utils.threads import start_thread +from localstack.utils.time import now + +LOG = logging.getLogger(__name__) + +MAX_NUMBER_OF_MESSAGES = 10 +_STORE_LOCK = threading.RLock() + + +class InvalidAddress(ServiceException): + code = "InvalidAddress" + message = "The address https://queue.amazonaws.com/ is not valid for this endpoint." + sender_fault = True + status_code = 404 + + +def assert_queue_name(queue_name: str, fifo: bool = False): + if queue_name.endswith(".fifo"): + if not fifo: + # Standard queues with .fifo suffix are not allowed + raise InvalidParameterValueException( + "Can only include alphanumeric characters, hyphens, or underscores. 1 to 80 in length" + ) + # The .fifo suffix counts towards the 80-character queue name quota. + queue_name = queue_name[:-5] + "_fifo" + + # slashes are actually not allowed, but we've allowed it explicitly in localstack + if not re.match(r"^[a-zA-Z0-9/_-]{1,80}$", queue_name): + raise InvalidParameterValueException( + "Can only include alphanumeric characters, hyphens, or underscores. 1 to 80 in length" + ) + + +def check_message_min_size(message_body: str): + if _message_body_size(message_body) == 0: + raise MissingRequiredParameterException( + "The request must contain the parameter MessageBody." + ) + + +def check_message_max_size( + message_body: str, message_attributes: MessageBodyAttributeMap, max_message_size: int +): + # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html + error = "One or more parameters are invalid. " + error += f"Reason: Message must be shorter than {max_message_size} bytes." + if ( + _message_body_size(message_body) + _message_attributes_size(message_attributes) + > max_message_size + ): + raise InvalidParameterValueException(error) + + +def _message_body_size(body: str): + return _bytesize(body) + + +def _message_attributes_size(attributes: MessageBodyAttributeMap): + if not attributes: + return 0 + message_attributes_keys_size = sum(_bytesize(k) for k in attributes.keys()) + message_attributes_values_size = sum( + sum(_bytesize(v) for v in attr.values()) for attr in attributes.values() + ) + return message_attributes_keys_size + message_attributes_values_size + + +def _bytesize(value: str | bytes): + # must encode as utf8 to get correct bytes with len + return len(value.encode("utf8")) if isinstance(value, str) else len(value) + + +def check_message_content(message_body: str): + error = "Invalid characters found. 
Valid unicode characters are #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF"
+
+    if not re.match(sqs_constants.MSG_CONTENT_REGEX, message_body):
+        raise InvalidMessageContents(error)
+
+
+class CloudwatchDispatcher:
+    """
+    Dispatches SQS metrics for specific API calls using a thread pool
+    """
+
+    def __init__(self, num_thread: int = 3):
+        self.executor = ThreadPoolExecutor(
+            num_thread, thread_name_prefix="sqs-metrics-cloudwatch-dispatcher"
+        )
+        self.running = True
+
+    def shutdown(self):
+        self.executor.shutdown(wait=False, cancel_futures=True)
+        self.running = False
+
+    def dispatch_sqs_metric(
+        self,
+        account_id: str,
+        region: str,
+        queue_name: str,
+        metric: str,
+        value: float = 1,
+        unit: str = "Count",
+    ):
+        """
+        Publishes a metric to Cloudwatch using a thread pool
+
+        :param account_id: The account ID that should be used for the Cloudwatch client
+        :param region: The region that should be used for the Cloudwatch client
+        :param queue_name: The name of the queue that the metric belongs to
+        :param metric: The name of the metric
+        :param value: The value for that metric, default 1
+        :param unit: The unit for the value, default "Count"
+        """
+        if not self.running:
+            return
+
+        self.executor.submit(
+            publish_sqs_metric,
+            account_id=account_id,
+            region=region,
+            queue_name=queue_name,
+            metric=metric,
+            value=value,
+            unit=unit,
+        )
+
+    def dispatch_metric_message_sent(self, queue: SqsQueue, message_body_size: int):
+        """
+        Sends the metrics 'NumberOfMessagesSent' and 'SentMessageSize' to Cloudwatch
+
+        :param queue: The queue for which the metrics will be sent
+        :param message_body_size: the size of the message in bytes
+        """
+        self.dispatch_sqs_metric(
+            account_id=queue.account_id,
+            region=queue.region,
+            queue_name=queue.name,
+            metric="NumberOfMessagesSent",
+        )
+        self.dispatch_sqs_metric(
+            account_id=queue.account_id,
+            region=queue.region,
+            queue_name=queue.name,
+            metric="SentMessageSize",
+            value=message_body_size,
+            unit="Bytes",
+        )
+
+    def dispatch_metric_message_deleted(self, queue: SqsQueue, deleted: int = 1):
+        """
+        Sends the metric 'NumberOfMessagesDeleted' to Cloudwatch
+
+        :param queue: The queue for which the metric will be sent
+        :param deleted: The number of messages that were successfully deleted, default: 1
+        """
+        self.dispatch_sqs_metric(
+            account_id=queue.account_id,
+            region=queue.region,
+            queue_name=queue.name,
+            metric="NumberOfMessagesDeleted",
+            value=deleted,
+        )
+
+    def dispatch_metric_received(self, queue: SqsQueue, received: int):
+        """
+        Sends the metric 'NumberOfMessagesReceived' (if received > 0), or 'NumberOfEmptyReceives' to Cloudwatch
+
+        :param queue: The queue for which the metric will be sent
+        :param received: The number of messages that have been received
+        """
+        if received > 0:
+            self.dispatch_sqs_metric(
+                account_id=queue.account_id,
+                region=queue.region,
+                queue_name=queue.name,
+                metric="NumberOfMessagesReceived",
+                value=received,
+            )
+        else:
+            self.dispatch_sqs_metric(
+                account_id=queue.account_id,
+                region=queue.region,
+                queue_name=queue.name,
+                metric="NumberOfEmptyReceives",
+            )
+
+
+class CloudwatchPublishWorker:
+    """
+    Regularly publish metrics data about approximate messages to Cloudwatch.
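+    The publish interval is controlled by ``config.SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL``.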
+ Includes: ApproximateNumberOfMessagesVisible, ApproximateNumberOfMessagesNotVisible + and ApproximateNumberOfMessagesDelayed + """ + + def __init__(self) -> None: + super().__init__() + self.scheduler = Scheduler() + self.thread: Optional[FuncThread] = None + + def publish_approximate_cloudwatch_metrics(self): + """Publishes the metrics for ApproximateNumberOfMessagesVisible, ApproximateNumberOfMessagesNotVisible + and ApproximateNumberOfMessagesDelayed to CloudWatch""" + # TODO ApproximateAgeOfOldestMessage is missing + # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-available-cloudwatch-metrics.html + + for account_id, region, store in sqs_stores.iter_stores(): + start = 0 + # we can include up to 1000 metric queries for one put-metric-data call + # and we currently include 3 metrics per queue + batch_size = 300 + + while start < len(store.queues): + batch_data = [] + # Process the current batch + for queue in islice(store.queues.values(), start, start + batch_size): + batch_data.append( + SqsMetricBatchData( + QueueName=queue.name, + MetricName="ApproximateNumberOfMessagesVisible", + Value=queue.approx_number_of_messages, + ) + ) + batch_data.append( + SqsMetricBatchData( + QueueName=queue.name, + MetricName="ApproximateNumberOfMessagesNotVisible", + Value=queue.approx_number_of_messages_not_visible, + ) + ) + batch_data.append( + SqsMetricBatchData( + QueueName=queue.name, + MetricName="ApproximateNumberOfMessagesDelayed", + Value=queue.approx_number_of_messages_delayed, + ) + ) + + publish_sqs_metric_batch( + account_id=account_id, region=region, sqs_metric_batch_data=batch_data + ) + # Update for the next batch + start += batch_size + + def start(self): + if self.thread: + return + + self.scheduler = Scheduler() + self.scheduler.schedule( + self.publish_approximate_cloudwatch_metrics, + period=config.SQS_CLOUDWATCH_METRICS_REPORT_INTERVAL, + ) + + def _run(*_args): + self.scheduler.run() + + self.thread = start_thread(_run, name="sqs-approx-metrics-cloudwatch-publisher") + + def stop(self): + if self.scheduler: + self.scheduler.close() + + if self.thread: + self.thread.stop() + + self.thread = None + self.scheduler = None + + +class QueueUpdateWorker: + """ + Regularly re-queues inflight and delayed messages whose visibility timeout has expired or delay deadline has been + reached. 
+ """ + + def __init__(self) -> None: + super().__init__() + self.scheduler = Scheduler() + self.thread: Optional[FuncThread] = None + self.mutex = threading.RLock() + + def iter_queues(self) -> Iterable[SqsQueue]: + for account_id, region, store in sqs_stores.iter_stores(): + for queue in store.queues.values(): + yield queue + + def do_update_all_queues(self): + for queue in self.iter_queues(): + try: + queue.requeue_inflight_messages() + except Exception: + LOG.exception("error re-queueing inflight messages") + + try: + queue.enqueue_delayed_messages() + except Exception: + LOG.exception("error enqueueing delayed messages") + + if config.SQS_ENABLE_MESSAGE_RETENTION_PERIOD: + try: + queue.remove_expired_messages() + except Exception: + LOG.exception("error removing expired messages") + + def start(self): + with self.mutex: + if self.thread: + return + + self.scheduler = Scheduler() + self.scheduler.schedule(self.do_update_all_queues, period=1) + + def _run(*_args): + self.scheduler.run() + + self.thread = start_thread(_run, name="sqs-queue-update-worker") + + def stop(self): + with self.mutex: + if self.scheduler: + self.scheduler.close() + + if self.thread: + self.thread.stop() + + self.thread = None + self.scheduler = None + + +class MessageMoveTaskManager: + """ + Manages and runs MessageMoveTasks. + + TODO: we should check how AWS really moves messages internally: do they use the API? + it's hard to know how AWS really does moving of messages. there are a number of things we could do + to understand it better, including creating a DLQ chain and letting move tasks fail to see whether + move tasks cause message consuming and create receipt handles. for now, we're doing a middle-layer + transactional move, foregoing the API layer but using receipt handles and transactions. 
+ + TODO: restoring move tasks from persistence doesn't work, may be a fringe case though + + TODO: re-drive into multiple original source queues + """ + + def __init__(self, stores: AccountRegionBundle[SqsStore] = None) -> None: + self.stores = stores or sqs_stores + self.mutex = threading.RLock() + self.move_tasks: dict[str, MessageMoveTask] = dict() + self.executor = ThreadPoolExecutor(max_workers=100, thread_name_prefix="sqs-move-message") + + def submit(self, move_task: MessageMoveTask): + with self.mutex: + try: + source_queue = self._get_queue_by_arn(move_task.source_arn) + move_task.approximate_number_of_messages_to_move = ( + source_queue.approx_number_of_messages + ) + move_task.approximate_number_of_messages_moved = 0 + move_task.mark_started() + self.move_tasks[move_task.task_id] = move_task + self.executor.submit(self._run, move_task) + except Exception as e: + self._fail_task(move_task, e) + raise + + def cancel(self, move_task: MessageMoveTask): + with self.mutex: + move_task.status = MessageMoveTaskStatus.CANCELLING + move_task.cancel_event.set() + + def close(self): + with self.mutex: + for move_task in self.move_tasks.values(): + move_task.cancel_event.set() + + self.executor.shutdown(wait=False, cancel_futures=True) + + def _run(self, move_task: MessageMoveTask): + try: + if move_task.destination_arn: + LOG.info( + "Move task started %s (%s -> %s)", + move_task.task_id, + move_task.source_arn, + move_task.destination_arn, + ) + else: + LOG.info( + "Move task started %s (%s -> original sources)", + move_task.task_id, + move_task.source_arn, + ) + + while not move_task.cancel_event.is_set(): + # look up queues for every message in case they are removed + source_queue = self._get_queue_by_arn(move_task.source_arn) + + receive_result = source_queue.receive(num_messages=1, visibility_timeout=1) + + if receive_result.dead_letter_messages: + raise NotImplementedError("Cannot deal with DLQ chains in move tasks") + + if not receive_result.successful: + # queue empty, task done + break + + message = receive_result.successful[0] + receipt_handle = receive_result.receipt_handles[0] + + if move_task.destination_arn: + target_queue = self._get_queue_by_arn(move_task.destination_arn) + else: + # we assume that dead_letter_source_arn is set since the message comes from a DLQ + target_queue = self._get_queue_by_arn(message.dead_letter_queue_source_arn) + + target_queue.put( + message=message.message, + message_group_id=message.message_group_id, + message_deduplication_id=message.message_deduplication_id, + ) + source_queue.remove(receipt_handle) + move_task.approximate_number_of_messages_moved += 1 + + if rate := move_task.max_number_of_messages_per_second: + move_task.cancel_event.wait(timeout=(1 / rate)) + + except Exception as e: + self._fail_task(move_task, e) + else: + if move_task.cancel_event.is_set(): + LOG.info("Move task cancelled %s", move_task.task_id) + move_task.status = MessageMoveTaskStatus.CANCELLED + else: + LOG.info("Move task completed successfully %s", move_task.task_id) + move_task.status = MessageMoveTaskStatus.COMPLETED + + def _get_queue_by_arn(self, queue_arn: str) -> SqsQueue: + arn = parse_arn(queue_arn) + return SqsProvider._require_queue(arn["account"], arn["region"], arn["resource"]) + + def _fail_task(self, task: MessageMoveTask, reason: Exception): + """ + Marks a given task as failed due to the given reason. 
+
+        :param task: the task to mark as failed
+        :param reason: the failure reason
+        """
+        LOG.info(
+            "Exception occurred during move task %s: %s",
+            task.task_id,
+            reason,
+            exc_info=LOG.isEnabledFor(logging.DEBUG),
+        )
+        task.status = MessageMoveTaskStatus.FAILED
+        if isinstance(reason, ServiceException):
+            task.failure_reason = reason.code
+        else:
+            task.failure_reason = reason.__class__.__name__
+
+
+def check_attributes(message_attributes: MessageBodyAttributeMap):
+    if not message_attributes:
+        return
+    for attribute_name in message_attributes:
+        if len(attribute_name) >= 256:
+            raise InvalidParameterValueException(
+                "Message (user) attribute names must be shorter than 256 Bytes"
+            )
+        if not re.match(sqs_constants.ATTR_NAME_CHAR_REGEX, attribute_name.lower()):
+            raise InvalidParameterValueException(
+                "Message (user) attribute names can only contain upper and lower case characters, digits, periods, "
+                "hyphens and underscores."
+            )
+        if not re.match(sqs_constants.ATTR_NAME_PREFIX_SUFFIX_REGEX, attribute_name.lower()):
+            raise InvalidParameterValueException(
+                "You can't use message attribute names beginning with 'AWS.' or 'Amazon.'. "
+                "These strings are reserved for internal use. Additionally, they cannot start or end with '.'."
+            )
+
+        attribute = message_attributes[attribute_name]
+        attribute_type = attribute.get("DataType")
+        if not attribute_type:
+            raise InvalidParameterValueException("Missing required parameter DataType")
+        if not re.match(sqs_constants.ATTR_TYPE_REGEX, attribute_type):
+            raise InvalidParameterValueException(
+                f"Type for parameter MessageAttributes.Attribute_name.DataType must be prefixed "
+                f'with "String", "Binary", or "Number", but was: {attribute_type}'
+            )
+        if len(attribute_type) >= 256:
+            raise InvalidParameterValueException(
+                "Message (user) attribute types must be shorter than 256 Bytes"
+            )
+
+        if attribute_type == "String":
+            try:
+                attribute_value = attribute.get("StringValue")
+
+                if not attribute_value:
+                    raise InvalidParameterValueException(
+                        f"Message (user) attribute '{attribute_name}' must contain a non-empty value of type 'String'."
+                    )
+
+                check_message_content(attribute_value)
+            except InvalidMessageContents as e:
+                # AWS throws a different exception here
+                raise InvalidParameterValueException(e.args[0])
+
+
+def check_fifo_id(fifo_id, parameter):
+    if not fifo_id:
+        return
+    if len(fifo_id) > 128:
+        raise InvalidParameterValueException(
+            f"Value {fifo_id} for parameter {parameter} is invalid. Reason: {parameter} can only include alphanumeric and punctuation characters. 1 to 128 in length."
+        )
+    if not re.match(sqs_constants.FIFO_MSG_REGEX, fifo_id):
+        raise InvalidParameterValueException(
+            "Invalid characters found. Deduplication ID and group ID can only contain "
+            "alphanumeric and punctuation characters."
+        )
+
+
+def get_sqs_protocol(request: Request) -> Literal["query", "json"]:
+    content_type = request.headers.get("Content-Type")
+    return "json" if content_type == "application/x-amz-json-1.0" else "query"
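+
+
+# Example (illustrative): AWS SDK clients that speak the newer JSON protocol send
+# "Content-Type: application/x-amz-json-1.0" and are answered in kind; all other
+# clients fall back to the legacy query protocol.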
+
+
+def sqs_auto_protocol_aws_response_serializer(service_name: str, operation: str):
+    def _decorate(fn):
+        def _proxy(*args, **kwargs):
+            # extract request from function invocation (decorator can be used for methods as well as for functions)
+            if len(args) > 0 and isinstance(args[0], WerkzeugRequest):
+                # function
+                request = args[0]
+            elif len(args) > 1 and isinstance(args[1], WerkzeugRequest):
+                # method (arg[0] == self)
+                request = args[1]
+            elif "request" in kwargs:
+                request = kwargs["request"]
+            else:
+                raise ValueError(f"could not find Request in signature of function {fn}")
+
+            protocol = get_sqs_protocol(request)
+            return aws_response_serializer(service_name, operation, protocol)(fn)(*args, **kwargs)
+
+        return _proxy
+
+    return _decorate
+
+
+class SqsDeveloperEndpoints:
+    """
+    A set of SQS developer tool endpoints:
+
+    - ``/_aws/sqs/messages``: list SQS messages without side effects, compatible with ``ReceiveMessage``.
+    """
+
+    def __init__(self, stores=None):
+        self.stores = stores or sqs_stores
+
+    @route("/_aws/sqs/messages", methods=["GET", "POST"])
+    @sqs_auto_protocol_aws_response_serializer("sqs", "ReceiveMessage")
+    def list_messages(self, request: Request) -> ReceiveMessageResult:
+        """
+        This endpoint expects a ``QueueUrl`` request parameter (either as query arg or form parameter), similar to
+        the ``ReceiveMessage`` operation. It will parse the Queue URL generated by one of the SQS endpoint strategies.
+        """
+
+        if "x-amz-" in request.mimetype or "x-www-form-urlencoded" in request.mimetype:
+            # only parse the request using a parser if it comes from an AWS client
+            protocol = get_sqs_protocol(request)
+            operation, service_request = create_parser(
+                load_service("sqs", protocol=protocol)
+            ).parse(request)
+            if operation.name != "ReceiveMessage":
+                raise CommonServiceException(
+                    "InvalidRequest", "This endpoint only accepts ReceiveMessage calls"
+                )
+        else:
+            service_request = dict(request.values)
+
+        if not service_request.get("QueueUrl"):
+            raise QueueDoesNotExist()
+
+        try:
+            account_id, region, queue_name = parse_queue_url(service_request.get("QueueUrl"))
+        except ValueError:
+            LOG.exception(
+                "Error while parsing Queue URL from request values: %s",
+                service_request.get("QueueUrl"),
+            )
+            raise InvalidAddress()
+
+        if not region:
+            region = extract_region_from_headers(request.headers)
+
+        return self._get_and_serialize_messages(request, region, account_id, queue_name)
+
+    @route("/_aws/sqs/messages/<region>/<account_id>/<queue_name>")
+    @sqs_auto_protocol_aws_response_serializer("sqs", "ReceiveMessage")
+    def list_messages_for_queue_url(
+        self, request: Request, region: str, account_id: str, queue_name: str
+    ) -> ReceiveMessageResult:
+        """
+        This endpoint extracts the region, account_id, and queue_name directly from the URL rather than requiring the
+        QueueUrl as parameter.
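+
+        For example (illustrative, using LocalStack's default edge port and account):
+        ``GET http://localhost:4566/_aws/sqs/messages/us-east-1/000000000000/my-queue``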
+ """ + return self._get_and_serialize_messages(request, region, account_id, queue_name) + + def _get_and_serialize_messages( + self, + request: Request, + region: str, + account_id: str, + queue_name: str, + ) -> ReceiveMessageResult: + show_invisible = request.values.get("ShowInvisible", "").lower() in ["true", "1"] + show_delayed = request.values.get("ShowDelayed", "").lower() in ["true", "1"] + + try: + store = SqsProvider.get_store(account_id, region) + queue = store.queues[queue_name] + except KeyError: + LOG.info( + "no queue named %s in region %s and account %s", queue_name, region, account_id + ) + raise QueueDoesNotExist() + + messages = self._collect_messages( + queue, show_invisible=show_invisible, show_delayed=show_delayed + ) + + return ReceiveMessageResult(Messages=messages) + + def _collect_messages( + self, queue: SqsQueue, show_invisible: bool = False, show_delayed: bool = False + ) -> List[Message]: + """ + Retrieves from a given SqsQueue all visible messages without causing any side effects (not setting any + receive timestamps, receive counts, or visibility state). + + :param queue: the queue + :param show_invisible: show invisible messages as well + :param show_delayed: show delayed messages as well + :return: a list of messages + """ + receipt_handle = "SQS/BACKDOOR/ACCESS" # dummy receipt handle + + sqs_messages: List[SqsMessage] = [] + + if show_invisible: + sqs_messages.extend(queue.inflight) + + if isinstance(queue, StandardQueue): + sqs_messages.extend(queue.visible.queue) + elif isinstance(queue, FifoQueue): + for message_group in queue.message_groups.values(): + for sqs_message in message_group.messages: + sqs_messages.append(sqs_message) + else: + raise ValueError(f"unknown queue type {type(queue)}") + + if show_delayed: + sqs_messages.extend(queue.delayed) + + messages = [] + + for sqs_message in sqs_messages: + message: Message = to_sqs_api_message(sqs_message, [QueueAttributeName.All], ["All"]) + # these are all non-standard fields so we squelch the linter + if show_invisible: + message["Attributes"]["IsVisible"] = str(sqs_message.is_visible).lower() # noqa + if show_delayed: + message["Attributes"]["IsDelayed"] = str(sqs_message.is_delayed).lower() # noqa + messages.append(message) + message["ReceiptHandle"] = receipt_handle + + return messages + + +class SqsProvider(SqsApi, ServiceLifecycleHook): + """ + LocalStack SQS Provider. + + LIMITATIONS: + - Pagination of results (NextToken) + - Delivery guarantees + - The region is not encoded in the queue URL + + CROSS-ACCOUNT ACCESS: + LocalStack permits cross-account access for all operations. 
However, AWS
+    disallows the same for the following operations:
+    - AddPermission
+    - CreateQueue
+    - DeleteQueue
+    - ListQueues
+    - ListQueueTags
+    - RemovePermission
+    - SetQueueAttributes
+    - TagQueue
+    - UntagQueue
+    """
+
+    queues: Dict[str, SqsQueue]
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._queue_update_worker = QueueUpdateWorker()
+        self._message_move_task_manager = MessageMoveTaskManager()
+        self._router_rules = []
+        self._init_cloudwatch_metrics_reporting()
+
+    @staticmethod
+    def get_store(account_id: str, region: str) -> SqsStore:
+        return sqs_stores[account_id][region]
+
+    def on_before_start(self):
+        self._router_rules = ROUTER.add(SqsDeveloperEndpoints())
+        self._queue_update_worker.start()
+        self._start_cloudwatch_metrics_reporting()
+
+    def on_before_stop(self):
+        ROUTER.remove(self._router_rules)
+
+        self._queue_update_worker.stop()
+        self._message_move_task_manager.close()
+        for _, _, store in sqs_stores.iter_stores():
+            for queue in store.queues.values():
+                queue.shutdown()
+
+        self._stop_cloudwatch_metrics_reporting()
+
+    @staticmethod
+    def _require_queue(
+        account_id: str, region_name: str, name: str, is_query: bool = False
+    ) -> SqsQueue:
+        """
+        Returns the queue for the given name, or raises QueueDoesNotExist if it does not exist.
+
+        :param account_id: the account ID to look in
+        :param region_name: the region to look in
+        :param name: the name to look for
+        :param is_query: whether the request is using query protocol (error message is different)
+        :returns: the queue
+        :raises QueueDoesNotExist: if the queue does not exist
+        """
+        store = SqsProvider.get_store(account_id, region_name)
+        with _STORE_LOCK:
+            if name not in store.queues:
+                if is_query:
+                    message = "The specified queue does not exist for this wsdl version."
+                else:
+                    message = "The specified queue does not exist."
+                raise QueueDoesNotExist(message)
+
+            return store.queues[name]
+
+    def _require_queue_by_arn(self, context: RequestContext, queue_arn: str) -> SqsQueue:
+        arn = parse_arn(queue_arn)
+        return self._require_queue(
+            arn["account"],
+            arn["region"],
+            arn["resource"],
+            is_query=context.service.protocol == "query",
+        )
+
+    def _resolve_queue(
+        self,
+        context: RequestContext,
+        queue_name: Optional[str] = None,
+        queue_url: Optional[str] = None,
+    ) -> SqsQueue:
+        """
+        Determines the name of the queue from available information (request context, queue URL) to return the
+        respective queue, or raises QueueDoesNotExist if it does not exist.
+
+        :param context: the request context, used for getting region and account_id, and optionally the queue_url
+        :param queue_name: the queue name (if this is set, then this will be used for the key)
+        :param queue_url: the queue url (if name is not set, this will be used to determine the queue name)
+        :returns: the queue
+        :raises QueueDoesNotExist: if the queue does not exist
+        """
+        account_id, region_name, name = resolve_queue_location(context, queue_name, queue_url)
+        is_query = context.service.protocol == "query"
+        return self._require_queue(
+            account_id, region_name or context.region, name, is_query=is_query
+        )
+
+    def create_queue(
+        self,
+        context: RequestContext,
+        queue_name: String,
+        attributes: QueueAttributeMap = None,
+        tags: TagMap = None,
+        **kwargs,
+    ) -> CreateQueueResult:
+        fifo = attributes and (
+            attributes.get(QueueAttributeName.FifoQueue, "false").lower() == "true"
+        )
+
+        # Special Case TODO: why is an empty policy passed at all?
same in set_queue_attributes + if attributes and attributes.get(QueueAttributeName.Policy) == "": + del attributes[QueueAttributeName.Policy] + + store = self.get_store(context.account_id, context.region) + + with _STORE_LOCK: + if queue_name in store.queues: + queue = store.queues[queue_name] + + if attributes: + # if attributes are set, then we check whether the existing attributes match the passed ones + queue.validate_queue_attributes(attributes) + for k, v in attributes.items(): + if queue.attributes.get(k) != v: + LOG.debug( + "queue attribute values %s for queue %s do not match %s (existing) != %s (new)", + k, + queue_name, + queue.attributes.get(k), + v, + ) + raise QueueNameExists( + f"A queue already exists with the same name and a different value for attribute {k}" + ) + + return CreateQueueResult(QueueUrl=queue.url(context)) + + if config.SQS_DELAY_RECENTLY_DELETED: + deleted = store.deleted.get(queue_name) + if deleted and deleted > (time.time() - sqs_constants.RECENTLY_DELETED_TIMEOUT): + raise QueueDeletedRecently( + "You must wait 60 seconds after deleting a queue before you can create " + "another with the same name." + ) + store.expire_deleted() + + # create the appropriate queue + if fifo: + queue = FifoQueue(queue_name, context.region, context.account_id, attributes, tags) + else: + queue = StandardQueue( + queue_name, context.region, context.account_id, attributes, tags + ) + + LOG.debug("creating queue key=%s attributes=%s tags=%s", queue_name, attributes, tags) + + store.queues[queue_name] = queue + + return CreateQueueResult(QueueUrl=queue.url(context)) + + def get_queue_url( + self, + context: RequestContext, + queue_name: String, + queue_owner_aws_account_id: String = None, + **kwargs, + ) -> GetQueueUrlResult: + queue = self._require_queue( + queue_owner_aws_account_id or context.account_id, + context.region, + queue_name, + is_query=context.service.protocol == "query", + ) + + return GetQueueUrlResult(QueueUrl=queue.url(context)) + + def list_queues( + self, + context: RequestContext, + queue_name_prefix: String = None, + next_token: Token = None, + max_results: BoxedInteger = None, + **kwargs, + ) -> ListQueuesResult: + store = self.get_store(context.account_id, context.region) + + if queue_name_prefix: + urls = [ + queue.url(context) + for queue in store.queues.values() + if queue.name.startswith(queue_name_prefix) + ] + else: + urls = [queue.url(context) for queue in store.queues.values()] + + paginated_list = PaginatedList(urls) + + page_size = max_results if max_results else MAX_RESULT_LIMIT + paginated_urls, next_token = paginated_list.get_page( + token_generator=token_generator, next_token=next_token, page_size=page_size + ) + + if len(urls) == 0: + return ListQueuesResult() + + return ListQueuesResult(QueueUrls=paginated_urls, NextToken=next_token) + + def change_message_visibility( + self, + context: RequestContext, + queue_url: String, + receipt_handle: String, + visibility_timeout: NullableInteger, + **kwargs, + ) -> None: + queue = self._resolve_queue(context, queue_url=queue_url) + queue.update_visibility_timeout(receipt_handle, visibility_timeout) + + def change_message_visibility_batch( + self, + context: RequestContext, + queue_url: String, + entries: ChangeMessageVisibilityBatchRequestEntryList, + **kwargs, + ) -> ChangeMessageVisibilityBatchResult: + queue = self._resolve_queue(context, queue_url=queue_url) + + self._assert_batch(entries) + + successful = [] + failed = [] + + with queue.mutex: + for entry in entries: + try: + 
queue.update_visibility_timeout(
+                            entry["ReceiptHandle"], entry["VisibilityTimeout"]
+                        )
+                        successful.append({"Id": entry["Id"]})
+                    except Exception as e:
+                        failed.append(
+                            BatchResultErrorEntry(
+                                Id=entry["Id"],
+                                SenderFault=False,
+                                Code=e.__class__.__name__,
+                                Message=str(e),
+                            )
+                        )
+
+        return ChangeMessageVisibilityBatchResult(
+            Successful=successful,
+            Failed=failed,
+        )
+
+    def delete_queue(self, context: RequestContext, queue_url: String, **kwargs) -> None:
+        account_id, region, name = parse_queue_url(queue_url)
+        if region is None:
+            region = context.region
+
+        if account_id != context.account_id:
+            LOG.warning(
+                "Attempting a cross-account DeleteQueue operation (account from context: %s, "
+                "account from queue URL: %s), which is not allowed in AWS",
+                context.account_id,
+                account_id,
+            )
+
+        with _STORE_LOCK:
+            store = self.get_store(account_id, region)
+            queue = self._resolve_queue(context, queue_url=queue_url)
+            LOG.debug(
+                "deleting queue name=%s, region=%s, account=%s",
+                queue.name,
+                queue.region,
+                queue.account_id,
+            )
+            # Trigger a shutdown prior to removing the queue resource
+            store.queues[queue.name].shutdown()
+            del store.queues[queue.name]
+            store.deleted[queue.name] = time.time()
+
+    def get_queue_attributes(
+        self,
+        context: RequestContext,
+        queue_url: String,
+        attribute_names: AttributeNameList = None,
+        **kwargs,
+    ) -> GetQueueAttributesResult:
+        queue = self._resolve_queue(context, queue_url=queue_url)
+        result = queue.get_queue_attributes(attribute_names=attribute_names)
+
+        return GetQueueAttributesResult(Attributes=(result if result else None))
+
+    def send_message(
+        self,
+        context: RequestContext,
+        queue_url: String,
+        message_body: String,
+        delay_seconds: NullableInteger = None,
+        message_attributes: MessageBodyAttributeMap = None,
+        message_system_attributes: MessageBodySystemAttributeMap = None,
+        message_deduplication_id: String = None,
+        message_group_id: String = None,
+        **kwargs,
+    ) -> SendMessageResult:
+        queue = self._resolve_queue(context, queue_url=queue_url)
+
+        queue_item = self._put_message(
+            queue,
+            context,
+            message_body,
+            delay_seconds,
+            message_attributes,
+            message_system_attributes,
+            message_deduplication_id,
+            message_group_id,
+        )
+        message = queue_item.message
+        return SendMessageResult(
+            MessageId=message["MessageId"],
+            MD5OfMessageBody=message["MD5OfBody"],
+            MD5OfMessageAttributes=message.get("MD5OfMessageAttributes"),
+            SequenceNumber=queue_item.sequence_number,
+            MD5OfMessageSystemAttributes=_create_message_attribute_hash(message_system_attributes),
+        )
+
+    def send_message_batch(
+        self,
+        context: RequestContext,
+        queue_url: String,
+        entries: SendMessageBatchRequestEntryList,
+        **kwargs,
+    ) -> SendMessageBatchResult:
+        queue = self._resolve_queue(context, queue_url=queue_url)
+
+        self._assert_batch(
+            entries,
+            require_fifo_queue_params=is_fifo_queue(queue),
+            require_message_deduplication_id=is_message_deduplication_id_required(queue),
+        )
+        # check the total batch size first and raise BatchRequestTooLong if > DEFAULT_MAXIMUM_MESSAGE_SIZE.
+        # This is checked before any messages in the batch are sent.
Raising the exception here should + # cause error response, rather than batching error results and returning + self._assert_valid_batch_size(entries, sqs_constants.DEFAULT_MAXIMUM_MESSAGE_SIZE) + + successful = [] + failed = [] + + with queue.mutex: + for entry in entries: + try: + queue_item = self._put_message( + queue, + context, + message_body=entry.get("MessageBody"), + delay_seconds=entry.get("DelaySeconds"), + message_attributes=entry.get("MessageAttributes"), + message_system_attributes=entry.get("MessageSystemAttributes"), + message_deduplication_id=entry.get("MessageDeduplicationId"), + message_group_id=entry.get("MessageGroupId"), + ) + message = queue_item.message + + successful.append( + SendMessageBatchResultEntry( + Id=entry["Id"], + MessageId=message.get("MessageId"), + MD5OfMessageBody=message.get("MD5OfBody"), + MD5OfMessageAttributes=message.get("MD5OfMessageAttributes"), + MD5OfMessageSystemAttributes=_create_message_attribute_hash( + message.get("message_system_attributes") + ), + SequenceNumber=queue_item.sequence_number, + ) + ) + except ServiceException as e: + failed.append( + BatchResultErrorEntry( + Id=entry["Id"], + SenderFault=e.sender_fault, + Code=e.code, + Message=e.message, + ) + ) + except Exception as e: + failed.append( + BatchResultErrorEntry( + Id=entry["Id"], + SenderFault=False, + Code=e.__class__.__name__, + Message=str(e), + ) + ) + + return SendMessageBatchResult( + Successful=(successful if successful else None), + Failed=(failed if failed else None), + ) + + def _put_message( + self, + queue: SqsQueue, + context: RequestContext, + message_body: String, + delay_seconds: NullableInteger = None, + message_attributes: MessageBodyAttributeMap = None, + message_system_attributes: MessageBodySystemAttributeMap = None, + message_deduplication_id: String = None, + message_group_id: String = None, + ) -> SqsMessage: + check_message_min_size(message_body) + check_message_max_size(message_body, message_attributes, queue.maximum_message_size) + check_message_content(message_body) + check_attributes(message_attributes) + check_attributes(message_system_attributes) + check_fifo_id(message_deduplication_id, "MessageDeduplicationId") + check_fifo_id(message_group_id, "MessageGroupId") + + message = Message( + MessageId=generate_message_id(), + MD5OfBody=md5(message_body), + Body=message_body, + Attributes=self._create_message_attributes(context, message_system_attributes), + MD5OfMessageAttributes=_create_message_attribute_hash(message_attributes), + MessageAttributes=message_attributes, + ) + if self._cloudwatch_dispatcher: + self._cloudwatch_dispatcher.dispatch_metric_message_sent( + queue=queue, message_body_size=len(message_body.encode("utf-8")) + ) + + return queue.put( + message=message, + message_deduplication_id=message_deduplication_id, + message_group_id=message_group_id, + delay_seconds=int(delay_seconds) if delay_seconds is not None else None, + ) + + def receive_message( + self, + context: RequestContext, + queue_url: String, + attribute_names: AttributeNameList = None, + message_system_attribute_names: MessageSystemAttributeList = None, + message_attribute_names: MessageAttributeNameList = None, + max_number_of_messages: NullableInteger = None, + visibility_timeout: NullableInteger = None, + wait_time_seconds: NullableInteger = None, + receive_request_attempt_id: String = None, + **kwargs, + ) -> ReceiveMessageResult: + # TODO add support for message_system_attribute_names + queue = self._resolve_queue(context, queue_url=queue_url) + + 
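The `MD5OfMessageBody` returned by `send_message` above is simply the hex MD5 of the raw body (see `md5(message_body)` in `_put_message`), so clients can validate responses without any SQS-specific machinery. A minimal sketch of the same checksum (the helper name is ours, not part of this change):

```python
import hashlib

def md5_of_body(message_body: str) -> str:
    # mirrors the provider's md5(message_body): hex digest of the UTF-8 encoded body
    return hashlib.md5(message_body.encode("utf-8")).hexdigest()

# e.g. the well-known digest of "hello"
assert md5_of_body("hello") == "5d41402abc4b2a76b9719d911017c592"
```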
poll_empty_queue = False + if override := extract_wait_time_seconds_from_headers(context): + wait_time_seconds = override + poll_empty_queue = True + elif wait_time_seconds is None: + wait_time_seconds = queue.wait_time_seconds + elif wait_time_seconds < 0 or wait_time_seconds > 20: + raise InvalidParameterValueException( + f"Value {wait_time_seconds} for parameter WaitTimeSeconds is invalid. " + f"Reason: Must be >= 0 and <= 20, if provided." + ) + num = max_number_of_messages or 1 + + # override receive count with value from custom header + if override := extract_message_count_from_headers(context): + num = override + elif num == -1: + # backdoor to get all messages + num = queue.approx_number_of_messages + elif ( + num < 1 or num > MAX_NUMBER_OF_MESSAGES + ) and not SQS_DISABLE_MAX_NUMBER_OF_MESSAGE_LIMIT: + raise InvalidParameterValueException( + f"Value {num} for parameter MaxNumberOfMessages is invalid. " + f"Reason: Must be between 1 and 10, if provided." + ) + + # we chose to always return the maximum possible number of messages, even though AWS will typically return + # fewer messages than requested on small queues. at some point we could maybe change this to randomly sample + # between 1 and max_number_of_messages. + # see https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html + result = queue.receive( + num, wait_time_seconds, visibility_timeout, poll_empty_queue=poll_empty_queue + ) + + # process dead letter messages + if result.dead_letter_messages: + dead_letter_target_arn = queue.redrive_policy["deadLetterTargetArn"] + dl_queue = self._require_queue_by_arn(context, dead_letter_target_arn) + # TODO: does this need to be atomic? + for standard_message in result.dead_letter_messages: + message = standard_message.message + message["Attributes"][MessageSystemAttributeName.DeadLetterQueueSourceArn] = ( + queue.arn + ) + dl_queue.put( + message=message, + message_deduplication_id=standard_message.message_deduplication_id, + message_group_id=standard_message.message_group_id, + ) + + if isinstance(queue, FifoQueue): + message_group = queue.get_message_group(standard_message.message_group_id) + queue.update_message_group_visibility(message_group) + + # prepare result + messages = [] + message_system_attribute_names = message_system_attribute_names or attribute_names + for i, standard_message in enumerate(result.successful): + message = to_sqs_api_message( + standard_message, message_system_attribute_names, message_attribute_names + ) + message["ReceiptHandle"] = result.receipt_handles[i] + messages.append(message) + + if self._cloudwatch_dispatcher: + self._cloudwatch_dispatcher.dispatch_metric_received(queue, received=len(messages)) + + # TODO: how does receiving behave if the queue was deleted in the meantime? 
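For reference, the bounds enforced above match the public SQS limits: `WaitTimeSeconds` must be 0..20 and `MaxNumberOfMessages` 1..10 (unless `SQS_DISABLE_MAX_NUMBER_OF_MESSAGE_LIMIT` is set). A client-side sketch of long polling against a local endpoint (endpoint URL, dummy credentials, and queue name are illustrative assumptions):

```python
import boto3

sqs = boto3.client(
    "sqs",
    endpoint_url="http://localhost:4566",  # assumed LocalStack edge port
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)
queue_url = sqs.create_queue(QueueName="demo-queue")["QueueUrl"]

# long poll for up to 5 seconds, requesting the maximum batch of 10 messages
response = sqs.receive_message(
    QueueUrl=queue_url, WaitTimeSeconds=5, MaxNumberOfMessages=10
)
for message in response.get("Messages", []):
    print(message["MessageId"], message["Body"])
```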
+ return ReceiveMessageResult(Messages=(messages if messages else None)) + + def list_dead_letter_source_queues( + self, + context: RequestContext, + queue_url: String, + next_token: Token = None, + max_results: BoxedInteger = None, + **kwargs, + ) -> ListDeadLetterSourceQueuesResult: + urls = [] + store = self.get_store(context.account_id, context.region) + dead_letter_queue = self._resolve_queue(context, queue_url=queue_url) + for queue in store.queues.values(): + if policy := queue.redrive_policy: + if policy.get("deadLetterTargetArn") == dead_letter_queue.arn: + urls.append(queue.url(context)) + return ListDeadLetterSourceQueuesResult(queueUrls=urls) + + def delete_message( + self, context: RequestContext, queue_url: String, receipt_handle: String, **kwargs + ) -> None: + queue = self._resolve_queue(context, queue_url=queue_url) + queue.remove(receipt_handle) + if self._cloudwatch_dispatcher: + self._cloudwatch_dispatcher.dispatch_metric_message_deleted(queue) + + def delete_message_batch( + self, + context: RequestContext, + queue_url: String, + entries: DeleteMessageBatchRequestEntryList, + **kwargs, + ) -> DeleteMessageBatchResult: + queue = self._resolve_queue(context, queue_url=queue_url) + override = extract_message_count_from_headers(context) + self._assert_batch(entries, max_messages_override=override) + self._assert_valid_message_ids(entries) + + successful = [] + failed = [] + + with queue.mutex: + for entry in entries: + try: + queue.remove(entry["ReceiptHandle"]) + successful.append(DeleteMessageBatchResultEntry(Id=entry["Id"])) + except Exception as e: + failed.append( + BatchResultErrorEntry( + Id=entry["Id"], + SenderFault=False, + Code=e.__class__.__name__, + Message=str(e), + ) + ) + if self._cloudwatch_dispatcher: + self._cloudwatch_dispatcher.dispatch_metric_message_deleted( + queue, deleted=len(successful) + ) + + return DeleteMessageBatchResult( + Successful=successful, + Failed=failed, + ) + + def purge_queue(self, context: RequestContext, queue_url: String, **kwargs) -> None: + queue = self._resolve_queue(context, queue_url=queue_url) + + with queue.mutex: + if config.SQS_DELAY_PURGE_RETRY: + if queue.purge_timestamp and (queue.purge_timestamp + 60) > time.time(): + raise PurgeQueueInProgress( + f"Only one PurgeQueue operation on {queue.name} is allowed every 60 seconds.", + status_code=403, + ) + queue.purge_timestamp = time.time() + queue.clear() + + def set_queue_attributes( + self, context: RequestContext, queue_url: String, attributes: QueueAttributeMap, **kwargs + ) -> None: + queue = self._resolve_queue(context, queue_url=queue_url) + + if not attributes: + return + + queue.validate_queue_attributes(attributes) + + for k, v in attributes.items(): + if k in sqs_constants.INTERNAL_QUEUE_ATTRIBUTES: + raise InvalidAttributeName(f"Unknown Attribute {k}.") + queue.attributes[k] = v + + # Special cases + if queue.attributes.get(QueueAttributeName.Policy) == "": + del queue.attributes[QueueAttributeName.Policy] + + redrive_policy = queue.attributes.get(QueueAttributeName.RedrivePolicy) + if redrive_policy == "": + del queue.attributes[QueueAttributeName.RedrivePolicy] + return + + if redrive_policy: + _redrive_policy = json.loads(redrive_policy) + dl_target_arn = _redrive_policy.get("deadLetterTargetArn") + max_receive_count = _redrive_policy.get("maxReceiveCount") + # TODO: use the actual AWS responses + if not dl_target_arn: + raise InvalidParameterValueException( + "The required parameter 'deadLetterTargetArn' is missing" + ) + if max_receive_count is None: + 
raise InvalidParameterValueException(
+                    "The required parameter 'maxReceiveCount' is missing"
+                )
+            try:
+                max_receive_count = int(max_receive_count)
+                valid_count = 1 <= max_receive_count <= 1000
+            except ValueError:
+                valid_count = False
+            if not valid_count:
+                raise InvalidParameterValueException(
+                    f"Value {redrive_policy} for parameter RedrivePolicy is invalid. Reason: Invalid value for "
+                    f"maxReceiveCount: {max_receive_count}, valid values are from 1 to 1000 both inclusive."
+                )
+
+    def list_message_move_tasks(
+        self,
+        context: RequestContext,
+        source_arn: String,
+        max_results: NullableInteger = None,
+        **kwargs,
+    ) -> ListMessageMoveTasksResult:
+        try:
+            self._require_queue_by_arn(context, source_arn)
+        except InvalidArnException:
+            raise InvalidParameterValueException(
+                "You must use this format to specify the SourceArn: arn:<partition>:<service>:<region>:<account-id>:<resource-id>"
+            )
+        except QueueDoesNotExist:
+            raise ResourceNotFoundException(
+                "The resource that you specified for the SourceArn parameter doesn't exist."
+            )
+
+        # get the move tasks for the queue and sort them most-recent first
+        store = self.get_store(context.account_id, context.region)
+        tasks = [
+            move_task
+            for move_task in store.move_tasks.values()
+            if move_task.source_arn == source_arn
+            and move_task.status != MessageMoveTaskStatus.CREATED
+        ]
+        tasks.sort(key=lambda t: t.started_timestamp, reverse=True)
+
+        # convert to result list
+        n = max_results or 1
+        return ListMessageMoveTasksResult(
+            Results=[self._to_message_move_task_entry(task) for task in tasks[:n]]
+        )
+
+    def _to_message_move_task_entry(
+        self, entity: MessageMoveTask
+    ) -> ListMessageMoveTasksResultEntry:
+        """
+        Converts a ``MessageMoveTask`` entity into a ``ListMessageMoveTasksResultEntry`` API concept.
+
+        :param entity: the entity to convert
+        :return: the typed dict for use in the AWS API
+        """
+        entry = ListMessageMoveTasksResultEntry(
+            SourceArn=entity.source_arn,
+            DestinationArn=entity.destination_arn,
+            Status=entity.status,
+        )
+
+        if entity.status == "RUNNING":
+            entry["TaskHandle"] = entity.task_handle
+        if entity.started_timestamp is not None:
+            entry["StartedTimestamp"] = int(entity.started_timestamp.timestamp() * 1000)
+        if entity.max_number_of_messages_per_second is not None:
+            entry["MaxNumberOfMessagesPerSecond"] = entity.max_number_of_messages_per_second
+        if entity.approximate_number_of_messages_to_move is not None:
+            entry["ApproximateNumberOfMessagesToMove"] = (
+                entity.approximate_number_of_messages_to_move
+            )
+        if entity.approximate_number_of_messages_moved is not None:
+            entry["ApproximateNumberOfMessagesMoved"] = entity.approximate_number_of_messages_moved
+        if entity.failure_reason is not None:
+            entry["FailureReason"] = entity.failure_reason
+
+        return entry
+
+    def start_message_move_task(
+        self,
+        context: RequestContext,
+        source_arn: String,
+        destination_arn: String = None,
+        max_number_of_messages_per_second: NullableInteger = None,
+        **kwargs,
+    ) -> StartMessageMoveTaskResult:
+        try:
+            self._require_queue_by_arn(context, source_arn)
+        except QueueDoesNotExist as e:
+            raise ResourceNotFoundException(
+                "The resource that you specified for the SourceArn parameter doesn't exist.",
+                status_code=404,
+            ) from e
+
+        # check that the source queue is the dlq of some other queue
+        is_dlq = False
+        for _, _, store in sqs_stores.iter_stores():
+            for queue in store.queues.values():
+                if not queue.redrive_policy:
+                    continue
+                if queue.redrive_policy.get("deadLetterTargetArn") == source_arn:
+                    is_dlq = True
+                    break
+            if is_dlq:
+                break
+        if not is_dlq:
raise InvalidParameterValueException( + "Source queue must be configured as a Dead Letter Queue." + ) + + # If destination_arn is left blank, the messages will be redriven back to their respective original + # source queues. + if destination_arn: + try: + self._require_queue_by_arn(context, destination_arn) + except QueueDoesNotExist as e: + raise ResourceNotFoundException( + "The resource that you specified for the DestinationArn parameter doesn't exist.", + status_code=404, + ) from e + + # check that only one active task exists + with self._message_move_task_manager.mutex: + store = self.get_store(context.account_id, context.region) + tasks = [ + task + for task in store.move_tasks.values() + if task.source_arn == source_arn + and task.status + in [ + MessageMoveTaskStatus.CREATED, + MessageMoveTaskStatus.RUNNING, + MessageMoveTaskStatus.CANCELLING, + ] + ] + if len(tasks) > 0: + raise InvalidParameterValueException( + "There is already a task running. Only one active task is allowed for a source queue " + "arn at a given time." + ) + + task = MessageMoveTask( + source_arn, + destination_arn, + max_number_of_messages_per_second, + ) + store.move_tasks[task.task_id] = task + + self._message_move_task_manager.submit(task) + + return StartMessageMoveTaskResult(TaskHandle=task.task_handle) + + def cancel_message_move_task( + self, context: RequestContext, task_handle: String, **kwargs + ) -> CancelMessageMoveTaskResult: + try: + task_id, source_arn = decode_move_task_handle(task_handle) + except ValueError as e: + raise InvalidParameterValueException( + "Value for parameter TaskHandle is invalid." + ) from e + + try: + self._require_queue_by_arn(context, source_arn) + except QueueDoesNotExist as e: + raise ResourceNotFoundException( + "The resource that you specified for the SourceArn parameter doesn't exist.", + status_code=404, + ) from e + + store = self.get_store(context.account_id, context.region) + try: + move_task = store.move_tasks[task_id] + except KeyError: + raise ResourceNotFoundException("Task does not exist.", status_code=404) + + # TODO: what happens if move tasks are already cancelled? 
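Putting the operations above together from the client side: the source of a move task must already be referenced as a DLQ by some queue's redrive policy, otherwise `start_message_move_task` raises `InvalidParameterValueException`. A sketch under that assumption (the `sqs` client, `src_url`, and `dlq_arn` are placeholders):

```python
import json

# attach a redrive policy so dlq_arn is a valid move-task source; maxReceiveCount must be 1..1000
sqs.set_queue_attributes(
    QueueUrl=src_url,
    Attributes={
        "RedrivePolicy": json.dumps({"deadLetterTargetArn": dlq_arn, "maxReceiveCount": 3})
    },
)

# omitting DestinationArn redrives messages back to their original source queues
task_handle = sqs.start_message_move_task(SourceArn=dlq_arn)["TaskHandle"]

# a running task can be cancelled via its handle
sqs.cancel_message_move_task(TaskHandle=task_handle)
```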
+ + self._message_move_task_manager.cancel(move_task) + + return CancelMessageMoveTaskResult( + ApproximateNumberOfMessagesMoved=move_task.approximate_number_of_messages_moved, + ) + + def tag_queue(self, context: RequestContext, queue_url: String, tags: TagMap, **kwargs) -> None: + queue = self._resolve_queue(context, queue_url=queue_url) + + if not tags: + return + + for k, v in tags.items(): + queue.tags[k] = v + + def list_queue_tags( + self, context: RequestContext, queue_url: String, **kwargs + ) -> ListQueueTagsResult: + queue = self._resolve_queue(context, queue_url=queue_url) + return ListQueueTagsResult(Tags=(queue.tags if queue.tags else None)) + + def untag_queue( + self, context: RequestContext, queue_url: String, tag_keys: TagKeyList, **kwargs + ) -> None: + queue = self._resolve_queue(context, queue_url=queue_url) + + for k in tag_keys: + if k in queue.tags: + del queue.tags[k] + + def add_permission( + self, + context: RequestContext, + queue_url: String, + label: String, + aws_account_ids: AWSAccountIdList, + actions: ActionNameList, + **kwargs, + ) -> None: + queue = self._resolve_queue(context, queue_url=queue_url) + + self._validate_actions(actions) + + queue.add_permission(label=label, actions=actions, account_ids=aws_account_ids) + + def remove_permission( + self, context: RequestContext, queue_url: String, label: String, **kwargs + ) -> None: + queue = self._resolve_queue(context, queue_url=queue_url) + + queue.remove_permission(label=label) + + def _create_message_attributes( + self, + context: RequestContext, + message_system_attributes: MessageBodySystemAttributeMap = None, + ) -> Dict[MessageSystemAttributeName, str]: + result: Dict[MessageSystemAttributeName, str] = { + MessageSystemAttributeName.SenderId: context.account_id, # not the account ID in AWS + MessageSystemAttributeName.SentTimestamp: str(now(millis=True)), + } + + if message_system_attributes is not None: + for attr in message_system_attributes: + result[attr] = message_system_attributes[attr]["StringValue"] + + return result + + def _validate_actions(self, actions: ActionNameList): + service = load_service(service=self.service, version=self.version) + # FIXME: this is a bit of a heuristic as it will also include actions like "ListQueues" which is not + # associated with an action on a queue + valid = list(service.operation_names) + valid.append("*") + + for action in actions: + if action not in valid: + raise InvalidParameterValueException( + f"Value SQS:{action} for parameter ActionName is invalid. Reason: Please refer to the appropriate " + "WSDL for a list of valid actions. " + ) + + def _assert_batch( + self, + batch: List, + *, + require_fifo_queue_params: bool = False, + require_message_deduplication_id: bool = False, + max_messages_override: int | None = None, + ) -> None: + if not batch: + raise EmptyBatchRequest + + max_messages_per_batch = max_messages_override or MAX_NUMBER_OF_MESSAGES + if batch and (no_entries := len(batch)) > max_messages_per_batch: + raise TooManyEntriesInBatchRequest( + f"Maximum number of entries per request are {max_messages_per_batch}. You have sent {no_entries}." + ) + visited = set() + for entry in batch: + entry_id = entry["Id"] + if not re.search(r"^[\w-]+$", entry_id) or len(entry_id) > 80: + raise InvalidBatchEntryId( + "A batch entry id can only contain alphanumeric characters, hyphens and underscores. " + "It can be at most 80 letters long." 
+ ) + if require_message_deduplication_id and not entry.get("MessageDeduplicationId"): + raise InvalidParameterValueException( + "The queue should either have ContentBasedDeduplication enabled or " + "MessageDeduplicationId provided explicitly" + ) + if require_fifo_queue_params and not entry.get("MessageGroupId"): + raise InvalidParameterValueException( + "The request must contain the parameter MessageGroupId." + ) + if entry_id in visited: + raise BatchEntryIdsNotDistinct() + else: + visited.add(entry_id) + + def _assert_valid_batch_size(self, batch: List, max_message_size: int): + batch_message_size = sum( + _message_body_size(entry.get("MessageBody")) + + _message_attributes_size(entry.get("MessageAttributes")) + for entry in batch + ) + if batch_message_size > max_message_size: + error = f"Batch requests cannot be longer than {max_message_size} bytes." + error += f" You have sent {batch_message_size} bytes." + raise BatchRequestTooLong(error) + + def _assert_valid_message_ids(self, batch: List): + batch_id_regex = r"^[\w-]{1,80}$" + for message in batch: + if not re.match(batch_id_regex, message.get("Id", "")): + raise InvalidBatchEntryId( + "A batch entry id can only contain alphanumeric characters, " + "hyphens and underscores. It can be at most 80 letters long." + ) + + def _init_cloudwatch_metrics_reporting(self): + self.cloudwatch_disabled: bool = ( + config.SQS_DISABLE_CLOUDWATCH_METRICS or not is_api_enabled("cloudwatch") + ) + + self._cloudwatch_publish_worker = ( + None if self.cloudwatch_disabled else CloudwatchPublishWorker() + ) + self._cloudwatch_dispatcher = None if self.cloudwatch_disabled else CloudwatchDispatcher() + + def _start_cloudwatch_metrics_reporting(self): + if not self.cloudwatch_disabled: + self._cloudwatch_publish_worker.start() + + def _stop_cloudwatch_metrics_reporting(self): + if not self.cloudwatch_disabled: + self._cloudwatch_publish_worker.stop() + self._cloudwatch_dispatcher.shutdown() + + +# Method from moto's attribute_md5 of moto/sqs/models.py, separated from the Message Object +def _create_message_attribute_hash(message_attributes) -> Optional[str]: + # To avoid the need to check for dict conformity everytime we invoke this function + if not isinstance(message_attributes, dict): + return + hash = hashlib.md5() + + for attrName in sorted(message_attributes.keys()): + attr_value = message_attributes[attrName] + # Encode name + MotoMessage.update_binary_length_and_value(hash, MotoMessage.utf8(attrName)) + # Encode data type + MotoMessage.update_binary_length_and_value(hash, MotoMessage.utf8(attr_value["DataType"])) + # Encode transport type and value + if attr_value.get("StringValue"): + hash.update(bytearray([STRING_TYPE_FIELD_INDEX])) + MotoMessage.update_binary_length_and_value( + hash, MotoMessage.utf8(attr_value.get("StringValue")) + ) + elif attr_value.get("BinaryValue"): + hash.update(bytearray([BINARY_TYPE_FIELD_INDEX])) + decoded_binary_value = attr_value.get("BinaryValue") + MotoMessage.update_binary_length_and_value(hash, decoded_binary_value) + # string_list_value, binary_list_value type is not implemented, reserved for the future use. + # See https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_MessageAttributeValue.html + return hash.hexdigest() + + +def resolve_queue_location( + context: RequestContext, queue_name: Optional[str] = None, queue_url: Optional[str] = None +) -> Tuple[str, Optional[str], str]: + """ + Resolves a queue location from the given information. 
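The attribute hash computed by `_create_message_attribute_hash` above follows the documented SQS scheme: the attribute name and its data type are each length-prefixed before hashing, followed by a transport-type byte and the length-prefixed value. A standalone sketch for a single String attribute, assuming moto's encoding (4-byte big-endian length prefixes, string transport byte 1 as in `STRING_TYPE_FIELD_INDEX`):

```python
import hashlib
import struct

def md5_of_single_string_attribute(name: str, value: str) -> str:
    digest = hashlib.md5()
    for part in (name.encode("utf-8"), b"String"):  # attribute name, then DataType
        digest.update(struct.pack("!I", len(part)))  # 4-byte big-endian length prefix
        digest.update(part)
    digest.update(bytes([1]))  # transport type byte: 1 = string value
    encoded = value.encode("utf-8")
    digest.update(struct.pack("!I", len(encoded)))
    digest.update(encoded)
    return digest.hexdigest()
```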
+
+    :param context: the request context, used for getting region and account_id, and optionally the queue_url
+    :param queue_name: the queue name (if this is set, then this will be used for the key)
+    :param queue_url: the queue url (if name is not set, this will be used to determine the queue name)
+    :return: tuple of account id, region and queue_name
+    """
+    if not queue_name:
+        try:
+            if queue_url:
+                return parse_queue_url(queue_url)
+            else:
+                return parse_queue_url(context.request.base_url)
+        except ValueError:
+            # should work if queue name is passed in QueueUrl
+            return context.account_id, context.region, queue_url
+
+    return context.account_id, context.region, queue_name
+
+
+def to_sqs_api_message(
+    standard_message: SqsMessage,
+    attribute_names: AttributeNameList = None,
+    message_attribute_names: MessageAttributeNameList = None,
+) -> Message:
+    """
+    Utility function to convert an SQS message from LocalStack's internal representation to the AWS API
+    concept 'Message', which is the format returned by the ``ReceiveMessage`` operation.
+
+    :param standard_message: A LocalStack SQS message
+    :param attribute_names: the attribute name list to filter
+    :param message_attribute_names: the message attribute names to filter
+    :return: a copy of the original Message with updated message attributes and MD5 attribute hash sums
+    """
+    # prepare message for receiver
+    message = copy.deepcopy(standard_message.message)
+
+    # update system attributes of the message copy
+    message["Attributes"][MessageSystemAttributeName.ApproximateFirstReceiveTimestamp] = str(
+        int((standard_message.first_received or 0) * 1000)
+    )
+
+    # filter attributes for receiver
+    message_filter_attributes(message, attribute_names)
+    message_filter_message_attributes(message, message_attribute_names)
+    if message.get("MessageAttributes"):
+        message["MD5OfMessageAttributes"] = _create_message_attribute_hash(
+            message["MessageAttributes"]
+        )
+    else:
+        # delete the value that was computed when creating the message
+        message.pop("MD5OfMessageAttributes", None)
+    return message
+
+
+def message_filter_attributes(message: Message, names: Optional[AttributeNameList]):
+    """
+    Utility function to filter (in-place) the system attributes of the given message according to the given
+    name list. It applies the rules documented in:
+    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs.html#SQS.Client.receive_message.
+
+    :param message: The message to filter (it will be modified)
+    :param names: the attribute names/filters
+    """
+    if "Attributes" not in message:
+        return
+
+    if not names:
+        del message["Attributes"]
+        return
+
+    if QueueAttributeName.All in names:
+        return
+
+    for k in list(message["Attributes"].keys()):
+        if k not in names:
+            del message["Attributes"][k]
+
+
+def message_filter_message_attributes(message: Message, names: Optional[MessageAttributeNameList]):
+    """
+    Utility function to filter (in-place) the message attributes of the given message according to the given
+    name list. It applies the rules documented in:
+    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs.html#SQS.Client.receive_message.
+ + :param message: The message to filter (it will be modified) + :param names: the attributes names/filters (can be 'All', '.*', '*' or prefix filters like 'Foo.*') + """ + if not message.get("MessageAttributes"): + return + + if not names: + del message["MessageAttributes"] + return + + if "All" in names or ".*" in names or "*" in names: + return + + attributes = message["MessageAttributes"] + matched = [] + + keys = [name for name in names if ".*" not in name] + prefixes = [name.split(".*")[0] for name in names if ".*" in name] + + # match prefix filters + for k in attributes: + if k in keys: + matched.append(k) + continue + + for prefix in prefixes: + if k.startswith(prefix): + matched.append(k) + break + if matched: + message["MessageAttributes"] = {k: attributes[k] for k in matched} + else: + message.pop("MessageAttributes") + + +def extract_message_count_from_headers(context: RequestContext) -> int | None: + if override := context.request.headers.get( + HEADER_LOCALSTACK_SQS_OVERRIDE_MESSAGE_COUNT, default=None, type=int + ): + return override + + return None + + +def extract_wait_time_seconds_from_headers(context: RequestContext) -> int | None: + if override := context.request.headers.get( + HEADER_LOCALSTACK_SQS_OVERRIDE_WAIT_TIME_SECONDS, default=None, type=int + ): + return override + + return None diff --git a/localstack-core/localstack/services/sqs/query_api.py b/localstack-core/localstack/services/sqs/query_api.py new file mode 100644 index 0000000000000..6d5a33ee4bd5d --- /dev/null +++ b/localstack-core/localstack/services/sqs/query_api.py @@ -0,0 +1,226 @@ +"""The SQS Query API allows using Queue URLs as endpoints for operations on that queue. See: +https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html. 
This is a generic implementation that creates the respective AWS requests from Query API requests, and uses
+an aws_stack client to make the request."""
+
+import logging
+from typing import Dict, Optional, Tuple
+from urllib.parse import urlencode
+
+from botocore.exceptions import ClientError
+from botocore.model import OperationModel
+from werkzeug.datastructures import Headers
+from werkzeug.exceptions import NotFound
+
+from localstack.aws.api import CommonServiceException
+from localstack.aws.connect import connect_to
+from localstack.aws.protocol.parser import OperationNotFoundParserError, create_parser
+from localstack.aws.protocol.serializer import create_serializer
+from localstack.aws.protocol.validate import MissingRequiredField, validate_request
+from localstack.aws.spec import load_service
+from localstack.constants import (
+    AWS_REGION_US_EAST_1,
+    INTERNAL_AWS_ACCESS_KEY_ID,
+    INTERNAL_AWS_SECRET_ACCESS_KEY,
+)
+from localstack.http import Request, Response, Router, route
+from localstack.http.dispatcher import Handler
+from localstack.services.sqs.exceptions import MissingRequiredParameterException
+from localstack.utils.aws.request_context import (
+    extract_access_key_id_from_auth_header,
+    extract_region_from_headers,
+)
+from localstack.utils.strings import long_uid
+
+LOG = logging.getLogger(__name__)
+
+service = load_service("sqs-query")
+parser = create_parser(service)
+serializer = create_serializer(service)
+
+
+@route(
+    '/<regex("[0-9]{12}"):account_id>/<regex("[a-zA-Z0-9_-]+(\\.fifo)?"):queue_name>',
+    host='sqs.<regex("([a-z0-9-]+\\.)?"):region><regex("[^:]+"):domain><regex("(:[0-9]{2,5})?"):port>',
+    methods=["POST", "GET"],
+)
+def standard_strategy_handler(
+    request: Request,
+    account_id: str,
+    queue_name: str,
+    region: str = None,
+    domain: str = None,
+    port: int = None,
+):
+    """
+    Handler for modern-style endpoints which always have the region encoded.
+    See https://docs.aws.amazon.com/general/latest/gr/sqs-service.html
+    """
+    return handle_request(request, region.rstrip("."))
+
+
+@route(
+    '/queue/<regex("[a-z0-9-]+"):region>/<regex("[0-9]{12}"):account_id>/<regex("[a-zA-Z0-9_-]+(\\.fifo)?"):queue_name>',
+    methods=["POST", "GET"],
+)
+def path_strategy_handler(request: Request, region, account_id: str, queue_name: str):
+    return handle_request(request, region)
+
+
+@route(
+    '/<regex("[0-9]{12}"):account_id>/<regex("[a-zA-Z0-9_-]+(\\.fifo)?"):queue_name>',
+    host='<regex("([a-z0-9-]+\\.)?"):region>queue.<regex("[^:]+"):domain><regex("(:[0-9]{2,5})?"):port>',
+    methods=["POST", "GET"],
+)
+def domain_strategy_handler(
+    request: Request,
+    account_id: str,
+    queue_name: str,
+    region: str = None,
+    domain: str = None,
+    port: int = None,
+):
+    """Uses the endpoint host to extract the region. See:
+    https://docs.aws.amazon.com/general/latest/gr/sqs-service.html"""
+    if not region:
+        region = AWS_REGION_US_EAST_1
+    else:
+        region = region.rstrip(".")
+
+    return handle_request(request, region)
+
+
+@route(
+    '/<regex("[0-9]{12}"):account_id>/<regex("[a-zA-Z0-9_-]+(\\.fifo)?"):queue_name>',
+    methods=["POST", "GET"],
+)
+def legacy_handler(request: Request, account_id: str, queue_name: str) -> Response:
+    # previously, Queue URLs were created as http://localhost:4566/000000000000/my-queue-name. Because the region is
+    # ambiguous in this request, we fall back to the region that the request is coming from (this is not how AWS
+    # behaves though).
+    if "X-Amz-Credential" in request.args:
+        region = request.args["X-Amz-Credential"].split("/")[2]
+    else:
+        region = extract_region_from_headers(request.headers)
+
+    LOG.debug(
+        "Region of queue URL %s is ambiguous, got region %s from request", request.url, region
+    )
+
+    return handle_request(request, region)
+
+
+def register(router: Router[Handler]):
+    """
+    Registers the query API handlers into the given router. There are four routes, one for each SQS_ENDPOINT_STRATEGY.
+
+    :param router: the router to add the handlers into.
+ """ + router.add(standard_strategy_handler) + router.add(path_strategy_handler) + router.add(domain_strategy_handler) + router.add(legacy_handler) + + +class UnknownOperationException(Exception): + pass + + +class InvalidAction(CommonServiceException): + def __init__(self, action: str): + super().__init__( + "InvalidAction", + f"The action {action} is not valid for this endpoint.", + 400, + sender_fault=True, + ) + + +class BotoException(CommonServiceException): + def __init__(self, boto_response): + error = boto_response["Error"] + super().__init__( + code=error.get("Code", "UnknownError"), + status_code=boto_response["ResponseMetadata"]["HTTPStatusCode"], + message=error.get("Message", ""), + sender_fault=error.get("Type", "Sender") == "Sender", + ) + + +def handle_request(request: Request, region: str) -> Response: + # some SDK (PHP) still send requests to the Queue URL even though the JSON spec does not allow it in the + # documentation. If the request is `json`, raise `NotFound` so that we continue the handler chain and the provider + # can handle the request + if request.headers.get("Content-Type", "").lower() == "application/x-amz-json-1.0": + raise NotFound + + request_id = long_uid() + + try: + response, operation = try_call_sqs(request, region) + del response["ResponseMetadata"] + return serializer.serialize_to_response(response, operation, request.headers, request_id) + except UnknownOperationException: + return Response("", 404) + except CommonServiceException as e: + # use a dummy operation for the serialization to work + op = service.operation_model(service.operation_names[0]) + return serializer.serialize_error_to_response(e, op, request.headers, request_id) + except Exception as e: + LOG.exception("exception") + op = service.operation_model(service.operation_names[0]) + return serializer.serialize_error_to_response( + CommonServiceException( + "InternalError", f"An internal error occurred: {e}", status_code=500 + ), + op, + request.headers, + request_id, + ) + + +def try_call_sqs(request: Request, region: str) -> Tuple[Dict, OperationModel]: + action = request.values.get("Action") + if not action: + raise UnknownOperationException() + + if action in ["ListQueues", "CreateQueue"]: + raise InvalidAction(action) + + # prepare aws request for the SQS query protocol (POST request with action url-encoded in the body) + params = {"QueueUrl": request.base_url} + # if a QueueUrl is already set in the body, it should overwrite the one in the URL. this behavior is validated + # against AWS (see TestSqsQueryApi) + params.update(request.values) + body = urlencode(params) + + try: + headers = Headers(request.headers) + headers["Content-Type"] = "application/x-www-form-urlencoded; charset=utf-8" + operation, service_request = parser.parse(Request("POST", "/", headers=headers, body=body)) + validate_request(operation, service_request).raise_first() + except OperationNotFoundParserError: + raise InvalidAction(action) + except MissingRequiredField as e: + raise MissingRequiredParameterException( + f"The request must contain the parameter {e.required_name}." + ) + + # Extract from auth header to allow cross-account operations + # TODO: permissions encoded in URL as AUTHPARAMS cannot be accounted for in this method, which is not a big + # problem yet since we generally don't enforce permissions. 
+ account_id: Optional[str] = extract_access_key_id_from_auth_header(headers) + + client = connect_to( + region_name=region, + aws_access_key_id=account_id or INTERNAL_AWS_ACCESS_KEY_ID, + aws_secret_access_key=INTERNAL_AWS_SECRET_ACCESS_KEY, + ).sqs_query + + try: + # using the layer below boto3.client("sqs").(...) to make the call + boto_response = client._make_api_call(operation.name, service_request) + except ClientError as e: + raise BotoException(e.response) from e + + return boto_response, operation diff --git a/localstack-core/localstack/services/sqs/queue.py b/localstack-core/localstack/services/sqs/queue.py new file mode 100644 index 0000000000000..dc3b5e8d88f70 --- /dev/null +++ b/localstack-core/localstack/services/sqs/queue.py @@ -0,0 +1,50 @@ +import time +from queue import Empty, PriorityQueue, Queue + + +class InterruptibleQueue(Queue): + # is_shutdown is used to check whether we have triggered a shutdown of the Queue + is_shutdown: bool + + def __init__(self, maxsize=0): + super().__init__(maxsize) + self.is_shutdown = False + + def get(self, block=True, timeout=None): + with self.not_empty: + if self.is_shutdown: + raise Empty + if not block: + if not self._qsize(): + raise Empty + elif timeout is None: + while not self._qsize() and not self.is_shutdown: # additional shutdown check + self.not_empty.wait() + elif timeout < 0: + raise ValueError("'timeout' must be a non-negative number") + else: + endtime = time.time() + timeout + while not self._qsize() and not self.is_shutdown: # additional shutdown check + remaining = endtime - time.time() + if remaining <= 0.0: + raise Empty + self.not_empty.wait(remaining) + if self.is_shutdown: # additional shutdown check + raise Empty + item = self._get() + self.not_full.notify() + return item + + def shutdown(self): + """ + `shutdown` signals to stop all current and future `Queue.get` calls from executing. + + This is helpful for exiting otherwise blocking calls early. 
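A small driver (our own, not part of the change) illustrating this contract: a consumer blocked in `get` is woken by `shutdown` and observes `Empty` instead of waiting out its timeout:

```python
import threading
from queue import Empty

q = InterruptibleQueue()

def consumer():
    try:
        q.get(timeout=30)  # would otherwise block for up to 30 seconds
    except Empty:
        print("get() interrupted by shutdown")

t = threading.Thread(target=consumer)
t.start()
q.shutdown()  # sets is_shutdown and notifies all waiters
t.join()
```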
+ """ + with self.not_empty: + self.is_shutdown = True + self.not_empty.notify_all() + + +class InterruptiblePriorityQueue(PriorityQueue, InterruptibleQueue): + pass diff --git a/localstack/utils/cloudwatch/__init__.py b/localstack-core/localstack/services/sqs/resource_providers/__init__.py similarity index 100% rename from localstack/utils/cloudwatch/__init__.py rename to localstack-core/localstack/services/sqs/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue.py b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue.py new file mode 100644 index 0000000000000..52b39da351d96 --- /dev/null +++ b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue.py @@ -0,0 +1,263 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SQSQueueProperties(TypedDict): + Arn: Optional[str] + ContentBasedDeduplication: Optional[bool] + DeduplicationScope: Optional[str] + DelaySeconds: Optional[int] + FifoQueue: Optional[bool] + FifoThroughputLimit: Optional[str] + KmsDataKeyReusePeriodSeconds: Optional[int] + KmsMasterKeyId: Optional[str] + MaximumMessageSize: Optional[int] + MessageRetentionPeriod: Optional[int] + QueueName: Optional[str] + QueueUrl: Optional[str] + ReceiveMessageWaitTimeSeconds: Optional[int] + RedriveAllowPolicy: Optional[dict | str] + RedrivePolicy: Optional[dict | str] + SqsManagedSseEnabled: Optional[bool] + Tags: Optional[list[Tag]] + VisibilityTimeout: Optional[int] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + +_queue_attribute_list = [ + "ContentBasedDeduplication", + "DeduplicationScope", + "DelaySeconds", + "FifoQueue", + "FifoThroughputLimit", + "KmsDataKeyReusePeriodSeconds", + "KmsMasterKeyId", + "MaximumMessageSize", + "MessageRetentionPeriod", + "ReceiveMessageWaitTimeSeconds", + "RedriveAllowPolicy", + "RedrivePolicy", + "SqsManagedSseEnabled", + "VisibilityTimeout", +] + + +class SQSQueueProvider(ResourceProvider[SQSQueueProperties]): + TYPE = "AWS::SQS::Queue" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SQSQueueProperties], + ) -> ProgressEvent[SQSQueueProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/QueueUrl + + + + Create-only properties: + - /properties/FifoQueue + - /properties/QueueName + + Read-only properties: + - /properties/QueueUrl + - /properties/Arn + + IAM permissions required: + - sqs:CreateQueue + - sqs:GetQueueUrl + - sqs:GetQueueAttributes + - sqs:ListQueueTags + - sqs:TagQueue + + """ + # TODO: validations + model = request.desired_state + sqs = request.aws_client_factory.sqs + + if model.get("FifoQueue", False): + model["FifoQueue"] = model["FifoQueue"] + + queue_name = model.get("QueueName") + if not queue_name: + # TODO: verify patterns here + if model.get("FifoQueue"): + queue_name = util.generate_default_name( + request.stack_name, request.logical_resource_id + )[:-5] + queue_name = f"{queue_name}.fifo" + else: + queue_name = util.generate_default_name( + request.stack_name, request.logical_resource_id + ) + model["QueueName"] = queue_name + + attributes = self._compile_sqs_queue_attributes(model) + result = request.aws_client_factory.sqs.create_queue( + QueueName=model["QueueName"], + Attributes=attributes, + tags={t["Key"]: t["Value"] for t in model.get("Tags", [])}, + ) + + # set read-only properties + model["QueueUrl"] = result["QueueUrl"] + model["Arn"] = sqs.get_queue_attributes( + QueueUrl=result["QueueUrl"], AttributeNames=["QueueArn"] + )["Attributes"]["QueueArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SQSQueueProperties], + ) -> ProgressEvent[SQSQueueProperties]: + """ + Fetch resource information + + IAM permissions required: + - sqs:GetQueueAttributes + - sqs:ListQueueTags + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SQSQueueProperties], + ) -> ProgressEvent[SQSQueueProperties]: + """ + Delete a resource + + IAM permissions required: + - sqs:DeleteQueue + - sqs:GetQueueAttributes + """ + sqs = request.aws_client_factory.sqs + try: + queue_url = sqs.get_queue_url(QueueName=request.previous_state["QueueName"])["QueueUrl"] + sqs.delete_queue(QueueUrl=queue_url) + + except sqs.exceptions.QueueDoesNotExist: + return ProgressEvent( + status=OperationStatus.SUCCESS, resource_model=request.desired_state + ) + + return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=request.desired_state) + + def update( + self, + request: ResourceRequest[SQSQueueProperties], + ) -> ProgressEvent[SQSQueueProperties]: + """ + Update a resource + + IAM permissions required: + - sqs:SetQueueAttributes + - sqs:GetQueueAttributes + - sqs:ListQueueTags + - sqs:TagQueue + - sqs:UntagQueue + """ + sqs = request.aws_client_factory.sqs + model = request.desired_state + + assert request.previous_state is not None + + should_replace = ( + request.desired_state.get("QueueName", request.previous_state["QueueName"]) + != request.previous_state["QueueName"] + ) or ( + request.desired_state.get("FifoQueue", request.previous_state.get("FifoQueue")) + != request.previous_state.get("FifoQueue") + ) + + if not should_replace: + return ProgressEvent(OperationStatus.SUCCESS, resource_model=request.previous_state) + + # TODO: copied from the create handler, extract? 
+ if model.get("FifoQueue"): + queue_name = util.generate_default_name( + request.stack_name, request.logical_resource_id + )[:-5] + queue_name = f"{queue_name}.fifo" + else: + queue_name = util.generate_default_name(request.stack_name, request.logical_resource_id) + + # replacement (TODO: find out if we should handle this in the provider or outside of it) + # delete old queue + sqs.delete_queue(QueueUrl=request.previous_state["QueueUrl"]) + # create new queue (TODO: re-use create logic to make this more robust, e.g. for + # auto-generated queue names) + model["QueueUrl"] = sqs.create_queue(QueueName=queue_name)["QueueUrl"] + model["Arn"] = sqs.get_queue_attributes( + QueueUrl=model["QueueUrl"], AttributeNames=["QueueArn"] + )["Attributes"]["QueueArn"] + return ProgressEvent(OperationStatus.SUCCESS, resource_model=model) + + def _compile_sqs_queue_attributes(self, properties: SQSQueueProperties) -> dict[str, str]: + """ + SQS is really awkward in how the ``CreateQueue`` operation expects arguments. Most of a Queue's + attributes are passed as a string values in the "Attributes" dictionary. So we need to compile this + dictionary here. + + :param properties: the properties passed from cloudformation + :return: a mapping used for the ``Attributes`` argument of the `CreateQueue` call. + """ + result = {} + + for k in _queue_attribute_list: + v = properties.get(k) + + if v is None: + continue + elif isinstance(v, str): + pass + elif isinstance(v, bool): + v = str(v).lower() + elif isinstance(v, dict): + # RedrivePolicy and RedriveAllowPolicy + v = json.dumps(v) + elif isinstance(v, int): + v = str(v) + else: + raise TypeError(f"cannot convert attribute {k}, unhandled type {type(v)}") + + result[k] = v + + return result + + def list( + self, + request: ResourceRequest[SQSQueueProperties], + ) -> ProgressEvent[SQSQueueProperties]: + resources = request.aws_client_factory.sqs.list_queues() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + SQSQueueProperties(QueueUrl=url) for url in resources.get("QueueUrls", []) + ], + ) diff --git a/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue.schema.json b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue.schema.json new file mode 100644 index 0000000000000..0756d0bfb2b07 --- /dev/null +++ b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue.schema.json @@ -0,0 +1,166 @@ +{ + "typeName": "AWS::SQS::Queue", + "description": "Resource Type definition for AWS::SQS::Queue", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-sqs.git", + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Key": { + "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "type": "string" + }, + "Value": { + "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "properties": { + "QueueUrl": { + "type": "string", + "description": "URL of the source queue." 
+ }, + "Arn": { + "type": "string", + "description": "Amazon Resource Name (ARN) of the queue." + }, + "ContentBasedDeduplication": { + "type": "boolean", + "description": "For first-in-first-out (FIFO) queues, specifies whether to enable content-based deduplication. During the deduplication interval, Amazon SQS treats messages that are sent with identical content as duplicates and delivers only one copy of the message." + }, + "DeduplicationScope": { + "description": "Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.", + "type": "string" + }, + "DelaySeconds": { + "type": "integer", + "description": "The time in seconds for which the delivery of all messages in the queue is delayed. You can specify an integer value of 0 to 900 (15 minutes). The default value is 0." + }, + "FifoQueue": { + "type": "boolean", + "description": "If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue." + }, + "FifoThroughputLimit": { + "description": "Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.", + "type": "string" + }, + "KmsDataKeyReusePeriodSeconds": { + "type": "integer", + "description": "The length of time in seconds for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. The value must be an integer between 60 (1 minute) and 86,400 (24 hours). The default is 300 (5 minutes)." + }, + "KmsMasterKeyId": { + "type": "string", + "description": "The ID of an AWS managed customer master key (CMK) for Amazon SQS or a custom CMK. To use the AWS managed CMK for Amazon SQS, specify the (default) alias alias/aws/sqs." + }, + "SqsManagedSseEnabled": { + "type": "boolean", + "description": "Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (e.g. SSE-KMS or SSE-SQS )." + }, + "MaximumMessageSize": { + "type": "integer", + "description": "The limit of how many bytes that a message can contain before Amazon SQS rejects it. You can specify an integer value from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). The default value is 262,144 (256 KiB)." + }, + "MessageRetentionPeriod": { + "type": "integer", + "description": "The number of seconds that Amazon SQS retains a message. You can specify an integer value from 60 seconds (1 minute) to 1,209,600 seconds (14 days). The default value is 345,600 seconds (4 days)." + }, + "QueueName": { + "type": "string", + "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the .fifo suffix." + }, + "ReceiveMessageWaitTimeSeconds": { + "type": "integer", + "description": "Specifies the duration, in seconds, that the ReceiveMessage action call waits until a message is in the queue in order to include it in the response, rather than returning an empty response if a message isn't yet available. You can specify an integer from 1 to 20. Short polling is used as the default or when you specify 0 for this property." + }, + "RedriveAllowPolicy": { + "type": [ + "object", + "string" + ], + "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object." 
+ }, + "RedrivePolicy": { + "type": [ + "object", + "string" + ], + "description": "A string that includes the parameters for the dead-letter queue functionality (redrive policy) of the source queue." + }, + "Tags": { + "type": "array", + "description": "The tags that you attach to this queue.", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "VisibilityTimeout": { + "type": "integer", + "description": "The length of time during which a message will be unavailable after a message is delivered from the queue. This blocks other components from receiving the same message and gives the initial component time to process and delete the message from the queue. Values must be from 0 to 43,200 seconds (12 hours). If you don't specify a value, AWS CloudFormation uses the default value of 30 seconds." + } + }, + "additionalProperties": false, + "readOnlyProperties": [ + "/properties/QueueUrl", + "/properties/Arn" + ], + "primaryIdentifier": [ + "/properties/QueueUrl" + ], + "createOnlyProperties": [ + "/properties/FifoQueue", + "/properties/QueueName" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "handlers": { + "create": { + "permissions": [ + "sqs:CreateQueue", + "sqs:GetQueueUrl", + "sqs:GetQueueAttributes", + "sqs:ListQueueTags", + "sqs:TagQueue" + ] + }, + "read": { + "permissions": [ + "sqs:GetQueueAttributes", + "sqs:ListQueueTags" + ] + }, + "update": { + "permissions": [ + "sqs:SetQueueAttributes", + "sqs:GetQueueAttributes", + "sqs:ListQueueTags", + "sqs:TagQueue", + "sqs:UntagQueue" + ] + }, + "delete": { + "permissions": [ + "sqs:DeleteQueue", + "sqs:GetQueueAttributes" + ] + }, + "list": { + "permissions": [ + "sqs:ListQueues" + ] + } + } +} diff --git a/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue_plugin.py b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue_plugin.py new file mode 100644 index 0000000000000..45c892bc5dade --- /dev/null +++ b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queue_plugin.py @@ -0,0 +1,18 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SQSQueueProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SQS::Queue" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.sqs.resource_providers.aws_sqs_queue import SQSQueueProvider + + self.factory = SQSQueueProvider diff --git a/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy.py b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy.py new file mode 100644 index 0000000000000..cc7bdecfa9254 --- /dev/null +++ b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy.py @@ -0,0 +1,110 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SQSQueuePolicyProperties(TypedDict): + PolicyDocument: Optional[dict] + Queues: Optional[list[str]] + Id: 
Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SQSQueuePolicyProvider(ResourceProvider[SQSQueuePolicyProperties]): + TYPE = "AWS::SQS::QueuePolicy" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SQSQueuePolicyProperties], + ) -> ProgressEvent[SQSQueuePolicyProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - PolicyDocument + - Queues + + Read-only properties: + - /properties/Id + + """ + model = request.desired_state + sqs = request.aws_client_factory.sqs + for queue in model.get("Queues", []): + policy = json.dumps(model["PolicyDocument"]) + sqs.set_queue_attributes(QueueUrl=queue, Attributes={"Policy": policy}) + + physical_resource_id = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + model["Id"] = physical_resource_id + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SQSQueuePolicyProperties], + ) -> ProgressEvent[SQSQueuePolicyProperties]: + """ + Fetch resource information + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SQSQueuePolicyProperties], + ) -> ProgressEvent[SQSQueuePolicyProperties]: + """ + Delete a resource + """ + sqs = request.aws_client_factory.sqs + for queue in request.previous_state["Queues"]: + try: + sqs.set_queue_attributes(QueueUrl=queue, Attributes={"Policy": ""}) + + except sqs.exceptions.QueueDoesNotExist: + return ProgressEvent(status=OperationStatus.FAILED, resource_model={}) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model={}, + ) + + def update( + self, + request: ResourceRequest[SQSQueuePolicyProperties], + ) -> ProgressEvent[SQSQueuePolicyProperties]: + """ + Update a resource + """ + model = request.desired_state + sqs = request.aws_client_factory.sqs + for queue in model.get("Queues", []): + policy = json.dumps(model["PolicyDocument"]) + sqs.set_queue_attributes(QueueUrl=queue, Attributes={"Policy": policy}) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=request.desired_state, + ) diff --git a/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy.schema.json b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy.schema.json new file mode 100644 index 0000000000000..654910643709d --- /dev/null +++ b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy.schema.json @@ -0,0 +1,30 @@ +{ + "typeName": "AWS::SQS::QueuePolicy", + "description": "Resource Type definition for AWS::SQS::QueuePolicy", + "additionalProperties": false, + "properties": { + "Id": { + "type": "string" + }, + "PolicyDocument": { + "type": "object" + }, + "Queues": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + } + }, + "required": [ + "PolicyDocument", + "Queues" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy_plugin.py b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy_plugin.py new file mode 100644 index 0000000000000..fc6ce346cf5e3 --- /dev/null +++ 
b/localstack-core/localstack/services/sqs/resource_providers/aws_sqs_queuepolicy_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SQSQueuePolicyProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SQS::QueuePolicy" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.sqs.resource_providers.aws_sqs_queuepolicy import ( + SQSQueuePolicyProvider, + ) + + self.factory = SQSQueuePolicyProvider diff --git a/localstack-core/localstack/services/sqs/utils.py b/localstack-core/localstack/services/sqs/utils.py new file mode 100644 index 0000000000000..a280128ad7b66 --- /dev/null +++ b/localstack-core/localstack/services/sqs/utils.py @@ -0,0 +1,186 @@ +import base64 +import itertools +import json +import re +import time +from typing import Literal, NamedTuple, Optional, Tuple +from urllib.parse import urlparse + +from localstack.aws.api.sqs import QueueAttributeName, ReceiptHandleIsInvalid +from localstack.services.sqs.constants import ( + DOMAIN_STRATEGY_URL_REGEX, + LEGACY_STRATEGY_URL_REGEX, + PATH_STRATEGY_URL_REGEX, + STANDARD_STRATEGY_URL_REGEX, +) +from localstack.utils.aws.arns import parse_arn +from localstack.utils.objects import singleton_factory +from localstack.utils.strings import base64_decode, long_uid, to_bytes, to_str + +STANDARD_ENDPOINT = re.compile(STANDARD_STRATEGY_URL_REGEX) +DOMAIN_ENDPOINT = re.compile(DOMAIN_STRATEGY_URL_REGEX) +PATH_ENDPOINT = re.compile(PATH_STRATEGY_URL_REGEX) +LEGACY_ENDPOINT = re.compile(LEGACY_STRATEGY_URL_REGEX) + + +def is_sqs_queue_url(url: str) -> bool: + return any( + [ + STANDARD_ENDPOINT.search(url), + DOMAIN_ENDPOINT.search(url), + PATH_ENDPOINT.search(url), + LEGACY_ENDPOINT.search(url), + ] + ) + + +def guess_endpoint_strategy_and_host( + host: str, +) -> Tuple[Literal["standard", "domain", "path"], str]: + """ + This method is used for the dynamic endpoint strategy. It heuristically determines a tuple where the first + element is the endpoint strategy, and the second is the part of the host after the endpoint prefix and region. 
+    For instance: + +    * ``sqs.us-east-1.localhost.localstack.cloud`` -> ``standard, localhost.localstack.cloud`` +    * ``queue.localhost.localstack.cloud:4566`` -> ``domain, localhost.localstack.cloud:4566`` +    * ``us-east-2.queue.amazonaws.com`` -> ``domain, amazonaws.com`` +    * ``localhost:4566`` -> ``path, localhost:4566`` +    * ``amazonaws.com`` -> ``path, amazonaws.com`` + +    :param host: the original host in the request +    :return: endpoint strategy, host segment +    """ +    components = host.split(".") + +    if host.startswith("sqs."): +        return "standard", ".".join(components[2:]) + +    if host.startswith("queue."): +        return "domain", ".".join(components[1:]) + +    if len(components) > 2 and components[1] == "queue": +        return "domain", ".".join(components[2:]) + +    return "path", host + + +def is_message_deduplication_id_required(queue): +    content_based_deduplication_disabled = ( +        "false" +        == (queue.attributes.get(QueueAttributeName.ContentBasedDeduplication, "false")).lower() +    ) +    return is_fifo_queue(queue) and content_based_deduplication_disabled + + +def is_fifo_queue(queue): +    return "true" == queue.attributes.get(QueueAttributeName.FifoQueue, "false").lower() + + +def parse_queue_url(queue_url: str) -> Tuple[str, Optional[str], str]: +    """ +    Parses an SQS Queue URL and returns a triple of account_id, region and queue_name. + +    :param queue_url: the queue URL +    :return: account_id, region (may be None), queue_name +    """ +    url = urlparse(queue_url.rstrip("/")) +    path_parts = url.path.lstrip("/").split("/") +    domain_parts = url.netloc.split(".") + +    if len(path_parts) != 2 and len(path_parts) != 4: +        raise ValueError(f"Not a valid queue URL: {queue_url}") + +    account_id, queue_name = path_parts[-2:] + +    if len(path_parts) == 4: +        if path_parts[0] != "queue": +            raise ValueError(f"Not a valid queue URL: {queue_url}") +        # SQS_ENDPOINT_STRATEGY == "path" +        region = path_parts[1] +    elif url.netloc.startswith("sqs."): +        # SQS_ENDPOINT_STRATEGY == "standard" +        region = domain_parts[1] +    elif ".queue." in url.netloc: +        if domain_parts[1] != "queue": +            # .queue. should be on second position after the region +            raise ValueError(f"Not a valid queue URL: {queue_url}") +        # SQS_ENDPOINT_STRATEGY == "domain" +        region = domain_parts[0] +    elif url.netloc.startswith("queue"): +        # SQS_ENDPOINT_STRATEGY == "domain" (with default region) +        region = "us-east-1" +    else: +        region = None + +    return account_id, region, queue_name + + +class ReceiptHandleInformation(NamedTuple): +    identifier: str +    queue_arn: str +    message_id: str +    last_received: str + + +def extract_receipt_handle_info(receipt_handle: str) -> ReceiptHandleInformation: +    try: +        handle = base64.b64decode(receipt_handle).decode("utf-8") +        parts = handle.split(" ") +        if len(parts) != 4: +            raise ValueError(f'The input receipt handle "{receipt_handle}" is incomplete.') +        parse_arn(parts[1]) +        return ReceiptHandleInformation(*parts) +    except (IndexError, ValueError) as e: +        raise ReceiptHandleIsInvalid( +            f'The input receipt handle "{receipt_handle}" is not a valid receipt handle.' +        ) from e + + +def encode_receipt_handle(queue_arn, message) -> str: +    # http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ImportantIdentifiers.html#ImportantIdentifiers-receipt-handles +    # encode the queue arn in the receipt handle, so we can later check if it belongs to the queue +    # but also add some randomness s.t.
the generated receipt handles look like the ones from AWS + handle = f"{long_uid()} {queue_arn} {message.message.get('MessageId')} {message.last_received}" + encoded = base64.b64encode(handle.encode("utf-8")) + return encoded.decode("utf-8") + + +def encode_move_task_handle(task_id: str, source_arn: str) -> str: + """ + Move task handles are base64 encoded JSON dictionaries containing the task id and the source arn. + + :param task_id: the move task id + :param source_arn: the source queue arn + :return: a string of a base64 encoded json doc + """ + doc = f'{{"taskId":"{task_id}","sourceArn":"{source_arn}"}}' + return to_str(base64.b64encode(to_bytes(doc))) + + +def decode_move_task_handle(handle: str | bytes) -> tuple[str, str]: + """ + Inverse operation of ``encode_move_task_handle``. + + :param handle: the base64 encoded task handle + :return: a tuple of task_id and source_arn + :raises ValueError: if the handle is not encoded correctly or does not contain the necessary fields + """ + doc = json.loads(base64_decode(handle)) + if "taskId" not in doc: + raise ValueError("taskId not found in handle") + if "sourceArn" not in doc: + raise ValueError("sourceArn not found in handle") + return doc["taskId"], doc["sourceArn"] + + +@singleton_factory +def global_message_sequence(): + # creates a 20-digit number used as the start for the global sequence + start = int(time.time()) << 33 + # itertools.count is thread safe over the GIL since its getAndIncrement operation is a single python bytecode op + return itertools.count(start) + + +def generate_message_id(): + return long_uid() diff --git a/localstack/utils/container_utils/__init__.py b/localstack-core/localstack/services/ssm/__init__.py similarity index 100% rename from localstack/utils/container_utils/__init__.py rename to localstack-core/localstack/services/ssm/__init__.py diff --git a/localstack-core/localstack/services/ssm/provider.py b/localstack-core/localstack/services/ssm/provider.py new file mode 100644 index 0000000000000..7787daa091383 --- /dev/null +++ b/localstack-core/localstack/services/ssm/provider.py @@ -0,0 +1,447 @@ +import copy +import json +import logging +import time +from abc import ABC +from typing import Dict, Optional + +from localstack.aws.api import CommonServiceException, RequestContext +from localstack.aws.api.ssm import ( + AlarmConfiguration, + BaselineDescription, + BaselineId, + BaselineName, + Boolean, + ClientToken, + CreateMaintenanceWindowResult, + CreatePatchBaselineResult, + DeleteMaintenanceWindowResult, + DeleteParameterResult, + DeletePatchBaselineResult, + DeregisterTargetFromMaintenanceWindowResult, + DeregisterTaskFromMaintenanceWindowResult, + DescribeMaintenanceWindowsResult, + DescribeMaintenanceWindowTargetsResult, + DescribeMaintenanceWindowTasksResult, + DescribePatchBaselinesResult, + GetParameterResult, + GetParametersResult, + LabelParameterVersionResult, + LoggingInfo, + MaintenanceWindowAllowUnassociatedTargets, + MaintenanceWindowCutoff, + MaintenanceWindowDescription, + MaintenanceWindowDurationHours, + MaintenanceWindowFilterList, + MaintenanceWindowId, + MaintenanceWindowMaxResults, + MaintenanceWindowName, + MaintenanceWindowOffset, + MaintenanceWindowResourceType, + MaintenanceWindowSchedule, + MaintenanceWindowStringDateTime, + MaintenanceWindowTargetId, + MaintenanceWindowTaskArn, + MaintenanceWindowTaskCutoffBehavior, + MaintenanceWindowTaskId, + MaintenanceWindowTaskInvocationParameters, + MaintenanceWindowTaskParameters, + MaintenanceWindowTaskPriority, + 
MaintenanceWindowTaskType, + MaintenanceWindowTimezone, + MaxConcurrency, + MaxErrors, + NextToken, + OperatingSystem, + OwnerInformation, + ParameterLabelList, + ParameterName, + ParameterNameList, + PatchAction, + PatchBaselineMaxResults, + PatchComplianceLevel, + PatchComplianceStatus, + PatchFilterGroup, + PatchIdList, + PatchOrchestratorFilterList, + PatchRuleGroup, + PatchSourceList, + PSParameterName, + PSParameterVersion, + PutParameterRequest, + PutParameterResult, + RegisterTargetWithMaintenanceWindowResult, + RegisterTaskWithMaintenanceWindowResult, + ServiceRole, + SsmApi, + TagList, + Targets, +) +from localstack.aws.connect import connect_to +from localstack.services.moto import call_moto, call_moto_with_request +from localstack.utils.aws.arns import extract_resource_from_arn, is_arn +from localstack.utils.bootstrap import is_api_enabled +from localstack.utils.collections import remove_attributes +from localstack.utils.objects import keys_to_lower + +LOG = logging.getLogger(__name__) + +PARAM_PREFIX_SECRETSMANAGER = "/aws/reference/secretsmanager" + + +class ValidationException(CommonServiceException): + def __init__(self, message=None): + super().__init__("ValidationException", message=message, sender_fault=True) + + +class InvalidParameterNameException(ValidationException): + def __init__(self): + msg = ( + 'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). ' + "If formed as a path, it can consist of sub-paths divided by slash symbol; " + "each sub-path can be formed as a mix of letters, numbers and the following 3 symbols .-_" + ) + super().__init__(msg) + + +# TODO: check if _normalize_name(..) calls are still required here +class SsmProvider(SsmApi, ABC): + def get_parameters( + self, + context: RequestContext, + names: ParameterNameList, + with_decryption: Boolean = None, + **kwargs, + ) -> GetParametersResult: + if SsmProvider._has_secrets(names): + return SsmProvider._get_params_and_secrets(context.account_id, context.region, names) + + norm_names = [SsmProvider._normalize_name(name, validate=True) for name in names] + request = {"Names": norm_names, "WithDecryption": bool(with_decryption)} + res = call_moto_with_request(context, request) + + if not res.get("InvalidParameters"): + # note: simplifying assumption for now - only de-normalizing names if no invalid params were given + for i in range(len(res["Parameters"])): + self._denormalize_param_name_in_response(res["Parameters"][i], names[i]) + + return GetParametersResult(**res) + + def put_parameter( + self, context: RequestContext, request: PutParameterRequest, **kwargs + ) -> PutParameterResult: + name = request["Name"] + nname = SsmProvider._normalize_name(name) + if name != nname: + request.update({"Name": nname}) + moto_res = call_moto_with_request(context, request) + else: + moto_res = call_moto(context) + SsmProvider._notify_event_subscribers(context.account_id, context.region, nname, "Create") + return PutParameterResult(**moto_res) + + def get_parameter( + self, + context: RequestContext, + name: PSParameterName, + with_decryption: Boolean = None, + **kwargs, + ) -> GetParameterResult: + result = None + + norm_name = self._normalize_name(name, validate=True) + details = norm_name.split("/") + if len(details) > 4: + service = details[3] + if service == "secretsmanager": + resource_name = "/".join(details[4:]) + result = SsmProvider._get_secrets_information( + context.account_id, context.region, norm_name, resource_name + ) + + if not result: + result = call_moto_with_request( + context, 
{"Name": norm_name, "WithDecryption": bool(with_decryption)} +            ) + +        self._denormalize_param_name_in_response(result["Parameter"], name) + +        return GetParameterResult(**result) + +    def delete_parameter( +        self, context: RequestContext, name: PSParameterName, **kwargs +    ) -> DeleteParameterResult: +        SsmProvider._notify_event_subscribers(context.account_id, context.region, name, "Delete") +        call_moto(context)  # Return type is an empty type. +        return DeleteParameterResult() + +    def label_parameter_version( +        self, +        context: RequestContext, +        name: PSParameterName, +        labels: ParameterLabelList, +        parameter_version: PSParameterVersion = None, +        **kwargs, +    ) -> LabelParameterVersionResult: +        SsmProvider._notify_event_subscribers( +            context.account_id, context.region, name, "LabelParameterVersion" +        ) +        return LabelParameterVersionResult(**call_moto(context)) + +    def create_patch_baseline( +        self, +        context: RequestContext, +        name: BaselineName, +        operating_system: OperatingSystem = None, +        global_filters: PatchFilterGroup = None, +        approval_rules: PatchRuleGroup = None, +        approved_patches: PatchIdList = None, +        approved_patches_compliance_level: PatchComplianceLevel = None, +        approved_patches_enable_non_security: Boolean = None, +        rejected_patches: PatchIdList = None, +        rejected_patches_action: PatchAction = None, +        description: BaselineDescription = None, +        sources: PatchSourceList = None, +        available_security_updates_compliance_status: PatchComplianceStatus = None, +        client_token: ClientToken = None, +        tags: TagList = None, +        **kwargs, +    ) -> CreatePatchBaselineResult: +        return CreatePatchBaselineResult(**call_moto(context)) + +    def delete_patch_baseline( +        self, context: RequestContext, baseline_id: BaselineId, **kwargs +    ) -> DeletePatchBaselineResult: +        return DeletePatchBaselineResult(**call_moto(context)) + +    def describe_patch_baselines( +        self, +        context: RequestContext, +        filters: PatchOrchestratorFilterList = None, +        max_results: PatchBaselineMaxResults = None, +        next_token: NextToken = None, +        **kwargs, +    ) -> DescribePatchBaselinesResult: +        return DescribePatchBaselinesResult(**call_moto(context)) + +    def register_target_with_maintenance_window( +        self, +        context: RequestContext, +        window_id: MaintenanceWindowId, +        resource_type: MaintenanceWindowResourceType, +        targets: Targets, +        owner_information: OwnerInformation = None, +        name: MaintenanceWindowName = None, +        description: MaintenanceWindowDescription = None, +        client_token: ClientToken = None, +        **kwargs, +    ) -> RegisterTargetWithMaintenanceWindowResult: +        return RegisterTargetWithMaintenanceWindowResult(**call_moto(context)) + +    def deregister_target_from_maintenance_window( +        self, +        context: RequestContext, +        window_id: MaintenanceWindowId, +        window_target_id: MaintenanceWindowTargetId, +        safe: Boolean = None, +        **kwargs, +    ) -> DeregisterTargetFromMaintenanceWindowResult: +        return DeregisterTargetFromMaintenanceWindowResult(**call_moto(context)) + +    def describe_maintenance_window_targets( +        self, +        context: RequestContext, +        window_id: MaintenanceWindowId, +        filters: MaintenanceWindowFilterList = None, +        max_results: MaintenanceWindowMaxResults = None, +        next_token: NextToken = None, +        **kwargs, +    ) -> DescribeMaintenanceWindowTargetsResult: +        return DescribeMaintenanceWindowTargetsResult(**call_moto(context)) + +    def create_maintenance_window( +        self, +        context: RequestContext, +        name: MaintenanceWindowName, +        schedule: MaintenanceWindowSchedule, +        duration: MaintenanceWindowDurationHours, +        cutoff:
MaintenanceWindowCutoff, + allow_unassociated_targets: MaintenanceWindowAllowUnassociatedTargets, + description: MaintenanceWindowDescription = None, + start_date: MaintenanceWindowStringDateTime = None, + end_date: MaintenanceWindowStringDateTime = None, + schedule_timezone: MaintenanceWindowTimezone = None, + schedule_offset: MaintenanceWindowOffset = None, + client_token: ClientToken = None, + tags: TagList = None, + **kwargs, + ) -> CreateMaintenanceWindowResult: + return CreateMaintenanceWindowResult(**call_moto(context)) + + def delete_maintenance_window( + self, context: RequestContext, window_id: MaintenanceWindowId, **kwargs + ) -> DeleteMaintenanceWindowResult: + return DeleteMaintenanceWindowResult(**call_moto(context)) + + def describe_maintenance_windows( + self, + context: RequestContext, + filters: MaintenanceWindowFilterList = None, + max_results: MaintenanceWindowMaxResults = None, + next_token: NextToken = None, + **kwargs, + ) -> DescribeMaintenanceWindowsResult: + return DescribeMaintenanceWindowsResult(**call_moto(context)) + + def register_task_with_maintenance_window( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + task_arn: MaintenanceWindowTaskArn, + task_type: MaintenanceWindowTaskType, + targets: Targets = None, + service_role_arn: ServiceRole = None, + task_parameters: MaintenanceWindowTaskParameters = None, + task_invocation_parameters: MaintenanceWindowTaskInvocationParameters = None, + priority: MaintenanceWindowTaskPriority = None, + max_concurrency: MaxConcurrency = None, + max_errors: MaxErrors = None, + logging_info: LoggingInfo = None, + name: MaintenanceWindowName = None, + description: MaintenanceWindowDescription = None, + client_token: ClientToken = None, + cutoff_behavior: MaintenanceWindowTaskCutoffBehavior = None, + alarm_configuration: AlarmConfiguration = None, + **kwargs, + ) -> RegisterTaskWithMaintenanceWindowResult: + return RegisterTaskWithMaintenanceWindowResult(**call_moto(context)) + + def deregister_task_from_maintenance_window( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + window_task_id: MaintenanceWindowTaskId, + **kwargs, + ) -> DeregisterTaskFromMaintenanceWindowResult: + return DeregisterTaskFromMaintenanceWindowResult(**call_moto(context)) + + def describe_maintenance_window_tasks( + self, + context: RequestContext, + window_id: MaintenanceWindowId, + filters: MaintenanceWindowFilterList = None, + max_results: MaintenanceWindowMaxResults = None, + next_token: NextToken = None, + **kwargs, + ) -> DescribeMaintenanceWindowTasksResult: + return DescribeMaintenanceWindowTasksResult(**call_moto(context)) + + # utility methods below + + @staticmethod + def _denormalize_param_name_in_response(param_result: Dict, param_name: str): + result_name = param_result["Name"] + if result_name != param_name and result_name.lstrip("/") == param_name.lstrip("/"): + param_result["Name"] = param_name + + @staticmethod + def _has_secrets(names: ParameterNameList) -> Boolean: + maybe_secret = next( + filter(lambda n: n.startswith(PARAM_PREFIX_SECRETSMANAGER), names), None + ) + return maybe_secret is not None + + @staticmethod + def _normalize_name(param_name: ParameterName, validate=False) -> ParameterName: + if is_arn(param_name): + resource_name = extract_resource_from_arn(param_name).replace("parameter/", "") + # if the parameter name is only the root path we want to look up without the leading slash. 
+ # Otherwise, we add the leading slash + if "/" in resource_name: + resource_name = f"/{resource_name}" + return resource_name + + if validate: + if "//" in param_name or ("/" in param_name and not param_name.startswith("/")): + raise InvalidParameterNameException() + param_name = param_name.strip("/") + param_name = param_name.replace("//", "/") + if "/" in param_name: + param_name = "/%s" % param_name + return param_name + + @staticmethod + def _get_secrets_information( + account_id: str, region_name: str, name: ParameterName, resource_name: str + ) -> Optional[GetParameterResult]: + client = connect_to(aws_access_key_id=account_id, region_name=region_name).secretsmanager + try: + secret_info = client.get_secret_value(SecretId=resource_name) + secret_info.pop("ResponseMetadata", None) + created_date_timestamp = time.mktime(secret_info["CreatedDate"].timetuple()) + secret_info["CreatedDate"] = created_date_timestamp + secret_info_lower = keys_to_lower( + remove_attributes(copy.deepcopy(secret_info), ["ARN"]) + ) + secret_info_lower["ARN"] = secret_info["ARN"] + result = { + "Parameter": { + "SourceResult": json.dumps(secret_info_lower, default=str), + "Name": name, + "Value": secret_info.get("SecretString"), + "Type": "SecureString", + "LastModifiedDate": created_date_timestamp, + } + } + return GetParameterResult(**result) + except client.exceptions.ResourceNotFoundException: + return None + + @staticmethod + def _get_params_and_secrets( + account_id: str, region_name: str, names: ParameterNameList + ) -> GetParametersResult: + ssm_client = connect_to(aws_access_key_id=account_id, region_name=region_name).ssm + result = {"Parameters": [], "InvalidParameters": []} + + for name in names: + if name.startswith(PARAM_PREFIX_SECRETSMANAGER): + secret = SsmProvider._get_secrets_information( + account_id, region_name, name, name[len(PARAM_PREFIX_SECRETSMANAGER) + 1 :] + ) + if secret is not None: + secret = secret["Parameter"] + result["Parameters"].append(secret) + else: + result["InvalidParameters"].append(name) + else: + try: + param = ssm_client.get_parameter(Name=name) + param["Parameter"]["LastModifiedDate"] = time.mktime( + param["Parameter"]["LastModifiedDate"].timetuple() + ) + result["Parameters"].append(param["Parameter"]) + except ssm_client.exceptions.ParameterNotFound: + result["InvalidParameters"].append(name) + + return GetParametersResult(**result) + + @staticmethod + def _notify_event_subscribers( + account_id: str, region_name: str, name: ParameterName, operation: str + ): + if not is_api_enabled("events"): + LOG.warning( + "Service 'events' is not enabled: skip emitting SSM event. " + "Please check your 'SERVICES' configuration variable." 
+            ) +            return +        # Publish an EventBridge event to notify subscribers of changes. +        events = connect_to(aws_access_key_id=account_id, region_name=region_name).events +        detail = {"name": name, "operation": operation} +        event = { +            "Source": "aws.ssm", +            "Detail": json.dumps(detail), +            "DetailType": "Parameter Store Change", +        } +        events.put_events(Entries=[event]) diff --git a/localstack/utils/kinesis/__init__.py b/localstack-core/localstack/services/ssm/resource_providers/__init__.py similarity index 100% rename from localstack/utils/kinesis/__init__.py rename to localstack-core/localstack/services/ssm/resource_providers/__init__.py diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow.py new file mode 100644 index 0000000000000..974a6b0676242 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow.py @@ -0,0 +1,137 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( +    OperationStatus, +    ProgressEvent, +    ResourceProvider, +    ResourceRequest, +) + + +class SSMMaintenanceWindowProperties(TypedDict): +    AllowUnassociatedTargets: Optional[bool] +    Cutoff: Optional[int] +    Duration: Optional[int] +    Name: Optional[str] +    Schedule: Optional[str] +    Description: Optional[str] +    EndDate: Optional[str] +    Id: Optional[str] +    ScheduleOffset: Optional[int] +    ScheduleTimezone: Optional[str] +    StartDate: Optional[str] +    Tags: Optional[list[Tag]] + + +class Tag(TypedDict): +    Key: Optional[str] +    Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SSMMaintenanceWindowProvider(ResourceProvider[SSMMaintenanceWindowProperties]): +    TYPE = "AWS::SSM::MaintenanceWindow"  # Autogenerated. Don't change +    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change + +    def create( +        self, +        request: ResourceRequest[SSMMaintenanceWindowProperties], +    ) -> ProgressEvent[SSMMaintenanceWindowProperties]: +        """ +        Create a new resource.
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - AllowUnassociatedTargets + - Cutoff + - Schedule + - Duration + - Name + + + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + ssm_client = request.aws_client_factory.ssm + + params = util.select_attributes( + model, + [ + "AllowUnassociatedTargets", + "Cutoff", + "Duration", + "Name", + "Schedule", + "ScheduleOffset", + "ScheduleTimezone", + "StartDate", + "EndDate", + "Description", + "Tags", + ], + ) + + response = ssm_client.create_maintenance_window(**params) + model["Id"] = response["WindowId"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SSMMaintenanceWindowProperties], + ) -> ProgressEvent[SSMMaintenanceWindowProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SSMMaintenanceWindowProperties], + ) -> ProgressEvent[SSMMaintenanceWindowProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + ssm_client = request.aws_client_factory.ssm + + ssm_client.delete_maintenance_window(WindowId=model["Id"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SSMMaintenanceWindowProperties], + ) -> ProgressEvent[SSMMaintenanceWindowProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow.schema.json b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow.schema.json new file mode 100644 index 0000000000000..f4cd1289e18d9 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow.schema.json @@ -0,0 +1,78 @@ +{ + "typeName": "AWS::SSM::MaintenanceWindow", + "description": "Resource Type definition for AWS::SSM::MaintenanceWindow", + "additionalProperties": false, + "properties": { + "StartDate": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "AllowUnassociatedTargets": { + "type": "boolean" + }, + "Cutoff": { + "type": "integer" + }, + "Schedule": { + "type": "string" + }, + "Duration": { + "type": "integer" + }, + "ScheduleOffset": { + "type": "integer" + }, + "Id": { + "type": "string" + }, + "EndDate": { + "type": "string" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "Name": { + "type": "string" + }, + "ScheduleTimezone": { + "type": "string" + } + }, + "definitions": { + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + } + }, + "required": [ + "AllowUnassociatedTargets", + "Cutoff", + "Schedule", + "Duration", + "Name" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow_plugin.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow_plugin.py new file mode 100644 index 0000000000000..c7f5ef1c2e50a --- /dev/null +++ 
b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindow_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SSMMaintenanceWindowProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SSM::MaintenanceWindow" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ssm.resource_providers.aws_ssm_maintenancewindow import ( + SSMMaintenanceWindowProvider, + ) + + self.factory = SSMMaintenanceWindowProvider diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget.py new file mode 100644 index 0000000000000..a6f8ef6029dbf --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget.py @@ -0,0 +1,128 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SSMMaintenanceWindowTargetProperties(TypedDict): + ResourceType: Optional[str] + Targets: Optional[list[Targets]] + WindowId: Optional[str] + Description: Optional[str] + Id: Optional[str] + Name: Optional[str] + OwnerInformation: Optional[str] + + +class Targets(TypedDict): + Key: Optional[str] + Values: Optional[list[str]] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SSMMaintenanceWindowTargetProvider(ResourceProvider[SSMMaintenanceWindowTargetProperties]): + TYPE = "AWS::SSM::MaintenanceWindowTarget" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SSMMaintenanceWindowTargetProperties], + ) -> ProgressEvent[SSMMaintenanceWindowTargetProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Id + + Required properties: + - WindowId + - ResourceType + - Targets + + Create-only properties: + - /properties/WindowId + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + params = util.select_attributes( + model=model, + params=[ + "Description", + "Name", + "OwnerInformation", + "ResourceType", + "Targets", + "WindowId", + ], + ) + + response = ssm.register_target_with_maintenance_window(**params) + model["Id"] = response["WindowTargetId"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SSMMaintenanceWindowTargetProperties], + ) -> ProgressEvent[SSMMaintenanceWindowTargetProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SSMMaintenanceWindowTargetProperties], + ) -> ProgressEvent[SSMMaintenanceWindowTargetProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + ssm.deregister_target_from_maintenance_window( + WindowId=model["WindowId"], WindowTargetId=model["Id"] + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SSMMaintenanceWindowTargetProperties], + ) -> ProgressEvent[SSMMaintenanceWindowTargetProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget.schema.json b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget.schema.json new file mode 100644 index 0000000000000..524e83e7f7134 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget.schema.json @@ -0,0 +1,68 @@ +{ + "typeName": "AWS::SSM::MaintenanceWindowTarget", + "description": "Resource Type definition for AWS::SSM::MaintenanceWindowTarget", + "additionalProperties": false, + "properties": { + "OwnerInformation": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "WindowId": { + "type": "string" + }, + "ResourceType": { + "type": "string" + }, + "Targets": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Targets" + } + }, + "Id": { + "type": "string" + }, + "Name": { + "type": "string" + } + }, + "definitions": { + "Targets": { + "type": "object", + "additionalProperties": false, + "properties": { + "Values": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Values", + "Key" + ] + } + }, + "required": [ + "WindowId", + "ResourceType", + "Targets" + ], + "createOnlyProperties": [ + "/properties/WindowId" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget_plugin.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget_plugin.py new file mode 100644 index 0000000000000..c16b5208eff20 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtarget_plugin.py @@ -0,0 +1,20 @@ +from typing import 
Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SSMMaintenanceWindowTargetProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SSM::MaintenanceWindowTarget" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ssm.resource_providers.aws_ssm_maintenancewindowtarget import ( + SSMMaintenanceWindowTargetProvider, + ) + + self.factory = SSMMaintenanceWindowTargetProvider diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask.py new file mode 100644 index 0000000000000..01b2f165a9aaa --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask.py @@ -0,0 +1,208 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SSMMaintenanceWindowTaskProperties(TypedDict): + Priority: Optional[int] + TaskArn: Optional[str] + TaskType: Optional[str] + WindowId: Optional[str] + CutoffBehavior: Optional[str] + Description: Optional[str] + Id: Optional[str] + LoggingInfo: Optional[LoggingInfo] + MaxConcurrency: Optional[str] + MaxErrors: Optional[str] + Name: Optional[str] + ServiceRoleArn: Optional[str] + Targets: Optional[list[Target]] + TaskInvocationParameters: Optional[TaskInvocationParameters] + TaskParameters: Optional[dict] + + +class Target(TypedDict): + Key: Optional[str] + Values: Optional[list[str]] + + +class MaintenanceWindowStepFunctionsParameters(TypedDict): + Input: Optional[str] + Name: Optional[str] + + +class CloudWatchOutputConfig(TypedDict): + CloudWatchLogGroupName: Optional[str] + CloudWatchOutputEnabled: Optional[bool] + + +class NotificationConfig(TypedDict): + NotificationArn: Optional[str] + NotificationEvents: Optional[list[str]] + NotificationType: Optional[str] + + +class MaintenanceWindowRunCommandParameters(TypedDict): + CloudWatchOutputConfig: Optional[CloudWatchOutputConfig] + Comment: Optional[str] + DocumentHash: Optional[str] + DocumentHashType: Optional[str] + DocumentVersion: Optional[str] + NotificationConfig: Optional[NotificationConfig] + OutputS3BucketName: Optional[str] + OutputS3KeyPrefix: Optional[str] + Parameters: Optional[dict] + ServiceRoleArn: Optional[str] + TimeoutSeconds: Optional[int] + + +class MaintenanceWindowLambdaParameters(TypedDict): + ClientContext: Optional[str] + Payload: Optional[str] + Qualifier: Optional[str] + + +class MaintenanceWindowAutomationParameters(TypedDict): + DocumentVersion: Optional[str] + Parameters: Optional[dict] + + +class TaskInvocationParameters(TypedDict): + MaintenanceWindowAutomationParameters: Optional[MaintenanceWindowAutomationParameters] + MaintenanceWindowLambdaParameters: Optional[MaintenanceWindowLambdaParameters] + MaintenanceWindowRunCommandParameters: Optional[MaintenanceWindowRunCommandParameters] + MaintenanceWindowStepFunctionsParameters: Optional[MaintenanceWindowStepFunctionsParameters] + + +class LoggingInfo(TypedDict): + Region: Optional[str] + S3Bucket: Optional[str] + S3Prefix: Optional[str] + + 
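+# Note: in CloudFormation templates the invocation parameters are nested under the verbose +# keys modeled above (e.g. "MaintenanceWindowRunCommandParameters"), while the SSM +# RegisterTaskWithMaintenanceWindow API expects the short task-type keys ("Automation", +# "Lambda", "RunCommand", "StepFunctions"); create() below translates between the two via +# its task_type_map. +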
+REPEATED_INVOCATION = "repeated_invocation" + + +class SSMMaintenanceWindowTaskProvider(ResourceProvider[SSMMaintenanceWindowTaskProperties]): + TYPE = "AWS::SSM::MaintenanceWindowTask" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SSMMaintenanceWindowTaskProperties], + ) -> ProgressEvent[SSMMaintenanceWindowTaskProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - WindowId + - Priority + - TaskType + - TaskArn + + Create-only properties: + - /properties/WindowId + - /properties/TaskType + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + params = util.select_attributes( + model=model, + params=[ + "Description", + "Name", + "OwnerInformation", + "Priority", + "ServiceRoleArn", + "Targets", + "TaskArn", + "TaskParameters", + "TaskType", + "WindowId", + ], + ) + + if invocation_params := model.get("TaskInvocationParameters"): + task_type_map = { + "MaintenanceWindowAutomationParameters": "Automation", + "MaintenanceWindowLambdaParameters": "Lambda", + "MaintenanceWindowRunCommandParameters": "RunCommand", + "MaintenanceWindowStepFunctionsParameters": "StepFunctions", + } + params["TaskInvocationParameters"] = { + task_type_map[k]: v for k, v in invocation_params.items() + } + + response = ssm.register_task_with_maintenance_window(**params) + + model["Id"] = response["WindowTaskId"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SSMMaintenanceWindowTaskProperties], + ) -> ProgressEvent[SSMMaintenanceWindowTaskProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SSMMaintenanceWindowTaskProperties], + ) -> ProgressEvent[SSMMaintenanceWindowTaskProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + ssm.deregister_task_from_maintenance_window( + WindowId=model["WindowId"], WindowTaskId=model["Id"] + ) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SSMMaintenanceWindowTaskProperties], + ) -> ProgressEvent[SSMMaintenanceWindowTaskProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask.schema.json b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask.schema.json new file mode 100644 index 0000000000000..344e3e5b83ae5 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask.schema.json @@ -0,0 +1,243 @@ +{ + "typeName": "AWS::SSM::MaintenanceWindowTask", + "description": "Resource Type definition for AWS::SSM::MaintenanceWindowTask", + "additionalProperties": false, + "properties": { + "MaxErrors": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "ServiceRoleArn": { + "type": "string" + }, + "Priority": { + "type": "integer" + }, + "MaxConcurrency": { + "type": "string" + }, + "Targets": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Target" + } + }, + "Name": { + 
"type": "string" + }, + "TaskArn": { + "type": "string" + }, + "TaskInvocationParameters": { + "$ref": "#/definitions/TaskInvocationParameters" + }, + "WindowId": { + "type": "string" + }, + "TaskParameters": { + "type": "object" + }, + "TaskType": { + "type": "string" + }, + "CutoffBehavior": { + "type": "string" + }, + "Id": { + "type": "string" + }, + "LoggingInfo": { + "$ref": "#/definitions/LoggingInfo" + } + }, + "definitions": { + "TaskInvocationParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "MaintenanceWindowStepFunctionsParameters": { + "$ref": "#/definitions/MaintenanceWindowStepFunctionsParameters" + }, + "MaintenanceWindowRunCommandParameters": { + "$ref": "#/definitions/MaintenanceWindowRunCommandParameters" + }, + "MaintenanceWindowLambdaParameters": { + "$ref": "#/definitions/MaintenanceWindowLambdaParameters" + }, + "MaintenanceWindowAutomationParameters": { + "$ref": "#/definitions/MaintenanceWindowAutomationParameters" + } + } + }, + "Target": { + "type": "object", + "additionalProperties": false, + "properties": { + "Values": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Values", + "Key" + ] + }, + "CloudWatchOutputConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "CloudWatchOutputEnabled": { + "type": "boolean" + }, + "CloudWatchLogGroupName": { + "type": "string" + } + } + }, + "MaintenanceWindowRunCommandParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "TimeoutSeconds": { + "type": "integer" + }, + "Comment": { + "type": "string" + }, + "OutputS3KeyPrefix": { + "type": "string" + }, + "Parameters": { + "type": "object" + }, + "CloudWatchOutputConfig": { + "$ref": "#/definitions/CloudWatchOutputConfig" + }, + "DocumentHashType": { + "type": "string" + }, + "ServiceRoleArn": { + "type": "string" + }, + "NotificationConfig": { + "$ref": "#/definitions/NotificationConfig" + }, + "DocumentVersion": { + "type": "string" + }, + "OutputS3BucketName": { + "type": "string" + }, + "DocumentHash": { + "type": "string" + } + } + }, + "MaintenanceWindowAutomationParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "Parameters": { + "type": "object" + }, + "DocumentVersion": { + "type": "string" + } + } + }, + "NotificationConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "NotificationEvents": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "NotificationArn": { + "type": "string" + }, + "NotificationType": { + "type": "string" + } + }, + "required": [ + "NotificationArn" + ] + }, + "MaintenanceWindowStepFunctionsParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "Input": { + "type": "string" + }, + "Name": { + "type": "string" + } + } + }, + "LoggingInfo": { + "type": "object", + "additionalProperties": false, + "properties": { + "Region": { + "type": "string" + }, + "S3Prefix": { + "type": "string" + }, + "S3Bucket": { + "type": "string" + } + }, + "required": [ + "S3Bucket", + "Region" + ] + }, + "MaintenanceWindowLambdaParameters": { + "type": "object", + "additionalProperties": false, + "properties": { + "Qualifier": { + "type": "string" + }, + "Payload": { + "type": "string" + }, + "ClientContext": { + "type": "string" + } + } + } + }, + "required": [ + "WindowId", + "Priority", + "TaskType", + "TaskArn" + ], + 
"createOnlyProperties": [ + "/properties/WindowId", + "/properties/TaskType" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask_plugin.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask_plugin.py new file mode 100644 index 0000000000000..494b10f07bd48 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_maintenancewindowtask_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SSMMaintenanceWindowTaskProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SSM::MaintenanceWindowTask" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ssm.resource_providers.aws_ssm_maintenancewindowtask import ( + SSMMaintenanceWindowTaskProvider, + ) + + self.factory = SSMMaintenanceWindowTaskProvider diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.py new file mode 100644 index 0000000000000..95ea2ecb4d214 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.py @@ -0,0 +1,226 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SSMParameterProperties(TypedDict): + Type: Optional[str] + Value: Optional[str] + AllowedPattern: Optional[str] + DataType: Optional[str] + Description: Optional[str] + Name: Optional[str] + Policies: Optional[str] + Tags: Optional[dict] + Tier: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SSMParameterProvider(ResourceProvider[SSMParameterProperties]): + TYPE = "AWS::SSM::Parameter" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SSMParameterProperties], + ) -> ProgressEvent[SSMParameterProperties]: + """ + Create a new resource. 
+ + Primary identifier fields: + - /properties/Name + + Required properties: + - Value + - Type + + Create-only properties: + - /properties/Name + + + + IAM permissions required: + - ssm:PutParameter + - ssm:AddTagsToResource + - ssm:GetParameters + + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + if not model.get("Name"): + model["Name"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + params = util.select_attributes( + model=model, + params=[ + "Name", + "Type", + "Value", + "Description", + "AllowedPattern", + "Policies", + "Tier", + ], + ) + if "Value" in params: + params["Value"] = str(params["Value"]) + + if tags := model.get("Tags"): + formatted_tags = [] + for key, value in tags.items(): + formatted_tags.append({"Key": key, "Value": value}) + + params["Tags"] = formatted_tags + + ssm.put_parameter(**params) + + return self.read(request) + + def read( + self, + request: ResourceRequest[SSMParameterProperties], + ) -> ProgressEvent[SSMParameterProperties]: + """ + Fetch resource information + + IAM permissions required: + - ssm:GetParameters + """ + ssm = request.aws_client_factory.ssm + parameter_name = request.desired_state.get("Name") + try: + resource = ssm.get_parameter(Name=parameter_name, WithDecryption=False) + except ssm.exceptions.ParameterNotFound: + return ProgressEvent( + status=OperationStatus.FAILED, + message=f"Resource of type '{self.TYPE}' with identifier '{parameter_name}' was not found.", + error_code="NotFound", + ) + + parameter = util.select_attributes(resource["Parameter"], params=self.SCHEMA["properties"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=parameter, + custom_context=request.custom_context, + ) + + def delete( + self, + request: ResourceRequest[SSMParameterProperties], + ) -> ProgressEvent[SSMParameterProperties]: + """ + Delete a resource + + IAM permissions required: + - ssm:DeleteParameter + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + ssm.delete_parameter(Name=model["Name"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SSMParameterProperties], + ) -> ProgressEvent[SSMParameterProperties]: + """ + Update a resource + + IAM permissions required: + - ssm:PutParameter + - ssm:AddTagsToResource + - ssm:RemoveTagsFromResource + - ssm:GetParameters + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + if not model.get("Name"): + model["Name"] = request.previous_state["Name"] + parameters_to_select = [ + "AllowedPattern", + "DataType", + "Description", + "Name", + "Policies", + "Tags", + "Tier", + "Type", + "Value", + ] + update_config_props = util.select_attributes(model, parameters_to_select) + + # tag handling + new_tags = update_config_props.pop("Tags", {}) + if new_tags: + self.update_tags(ssm, model, new_tags) + + ssm.put_parameter(Overwrite=True, Tags=[], **update_config_props) + + return self.read(request) + + def update_tags(self, ssm, model, new_tags): + current_tags = ssm.list_tags_for_resource( + ResourceType="Parameter", ResourceId=model["Name"] + )["TagList"] + current_tags = {tag["Key"]: tag["Value"] for tag in current_tags} + + new_tag_keys = set(new_tags.keys()) + old_tag_keys = set(current_tags.keys()) + potentially_modified_tag_keys = new_tag_keys.intersection(old_tag_keys) + tag_keys_to_add = 
new_tag_keys.difference(old_tag_keys) + tag_keys_to_remove = old_tag_keys.difference(new_tag_keys) + + for tag_key in potentially_modified_tag_keys: + if new_tags[tag_key] != current_tags[tag_key]: + tag_keys_to_add.add(tag_key) + + if tag_keys_to_add: + ssm.add_tags_to_resource( + ResourceType="Parameter", + ResourceId=model["Name"], + Tags=[ + {"Key": tag_key, "Value": tag_value} + for tag_key, tag_value in new_tags.items() + if tag_key in tag_keys_to_add + ], + ) + + if tag_keys_to_remove: + ssm.remove_tags_from_resource( + ResourceType="Parameter", ResourceId=model["Name"], TagKeys=tag_keys_to_remove + ) + + def list( + self, + request: ResourceRequest[SSMParameterProperties], + ) -> ProgressEvent[SSMParameterProperties]: + resources = request.aws_client_factory.ssm.describe_parameters() + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_models=[ + SSMParameterProperties(Name=resource["Name"]) + for resource in resources["Parameters"] + ], + ) diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.schema.json b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.schema.json new file mode 100644 index 0000000000000..9d3e47882fd3d --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter.schema.json @@ -0,0 +1,121 @@ +{ + "typeName": "AWS::SSM::Parameter", + "description": "Resource Type definition for AWS::SSM::Parameter", + "additionalProperties": false, + "properties": { + "Type": { + "type": "string", + "description": "The type of the parameter.", + "enum": [ + "String", + "StringList", + "SecureString" + ] + }, + "Value": { + "type": "string", + "description": "The value associated with the parameter." + }, + "Description": { + "type": "string", + "description": "The information about the parameter." + }, + "Policies": { + "type": "string", + "description": "The policies attached to the parameter." + }, + "AllowedPattern": { + "type": "string", + "description": "The regular expression used to validate the parameter value." + }, + "Tier": { + "type": "string", + "description": "The corresponding tier of the parameter.", + "enum": [ + "Standard", + "Advanced", + "Intelligent-Tiering" + ] + }, + "Tags": { + "type": "object", + "description": "A key-value pair to associate with a resource.", + "patternProperties": { + "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$": { + "type": "string" + } + }, + "additionalProperties": false + }, + "DataType": { + "type": "string", + "description": "The corresponding DataType of the parameter.", + "enum": [ + "text", + "aws:ec2:image" + ] + }, + "Name": { + "type": "string", + "description": "The name of the parameter." 
+ } + }, + "required": [ + "Value", + "Type" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "createOnlyProperties": [ + "/properties/Name" + ], + "primaryIdentifier": [ + "/properties/Name" + ], + "writeOnlyProperties": [ + "/properties/Tags", + "/properties/Description", + "/properties/Tier", + "/properties/AllowedPattern", + "/properties/Policies" + ], + "handlers": { + "create": { + "permissions": [ + "ssm:PutParameter", + "ssm:AddTagsToResource", + "ssm:GetParameters" + ], + "timeoutInMinutes": 5 + }, + "read": { + "permissions": [ + "ssm:GetParameters" + ] + }, + "update": { + "permissions": [ + "ssm:PutParameter", + "ssm:AddTagsToResource", + "ssm:RemoveTagsFromResource", + "ssm:GetParameters" + ], + "timeoutInMinutes": 5 + }, + "delete": { + "permissions": [ + "ssm:DeleteParameter" + ] + }, + "list": { + "permissions": [ + "ssm:DescribeParameters" + ] + } + } +} diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter_plugin.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter_plugin.py new file mode 100644 index 0000000000000..e75f657f22100 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_parameter_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SSMParameterProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SSM::Parameter" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ssm.resource_providers.aws_ssm_parameter import ( + SSMParameterProvider, + ) + + self.factory = SSMParameterProvider diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline.py new file mode 100644 index 0000000000000..7c3623c981eee --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline.py @@ -0,0 +1,165 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) + + +class SSMPatchBaselineProperties(TypedDict): + Name: Optional[str] + ApprovalRules: Optional[RuleGroup] + ApprovedPatches: Optional[list[str]] + ApprovedPatchesComplianceLevel: Optional[str] + ApprovedPatchesEnableNonSecurity: Optional[bool] + Description: Optional[str] + GlobalFilters: Optional[PatchFilterGroup] + Id: Optional[str] + OperatingSystem: Optional[str] + PatchGroups: Optional[list[str]] + RejectedPatches: Optional[list[str]] + RejectedPatchesAction: Optional[str] + Sources: Optional[list[PatchSource]] + Tags: Optional[list[Tag]] + + +class PatchFilter(TypedDict): + Key: Optional[str] + Values: Optional[list[str]] + + +class PatchFilterGroup(TypedDict): + PatchFilters: Optional[list[PatchFilter]] + + +class Rule(TypedDict): + ApproveAfterDays: Optional[int] + ApproveUntilDate: Optional[dict] + ComplianceLevel: Optional[str] + EnableNonSecurity: Optional[bool] + PatchFilterGroup: Optional[PatchFilterGroup] + 
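+# RuleGroup below mirrors the "RuleGroup" definition in aws_ssm_patchbaseline.schema.json; +# create() passes it through unchanged as the ApprovalRules parameter of the SSM +# CreatePatchBaseline call.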
+ +class RuleGroup(TypedDict): + PatchRules: Optional[list[Rule]] + + +class PatchSource(TypedDict): + Configuration: Optional[str] + Name: Optional[str] + Products: Optional[list[str]] + + +class Tag(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class SSMPatchBaselineProvider(ResourceProvider[SSMPatchBaselineProperties]): + TYPE = "AWS::SSM::PatchBaseline" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change + + def create( + self, + request: ResourceRequest[SSMPatchBaselineProperties], + ) -> ProgressEvent[SSMPatchBaselineProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Id + + Required properties: + - Name + + Create-only properties: + - /properties/OperatingSystem + + Read-only properties: + - /properties/Id + + + + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + params = util.select_attributes( + model=model, + params=[ + "OperatingSystem", + "Name", + "GlobalFilters", + "ApprovalRules", + "ApprovedPatches", + "ApprovedPatchesComplianceLevel", + "ApprovedPatchesEnableNonSecurity", + "RejectedPatches", + "RejectedPatchesAction", + "Description", + "Sources", + "ClientToken", + "Tags", + ], + ) + + response = ssm.create_patch_baseline(**params) + model["Id"] = response["BaselineId"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[SSMPatchBaselineProperties], + ) -> ProgressEvent[SSMPatchBaselineProperties]: + """ + Fetch resource information + + + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[SSMPatchBaselineProperties], + ) -> ProgressEvent[SSMPatchBaselineProperties]: + """ + Delete a resource + + + """ + model = request.desired_state + ssm = request.aws_client_factory.ssm + + ssm.delete_patch_baseline(BaselineId=model["Id"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[SSMPatchBaselineProperties], + ) -> ProgressEvent[SSMPatchBaselineProperties]: + """ + Update a resource + + + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline.schema.json b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline.schema.json new file mode 100644 index 0000000000000..84db05c4f432c --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline.schema.json @@ -0,0 +1,185 @@ +{ + "typeName": "AWS::SSM::PatchBaseline", + "description": "Resource Type definition for AWS::SSM::PatchBaseline", + "additionalProperties": false, + "properties": { + "OperatingSystem": { + "type": "string" + }, + "Description": { + "type": "string" + }, + "ApprovalRules": { + "$ref": "#/definitions/RuleGroup" + }, + "Sources": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/PatchSource" + } + }, + "Name": { + "type": "string" + }, + "RejectedPatches": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "ApprovedPatches": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "RejectedPatchesAction": { + "type": "string" + }, + "PatchGroups": { + "type": "array", + "uniqueItems": false, + "items": { + "type": 
"string" + } + }, + "ApprovedPatchesComplianceLevel": { + "type": "string" + }, + "ApprovedPatchesEnableNonSecurity": { + "type": "boolean" + }, + "Id": { + "type": "string" + }, + "GlobalFilters": { + "$ref": "#/definitions/PatchFilterGroup" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Tag" + } + } + }, + "definitions": { + "PatchFilterGroup": { + "type": "object", + "additionalProperties": false, + "properties": { + "PatchFilters": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/PatchFilter" + } + } + } + }, + "PatchFilter": { + "type": "object", + "additionalProperties": false, + "properties": { + "Values": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Key": { + "type": "string" + } + } + }, + "Tag": { + "type": "object", + "additionalProperties": false, + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + }, + "required": [ + "Value", + "Key" + ] + }, + "Rule": { + "type": "object", + "additionalProperties": false, + "properties": { + "ApproveUntilDate": { + "$ref": "#/definitions/PatchStringDate" + }, + "ApproveAfterDays": { + "type": "integer" + }, + "EnableNonSecurity": { + "type": "boolean" + }, + "ComplianceLevel": { + "type": "string" + }, + "PatchFilterGroup": { + "$ref": "#/definitions/PatchFilterGroup" + } + } + }, + "PatchStringDate": { + "type": "object", + "additionalProperties": false + }, + "PatchSource": { + "type": "object", + "additionalProperties": false, + "properties": { + "Products": { + "type": "array", + "uniqueItems": false, + "items": { + "type": "string" + } + }, + "Configuration": { + "type": "string" + }, + "Name": { + "type": "string" + } + } + }, + "RuleGroup": { + "type": "object", + "additionalProperties": false, + "properties": { + "PatchRules": { + "type": "array", + "uniqueItems": false, + "items": { + "$ref": "#/definitions/Rule" + } + } + } + } + }, + "required": [ + "Name" + ], + "createOnlyProperties": [ + "/properties/OperatingSystem" + ], + "primaryIdentifier": [ + "/properties/Id" + ], + "readOnlyProperties": [ + "/properties/Id" + ] +} diff --git a/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline_plugin.py b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline_plugin.py new file mode 100644 index 0000000000000..3991ae2eec102 --- /dev/null +++ b/localstack-core/localstack/services/ssm/resource_providers/aws_ssm_patchbaseline_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class SSMPatchBaselineProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::SSM::PatchBaseline" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.ssm.resource_providers.aws_ssm_patchbaseline import ( + SSMPatchBaselineProvider, + ) + + self.factory = SSMPatchBaselineProvider diff --git a/localstack/utils/server/__init__.py b/localstack-core/localstack/services/stepfunctions/__init__.py similarity index 100% rename from localstack/utils/server/__init__.py rename to localstack-core/localstack/services/stepfunctions/__init__.py diff --git a/tests/integration/apigateway/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/__init__.py similarity index 100% rename from 
tests/integration/apigateway/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/__init__.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/.gitignore b/localstack-core/localstack/services/stepfunctions/asl/antlr/.gitignore
new file mode 100644
index 0000000000000..ade3e916efb0c
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/.gitignore
@@ -0,0 +1,4 @@
+.antlr
+/.antlr*
+*.tokens
+*.interp
diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLIntrinsicLexer.g4 b/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLIntrinsicLexer.g4
new file mode 100644
index 0000000000000..437122207065f
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLIntrinsicLexer.g4
@@ -0,0 +1,62 @@
+// $antlr-format alignTrailingComments true, columnLimit 150, maxEmptyLinesToKeep 1, reflowComments false, useTab false
+// $antlr-format allowShortRulesOnASingleLine true, allowShortBlocksOnASingleLine true, minEmptyLines 0, alignSemicolons ownLine
+// $antlr-format alignColons trailing, singleLineOverrulesHangingColon true, alignLexerCommands true, alignLabels true, alignTrailers true
+
+lexer grammar ASLIntrinsicLexer;
+
+CONTEXT_PATH_STRING: DOLLAR DOLLAR JSON_PATH_BODY;
+
+JSON_PATH_STRING: DOLLAR JSON_PATH_BODY;
+
+STRING_VARIABLE: DOLLAR IDENTIFIER JSON_PATH_BODY;
+
+// TODO: JSONPath body composition may need strengthening to support features such as filtering conditions.
+fragment JSON_PATH_BODY: JSON_PATH_BRACK? (DOT IDENTIFIER? JSON_PATH_BRACK?)*;
+
+fragment JSON_PATH_BRACK: '[' (JSON_PATH_BRACK | ~[\]])* ']';
+
+DOLLAR : '$';
+LPAREN : '(';
+RPAREN : ')';
+COMMA : ',';
+DOT : '.';
+
+TRUE : 'true';
+FALSE : 'false';
+
+States : 'States';
+Format : 'Format';
+StringToJson : 'StringToJson';
+JsonToString : 'JsonToString';
+Array : 'Array';
+ArrayPartition : 'ArrayPartition';
+ArrayContains : 'ArrayContains';
+ArrayRange : 'ArrayRange';
+ArrayGetItem : 'ArrayGetItem';
+ArrayLength : 'ArrayLength';
+ArrayUnique : 'ArrayUnique';
+Base64Encode : 'Base64Encode';
+Base64Decode : 'Base64Decode';
+Hash : 'Hash';
+JsonMerge : 'JsonMerge';
+MathRandom : 'MathRandom';
+MathAdd : 'MathAdd';
+StringSplit : 'StringSplit';
+UUID : 'UUID';
+
+STRING: '\'' (ESC | SAFECODEPOINT)*? '\'';
+
+fragment ESC : '\\' (UNICODE | .);
+fragment UNICODE : 'u' HEX HEX HEX HEX;
+fragment HEX : [0-9a-fA-F];
+fragment SAFECODEPOINT : ~ ['\\\u0000-\u001F];
+
+INT: '-'? ('0' | [1-9] [0-9]*);
+
+NUMBER: '-'? INT ('.' [0-9]+)? EXP?;
+
+fragment EXP: [Ee] [+\-]?
INT; + +IDENTIFIER: ([0-9a-zA-Z_] | UNICODE)+; + +WS: [ \t\n]+ -> skip; \ No newline at end of file diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLIntrinsicParser.g4 b/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLIntrinsicParser.g4 new file mode 100644 index 0000000000000..be0cac2a9379d --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLIntrinsicParser.g4 @@ -0,0 +1,47 @@ +// $antlr-format alignTrailingComments true, columnLimit 150, maxEmptyLinesToKeep 1, reflowComments false, useTab false +// $antlr-format allowShortRulesOnASingleLine true, allowShortBlocksOnASingleLine true, minEmptyLines 0, alignSemicolons ownLine +// $antlr-format alignColons trailing, singleLineOverrulesHangingColon true, alignLexerCommands true, alignLabels true, alignTrailers true + +parser grammar ASLIntrinsicParser; + +options { + tokenVocab = ASLIntrinsicLexer; +} + +func_decl: states_func_decl EOF; + +states_func_decl: States DOT state_fun_name func_arg_list; + +state_fun_name: + Format + | StringToJson + | JsonToString + | Array + | ArrayPartition + | ArrayContains + | ArrayRange + | ArrayGetItem + | ArrayLength + | ArrayUnique + | Base64Encode + | Base64Decode + | Hash + | JsonMerge + | MathRandom + | MathAdd + | StringSplit + | UUID +; + +func_arg_list: LPAREN func_arg (COMMA func_arg)* RPAREN | LPAREN RPAREN; + +func_arg: + STRING # func_arg_string + | INT # func_arg_int + | NUMBER # func_arg_float + | (TRUE | FALSE) # func_arg_bool + | CONTEXT_PATH_STRING # func_arg_context_path + | JSON_PATH_STRING # func_arg_json_path + | STRING_VARIABLE # func_arg_var + | states_func_decl # func_arg_func_decl +; \ No newline at end of file diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLLexer.g4 b/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLLexer.g4 new file mode 100644 index 0000000000000..aa79ba245f380 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLLexer.g4 @@ -0,0 +1,357 @@ +// $antlr-format alignTrailingComments true, columnLimit 150, maxEmptyLinesToKeep 1, reflowComments false, useTab false +// $antlr-format allowShortRulesOnASingleLine true, allowShortBlocksOnASingleLine true, minEmptyLines 0, alignSemicolons ownLine +// $antlr-format alignColons trailing, singleLineOverrulesHangingColon true, alignLexerCommands true, alignLabels true, alignTrailers true + +lexer grammar ASLLexer; + +// Symbols. +COMMA: ','; + +COLON: ':'; + +LBRACK: '['; + +RBRACK: ']'; + +LBRACE: '{'; + +RBRACE: '}'; + +// Literals. +TRUE: 'true'; + +FALSE: 'false'; + +NULL: 'null'; + +// Keywords. 
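+// Each keyword token below is matched together with its surrounding JSON double
+// quotes (e.g. the raw input "Comment" is lexed as the single token COMMENT),
+// so the parser can match quoted property names directly, without a separate
+// string-unquoting step.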
+COMMENT: '"Comment"'; + +STATES: '"States"'; + +STARTAT: '"StartAt"'; + +NEXTSTATE: '"NextState"'; + +VERSION: '"Version"'; + +TYPE: '"Type"'; + +TASK: '"Task"'; + +CHOICE: '"Choice"'; + +FAIL: '"Fail"'; + +SUCCEED: '"Succeed"'; + +PASS: '"Pass"'; + +WAIT: '"Wait"'; + +PARALLEL: '"Parallel"'; + +MAP: '"Map"'; + +CHOICES: '"Choices"'; + +CONDITION: '"Condition"'; + +VARIABLE: '"Variable"'; + +DEFAULT: '"Default"'; + +BRANCHES: '"Branches"'; + +AND: '"And"'; + +BOOLEANEQUALS: '"BooleanEquals"'; + +BOOLEANQUALSPATH: '"BooleanEqualsPath"'; + +ISBOOLEAN: '"IsBoolean"'; + +ISNULL: '"IsNull"'; + +ISNUMERIC: '"IsNumeric"'; + +ISPRESENT: '"IsPresent"'; + +ISSTRING: '"IsString"'; + +ISTIMESTAMP: '"IsTimestamp"'; + +NOT: '"Not"'; + +NUMERICEQUALS: '"NumericEquals"'; + +NUMERICEQUALSPATH: '"NumericEqualsPath"'; + +NUMERICGREATERTHAN: '"NumericGreaterThan"'; + +NUMERICGREATERTHANPATH: '"NumericGreaterThanPath"'; + +NUMERICGREATERTHANEQUALS: '"NumericGreaterThanEquals"'; + +NUMERICGREATERTHANEQUALSPATH: '"NumericGreaterThanEqualsPath"'; + +NUMERICLESSTHAN: '"NumericLessThan"'; + +NUMERICLESSTHANPATH: '"NumericLessThanPath"'; + +NUMERICLESSTHANEQUALS: '"NumericLessThanEquals"'; + +NUMERICLESSTHANEQUALSPATH: '"NumericLessThanEqualsPath"'; + +OR: '"Or"'; + +STRINGEQUALS: '"StringEquals"'; + +STRINGEQUALSPATH: '"StringEqualsPath"'; + +STRINGGREATERTHAN: '"StringGreaterThan"'; + +STRINGGREATERTHANPATH: '"StringGreaterThanPath"'; + +STRINGGREATERTHANEQUALS: '"StringGreaterThanEquals"'; + +STRINGGREATERTHANEQUALSPATH: '"StringGreaterThanEqualsPath"'; + +STRINGLESSTHAN: '"StringLessThan"'; + +STRINGLESSTHANPATH: '"StringLessThanPath"'; + +STRINGLESSTHANEQUALS: '"StringLessThanEquals"'; + +STRINGLESSTHANEQUALSPATH: '"StringLessThanEqualsPath"'; + +STRINGMATCHES: '"StringMatches"'; + +TIMESTAMPEQUALS: '"TimestampEquals"'; + +TIMESTAMPEQUALSPATH: '"TimestampEqualsPath"'; + +TIMESTAMPGREATERTHAN: '"TimestampGreaterThan"'; + +TIMESTAMPGREATERTHANPATH: '"TimestampGreaterThanPath"'; + +TIMESTAMPGREATERTHANEQUALS: '"TimestampGreaterThanEquals"'; + +TIMESTAMPGREATERTHANEQUALSPATH: '"TimestampGreaterThanEqualsPath"'; + +TIMESTAMPLESSTHAN: '"TimestampLessThan"'; + +TIMESTAMPLESSTHANPATH: '"TimestampLessThanPath"'; + +TIMESTAMPLESSTHANEQUALS: '"TimestampLessThanEquals"'; + +TIMESTAMPLESSTHANEQUALSPATH: '"TimestampLessThanEqualsPath"'; + +SECONDSPATH: '"SecondsPath"'; + +SECONDS: '"Seconds"'; + +TIMESTAMPPATH: '"TimestampPath"'; + +TIMESTAMP: '"Timestamp"'; + +TIMEOUTSECONDS: '"TimeoutSeconds"'; + +TIMEOUTSECONDSPATH: '"TimeoutSecondsPath"'; + +HEARTBEATSECONDS: '"HeartbeatSeconds"'; + +HEARTBEATSECONDSPATH: '"HeartbeatSecondsPath"'; + +PROCESSORCONFIG: '"ProcessorConfig"'; + +MODE: '"Mode"'; + +INLINE: '"INLINE"'; + +DISTRIBUTED: '"DISTRIBUTED"'; + +EXECUTIONTYPE: '"ExecutionType"'; + +STANDARD: '"STANDARD"'; + +ITEMPROCESSOR: '"ItemProcessor"'; + +ITERATOR: '"Iterator"'; + +ITEMSELECTOR: '"ItemSelector"'; + +MAXCONCURRENCYPATH: '"MaxConcurrencyPath"'; + +MAXCONCURRENCY: '"MaxConcurrency"'; + +RESOURCE: '"Resource"'; + +INPUTPATH: '"InputPath"'; + +OUTPUTPATH: '"OutputPath"'; + +ITEMS: '"Items"'; + +ITEMSPATH: '"ItemsPath"'; + +RESULTPATH: '"ResultPath"'; + +RESULT: '"Result"'; + +PARAMETERS: '"Parameters"'; + +CREDENTIALS: '"Credentials"'; + +ROLEARN: '"RoleArn"'; + +ROLEARNPATH: '"RoleArn.$"'; + +RESULTSELECTOR: '"ResultSelector"'; + +ITEMREADER: '"ItemReader"'; + +READERCONFIG: '"ReaderConfig"'; + +INPUTTYPE: '"InputType"'; + +CSVHEADERLOCATION: '"CSVHeaderLocation"'; + +CSVHEADERS: '"CSVHeaders"'; + +MAXITEMS: 
'"MaxItems"'; + +MAXITEMSPATH: '"MaxItemsPath"'; + +TOLERATEDFAILURECOUNT: '"ToleratedFailureCount"'; + +TOLERATEDFAILURECOUNTPATH: '"ToleratedFailureCountPath"'; + +TOLERATEDFAILUREPERCENTAGE: '"ToleratedFailurePercentage"'; + +TOLERATEDFAILUREPERCENTAGEPATH: '"ToleratedFailurePercentagePath"'; + +LABEL: '"Label"'; + +RESULTWRITER: '"ResultWriter"'; + +NEXT: '"Next"'; + +END: '"End"'; + +CAUSE: '"Cause"'; + +CAUSEPATH: '"CausePath"'; + +ERROR: '"Error"'; + +ERRORPATH: '"ErrorPath"'; + +// Retry. +RETRY: '"Retry"'; + +ERROREQUALS: '"ErrorEquals"'; + +INTERVALSECONDS: '"IntervalSeconds"'; + +MAXATTEMPTS: '"MaxAttempts"'; + +BACKOFFRATE: '"BackoffRate"'; + +MAXDELAYSECONDS: '"MaxDelaySeconds"'; + +JITTERSTRATEGY: '"JitterStrategy"'; + +FULL: '"FULL"'; + +NONE: '"NONE"'; + +// Catch. +CATCH: '"Catch"'; + +// Query Language. +QUERYLANGUAGE: '"QueryLanguage"'; + +JSONPATH: '"JSONPath"'; + +JSONATA: '"JSONata"'; + +// Assign. +ASSIGN: '"Assign"'; + +// Output. +OUTPUT: '"Output"'; + +// Arguments. +ARGUMENTS: '"Arguments"'; + +// ErrorNames +ERRORNAMEStatesALL: '"States.ALL"'; + +ERRORNAMEStatesDataLimitExceeded: '"States.DataLimitExceeded"'; + +ERRORNAMEStatesHeartbeatTimeout: '"States.HeartbeatTimeout"'; + +ERRORNAMEStatesTimeout: '"States.Timeout"'; + +ERRORNAMEStatesTaskFailed: '"States.TaskFailed"'; + +ERRORNAMEStatesPermissions: '"States.Permissions"'; + +ERRORNAMEStatesResultPathMatchFailure: '"States.ResultPathMatchFailure"'; + +ERRORNAMEStatesParameterPathFailure: '"States.ParameterPathFailure"'; + +ERRORNAMEStatesBranchFailed: '"States.BranchFailed"'; + +ERRORNAMEStatesNoChoiceMatched: '"States.NoChoiceMatched"'; + +ERRORNAMEStatesIntrinsicFailure: '"States.IntrinsicFailure"'; + +ERRORNAMEStatesExceedToleratedFailureThreshold: '"States.ExceedToleratedFailureThreshold"'; + +ERRORNAMEStatesItemReaderFailed: '"States.ItemReaderFailed"'; + +ERRORNAMEStatesResultWriterFailed: '"States.ResultWriterFailed"'; + +ERRORNAMEStatesQueryEvaluationError: '"States.QueryEvaluationError"'; + +// Read-only: +ERRORNAMEStatesRuntime: '"States.Runtime"'; + +// Strings. +STRINGDOLLAR: '"' (ESC | SAFECODEPOINT)* '.$"'; + +STRINGPATHCONTEXTOBJ: '"$$' (ESC | SAFECODEPOINT)* '"'; + +STRINGPATH: '"$"' | '"$' ('.' | '[') (ESC | SAFECODEPOINT)* '"'; + +STRINGVAR: '"$' [a-zA-Z_] (ESC | SAFECODEPOINT)* '"'; + +STRINGINTRINSICFUNC: '"States.' (ESC | SAFECODEPOINT)+ '(' (ESC | SAFECODEPOINT)* ')"'; + +STRINGJSONATA: LJSONATA (ESC | SAFECODEPOINT)* RJSONATA; + +STRING: '"' (ESC | SAFECODEPOINT)* '"'; + +fragment ESC: '\\' (["\\/bfnrt] | UNICODE); + +fragment UNICODE: 'u' HEX HEX HEX HEX; + +fragment HEX: [0-9a-fA-F]; + +fragment SAFECODEPOINT: ~ ["\\\u0000-\u001F]; + +fragment LJSONATA: '"{%'; + +fragment RJSONATA: '%}"'; + +// Numbers. +INT: '0' | [1-9] [0-9]*; + +NUMBER: '-'? INT ('.' [0-9]+)? EXP?; + +fragment EXP: [Ee] [+\-]? INT; + +// Whitespace. 
+WS: [ \t\n\r]+ -> skip; \ No newline at end of file diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLParser.g4 b/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLParser.g4 new file mode 100644 index 0000000000000..a8868ea341269 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/ASLParser.g4 @@ -0,0 +1,576 @@ +// $antlr-format alignTrailingComments true, columnLimit 150, maxEmptyLinesToKeep 1, reflowComments false, useTab false +// $antlr-format allowShortRulesOnASingleLine true, allowShortBlocksOnASingleLine true, minEmptyLines 0, alignSemicolons ownLine +// $antlr-format alignColons trailing, singleLineOverrulesHangingColon true, alignLexerCommands true, alignLabels true, alignTrailers true + +parser grammar ASLParser; + +options { + tokenVocab = ASLLexer; +} + +state_machine: program_decl EOF; + +program_decl: LBRACE top_layer_stmt (COMMA top_layer_stmt)* RBRACE; + +top_layer_stmt: + comment_decl + | version_decl + | query_language_decl + | startat_decl + | states_decl + | timeout_seconds_decl +; + +startat_decl: STARTAT COLON string_literal; + +comment_decl: COMMENT COLON string_literal; + +version_decl: VERSION COLON string_literal; + +query_language_decl: QUERYLANGUAGE COLON (JSONPATH | JSONATA); + +state_stmt: + comment_decl + | query_language_decl + | type_decl + | input_path_decl + | resource_decl + | next_decl + | result_decl + | result_path_decl + | output_path_decl + | end_decl + | default_decl + | choices_decl + | error_decl + | cause_decl + | seconds_decl + | timestamp_decl + | items_decl + | items_path_decl + | item_processor_decl + | iterator_decl + | item_selector_decl + | item_reader_decl + | max_concurrency_decl + | timeout_seconds_decl + | heartbeat_seconds_decl + | branches_decl + | parameters_decl + | retry_decl + | catch_decl + | result_selector_decl + | tolerated_failure_count_decl + | tolerated_failure_percentage_decl + | label_decl + | result_writer_decl + | assign_decl + | arguments_decl + | output_decl + | credentials_decl +; + +states_decl: STATES COLON LBRACE state_decl (COMMA state_decl)* RBRACE; + +state_decl: string_literal COLON state_decl_body; + +state_decl_body: LBRACE state_stmt (COMMA state_stmt)* RBRACE; + +type_decl: TYPE COLON state_type; + +next_decl: NEXT COLON string_literal; + +resource_decl: RESOURCE COLON string_literal; + +input_path_decl: INPUTPATH COLON (NULL | string_sampler); + +result_decl: RESULT COLON json_value_decl; + +result_path_decl: RESULTPATH COLON (NULL | string_jsonpath); + +output_path_decl: OUTPUTPATH COLON (NULL | string_sampler); + +end_decl: END COLON (TRUE | FALSE); + +default_decl: DEFAULT COLON string_literal; + +error_decl: + ERROR COLON (string_jsonata | string_literal) # error + | ERRORPATH COLON string_expression_simple # error_path +; + +cause_decl: + CAUSE COLON (string_jsonata | string_literal) # cause + | CAUSEPATH COLON string_expression_simple # cause_path +; + +seconds_decl: + SECONDS COLON string_jsonata # seconds_jsonata + | SECONDS COLON INT # seconds_int + | SECONDSPATH COLON string_sampler # seconds_path +; + +timestamp_decl: + TIMESTAMP COLON (string_jsonata | string_literal) # timestamp + | TIMESTAMPPATH COLON string_sampler # timestamp_path +; + +items_decl: + ITEMS COLON jsonata_template_value_array # items_array + | ITEMS COLON string_jsonata # items_jsonata +; + +items_path_decl: ITEMSPATH COLON string_sampler; + +max_concurrency_decl: + MAXCONCURRENCY COLON string_jsonata # max_concurrency_jsonata + | MAXCONCURRENCY COLON INT 
# max_concurrency_int + | MAXCONCURRENCYPATH COLON string_sampler # max_concurrency_path +; + +parameters_decl: PARAMETERS COLON payload_tmpl_decl; + +credentials_decl: CREDENTIALS COLON LBRACE role_arn_decl RBRACE; + +role_arn_decl: + ROLEARN COLON (string_jsonata | string_literal) # role_arn + | ROLEARNPATH COLON string_expression_simple # role_path +; + +timeout_seconds_decl: + TIMEOUTSECONDS COLON string_jsonata # timeout_seconds_jsonata + | TIMEOUTSECONDS COLON INT # timeout_seconds_int + | TIMEOUTSECONDSPATH COLON string_sampler # timeout_seconds_path +; + +heartbeat_seconds_decl: + HEARTBEATSECONDS COLON string_jsonata # heartbeat_seconds_jsonata + | HEARTBEATSECONDS COLON INT # heartbeat_seconds_int + | HEARTBEATSECONDSPATH COLON string_sampler # heartbeat_seconds_path +; + +payload_tmpl_decl: LBRACE payload_binding (COMMA payload_binding)* RBRACE | LBRACE RBRACE; + +payload_binding: + STRINGDOLLAR COLON string_expression_simple # payload_binding_sample + | string_literal COLON payload_value_decl # payload_binding_value +; + +payload_arr_decl: LBRACK payload_value_decl (COMMA payload_value_decl)* RBRACK | LBRACK RBRACK; + +payload_value_decl: payload_arr_decl | payload_tmpl_decl | payload_value_lit; + +payload_value_lit: + NUMBER # payload_value_float + | INT # payload_value_int + | (TRUE | FALSE) # payload_value_bool + | NULL # payload_value_null + | string_literal # payload_value_str +; + +assign_decl: ASSIGN COLON assign_decl_body; + +assign_decl_body: LBRACE RBRACE | LBRACE assign_decl_binding (COMMA assign_decl_binding)* RBRACE; + +assign_decl_binding: assign_template_binding; + +assign_template_value_object: + LBRACE RBRACE + | LBRACE assign_template_binding (COMMA assign_template_binding)* RBRACE +; + +assign_template_binding: + STRINGDOLLAR COLON string_expression_simple # assign_template_binding_string_expression_simple + | string_literal COLON assign_template_value # assign_template_binding_value +; + +assign_template_value: + assign_template_value_object + | assign_template_value_array + | assign_template_value_terminal +; + +assign_template_value_array: + LBRACK RBRACK + | LBRACK assign_template_value (COMMA assign_template_value)* RBRACK +; + +assign_template_value_terminal: + NUMBER # assign_template_value_terminal_float + | INT # assign_template_value_terminal_int + | (TRUE | FALSE) # assign_template_value_terminal_bool + | NULL # assign_template_value_terminal_null + | string_jsonata # assign_template_value_terminal_string_jsonata + | string_literal # assign_template_value_terminal_string_literal +; + +arguments_decl: + ARGUMENTS COLON jsonata_template_value_object # arguments_jsonata_template_value_object + | ARGUMENTS COLON string_jsonata # arguments_string_jsonata +; + +output_decl: OUTPUT COLON jsonata_template_value; + +jsonata_template_value_object: + LBRACE RBRACE + | LBRACE jsonata_template_binding (COMMA jsonata_template_binding)* RBRACE +; + +jsonata_template_binding: string_literal COLON jsonata_template_value; + +jsonata_template_value: + jsonata_template_value_object + | jsonata_template_value_array + | jsonata_template_value_terminal +; + +jsonata_template_value_array: + LBRACK RBRACK + | LBRACK jsonata_template_value (COMMA jsonata_template_value)* RBRACK +; + +jsonata_template_value_terminal: + NUMBER # jsonata_template_value_terminal_float + | INT # jsonata_template_value_terminal_int + | (TRUE | FALSE) # jsonata_template_value_terminal_bool + | NULL # jsonata_template_value_terminal_null + | string_jsonata # 
jsonata_template_value_terminal_string_jsonata + | string_literal # jsonata_template_value_terminal_string_literal +; + +result_selector_decl: RESULTSELECTOR COLON payload_tmpl_decl; + +state_type: TASK | PASS | CHOICE | FAIL | SUCCEED | WAIT | MAP | PARALLEL; + +choices_decl: CHOICES COLON LBRACK choice_rule (COMMA choice_rule)* RBRACK; + +choice_rule: + LBRACE comparison_variable_stmt (COMMA comparison_variable_stmt)+ RBRACE # choice_rule_comparison_variable + | LBRACE comparison_composite_stmt (COMMA comparison_composite_stmt)* RBRACE # choice_rule_comparison_composite +; + +comparison_variable_stmt: + variable_decl + | comparison_func + | next_decl + | assign_decl + | output_decl + | comment_decl +; + +comparison_composite_stmt: comparison_composite | next_decl | assign_decl | comment_decl; + +comparison_composite: + choice_operator COLON (choice_rule | LBRACK choice_rule (COMMA choice_rule)* RBRACK) +; // TODO: this allows for Next definitions in nested choice_rules, is this supported at parse time? + +variable_decl: VARIABLE COLON string_sampler; + +comparison_func: + CONDITION COLON (TRUE | FALSE) # condition_lit + | CONDITION COLON string_jsonata # condition_string_jsonata + | comparison_op COLON string_variable_sample # comparison_func_string_variable_sample + | comparison_op COLON json_value_decl # comparison_func_value +; + +branches_decl: BRANCHES COLON LBRACK program_decl (COMMA program_decl)* RBRACK; + +item_processor_decl: + ITEMPROCESSOR COLON LBRACE item_processor_item (COMMA item_processor_item)* RBRACE +; + +item_processor_item: processor_config_decl | startat_decl | states_decl | comment_decl; + +processor_config_decl: + PROCESSORCONFIG COLON LBRACE processor_config_field (COMMA processor_config_field)* RBRACE +; + +processor_config_field: mode_decl | execution_decl; + +mode_decl: MODE COLON mode_type; + +mode_type: INLINE | DISTRIBUTED; + +execution_decl: EXECUTIONTYPE COLON execution_type; + +execution_type: STANDARD; + +iterator_decl: ITERATOR COLON LBRACE iterator_decl_item (COMMA iterator_decl_item)* RBRACE; + +iterator_decl_item: startat_decl | states_decl | comment_decl | processor_config_decl; + +item_selector_decl: ITEMSELECTOR COLON assign_template_value_object; + +item_reader_decl: ITEMREADER COLON LBRACE items_reader_field (COMMA items_reader_field)* RBRACE; + +items_reader_field: resource_decl | reader_config_decl | parameters_decl | arguments_decl; + +reader_config_decl: + READERCONFIG COLON LBRACE reader_config_field (COMMA reader_config_field)* RBRACE +; + +reader_config_field: + input_type_decl + | csv_header_location_decl + | csv_headers_decl + | max_items_decl +; + +input_type_decl: INPUTTYPE COLON string_literal; + +csv_header_location_decl: CSVHEADERLOCATION COLON string_literal; + +csv_headers_decl: + CSVHEADERS COLON LBRACK string_literal (COMMA string_literal)* RBRACK +; // TODO: are empty "CSVHeaders" list values supported? 
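+
+// A sketch of a "ReaderConfig" object accepted by the reader_config rules above;
+// the concrete values are an assumed, illustrative example ("GIVEN" is the
+// CSVHeaderLocation variant that supplies headers via "CSVHeaders"):
+//   "ReaderConfig": {
+//       "InputType": "CSV",
+//       "CSVHeaderLocation": "GIVEN",
+//       "CSVHeaders": ["Name", "Value"]
+//   }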
+ +max_items_decl: + MAXITEMS COLON string_jsonata # max_items_string_jsonata + | MAXITEMS COLON INT # max_items_int + | MAXITEMSPATH COLON string_sampler # max_items_path +; + +tolerated_failure_count_decl: + TOLERATEDFAILURECOUNT COLON string_jsonata # tolerated_failure_count_string_jsonata + | TOLERATEDFAILURECOUNT COLON INT # tolerated_failure_count_int + | TOLERATEDFAILURECOUNTPATH COLON string_sampler # tolerated_failure_count_path +; + +tolerated_failure_percentage_decl: + TOLERATEDFAILUREPERCENTAGE COLON string_jsonata # tolerated_failure_percentage_string_jsonata + | TOLERATEDFAILUREPERCENTAGE COLON NUMBER # tolerated_failure_percentage_number + | TOLERATEDFAILUREPERCENTAGEPATH COLON string_sampler # tolerated_failure_percentage_path +; + +label_decl: LABEL COLON string_literal; + +result_writer_decl: + RESULTWRITER COLON LBRACE result_writer_field (COMMA result_writer_field)* RBRACE +; + +result_writer_field: resource_decl | parameters_decl; + +retry_decl: RETRY COLON LBRACK (retrier_decl (COMMA retrier_decl)*)? RBRACK; + +retrier_decl: LBRACE retrier_stmt (COMMA retrier_stmt)* RBRACE; + +retrier_stmt: + error_equals_decl + | interval_seconds_decl + | max_attempts_decl + | backoff_rate_decl + | max_delay_seconds_decl + | jitter_strategy_decl + | comment_decl +; + +error_equals_decl: ERROREQUALS COLON LBRACK error_name (COMMA error_name)* RBRACK; + +interval_seconds_decl: INTERVALSECONDS COLON INT; + +max_attempts_decl: MAXATTEMPTS COLON INT; + +backoff_rate_decl: BACKOFFRATE COLON (INT | NUMBER); + +max_delay_seconds_decl: MAXDELAYSECONDS COLON INT; + +jitter_strategy_decl: JITTERSTRATEGY COLON (FULL | NONE); + +catch_decl: CATCH COLON LBRACK (catcher_decl (COMMA catcher_decl)*)? RBRACK; + +catcher_decl: LBRACE catcher_stmt (COMMA catcher_stmt)* RBRACE; + +catcher_stmt: + error_equals_decl + | result_path_decl + | next_decl + | assign_decl + | output_decl + | comment_decl +; + +comparison_op: + BOOLEANEQUALS + | BOOLEANQUALSPATH + | ISBOOLEAN + | ISNULL + | ISNUMERIC + | ISPRESENT + | ISSTRING + | ISTIMESTAMP + | NUMERICEQUALS + | NUMERICEQUALSPATH + | NUMERICGREATERTHAN + | NUMERICGREATERTHANPATH + | NUMERICGREATERTHANEQUALS + | NUMERICGREATERTHANEQUALSPATH + | NUMERICLESSTHAN + | NUMERICLESSTHANPATH + | NUMERICLESSTHANEQUALS + | NUMERICLESSTHANEQUALSPATH + | STRINGEQUALS + | STRINGEQUALSPATH + | STRINGGREATERTHAN + | STRINGGREATERTHANPATH + | STRINGGREATERTHANEQUALS + | STRINGGREATERTHANEQUALSPATH + | STRINGLESSTHAN + | STRINGLESSTHANPATH + | STRINGLESSTHANEQUALS + | STRINGLESSTHANEQUALSPATH + | STRINGMATCHES + | TIMESTAMPEQUALS + | TIMESTAMPEQUALSPATH + | TIMESTAMPGREATERTHAN + | TIMESTAMPGREATERTHANPATH + | TIMESTAMPGREATERTHANEQUALS + | TIMESTAMPGREATERTHANEQUALSPATH + | TIMESTAMPLESSTHAN + | TIMESTAMPLESSTHANPATH + | TIMESTAMPLESSTHANEQUALS + | TIMESTAMPLESSTHANEQUALSPATH +; + +choice_operator: NOT | AND | OR; + +states_error_name: + ERRORNAMEStatesALL + | ERRORNAMEStatesDataLimitExceeded + | ERRORNAMEStatesHeartbeatTimeout + | ERRORNAMEStatesTimeout + | ERRORNAMEStatesTaskFailed + | ERRORNAMEStatesPermissions + | ERRORNAMEStatesResultPathMatchFailure + | ERRORNAMEStatesParameterPathFailure + | ERRORNAMEStatesBranchFailed + | ERRORNAMEStatesNoChoiceMatched + | ERRORNAMEStatesIntrinsicFailure + | ERRORNAMEStatesExceedToleratedFailureThreshold + | ERRORNAMEStatesItemReaderFailed + | ERRORNAMEStatesResultWriterFailed + | ERRORNAMEStatesRuntime + | ERRORNAMEStatesQueryEvaluationError +; + +error_name: states_error_name | string_literal; + +json_obj_decl: LBRACE json_binding 
(COMMA json_binding)* RBRACE | LBRACE RBRACE; + +json_binding: string_literal COLON json_value_decl; + +json_arr_decl: LBRACK json_value_decl (COMMA json_value_decl)* RBRACK | LBRACK RBRACK; + +json_value_decl: + NUMBER + | INT + | TRUE + | FALSE + | NULL + | json_binding + | json_arr_decl + | json_obj_decl + | string_literal +; + +string_sampler : string_jsonpath | string_context_path | string_variable_sample; +string_expression_simple : string_sampler | string_intrinsic_function; +string_expression : string_expression_simple | string_jsonata; + +string_jsonpath : STRINGPATH; +string_context_path : STRINGPATHCONTEXTOBJ; +string_variable_sample : STRINGVAR; +string_intrinsic_function : STRINGINTRINSICFUNC; +string_jsonata : STRINGJSONATA; +string_literal: + STRING + | STRINGDOLLAR + | soft_string_keyword + | comparison_op + | choice_operator + | states_error_name + | string_expression +; + +soft_string_keyword: + QUERYLANGUAGE + | ASSIGN + | ARGUMENTS + | OUTPUT + | COMMENT + | STATES + | STARTAT + | NEXTSTATE + | TYPE + | TASK + | CHOICE + | FAIL + | SUCCEED + | PASS + | WAIT + | PARALLEL + | MAP + | CHOICES + | CONDITION + | VARIABLE + | DEFAULT + | BRANCHES + | SECONDSPATH + | SECONDS + | TIMESTAMPPATH + | TIMESTAMP + | TIMEOUTSECONDS + | TIMEOUTSECONDSPATH + | HEARTBEATSECONDS + | HEARTBEATSECONDSPATH + | PROCESSORCONFIG + | MODE + | INLINE + | DISTRIBUTED + | EXECUTIONTYPE + | STANDARD + | ITEMS + | ITEMPROCESSOR + | ITERATOR + | ITEMSELECTOR + | MAXCONCURRENCY + | MAXCONCURRENCYPATH + | RESOURCE + | INPUTPATH + | OUTPUTPATH + | ITEMSPATH + | RESULTPATH + | RESULT + | PARAMETERS + | CREDENTIALS + | ROLEARN + | ROLEARNPATH + | RESULTSELECTOR + | ITEMREADER + | READERCONFIG + | INPUTTYPE + | CSVHEADERLOCATION + | CSVHEADERS + | MAXITEMS + | MAXITEMSPATH + | TOLERATEDFAILURECOUNT + | TOLERATEDFAILURECOUNTPATH + | TOLERATEDFAILUREPERCENTAGE + | TOLERATEDFAILUREPERCENTAGEPATH + | LABEL + | RESULTWRITER + | NEXT + | END + | CAUSE + | ERROR + | RETRY + | ERROREQUALS + | INTERVALSECONDS + | MAXATTEMPTS + | BACKOFFRATE + | MAXDELAYSECONDS + | JITTERSTRATEGY + | FULL + | NONE + | CATCH + | VERSION +; \ No newline at end of file diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/Makefile b/localstack-core/localstack/services/stepfunctions/asl/antlr/Makefile new file mode 100644 index 0000000000000..c74eba7c02dfb --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/Makefile @@ -0,0 +1,39 @@ +# Define default ANTLR4 tool dump directory. +ANTLR4_DIR = .antlr + +# Define the default input and output directory for ANTLR4 grammars. +ANTLR4_SRC_DIR = . +ANTLR4_TARGET_DIR = $(ANTLR4_SRC_DIR)/runtime +ANTLR4_GRAMMAR_FILES = $(wildcard $(ANTLR4_SRC_DIR)/*.g4) + +# Define the default ANTLR4 version and jar file. +ANTLR4_VERSION ?= 4.13.2 +ANTLR4_JAR ?= $(ANTLR4_DIR)/antlr-$(ANTLR4_VERSION)-complete.jar + +# Define the download path for ANTLR4 parser generator. +ANTLR4_URL = https://www.antlr.org/download/antlr-$(ANTLR4_VERSION)-complete.jar + +# Define the default ANTLR4 run command and options. +RUN_ANTLR4 = java -jar $(ANTLR4_JAR) -Dlanguage=Python3 -visitor + +install: ## Install the dependencies for compiling the ANTLR4 project. + @npm i -g --save-dev antlr-format@2.1.5 + @mkdir -p $(ANTLR4_DIR) + @curl -o $(ANTLR4_JAR) $(ANTLR4_URL) + +build: $(ANTLR4_GRAMMAR_FILES) ## Build the ANTLR4 project. 
+ @echo "Compiling grammar files in $(ANTLR_SRC_DIR)" + @mkdir -p $(ANTLR4_TARGET_DIR) + @for grammar in $^ ; do \ + echo "Processing $$grammar..."; \ + $(RUN_ANTLR4) $$grammar -o $(ANTLR4_TARGET_DIR) -Xexact-output-dir; \ + done + +format: + @antlr-format *.g4 + +clean: ## Clean up the ANTLR4 project directory. + rm -rf $(ANTLR4_TARGET_DIR) + rm -rf $(ANTLR4_DIR) + +.PHONY: install build format clean diff --git a/tests/integration/awslambda/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/__init__.py similarity index 100% rename from tests/integration/awslambda/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/antlr/__init__.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicLexer.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicLexer.py new file mode 100644 index 0000000000000..cef42738dc801 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicLexer.py @@ -0,0 +1,243 @@ +# Generated from ASLIntrinsicLexer.g4 by ANTLR 4.13.2 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + + +def serializedATN(): + return [ + 4,0,34,412,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5, + 2,6,7,6,2,7,7,7,2,8,7,8,2,9,7,9,2,10,7,10,2,11,7,11,2,12,7,12,2, + 13,7,13,2,14,7,14,2,15,7,15,2,16,7,16,2,17,7,17,2,18,7,18,2,19,7, + 19,2,20,7,20,2,21,7,21,2,22,7,22,2,23,7,23,2,24,7,24,2,25,7,25,2, + 26,7,26,2,27,7,27,2,28,7,28,2,29,7,29,2,30,7,30,2,31,7,31,2,32,7, + 32,2,33,7,33,2,34,7,34,2,35,7,35,2,36,7,36,2,37,7,37,2,38,7,38,2, + 39,7,39,2,40,7,40,1,0,1,0,1,0,1,0,1,1,1,1,1,1,1,2,1,2,1,2,1,2,1, + 3,3,3,96,8,3,1,3,1,3,3,3,100,8,3,1,3,3,3,103,8,3,5,3,105,8,3,10, + 3,12,3,108,9,3,1,4,1,4,1,4,5,4,113,8,4,10,4,12,4,116,9,4,1,4,1,4, + 1,5,1,5,1,6,1,6,1,7,1,7,1,8,1,8,1,9,1,9,1,10,1,10,1,10,1,10,1,10, + 1,11,1,11,1,11,1,11,1,11,1,11,1,12,1,12,1,12,1,12,1,12,1,12,1,12, + 1,13,1,13,1,13,1,13,1,13,1,13,1,13,1,14,1,14,1,14,1,14,1,14,1,14, + 1,14,1,14,1,14,1,14,1,14,1,14,1,14,1,15,1,15,1,15,1,15,1,15,1,15, + 1,15,1,15,1,15,1,15,1,15,1,15,1,15,1,16,1,16,1,16,1,16,1,16,1,16, + 1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17, + 1,17,1,17,1,18,1,18,1,18,1,18,1,18,1,18,1,18,1,18,1,18,1,18,1,18, + 1,18,1,18,1,18,1,19,1,19,1,19,1,19,1,19,1,19,1,19,1,19,1,19,1,19, + 1,19,1,20,1,20,1,20,1,20,1,20,1,20,1,20,1,20,1,20,1,20,1,20,1,20, + 1,20,1,21,1,21,1,21,1,21,1,21,1,21,1,21,1,21,1,21,1,21,1,21,1,21, + 1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,23, + 1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,24, + 1,24,1,24,1,24,1,24,1,24,1,24,1,24,1,24,1,24,1,24,1,24,1,24,1,25, + 1,25,1,25,1,25,1,25,1,26,1,26,1,26,1,26,1,26,1,26,1,26,1,26,1,26, + 1,26,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,28, + 1,28,1,28,1,28,1,28,1,28,1,28,1,28,1,29,1,29,1,29,1,29,1,29,1,29, + 1,29,1,29,1,29,1,29,1,29,1,29,1,30,1,30,1,30,1,30,1,30,1,31,1,31, + 1,31,5,31,344,8,31,10,31,12,31,347,9,31,1,31,1,31,1,32,1,32,1,32, + 3,32,354,8,32,1,33,1,33,1,33,1,33,1,33,1,33,1,34,1,34,1,35,1,35, + 1,36,3,36,367,8,36,1,36,1,36,1,36,5,36,372,8,36,10,36,12,36,375, + 9,36,3,36,377,8,36,1,37,3,37,380,8,37,1,37,1,37,1,37,4,37,385,8, + 37,11,37,12,37,386,3,37,389,8,37,1,37,3,37,392,8,37,1,38,1,38,3, + 38,396,8,38,1,38,1,38,1,39,1,39,4,39,402,8,39,11,39,12,39,403,1, + 40,4,40,407,8,40,11,40,12,40,408,1,40,1,40,1,345,0,41,1,1,3,2,5, + 
3,7,0,9,0,11,4,13,5,15,6,17,7,19,8,21,9,23,10,25,11,27,12,29,13, + 31,14,33,15,35,16,37,17,39,18,41,19,43,20,45,21,47,22,49,23,51,24, + 53,25,55,26,57,27,59,28,61,29,63,30,65,0,67,0,69,0,71,0,73,31,75, + 32,77,0,79,33,81,34,1,0,9,1,0,93,93,3,0,48,57,65,70,97,102,3,0,0, + 31,39,39,92,92,1,0,49,57,1,0,48,57,2,0,69,69,101,101,2,0,43,43,45, + 45,4,0,48,57,65,90,95,95,97,122,2,0,9,10,32,32,424,0,1,1,0,0,0,0, + 3,1,0,0,0,0,5,1,0,0,0,0,11,1,0,0,0,0,13,1,0,0,0,0,15,1,0,0,0,0,17, + 1,0,0,0,0,19,1,0,0,0,0,21,1,0,0,0,0,23,1,0,0,0,0,25,1,0,0,0,0,27, + 1,0,0,0,0,29,1,0,0,0,0,31,1,0,0,0,0,33,1,0,0,0,0,35,1,0,0,0,0,37, + 1,0,0,0,0,39,1,0,0,0,0,41,1,0,0,0,0,43,1,0,0,0,0,45,1,0,0,0,0,47, + 1,0,0,0,0,49,1,0,0,0,0,51,1,0,0,0,0,53,1,0,0,0,0,55,1,0,0,0,0,57, + 1,0,0,0,0,59,1,0,0,0,0,61,1,0,0,0,0,63,1,0,0,0,0,73,1,0,0,0,0,75, + 1,0,0,0,0,79,1,0,0,0,0,81,1,0,0,0,1,83,1,0,0,0,3,87,1,0,0,0,5,90, + 1,0,0,0,7,95,1,0,0,0,9,109,1,0,0,0,11,119,1,0,0,0,13,121,1,0,0,0, + 15,123,1,0,0,0,17,125,1,0,0,0,19,127,1,0,0,0,21,129,1,0,0,0,23,134, + 1,0,0,0,25,140,1,0,0,0,27,147,1,0,0,0,29,154,1,0,0,0,31,167,1,0, + 0,0,33,180,1,0,0,0,35,186,1,0,0,0,37,201,1,0,0,0,39,215,1,0,0,0, + 41,226,1,0,0,0,43,239,1,0,0,0,45,251,1,0,0,0,47,263,1,0,0,0,49,276, + 1,0,0,0,51,289,1,0,0,0,53,294,1,0,0,0,55,304,1,0,0,0,57,315,1,0, + 0,0,59,323,1,0,0,0,61,335,1,0,0,0,63,340,1,0,0,0,65,350,1,0,0,0, + 67,355,1,0,0,0,69,361,1,0,0,0,71,363,1,0,0,0,73,366,1,0,0,0,75,379, + 1,0,0,0,77,393,1,0,0,0,79,401,1,0,0,0,81,406,1,0,0,0,83,84,3,11, + 5,0,84,85,3,11,5,0,85,86,3,7,3,0,86,2,1,0,0,0,87,88,3,11,5,0,88, + 89,3,7,3,0,89,4,1,0,0,0,90,91,3,11,5,0,91,92,3,79,39,0,92,93,3,7, + 3,0,93,6,1,0,0,0,94,96,3,9,4,0,95,94,1,0,0,0,95,96,1,0,0,0,96,106, + 1,0,0,0,97,99,3,19,9,0,98,100,3,79,39,0,99,98,1,0,0,0,99,100,1,0, + 0,0,100,102,1,0,0,0,101,103,3,9,4,0,102,101,1,0,0,0,102,103,1,0, + 0,0,103,105,1,0,0,0,104,97,1,0,0,0,105,108,1,0,0,0,106,104,1,0,0, + 0,106,107,1,0,0,0,107,8,1,0,0,0,108,106,1,0,0,0,109,114,5,91,0,0, + 110,113,3,9,4,0,111,113,8,0,0,0,112,110,1,0,0,0,112,111,1,0,0,0, + 113,116,1,0,0,0,114,112,1,0,0,0,114,115,1,0,0,0,115,117,1,0,0,0, + 116,114,1,0,0,0,117,118,5,93,0,0,118,10,1,0,0,0,119,120,5,36,0,0, + 120,12,1,0,0,0,121,122,5,40,0,0,122,14,1,0,0,0,123,124,5,41,0,0, + 124,16,1,0,0,0,125,126,5,44,0,0,126,18,1,0,0,0,127,128,5,46,0,0, + 128,20,1,0,0,0,129,130,5,116,0,0,130,131,5,114,0,0,131,132,5,117, + 0,0,132,133,5,101,0,0,133,22,1,0,0,0,134,135,5,102,0,0,135,136,5, + 97,0,0,136,137,5,108,0,0,137,138,5,115,0,0,138,139,5,101,0,0,139, + 24,1,0,0,0,140,141,5,83,0,0,141,142,5,116,0,0,142,143,5,97,0,0,143, + 144,5,116,0,0,144,145,5,101,0,0,145,146,5,115,0,0,146,26,1,0,0,0, + 147,148,5,70,0,0,148,149,5,111,0,0,149,150,5,114,0,0,150,151,5,109, + 0,0,151,152,5,97,0,0,152,153,5,116,0,0,153,28,1,0,0,0,154,155,5, + 83,0,0,155,156,5,116,0,0,156,157,5,114,0,0,157,158,5,105,0,0,158, + 159,5,110,0,0,159,160,5,103,0,0,160,161,5,84,0,0,161,162,5,111,0, + 0,162,163,5,74,0,0,163,164,5,115,0,0,164,165,5,111,0,0,165,166,5, + 110,0,0,166,30,1,0,0,0,167,168,5,74,0,0,168,169,5,115,0,0,169,170, + 5,111,0,0,170,171,5,110,0,0,171,172,5,84,0,0,172,173,5,111,0,0,173, + 174,5,83,0,0,174,175,5,116,0,0,175,176,5,114,0,0,176,177,5,105,0, + 0,177,178,5,110,0,0,178,179,5,103,0,0,179,32,1,0,0,0,180,181,5,65, + 0,0,181,182,5,114,0,0,182,183,5,114,0,0,183,184,5,97,0,0,184,185, + 5,121,0,0,185,34,1,0,0,0,186,187,5,65,0,0,187,188,5,114,0,0,188, + 189,5,114,0,0,189,190,5,97,0,0,190,191,5,121,0,0,191,192,5,80,0, + 0,192,193,5,97,0,0,193,194,5,114,0,0,194,195,5,116,0,0,195,196,5, + 
105,0,0,196,197,5,116,0,0,197,198,5,105,0,0,198,199,5,111,0,0,199, + 200,5,110,0,0,200,36,1,0,0,0,201,202,5,65,0,0,202,203,5,114,0,0, + 203,204,5,114,0,0,204,205,5,97,0,0,205,206,5,121,0,0,206,207,5,67, + 0,0,207,208,5,111,0,0,208,209,5,110,0,0,209,210,5,116,0,0,210,211, + 5,97,0,0,211,212,5,105,0,0,212,213,5,110,0,0,213,214,5,115,0,0,214, + 38,1,0,0,0,215,216,5,65,0,0,216,217,5,114,0,0,217,218,5,114,0,0, + 218,219,5,97,0,0,219,220,5,121,0,0,220,221,5,82,0,0,221,222,5,97, + 0,0,222,223,5,110,0,0,223,224,5,103,0,0,224,225,5,101,0,0,225,40, + 1,0,0,0,226,227,5,65,0,0,227,228,5,114,0,0,228,229,5,114,0,0,229, + 230,5,97,0,0,230,231,5,121,0,0,231,232,5,71,0,0,232,233,5,101,0, + 0,233,234,5,116,0,0,234,235,5,73,0,0,235,236,5,116,0,0,236,237,5, + 101,0,0,237,238,5,109,0,0,238,42,1,0,0,0,239,240,5,65,0,0,240,241, + 5,114,0,0,241,242,5,114,0,0,242,243,5,97,0,0,243,244,5,121,0,0,244, + 245,5,76,0,0,245,246,5,101,0,0,246,247,5,110,0,0,247,248,5,103,0, + 0,248,249,5,116,0,0,249,250,5,104,0,0,250,44,1,0,0,0,251,252,5,65, + 0,0,252,253,5,114,0,0,253,254,5,114,0,0,254,255,5,97,0,0,255,256, + 5,121,0,0,256,257,5,85,0,0,257,258,5,110,0,0,258,259,5,105,0,0,259, + 260,5,113,0,0,260,261,5,117,0,0,261,262,5,101,0,0,262,46,1,0,0,0, + 263,264,5,66,0,0,264,265,5,97,0,0,265,266,5,115,0,0,266,267,5,101, + 0,0,267,268,5,54,0,0,268,269,5,52,0,0,269,270,5,69,0,0,270,271,5, + 110,0,0,271,272,5,99,0,0,272,273,5,111,0,0,273,274,5,100,0,0,274, + 275,5,101,0,0,275,48,1,0,0,0,276,277,5,66,0,0,277,278,5,97,0,0,278, + 279,5,115,0,0,279,280,5,101,0,0,280,281,5,54,0,0,281,282,5,52,0, + 0,282,283,5,68,0,0,283,284,5,101,0,0,284,285,5,99,0,0,285,286,5, + 111,0,0,286,287,5,100,0,0,287,288,5,101,0,0,288,50,1,0,0,0,289,290, + 5,72,0,0,290,291,5,97,0,0,291,292,5,115,0,0,292,293,5,104,0,0,293, + 52,1,0,0,0,294,295,5,74,0,0,295,296,5,115,0,0,296,297,5,111,0,0, + 297,298,5,110,0,0,298,299,5,77,0,0,299,300,5,101,0,0,300,301,5,114, + 0,0,301,302,5,103,0,0,302,303,5,101,0,0,303,54,1,0,0,0,304,305,5, + 77,0,0,305,306,5,97,0,0,306,307,5,116,0,0,307,308,5,104,0,0,308, + 309,5,82,0,0,309,310,5,97,0,0,310,311,5,110,0,0,311,312,5,100,0, + 0,312,313,5,111,0,0,313,314,5,109,0,0,314,56,1,0,0,0,315,316,5,77, + 0,0,316,317,5,97,0,0,317,318,5,116,0,0,318,319,5,104,0,0,319,320, + 5,65,0,0,320,321,5,100,0,0,321,322,5,100,0,0,322,58,1,0,0,0,323, + 324,5,83,0,0,324,325,5,116,0,0,325,326,5,114,0,0,326,327,5,105,0, + 0,327,328,5,110,0,0,328,329,5,103,0,0,329,330,5,83,0,0,330,331,5, + 112,0,0,331,332,5,108,0,0,332,333,5,105,0,0,333,334,5,116,0,0,334, + 60,1,0,0,0,335,336,5,85,0,0,336,337,5,85,0,0,337,338,5,73,0,0,338, + 339,5,68,0,0,339,62,1,0,0,0,340,345,5,39,0,0,341,344,3,65,32,0,342, + 344,3,71,35,0,343,341,1,0,0,0,343,342,1,0,0,0,344,347,1,0,0,0,345, + 346,1,0,0,0,345,343,1,0,0,0,346,348,1,0,0,0,347,345,1,0,0,0,348, + 349,5,39,0,0,349,64,1,0,0,0,350,353,5,92,0,0,351,354,3,67,33,0,352, + 354,9,0,0,0,353,351,1,0,0,0,353,352,1,0,0,0,354,66,1,0,0,0,355,356, + 5,117,0,0,356,357,3,69,34,0,357,358,3,69,34,0,358,359,3,69,34,0, + 359,360,3,69,34,0,360,68,1,0,0,0,361,362,7,1,0,0,362,70,1,0,0,0, + 363,364,8,2,0,0,364,72,1,0,0,0,365,367,5,45,0,0,366,365,1,0,0,0, + 366,367,1,0,0,0,367,376,1,0,0,0,368,377,5,48,0,0,369,373,7,3,0,0, + 370,372,7,4,0,0,371,370,1,0,0,0,372,375,1,0,0,0,373,371,1,0,0,0, + 373,374,1,0,0,0,374,377,1,0,0,0,375,373,1,0,0,0,376,368,1,0,0,0, + 376,369,1,0,0,0,377,74,1,0,0,0,378,380,5,45,0,0,379,378,1,0,0,0, + 379,380,1,0,0,0,380,381,1,0,0,0,381,388,3,73,36,0,382,384,5,46,0, + 0,383,385,7,4,0,0,384,383,1,0,0,0,385,386,1,0,0,0,386,384,1,0,0, + 
0,386,387,1,0,0,0,387,389,1,0,0,0,388,382,1,0,0,0,388,389,1,0,0, + 0,389,391,1,0,0,0,390,392,3,77,38,0,391,390,1,0,0,0,391,392,1,0, + 0,0,392,76,1,0,0,0,393,395,7,5,0,0,394,396,7,6,0,0,395,394,1,0,0, + 0,395,396,1,0,0,0,396,397,1,0,0,0,397,398,3,73,36,0,398,78,1,0,0, + 0,399,402,7,7,0,0,400,402,3,67,33,0,401,399,1,0,0,0,401,400,1,0, + 0,0,402,403,1,0,0,0,403,401,1,0,0,0,403,404,1,0,0,0,404,80,1,0,0, + 0,405,407,7,8,0,0,406,405,1,0,0,0,407,408,1,0,0,0,408,406,1,0,0, + 0,408,409,1,0,0,0,409,410,1,0,0,0,410,411,6,40,0,0,411,82,1,0,0, + 0,21,0,95,99,102,106,112,114,343,345,353,366,373,376,379,386,388, + 391,395,401,403,408,1,6,0,0 + ] + +class ASLIntrinsicLexer(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + CONTEXT_PATH_STRING = 1 + JSON_PATH_STRING = 2 + STRING_VARIABLE = 3 + DOLLAR = 4 + LPAREN = 5 + RPAREN = 6 + COMMA = 7 + DOT = 8 + TRUE = 9 + FALSE = 10 + States = 11 + Format = 12 + StringToJson = 13 + JsonToString = 14 + Array = 15 + ArrayPartition = 16 + ArrayContains = 17 + ArrayRange = 18 + ArrayGetItem = 19 + ArrayLength = 20 + ArrayUnique = 21 + Base64Encode = 22 + Base64Decode = 23 + Hash = 24 + JsonMerge = 25 + MathRandom = 26 + MathAdd = 27 + StringSplit = 28 + UUID = 29 + STRING = 30 + INT = 31 + NUMBER = 32 + IDENTIFIER = 33 + WS = 34 + + channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] + + modeNames = [ "DEFAULT_MODE" ] + + literalNames = [ "", + "'$'", "'('", "')'", "','", "'.'", "'true'", "'false'", "'States'", + "'Format'", "'StringToJson'", "'JsonToString'", "'Array'", "'ArrayPartition'", + "'ArrayContains'", "'ArrayRange'", "'ArrayGetItem'", "'ArrayLength'", + "'ArrayUnique'", "'Base64Encode'", "'Base64Decode'", "'Hash'", + "'JsonMerge'", "'MathRandom'", "'MathAdd'", "'StringSplit'", + "'UUID'" ] + + symbolicNames = [ "", + "CONTEXT_PATH_STRING", "JSON_PATH_STRING", "STRING_VARIABLE", + "DOLLAR", "LPAREN", "RPAREN", "COMMA", "DOT", "TRUE", "FALSE", + "States", "Format", "StringToJson", "JsonToString", "Array", + "ArrayPartition", "ArrayContains", "ArrayRange", "ArrayGetItem", + "ArrayLength", "ArrayUnique", "Base64Encode", "Base64Decode", + "Hash", "JsonMerge", "MathRandom", "MathAdd", "StringSplit", + "UUID", "STRING", "INT", "NUMBER", "IDENTIFIER", "WS" ] + + ruleNames = [ "CONTEXT_PATH_STRING", "JSON_PATH_STRING", "STRING_VARIABLE", + "JSON_PATH_BODY", "JSON_PATH_BRACK", "DOLLAR", "LPAREN", + "RPAREN", "COMMA", "DOT", "TRUE", "FALSE", "States", "Format", + "StringToJson", "JsonToString", "Array", "ArrayPartition", + "ArrayContains", "ArrayRange", "ArrayGetItem", "ArrayLength", + "ArrayUnique", "Base64Encode", "Base64Decode", "Hash", + "JsonMerge", "MathRandom", "MathAdd", "StringSplit", "UUID", + "STRING", "ESC", "UNICODE", "HEX", "SAFECODEPOINT", "INT", + "NUMBER", "EXP", "IDENTIFIER", "WS" ] + + grammarFileName = "ASLIntrinsicLexer.g4" + + def __init__(self, input=None, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.13.2") + self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) + self._actions = None + self._predicates = None + + diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParser.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParser.py new file mode 100644 index 0000000000000..13a9cebf3cb7a --- /dev/null +++ 
b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParser.py @@ -0,0 +1,716 @@ +# Generated from ASLIntrinsicParser.g4 by ANTLR 4.13.2 +# encoding: utf-8 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + +def serializedATN(): + return [ + 4,1,34,46,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,1,0,1,0,1,0,1, + 1,1,1,1,1,1,1,1,1,1,2,1,2,1,3,1,3,1,3,1,3,5,3,25,8,3,10,3,12,3,28, + 9,3,1,3,1,3,1,3,1,3,3,3,34,8,3,1,4,1,4,1,4,1,4,1,4,1,4,1,4,1,4,3, + 4,44,8,4,1,4,0,0,5,0,2,4,6,8,0,2,1,0,12,29,1,0,9,10,49,0,10,1,0, + 0,0,2,13,1,0,0,0,4,18,1,0,0,0,6,33,1,0,0,0,8,43,1,0,0,0,10,11,3, + 2,1,0,11,12,5,0,0,1,12,1,1,0,0,0,13,14,5,11,0,0,14,15,5,8,0,0,15, + 16,3,4,2,0,16,17,3,6,3,0,17,3,1,0,0,0,18,19,7,0,0,0,19,5,1,0,0,0, + 20,21,5,5,0,0,21,26,3,8,4,0,22,23,5,7,0,0,23,25,3,8,4,0,24,22,1, + 0,0,0,25,28,1,0,0,0,26,24,1,0,0,0,26,27,1,0,0,0,27,29,1,0,0,0,28, + 26,1,0,0,0,29,30,5,6,0,0,30,34,1,0,0,0,31,32,5,5,0,0,32,34,5,6,0, + 0,33,20,1,0,0,0,33,31,1,0,0,0,34,7,1,0,0,0,35,44,5,30,0,0,36,44, + 5,31,0,0,37,44,5,32,0,0,38,44,7,1,0,0,39,44,5,1,0,0,40,44,5,2,0, + 0,41,44,5,3,0,0,42,44,3,2,1,0,43,35,1,0,0,0,43,36,1,0,0,0,43,37, + 1,0,0,0,43,38,1,0,0,0,43,39,1,0,0,0,43,40,1,0,0,0,43,41,1,0,0,0, + 43,42,1,0,0,0,44,9,1,0,0,0,3,26,33,43 + ] + +class ASLIntrinsicParser ( Parser ): + + grammarFileName = "ASLIntrinsicParser.g4" + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + sharedContextCache = PredictionContextCache() + + literalNames = [ "", "", "", "", + "'$'", "'('", "')'", "','", "'.'", "'true'", "'false'", + "'States'", "'Format'", "'StringToJson'", "'JsonToString'", + "'Array'", "'ArrayPartition'", "'ArrayContains'", "'ArrayRange'", + "'ArrayGetItem'", "'ArrayLength'", "'ArrayUnique'", + "'Base64Encode'", "'Base64Decode'", "'Hash'", "'JsonMerge'", + "'MathRandom'", "'MathAdd'", "'StringSplit'", "'UUID'" ] + + symbolicNames = [ "", "CONTEXT_PATH_STRING", "JSON_PATH_STRING", + "STRING_VARIABLE", "DOLLAR", "LPAREN", "RPAREN", "COMMA", + "DOT", "TRUE", "FALSE", "States", "Format", "StringToJson", + "JsonToString", "Array", "ArrayPartition", "ArrayContains", + "ArrayRange", "ArrayGetItem", "ArrayLength", "ArrayUnique", + "Base64Encode", "Base64Decode", "Hash", "JsonMerge", + "MathRandom", "MathAdd", "StringSplit", "UUID", "STRING", + "INT", "NUMBER", "IDENTIFIER", "WS" ] + + RULE_func_decl = 0 + RULE_states_func_decl = 1 + RULE_state_fun_name = 2 + RULE_func_arg_list = 3 + RULE_func_arg = 4 + + ruleNames = [ "func_decl", "states_func_decl", "state_fun_name", "func_arg_list", + "func_arg" ] + + EOF = Token.EOF + CONTEXT_PATH_STRING=1 + JSON_PATH_STRING=2 + STRING_VARIABLE=3 + DOLLAR=4 + LPAREN=5 + RPAREN=6 + COMMA=7 + DOT=8 + TRUE=9 + FALSE=10 + States=11 + Format=12 + StringToJson=13 + JsonToString=14 + Array=15 + ArrayPartition=16 + ArrayContains=17 + ArrayRange=18 + ArrayGetItem=19 + ArrayLength=20 + ArrayUnique=21 + Base64Encode=22 + Base64Decode=23 + Hash=24 + JsonMerge=25 + MathRandom=26 + MathAdd=27 + StringSplit=28 + UUID=29 + STRING=30 + INT=31 + NUMBER=32 + IDENTIFIER=33 + WS=34 + + def __init__(self, input:TokenStream, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.13.2") + self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) + self._predicates = None + + + + + class Func_declContext(ParserRuleContext): + 
__slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def states_func_decl(self): + return self.getTypedRuleContext(ASLIntrinsicParser.States_func_declContext,0) + + + def EOF(self): + return self.getToken(ASLIntrinsicParser.EOF, 0) + + def getRuleIndex(self): + return ASLIntrinsicParser.RULE_func_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_decl" ): + listener.enterFunc_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_decl" ): + listener.exitFunc_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_decl" ): + return visitor.visitFunc_decl(self) + else: + return visitor.visitChildren(self) + + + + + def func_decl(self): + + localctx = ASLIntrinsicParser.Func_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 0, self.RULE_func_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 10 + self.states_func_decl() + self.state = 11 + self.match(ASLIntrinsicParser.EOF) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class States_func_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def States(self): + return self.getToken(ASLIntrinsicParser.States, 0) + + def DOT(self): + return self.getToken(ASLIntrinsicParser.DOT, 0) + + def state_fun_name(self): + return self.getTypedRuleContext(ASLIntrinsicParser.State_fun_nameContext,0) + + + def func_arg_list(self): + return self.getTypedRuleContext(ASLIntrinsicParser.Func_arg_listContext,0) + + + def getRuleIndex(self): + return ASLIntrinsicParser.RULE_states_func_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterStates_func_decl" ): + listener.enterStates_func_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitStates_func_decl" ): + listener.exitStates_func_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitStates_func_decl" ): + return visitor.visitStates_func_decl(self) + else: + return visitor.visitChildren(self) + + + + + def states_func_decl(self): + + localctx = ASLIntrinsicParser.States_func_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 2, self.RULE_states_func_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 13 + self.match(ASLIntrinsicParser.States) + self.state = 14 + self.match(ASLIntrinsicParser.DOT) + self.state = 15 + self.state_fun_name() + self.state = 16 + self.func_arg_list() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class State_fun_nameContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def Format(self): + return self.getToken(ASLIntrinsicParser.Format, 0) + + def StringToJson(self): + return self.getToken(ASLIntrinsicParser.StringToJson, 0) + + def JsonToString(self): + return 
self.getToken(ASLIntrinsicParser.JsonToString, 0) + + def Array(self): + return self.getToken(ASLIntrinsicParser.Array, 0) + + def ArrayPartition(self): + return self.getToken(ASLIntrinsicParser.ArrayPartition, 0) + + def ArrayContains(self): + return self.getToken(ASLIntrinsicParser.ArrayContains, 0) + + def ArrayRange(self): + return self.getToken(ASLIntrinsicParser.ArrayRange, 0) + + def ArrayGetItem(self): + return self.getToken(ASLIntrinsicParser.ArrayGetItem, 0) + + def ArrayLength(self): + return self.getToken(ASLIntrinsicParser.ArrayLength, 0) + + def ArrayUnique(self): + return self.getToken(ASLIntrinsicParser.ArrayUnique, 0) + + def Base64Encode(self): + return self.getToken(ASLIntrinsicParser.Base64Encode, 0) + + def Base64Decode(self): + return self.getToken(ASLIntrinsicParser.Base64Decode, 0) + + def Hash(self): + return self.getToken(ASLIntrinsicParser.Hash, 0) + + def JsonMerge(self): + return self.getToken(ASLIntrinsicParser.JsonMerge, 0) + + def MathRandom(self): + return self.getToken(ASLIntrinsicParser.MathRandom, 0) + + def MathAdd(self): + return self.getToken(ASLIntrinsicParser.MathAdd, 0) + + def StringSplit(self): + return self.getToken(ASLIntrinsicParser.StringSplit, 0) + + def UUID(self): + return self.getToken(ASLIntrinsicParser.UUID, 0) + + def getRuleIndex(self): + return ASLIntrinsicParser.RULE_state_fun_name + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterState_fun_name" ): + listener.enterState_fun_name(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitState_fun_name" ): + listener.exitState_fun_name(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitState_fun_name" ): + return visitor.visitState_fun_name(self) + else: + return visitor.visitChildren(self) + + + + + def state_fun_name(self): + + localctx = ASLIntrinsicParser.State_fun_nameContext(self, self._ctx, self.state) + self.enterRule(localctx, 4, self.RULE_state_fun_name) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 18 + _la = self._input.LA(1) + if not((((_la) & ~0x3f) == 0 and ((1 << _la) & 1073737728) != 0)): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Func_arg_listContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LPAREN(self): + return self.getToken(ASLIntrinsicParser.LPAREN, 0) + + def func_arg(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLIntrinsicParser.Func_argContext) + else: + return self.getTypedRuleContext(ASLIntrinsicParser.Func_argContext,i) + + + def RPAREN(self): + return self.getToken(ASLIntrinsicParser.RPAREN, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLIntrinsicParser.COMMA) + else: + return self.getToken(ASLIntrinsicParser.COMMA, i) + + def getRuleIndex(self): + return ASLIntrinsicParser.RULE_func_arg_list + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_list" ): + listener.enterFunc_arg_list(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_list" ): + listener.exitFunc_arg_list(self) 
+ + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_list" ): + return visitor.visitFunc_arg_list(self) + else: + return visitor.visitChildren(self) + + + + + def func_arg_list(self): + + localctx = ASLIntrinsicParser.Func_arg_listContext(self, self._ctx, self.state) + self.enterRule(localctx, 6, self.RULE_func_arg_list) + self._la = 0 # Token type + try: + self.state = 33 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,1,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 20 + self.match(ASLIntrinsicParser.LPAREN) + self.state = 21 + self.func_arg() + self.state = 26 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==7: + self.state = 22 + self.match(ASLIntrinsicParser.COMMA) + self.state = 23 + self.func_arg() + self.state = 28 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 29 + self.match(ASLIntrinsicParser.RPAREN) + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 31 + self.match(ASLIntrinsicParser.LPAREN) + self.state = 32 + self.match(ASLIntrinsicParser.RPAREN) + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Func_argContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLIntrinsicParser.RULE_func_arg + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Func_arg_context_pathContext(Func_argContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLIntrinsicParser.Func_argContext + super().__init__(parser) + self.copyFrom(ctx) + + def CONTEXT_PATH_STRING(self): + return self.getToken(ASLIntrinsicParser.CONTEXT_PATH_STRING, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_context_path" ): + listener.enterFunc_arg_context_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_context_path" ): + listener.exitFunc_arg_context_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_context_path" ): + return visitor.visitFunc_arg_context_path(self) + else: + return visitor.visitChildren(self) + + + class Func_arg_floatContext(Func_argContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLIntrinsicParser.Func_argContext + super().__init__(parser) + self.copyFrom(ctx) + + def NUMBER(self): + return self.getToken(ASLIntrinsicParser.NUMBER, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_float" ): + listener.enterFunc_arg_float(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_float" ): + listener.exitFunc_arg_float(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_float" ): + return visitor.visitFunc_arg_float(self) + else: + return visitor.visitChildren(self) + + + class Func_arg_varContext(Func_argContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLIntrinsicParser.Func_argContext + super().__init__(parser) + self.copyFrom(ctx) + + def STRING_VARIABLE(self): + return self.getToken(ASLIntrinsicParser.STRING_VARIABLE, 0) + + def 
enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_var" ): + listener.enterFunc_arg_var(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_var" ): + listener.exitFunc_arg_var(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_var" ): + return visitor.visitFunc_arg_var(self) + else: + return visitor.visitChildren(self) + + + class Func_arg_func_declContext(Func_argContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLIntrinsicParser.Func_argContext + super().__init__(parser) + self.copyFrom(ctx) + + def states_func_decl(self): + return self.getTypedRuleContext(ASLIntrinsicParser.States_func_declContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_func_decl" ): + listener.enterFunc_arg_func_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_func_decl" ): + listener.exitFunc_arg_func_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_func_decl" ): + return visitor.visitFunc_arg_func_decl(self) + else: + return visitor.visitChildren(self) + + + class Func_arg_intContext(Func_argContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLIntrinsicParser.Func_argContext + super().__init__(parser) + self.copyFrom(ctx) + + def INT(self): + return self.getToken(ASLIntrinsicParser.INT, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_int" ): + listener.enterFunc_arg_int(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_int" ): + listener.exitFunc_arg_int(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_int" ): + return visitor.visitFunc_arg_int(self) + else: + return visitor.visitChildren(self) + + + class Func_arg_boolContext(Func_argContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLIntrinsicParser.Func_argContext + super().__init__(parser) + self.copyFrom(ctx) + + def TRUE(self): + return self.getToken(ASLIntrinsicParser.TRUE, 0) + def FALSE(self): + return self.getToken(ASLIntrinsicParser.FALSE, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_bool" ): + listener.enterFunc_arg_bool(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_bool" ): + listener.exitFunc_arg_bool(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_bool" ): + return visitor.visitFunc_arg_bool(self) + else: + return visitor.visitChildren(self) + + + class Func_arg_stringContext(Func_argContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLIntrinsicParser.Func_argContext + super().__init__(parser) + self.copyFrom(ctx) + + def STRING(self): + return self.getToken(ASLIntrinsicParser.STRING, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_string" ): + listener.enterFunc_arg_string(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_string" ): + listener.exitFunc_arg_string(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_string" ): + return visitor.visitFunc_arg_string(self) + else: + return visitor.visitChildren(self) + + + class 
Func_arg_json_pathContext(Func_argContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLIntrinsicParser.Func_argContext + super().__init__(parser) + self.copyFrom(ctx) + + def JSON_PATH_STRING(self): + return self.getToken(ASLIntrinsicParser.JSON_PATH_STRING, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterFunc_arg_json_path" ): + listener.enterFunc_arg_json_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitFunc_arg_json_path" ): + listener.exitFunc_arg_json_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitFunc_arg_json_path" ): + return visitor.visitFunc_arg_json_path(self) + else: + return visitor.visitChildren(self) + + + + def func_arg(self): + + localctx = ASLIntrinsicParser.Func_argContext(self, self._ctx, self.state) + self.enterRule(localctx, 8, self.RULE_func_arg) + self._la = 0 # Token type + try: + self.state = 43 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [30]: + localctx = ASLIntrinsicParser.Func_arg_stringContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 35 + self.match(ASLIntrinsicParser.STRING) + pass + elif token in [31]: + localctx = ASLIntrinsicParser.Func_arg_intContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 36 + self.match(ASLIntrinsicParser.INT) + pass + elif token in [32]: + localctx = ASLIntrinsicParser.Func_arg_floatContext(self, localctx) + self.enterOuterAlt(localctx, 3) + self.state = 37 + self.match(ASLIntrinsicParser.NUMBER) + pass + elif token in [9, 10]: + localctx = ASLIntrinsicParser.Func_arg_boolContext(self, localctx) + self.enterOuterAlt(localctx, 4) + self.state = 38 + _la = self._input.LA(1) + if not(_la==9 or _la==10): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + pass + elif token in [1]: + localctx = ASLIntrinsicParser.Func_arg_context_pathContext(self, localctx) + self.enterOuterAlt(localctx, 5) + self.state = 39 + self.match(ASLIntrinsicParser.CONTEXT_PATH_STRING) + pass + elif token in [2]: + localctx = ASLIntrinsicParser.Func_arg_json_pathContext(self, localctx) + self.enterOuterAlt(localctx, 6) + self.state = 40 + self.match(ASLIntrinsicParser.JSON_PATH_STRING) + pass + elif token in [3]: + localctx = ASLIntrinsicParser.Func_arg_varContext(self, localctx) + self.enterOuterAlt(localctx, 7) + self.state = 41 + self.match(ASLIntrinsicParser.STRING_VARIABLE) + pass + elif token in [11]: + localctx = ASLIntrinsicParser.Func_arg_func_declContext(self, localctx) + self.enterOuterAlt(localctx, 8) + self.state = 42 + self.states_func_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + + + diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParserListener.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParserListener.py new file mode 100644 index 0000000000000..80d2a8868036e --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParserListener.py @@ -0,0 +1,120 @@ +# Generated from ASLIntrinsicParser.g4 by ANTLR 4.13.2 +from antlr4 import * +if "." 
in __name__: + from .ASLIntrinsicParser import ASLIntrinsicParser +else: + from ASLIntrinsicParser import ASLIntrinsicParser + +# This class defines a complete listener for a parse tree produced by ASLIntrinsicParser. +class ASLIntrinsicParserListener(ParseTreeListener): + + # Enter a parse tree produced by ASLIntrinsicParser#func_decl. + def enterFunc_decl(self, ctx:ASLIntrinsicParser.Func_declContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_decl. + def exitFunc_decl(self, ctx:ASLIntrinsicParser.Func_declContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#states_func_decl. + def enterStates_func_decl(self, ctx:ASLIntrinsicParser.States_func_declContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#states_func_decl. + def exitStates_func_decl(self, ctx:ASLIntrinsicParser.States_func_declContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#state_fun_name. + def enterState_fun_name(self, ctx:ASLIntrinsicParser.State_fun_nameContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#state_fun_name. + def exitState_fun_name(self, ctx:ASLIntrinsicParser.State_fun_nameContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_list. + def enterFunc_arg_list(self, ctx:ASLIntrinsicParser.Func_arg_listContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_list. + def exitFunc_arg_list(self, ctx:ASLIntrinsicParser.Func_arg_listContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_string. + def enterFunc_arg_string(self, ctx:ASLIntrinsicParser.Func_arg_stringContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_string. + def exitFunc_arg_string(self, ctx:ASLIntrinsicParser.Func_arg_stringContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_int. + def enterFunc_arg_int(self, ctx:ASLIntrinsicParser.Func_arg_intContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_int. + def exitFunc_arg_int(self, ctx:ASLIntrinsicParser.Func_arg_intContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_float. + def enterFunc_arg_float(self, ctx:ASLIntrinsicParser.Func_arg_floatContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_float. + def exitFunc_arg_float(self, ctx:ASLIntrinsicParser.Func_arg_floatContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_bool. + def enterFunc_arg_bool(self, ctx:ASLIntrinsicParser.Func_arg_boolContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_bool. + def exitFunc_arg_bool(self, ctx:ASLIntrinsicParser.Func_arg_boolContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_context_path. + def enterFunc_arg_context_path(self, ctx:ASLIntrinsicParser.Func_arg_context_pathContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_context_path. + def exitFunc_arg_context_path(self, ctx:ASLIntrinsicParser.Func_arg_context_pathContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_json_path. + def enterFunc_arg_json_path(self, ctx:ASLIntrinsicParser.Func_arg_json_pathContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_json_path. + def exitFunc_arg_json_path(self, ctx:ASLIntrinsicParser.Func_arg_json_pathContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_var. 
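+
+    # A concrete listener only needs to override the hooks it cares about; the
+    # ANTLR runtime's ParseTreeWalker drives the enter/exit calls during a
+    # depth-first walk. Minimal usage sketch (the ASLIntrinsicLexer name and the
+    # MyFuncDeclListener subclass are assumed for illustration, not part of this
+    # generated file):
+    #
+    #   from antlr4 import CommonTokenStream, InputStream, ParseTreeWalker
+    #   lexer = ASLIntrinsicLexer(InputStream("States.Format('{}', $.name)"))
+    #   tree = ASLIntrinsicParser(CommonTokenStream(lexer)).func_decl()
+    #   ParseTreeWalker().walk(MyFuncDeclListener(), tree)
+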
+ def enterFunc_arg_var(self, ctx:ASLIntrinsicParser.Func_arg_varContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_var. + def exitFunc_arg_var(self, ctx:ASLIntrinsicParser.Func_arg_varContext): + pass + + + # Enter a parse tree produced by ASLIntrinsicParser#func_arg_func_decl. + def enterFunc_arg_func_decl(self, ctx:ASLIntrinsicParser.Func_arg_func_declContext): + pass + + # Exit a parse tree produced by ASLIntrinsicParser#func_arg_func_decl. + def exitFunc_arg_func_decl(self, ctx:ASLIntrinsicParser.Func_arg_func_declContext): + pass + + + +del ASLIntrinsicParser \ No newline at end of file diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParserVisitor.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParserVisitor.py new file mode 100644 index 0000000000000..aaff82cbb9778 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLIntrinsicParserVisitor.py @@ -0,0 +1,73 @@ +# Generated from ASLIntrinsicParser.g4 by ANTLR 4.13.2 +from antlr4 import * +if "." in __name__: + from .ASLIntrinsicParser import ASLIntrinsicParser +else: + from ASLIntrinsicParser import ASLIntrinsicParser + +# This class defines a complete generic visitor for a parse tree produced by ASLIntrinsicParser. + +class ASLIntrinsicParserVisitor(ParseTreeVisitor): + + # Visit a parse tree produced by ASLIntrinsicParser#func_decl. + def visitFunc_decl(self, ctx:ASLIntrinsicParser.Func_declContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#states_func_decl. + def visitStates_func_decl(self, ctx:ASLIntrinsicParser.States_func_declContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#state_fun_name. + def visitState_fun_name(self, ctx:ASLIntrinsicParser.State_fun_nameContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_list. + def visitFunc_arg_list(self, ctx:ASLIntrinsicParser.Func_arg_listContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_string. + def visitFunc_arg_string(self, ctx:ASLIntrinsicParser.Func_arg_stringContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_int. + def visitFunc_arg_int(self, ctx:ASLIntrinsicParser.Func_arg_intContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_float. + def visitFunc_arg_float(self, ctx:ASLIntrinsicParser.Func_arg_floatContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_bool. + def visitFunc_arg_bool(self, ctx:ASLIntrinsicParser.Func_arg_boolContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_context_path. + def visitFunc_arg_context_path(self, ctx:ASLIntrinsicParser.Func_arg_context_pathContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_json_path. + def visitFunc_arg_json_path(self, ctx:ASLIntrinsicParser.Func_arg_json_pathContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_var. + def visitFunc_arg_var(self, ctx:ASLIntrinsicParser.Func_arg_varContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by ASLIntrinsicParser#func_arg_func_decl. 
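+
+    # Unlike the listener, a visitor controls traversal itself and can return a
+    # value up the tree: override the visit methods of interest and call
+    # self.visit() or self.visitChildren() as needed; dispatch goes through the
+    # generated accept() methods on each context. Minimal sketch
+    # (MyIntrinsicVisitor is a hypothetical subclass, lexer as set up above):
+    #
+    #   tree = ASLIntrinsicParser(CommonTokenStream(lexer)).func_decl()
+    #   result = MyIntrinsicVisitor().visit(tree)
+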
+ def visitFunc_arg_func_decl(self, ctx:ASLIntrinsicParser.Func_arg_func_declContext): + return self.visitChildren(ctx) + + + +del ASLIntrinsicParser \ No newline at end of file diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLLexer.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLLexer.py new file mode 100644 index 0000000000000..578ffc75320f7 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLLexer.py @@ -0,0 +1,1413 @@ +# Generated from ASLLexer.g4 by ANTLR 4.13.2 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + + +def serializedATN(): + return [ + 4,0,162,2863,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7, + 5,2,6,7,6,2,7,7,7,2,8,7,8,2,9,7,9,2,10,7,10,2,11,7,11,2,12,7,12, + 2,13,7,13,2,14,7,14,2,15,7,15,2,16,7,16,2,17,7,17,2,18,7,18,2,19, + 7,19,2,20,7,20,2,21,7,21,2,22,7,22,2,23,7,23,2,24,7,24,2,25,7,25, + 2,26,7,26,2,27,7,27,2,28,7,28,2,29,7,29,2,30,7,30,2,31,7,31,2,32, + 7,32,2,33,7,33,2,34,7,34,2,35,7,35,2,36,7,36,2,37,7,37,2,38,7,38, + 2,39,7,39,2,40,7,40,2,41,7,41,2,42,7,42,2,43,7,43,2,44,7,44,2,45, + 7,45,2,46,7,46,2,47,7,47,2,48,7,48,2,49,7,49,2,50,7,50,2,51,7,51, + 2,52,7,52,2,53,7,53,2,54,7,54,2,55,7,55,2,56,7,56,2,57,7,57,2,58, + 7,58,2,59,7,59,2,60,7,60,2,61,7,61,2,62,7,62,2,63,7,63,2,64,7,64, + 2,65,7,65,2,66,7,66,2,67,7,67,2,68,7,68,2,69,7,69,2,70,7,70,2,71, + 7,71,2,72,7,72,2,73,7,73,2,74,7,74,2,75,7,75,2,76,7,76,2,77,7,77, + 2,78,7,78,2,79,7,79,2,80,7,80,2,81,7,81,2,82,7,82,2,83,7,83,2,84, + 7,84,2,85,7,85,2,86,7,86,2,87,7,87,2,88,7,88,2,89,7,89,2,90,7,90, + 2,91,7,91,2,92,7,92,2,93,7,93,2,94,7,94,2,95,7,95,2,96,7,96,2,97, + 7,97,2,98,7,98,2,99,7,99,2,100,7,100,2,101,7,101,2,102,7,102,2,103, + 7,103,2,104,7,104,2,105,7,105,2,106,7,106,2,107,7,107,2,108,7,108, + 2,109,7,109,2,110,7,110,2,111,7,111,2,112,7,112,2,113,7,113,2,114, + 7,114,2,115,7,115,2,116,7,116,2,117,7,117,2,118,7,118,2,119,7,119, + 2,120,7,120,2,121,7,121,2,122,7,122,2,123,7,123,2,124,7,124,2,125, + 7,125,2,126,7,126,2,127,7,127,2,128,7,128,2,129,7,129,2,130,7,130, + 2,131,7,131,2,132,7,132,2,133,7,133,2,134,7,134,2,135,7,135,2,136, + 7,136,2,137,7,137,2,138,7,138,2,139,7,139,2,140,7,140,2,141,7,141, + 2,142,7,142,2,143,7,143,2,144,7,144,2,145,7,145,2,146,7,146,2,147, + 7,147,2,148,7,148,2,149,7,149,2,150,7,150,2,151,7,151,2,152,7,152, + 2,153,7,153,2,154,7,154,2,155,7,155,2,156,7,156,2,157,7,157,2,158, + 7,158,2,159,7,159,2,160,7,160,2,161,7,161,2,162,7,162,2,163,7,163, + 2,164,7,164,2,165,7,165,2,166,7,166,2,167,7,167,2,168,7,168,1,0, + 1,0,1,1,1,1,1,2,1,2,1,3,1,3,1,4,1,4,1,5,1,5,1,6,1,6,1,6,1,6,1,6, + 1,7,1,7,1,7,1,7,1,7,1,7,1,8,1,8,1,8,1,8,1,8,1,9,1,9,1,9,1,9,1,9, + 1,9,1,9,1,9,1,9,1,9,1,10,1,10,1,10,1,10,1,10,1,10,1,10,1,10,1,10, + 1,11,1,11,1,11,1,11,1,11,1,11,1,11,1,11,1,11,1,11,1,12,1,12,1,12, + 1,12,1,12,1,12,1,12,1,12,1,12,1,12,1,12,1,12,1,13,1,13,1,13,1,13, + 1,13,1,13,1,13,1,13,1,13,1,13,1,14,1,14,1,14,1,14,1,14,1,14,1,14, + 1,15,1,15,1,15,1,15,1,15,1,15,1,15,1,16,1,16,1,16,1,16,1,16,1,16, + 1,16,1,16,1,16,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,18,1,18,1,18, + 1,18,1,18,1,18,1,18,1,18,1,18,1,18,1,19,1,19,1,19,1,19,1,19,1,19, + 1,19,1,20,1,20,1,20,1,20,1,20,1,20,1,20,1,21,1,21,1,21,1,21,1,21, + 1,21,1,21,1,21,1,21,1,21,1,21,1,22,1,22,1,22,1,22,1,22,1,22,1,23, + 1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,23,1,24,1,24,1,24,1,24, + 
1,24,1,24,1,24,1,24,1,24,1,24,1,24,1,24,1,25,1,25,1,25,1,25,1,25, + 1,25,1,25,1,25,1,25,1,25,1,25,1,26,1,26,1,26,1,26,1,26,1,26,1,26, + 1,26,1,26,1,26,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27, + 1,27,1,28,1,28,1,28,1,28,1,28,1,28,1,29,1,29,1,29,1,29,1,29,1,29, + 1,29,1,29,1,29,1,29,1,29,1,29,1,29,1,29,1,29,1,29,1,30,1,30,1,30, + 1,30,1,30,1,30,1,30,1,30,1,30,1,30,1,30,1,30,1,30,1,30,1,30,1,30, + 1,30,1,30,1,30,1,30,1,31,1,31,1,31,1,31,1,31,1,31,1,31,1,31,1,31, + 1,31,1,31,1,31,1,32,1,32,1,32,1,32,1,32,1,32,1,32,1,32,1,32,1,33, + 1,33,1,33,1,33,1,33,1,33,1,33,1,33,1,33,1,33,1,33,1,33,1,34,1,34, + 1,34,1,34,1,34,1,34,1,34,1,34,1,34,1,34,1,34,1,34,1,35,1,35,1,35, + 1,35,1,35,1,35,1,35,1,35,1,35,1,35,1,35,1,36,1,36,1,36,1,36,1,36, + 1,36,1,36,1,36,1,36,1,36,1,36,1,36,1,36,1,36,1,37,1,37,1,37,1,37, + 1,37,1,37,1,38,1,38,1,38,1,38,1,38,1,38,1,38,1,38,1,38,1,38,1,38, + 1,38,1,38,1,38,1,38,1,38,1,39,1,39,1,39,1,39,1,39,1,39,1,39,1,39, + 1,39,1,39,1,39,1,39,1,39,1,39,1,39,1,39,1,39,1,39,1,39,1,39,1,40, + 1,40,1,40,1,40,1,40,1,40,1,40,1,40,1,40,1,40,1,40,1,40,1,40,1,40, + 1,40,1,40,1,40,1,40,1,40,1,40,1,40,1,41,1,41,1,41,1,41,1,41,1,41, + 1,41,1,41,1,41,1,41,1,41,1,41,1,41,1,41,1,41,1,41,1,41,1,41,1,41, + 1,41,1,41,1,41,1,41,1,41,1,41,1,42,1,42,1,42,1,42,1,42,1,42,1,42, + 1,42,1,42,1,42,1,42,1,42,1,42,1,42,1,42,1,42,1,42,1,42,1,42,1,42, + 1,42,1,42,1,42,1,42,1,42,1,42,1,42,1,43,1,43,1,43,1,43,1,43,1,43, + 1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43, + 1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,43,1,44, + 1,44,1,44,1,44,1,44,1,44,1,44,1,44,1,44,1,44,1,44,1,44,1,44,1,44, + 1,44,1,44,1,44,1,44,1,45,1,45,1,45,1,45,1,45,1,45,1,45,1,45,1,45, + 1,45,1,45,1,45,1,45,1,45,1,45,1,45,1,45,1,45,1,45,1,45,1,45,1,45, + 1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46, + 1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,46,1,47,1,47, + 1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47, + 1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47,1,47, + 1,48,1,48,1,48,1,48,1,48,1,49,1,49,1,49,1,49,1,49,1,49,1,49,1,49, + 1,49,1,49,1,49,1,49,1,49,1,49,1,49,1,50,1,50,1,50,1,50,1,50,1,50, + 1,50,1,50,1,50,1,50,1,50,1,50,1,50,1,50,1,50,1,50,1,50,1,50,1,50, + 1,51,1,51,1,51,1,51,1,51,1,51,1,51,1,51,1,51,1,51,1,51,1,51,1,51, + 1,51,1,51,1,51,1,51,1,51,1,51,1,51,1,52,1,52,1,52,1,52,1,52,1,52, + 1,52,1,52,1,52,1,52,1,52,1,52,1,52,1,52,1,52,1,52,1,52,1,52,1,52, + 1,52,1,52,1,52,1,52,1,52,1,53,1,53,1,53,1,53,1,53,1,53,1,53,1,53, + 1,53,1,53,1,53,1,53,1,53,1,53,1,53,1,53,1,53,1,53,1,53,1,53,1,53, + 1,53,1,53,1,53,1,53,1,53,1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54, + 1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54, + 1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,54,1,55,1,55,1,55,1,55, + 1,55,1,55,1,55,1,55,1,55,1,55,1,55,1,55,1,55,1,55,1,55,1,55,1,55, + 1,56,1,56,1,56,1,56,1,56,1,56,1,56,1,56,1,56,1,56,1,56,1,56,1,56, + 1,56,1,56,1,56,1,56,1,56,1,56,1,56,1,56,1,57,1,57,1,57,1,57,1,57, + 1,57,1,57,1,57,1,57,1,57,1,57,1,57,1,57,1,57,1,57,1,57,1,57,1,57, + 1,57,1,57,1,57,1,57,1,57,1,58,1,58,1,58,1,58,1,58,1,58,1,58,1,58, + 1,58,1,58,1,58,1,58,1,58,1,58,1,58,1,58,1,58,1,58,1,58,1,58,1,58, + 1,58,1,58,1,58,1,58,1,58,1,58,1,59,1,59,1,59,1,59,1,59,1,59,1,59, + 1,59,1,59,1,59,1,59,1,59,1,59,1,59,1,59,1,59,1,60,1,60,1,60,1,60, + 1,60,1,60,1,60,1,60,1,60,1,60,1,60,1,60,1,60,1,60,1,60,1,60,1,60, + 1,60,1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,61, + 1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,61,1,62,1,62,1,62, + 
1,62,1,62,1,62,1,62,1,62,1,62,1,62,1,62,1,62,1,62,1,62,1,62,1,62, + 1,62,1,62,1,62,1,62,1,62,1,62,1,62,1,63,1,63,1,63,1,63,1,63,1,63, + 1,63,1,63,1,63,1,63,1,63,1,63,1,63,1,63,1,63,1,63,1,63,1,63,1,63, + 1,63,1,63,1,63,1,63,1,63,1,63,1,63,1,63,1,64,1,64,1,64,1,64,1,64, + 1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64, + 1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,64,1,65,1,65, + 1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65, + 1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65,1,65, + 1,65,1,65,1,65,1,65,1,65,1,66,1,66,1,66,1,66,1,66,1,66,1,66,1,66, + 1,66,1,66,1,66,1,66,1,66,1,66,1,66,1,66,1,66,1,66,1,66,1,66,1,67, + 1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67, + 1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,67,1,68,1,68,1,68, + 1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68, + 1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,68,1,69,1,69,1,69, + 1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69, + 1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69,1,69, + 1,69,1,70,1,70,1,70,1,70,1,70,1,70,1,70,1,70,1,70,1,70,1,70,1,70, + 1,70,1,70,1,71,1,71,1,71,1,71,1,71,1,71,1,71,1,71,1,71,1,71,1,72, + 1,72,1,72,1,72,1,72,1,72,1,72,1,72,1,72,1,72,1,72,1,72,1,72,1,72, + 1,72,1,72,1,73,1,73,1,73,1,73,1,73,1,73,1,73,1,73,1,73,1,73,1,73, + 1,73,1,74,1,74,1,74,1,74,1,74,1,74,1,74,1,74,1,74,1,74,1,74,1,74, + 1,74,1,74,1,74,1,74,1,74,1,75,1,75,1,75,1,75,1,75,1,75,1,75,1,75, + 1,75,1,75,1,75,1,75,1,75,1,75,1,75,1,75,1,75,1,75,1,75,1,75,1,75, + 1,76,1,76,1,76,1,76,1,76,1,76,1,76,1,76,1,76,1,76,1,76,1,76,1,76, + 1,76,1,76,1,76,1,76,1,76,1,76,1,77,1,77,1,77,1,77,1,77,1,77,1,77, + 1,77,1,77,1,77,1,77,1,77,1,77,1,77,1,77,1,77,1,77,1,77,1,77,1,77, + 1,77,1,77,1,77,1,78,1,78,1,78,1,78,1,78,1,78,1,78,1,78,1,78,1,78, + 1,78,1,78,1,78,1,78,1,78,1,78,1,78,1,78,1,79,1,79,1,79,1,79,1,79, + 1,79,1,79,1,80,1,80,1,80,1,80,1,80,1,80,1,80,1,80,1,80,1,81,1,81, + 1,81,1,81,1,81,1,81,1,81,1,81,1,81,1,81,1,81,1,81,1,81,1,81,1,82, + 1,82,1,82,1,82,1,82,1,82,1,82,1,82,1,82,1,82,1,82,1,82,1,82,1,82, + 1,82,1,82,1,83,1,83,1,83,1,83,1,83,1,83,1,83,1,83,1,83,1,83,1,83, + 1,84,1,84,1,84,1,84,1,84,1,84,1,84,1,84,1,84,1,84,1,84,1,84,1,84, + 1,84,1,84,1,84,1,85,1,85,1,85,1,85,1,85,1,85,1,85,1,85,1,85,1,85, + 1,85,1,86,1,86,1,86,1,86,1,86,1,86,1,86,1,86,1,86,1,86,1,86,1,86, + 1,86,1,86,1,86,1,87,1,87,1,87,1,87,1,87,1,87,1,87,1,87,1,87,1,87, + 1,87,1,87,1,87,1,87,1,87,1,87,1,87,1,87,1,87,1,87,1,87,1,88,1,88, + 1,88,1,88,1,88,1,88,1,88,1,88,1,88,1,88,1,88,1,88,1,88,1,88,1,88, + 1,88,1,88,1,89,1,89,1,89,1,89,1,89,1,89,1,89,1,89,1,89,1,89,1,89, + 1,90,1,90,1,90,1,90,1,90,1,90,1,90,1,90,1,90,1,90,1,90,1,90,1,91, + 1,91,1,91,1,91,1,91,1,91,1,91,1,91,1,91,1,91,1,91,1,91,1,91,1,92, + 1,92,1,92,1,92,1,92,1,92,1,92,1,92,1,93,1,93,1,93,1,93,1,93,1,93, + 1,93,1,93,1,93,1,93,1,93,1,93,1,94,1,94,1,94,1,94,1,94,1,94,1,94, + 1,94,1,94,1,94,1,94,1,94,1,94,1,95,1,95,1,95,1,95,1,95,1,95,1,95, + 1,95,1,95,1,96,1,96,1,96,1,96,1,96,1,96,1,96,1,96,1,96,1,96,1,96, + 1,96,1,96,1,97,1,97,1,97,1,97,1,97,1,97,1,97,1,97,1,97,1,97,1,97, + 1,97,1,97,1,97,1,98,1,98,1,98,1,98,1,98,1,98,1,98,1,98,1,98,1,98, + 1,99,1,99,1,99,1,99,1,99,1,99,1,99,1,99,1,99,1,99,1,99,1,99,1,100, + 1,100,1,100,1,100,1,100,1,100,1,100,1,100,1,100,1,100,1,100,1,100, + 1,100,1,100,1,100,1,100,1,100,1,101,1,101,1,101,1,101,1,101,1,101, + 1,101,1,101,1,101,1,101,1,101,1,101,1,101,1,102,1,102,1,102,1,102, + 1,102,1,102,1,102,1,102,1,102,1,102,1,102,1,102,1,102,1,102,1,102, + 
1,103,1,103,1,103,1,103,1,103,1,103,1,103,1,103,1,103,1,103,1,103, + 1,103,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104, + 1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,104,1,105, + 1,105,1,105,1,105,1,105,1,105,1,105,1,105,1,105,1,105,1,105,1,105, + 1,105,1,106,1,106,1,106,1,106,1,106,1,106,1,106,1,106,1,106,1,106, + 1,106,1,107,1,107,1,107,1,107,1,107,1,107,1,107,1,107,1,107,1,107, + 1,107,1,107,1,107,1,107,1,107,1,108,1,108,1,108,1,108,1,108,1,108, + 1,108,1,108,1,108,1,108,1,108,1,108,1,108,1,108,1,108,1,108,1,108, + 1,108,1,108,1,108,1,108,1,108,1,108,1,108,1,109,1,109,1,109,1,109, + 1,109,1,109,1,109,1,109,1,109,1,109,1,109,1,109,1,109,1,109,1,109, + 1,109,1,109,1,109,1,109,1,109,1,109,1,109,1,109,1,109,1,109,1,109, + 1,109,1,109,1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,110, + 1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,110, + 1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,110,1,111,1,111, + 1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111, + 1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111, + 1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,111,1,112,1,112, + 1,112,1,112,1,112,1,112,1,112,1,112,1,113,1,113,1,113,1,113,1,113, + 1,113,1,113,1,113,1,113,1,113,1,113,1,113,1,113,1,113,1,113,1,114, + 1,114,1,114,1,114,1,114,1,114,1,114,1,115,1,115,1,115,1,115,1,115, + 1,115,1,116,1,116,1,116,1,116,1,116,1,116,1,116,1,116,1,117,1,117, + 1,117,1,117,1,117,1,117,1,117,1,117,1,117,1,117,1,117,1,117,1,118, + 1,118,1,118,1,118,1,118,1,118,1,118,1,118,1,119,1,119,1,119,1,119, + 1,119,1,119,1,119,1,119,1,119,1,119,1,119,1,119,1,120,1,120,1,120, + 1,120,1,120,1,120,1,120,1,120,1,121,1,121,1,121,1,121,1,121,1,121, + 1,121,1,121,1,121,1,121,1,121,1,121,1,121,1,121,1,122,1,122,1,122, + 1,122,1,122,1,122,1,122,1,122,1,122,1,122,1,122,1,122,1,122,1,122, + 1,122,1,122,1,122,1,122,1,123,1,123,1,123,1,123,1,123,1,123,1,123, + 1,123,1,123,1,123,1,123,1,123,1,123,1,123,1,124,1,124,1,124,1,124, + 1,124,1,124,1,124,1,124,1,124,1,124,1,124,1,124,1,124,1,124,1,125, + 1,125,1,125,1,125,1,125,1,125,1,125,1,125,1,125,1,125,1,125,1,125, + 1,125,1,125,1,125,1,125,1,125,1,125,1,126,1,126,1,126,1,126,1,126, + 1,126,1,126,1,126,1,126,1,126,1,126,1,126,1,126,1,126,1,126,1,126, + 1,126,1,127,1,127,1,127,1,127,1,127,1,127,1,127,1,128,1,128,1,128, + 1,128,1,128,1,128,1,128,1,129,1,129,1,129,1,129,1,129,1,129,1,129, + 1,129,1,130,1,130,1,130,1,130,1,130,1,130,1,130,1,130,1,130,1,130, + 1,130,1,130,1,130,1,130,1,130,1,130,1,131,1,131,1,131,1,131,1,131, + 1,131,1,131,1,131,1,131,1,131,1,131,1,132,1,132,1,132,1,132,1,132, + 1,132,1,132,1,132,1,132,1,132,1,133,1,133,1,133,1,133,1,133,1,133, + 1,133,1,133,1,133,1,134,1,134,1,134,1,134,1,134,1,134,1,134,1,134, + 1,134,1,135,1,135,1,135,1,135,1,135,1,135,1,135,1,135,1,135,1,135, + 1,135,1,135,1,136,1,136,1,136,1,136,1,136,1,136,1,136,1,136,1,136, + 1,136,1,136,1,136,1,136,1,137,1,137,1,137,1,137,1,137,1,137,1,137, + 1,137,1,137,1,137,1,137,1,137,1,137,1,137,1,137,1,137,1,137,1,137, + 1,137,1,137,1,137,1,137,1,137,1,137,1,137,1,137,1,137,1,138,1,138, + 1,138,1,138,1,138,1,138,1,138,1,138,1,138,1,138,1,138,1,138,1,138, + 1,138,1,138,1,138,1,138,1,138,1,138,1,138,1,138,1,138,1,138,1,138, + 1,138,1,138,1,139,1,139,1,139,1,139,1,139,1,139,1,139,1,139,1,139, + 1,139,1,139,1,139,1,139,1,139,1,139,1,139,1,139,1,140,1,140,1,140, + 1,140,1,140,1,140,1,140,1,140,1,140,1,140,1,140,1,140,1,140,1,140, + 1,140,1,140,1,140,1,140,1,140,1,140,1,141,1,141,1,141,1,141,1,141, + 
1,141,1,141,1,141,1,141,1,141,1,141,1,141,1,141,1,141,1,141,1,141, + 1,141,1,141,1,141,1,141,1,141,1,142,1,142,1,142,1,142,1,142,1,142, + 1,142,1,142,1,142,1,142,1,142,1,142,1,142,1,142,1,142,1,142,1,142, + 1,142,1,142,1,142,1,142,1,142,1,142,1,142,1,142,1,142,1,142,1,142, + 1,142,1,142,1,142,1,142,1,143,1,143,1,143,1,143,1,143,1,143,1,143, + 1,143,1,143,1,143,1,143,1,143,1,143,1,143,1,143,1,143,1,143,1,143, + 1,143,1,143,1,143,1,143,1,143,1,143,1,143,1,143,1,143,1,143,1,143, + 1,143,1,144,1,144,1,144,1,144,1,144,1,144,1,144,1,144,1,144,1,144, + 1,144,1,144,1,144,1,144,1,144,1,144,1,144,1,144,1,144,1,144,1,144, + 1,144,1,145,1,145,1,145,1,145,1,145,1,145,1,145,1,145,1,145,1,145, + 1,145,1,145,1,145,1,145,1,145,1,145,1,145,1,145,1,145,1,145,1,145, + 1,145,1,145,1,145,1,145,1,146,1,146,1,146,1,146,1,146,1,146,1,146, + 1,146,1,146,1,146,1,146,1,146,1,146,1,146,1,146,1,146,1,146,1,146, + 1,146,1,146,1,146,1,146,1,146,1,146,1,146,1,146,1,147,1,147,1,147, + 1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147, + 1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147, + 1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147,1,147, + 1,147,1,147,1,147,1,147,1,147,1,148,1,148,1,148,1,148,1,148,1,148, + 1,148,1,148,1,148,1,148,1,148,1,148,1,148,1,148,1,148,1,148,1,148, + 1,148,1,148,1,148,1,148,1,148,1,148,1,148,1,148,1,148,1,149,1,149, + 1,149,1,149,1,149,1,149,1,149,1,149,1,149,1,149,1,149,1,149,1,149, + 1,149,1,149,1,149,1,149,1,149,1,149,1,149,1,149,1,149,1,149,1,149, + 1,149,1,149,1,149,1,149,1,150,1,150,1,150,1,150,1,150,1,150,1,150, + 1,150,1,150,1,150,1,150,1,150,1,150,1,150,1,150,1,150,1,150,1,150, + 1,150,1,150,1,150,1,150,1,150,1,150,1,150,1,150,1,150,1,150,1,150, + 1,150,1,151,1,151,1,151,1,151,1,151,1,151,1,151,1,151,1,151,1,151, + 1,151,1,151,1,151,1,151,1,151,1,151,1,151,1,152,1,152,1,152,5,152, + 2705,8,152,10,152,12,152,2708,9,152,1,152,1,152,1,152,1,152,1,153, + 1,153,1,153,1,153,1,153,1,153,5,153,2720,8,153,10,153,12,153,2723, + 9,153,1,153,1,153,1,154,1,154,1,154,1,154,1,154,1,154,1,154,1,154, + 1,154,5,154,2736,8,154,10,154,12,154,2739,9,154,1,154,3,154,2742, + 8,154,1,155,1,155,1,155,1,155,1,155,1,155,5,155,2750,8,155,10,155, + 12,155,2753,9,155,1,155,1,155,1,156,1,156,1,156,1,156,1,156,1,156, + 1,156,1,156,1,156,1,156,1,156,4,156,2768,8,156,11,156,12,156,2769, + 1,156,1,156,1,156,5,156,2775,8,156,10,156,12,156,2778,9,156,1,156, + 1,156,1,156,1,157,1,157,1,157,5,157,2786,8,157,10,157,12,157,2789, + 9,157,1,157,1,157,1,158,1,158,1,158,5,158,2796,8,158,10,158,12,158, + 2799,9,158,1,158,1,158,1,159,1,159,1,159,3,159,2806,8,159,1,160, + 1,160,1,160,1,160,1,160,1,160,1,161,1,161,1,162,1,162,1,163,1,163, + 1,163,1,163,1,164,1,164,1,164,1,164,1,165,1,165,1,165,5,165,2829, + 8,165,10,165,12,165,2832,9,165,3,165,2834,8,165,1,166,3,166,2837, + 8,166,1,166,1,166,1,166,4,166,2842,8,166,11,166,12,166,2843,3,166, + 2846,8,166,1,166,3,166,2849,8,166,1,167,1,167,3,167,2853,8,167,1, + 167,1,167,1,168,4,168,2858,8,168,11,168,12,168,2859,1,168,1,168, + 0,0,169,1,1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21,11,23,12, + 25,13,27,14,29,15,31,16,33,17,35,18,37,19,39,20,41,21,43,22,45,23, + 47,24,49,25,51,26,53,27,55,28,57,29,59,30,61,31,63,32,65,33,67,34, + 69,35,71,36,73,37,75,38,77,39,79,40,81,41,83,42,85,43,87,44,89,45, + 91,46,93,47,95,48,97,49,99,50,101,51,103,52,105,53,107,54,109,55, + 111,56,113,57,115,58,117,59,119,60,121,61,123,62,125,63,127,64,129, + 65,131,66,133,67,135,68,137,69,139,70,141,71,143,72,145,73,147,74, + 
149,75,151,76,153,77,155,78,157,79,159,80,161,81,163,82,165,83,167, + 84,169,85,171,86,173,87,175,88,177,89,179,90,181,91,183,92,185,93, + 187,94,189,95,191,96,193,97,195,98,197,99,199,100,201,101,203,102, + 205,103,207,104,209,105,211,106,213,107,215,108,217,109,219,110, + 221,111,223,112,225,113,227,114,229,115,231,116,233,117,235,118, + 237,119,239,120,241,121,243,122,245,123,247,124,249,125,251,126, + 253,127,255,128,257,129,259,130,261,131,263,132,265,133,267,134, + 269,135,271,136,273,137,275,138,277,139,279,140,281,141,283,142, + 285,143,287,144,289,145,291,146,293,147,295,148,297,149,299,150, + 301,151,303,152,305,153,307,154,309,155,311,156,313,157,315,158, + 317,159,319,0,321,0,323,0,325,0,327,0,329,0,331,160,333,161,335, + 0,337,162,1,0,10,2,0,46,46,91,91,3,0,65,90,95,95,97,122,8,0,34,34, + 47,47,92,92,98,98,102,102,110,110,114,114,116,116,3,0,48,57,65,70, + 97,102,3,0,0,31,34,34,92,92,1,0,49,57,1,0,48,57,2,0,69,69,101,101, + 2,0,43,43,45,45,3,0,9,10,13,13,32,32,2881,0,1,1,0,0,0,0,3,1,0,0, + 0,0,5,1,0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,11,1,0,0,0,0,13,1,0,0,0, + 0,15,1,0,0,0,0,17,1,0,0,0,0,19,1,0,0,0,0,21,1,0,0,0,0,23,1,0,0,0, + 0,25,1,0,0,0,0,27,1,0,0,0,0,29,1,0,0,0,0,31,1,0,0,0,0,33,1,0,0,0, + 0,35,1,0,0,0,0,37,1,0,0,0,0,39,1,0,0,0,0,41,1,0,0,0,0,43,1,0,0,0, + 0,45,1,0,0,0,0,47,1,0,0,0,0,49,1,0,0,0,0,51,1,0,0,0,0,53,1,0,0,0, + 0,55,1,0,0,0,0,57,1,0,0,0,0,59,1,0,0,0,0,61,1,0,0,0,0,63,1,0,0,0, + 0,65,1,0,0,0,0,67,1,0,0,0,0,69,1,0,0,0,0,71,1,0,0,0,0,73,1,0,0,0, + 0,75,1,0,0,0,0,77,1,0,0,0,0,79,1,0,0,0,0,81,1,0,0,0,0,83,1,0,0,0, + 0,85,1,0,0,0,0,87,1,0,0,0,0,89,1,0,0,0,0,91,1,0,0,0,0,93,1,0,0,0, + 0,95,1,0,0,0,0,97,1,0,0,0,0,99,1,0,0,0,0,101,1,0,0,0,0,103,1,0,0, + 0,0,105,1,0,0,0,0,107,1,0,0,0,0,109,1,0,0,0,0,111,1,0,0,0,0,113, + 1,0,0,0,0,115,1,0,0,0,0,117,1,0,0,0,0,119,1,0,0,0,0,121,1,0,0,0, + 0,123,1,0,0,0,0,125,1,0,0,0,0,127,1,0,0,0,0,129,1,0,0,0,0,131,1, + 0,0,0,0,133,1,0,0,0,0,135,1,0,0,0,0,137,1,0,0,0,0,139,1,0,0,0,0, + 141,1,0,0,0,0,143,1,0,0,0,0,145,1,0,0,0,0,147,1,0,0,0,0,149,1,0, + 0,0,0,151,1,0,0,0,0,153,1,0,0,0,0,155,1,0,0,0,0,157,1,0,0,0,0,159, + 1,0,0,0,0,161,1,0,0,0,0,163,1,0,0,0,0,165,1,0,0,0,0,167,1,0,0,0, + 0,169,1,0,0,0,0,171,1,0,0,0,0,173,1,0,0,0,0,175,1,0,0,0,0,177,1, + 0,0,0,0,179,1,0,0,0,0,181,1,0,0,0,0,183,1,0,0,0,0,185,1,0,0,0,0, + 187,1,0,0,0,0,189,1,0,0,0,0,191,1,0,0,0,0,193,1,0,0,0,0,195,1,0, + 0,0,0,197,1,0,0,0,0,199,1,0,0,0,0,201,1,0,0,0,0,203,1,0,0,0,0,205, + 1,0,0,0,0,207,1,0,0,0,0,209,1,0,0,0,0,211,1,0,0,0,0,213,1,0,0,0, + 0,215,1,0,0,0,0,217,1,0,0,0,0,219,1,0,0,0,0,221,1,0,0,0,0,223,1, + 0,0,0,0,225,1,0,0,0,0,227,1,0,0,0,0,229,1,0,0,0,0,231,1,0,0,0,0, + 233,1,0,0,0,0,235,1,0,0,0,0,237,1,0,0,0,0,239,1,0,0,0,0,241,1,0, + 0,0,0,243,1,0,0,0,0,245,1,0,0,0,0,247,1,0,0,0,0,249,1,0,0,0,0,251, + 1,0,0,0,0,253,1,0,0,0,0,255,1,0,0,0,0,257,1,0,0,0,0,259,1,0,0,0, + 0,261,1,0,0,0,0,263,1,0,0,0,0,265,1,0,0,0,0,267,1,0,0,0,0,269,1, + 0,0,0,0,271,1,0,0,0,0,273,1,0,0,0,0,275,1,0,0,0,0,277,1,0,0,0,0, + 279,1,0,0,0,0,281,1,0,0,0,0,283,1,0,0,0,0,285,1,0,0,0,0,287,1,0, + 0,0,0,289,1,0,0,0,0,291,1,0,0,0,0,293,1,0,0,0,0,295,1,0,0,0,0,297, + 1,0,0,0,0,299,1,0,0,0,0,301,1,0,0,0,0,303,1,0,0,0,0,305,1,0,0,0, + 0,307,1,0,0,0,0,309,1,0,0,0,0,311,1,0,0,0,0,313,1,0,0,0,0,315,1, + 0,0,0,0,317,1,0,0,0,0,331,1,0,0,0,0,333,1,0,0,0,0,337,1,0,0,0,1, + 339,1,0,0,0,3,341,1,0,0,0,5,343,1,0,0,0,7,345,1,0,0,0,9,347,1,0, + 0,0,11,349,1,0,0,0,13,351,1,0,0,0,15,356,1,0,0,0,17,362,1,0,0,0, + 19,367,1,0,0,0,21,377,1,0,0,0,23,386,1,0,0,0,25,396,1,0,0,0,27,408, + 
1,0,0,0,29,418,1,0,0,0,31,425,1,0,0,0,33,432,1,0,0,0,35,441,1,0, + 0,0,37,448,1,0,0,0,39,458,1,0,0,0,41,465,1,0,0,0,43,472,1,0,0,0, + 45,483,1,0,0,0,47,489,1,0,0,0,49,499,1,0,0,0,51,511,1,0,0,0,53,522, + 1,0,0,0,55,532,1,0,0,0,57,543,1,0,0,0,59,549,1,0,0,0,61,565,1,0, + 0,0,63,585,1,0,0,0,65,597,1,0,0,0,67,606,1,0,0,0,69,618,1,0,0,0, + 71,630,1,0,0,0,73,641,1,0,0,0,75,655,1,0,0,0,77,661,1,0,0,0,79,677, + 1,0,0,0,81,697,1,0,0,0,83,718,1,0,0,0,85,743,1,0,0,0,87,770,1,0, + 0,0,89,801,1,0,0,0,91,819,1,0,0,0,93,841,1,0,0,0,95,865,1,0,0,0, + 97,893,1,0,0,0,99,898,1,0,0,0,101,913,1,0,0,0,103,932,1,0,0,0,105, + 952,1,0,0,0,107,976,1,0,0,0,109,1002,1,0,0,0,111,1032,1,0,0,0,113, + 1049,1,0,0,0,115,1070,1,0,0,0,117,1093,1,0,0,0,119,1120,1,0,0,0, + 121,1136,1,0,0,0,123,1154,1,0,0,0,125,1176,1,0,0,0,127,1199,1,0, + 0,0,129,1226,1,0,0,0,131,1255,1,0,0,0,133,1288,1,0,0,0,135,1308, + 1,0,0,0,137,1332,1,0,0,0,139,1358,1,0,0,0,141,1388,1,0,0,0,143,1402, + 1,0,0,0,145,1412,1,0,0,0,147,1428,1,0,0,0,149,1440,1,0,0,0,151,1457, + 1,0,0,0,153,1478,1,0,0,0,155,1497,1,0,0,0,157,1520,1,0,0,0,159,1538, + 1,0,0,0,161,1545,1,0,0,0,163,1554,1,0,0,0,165,1568,1,0,0,0,167,1584, + 1,0,0,0,169,1595,1,0,0,0,171,1611,1,0,0,0,173,1622,1,0,0,0,175,1637, + 1,0,0,0,177,1658,1,0,0,0,179,1675,1,0,0,0,181,1686,1,0,0,0,183,1698, + 1,0,0,0,185,1711,1,0,0,0,187,1719,1,0,0,0,189,1731,1,0,0,0,191,1744, + 1,0,0,0,193,1753,1,0,0,0,195,1766,1,0,0,0,197,1780,1,0,0,0,199,1790, + 1,0,0,0,201,1802,1,0,0,0,203,1819,1,0,0,0,205,1832,1,0,0,0,207,1847, + 1,0,0,0,209,1859,1,0,0,0,211,1879,1,0,0,0,213,1892,1,0,0,0,215,1903, + 1,0,0,0,217,1918,1,0,0,0,219,1942,1,0,0,0,221,1970,1,0,0,0,223,1999, + 1,0,0,0,225,2032,1,0,0,0,227,2040,1,0,0,0,229,2055,1,0,0,0,231,2062, + 1,0,0,0,233,2068,1,0,0,0,235,2076,1,0,0,0,237,2088,1,0,0,0,239,2096, + 1,0,0,0,241,2108,1,0,0,0,243,2116,1,0,0,0,245,2130,1,0,0,0,247,2148, + 1,0,0,0,249,2162,1,0,0,0,251,2176,1,0,0,0,253,2194,1,0,0,0,255,2211, + 1,0,0,0,257,2218,1,0,0,0,259,2225,1,0,0,0,261,2233,1,0,0,0,263,2249, + 1,0,0,0,265,2260,1,0,0,0,267,2270,1,0,0,0,269,2279,1,0,0,0,271,2288, + 1,0,0,0,273,2300,1,0,0,0,275,2313,1,0,0,0,277,2340,1,0,0,0,279,2366, + 1,0,0,0,281,2383,1,0,0,0,283,2403,1,0,0,0,285,2424,1,0,0,0,287,2456, + 1,0,0,0,289,2486,1,0,0,0,291,2508,1,0,0,0,293,2533,1,0,0,0,295,2559, + 1,0,0,0,297,2600,1,0,0,0,299,2626,1,0,0,0,301,2654,1,0,0,0,303,2684, + 1,0,0,0,305,2701,1,0,0,0,307,2713,1,0,0,0,309,2741,1,0,0,0,311,2743, + 1,0,0,0,313,2756,1,0,0,0,315,2782,1,0,0,0,317,2792,1,0,0,0,319,2802, + 1,0,0,0,321,2807,1,0,0,0,323,2813,1,0,0,0,325,2815,1,0,0,0,327,2817, + 1,0,0,0,329,2821,1,0,0,0,331,2833,1,0,0,0,333,2836,1,0,0,0,335,2850, + 1,0,0,0,337,2857,1,0,0,0,339,340,5,44,0,0,340,2,1,0,0,0,341,342, + 5,58,0,0,342,4,1,0,0,0,343,344,5,91,0,0,344,6,1,0,0,0,345,346,5, + 93,0,0,346,8,1,0,0,0,347,348,5,123,0,0,348,10,1,0,0,0,349,350,5, + 125,0,0,350,12,1,0,0,0,351,352,5,116,0,0,352,353,5,114,0,0,353,354, + 5,117,0,0,354,355,5,101,0,0,355,14,1,0,0,0,356,357,5,102,0,0,357, + 358,5,97,0,0,358,359,5,108,0,0,359,360,5,115,0,0,360,361,5,101,0, + 0,361,16,1,0,0,0,362,363,5,110,0,0,363,364,5,117,0,0,364,365,5,108, + 0,0,365,366,5,108,0,0,366,18,1,0,0,0,367,368,5,34,0,0,368,369,5, + 67,0,0,369,370,5,111,0,0,370,371,5,109,0,0,371,372,5,109,0,0,372, + 373,5,101,0,0,373,374,5,110,0,0,374,375,5,116,0,0,375,376,5,34,0, + 0,376,20,1,0,0,0,377,378,5,34,0,0,378,379,5,83,0,0,379,380,5,116, + 0,0,380,381,5,97,0,0,381,382,5,116,0,0,382,383,5,101,0,0,383,384, + 5,115,0,0,384,385,5,34,0,0,385,22,1,0,0,0,386,387,5,34,0,0,387,388, + 
5,83,0,0,388,389,5,116,0,0,389,390,5,97,0,0,390,391,5,114,0,0,391, + 392,5,116,0,0,392,393,5,65,0,0,393,394,5,116,0,0,394,395,5,34,0, + 0,395,24,1,0,0,0,396,397,5,34,0,0,397,398,5,78,0,0,398,399,5,101, + 0,0,399,400,5,120,0,0,400,401,5,116,0,0,401,402,5,83,0,0,402,403, + 5,116,0,0,403,404,5,97,0,0,404,405,5,116,0,0,405,406,5,101,0,0,406, + 407,5,34,0,0,407,26,1,0,0,0,408,409,5,34,0,0,409,410,5,86,0,0,410, + 411,5,101,0,0,411,412,5,114,0,0,412,413,5,115,0,0,413,414,5,105, + 0,0,414,415,5,111,0,0,415,416,5,110,0,0,416,417,5,34,0,0,417,28, + 1,0,0,0,418,419,5,34,0,0,419,420,5,84,0,0,420,421,5,121,0,0,421, + 422,5,112,0,0,422,423,5,101,0,0,423,424,5,34,0,0,424,30,1,0,0,0, + 425,426,5,34,0,0,426,427,5,84,0,0,427,428,5,97,0,0,428,429,5,115, + 0,0,429,430,5,107,0,0,430,431,5,34,0,0,431,32,1,0,0,0,432,433,5, + 34,0,0,433,434,5,67,0,0,434,435,5,104,0,0,435,436,5,111,0,0,436, + 437,5,105,0,0,437,438,5,99,0,0,438,439,5,101,0,0,439,440,5,34,0, + 0,440,34,1,0,0,0,441,442,5,34,0,0,442,443,5,70,0,0,443,444,5,97, + 0,0,444,445,5,105,0,0,445,446,5,108,0,0,446,447,5,34,0,0,447,36, + 1,0,0,0,448,449,5,34,0,0,449,450,5,83,0,0,450,451,5,117,0,0,451, + 452,5,99,0,0,452,453,5,99,0,0,453,454,5,101,0,0,454,455,5,101,0, + 0,455,456,5,100,0,0,456,457,5,34,0,0,457,38,1,0,0,0,458,459,5,34, + 0,0,459,460,5,80,0,0,460,461,5,97,0,0,461,462,5,115,0,0,462,463, + 5,115,0,0,463,464,5,34,0,0,464,40,1,0,0,0,465,466,5,34,0,0,466,467, + 5,87,0,0,467,468,5,97,0,0,468,469,5,105,0,0,469,470,5,116,0,0,470, + 471,5,34,0,0,471,42,1,0,0,0,472,473,5,34,0,0,473,474,5,80,0,0,474, + 475,5,97,0,0,475,476,5,114,0,0,476,477,5,97,0,0,477,478,5,108,0, + 0,478,479,5,108,0,0,479,480,5,101,0,0,480,481,5,108,0,0,481,482, + 5,34,0,0,482,44,1,0,0,0,483,484,5,34,0,0,484,485,5,77,0,0,485,486, + 5,97,0,0,486,487,5,112,0,0,487,488,5,34,0,0,488,46,1,0,0,0,489,490, + 5,34,0,0,490,491,5,67,0,0,491,492,5,104,0,0,492,493,5,111,0,0,493, + 494,5,105,0,0,494,495,5,99,0,0,495,496,5,101,0,0,496,497,5,115,0, + 0,497,498,5,34,0,0,498,48,1,0,0,0,499,500,5,34,0,0,500,501,5,67, + 0,0,501,502,5,111,0,0,502,503,5,110,0,0,503,504,5,100,0,0,504,505, + 5,105,0,0,505,506,5,116,0,0,506,507,5,105,0,0,507,508,5,111,0,0, + 508,509,5,110,0,0,509,510,5,34,0,0,510,50,1,0,0,0,511,512,5,34,0, + 0,512,513,5,86,0,0,513,514,5,97,0,0,514,515,5,114,0,0,515,516,5, + 105,0,0,516,517,5,97,0,0,517,518,5,98,0,0,518,519,5,108,0,0,519, + 520,5,101,0,0,520,521,5,34,0,0,521,52,1,0,0,0,522,523,5,34,0,0,523, + 524,5,68,0,0,524,525,5,101,0,0,525,526,5,102,0,0,526,527,5,97,0, + 0,527,528,5,117,0,0,528,529,5,108,0,0,529,530,5,116,0,0,530,531, + 5,34,0,0,531,54,1,0,0,0,532,533,5,34,0,0,533,534,5,66,0,0,534,535, + 5,114,0,0,535,536,5,97,0,0,536,537,5,110,0,0,537,538,5,99,0,0,538, + 539,5,104,0,0,539,540,5,101,0,0,540,541,5,115,0,0,541,542,5,34,0, + 0,542,56,1,0,0,0,543,544,5,34,0,0,544,545,5,65,0,0,545,546,5,110, + 0,0,546,547,5,100,0,0,547,548,5,34,0,0,548,58,1,0,0,0,549,550,5, + 34,0,0,550,551,5,66,0,0,551,552,5,111,0,0,552,553,5,111,0,0,553, + 554,5,108,0,0,554,555,5,101,0,0,555,556,5,97,0,0,556,557,5,110,0, + 0,557,558,5,69,0,0,558,559,5,113,0,0,559,560,5,117,0,0,560,561,5, + 97,0,0,561,562,5,108,0,0,562,563,5,115,0,0,563,564,5,34,0,0,564, + 60,1,0,0,0,565,566,5,34,0,0,566,567,5,66,0,0,567,568,5,111,0,0,568, + 569,5,111,0,0,569,570,5,108,0,0,570,571,5,101,0,0,571,572,5,97,0, + 0,572,573,5,110,0,0,573,574,5,69,0,0,574,575,5,113,0,0,575,576,5, + 117,0,0,576,577,5,97,0,0,577,578,5,108,0,0,578,579,5,115,0,0,579, + 580,5,80,0,0,580,581,5,97,0,0,581,582,5,116,0,0,582,583,5,104,0, + 
0,583,584,5,34,0,0,584,62,1,0,0,0,585,586,5,34,0,0,586,587,5,73, + 0,0,587,588,5,115,0,0,588,589,5,66,0,0,589,590,5,111,0,0,590,591, + 5,111,0,0,591,592,5,108,0,0,592,593,5,101,0,0,593,594,5,97,0,0,594, + 595,5,110,0,0,595,596,5,34,0,0,596,64,1,0,0,0,597,598,5,34,0,0,598, + 599,5,73,0,0,599,600,5,115,0,0,600,601,5,78,0,0,601,602,5,117,0, + 0,602,603,5,108,0,0,603,604,5,108,0,0,604,605,5,34,0,0,605,66,1, + 0,0,0,606,607,5,34,0,0,607,608,5,73,0,0,608,609,5,115,0,0,609,610, + 5,78,0,0,610,611,5,117,0,0,611,612,5,109,0,0,612,613,5,101,0,0,613, + 614,5,114,0,0,614,615,5,105,0,0,615,616,5,99,0,0,616,617,5,34,0, + 0,617,68,1,0,0,0,618,619,5,34,0,0,619,620,5,73,0,0,620,621,5,115, + 0,0,621,622,5,80,0,0,622,623,5,114,0,0,623,624,5,101,0,0,624,625, + 5,115,0,0,625,626,5,101,0,0,626,627,5,110,0,0,627,628,5,116,0,0, + 628,629,5,34,0,0,629,70,1,0,0,0,630,631,5,34,0,0,631,632,5,73,0, + 0,632,633,5,115,0,0,633,634,5,83,0,0,634,635,5,116,0,0,635,636,5, + 114,0,0,636,637,5,105,0,0,637,638,5,110,0,0,638,639,5,103,0,0,639, + 640,5,34,0,0,640,72,1,0,0,0,641,642,5,34,0,0,642,643,5,73,0,0,643, + 644,5,115,0,0,644,645,5,84,0,0,645,646,5,105,0,0,646,647,5,109,0, + 0,647,648,5,101,0,0,648,649,5,115,0,0,649,650,5,116,0,0,650,651, + 5,97,0,0,651,652,5,109,0,0,652,653,5,112,0,0,653,654,5,34,0,0,654, + 74,1,0,0,0,655,656,5,34,0,0,656,657,5,78,0,0,657,658,5,111,0,0,658, + 659,5,116,0,0,659,660,5,34,0,0,660,76,1,0,0,0,661,662,5,34,0,0,662, + 663,5,78,0,0,663,664,5,117,0,0,664,665,5,109,0,0,665,666,5,101,0, + 0,666,667,5,114,0,0,667,668,5,105,0,0,668,669,5,99,0,0,669,670,5, + 69,0,0,670,671,5,113,0,0,671,672,5,117,0,0,672,673,5,97,0,0,673, + 674,5,108,0,0,674,675,5,115,0,0,675,676,5,34,0,0,676,78,1,0,0,0, + 677,678,5,34,0,0,678,679,5,78,0,0,679,680,5,117,0,0,680,681,5,109, + 0,0,681,682,5,101,0,0,682,683,5,114,0,0,683,684,5,105,0,0,684,685, + 5,99,0,0,685,686,5,69,0,0,686,687,5,113,0,0,687,688,5,117,0,0,688, + 689,5,97,0,0,689,690,5,108,0,0,690,691,5,115,0,0,691,692,5,80,0, + 0,692,693,5,97,0,0,693,694,5,116,0,0,694,695,5,104,0,0,695,696,5, + 34,0,0,696,80,1,0,0,0,697,698,5,34,0,0,698,699,5,78,0,0,699,700, + 5,117,0,0,700,701,5,109,0,0,701,702,5,101,0,0,702,703,5,114,0,0, + 703,704,5,105,0,0,704,705,5,99,0,0,705,706,5,71,0,0,706,707,5,114, + 0,0,707,708,5,101,0,0,708,709,5,97,0,0,709,710,5,116,0,0,710,711, + 5,101,0,0,711,712,5,114,0,0,712,713,5,84,0,0,713,714,5,104,0,0,714, + 715,5,97,0,0,715,716,5,110,0,0,716,717,5,34,0,0,717,82,1,0,0,0,718, + 719,5,34,0,0,719,720,5,78,0,0,720,721,5,117,0,0,721,722,5,109,0, + 0,722,723,5,101,0,0,723,724,5,114,0,0,724,725,5,105,0,0,725,726, + 5,99,0,0,726,727,5,71,0,0,727,728,5,114,0,0,728,729,5,101,0,0,729, + 730,5,97,0,0,730,731,5,116,0,0,731,732,5,101,0,0,732,733,5,114,0, + 0,733,734,5,84,0,0,734,735,5,104,0,0,735,736,5,97,0,0,736,737,5, + 110,0,0,737,738,5,80,0,0,738,739,5,97,0,0,739,740,5,116,0,0,740, + 741,5,104,0,0,741,742,5,34,0,0,742,84,1,0,0,0,743,744,5,34,0,0,744, + 745,5,78,0,0,745,746,5,117,0,0,746,747,5,109,0,0,747,748,5,101,0, + 0,748,749,5,114,0,0,749,750,5,105,0,0,750,751,5,99,0,0,751,752,5, + 71,0,0,752,753,5,114,0,0,753,754,5,101,0,0,754,755,5,97,0,0,755, + 756,5,116,0,0,756,757,5,101,0,0,757,758,5,114,0,0,758,759,5,84,0, + 0,759,760,5,104,0,0,760,761,5,97,0,0,761,762,5,110,0,0,762,763,5, + 69,0,0,763,764,5,113,0,0,764,765,5,117,0,0,765,766,5,97,0,0,766, + 767,5,108,0,0,767,768,5,115,0,0,768,769,5,34,0,0,769,86,1,0,0,0, + 770,771,5,34,0,0,771,772,5,78,0,0,772,773,5,117,0,0,773,774,5,109, + 0,0,774,775,5,101,0,0,775,776,5,114,0,0,776,777,5,105,0,0,777,778, + 
5,99,0,0,778,779,5,71,0,0,779,780,5,114,0,0,780,781,5,101,0,0,781, + 782,5,97,0,0,782,783,5,116,0,0,783,784,5,101,0,0,784,785,5,114,0, + 0,785,786,5,84,0,0,786,787,5,104,0,0,787,788,5,97,0,0,788,789,5, + 110,0,0,789,790,5,69,0,0,790,791,5,113,0,0,791,792,5,117,0,0,792, + 793,5,97,0,0,793,794,5,108,0,0,794,795,5,115,0,0,795,796,5,80,0, + 0,796,797,5,97,0,0,797,798,5,116,0,0,798,799,5,104,0,0,799,800,5, + 34,0,0,800,88,1,0,0,0,801,802,5,34,0,0,802,803,5,78,0,0,803,804, + 5,117,0,0,804,805,5,109,0,0,805,806,5,101,0,0,806,807,5,114,0,0, + 807,808,5,105,0,0,808,809,5,99,0,0,809,810,5,76,0,0,810,811,5,101, + 0,0,811,812,5,115,0,0,812,813,5,115,0,0,813,814,5,84,0,0,814,815, + 5,104,0,0,815,816,5,97,0,0,816,817,5,110,0,0,817,818,5,34,0,0,818, + 90,1,0,0,0,819,820,5,34,0,0,820,821,5,78,0,0,821,822,5,117,0,0,822, + 823,5,109,0,0,823,824,5,101,0,0,824,825,5,114,0,0,825,826,5,105, + 0,0,826,827,5,99,0,0,827,828,5,76,0,0,828,829,5,101,0,0,829,830, + 5,115,0,0,830,831,5,115,0,0,831,832,5,84,0,0,832,833,5,104,0,0,833, + 834,5,97,0,0,834,835,5,110,0,0,835,836,5,80,0,0,836,837,5,97,0,0, + 837,838,5,116,0,0,838,839,5,104,0,0,839,840,5,34,0,0,840,92,1,0, + 0,0,841,842,5,34,0,0,842,843,5,78,0,0,843,844,5,117,0,0,844,845, + 5,109,0,0,845,846,5,101,0,0,846,847,5,114,0,0,847,848,5,105,0,0, + 848,849,5,99,0,0,849,850,5,76,0,0,850,851,5,101,0,0,851,852,5,115, + 0,0,852,853,5,115,0,0,853,854,5,84,0,0,854,855,5,104,0,0,855,856, + 5,97,0,0,856,857,5,110,0,0,857,858,5,69,0,0,858,859,5,113,0,0,859, + 860,5,117,0,0,860,861,5,97,0,0,861,862,5,108,0,0,862,863,5,115,0, + 0,863,864,5,34,0,0,864,94,1,0,0,0,865,866,5,34,0,0,866,867,5,78, + 0,0,867,868,5,117,0,0,868,869,5,109,0,0,869,870,5,101,0,0,870,871, + 5,114,0,0,871,872,5,105,0,0,872,873,5,99,0,0,873,874,5,76,0,0,874, + 875,5,101,0,0,875,876,5,115,0,0,876,877,5,115,0,0,877,878,5,84,0, + 0,878,879,5,104,0,0,879,880,5,97,0,0,880,881,5,110,0,0,881,882,5, + 69,0,0,882,883,5,113,0,0,883,884,5,117,0,0,884,885,5,97,0,0,885, + 886,5,108,0,0,886,887,5,115,0,0,887,888,5,80,0,0,888,889,5,97,0, + 0,889,890,5,116,0,0,890,891,5,104,0,0,891,892,5,34,0,0,892,96,1, + 0,0,0,893,894,5,34,0,0,894,895,5,79,0,0,895,896,5,114,0,0,896,897, + 5,34,0,0,897,98,1,0,0,0,898,899,5,34,0,0,899,900,5,83,0,0,900,901, + 5,116,0,0,901,902,5,114,0,0,902,903,5,105,0,0,903,904,5,110,0,0, + 904,905,5,103,0,0,905,906,5,69,0,0,906,907,5,113,0,0,907,908,5,117, + 0,0,908,909,5,97,0,0,909,910,5,108,0,0,910,911,5,115,0,0,911,912, + 5,34,0,0,912,100,1,0,0,0,913,914,5,34,0,0,914,915,5,83,0,0,915,916, + 5,116,0,0,916,917,5,114,0,0,917,918,5,105,0,0,918,919,5,110,0,0, + 919,920,5,103,0,0,920,921,5,69,0,0,921,922,5,113,0,0,922,923,5,117, + 0,0,923,924,5,97,0,0,924,925,5,108,0,0,925,926,5,115,0,0,926,927, + 5,80,0,0,927,928,5,97,0,0,928,929,5,116,0,0,929,930,5,104,0,0,930, + 931,5,34,0,0,931,102,1,0,0,0,932,933,5,34,0,0,933,934,5,83,0,0,934, + 935,5,116,0,0,935,936,5,114,0,0,936,937,5,105,0,0,937,938,5,110, + 0,0,938,939,5,103,0,0,939,940,5,71,0,0,940,941,5,114,0,0,941,942, + 5,101,0,0,942,943,5,97,0,0,943,944,5,116,0,0,944,945,5,101,0,0,945, + 946,5,114,0,0,946,947,5,84,0,0,947,948,5,104,0,0,948,949,5,97,0, + 0,949,950,5,110,0,0,950,951,5,34,0,0,951,104,1,0,0,0,952,953,5,34, + 0,0,953,954,5,83,0,0,954,955,5,116,0,0,955,956,5,114,0,0,956,957, + 5,105,0,0,957,958,5,110,0,0,958,959,5,103,0,0,959,960,5,71,0,0,960, + 961,5,114,0,0,961,962,5,101,0,0,962,963,5,97,0,0,963,964,5,116,0, + 0,964,965,5,101,0,0,965,966,5,114,0,0,966,967,5,84,0,0,967,968,5, + 104,0,0,968,969,5,97,0,0,969,970,5,110,0,0,970,971,5,80,0,0,971, + 
972,5,97,0,0,972,973,5,116,0,0,973,974,5,104,0,0,974,975,5,34,0, + 0,975,106,1,0,0,0,976,977,5,34,0,0,977,978,5,83,0,0,978,979,5,116, + 0,0,979,980,5,114,0,0,980,981,5,105,0,0,981,982,5,110,0,0,982,983, + 5,103,0,0,983,984,5,71,0,0,984,985,5,114,0,0,985,986,5,101,0,0,986, + 987,5,97,0,0,987,988,5,116,0,0,988,989,5,101,0,0,989,990,5,114,0, + 0,990,991,5,84,0,0,991,992,5,104,0,0,992,993,5,97,0,0,993,994,5, + 110,0,0,994,995,5,69,0,0,995,996,5,113,0,0,996,997,5,117,0,0,997, + 998,5,97,0,0,998,999,5,108,0,0,999,1000,5,115,0,0,1000,1001,5,34, + 0,0,1001,108,1,0,0,0,1002,1003,5,34,0,0,1003,1004,5,83,0,0,1004, + 1005,5,116,0,0,1005,1006,5,114,0,0,1006,1007,5,105,0,0,1007,1008, + 5,110,0,0,1008,1009,5,103,0,0,1009,1010,5,71,0,0,1010,1011,5,114, + 0,0,1011,1012,5,101,0,0,1012,1013,5,97,0,0,1013,1014,5,116,0,0,1014, + 1015,5,101,0,0,1015,1016,5,114,0,0,1016,1017,5,84,0,0,1017,1018, + 5,104,0,0,1018,1019,5,97,0,0,1019,1020,5,110,0,0,1020,1021,5,69, + 0,0,1021,1022,5,113,0,0,1022,1023,5,117,0,0,1023,1024,5,97,0,0,1024, + 1025,5,108,0,0,1025,1026,5,115,0,0,1026,1027,5,80,0,0,1027,1028, + 5,97,0,0,1028,1029,5,116,0,0,1029,1030,5,104,0,0,1030,1031,5,34, + 0,0,1031,110,1,0,0,0,1032,1033,5,34,0,0,1033,1034,5,83,0,0,1034, + 1035,5,116,0,0,1035,1036,5,114,0,0,1036,1037,5,105,0,0,1037,1038, + 5,110,0,0,1038,1039,5,103,0,0,1039,1040,5,76,0,0,1040,1041,5,101, + 0,0,1041,1042,5,115,0,0,1042,1043,5,115,0,0,1043,1044,5,84,0,0,1044, + 1045,5,104,0,0,1045,1046,5,97,0,0,1046,1047,5,110,0,0,1047,1048, + 5,34,0,0,1048,112,1,0,0,0,1049,1050,5,34,0,0,1050,1051,5,83,0,0, + 1051,1052,5,116,0,0,1052,1053,5,114,0,0,1053,1054,5,105,0,0,1054, + 1055,5,110,0,0,1055,1056,5,103,0,0,1056,1057,5,76,0,0,1057,1058, + 5,101,0,0,1058,1059,5,115,0,0,1059,1060,5,115,0,0,1060,1061,5,84, + 0,0,1061,1062,5,104,0,0,1062,1063,5,97,0,0,1063,1064,5,110,0,0,1064, + 1065,5,80,0,0,1065,1066,5,97,0,0,1066,1067,5,116,0,0,1067,1068,5, + 104,0,0,1068,1069,5,34,0,0,1069,114,1,0,0,0,1070,1071,5,34,0,0,1071, + 1072,5,83,0,0,1072,1073,5,116,0,0,1073,1074,5,114,0,0,1074,1075, + 5,105,0,0,1075,1076,5,110,0,0,1076,1077,5,103,0,0,1077,1078,5,76, + 0,0,1078,1079,5,101,0,0,1079,1080,5,115,0,0,1080,1081,5,115,0,0, + 1081,1082,5,84,0,0,1082,1083,5,104,0,0,1083,1084,5,97,0,0,1084,1085, + 5,110,0,0,1085,1086,5,69,0,0,1086,1087,5,113,0,0,1087,1088,5,117, + 0,0,1088,1089,5,97,0,0,1089,1090,5,108,0,0,1090,1091,5,115,0,0,1091, + 1092,5,34,0,0,1092,116,1,0,0,0,1093,1094,5,34,0,0,1094,1095,5,83, + 0,0,1095,1096,5,116,0,0,1096,1097,5,114,0,0,1097,1098,5,105,0,0, + 1098,1099,5,110,0,0,1099,1100,5,103,0,0,1100,1101,5,76,0,0,1101, + 1102,5,101,0,0,1102,1103,5,115,0,0,1103,1104,5,115,0,0,1104,1105, + 5,84,0,0,1105,1106,5,104,0,0,1106,1107,5,97,0,0,1107,1108,5,110, + 0,0,1108,1109,5,69,0,0,1109,1110,5,113,0,0,1110,1111,5,117,0,0,1111, + 1112,5,97,0,0,1112,1113,5,108,0,0,1113,1114,5,115,0,0,1114,1115, + 5,80,0,0,1115,1116,5,97,0,0,1116,1117,5,116,0,0,1117,1118,5,104, + 0,0,1118,1119,5,34,0,0,1119,118,1,0,0,0,1120,1121,5,34,0,0,1121, + 1122,5,83,0,0,1122,1123,5,116,0,0,1123,1124,5,114,0,0,1124,1125, + 5,105,0,0,1125,1126,5,110,0,0,1126,1127,5,103,0,0,1127,1128,5,77, + 0,0,1128,1129,5,97,0,0,1129,1130,5,116,0,0,1130,1131,5,99,0,0,1131, + 1132,5,104,0,0,1132,1133,5,101,0,0,1133,1134,5,115,0,0,1134,1135, + 5,34,0,0,1135,120,1,0,0,0,1136,1137,5,34,0,0,1137,1138,5,84,0,0, + 1138,1139,5,105,0,0,1139,1140,5,109,0,0,1140,1141,5,101,0,0,1141, + 1142,5,115,0,0,1142,1143,5,116,0,0,1143,1144,5,97,0,0,1144,1145, + 5,109,0,0,1145,1146,5,112,0,0,1146,1147,5,69,0,0,1147,1148,5,113, + 
0,0,1148,1149,5,117,0,0,1149,1150,5,97,0,0,1150,1151,5,108,0,0,1151, + 1152,5,115,0,0,1152,1153,5,34,0,0,1153,122,1,0,0,0,1154,1155,5,34, + 0,0,1155,1156,5,84,0,0,1156,1157,5,105,0,0,1157,1158,5,109,0,0,1158, + 1159,5,101,0,0,1159,1160,5,115,0,0,1160,1161,5,116,0,0,1161,1162, + 5,97,0,0,1162,1163,5,109,0,0,1163,1164,5,112,0,0,1164,1165,5,69, + 0,0,1165,1166,5,113,0,0,1166,1167,5,117,0,0,1167,1168,5,97,0,0,1168, + 1169,5,108,0,0,1169,1170,5,115,0,0,1170,1171,5,80,0,0,1171,1172, + 5,97,0,0,1172,1173,5,116,0,0,1173,1174,5,104,0,0,1174,1175,5,34, + 0,0,1175,124,1,0,0,0,1176,1177,5,34,0,0,1177,1178,5,84,0,0,1178, + 1179,5,105,0,0,1179,1180,5,109,0,0,1180,1181,5,101,0,0,1181,1182, + 5,115,0,0,1182,1183,5,116,0,0,1183,1184,5,97,0,0,1184,1185,5,109, + 0,0,1185,1186,5,112,0,0,1186,1187,5,71,0,0,1187,1188,5,114,0,0,1188, + 1189,5,101,0,0,1189,1190,5,97,0,0,1190,1191,5,116,0,0,1191,1192, + 5,101,0,0,1192,1193,5,114,0,0,1193,1194,5,84,0,0,1194,1195,5,104, + 0,0,1195,1196,5,97,0,0,1196,1197,5,110,0,0,1197,1198,5,34,0,0,1198, + 126,1,0,0,0,1199,1200,5,34,0,0,1200,1201,5,84,0,0,1201,1202,5,105, + 0,0,1202,1203,5,109,0,0,1203,1204,5,101,0,0,1204,1205,5,115,0,0, + 1205,1206,5,116,0,0,1206,1207,5,97,0,0,1207,1208,5,109,0,0,1208, + 1209,5,112,0,0,1209,1210,5,71,0,0,1210,1211,5,114,0,0,1211,1212, + 5,101,0,0,1212,1213,5,97,0,0,1213,1214,5,116,0,0,1214,1215,5,101, + 0,0,1215,1216,5,114,0,0,1216,1217,5,84,0,0,1217,1218,5,104,0,0,1218, + 1219,5,97,0,0,1219,1220,5,110,0,0,1220,1221,5,80,0,0,1221,1222,5, + 97,0,0,1222,1223,5,116,0,0,1223,1224,5,104,0,0,1224,1225,5,34,0, + 0,1225,128,1,0,0,0,1226,1227,5,34,0,0,1227,1228,5,84,0,0,1228,1229, + 5,105,0,0,1229,1230,5,109,0,0,1230,1231,5,101,0,0,1231,1232,5,115, + 0,0,1232,1233,5,116,0,0,1233,1234,5,97,0,0,1234,1235,5,109,0,0,1235, + 1236,5,112,0,0,1236,1237,5,71,0,0,1237,1238,5,114,0,0,1238,1239, + 5,101,0,0,1239,1240,5,97,0,0,1240,1241,5,116,0,0,1241,1242,5,101, + 0,0,1242,1243,5,114,0,0,1243,1244,5,84,0,0,1244,1245,5,104,0,0,1245, + 1246,5,97,0,0,1246,1247,5,110,0,0,1247,1248,5,69,0,0,1248,1249,5, + 113,0,0,1249,1250,5,117,0,0,1250,1251,5,97,0,0,1251,1252,5,108,0, + 0,1252,1253,5,115,0,0,1253,1254,5,34,0,0,1254,130,1,0,0,0,1255,1256, + 5,34,0,0,1256,1257,5,84,0,0,1257,1258,5,105,0,0,1258,1259,5,109, + 0,0,1259,1260,5,101,0,0,1260,1261,5,115,0,0,1261,1262,5,116,0,0, + 1262,1263,5,97,0,0,1263,1264,5,109,0,0,1264,1265,5,112,0,0,1265, + 1266,5,71,0,0,1266,1267,5,114,0,0,1267,1268,5,101,0,0,1268,1269, + 5,97,0,0,1269,1270,5,116,0,0,1270,1271,5,101,0,0,1271,1272,5,114, + 0,0,1272,1273,5,84,0,0,1273,1274,5,104,0,0,1274,1275,5,97,0,0,1275, + 1276,5,110,0,0,1276,1277,5,69,0,0,1277,1278,5,113,0,0,1278,1279, + 5,117,0,0,1279,1280,5,97,0,0,1280,1281,5,108,0,0,1281,1282,5,115, + 0,0,1282,1283,5,80,0,0,1283,1284,5,97,0,0,1284,1285,5,116,0,0,1285, + 1286,5,104,0,0,1286,1287,5,34,0,0,1287,132,1,0,0,0,1288,1289,5,34, + 0,0,1289,1290,5,84,0,0,1290,1291,5,105,0,0,1291,1292,5,109,0,0,1292, + 1293,5,101,0,0,1293,1294,5,115,0,0,1294,1295,5,116,0,0,1295,1296, + 5,97,0,0,1296,1297,5,109,0,0,1297,1298,5,112,0,0,1298,1299,5,76, + 0,0,1299,1300,5,101,0,0,1300,1301,5,115,0,0,1301,1302,5,115,0,0, + 1302,1303,5,84,0,0,1303,1304,5,104,0,0,1304,1305,5,97,0,0,1305,1306, + 5,110,0,0,1306,1307,5,34,0,0,1307,134,1,0,0,0,1308,1309,5,34,0,0, + 1309,1310,5,84,0,0,1310,1311,5,105,0,0,1311,1312,5,109,0,0,1312, + 1313,5,101,0,0,1313,1314,5,115,0,0,1314,1315,5,116,0,0,1315,1316, + 5,97,0,0,1316,1317,5,109,0,0,1317,1318,5,112,0,0,1318,1319,5,76, + 
0,0,1319,1320,5,101,0,0,1320,1321,5,115,0,0,1321,1322,5,115,0,0, + 1322,1323,5,84,0,0,1323,1324,5,104,0,0,1324,1325,5,97,0,0,1325,1326, + 5,110,0,0,1326,1327,5,80,0,0,1327,1328,5,97,0,0,1328,1329,5,116, + 0,0,1329,1330,5,104,0,0,1330,1331,5,34,0,0,1331,136,1,0,0,0,1332, + 1333,5,34,0,0,1333,1334,5,84,0,0,1334,1335,5,105,0,0,1335,1336,5, + 109,0,0,1336,1337,5,101,0,0,1337,1338,5,115,0,0,1338,1339,5,116, + 0,0,1339,1340,5,97,0,0,1340,1341,5,109,0,0,1341,1342,5,112,0,0,1342, + 1343,5,76,0,0,1343,1344,5,101,0,0,1344,1345,5,115,0,0,1345,1346, + 5,115,0,0,1346,1347,5,84,0,0,1347,1348,5,104,0,0,1348,1349,5,97, + 0,0,1349,1350,5,110,0,0,1350,1351,5,69,0,0,1351,1352,5,113,0,0,1352, + 1353,5,117,0,0,1353,1354,5,97,0,0,1354,1355,5,108,0,0,1355,1356, + 5,115,0,0,1356,1357,5,34,0,0,1357,138,1,0,0,0,1358,1359,5,34,0,0, + 1359,1360,5,84,0,0,1360,1361,5,105,0,0,1361,1362,5,109,0,0,1362, + 1363,5,101,0,0,1363,1364,5,115,0,0,1364,1365,5,116,0,0,1365,1366, + 5,97,0,0,1366,1367,5,109,0,0,1367,1368,5,112,0,0,1368,1369,5,76, + 0,0,1369,1370,5,101,0,0,1370,1371,5,115,0,0,1371,1372,5,115,0,0, + 1372,1373,5,84,0,0,1373,1374,5,104,0,0,1374,1375,5,97,0,0,1375,1376, + 5,110,0,0,1376,1377,5,69,0,0,1377,1378,5,113,0,0,1378,1379,5,117, + 0,0,1379,1380,5,97,0,0,1380,1381,5,108,0,0,1381,1382,5,115,0,0,1382, + 1383,5,80,0,0,1383,1384,5,97,0,0,1384,1385,5,116,0,0,1385,1386,5, + 104,0,0,1386,1387,5,34,0,0,1387,140,1,0,0,0,1388,1389,5,34,0,0,1389, + 1390,5,83,0,0,1390,1391,5,101,0,0,1391,1392,5,99,0,0,1392,1393,5, + 111,0,0,1393,1394,5,110,0,0,1394,1395,5,100,0,0,1395,1396,5,115, + 0,0,1396,1397,5,80,0,0,1397,1398,5,97,0,0,1398,1399,5,116,0,0,1399, + 1400,5,104,0,0,1400,1401,5,34,0,0,1401,142,1,0,0,0,1402,1403,5,34, + 0,0,1403,1404,5,83,0,0,1404,1405,5,101,0,0,1405,1406,5,99,0,0,1406, + 1407,5,111,0,0,1407,1408,5,110,0,0,1408,1409,5,100,0,0,1409,1410, + 5,115,0,0,1410,1411,5,34,0,0,1411,144,1,0,0,0,1412,1413,5,34,0,0, + 1413,1414,5,84,0,0,1414,1415,5,105,0,0,1415,1416,5,109,0,0,1416, + 1417,5,101,0,0,1417,1418,5,115,0,0,1418,1419,5,116,0,0,1419,1420, + 5,97,0,0,1420,1421,5,109,0,0,1421,1422,5,112,0,0,1422,1423,5,80, + 0,0,1423,1424,5,97,0,0,1424,1425,5,116,0,0,1425,1426,5,104,0,0,1426, + 1427,5,34,0,0,1427,146,1,0,0,0,1428,1429,5,34,0,0,1429,1430,5,84, + 0,0,1430,1431,5,105,0,0,1431,1432,5,109,0,0,1432,1433,5,101,0,0, + 1433,1434,5,115,0,0,1434,1435,5,116,0,0,1435,1436,5,97,0,0,1436, + 1437,5,109,0,0,1437,1438,5,112,0,0,1438,1439,5,34,0,0,1439,148,1, + 0,0,0,1440,1441,5,34,0,0,1441,1442,5,84,0,0,1442,1443,5,105,0,0, + 1443,1444,5,109,0,0,1444,1445,5,101,0,0,1445,1446,5,111,0,0,1446, + 1447,5,117,0,0,1447,1448,5,116,0,0,1448,1449,5,83,0,0,1449,1450, + 5,101,0,0,1450,1451,5,99,0,0,1451,1452,5,111,0,0,1452,1453,5,110, + 0,0,1453,1454,5,100,0,0,1454,1455,5,115,0,0,1455,1456,5,34,0,0,1456, + 150,1,0,0,0,1457,1458,5,34,0,0,1458,1459,5,84,0,0,1459,1460,5,105, + 0,0,1460,1461,5,109,0,0,1461,1462,5,101,0,0,1462,1463,5,111,0,0, + 1463,1464,5,117,0,0,1464,1465,5,116,0,0,1465,1466,5,83,0,0,1466, + 1467,5,101,0,0,1467,1468,5,99,0,0,1468,1469,5,111,0,0,1469,1470, + 5,110,0,0,1470,1471,5,100,0,0,1471,1472,5,115,0,0,1472,1473,5,80, + 0,0,1473,1474,5,97,0,0,1474,1475,5,116,0,0,1475,1476,5,104,0,0,1476, + 1477,5,34,0,0,1477,152,1,0,0,0,1478,1479,5,34,0,0,1479,1480,5,72, + 0,0,1480,1481,5,101,0,0,1481,1482,5,97,0,0,1482,1483,5,114,0,0,1483, + 1484,5,116,0,0,1484,1485,5,98,0,0,1485,1486,5,101,0,0,1486,1487, + 5,97,0,0,1487,1488,5,116,0,0,1488,1489,5,83,0,0,1489,1490,5,101, + 0,0,1490,1491,5,99,0,0,1491,1492,5,111,0,0,1492,1493,5,110,0,0,1493, + 
1494,5,100,0,0,1494,1495,5,115,0,0,1495,1496,5,34,0,0,1496,154,1, + 0,0,0,1497,1498,5,34,0,0,1498,1499,5,72,0,0,1499,1500,5,101,0,0, + 1500,1501,5,97,0,0,1501,1502,5,114,0,0,1502,1503,5,116,0,0,1503, + 1504,5,98,0,0,1504,1505,5,101,0,0,1505,1506,5,97,0,0,1506,1507,5, + 116,0,0,1507,1508,5,83,0,0,1508,1509,5,101,0,0,1509,1510,5,99,0, + 0,1510,1511,5,111,0,0,1511,1512,5,110,0,0,1512,1513,5,100,0,0,1513, + 1514,5,115,0,0,1514,1515,5,80,0,0,1515,1516,5,97,0,0,1516,1517,5, + 116,0,0,1517,1518,5,104,0,0,1518,1519,5,34,0,0,1519,156,1,0,0,0, + 1520,1521,5,34,0,0,1521,1522,5,80,0,0,1522,1523,5,114,0,0,1523,1524, + 5,111,0,0,1524,1525,5,99,0,0,1525,1526,5,101,0,0,1526,1527,5,115, + 0,0,1527,1528,5,115,0,0,1528,1529,5,111,0,0,1529,1530,5,114,0,0, + 1530,1531,5,67,0,0,1531,1532,5,111,0,0,1532,1533,5,110,0,0,1533, + 1534,5,102,0,0,1534,1535,5,105,0,0,1535,1536,5,103,0,0,1536,1537, + 5,34,0,0,1537,158,1,0,0,0,1538,1539,5,34,0,0,1539,1540,5,77,0,0, + 1540,1541,5,111,0,0,1541,1542,5,100,0,0,1542,1543,5,101,0,0,1543, + 1544,5,34,0,0,1544,160,1,0,0,0,1545,1546,5,34,0,0,1546,1547,5,73, + 0,0,1547,1548,5,78,0,0,1548,1549,5,76,0,0,1549,1550,5,73,0,0,1550, + 1551,5,78,0,0,1551,1552,5,69,0,0,1552,1553,5,34,0,0,1553,162,1,0, + 0,0,1554,1555,5,34,0,0,1555,1556,5,68,0,0,1556,1557,5,73,0,0,1557, + 1558,5,83,0,0,1558,1559,5,84,0,0,1559,1560,5,82,0,0,1560,1561,5, + 73,0,0,1561,1562,5,66,0,0,1562,1563,5,85,0,0,1563,1564,5,84,0,0, + 1564,1565,5,69,0,0,1565,1566,5,68,0,0,1566,1567,5,34,0,0,1567,164, + 1,0,0,0,1568,1569,5,34,0,0,1569,1570,5,69,0,0,1570,1571,5,120,0, + 0,1571,1572,5,101,0,0,1572,1573,5,99,0,0,1573,1574,5,117,0,0,1574, + 1575,5,116,0,0,1575,1576,5,105,0,0,1576,1577,5,111,0,0,1577,1578, + 5,110,0,0,1578,1579,5,84,0,0,1579,1580,5,121,0,0,1580,1581,5,112, + 0,0,1581,1582,5,101,0,0,1582,1583,5,34,0,0,1583,166,1,0,0,0,1584, + 1585,5,34,0,0,1585,1586,5,83,0,0,1586,1587,5,84,0,0,1587,1588,5, + 65,0,0,1588,1589,5,78,0,0,1589,1590,5,68,0,0,1590,1591,5,65,0,0, + 1591,1592,5,82,0,0,1592,1593,5,68,0,0,1593,1594,5,34,0,0,1594,168, + 1,0,0,0,1595,1596,5,34,0,0,1596,1597,5,73,0,0,1597,1598,5,116,0, + 0,1598,1599,5,101,0,0,1599,1600,5,109,0,0,1600,1601,5,80,0,0,1601, + 1602,5,114,0,0,1602,1603,5,111,0,0,1603,1604,5,99,0,0,1604,1605, + 5,101,0,0,1605,1606,5,115,0,0,1606,1607,5,115,0,0,1607,1608,5,111, + 0,0,1608,1609,5,114,0,0,1609,1610,5,34,0,0,1610,170,1,0,0,0,1611, + 1612,5,34,0,0,1612,1613,5,73,0,0,1613,1614,5,116,0,0,1614,1615,5, + 101,0,0,1615,1616,5,114,0,0,1616,1617,5,97,0,0,1617,1618,5,116,0, + 0,1618,1619,5,111,0,0,1619,1620,5,114,0,0,1620,1621,5,34,0,0,1621, + 172,1,0,0,0,1622,1623,5,34,0,0,1623,1624,5,73,0,0,1624,1625,5,116, + 0,0,1625,1626,5,101,0,0,1626,1627,5,109,0,0,1627,1628,5,83,0,0,1628, + 1629,5,101,0,0,1629,1630,5,108,0,0,1630,1631,5,101,0,0,1631,1632, + 5,99,0,0,1632,1633,5,116,0,0,1633,1634,5,111,0,0,1634,1635,5,114, + 0,0,1635,1636,5,34,0,0,1636,174,1,0,0,0,1637,1638,5,34,0,0,1638, + 1639,5,77,0,0,1639,1640,5,97,0,0,1640,1641,5,120,0,0,1641,1642,5, + 67,0,0,1642,1643,5,111,0,0,1643,1644,5,110,0,0,1644,1645,5,99,0, + 0,1645,1646,5,117,0,0,1646,1647,5,114,0,0,1647,1648,5,114,0,0,1648, + 1649,5,101,0,0,1649,1650,5,110,0,0,1650,1651,5,99,0,0,1651,1652, + 5,121,0,0,1652,1653,5,80,0,0,1653,1654,5,97,0,0,1654,1655,5,116, + 0,0,1655,1656,5,104,0,0,1656,1657,5,34,0,0,1657,176,1,0,0,0,1658, + 1659,5,34,0,0,1659,1660,5,77,0,0,1660,1661,5,97,0,0,1661,1662,5, + 120,0,0,1662,1663,5,67,0,0,1663,1664,5,111,0,0,1664,1665,5,110,0, + 0,1665,1666,5,99,0,0,1666,1667,5,117,0,0,1667,1668,5,114,0,0,1668, + 
1669,5,114,0,0,1669,1670,5,101,0,0,1670,1671,5,110,0,0,1671,1672, + 5,99,0,0,1672,1673,5,121,0,0,1673,1674,5,34,0,0,1674,178,1,0,0,0, + 1675,1676,5,34,0,0,1676,1677,5,82,0,0,1677,1678,5,101,0,0,1678,1679, + 5,115,0,0,1679,1680,5,111,0,0,1680,1681,5,117,0,0,1681,1682,5,114, + 0,0,1682,1683,5,99,0,0,1683,1684,5,101,0,0,1684,1685,5,34,0,0,1685, + 180,1,0,0,0,1686,1687,5,34,0,0,1687,1688,5,73,0,0,1688,1689,5,110, + 0,0,1689,1690,5,112,0,0,1690,1691,5,117,0,0,1691,1692,5,116,0,0, + 1692,1693,5,80,0,0,1693,1694,5,97,0,0,1694,1695,5,116,0,0,1695,1696, + 5,104,0,0,1696,1697,5,34,0,0,1697,182,1,0,0,0,1698,1699,5,34,0,0, + 1699,1700,5,79,0,0,1700,1701,5,117,0,0,1701,1702,5,116,0,0,1702, + 1703,5,112,0,0,1703,1704,5,117,0,0,1704,1705,5,116,0,0,1705,1706, + 5,80,0,0,1706,1707,5,97,0,0,1707,1708,5,116,0,0,1708,1709,5,104, + 0,0,1709,1710,5,34,0,0,1710,184,1,0,0,0,1711,1712,5,34,0,0,1712, + 1713,5,73,0,0,1713,1714,5,116,0,0,1714,1715,5,101,0,0,1715,1716, + 5,109,0,0,1716,1717,5,115,0,0,1717,1718,5,34,0,0,1718,186,1,0,0, + 0,1719,1720,5,34,0,0,1720,1721,5,73,0,0,1721,1722,5,116,0,0,1722, + 1723,5,101,0,0,1723,1724,5,109,0,0,1724,1725,5,115,0,0,1725,1726, + 5,80,0,0,1726,1727,5,97,0,0,1727,1728,5,116,0,0,1728,1729,5,104, + 0,0,1729,1730,5,34,0,0,1730,188,1,0,0,0,1731,1732,5,34,0,0,1732, + 1733,5,82,0,0,1733,1734,5,101,0,0,1734,1735,5,115,0,0,1735,1736, + 5,117,0,0,1736,1737,5,108,0,0,1737,1738,5,116,0,0,1738,1739,5,80, + 0,0,1739,1740,5,97,0,0,1740,1741,5,116,0,0,1741,1742,5,104,0,0,1742, + 1743,5,34,0,0,1743,190,1,0,0,0,1744,1745,5,34,0,0,1745,1746,5,82, + 0,0,1746,1747,5,101,0,0,1747,1748,5,115,0,0,1748,1749,5,117,0,0, + 1749,1750,5,108,0,0,1750,1751,5,116,0,0,1751,1752,5,34,0,0,1752, + 192,1,0,0,0,1753,1754,5,34,0,0,1754,1755,5,80,0,0,1755,1756,5,97, + 0,0,1756,1757,5,114,0,0,1757,1758,5,97,0,0,1758,1759,5,109,0,0,1759, + 1760,5,101,0,0,1760,1761,5,116,0,0,1761,1762,5,101,0,0,1762,1763, + 5,114,0,0,1763,1764,5,115,0,0,1764,1765,5,34,0,0,1765,194,1,0,0, + 0,1766,1767,5,34,0,0,1767,1768,5,67,0,0,1768,1769,5,114,0,0,1769, + 1770,5,101,0,0,1770,1771,5,100,0,0,1771,1772,5,101,0,0,1772,1773, + 5,110,0,0,1773,1774,5,116,0,0,1774,1775,5,105,0,0,1775,1776,5,97, + 0,0,1776,1777,5,108,0,0,1777,1778,5,115,0,0,1778,1779,5,34,0,0,1779, + 196,1,0,0,0,1780,1781,5,34,0,0,1781,1782,5,82,0,0,1782,1783,5,111, + 0,0,1783,1784,5,108,0,0,1784,1785,5,101,0,0,1785,1786,5,65,0,0,1786, + 1787,5,114,0,0,1787,1788,5,110,0,0,1788,1789,5,34,0,0,1789,198,1, + 0,0,0,1790,1791,5,34,0,0,1791,1792,5,82,0,0,1792,1793,5,111,0,0, + 1793,1794,5,108,0,0,1794,1795,5,101,0,0,1795,1796,5,65,0,0,1796, + 1797,5,114,0,0,1797,1798,5,110,0,0,1798,1799,5,46,0,0,1799,1800, + 5,36,0,0,1800,1801,5,34,0,0,1801,200,1,0,0,0,1802,1803,5,34,0,0, + 1803,1804,5,82,0,0,1804,1805,5,101,0,0,1805,1806,5,115,0,0,1806, + 1807,5,117,0,0,1807,1808,5,108,0,0,1808,1809,5,116,0,0,1809,1810, + 5,83,0,0,1810,1811,5,101,0,0,1811,1812,5,108,0,0,1812,1813,5,101, + 0,0,1813,1814,5,99,0,0,1814,1815,5,116,0,0,1815,1816,5,111,0,0,1816, + 1817,5,114,0,0,1817,1818,5,34,0,0,1818,202,1,0,0,0,1819,1820,5,34, + 0,0,1820,1821,5,73,0,0,1821,1822,5,116,0,0,1822,1823,5,101,0,0,1823, + 1824,5,109,0,0,1824,1825,5,82,0,0,1825,1826,5,101,0,0,1826,1827, + 5,97,0,0,1827,1828,5,100,0,0,1828,1829,5,101,0,0,1829,1830,5,114, + 0,0,1830,1831,5,34,0,0,1831,204,1,0,0,0,1832,1833,5,34,0,0,1833, + 1834,5,82,0,0,1834,1835,5,101,0,0,1835,1836,5,97,0,0,1836,1837,5, + 100,0,0,1837,1838,5,101,0,0,1838,1839,5,114,0,0,1839,1840,5,67,0, + 0,1840,1841,5,111,0,0,1841,1842,5,110,0,0,1842,1843,5,102,0,0,1843, + 
1844,5,105,0,0,1844,1845,5,103,0,0,1845,1846,5,34,0,0,1846,206,1, + 0,0,0,1847,1848,5,34,0,0,1848,1849,5,73,0,0,1849,1850,5,110,0,0, + 1850,1851,5,112,0,0,1851,1852,5,117,0,0,1852,1853,5,116,0,0,1853, + 1854,5,84,0,0,1854,1855,5,121,0,0,1855,1856,5,112,0,0,1856,1857, + 5,101,0,0,1857,1858,5,34,0,0,1858,208,1,0,0,0,1859,1860,5,34,0,0, + 1860,1861,5,67,0,0,1861,1862,5,83,0,0,1862,1863,5,86,0,0,1863,1864, + 5,72,0,0,1864,1865,5,101,0,0,1865,1866,5,97,0,0,1866,1867,5,100, + 0,0,1867,1868,5,101,0,0,1868,1869,5,114,0,0,1869,1870,5,76,0,0,1870, + 1871,5,111,0,0,1871,1872,5,99,0,0,1872,1873,5,97,0,0,1873,1874,5, + 116,0,0,1874,1875,5,105,0,0,1875,1876,5,111,0,0,1876,1877,5,110, + 0,0,1877,1878,5,34,0,0,1878,210,1,0,0,0,1879,1880,5,34,0,0,1880, + 1881,5,67,0,0,1881,1882,5,83,0,0,1882,1883,5,86,0,0,1883,1884,5, + 72,0,0,1884,1885,5,101,0,0,1885,1886,5,97,0,0,1886,1887,5,100,0, + 0,1887,1888,5,101,0,0,1888,1889,5,114,0,0,1889,1890,5,115,0,0,1890, + 1891,5,34,0,0,1891,212,1,0,0,0,1892,1893,5,34,0,0,1893,1894,5,77, + 0,0,1894,1895,5,97,0,0,1895,1896,5,120,0,0,1896,1897,5,73,0,0,1897, + 1898,5,116,0,0,1898,1899,5,101,0,0,1899,1900,5,109,0,0,1900,1901, + 5,115,0,0,1901,1902,5,34,0,0,1902,214,1,0,0,0,1903,1904,5,34,0,0, + 1904,1905,5,77,0,0,1905,1906,5,97,0,0,1906,1907,5,120,0,0,1907,1908, + 5,73,0,0,1908,1909,5,116,0,0,1909,1910,5,101,0,0,1910,1911,5,109, + 0,0,1911,1912,5,115,0,0,1912,1913,5,80,0,0,1913,1914,5,97,0,0,1914, + 1915,5,116,0,0,1915,1916,5,104,0,0,1916,1917,5,34,0,0,1917,216,1, + 0,0,0,1918,1919,5,34,0,0,1919,1920,5,84,0,0,1920,1921,5,111,0,0, + 1921,1922,5,108,0,0,1922,1923,5,101,0,0,1923,1924,5,114,0,0,1924, + 1925,5,97,0,0,1925,1926,5,116,0,0,1926,1927,5,101,0,0,1927,1928, + 5,100,0,0,1928,1929,5,70,0,0,1929,1930,5,97,0,0,1930,1931,5,105, + 0,0,1931,1932,5,108,0,0,1932,1933,5,117,0,0,1933,1934,5,114,0,0, + 1934,1935,5,101,0,0,1935,1936,5,67,0,0,1936,1937,5,111,0,0,1937, + 1938,5,117,0,0,1938,1939,5,110,0,0,1939,1940,5,116,0,0,1940,1941, + 5,34,0,0,1941,218,1,0,0,0,1942,1943,5,34,0,0,1943,1944,5,84,0,0, + 1944,1945,5,111,0,0,1945,1946,5,108,0,0,1946,1947,5,101,0,0,1947, + 1948,5,114,0,0,1948,1949,5,97,0,0,1949,1950,5,116,0,0,1950,1951, + 5,101,0,0,1951,1952,5,100,0,0,1952,1953,5,70,0,0,1953,1954,5,97, + 0,0,1954,1955,5,105,0,0,1955,1956,5,108,0,0,1956,1957,5,117,0,0, + 1957,1958,5,114,0,0,1958,1959,5,101,0,0,1959,1960,5,67,0,0,1960, + 1961,5,111,0,0,1961,1962,5,117,0,0,1962,1963,5,110,0,0,1963,1964, + 5,116,0,0,1964,1965,5,80,0,0,1965,1966,5,97,0,0,1966,1967,5,116, + 0,0,1967,1968,5,104,0,0,1968,1969,5,34,0,0,1969,220,1,0,0,0,1970, + 1971,5,34,0,0,1971,1972,5,84,0,0,1972,1973,5,111,0,0,1973,1974,5, + 108,0,0,1974,1975,5,101,0,0,1975,1976,5,114,0,0,1976,1977,5,97,0, + 0,1977,1978,5,116,0,0,1978,1979,5,101,0,0,1979,1980,5,100,0,0,1980, + 1981,5,70,0,0,1981,1982,5,97,0,0,1982,1983,5,105,0,0,1983,1984,5, + 108,0,0,1984,1985,5,117,0,0,1985,1986,5,114,0,0,1986,1987,5,101, + 0,0,1987,1988,5,80,0,0,1988,1989,5,101,0,0,1989,1990,5,114,0,0,1990, + 1991,5,99,0,0,1991,1992,5,101,0,0,1992,1993,5,110,0,0,1993,1994, + 5,116,0,0,1994,1995,5,97,0,0,1995,1996,5,103,0,0,1996,1997,5,101, + 0,0,1997,1998,5,34,0,0,1998,222,1,0,0,0,1999,2000,5,34,0,0,2000, + 2001,5,84,0,0,2001,2002,5,111,0,0,2002,2003,5,108,0,0,2003,2004, + 5,101,0,0,2004,2005,5,114,0,0,2005,2006,5,97,0,0,2006,2007,5,116, + 0,0,2007,2008,5,101,0,0,2008,2009,5,100,0,0,2009,2010,5,70,0,0,2010, + 2011,5,97,0,0,2011,2012,5,105,0,0,2012,2013,5,108,0,0,2013,2014, + 5,117,0,0,2014,2015,5,114,0,0,2015,2016,5,101,0,0,2016,2017,5,80, + 
0,0,2017,2018,5,101,0,0,2018,2019,5,114,0,0,2019,2020,5,99,0,0,2020, + 2021,5,101,0,0,2021,2022,5,110,0,0,2022,2023,5,116,0,0,2023,2024, + 5,97,0,0,2024,2025,5,103,0,0,2025,2026,5,101,0,0,2026,2027,5,80, + 0,0,2027,2028,5,97,0,0,2028,2029,5,116,0,0,2029,2030,5,104,0,0,2030, + 2031,5,34,0,0,2031,224,1,0,0,0,2032,2033,5,34,0,0,2033,2034,5,76, + 0,0,2034,2035,5,97,0,0,2035,2036,5,98,0,0,2036,2037,5,101,0,0,2037, + 2038,5,108,0,0,2038,2039,5,34,0,0,2039,226,1,0,0,0,2040,2041,5,34, + 0,0,2041,2042,5,82,0,0,2042,2043,5,101,0,0,2043,2044,5,115,0,0,2044, + 2045,5,117,0,0,2045,2046,5,108,0,0,2046,2047,5,116,0,0,2047,2048, + 5,87,0,0,2048,2049,5,114,0,0,2049,2050,5,105,0,0,2050,2051,5,116, + 0,0,2051,2052,5,101,0,0,2052,2053,5,114,0,0,2053,2054,5,34,0,0,2054, + 228,1,0,0,0,2055,2056,5,34,0,0,2056,2057,5,78,0,0,2057,2058,5,101, + 0,0,2058,2059,5,120,0,0,2059,2060,5,116,0,0,2060,2061,5,34,0,0,2061, + 230,1,0,0,0,2062,2063,5,34,0,0,2063,2064,5,69,0,0,2064,2065,5,110, + 0,0,2065,2066,5,100,0,0,2066,2067,5,34,0,0,2067,232,1,0,0,0,2068, + 2069,5,34,0,0,2069,2070,5,67,0,0,2070,2071,5,97,0,0,2071,2072,5, + 117,0,0,2072,2073,5,115,0,0,2073,2074,5,101,0,0,2074,2075,5,34,0, + 0,2075,234,1,0,0,0,2076,2077,5,34,0,0,2077,2078,5,67,0,0,2078,2079, + 5,97,0,0,2079,2080,5,117,0,0,2080,2081,5,115,0,0,2081,2082,5,101, + 0,0,2082,2083,5,80,0,0,2083,2084,5,97,0,0,2084,2085,5,116,0,0,2085, + 2086,5,104,0,0,2086,2087,5,34,0,0,2087,236,1,0,0,0,2088,2089,5,34, + 0,0,2089,2090,5,69,0,0,2090,2091,5,114,0,0,2091,2092,5,114,0,0,2092, + 2093,5,111,0,0,2093,2094,5,114,0,0,2094,2095,5,34,0,0,2095,238,1, + 0,0,0,2096,2097,5,34,0,0,2097,2098,5,69,0,0,2098,2099,5,114,0,0, + 2099,2100,5,114,0,0,2100,2101,5,111,0,0,2101,2102,5,114,0,0,2102, + 2103,5,80,0,0,2103,2104,5,97,0,0,2104,2105,5,116,0,0,2105,2106,5, + 104,0,0,2106,2107,5,34,0,0,2107,240,1,0,0,0,2108,2109,5,34,0,0,2109, + 2110,5,82,0,0,2110,2111,5,101,0,0,2111,2112,5,116,0,0,2112,2113, + 5,114,0,0,2113,2114,5,121,0,0,2114,2115,5,34,0,0,2115,242,1,0,0, + 0,2116,2117,5,34,0,0,2117,2118,5,69,0,0,2118,2119,5,114,0,0,2119, + 2120,5,114,0,0,2120,2121,5,111,0,0,2121,2122,5,114,0,0,2122,2123, + 5,69,0,0,2123,2124,5,113,0,0,2124,2125,5,117,0,0,2125,2126,5,97, + 0,0,2126,2127,5,108,0,0,2127,2128,5,115,0,0,2128,2129,5,34,0,0,2129, + 244,1,0,0,0,2130,2131,5,34,0,0,2131,2132,5,73,0,0,2132,2133,5,110, + 0,0,2133,2134,5,116,0,0,2134,2135,5,101,0,0,2135,2136,5,114,0,0, + 2136,2137,5,118,0,0,2137,2138,5,97,0,0,2138,2139,5,108,0,0,2139, + 2140,5,83,0,0,2140,2141,5,101,0,0,2141,2142,5,99,0,0,2142,2143,5, + 111,0,0,2143,2144,5,110,0,0,2144,2145,5,100,0,0,2145,2146,5,115, + 0,0,2146,2147,5,34,0,0,2147,246,1,0,0,0,2148,2149,5,34,0,0,2149, + 2150,5,77,0,0,2150,2151,5,97,0,0,2151,2152,5,120,0,0,2152,2153,5, + 65,0,0,2153,2154,5,116,0,0,2154,2155,5,116,0,0,2155,2156,5,101,0, + 0,2156,2157,5,109,0,0,2157,2158,5,112,0,0,2158,2159,5,116,0,0,2159, + 2160,5,115,0,0,2160,2161,5,34,0,0,2161,248,1,0,0,0,2162,2163,5,34, + 0,0,2163,2164,5,66,0,0,2164,2165,5,97,0,0,2165,2166,5,99,0,0,2166, + 2167,5,107,0,0,2167,2168,5,111,0,0,2168,2169,5,102,0,0,2169,2170, + 5,102,0,0,2170,2171,5,82,0,0,2171,2172,5,97,0,0,2172,2173,5,116, + 0,0,2173,2174,5,101,0,0,2174,2175,5,34,0,0,2175,250,1,0,0,0,2176, + 2177,5,34,0,0,2177,2178,5,77,0,0,2178,2179,5,97,0,0,2179,2180,5, + 120,0,0,2180,2181,5,68,0,0,2181,2182,5,101,0,0,2182,2183,5,108,0, + 0,2183,2184,5,97,0,0,2184,2185,5,121,0,0,2185,2186,5,83,0,0,2186, + 2187,5,101,0,0,2187,2188,5,99,0,0,2188,2189,5,111,0,0,2189,2190, + 
5,110,0,0,2190,2191,5,100,0,0,2191,2192,5,115,0,0,2192,2193,5,34, + 0,0,2193,252,1,0,0,0,2194,2195,5,34,0,0,2195,2196,5,74,0,0,2196, + 2197,5,105,0,0,2197,2198,5,116,0,0,2198,2199,5,116,0,0,2199,2200, + 5,101,0,0,2200,2201,5,114,0,0,2201,2202,5,83,0,0,2202,2203,5,116, + 0,0,2203,2204,5,114,0,0,2204,2205,5,97,0,0,2205,2206,5,116,0,0,2206, + 2207,5,101,0,0,2207,2208,5,103,0,0,2208,2209,5,121,0,0,2209,2210, + 5,34,0,0,2210,254,1,0,0,0,2211,2212,5,34,0,0,2212,2213,5,70,0,0, + 2213,2214,5,85,0,0,2214,2215,5,76,0,0,2215,2216,5,76,0,0,2216,2217, + 5,34,0,0,2217,256,1,0,0,0,2218,2219,5,34,0,0,2219,2220,5,78,0,0, + 2220,2221,5,79,0,0,2221,2222,5,78,0,0,2222,2223,5,69,0,0,2223,2224, + 5,34,0,0,2224,258,1,0,0,0,2225,2226,5,34,0,0,2226,2227,5,67,0,0, + 2227,2228,5,97,0,0,2228,2229,5,116,0,0,2229,2230,5,99,0,0,2230,2231, + 5,104,0,0,2231,2232,5,34,0,0,2232,260,1,0,0,0,2233,2234,5,34,0,0, + 2234,2235,5,81,0,0,2235,2236,5,117,0,0,2236,2237,5,101,0,0,2237, + 2238,5,114,0,0,2238,2239,5,121,0,0,2239,2240,5,76,0,0,2240,2241, + 5,97,0,0,2241,2242,5,110,0,0,2242,2243,5,103,0,0,2243,2244,5,117, + 0,0,2244,2245,5,97,0,0,2245,2246,5,103,0,0,2246,2247,5,101,0,0,2247, + 2248,5,34,0,0,2248,262,1,0,0,0,2249,2250,5,34,0,0,2250,2251,5,74, + 0,0,2251,2252,5,83,0,0,2252,2253,5,79,0,0,2253,2254,5,78,0,0,2254, + 2255,5,80,0,0,2255,2256,5,97,0,0,2256,2257,5,116,0,0,2257,2258,5, + 104,0,0,2258,2259,5,34,0,0,2259,264,1,0,0,0,2260,2261,5,34,0,0,2261, + 2262,5,74,0,0,2262,2263,5,83,0,0,2263,2264,5,79,0,0,2264,2265,5, + 78,0,0,2265,2266,5,97,0,0,2266,2267,5,116,0,0,2267,2268,5,97,0,0, + 2268,2269,5,34,0,0,2269,266,1,0,0,0,2270,2271,5,34,0,0,2271,2272, + 5,65,0,0,2272,2273,5,115,0,0,2273,2274,5,115,0,0,2274,2275,5,105, + 0,0,2275,2276,5,103,0,0,2276,2277,5,110,0,0,2277,2278,5,34,0,0,2278, + 268,1,0,0,0,2279,2280,5,34,0,0,2280,2281,5,79,0,0,2281,2282,5,117, + 0,0,2282,2283,5,116,0,0,2283,2284,5,112,0,0,2284,2285,5,117,0,0, + 2285,2286,5,116,0,0,2286,2287,5,34,0,0,2287,270,1,0,0,0,2288,2289, + 5,34,0,0,2289,2290,5,65,0,0,2290,2291,5,114,0,0,2291,2292,5,103, + 0,0,2292,2293,5,117,0,0,2293,2294,5,109,0,0,2294,2295,5,101,0,0, + 2295,2296,5,110,0,0,2296,2297,5,116,0,0,2297,2298,5,115,0,0,2298, + 2299,5,34,0,0,2299,272,1,0,0,0,2300,2301,5,34,0,0,2301,2302,5,83, + 0,0,2302,2303,5,116,0,0,2303,2304,5,97,0,0,2304,2305,5,116,0,0,2305, + 2306,5,101,0,0,2306,2307,5,115,0,0,2307,2308,5,46,0,0,2308,2309, + 5,65,0,0,2309,2310,5,76,0,0,2310,2311,5,76,0,0,2311,2312,5,34,0, + 0,2312,274,1,0,0,0,2313,2314,5,34,0,0,2314,2315,5,83,0,0,2315,2316, + 5,116,0,0,2316,2317,5,97,0,0,2317,2318,5,116,0,0,2318,2319,5,101, + 0,0,2319,2320,5,115,0,0,2320,2321,5,46,0,0,2321,2322,5,68,0,0,2322, + 2323,5,97,0,0,2323,2324,5,116,0,0,2324,2325,5,97,0,0,2325,2326,5, + 76,0,0,2326,2327,5,105,0,0,2327,2328,5,109,0,0,2328,2329,5,105,0, + 0,2329,2330,5,116,0,0,2330,2331,5,69,0,0,2331,2332,5,120,0,0,2332, + 2333,5,99,0,0,2333,2334,5,101,0,0,2334,2335,5,101,0,0,2335,2336, + 5,100,0,0,2336,2337,5,101,0,0,2337,2338,5,100,0,0,2338,2339,5,34, + 0,0,2339,276,1,0,0,0,2340,2341,5,34,0,0,2341,2342,5,83,0,0,2342, + 2343,5,116,0,0,2343,2344,5,97,0,0,2344,2345,5,116,0,0,2345,2346, + 5,101,0,0,2346,2347,5,115,0,0,2347,2348,5,46,0,0,2348,2349,5,72, + 0,0,2349,2350,5,101,0,0,2350,2351,5,97,0,0,2351,2352,5,114,0,0,2352, + 2353,5,116,0,0,2353,2354,5,98,0,0,2354,2355,5,101,0,0,2355,2356, + 5,97,0,0,2356,2357,5,116,0,0,2357,2358,5,84,0,0,2358,2359,5,105, + 0,0,2359,2360,5,109,0,0,2360,2361,5,101,0,0,2361,2362,5,111,0,0, + 2362,2363,5,117,0,0,2363,2364,5,116,0,0,2364,2365,5,34,0,0,2365, + 
278,1,0,0,0,2366,2367,5,34,0,0,2367,2368,5,83,0,0,2368,2369,5,116, + 0,0,2369,2370,5,97,0,0,2370,2371,5,116,0,0,2371,2372,5,101,0,0,2372, + 2373,5,115,0,0,2373,2374,5,46,0,0,2374,2375,5,84,0,0,2375,2376,5, + 105,0,0,2376,2377,5,109,0,0,2377,2378,5,101,0,0,2378,2379,5,111, + 0,0,2379,2380,5,117,0,0,2380,2381,5,116,0,0,2381,2382,5,34,0,0,2382, + 280,1,0,0,0,2383,2384,5,34,0,0,2384,2385,5,83,0,0,2385,2386,5,116, + 0,0,2386,2387,5,97,0,0,2387,2388,5,116,0,0,2388,2389,5,101,0,0,2389, + 2390,5,115,0,0,2390,2391,5,46,0,0,2391,2392,5,84,0,0,2392,2393,5, + 97,0,0,2393,2394,5,115,0,0,2394,2395,5,107,0,0,2395,2396,5,70,0, + 0,2396,2397,5,97,0,0,2397,2398,5,105,0,0,2398,2399,5,108,0,0,2399, + 2400,5,101,0,0,2400,2401,5,100,0,0,2401,2402,5,34,0,0,2402,282,1, + 0,0,0,2403,2404,5,34,0,0,2404,2405,5,83,0,0,2405,2406,5,116,0,0, + 2406,2407,5,97,0,0,2407,2408,5,116,0,0,2408,2409,5,101,0,0,2409, + 2410,5,115,0,0,2410,2411,5,46,0,0,2411,2412,5,80,0,0,2412,2413,5, + 101,0,0,2413,2414,5,114,0,0,2414,2415,5,109,0,0,2415,2416,5,105, + 0,0,2416,2417,5,115,0,0,2417,2418,5,115,0,0,2418,2419,5,105,0,0, + 2419,2420,5,111,0,0,2420,2421,5,110,0,0,2421,2422,5,115,0,0,2422, + 2423,5,34,0,0,2423,284,1,0,0,0,2424,2425,5,34,0,0,2425,2426,5,83, + 0,0,2426,2427,5,116,0,0,2427,2428,5,97,0,0,2428,2429,5,116,0,0,2429, + 2430,5,101,0,0,2430,2431,5,115,0,0,2431,2432,5,46,0,0,2432,2433, + 5,82,0,0,2433,2434,5,101,0,0,2434,2435,5,115,0,0,2435,2436,5,117, + 0,0,2436,2437,5,108,0,0,2437,2438,5,116,0,0,2438,2439,5,80,0,0,2439, + 2440,5,97,0,0,2440,2441,5,116,0,0,2441,2442,5,104,0,0,2442,2443, + 5,77,0,0,2443,2444,5,97,0,0,2444,2445,5,116,0,0,2445,2446,5,99,0, + 0,2446,2447,5,104,0,0,2447,2448,5,70,0,0,2448,2449,5,97,0,0,2449, + 2450,5,105,0,0,2450,2451,5,108,0,0,2451,2452,5,117,0,0,2452,2453, + 5,114,0,0,2453,2454,5,101,0,0,2454,2455,5,34,0,0,2455,286,1,0,0, + 0,2456,2457,5,34,0,0,2457,2458,5,83,0,0,2458,2459,5,116,0,0,2459, + 2460,5,97,0,0,2460,2461,5,116,0,0,2461,2462,5,101,0,0,2462,2463, + 5,115,0,0,2463,2464,5,46,0,0,2464,2465,5,80,0,0,2465,2466,5,97,0, + 0,2466,2467,5,114,0,0,2467,2468,5,97,0,0,2468,2469,5,109,0,0,2469, + 2470,5,101,0,0,2470,2471,5,116,0,0,2471,2472,5,101,0,0,2472,2473, + 5,114,0,0,2473,2474,5,80,0,0,2474,2475,5,97,0,0,2475,2476,5,116, + 0,0,2476,2477,5,104,0,0,2477,2478,5,70,0,0,2478,2479,5,97,0,0,2479, + 2480,5,105,0,0,2480,2481,5,108,0,0,2481,2482,5,117,0,0,2482,2483, + 5,114,0,0,2483,2484,5,101,0,0,2484,2485,5,34,0,0,2485,288,1,0,0, + 0,2486,2487,5,34,0,0,2487,2488,5,83,0,0,2488,2489,5,116,0,0,2489, + 2490,5,97,0,0,2490,2491,5,116,0,0,2491,2492,5,101,0,0,2492,2493, + 5,115,0,0,2493,2494,5,46,0,0,2494,2495,5,66,0,0,2495,2496,5,114, + 0,0,2496,2497,5,97,0,0,2497,2498,5,110,0,0,2498,2499,5,99,0,0,2499, + 2500,5,104,0,0,2500,2501,5,70,0,0,2501,2502,5,97,0,0,2502,2503,5, + 105,0,0,2503,2504,5,108,0,0,2504,2505,5,101,0,0,2505,2506,5,100, + 0,0,2506,2507,5,34,0,0,2507,290,1,0,0,0,2508,2509,5,34,0,0,2509, + 2510,5,83,0,0,2510,2511,5,116,0,0,2511,2512,5,97,0,0,2512,2513,5, + 116,0,0,2513,2514,5,101,0,0,2514,2515,5,115,0,0,2515,2516,5,46,0, + 0,2516,2517,5,78,0,0,2517,2518,5,111,0,0,2518,2519,5,67,0,0,2519, + 2520,5,104,0,0,2520,2521,5,111,0,0,2521,2522,5,105,0,0,2522,2523, + 5,99,0,0,2523,2524,5,101,0,0,2524,2525,5,77,0,0,2525,2526,5,97,0, + 0,2526,2527,5,116,0,0,2527,2528,5,99,0,0,2528,2529,5,104,0,0,2529, + 2530,5,101,0,0,2530,2531,5,100,0,0,2531,2532,5,34,0,0,2532,292,1, + 0,0,0,2533,2534,5,34,0,0,2534,2535,5,83,0,0,2535,2536,5,116,0,0, + 2536,2537,5,97,0,0,2537,2538,5,116,0,0,2538,2539,5,101,0,0,2539, + 
2540,5,115,0,0,2540,2541,5,46,0,0,2541,2542,5,73,0,0,2542,2543,5, + 110,0,0,2543,2544,5,116,0,0,2544,2545,5,114,0,0,2545,2546,5,105, + 0,0,2546,2547,5,110,0,0,2547,2548,5,115,0,0,2548,2549,5,105,0,0, + 2549,2550,5,99,0,0,2550,2551,5,70,0,0,2551,2552,5,97,0,0,2552,2553, + 5,105,0,0,2553,2554,5,108,0,0,2554,2555,5,117,0,0,2555,2556,5,114, + 0,0,2556,2557,5,101,0,0,2557,2558,5,34,0,0,2558,294,1,0,0,0,2559, + 2560,5,34,0,0,2560,2561,5,83,0,0,2561,2562,5,116,0,0,2562,2563,5, + 97,0,0,2563,2564,5,116,0,0,2564,2565,5,101,0,0,2565,2566,5,115,0, + 0,2566,2567,5,46,0,0,2567,2568,5,69,0,0,2568,2569,5,120,0,0,2569, + 2570,5,99,0,0,2570,2571,5,101,0,0,2571,2572,5,101,0,0,2572,2573, + 5,100,0,0,2573,2574,5,84,0,0,2574,2575,5,111,0,0,2575,2576,5,108, + 0,0,2576,2577,5,101,0,0,2577,2578,5,114,0,0,2578,2579,5,97,0,0,2579, + 2580,5,116,0,0,2580,2581,5,101,0,0,2581,2582,5,100,0,0,2582,2583, + 5,70,0,0,2583,2584,5,97,0,0,2584,2585,5,105,0,0,2585,2586,5,108, + 0,0,2586,2587,5,117,0,0,2587,2588,5,114,0,0,2588,2589,5,101,0,0, + 2589,2590,5,84,0,0,2590,2591,5,104,0,0,2591,2592,5,114,0,0,2592, + 2593,5,101,0,0,2593,2594,5,115,0,0,2594,2595,5,104,0,0,2595,2596, + 5,111,0,0,2596,2597,5,108,0,0,2597,2598,5,100,0,0,2598,2599,5,34, + 0,0,2599,296,1,0,0,0,2600,2601,5,34,0,0,2601,2602,5,83,0,0,2602, + 2603,5,116,0,0,2603,2604,5,97,0,0,2604,2605,5,116,0,0,2605,2606, + 5,101,0,0,2606,2607,5,115,0,0,2607,2608,5,46,0,0,2608,2609,5,73, + 0,0,2609,2610,5,116,0,0,2610,2611,5,101,0,0,2611,2612,5,109,0,0, + 2612,2613,5,82,0,0,2613,2614,5,101,0,0,2614,2615,5,97,0,0,2615,2616, + 5,100,0,0,2616,2617,5,101,0,0,2617,2618,5,114,0,0,2618,2619,5,70, + 0,0,2619,2620,5,97,0,0,2620,2621,5,105,0,0,2621,2622,5,108,0,0,2622, + 2623,5,101,0,0,2623,2624,5,100,0,0,2624,2625,5,34,0,0,2625,298,1, + 0,0,0,2626,2627,5,34,0,0,2627,2628,5,83,0,0,2628,2629,5,116,0,0, + 2629,2630,5,97,0,0,2630,2631,5,116,0,0,2631,2632,5,101,0,0,2632, + 2633,5,115,0,0,2633,2634,5,46,0,0,2634,2635,5,82,0,0,2635,2636,5, + 101,0,0,2636,2637,5,115,0,0,2637,2638,5,117,0,0,2638,2639,5,108, + 0,0,2639,2640,5,116,0,0,2640,2641,5,87,0,0,2641,2642,5,114,0,0,2642, + 2643,5,105,0,0,2643,2644,5,116,0,0,2644,2645,5,101,0,0,2645,2646, + 5,114,0,0,2646,2647,5,70,0,0,2647,2648,5,97,0,0,2648,2649,5,105, + 0,0,2649,2650,5,108,0,0,2650,2651,5,101,0,0,2651,2652,5,100,0,0, + 2652,2653,5,34,0,0,2653,300,1,0,0,0,2654,2655,5,34,0,0,2655,2656, + 5,83,0,0,2656,2657,5,116,0,0,2657,2658,5,97,0,0,2658,2659,5,116, + 0,0,2659,2660,5,101,0,0,2660,2661,5,115,0,0,2661,2662,5,46,0,0,2662, + 2663,5,81,0,0,2663,2664,5,117,0,0,2664,2665,5,101,0,0,2665,2666, + 5,114,0,0,2666,2667,5,121,0,0,2667,2668,5,69,0,0,2668,2669,5,118, + 0,0,2669,2670,5,97,0,0,2670,2671,5,108,0,0,2671,2672,5,117,0,0,2672, + 2673,5,97,0,0,2673,2674,5,116,0,0,2674,2675,5,105,0,0,2675,2676, + 5,111,0,0,2676,2677,5,110,0,0,2677,2678,5,69,0,0,2678,2679,5,114, + 0,0,2679,2680,5,114,0,0,2680,2681,5,111,0,0,2681,2682,5,114,0,0, + 2682,2683,5,34,0,0,2683,302,1,0,0,0,2684,2685,5,34,0,0,2685,2686, + 5,83,0,0,2686,2687,5,116,0,0,2687,2688,5,97,0,0,2688,2689,5,116, + 0,0,2689,2690,5,101,0,0,2690,2691,5,115,0,0,2691,2692,5,46,0,0,2692, + 2693,5,82,0,0,2693,2694,5,117,0,0,2694,2695,5,110,0,0,2695,2696, + 5,116,0,0,2696,2697,5,105,0,0,2697,2698,5,109,0,0,2698,2699,5,101, + 0,0,2699,2700,5,34,0,0,2700,304,1,0,0,0,2701,2706,5,34,0,0,2702, + 2705,3,319,159,0,2703,2705,3,325,162,0,2704,2702,1,0,0,0,2704,2703, + 1,0,0,0,2705,2708,1,0,0,0,2706,2704,1,0,0,0,2706,2707,1,0,0,0,2707, + 2709,1,0,0,0,2708,2706,1,0,0,0,2709,2710,5,46,0,0,2710,2711,5,36, + 
0,0,2711,2712,5,34,0,0,2712,306,1,0,0,0,2713,2714,5,34,0,0,2714, + 2715,5,36,0,0,2715,2716,5,36,0,0,2716,2721,1,0,0,0,2717,2720,3,319, + 159,0,2718,2720,3,325,162,0,2719,2717,1,0,0,0,2719,2718,1,0,0,0, + 2720,2723,1,0,0,0,2721,2719,1,0,0,0,2721,2722,1,0,0,0,2722,2724, + 1,0,0,0,2723,2721,1,0,0,0,2724,2725,5,34,0,0,2725,308,1,0,0,0,2726, + 2727,5,34,0,0,2727,2728,5,36,0,0,2728,2742,5,34,0,0,2729,2730,5, + 34,0,0,2730,2731,5,36,0,0,2731,2732,1,0,0,0,2732,2737,7,0,0,0,2733, + 2736,3,319,159,0,2734,2736,3,325,162,0,2735,2733,1,0,0,0,2735,2734, + 1,0,0,0,2736,2739,1,0,0,0,2737,2735,1,0,0,0,2737,2738,1,0,0,0,2738, + 2740,1,0,0,0,2739,2737,1,0,0,0,2740,2742,5,34,0,0,2741,2726,1,0, + 0,0,2741,2729,1,0,0,0,2742,310,1,0,0,0,2743,2744,5,34,0,0,2744,2745, + 5,36,0,0,2745,2746,1,0,0,0,2746,2751,7,1,0,0,2747,2750,3,319,159, + 0,2748,2750,3,325,162,0,2749,2747,1,0,0,0,2749,2748,1,0,0,0,2750, + 2753,1,0,0,0,2751,2749,1,0,0,0,2751,2752,1,0,0,0,2752,2754,1,0,0, + 0,2753,2751,1,0,0,0,2754,2755,5,34,0,0,2755,312,1,0,0,0,2756,2757, + 5,34,0,0,2757,2758,5,83,0,0,2758,2759,5,116,0,0,2759,2760,5,97,0, + 0,2760,2761,5,116,0,0,2761,2762,5,101,0,0,2762,2763,5,115,0,0,2763, + 2764,5,46,0,0,2764,2767,1,0,0,0,2765,2768,3,319,159,0,2766,2768, + 3,325,162,0,2767,2765,1,0,0,0,2767,2766,1,0,0,0,2768,2769,1,0,0, + 0,2769,2767,1,0,0,0,2769,2770,1,0,0,0,2770,2771,1,0,0,0,2771,2776, + 5,40,0,0,2772,2775,3,319,159,0,2773,2775,3,325,162,0,2774,2772,1, + 0,0,0,2774,2773,1,0,0,0,2775,2778,1,0,0,0,2776,2774,1,0,0,0,2776, + 2777,1,0,0,0,2777,2779,1,0,0,0,2778,2776,1,0,0,0,2779,2780,5,41, + 0,0,2780,2781,5,34,0,0,2781,314,1,0,0,0,2782,2787,3,327,163,0,2783, + 2786,3,319,159,0,2784,2786,3,325,162,0,2785,2783,1,0,0,0,2785,2784, + 1,0,0,0,2786,2789,1,0,0,0,2787,2785,1,0,0,0,2787,2788,1,0,0,0,2788, + 2790,1,0,0,0,2789,2787,1,0,0,0,2790,2791,3,329,164,0,2791,316,1, + 0,0,0,2792,2797,5,34,0,0,2793,2796,3,319,159,0,2794,2796,3,325,162, + 0,2795,2793,1,0,0,0,2795,2794,1,0,0,0,2796,2799,1,0,0,0,2797,2795, + 1,0,0,0,2797,2798,1,0,0,0,2798,2800,1,0,0,0,2799,2797,1,0,0,0,2800, + 2801,5,34,0,0,2801,318,1,0,0,0,2802,2805,5,92,0,0,2803,2806,7,2, + 0,0,2804,2806,3,321,160,0,2805,2803,1,0,0,0,2805,2804,1,0,0,0,2806, + 320,1,0,0,0,2807,2808,5,117,0,0,2808,2809,3,323,161,0,2809,2810, + 3,323,161,0,2810,2811,3,323,161,0,2811,2812,3,323,161,0,2812,322, + 1,0,0,0,2813,2814,7,3,0,0,2814,324,1,0,0,0,2815,2816,8,4,0,0,2816, + 326,1,0,0,0,2817,2818,5,34,0,0,2818,2819,5,123,0,0,2819,2820,5,37, + 0,0,2820,328,1,0,0,0,2821,2822,5,37,0,0,2822,2823,5,125,0,0,2823, + 2824,5,34,0,0,2824,330,1,0,0,0,2825,2834,5,48,0,0,2826,2830,7,5, + 0,0,2827,2829,7,6,0,0,2828,2827,1,0,0,0,2829,2832,1,0,0,0,2830,2828, + 1,0,0,0,2830,2831,1,0,0,0,2831,2834,1,0,0,0,2832,2830,1,0,0,0,2833, + 2825,1,0,0,0,2833,2826,1,0,0,0,2834,332,1,0,0,0,2835,2837,5,45,0, + 0,2836,2835,1,0,0,0,2836,2837,1,0,0,0,2837,2838,1,0,0,0,2838,2845, + 3,331,165,0,2839,2841,5,46,0,0,2840,2842,7,6,0,0,2841,2840,1,0,0, + 0,2842,2843,1,0,0,0,2843,2841,1,0,0,0,2843,2844,1,0,0,0,2844,2846, + 1,0,0,0,2845,2839,1,0,0,0,2845,2846,1,0,0,0,2846,2848,1,0,0,0,2847, + 2849,3,335,167,0,2848,2847,1,0,0,0,2848,2849,1,0,0,0,2849,334,1, + 0,0,0,2850,2852,7,7,0,0,2851,2853,7,8,0,0,2852,2851,1,0,0,0,2852, + 2853,1,0,0,0,2853,2854,1,0,0,0,2854,2855,3,331,165,0,2855,336,1, + 0,0,0,2856,2858,7,9,0,0,2857,2856,1,0,0,0,2858,2859,1,0,0,0,2859, + 2857,1,0,0,0,2859,2860,1,0,0,0,2860,2861,1,0,0,0,2861,2862,6,168, + 0,0,2862,338,1,0,0,0,27,0,2704,2706,2719,2721,2735,2737,2741,2749, + 
2751,2767,2769,2774,2776,2785,2787,2795,2797,2805,2830,2833,2836, + 2843,2845,2848,2852,2859,1,6,0,0 + ] + +class ASLLexer(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + COMMA = 1 + COLON = 2 + LBRACK = 3 + RBRACK = 4 + LBRACE = 5 + RBRACE = 6 + TRUE = 7 + FALSE = 8 + NULL = 9 + COMMENT = 10 + STATES = 11 + STARTAT = 12 + NEXTSTATE = 13 + VERSION = 14 + TYPE = 15 + TASK = 16 + CHOICE = 17 + FAIL = 18 + SUCCEED = 19 + PASS = 20 + WAIT = 21 + PARALLEL = 22 + MAP = 23 + CHOICES = 24 + CONDITION = 25 + VARIABLE = 26 + DEFAULT = 27 + BRANCHES = 28 + AND = 29 + BOOLEANEQUALS = 30 + BOOLEANQUALSPATH = 31 + ISBOOLEAN = 32 + ISNULL = 33 + ISNUMERIC = 34 + ISPRESENT = 35 + ISSTRING = 36 + ISTIMESTAMP = 37 + NOT = 38 + NUMERICEQUALS = 39 + NUMERICEQUALSPATH = 40 + NUMERICGREATERTHAN = 41 + NUMERICGREATERTHANPATH = 42 + NUMERICGREATERTHANEQUALS = 43 + NUMERICGREATERTHANEQUALSPATH = 44 + NUMERICLESSTHAN = 45 + NUMERICLESSTHANPATH = 46 + NUMERICLESSTHANEQUALS = 47 + NUMERICLESSTHANEQUALSPATH = 48 + OR = 49 + STRINGEQUALS = 50 + STRINGEQUALSPATH = 51 + STRINGGREATERTHAN = 52 + STRINGGREATERTHANPATH = 53 + STRINGGREATERTHANEQUALS = 54 + STRINGGREATERTHANEQUALSPATH = 55 + STRINGLESSTHAN = 56 + STRINGLESSTHANPATH = 57 + STRINGLESSTHANEQUALS = 58 + STRINGLESSTHANEQUALSPATH = 59 + STRINGMATCHES = 60 + TIMESTAMPEQUALS = 61 + TIMESTAMPEQUALSPATH = 62 + TIMESTAMPGREATERTHAN = 63 + TIMESTAMPGREATERTHANPATH = 64 + TIMESTAMPGREATERTHANEQUALS = 65 + TIMESTAMPGREATERTHANEQUALSPATH = 66 + TIMESTAMPLESSTHAN = 67 + TIMESTAMPLESSTHANPATH = 68 + TIMESTAMPLESSTHANEQUALS = 69 + TIMESTAMPLESSTHANEQUALSPATH = 70 + SECONDSPATH = 71 + SECONDS = 72 + TIMESTAMPPATH = 73 + TIMESTAMP = 74 + TIMEOUTSECONDS = 75 + TIMEOUTSECONDSPATH = 76 + HEARTBEATSECONDS = 77 + HEARTBEATSECONDSPATH = 78 + PROCESSORCONFIG = 79 + MODE = 80 + INLINE = 81 + DISTRIBUTED = 82 + EXECUTIONTYPE = 83 + STANDARD = 84 + ITEMPROCESSOR = 85 + ITERATOR = 86 + ITEMSELECTOR = 87 + MAXCONCURRENCYPATH = 88 + MAXCONCURRENCY = 89 + RESOURCE = 90 + INPUTPATH = 91 + OUTPUTPATH = 92 + ITEMS = 93 + ITEMSPATH = 94 + RESULTPATH = 95 + RESULT = 96 + PARAMETERS = 97 + CREDENTIALS = 98 + ROLEARN = 99 + ROLEARNPATH = 100 + RESULTSELECTOR = 101 + ITEMREADER = 102 + READERCONFIG = 103 + INPUTTYPE = 104 + CSVHEADERLOCATION = 105 + CSVHEADERS = 106 + MAXITEMS = 107 + MAXITEMSPATH = 108 + TOLERATEDFAILURECOUNT = 109 + TOLERATEDFAILURECOUNTPATH = 110 + TOLERATEDFAILUREPERCENTAGE = 111 + TOLERATEDFAILUREPERCENTAGEPATH = 112 + LABEL = 113 + RESULTWRITER = 114 + NEXT = 115 + END = 116 + CAUSE = 117 + CAUSEPATH = 118 + ERROR = 119 + ERRORPATH = 120 + RETRY = 121 + ERROREQUALS = 122 + INTERVALSECONDS = 123 + MAXATTEMPTS = 124 + BACKOFFRATE = 125 + MAXDELAYSECONDS = 126 + JITTERSTRATEGY = 127 + FULL = 128 + NONE = 129 + CATCH = 130 + QUERYLANGUAGE = 131 + JSONPATH = 132 + JSONATA = 133 + ASSIGN = 134 + OUTPUT = 135 + ARGUMENTS = 136 + ERRORNAMEStatesALL = 137 + ERRORNAMEStatesDataLimitExceeded = 138 + ERRORNAMEStatesHeartbeatTimeout = 139 + ERRORNAMEStatesTimeout = 140 + ERRORNAMEStatesTaskFailed = 141 + ERRORNAMEStatesPermissions = 142 + ERRORNAMEStatesResultPathMatchFailure = 143 + ERRORNAMEStatesParameterPathFailure = 144 + ERRORNAMEStatesBranchFailed = 145 + ERRORNAMEStatesNoChoiceMatched = 146 + ERRORNAMEStatesIntrinsicFailure = 147 + ERRORNAMEStatesExceedToleratedFailureThreshold = 148 + ERRORNAMEStatesItemReaderFailed = 149 + ERRORNAMEStatesResultWriterFailed = 150 + 
ERRORNAMEStatesQueryEvaluationError = 151 + ERRORNAMEStatesRuntime = 152 + STRINGDOLLAR = 153 + STRINGPATHCONTEXTOBJ = 154 + STRINGPATH = 155 + STRINGVAR = 156 + STRINGINTRINSICFUNC = 157 + STRINGJSONATA = 158 + STRING = 159 + INT = 160 + NUMBER = 161 + WS = 162 + + channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] + + modeNames = [ "DEFAULT_MODE" ] + + literalNames = [ "", + "','", "':'", "'['", "']'", "'{'", "'}'", "'true'", "'false'", + "'null'", "'\"Comment\"'", "'\"States\"'", "'\"StartAt\"'", + "'\"NextState\"'", "'\"Version\"'", "'\"Type\"'", "'\"Task\"'", + "'\"Choice\"'", "'\"Fail\"'", "'\"Succeed\"'", "'\"Pass\"'", + "'\"Wait\"'", "'\"Parallel\"'", "'\"Map\"'", "'\"Choices\"'", + "'\"Condition\"'", "'\"Variable\"'", "'\"Default\"'", "'\"Branches\"'", + "'\"And\"'", "'\"BooleanEquals\"'", "'\"BooleanEqualsPath\"'", + "'\"IsBoolean\"'", "'\"IsNull\"'", "'\"IsNumeric\"'", "'\"IsPresent\"'", + "'\"IsString\"'", "'\"IsTimestamp\"'", "'\"Not\"'", "'\"NumericEquals\"'", + "'\"NumericEqualsPath\"'", "'\"NumericGreaterThan\"'", "'\"NumericGreaterThanPath\"'", + "'\"NumericGreaterThanEquals\"'", "'\"NumericGreaterThanEqualsPath\"'", + "'\"NumericLessThan\"'", "'\"NumericLessThanPath\"'", "'\"NumericLessThanEquals\"'", + "'\"NumericLessThanEqualsPath\"'", "'\"Or\"'", "'\"StringEquals\"'", + "'\"StringEqualsPath\"'", "'\"StringGreaterThan\"'", "'\"StringGreaterThanPath\"'", + "'\"StringGreaterThanEquals\"'", "'\"StringGreaterThanEqualsPath\"'", + "'\"StringLessThan\"'", "'\"StringLessThanPath\"'", "'\"StringLessThanEquals\"'", + "'\"StringLessThanEqualsPath\"'", "'\"StringMatches\"'", "'\"TimestampEquals\"'", + "'\"TimestampEqualsPath\"'", "'\"TimestampGreaterThan\"'", "'\"TimestampGreaterThanPath\"'", + "'\"TimestampGreaterThanEquals\"'", "'\"TimestampGreaterThanEqualsPath\"'", + "'\"TimestampLessThan\"'", "'\"TimestampLessThanPath\"'", "'\"TimestampLessThanEquals\"'", + "'\"TimestampLessThanEqualsPath\"'", "'\"SecondsPath\"'", "'\"Seconds\"'", + "'\"TimestampPath\"'", "'\"Timestamp\"'", "'\"TimeoutSeconds\"'", + "'\"TimeoutSecondsPath\"'", "'\"HeartbeatSeconds\"'", "'\"HeartbeatSecondsPath\"'", + "'\"ProcessorConfig\"'", "'\"Mode\"'", "'\"INLINE\"'", "'\"DISTRIBUTED\"'", + "'\"ExecutionType\"'", "'\"STANDARD\"'", "'\"ItemProcessor\"'", + "'\"Iterator\"'", "'\"ItemSelector\"'", "'\"MaxConcurrencyPath\"'", + "'\"MaxConcurrency\"'", "'\"Resource\"'", "'\"InputPath\"'", + "'\"OutputPath\"'", "'\"Items\"'", "'\"ItemsPath\"'", "'\"ResultPath\"'", + "'\"Result\"'", "'\"Parameters\"'", "'\"Credentials\"'", "'\"RoleArn\"'", + "'\"RoleArn.$\"'", "'\"ResultSelector\"'", "'\"ItemReader\"'", + "'\"ReaderConfig\"'", "'\"InputType\"'", "'\"CSVHeaderLocation\"'", + "'\"CSVHeaders\"'", "'\"MaxItems\"'", "'\"MaxItemsPath\"'", + "'\"ToleratedFailureCount\"'", "'\"ToleratedFailureCountPath\"'", + "'\"ToleratedFailurePercentage\"'", "'\"ToleratedFailurePercentagePath\"'", + "'\"Label\"'", "'\"ResultWriter\"'", "'\"Next\"'", "'\"End\"'", + "'\"Cause\"'", "'\"CausePath\"'", "'\"Error\"'", "'\"ErrorPath\"'", + "'\"Retry\"'", "'\"ErrorEquals\"'", "'\"IntervalSeconds\"'", + "'\"MaxAttempts\"'", "'\"BackoffRate\"'", "'\"MaxDelaySeconds\"'", + "'\"JitterStrategy\"'", "'\"FULL\"'", "'\"NONE\"'", "'\"Catch\"'", + "'\"QueryLanguage\"'", "'\"JSONPath\"'", "'\"JSONata\"'", "'\"Assign\"'", + "'\"Output\"'", "'\"Arguments\"'", "'\"States.ALL\"'", "'\"States.DataLimitExceeded\"'", + "'\"States.HeartbeatTimeout\"'", "'\"States.Timeout\"'", "'\"States.TaskFailed\"'", + "'\"States.Permissions\"'", 
"'\"States.ResultPathMatchFailure\"'", + "'\"States.ParameterPathFailure\"'", "'\"States.BranchFailed\"'", + "'\"States.NoChoiceMatched\"'", "'\"States.IntrinsicFailure\"'", + "'\"States.ExceedToleratedFailureThreshold\"'", "'\"States.ItemReaderFailed\"'", + "'\"States.ResultWriterFailed\"'", "'\"States.QueryEvaluationError\"'", + "'\"States.Runtime\"'" ] + + symbolicNames = [ "", + "COMMA", "COLON", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "TRUE", + "FALSE", "NULL", "COMMENT", "STATES", "STARTAT", "NEXTSTATE", + "VERSION", "TYPE", "TASK", "CHOICE", "FAIL", "SUCCEED", "PASS", + "WAIT", "PARALLEL", "MAP", "CHOICES", "CONDITION", "VARIABLE", + "DEFAULT", "BRANCHES", "AND", "BOOLEANEQUALS", "BOOLEANQUALSPATH", + "ISBOOLEAN", "ISNULL", "ISNUMERIC", "ISPRESENT", "ISSTRING", + "ISTIMESTAMP", "NOT", "NUMERICEQUALS", "NUMERICEQUALSPATH", + "NUMERICGREATERTHAN", "NUMERICGREATERTHANPATH", "NUMERICGREATERTHANEQUALS", + "NUMERICGREATERTHANEQUALSPATH", "NUMERICLESSTHAN", "NUMERICLESSTHANPATH", + "NUMERICLESSTHANEQUALS", "NUMERICLESSTHANEQUALSPATH", "OR", + "STRINGEQUALS", "STRINGEQUALSPATH", "STRINGGREATERTHAN", "STRINGGREATERTHANPATH", + "STRINGGREATERTHANEQUALS", "STRINGGREATERTHANEQUALSPATH", "STRINGLESSTHAN", + "STRINGLESSTHANPATH", "STRINGLESSTHANEQUALS", "STRINGLESSTHANEQUALSPATH", + "STRINGMATCHES", "TIMESTAMPEQUALS", "TIMESTAMPEQUALSPATH", "TIMESTAMPGREATERTHAN", + "TIMESTAMPGREATERTHANPATH", "TIMESTAMPGREATERTHANEQUALS", "TIMESTAMPGREATERTHANEQUALSPATH", + "TIMESTAMPLESSTHAN", "TIMESTAMPLESSTHANPATH", "TIMESTAMPLESSTHANEQUALS", + "TIMESTAMPLESSTHANEQUALSPATH", "SECONDSPATH", "SECONDS", "TIMESTAMPPATH", + "TIMESTAMP", "TIMEOUTSECONDS", "TIMEOUTSECONDSPATH", "HEARTBEATSECONDS", + "HEARTBEATSECONDSPATH", "PROCESSORCONFIG", "MODE", "INLINE", + "DISTRIBUTED", "EXECUTIONTYPE", "STANDARD", "ITEMPROCESSOR", + "ITERATOR", "ITEMSELECTOR", "MAXCONCURRENCYPATH", "MAXCONCURRENCY", + "RESOURCE", "INPUTPATH", "OUTPUTPATH", "ITEMS", "ITEMSPATH", + "RESULTPATH", "RESULT", "PARAMETERS", "CREDENTIALS", "ROLEARN", + "ROLEARNPATH", "RESULTSELECTOR", "ITEMREADER", "READERCONFIG", + "INPUTTYPE", "CSVHEADERLOCATION", "CSVHEADERS", "MAXITEMS", + "MAXITEMSPATH", "TOLERATEDFAILURECOUNT", "TOLERATEDFAILURECOUNTPATH", + "TOLERATEDFAILUREPERCENTAGE", "TOLERATEDFAILUREPERCENTAGEPATH", + "LABEL", "RESULTWRITER", "NEXT", "END", "CAUSE", "CAUSEPATH", + "ERROR", "ERRORPATH", "RETRY", "ERROREQUALS", "INTERVALSECONDS", + "MAXATTEMPTS", "BACKOFFRATE", "MAXDELAYSECONDS", "JITTERSTRATEGY", + "FULL", "NONE", "CATCH", "QUERYLANGUAGE", "JSONPATH", "JSONATA", + "ASSIGN", "OUTPUT", "ARGUMENTS", "ERRORNAMEStatesALL", "ERRORNAMEStatesDataLimitExceeded", + "ERRORNAMEStatesHeartbeatTimeout", "ERRORNAMEStatesTimeout", + "ERRORNAMEStatesTaskFailed", "ERRORNAMEStatesPermissions", "ERRORNAMEStatesResultPathMatchFailure", + "ERRORNAMEStatesParameterPathFailure", "ERRORNAMEStatesBranchFailed", + "ERRORNAMEStatesNoChoiceMatched", "ERRORNAMEStatesIntrinsicFailure", + "ERRORNAMEStatesExceedToleratedFailureThreshold", "ERRORNAMEStatesItemReaderFailed", + "ERRORNAMEStatesResultWriterFailed", "ERRORNAMEStatesQueryEvaluationError", + "ERRORNAMEStatesRuntime", "STRINGDOLLAR", "STRINGPATHCONTEXTOBJ", + "STRINGPATH", "STRINGVAR", "STRINGINTRINSICFUNC", "STRINGJSONATA", + "STRING", "INT", "NUMBER", "WS" ] + + ruleNames = [ "COMMA", "COLON", "LBRACK", "RBRACK", "LBRACE", "RBRACE", + "TRUE", "FALSE", "NULL", "COMMENT", "STATES", "STARTAT", + "NEXTSTATE", "VERSION", "TYPE", "TASK", "CHOICE", "FAIL", + "SUCCEED", "PASS", "WAIT", "PARALLEL", "MAP", "CHOICES", + 
"CONDITION", "VARIABLE", "DEFAULT", "BRANCHES", "AND", + "BOOLEANEQUALS", "BOOLEANQUALSPATH", "ISBOOLEAN", "ISNULL", + "ISNUMERIC", "ISPRESENT", "ISSTRING", "ISTIMESTAMP", "NOT", + "NUMERICEQUALS", "NUMERICEQUALSPATH", "NUMERICGREATERTHAN", + "NUMERICGREATERTHANPATH", "NUMERICGREATERTHANEQUALS", + "NUMERICGREATERTHANEQUALSPATH", "NUMERICLESSTHAN", "NUMERICLESSTHANPATH", + "NUMERICLESSTHANEQUALS", "NUMERICLESSTHANEQUALSPATH", + "OR", "STRINGEQUALS", "STRINGEQUALSPATH", "STRINGGREATERTHAN", + "STRINGGREATERTHANPATH", "STRINGGREATERTHANEQUALS", "STRINGGREATERTHANEQUALSPATH", + "STRINGLESSTHAN", "STRINGLESSTHANPATH", "STRINGLESSTHANEQUALS", + "STRINGLESSTHANEQUALSPATH", "STRINGMATCHES", "TIMESTAMPEQUALS", + "TIMESTAMPEQUALSPATH", "TIMESTAMPGREATERTHAN", "TIMESTAMPGREATERTHANPATH", + "TIMESTAMPGREATERTHANEQUALS", "TIMESTAMPGREATERTHANEQUALSPATH", + "TIMESTAMPLESSTHAN", "TIMESTAMPLESSTHANPATH", "TIMESTAMPLESSTHANEQUALS", + "TIMESTAMPLESSTHANEQUALSPATH", "SECONDSPATH", "SECONDS", + "TIMESTAMPPATH", "TIMESTAMP", "TIMEOUTSECONDS", "TIMEOUTSECONDSPATH", + "HEARTBEATSECONDS", "HEARTBEATSECONDSPATH", "PROCESSORCONFIG", + "MODE", "INLINE", "DISTRIBUTED", "EXECUTIONTYPE", "STANDARD", + "ITEMPROCESSOR", "ITERATOR", "ITEMSELECTOR", "MAXCONCURRENCYPATH", + "MAXCONCURRENCY", "RESOURCE", "INPUTPATH", "OUTPUTPATH", + "ITEMS", "ITEMSPATH", "RESULTPATH", "RESULT", "PARAMETERS", + "CREDENTIALS", "ROLEARN", "ROLEARNPATH", "RESULTSELECTOR", + "ITEMREADER", "READERCONFIG", "INPUTTYPE", "CSVHEADERLOCATION", + "CSVHEADERS", "MAXITEMS", "MAXITEMSPATH", "TOLERATEDFAILURECOUNT", + "TOLERATEDFAILURECOUNTPATH", "TOLERATEDFAILUREPERCENTAGE", + "TOLERATEDFAILUREPERCENTAGEPATH", "LABEL", "RESULTWRITER", + "NEXT", "END", "CAUSE", "CAUSEPATH", "ERROR", "ERRORPATH", + "RETRY", "ERROREQUALS", "INTERVALSECONDS", "MAXATTEMPTS", + "BACKOFFRATE", "MAXDELAYSECONDS", "JITTERSTRATEGY", "FULL", + "NONE", "CATCH", "QUERYLANGUAGE", "JSONPATH", "JSONATA", + "ASSIGN", "OUTPUT", "ARGUMENTS", "ERRORNAMEStatesALL", + "ERRORNAMEStatesDataLimitExceeded", "ERRORNAMEStatesHeartbeatTimeout", + "ERRORNAMEStatesTimeout", "ERRORNAMEStatesTaskFailed", + "ERRORNAMEStatesPermissions", "ERRORNAMEStatesResultPathMatchFailure", + "ERRORNAMEStatesParameterPathFailure", "ERRORNAMEStatesBranchFailed", + "ERRORNAMEStatesNoChoiceMatched", "ERRORNAMEStatesIntrinsicFailure", + "ERRORNAMEStatesExceedToleratedFailureThreshold", "ERRORNAMEStatesItemReaderFailed", + "ERRORNAMEStatesResultWriterFailed", "ERRORNAMEStatesQueryEvaluationError", + "ERRORNAMEStatesRuntime", "STRINGDOLLAR", "STRINGPATHCONTEXTOBJ", + "STRINGPATH", "STRINGVAR", "STRINGINTRINSICFUNC", "STRINGJSONATA", + "STRING", "ESC", "UNICODE", "HEX", "SAFECODEPOINT", "LJSONATA", + "RJSONATA", "INT", "NUMBER", "EXP", "WS" ] + + grammarFileName = "ASLLexer.g4" + + def __init__(self, input=None, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.13.2") + self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) + self._actions = None + self._predicates = None + + diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParser.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParser.py new file mode 100644 index 0000000000000..aeeb665fbc1c9 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParser.py @@ -0,0 +1,11717 @@ +# Generated from ASLParser.g4 by ANTLR 4.13.2 +# encoding: utf-8 +from antlr4 import * +from io import StringIO +import sys +if 
sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + +def serializedATN(): + return [ + 4,1,162,1154,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,6, + 7,6,2,7,7,7,2,8,7,8,2,9,7,9,2,10,7,10,2,11,7,11,2,12,7,12,2,13,7, + 13,2,14,7,14,2,15,7,15,2,16,7,16,2,17,7,17,2,18,7,18,2,19,7,19,2, + 20,7,20,2,21,7,21,2,22,7,22,2,23,7,23,2,24,7,24,2,25,7,25,2,26,7, + 26,2,27,7,27,2,28,7,28,2,29,7,29,2,30,7,30,2,31,7,31,2,32,7,32,2, + 33,7,33,2,34,7,34,2,35,7,35,2,36,7,36,2,37,7,37,2,38,7,38,2,39,7, + 39,2,40,7,40,2,41,7,41,2,42,7,42,2,43,7,43,2,44,7,44,2,45,7,45,2, + 46,7,46,2,47,7,47,2,48,7,48,2,49,7,49,2,50,7,50,2,51,7,51,2,52,7, + 52,2,53,7,53,2,54,7,54,2,55,7,55,2,56,7,56,2,57,7,57,2,58,7,58,2, + 59,7,59,2,60,7,60,2,61,7,61,2,62,7,62,2,63,7,63,2,64,7,64,2,65,7, + 65,2,66,7,66,2,67,7,67,2,68,7,68,2,69,7,69,2,70,7,70,2,71,7,71,2, + 72,7,72,2,73,7,73,2,74,7,74,2,75,7,75,2,76,7,76,2,77,7,77,2,78,7, + 78,2,79,7,79,2,80,7,80,2,81,7,81,2,82,7,82,2,83,7,83,2,84,7,84,2, + 85,7,85,2,86,7,86,2,87,7,87,2,88,7,88,2,89,7,89,2,90,7,90,2,91,7, + 91,2,92,7,92,2,93,7,93,2,94,7,94,2,95,7,95,2,96,7,96,2,97,7,97,2, + 98,7,98,2,99,7,99,2,100,7,100,2,101,7,101,2,102,7,102,2,103,7,103, + 2,104,7,104,2,105,7,105,2,106,7,106,2,107,7,107,2,108,7,108,2,109, + 7,109,2,110,7,110,2,111,7,111,2,112,7,112,2,113,7,113,2,114,7,114, + 2,115,7,115,1,0,1,0,1,0,1,1,1,1,1,1,1,1,5,1,240,8,1,10,1,12,1,243, + 9,1,1,1,1,1,1,2,1,2,1,2,1,2,1,2,1,2,3,2,253,8,2,1,3,1,3,1,3,1,3, + 1,4,1,4,1,4,1,4,1,5,1,5,1,5,1,5,1,6,1,6,1,6,1,6,1,7,1,7,1,7,1,7, + 1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7, + 1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7,1,7, + 1,7,1,7,3,7,309,8,7,1,8,1,8,1,8,1,8,1,8,1,8,5,8,317,8,8,10,8,12, + 8,320,9,8,1,8,1,8,1,9,1,9,1,9,1,9,1,10,1,10,1,10,1,10,5,10,332,8, + 10,10,10,12,10,335,9,10,1,10,1,10,1,11,1,11,1,11,1,11,1,12,1,12, + 1,12,1,12,1,13,1,13,1,13,1,13,1,14,1,14,1,14,1,14,3,14,355,8,14, + 1,15,1,15,1,15,1,15,1,16,1,16,1,16,1,16,3,16,365,8,16,1,17,1,17, + 1,17,1,17,3,17,371,8,17,1,18,1,18,1,18,1,18,1,19,1,19,1,19,1,19, + 1,20,1,20,1,20,1,20,3,20,385,8,20,1,20,1,20,1,20,3,20,390,8,20,1, + 21,1,21,1,21,1,21,3,21,396,8,21,1,21,1,21,1,21,3,21,401,8,21,1,22, + 1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,22,3,22,412,8,22,1,23,1,23, + 1,23,1,23,3,23,418,8,23,1,23,1,23,1,23,3,23,423,8,23,1,24,1,24,1, + 24,1,24,1,24,1,24,3,24,431,8,24,1,25,1,25,1,25,1,25,1,26,1,26,1, + 26,1,26,1,26,1,26,1,26,1,26,1,26,3,26,446,8,26,1,27,1,27,1,27,1, + 27,1,28,1,28,1,28,1,28,1,28,1,28,1,29,1,29,1,29,1,29,3,29,462,8, + 29,1,29,1,29,1,29,3,29,467,8,29,1,30,1,30,1,30,1,30,1,30,1,30,1, + 30,1,30,1,30,3,30,478,8,30,1,31,1,31,1,31,1,31,1,31,1,31,1,31,1, + 31,1,31,3,31,489,8,31,1,32,1,32,1,32,1,32,5,32,495,8,32,10,32,12, + 32,498,9,32,1,32,1,32,1,32,1,32,3,32,504,8,32,1,33,1,33,1,33,1,33, + 1,33,1,33,1,33,3,33,513,8,33,1,34,1,34,1,34,1,34,5,34,519,8,34,10, + 34,12,34,522,9,34,1,34,1,34,1,34,1,34,3,34,528,8,34,1,35,1,35,1, + 35,3,35,533,8,35,1,36,1,36,1,36,1,36,1,36,3,36,540,8,36,1,37,1,37, + 1,37,1,37,1,38,1,38,1,38,1,38,1,38,1,38,5,38,552,8,38,10,38,12,38, + 555,9,38,1,38,1,38,3,38,559,8,38,1,39,1,39,1,40,1,40,1,40,1,40,1, + 40,1,40,5,40,569,8,40,10,40,12,40,572,9,40,1,40,1,40,3,40,576,8, + 40,1,41,1,41,1,41,1,41,1,41,1,41,1,41,3,41,585,8,41,1,42,1,42,1, + 42,3,42,590,8,42,1,43,1,43,1,43,1,43,1,43,1,43,5,43,598,8,43,10, + 43,12,43,601,9,43,1,43,1,43,3,43,605,8,43,1,44,1,44,1,44,1,44,1, + 44,1,44,3,44,613,8,44,1,45,1,45,1,45,1,45,1,45,1,45,3,45,621,8,45, + 
1,46,1,46,1,46,1,46,1,47,1,47,1,47,1,47,1,47,1,47,5,47,633,8,47, + 10,47,12,47,636,9,47,1,47,1,47,3,47,640,8,47,1,48,1,48,1,48,1,48, + 1,49,1,49,1,49,3,49,649,8,49,1,50,1,50,1,50,1,50,1,50,1,50,5,50, + 657,8,50,10,50,12,50,660,9,50,1,50,1,50,3,50,664,8,50,1,51,1,51, + 1,51,1,51,1,51,1,51,3,51,672,8,51,1,52,1,52,1,52,1,52,1,53,1,53, + 1,54,1,54,1,54,1,54,1,54,1,54,5,54,686,8,54,10,54,12,54,689,9,54, + 1,54,1,54,1,55,1,55,1,55,1,55,4,55,697,8,55,11,55,12,55,698,1,55, + 1,55,1,55,1,55,1,55,1,55,5,55,707,8,55,10,55,12,55,710,9,55,1,55, + 1,55,3,55,714,8,55,1,56,1,56,1,56,1,56,1,56,1,56,3,56,722,8,56,1, + 57,1,57,1,57,1,57,3,57,728,8,57,1,58,1,58,1,58,1,58,1,58,1,58,1, + 58,5,58,737,8,58,10,58,12,58,740,9,58,1,58,1,58,3,58,744,8,58,1, + 59,1,59,1,59,1,59,1,60,1,60,1,60,1,60,1,60,1,60,1,60,1,60,1,60,1, + 60,1,60,1,60,1,60,1,60,3,60,764,8,60,1,61,1,61,1,61,1,61,1,61,1, + 61,5,61,772,8,61,10,61,12,61,775,9,61,1,61,1,61,1,62,1,62,1,62,1, + 62,1,62,1,62,5,62,785,8,62,10,62,12,62,788,9,62,1,62,1,62,1,63,1, + 63,1,63,1,63,3,63,796,8,63,1,64,1,64,1,64,1,64,1,64,1,64,5,64,804, + 8,64,10,64,12,64,807,9,64,1,64,1,64,1,65,1,65,3,65,813,8,65,1,66, + 1,66,1,66,1,66,1,67,1,67,1,68,1,68,1,68,1,68,1,69,1,69,1,70,1,70, + 1,70,1,70,1,70,1,70,5,70,833,8,70,10,70,12,70,836,9,70,1,70,1,70, + 1,71,1,71,1,71,1,71,3,71,844,8,71,1,72,1,72,1,72,1,72,1,73,1,73, + 1,73,1,73,1,73,1,73,5,73,856,8,73,10,73,12,73,859,9,73,1,73,1,73, + 1,74,1,74,1,74,1,74,3,74,867,8,74,1,75,1,75,1,75,1,75,1,75,1,75, + 5,75,875,8,75,10,75,12,75,878,9,75,1,75,1,75,1,76,1,76,1,76,1,76, + 3,76,886,8,76,1,77,1,77,1,77,1,77,1,78,1,78,1,78,1,78,1,79,1,79, + 1,79,1,79,1,79,1,79,5,79,902,8,79,10,79,12,79,905,9,79,1,79,1,79, + 1,80,1,80,1,80,1,80,1,80,1,80,1,80,1,80,1,80,3,80,918,8,80,1,81, + 1,81,1,81,1,81,1,81,1,81,1,81,1,81,1,81,3,81,929,8,81,1,82,1,82, + 1,82,1,82,1,82,1,82,1,82,1,82,1,82,3,82,940,8,82,1,83,1,83,1,83, + 1,83,1,84,1,84,1,84,1,84,1,84,1,84,5,84,952,8,84,10,84,12,84,955, + 9,84,1,84,1,84,1,85,1,85,3,85,961,8,85,1,86,1,86,1,86,1,86,1,86, + 1,86,5,86,969,8,86,10,86,12,86,972,9,86,3,86,974,8,86,1,86,1,86, + 1,87,1,87,1,87,1,87,5,87,982,8,87,10,87,12,87,985,9,87,1,87,1,87, + 1,88,1,88,1,88,1,88,1,88,1,88,1,88,3,88,996,8,88,1,89,1,89,1,89, + 1,89,1,89,1,89,5,89,1004,8,89,10,89,12,89,1007,9,89,1,89,1,89,1, + 90,1,90,1,90,1,90,1,91,1,91,1,91,1,91,1,92,1,92,1,92,1,92,1,93,1, + 93,1,93,1,93,1,94,1,94,1,94,1,94,1,95,1,95,1,95,1,95,1,95,1,95,5, + 95,1037,8,95,10,95,12,95,1040,9,95,3,95,1042,8,95,1,95,1,95,1,96, + 1,96,1,96,1,96,5,96,1050,8,96,10,96,12,96,1053,9,96,1,96,1,96,1, + 97,1,97,1,97,1,97,1,97,1,97,3,97,1063,8,97,1,98,1,98,1,99,1,99,1, + 100,1,100,1,101,1,101,3,101,1073,8,101,1,102,1,102,1,102,1,102,5, + 102,1079,8,102,10,102,12,102,1082,9,102,1,102,1,102,1,102,1,102, + 3,102,1088,8,102,1,103,1,103,1,103,1,103,1,104,1,104,1,104,1,104, + 5,104,1098,8,104,10,104,12,104,1101,9,104,1,104,1,104,1,104,1,104, + 3,104,1107,8,104,1,105,1,105,1,105,1,105,1,105,1,105,1,105,1,105, + 1,105,3,105,1118,8,105,1,106,1,106,1,106,3,106,1123,8,106,1,107, + 1,107,3,107,1127,8,107,1,108,1,108,3,108,1131,8,108,1,109,1,109, + 1,110,1,110,1,111,1,111,1,112,1,112,1,113,1,113,1,114,1,114,1,114, + 1,114,1,114,1,114,1,114,3,114,1150,8,114,1,115,1,115,1,115,0,0,116, + 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44, + 46,48,50,52,54,56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88, + 90,92,94,96,98,100,102,104,106,108,110,112,114,116,118,120,122,124, + 126,128,130,132,134,136,138,140,142,144,146,148,150,152,154,156, + 
158,160,162,164,166,168,170,172,174,176,178,180,182,184,186,188, + 190,192,194,196,198,200,202,204,206,208,210,212,214,216,218,220, + 222,224,226,228,230,0,10,1,0,132,133,1,0,7,8,1,0,16,23,1,0,81,82, + 1,0,160,161,1,0,128,129,3,0,30,37,39,48,50,70,3,0,29,29,38,38,49, + 49,1,0,137,152,5,0,10,28,71,117,119,119,121,131,134,136,1225,0,232, + 1,0,0,0,2,235,1,0,0,0,4,252,1,0,0,0,6,254,1,0,0,0,8,258,1,0,0,0, + 10,262,1,0,0,0,12,266,1,0,0,0,14,308,1,0,0,0,16,310,1,0,0,0,18,323, + 1,0,0,0,20,327,1,0,0,0,22,338,1,0,0,0,24,342,1,0,0,0,26,346,1,0, + 0,0,28,350,1,0,0,0,30,356,1,0,0,0,32,360,1,0,0,0,34,366,1,0,0,0, + 36,372,1,0,0,0,38,376,1,0,0,0,40,389,1,0,0,0,42,400,1,0,0,0,44,411, + 1,0,0,0,46,422,1,0,0,0,48,430,1,0,0,0,50,432,1,0,0,0,52,445,1,0, + 0,0,54,447,1,0,0,0,56,451,1,0,0,0,58,466,1,0,0,0,60,477,1,0,0,0, + 62,488,1,0,0,0,64,503,1,0,0,0,66,512,1,0,0,0,68,527,1,0,0,0,70,532, + 1,0,0,0,72,539,1,0,0,0,74,541,1,0,0,0,76,558,1,0,0,0,78,560,1,0, + 0,0,80,575,1,0,0,0,82,584,1,0,0,0,84,589,1,0,0,0,86,604,1,0,0,0, + 88,612,1,0,0,0,90,620,1,0,0,0,92,622,1,0,0,0,94,639,1,0,0,0,96,641, + 1,0,0,0,98,648,1,0,0,0,100,663,1,0,0,0,102,671,1,0,0,0,104,673,1, + 0,0,0,106,677,1,0,0,0,108,679,1,0,0,0,110,713,1,0,0,0,112,721,1, + 0,0,0,114,727,1,0,0,0,116,729,1,0,0,0,118,745,1,0,0,0,120,763,1, + 0,0,0,122,765,1,0,0,0,124,778,1,0,0,0,126,795,1,0,0,0,128,797,1, + 0,0,0,130,812,1,0,0,0,132,814,1,0,0,0,134,818,1,0,0,0,136,820,1, + 0,0,0,138,824,1,0,0,0,140,826,1,0,0,0,142,843,1,0,0,0,144,845,1, + 0,0,0,146,849,1,0,0,0,148,866,1,0,0,0,150,868,1,0,0,0,152,885,1, + 0,0,0,154,887,1,0,0,0,156,891,1,0,0,0,158,895,1,0,0,0,160,917,1, + 0,0,0,162,928,1,0,0,0,164,939,1,0,0,0,166,941,1,0,0,0,168,945,1, + 0,0,0,170,960,1,0,0,0,172,962,1,0,0,0,174,977,1,0,0,0,176,995,1, + 0,0,0,178,997,1,0,0,0,180,1010,1,0,0,0,182,1014,1,0,0,0,184,1018, + 1,0,0,0,186,1022,1,0,0,0,188,1026,1,0,0,0,190,1030,1,0,0,0,192,1045, + 1,0,0,0,194,1062,1,0,0,0,196,1064,1,0,0,0,198,1066,1,0,0,0,200,1068, + 1,0,0,0,202,1072,1,0,0,0,204,1087,1,0,0,0,206,1089,1,0,0,0,208,1106, + 1,0,0,0,210,1117,1,0,0,0,212,1122,1,0,0,0,214,1126,1,0,0,0,216,1130, + 1,0,0,0,218,1132,1,0,0,0,220,1134,1,0,0,0,222,1136,1,0,0,0,224,1138, + 1,0,0,0,226,1140,1,0,0,0,228,1149,1,0,0,0,230,1151,1,0,0,0,232,233, + 3,2,1,0,233,234,5,0,0,1,234,1,1,0,0,0,235,236,5,5,0,0,236,241,3, + 4,2,0,237,238,5,1,0,0,238,240,3,4,2,0,239,237,1,0,0,0,240,243,1, + 0,0,0,241,239,1,0,0,0,241,242,1,0,0,0,242,244,1,0,0,0,243,241,1, + 0,0,0,244,245,5,6,0,0,245,3,1,0,0,0,246,253,3,8,4,0,247,253,3,10, + 5,0,248,253,3,12,6,0,249,253,3,6,3,0,250,253,3,16,8,0,251,253,3, + 60,30,0,252,246,1,0,0,0,252,247,1,0,0,0,252,248,1,0,0,0,252,249, + 1,0,0,0,252,250,1,0,0,0,252,251,1,0,0,0,253,5,1,0,0,0,254,255,5, + 12,0,0,255,256,5,2,0,0,256,257,3,228,114,0,257,7,1,0,0,0,258,259, + 5,10,0,0,259,260,5,2,0,0,260,261,3,228,114,0,261,9,1,0,0,0,262,263, + 5,14,0,0,263,264,5,2,0,0,264,265,3,228,114,0,265,11,1,0,0,0,266, + 267,5,131,0,0,267,268,5,2,0,0,268,269,7,0,0,0,269,13,1,0,0,0,270, + 309,3,8,4,0,271,309,3,12,6,0,272,309,3,22,11,0,273,309,3,28,14,0, + 274,309,3,26,13,0,275,309,3,24,12,0,276,309,3,30,15,0,277,309,3, + 32,16,0,278,309,3,34,17,0,279,309,3,36,18,0,280,309,3,38,19,0,281, + 309,3,108,54,0,282,309,3,40,20,0,283,309,3,42,21,0,284,309,3,44, + 22,0,285,309,3,46,23,0,286,309,3,48,24,0,287,309,3,50,25,0,288,309, + 3,124,62,0,289,309,3,140,70,0,290,309,3,144,72,0,291,309,3,146,73, + 0,292,309,3,52,26,0,293,309,3,60,30,0,294,309,3,62,31,0,295,309, + 3,122,61,0,296,309,3,54,27,0,297,309,3,172,86,0,298,309,3,190,95, + 
0,299,309,3,104,52,0,300,309,3,162,81,0,301,309,3,164,82,0,302,309, + 3,166,83,0,303,309,3,168,84,0,304,309,3,74,37,0,305,309,3,90,45, + 0,306,309,3,92,46,0,307,309,3,56,28,0,308,270,1,0,0,0,308,271,1, + 0,0,0,308,272,1,0,0,0,308,273,1,0,0,0,308,274,1,0,0,0,308,275,1, + 0,0,0,308,276,1,0,0,0,308,277,1,0,0,0,308,278,1,0,0,0,308,279,1, + 0,0,0,308,280,1,0,0,0,308,281,1,0,0,0,308,282,1,0,0,0,308,283,1, + 0,0,0,308,284,1,0,0,0,308,285,1,0,0,0,308,286,1,0,0,0,308,287,1, + 0,0,0,308,288,1,0,0,0,308,289,1,0,0,0,308,290,1,0,0,0,308,291,1, + 0,0,0,308,292,1,0,0,0,308,293,1,0,0,0,308,294,1,0,0,0,308,295,1, + 0,0,0,308,296,1,0,0,0,308,297,1,0,0,0,308,298,1,0,0,0,308,299,1, + 0,0,0,308,300,1,0,0,0,308,301,1,0,0,0,308,302,1,0,0,0,308,303,1, + 0,0,0,308,304,1,0,0,0,308,305,1,0,0,0,308,306,1,0,0,0,308,307,1, + 0,0,0,309,15,1,0,0,0,310,311,5,11,0,0,311,312,5,2,0,0,312,313,5, + 5,0,0,313,318,3,18,9,0,314,315,5,1,0,0,315,317,3,18,9,0,316,314, + 1,0,0,0,317,320,1,0,0,0,318,316,1,0,0,0,318,319,1,0,0,0,319,321, + 1,0,0,0,320,318,1,0,0,0,321,322,5,6,0,0,322,17,1,0,0,0,323,324,3, + 228,114,0,324,325,5,2,0,0,325,326,3,20,10,0,326,19,1,0,0,0,327,328, + 5,5,0,0,328,333,3,14,7,0,329,330,5,1,0,0,330,332,3,14,7,0,331,329, + 1,0,0,0,332,335,1,0,0,0,333,331,1,0,0,0,333,334,1,0,0,0,334,336, + 1,0,0,0,335,333,1,0,0,0,336,337,5,6,0,0,337,21,1,0,0,0,338,339,5, + 15,0,0,339,340,5,2,0,0,340,341,3,106,53,0,341,23,1,0,0,0,342,343, + 5,115,0,0,343,344,5,2,0,0,344,345,3,228,114,0,345,25,1,0,0,0,346, + 347,5,90,0,0,347,348,5,2,0,0,348,349,3,228,114,0,349,27,1,0,0,0, + 350,351,5,91,0,0,351,354,5,2,0,0,352,355,5,9,0,0,353,355,3,212,106, + 0,354,352,1,0,0,0,354,353,1,0,0,0,355,29,1,0,0,0,356,357,5,96,0, + 0,357,358,5,2,0,0,358,359,3,210,105,0,359,31,1,0,0,0,360,361,5,95, + 0,0,361,364,5,2,0,0,362,365,5,9,0,0,363,365,3,218,109,0,364,362, + 1,0,0,0,364,363,1,0,0,0,365,33,1,0,0,0,366,367,5,92,0,0,367,370, + 5,2,0,0,368,371,5,9,0,0,369,371,3,212,106,0,370,368,1,0,0,0,370, + 369,1,0,0,0,371,35,1,0,0,0,372,373,5,116,0,0,373,374,5,2,0,0,374, + 375,7,1,0,0,375,37,1,0,0,0,376,377,5,27,0,0,377,378,5,2,0,0,378, + 379,3,228,114,0,379,39,1,0,0,0,380,381,5,119,0,0,381,384,5,2,0,0, + 382,385,3,226,113,0,383,385,3,228,114,0,384,382,1,0,0,0,384,383, + 1,0,0,0,385,390,1,0,0,0,386,387,5,120,0,0,387,388,5,2,0,0,388,390, + 3,214,107,0,389,380,1,0,0,0,389,386,1,0,0,0,390,41,1,0,0,0,391,392, + 5,117,0,0,392,395,5,2,0,0,393,396,3,226,113,0,394,396,3,228,114, + 0,395,393,1,0,0,0,395,394,1,0,0,0,396,401,1,0,0,0,397,398,5,118, + 0,0,398,399,5,2,0,0,399,401,3,214,107,0,400,391,1,0,0,0,400,397, + 1,0,0,0,401,43,1,0,0,0,402,403,5,72,0,0,403,404,5,2,0,0,404,412, + 3,226,113,0,405,406,5,72,0,0,406,407,5,2,0,0,407,412,5,160,0,0,408, + 409,5,71,0,0,409,410,5,2,0,0,410,412,3,212,106,0,411,402,1,0,0,0, + 411,405,1,0,0,0,411,408,1,0,0,0,412,45,1,0,0,0,413,414,5,74,0,0, + 414,417,5,2,0,0,415,418,3,226,113,0,416,418,3,228,114,0,417,415, + 1,0,0,0,417,416,1,0,0,0,418,423,1,0,0,0,419,420,5,73,0,0,420,421, + 5,2,0,0,421,423,3,212,106,0,422,413,1,0,0,0,422,419,1,0,0,0,423, + 47,1,0,0,0,424,425,5,93,0,0,425,426,5,2,0,0,426,431,3,100,50,0,427, + 428,5,93,0,0,428,429,5,2,0,0,429,431,3,226,113,0,430,424,1,0,0,0, + 430,427,1,0,0,0,431,49,1,0,0,0,432,433,5,94,0,0,433,434,5,2,0,0, + 434,435,3,212,106,0,435,51,1,0,0,0,436,437,5,89,0,0,437,438,5,2, + 0,0,438,446,3,226,113,0,439,440,5,89,0,0,440,441,5,2,0,0,441,446, + 5,160,0,0,442,443,5,88,0,0,443,444,5,2,0,0,444,446,3,212,106,0,445, + 436,1,0,0,0,445,439,1,0,0,0,445,442,1,0,0,0,446,53,1,0,0,0,447,448, + 
5,97,0,0,448,449,5,2,0,0,449,450,3,64,32,0,450,55,1,0,0,0,451,452, + 5,98,0,0,452,453,5,2,0,0,453,454,5,5,0,0,454,455,3,58,29,0,455,456, + 5,6,0,0,456,57,1,0,0,0,457,458,5,99,0,0,458,461,5,2,0,0,459,462, + 3,226,113,0,460,462,3,228,114,0,461,459,1,0,0,0,461,460,1,0,0,0, + 462,467,1,0,0,0,463,464,5,100,0,0,464,465,5,2,0,0,465,467,3,214, + 107,0,466,457,1,0,0,0,466,463,1,0,0,0,467,59,1,0,0,0,468,469,5,75, + 0,0,469,470,5,2,0,0,470,478,3,226,113,0,471,472,5,75,0,0,472,473, + 5,2,0,0,473,478,5,160,0,0,474,475,5,76,0,0,475,476,5,2,0,0,476,478, + 3,212,106,0,477,468,1,0,0,0,477,471,1,0,0,0,477,474,1,0,0,0,478, + 61,1,0,0,0,479,480,5,77,0,0,480,481,5,2,0,0,481,489,3,226,113,0, + 482,483,5,77,0,0,483,484,5,2,0,0,484,489,5,160,0,0,485,486,5,78, + 0,0,486,487,5,2,0,0,487,489,3,212,106,0,488,479,1,0,0,0,488,482, + 1,0,0,0,488,485,1,0,0,0,489,63,1,0,0,0,490,491,5,5,0,0,491,496,3, + 66,33,0,492,493,5,1,0,0,493,495,3,66,33,0,494,492,1,0,0,0,495,498, + 1,0,0,0,496,494,1,0,0,0,496,497,1,0,0,0,497,499,1,0,0,0,498,496, + 1,0,0,0,499,500,5,6,0,0,500,504,1,0,0,0,501,502,5,5,0,0,502,504, + 5,6,0,0,503,490,1,0,0,0,503,501,1,0,0,0,504,65,1,0,0,0,505,506,5, + 153,0,0,506,507,5,2,0,0,507,513,3,214,107,0,508,509,3,228,114,0, + 509,510,5,2,0,0,510,511,3,70,35,0,511,513,1,0,0,0,512,505,1,0,0, + 0,512,508,1,0,0,0,513,67,1,0,0,0,514,515,5,3,0,0,515,520,3,70,35, + 0,516,517,5,1,0,0,517,519,3,70,35,0,518,516,1,0,0,0,519,522,1,0, + 0,0,520,518,1,0,0,0,520,521,1,0,0,0,521,523,1,0,0,0,522,520,1,0, + 0,0,523,524,5,4,0,0,524,528,1,0,0,0,525,526,5,3,0,0,526,528,5,4, + 0,0,527,514,1,0,0,0,527,525,1,0,0,0,528,69,1,0,0,0,529,533,3,68, + 34,0,530,533,3,64,32,0,531,533,3,72,36,0,532,529,1,0,0,0,532,530, + 1,0,0,0,532,531,1,0,0,0,533,71,1,0,0,0,534,540,5,161,0,0,535,540, + 5,160,0,0,536,540,7,1,0,0,537,540,5,9,0,0,538,540,3,228,114,0,539, + 534,1,0,0,0,539,535,1,0,0,0,539,536,1,0,0,0,539,537,1,0,0,0,539, + 538,1,0,0,0,540,73,1,0,0,0,541,542,5,134,0,0,542,543,5,2,0,0,543, + 544,3,76,38,0,544,75,1,0,0,0,545,546,5,5,0,0,546,559,5,6,0,0,547, + 548,5,5,0,0,548,553,3,78,39,0,549,550,5,1,0,0,550,552,3,78,39,0, + 551,549,1,0,0,0,552,555,1,0,0,0,553,551,1,0,0,0,553,554,1,0,0,0, + 554,556,1,0,0,0,555,553,1,0,0,0,556,557,5,6,0,0,557,559,1,0,0,0, + 558,545,1,0,0,0,558,547,1,0,0,0,559,77,1,0,0,0,560,561,3,82,41,0, + 561,79,1,0,0,0,562,563,5,5,0,0,563,576,5,6,0,0,564,565,5,5,0,0,565, + 570,3,82,41,0,566,567,5,1,0,0,567,569,3,82,41,0,568,566,1,0,0,0, + 569,572,1,0,0,0,570,568,1,0,0,0,570,571,1,0,0,0,571,573,1,0,0,0, + 572,570,1,0,0,0,573,574,5,6,0,0,574,576,1,0,0,0,575,562,1,0,0,0, + 575,564,1,0,0,0,576,81,1,0,0,0,577,578,5,153,0,0,578,579,5,2,0,0, + 579,585,3,214,107,0,580,581,3,228,114,0,581,582,5,2,0,0,582,583, + 3,84,42,0,583,585,1,0,0,0,584,577,1,0,0,0,584,580,1,0,0,0,585,83, + 1,0,0,0,586,590,3,80,40,0,587,590,3,86,43,0,588,590,3,88,44,0,589, + 586,1,0,0,0,589,587,1,0,0,0,589,588,1,0,0,0,590,85,1,0,0,0,591,592, + 5,3,0,0,592,605,5,4,0,0,593,594,5,3,0,0,594,599,3,84,42,0,595,596, + 5,1,0,0,596,598,3,84,42,0,597,595,1,0,0,0,598,601,1,0,0,0,599,597, + 1,0,0,0,599,600,1,0,0,0,600,602,1,0,0,0,601,599,1,0,0,0,602,603, + 5,4,0,0,603,605,1,0,0,0,604,591,1,0,0,0,604,593,1,0,0,0,605,87,1, + 0,0,0,606,613,5,161,0,0,607,613,5,160,0,0,608,613,7,1,0,0,609,613, + 5,9,0,0,610,613,3,226,113,0,611,613,3,228,114,0,612,606,1,0,0,0, + 612,607,1,0,0,0,612,608,1,0,0,0,612,609,1,0,0,0,612,610,1,0,0,0, + 612,611,1,0,0,0,613,89,1,0,0,0,614,615,5,136,0,0,615,616,5,2,0,0, + 616,621,3,94,47,0,617,618,5,136,0,0,618,619,5,2,0,0,619,621,3,226, + 
113,0,620,614,1,0,0,0,620,617,1,0,0,0,621,91,1,0,0,0,622,623,5,135, + 0,0,623,624,5,2,0,0,624,625,3,98,49,0,625,93,1,0,0,0,626,627,5,5, + 0,0,627,640,5,6,0,0,628,629,5,5,0,0,629,634,3,96,48,0,630,631,5, + 1,0,0,631,633,3,96,48,0,632,630,1,0,0,0,633,636,1,0,0,0,634,632, + 1,0,0,0,634,635,1,0,0,0,635,637,1,0,0,0,636,634,1,0,0,0,637,638, + 5,6,0,0,638,640,1,0,0,0,639,626,1,0,0,0,639,628,1,0,0,0,640,95,1, + 0,0,0,641,642,3,228,114,0,642,643,5,2,0,0,643,644,3,98,49,0,644, + 97,1,0,0,0,645,649,3,94,47,0,646,649,3,100,50,0,647,649,3,102,51, + 0,648,645,1,0,0,0,648,646,1,0,0,0,648,647,1,0,0,0,649,99,1,0,0,0, + 650,651,5,3,0,0,651,664,5,4,0,0,652,653,5,3,0,0,653,658,3,98,49, + 0,654,655,5,1,0,0,655,657,3,98,49,0,656,654,1,0,0,0,657,660,1,0, + 0,0,658,656,1,0,0,0,658,659,1,0,0,0,659,661,1,0,0,0,660,658,1,0, + 0,0,661,662,5,4,0,0,662,664,1,0,0,0,663,650,1,0,0,0,663,652,1,0, + 0,0,664,101,1,0,0,0,665,672,5,161,0,0,666,672,5,160,0,0,667,672, + 7,1,0,0,668,672,5,9,0,0,669,672,3,226,113,0,670,672,3,228,114,0, + 671,665,1,0,0,0,671,666,1,0,0,0,671,667,1,0,0,0,671,668,1,0,0,0, + 671,669,1,0,0,0,671,670,1,0,0,0,672,103,1,0,0,0,673,674,5,101,0, + 0,674,675,5,2,0,0,675,676,3,64,32,0,676,105,1,0,0,0,677,678,7,2, + 0,0,678,107,1,0,0,0,679,680,5,24,0,0,680,681,5,2,0,0,681,682,5,3, + 0,0,682,687,3,110,55,0,683,684,5,1,0,0,684,686,3,110,55,0,685,683, + 1,0,0,0,686,689,1,0,0,0,687,685,1,0,0,0,687,688,1,0,0,0,688,690, + 1,0,0,0,689,687,1,0,0,0,690,691,5,4,0,0,691,109,1,0,0,0,692,693, + 5,5,0,0,693,696,3,112,56,0,694,695,5,1,0,0,695,697,3,112,56,0,696, + 694,1,0,0,0,697,698,1,0,0,0,698,696,1,0,0,0,698,699,1,0,0,0,699, + 700,1,0,0,0,700,701,5,6,0,0,701,714,1,0,0,0,702,703,5,5,0,0,703, + 708,3,114,57,0,704,705,5,1,0,0,705,707,3,114,57,0,706,704,1,0,0, + 0,707,710,1,0,0,0,708,706,1,0,0,0,708,709,1,0,0,0,709,711,1,0,0, + 0,710,708,1,0,0,0,711,712,5,6,0,0,712,714,1,0,0,0,713,692,1,0,0, + 0,713,702,1,0,0,0,714,111,1,0,0,0,715,722,3,118,59,0,716,722,3,120, + 60,0,717,722,3,24,12,0,718,722,3,74,37,0,719,722,3,92,46,0,720,722, + 3,8,4,0,721,715,1,0,0,0,721,716,1,0,0,0,721,717,1,0,0,0,721,718, + 1,0,0,0,721,719,1,0,0,0,721,720,1,0,0,0,722,113,1,0,0,0,723,728, + 3,116,58,0,724,728,3,24,12,0,725,728,3,74,37,0,726,728,3,8,4,0,727, + 723,1,0,0,0,727,724,1,0,0,0,727,725,1,0,0,0,727,726,1,0,0,0,728, + 115,1,0,0,0,729,730,3,198,99,0,730,743,5,2,0,0,731,744,3,110,55, + 0,732,733,5,3,0,0,733,738,3,110,55,0,734,735,5,1,0,0,735,737,3,110, + 55,0,736,734,1,0,0,0,737,740,1,0,0,0,738,736,1,0,0,0,738,739,1,0, + 0,0,739,741,1,0,0,0,740,738,1,0,0,0,741,742,5,4,0,0,742,744,1,0, + 0,0,743,731,1,0,0,0,743,732,1,0,0,0,744,117,1,0,0,0,745,746,5,26, + 0,0,746,747,5,2,0,0,747,748,3,212,106,0,748,119,1,0,0,0,749,750, + 5,25,0,0,750,751,5,2,0,0,751,764,7,1,0,0,752,753,5,25,0,0,753,754, + 5,2,0,0,754,764,3,226,113,0,755,756,3,196,98,0,756,757,5,2,0,0,757, + 758,3,222,111,0,758,764,1,0,0,0,759,760,3,196,98,0,760,761,5,2,0, + 0,761,762,3,210,105,0,762,764,1,0,0,0,763,749,1,0,0,0,763,752,1, + 0,0,0,763,755,1,0,0,0,763,759,1,0,0,0,764,121,1,0,0,0,765,766,5, + 28,0,0,766,767,5,2,0,0,767,768,5,3,0,0,768,773,3,2,1,0,769,770,5, + 1,0,0,770,772,3,2,1,0,771,769,1,0,0,0,772,775,1,0,0,0,773,771,1, + 0,0,0,773,774,1,0,0,0,774,776,1,0,0,0,775,773,1,0,0,0,776,777,5, + 4,0,0,777,123,1,0,0,0,778,779,5,85,0,0,779,780,5,2,0,0,780,781,5, + 5,0,0,781,786,3,126,63,0,782,783,5,1,0,0,783,785,3,126,63,0,784, + 782,1,0,0,0,785,788,1,0,0,0,786,784,1,0,0,0,786,787,1,0,0,0,787, + 789,1,0,0,0,788,786,1,0,0,0,789,790,5,6,0,0,790,125,1,0,0,0,791, + 
796,3,128,64,0,792,796,3,6,3,0,793,796,3,16,8,0,794,796,3,8,4,0, + 795,791,1,0,0,0,795,792,1,0,0,0,795,793,1,0,0,0,795,794,1,0,0,0, + 796,127,1,0,0,0,797,798,5,79,0,0,798,799,5,2,0,0,799,800,5,5,0,0, + 800,805,3,130,65,0,801,802,5,1,0,0,802,804,3,130,65,0,803,801,1, + 0,0,0,804,807,1,0,0,0,805,803,1,0,0,0,805,806,1,0,0,0,806,808,1, + 0,0,0,807,805,1,0,0,0,808,809,5,6,0,0,809,129,1,0,0,0,810,813,3, + 132,66,0,811,813,3,136,68,0,812,810,1,0,0,0,812,811,1,0,0,0,813, + 131,1,0,0,0,814,815,5,80,0,0,815,816,5,2,0,0,816,817,3,134,67,0, + 817,133,1,0,0,0,818,819,7,3,0,0,819,135,1,0,0,0,820,821,5,83,0,0, + 821,822,5,2,0,0,822,823,3,138,69,0,823,137,1,0,0,0,824,825,5,84, + 0,0,825,139,1,0,0,0,826,827,5,86,0,0,827,828,5,2,0,0,828,829,5,5, + 0,0,829,834,3,142,71,0,830,831,5,1,0,0,831,833,3,142,71,0,832,830, + 1,0,0,0,833,836,1,0,0,0,834,832,1,0,0,0,834,835,1,0,0,0,835,837, + 1,0,0,0,836,834,1,0,0,0,837,838,5,6,0,0,838,141,1,0,0,0,839,844, + 3,6,3,0,840,844,3,16,8,0,841,844,3,8,4,0,842,844,3,128,64,0,843, + 839,1,0,0,0,843,840,1,0,0,0,843,841,1,0,0,0,843,842,1,0,0,0,844, + 143,1,0,0,0,845,846,5,87,0,0,846,847,5,2,0,0,847,848,3,80,40,0,848, + 145,1,0,0,0,849,850,5,102,0,0,850,851,5,2,0,0,851,852,5,5,0,0,852, + 857,3,148,74,0,853,854,5,1,0,0,854,856,3,148,74,0,855,853,1,0,0, + 0,856,859,1,0,0,0,857,855,1,0,0,0,857,858,1,0,0,0,858,860,1,0,0, + 0,859,857,1,0,0,0,860,861,5,6,0,0,861,147,1,0,0,0,862,867,3,26,13, + 0,863,867,3,150,75,0,864,867,3,54,27,0,865,867,3,90,45,0,866,862, + 1,0,0,0,866,863,1,0,0,0,866,864,1,0,0,0,866,865,1,0,0,0,867,149, + 1,0,0,0,868,869,5,103,0,0,869,870,5,2,0,0,870,871,5,5,0,0,871,876, + 3,152,76,0,872,873,5,1,0,0,873,875,3,152,76,0,874,872,1,0,0,0,875, + 878,1,0,0,0,876,874,1,0,0,0,876,877,1,0,0,0,877,879,1,0,0,0,878, + 876,1,0,0,0,879,880,5,6,0,0,880,151,1,0,0,0,881,886,3,154,77,0,882, + 886,3,156,78,0,883,886,3,158,79,0,884,886,3,160,80,0,885,881,1,0, + 0,0,885,882,1,0,0,0,885,883,1,0,0,0,885,884,1,0,0,0,886,153,1,0, + 0,0,887,888,5,104,0,0,888,889,5,2,0,0,889,890,3,228,114,0,890,155, + 1,0,0,0,891,892,5,105,0,0,892,893,5,2,0,0,893,894,3,228,114,0,894, + 157,1,0,0,0,895,896,5,106,0,0,896,897,5,2,0,0,897,898,5,3,0,0,898, + 903,3,228,114,0,899,900,5,1,0,0,900,902,3,228,114,0,901,899,1,0, + 0,0,902,905,1,0,0,0,903,901,1,0,0,0,903,904,1,0,0,0,904,906,1,0, + 0,0,905,903,1,0,0,0,906,907,5,4,0,0,907,159,1,0,0,0,908,909,5,107, + 0,0,909,910,5,2,0,0,910,918,3,226,113,0,911,912,5,107,0,0,912,913, + 5,2,0,0,913,918,5,160,0,0,914,915,5,108,0,0,915,916,5,2,0,0,916, + 918,3,212,106,0,917,908,1,0,0,0,917,911,1,0,0,0,917,914,1,0,0,0, + 918,161,1,0,0,0,919,920,5,109,0,0,920,921,5,2,0,0,921,929,3,226, + 113,0,922,923,5,109,0,0,923,924,5,2,0,0,924,929,5,160,0,0,925,926, + 5,110,0,0,926,927,5,2,0,0,927,929,3,212,106,0,928,919,1,0,0,0,928, + 922,1,0,0,0,928,925,1,0,0,0,929,163,1,0,0,0,930,931,5,111,0,0,931, + 932,5,2,0,0,932,940,3,226,113,0,933,934,5,111,0,0,934,935,5,2,0, + 0,935,940,5,161,0,0,936,937,5,112,0,0,937,938,5,2,0,0,938,940,3, + 212,106,0,939,930,1,0,0,0,939,933,1,0,0,0,939,936,1,0,0,0,940,165, + 1,0,0,0,941,942,5,113,0,0,942,943,5,2,0,0,943,944,3,228,114,0,944, + 167,1,0,0,0,945,946,5,114,0,0,946,947,5,2,0,0,947,948,5,5,0,0,948, + 953,3,170,85,0,949,950,5,1,0,0,950,952,3,170,85,0,951,949,1,0,0, + 0,952,955,1,0,0,0,953,951,1,0,0,0,953,954,1,0,0,0,954,956,1,0,0, + 0,955,953,1,0,0,0,956,957,5,6,0,0,957,169,1,0,0,0,958,961,3,26,13, + 0,959,961,3,54,27,0,960,958,1,0,0,0,960,959,1,0,0,0,961,171,1,0, + 0,0,962,963,5,121,0,0,963,964,5,2,0,0,964,973,5,3,0,0,965,970,3, + 
174,87,0,966,967,5,1,0,0,967,969,3,174,87,0,968,966,1,0,0,0,969, + 972,1,0,0,0,970,968,1,0,0,0,970,971,1,0,0,0,971,974,1,0,0,0,972, + 970,1,0,0,0,973,965,1,0,0,0,973,974,1,0,0,0,974,975,1,0,0,0,975, + 976,5,4,0,0,976,173,1,0,0,0,977,978,5,5,0,0,978,983,3,176,88,0,979, + 980,5,1,0,0,980,982,3,176,88,0,981,979,1,0,0,0,982,985,1,0,0,0,983, + 981,1,0,0,0,983,984,1,0,0,0,984,986,1,0,0,0,985,983,1,0,0,0,986, + 987,5,6,0,0,987,175,1,0,0,0,988,996,3,178,89,0,989,996,3,180,90, + 0,990,996,3,182,91,0,991,996,3,184,92,0,992,996,3,186,93,0,993,996, + 3,188,94,0,994,996,3,8,4,0,995,988,1,0,0,0,995,989,1,0,0,0,995,990, + 1,0,0,0,995,991,1,0,0,0,995,992,1,0,0,0,995,993,1,0,0,0,995,994, + 1,0,0,0,996,177,1,0,0,0,997,998,5,122,0,0,998,999,5,2,0,0,999,1000, + 5,3,0,0,1000,1005,3,202,101,0,1001,1002,5,1,0,0,1002,1004,3,202, + 101,0,1003,1001,1,0,0,0,1004,1007,1,0,0,0,1005,1003,1,0,0,0,1005, + 1006,1,0,0,0,1006,1008,1,0,0,0,1007,1005,1,0,0,0,1008,1009,5,4,0, + 0,1009,179,1,0,0,0,1010,1011,5,123,0,0,1011,1012,5,2,0,0,1012,1013, + 5,160,0,0,1013,181,1,0,0,0,1014,1015,5,124,0,0,1015,1016,5,2,0,0, + 1016,1017,5,160,0,0,1017,183,1,0,0,0,1018,1019,5,125,0,0,1019,1020, + 5,2,0,0,1020,1021,7,4,0,0,1021,185,1,0,0,0,1022,1023,5,126,0,0,1023, + 1024,5,2,0,0,1024,1025,5,160,0,0,1025,187,1,0,0,0,1026,1027,5,127, + 0,0,1027,1028,5,2,0,0,1028,1029,7,5,0,0,1029,189,1,0,0,0,1030,1031, + 5,130,0,0,1031,1032,5,2,0,0,1032,1041,5,3,0,0,1033,1038,3,192,96, + 0,1034,1035,5,1,0,0,1035,1037,3,192,96,0,1036,1034,1,0,0,0,1037, + 1040,1,0,0,0,1038,1036,1,0,0,0,1038,1039,1,0,0,0,1039,1042,1,0,0, + 0,1040,1038,1,0,0,0,1041,1033,1,0,0,0,1041,1042,1,0,0,0,1042,1043, + 1,0,0,0,1043,1044,5,4,0,0,1044,191,1,0,0,0,1045,1046,5,5,0,0,1046, + 1051,3,194,97,0,1047,1048,5,1,0,0,1048,1050,3,194,97,0,1049,1047, + 1,0,0,0,1050,1053,1,0,0,0,1051,1049,1,0,0,0,1051,1052,1,0,0,0,1052, + 1054,1,0,0,0,1053,1051,1,0,0,0,1054,1055,5,6,0,0,1055,193,1,0,0, + 0,1056,1063,3,178,89,0,1057,1063,3,32,16,0,1058,1063,3,24,12,0,1059, + 1063,3,74,37,0,1060,1063,3,92,46,0,1061,1063,3,8,4,0,1062,1056,1, + 0,0,0,1062,1057,1,0,0,0,1062,1058,1,0,0,0,1062,1059,1,0,0,0,1062, + 1060,1,0,0,0,1062,1061,1,0,0,0,1063,195,1,0,0,0,1064,1065,7,6,0, + 0,1065,197,1,0,0,0,1066,1067,7,7,0,0,1067,199,1,0,0,0,1068,1069, + 7,8,0,0,1069,201,1,0,0,0,1070,1073,3,200,100,0,1071,1073,3,228,114, + 0,1072,1070,1,0,0,0,1072,1071,1,0,0,0,1073,203,1,0,0,0,1074,1075, + 5,5,0,0,1075,1080,3,206,103,0,1076,1077,5,1,0,0,1077,1079,3,206, + 103,0,1078,1076,1,0,0,0,1079,1082,1,0,0,0,1080,1078,1,0,0,0,1080, + 1081,1,0,0,0,1081,1083,1,0,0,0,1082,1080,1,0,0,0,1083,1084,5,6,0, + 0,1084,1088,1,0,0,0,1085,1086,5,5,0,0,1086,1088,5,6,0,0,1087,1074, + 1,0,0,0,1087,1085,1,0,0,0,1088,205,1,0,0,0,1089,1090,3,228,114,0, + 1090,1091,5,2,0,0,1091,1092,3,210,105,0,1092,207,1,0,0,0,1093,1094, + 5,3,0,0,1094,1099,3,210,105,0,1095,1096,5,1,0,0,1096,1098,3,210, + 105,0,1097,1095,1,0,0,0,1098,1101,1,0,0,0,1099,1097,1,0,0,0,1099, + 1100,1,0,0,0,1100,1102,1,0,0,0,1101,1099,1,0,0,0,1102,1103,5,4,0, + 0,1103,1107,1,0,0,0,1104,1105,5,3,0,0,1105,1107,5,4,0,0,1106,1093, + 1,0,0,0,1106,1104,1,0,0,0,1107,209,1,0,0,0,1108,1118,5,161,0,0,1109, + 1118,5,160,0,0,1110,1118,5,7,0,0,1111,1118,5,8,0,0,1112,1118,5,9, + 0,0,1113,1118,3,206,103,0,1114,1118,3,208,104,0,1115,1118,3,204, + 102,0,1116,1118,3,228,114,0,1117,1108,1,0,0,0,1117,1109,1,0,0,0, + 1117,1110,1,0,0,0,1117,1111,1,0,0,0,1117,1112,1,0,0,0,1117,1113, + 1,0,0,0,1117,1114,1,0,0,0,1117,1115,1,0,0,0,1117,1116,1,0,0,0,1118, + 
211,1,0,0,0,1119,1123,3,218,109,0,1120,1123,3,220,110,0,1121,1123, + 3,222,111,0,1122,1119,1,0,0,0,1122,1120,1,0,0,0,1122,1121,1,0,0, + 0,1123,213,1,0,0,0,1124,1127,3,212,106,0,1125,1127,3,224,112,0,1126, + 1124,1,0,0,0,1126,1125,1,0,0,0,1127,215,1,0,0,0,1128,1131,3,214, + 107,0,1129,1131,3,226,113,0,1130,1128,1,0,0,0,1130,1129,1,0,0,0, + 1131,217,1,0,0,0,1132,1133,5,155,0,0,1133,219,1,0,0,0,1134,1135, + 5,154,0,0,1135,221,1,0,0,0,1136,1137,5,156,0,0,1137,223,1,0,0,0, + 1138,1139,5,157,0,0,1139,225,1,0,0,0,1140,1141,5,158,0,0,1141,227, + 1,0,0,0,1142,1150,5,159,0,0,1143,1150,5,153,0,0,1144,1150,3,230, + 115,0,1145,1150,3,196,98,0,1146,1150,3,198,99,0,1147,1150,3,200, + 100,0,1148,1150,3,216,108,0,1149,1142,1,0,0,0,1149,1143,1,0,0,0, + 1149,1144,1,0,0,0,1149,1145,1,0,0,0,1149,1146,1,0,0,0,1149,1147, + 1,0,0,0,1149,1148,1,0,0,0,1150,229,1,0,0,0,1151,1152,7,9,0,0,1152, + 231,1,0,0,0,89,241,252,308,318,333,354,364,370,384,389,395,400,411, + 417,422,430,445,461,466,477,488,496,503,512,520,527,532,539,553, + 558,570,575,584,589,599,604,612,620,634,639,648,658,663,671,687, + 698,708,713,721,727,738,743,763,773,786,795,805,812,834,843,857, + 866,876,885,903,917,928,939,953,960,970,973,983,995,1005,1038,1041, + 1051,1062,1072,1080,1087,1099,1106,1117,1122,1126,1130,1149 + ] + +class ASLParser ( Parser ): + + grammarFileName = "ASLParser.g4" + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + sharedContextCache = PredictionContextCache() + + literalNames = [ "", "','", "':'", "'['", "']'", "'{'", "'}'", + "'true'", "'false'", "'null'", "'\"Comment\"'", "'\"States\"'", + "'\"StartAt\"'", "'\"NextState\"'", "'\"Version\"'", + "'\"Type\"'", "'\"Task\"'", "'\"Choice\"'", "'\"Fail\"'", + "'\"Succeed\"'", "'\"Pass\"'", "'\"Wait\"'", "'\"Parallel\"'", + "'\"Map\"'", "'\"Choices\"'", "'\"Condition\"'", "'\"Variable\"'", + "'\"Default\"'", "'\"Branches\"'", "'\"And\"'", "'\"BooleanEquals\"'", + "'\"BooleanEqualsPath\"'", "'\"IsBoolean\"'", "'\"IsNull\"'", + "'\"IsNumeric\"'", "'\"IsPresent\"'", "'\"IsString\"'", + "'\"IsTimestamp\"'", "'\"Not\"'", "'\"NumericEquals\"'", + "'\"NumericEqualsPath\"'", "'\"NumericGreaterThan\"'", + "'\"NumericGreaterThanPath\"'", "'\"NumericGreaterThanEquals\"'", + "'\"NumericGreaterThanEqualsPath\"'", "'\"NumericLessThan\"'", + "'\"NumericLessThanPath\"'", "'\"NumericLessThanEquals\"'", + "'\"NumericLessThanEqualsPath\"'", "'\"Or\"'", "'\"StringEquals\"'", + "'\"StringEqualsPath\"'", "'\"StringGreaterThan\"'", + "'\"StringGreaterThanPath\"'", "'\"StringGreaterThanEquals\"'", + "'\"StringGreaterThanEqualsPath\"'", "'\"StringLessThan\"'", + "'\"StringLessThanPath\"'", "'\"StringLessThanEquals\"'", + "'\"StringLessThanEqualsPath\"'", "'\"StringMatches\"'", + "'\"TimestampEquals\"'", "'\"TimestampEqualsPath\"'", + "'\"TimestampGreaterThan\"'", "'\"TimestampGreaterThanPath\"'", + "'\"TimestampGreaterThanEquals\"'", "'\"TimestampGreaterThanEqualsPath\"'", + "'\"TimestampLessThan\"'", "'\"TimestampLessThanPath\"'", + "'\"TimestampLessThanEquals\"'", "'\"TimestampLessThanEqualsPath\"'", + "'\"SecondsPath\"'", "'\"Seconds\"'", "'\"TimestampPath\"'", + "'\"Timestamp\"'", "'\"TimeoutSeconds\"'", "'\"TimeoutSecondsPath\"'", + "'\"HeartbeatSeconds\"'", "'\"HeartbeatSecondsPath\"'", + "'\"ProcessorConfig\"'", "'\"Mode\"'", "'\"INLINE\"'", + "'\"DISTRIBUTED\"'", "'\"ExecutionType\"'", "'\"STANDARD\"'", + "'\"ItemProcessor\"'", "'\"Iterator\"'", "'\"ItemSelector\"'", + "'\"MaxConcurrencyPath\"'", 
"'\"MaxConcurrency\"'", + "'\"Resource\"'", "'\"InputPath\"'", "'\"OutputPath\"'", + "'\"Items\"'", "'\"ItemsPath\"'", "'\"ResultPath\"'", + "'\"Result\"'", "'\"Parameters\"'", "'\"Credentials\"'", + "'\"RoleArn\"'", "'\"RoleArn.$\"'", "'\"ResultSelector\"'", + "'\"ItemReader\"'", "'\"ReaderConfig\"'", "'\"InputType\"'", + "'\"CSVHeaderLocation\"'", "'\"CSVHeaders\"'", "'\"MaxItems\"'", + "'\"MaxItemsPath\"'", "'\"ToleratedFailureCount\"'", + "'\"ToleratedFailureCountPath\"'", "'\"ToleratedFailurePercentage\"'", + "'\"ToleratedFailurePercentagePath\"'", "'\"Label\"'", + "'\"ResultWriter\"'", "'\"Next\"'", "'\"End\"'", "'\"Cause\"'", + "'\"CausePath\"'", "'\"Error\"'", "'\"ErrorPath\"'", + "'\"Retry\"'", "'\"ErrorEquals\"'", "'\"IntervalSeconds\"'", + "'\"MaxAttempts\"'", "'\"BackoffRate\"'", "'\"MaxDelaySeconds\"'", + "'\"JitterStrategy\"'", "'\"FULL\"'", "'\"NONE\"'", + "'\"Catch\"'", "'\"QueryLanguage\"'", "'\"JSONPath\"'", + "'\"JSONata\"'", "'\"Assign\"'", "'\"Output\"'", "'\"Arguments\"'", + "'\"States.ALL\"'", "'\"States.DataLimitExceeded\"'", + "'\"States.HeartbeatTimeout\"'", "'\"States.Timeout\"'", + "'\"States.TaskFailed\"'", "'\"States.Permissions\"'", + "'\"States.ResultPathMatchFailure\"'", "'\"States.ParameterPathFailure\"'", + "'\"States.BranchFailed\"'", "'\"States.NoChoiceMatched\"'", + "'\"States.IntrinsicFailure\"'", "'\"States.ExceedToleratedFailureThreshold\"'", + "'\"States.ItemReaderFailed\"'", "'\"States.ResultWriterFailed\"'", + "'\"States.QueryEvaluationError\"'", "'\"States.Runtime\"'" ] + + symbolicNames = [ "", "COMMA", "COLON", "LBRACK", "RBRACK", + "LBRACE", "RBRACE", "TRUE", "FALSE", "NULL", "COMMENT", + "STATES", "STARTAT", "NEXTSTATE", "VERSION", "TYPE", + "TASK", "CHOICE", "FAIL", "SUCCEED", "PASS", "WAIT", + "PARALLEL", "MAP", "CHOICES", "CONDITION", "VARIABLE", + "DEFAULT", "BRANCHES", "AND", "BOOLEANEQUALS", "BOOLEANQUALSPATH", + "ISBOOLEAN", "ISNULL", "ISNUMERIC", "ISPRESENT", "ISSTRING", + "ISTIMESTAMP", "NOT", "NUMERICEQUALS", "NUMERICEQUALSPATH", + "NUMERICGREATERTHAN", "NUMERICGREATERTHANPATH", "NUMERICGREATERTHANEQUALS", + "NUMERICGREATERTHANEQUALSPATH", "NUMERICLESSTHAN", + "NUMERICLESSTHANPATH", "NUMERICLESSTHANEQUALS", "NUMERICLESSTHANEQUALSPATH", + "OR", "STRINGEQUALS", "STRINGEQUALSPATH", "STRINGGREATERTHAN", + "STRINGGREATERTHANPATH", "STRINGGREATERTHANEQUALS", + "STRINGGREATERTHANEQUALSPATH", "STRINGLESSTHAN", "STRINGLESSTHANPATH", + "STRINGLESSTHANEQUALS", "STRINGLESSTHANEQUALSPATH", + "STRINGMATCHES", "TIMESTAMPEQUALS", "TIMESTAMPEQUALSPATH", + "TIMESTAMPGREATERTHAN", "TIMESTAMPGREATERTHANPATH", + "TIMESTAMPGREATERTHANEQUALS", "TIMESTAMPGREATERTHANEQUALSPATH", + "TIMESTAMPLESSTHAN", "TIMESTAMPLESSTHANPATH", "TIMESTAMPLESSTHANEQUALS", + "TIMESTAMPLESSTHANEQUALSPATH", "SECONDSPATH", "SECONDS", + "TIMESTAMPPATH", "TIMESTAMP", "TIMEOUTSECONDS", "TIMEOUTSECONDSPATH", + "HEARTBEATSECONDS", "HEARTBEATSECONDSPATH", "PROCESSORCONFIG", + "MODE", "INLINE", "DISTRIBUTED", "EXECUTIONTYPE", + "STANDARD", "ITEMPROCESSOR", "ITERATOR", "ITEMSELECTOR", + "MAXCONCURRENCYPATH", "MAXCONCURRENCY", "RESOURCE", + "INPUTPATH", "OUTPUTPATH", "ITEMS", "ITEMSPATH", "RESULTPATH", + "RESULT", "PARAMETERS", "CREDENTIALS", "ROLEARN", + "ROLEARNPATH", "RESULTSELECTOR", "ITEMREADER", "READERCONFIG", + "INPUTTYPE", "CSVHEADERLOCATION", "CSVHEADERS", "MAXITEMS", + "MAXITEMSPATH", "TOLERATEDFAILURECOUNT", "TOLERATEDFAILURECOUNTPATH", + "TOLERATEDFAILUREPERCENTAGE", "TOLERATEDFAILUREPERCENTAGEPATH", + "LABEL", "RESULTWRITER", "NEXT", "END", "CAUSE", "CAUSEPATH", + "ERROR", 
"ERRORPATH", "RETRY", "ERROREQUALS", "INTERVALSECONDS", + "MAXATTEMPTS", "BACKOFFRATE", "MAXDELAYSECONDS", "JITTERSTRATEGY", + "FULL", "NONE", "CATCH", "QUERYLANGUAGE", "JSONPATH", + "JSONATA", "ASSIGN", "OUTPUT", "ARGUMENTS", "ERRORNAMEStatesALL", + "ERRORNAMEStatesDataLimitExceeded", "ERRORNAMEStatesHeartbeatTimeout", + "ERRORNAMEStatesTimeout", "ERRORNAMEStatesTaskFailed", + "ERRORNAMEStatesPermissions", "ERRORNAMEStatesResultPathMatchFailure", + "ERRORNAMEStatesParameterPathFailure", "ERRORNAMEStatesBranchFailed", + "ERRORNAMEStatesNoChoiceMatched", "ERRORNAMEStatesIntrinsicFailure", + "ERRORNAMEStatesExceedToleratedFailureThreshold", + "ERRORNAMEStatesItemReaderFailed", "ERRORNAMEStatesResultWriterFailed", + "ERRORNAMEStatesQueryEvaluationError", "ERRORNAMEStatesRuntime", + "STRINGDOLLAR", "STRINGPATHCONTEXTOBJ", "STRINGPATH", + "STRINGVAR", "STRINGINTRINSICFUNC", "STRINGJSONATA", + "STRING", "INT", "NUMBER", "WS" ] + + RULE_state_machine = 0 + RULE_program_decl = 1 + RULE_top_layer_stmt = 2 + RULE_startat_decl = 3 + RULE_comment_decl = 4 + RULE_version_decl = 5 + RULE_query_language_decl = 6 + RULE_state_stmt = 7 + RULE_states_decl = 8 + RULE_state_decl = 9 + RULE_state_decl_body = 10 + RULE_type_decl = 11 + RULE_next_decl = 12 + RULE_resource_decl = 13 + RULE_input_path_decl = 14 + RULE_result_decl = 15 + RULE_result_path_decl = 16 + RULE_output_path_decl = 17 + RULE_end_decl = 18 + RULE_default_decl = 19 + RULE_error_decl = 20 + RULE_cause_decl = 21 + RULE_seconds_decl = 22 + RULE_timestamp_decl = 23 + RULE_items_decl = 24 + RULE_items_path_decl = 25 + RULE_max_concurrency_decl = 26 + RULE_parameters_decl = 27 + RULE_credentials_decl = 28 + RULE_role_arn_decl = 29 + RULE_timeout_seconds_decl = 30 + RULE_heartbeat_seconds_decl = 31 + RULE_payload_tmpl_decl = 32 + RULE_payload_binding = 33 + RULE_payload_arr_decl = 34 + RULE_payload_value_decl = 35 + RULE_payload_value_lit = 36 + RULE_assign_decl = 37 + RULE_assign_decl_body = 38 + RULE_assign_decl_binding = 39 + RULE_assign_template_value_object = 40 + RULE_assign_template_binding = 41 + RULE_assign_template_value = 42 + RULE_assign_template_value_array = 43 + RULE_assign_template_value_terminal = 44 + RULE_arguments_decl = 45 + RULE_output_decl = 46 + RULE_jsonata_template_value_object = 47 + RULE_jsonata_template_binding = 48 + RULE_jsonata_template_value = 49 + RULE_jsonata_template_value_array = 50 + RULE_jsonata_template_value_terminal = 51 + RULE_result_selector_decl = 52 + RULE_state_type = 53 + RULE_choices_decl = 54 + RULE_choice_rule = 55 + RULE_comparison_variable_stmt = 56 + RULE_comparison_composite_stmt = 57 + RULE_comparison_composite = 58 + RULE_variable_decl = 59 + RULE_comparison_func = 60 + RULE_branches_decl = 61 + RULE_item_processor_decl = 62 + RULE_item_processor_item = 63 + RULE_processor_config_decl = 64 + RULE_processor_config_field = 65 + RULE_mode_decl = 66 + RULE_mode_type = 67 + RULE_execution_decl = 68 + RULE_execution_type = 69 + RULE_iterator_decl = 70 + RULE_iterator_decl_item = 71 + RULE_item_selector_decl = 72 + RULE_item_reader_decl = 73 + RULE_items_reader_field = 74 + RULE_reader_config_decl = 75 + RULE_reader_config_field = 76 + RULE_input_type_decl = 77 + RULE_csv_header_location_decl = 78 + RULE_csv_headers_decl = 79 + RULE_max_items_decl = 80 + RULE_tolerated_failure_count_decl = 81 + RULE_tolerated_failure_percentage_decl = 82 + RULE_label_decl = 83 + RULE_result_writer_decl = 84 + RULE_result_writer_field = 85 + RULE_retry_decl = 86 + RULE_retrier_decl = 87 + RULE_retrier_stmt = 88 + 
RULE_error_equals_decl = 89 + RULE_interval_seconds_decl = 90 + RULE_max_attempts_decl = 91 + RULE_backoff_rate_decl = 92 + RULE_max_delay_seconds_decl = 93 + RULE_jitter_strategy_decl = 94 + RULE_catch_decl = 95 + RULE_catcher_decl = 96 + RULE_catcher_stmt = 97 + RULE_comparison_op = 98 + RULE_choice_operator = 99 + RULE_states_error_name = 100 + RULE_error_name = 101 + RULE_json_obj_decl = 102 + RULE_json_binding = 103 + RULE_json_arr_decl = 104 + RULE_json_value_decl = 105 + RULE_string_sampler = 106 + RULE_string_expression_simple = 107 + RULE_string_expression = 108 + RULE_string_jsonpath = 109 + RULE_string_context_path = 110 + RULE_string_variable_sample = 111 + RULE_string_intrinsic_function = 112 + RULE_string_jsonata = 113 + RULE_string_literal = 114 + RULE_soft_string_keyword = 115 + + ruleNames = [ "state_machine", "program_decl", "top_layer_stmt", "startat_decl", + "comment_decl", "version_decl", "query_language_decl", + "state_stmt", "states_decl", "state_decl", "state_decl_body", + "type_decl", "next_decl", "resource_decl", "input_path_decl", + "result_decl", "result_path_decl", "output_path_decl", + "end_decl", "default_decl", "error_decl", "cause_decl", + "seconds_decl", "timestamp_decl", "items_decl", "items_path_decl", + "max_concurrency_decl", "parameters_decl", "credentials_decl", + "role_arn_decl", "timeout_seconds_decl", "heartbeat_seconds_decl", + "payload_tmpl_decl", "payload_binding", "payload_arr_decl", + "payload_value_decl", "payload_value_lit", "assign_decl", + "assign_decl_body", "assign_decl_binding", "assign_template_value_object", + "assign_template_binding", "assign_template_value", "assign_template_value_array", + "assign_template_value_terminal", "arguments_decl", "output_decl", + "jsonata_template_value_object", "jsonata_template_binding", + "jsonata_template_value", "jsonata_template_value_array", + "jsonata_template_value_terminal", "result_selector_decl", + "state_type", "choices_decl", "choice_rule", "comparison_variable_stmt", + "comparison_composite_stmt", "comparison_composite", + "variable_decl", "comparison_func", "branches_decl", + "item_processor_decl", "item_processor_item", "processor_config_decl", + "processor_config_field", "mode_decl", "mode_type", "execution_decl", + "execution_type", "iterator_decl", "iterator_decl_item", + "item_selector_decl", "item_reader_decl", "items_reader_field", + "reader_config_decl", "reader_config_field", "input_type_decl", + "csv_header_location_decl", "csv_headers_decl", "max_items_decl", + "tolerated_failure_count_decl", "tolerated_failure_percentage_decl", + "label_decl", "result_writer_decl", "result_writer_field", + "retry_decl", "retrier_decl", "retrier_stmt", "error_equals_decl", + "interval_seconds_decl", "max_attempts_decl", "backoff_rate_decl", + "max_delay_seconds_decl", "jitter_strategy_decl", "catch_decl", + "catcher_decl", "catcher_stmt", "comparison_op", "choice_operator", + "states_error_name", "error_name", "json_obj_decl", "json_binding", + "json_arr_decl", "json_value_decl", "string_sampler", + "string_expression_simple", "string_expression", "string_jsonpath", + "string_context_path", "string_variable_sample", "string_intrinsic_function", + "string_jsonata", "string_literal", "soft_string_keyword" ] + + EOF = Token.EOF + COMMA=1 + COLON=2 + LBRACK=3 + RBRACK=4 + LBRACE=5 + RBRACE=6 + TRUE=7 + FALSE=8 + NULL=9 + COMMENT=10 + STATES=11 + STARTAT=12 + NEXTSTATE=13 + VERSION=14 + TYPE=15 + TASK=16 + CHOICE=17 + FAIL=18 + SUCCEED=19 + PASS=20 + WAIT=21 + PARALLEL=22 + MAP=23 + CHOICES=24 + 
CONDITION=25 + VARIABLE=26 + DEFAULT=27 + BRANCHES=28 + AND=29 + BOOLEANEQUALS=30 + BOOLEANQUALSPATH=31 + ISBOOLEAN=32 + ISNULL=33 + ISNUMERIC=34 + ISPRESENT=35 + ISSTRING=36 + ISTIMESTAMP=37 + NOT=38 + NUMERICEQUALS=39 + NUMERICEQUALSPATH=40 + NUMERICGREATERTHAN=41 + NUMERICGREATERTHANPATH=42 + NUMERICGREATERTHANEQUALS=43 + NUMERICGREATERTHANEQUALSPATH=44 + NUMERICLESSTHAN=45 + NUMERICLESSTHANPATH=46 + NUMERICLESSTHANEQUALS=47 + NUMERICLESSTHANEQUALSPATH=48 + OR=49 + STRINGEQUALS=50 + STRINGEQUALSPATH=51 + STRINGGREATERTHAN=52 + STRINGGREATERTHANPATH=53 + STRINGGREATERTHANEQUALS=54 + STRINGGREATERTHANEQUALSPATH=55 + STRINGLESSTHAN=56 + STRINGLESSTHANPATH=57 + STRINGLESSTHANEQUALS=58 + STRINGLESSTHANEQUALSPATH=59 + STRINGMATCHES=60 + TIMESTAMPEQUALS=61 + TIMESTAMPEQUALSPATH=62 + TIMESTAMPGREATERTHAN=63 + TIMESTAMPGREATERTHANPATH=64 + TIMESTAMPGREATERTHANEQUALS=65 + TIMESTAMPGREATERTHANEQUALSPATH=66 + TIMESTAMPLESSTHAN=67 + TIMESTAMPLESSTHANPATH=68 + TIMESTAMPLESSTHANEQUALS=69 + TIMESTAMPLESSTHANEQUALSPATH=70 + SECONDSPATH=71 + SECONDS=72 + TIMESTAMPPATH=73 + TIMESTAMP=74 + TIMEOUTSECONDS=75 + TIMEOUTSECONDSPATH=76 + HEARTBEATSECONDS=77 + HEARTBEATSECONDSPATH=78 + PROCESSORCONFIG=79 + MODE=80 + INLINE=81 + DISTRIBUTED=82 + EXECUTIONTYPE=83 + STANDARD=84 + ITEMPROCESSOR=85 + ITERATOR=86 + ITEMSELECTOR=87 + MAXCONCURRENCYPATH=88 + MAXCONCURRENCY=89 + RESOURCE=90 + INPUTPATH=91 + OUTPUTPATH=92 + ITEMS=93 + ITEMSPATH=94 + RESULTPATH=95 + RESULT=96 + PARAMETERS=97 + CREDENTIALS=98 + ROLEARN=99 + ROLEARNPATH=100 + RESULTSELECTOR=101 + ITEMREADER=102 + READERCONFIG=103 + INPUTTYPE=104 + CSVHEADERLOCATION=105 + CSVHEADERS=106 + MAXITEMS=107 + MAXITEMSPATH=108 + TOLERATEDFAILURECOUNT=109 + TOLERATEDFAILURECOUNTPATH=110 + TOLERATEDFAILUREPERCENTAGE=111 + TOLERATEDFAILUREPERCENTAGEPATH=112 + LABEL=113 + RESULTWRITER=114 + NEXT=115 + END=116 + CAUSE=117 + CAUSEPATH=118 + ERROR=119 + ERRORPATH=120 + RETRY=121 + ERROREQUALS=122 + INTERVALSECONDS=123 + MAXATTEMPTS=124 + BACKOFFRATE=125 + MAXDELAYSECONDS=126 + JITTERSTRATEGY=127 + FULL=128 + NONE=129 + CATCH=130 + QUERYLANGUAGE=131 + JSONPATH=132 + JSONATA=133 + ASSIGN=134 + OUTPUT=135 + ARGUMENTS=136 + ERRORNAMEStatesALL=137 + ERRORNAMEStatesDataLimitExceeded=138 + ERRORNAMEStatesHeartbeatTimeout=139 + ERRORNAMEStatesTimeout=140 + ERRORNAMEStatesTaskFailed=141 + ERRORNAMEStatesPermissions=142 + ERRORNAMEStatesResultPathMatchFailure=143 + ERRORNAMEStatesParameterPathFailure=144 + ERRORNAMEStatesBranchFailed=145 + ERRORNAMEStatesNoChoiceMatched=146 + ERRORNAMEStatesIntrinsicFailure=147 + ERRORNAMEStatesExceedToleratedFailureThreshold=148 + ERRORNAMEStatesItemReaderFailed=149 + ERRORNAMEStatesResultWriterFailed=150 + ERRORNAMEStatesQueryEvaluationError=151 + ERRORNAMEStatesRuntime=152 + STRINGDOLLAR=153 + STRINGPATHCONTEXTOBJ=154 + STRINGPATH=155 + STRINGVAR=156 + STRINGINTRINSICFUNC=157 + STRINGJSONATA=158 + STRING=159 + INT=160 + NUMBER=161 + WS=162 + + def __init__(self, input:TokenStream, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.13.2") + self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) + self._predicates = None + + + + + class State_machineContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def program_decl(self): + return self.getTypedRuleContext(ASLParser.Program_declContext,0) + + + def EOF(self): + return 
self.getToken(ASLParser.EOF, 0) + + def getRuleIndex(self): + return ASLParser.RULE_state_machine + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterState_machine" ): + listener.enterState_machine(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitState_machine" ): + listener.exitState_machine(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitState_machine" ): + return visitor.visitState_machine(self) + else: + return visitor.visitChildren(self) + + + + + def state_machine(self): + + localctx = ASLParser.State_machineContext(self, self._ctx, self.state) + self.enterRule(localctx, 0, self.RULE_state_machine) + try: + self.enterOuterAlt(localctx, 1) + self.state = 232 + self.program_decl() + self.state = 233 + self.match(ASLParser.EOF) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Program_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def top_layer_stmt(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Top_layer_stmtContext) + else: + return self.getTypedRuleContext(ASLParser.Top_layer_stmtContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_program_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterProgram_decl" ): + listener.enterProgram_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitProgram_decl" ): + listener.exitProgram_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitProgram_decl" ): + return visitor.visitProgram_decl(self) + else: + return visitor.visitChildren(self) + + + + + def program_decl(self): + + localctx = ASLParser.Program_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 2, self.RULE_program_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 235 + self.match(ASLParser.LBRACE) + self.state = 236 + self.top_layer_stmt() + self.state = 241 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 237 + self.match(ASLParser.COMMA) + self.state = 238 + self.top_layer_stmt() + self.state = 243 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 244 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Top_layer_stmtContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def comment_decl(self): + return self.getTypedRuleContext(ASLParser.Comment_declContext,0) + + + def version_decl(self): + return self.getTypedRuleContext(ASLParser.Version_declContext,0) + + + def query_language_decl(self): + return 
self.getTypedRuleContext(ASLParser.Query_language_declContext,0) + + + def startat_decl(self): + return self.getTypedRuleContext(ASLParser.Startat_declContext,0) + + + def states_decl(self): + return self.getTypedRuleContext(ASLParser.States_declContext,0) + + + def timeout_seconds_decl(self): + return self.getTypedRuleContext(ASLParser.Timeout_seconds_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_top_layer_stmt + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterTop_layer_stmt" ): + listener.enterTop_layer_stmt(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitTop_layer_stmt" ): + listener.exitTop_layer_stmt(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitTop_layer_stmt" ): + return visitor.visitTop_layer_stmt(self) + else: + return visitor.visitChildren(self) + + + + + def top_layer_stmt(self): + + localctx = ASLParser.Top_layer_stmtContext(self, self._ctx, self.state) + self.enterRule(localctx, 4, self.RULE_top_layer_stmt) + try: + self.state = 252 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [10]: + self.enterOuterAlt(localctx, 1) + self.state = 246 + self.comment_decl() + pass + elif token in [14]: + self.enterOuterAlt(localctx, 2) + self.state = 247 + self.version_decl() + pass + elif token in [131]: + self.enterOuterAlt(localctx, 3) + self.state = 248 + self.query_language_decl() + pass + elif token in [12]: + self.enterOuterAlt(localctx, 4) + self.state = 249 + self.startat_decl() + pass + elif token in [11]: + self.enterOuterAlt(localctx, 5) + self.state = 250 + self.states_decl() + pass + elif token in [75, 76]: + self.enterOuterAlt(localctx, 6) + self.state = 251 + self.timeout_seconds_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Startat_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def STARTAT(self): + return self.getToken(ASLParser.STARTAT, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_startat_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterStartat_decl" ): + listener.enterStartat_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitStartat_decl" ): + listener.exitStartat_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitStartat_decl" ): + return visitor.visitStartat_decl(self) + else: + return visitor.visitChildren(self) + + + + + def startat_decl(self): + + localctx = ASLParser.Startat_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 6, self.RULE_startat_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 254 + self.match(ASLParser.STARTAT) + self.state = 255 + self.match(ASLParser.COLON) + self.state = 256 + self.string_literal() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class 
Comment_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def COMMENT(self): + return self.getToken(ASLParser.COMMENT, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_comment_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterComment_decl" ): + listener.enterComment_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitComment_decl" ): + listener.exitComment_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitComment_decl" ): + return visitor.visitComment_decl(self) + else: + return visitor.visitChildren(self) + + + + + def comment_decl(self): + + localctx = ASLParser.Comment_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 8, self.RULE_comment_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 258 + self.match(ASLParser.COMMENT) + self.state = 259 + self.match(ASLParser.COLON) + self.state = 260 + self.string_literal() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Version_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def VERSION(self): + return self.getToken(ASLParser.VERSION, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_version_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterVersion_decl" ): + listener.enterVersion_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitVersion_decl" ): + listener.exitVersion_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitVersion_decl" ): + return visitor.visitVersion_decl(self) + else: + return visitor.visitChildren(self) + + + + + def version_decl(self): + + localctx = ASLParser.Version_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 10, self.RULE_version_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 262 + self.match(ASLParser.VERSION) + self.state = 263 + self.match(ASLParser.COLON) + self.state = 264 + self.string_literal() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Query_language_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def QUERYLANGUAGE(self): + return self.getToken(ASLParser.QUERYLANGUAGE, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def JSONPATH(self): + return self.getToken(ASLParser.JSONPATH, 0) + + def JSONATA(self): + return self.getToken(ASLParser.JSONATA, 0) + + def getRuleIndex(self): + return 
ASLParser.RULE_query_language_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterQuery_language_decl" ): + listener.enterQuery_language_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitQuery_language_decl" ): + listener.exitQuery_language_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitQuery_language_decl" ): + return visitor.visitQuery_language_decl(self) + else: + return visitor.visitChildren(self) + + + + + def query_language_decl(self): + + localctx = ASLParser.Query_language_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 12, self.RULE_query_language_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 266 + self.match(ASLParser.QUERYLANGUAGE) + self.state = 267 + self.match(ASLParser.COLON) + self.state = 268 + _la = self._input.LA(1) + if not(_la==132 or _la==133): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class State_stmtContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def comment_decl(self): + return self.getTypedRuleContext(ASLParser.Comment_declContext,0) + + + def query_language_decl(self): + return self.getTypedRuleContext(ASLParser.Query_language_declContext,0) + + + def type_decl(self): + return self.getTypedRuleContext(ASLParser.Type_declContext,0) + + + def input_path_decl(self): + return self.getTypedRuleContext(ASLParser.Input_path_declContext,0) + + + def resource_decl(self): + return self.getTypedRuleContext(ASLParser.Resource_declContext,0) + + + def next_decl(self): + return self.getTypedRuleContext(ASLParser.Next_declContext,0) + + + def result_decl(self): + return self.getTypedRuleContext(ASLParser.Result_declContext,0) + + + def result_path_decl(self): + return self.getTypedRuleContext(ASLParser.Result_path_declContext,0) + + + def output_path_decl(self): + return self.getTypedRuleContext(ASLParser.Output_path_declContext,0) + + + def end_decl(self): + return self.getTypedRuleContext(ASLParser.End_declContext,0) + + + def default_decl(self): + return self.getTypedRuleContext(ASLParser.Default_declContext,0) + + + def choices_decl(self): + return self.getTypedRuleContext(ASLParser.Choices_declContext,0) + + + def error_decl(self): + return self.getTypedRuleContext(ASLParser.Error_declContext,0) + + + def cause_decl(self): + return self.getTypedRuleContext(ASLParser.Cause_declContext,0) + + + def seconds_decl(self): + return self.getTypedRuleContext(ASLParser.Seconds_declContext,0) + + + def timestamp_decl(self): + return self.getTypedRuleContext(ASLParser.Timestamp_declContext,0) + + + def items_decl(self): + return self.getTypedRuleContext(ASLParser.Items_declContext,0) + + + def items_path_decl(self): + return self.getTypedRuleContext(ASLParser.Items_path_declContext,0) + + + def item_processor_decl(self): + return self.getTypedRuleContext(ASLParser.Item_processor_declContext,0) + + + def iterator_decl(self): + return self.getTypedRuleContext(ASLParser.Iterator_declContext,0) + + + def item_selector_decl(self): + return self.getTypedRuleContext(ASLParser.Item_selector_declContext,0) + + + 
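+    # Accessor pattern used throughout this generated context class: each
+    # method returns the typed child context when the corresponding sub-rule
+    # was matched for this state statement, and None otherwise. A minimal,
+    # illustrative dispatch sketch — the visitor method and the `ctx`
+    # variable below are assumptions for illustration, not part of this
+    # generated file:
+    #
+    #     def visitState_stmt(self, ctx: ASLParser.State_stmtContext):
+    #         if ctx.item_reader_decl() is not None:
+    #             return self.visit(ctx.item_reader_decl())
+    #         return self.visitChildren(ctx)
+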
def item_reader_decl(self): + return self.getTypedRuleContext(ASLParser.Item_reader_declContext,0) + + + def max_concurrency_decl(self): + return self.getTypedRuleContext(ASLParser.Max_concurrency_declContext,0) + + + def timeout_seconds_decl(self): + return self.getTypedRuleContext(ASLParser.Timeout_seconds_declContext,0) + + + def heartbeat_seconds_decl(self): + return self.getTypedRuleContext(ASLParser.Heartbeat_seconds_declContext,0) + + + def branches_decl(self): + return self.getTypedRuleContext(ASLParser.Branches_declContext,0) + + + def parameters_decl(self): + return self.getTypedRuleContext(ASLParser.Parameters_declContext,0) + + + def retry_decl(self): + return self.getTypedRuleContext(ASLParser.Retry_declContext,0) + + + def catch_decl(self): + return self.getTypedRuleContext(ASLParser.Catch_declContext,0) + + + def result_selector_decl(self): + return self.getTypedRuleContext(ASLParser.Result_selector_declContext,0) + + + def tolerated_failure_count_decl(self): + return self.getTypedRuleContext(ASLParser.Tolerated_failure_count_declContext,0) + + + def tolerated_failure_percentage_decl(self): + return self.getTypedRuleContext(ASLParser.Tolerated_failure_percentage_declContext,0) + + + def label_decl(self): + return self.getTypedRuleContext(ASLParser.Label_declContext,0) + + + def result_writer_decl(self): + return self.getTypedRuleContext(ASLParser.Result_writer_declContext,0) + + + def assign_decl(self): + return self.getTypedRuleContext(ASLParser.Assign_declContext,0) + + + def arguments_decl(self): + return self.getTypedRuleContext(ASLParser.Arguments_declContext,0) + + + def output_decl(self): + return self.getTypedRuleContext(ASLParser.Output_declContext,0) + + + def credentials_decl(self): + return self.getTypedRuleContext(ASLParser.Credentials_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_state_stmt + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterState_stmt" ): + listener.enterState_stmt(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitState_stmt" ): + listener.exitState_stmt(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitState_stmt" ): + return visitor.visitState_stmt(self) + else: + return visitor.visitChildren(self) + + + + + def state_stmt(self): + + localctx = ASLParser.State_stmtContext(self, self._ctx, self.state) + self.enterRule(localctx, 14, self.RULE_state_stmt) + try: + self.state = 308 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [10]: + self.enterOuterAlt(localctx, 1) + self.state = 270 + self.comment_decl() + pass + elif token in [131]: + self.enterOuterAlt(localctx, 2) + self.state = 271 + self.query_language_decl() + pass + elif token in [15]: + self.enterOuterAlt(localctx, 3) + self.state = 272 + self.type_decl() + pass + elif token in [91]: + self.enterOuterAlt(localctx, 4) + self.state = 273 + self.input_path_decl() + pass + elif token in [90]: + self.enterOuterAlt(localctx, 5) + self.state = 274 + self.resource_decl() + pass + elif token in [115]: + self.enterOuterAlt(localctx, 6) + self.state = 275 + self.next_decl() + pass + elif token in [96]: + self.enterOuterAlt(localctx, 7) + self.state = 276 + self.result_decl() + pass + elif token in [95]: + self.enterOuterAlt(localctx, 8) + self.state = 277 + self.result_path_decl() + pass + elif token in [92]: + self.enterOuterAlt(localctx, 9) + self.state = 278 + self.output_path_decl() + pass + elif token in [116]: + self.enterOuterAlt(localctx, 
10) + self.state = 279 + self.end_decl() + pass + elif token in [27]: + self.enterOuterAlt(localctx, 11) + self.state = 280 + self.default_decl() + pass + elif token in [24]: + self.enterOuterAlt(localctx, 12) + self.state = 281 + self.choices_decl() + pass + elif token in [119, 120]: + self.enterOuterAlt(localctx, 13) + self.state = 282 + self.error_decl() + pass + elif token in [117, 118]: + self.enterOuterAlt(localctx, 14) + self.state = 283 + self.cause_decl() + pass + elif token in [71, 72]: + self.enterOuterAlt(localctx, 15) + self.state = 284 + self.seconds_decl() + pass + elif token in [73, 74]: + self.enterOuterAlt(localctx, 16) + self.state = 285 + self.timestamp_decl() + pass + elif token in [93]: + self.enterOuterAlt(localctx, 17) + self.state = 286 + self.items_decl() + pass + elif token in [94]: + self.enterOuterAlt(localctx, 18) + self.state = 287 + self.items_path_decl() + pass + elif token in [85]: + self.enterOuterAlt(localctx, 19) + self.state = 288 + self.item_processor_decl() + pass + elif token in [86]: + self.enterOuterAlt(localctx, 20) + self.state = 289 + self.iterator_decl() + pass + elif token in [87]: + self.enterOuterAlt(localctx, 21) + self.state = 290 + self.item_selector_decl() + pass + elif token in [102]: + self.enterOuterAlt(localctx, 22) + self.state = 291 + self.item_reader_decl() + pass + elif token in [88, 89]: + self.enterOuterAlt(localctx, 23) + self.state = 292 + self.max_concurrency_decl() + pass + elif token in [75, 76]: + self.enterOuterAlt(localctx, 24) + self.state = 293 + self.timeout_seconds_decl() + pass + elif token in [77, 78]: + self.enterOuterAlt(localctx, 25) + self.state = 294 + self.heartbeat_seconds_decl() + pass + elif token in [28]: + self.enterOuterAlt(localctx, 26) + self.state = 295 + self.branches_decl() + pass + elif token in [97]: + self.enterOuterAlt(localctx, 27) + self.state = 296 + self.parameters_decl() + pass + elif token in [121]: + self.enterOuterAlt(localctx, 28) + self.state = 297 + self.retry_decl() + pass + elif token in [130]: + self.enterOuterAlt(localctx, 29) + self.state = 298 + self.catch_decl() + pass + elif token in [101]: + self.enterOuterAlt(localctx, 30) + self.state = 299 + self.result_selector_decl() + pass + elif token in [109, 110]: + self.enterOuterAlt(localctx, 31) + self.state = 300 + self.tolerated_failure_count_decl() + pass + elif token in [111, 112]: + self.enterOuterAlt(localctx, 32) + self.state = 301 + self.tolerated_failure_percentage_decl() + pass + elif token in [113]: + self.enterOuterAlt(localctx, 33) + self.state = 302 + self.label_decl() + pass + elif token in [114]: + self.enterOuterAlt(localctx, 34) + self.state = 303 + self.result_writer_decl() + pass + elif token in [134]: + self.enterOuterAlt(localctx, 35) + self.state = 304 + self.assign_decl() + pass + elif token in [136]: + self.enterOuterAlt(localctx, 36) + self.state = 305 + self.arguments_decl() + pass + elif token in [135]: + self.enterOuterAlt(localctx, 37) + self.state = 306 + self.output_decl() + pass + elif token in [98]: + self.enterOuterAlt(localctx, 38) + self.state = 307 + self.credentials_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class States_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + 
super().__init__(parent, invokingState) + self.parser = parser + + def STATES(self): + return self.getToken(ASLParser.STATES, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def state_decl(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.State_declContext) + else: + return self.getTypedRuleContext(ASLParser.State_declContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_states_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterStates_decl" ): + listener.enterStates_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitStates_decl" ): + listener.exitStates_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitStates_decl" ): + return visitor.visitStates_decl(self) + else: + return visitor.visitChildren(self) + + + + + def states_decl(self): + + localctx = ASLParser.States_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 16, self.RULE_states_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 310 + self.match(ASLParser.STATES) + self.state = 311 + self.match(ASLParser.COLON) + self.state = 312 + self.match(ASLParser.LBRACE) + self.state = 313 + self.state_decl() + self.state = 318 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 314 + self.match(ASLParser.COMMA) + self.state = 315 + self.state_decl() + self.state = 320 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 321 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class State_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def state_decl_body(self): + return self.getTypedRuleContext(ASLParser.State_decl_bodyContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_state_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterState_decl" ): + listener.enterState_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitState_decl" ): + listener.exitState_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitState_decl" ): + return visitor.visitState_decl(self) + else: + return visitor.visitChildren(self) + + + + + def state_decl(self): + + localctx = ASLParser.State_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 18, self.RULE_state_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 323 + self.string_literal() + self.state = 324 + self.match(ASLParser.COLON) + self.state = 325 + self.state_decl_body() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + 
self.exitRule() + return localctx + + + class State_decl_bodyContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def state_stmt(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.State_stmtContext) + else: + return self.getTypedRuleContext(ASLParser.State_stmtContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_state_decl_body + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterState_decl_body" ): + listener.enterState_decl_body(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitState_decl_body" ): + listener.exitState_decl_body(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitState_decl_body" ): + return visitor.visitState_decl_body(self) + else: + return visitor.visitChildren(self) + + + + + def state_decl_body(self): + + localctx = ASLParser.State_decl_bodyContext(self, self._ctx, self.state) + self.enterRule(localctx, 20, self.RULE_state_decl_body) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 327 + self.match(ASLParser.LBRACE) + self.state = 328 + self.state_stmt() + self.state = 333 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 329 + self.match(ASLParser.COMMA) + self.state = 330 + self.state_stmt() + self.state = 335 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 336 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Type_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def TYPE(self): + return self.getToken(ASLParser.TYPE, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def state_type(self): + return self.getTypedRuleContext(ASLParser.State_typeContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_type_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterType_decl" ): + listener.enterType_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitType_decl" ): + listener.exitType_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitType_decl" ): + return visitor.visitType_decl(self) + else: + return visitor.visitChildren(self) + + + + + def type_decl(self): + + localctx = ASLParser.Type_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 22, self.RULE_type_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 338 + self.match(ASLParser.TYPE) + self.state = 339 + self.match(ASLParser.COLON) + self.state = 340 + self.state_type() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class 
Next_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def NEXT(self): + return self.getToken(ASLParser.NEXT, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_next_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterNext_decl" ): + listener.enterNext_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitNext_decl" ): + listener.exitNext_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitNext_decl" ): + return visitor.visitNext_decl(self) + else: + return visitor.visitChildren(self) + + + + + def next_decl(self): + + localctx = ASLParser.Next_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 24, self.RULE_next_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 342 + self.match(ASLParser.NEXT) + self.state = 343 + self.match(ASLParser.COLON) + self.state = 344 + self.string_literal() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Resource_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def RESOURCE(self): + return self.getToken(ASLParser.RESOURCE, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_resource_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterResource_decl" ): + listener.enterResource_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitResource_decl" ): + listener.exitResource_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitResource_decl" ): + return visitor.visitResource_decl(self) + else: + return visitor.visitChildren(self) + + + + + def resource_decl(self): + + localctx = ASLParser.Resource_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 26, self.RULE_resource_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 346 + self.match(ASLParser.RESOURCE) + self.state = 347 + self.match(ASLParser.COLON) + self.state = 348 + self.string_literal() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Input_path_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def INPUTPATH(self): + return self.getToken(ASLParser.INPUTPATH, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def NULL(self): + return self.getToken(ASLParser.NULL, 0) + + def string_sampler(self): + return self.getTypedRuleContext(ASLParser.String_samplerContext,0) + + + def getRuleIndex(self): + return 
+    class Input_path_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def INPUTPATH(self):
+            return self.getToken(ASLParser.INPUTPATH, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def NULL(self):
+            return self.getToken(ASLParser.NULL, 0)
+
+        def string_sampler(self):
+            return self.getTypedRuleContext(ASLParser.String_samplerContext,0)
+
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_input_path_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterInput_path_decl" ):
+                listener.enterInput_path_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitInput_path_decl" ):
+                listener.exitInput_path_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitInput_path_decl" ):
+                return visitor.visitInput_path_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+
+
+    def input_path_decl(self):
+
+        localctx = ASLParser.Input_path_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 28, self.RULE_input_path_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 350
+            self.match(ASLParser.INPUTPATH)
+            self.state = 351
+            self.match(ASLParser.COLON)
+            self.state = 354
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [9]:
+                self.state = 352
+                self.match(ASLParser.NULL)
+                pass
+            elif token in [154, 155, 156]:
+                self.state = 353
+                self.string_sampler()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Result_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def RESULT(self):
+            return self.getToken(ASLParser.RESULT, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def json_value_decl(self):
+            return self.getTypedRuleContext(ASLParser.Json_value_declContext,0)
+
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_result_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterResult_decl" ):
+                listener.enterResult_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitResult_decl" ):
+                listener.exitResult_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitResult_decl" ):
+                return visitor.visitResult_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+
+
+    def result_decl(self):
+
+        localctx = ASLParser.Result_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 30, self.RULE_result_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 356
+            self.match(ASLParser.RESULT)
+            self.state = 357
+            self.match(ASLParser.COLON)
+            self.state = 358
+            self.json_value_decl()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
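+    # Descriptive note (illustrative): input_path_decl admits either the NULL literal or a
+    # string sampler, mirroring that ASL accepts both "InputPath": null and
+    # "InputPath": "$.detail".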
+    class Result_path_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def RESULTPATH(self):
+            return self.getToken(ASLParser.RESULTPATH, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def NULL(self):
+            return self.getToken(ASLParser.NULL, 0)
+
+        def string_jsonpath(self):
+            return self.getTypedRuleContext(ASLParser.String_jsonpathContext,0)
+
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_result_path_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterResult_path_decl" ):
+                listener.enterResult_path_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitResult_path_decl" ):
+                listener.exitResult_path_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitResult_path_decl" ):
+                return visitor.visitResult_path_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+
+
+    def result_path_decl(self):
+
+        localctx = ASLParser.Result_path_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 32, self.RULE_result_path_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 360
+            self.match(ASLParser.RESULTPATH)
+            self.state = 361
+            self.match(ASLParser.COLON)
+            self.state = 364
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [9]:
+                self.state = 362
+                self.match(ASLParser.NULL)
+                pass
+            elif token in [155]:
+                self.state = 363
+                self.string_jsonpath()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Output_path_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def OUTPUTPATH(self):
+            return self.getToken(ASLParser.OUTPUTPATH, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def NULL(self):
+            return self.getToken(ASLParser.NULL, 0)
+
+        def string_sampler(self):
+            return self.getTypedRuleContext(ASLParser.String_samplerContext,0)
+
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_output_path_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterOutput_path_decl" ):
+                listener.enterOutput_path_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitOutput_path_decl" ):
+                listener.exitOutput_path_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitOutput_path_decl" ):
+                return visitor.visitOutput_path_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+
+
+    def output_path_decl(self):
+
+        localctx = ASLParser.Output_path_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 34, self.RULE_output_path_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 366
+            self.match(ASLParser.OUTPUTPATH)
+            self.state = 367
+            self.match(ASLParser.COLON)
+            self.state = 370
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [9]:
+                self.state = 368
+                self.match(ASLParser.NULL)
+                pass
+            elif token in [154, 155, 156]:
+                self.state = 369
+                self.string_sampler()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
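+    # Descriptive note (illustrative): result_path_decl accepts only null or a JSONPath
+    # string ("ResultPath": "$.result"), while output_path_decl, like input_path_decl,
+    # also admits the wider string-sampler alternatives.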
"enterEnd_decl" ): + listener.enterEnd_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitEnd_decl" ): + listener.exitEnd_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitEnd_decl" ): + return visitor.visitEnd_decl(self) + else: + return visitor.visitChildren(self) + + + + + def end_decl(self): + + localctx = ASLParser.End_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 36, self.RULE_end_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 372 + self.match(ASLParser.END) + self.state = 373 + self.match(ASLParser.COLON) + self.state = 374 + _la = self._input.LA(1) + if not(_la==7 or _la==8): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Default_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def DEFAULT(self): + return self.getToken(ASLParser.DEFAULT, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_default_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterDefault_decl" ): + listener.enterDefault_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitDefault_decl" ): + listener.exitDefault_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitDefault_decl" ): + return visitor.visitDefault_decl(self) + else: + return visitor.visitChildren(self) + + + + + def default_decl(self): + + localctx = ASLParser.Default_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 38, self.RULE_default_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 376 + self.match(ASLParser.DEFAULT) + self.state = 377 + self.match(ASLParser.COLON) + self.state = 378 + self.string_literal() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Error_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_error_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Error_pathContext(Error_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Error_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def ERRORPATH(self): + return self.getToken(ASLParser.ERRORPATH, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_expression_simple(self): + return self.getTypedRuleContext(ASLParser.String_expression_simpleContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterError_path" ): + listener.enterError_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, 
"exitError_path" ): + listener.exitError_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitError_path" ): + return visitor.visitError_path(self) + else: + return visitor.visitChildren(self) + + + class ErrorContext(Error_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Error_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def ERROR(self): + return self.getToken(ASLParser.ERROR, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterError" ): + listener.enterError(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitError" ): + listener.exitError(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitError" ): + return visitor.visitError(self) + else: + return visitor.visitChildren(self) + + + + def error_decl(self): + + localctx = ASLParser.Error_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 40, self.RULE_error_decl) + try: + self.state = 389 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [119]: + localctx = ASLParser.ErrorContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 380 + self.match(ASLParser.ERROR) + self.state = 381 + self.match(ASLParser.COLON) + self.state = 384 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,8,self._ctx) + if la_ == 1: + self.state = 382 + self.string_jsonata() + pass + + elif la_ == 2: + self.state = 383 + self.string_literal() + pass + + + pass + elif token in [120]: + localctx = ASLParser.Error_pathContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 386 + self.match(ASLParser.ERRORPATH) + self.state = 387 + self.match(ASLParser.COLON) + self.state = 388 + self.string_expression_simple() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Cause_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_cause_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Cause_pathContext(Cause_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Cause_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def CAUSEPATH(self): + return self.getToken(ASLParser.CAUSEPATH, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_expression_simple(self): + return self.getTypedRuleContext(ASLParser.String_expression_simpleContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterCause_path" ): + listener.enterCause_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitCause_path" ): + listener.exitCause_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitCause_path" ): + return visitor.visitCause_path(self) 
+    class Cause_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_cause_decl
+
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+
+    class Cause_pathContext(Cause_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Cause_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def CAUSEPATH(self):
+            return self.getToken(ASLParser.CAUSEPATH, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_expression_simple(self):
+            return self.getTypedRuleContext(ASLParser.String_expression_simpleContext,0)
+
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterCause_path" ):
+                listener.enterCause_path(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitCause_path" ):
+                listener.exitCause_path(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitCause_path" ):
+                return visitor.visitCause_path(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class CauseContext(Cause_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Cause_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def CAUSE(self):
+            return self.getToken(ASLParser.CAUSE, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_jsonata(self):
+            return self.getTypedRuleContext(ASLParser.String_jsonataContext,0)
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterCause" ):
+                listener.enterCause(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitCause" ):
+                listener.exitCause(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitCause" ):
+                return visitor.visitCause(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+
+    def cause_decl(self):
+
+        localctx = ASLParser.Cause_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 42, self.RULE_cause_decl)
+        try:
+            self.state = 400
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [117]:
+                localctx = ASLParser.CauseContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 391
+                self.match(ASLParser.CAUSE)
+                self.state = 392
+                self.match(ASLParser.COLON)
+                self.state = 395
+                self._errHandler.sync(self)
+                la_ = self._interp.adaptivePredict(self._input,10,self._ctx)
+                if la_ == 1:
+                    self.state = 393
+                    self.string_jsonata()
+                    pass
+
+                elif la_ == 2:
+                    self.state = 394
+                    self.string_literal()
+                    pass
+
+
+                pass
+            elif token in [118]:
+                localctx = ASLParser.Cause_pathContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 397
+                self.match(ASLParser.CAUSEPATH)
+                self.state = 398
+                self.match(ASLParser.COLON)
+                self.state = 399
+                self.string_expression_simple()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Seconds_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_seconds_decl
+
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+
+    class Seconds_jsonataContext(Seconds_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Seconds_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def SECONDS(self):
+            return self.getToken(ASLParser.SECONDS, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_jsonata(self):
+            return self.getTypedRuleContext(ASLParser.String_jsonataContext,0)
+
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterSeconds_jsonata" ):
+                listener.enterSeconds_jsonata(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitSeconds_jsonata" ):
+                listener.exitSeconds_jsonata(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitSeconds_jsonata" ):
+                return visitor.visitSeconds_jsonata(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Seconds_pathContext(Seconds_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # 
actually a ASLParser.Seconds_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def SECONDSPATH(self): + return self.getToken(ASLParser.SECONDSPATH, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_sampler(self): + return self.getTypedRuleContext(ASLParser.String_samplerContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterSeconds_path" ): + listener.enterSeconds_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitSeconds_path" ): + listener.exitSeconds_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitSeconds_path" ): + return visitor.visitSeconds_path(self) + else: + return visitor.visitChildren(self) + + + class Seconds_intContext(Seconds_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Seconds_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def SECONDS(self): + return self.getToken(ASLParser.SECONDS, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def INT(self): + return self.getToken(ASLParser.INT, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterSeconds_int" ): + listener.enterSeconds_int(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitSeconds_int" ): + listener.exitSeconds_int(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitSeconds_int" ): + return visitor.visitSeconds_int(self) + else: + return visitor.visitChildren(self) + + + + def seconds_decl(self): + + localctx = ASLParser.Seconds_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 44, self.RULE_seconds_decl) + try: + self.state = 411 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,12,self._ctx) + if la_ == 1: + localctx = ASLParser.Seconds_jsonataContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 402 + self.match(ASLParser.SECONDS) + self.state = 403 + self.match(ASLParser.COLON) + self.state = 404 + self.string_jsonata() + pass + + elif la_ == 2: + localctx = ASLParser.Seconds_intContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 405 + self.match(ASLParser.SECONDS) + self.state = 406 + self.match(ASLParser.COLON) + self.state = 407 + self.match(ASLParser.INT) + pass + + elif la_ == 3: + localctx = ASLParser.Seconds_pathContext(self, localctx) + self.enterOuterAlt(localctx, 3) + self.state = 408 + self.match(ASLParser.SECONDSPATH) + self.state = 409 + self.match(ASLParser.COLON) + self.state = 410 + self.string_sampler() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Timestamp_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_timestamp_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Timestamp_pathContext(Timestamp_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Timestamp_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def TIMESTAMPPATH(self): + return self.getToken(ASLParser.TIMESTAMPPATH, 0) + def COLON(self): + return 
self.getToken(ASLParser.COLON, 0) + def string_sampler(self): + return self.getTypedRuleContext(ASLParser.String_samplerContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterTimestamp_path" ): + listener.enterTimestamp_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitTimestamp_path" ): + listener.exitTimestamp_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitTimestamp_path" ): + return visitor.visitTimestamp_path(self) + else: + return visitor.visitChildren(self) + + + class TimestampContext(Timestamp_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Timestamp_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def TIMESTAMP(self): + return self.getToken(ASLParser.TIMESTAMP, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterTimestamp" ): + listener.enterTimestamp(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitTimestamp" ): + listener.exitTimestamp(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitTimestamp" ): + return visitor.visitTimestamp(self) + else: + return visitor.visitChildren(self) + + + + def timestamp_decl(self): + + localctx = ASLParser.Timestamp_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 46, self.RULE_timestamp_decl) + try: + self.state = 422 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [74]: + localctx = ASLParser.TimestampContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 413 + self.match(ASLParser.TIMESTAMP) + self.state = 414 + self.match(ASLParser.COLON) + self.state = 417 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,13,self._ctx) + if la_ == 1: + self.state = 415 + self.string_jsonata() + pass + + elif la_ == 2: + self.state = 416 + self.string_literal() + pass + + + pass + elif token in [73]: + localctx = ASLParser.Timestamp_pathContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 419 + self.match(ASLParser.TIMESTAMPPATH) + self.state = 420 + self.match(ASLParser.COLON) + self.state = 421 + self.string_sampler() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Items_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_items_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Items_arrayContext(Items_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Items_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def ITEMS(self): + return self.getToken(ASLParser.ITEMS, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def jsonata_template_value_array(self): + return 
self.getTypedRuleContext(ASLParser.Jsonata_template_value_arrayContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterItems_array" ): + listener.enterItems_array(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitItems_array" ): + listener.exitItems_array(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitItems_array" ): + return visitor.visitItems_array(self) + else: + return visitor.visitChildren(self) + + + class Items_jsonataContext(Items_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Items_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def ITEMS(self): + return self.getToken(ASLParser.ITEMS, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterItems_jsonata" ): + listener.enterItems_jsonata(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitItems_jsonata" ): + listener.exitItems_jsonata(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitItems_jsonata" ): + return visitor.visitItems_jsonata(self) + else: + return visitor.visitChildren(self) + + + + def items_decl(self): + + localctx = ASLParser.Items_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 48, self.RULE_items_decl) + try: + self.state = 430 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,15,self._ctx) + if la_ == 1: + localctx = ASLParser.Items_arrayContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 424 + self.match(ASLParser.ITEMS) + self.state = 425 + self.match(ASLParser.COLON) + self.state = 426 + self.jsonata_template_value_array() + pass + + elif la_ == 2: + localctx = ASLParser.Items_jsonataContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 427 + self.match(ASLParser.ITEMS) + self.state = 428 + self.match(ASLParser.COLON) + self.state = 429 + self.string_jsonata() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Items_path_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ITEMSPATH(self): + return self.getToken(ASLParser.ITEMSPATH, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_sampler(self): + return self.getTypedRuleContext(ASLParser.String_samplerContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_items_path_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterItems_path_decl" ): + listener.enterItems_path_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitItems_path_decl" ): + listener.exitItems_path_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitItems_path_decl" ): + return visitor.visitItems_path_decl(self) + else: + return visitor.visitChildren(self) + + + + + def items_path_decl(self): + + localctx = ASLParser.Items_path_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 50, 
self.RULE_items_path_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 432 + self.match(ASLParser.ITEMSPATH) + self.state = 433 + self.match(ASLParser.COLON) + self.state = 434 + self.string_sampler() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Max_concurrency_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_max_concurrency_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Max_concurrency_jsonataContext(Max_concurrency_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Max_concurrency_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def MAXCONCURRENCY(self): + return self.getToken(ASLParser.MAXCONCURRENCY, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterMax_concurrency_jsonata" ): + listener.enterMax_concurrency_jsonata(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitMax_concurrency_jsonata" ): + listener.exitMax_concurrency_jsonata(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitMax_concurrency_jsonata" ): + return visitor.visitMax_concurrency_jsonata(self) + else: + return visitor.visitChildren(self) + + + class Max_concurrency_pathContext(Max_concurrency_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Max_concurrency_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def MAXCONCURRENCYPATH(self): + return self.getToken(ASLParser.MAXCONCURRENCYPATH, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_sampler(self): + return self.getTypedRuleContext(ASLParser.String_samplerContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterMax_concurrency_path" ): + listener.enterMax_concurrency_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitMax_concurrency_path" ): + listener.exitMax_concurrency_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitMax_concurrency_path" ): + return visitor.visitMax_concurrency_path(self) + else: + return visitor.visitChildren(self) + + + class Max_concurrency_intContext(Max_concurrency_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Max_concurrency_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def MAXCONCURRENCY(self): + return self.getToken(ASLParser.MAXCONCURRENCY, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def INT(self): + return self.getToken(ASLParser.INT, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterMax_concurrency_int" ): + listener.enterMax_concurrency_int(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitMax_concurrency_int" ): + listener.exitMax_concurrency_int(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitMax_concurrency_int" 
): + return visitor.visitMax_concurrency_int(self) + else: + return visitor.visitChildren(self) + + + + def max_concurrency_decl(self): + + localctx = ASLParser.Max_concurrency_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 52, self.RULE_max_concurrency_decl) + try: + self.state = 445 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,16,self._ctx) + if la_ == 1: + localctx = ASLParser.Max_concurrency_jsonataContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 436 + self.match(ASLParser.MAXCONCURRENCY) + self.state = 437 + self.match(ASLParser.COLON) + self.state = 438 + self.string_jsonata() + pass + + elif la_ == 2: + localctx = ASLParser.Max_concurrency_intContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 439 + self.match(ASLParser.MAXCONCURRENCY) + self.state = 440 + self.match(ASLParser.COLON) + self.state = 441 + self.match(ASLParser.INT) + pass + + elif la_ == 3: + localctx = ASLParser.Max_concurrency_pathContext(self, localctx) + self.enterOuterAlt(localctx, 3) + self.state = 442 + self.match(ASLParser.MAXCONCURRENCYPATH) + self.state = 443 + self.match(ASLParser.COLON) + self.state = 444 + self.string_sampler() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Parameters_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def PARAMETERS(self): + return self.getToken(ASLParser.PARAMETERS, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def payload_tmpl_decl(self): + return self.getTypedRuleContext(ASLParser.Payload_tmpl_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_parameters_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterParameters_decl" ): + listener.enterParameters_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitParameters_decl" ): + listener.exitParameters_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitParameters_decl" ): + return visitor.visitParameters_decl(self) + else: + return visitor.visitChildren(self) + + + + + def parameters_decl(self): + + localctx = ASLParser.Parameters_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 54, self.RULE_parameters_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 447 + self.match(ASLParser.PARAMETERS) + self.state = 448 + self.match(ASLParser.COLON) + self.state = 449 + self.payload_tmpl_decl() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Credentials_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def CREDENTIALS(self): + return self.getToken(ASLParser.CREDENTIALS, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def role_arn_decl(self): + return self.getTypedRuleContext(ASLParser.Role_arn_declContext,0) + + + def RBRACE(self): + return 
self.getToken(ASLParser.RBRACE, 0) + + def getRuleIndex(self): + return ASLParser.RULE_credentials_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterCredentials_decl" ): + listener.enterCredentials_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitCredentials_decl" ): + listener.exitCredentials_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitCredentials_decl" ): + return visitor.visitCredentials_decl(self) + else: + return visitor.visitChildren(self) + + + + + def credentials_decl(self): + + localctx = ASLParser.Credentials_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 56, self.RULE_credentials_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 451 + self.match(ASLParser.CREDENTIALS) + self.state = 452 + self.match(ASLParser.COLON) + self.state = 453 + self.match(ASLParser.LBRACE) + self.state = 454 + self.role_arn_decl() + self.state = 455 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Role_arn_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_role_arn_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Role_arnContext(Role_arn_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Role_arn_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def ROLEARN(self): + return self.getToken(ASLParser.ROLEARN, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterRole_arn" ): + listener.enterRole_arn(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitRole_arn" ): + listener.exitRole_arn(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitRole_arn" ): + return visitor.visitRole_arn(self) + else: + return visitor.visitChildren(self) + + + class Role_pathContext(Role_arn_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Role_arn_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def ROLEARNPATH(self): + return self.getToken(ASLParser.ROLEARNPATH, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_expression_simple(self): + return self.getTypedRuleContext(ASLParser.String_expression_simpleContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterRole_path" ): + listener.enterRole_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitRole_path" ): + listener.exitRole_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitRole_path" ): + return visitor.visitRole_path(self) + else: + return visitor.visitChildren(self) + + + + def role_arn_decl(self): + + localctx = ASLParser.Role_arn_declContext(self, self._ctx, self.state) + 
self.enterRule(localctx, 58, self.RULE_role_arn_decl) + try: + self.state = 466 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [99]: + localctx = ASLParser.Role_arnContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 457 + self.match(ASLParser.ROLEARN) + self.state = 458 + self.match(ASLParser.COLON) + self.state = 461 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,17,self._ctx) + if la_ == 1: + self.state = 459 + self.string_jsonata() + pass + + elif la_ == 2: + self.state = 460 + self.string_literal() + pass + + + pass + elif token in [100]: + localctx = ASLParser.Role_pathContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 463 + self.match(ASLParser.ROLEARNPATH) + self.state = 464 + self.match(ASLParser.COLON) + self.state = 465 + self.string_expression_simple() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Timeout_seconds_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_timeout_seconds_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Timeout_seconds_jsonataContext(Timeout_seconds_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Timeout_seconds_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def TIMEOUTSECONDS(self): + return self.getToken(ASLParser.TIMEOUTSECONDS, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterTimeout_seconds_jsonata" ): + listener.enterTimeout_seconds_jsonata(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitTimeout_seconds_jsonata" ): + listener.exitTimeout_seconds_jsonata(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitTimeout_seconds_jsonata" ): + return visitor.visitTimeout_seconds_jsonata(self) + else: + return visitor.visitChildren(self) + + + class Timeout_seconds_pathContext(Timeout_seconds_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Timeout_seconds_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def TIMEOUTSECONDSPATH(self): + return self.getToken(ASLParser.TIMEOUTSECONDSPATH, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_sampler(self): + return self.getTypedRuleContext(ASLParser.String_samplerContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterTimeout_seconds_path" ): + listener.enterTimeout_seconds_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitTimeout_seconds_path" ): + listener.exitTimeout_seconds_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitTimeout_seconds_path" ): + return visitor.visitTimeout_seconds_path(self) + else: + return visitor.visitChildren(self) + + + class Timeout_seconds_intContext(Timeout_seconds_declContext): + + def __init__(self, parser, 
ctx:ParserRuleContext): # actually a ASLParser.Timeout_seconds_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def TIMEOUTSECONDS(self): + return self.getToken(ASLParser.TIMEOUTSECONDS, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def INT(self): + return self.getToken(ASLParser.INT, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterTimeout_seconds_int" ): + listener.enterTimeout_seconds_int(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitTimeout_seconds_int" ): + listener.exitTimeout_seconds_int(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitTimeout_seconds_int" ): + return visitor.visitTimeout_seconds_int(self) + else: + return visitor.visitChildren(self) + + + + def timeout_seconds_decl(self): + + localctx = ASLParser.Timeout_seconds_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 60, self.RULE_timeout_seconds_decl) + try: + self.state = 477 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,19,self._ctx) + if la_ == 1: + localctx = ASLParser.Timeout_seconds_jsonataContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 468 + self.match(ASLParser.TIMEOUTSECONDS) + self.state = 469 + self.match(ASLParser.COLON) + self.state = 470 + self.string_jsonata() + pass + + elif la_ == 2: + localctx = ASLParser.Timeout_seconds_intContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 471 + self.match(ASLParser.TIMEOUTSECONDS) + self.state = 472 + self.match(ASLParser.COLON) + self.state = 473 + self.match(ASLParser.INT) + pass + + elif la_ == 3: + localctx = ASLParser.Timeout_seconds_pathContext(self, localctx) + self.enterOuterAlt(localctx, 3) + self.state = 474 + self.match(ASLParser.TIMEOUTSECONDSPATH) + self.state = 475 + self.match(ASLParser.COLON) + self.state = 476 + self.string_sampler() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Heartbeat_seconds_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_heartbeat_seconds_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Heartbeat_seconds_intContext(Heartbeat_seconds_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Heartbeat_seconds_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def HEARTBEATSECONDS(self): + return self.getToken(ASLParser.HEARTBEATSECONDS, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def INT(self): + return self.getToken(ASLParser.INT, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterHeartbeat_seconds_int" ): + listener.enterHeartbeat_seconds_int(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitHeartbeat_seconds_int" ): + listener.exitHeartbeat_seconds_int(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitHeartbeat_seconds_int" ): + return visitor.visitHeartbeat_seconds_int(self) + else: + return visitor.visitChildren(self) + + + class Heartbeat_seconds_jsonataContext(Heartbeat_seconds_declContext): + + def 
__init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Heartbeat_seconds_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def HEARTBEATSECONDS(self): + return self.getToken(ASLParser.HEARTBEATSECONDS, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterHeartbeat_seconds_jsonata" ): + listener.enterHeartbeat_seconds_jsonata(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitHeartbeat_seconds_jsonata" ): + listener.exitHeartbeat_seconds_jsonata(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitHeartbeat_seconds_jsonata" ): + return visitor.visitHeartbeat_seconds_jsonata(self) + else: + return visitor.visitChildren(self) + + + class Heartbeat_seconds_pathContext(Heartbeat_seconds_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Heartbeat_seconds_declContext + super().__init__(parser) + self.copyFrom(ctx) + + def HEARTBEATSECONDSPATH(self): + return self.getToken(ASLParser.HEARTBEATSECONDSPATH, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_sampler(self): + return self.getTypedRuleContext(ASLParser.String_samplerContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterHeartbeat_seconds_path" ): + listener.enterHeartbeat_seconds_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitHeartbeat_seconds_path" ): + listener.exitHeartbeat_seconds_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitHeartbeat_seconds_path" ): + return visitor.visitHeartbeat_seconds_path(self) + else: + return visitor.visitChildren(self) + + + + def heartbeat_seconds_decl(self): + + localctx = ASLParser.Heartbeat_seconds_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 62, self.RULE_heartbeat_seconds_decl) + try: + self.state = 488 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,20,self._ctx) + if la_ == 1: + localctx = ASLParser.Heartbeat_seconds_jsonataContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 479 + self.match(ASLParser.HEARTBEATSECONDS) + self.state = 480 + self.match(ASLParser.COLON) + self.state = 481 + self.string_jsonata() + pass + + elif la_ == 2: + localctx = ASLParser.Heartbeat_seconds_intContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 482 + self.match(ASLParser.HEARTBEATSECONDS) + self.state = 483 + self.match(ASLParser.COLON) + self.state = 484 + self.match(ASLParser.INT) + pass + + elif la_ == 3: + localctx = ASLParser.Heartbeat_seconds_pathContext(self, localctx) + self.enterOuterAlt(localctx, 3) + self.state = 485 + self.match(ASLParser.HEARTBEATSECONDSPATH) + self.state = 486 + self.match(ASLParser.COLON) + self.state = 487 + self.string_sampler() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Payload_tmpl_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LBRACE(self): + return 
self.getToken(ASLParser.LBRACE, 0) + + def payload_binding(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Payload_bindingContext) + else: + return self.getTypedRuleContext(ASLParser.Payload_bindingContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_payload_tmpl_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterPayload_tmpl_decl" ): + listener.enterPayload_tmpl_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitPayload_tmpl_decl" ): + listener.exitPayload_tmpl_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitPayload_tmpl_decl" ): + return visitor.visitPayload_tmpl_decl(self) + else: + return visitor.visitChildren(self) + + + + + def payload_tmpl_decl(self): + + localctx = ASLParser.Payload_tmpl_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 64, self.RULE_payload_tmpl_decl) + self._la = 0 # Token type + try: + self.state = 503 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,22,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 490 + self.match(ASLParser.LBRACE) + self.state = 491 + self.payload_binding() + self.state = 496 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 492 + self.match(ASLParser.COMMA) + self.state = 493 + self.payload_binding() + self.state = 498 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 499 + self.match(ASLParser.RBRACE) + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 501 + self.match(ASLParser.LBRACE) + self.state = 502 + self.match(ASLParser.RBRACE) + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Payload_bindingContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_payload_binding + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Payload_binding_sampleContext(Payload_bindingContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Payload_bindingContext + super().__init__(parser) + self.copyFrom(ctx) + + def STRINGDOLLAR(self): + return self.getToken(ASLParser.STRINGDOLLAR, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_expression_simple(self): + return self.getTypedRuleContext(ASLParser.String_expression_simpleContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterPayload_binding_sample" ): + listener.enterPayload_binding_sample(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitPayload_binding_sample" ): + listener.exitPayload_binding_sample(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitPayload_binding_sample" ): + return visitor.visitPayload_binding_sample(self) + else: + return visitor.visitChildren(self) + + + class 
Payload_binding_valueContext(Payload_bindingContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Payload_bindingContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def payload_value_decl(self):
+            return self.getTypedRuleContext(ASLParser.Payload_value_declContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPayload_binding_value" ):
+                listener.enterPayload_binding_value(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPayload_binding_value" ):
+                listener.exitPayload_binding_value(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitPayload_binding_value" ):
+                return visitor.visitPayload_binding_value(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def payload_binding(self):
+
+        localctx = ASLParser.Payload_bindingContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 66, self.RULE_payload_binding)
+        try:
+            self.state = 512
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,23,self._ctx)
+            if la_ == 1:
+                localctx = ASLParser.Payload_binding_sampleContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 505
+                self.match(ASLParser.STRINGDOLLAR)
+                self.state = 506
+                self.match(ASLParser.COLON)
+                self.state = 507
+                self.string_expression_simple()
+                pass
+
+            elif la_ == 2:
+                localctx = ASLParser.Payload_binding_valueContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 508
+                self.string_literal()
+                self.state = 509
+                self.match(ASLParser.COLON)
+                self.state = 510
+                self.payload_value_decl()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Payload_arr_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACK(self):
+            return self.getToken(ASLParser.LBRACK, 0)
+
+        def payload_value_decl(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Payload_value_declContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Payload_value_declContext,i)
+
+        def RBRACK(self):
+            return self.getToken(ASLParser.RBRACK, 0)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_payload_arr_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPayload_arr_decl" ):
+                listener.enterPayload_arr_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPayload_arr_decl" ):
+                listener.exitPayload_arr_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitPayload_arr_decl" ):
+                return visitor.visitPayload_arr_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def payload_arr_decl(self):
+
+        localctx = ASLParser.Payload_arr_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 68, self.RULE_payload_arr_decl)
+        self._la = 0 # Token type
+        try:
+            self.state = 527
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,25,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 514
+                self.match(ASLParser.LBRACK)
+                self.state = 515
+                self.payload_value_decl()
+                self.state = 520
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 516
+                    self.match(ASLParser.COMMA)
+                    self.state = 517
+                    self.payload_value_decl()
+                    self.state = 522
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                self.state = 523
+                self.match(ASLParser.RBRACK)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 525
+                self.match(ASLParser.LBRACK)
+                self.state = 526
+                self.match(ASLParser.RBRACK)
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Payload_value_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def payload_arr_decl(self):
+            return self.getTypedRuleContext(ASLParser.Payload_arr_declContext,0)
+
+        def payload_tmpl_decl(self):
+            return self.getTypedRuleContext(ASLParser.Payload_tmpl_declContext,0)
+
+        def payload_value_lit(self):
+            return self.getTypedRuleContext(ASLParser.Payload_value_litContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_payload_value_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPayload_value_decl" ):
+                listener.enterPayload_value_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPayload_value_decl" ):
+                listener.exitPayload_value_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitPayload_value_decl" ):
+                return visitor.visitPayload_value_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def payload_value_decl(self):
+
+        localctx = ASLParser.Payload_value_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 70, self.RULE_payload_value_decl)
+        try:
+            self.state = 532
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [3]:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 529
+                self.payload_arr_decl()
+                pass
+            elif token in [5]:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 530
+                self.payload_tmpl_decl()
+                pass
+            elif token in [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 119, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161]:
+                self.enterOuterAlt(localctx, 3)
+                self.state = 531
+                self.payload_value_lit()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Payload_value_litContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_payload_value_lit
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+    class Payload_value_boolContext(Payload_value_litContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Payload_value_litContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def TRUE(self):
+            return self.getToken(ASLParser.TRUE, 0)
+        def FALSE(self):
+            return self.getToken(ASLParser.FALSE, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPayload_value_bool" ):
+                listener.enterPayload_value_bool(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPayload_value_bool" ):
+                listener.exitPayload_value_bool(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitPayload_value_bool" ):
+                return visitor.visitPayload_value_bool(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Payload_value_intContext(Payload_value_litContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Payload_value_litContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPayload_value_int" ):
+                listener.enterPayload_value_int(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPayload_value_int" ):
+                listener.exitPayload_value_int(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitPayload_value_int" ):
+                return visitor.visitPayload_value_int(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Payload_value_strContext(Payload_value_litContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Payload_value_litContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPayload_value_str" ):
+                listener.enterPayload_value_str(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPayload_value_str" ):
+                listener.exitPayload_value_str(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitPayload_value_str" ):
+                return visitor.visitPayload_value_str(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Payload_value_floatContext(Payload_value_litContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Payload_value_litContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def NUMBER(self):
+            return self.getToken(ASLParser.NUMBER, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPayload_value_float" ):
+                listener.enterPayload_value_float(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPayload_value_float" ):
+                listener.exitPayload_value_float(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitPayload_value_float" ):
+                return visitor.visitPayload_value_float(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Payload_value_nullContext(Payload_value_litContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Payload_value_litContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def NULL(self):
+            return self.getToken(ASLParser.NULL, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterPayload_value_null" ):
+                listener.enterPayload_value_null(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitPayload_value_null" ):
+                listener.exitPayload_value_null(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitPayload_value_null" ):
+                return visitor.visitPayload_value_null(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def payload_value_lit(self):
+
+        localctx = ASLParser.Payload_value_litContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 72, self.RULE_payload_value_lit)
+        self._la = 0 # Token type
+        try:
+            self.state = 539
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [161]:
+                localctx = ASLParser.Payload_value_floatContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 534
+                self.match(ASLParser.NUMBER)
+                pass
+            elif token in [160]:
+                localctx = ASLParser.Payload_value_intContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 535
+                self.match(ASLParser.INT)
+                pass
+            elif token in [7, 8]:
+                localctx = ASLParser.Payload_value_boolContext(self, localctx)
+                self.enterOuterAlt(localctx, 3)
+                self.state = 536
+                _la = self._input.LA(1)
+                if not(_la==7 or _la==8):
+                    self._errHandler.recoverInline(self)
+                else:
+                    self._errHandler.reportMatch(self)
+                    self.consume()
+                pass
+            elif token in [9]:
+                localctx = ASLParser.Payload_value_nullContext(self, localctx)
+                self.enterOuterAlt(localctx, 4)
+                self.state = 537
+                self.match(ASLParser.NULL)
+                pass
+            elif token in [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 119, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159]:
+                localctx = ASLParser.Payload_value_strContext(self, localctx)
+                self.enterOuterAlt(localctx, 5)
+                self.state = 538
+                self.string_literal()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
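Since the generated accept() hooks above dispatch via plain hasattr() checks, downstream code does not need the generated visitor base class. A minimal consumer sketch, assuming only the antlr4-python3-runtime this parser targets; the PayloadLiteralVisitor class and its quote-stripping are illustrative, not part of this change:

    from antlr4 import ParseTreeVisitor

    class PayloadLiteralVisitor(ParseTreeVisitor):
        # Method names match the labeled payload_value_lit alternatives
        # generated above; accept() finds them through hasattr().
        def visitPayload_value_float(self, ctx):
            return float(ctx.NUMBER().getText())

        def visitPayload_value_int(self, ctx):
            return int(ctx.INT().getText())

        def visitPayload_value_bool(self, ctx):
            # TRUE() returns the token if present, else None.
            return ctx.TRUE() is not None

        def visitPayload_value_null(self, ctx):
            return None

        def visitPayload_value_str(self, ctx):
            # string_literal covers quoted strings and keyword tokens alike.
            return ctx.string_literal().getText().strip('"')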
+    class Assign_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def ASSIGN(self):
+            return self.getToken(ASLParser.ASSIGN, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def assign_decl_body(self):
+            return self.getTypedRuleContext(ASLParser.Assign_decl_bodyContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_assign_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_decl" ):
+                listener.enterAssign_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_decl" ):
+                listener.exitAssign_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_decl" ):
+                return visitor.visitAssign_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def assign_decl(self):
+
+        localctx = ASLParser.Assign_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 74, self.RULE_assign_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 541
+            self.match(ASLParser.ASSIGN)
+            self.state = 542
+            self.match(ASLParser.COLON)
+            self.state = 543
+            self.assign_decl_body()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Assign_decl_bodyContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACE(self):
+            return self.getToken(ASLParser.LBRACE, 0)
+
+        def RBRACE(self):
+            return self.getToken(ASLParser.RBRACE, 0)
+
+        def assign_decl_binding(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Assign_decl_bindingContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Assign_decl_bindingContext,i)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_assign_decl_body
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_decl_body" ):
+                listener.enterAssign_decl_body(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_decl_body" ):
+                listener.exitAssign_decl_body(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_decl_body" ):
+                return visitor.visitAssign_decl_body(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def assign_decl_body(self):
+
+        localctx = ASLParser.Assign_decl_bodyContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 76, self.RULE_assign_decl_body)
+        self._la = 0 # Token type
+        try:
+            self.state = 558
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,29,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 545
+                self.match(ASLParser.LBRACE)
+                self.state = 546
+                self.match(ASLParser.RBRACE)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 547
+                self.match(ASLParser.LBRACE)
+                self.state = 548
+                self.assign_decl_binding()
+                self.state = 553
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 549
+                    self.match(ASLParser.COMMA)
+                    self.state = 550
+                    self.assign_decl_binding()
+                    self.state = 555
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                self.state = 556
+                self.match(ASLParser.RBRACE)
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Assign_decl_bindingContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def assign_template_binding(self):
+            return self.getTypedRuleContext(ASLParser.Assign_template_bindingContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_assign_decl_binding
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_decl_binding" ):
+                listener.enterAssign_decl_binding(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_decl_binding" ):
+                listener.exitAssign_decl_binding(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_decl_binding" ):
+                return visitor.visitAssign_decl_binding(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def assign_decl_binding(self):
+
+        localctx = ASLParser.Assign_decl_bindingContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 78, self.RULE_assign_decl_binding)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 560
+            self.assign_template_binding()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Assign_template_value_objectContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACE(self):
+            return self.getToken(ASLParser.LBRACE, 0)
+
+        def RBRACE(self):
+            return self.getToken(ASLParser.RBRACE, 0)
+
+        def assign_template_binding(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Assign_template_bindingContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Assign_template_bindingContext,i)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_assign_template_value_object
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value_object" ):
+                listener.enterAssign_template_value_object(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value_object" ):
+                listener.exitAssign_template_value_object(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value_object" ):
+                return visitor.visitAssign_template_value_object(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def assign_template_value_object(self):
+
+        localctx = ASLParser.Assign_template_value_objectContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 80, self.RULE_assign_template_value_object)
+        self._la = 0 # Token type
+        try:
+            self.state = 575
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,31,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 562
+                self.match(ASLParser.LBRACE)
+                self.state = 563
+                self.match(ASLParser.RBRACE)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 564
+                self.match(ASLParser.LBRACE)
+                self.state = 565
+                self.assign_template_binding()
+                self.state = 570
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 566
+                    self.match(ASLParser.COMMA)
+                    self.state = 567
+                    self.assign_template_binding()
+                    self.state = 572
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                self.state = 573
+                self.match(ASLParser.RBRACE)
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Assign_template_bindingContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_assign_template_binding
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+    class Assign_template_binding_valueContext(Assign_template_bindingContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Assign_template_bindingContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def assign_template_value(self):
+            return self.getTypedRuleContext(ASLParser.Assign_template_valueContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_binding_value" ):
+                listener.enterAssign_template_binding_value(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_binding_value" ):
+                listener.exitAssign_template_binding_value(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_binding_value" ):
+                return visitor.visitAssign_template_binding_value(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Assign_template_binding_string_expression_simpleContext(Assign_template_bindingContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Assign_template_bindingContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def STRINGDOLLAR(self):
+            return self.getToken(ASLParser.STRINGDOLLAR, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_expression_simple(self):
+            return self.getTypedRuleContext(ASLParser.String_expression_simpleContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_binding_string_expression_simple" ):
+                listener.enterAssign_template_binding_string_expression_simple(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_binding_string_expression_simple" ):
+                listener.exitAssign_template_binding_string_expression_simple(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_binding_string_expression_simple" ):
+                return visitor.visitAssign_template_binding_string_expression_simple(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def assign_template_binding(self):
+
+        localctx = ASLParser.Assign_template_bindingContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 82, self.RULE_assign_template_binding)
+        try:
+            self.state = 584
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,32,self._ctx)
+            if la_ == 1:
+                localctx = ASLParser.Assign_template_binding_string_expression_simpleContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 577
+                self.match(ASLParser.STRINGDOLLAR)
+                self.state = 578
+                self.match(ASLParser.COLON)
+                self.state = 579
+                self.string_expression_simple()
+                pass
+
+            elif la_ == 2:
+                localctx = ASLParser.Assign_template_binding_valueContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 580
+                self.string_literal()
+                self.state = 581
+                self.match(ASLParser.COLON)
+                self.state = 582
+                self.assign_template_value()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
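The assign_template_binding rule above mirrors payload_binding: a "$"-suffixed key binds a simple string expression, any other key binds a template value. A sketch of collecting the bound names through the generated listener hooks; the AssignKeyCollector class is a hypothetical example, and only ParseTreeListener/ParseTreeWalker come from the ANTLR runtime:

    from antlr4 import ParseTreeListener, ParseTreeWalker

    class AssignKeyCollector(ParseTreeListener):
        def __init__(self):
            self.keys = []

        # Invoked by the generated Assign_template_binding_valueContext.enterRule().
        def enterAssign_template_binding_value(self, ctx):
            self.keys.append(ctx.string_literal().getText().strip('"'))

        # Invoked for the '"name.$": "<expr>"' form of the binding.
        def enterAssign_template_binding_string_expression_simple(self, ctx):
            self.keys.append(ctx.STRINGDOLLAR().getText().strip('"'))

    # usage: ParseTreeWalker.DEFAULT.walk(AssignKeyCollector(), tree)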
+    class Assign_template_valueContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def assign_template_value_object(self):
+            return self.getTypedRuleContext(ASLParser.Assign_template_value_objectContext,0)
+
+        def assign_template_value_array(self):
+            return self.getTypedRuleContext(ASLParser.Assign_template_value_arrayContext,0)
+
+        def assign_template_value_terminal(self):
+            return self.getTypedRuleContext(ASLParser.Assign_template_value_terminalContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_assign_template_value
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value" ):
+                listener.enterAssign_template_value(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value" ):
+                listener.exitAssign_template_value(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value" ):
+                return visitor.visitAssign_template_value(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def assign_template_value(self):
+
+        localctx = ASLParser.Assign_template_valueContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 84, self.RULE_assign_template_value)
+        try:
+            self.state = 589
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [5]:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 586
+                self.assign_template_value_object()
+                pass
+            elif token in [3]:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 587
+                self.assign_template_value_array()
+                pass
+            elif token in [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 119, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161]:
+                self.enterOuterAlt(localctx, 3)
+                self.state = 588
+                self.assign_template_value_terminal()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Assign_template_value_arrayContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACK(self):
+            return self.getToken(ASLParser.LBRACK, 0)
+
+        def RBRACK(self):
+            return self.getToken(ASLParser.RBRACK, 0)
+
+        def assign_template_value(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Assign_template_valueContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Assign_template_valueContext,i)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_assign_template_value_array
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value_array" ):
+                listener.enterAssign_template_value_array(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value_array" ):
+                listener.exitAssign_template_value_array(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value_array" ):
+                return visitor.visitAssign_template_value_array(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def assign_template_value_array(self):
+
+        localctx = ASLParser.Assign_template_value_arrayContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 86, self.RULE_assign_template_value_array)
+        self._la = 0 # Token type
+        try:
+            self.state = 604
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,35,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 591
+                self.match(ASLParser.LBRACK)
+                self.state = 592
+                self.match(ASLParser.RBRACK)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 593
+                self.match(ASLParser.LBRACK)
+                self.state = 594
+                self.assign_template_value()
+                self.state = 599
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 595
+                    self.match(ASLParser.COMMA)
+                    self.state = 596
+                    self.assign_template_value()
+                    self.state = 601
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                self.state = 602
+                self.match(ASLParser.RBRACK)
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Assign_template_value_terminalContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_assign_template_value_terminal
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+    class Assign_template_value_terminal_nullContext(Assign_template_value_terminalContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Assign_template_value_terminalContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def NULL(self):
+            return self.getToken(ASLParser.NULL, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value_terminal_null" ):
+                listener.enterAssign_template_value_terminal_null(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value_terminal_null" ):
+                listener.exitAssign_template_value_terminal_null(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value_terminal_null" ):
+                return visitor.visitAssign_template_value_terminal_null(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Assign_template_value_terminal_string_literalContext(Assign_template_value_terminalContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Assign_template_value_terminalContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value_terminal_string_literal" ):
+                listener.enterAssign_template_value_terminal_string_literal(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value_terminal_string_literal" ):
+                listener.exitAssign_template_value_terminal_string_literal(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value_terminal_string_literal" ):
+                return visitor.visitAssign_template_value_terminal_string_literal(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Assign_template_value_terminal_intContext(Assign_template_value_terminalContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Assign_template_value_terminalContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value_terminal_int" ):
+                listener.enterAssign_template_value_terminal_int(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value_terminal_int" ):
+                listener.exitAssign_template_value_terminal_int(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value_terminal_int" ):
+                return visitor.visitAssign_template_value_terminal_int(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Assign_template_value_terminal_boolContext(Assign_template_value_terminalContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Assign_template_value_terminalContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def TRUE(self):
+            return self.getToken(ASLParser.TRUE, 0)
+        def FALSE(self):
+            return self.getToken(ASLParser.FALSE, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value_terminal_bool" ):
+                listener.enterAssign_template_value_terminal_bool(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value_terminal_bool" ):
+                listener.exitAssign_template_value_terminal_bool(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value_terminal_bool" ):
+                return visitor.visitAssign_template_value_terminal_bool(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Assign_template_value_terminal_floatContext(Assign_template_value_terminalContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Assign_template_value_terminalContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def NUMBER(self):
+            return self.getToken(ASLParser.NUMBER, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value_terminal_float" ):
+                listener.enterAssign_template_value_terminal_float(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value_terminal_float" ):
+                listener.exitAssign_template_value_terminal_float(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value_terminal_float" ):
+                return visitor.visitAssign_template_value_terminal_float(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Assign_template_value_terminal_string_jsonataContext(Assign_template_value_terminalContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Assign_template_value_terminalContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def string_jsonata(self):
+            return self.getTypedRuleContext(ASLParser.String_jsonataContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterAssign_template_value_terminal_string_jsonata" ):
+                listener.enterAssign_template_value_terminal_string_jsonata(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitAssign_template_value_terminal_string_jsonata" ):
+                listener.exitAssign_template_value_terminal_string_jsonata(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitAssign_template_value_terminal_string_jsonata" ):
+                return visitor.visitAssign_template_value_terminal_string_jsonata(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def assign_template_value_terminal(self):
+
+        localctx = ASLParser.Assign_template_value_terminalContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 88, self.RULE_assign_template_value_terminal)
+        self._la = 0 # Token type
+        try:
+            self.state = 612
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,36,self._ctx)
+            if la_ == 1:
+                localctx = ASLParser.Assign_template_value_terminal_floatContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 606
+                self.match(ASLParser.NUMBER)
+                pass
+
+            elif la_ == 2:
+                localctx = ASLParser.Assign_template_value_terminal_intContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 607
+                self.match(ASLParser.INT)
+                pass
+
+            elif la_ == 3:
+                localctx = ASLParser.Assign_template_value_terminal_boolContext(self, localctx)
+                self.enterOuterAlt(localctx, 3)
+                self.state = 608
+                _la = self._input.LA(1)
+                if not(_la==7 or _la==8):
+                    self._errHandler.recoverInline(self)
+                else:
+                    self._errHandler.reportMatch(self)
+                    self.consume()
+                pass
+
+            elif la_ == 4:
+                localctx = ASLParser.Assign_template_value_terminal_nullContext(self, localctx)
+                self.enterOuterAlt(localctx, 4)
+                self.state = 609
+                self.match(ASLParser.NULL)
+                pass
+
+            elif la_ == 5:
+                localctx = ASLParser.Assign_template_value_terminal_string_jsonataContext(self, localctx)
+                self.enterOuterAlt(localctx, 5)
+                self.state = 610
+                self.string_jsonata()
+                pass
+
+            elif la_ == 6:
+                localctx = ASLParser.Assign_template_value_terminal_string_literalContext(self, localctx)
+                self.enterOuterAlt(localctx, 6)
+                self.state = 611
+                self.string_literal()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Arguments_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_arguments_decl
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+    class Arguments_string_jsonataContext(Arguments_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Arguments_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def ARGUMENTS(self):
+            return self.getToken(ASLParser.ARGUMENTS, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_jsonata(self):
+            return self.getTypedRuleContext(ASLParser.String_jsonataContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterArguments_string_jsonata" ):
+                listener.enterArguments_string_jsonata(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitArguments_string_jsonata" ):
+                listener.exitArguments_string_jsonata(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitArguments_string_jsonata" ):
+                return visitor.visitArguments_string_jsonata(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Arguments_jsonata_template_value_objectContext(Arguments_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Arguments_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def ARGUMENTS(self):
+            return self.getToken(ASLParser.ARGUMENTS, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def jsonata_template_value_object(self):
+            return self.getTypedRuleContext(ASLParser.Jsonata_template_value_objectContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterArguments_jsonata_template_value_object" ):
+                listener.enterArguments_jsonata_template_value_object(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitArguments_jsonata_template_value_object" ):
+                listener.exitArguments_jsonata_template_value_object(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitArguments_jsonata_template_value_object" ):
+                return visitor.visitArguments_jsonata_template_value_object(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def arguments_decl(self):
+
+        localctx = ASLParser.Arguments_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 90, self.RULE_arguments_decl)
+        try:
+            self.state = 620
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,37,self._ctx)
+            if la_ == 1:
+                localctx = ASLParser.Arguments_jsonata_template_value_objectContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 614
+                self.match(ASLParser.ARGUMENTS)
+                self.state = 615
+                self.match(ASLParser.COLON)
+                self.state = 616
+                self.jsonata_template_value_object()
+                pass
+
+            elif la_ == 2:
+                localctx = ASLParser.Arguments_string_jsonataContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 617
+                self.match(ASLParser.ARGUMENTS)
+                self.state = 618
+                self.match(ASLParser.COLON)
+                self.state = 619
+                self.string_jsonata()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
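arguments_decl accepts the JSONata-era "Arguments" field in two shapes: a whole-field JSONata string or a template object. A sketch of telling the two labeled alternatives apart after parsing; the arguments_kind helper is illustrative, only the two context classes generated above are real:

    def arguments_kind(ctx):
        if isinstance(ctx, ASLParser.Arguments_string_jsonataContext):
            return "jsonata-string"    # "Arguments": "{% ... %}"
        if isinstance(ctx, ASLParser.Arguments_jsonata_template_value_objectContext):
            return "template-object"   # "Arguments": { ... }
        raise TypeError(f"unexpected arguments_decl alternative: {type(ctx).__name__}")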
+    class Output_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def OUTPUT(self):
+            return self.getToken(ASLParser.OUTPUT, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def jsonata_template_value(self):
+            return self.getTypedRuleContext(ASLParser.Jsonata_template_valueContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_output_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterOutput_decl" ):
+                listener.enterOutput_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitOutput_decl" ):
+                listener.exitOutput_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitOutput_decl" ):
+                return visitor.visitOutput_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def output_decl(self):
+
+        localctx = ASLParser.Output_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 92, self.RULE_output_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 622
+            self.match(ASLParser.OUTPUT)
+            self.state = 623
+            self.match(ASLParser.COLON)
+            self.state = 624
+            self.jsonata_template_value()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Jsonata_template_value_objectContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACE(self):
+            return self.getToken(ASLParser.LBRACE, 0)
+
+        def RBRACE(self):
+            return self.getToken(ASLParser.RBRACE, 0)
+
+        def jsonata_template_binding(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Jsonata_template_bindingContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Jsonata_template_bindingContext,i)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_jsonata_template_value_object
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJsonata_template_value_object" ):
+                listener.enterJsonata_template_value_object(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJsonata_template_value_object" ):
+                listener.exitJsonata_template_value_object(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJsonata_template_value_object" ):
+                return visitor.visitJsonata_template_value_object(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def jsonata_template_value_object(self):
+
+        localctx = ASLParser.Jsonata_template_value_objectContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 94, self.RULE_jsonata_template_value_object)
+        self._la = 0 # Token type
+        try:
+            self.state = 639
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,39,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 626
+                self.match(ASLParser.LBRACE)
+                self.state = 627
+                self.match(ASLParser.RBRACE)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 628
+                self.match(ASLParser.LBRACE)
+                self.state = 629
+                self.jsonata_template_binding()
+                self.state = 634
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 630
+                    self.match(ASLParser.COMMA)
+                    self.state = 631
+                    self.jsonata_template_binding()
+                    self.state = 636
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                self.state = 637
+                self.match(ASLParser.RBRACE)
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Jsonata_template_bindingContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def jsonata_template_value(self):
+            return self.getTypedRuleContext(ASLParser.Jsonata_template_valueContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_jsonata_template_binding
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJsonata_template_binding" ):
+                listener.enterJsonata_template_binding(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJsonata_template_binding" ):
+                listener.exitJsonata_template_binding(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJsonata_template_binding" ):
+                return visitor.visitJsonata_template_binding(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def jsonata_template_binding(self):
+
+        localctx = ASLParser.Jsonata_template_bindingContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 96, self.RULE_jsonata_template_binding)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 641
+            self.string_literal()
+            self.state = 642
+            self.match(ASLParser.COLON)
+            self.state = 643
+            self.jsonata_template_value()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Jsonata_template_valueContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def jsonata_template_value_object(self):
+            return self.getTypedRuleContext(ASLParser.Jsonata_template_value_objectContext,0)
+
+        def jsonata_template_value_array(self):
+            return self.getTypedRuleContext(ASLParser.Jsonata_template_value_arrayContext,0)
+
+        def jsonata_template_value_terminal(self):
+            return self.getTypedRuleContext(ASLParser.Jsonata_template_value_terminalContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_jsonata_template_value
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJsonata_template_value" ):
+                listener.enterJsonata_template_value(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJsonata_template_value" ):
+                listener.exitJsonata_template_value(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJsonata_template_value" ):
+                return visitor.visitJsonata_template_value(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def jsonata_template_value(self):
+
+        localctx = ASLParser.Jsonata_template_valueContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 98, self.RULE_jsonata_template_value)
+        try:
+            self.state = 648
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [5]:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 645
+                self.jsonata_template_value_object()
+                pass
+            elif token in [3]:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 646
+                self.jsonata_template_value_array()
+                pass
+            elif token in [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 119, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161]:
+                self.enterOuterAlt(localctx, 3)
+                self.state = 647
+                self.jsonata_template_value_terminal()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Jsonata_template_value_arrayContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACK(self):
+            return self.getToken(ASLParser.LBRACK, 0)
+
+        def RBRACK(self):
+            return self.getToken(ASLParser.RBRACK, 0)
+
+        def jsonata_template_value(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Jsonata_template_valueContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Jsonata_template_valueContext,i)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_jsonata_template_value_array
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJsonata_template_value_array" ):
+                listener.enterJsonata_template_value_array(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJsonata_template_value_array" ):
+                listener.exitJsonata_template_value_array(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJsonata_template_value_array" ):
+                return visitor.visitJsonata_template_value_array(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def jsonata_template_value_array(self):
+
+        localctx = ASLParser.Jsonata_template_value_arrayContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 100, self.RULE_jsonata_template_value_array)
+        self._la = 0 # Token type
+        try:
+            self.state = 663
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,42,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 650
+                self.match(ASLParser.LBRACK)
+                self.state = 651
+                self.match(ASLParser.RBRACK)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 652
+                self.match(ASLParser.LBRACK)
+                self.state = 653
+                self.jsonata_template_value()
+                self.state = 658
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 654
+                    self.match(ASLParser.COMMA)
+                    self.state = 655
+                    self.jsonata_template_value()
+                    self.state = 660
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                self.state = 661
+                self.match(ASLParser.RBRACK)
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
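jsonata_template_value recurses through objects and arrays exactly like a JSON grammar. A sketch of folding such a tree back into plain Python data; the JsonataTemplateFolder class is illustrative, and terminal alternatives (generated below) would get handlers analogous to the visitPayload_value_* methods sketched earlier:

    from antlr4 import ParseTreeVisitor

    class JsonataTemplateFolder(ParseTreeVisitor):
        def visitJsonata_template_value_object(self, ctx):
            # jsonata_template_binding() without an index returns all bindings.
            return {
                b.string_literal().getText().strip('"'): self.visit(b.jsonata_template_value())
                for b in ctx.jsonata_template_binding()
            }

        def visitJsonata_template_value_array(self, ctx):
            return [self.visit(v) for v in ctx.jsonata_template_value()]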
ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Jsonata_template_value_terminal_boolContext(Jsonata_template_value_terminalContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Jsonata_template_value_terminalContext + super().__init__(parser) + self.copyFrom(ctx) + + def TRUE(self): + return self.getToken(ASLParser.TRUE, 0) + def FALSE(self): + return self.getToken(ASLParser.FALSE, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterJsonata_template_value_terminal_bool" ): + listener.enterJsonata_template_value_terminal_bool(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitJsonata_template_value_terminal_bool" ): + listener.exitJsonata_template_value_terminal_bool(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitJsonata_template_value_terminal_bool" ): + return visitor.visitJsonata_template_value_terminal_bool(self) + else: + return visitor.visitChildren(self) + + + class Jsonata_template_value_terminal_string_jsonataContext(Jsonata_template_value_terminalContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Jsonata_template_value_terminalContext + super().__init__(parser) + self.copyFrom(ctx) + + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterJsonata_template_value_terminal_string_jsonata" ): + listener.enterJsonata_template_value_terminal_string_jsonata(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitJsonata_template_value_terminal_string_jsonata" ): + listener.exitJsonata_template_value_terminal_string_jsonata(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitJsonata_template_value_terminal_string_jsonata" ): + return visitor.visitJsonata_template_value_terminal_string_jsonata(self) + else: + return visitor.visitChildren(self) + + + class Jsonata_template_value_terminal_intContext(Jsonata_template_value_terminalContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Jsonata_template_value_terminalContext + super().__init__(parser) + self.copyFrom(ctx) + + def INT(self): + return self.getToken(ASLParser.INT, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterJsonata_template_value_terminal_int" ): + listener.enterJsonata_template_value_terminal_int(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitJsonata_template_value_terminal_int" ): + listener.exitJsonata_template_value_terminal_int(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitJsonata_template_value_terminal_int" ): + return visitor.visitJsonata_template_value_terminal_int(self) + else: + return visitor.visitChildren(self) + + + class Jsonata_template_value_terminal_string_literalContext(Jsonata_template_value_terminalContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Jsonata_template_value_terminalContext + super().__init__(parser) + self.copyFrom(ctx) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterJsonata_template_value_terminal_string_literal" ): + listener.enterJsonata_template_value_terminal_string_literal(self) + + def 
exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitJsonata_template_value_terminal_string_literal" ): + listener.exitJsonata_template_value_terminal_string_literal(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitJsonata_template_value_terminal_string_literal" ): + return visitor.visitJsonata_template_value_terminal_string_literal(self) + else: + return visitor.visitChildren(self) + + + class Jsonata_template_value_terminal_floatContext(Jsonata_template_value_terminalContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Jsonata_template_value_terminalContext + super().__init__(parser) + self.copyFrom(ctx) + + def NUMBER(self): + return self.getToken(ASLParser.NUMBER, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterJsonata_template_value_terminal_float" ): + listener.enterJsonata_template_value_terminal_float(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitJsonata_template_value_terminal_float" ): + listener.exitJsonata_template_value_terminal_float(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitJsonata_template_value_terminal_float" ): + return visitor.visitJsonata_template_value_terminal_float(self) + else: + return visitor.visitChildren(self) + + + class Jsonata_template_value_terminal_nullContext(Jsonata_template_value_terminalContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Jsonata_template_value_terminalContext + super().__init__(parser) + self.copyFrom(ctx) + + def NULL(self): + return self.getToken(ASLParser.NULL, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterJsonata_template_value_terminal_null" ): + listener.enterJsonata_template_value_terminal_null(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitJsonata_template_value_terminal_null" ): + listener.exitJsonata_template_value_terminal_null(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitJsonata_template_value_terminal_null" ): + return visitor.visitJsonata_template_value_terminal_null(self) + else: + return visitor.visitChildren(self) + + + + def jsonata_template_value_terminal(self): + + localctx = ASLParser.Jsonata_template_value_terminalContext(self, self._ctx, self.state) + self.enterRule(localctx, 102, self.RULE_jsonata_template_value_terminal) + self._la = 0 # Token type + try: + self.state = 671 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,43,self._ctx) + if la_ == 1: + localctx = ASLParser.Jsonata_template_value_terminal_floatContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 665 + self.match(ASLParser.NUMBER) + pass + + elif la_ == 2: + localctx = ASLParser.Jsonata_template_value_terminal_intContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 666 + self.match(ASLParser.INT) + pass + + elif la_ == 3: + localctx = ASLParser.Jsonata_template_value_terminal_boolContext(self, localctx) + self.enterOuterAlt(localctx, 3) + self.state = 667 + _la = self._input.LA(1) + if not(_la==7 or _la==8): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + pass + + elif la_ == 4: + localctx = ASLParser.Jsonata_template_value_terminal_nullContext(self, localctx) + self.enterOuterAlt(localctx, 4) + self.state = 668 + self.match(ASLParser.NULL) + pass + + elif la_ == 5: + 
localctx = ASLParser.Jsonata_template_value_terminal_string_jsonataContext(self, localctx) + self.enterOuterAlt(localctx, 5) + self.state = 669 + self.string_jsonata() + pass + + elif la_ == 6: + localctx = ASLParser.Jsonata_template_value_terminal_string_literalContext(self, localctx) + self.enterOuterAlt(localctx, 6) + self.state = 670 + self.string_literal() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Result_selector_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def RESULTSELECTOR(self): + return self.getToken(ASLParser.RESULTSELECTOR, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def payload_tmpl_decl(self): + return self.getTypedRuleContext(ASLParser.Payload_tmpl_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_result_selector_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterResult_selector_decl" ): + listener.enterResult_selector_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitResult_selector_decl" ): + listener.exitResult_selector_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitResult_selector_decl" ): + return visitor.visitResult_selector_decl(self) + else: + return visitor.visitChildren(self) + + + + + def result_selector_decl(self): + + localctx = ASLParser.Result_selector_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 104, self.RULE_result_selector_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 673 + self.match(ASLParser.RESULTSELECTOR) + self.state = 674 + self.match(ASLParser.COLON) + self.state = 675 + self.payload_tmpl_decl() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class State_typeContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def TASK(self): + return self.getToken(ASLParser.TASK, 0) + + def PASS(self): + return self.getToken(ASLParser.PASS, 0) + + def CHOICE(self): + return self.getToken(ASLParser.CHOICE, 0) + + def FAIL(self): + return self.getToken(ASLParser.FAIL, 0) + + def SUCCEED(self): + return self.getToken(ASLParser.SUCCEED, 0) + + def WAIT(self): + return self.getToken(ASLParser.WAIT, 0) + + def MAP(self): + return self.getToken(ASLParser.MAP, 0) + + def PARALLEL(self): + return self.getToken(ASLParser.PARALLEL, 0) + + def getRuleIndex(self): + return ASLParser.RULE_state_type + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterState_type" ): + listener.enterState_type(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitState_type" ): + listener.exitState_type(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitState_type" ): + return visitor.visitState_type(self) + else: + return visitor.visitChildren(self) + + + + + def state_type(self): + + localctx = ASLParser.State_typeContext(self, self._ctx, self.state) + self.enterRule(localctx, 
106, self.RULE_state_type) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 677 + _la = self._input.LA(1) + if not((((_la) & ~0x3f) == 0 and ((1 << _la) & 16711680) != 0)): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Choices_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def CHOICES(self): + return self.getToken(ASLParser.CHOICES, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACK(self): + return self.getToken(ASLParser.LBRACK, 0) + + def choice_rule(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Choice_ruleContext) + else: + return self.getTypedRuleContext(ASLParser.Choice_ruleContext,i) + + + def RBRACK(self): + return self.getToken(ASLParser.RBRACK, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_choices_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterChoices_decl" ): + listener.enterChoices_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitChoices_decl" ): + listener.exitChoices_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitChoices_decl" ): + return visitor.visitChoices_decl(self) + else: + return visitor.visitChildren(self) + + + + + def choices_decl(self): + + localctx = ASLParser.Choices_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 108, self.RULE_choices_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 679 + self.match(ASLParser.CHOICES) + self.state = 680 + self.match(ASLParser.COLON) + self.state = 681 + self.match(ASLParser.LBRACK) + self.state = 682 + self.choice_rule() + self.state = 687 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 683 + self.match(ASLParser.COMMA) + self.state = 684 + self.choice_rule() + self.state = 689 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 690 + self.match(ASLParser.RBRACK) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Choice_ruleContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_choice_rule + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Choice_rule_comparison_variableContext(Choice_ruleContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Choice_ruleContext + super().__init__(parser) + self.copyFrom(ctx) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + def comparison_variable_stmt(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Comparison_variable_stmtContext) + else: + 
return self.getTypedRuleContext(ASLParser.Comparison_variable_stmtContext,i) + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterChoice_rule_comparison_variable" ): + listener.enterChoice_rule_comparison_variable(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitChoice_rule_comparison_variable" ): + listener.exitChoice_rule_comparison_variable(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitChoice_rule_comparison_variable" ): + return visitor.visitChoice_rule_comparison_variable(self) + else: + return visitor.visitChildren(self) + + + class Choice_rule_comparison_compositeContext(Choice_ruleContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Choice_ruleContext + super().__init__(parser) + self.copyFrom(ctx) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + def comparison_composite_stmt(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Comparison_composite_stmtContext) + else: + return self.getTypedRuleContext(ASLParser.Comparison_composite_stmtContext,i) + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterChoice_rule_comparison_composite" ): + listener.enterChoice_rule_comparison_composite(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitChoice_rule_comparison_composite" ): + listener.exitChoice_rule_comparison_composite(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitChoice_rule_comparison_composite" ): + return visitor.visitChoice_rule_comparison_composite(self) + else: + return visitor.visitChildren(self) + + + + def choice_rule(self): + + localctx = ASLParser.Choice_ruleContext(self, self._ctx, self.state) + self.enterRule(localctx, 110, self.RULE_choice_rule) + self._la = 0 # Token type + try: + self.state = 713 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,47,self._ctx) + if la_ == 1: + localctx = ASLParser.Choice_rule_comparison_variableContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 692 + self.match(ASLParser.LBRACE) + self.state = 693 + self.comparison_variable_stmt() + self.state = 696 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 694 + self.match(ASLParser.COMMA) + self.state = 695 + self.comparison_variable_stmt() + self.state = 698 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not (_la==1): + break + + self.state = 700 + self.match(ASLParser.RBRACE) + pass + + elif la_ == 2: + localctx = ASLParser.Choice_rule_comparison_compositeContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 702 + self.match(ASLParser.LBRACE) + self.state = 703 + self.comparison_composite_stmt() + self.state = 708 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 704 + self.match(ASLParser.COMMA) + self.state = 705 + self.comparison_composite_stmt() + self.state = 710 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 711 + 
self.match(ASLParser.RBRACE) + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Comparison_variable_stmtContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def variable_decl(self): + return self.getTypedRuleContext(ASLParser.Variable_declContext,0) + + + def comparison_func(self): + return self.getTypedRuleContext(ASLParser.Comparison_funcContext,0) + + + def next_decl(self): + return self.getTypedRuleContext(ASLParser.Next_declContext,0) + + + def assign_decl(self): + return self.getTypedRuleContext(ASLParser.Assign_declContext,0) + + + def output_decl(self): + return self.getTypedRuleContext(ASLParser.Output_declContext,0) + + + def comment_decl(self): + return self.getTypedRuleContext(ASLParser.Comment_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_comparison_variable_stmt + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterComparison_variable_stmt" ): + listener.enterComparison_variable_stmt(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitComparison_variable_stmt" ): + listener.exitComparison_variable_stmt(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitComparison_variable_stmt" ): + return visitor.visitComparison_variable_stmt(self) + else: + return visitor.visitChildren(self) + + + + + def comparison_variable_stmt(self): + + localctx = ASLParser.Comparison_variable_stmtContext(self, self._ctx, self.state) + self.enterRule(localctx, 112, self.RULE_comparison_variable_stmt) + try: + self.state = 721 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [26]: + self.enterOuterAlt(localctx, 1) + self.state = 715 + self.variable_decl() + pass + elif token in [25, 30, 31, 32, 33, 34, 35, 36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70]: + self.enterOuterAlt(localctx, 2) + self.state = 716 + self.comparison_func() + pass + elif token in [115]: + self.enterOuterAlt(localctx, 3) + self.state = 717 + self.next_decl() + pass + elif token in [134]: + self.enterOuterAlt(localctx, 4) + self.state = 718 + self.assign_decl() + pass + elif token in [135]: + self.enterOuterAlt(localctx, 5) + self.state = 719 + self.output_decl() + pass + elif token in [10]: + self.enterOuterAlt(localctx, 6) + self.state = 720 + self.comment_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Comparison_composite_stmtContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def comparison_composite(self): + return self.getTypedRuleContext(ASLParser.Comparison_compositeContext,0) + + + def next_decl(self): + return self.getTypedRuleContext(ASLParser.Next_declContext,0) + + + def assign_decl(self): + return self.getTypedRuleContext(ASLParser.Assign_declContext,0) + + + def comment_decl(self): + return 
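choice_rule distinguishes two labeled alternatives via adaptivePredict: Choice_rule_comparison_variable, a brace-enclosed list of comparison_variable_stmt entries (Variable, a comparison function, Next, Assign, Output, or Comment), and Choice_rule_comparison_composite, the same shape built from comparison_composite_stmt entries. Hand-written sketches of each shape:

    # Variable-based rule: compares a path against a value.
    variable_rule = {"Variable": "$.n", "NumericGreaterThan": 0, "Next": "Positive"}

    # Composite rule: combines nested rules with a boolean operator.
    composite_rule = {
        "And": [
            {"Variable": "$.n", "NumericGreaterThan": 0},
            {"Variable": "$.n", "NumericLessThan": 10},
        ],
        "Next": "InRange",
    }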
self.getTypedRuleContext(ASLParser.Comment_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_comparison_composite_stmt + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterComparison_composite_stmt" ): + listener.enterComparison_composite_stmt(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitComparison_composite_stmt" ): + listener.exitComparison_composite_stmt(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitComparison_composite_stmt" ): + return visitor.visitComparison_composite_stmt(self) + else: + return visitor.visitChildren(self) + + + + + def comparison_composite_stmt(self): + + localctx = ASLParser.Comparison_composite_stmtContext(self, self._ctx, self.state) + self.enterRule(localctx, 114, self.RULE_comparison_composite_stmt) + try: + self.state = 727 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [29, 38, 49]: + self.enterOuterAlt(localctx, 1) + self.state = 723 + self.comparison_composite() + pass + elif token in [115]: + self.enterOuterAlt(localctx, 2) + self.state = 724 + self.next_decl() + pass + elif token in [134]: + self.enterOuterAlt(localctx, 3) + self.state = 725 + self.assign_decl() + pass + elif token in [10]: + self.enterOuterAlt(localctx, 4) + self.state = 726 + self.comment_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Comparison_compositeContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def choice_operator(self): + return self.getTypedRuleContext(ASLParser.Choice_operatorContext,0) + + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def choice_rule(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Choice_ruleContext) + else: + return self.getTypedRuleContext(ASLParser.Choice_ruleContext,i) + + + def LBRACK(self): + return self.getToken(ASLParser.LBRACK, 0) + + def RBRACK(self): + return self.getToken(ASLParser.RBRACK, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_comparison_composite + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterComparison_composite" ): + listener.enterComparison_composite(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitComparison_composite" ): + listener.exitComparison_composite(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitComparison_composite" ): + return visitor.visitComparison_composite(self) + else: + return visitor.visitChildren(self) + + + + + def comparison_composite(self): + + localctx = ASLParser.Comparison_compositeContext(self, self._ctx, self.state) + self.enterRule(localctx, 116, self.RULE_comparison_composite) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 729 + self.choice_operator() + self.state = 730 + self.match(ASLParser.COLON) + self.state = 743 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [5]: + self.state = 731 + self.choice_rule() + pass + elif token 
in [3]: + self.state = 732 + self.match(ASLParser.LBRACK) + self.state = 733 + self.choice_rule() + self.state = 738 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 734 + self.match(ASLParser.COMMA) + self.state = 735 + self.choice_rule() + self.state = 740 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 741 + self.match(ASLParser.RBRACK) + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Variable_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def VARIABLE(self): + return self.getToken(ASLParser.VARIABLE, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_sampler(self): + return self.getTypedRuleContext(ASLParser.String_samplerContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_variable_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterVariable_decl" ): + listener.enterVariable_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitVariable_decl" ): + listener.exitVariable_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitVariable_decl" ): + return visitor.visitVariable_decl(self) + else: + return visitor.visitChildren(self) + + + + + def variable_decl(self): + + localctx = ASLParser.Variable_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 118, self.RULE_variable_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 745 + self.match(ASLParser.VARIABLE) + self.state = 746 + self.match(ASLParser.COLON) + self.state = 747 + self.string_sampler() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Comparison_funcContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_comparison_func + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Condition_string_jsonataContext(Comparison_funcContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Comparison_funcContext + super().__init__(parser) + self.copyFrom(ctx) + + def CONDITION(self): + return self.getToken(ASLParser.CONDITION, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterCondition_string_jsonata" ): + listener.enterCondition_string_jsonata(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitCondition_string_jsonata" ): + listener.exitCondition_string_jsonata(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitCondition_string_jsonata" ): + return visitor.visitCondition_string_jsonata(self) + else: + return visitor.visitChildren(self) + + + class 
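Note that comparison_composite accepts, after the operator and colon, either a single choice_rule object or a bracketed list of them, so both of the following parse (hand-written sketches; the AWS States Language itself reserves the single-object form for "Not" and the array form for "And" and "Or"):

    not_rule = {"Not": {"Variable": "$.flag", "BooleanEquals": True}, "Next": "NoFlag"}
    or_rule = {
        "Or": [
            {"Variable": "$.a", "IsPresent": True},
            {"Variable": "$.b", "IsPresent": True},
        ],
        "Next": "Either",
    }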
Comparison_func_string_variable_sampleContext(Comparison_funcContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Comparison_funcContext + super().__init__(parser) + self.copyFrom(ctx) + + def comparison_op(self): + return self.getTypedRuleContext(ASLParser.Comparison_opContext,0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def string_variable_sample(self): + return self.getTypedRuleContext(ASLParser.String_variable_sampleContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterComparison_func_string_variable_sample" ): + listener.enterComparison_func_string_variable_sample(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitComparison_func_string_variable_sample" ): + listener.exitComparison_func_string_variable_sample(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitComparison_func_string_variable_sample" ): + return visitor.visitComparison_func_string_variable_sample(self) + else: + return visitor.visitChildren(self) + + + class Condition_litContext(Comparison_funcContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Comparison_funcContext + super().__init__(parser) + self.copyFrom(ctx) + + def CONDITION(self): + return self.getToken(ASLParser.CONDITION, 0) + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def TRUE(self): + return self.getToken(ASLParser.TRUE, 0) + def FALSE(self): + return self.getToken(ASLParser.FALSE, 0) + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterCondition_lit" ): + listener.enterCondition_lit(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitCondition_lit" ): + listener.exitCondition_lit(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitCondition_lit" ): + return visitor.visitCondition_lit(self) + else: + return visitor.visitChildren(self) + + + class Comparison_func_valueContext(Comparison_funcContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Comparison_funcContext + super().__init__(parser) + self.copyFrom(ctx) + + def comparison_op(self): + return self.getTypedRuleContext(ASLParser.Comparison_opContext,0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + def json_value_decl(self): + return self.getTypedRuleContext(ASLParser.Json_value_declContext,0) + + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterComparison_func_value" ): + listener.enterComparison_func_value(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitComparison_func_value" ): + listener.exitComparison_func_value(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitComparison_func_value" ): + return visitor.visitComparison_func_value(self) + else: + return visitor.visitChildren(self) + + + + def comparison_func(self): + + localctx = ASLParser.Comparison_funcContext(self, self._ctx, self.state) + self.enterRule(localctx, 120, self.RULE_comparison_func) + self._la = 0 # Token type + try: + self.state = 763 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,52,self._ctx) + if la_ == 1: + localctx = ASLParser.Condition_litContext(self, localctx) + self.enterOuterAlt(localctx, 1) + self.state = 749 + self.match(ASLParser.CONDITION) + self.state = 750 + self.match(ASLParser.COLON) + self.state = 751 + _la = 
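comparison_func carries four labeled alternatives: a literal "Condition" (true/false), a JSONata "Condition" string, a comparison operator applied to a variable sample, and a comparison operator applied to a JSON value. Hand-written fragments for three of them; the {% ... %} delimiters follow the States Language's JSONata mode:

    condition_literal = {"Condition": True, "Next": "Always"}
    condition_jsonata = {"Condition": "{% $states.input.n > 0 %}", "Next": "Positive"}
    operator_value = {"Variable": "$.n", "NumericEquals": 42, "Next": "Answer"}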
self._input.LA(1) + if not(_la==7 or _la==8): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + pass + + elif la_ == 2: + localctx = ASLParser.Condition_string_jsonataContext(self, localctx) + self.enterOuterAlt(localctx, 2) + self.state = 752 + self.match(ASLParser.CONDITION) + self.state = 753 + self.match(ASLParser.COLON) + self.state = 754 + self.string_jsonata() + pass + + elif la_ == 3: + localctx = ASLParser.Comparison_func_string_variable_sampleContext(self, localctx) + self.enterOuterAlt(localctx, 3) + self.state = 755 + self.comparison_op() + self.state = 756 + self.match(ASLParser.COLON) + self.state = 757 + self.string_variable_sample() + pass + + elif la_ == 4: + localctx = ASLParser.Comparison_func_valueContext(self, localctx) + self.enterOuterAlt(localctx, 4) + self.state = 759 + self.comparison_op() + self.state = 760 + self.match(ASLParser.COLON) + self.state = 761 + self.json_value_decl() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Branches_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def BRANCHES(self): + return self.getToken(ASLParser.BRANCHES, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACK(self): + return self.getToken(ASLParser.LBRACK, 0) + + def program_decl(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Program_declContext) + else: + return self.getTypedRuleContext(ASLParser.Program_declContext,i) + + + def RBRACK(self): + return self.getToken(ASLParser.RBRACK, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_branches_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterBranches_decl" ): + listener.enterBranches_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitBranches_decl" ): + listener.exitBranches_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitBranches_decl" ): + return visitor.visitBranches_decl(self) + else: + return visitor.visitChildren(self) + + + + + def branches_decl(self): + + localctx = ASLParser.Branches_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 122, self.RULE_branches_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 765 + self.match(ASLParser.BRANCHES) + self.state = 766 + self.match(ASLParser.COLON) + self.state = 767 + self.match(ASLParser.LBRACK) + self.state = 768 + self.program_decl() + self.state = 773 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 769 + self.match(ASLParser.COMMA) + self.state = 770 + self.program_decl() + self.state = 775 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 776 + self.match(ASLParser.RBRACK) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Item_processor_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, 
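branches_decl gives a Parallel state its "Branches" field: a non-empty, comma-separated array in which every element is a complete nested program_decl, that is, a state machine definition in its own right. A hand-written sketch:

    branches = [
        {"StartAt": "A", "States": {"A": {"Type": "Succeed"}}},
        {"StartAt": "B", "States": {"B": {"Type": "Succeed"}}},
    ]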
parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ITEMPROCESSOR(self): + return self.getToken(ASLParser.ITEMPROCESSOR, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def item_processor_item(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Item_processor_itemContext) + else: + return self.getTypedRuleContext(ASLParser.Item_processor_itemContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_item_processor_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterItem_processor_decl" ): + listener.enterItem_processor_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitItem_processor_decl" ): + listener.exitItem_processor_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitItem_processor_decl" ): + return visitor.visitItem_processor_decl(self) + else: + return visitor.visitChildren(self) + + + + + def item_processor_decl(self): + + localctx = ASLParser.Item_processor_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 124, self.RULE_item_processor_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 778 + self.match(ASLParser.ITEMPROCESSOR) + self.state = 779 + self.match(ASLParser.COLON) + self.state = 780 + self.match(ASLParser.LBRACE) + self.state = 781 + self.item_processor_item() + self.state = 786 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 782 + self.match(ASLParser.COMMA) + self.state = 783 + self.item_processor_item() + self.state = 788 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 789 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Item_processor_itemContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def processor_config_decl(self): + return self.getTypedRuleContext(ASLParser.Processor_config_declContext,0) + + + def startat_decl(self): + return self.getTypedRuleContext(ASLParser.Startat_declContext,0) + + + def states_decl(self): + return self.getTypedRuleContext(ASLParser.States_declContext,0) + + + def comment_decl(self): + return self.getTypedRuleContext(ASLParser.Comment_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_item_processor_item + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterItem_processor_item" ): + listener.enterItem_processor_item(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitItem_processor_item" ): + listener.exitItem_processor_item(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitItem_processor_item" ): + return visitor.visitItem_processor_item(self) + else: + return visitor.visitChildren(self) + + + + + def item_processor_item(self): + + localctx = 
ASLParser.Item_processor_itemContext(self, self._ctx, self.state) + self.enterRule(localctx, 126, self.RULE_item_processor_item) + try: + self.state = 795 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [79]: + self.enterOuterAlt(localctx, 1) + self.state = 791 + self.processor_config_decl() + pass + elif token in [12]: + self.enterOuterAlt(localctx, 2) + self.state = 792 + self.startat_decl() + pass + elif token in [11]: + self.enterOuterAlt(localctx, 3) + self.state = 793 + self.states_decl() + pass + elif token in [10]: + self.enterOuterAlt(localctx, 4) + self.state = 794 + self.comment_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Processor_config_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def PROCESSORCONFIG(self): + return self.getToken(ASLParser.PROCESSORCONFIG, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def processor_config_field(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Processor_config_fieldContext) + else: + return self.getTypedRuleContext(ASLParser.Processor_config_fieldContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_processor_config_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterProcessor_config_decl" ): + listener.enterProcessor_config_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitProcessor_config_decl" ): + listener.exitProcessor_config_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitProcessor_config_decl" ): + return visitor.visitProcessor_config_decl(self) + else: + return visitor.visitChildren(self) + + + + + def processor_config_decl(self): + + localctx = ASLParser.Processor_config_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 128, self.RULE_processor_config_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 797 + self.match(ASLParser.PROCESSORCONFIG) + self.state = 798 + self.match(ASLParser.COLON) + self.state = 799 + self.match(ASLParser.LBRACE) + self.state = 800 + self.processor_config_field() + self.state = 805 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 801 + self.match(ASLParser.COMMA) + self.state = 802 + self.processor_config_field() + self.state = 807 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 808 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Processor_config_fieldContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def 
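item_processor_decl parses a Map state's "ItemProcessor" object, whose members per item_processor_item are ProcessorConfig, StartAt, States, or Comment, in any order. A hand-written sketch:

    item_processor = {
        "ProcessorConfig": {"Mode": "INLINE"},
        "StartAt": "Work",
        "States": {"Work": {"Type": "Pass", "End": True}},
    }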
mode_decl(self): + return self.getTypedRuleContext(ASLParser.Mode_declContext,0) + + + def execution_decl(self): + return self.getTypedRuleContext(ASLParser.Execution_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_processor_config_field + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterProcessor_config_field" ): + listener.enterProcessor_config_field(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitProcessor_config_field" ): + listener.exitProcessor_config_field(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitProcessor_config_field" ): + return visitor.visitProcessor_config_field(self) + else: + return visitor.visitChildren(self) + + + + + def processor_config_field(self): + + localctx = ASLParser.Processor_config_fieldContext(self, self._ctx, self.state) + self.enterRule(localctx, 130, self.RULE_processor_config_field) + try: + self.state = 812 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [80]: + self.enterOuterAlt(localctx, 1) + self.state = 810 + self.mode_decl() + pass + elif token in [83]: + self.enterOuterAlt(localctx, 2) + self.state = 811 + self.execution_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Mode_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def MODE(self): + return self.getToken(ASLParser.MODE, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def mode_type(self): + return self.getTypedRuleContext(ASLParser.Mode_typeContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_mode_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterMode_decl" ): + listener.enterMode_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitMode_decl" ): + listener.exitMode_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitMode_decl" ): + return visitor.visitMode_decl(self) + else: + return visitor.visitChildren(self) + + + + + def mode_decl(self): + + localctx = ASLParser.Mode_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 132, self.RULE_mode_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 814 + self.match(ASLParser.MODE) + self.state = 815 + self.match(ASLParser.COLON) + self.state = 816 + self.mode_type() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Mode_typeContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def INLINE(self): + return self.getToken(ASLParser.INLINE, 0) + + def DISTRIBUTED(self): + return self.getToken(ASLParser.DISTRIBUTED, 0) + + def getRuleIndex(self): + return ASLParser.RULE_mode_type + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterMode_type" ): + listener.enterMode_type(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( 
listener, "exitMode_type" ): + listener.exitMode_type(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitMode_type" ): + return visitor.visitMode_type(self) + else: + return visitor.visitChildren(self) + + + + + def mode_type(self): + + localctx = ASLParser.Mode_typeContext(self, self._ctx, self.state) + self.enterRule(localctx, 134, self.RULE_mode_type) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 818 + _la = self._input.LA(1) + if not(_la==81 or _la==82): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Execution_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def EXECUTIONTYPE(self): + return self.getToken(ASLParser.EXECUTIONTYPE, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def execution_type(self): + return self.getTypedRuleContext(ASLParser.Execution_typeContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_execution_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterExecution_decl" ): + listener.enterExecution_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitExecution_decl" ): + listener.exitExecution_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitExecution_decl" ): + return visitor.visitExecution_decl(self) + else: + return visitor.visitChildren(self) + + + + + def execution_decl(self): + + localctx = ASLParser.Execution_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 136, self.RULE_execution_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 820 + self.match(ASLParser.EXECUTIONTYPE) + self.state = 821 + self.match(ASLParser.COLON) + self.state = 822 + self.execution_type() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Execution_typeContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def STANDARD(self): + return self.getToken(ASLParser.STANDARD, 0) + + def getRuleIndex(self): + return ASLParser.RULE_execution_type + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterExecution_type" ): + listener.enterExecution_type(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitExecution_type" ): + listener.exitExecution_type(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitExecution_type" ): + return visitor.visitExecution_type(self) + else: + return visitor.visitChildren(self) + + + + + def execution_type(self): + + localctx = ASLParser.Execution_typeContext(self, self._ctx, self.state) + self.enterRule(localctx, 138, self.RULE_execution_type) + try: + self.enterOuterAlt(localctx, 1) + self.state = 824 + self.match(ASLParser.STANDARD) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, 
re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Iterator_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ITERATOR(self): + return self.getToken(ASLParser.ITERATOR, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def iterator_decl_item(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Iterator_decl_itemContext) + else: + return self.getTypedRuleContext(ASLParser.Iterator_decl_itemContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_iterator_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterIterator_decl" ): + listener.enterIterator_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitIterator_decl" ): + listener.exitIterator_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitIterator_decl" ): + return visitor.visitIterator_decl(self) + else: + return visitor.visitChildren(self) + + + + + def iterator_decl(self): + + localctx = ASLParser.Iterator_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 140, self.RULE_iterator_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 826 + self.match(ASLParser.ITERATOR) + self.state = 827 + self.match(ASLParser.COLON) + self.state = 828 + self.match(ASLParser.LBRACE) + self.state = 829 + self.iterator_decl_item() + self.state = 834 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 830 + self.match(ASLParser.COMMA) + self.state = 831 + self.iterator_decl_item() + self.state = 836 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 837 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Iterator_decl_itemContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def startat_decl(self): + return self.getTypedRuleContext(ASLParser.Startat_declContext,0) + + + def states_decl(self): + return self.getTypedRuleContext(ASLParser.States_declContext,0) + + + def comment_decl(self): + return self.getTypedRuleContext(ASLParser.Comment_declContext,0) + + + def processor_config_decl(self): + return self.getTypedRuleContext(ASLParser.Processor_config_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_iterator_decl_item + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterIterator_decl_item" ): + listener.enterIterator_decl_item(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitIterator_decl_item" ): + listener.exitIterator_decl_item(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitIterator_decl_item" ): + return visitor.visitIterator_decl_item(self) + else: + return 
visitor.visitChildren(self) + + + + + def iterator_decl_item(self): + + localctx = ASLParser.Iterator_decl_itemContext(self, self._ctx, self.state) + self.enterRule(localctx, 142, self.RULE_iterator_decl_item) + try: + self.state = 843 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [12]: + self.enterOuterAlt(localctx, 1) + self.state = 839 + self.startat_decl() + pass + elif token in [11]: + self.enterOuterAlt(localctx, 2) + self.state = 840 + self.states_decl() + pass + elif token in [10]: + self.enterOuterAlt(localctx, 3) + self.state = 841 + self.comment_decl() + pass + elif token in [79]: + self.enterOuterAlt(localctx, 4) + self.state = 842 + self.processor_config_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Item_selector_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ITEMSELECTOR(self): + return self.getToken(ASLParser.ITEMSELECTOR, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def assign_template_value_object(self): + return self.getTypedRuleContext(ASLParser.Assign_template_value_objectContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_item_selector_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterItem_selector_decl" ): + listener.enterItem_selector_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitItem_selector_decl" ): + listener.exitItem_selector_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitItem_selector_decl" ): + return visitor.visitItem_selector_decl(self) + else: + return visitor.visitChildren(self) + + + + + def item_selector_decl(self): + + localctx = ASLParser.Item_selector_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 144, self.RULE_item_selector_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 845 + self.match(ASLParser.ITEMSELECTOR) + self.state = 846 + self.match(ASLParser.COLON) + self.state = 847 + self.assign_template_value_object() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Item_reader_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ITEMREADER(self): + return self.getToken(ASLParser.ITEMREADER, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def items_reader_field(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Items_reader_fieldContext) + else: + return self.getTypedRuleContext(ASLParser.Items_reader_fieldContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_item_reader_decl + + def enterRule(self, 
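iterator_decl handles the legacy "Iterator" spelling of the same construct; iterator_decl_item accepts the same StartAt, States, and Comment members plus a nested ProcessorConfig. A minimal sketch:

    iterator = {"StartAt": "Work", "States": {"Work": {"Type": "Pass", "End": True}}}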
listener:ParseTreeListener): + if hasattr( listener, "enterItem_reader_decl" ): + listener.enterItem_reader_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitItem_reader_decl" ): + listener.exitItem_reader_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitItem_reader_decl" ): + return visitor.visitItem_reader_decl(self) + else: + return visitor.visitChildren(self) + + + + + def item_reader_decl(self): + + localctx = ASLParser.Item_reader_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 146, self.RULE_item_reader_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 849 + self.match(ASLParser.ITEMREADER) + self.state = 850 + self.match(ASLParser.COLON) + self.state = 851 + self.match(ASLParser.LBRACE) + self.state = 852 + self.items_reader_field() + self.state = 857 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 853 + self.match(ASLParser.COMMA) + self.state = 854 + self.items_reader_field() + self.state = 859 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 860 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Items_reader_fieldContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def resource_decl(self): + return self.getTypedRuleContext(ASLParser.Resource_declContext,0) + + + def reader_config_decl(self): + return self.getTypedRuleContext(ASLParser.Reader_config_declContext,0) + + + def parameters_decl(self): + return self.getTypedRuleContext(ASLParser.Parameters_declContext,0) + + + def arguments_decl(self): + return self.getTypedRuleContext(ASLParser.Arguments_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_items_reader_field + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterItems_reader_field" ): + listener.enterItems_reader_field(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitItems_reader_field" ): + listener.exitItems_reader_field(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitItems_reader_field" ): + return visitor.visitItems_reader_field(self) + else: + return visitor.visitChildren(self) + + + + + def items_reader_field(self): + + localctx = ASLParser.Items_reader_fieldContext(self, self._ctx, self.state) + self.enterRule(localctx, 148, self.RULE_items_reader_field) + try: + self.state = 866 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [90]: + self.enterOuterAlt(localctx, 1) + self.state = 862 + self.resource_decl() + pass + elif token in [103]: + self.enterOuterAlt(localctx, 2) + self.state = 863 + self.reader_config_decl() + pass + elif token in [97]: + self.enterOuterAlt(localctx, 3) + self.state = 864 + self.parameters_decl() + pass + elif token in [136]: + self.enterOuterAlt(localctx, 4) + self.state = 865 + self.arguments_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class 
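item_selector_decl binds "ItemSelector" to a template value object, and item_reader_decl parses the "ItemReader" block of a Distributed Map state, whose fields per items_reader_field are Resource, ReaderConfig, Parameters, or Arguments. A hand-written sketch with an illustrative ARN and bucket:

    item_reader = {
        "Resource": "arn:aws:states:::s3:getObject",
        "ReaderConfig": {"InputType": "CSV", "CSVHeaderLocation": "FIRST_ROW"},
        "Parameters": {"Bucket": "example-bucket", "Key": "items.csv"},
    }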
Reader_config_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def READERCONFIG(self): + return self.getToken(ASLParser.READERCONFIG, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACE(self): + return self.getToken(ASLParser.LBRACE, 0) + + def reader_config_field(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.Reader_config_fieldContext) + else: + return self.getTypedRuleContext(ASLParser.Reader_config_fieldContext,i) + + + def RBRACE(self): + return self.getToken(ASLParser.RBRACE, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_reader_config_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterReader_config_decl" ): + listener.enterReader_config_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitReader_config_decl" ): + listener.exitReader_config_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitReader_config_decl" ): + return visitor.visitReader_config_decl(self) + else: + return visitor.visitChildren(self) + + + + + def reader_config_decl(self): + + localctx = ASLParser.Reader_config_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 150, self.RULE_reader_config_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 868 + self.match(ASLParser.READERCONFIG) + self.state = 869 + self.match(ASLParser.COLON) + self.state = 870 + self.match(ASLParser.LBRACE) + self.state = 871 + self.reader_config_field() + self.state = 876 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 872 + self.match(ASLParser.COMMA) + self.state = 873 + self.reader_config_field() + self.state = 878 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 879 + self.match(ASLParser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Reader_config_fieldContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def input_type_decl(self): + return self.getTypedRuleContext(ASLParser.Input_type_declContext,0) + + + def csv_header_location_decl(self): + return self.getTypedRuleContext(ASLParser.Csv_header_location_declContext,0) + + + def csv_headers_decl(self): + return self.getTypedRuleContext(ASLParser.Csv_headers_declContext,0) + + + def max_items_decl(self): + return self.getTypedRuleContext(ASLParser.Max_items_declContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_reader_config_field + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterReader_config_field" ): + listener.enterReader_config_field(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitReader_config_field" ): + listener.exitReader_config_field(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitReader_config_field" ): + return visitor.visitReader_config_field(self) + else: + 
return visitor.visitChildren(self) + + + + + def reader_config_field(self): + + localctx = ASLParser.Reader_config_fieldContext(self, self._ctx, self.state) + self.enterRule(localctx, 152, self.RULE_reader_config_field) + try: + self.state = 885 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [104]: + self.enterOuterAlt(localctx, 1) + self.state = 881 + self.input_type_decl() + pass + elif token in [105]: + self.enterOuterAlt(localctx, 2) + self.state = 882 + self.csv_header_location_decl() + pass + elif token in [106]: + self.enterOuterAlt(localctx, 3) + self.state = 883 + self.csv_headers_decl() + pass + elif token in [107, 108]: + self.enterOuterAlt(localctx, 4) + self.state = 884 + self.max_items_decl() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Input_type_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def INPUTTYPE(self): + return self.getToken(ASLParser.INPUTTYPE, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_input_type_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterInput_type_decl" ): + listener.enterInput_type_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitInput_type_decl" ): + listener.exitInput_type_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitInput_type_decl" ): + return visitor.visitInput_type_decl(self) + else: + return visitor.visitChildren(self) + + + + + def input_type_decl(self): + + localctx = ASLParser.Input_type_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 154, self.RULE_input_type_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 887 + self.match(ASLParser.INPUTTYPE) + self.state = 888 + self.match(ASLParser.COLON) + self.state = 889 + self.string_literal() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Csv_header_location_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def CSVHEADERLOCATION(self): + return self.getToken(ASLParser.CSVHEADERLOCATION, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def string_literal(self): + return self.getTypedRuleContext(ASLParser.String_literalContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_csv_header_location_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterCsv_header_location_decl" ): + listener.enterCsv_header_location_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitCsv_header_location_decl" ): + listener.exitCsv_header_location_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitCsv_header_location_decl" ): + return 
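reader_config_field enumerates the four members a "ReaderConfig" object may carry: InputType, CSVHeaderLocation, CSVHeaders, and MaxItems (two token alternatives, covering "MaxItemsPath" as well). Combined in one hand-written object:

    reader_config = {
        "InputType": "CSV",
        "CSVHeaderLocation": "GIVEN",
        "CSVHeaders": ["id", "name"],
        "MaxItems": 100,
    }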
visitor.visitCsv_header_location_decl(self) + else: + return visitor.visitChildren(self) + + + + + def csv_header_location_decl(self): + + localctx = ASLParser.Csv_header_location_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 156, self.RULE_csv_header_location_decl) + try: + self.enterOuterAlt(localctx, 1) + self.state = 891 + self.match(ASLParser.CSVHEADERLOCATION) + self.state = 892 + self.match(ASLParser.COLON) + self.state = 893 + self.string_literal() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Csv_headers_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def CSVHEADERS(self): + return self.getToken(ASLParser.CSVHEADERS, 0) + + def COLON(self): + return self.getToken(ASLParser.COLON, 0) + + def LBRACK(self): + return self.getToken(ASLParser.LBRACK, 0) + + def string_literal(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ASLParser.String_literalContext) + else: + return self.getTypedRuleContext(ASLParser.String_literalContext,i) + + + def RBRACK(self): + return self.getToken(ASLParser.RBRACK, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ASLParser.COMMA) + else: + return self.getToken(ASLParser.COMMA, i) + + def getRuleIndex(self): + return ASLParser.RULE_csv_headers_decl + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterCsv_headers_decl" ): + listener.enterCsv_headers_decl(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitCsv_headers_decl" ): + listener.exitCsv_headers_decl(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitCsv_headers_decl" ): + return visitor.visitCsv_headers_decl(self) + else: + return visitor.visitChildren(self) + + + + + def csv_headers_decl(self): + + localctx = ASLParser.Csv_headers_declContext(self, self._ctx, self.state) + self.enterRule(localctx, 158, self.RULE_csv_headers_decl) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 895 + self.match(ASLParser.CSVHEADERS) + self.state = 896 + self.match(ASLParser.COLON) + self.state = 897 + self.match(ASLParser.LBRACK) + self.state = 898 + self.string_literal() + self.state = 903 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==1: + self.state = 899 + self.match(ASLParser.COMMA) + self.state = 900 + self.string_literal() + self.state = 905 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 906 + self.match(ASLParser.RBRACK) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Max_items_declContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + + def getRuleIndex(self): + return ASLParser.RULE_max_items_decl + + + def copyFrom(self, ctx:ParserRuleContext): + super().copyFrom(ctx) + + + + class Max_items_string_jsonataContext(Max_items_declContext): + + def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Max_items_declContext + 
+    class Max_items_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_max_items_decl
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+    class Max_items_string_jsonataContext(Max_items_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Max_items_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def MAXITEMS(self):
+            return self.getToken(ASLParser.MAXITEMS, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_jsonata(self):
+            return self.getTypedRuleContext(ASLParser.String_jsonataContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterMax_items_string_jsonata" ):
+                listener.enterMax_items_string_jsonata(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitMax_items_string_jsonata" ):
+                listener.exitMax_items_string_jsonata(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitMax_items_string_jsonata" ):
+                return visitor.visitMax_items_string_jsonata(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Max_items_intContext(Max_items_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Max_items_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def MAXITEMS(self):
+            return self.getToken(ASLParser.MAXITEMS, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterMax_items_int" ):
+                listener.enterMax_items_int(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitMax_items_int" ):
+                listener.exitMax_items_int(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitMax_items_int" ):
+                return visitor.visitMax_items_int(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Max_items_pathContext(Max_items_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Max_items_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def MAXITEMSPATH(self):
+            return self.getToken(ASLParser.MAXITEMSPATH, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_sampler(self):
+            return self.getTypedRuleContext(ASLParser.String_samplerContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterMax_items_path" ):
+                listener.enterMax_items_path(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitMax_items_path" ):
+                listener.exitMax_items_path(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitMax_items_path" ):
+                return visitor.visitMax_items_path(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def max_items_decl(self):
+
+        localctx = ASLParser.Max_items_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 160, self.RULE_max_items_decl)
+        try:
+            self.state = 917
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,65,self._ctx)
+            if la_ == 1:
+                localctx = ASLParser.Max_items_string_jsonataContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 908
+                self.match(ASLParser.MAXITEMS)
+                self.state = 909
+                self.match(ASLParser.COLON)
+                self.state = 910
+                self.string_jsonata()
+                pass
+
+            elif la_ == 2:
+                localctx = ASLParser.Max_items_intContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 911
+                self.match(ASLParser.MAXITEMS)
+                self.state = 912
+                self.match(ASLParser.COLON)
+                self.state = 913
+                self.match(ASLParser.INT)
+                pass
+
+            elif la_ == 3:
+                localctx = ASLParser.Max_items_pathContext(self, localctx)
+                self.enterOuterAlt(localctx, 3)
+                self.state = 914
+                self.match(ASLParser.MAXITEMSPATH)
+                self.state = 915
+                self.match(ASLParser.COLON)
+                self.state = 916
+                self.string_sampler()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Tolerated_failure_count_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_tolerated_failure_count_decl
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+    class Tolerated_failure_count_intContext(Tolerated_failure_count_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Tolerated_failure_count_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def TOLERATEDFAILURECOUNT(self):
+            return self.getToken(ASLParser.TOLERATEDFAILURECOUNT, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterTolerated_failure_count_int" ):
+                listener.enterTolerated_failure_count_int(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitTolerated_failure_count_int" ):
+                listener.exitTolerated_failure_count_int(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitTolerated_failure_count_int" ):
+                return visitor.visitTolerated_failure_count_int(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Tolerated_failure_count_pathContext(Tolerated_failure_count_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Tolerated_failure_count_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def TOLERATEDFAILURECOUNTPATH(self):
+            return self.getToken(ASLParser.TOLERATEDFAILURECOUNTPATH, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_sampler(self):
+            return self.getTypedRuleContext(ASLParser.String_samplerContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterTolerated_failure_count_path" ):
+                listener.enterTolerated_failure_count_path(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitTolerated_failure_count_path" ):
+                listener.exitTolerated_failure_count_path(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitTolerated_failure_count_path" ):
+                return visitor.visitTolerated_failure_count_path(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Tolerated_failure_count_string_jsonataContext(Tolerated_failure_count_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Tolerated_failure_count_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def TOLERATEDFAILURECOUNT(self):
+            return self.getToken(ASLParser.TOLERATEDFAILURECOUNT, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_jsonata(self):
+            return self.getTypedRuleContext(ASLParser.String_jsonataContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterTolerated_failure_count_string_jsonata" ):
+                listener.enterTolerated_failure_count_string_jsonata(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitTolerated_failure_count_string_jsonata" ):
+                listener.exitTolerated_failure_count_string_jsonata(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitTolerated_failure_count_string_jsonata" ):
+                return visitor.visitTolerated_failure_count_string_jsonata(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def tolerated_failure_count_decl(self):
+
+        localctx = ASLParser.Tolerated_failure_count_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 162, self.RULE_tolerated_failure_count_decl)
+        try:
+            self.state = 928
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,66,self._ctx)
+            if la_ == 1:
+                localctx = ASLParser.Tolerated_failure_count_string_jsonataContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 919
+                self.match(ASLParser.TOLERATEDFAILURECOUNT)
+                self.state = 920
+                self.match(ASLParser.COLON)
+                self.state = 921
+                self.string_jsonata()
+                pass
+
+            elif la_ == 2:
+                localctx = ASLParser.Tolerated_failure_count_intContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 922
+                self.match(ASLParser.TOLERATEDFAILURECOUNT)
+                self.state = 923
+                self.match(ASLParser.COLON)
+                self.state = 924
+                self.match(ASLParser.INT)
+                pass
+
+            elif la_ == 3:
+                localctx = ASLParser.Tolerated_failure_count_pathContext(self, localctx)
+                self.enterOuterAlt(localctx, 3)
+                self.state = 925
+                self.match(ASLParser.TOLERATEDFAILURECOUNTPATH)
+                self.state = 926
+                self.match(ASLParser.COLON)
+                self.state = 927
+                self.string_sampler()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Tolerated_failure_percentage_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_tolerated_failure_percentage_decl
+
+        def copyFrom(self, ctx:ParserRuleContext):
+            super().copyFrom(ctx)
+
+
+    class Tolerated_failure_percentage_pathContext(Tolerated_failure_percentage_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Tolerated_failure_percentage_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def TOLERATEDFAILUREPERCENTAGEPATH(self):
+            return self.getToken(ASLParser.TOLERATEDFAILUREPERCENTAGEPATH, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_sampler(self):
+            return self.getTypedRuleContext(ASLParser.String_samplerContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterTolerated_failure_percentage_path" ):
+                listener.enterTolerated_failure_percentage_path(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitTolerated_failure_percentage_path" ):
+                listener.exitTolerated_failure_percentage_path(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitTolerated_failure_percentage_path" ):
+                return visitor.visitTolerated_failure_percentage_path(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Tolerated_failure_percentage_string_jsonataContext(Tolerated_failure_percentage_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Tolerated_failure_percentage_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def TOLERATEDFAILUREPERCENTAGE(self):
+            return self.getToken(ASLParser.TOLERATEDFAILUREPERCENTAGE, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def string_jsonata(self):
+            return self.getTypedRuleContext(ASLParser.String_jsonataContext,0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterTolerated_failure_percentage_string_jsonata" ):
+                listener.enterTolerated_failure_percentage_string_jsonata(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitTolerated_failure_percentage_string_jsonata" ):
+                listener.exitTolerated_failure_percentage_string_jsonata(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitTolerated_failure_percentage_string_jsonata" ):
+                return visitor.visitTolerated_failure_percentage_string_jsonata(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    class Tolerated_failure_percentage_numberContext(Tolerated_failure_percentage_declContext):
+
+        def __init__(self, parser, ctx:ParserRuleContext): # actually a ASLParser.Tolerated_failure_percentage_declContext
+            super().__init__(parser)
+            self.copyFrom(ctx)
+
+        def TOLERATEDFAILUREPERCENTAGE(self):
+            return self.getToken(ASLParser.TOLERATEDFAILUREPERCENTAGE, 0)
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+        def NUMBER(self):
+            return self.getToken(ASLParser.NUMBER, 0)
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterTolerated_failure_percentage_number" ):
+                listener.enterTolerated_failure_percentage_number(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitTolerated_failure_percentage_number" ):
+                listener.exitTolerated_failure_percentage_number(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitTolerated_failure_percentage_number" ):
+                return visitor.visitTolerated_failure_percentage_number(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def tolerated_failure_percentage_decl(self):
+
+        localctx = ASLParser.Tolerated_failure_percentage_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 164, self.RULE_tolerated_failure_percentage_decl)
+        try:
+            self.state = 939
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,67,self._ctx)
+            if la_ == 1:
+                localctx = ASLParser.Tolerated_failure_percentage_string_jsonataContext(self, localctx)
+                self.enterOuterAlt(localctx, 1)
+                self.state = 930
+                self.match(ASLParser.TOLERATEDFAILUREPERCENTAGE)
+                self.state = 931
+                self.match(ASLParser.COLON)
+                self.state = 932
+                self.string_jsonata()
+                pass
+
+            elif la_ == 2:
+                localctx = ASLParser.Tolerated_failure_percentage_numberContext(self, localctx)
+                self.enterOuterAlt(localctx, 2)
+                self.state = 933
+                self.match(ASLParser.TOLERATEDFAILUREPERCENTAGE)
+                self.state = 934
+                self.match(ASLParser.COLON)
+                self.state = 935
+                self.match(ASLParser.NUMBER)
+                pass
+
+            elif la_ == 3:
+                localctx = ASLParser.Tolerated_failure_percentage_pathContext(self, localctx)
+                self.enterOuterAlt(localctx, 3)
+                self.state = 936
+                self.match(ASLParser.TOLERATEDFAILUREPERCENTAGEPATH)
+                self.state = 937
+                self.match(ASLParser.COLON)
+                self.state = 938
+                self.string_sampler()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
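MaxItems, ToleratedFailureCount, and ToleratedFailurePercentage each come in three labeled alternatives (a JSONata string, a numeric literal, or a separate *Path key), disambiguated by adaptivePredict. Because every generated accept() dispatches via hasattr, a plain duck-typed visitor suffices to extract the values; a sketch using only the accessors defined above:

    class MapLimitInspector:
        # Method names match the hasattr() probes in the generated accept() methods.
        def visitMax_items_int(self, ctx):
            return ("MaxItems", int(ctx.INT().getText()))

        def visitMax_items_path(self, ctx):
            return ("MaxItemsPath", ctx.string_sampler().getText())

        def visitTolerated_failure_percentage_number(self, ctx):
            return ("ToleratedFailurePercentage", float(ctx.NUMBER().getText()))

        def visitChildren(self, ctx):
            # Fallback invoked by accept() when no specific visit method exists.
            return None

    # usage: result = max_items_tree.accept(MapLimitInspector())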
+    class Label_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LABEL(self):
+            return self.getToken(ASLParser.LABEL, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_label_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterLabel_decl" ):
+                listener.enterLabel_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitLabel_decl" ):
+                listener.exitLabel_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitLabel_decl" ):
+                return visitor.visitLabel_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def label_decl(self):
+
+        localctx = ASLParser.Label_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 166, self.RULE_label_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 941
+            self.match(ASLParser.LABEL)
+            self.state = 942
+            self.match(ASLParser.COLON)
+            self.state = 943
+            self.string_literal()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Result_writer_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def RESULTWRITER(self):
+            return self.getToken(ASLParser.RESULTWRITER, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def LBRACE(self):
+            return self.getToken(ASLParser.LBRACE, 0)
+
+        def result_writer_field(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Result_writer_fieldContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Result_writer_fieldContext,i)
+
+        def RBRACE(self):
+            return self.getToken(ASLParser.RBRACE, 0)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_result_writer_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterResult_writer_decl" ):
+                listener.enterResult_writer_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitResult_writer_decl" ):
+                listener.exitResult_writer_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitResult_writer_decl" ):
+                return visitor.visitResult_writer_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def result_writer_decl(self):
+
+        localctx = ASLParser.Result_writer_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 168, self.RULE_result_writer_decl)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 945
+            self.match(ASLParser.RESULTWRITER)
+            self.state = 946
+            self.match(ASLParser.COLON)
+            self.state = 947
+            self.match(ASLParser.LBRACE)
+            self.state = 948
+            self.result_writer_field()
+            self.state = 953
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            while _la==1:
+                self.state = 949
+                self.match(ASLParser.COMMA)
+                self.state = 950
+                self.result_writer_field()
+                self.state = 955
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+
+            self.state = 956
+            self.match(ASLParser.RBRACE)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Result_writer_fieldContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def resource_decl(self):
+            return self.getTypedRuleContext(ASLParser.Resource_declContext,0)
+
+        def parameters_decl(self):
+            return self.getTypedRuleContext(ASLParser.Parameters_declContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_result_writer_field
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterResult_writer_field" ):
+                listener.enterResult_writer_field(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitResult_writer_field" ):
+                listener.exitResult_writer_field(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitResult_writer_field" ):
+                return visitor.visitResult_writer_field(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def result_writer_field(self):
+
+        localctx = ASLParser.Result_writer_fieldContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 170, self.RULE_result_writer_field)
+        try:
+            self.state = 960
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [90]:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 958
+                self.resource_decl()
+                pass
+            elif token in [97]:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 959
+                self.parameters_decl()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
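result_writer_field restricts a "ResultWriter" object to Resource and Parameters entries (token types 90 and 97 in the lookahead test above). Written out as the ASL fragment a distributed Map state would carry; the ARN and parameter values are illustrative only:

    result_writer = {
        "ResultWriter": {                                 # result_writer_decl
            "Resource": "arn:aws:states:::s3:putObject",  # resource_decl
            "Parameters": {                               # parameters_decl
                "Bucket": "example-bucket",
                "Prefix": "out/",
            },
        }
    }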
+    class Retry_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def RETRY(self):
+            return self.getToken(ASLParser.RETRY, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def LBRACK(self):
+            return self.getToken(ASLParser.LBRACK, 0)
+
+        def RBRACK(self):
+            return self.getToken(ASLParser.RBRACK, 0)
+
+        def retrier_decl(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Retrier_declContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Retrier_declContext,i)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_retry_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterRetry_decl" ):
+                listener.enterRetry_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitRetry_decl" ):
+                listener.exitRetry_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitRetry_decl" ):
+                return visitor.visitRetry_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def retry_decl(self):
+
+        localctx = ASLParser.Retry_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 172, self.RULE_retry_decl)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 962
+            self.match(ASLParser.RETRY)
+            self.state = 963
+            self.match(ASLParser.COLON)
+            self.state = 964
+            self.match(ASLParser.LBRACK)
+            self.state = 973
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            if _la==5:
+                self.state = 965
+                self.retrier_decl()
+                self.state = 970
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 966
+                    self.match(ASLParser.COMMA)
+                    self.state = 967
+                    self.retrier_decl()
+                    self.state = 972
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+            self.state = 975
+            self.match(ASLParser.RBRACK)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Retrier_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACE(self):
+            return self.getToken(ASLParser.LBRACE, 0)
+
+        def retrier_stmt(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Retrier_stmtContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Retrier_stmtContext,i)
+
+        def RBRACE(self):
+            return self.getToken(ASLParser.RBRACE, 0)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_retrier_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterRetrier_decl" ):
+                listener.enterRetrier_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitRetrier_decl" ):
+                listener.exitRetrier_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitRetrier_decl" ):
+                return visitor.visitRetrier_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def retrier_decl(self):
+
+        localctx = ASLParser.Retrier_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 174, self.RULE_retrier_decl)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 977
+            self.match(ASLParser.LBRACE)
+            self.state = 978
+            self.retrier_stmt()
+            self.state = 983
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            while _la==1:
+                self.state = 979
+                self.match(ASLParser.COMMA)
+                self.state = 980
+                self.retrier_stmt()
+                self.state = 985
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+
+            self.state = 986
+            self.match(ASLParser.RBRACE)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Retrier_stmtContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def error_equals_decl(self):
+            return self.getTypedRuleContext(ASLParser.Error_equals_declContext,0)
+
+        def interval_seconds_decl(self):
+            return self.getTypedRuleContext(ASLParser.Interval_seconds_declContext,0)
+
+        def max_attempts_decl(self):
+            return self.getTypedRuleContext(ASLParser.Max_attempts_declContext,0)
+
+        def backoff_rate_decl(self):
+            return self.getTypedRuleContext(ASLParser.Backoff_rate_declContext,0)
+
+        def max_delay_seconds_decl(self):
+            return self.getTypedRuleContext(ASLParser.Max_delay_seconds_declContext,0)
+
+        def jitter_strategy_decl(self):
+            return self.getTypedRuleContext(ASLParser.Jitter_strategy_declContext,0)
+
+        def comment_decl(self):
+            return self.getTypedRuleContext(ASLParser.Comment_declContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_retrier_stmt
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterRetrier_stmt" ):
+                listener.enterRetrier_stmt(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitRetrier_stmt" ):
+                listener.exitRetrier_stmt(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitRetrier_stmt" ):
+                return visitor.visitRetrier_stmt(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def retrier_stmt(self):
+
+        localctx = ASLParser.Retrier_stmtContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 176, self.RULE_retrier_stmt)
+        try:
+            self.state = 995
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [122]:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 988
+                self.error_equals_decl()
+                pass
+            elif token in [123]:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 989
+                self.interval_seconds_decl()
+                pass
+            elif token in [124]:
+                self.enterOuterAlt(localctx, 3)
+                self.state = 990
+                self.max_attempts_decl()
+                pass
+            elif token in [125]:
+                self.enterOuterAlt(localctx, 4)
+                self.state = 991
+                self.backoff_rate_decl()
+                pass
+            elif token in [126]:
+                self.enterOuterAlt(localctx, 5)
+                self.state = 992
+                self.max_delay_seconds_decl()
+                pass
+            elif token in [127]:
+                self.enterOuterAlt(localctx, 6)
+                self.state = 993
+                self.jitter_strategy_decl()
+                pass
+            elif token in [10]:
+                self.enterOuterAlt(localctx, 7)
+                self.state = 994
+                self.comment_decl()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
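retry_decl permits an empty "Retry": [] (the `if _la==5` guard makes the retrier list optional), whereas retrier_decl requires at least one retrier_stmt per object. The seven statements a retrier accepts, as one illustrative fragment:

    retrier = {
        "Comment": "retry transient task failures",  # comment_decl
        "ErrorEquals": ["States.Timeout"],           # error_equals_decl
        "IntervalSeconds": 2,                        # interval_seconds_decl
        "MaxAttempts": 3,                            # max_attempts_decl
        "BackoffRate": 1.5,                          # backoff_rate_decl
        "MaxDelaySeconds": 20,                       # max_delay_seconds_decl
        "JitterStrategy": "FULL",                    # jitter_strategy_decl
    }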
+    class Error_equals_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def ERROREQUALS(self):
+            return self.getToken(ASLParser.ERROREQUALS, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def LBRACK(self):
+            return self.getToken(ASLParser.LBRACK, 0)
+
+        def error_name(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Error_nameContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Error_nameContext,i)
+
+        def RBRACK(self):
+            return self.getToken(ASLParser.RBRACK, 0)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_error_equals_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterError_equals_decl" ):
+                listener.enterError_equals_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitError_equals_decl" ):
+                listener.exitError_equals_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitError_equals_decl" ):
+                return visitor.visitError_equals_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def error_equals_decl(self):
+
+        localctx = ASLParser.Error_equals_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 178, self.RULE_error_equals_decl)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 997
+            self.match(ASLParser.ERROREQUALS)
+            self.state = 998
+            self.match(ASLParser.COLON)
+            self.state = 999
+            self.match(ASLParser.LBRACK)
+            self.state = 1000
+            self.error_name()
+            self.state = 1005
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            while _la==1:
+                self.state = 1001
+                self.match(ASLParser.COMMA)
+                self.state = 1002
+                self.error_name()
+                self.state = 1007
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+
+            self.state = 1008
+            self.match(ASLParser.RBRACK)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Interval_seconds_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def INTERVALSECONDS(self):
+            return self.getToken(ASLParser.INTERVALSECONDS, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_interval_seconds_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterInterval_seconds_decl" ):
+                listener.enterInterval_seconds_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitInterval_seconds_decl" ):
+                listener.exitInterval_seconds_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitInterval_seconds_decl" ):
+                return visitor.visitInterval_seconds_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def interval_seconds_decl(self):
+
+        localctx = ASLParser.Interval_seconds_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 180, self.RULE_interval_seconds_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1010
+            self.match(ASLParser.INTERVALSECONDS)
+            self.state = 1011
+            self.match(ASLParser.COLON)
+            self.state = 1012
+            self.match(ASLParser.INT)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Max_attempts_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def MAXATTEMPTS(self):
+            return self.getToken(ASLParser.MAXATTEMPTS, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_max_attempts_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterMax_attempts_decl" ):
+                listener.enterMax_attempts_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitMax_attempts_decl" ):
+                listener.exitMax_attempts_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitMax_attempts_decl" ):
+                return visitor.visitMax_attempts_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def max_attempts_decl(self):
+
+        localctx = ASLParser.Max_attempts_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 182, self.RULE_max_attempts_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1014
+            self.match(ASLParser.MAXATTEMPTS)
+            self.state = 1015
+            self.match(ASLParser.COLON)
+            self.state = 1016
+            self.match(ASLParser.INT)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Backoff_rate_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def BACKOFFRATE(self):
+            return self.getToken(ASLParser.BACKOFFRATE, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def NUMBER(self):
+            return self.getToken(ASLParser.NUMBER, 0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_backoff_rate_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterBackoff_rate_decl" ):
+                listener.enterBackoff_rate_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitBackoff_rate_decl" ):
+                listener.exitBackoff_rate_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitBackoff_rate_decl" ):
+                return visitor.visitBackoff_rate_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def backoff_rate_decl(self):
+
+        localctx = ASLParser.Backoff_rate_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 184, self.RULE_backoff_rate_decl)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1018
+            self.match(ASLParser.BACKOFFRATE)
+            self.state = 1019
+            self.match(ASLParser.COLON)
+            self.state = 1020
+            _la = self._input.LA(1)
+            if not(_la==160 or _la==161):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Max_delay_seconds_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def MAXDELAYSECONDS(self):
+            return self.getToken(ASLParser.MAXDELAYSECONDS, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_max_delay_seconds_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterMax_delay_seconds_decl" ):
+                listener.enterMax_delay_seconds_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitMax_delay_seconds_decl" ):
+                listener.exitMax_delay_seconds_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitMax_delay_seconds_decl" ):
+                return visitor.visitMax_delay_seconds_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def max_delay_seconds_decl(self):
+
+        localctx = ASLParser.Max_delay_seconds_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 186, self.RULE_max_delay_seconds_decl)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1022
+            self.match(ASLParser.MAXDELAYSECONDS)
+            self.state = 1023
+            self.match(ASLParser.COLON)
+            self.state = 1024
+            self.match(ASLParser.INT)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Jitter_strategy_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def JITTERSTRATEGY(self):
+            return self.getToken(ASLParser.JITTERSTRATEGY, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def FULL(self):
+            return self.getToken(ASLParser.FULL, 0)
+
+        def NONE(self):
+            return self.getToken(ASLParser.NONE, 0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_jitter_strategy_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJitter_strategy_decl" ):
+                listener.enterJitter_strategy_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJitter_strategy_decl" ):
+                listener.exitJitter_strategy_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJitter_strategy_decl" ):
+                return visitor.visitJitter_strategy_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def jitter_strategy_decl(self):
+
+        localctx = ASLParser.Jitter_strategy_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 188, self.RULE_jitter_strategy_decl)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1026
+            self.match(ASLParser.JITTERSTRATEGY)
+            self.state = 1027
+            self.match(ASLParser.COLON)
+            self.state = 1028
+            _la = self._input.LA(1)
+            if not(_la==128 or _la==129):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
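The interval, attempt, and delay fields accept only an INT token, while backoff_rate_decl takes INT or NUMBER (160/161) and jitter_strategy_decl the FULL or NONE keywords (128/129), both via the inline two-token set test rather than match(). Each field rule can also be exercised standalone, reusing the driver sketch from earlier (ASLLexer again assumed):

    # interval_seconds_decl() accepts only an integer literal, so a NUMBER such
    # as 2.5 would be reported as a recognition error here.
    tokens = CommonTokenStream(ASLLexer(InputStream('"IntervalSeconds": 2')))
    tree = ASLParser(tokens).interval_seconds_decl()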
+    class Catch_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def CATCH(self):
+            return self.getToken(ASLParser.CATCH, 0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def LBRACK(self):
+            return self.getToken(ASLParser.LBRACK, 0)
+
+        def RBRACK(self):
+            return self.getToken(ASLParser.RBRACK, 0)
+
+        def catcher_decl(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Catcher_declContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Catcher_declContext,i)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_catch_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterCatch_decl" ):
+                listener.enterCatch_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitCatch_decl" ):
+                listener.exitCatch_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitCatch_decl" ):
+                return visitor.visitCatch_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def catch_decl(self):
+
+        localctx = ASLParser.Catch_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 190, self.RULE_catch_decl)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1030
+            self.match(ASLParser.CATCH)
+            self.state = 1031
+            self.match(ASLParser.COLON)
+            self.state = 1032
+            self.match(ASLParser.LBRACK)
+            self.state = 1041
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            if _la==5:
+                self.state = 1033
+                self.catcher_decl()
+                self.state = 1038
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 1034
+                    self.match(ASLParser.COMMA)
+                    self.state = 1035
+                    self.catcher_decl()
+                    self.state = 1040
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+            self.state = 1043
+            self.match(ASLParser.RBRACK)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Catcher_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACE(self):
+            return self.getToken(ASLParser.LBRACE, 0)
+
+        def catcher_stmt(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Catcher_stmtContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Catcher_stmtContext,i)
+
+        def RBRACE(self):
+            return self.getToken(ASLParser.RBRACE, 0)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_catcher_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterCatcher_decl" ):
+                listener.enterCatcher_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitCatcher_decl" ):
+                listener.exitCatcher_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitCatcher_decl" ):
+                return visitor.visitCatcher_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def catcher_decl(self):
+
+        localctx = ASLParser.Catcher_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 192, self.RULE_catcher_decl)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1045
+            self.match(ASLParser.LBRACE)
+            self.state = 1046
+            self.catcher_stmt()
+            self.state = 1051
+            self._errHandler.sync(self)
+            _la = self._input.LA(1)
+            while _la==1:
+                self.state = 1047
+                self.match(ASLParser.COMMA)
+                self.state = 1048
+                self.catcher_stmt()
+                self.state = 1053
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+
+            self.state = 1054
+            self.match(ASLParser.RBRACE)
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Catcher_stmtContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def error_equals_decl(self):
+            return self.getTypedRuleContext(ASLParser.Error_equals_declContext,0)
+
+        def result_path_decl(self):
+            return self.getTypedRuleContext(ASLParser.Result_path_declContext,0)
+
+        def next_decl(self):
+            return self.getTypedRuleContext(ASLParser.Next_declContext,0)
+
+        def assign_decl(self):
+            return self.getTypedRuleContext(ASLParser.Assign_declContext,0)
+
+        def output_decl(self):
+            return self.getTypedRuleContext(ASLParser.Output_declContext,0)
+
+        def comment_decl(self):
+            return self.getTypedRuleContext(ASLParser.Comment_declContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_catcher_stmt
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterCatcher_stmt" ):
+                listener.enterCatcher_stmt(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitCatcher_stmt" ):
+                listener.exitCatcher_stmt(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitCatcher_stmt" ):
+                return visitor.visitCatcher_stmt(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def catcher_stmt(self):
+
+        localctx = ASLParser.Catcher_stmtContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 194, self.RULE_catcher_stmt)
+        try:
+            self.state = 1062
+            self._errHandler.sync(self)
+            token = self._input.LA(1)
+            if token in [122]:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 1056
+                self.error_equals_decl()
+                pass
+            elif token in [95]:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 1057
+                self.result_path_decl()
+                pass
+            elif token in [115]:
+                self.enterOuterAlt(localctx, 3)
+                self.state = 1058
+                self.next_decl()
+                pass
+            elif token in [134]:
+                self.enterOuterAlt(localctx, 4)
+                self.state = 1059
+                self.assign_decl()
+                pass
+            elif token in [135]:
+                self.enterOuterAlt(localctx, 5)
+                self.state = 1060
+                self.output_decl()
+                pass
+            elif token in [10]:
+                self.enterOuterAlt(localctx, 6)
+                self.state = 1061
+                self.comment_decl()
+                pass
+            else:
+                raise NoViableAltException(self)
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
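catcher_stmt admits six fields; unlike a retrier, a catcher routes control, so Next carries the target state alongside the error filter, with Assign and Output covering the JSONata-style variants. An illustrative catcher:

    catcher = {
        "ErrorEquals": ["States.ALL"],   # error_equals_decl
        "ResultPath": "$.error-info",    # result_path_decl
        "Next": "HandleFailure",         # next_decl
        "Comment": "catch-all handler",  # comment_decl
    }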
+    class Comparison_opContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def BOOLEANEQUALS(self):
+            return self.getToken(ASLParser.BOOLEANEQUALS, 0)
+
+        def BOOLEANQUALSPATH(self):
+            return self.getToken(ASLParser.BOOLEANQUALSPATH, 0)
+
+        def ISBOOLEAN(self):
+            return self.getToken(ASLParser.ISBOOLEAN, 0)
+
+        def ISNULL(self):
+            return self.getToken(ASLParser.ISNULL, 0)
+
+        def ISNUMERIC(self):
+            return self.getToken(ASLParser.ISNUMERIC, 0)
+
+        def ISPRESENT(self):
+            return self.getToken(ASLParser.ISPRESENT, 0)
+
+        def ISSTRING(self):
+            return self.getToken(ASLParser.ISSTRING, 0)
+
+        def ISTIMESTAMP(self):
+            return self.getToken(ASLParser.ISTIMESTAMP, 0)
+
+        def NUMERICEQUALS(self):
+            return self.getToken(ASLParser.NUMERICEQUALS, 0)
+
+        def NUMERICEQUALSPATH(self):
+            return self.getToken(ASLParser.NUMERICEQUALSPATH, 0)
+
+        def NUMERICGREATERTHAN(self):
+            return self.getToken(ASLParser.NUMERICGREATERTHAN, 0)
+
+        def NUMERICGREATERTHANPATH(self):
+            return self.getToken(ASLParser.NUMERICGREATERTHANPATH, 0)
+
+        def NUMERICGREATERTHANEQUALS(self):
+            return self.getToken(ASLParser.NUMERICGREATERTHANEQUALS, 0)
+
+        def NUMERICGREATERTHANEQUALSPATH(self):
+            return self.getToken(ASLParser.NUMERICGREATERTHANEQUALSPATH, 0)
+
+        def NUMERICLESSTHAN(self):
+            return self.getToken(ASLParser.NUMERICLESSTHAN, 0)
+
+        def NUMERICLESSTHANPATH(self):
+            return self.getToken(ASLParser.NUMERICLESSTHANPATH, 0)
+
+        def NUMERICLESSTHANEQUALS(self):
+            return self.getToken(ASLParser.NUMERICLESSTHANEQUALS, 0)
+
+        def NUMERICLESSTHANEQUALSPATH(self):
+            return self.getToken(ASLParser.NUMERICLESSTHANEQUALSPATH, 0)
+
+        def STRINGEQUALS(self):
+            return self.getToken(ASLParser.STRINGEQUALS, 0)
+
+        def STRINGEQUALSPATH(self):
+            return self.getToken(ASLParser.STRINGEQUALSPATH, 0)
+
+        def STRINGGREATERTHAN(self):
+            return self.getToken(ASLParser.STRINGGREATERTHAN, 0)
+
+        def STRINGGREATERTHANPATH(self):
+            return self.getToken(ASLParser.STRINGGREATERTHANPATH, 0)
+
+        def STRINGGREATERTHANEQUALS(self):
+            return self.getToken(ASLParser.STRINGGREATERTHANEQUALS, 0)
+
+        def STRINGGREATERTHANEQUALSPATH(self):
+            return self.getToken(ASLParser.STRINGGREATERTHANEQUALSPATH, 0)
+
+        def STRINGLESSTHAN(self):
+            return self.getToken(ASLParser.STRINGLESSTHAN, 0)
+
+        def STRINGLESSTHANPATH(self):
+            return self.getToken(ASLParser.STRINGLESSTHANPATH, 0)
+
+        def STRINGLESSTHANEQUALS(self):
+            return self.getToken(ASLParser.STRINGLESSTHANEQUALS, 0)
+
+        def STRINGLESSTHANEQUALSPATH(self):
+            return self.getToken(ASLParser.STRINGLESSTHANEQUALSPATH, 0)
+
+        def STRINGMATCHES(self):
+            return self.getToken(ASLParser.STRINGMATCHES, 0)
+
+        def TIMESTAMPEQUALS(self):
+            return self.getToken(ASLParser.TIMESTAMPEQUALS, 0)
+
+        def TIMESTAMPEQUALSPATH(self):
+            return self.getToken(ASLParser.TIMESTAMPEQUALSPATH, 0)
+
+        def TIMESTAMPGREATERTHAN(self):
+            return self.getToken(ASLParser.TIMESTAMPGREATERTHAN, 0)
+
+        def TIMESTAMPGREATERTHANPATH(self):
+            return self.getToken(ASLParser.TIMESTAMPGREATERTHANPATH, 0)
+
+        def TIMESTAMPGREATERTHANEQUALS(self):
+            return self.getToken(ASLParser.TIMESTAMPGREATERTHANEQUALS, 0)
+
+        def TIMESTAMPGREATERTHANEQUALSPATH(self):
+            return self.getToken(ASLParser.TIMESTAMPGREATERTHANEQUALSPATH, 0)
+
+        def TIMESTAMPLESSTHAN(self):
+            return self.getToken(ASLParser.TIMESTAMPLESSTHAN, 0)
+
+        def TIMESTAMPLESSTHANPATH(self):
+            return self.getToken(ASLParser.TIMESTAMPLESSTHANPATH, 0)
+
+        def TIMESTAMPLESSTHANEQUALS(self):
+            return self.getToken(ASLParser.TIMESTAMPLESSTHANEQUALS, 0)
+
+        def TIMESTAMPLESSTHANEQUALSPATH(self):
+            return self.getToken(ASLParser.TIMESTAMPLESSTHANEQUALSPATH, 0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_comparison_op
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterComparison_op" ):
+                listener.enterComparison_op(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitComparison_op" ):
+                listener.exitComparison_op(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitComparison_op" ):
+                return visitor.visitComparison_op(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def comparison_op(self):
+
+        localctx = ASLParser.Comparison_opContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 196, self.RULE_comparison_op)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1064
+            _la = self._input.LA(1)
+            if not(((((_la - 30)) & ~0x3f) == 0 and ((1 << (_la - 30)) & 2199022731007) != 0)):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Choice_operatorContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def NOT(self):
+            return self.getToken(ASLParser.NOT, 0)
+
+        def AND(self):
+            return self.getToken(ASLParser.AND, 0)
+
+        def OR(self):
+            return self.getToken(ASLParser.OR, 0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_choice_operator
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterChoice_operator" ):
+                listener.enterChoice_operator(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitChoice_operator" ):
+                listener.exitChoice_operator(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitChoice_operator" ):
+                return visitor.visitChoice_operator(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def choice_operator(self):
+
+        localctx = ASLParser.Choice_operatorContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 198, self.RULE_choice_operator)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1066
+            _la = self._input.LA(1)
+            if not((((_la) & ~0x3f) == 0 and ((1 << _la) & 563225368199168) != 0)):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
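comparison_op and choice_operator validate set membership with a single 64-bit mask rather than a chain of comparisons: the token type is shifted into a window (starting at token 30 for comparison_op) and the corresponding mask bit is tested. The same check, unpacked; the mask constant is copied from comparison_op() above:

    MASK = 2199022731007  # from comparison_op(); one bit per operator token

    def in_comparison_op_set(token_type: int) -> bool:
        offset = token_type - 30  # window base used by the generated test
        return 0 <= offset <= 63 and (MASK >> offset) & 1 != 0

    # Token types 30..93 whose bit is set are exactly the comparison-operator tokens.
    print([t for t in range(30, 94) if in_comparison_op_set(t)])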
+    class States_error_nameContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def ERRORNAMEStatesALL(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesALL, 0)
+
+        def ERRORNAMEStatesDataLimitExceeded(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesDataLimitExceeded, 0)
+
+        def ERRORNAMEStatesHeartbeatTimeout(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesHeartbeatTimeout, 0)
+
+        def ERRORNAMEStatesTimeout(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesTimeout, 0)
+
+        def ERRORNAMEStatesTaskFailed(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesTaskFailed, 0)
+
+        def ERRORNAMEStatesPermissions(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesPermissions, 0)
+
+        def ERRORNAMEStatesResultPathMatchFailure(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesResultPathMatchFailure, 0)
+
+        def ERRORNAMEStatesParameterPathFailure(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesParameterPathFailure, 0)
+
+        def ERRORNAMEStatesBranchFailed(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesBranchFailed, 0)
+
+        def ERRORNAMEStatesNoChoiceMatched(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesNoChoiceMatched, 0)
+
+        def ERRORNAMEStatesIntrinsicFailure(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesIntrinsicFailure, 0)
+
+        def ERRORNAMEStatesExceedToleratedFailureThreshold(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesExceedToleratedFailureThreshold, 0)
+
+        def ERRORNAMEStatesItemReaderFailed(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesItemReaderFailed, 0)
+
+        def ERRORNAMEStatesResultWriterFailed(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesResultWriterFailed, 0)
+
+        def ERRORNAMEStatesRuntime(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesRuntime, 0)
+
+        def ERRORNAMEStatesQueryEvaluationError(self):
+            return self.getToken(ASLParser.ERRORNAMEStatesQueryEvaluationError, 0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_states_error_name
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterStates_error_name" ):
+                listener.enterStates_error_name(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitStates_error_name" ):
+                listener.exitStates_error_name(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitStates_error_name" ):
+                return visitor.visitStates_error_name(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def states_error_name(self):
+
+        localctx = ASLParser.States_error_nameContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 200, self.RULE_states_error_name)
+        self._la = 0 # Token type
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1068
+            _la = self._input.LA(1)
+            if not(((((_la - 137)) & ~0x3f) == 0 and ((1 << (_la - 137)) & 65535) != 0)):
+                self._errHandler.recoverInline(self)
+            else:
+                self._errHandler.reportMatch(self)
+                self.consume()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Error_nameContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def states_error_name(self):
+            return self.getTypedRuleContext(ASLParser.States_error_nameContext,0)
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_error_name
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterError_name" ):
+                listener.enterError_name(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitError_name" ):
+                listener.exitError_name(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitError_name" ):
+                return visitor.visitError_name(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def error_name(self):
+
+        localctx = ASLParser.Error_nameContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 202, self.RULE_error_name)
+        try:
+            self.state = 1072
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,79,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 1070
+                self.states_error_name()
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 1071
+                self.string_literal()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Json_obj_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACE(self):
+            return self.getToken(ASLParser.LBRACE, 0)
+
+        def json_binding(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Json_bindingContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Json_bindingContext,i)
+
+        def RBRACE(self):
+            return self.getToken(ASLParser.RBRACE, 0)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_json_obj_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJson_obj_decl" ):
+                listener.enterJson_obj_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJson_obj_decl" ):
+                listener.exitJson_obj_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJson_obj_decl" ):
+                return visitor.visitJson_obj_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def json_obj_decl(self):
+
+        localctx = ASLParser.Json_obj_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 204, self.RULE_json_obj_decl)
+        self._la = 0 # Token type
+        try:
+            self.state = 1087
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,81,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 1074
+                self.match(ASLParser.LBRACE)
+                self.state = 1075
+                self.json_binding()
+                self.state = 1080
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 1076
+                    self.match(ASLParser.COMMA)
+                    self.state = 1077
+                    self.json_binding()
+                    self.state = 1082
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                self.state = 1083
+                self.match(ASLParser.RBRACE)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 1085
+                self.match(ASLParser.LBRACE)
+                self.state = 1086
+                self.match(ASLParser.RBRACE)
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Json_bindingContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def COLON(self):
+            return self.getToken(ASLParser.COLON, 0)
+
+        def json_value_decl(self):
+            return self.getTypedRuleContext(ASLParser.Json_value_declContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_json_binding
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJson_binding" ):
+                listener.enterJson_binding(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJson_binding" ):
+                listener.exitJson_binding(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJson_binding" ):
+                return visitor.visitJson_binding(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def json_binding(self):
+
+        localctx = ASLParser.Json_bindingContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 206, self.RULE_json_binding)
+        try:
+            self.enterOuterAlt(localctx, 1)
+            self.state = 1089
+            self.string_literal()
+            self.state = 1090
+            self.match(ASLParser.COLON)
+            self.state = 1091
+            self.json_value_decl()
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+    class Json_arr_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def LBRACK(self):
+            return self.getToken(ASLParser.LBRACK, 0)
+
+        def json_value_decl(self, i:int=None):
+            if i is None:
+                return self.getTypedRuleContexts(ASLParser.Json_value_declContext)
+            else:
+                return self.getTypedRuleContext(ASLParser.Json_value_declContext,i)
+
+        def RBRACK(self):
+            return self.getToken(ASLParser.RBRACK, 0)
+
+        def COMMA(self, i:int=None):
+            if i is None:
+                return self.getTokens(ASLParser.COMMA)
+            else:
+                return self.getToken(ASLParser.COMMA, i)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_json_arr_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJson_arr_decl" ):
+                listener.enterJson_arr_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJson_arr_decl" ):
+                listener.exitJson_arr_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJson_arr_decl" ):
+                return visitor.visitJson_arr_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def json_arr_decl(self):
+
+        localctx = ASLParser.Json_arr_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 208, self.RULE_json_arr_decl)
+        self._la = 0 # Token type
+        try:
+            self.state = 1106
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,83,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 1093
+                self.match(ASLParser.LBRACK)
+                self.state = 1094
+                self.json_value_decl()
+                self.state = 1099
+                self._errHandler.sync(self)
+                _la = self._input.LA(1)
+                while _la==1:
+                    self.state = 1095
+                    self.match(ASLParser.COMMA)
+                    self.state = 1096
+                    self.json_value_decl()
+                    self.state = 1101
+                    self._errHandler.sync(self)
+                    _la = self._input.LA(1)
+
+                self.state = 1102
+                self.match(ASLParser.RBRACK)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 1104
+                self.match(ASLParser.LBRACK)
+                self.state = 1105
+                self.match(ASLParser.RBRACK)
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
+
+
+    class Json_value_declContext(ParserRuleContext):
+        __slots__ = 'parser'
+
+        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
+            super().__init__(parent, invokingState)
+            self.parser = parser
+
+        def NUMBER(self):
+            return self.getToken(ASLParser.NUMBER, 0)
+
+        def INT(self):
+            return self.getToken(ASLParser.INT, 0)
+
+        def TRUE(self):
+            return self.getToken(ASLParser.TRUE, 0)
+
+        def FALSE(self):
+            return self.getToken(ASLParser.FALSE, 0)
+
+        def NULL(self):
+            return self.getToken(ASLParser.NULL, 0)
+
+        def json_binding(self):
+            return self.getTypedRuleContext(ASLParser.Json_bindingContext,0)
+
+        def json_arr_decl(self):
+            return self.getTypedRuleContext(ASLParser.Json_arr_declContext,0)
+
+        def json_obj_decl(self):
+            return self.getTypedRuleContext(ASLParser.Json_obj_declContext,0)
+
+        def string_literal(self):
+            return self.getTypedRuleContext(ASLParser.String_literalContext,0)
+
+        def getRuleIndex(self):
+            return ASLParser.RULE_json_value_decl
+
+        def enterRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "enterJson_value_decl" ):
+                listener.enterJson_value_decl(self)
+
+        def exitRule(self, listener:ParseTreeListener):
+            if hasattr( listener, "exitJson_value_decl" ):
+                listener.exitJson_value_decl(self)
+
+        def accept(self, visitor:ParseTreeVisitor):
+            if hasattr( visitor, "visitJson_value_decl" ):
+                return visitor.visitJson_value_decl(self)
+            else:
+                return visitor.visitChildren(self)
+
+
+    def json_value_decl(self):
+
+        localctx = ASLParser.Json_value_declContext(self, self._ctx, self.state)
+        self.enterRule(localctx, 210, self.RULE_json_value_decl)
+        try:
+            self.state = 1117
+            self._errHandler.sync(self)
+            la_ = self._interp.adaptivePredict(self._input,84,self._ctx)
+            if la_ == 1:
+                self.enterOuterAlt(localctx, 1)
+                self.state = 1108
+                self.match(ASLParser.NUMBER)
+                pass
+
+            elif la_ == 2:
+                self.enterOuterAlt(localctx, 2)
+                self.state = 1109
+                self.match(ASLParser.INT)
+                pass
+
+            elif la_ == 3:
+                self.enterOuterAlt(localctx, 3)
+                self.state = 1110
+                self.match(ASLParser.TRUE)
+                pass
+
+            elif la_ == 4:
+                self.enterOuterAlt(localctx, 4)
+                self.state = 1111
+                self.match(ASLParser.FALSE)
+                pass
+
+            elif la_ == 5:
+                self.enterOuterAlt(localctx, 5)
+                self.state = 1112
+                self.match(ASLParser.NULL)
+                pass
+
+            elif la_ == 6:
+                self.enterOuterAlt(localctx, 6)
+                self.state = 1113
+                self.json_binding()
+                pass
+
+            elif la_ == 7:
+                self.enterOuterAlt(localctx, 7)
+                self.state = 1114
+                self.json_arr_decl()
+                pass
+
+            elif la_ == 8:
+                self.enterOuterAlt(localctx, 8)
+                self.state = 1115
+                self.json_obj_decl()
+                pass
+
+            elif la_ == 9:
+                self.enterOuterAlt(localctx, 9)
+                self.state = 1116
+                self.string_literal()
+                pass
+
+        except RecognitionException as re:
+            localctx.exception = re
+            self._errHandler.reportError(self, re)
+            self._errHandler.recover(self, re)
+        finally:
+            self.exitRule()
+        return localctx
listener.enterString_expression_simple(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitString_expression_simple" ): + listener.exitString_expression_simple(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitString_expression_simple" ): + return visitor.visitString_expression_simple(self) + else: + return visitor.visitChildren(self) + + + + + def string_expression_simple(self): + + localctx = ASLParser.String_expression_simpleContext(self, self._ctx, self.state) + self.enterRule(localctx, 214, self.RULE_string_expression_simple) + try: + self.state = 1126 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [154, 155, 156]: + self.enterOuterAlt(localctx, 1) + self.state = 1124 + self.string_sampler() + pass + elif token in [157]: + self.enterOuterAlt(localctx, 2) + self.state = 1125 + self.string_intrinsic_function() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class String_expressionContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def string_expression_simple(self): + return self.getTypedRuleContext(ASLParser.String_expression_simpleContext,0) + + + def string_jsonata(self): + return self.getTypedRuleContext(ASLParser.String_jsonataContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_string_expression + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterString_expression" ): + listener.enterString_expression(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitString_expression" ): + listener.exitString_expression(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitString_expression" ): + return visitor.visitString_expression(self) + else: + return visitor.visitChildren(self) + + + + + def string_expression(self): + + localctx = ASLParser.String_expressionContext(self, self._ctx, self.state) + self.enterRule(localctx, 216, self.RULE_string_expression) + try: + self.state = 1130 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [154, 155, 156, 157]: + self.enterOuterAlt(localctx, 1) + self.state = 1128 + self.string_expression_simple() + pass + elif token in [158]: + self.enterOuterAlt(localctx, 2) + self.state = 1129 + self.string_jsonata() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class String_jsonpathContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def STRINGPATH(self): + return self.getToken(ASLParser.STRINGPATH, 0) + + def getRuleIndex(self): + return ASLParser.RULE_string_jsonpath + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterString_jsonpath" ): + listener.enterString_jsonpath(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitString_jsonpath" ): + listener.exitString_jsonpath(self) + + def 
accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitString_jsonpath" ): + return visitor.visitString_jsonpath(self) + else: + return visitor.visitChildren(self) + + + + + def string_jsonpath(self): + + localctx = ASLParser.String_jsonpathContext(self, self._ctx, self.state) + self.enterRule(localctx, 218, self.RULE_string_jsonpath) + try: + self.enterOuterAlt(localctx, 1) + self.state = 1132 + self.match(ASLParser.STRINGPATH) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class String_context_pathContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def STRINGPATHCONTEXTOBJ(self): + return self.getToken(ASLParser.STRINGPATHCONTEXTOBJ, 0) + + def getRuleIndex(self): + return ASLParser.RULE_string_context_path + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterString_context_path" ): + listener.enterString_context_path(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitString_context_path" ): + listener.exitString_context_path(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitString_context_path" ): + return visitor.visitString_context_path(self) + else: + return visitor.visitChildren(self) + + + + + def string_context_path(self): + + localctx = ASLParser.String_context_pathContext(self, self._ctx, self.state) + self.enterRule(localctx, 220, self.RULE_string_context_path) + try: + self.enterOuterAlt(localctx, 1) + self.state = 1134 + self.match(ASLParser.STRINGPATHCONTEXTOBJ) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class String_variable_sampleContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def STRINGVAR(self): + return self.getToken(ASLParser.STRINGVAR, 0) + + def getRuleIndex(self): + return ASLParser.RULE_string_variable_sample + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterString_variable_sample" ): + listener.enterString_variable_sample(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitString_variable_sample" ): + listener.exitString_variable_sample(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitString_variable_sample" ): + return visitor.visitString_variable_sample(self) + else: + return visitor.visitChildren(self) + + + + + def string_variable_sample(self): + + localctx = ASLParser.String_variable_sampleContext(self, self._ctx, self.state) + self.enterRule(localctx, 222, self.RULE_string_variable_sample) + try: + self.enterOuterAlt(localctx, 1) + self.state = 1136 + self.match(ASLParser.STRINGVAR) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class String_intrinsic_functionContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + 
super().__init__(parent, invokingState) + self.parser = parser + + def STRINGINTRINSICFUNC(self): + return self.getToken(ASLParser.STRINGINTRINSICFUNC, 0) + + def getRuleIndex(self): + return ASLParser.RULE_string_intrinsic_function + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterString_intrinsic_function" ): + listener.enterString_intrinsic_function(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitString_intrinsic_function" ): + listener.exitString_intrinsic_function(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitString_intrinsic_function" ): + return visitor.visitString_intrinsic_function(self) + else: + return visitor.visitChildren(self) + + + + + def string_intrinsic_function(self): + + localctx = ASLParser.String_intrinsic_functionContext(self, self._ctx, self.state) + self.enterRule(localctx, 224, self.RULE_string_intrinsic_function) + try: + self.enterOuterAlt(localctx, 1) + self.state = 1138 + self.match(ASLParser.STRINGINTRINSICFUNC) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class String_jsonataContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def STRINGJSONATA(self): + return self.getToken(ASLParser.STRINGJSONATA, 0) + + def getRuleIndex(self): + return ASLParser.RULE_string_jsonata + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterString_jsonata" ): + listener.enterString_jsonata(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitString_jsonata" ): + listener.exitString_jsonata(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitString_jsonata" ): + return visitor.visitString_jsonata(self) + else: + return visitor.visitChildren(self) + + + + + def string_jsonata(self): + + localctx = ASLParser.String_jsonataContext(self, self._ctx, self.state) + self.enterRule(localctx, 226, self.RULE_string_jsonata) + try: + self.enterOuterAlt(localctx, 1) + self.state = 1140 + self.match(ASLParser.STRINGJSONATA) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class String_literalContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def STRING(self): + return self.getToken(ASLParser.STRING, 0) + + def STRINGDOLLAR(self): + return self.getToken(ASLParser.STRINGDOLLAR, 0) + + def soft_string_keyword(self): + return self.getTypedRuleContext(ASLParser.Soft_string_keywordContext,0) + + + def comparison_op(self): + return self.getTypedRuleContext(ASLParser.Comparison_opContext,0) + + + def choice_operator(self): + return self.getTypedRuleContext(ASLParser.Choice_operatorContext,0) + + + def states_error_name(self): + return self.getTypedRuleContext(ASLParser.States_error_nameContext,0) + + + def string_expression(self): + return self.getTypedRuleContext(ASLParser.String_expressionContext,0) + + + def getRuleIndex(self): + return ASLParser.RULE_string_literal + + def enterRule(self, 
listener:ParseTreeListener): + if hasattr( listener, "enterString_literal" ): + listener.enterString_literal(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitString_literal" ): + listener.exitString_literal(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitString_literal" ): + return visitor.visitString_literal(self) + else: + return visitor.visitChildren(self) + + + + + def string_literal(self): + + localctx = ASLParser.String_literalContext(self, self._ctx, self.state) + self.enterRule(localctx, 228, self.RULE_string_literal) + try: + self.state = 1149 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [159]: + self.enterOuterAlt(localctx, 1) + self.state = 1142 + self.match(ASLParser.STRING) + pass + elif token in [153]: + self.enterOuterAlt(localctx, 2) + self.state = 1143 + self.match(ASLParser.STRINGDOLLAR) + pass + elif token in [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 119, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 134, 135, 136]: + self.enterOuterAlt(localctx, 3) + self.state = 1144 + self.soft_string_keyword() + pass + elif token in [30, 31, 32, 33, 34, 35, 36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70]: + self.enterOuterAlt(localctx, 4) + self.state = 1145 + self.comparison_op() + pass + elif token in [29, 38, 49]: + self.enterOuterAlt(localctx, 5) + self.state = 1146 + self.choice_operator() + pass + elif token in [137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152]: + self.enterOuterAlt(localctx, 6) + self.state = 1147 + self.states_error_name() + pass + elif token in [154, 155, 156, 157, 158]: + self.enterOuterAlt(localctx, 7) + self.state = 1148 + self.string_expression() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class Soft_string_keywordContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def QUERYLANGUAGE(self): + return self.getToken(ASLParser.QUERYLANGUAGE, 0) + + def ASSIGN(self): + return self.getToken(ASLParser.ASSIGN, 0) + + def ARGUMENTS(self): + return self.getToken(ASLParser.ARGUMENTS, 0) + + def OUTPUT(self): + return self.getToken(ASLParser.OUTPUT, 0) + + def COMMENT(self): + return self.getToken(ASLParser.COMMENT, 0) + + def STATES(self): + return self.getToken(ASLParser.STATES, 0) + + def STARTAT(self): + return self.getToken(ASLParser.STARTAT, 0) + + def NEXTSTATE(self): + return self.getToken(ASLParser.NEXTSTATE, 0) + + def TYPE(self): + return self.getToken(ASLParser.TYPE, 0) + + def TASK(self): + return self.getToken(ASLParser.TASK, 0) + + def CHOICE(self): + return self.getToken(ASLParser.CHOICE, 0) + + def FAIL(self): + return self.getToken(ASLParser.FAIL, 0) + + def SUCCEED(self): + return self.getToken(ASLParser.SUCCEED, 0) + + def PASS(self): + return self.getToken(ASLParser.PASS, 0) + + def WAIT(self): + return self.getToken(ASLParser.WAIT, 0) + + def 
PARALLEL(self): + return self.getToken(ASLParser.PARALLEL, 0) + + def MAP(self): + return self.getToken(ASLParser.MAP, 0) + + def CHOICES(self): + return self.getToken(ASLParser.CHOICES, 0) + + def CONDITION(self): + return self.getToken(ASLParser.CONDITION, 0) + + def VARIABLE(self): + return self.getToken(ASLParser.VARIABLE, 0) + + def DEFAULT(self): + return self.getToken(ASLParser.DEFAULT, 0) + + def BRANCHES(self): + return self.getToken(ASLParser.BRANCHES, 0) + + def SECONDSPATH(self): + return self.getToken(ASLParser.SECONDSPATH, 0) + + def SECONDS(self): + return self.getToken(ASLParser.SECONDS, 0) + + def TIMESTAMPPATH(self): + return self.getToken(ASLParser.TIMESTAMPPATH, 0) + + def TIMESTAMP(self): + return self.getToken(ASLParser.TIMESTAMP, 0) + + def TIMEOUTSECONDS(self): + return self.getToken(ASLParser.TIMEOUTSECONDS, 0) + + def TIMEOUTSECONDSPATH(self): + return self.getToken(ASLParser.TIMEOUTSECONDSPATH, 0) + + def HEARTBEATSECONDS(self): + return self.getToken(ASLParser.HEARTBEATSECONDS, 0) + + def HEARTBEATSECONDSPATH(self): + return self.getToken(ASLParser.HEARTBEATSECONDSPATH, 0) + + def PROCESSORCONFIG(self): + return self.getToken(ASLParser.PROCESSORCONFIG, 0) + + def MODE(self): + return self.getToken(ASLParser.MODE, 0) + + def INLINE(self): + return self.getToken(ASLParser.INLINE, 0) + + def DISTRIBUTED(self): + return self.getToken(ASLParser.DISTRIBUTED, 0) + + def EXECUTIONTYPE(self): + return self.getToken(ASLParser.EXECUTIONTYPE, 0) + + def STANDARD(self): + return self.getToken(ASLParser.STANDARD, 0) + + def ITEMS(self): + return self.getToken(ASLParser.ITEMS, 0) + + def ITEMPROCESSOR(self): + return self.getToken(ASLParser.ITEMPROCESSOR, 0) + + def ITERATOR(self): + return self.getToken(ASLParser.ITERATOR, 0) + + def ITEMSELECTOR(self): + return self.getToken(ASLParser.ITEMSELECTOR, 0) + + def MAXCONCURRENCY(self): + return self.getToken(ASLParser.MAXCONCURRENCY, 0) + + def MAXCONCURRENCYPATH(self): + return self.getToken(ASLParser.MAXCONCURRENCYPATH, 0) + + def RESOURCE(self): + return self.getToken(ASLParser.RESOURCE, 0) + + def INPUTPATH(self): + return self.getToken(ASLParser.INPUTPATH, 0) + + def OUTPUTPATH(self): + return self.getToken(ASLParser.OUTPUTPATH, 0) + + def ITEMSPATH(self): + return self.getToken(ASLParser.ITEMSPATH, 0) + + def RESULTPATH(self): + return self.getToken(ASLParser.RESULTPATH, 0) + + def RESULT(self): + return self.getToken(ASLParser.RESULT, 0) + + def PARAMETERS(self): + return self.getToken(ASLParser.PARAMETERS, 0) + + def CREDENTIALS(self): + return self.getToken(ASLParser.CREDENTIALS, 0) + + def ROLEARN(self): + return self.getToken(ASLParser.ROLEARN, 0) + + def ROLEARNPATH(self): + return self.getToken(ASLParser.ROLEARNPATH, 0) + + def RESULTSELECTOR(self): + return self.getToken(ASLParser.RESULTSELECTOR, 0) + + def ITEMREADER(self): + return self.getToken(ASLParser.ITEMREADER, 0) + + def READERCONFIG(self): + return self.getToken(ASLParser.READERCONFIG, 0) + + def INPUTTYPE(self): + return self.getToken(ASLParser.INPUTTYPE, 0) + + def CSVHEADERLOCATION(self): + return self.getToken(ASLParser.CSVHEADERLOCATION, 0) + + def CSVHEADERS(self): + return self.getToken(ASLParser.CSVHEADERS, 0) + + def MAXITEMS(self): + return self.getToken(ASLParser.MAXITEMS, 0) + + def MAXITEMSPATH(self): + return self.getToken(ASLParser.MAXITEMSPATH, 0) + + def TOLERATEDFAILURECOUNT(self): + return self.getToken(ASLParser.TOLERATEDFAILURECOUNT, 0) + + def TOLERATEDFAILURECOUNTPATH(self): + return self.getToken(ASLParser.TOLERATEDFAILURECOUNTPATH, 
0) + + def TOLERATEDFAILUREPERCENTAGE(self): + return self.getToken(ASLParser.TOLERATEDFAILUREPERCENTAGE, 0) + + def TOLERATEDFAILUREPERCENTAGEPATH(self): + return self.getToken(ASLParser.TOLERATEDFAILUREPERCENTAGEPATH, 0) + + def LABEL(self): + return self.getToken(ASLParser.LABEL, 0) + + def RESULTWRITER(self): + return self.getToken(ASLParser.RESULTWRITER, 0) + + def NEXT(self): + return self.getToken(ASLParser.NEXT, 0) + + def END(self): + return self.getToken(ASLParser.END, 0) + + def CAUSE(self): + return self.getToken(ASLParser.CAUSE, 0) + + def ERROR(self): + return self.getToken(ASLParser.ERROR, 0) + + def RETRY(self): + return self.getToken(ASLParser.RETRY, 0) + + def ERROREQUALS(self): + return self.getToken(ASLParser.ERROREQUALS, 0) + + def INTERVALSECONDS(self): + return self.getToken(ASLParser.INTERVALSECONDS, 0) + + def MAXATTEMPTS(self): + return self.getToken(ASLParser.MAXATTEMPTS, 0) + + def BACKOFFRATE(self): + return self.getToken(ASLParser.BACKOFFRATE, 0) + + def MAXDELAYSECONDS(self): + return self.getToken(ASLParser.MAXDELAYSECONDS, 0) + + def JITTERSTRATEGY(self): + return self.getToken(ASLParser.JITTERSTRATEGY, 0) + + def FULL(self): + return self.getToken(ASLParser.FULL, 0) + + def NONE(self): + return self.getToken(ASLParser.NONE, 0) + + def CATCH(self): + return self.getToken(ASLParser.CATCH, 0) + + def VERSION(self): + return self.getToken(ASLParser.VERSION, 0) + + def getRuleIndex(self): + return ASLParser.RULE_soft_string_keyword + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterSoft_string_keyword" ): + listener.enterSoft_string_keyword(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitSoft_string_keyword" ): + listener.exitSoft_string_keyword(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitSoft_string_keyword" ): + return visitor.visitSoft_string_keyword(self) + else: + return visitor.visitChildren(self) + + + + + def soft_string_keyword(self): + + localctx = ASLParser.Soft_string_keywordContext(self, self._ctx, self.state) + self.enterRule(localctx, 230, self.RULE_soft_string_keyword) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 1151 + _la = self._input.LA(1) + if not(((((_la - 10)) & ~0x3f) == 0 and ((1 << (_la - 10)) & -2305843009213169665) != 0) or ((((_la - 74)) & ~0x3f) == 0 and ((1 << (_la - 74)) & 8358592947469418495) != 0)): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + + + diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParserListener.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParserListener.py new file mode 100644 index 0000000000000..ad736a14516e2 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParserListener.py @@ -0,0 +1,1416 @@ +# Generated from ASLParser.g4 by ANTLR 4.13.2 +from antlr4 import * +if "." in __name__: + from .ASLParser import ASLParser +else: + from ASLParser import ASLParser + +# This class defines a complete listener for a parse tree produced by ASLParser. +class ASLParserListener(ParseTreeListener): + + # Enter a parse tree produced by ASLParser#state_machine. 
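+ # NOTE: every callback in this listener is a no-op stub; subclasses override
+ # only the rules they care about. A minimal usage sketch, assuming the
+ # generated ASLLexer lives in the same package (names here are illustrative):
+ #
+ #   from antlr4 import CommonTokenStream, InputStream, ParseTreeWalker
+ #   from .ASLLexer import ASLLexer
+ #
+ #   class StartAtPrinter(ASLParserListener):
+ #       def enterStartat_decl(self, ctx: ASLParser.Startat_declContext):
+ #           print(ctx.getText())
+ #
+ #   tree = ASLParser(CommonTokenStream(ASLLexer(InputStream(asl_json)))).state_machine()
+ #   ParseTreeWalker.DEFAULT.walk(StartAtPrinter(), tree)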
+ def enterState_machine(self, ctx:ASLParser.State_machineContext): + pass + + # Exit a parse tree produced by ASLParser#state_machine. + def exitState_machine(self, ctx:ASLParser.State_machineContext): + pass + + + # Enter a parse tree produced by ASLParser#program_decl. + def enterProgram_decl(self, ctx:ASLParser.Program_declContext): + pass + + # Exit a parse tree produced by ASLParser#program_decl. + def exitProgram_decl(self, ctx:ASLParser.Program_declContext): + pass + + + # Enter a parse tree produced by ASLParser#top_layer_stmt. + def enterTop_layer_stmt(self, ctx:ASLParser.Top_layer_stmtContext): + pass + + # Exit a parse tree produced by ASLParser#top_layer_stmt. + def exitTop_layer_stmt(self, ctx:ASLParser.Top_layer_stmtContext): + pass + + + # Enter a parse tree produced by ASLParser#startat_decl. + def enterStartat_decl(self, ctx:ASLParser.Startat_declContext): + pass + + # Exit a parse tree produced by ASLParser#startat_decl. + def exitStartat_decl(self, ctx:ASLParser.Startat_declContext): + pass + + + # Enter a parse tree produced by ASLParser#comment_decl. + def enterComment_decl(self, ctx:ASLParser.Comment_declContext): + pass + + # Exit a parse tree produced by ASLParser#comment_decl. + def exitComment_decl(self, ctx:ASLParser.Comment_declContext): + pass + + + # Enter a parse tree produced by ASLParser#version_decl. + def enterVersion_decl(self, ctx:ASLParser.Version_declContext): + pass + + # Exit a parse tree produced by ASLParser#version_decl. + def exitVersion_decl(self, ctx:ASLParser.Version_declContext): + pass + + + # Enter a parse tree produced by ASLParser#query_language_decl. + def enterQuery_language_decl(self, ctx:ASLParser.Query_language_declContext): + pass + + # Exit a parse tree produced by ASLParser#query_language_decl. + def exitQuery_language_decl(self, ctx:ASLParser.Query_language_declContext): + pass + + + # Enter a parse tree produced by ASLParser#state_stmt. + def enterState_stmt(self, ctx:ASLParser.State_stmtContext): + pass + + # Exit a parse tree produced by ASLParser#state_stmt. + def exitState_stmt(self, ctx:ASLParser.State_stmtContext): + pass + + + # Enter a parse tree produced by ASLParser#states_decl. + def enterStates_decl(self, ctx:ASLParser.States_declContext): + pass + + # Exit a parse tree produced by ASLParser#states_decl. + def exitStates_decl(self, ctx:ASLParser.States_declContext): + pass + + + # Enter a parse tree produced by ASLParser#state_decl. + def enterState_decl(self, ctx:ASLParser.State_declContext): + pass + + # Exit a parse tree produced by ASLParser#state_decl. + def exitState_decl(self, ctx:ASLParser.State_declContext): + pass + + + # Enter a parse tree produced by ASLParser#state_decl_body. + def enterState_decl_body(self, ctx:ASLParser.State_decl_bodyContext): + pass + + # Exit a parse tree produced by ASLParser#state_decl_body. + def exitState_decl_body(self, ctx:ASLParser.State_decl_bodyContext): + pass + + + # Enter a parse tree produced by ASLParser#type_decl. + def enterType_decl(self, ctx:ASLParser.Type_declContext): + pass + + # Exit a parse tree produced by ASLParser#type_decl. + def exitType_decl(self, ctx:ASLParser.Type_declContext): + pass + + + # Enter a parse tree produced by ASLParser#next_decl. + def enterNext_decl(self, ctx:ASLParser.Next_declContext): + pass + + # Exit a parse tree produced by ASLParser#next_decl. + def exitNext_decl(self, ctx:ASLParser.Next_declContext): + pass + + + # Enter a parse tree produced by ASLParser#resource_decl. 
+ def enterResource_decl(self, ctx:ASLParser.Resource_declContext): + pass + + # Exit a parse tree produced by ASLParser#resource_decl. + def exitResource_decl(self, ctx:ASLParser.Resource_declContext): + pass + + + # Enter a parse tree produced by ASLParser#input_path_decl. + def enterInput_path_decl(self, ctx:ASLParser.Input_path_declContext): + pass + + # Exit a parse tree produced by ASLParser#input_path_decl. + def exitInput_path_decl(self, ctx:ASLParser.Input_path_declContext): + pass + + + # Enter a parse tree produced by ASLParser#result_decl. + def enterResult_decl(self, ctx:ASLParser.Result_declContext): + pass + + # Exit a parse tree produced by ASLParser#result_decl. + def exitResult_decl(self, ctx:ASLParser.Result_declContext): + pass + + + # Enter a parse tree produced by ASLParser#result_path_decl. + def enterResult_path_decl(self, ctx:ASLParser.Result_path_declContext): + pass + + # Exit a parse tree produced by ASLParser#result_path_decl. + def exitResult_path_decl(self, ctx:ASLParser.Result_path_declContext): + pass + + + # Enter a parse tree produced by ASLParser#output_path_decl. + def enterOutput_path_decl(self, ctx:ASLParser.Output_path_declContext): + pass + + # Exit a parse tree produced by ASLParser#output_path_decl. + def exitOutput_path_decl(self, ctx:ASLParser.Output_path_declContext): + pass + + + # Enter a parse tree produced by ASLParser#end_decl. + def enterEnd_decl(self, ctx:ASLParser.End_declContext): + pass + + # Exit a parse tree produced by ASLParser#end_decl. + def exitEnd_decl(self, ctx:ASLParser.End_declContext): + pass + + + # Enter a parse tree produced by ASLParser#default_decl. + def enterDefault_decl(self, ctx:ASLParser.Default_declContext): + pass + + # Exit a parse tree produced by ASLParser#default_decl. + def exitDefault_decl(self, ctx:ASLParser.Default_declContext): + pass + + + # Enter a parse tree produced by ASLParser#error. + def enterError(self, ctx:ASLParser.ErrorContext): + pass + + # Exit a parse tree produced by ASLParser#error. + def exitError(self, ctx:ASLParser.ErrorContext): + pass + + + # Enter a parse tree produced by ASLParser#error_path. + def enterError_path(self, ctx:ASLParser.Error_pathContext): + pass + + # Exit a parse tree produced by ASLParser#error_path. + def exitError_path(self, ctx:ASLParser.Error_pathContext): + pass + + + # Enter a parse tree produced by ASLParser#cause. + def enterCause(self, ctx:ASLParser.CauseContext): + pass + + # Exit a parse tree produced by ASLParser#cause. + def exitCause(self, ctx:ASLParser.CauseContext): + pass + + + # Enter a parse tree produced by ASLParser#cause_path. + def enterCause_path(self, ctx:ASLParser.Cause_pathContext): + pass + + # Exit a parse tree produced by ASLParser#cause_path. + def exitCause_path(self, ctx:ASLParser.Cause_pathContext): + pass + + + # Enter a parse tree produced by ASLParser#seconds_jsonata. + def enterSeconds_jsonata(self, ctx:ASLParser.Seconds_jsonataContext): + pass + + # Exit a parse tree produced by ASLParser#seconds_jsonata. + def exitSeconds_jsonata(self, ctx:ASLParser.Seconds_jsonataContext): + pass + + + # Enter a parse tree produced by ASLParser#seconds_int. + def enterSeconds_int(self, ctx:ASLParser.Seconds_intContext): + pass + + # Exit a parse tree produced by ASLParser#seconds_int. + def exitSeconds_int(self, ctx:ASLParser.Seconds_intContext): + pass + + + # Enter a parse tree produced by ASLParser#seconds_path. 
+ def enterSeconds_path(self, ctx:ASLParser.Seconds_pathContext): + pass + + # Exit a parse tree produced by ASLParser#seconds_path. + def exitSeconds_path(self, ctx:ASLParser.Seconds_pathContext): + pass + + + # Enter a parse tree produced by ASLParser#timestamp. + def enterTimestamp(self, ctx:ASLParser.TimestampContext): + pass + + # Exit a parse tree produced by ASLParser#timestamp. + def exitTimestamp(self, ctx:ASLParser.TimestampContext): + pass + + + # Enter a parse tree produced by ASLParser#timestamp_path. + def enterTimestamp_path(self, ctx:ASLParser.Timestamp_pathContext): + pass + + # Exit a parse tree produced by ASLParser#timestamp_path. + def exitTimestamp_path(self, ctx:ASLParser.Timestamp_pathContext): + pass + + + # Enter a parse tree produced by ASLParser#items_array. + def enterItems_array(self, ctx:ASLParser.Items_arrayContext): + pass + + # Exit a parse tree produced by ASLParser#items_array. + def exitItems_array(self, ctx:ASLParser.Items_arrayContext): + pass + + + # Enter a parse tree produced by ASLParser#items_jsonata. + def enterItems_jsonata(self, ctx:ASLParser.Items_jsonataContext): + pass + + # Exit a parse tree produced by ASLParser#items_jsonata. + def exitItems_jsonata(self, ctx:ASLParser.Items_jsonataContext): + pass + + + # Enter a parse tree produced by ASLParser#items_path_decl. + def enterItems_path_decl(self, ctx:ASLParser.Items_path_declContext): + pass + + # Exit a parse tree produced by ASLParser#items_path_decl. + def exitItems_path_decl(self, ctx:ASLParser.Items_path_declContext): + pass + + + # Enter a parse tree produced by ASLParser#max_concurrency_jsonata. + def enterMax_concurrency_jsonata(self, ctx:ASLParser.Max_concurrency_jsonataContext): + pass + + # Exit a parse tree produced by ASLParser#max_concurrency_jsonata. + def exitMax_concurrency_jsonata(self, ctx:ASLParser.Max_concurrency_jsonataContext): + pass + + + # Enter a parse tree produced by ASLParser#max_concurrency_int. + def enterMax_concurrency_int(self, ctx:ASLParser.Max_concurrency_intContext): + pass + + # Exit a parse tree produced by ASLParser#max_concurrency_int. + def exitMax_concurrency_int(self, ctx:ASLParser.Max_concurrency_intContext): + pass + + + # Enter a parse tree produced by ASLParser#max_concurrency_path. + def enterMax_concurrency_path(self, ctx:ASLParser.Max_concurrency_pathContext): + pass + + # Exit a parse tree produced by ASLParser#max_concurrency_path. + def exitMax_concurrency_path(self, ctx:ASLParser.Max_concurrency_pathContext): + pass + + + # Enter a parse tree produced by ASLParser#parameters_decl. + def enterParameters_decl(self, ctx:ASLParser.Parameters_declContext): + pass + + # Exit a parse tree produced by ASLParser#parameters_decl. + def exitParameters_decl(self, ctx:ASLParser.Parameters_declContext): + pass + + + # Enter a parse tree produced by ASLParser#credentials_decl. + def enterCredentials_decl(self, ctx:ASLParser.Credentials_declContext): + pass + + # Exit a parse tree produced by ASLParser#credentials_decl. + def exitCredentials_decl(self, ctx:ASLParser.Credentials_declContext): + pass + + + # Enter a parse tree produced by ASLParser#role_arn. + def enterRole_arn(self, ctx:ASLParser.Role_arnContext): + pass + + # Exit a parse tree produced by ASLParser#role_arn. + def exitRole_arn(self, ctx:ASLParser.Role_arnContext): + pass + + + # Enter a parse tree produced by ASLParser#role_path. + def enterRole_path(self, ctx:ASLParser.Role_pathContext): + pass + + # Exit a parse tree produced by ASLParser#role_path. 
+ def exitRole_path(self, ctx:ASLParser.Role_pathContext): + pass + + + # Enter a parse tree produced by ASLParser#timeout_seconds_jsonata. + def enterTimeout_seconds_jsonata(self, ctx:ASLParser.Timeout_seconds_jsonataContext): + pass + + # Exit a parse tree produced by ASLParser#timeout_seconds_jsonata. + def exitTimeout_seconds_jsonata(self, ctx:ASLParser.Timeout_seconds_jsonataContext): + pass + + + # Enter a parse tree produced by ASLParser#timeout_seconds_int. + def enterTimeout_seconds_int(self, ctx:ASLParser.Timeout_seconds_intContext): + pass + + # Exit a parse tree produced by ASLParser#timeout_seconds_int. + def exitTimeout_seconds_int(self, ctx:ASLParser.Timeout_seconds_intContext): + pass + + + # Enter a parse tree produced by ASLParser#timeout_seconds_path. + def enterTimeout_seconds_path(self, ctx:ASLParser.Timeout_seconds_pathContext): + pass + + # Exit a parse tree produced by ASLParser#timeout_seconds_path. + def exitTimeout_seconds_path(self, ctx:ASLParser.Timeout_seconds_pathContext): + pass + + + # Enter a parse tree produced by ASLParser#heartbeat_seconds_jsonata. + def enterHeartbeat_seconds_jsonata(self, ctx:ASLParser.Heartbeat_seconds_jsonataContext): + pass + + # Exit a parse tree produced by ASLParser#heartbeat_seconds_jsonata. + def exitHeartbeat_seconds_jsonata(self, ctx:ASLParser.Heartbeat_seconds_jsonataContext): + pass + + + # Enter a parse tree produced by ASLParser#heartbeat_seconds_int. + def enterHeartbeat_seconds_int(self, ctx:ASLParser.Heartbeat_seconds_intContext): + pass + + # Exit a parse tree produced by ASLParser#heartbeat_seconds_int. + def exitHeartbeat_seconds_int(self, ctx:ASLParser.Heartbeat_seconds_intContext): + pass + + + # Enter a parse tree produced by ASLParser#heartbeat_seconds_path. + def enterHeartbeat_seconds_path(self, ctx:ASLParser.Heartbeat_seconds_pathContext): + pass + + # Exit a parse tree produced by ASLParser#heartbeat_seconds_path. + def exitHeartbeat_seconds_path(self, ctx:ASLParser.Heartbeat_seconds_pathContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_tmpl_decl. + def enterPayload_tmpl_decl(self, ctx:ASLParser.Payload_tmpl_declContext): + pass + + # Exit a parse tree produced by ASLParser#payload_tmpl_decl. + def exitPayload_tmpl_decl(self, ctx:ASLParser.Payload_tmpl_declContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_binding_sample. + def enterPayload_binding_sample(self, ctx:ASLParser.Payload_binding_sampleContext): + pass + + # Exit a parse tree produced by ASLParser#payload_binding_sample. + def exitPayload_binding_sample(self, ctx:ASLParser.Payload_binding_sampleContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_binding_value. + def enterPayload_binding_value(self, ctx:ASLParser.Payload_binding_valueContext): + pass + + # Exit a parse tree produced by ASLParser#payload_binding_value. + def exitPayload_binding_value(self, ctx:ASLParser.Payload_binding_valueContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_arr_decl. + def enterPayload_arr_decl(self, ctx:ASLParser.Payload_arr_declContext): + pass + + # Exit a parse tree produced by ASLParser#payload_arr_decl. + def exitPayload_arr_decl(self, ctx:ASLParser.Payload_arr_declContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_value_decl. + def enterPayload_value_decl(self, ctx:ASLParser.Payload_value_declContext): + pass + + # Exit a parse tree produced by ASLParser#payload_value_decl. 
+ def exitPayload_value_decl(self, ctx:ASLParser.Payload_value_declContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_value_float. + def enterPayload_value_float(self, ctx:ASLParser.Payload_value_floatContext): + pass + + # Exit a parse tree produced by ASLParser#payload_value_float. + def exitPayload_value_float(self, ctx:ASLParser.Payload_value_floatContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_value_int. + def enterPayload_value_int(self, ctx:ASLParser.Payload_value_intContext): + pass + + # Exit a parse tree produced by ASLParser#payload_value_int. + def exitPayload_value_int(self, ctx:ASLParser.Payload_value_intContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_value_bool. + def enterPayload_value_bool(self, ctx:ASLParser.Payload_value_boolContext): + pass + + # Exit a parse tree produced by ASLParser#payload_value_bool. + def exitPayload_value_bool(self, ctx:ASLParser.Payload_value_boolContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_value_null. + def enterPayload_value_null(self, ctx:ASLParser.Payload_value_nullContext): + pass + + # Exit a parse tree produced by ASLParser#payload_value_null. + def exitPayload_value_null(self, ctx:ASLParser.Payload_value_nullContext): + pass + + + # Enter a parse tree produced by ASLParser#payload_value_str. + def enterPayload_value_str(self, ctx:ASLParser.Payload_value_strContext): + pass + + # Exit a parse tree produced by ASLParser#payload_value_str. + def exitPayload_value_str(self, ctx:ASLParser.Payload_value_strContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_decl. + def enterAssign_decl(self, ctx:ASLParser.Assign_declContext): + pass + + # Exit a parse tree produced by ASLParser#assign_decl. + def exitAssign_decl(self, ctx:ASLParser.Assign_declContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_decl_body. + def enterAssign_decl_body(self, ctx:ASLParser.Assign_decl_bodyContext): + pass + + # Exit a parse tree produced by ASLParser#assign_decl_body. + def exitAssign_decl_body(self, ctx:ASLParser.Assign_decl_bodyContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_decl_binding. + def enterAssign_decl_binding(self, ctx:ASLParser.Assign_decl_bindingContext): + pass + + # Exit a parse tree produced by ASLParser#assign_decl_binding. + def exitAssign_decl_binding(self, ctx:ASLParser.Assign_decl_bindingContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value_object. + def enterAssign_template_value_object(self, ctx:ASLParser.Assign_template_value_objectContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value_object. + def exitAssign_template_value_object(self, ctx:ASLParser.Assign_template_value_objectContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_binding_string_expression_simple. + def enterAssign_template_binding_string_expression_simple(self, ctx:ASLParser.Assign_template_binding_string_expression_simpleContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_binding_string_expression_simple. + def exitAssign_template_binding_string_expression_simple(self, ctx:ASLParser.Assign_template_binding_string_expression_simpleContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_binding_value. 
+ def enterAssign_template_binding_value(self, ctx:ASLParser.Assign_template_binding_valueContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_binding_value. + def exitAssign_template_binding_value(self, ctx:ASLParser.Assign_template_binding_valueContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value. + def enterAssign_template_value(self, ctx:ASLParser.Assign_template_valueContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value. + def exitAssign_template_value(self, ctx:ASLParser.Assign_template_valueContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value_array. + def enterAssign_template_value_array(self, ctx:ASLParser.Assign_template_value_arrayContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value_array. + def exitAssign_template_value_array(self, ctx:ASLParser.Assign_template_value_arrayContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value_terminal_float. + def enterAssign_template_value_terminal_float(self, ctx:ASLParser.Assign_template_value_terminal_floatContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value_terminal_float. + def exitAssign_template_value_terminal_float(self, ctx:ASLParser.Assign_template_value_terminal_floatContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value_terminal_int. + def enterAssign_template_value_terminal_int(self, ctx:ASLParser.Assign_template_value_terminal_intContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value_terminal_int. + def exitAssign_template_value_terminal_int(self, ctx:ASLParser.Assign_template_value_terminal_intContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value_terminal_bool. + def enterAssign_template_value_terminal_bool(self, ctx:ASLParser.Assign_template_value_terminal_boolContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value_terminal_bool. + def exitAssign_template_value_terminal_bool(self, ctx:ASLParser.Assign_template_value_terminal_boolContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value_terminal_null. + def enterAssign_template_value_terminal_null(self, ctx:ASLParser.Assign_template_value_terminal_nullContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value_terminal_null. + def exitAssign_template_value_terminal_null(self, ctx:ASLParser.Assign_template_value_terminal_nullContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value_terminal_string_jsonata. + def enterAssign_template_value_terminal_string_jsonata(self, ctx:ASLParser.Assign_template_value_terminal_string_jsonataContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value_terminal_string_jsonata. + def exitAssign_template_value_terminal_string_jsonata(self, ctx:ASLParser.Assign_template_value_terminal_string_jsonataContext): + pass + + + # Enter a parse tree produced by ASLParser#assign_template_value_terminal_string_literal. + def enterAssign_template_value_terminal_string_literal(self, ctx:ASLParser.Assign_template_value_terminal_string_literalContext): + pass + + # Exit a parse tree produced by ASLParser#assign_template_value_terminal_string_literal. 
+ def exitAssign_template_value_terminal_string_literal(self, ctx:ASLParser.Assign_template_value_terminal_string_literalContext): + pass + + + # Enter a parse tree produced by ASLParser#arguments_jsonata_template_value_object. + def enterArguments_jsonata_template_value_object(self, ctx:ASLParser.Arguments_jsonata_template_value_objectContext): + pass + + # Exit a parse tree produced by ASLParser#arguments_jsonata_template_value_object. + def exitArguments_jsonata_template_value_object(self, ctx:ASLParser.Arguments_jsonata_template_value_objectContext): + pass + + + # Enter a parse tree produced by ASLParser#arguments_string_jsonata. + def enterArguments_string_jsonata(self, ctx:ASLParser.Arguments_string_jsonataContext): + pass + + # Exit a parse tree produced by ASLParser#arguments_string_jsonata. + def exitArguments_string_jsonata(self, ctx:ASLParser.Arguments_string_jsonataContext): + pass + + + # Enter a parse tree produced by ASLParser#output_decl. + def enterOutput_decl(self, ctx:ASLParser.Output_declContext): + pass + + # Exit a parse tree produced by ASLParser#output_decl. + def exitOutput_decl(self, ctx:ASLParser.Output_declContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value_object. + def enterJsonata_template_value_object(self, ctx:ASLParser.Jsonata_template_value_objectContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value_object. + def exitJsonata_template_value_object(self, ctx:ASLParser.Jsonata_template_value_objectContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_binding. + def enterJsonata_template_binding(self, ctx:ASLParser.Jsonata_template_bindingContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_binding. + def exitJsonata_template_binding(self, ctx:ASLParser.Jsonata_template_bindingContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value. + def enterJsonata_template_value(self, ctx:ASLParser.Jsonata_template_valueContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value. + def exitJsonata_template_value(self, ctx:ASLParser.Jsonata_template_valueContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value_array. + def enterJsonata_template_value_array(self, ctx:ASLParser.Jsonata_template_value_arrayContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value_array. + def exitJsonata_template_value_array(self, ctx:ASLParser.Jsonata_template_value_arrayContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value_terminal_float. + def enterJsonata_template_value_terminal_float(self, ctx:ASLParser.Jsonata_template_value_terminal_floatContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value_terminal_float. + def exitJsonata_template_value_terminal_float(self, ctx:ASLParser.Jsonata_template_value_terminal_floatContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value_terminal_int. + def enterJsonata_template_value_terminal_int(self, ctx:ASLParser.Jsonata_template_value_terminal_intContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value_terminal_int. + def exitJsonata_template_value_terminal_int(self, ctx:ASLParser.Jsonata_template_value_terminal_intContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value_terminal_bool. 
+ def enterJsonata_template_value_terminal_bool(self, ctx:ASLParser.Jsonata_template_value_terminal_boolContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value_terminal_bool. + def exitJsonata_template_value_terminal_bool(self, ctx:ASLParser.Jsonata_template_value_terminal_boolContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value_terminal_null. + def enterJsonata_template_value_terminal_null(self, ctx:ASLParser.Jsonata_template_value_terminal_nullContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value_terminal_null. + def exitJsonata_template_value_terminal_null(self, ctx:ASLParser.Jsonata_template_value_terminal_nullContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value_terminal_string_jsonata. + def enterJsonata_template_value_terminal_string_jsonata(self, ctx:ASLParser.Jsonata_template_value_terminal_string_jsonataContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value_terminal_string_jsonata. + def exitJsonata_template_value_terminal_string_jsonata(self, ctx:ASLParser.Jsonata_template_value_terminal_string_jsonataContext): + pass + + + # Enter a parse tree produced by ASLParser#jsonata_template_value_terminal_string_literal. + def enterJsonata_template_value_terminal_string_literal(self, ctx:ASLParser.Jsonata_template_value_terminal_string_literalContext): + pass + + # Exit a parse tree produced by ASLParser#jsonata_template_value_terminal_string_literal. + def exitJsonata_template_value_terminal_string_literal(self, ctx:ASLParser.Jsonata_template_value_terminal_string_literalContext): + pass + + + # Enter a parse tree produced by ASLParser#result_selector_decl. + def enterResult_selector_decl(self, ctx:ASLParser.Result_selector_declContext): + pass + + # Exit a parse tree produced by ASLParser#result_selector_decl. + def exitResult_selector_decl(self, ctx:ASLParser.Result_selector_declContext): + pass + + + # Enter a parse tree produced by ASLParser#state_type. + def enterState_type(self, ctx:ASLParser.State_typeContext): + pass + + # Exit a parse tree produced by ASLParser#state_type. + def exitState_type(self, ctx:ASLParser.State_typeContext): + pass + + + # Enter a parse tree produced by ASLParser#choices_decl. + def enterChoices_decl(self, ctx:ASLParser.Choices_declContext): + pass + + # Exit a parse tree produced by ASLParser#choices_decl. + def exitChoices_decl(self, ctx:ASLParser.Choices_declContext): + pass + + + # Enter a parse tree produced by ASLParser#choice_rule_comparison_variable. + def enterChoice_rule_comparison_variable(self, ctx:ASLParser.Choice_rule_comparison_variableContext): + pass + + # Exit a parse tree produced by ASLParser#choice_rule_comparison_variable. + def exitChoice_rule_comparison_variable(self, ctx:ASLParser.Choice_rule_comparison_variableContext): + pass + + + # Enter a parse tree produced by ASLParser#choice_rule_comparison_composite. + def enterChoice_rule_comparison_composite(self, ctx:ASLParser.Choice_rule_comparison_compositeContext): + pass + + # Exit a parse tree produced by ASLParser#choice_rule_comparison_composite. + def exitChoice_rule_comparison_composite(self, ctx:ASLParser.Choice_rule_comparison_compositeContext): + pass + + + # Enter a parse tree produced by ASLParser#comparison_variable_stmt. + def enterComparison_variable_stmt(self, ctx:ASLParser.Comparison_variable_stmtContext): + pass + + # Exit a parse tree produced by ASLParser#comparison_variable_stmt. 
+    def exitComparison_variable_stmt(self, ctx:ASLParser.Comparison_variable_stmtContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#comparison_composite_stmt.
+    def enterComparison_composite_stmt(self, ctx:ASLParser.Comparison_composite_stmtContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#comparison_composite_stmt.
+    def exitComparison_composite_stmt(self, ctx:ASLParser.Comparison_composite_stmtContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#comparison_composite.
+    def enterComparison_composite(self, ctx:ASLParser.Comparison_compositeContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#comparison_composite.
+    def exitComparison_composite(self, ctx:ASLParser.Comparison_compositeContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#variable_decl.
+    def enterVariable_decl(self, ctx:ASLParser.Variable_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#variable_decl.
+    def exitVariable_decl(self, ctx:ASLParser.Variable_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#condition_lit.
+    def enterCondition_lit(self, ctx:ASLParser.Condition_litContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#condition_lit.
+    def exitCondition_lit(self, ctx:ASLParser.Condition_litContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#condition_string_jsonata.
+    def enterCondition_string_jsonata(self, ctx:ASLParser.Condition_string_jsonataContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#condition_string_jsonata.
+    def exitCondition_string_jsonata(self, ctx:ASLParser.Condition_string_jsonataContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#comparison_func_string_variable_sample.
+    def enterComparison_func_string_variable_sample(self, ctx:ASLParser.Comparison_func_string_variable_sampleContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#comparison_func_string_variable_sample.
+    def exitComparison_func_string_variable_sample(self, ctx:ASLParser.Comparison_func_string_variable_sampleContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#comparison_func_value.
+    def enterComparison_func_value(self, ctx:ASLParser.Comparison_func_valueContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#comparison_func_value.
+    def exitComparison_func_value(self, ctx:ASLParser.Comparison_func_valueContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#branches_decl.
+    def enterBranches_decl(self, ctx:ASLParser.Branches_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#branches_decl.
+    def exitBranches_decl(self, ctx:ASLParser.Branches_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#item_processor_decl.
+    def enterItem_processor_decl(self, ctx:ASLParser.Item_processor_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#item_processor_decl.
+    def exitItem_processor_decl(self, ctx:ASLParser.Item_processor_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#item_processor_item.
+    def enterItem_processor_item(self, ctx:ASLParser.Item_processor_itemContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#item_processor_item.
+    def exitItem_processor_item(self, ctx:ASLParser.Item_processor_itemContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#processor_config_decl.
+    def enterProcessor_config_decl(self, ctx:ASLParser.Processor_config_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#processor_config_decl.
+    def exitProcessor_config_decl(self, ctx:ASLParser.Processor_config_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#processor_config_field.
+    def enterProcessor_config_field(self, ctx:ASLParser.Processor_config_fieldContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#processor_config_field.
+    def exitProcessor_config_field(self, ctx:ASLParser.Processor_config_fieldContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#mode_decl.
+    def enterMode_decl(self, ctx:ASLParser.Mode_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#mode_decl.
+    def exitMode_decl(self, ctx:ASLParser.Mode_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#mode_type.
+    def enterMode_type(self, ctx:ASLParser.Mode_typeContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#mode_type.
+    def exitMode_type(self, ctx:ASLParser.Mode_typeContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#execution_decl.
+    def enterExecution_decl(self, ctx:ASLParser.Execution_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#execution_decl.
+    def exitExecution_decl(self, ctx:ASLParser.Execution_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#execution_type.
+    def enterExecution_type(self, ctx:ASLParser.Execution_typeContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#execution_type.
+    def exitExecution_type(self, ctx:ASLParser.Execution_typeContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#iterator_decl.
+    def enterIterator_decl(self, ctx:ASLParser.Iterator_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#iterator_decl.
+    def exitIterator_decl(self, ctx:ASLParser.Iterator_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#iterator_decl_item.
+    def enterIterator_decl_item(self, ctx:ASLParser.Iterator_decl_itemContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#iterator_decl_item.
+    def exitIterator_decl_item(self, ctx:ASLParser.Iterator_decl_itemContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#item_selector_decl.
+    def enterItem_selector_decl(self, ctx:ASLParser.Item_selector_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#item_selector_decl.
+    def exitItem_selector_decl(self, ctx:ASLParser.Item_selector_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#item_reader_decl.
+    def enterItem_reader_decl(self, ctx:ASLParser.Item_reader_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#item_reader_decl.
+    def exitItem_reader_decl(self, ctx:ASLParser.Item_reader_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#items_reader_field.
+    def enterItems_reader_field(self, ctx:ASLParser.Items_reader_fieldContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#items_reader_field.
+    def exitItems_reader_field(self, ctx:ASLParser.Items_reader_fieldContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#reader_config_decl.
+    def enterReader_config_decl(self, ctx:ASLParser.Reader_config_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#reader_config_decl.
+    def exitReader_config_decl(self, ctx:ASLParser.Reader_config_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#reader_config_field.
+    def enterReader_config_field(self, ctx:ASLParser.Reader_config_fieldContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#reader_config_field.
+    def exitReader_config_field(self, ctx:ASLParser.Reader_config_fieldContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#input_type_decl.
+    def enterInput_type_decl(self, ctx:ASLParser.Input_type_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#input_type_decl.
+    def exitInput_type_decl(self, ctx:ASLParser.Input_type_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#csv_header_location_decl.
+    def enterCsv_header_location_decl(self, ctx:ASLParser.Csv_header_location_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#csv_header_location_decl.
+    def exitCsv_header_location_decl(self, ctx:ASLParser.Csv_header_location_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#csv_headers_decl.
+    def enterCsv_headers_decl(self, ctx:ASLParser.Csv_headers_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#csv_headers_decl.
+    def exitCsv_headers_decl(self, ctx:ASLParser.Csv_headers_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#max_items_string_jsonata.
+    def enterMax_items_string_jsonata(self, ctx:ASLParser.Max_items_string_jsonataContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#max_items_string_jsonata.
+    def exitMax_items_string_jsonata(self, ctx:ASLParser.Max_items_string_jsonataContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#max_items_int.
+    def enterMax_items_int(self, ctx:ASLParser.Max_items_intContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#max_items_int.
+    def exitMax_items_int(self, ctx:ASLParser.Max_items_intContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#max_items_path.
+    def enterMax_items_path(self, ctx:ASLParser.Max_items_pathContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#max_items_path.
+    def exitMax_items_path(self, ctx:ASLParser.Max_items_pathContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#tolerated_failure_count_string_jsonata.
+    def enterTolerated_failure_count_string_jsonata(self, ctx:ASLParser.Tolerated_failure_count_string_jsonataContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#tolerated_failure_count_string_jsonata.
+    def exitTolerated_failure_count_string_jsonata(self, ctx:ASLParser.Tolerated_failure_count_string_jsonataContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#tolerated_failure_count_int.
+    def enterTolerated_failure_count_int(self, ctx:ASLParser.Tolerated_failure_count_intContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#tolerated_failure_count_int.
+    def exitTolerated_failure_count_int(self, ctx:ASLParser.Tolerated_failure_count_intContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#tolerated_failure_count_path.
+    def enterTolerated_failure_count_path(self, ctx:ASLParser.Tolerated_failure_count_pathContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#tolerated_failure_count_path.
+    def exitTolerated_failure_count_path(self, ctx:ASLParser.Tolerated_failure_count_pathContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#tolerated_failure_percentage_string_jsonata.
+    def enterTolerated_failure_percentage_string_jsonata(self, ctx:ASLParser.Tolerated_failure_percentage_string_jsonataContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#tolerated_failure_percentage_string_jsonata.
+    def exitTolerated_failure_percentage_string_jsonata(self, ctx:ASLParser.Tolerated_failure_percentage_string_jsonataContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#tolerated_failure_percentage_number.
+    def enterTolerated_failure_percentage_number(self, ctx:ASLParser.Tolerated_failure_percentage_numberContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#tolerated_failure_percentage_number.
+    def exitTolerated_failure_percentage_number(self, ctx:ASLParser.Tolerated_failure_percentage_numberContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#tolerated_failure_percentage_path.
+    def enterTolerated_failure_percentage_path(self, ctx:ASLParser.Tolerated_failure_percentage_pathContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#tolerated_failure_percentage_path.
+    def exitTolerated_failure_percentage_path(self, ctx:ASLParser.Tolerated_failure_percentage_pathContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#label_decl.
+    def enterLabel_decl(self, ctx:ASLParser.Label_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#label_decl.
+    def exitLabel_decl(self, ctx:ASLParser.Label_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#result_writer_decl.
+    def enterResult_writer_decl(self, ctx:ASLParser.Result_writer_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#result_writer_decl.
+    def exitResult_writer_decl(self, ctx:ASLParser.Result_writer_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#result_writer_field.
+    def enterResult_writer_field(self, ctx:ASLParser.Result_writer_fieldContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#result_writer_field.
+    def exitResult_writer_field(self, ctx:ASLParser.Result_writer_fieldContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#retry_decl.
+    def enterRetry_decl(self, ctx:ASLParser.Retry_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#retry_decl.
+    def exitRetry_decl(self, ctx:ASLParser.Retry_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#retrier_decl.
+    def enterRetrier_decl(self, ctx:ASLParser.Retrier_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#retrier_decl.
+    def exitRetrier_decl(self, ctx:ASLParser.Retrier_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#retrier_stmt.
+    def enterRetrier_stmt(self, ctx:ASLParser.Retrier_stmtContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#retrier_stmt.
+    def exitRetrier_stmt(self, ctx:ASLParser.Retrier_stmtContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#error_equals_decl.
+    def enterError_equals_decl(self, ctx:ASLParser.Error_equals_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#error_equals_decl.
+    def exitError_equals_decl(self, ctx:ASLParser.Error_equals_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#interval_seconds_decl.
+    def enterInterval_seconds_decl(self, ctx:ASLParser.Interval_seconds_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#interval_seconds_decl.
+    def exitInterval_seconds_decl(self, ctx:ASLParser.Interval_seconds_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#max_attempts_decl.
+    def enterMax_attempts_decl(self, ctx:ASLParser.Max_attempts_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#max_attempts_decl.
+    def exitMax_attempts_decl(self, ctx:ASLParser.Max_attempts_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#backoff_rate_decl.
+    def enterBackoff_rate_decl(self, ctx:ASLParser.Backoff_rate_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#backoff_rate_decl.
+    def exitBackoff_rate_decl(self, ctx:ASLParser.Backoff_rate_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#max_delay_seconds_decl.
+    def enterMax_delay_seconds_decl(self, ctx:ASLParser.Max_delay_seconds_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#max_delay_seconds_decl.
+    def exitMax_delay_seconds_decl(self, ctx:ASLParser.Max_delay_seconds_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#jitter_strategy_decl.
+    def enterJitter_strategy_decl(self, ctx:ASLParser.Jitter_strategy_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#jitter_strategy_decl.
+    def exitJitter_strategy_decl(self, ctx:ASLParser.Jitter_strategy_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#catch_decl.
+    def enterCatch_decl(self, ctx:ASLParser.Catch_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#catch_decl.
+    def exitCatch_decl(self, ctx:ASLParser.Catch_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#catcher_decl.
+    def enterCatcher_decl(self, ctx:ASLParser.Catcher_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#catcher_decl.
+    def exitCatcher_decl(self, ctx:ASLParser.Catcher_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#catcher_stmt.
+    def enterCatcher_stmt(self, ctx:ASLParser.Catcher_stmtContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#catcher_stmt.
+    def exitCatcher_stmt(self, ctx:ASLParser.Catcher_stmtContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#comparison_op.
+    def enterComparison_op(self, ctx:ASLParser.Comparison_opContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#comparison_op.
+    def exitComparison_op(self, ctx:ASLParser.Comparison_opContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#choice_operator.
+    def enterChoice_operator(self, ctx:ASLParser.Choice_operatorContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#choice_operator.
+    def exitChoice_operator(self, ctx:ASLParser.Choice_operatorContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#states_error_name.
+    def enterStates_error_name(self, ctx:ASLParser.States_error_nameContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#states_error_name.
+    def exitStates_error_name(self, ctx:ASLParser.States_error_nameContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#error_name.
+    def enterError_name(self, ctx:ASLParser.Error_nameContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#error_name.
+    def exitError_name(self, ctx:ASLParser.Error_nameContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#json_obj_decl.
+    def enterJson_obj_decl(self, ctx:ASLParser.Json_obj_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#json_obj_decl.
+    def exitJson_obj_decl(self, ctx:ASLParser.Json_obj_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#json_binding.
+    def enterJson_binding(self, ctx:ASLParser.Json_bindingContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#json_binding.
+    def exitJson_binding(self, ctx:ASLParser.Json_bindingContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#json_arr_decl.
+    def enterJson_arr_decl(self, ctx:ASLParser.Json_arr_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#json_arr_decl.
+    def exitJson_arr_decl(self, ctx:ASLParser.Json_arr_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#json_value_decl.
+    def enterJson_value_decl(self, ctx:ASLParser.Json_value_declContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#json_value_decl.
+    def exitJson_value_decl(self, ctx:ASLParser.Json_value_declContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_sampler.
+    def enterString_sampler(self, ctx:ASLParser.String_samplerContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_sampler.
+    def exitString_sampler(self, ctx:ASLParser.String_samplerContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_expression_simple.
+    def enterString_expression_simple(self, ctx:ASLParser.String_expression_simpleContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_expression_simple.
+    def exitString_expression_simple(self, ctx:ASLParser.String_expression_simpleContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_expression.
+    def enterString_expression(self, ctx:ASLParser.String_expressionContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_expression.
+    def exitString_expression(self, ctx:ASLParser.String_expressionContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_jsonpath.
+    def enterString_jsonpath(self, ctx:ASLParser.String_jsonpathContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_jsonpath.
+    def exitString_jsonpath(self, ctx:ASLParser.String_jsonpathContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_context_path.
+    def enterString_context_path(self, ctx:ASLParser.String_context_pathContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_context_path.
+    def exitString_context_path(self, ctx:ASLParser.String_context_pathContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_variable_sample.
+    def enterString_variable_sample(self, ctx:ASLParser.String_variable_sampleContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_variable_sample.
+    def exitString_variable_sample(self, ctx:ASLParser.String_variable_sampleContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_intrinsic_function.
+    def enterString_intrinsic_function(self, ctx:ASLParser.String_intrinsic_functionContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_intrinsic_function.
+    def exitString_intrinsic_function(self, ctx:ASLParser.String_intrinsic_functionContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_jsonata.
+    def enterString_jsonata(self, ctx:ASLParser.String_jsonataContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_jsonata.
+    def exitString_jsonata(self, ctx:ASLParser.String_jsonataContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#string_literal.
+    def enterString_literal(self, ctx:ASLParser.String_literalContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#string_literal.
+    def exitString_literal(self, ctx:ASLParser.String_literalContext):
+        pass
+
+
+    # Enter a parse tree produced by ASLParser#soft_string_keyword.
+    def enterSoft_string_keyword(self, ctx:ASLParser.Soft_string_keywordContext):
+        pass
+
+    # Exit a parse tree produced by ASLParser#soft_string_keyword.
+    def exitSoft_string_keyword(self, ctx:ASLParser.Soft_string_keywordContext):
+        pass
+
+
+
+del ASLParser
\ No newline at end of file
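The generated listener above is a passive base class: every callback defaults to pass, so consumers subclass it and override only the rules they care about. A minimal usage sketch follows; it assumes an ASLLexer is generated into the same runtime package and that state_machine is the grammar's entry rule (all names here are illustrative, not part of this change):

from antlr4 import CommonTokenStream, InputStream, ParseTreeWalker

from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer
from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser
from localstack.services.stepfunctions.asl.antlr.runtime.ASLParserListener import ASLParserListener


class CommentListener(ASLParserListener):
    # Override only the callback of interest; every other rule stays a no-op.
    def enterComment_decl(self, ctx: ASLParser.Comment_declContext):
        print("Comment field:", ctx.getText())


def walk(definition: str) -> None:
    # Lex and parse the state-machine definition, then fire listener callbacks.
    tokens = CommonTokenStream(ASLLexer(InputStream(definition)))
    tree = ASLParser(tokens).state_machine()
    ParseTreeWalker.DEFAULT.walk(CommentListener(), tree)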
diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParserVisitor.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParserVisitor.py
new file mode 100644
index 0000000000000..ed1b7b0611097
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/ASLParserVisitor.py
@@ -0,0 +1,793 @@
+# Generated from ASLParser.g4 by ANTLR 4.13.2
+from antlr4 import *
+if "." in __name__:
+    from .ASLParser import ASLParser
+else:
+    from ASLParser import ASLParser
+
+# This class defines a complete generic visitor for a parse tree produced by ASLParser.
+
+class ASLParserVisitor(ParseTreeVisitor):
+
+    # Visit a parse tree produced by ASLParser#state_machine.
+    def visitState_machine(self, ctx:ASLParser.State_machineContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#program_decl.
+    def visitProgram_decl(self, ctx:ASLParser.Program_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#top_layer_stmt.
+    def visitTop_layer_stmt(self, ctx:ASLParser.Top_layer_stmtContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#startat_decl.
+    def visitStartat_decl(self, ctx:ASLParser.Startat_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#comment_decl.
+    def visitComment_decl(self, ctx:ASLParser.Comment_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#version_decl.
+    def visitVersion_decl(self, ctx:ASLParser.Version_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#query_language_decl.
+    def visitQuery_language_decl(self, ctx:ASLParser.Query_language_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#state_stmt.
+    def visitState_stmt(self, ctx:ASLParser.State_stmtContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#states_decl.
+    def visitStates_decl(self, ctx:ASLParser.States_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#state_decl.
+    def visitState_decl(self, ctx:ASLParser.State_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#state_decl_body.
+    def visitState_decl_body(self, ctx:ASLParser.State_decl_bodyContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#type_decl.
+    def visitType_decl(self, ctx:ASLParser.Type_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#next_decl.
+    def visitNext_decl(self, ctx:ASLParser.Next_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#resource_decl.
+    def visitResource_decl(self, ctx:ASLParser.Resource_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#input_path_decl.
+    def visitInput_path_decl(self, ctx:ASLParser.Input_path_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#result_decl.
+    def visitResult_decl(self, ctx:ASLParser.Result_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#result_path_decl.
+    def visitResult_path_decl(self, ctx:ASLParser.Result_path_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#output_path_decl.
+    def visitOutput_path_decl(self, ctx:ASLParser.Output_path_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#end_decl.
+    def visitEnd_decl(self, ctx:ASLParser.End_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#default_decl.
+    def visitDefault_decl(self, ctx:ASLParser.Default_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#error.
+    def visitError(self, ctx:ASLParser.ErrorContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#error_path.
+    def visitError_path(self, ctx:ASLParser.Error_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#cause.
+    def visitCause(self, ctx:ASLParser.CauseContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#cause_path.
+    def visitCause_path(self, ctx:ASLParser.Cause_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#seconds_jsonata.
+    def visitSeconds_jsonata(self, ctx:ASLParser.Seconds_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#seconds_int.
+    def visitSeconds_int(self, ctx:ASLParser.Seconds_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#seconds_path.
+    def visitSeconds_path(self, ctx:ASLParser.Seconds_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#timestamp.
+    def visitTimestamp(self, ctx:ASLParser.TimestampContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#timestamp_path.
+    def visitTimestamp_path(self, ctx:ASLParser.Timestamp_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#items_array.
+    def visitItems_array(self, ctx:ASLParser.Items_arrayContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#items_jsonata.
+    def visitItems_jsonata(self, ctx:ASLParser.Items_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#items_path_decl.
+    def visitItems_path_decl(self, ctx:ASLParser.Items_path_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#max_concurrency_jsonata.
+    def visitMax_concurrency_jsonata(self, ctx:ASLParser.Max_concurrency_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#max_concurrency_int.
+    def visitMax_concurrency_int(self, ctx:ASLParser.Max_concurrency_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#max_concurrency_path.
+    def visitMax_concurrency_path(self, ctx:ASLParser.Max_concurrency_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#parameters_decl.
+    def visitParameters_decl(self, ctx:ASLParser.Parameters_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#credentials_decl.
+    def visitCredentials_decl(self, ctx:ASLParser.Credentials_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#role_arn.
+    def visitRole_arn(self, ctx:ASLParser.Role_arnContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#role_path.
+    def visitRole_path(self, ctx:ASLParser.Role_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#timeout_seconds_jsonata.
+    def visitTimeout_seconds_jsonata(self, ctx:ASLParser.Timeout_seconds_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#timeout_seconds_int.
+    def visitTimeout_seconds_int(self, ctx:ASLParser.Timeout_seconds_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#timeout_seconds_path.
+    def visitTimeout_seconds_path(self, ctx:ASLParser.Timeout_seconds_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#heartbeat_seconds_jsonata.
+    def visitHeartbeat_seconds_jsonata(self, ctx:ASLParser.Heartbeat_seconds_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#heartbeat_seconds_int.
+    def visitHeartbeat_seconds_int(self, ctx:ASLParser.Heartbeat_seconds_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#heartbeat_seconds_path.
+    def visitHeartbeat_seconds_path(self, ctx:ASLParser.Heartbeat_seconds_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_tmpl_decl.
+    def visitPayload_tmpl_decl(self, ctx:ASLParser.Payload_tmpl_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_binding_sample.
+    def visitPayload_binding_sample(self, ctx:ASLParser.Payload_binding_sampleContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_binding_value.
+    def visitPayload_binding_value(self, ctx:ASLParser.Payload_binding_valueContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_arr_decl.
+    def visitPayload_arr_decl(self, ctx:ASLParser.Payload_arr_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_value_decl.
+    def visitPayload_value_decl(self, ctx:ASLParser.Payload_value_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_value_float.
+    def visitPayload_value_float(self, ctx:ASLParser.Payload_value_floatContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_value_int.
+    def visitPayload_value_int(self, ctx:ASLParser.Payload_value_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_value_bool.
+    def visitPayload_value_bool(self, ctx:ASLParser.Payload_value_boolContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_value_null.
+    def visitPayload_value_null(self, ctx:ASLParser.Payload_value_nullContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#payload_value_str.
+    def visitPayload_value_str(self, ctx:ASLParser.Payload_value_strContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_decl.
+    def visitAssign_decl(self, ctx:ASLParser.Assign_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_decl_body.
+    def visitAssign_decl_body(self, ctx:ASLParser.Assign_decl_bodyContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_decl_binding.
+    def visitAssign_decl_binding(self, ctx:ASLParser.Assign_decl_bindingContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value_object.
+    def visitAssign_template_value_object(self, ctx:ASLParser.Assign_template_value_objectContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_binding_string_expression_simple.
+    def visitAssign_template_binding_string_expression_simple(self, ctx:ASLParser.Assign_template_binding_string_expression_simpleContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_binding_value.
+    def visitAssign_template_binding_value(self, ctx:ASLParser.Assign_template_binding_valueContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value.
+    def visitAssign_template_value(self, ctx:ASLParser.Assign_template_valueContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value_array.
+    def visitAssign_template_value_array(self, ctx:ASLParser.Assign_template_value_arrayContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value_terminal_float.
+    def visitAssign_template_value_terminal_float(self, ctx:ASLParser.Assign_template_value_terminal_floatContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value_terminal_int.
+    def visitAssign_template_value_terminal_int(self, ctx:ASLParser.Assign_template_value_terminal_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value_terminal_bool.
+    def visitAssign_template_value_terminal_bool(self, ctx:ASLParser.Assign_template_value_terminal_boolContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value_terminal_null.
+    def visitAssign_template_value_terminal_null(self, ctx:ASLParser.Assign_template_value_terminal_nullContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value_terminal_string_jsonata.
+    def visitAssign_template_value_terminal_string_jsonata(self, ctx:ASLParser.Assign_template_value_terminal_string_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#assign_template_value_terminal_string_literal.
+    def visitAssign_template_value_terminal_string_literal(self, ctx:ASLParser.Assign_template_value_terminal_string_literalContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#arguments_jsonata_template_value_object.
+    def visitArguments_jsonata_template_value_object(self, ctx:ASLParser.Arguments_jsonata_template_value_objectContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#arguments_string_jsonata.
+    def visitArguments_string_jsonata(self, ctx:ASLParser.Arguments_string_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#output_decl.
+    def visitOutput_decl(self, ctx:ASLParser.Output_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value_object.
+    def visitJsonata_template_value_object(self, ctx:ASLParser.Jsonata_template_value_objectContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_binding.
+    def visitJsonata_template_binding(self, ctx:ASLParser.Jsonata_template_bindingContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value.
+    def visitJsonata_template_value(self, ctx:ASLParser.Jsonata_template_valueContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value_array.
+    def visitJsonata_template_value_array(self, ctx:ASLParser.Jsonata_template_value_arrayContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value_terminal_float.
+    def visitJsonata_template_value_terminal_float(self, ctx:ASLParser.Jsonata_template_value_terminal_floatContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value_terminal_int.
+    def visitJsonata_template_value_terminal_int(self, ctx:ASLParser.Jsonata_template_value_terminal_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value_terminal_bool.
+    def visitJsonata_template_value_terminal_bool(self, ctx:ASLParser.Jsonata_template_value_terminal_boolContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value_terminal_null.
+    def visitJsonata_template_value_terminal_null(self, ctx:ASLParser.Jsonata_template_value_terminal_nullContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value_terminal_string_jsonata.
+    def visitJsonata_template_value_terminal_string_jsonata(self, ctx:ASLParser.Jsonata_template_value_terminal_string_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jsonata_template_value_terminal_string_literal.
+    def visitJsonata_template_value_terminal_string_literal(self, ctx:ASLParser.Jsonata_template_value_terminal_string_literalContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#result_selector_decl.
+    def visitResult_selector_decl(self, ctx:ASLParser.Result_selector_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#state_type.
+    def visitState_type(self, ctx:ASLParser.State_typeContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#choices_decl.
+    def visitChoices_decl(self, ctx:ASLParser.Choices_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#choice_rule_comparison_variable.
+    def visitChoice_rule_comparison_variable(self, ctx:ASLParser.Choice_rule_comparison_variableContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#choice_rule_comparison_composite.
+    def visitChoice_rule_comparison_composite(self, ctx:ASLParser.Choice_rule_comparison_compositeContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#comparison_variable_stmt.
+    def visitComparison_variable_stmt(self, ctx:ASLParser.Comparison_variable_stmtContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#comparison_composite_stmt.
+    def visitComparison_composite_stmt(self, ctx:ASLParser.Comparison_composite_stmtContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#comparison_composite.
+    def visitComparison_composite(self, ctx:ASLParser.Comparison_compositeContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#variable_decl.
+    def visitVariable_decl(self, ctx:ASLParser.Variable_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#condition_lit.
+    def visitCondition_lit(self, ctx:ASLParser.Condition_litContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#condition_string_jsonata.
+    def visitCondition_string_jsonata(self, ctx:ASLParser.Condition_string_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#comparison_func_string_variable_sample.
+    def visitComparison_func_string_variable_sample(self, ctx:ASLParser.Comparison_func_string_variable_sampleContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#comparison_func_value.
+    def visitComparison_func_value(self, ctx:ASLParser.Comparison_func_valueContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#branches_decl.
+    def visitBranches_decl(self, ctx:ASLParser.Branches_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#item_processor_decl.
+    def visitItem_processor_decl(self, ctx:ASLParser.Item_processor_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#item_processor_item.
+    def visitItem_processor_item(self, ctx:ASLParser.Item_processor_itemContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#processor_config_decl.
+    def visitProcessor_config_decl(self, ctx:ASLParser.Processor_config_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#processor_config_field.
+    def visitProcessor_config_field(self, ctx:ASLParser.Processor_config_fieldContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#mode_decl.
+    def visitMode_decl(self, ctx:ASLParser.Mode_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#mode_type.
+    def visitMode_type(self, ctx:ASLParser.Mode_typeContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#execution_decl.
+    def visitExecution_decl(self, ctx:ASLParser.Execution_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#execution_type.
+    def visitExecution_type(self, ctx:ASLParser.Execution_typeContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#iterator_decl.
+    def visitIterator_decl(self, ctx:ASLParser.Iterator_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#iterator_decl_item.
+    def visitIterator_decl_item(self, ctx:ASLParser.Iterator_decl_itemContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#item_selector_decl.
+    def visitItem_selector_decl(self, ctx:ASLParser.Item_selector_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#item_reader_decl.
+    def visitItem_reader_decl(self, ctx:ASLParser.Item_reader_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#items_reader_field.
+    def visitItems_reader_field(self, ctx:ASLParser.Items_reader_fieldContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#reader_config_decl.
+    def visitReader_config_decl(self, ctx:ASLParser.Reader_config_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#reader_config_field.
+    def visitReader_config_field(self, ctx:ASLParser.Reader_config_fieldContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#input_type_decl.
+    def visitInput_type_decl(self, ctx:ASLParser.Input_type_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#csv_header_location_decl.
+    def visitCsv_header_location_decl(self, ctx:ASLParser.Csv_header_location_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#csv_headers_decl.
+    def visitCsv_headers_decl(self, ctx:ASLParser.Csv_headers_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#max_items_string_jsonata.
+    def visitMax_items_string_jsonata(self, ctx:ASLParser.Max_items_string_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#max_items_int.
+    def visitMax_items_int(self, ctx:ASLParser.Max_items_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#max_items_path.
+    def visitMax_items_path(self, ctx:ASLParser.Max_items_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#tolerated_failure_count_string_jsonata.
+    def visitTolerated_failure_count_string_jsonata(self, ctx:ASLParser.Tolerated_failure_count_string_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#tolerated_failure_count_int.
+    def visitTolerated_failure_count_int(self, ctx:ASLParser.Tolerated_failure_count_intContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#tolerated_failure_count_path.
+    def visitTolerated_failure_count_path(self, ctx:ASLParser.Tolerated_failure_count_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#tolerated_failure_percentage_string_jsonata.
+    def visitTolerated_failure_percentage_string_jsonata(self, ctx:ASLParser.Tolerated_failure_percentage_string_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#tolerated_failure_percentage_number.
+    def visitTolerated_failure_percentage_number(self, ctx:ASLParser.Tolerated_failure_percentage_numberContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#tolerated_failure_percentage_path.
+    def visitTolerated_failure_percentage_path(self, ctx:ASLParser.Tolerated_failure_percentage_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#label_decl.
+    def visitLabel_decl(self, ctx:ASLParser.Label_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#result_writer_decl.
+    def visitResult_writer_decl(self, ctx:ASLParser.Result_writer_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#result_writer_field.
+    def visitResult_writer_field(self, ctx:ASLParser.Result_writer_fieldContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#retry_decl.
+    def visitRetry_decl(self, ctx:ASLParser.Retry_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#retrier_decl.
+    def visitRetrier_decl(self, ctx:ASLParser.Retrier_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#retrier_stmt.
+    def visitRetrier_stmt(self, ctx:ASLParser.Retrier_stmtContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#error_equals_decl.
+    def visitError_equals_decl(self, ctx:ASLParser.Error_equals_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#interval_seconds_decl.
+    def visitInterval_seconds_decl(self, ctx:ASLParser.Interval_seconds_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#max_attempts_decl.
+    def visitMax_attempts_decl(self, ctx:ASLParser.Max_attempts_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#backoff_rate_decl.
+    def visitBackoff_rate_decl(self, ctx:ASLParser.Backoff_rate_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#max_delay_seconds_decl.
+    def visitMax_delay_seconds_decl(self, ctx:ASLParser.Max_delay_seconds_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#jitter_strategy_decl.
+    def visitJitter_strategy_decl(self, ctx:ASLParser.Jitter_strategy_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#catch_decl.
+    def visitCatch_decl(self, ctx:ASLParser.Catch_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#catcher_decl.
+    def visitCatcher_decl(self, ctx:ASLParser.Catcher_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#catcher_stmt.
+    def visitCatcher_stmt(self, ctx:ASLParser.Catcher_stmtContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#comparison_op.
+    def visitComparison_op(self, ctx:ASLParser.Comparison_opContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#choice_operator.
+    def visitChoice_operator(self, ctx:ASLParser.Choice_operatorContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#states_error_name.
+    def visitStates_error_name(self, ctx:ASLParser.States_error_nameContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#error_name.
+    def visitError_name(self, ctx:ASLParser.Error_nameContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#json_obj_decl.
+    def visitJson_obj_decl(self, ctx:ASLParser.Json_obj_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#json_binding.
+    def visitJson_binding(self, ctx:ASLParser.Json_bindingContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#json_arr_decl.
+    def visitJson_arr_decl(self, ctx:ASLParser.Json_arr_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#json_value_decl.
+    def visitJson_value_decl(self, ctx:ASLParser.Json_value_declContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_sampler.
+    def visitString_sampler(self, ctx:ASLParser.String_samplerContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_expression_simple.
+    def visitString_expression_simple(self, ctx:ASLParser.String_expression_simpleContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_expression.
+    def visitString_expression(self, ctx:ASLParser.String_expressionContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_jsonpath.
+    def visitString_jsonpath(self, ctx:ASLParser.String_jsonpathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_context_path.
+    def visitString_context_path(self, ctx:ASLParser.String_context_pathContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_variable_sample.
+    def visitString_variable_sample(self, ctx:ASLParser.String_variable_sampleContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_intrinsic_function.
+    def visitString_intrinsic_function(self, ctx:ASLParser.String_intrinsic_functionContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_jsonata.
+    def visitString_jsonata(self, ctx:ASLParser.String_jsonataContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#string_literal.
+    def visitString_literal(self, ctx:ASLParser.String_literalContext):
+        return self.visitChildren(ctx)
+
+
+    # Visit a parse tree produced by ASLParser#soft_string_keyword.
+    def visitSoft_string_keyword(self, ctx:ASLParser.Soft_string_keywordContext):
+        return self.visitChildren(ctx)
+
+
+
+del ASLParser
\ No newline at end of file
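Unlike the listener, the generated visitor above returns values: each visit* method defaults to visitChildren, so a subclass can override selected rules and still get depth-first aggregation for everything else. A hypothetical sketch of a subclass that collects string literals (illustrative only, not code from this change):

from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser
from localstack.services.stepfunctions.asl.antlr.runtime.ASLParserVisitor import ASLParserVisitor


class StringLiteralCollector(ASLParserVisitor):
    # Records every string literal in the definition while keeping the
    # default traversal for all other rules.
    def __init__(self):
        self.literals: list[str] = []

    def visitString_literal(self, ctx: ASLParser.String_literalContext):
        self.literals.append(ctx.getText())
        return self.visitChildren(ctx)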
diff --git a/tests/integration/awslambda/functions/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/__init__.py
similarity index 100%
rename from tests/integration/awslambda/functions/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/antlr/runtime/__init__.py
diff --git a/tests/integration/awslambda/functions/python3/lambda1/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/antlt4utils/__init__.py
similarity index 100%
rename from tests/integration/awslambda/functions/python3/lambda1/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/antlt4utils/__init__.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/antlt4utils/antlr4utils.py b/localstack-core/localstack/services/stepfunctions/asl/antlt4utils/antlr4utils.py
new file mode 100644
index 0000000000000..61c7d073abb19
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/antlt4utils/antlr4utils.py
@@ -0,0 +1,35 @@
+import ast
+from typing import Optional
+
+from antlr4 import ParserRuleContext
+from antlr4.tree.Tree import ParseTree, TerminalNodeImpl
+
+
+def is_production(pt: ParseTree, rule_index: Optional[int] = None) -> Optional[ParserRuleContext]:
+    if isinstance(pt, ParserRuleContext):
+        prc = pt.getRuleContext()  # noqa
+        if rule_index is not None:
+            return prc if prc.getRuleIndex() == rule_index else None
+        return prc
+    return None
+
+
+def is_terminal(pt: ParseTree, token_type: Optional[int] = None) -> Optional[TerminalNodeImpl]:
+    if isinstance(pt, TerminalNodeImpl):
+        if token_type is not None:
+            return pt if pt.getSymbol().type == token_type else None
+        return pt
+    return None
+
+
+def from_string_literal(parser_rule_context: ParserRuleContext) -> Optional[str]:
+    string_literal = parser_rule_context.getText()
+    if string_literal.startswith('"') and string_literal.endswith('"'):
+        string_literal = string_literal[1:-1]
+    # Interpret escape sequences into their character representations
+    try:
+        string_literal = ast.literal_eval(f'"{string_literal}"')
+    except Exception:
+        # Fallback if literal_eval fails
+        pass
+    return string_literal
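The three helpers above compress the usual isinstance-plus-extract dance into Optional-returning guards, which pairs naturally with the walrus operator; from_string_literal additionally strips the surrounding quotes and decodes escape sequences. A usage sketch (the describe function and its node argument are illustrative, not part of this change):

from localstack.services.stepfunctions.asl.antlt4utils.antlr4utils import is_production, is_terminal


def describe(node) -> str:
    # Rule contexts and terminals are told apart without try/except chains.
    if (prc := is_production(node)) is not None:
        return f"rule #{prc.getRuleIndex()}"
    if (terminal := is_terminal(node)) is not None:
        return f"token {terminal.getSymbol().text!r}"
    return "unknown parse-tree node"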
diff --git a/tests/integration/awslambda/functions/python3/lambda2/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/__init__.py
similarity index 100%
rename from tests/integration/awslambda/functions/python3/lambda2/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/__init__.py
diff --git a/tests/integration/cloudformation/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/__init__.py
similarity index 100%
rename from tests/integration/cloudformation/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/__init__.py
diff --git a/tests/integration/cloudformation/api/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/__init__.py
similarity index 100%
rename from tests/integration/cloudformation/api/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/assign/__init__.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_decl.py
new file mode 100644
index 0000000000000..494fb10db595d
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_decl.py
@@ -0,0 +1,24 @@
+from typing import Any, Final
+
+from localstack.services.stepfunctions.asl.component.common.assign.assign_decl_binding import (
+    AssignDeclBinding,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class AssignDecl(EvalComponent):
+    declaration_bindings: Final[list[AssignDeclBinding]]
+
+    def __init__(self, declaration_bindings: list[AssignDeclBinding]):
+        super().__init__()
+        self.declaration_bindings = declaration_bindings
+
+    def _eval_body(self, env: Environment) -> None:
+        declarations: dict[str, Any] = dict()
+        for declaration_binding in self.declaration_bindings:
+            declaration_binding.eval(env=env)
+            binding: dict[str, Any] = env.stack.pop()
+            declarations.update(binding)
+        for identifier, value in declarations.items():
+            env.variable_store.set(variable_identifier=identifier, variable_value=value)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_decl_binding.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_decl_binding.py
new file mode 100644
index 0000000000000..8695bfea82678
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_decl_binding.py
@@ -0,0 +1,19 @@
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_binding import (
+    AssignTemplateBinding,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class AssignDeclBinding(EvalComponent):
+    binding: Final[AssignTemplateBinding]
+
+    def __init__(self, binding: AssignTemplateBinding):
+        super().__init__()
+        self.binding = binding
+
+    def _eval_body(self, env: Environment) -> None:
+        env.stack.append(dict())
+        self.binding.eval(env=env)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_binding.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_binding.py
new file mode 100644
index 0000000000000..ad7d688595195
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_binding.py
@@ -0,0 +1,56 @@
+from __future__ import annotations
+
+import abc
+from typing import Any, Final
+
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value import (
+    AssignTemplateValue,
+)
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringExpressionSimple,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class AssignTemplateBinding(EvalComponent, abc.ABC):
+    identifier: Final[str]
+
+    def __init__(self, identifier: str):
+        super().__init__()
+        self.identifier = identifier
+
+    @abc.abstractmethod
+    def _eval_value(self, env: Environment) -> Any: ...
+
+    def _eval_body(self, env: Environment) -> None:
+        assign_object: dict = env.stack.pop()
+        assign_value = self._eval_value(env=env)
+        assign_object[self.identifier] = assign_value
+        env.stack.append(assign_object)
+
+
+class AssignTemplateBindingStringExpressionSimple(AssignTemplateBinding):
+    string_expression_simple: Final[StringExpressionSimple]
+
+    def __init__(self, identifier: str, string_expression_simple: StringExpressionSimple):
+        super().__init__(identifier=identifier)
+        self.string_expression_simple = string_expression_simple
+
+    def _eval_value(self, env: Environment) -> Any:
+        self.string_expression_simple.eval(env=env)
+        value = env.stack.pop()
+        return value
+
+
+class AssignTemplateBindingValue(AssignTemplateBinding):
+    assign_value: Final[AssignTemplateValue]
+
+    def __init__(self, identifier: str, assign_value: AssignTemplateValue):
+        super().__init__(identifier=identifier)
+        self.assign_value = assign_value
+
+    def _eval_value(self, env: Environment) -> Any:
+        self.assign_value.eval(env=env)
+        value = env.stack.pop()
+        return value
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value.py
new file mode 100644
index 0000000000000..797a40f5896ac
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value.py
@@ -0,0 +1,6 @@
+import abc
+
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+
+
+class AssignTemplateValue(EvalComponent, abc.ABC): ...
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_array.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_array.py
new file mode 100644
index 0000000000000..b2ff0a71ec733
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_array.py
@@ -0,0 +1,20 @@
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value import (
+    AssignTemplateValue,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class AssignTemplateValueArray(AssignTemplateValue):
+    values: Final[list[AssignTemplateValue]]
+
+    def __init__(self, values: list[AssignTemplateValue]):
+        self.values = values
+
+    def _eval_body(self, env: Environment) -> None:
+        arr = list()
+        for value in self.values:
+            value.eval(env)
+            arr.append(env.stack.pop())
+        env.stack.append(arr)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_object.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_object.py
new file mode 100644
index 0000000000000..2b4c451595e9b
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_object.py
@@ -0,0 +1,21 @@
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_binding import (
+    AssignTemplateBinding,
+)
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value import (
+    AssignTemplateValue,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class AssignTemplateValueObject(AssignTemplateValue):
+    bindings: Final[list[AssignTemplateBinding]]
+
+    def __init__(self, bindings: list[AssignTemplateBinding]):
+        self.bindings = bindings
+
+    def _eval_body(self, env: Environment) -> None:
+        env.stack.append(dict())
+        for binding in self.bindings:
+            binding.eval(env)
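Taken together, the Assign components above form a small stack machine: AssignDeclBinding seeds an empty dict, each AssignTemplateBinding pops the partial object, attaches one evaluated key, and pushes it back, and AssignDecl finally drains the merged dict into the variable store. A stand-in sketch of that discipline (MiniEnv and the eval_* helpers are illustrative substitutes, not the real Environment or components):

class MiniEnv:
    # Stand-in for Environment: a work stack plus a variable store.
    def __init__(self):
        self.stack: list = []
        self.variable_store: dict = {}


def eval_literal(env: MiniEnv, value) -> None:
    # ~ AssignTemplateValueTerminalLit: terminal values just push their result.
    env.stack.append(value)


def eval_binding(env: MiniEnv, identifier: str, value) -> None:
    # ~ AssignTemplateBinding: pop the partial object, attach one key, re-push.
    assign_object = env.stack.pop()
    eval_literal(env, value)
    assign_object[identifier] = env.stack.pop()
    env.stack.append(assign_object)


env = MiniEnv()
env.stack.append(dict())  # ~ AssignDeclBinding seeding the empty object
eval_binding(env, "x", 42)
env.variable_store.update(env.stack.pop())  # ~ AssignDecl draining the bindings
assert env.variable_store == {"x": 42}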
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_terminal.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_terminal.py
new file mode 100644
index 0000000000000..e7c8959ae6964
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/assign/assign_template_value_terminal.py
@@ -0,0 +1,35 @@
+import abc
+from typing import Any, Final
+
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value import (
+    AssignTemplateValue,
+)
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringJSONata,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class AssignTemplateValueTerminal(AssignTemplateValue, abc.ABC): ...
+
+
+class AssignTemplateValueTerminalLit(AssignTemplateValueTerminal):
+    value: Final[Any]
+
+    def __init__(self, value: Any):
+        super().__init__()
+        self.value = value
+
+    def _eval_body(self, env: Environment) -> None:
+        env.stack.append(self.value)
+
+
+class AssignTemplateValueTerminalStringJSONata(AssignTemplateValueTerminal):
+    string_jsonata: Final[StringJSONata]
+
+    def __init__(self, string_jsonata: StringJSONata):
+        super().__init__()
+        self.string_jsonata = string_jsonata
+
+    def _eval_body(self, env: Environment) -> None:
+        self.string_jsonata.eval(env=env)
diff --git a/tests/integration/cloudformation/resources/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/__init__.py
similarity index 100%
rename from tests/integration/cloudformation/resources/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/catch/__init__.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catch_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catch_decl.py
new file mode 100644
index 0000000000000..6663b476b1571
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catch_decl.py
@@ -0,0 +1,28 @@
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.common.catch.catch_outcome import (
+    CatchOutcome,
+)
+from localstack.services.stepfunctions.asl.component.common.catch.catcher_decl import CatcherDecl
+from localstack.services.stepfunctions.asl.component.common.catch.catcher_outcome import (
+    CatcherOutcome,
+    CatcherOutcomeCaught,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class CatchDecl(EvalComponent):
+    def __init__(self, catchers: list[CatcherDecl]):
+        self.catchers: Final[list[CatcherDecl]] = catchers
+
+    def _eval_body(self, env: Environment) -> None:
+        for catcher in self.catchers:
+            catcher.eval(env)
+            catcher_outcome: CatcherOutcome = env.stack.pop()
+
+            if isinstance(catcher_outcome, CatcherOutcomeCaught):
+                env.stack.append(CatchOutcome.Caught)
+                return
+
+        env.stack.append(CatchOutcome.NotCaught)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catch_outcome.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catch_outcome.py
new file mode 100644
index 0000000000000..e31a946c6b625
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catch_outcome.py
@@ -0,0 +1,6 @@
+from enum import Enum
+
+
+class CatchOutcome(Enum):
+    Caught = 0
+    NotCaught = 1
+ ), + ), + result_path=props.get(typ=ResultPath), + assign=props.get(typ=AssignDecl), + output=props.get(typ=Output), + comment=props.get(typ=Comment), + ) + + def _eval_body(self, env: Environment) -> None: + failure_event: FailureEvent = env.stack.pop() + + env.stack.append(failure_event.error_name) + self.error_equals.eval(env) + + equals: bool = env.stack.pop() + if equals: + # Input for the catch block is the error output. + env.stack.append(env.states.get_error_output()) + + if self.assign: + self.assign.eval(env=env) + + if self.result_path: + self.result_path.eval(env) + + # Prepare the state output: successful catch states override the states' output procedure. + if self.output: + self.output.eval(env=env) + else: + output_value = env.stack.pop() + env.states.reset(output_value) + + # Append successful output to notify the outcome upstream. + env.next_state_name = self.next_decl.name + env.stack.append(CatcherOutcomeCaught()) + else: + env.stack.append(failure_event) + env.stack.append(CatcherOutcomeNotCaught()) diff --git a/localstack/services/stepfunctions/asl/component/common/catch/catcher_outcome.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catcher_outcome.py similarity index 76% rename from localstack/services/stepfunctions/asl/component/common/catch/catcher_outcome.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catcher_outcome.py index d37768355bfdb..83166764b103a 100644 --- a/localstack/services/stepfunctions/asl/component/common/catch/catcher_outcome.py +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catcher_outcome.py @@ -1,8 +1,7 @@ import abc -class CatcherOutcome(abc.ABC): - ... +class CatcherOutcome(abc.ABC): ... class CatcherOutcomeCaught(CatcherOutcome): diff --git a/localstack/services/stepfunctions/asl/component/common/catch/catcher_props.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catcher_props.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/catch/catcher_props.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/catch/catcher_props.py diff --git a/localstack/services/stepfunctions/asl/component/common/comment.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/comment.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/comment.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/comment.py diff --git a/tests/integration/s3/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/__init__.py similarity index 100% rename from tests/integration/s3/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/__init__.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/custom_error_name.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/custom_error_name.py new file mode 100644 index 0000000000000..6d4ed3954ad1f --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/custom_error_name.py @@ -0,0 +1,18 @@ +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.common.error_name.error_name import ErrorName + +ILLEGAL_CUSTOM_ERROR_PREFIX: Final[str] = "States." 
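+# For example (illustrative): CustomErrorName("MyCustomError") is accepted, while
+# CustomErrorName("States.Timeout") raises a ValueError, because the "States."
+# prefix is reserved for the interpreter's predefined error names.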
+ + +class CustomErrorName(ErrorName): + """ + States MAY report errors with other names, which MUST NOT begin with the prefix "States.". + """ + + def __init__(self, error_name: Optional[str]): + if error_name is not None and error_name.startswith(ILLEGAL_CUSTOM_ERROR_PREFIX): + raise ValueError( + f"Custom Error Names MUST NOT begin with the prefix 'States.', got '{error_name}'." + ) + super().__init__(error_name=error_name) diff --git a/localstack/services/stepfunctions/asl/component/common/error_name/error_equals_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/error_equals_decl.py similarity index 84% rename from localstack/services/stepfunctions/asl/component/common/error_name/error_equals_decl.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/error_equals_decl.py index 7e9b887403f8b..6fd2f54544b38 100644 --- a/localstack/services/stepfunctions/asl/component/common/error_name/error_equals_decl.py +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/error_equals_decl.py @@ -19,6 +19,9 @@ class ErrorEqualsDecl(EvalComponent): """ _STATE_ALL_ERROR: Final[StatesErrorName] = StatesErrorName(typ=StatesErrorNameType.StatesALL) + _STATE_TASK_ERROR: Final[StatesErrorName] = StatesErrorName( + typ=StatesErrorNameType.StatesTaskFailed + ) def __init__(self, error_names: list[ErrorName]): # The reserved name "States.ALL" in a Retrier’s "ErrorEquals" field is a wildcard @@ -45,6 +48,11 @@ def _eval_body(self, env: Environment) -> None: if ErrorEqualsDecl._STATE_ALL_ERROR in self.error_names: res = True + elif ( + ErrorEqualsDecl._STATE_TASK_ERROR in self.error_names + and not isinstance(error_name, StatesErrorName) + ): # TODO: consider binding a 'context' variable to error_names to more formally detect their evaluation type. 
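+            # "States.TaskFailed" behaves as a wildcard here: it matches any raised
+            # error that is not one of the interpreter's own States.* names,
+            # approximated by the error name not being a StatesErrorName instance.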
+ res = True else: res = error_name in self.error_names diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/error_name.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/error_name.py new file mode 100644 index 0000000000000..50e09e290aa4f --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/error_name.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +import abc +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.component import Component + + +class ErrorName(Component, abc.ABC): + error_name: Final[Optional[str]] + + def __init__(self, error_name: Optional[str]): + self.error_name = error_name + + def matches(self, error_name: Optional[str]) -> bool: + return self.error_name == error_name + + def __eq__(self, other): + if isinstance(other, ErrorName): + return self.matches(other.error_name) + return False diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/failure_event.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/failure_event.py new file mode 100644 index 0000000000000..4624ea025395b --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/failure_event.py @@ -0,0 +1,103 @@ +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import ( + EvaluationFailedEventDetails, + ExecutionFailedEventDetails, + HistoryEventType, +) +from localstack.services.stepfunctions.asl.component.common.error_name.error_name import ErrorName +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails + + +class FailureEvent: + state_name: Final[str] + source_event_id: Final[int] + error_name: Final[Optional[ErrorName]] + event_type: Final[HistoryEventType] + event_details: Final[Optional[EventDetails]] + + def __init__( + self, + env: Environment, + error_name: Optional[ErrorName], + event_type: HistoryEventType, + event_details: Optional[EventDetails] = None, + ): + self.state_name = env.next_state_name + self.source_event_id = env.event_history_context.source_event_id + self.error_name = error_name + self.event_type = event_type + self.event_details = event_details + + +class FailureEventException(Exception): + failure_event: Final[FailureEvent] + + def __init__(self, failure_event: FailureEvent): + self.failure_event = failure_event + + def extract_error_cause_pair(self) -> Optional[tuple[Optional[str], Optional[str]]]: + if self.failure_event.event_details is None: + return None + + failure_event_spec = list(self.failure_event.event_details.values())[0] + + error = None + cause = None + if "error" in failure_event_spec: + error = failure_event_spec["error"] + if "cause" in failure_event_spec: + cause = failure_event_spec["cause"] + return error, cause + + def get_evaluation_failed_event_details(self) -> Optional[EvaluationFailedEventDetails]: + original_failed_event_details = self.failure_event.event_details[ + "evaluationFailedEventDetails" + ] + evaluation_failed_event_details = EvaluationFailedEventDetails() + + error = original_failed_event_details["error"] + cause = original_failed_event_details["cause"] + location = 
original_failed_event_details.get("location")
+        state_name = self.failure_event.state_name
+
+        if error != StatesErrorNameType.StatesQueryEvaluationError.to_name():
+            return None
+        if error:
+            evaluation_failed_event_details["error"] = error
+        if cause:
+            event_id = self.failure_event.source_event_id
+            decorated_cause = f"An error occurred while executing the state '{state_name}' (entered at the event id #{event_id}). {cause}"
+            evaluation_failed_event_details["cause"] = decorated_cause
+        if location:
+            evaluation_failed_event_details["location"] = location
+        if state_name:
+            evaluation_failed_event_details["state"] = state_name
+
+        return evaluation_failed_event_details
+
+    def get_execution_failed_event_details(self) -> Optional[ExecutionFailedEventDetails]:
+        maybe_error_cause_pair = self.extract_error_cause_pair()
+        if maybe_error_cause_pair is None:
+            return None
+        execution_failed_event_details = ExecutionFailedEventDetails()
+        error, cause = maybe_error_cause_pair
+        if error:
+            execution_failed_event_details["error"] = error
+        if cause:
+            if (
+                error == StatesErrorNameType.StatesRuntime.to_name()
+                or error == StatesErrorNameType.StatesQueryEvaluationError.to_name()
+            ):
+                state_name = self.failure_event.state_name
+                event_id = self.failure_event.source_event_id
+                decorated_cause = f"An error occurred while executing the state '{state_name}' (entered at the event id #{event_id}). {cause}"
+                execution_failed_event_details["cause"] = decorated_cause
+            else:
+                execution_failed_event_details["cause"] = cause
+
+        return execution_failed_event_details
diff --git a/localstack/services/stepfunctions/asl/component/common/error_name/states_error_name.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/states_error_name.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/error_name/states_error_name.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/states_error_name.py
diff --git a/localstack/services/stepfunctions/asl/component/common/error_name/states_error_name_type.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/states_error_name_type.py
similarity index 90%
rename from localstack/services/stepfunctions/asl/component/common/error_name/states_error_name_type.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/states_error_name_type.py
index 1d9a2986a1307..9dcda9350ffcd 100644
--- a/localstack/services/stepfunctions/asl/component/common/error_name/states_error_name_type.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/error_name/states_error_name_type.py
@@ -8,6 +8,7 @@ class StatesErrorNameType(Enum):
     StatesALL = ASLLexer.ERRORNAMEStatesALL
+    StatesDataLimitExceeded = ASLLexer.ERRORNAMEStatesDataLimitExceeded
     StatesHeartbeatTimeout = ASLLexer.ERRORNAMEStatesHeartbeatTimeout
     StatesTimeout = ASLLexer.ERRORNAMEStatesTimeout
     StatesTaskFailed = ASLLexer.ERRORNAMEStatesTaskFailed
@@ -20,6 +21,8 @@ class StatesErrorNameType(Enum):
     StatesExceedToleratedFailureThreshold = ASLLexer.ERRORNAMEStatesExceedToleratedFailureThreshold
     StatesItemReaderFailed = ASLLexer.ERRORNAMEStatesItemReaderFailed
     StatesResultWriterFailed = ASLLexer.ERRORNAMEStatesResultWriterFailed
+    StatesRuntime = ASLLexer.ERRORNAMEStatesRuntime
+    StatesQueryEvaluationError = ASLLexer.ERRORNAMEStatesQueryEvaluationError
 
     def to_name(self) -> str:
         return _error_name(self)
diff --git 
a/tests/integration/secretsmanager/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/flow/__init__.py similarity index 100% rename from tests/integration/secretsmanager/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/flow/__init__.py diff --git a/localstack/services/stepfunctions/asl/component/common/flow/end.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/flow/end.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/flow/end.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/flow/end.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/flow/next.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/flow/next.py new file mode 100644 index 0000000000000..d64a341646fb4 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/flow/next.py @@ -0,0 +1,11 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.component import Component + + +class Next(Component): + name: Final[str] + + def __init__(self, name: str): + # The name of the next state that is run when the current state finishes. + self.name = name diff --git a/localstack/services/stepfunctions/asl/component/common/flow/start_at.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/flow/start_at.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/flow/start_at.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/flow/start_at.py diff --git a/tests/integration/secretsmanager/functions/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/__init__.py similarity index 100% rename from tests/integration/secretsmanager/functions/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/__init__.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_binding.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_binding.py new file mode 100644 index 0000000000000..3833f14c0abdc --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_binding.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value import ( + JSONataTemplateValue, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class JSONataTemplateBinding(EvalComponent): + identifier: Final[str] + value: Final[JSONataTemplateValue] + + def __init__(self, identifier: str, value: JSONataTemplateValue): + self.identifier = identifier + self.value = value + + def _field_name(self) -> Optional[str]: + return self.identifier + + def _eval_body(self, env: Environment) -> None: + binding_container: dict = env.stack.pop() + self.value.eval(env=env) + value = env.stack.pop() + binding_container[self.identifier] = value + env.stack.append(binding_container) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value.py 
b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value.py new file mode 100644 index 0000000000000..d1f48c79c9210 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value.py @@ -0,0 +1,6 @@ +import abc + +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent + + +class JSONataTemplateValue(EvalComponent, abc.ABC): ... diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_array.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_array.py new file mode 100644 index 0000000000000..552b168299e2a --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_array.py @@ -0,0 +1,20 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value import ( + JSONataTemplateValue, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class JSONataTemplateValueArray(JSONataTemplateValue): + values: Final[list[JSONataTemplateValue]] + + def __init__(self, values: list[JSONataTemplateValue]): + self.values = values + + def _eval_body(self, env: Environment) -> None: + arr = list() + for value in self.values: + value.eval(env) + arr.append(env.stack.pop()) + env.stack.append(arr) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_object.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_object.py new file mode 100644 index 0000000000000..81b1c19a00c53 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_object.py @@ -0,0 +1,21 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_binding import ( + JSONataTemplateBinding, +) +from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value import ( + JSONataTemplateValue, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class JSONataTemplateValueObject(JSONataTemplateValue): + bindings: Final[list[JSONataTemplateBinding]] + + def __init__(self, bindings: list[JSONataTemplateBinding]): + self.bindings = bindings + + def _eval_body(self, env: Environment) -> None: + env.stack.append(dict()) + for binding in self.bindings: + binding.eval(env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_terminal.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_terminal.py new file mode 100644 index 0000000000000..97ce01ef43f00 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/jsonata/jsonata_template_value_terminal.py @@ -0,0 +1,35 @@ +import abc +from typing import Any, Final + +from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value import ( + JSONataTemplateValue, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringJSONata, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class JSONataTemplateValueTerminal(JSONataTemplateValue, abc.ABC): ... 
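+# The template value classes evaluate a JSONata template bottom-up over the
+# environment stack: terminals push their literal or JSONata result, arrays pop one
+# value per element, and objects push a dict that each binding fills in. For example,
+# a template such as {"a": 1, "b": "{% $x %}"} (illustrative) becomes an object with
+# two bindings, the second wrapping a JSONataTemplateValueTerminalStringJSONata.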
+ + +class JSONataTemplateValueTerminalLit(JSONataTemplateValueTerminal): + value: Final[Any] + + def __init__(self, value: Any): + super().__init__() + self.value = value + + def _eval_body(self, env: Environment) -> None: + env.stack.append(self.value) + + +class JSONataTemplateValueTerminalStringJSONata(JSONataTemplateValueTerminal): + string_jsonata: Final[StringJSONata] + + def __init__(self, string_jsonata: StringJSONata): + super().__init__() + self.string_jsonata = string_jsonata + + def _eval_body(self, env: Environment) -> None: + self.string_jsonata.eval(env=env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/outputdecl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/outputdecl.py new file mode 100644 index 0000000000000..9ddf3471204f8 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/outputdecl.py @@ -0,0 +1,19 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value import ( + JSONataTemplateValue, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class Output(EvalComponent): + jsonata_template_value: Final[JSONataTemplateValue] + + def __init__(self, jsonata_template_value: JSONataTemplateValue): + self.jsonata_template_value = jsonata_template_value + + def _eval_body(self, env: Environment) -> None: + self.jsonata_template_value.eval(env=env) + output_value = env.stack.pop() + env.states.reset(input_value=output_value) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/parargs.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/parargs.py new file mode 100644 index 0000000000000..5741e5de3c23d --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/parargs.py @@ -0,0 +1,42 @@ +import abc +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value_object import ( + JSONataTemplateValueObject, +) +from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadtmpl.payload_tmpl import ( + PayloadTmpl, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringJSONata, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class Parargs(EvalComponent, abc.ABC): + template_eval_component: Final[EvalComponent] + + def __init__(self, template_eval_component: EvalComponent): + self.template_eval_component = template_eval_component + + def _eval_body(self, env: Environment) -> None: + self.template_eval_component.eval(env=env) + + +class Parameters(Parargs): + def __init__(self, payload_tmpl: PayloadTmpl): + super().__init__(template_eval_component=payload_tmpl) + + +class Arguments(Parargs, abc.ABC): ... 
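+# Both JSONPath "Parameters" and JSONata "Arguments" reduce to the same behaviour:
+# evaluation is delegated to the wrapped template component, which leaves the
+# computed payload on the environment stack for the calling state to consume.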
+ + +class ArgumentsJSONataTemplateValueObject(Arguments): + def __init__(self, jsonata_template_value_object: JSONataTemplateValueObject): + super().__init__(template_eval_component=jsonata_template_value_object) + + +class ArgumentsStringJSONata(Arguments): + def __init__(self, string_jsonata: StringJSONata): + super().__init__(template_eval_component=string_jsonata) diff --git a/tests/integration/stepfunctions/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/__init__.py similarity index 100% rename from tests/integration/stepfunctions/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/path/__init__.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/path/input_path.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/input_path.py new file mode 100644 index 0000000000000..8c0d4e6cbb4e7 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/input_path.py @@ -0,0 +1,53 @@ +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringJsonPath, + StringSampler, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.json_path import NoSuchJsonPathError + + +class InputPath(EvalComponent): + string_sampler: Final[Optional[StringSampler]] + + def __init__(self, string_sampler: Optional[StringSampler]): + self.string_sampler = string_sampler + + def _eval_body(self, env: Environment) -> None: + if self.string_sampler is None: + env.stack.append(dict()) + return + if isinstance(self.string_sampler, StringJsonPath): + # JsonPaths are sampled from a given state, hence pass the state's input. 
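+            # Note that StringJsonPath peeks at the top of the stack rather than
+            # popping it, and pushes the sampled result on top of the input.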
+ env.stack.append(env.states.get_input()) + try: + self.string_sampler.eval(env=env) + except NoSuchJsonPathError as no_such_json_path_error: + json_path = no_such_json_path_error.json_path + cause = f"Invalid path '{json_path}' : No results for path: $['{json_path[2:]}']" + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=StatesErrorNameType.StatesRuntime.to_name(), cause=cause + ) + ), + ) + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/path/items_path.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/items_path.py new file mode 100644 index 0000000000000..05991bd37dfa6 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/items_path.py @@ -0,0 +1,17 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringSampler, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class ItemsPath(EvalComponent): + string_sampler: Final[StringSampler] + + def __init__(self, string_sampler: StringSampler): + self.string_sampler = string_sampler + + def _eval_body(self, env: Environment) -> None: + self.string_sampler.eval(env=env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/path/output_path.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/output_path.py new file mode 100644 index 0000000000000..b40586aa8e716 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/output_path.py @@ -0,0 +1,51 @@ +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringSampler, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.json_path import NoSuchJsonPathError + + +class OutputPath(EvalComponent): + string_sampler: Final[Optional[StringSampler]] + + def __init__(self, string_sampler: Optional[StringSampler]): + self.string_sampler = string_sampler + + def _eval_body(self, env: Environment) -> None: + if self.string_sampler is None: + env.states.reset(input_value=dict()) + return + try: + self.string_sampler.eval(env=env) + except NoSuchJsonPathError as no_such_json_path_error: + json_path = no_such_json_path_error.json_path + cause = f"Invalid path '{json_path}' : No results for path: $['{json_path[2:]}']" + raise FailureEventException( + failure_event=FailureEvent( + env=env, + 
error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime),
+                    event_type=HistoryEventType.TaskFailed,
+                    event_details=EventDetails(
+                        taskFailedEventDetails=TaskFailedEventDetails(
+                            error=StatesErrorNameType.StatesRuntime.to_name(), cause=cause
+                        )
+                    ),
+                )
+            )
+        output_value = env.stack.pop()
+        env.states.reset(output_value)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/path/result_path.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/result_path.py
new file mode 100644
index 0000000000000..bfcb3f2cfe91d
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/path/result_path.py
@@ -0,0 +1,31 @@
+import copy
+from typing import Final, Optional
+
+from jsonpath_ng import parse
+
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ResultPath(EvalComponent):
+    DEFAULT_PATH: Final[str] = "$"
+
+    result_path_src: Final[Optional[str]]
+
+    def __init__(self, result_path_src: Optional[str]):
+        self.result_path_src = result_path_src
+
+    def _eval_body(self, env: Environment) -> None:
+        state_input = env.states.get_input()
+
+        # Discard task output if there is one, and set the output to be the state's input.
+        if self.result_path_src is None:
+            env.stack.clear()
+            env.stack.append(state_input)
+            return
+
+        # Insert the current output into the state's input at the result path.
+        current_output = env.stack.pop()
+        result_expr = parse(self.result_path_src)
+        state_output = result_expr.update_or_create(state_input, copy.deepcopy(current_output))
+        env.stack.append(state_output)
diff --git a/tests/integration/stepfunctions/lambda_functions/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/__init__.py
similarity index 100%
rename from tests/integration/stepfunctions/lambda_functions/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/__init__.py
diff --git a/tests/integration/stepfunctions/lambda_functions/base/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/__init__.py
similarity index 100%
rename from tests/integration/stepfunctions/lambda_functions/base/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/__init__.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payload_value.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payload_value.py
new file mode 100644
index 0000000000000..773195e38fe20
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payload_value.py
@@ -0,0 +1,6 @@
+import abc
+
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+
+
+class PayloadValue(EvalComponent, abc.ABC): ...
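Note: every component above follows the same stack-based evaluation contract: _eval_body pops its operands from env.stack and pushes its result. A minimal sketch of that contract, using a hypothetical Environment stub rather than the real class:

from typing import Any


class Environment:
    # Hypothetical stub: only mirrors the real Environment's stack attribute.
    def __init__(self) -> None:
        self.stack: list[Any] = []


class AddBinding:
    # Sketch of the binding pattern shared by PayloadBinding and
    # AssignTemplateBinding: pop the container, compute the value, assign it
    # under the field name, and push the container back.
    def __init__(self, field: str, value: Any):
        self.field = field
        self.value = value

    def eval(self, env: Environment) -> None:
        container: dict = env.stack.pop()
        container[self.field] = self.value
        env.stack.append(container)


env = Environment()
env.stack.append({})  # an object component seeds the container before its bindings run
AddBinding("a", 1).eval(env)
AddBinding("b", 2).eval(env)
assert env.stack.pop() == {"a": 1, "b": 2}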
diff --git a/tests/integration/stepfunctions/legacy/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadarr/__init__.py similarity index 100% rename from tests/integration/stepfunctions/legacy/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadarr/__init__.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadarr/payload_arr.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadarr/payload_arr.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadarr/payload_arr.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadarr/payload_arr.py diff --git a/tests/integration/stepfunctions/templates/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadbinding/__init__.py similarity index 100% rename from tests/integration/stepfunctions/templates/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadbinding/__init__.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadbinding/payload_binding.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadbinding/payload_binding.py new file mode 100644 index 0000000000000..1b7d7fb527634 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadbinding/payload_binding.py @@ -0,0 +1,58 @@ +import abc +from typing import Any, Final, Optional + +from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payload_value import ( + PayloadValue, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringExpressionSimple, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class PayloadBinding(PayloadValue, abc.ABC): + field: Final[str] + + def __init__(self, field: str): + self.field = field + + def _field_name(self) -> Optional[str]: + return self.field + + @abc.abstractmethod + def _eval_val(self, env: Environment) -> Any: ... 
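+    # Template method: _eval_body below drives the shared pop-assign-push flow,
+    # while subclasses supply the value, either by sampling a simple string
+    # expression (field names suffixed with ".$") or by evaluating a nested
+    # payload value.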
+ + def _eval_body(self, env: Environment) -> None: + cnt: dict = env.stack.pop() + val = self._eval_val(env=env) + cnt[self.field] = val + env.stack.append(cnt) + + +class PayloadBindingStringExpressionSimple(PayloadBinding): + string_expression_simple: Final[StringExpressionSimple] + + def __init__(self, field: str, string_expression_simple: StringExpressionSimple): + super().__init__(field=field) + self.string_expression_simple = string_expression_simple + + def _field_name(self) -> Optional[str]: + return f"{self.field}.$" + + def _eval_val(self, env: Environment) -> Any: + self.string_expression_simple.eval(env=env) + value = env.stack.pop() + return value + + +class PayloadBindingValue(PayloadBinding): + payload_value: Final[PayloadValue] + + def __init__(self, field: str, payload_value: PayloadValue): + super().__init__(field=field) + self.payload_value = payload_value + + def _eval_val(self, env: Environment) -> Any: + self.payload_value.eval(env) + val: Any = env.stack.pop() + return val diff --git a/tests/integration/stepfunctions/v2/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadtmpl/__init__.py similarity index 100% rename from tests/integration/stepfunctions/v2/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadtmpl/__init__.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadtmpl/payload_tmpl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadtmpl/payload_tmpl.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadtmpl/payload_tmpl.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadtmpl/payload_tmpl.py diff --git a/tests/unit/persistence/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/__init__.py similarity index 100% rename from tests/unit/persistence/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/__init__.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_bool.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_bool.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_bool.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_bool.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_float.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_float.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_float.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_float.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_int.py 
b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_int.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_int.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_int.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_lit.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_lit.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_lit.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_lit.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_null.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_null.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_null.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_null.py diff --git a/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_str.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_str.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_str.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/payload/payloadvalue/payloadvaluelit/payload_value_str.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/query_language.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/query_language.py new file mode 100644 index 0000000000000..a1c97e255a7bc --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/query_language.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +import enum +from typing import Final + +from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer +from localstack.services.stepfunctions.asl.component.component import Component + + +class QueryLanguageMode(enum.Enum): + JSONPath = ASLLexer.JSONPATH + JSONata = ASLLexer.JSONATA + + def __str__(self): + return self.name + + def __repr__(self): + return f"QueryLanguageMode.{self}({self.value})" + + +DEFAULT_QUERY_LANGUAGE_MODE: Final[QueryLanguageMode] = QueryLanguageMode.JSONPath + + +class QueryLanguage(Component): + query_language_mode: Final[QueryLanguageMode] + + def __init__(self, query_language_mode: QueryLanguageMode = DEFAULT_QUERY_LANGUAGE_MODE): + self.query_language_mode = query_language_mode diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/result_selector.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/result_selector.py new file mode 100644 index 0000000000000..b194c514d8fb9 --- /dev/null +++ 
b/localstack-core/localstack/services/stepfunctions/asl/component/common/result_selector.py @@ -0,0 +1,17 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadtmpl.payload_tmpl import ( + PayloadTmpl, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class ResultSelector(EvalComponent): + payload_tmpl: Final[PayloadTmpl] + + def __init__(self, payload_tmpl: PayloadTmpl): + self.payload_tmpl = payload_tmpl + + def _eval_body(self, env: Environment) -> None: + self.payload_tmpl.eval(env=env) diff --git a/tests/unit/services/awslambda/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/__init__.py similarity index 100% rename from tests/unit/services/awslambda/__init__.py rename to localstack-core/localstack/services/stepfunctions/asl/component/common/retry/__init__.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/backoff_rate_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/backoff_rate_decl.py new file mode 100644 index 0000000000000..6fa1d37ac578b --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/backoff_rate_decl.py @@ -0,0 +1,41 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class BackoffRateDecl(EvalComponent): + """ + "BackoffRate": a number which is the multiplier that increases the retry interval on each + attempt (default: 2.0). The value of BackoffRate MUST be greater than or equal to 1.0. + """ + + DEFAULT_RATE: Final[float] = 2.0 + MIN_RATE: Final[float] = 1.0 + + def __init__(self, rate: float = DEFAULT_RATE): + if not (rate >= self.MIN_RATE): + raise ValueError( + f"The value of BackoffRate MUST be greater than or equal to {BackoffRateDecl.MIN_RATE}, got '{rate}'." 
+ ) + self.rate: Final[float] = rate + + def _next_multiplier_key(self) -> str: + return f"BackoffRateDecl-{self.heap_key}-next_multiplier" + + def _access_next_multiplier(self, env: Environment) -> float: + return env.heap.get(self._next_multiplier_key(), 1.0) + + def _store_next_multiplier(self, env: Environment, next_multiplier: float) -> None: + env.heap[self._next_multiplier_key()] = next_multiplier + + def _eval_body(self, env: Environment) -> None: + interval_seconds: int = env.stack.pop() + + next_multiplier: float = self._access_next_multiplier(env=env) + + next_interval_seconds = interval_seconds * next_multiplier + env.stack.append(next_interval_seconds) + + next_multiplier *= self.rate + self._store_next_multiplier(env=env, next_multiplier=next_multiplier) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/interval_seconds_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/interval_seconds_decl.py new file mode 100644 index 0000000000000..8772c0276e290 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/interval_seconds_decl.py @@ -0,0 +1,26 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class IntervalSecondsDecl(EvalComponent): + """ + IntervalSeconds: its value MUST be a positive integer, representing the number of seconds before the + first retry attempt (default value: 1); + """ + + DEFAULT_SECONDS: Final[int] = 1 + MAX_VALUE: Final[int] = 99999999 + + def __init__(self, seconds: int = DEFAULT_SECONDS): + if not (1 <= seconds <= IntervalSecondsDecl.MAX_VALUE): + raise ValueError( + f"IntervalSeconds value MUST be a positive integer between " + f"1 and {IntervalSecondsDecl.MAX_VALUE}, got '{seconds}'." 
+            )
+
+        self.seconds: Final[int] = seconds
+
+    def _eval_body(self, env: Environment) -> None:
+        env.stack.append(self.seconds)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/jitter_strategy_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/jitter_strategy_decl.py
new file mode 100644
index 0000000000000..4f0a641946d73
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/jitter_strategy_decl.py
@@ -0,0 +1,35 @@
+import enum
+import random
+from typing import Final
+
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class JitterStrategy(enum.Enum):
+    FULL = ASLLexer.FULL
+    NONE = ASLLexer.NONE
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return f"JitterStrategy.{self}({self.value})"
+
+
+class JitterStrategyDecl(EvalComponent):
+    DEFAULT_STRATEGY: Final[JitterStrategy] = JitterStrategy.NONE
+
+    jitter_strategy: Final[JitterStrategy]
+
+    def __init__(self, jitter_strategy: JitterStrategy = JitterStrategy.NONE):
+        self.jitter_strategy = jitter_strategy
+
+    def _eval_body(self, env: Environment) -> None:
+        if self.jitter_strategy == JitterStrategy.NONE:
+            return
+
+        interval_seconds = env.stack.pop()
+        jitter_interval = random.uniform(0, interval_seconds)
+        env.stack.append(jitter_interval)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/max_attempts_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/max_attempts_decl.py
new file mode 100644
index 0000000000000..ef8f71ad396b4
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/max_attempts_decl.py
@@ -0,0 +1,45 @@
+import enum
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class MaxAttemptsOutcome(enum.Enum):
+    EXHAUSTED = False
+    SUCCESS = True
+
+
+class MaxAttemptsDecl(EvalComponent):
+    """
+    "MaxAttempts": value MUST be a non-negative integer, representing the maximum number
+    of retry attempts (default: 3)
+    """
+
+    DEFAULT_ATTEMPTS: Final[int] = 3
+    MAX_VALUE: Final[int] = 99999999
+
+    attempts: Final[int]
+
+    def __init__(self, attempts: int = DEFAULT_ATTEMPTS):
+        if not (0 <= attempts <= MaxAttemptsDecl.MAX_VALUE):
+            raise ValueError(
+                f"MaxAttempts value MUST be a non-negative integer between "
+                f"0 and {MaxAttemptsDecl.MAX_VALUE}, got '{attempts}'."
+            )
+        self.attempts = attempts
+
+    def _attempt_number_key(self) -> str:
+        return f"MaxAttemptsDecl-{self.heap_key}-attempt_number"
+
+    def _access_attempt_number(self, env: Environment) -> int:
+        return env.heap.get(self._attempt_number_key(), -1)
+
+    def _store_attempt_number(self, env: Environment, attempt_number: int) -> None:
+        env.heap[self._attempt_number_key()] = attempt_number
+
+    def _eval_body(self, env: Environment) -> None:
+        attempt_number: int = self._access_attempt_number(env=env)
+        attempt_number += 1
+        env.stack.append(MaxAttemptsOutcome(attempt_number < self.attempts))
+        self._store_attempt_number(env=env, attempt_number=attempt_number)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/max_delay_seconds_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/max_delay_seconds_decl.py
new file mode 100644
index 0000000000000..9e1a57352f1cd
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/max_delay_seconds_decl.py
@@ -0,0 +1,24 @@
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class MaxDelaySecondsDecl(EvalComponent):
+    MAX_VALUE: Final[int] = 31622401
+
+    max_delays_seconds: Final[int]
+
+    def __init__(self, max_delays_seconds: int = MAX_VALUE):
+        if not (1 <= max_delays_seconds <= MaxDelaySecondsDecl.MAX_VALUE):
+            raise ValueError(
+                f"MaxDelaySeconds value MUST be a positive integer between "
+                f"1 and {MaxDelaySecondsDecl.MAX_VALUE}, got '{max_delays_seconds}'."
+            )
+
+        self.max_delays_seconds = max_delays_seconds
+
+    def _eval_body(self, env: Environment) -> None:
+        interval_seconds = env.stack.pop()
+        new_interval_seconds = min(interval_seconds, self.max_delays_seconds)
+        env.stack.append(new_interval_seconds)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retrier_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retrier_decl.py
new file mode 100644
index 0000000000000..108a4f97790e5
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retrier_decl.py
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+import time
+from typing import Final, Optional
+
+from localstack.services.stepfunctions.asl.component.common.comment import Comment
+from localstack.services.stepfunctions.asl.component.common.error_name.error_equals_decl import (
+    ErrorEqualsDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.backoff_rate_decl import (
+    BackoffRateDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.interval_seconds_decl import (
+    IntervalSecondsDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.jitter_strategy_decl import (
+    JitterStrategyDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.max_attempts_decl import (
+    MaxAttemptsDecl,
+    MaxAttemptsOutcome,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.max_delay_seconds_decl import (
+    MaxDelaySecondsDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.retrier_outcome import (
+    RetrierOutcome,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.retrier_props import RetrierProps
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from 
localstack.services.stepfunctions.asl.eval.environment import Environment + + +class RetrierDecl(EvalComponent): + error_equals: Final[ErrorEqualsDecl] + interval_seconds: Final[IntervalSecondsDecl] + max_attempts: Final[MaxAttemptsDecl] + backoff_rate: Final[BackoffRateDecl] + max_delay_seconds: Final[MaxDelaySecondsDecl] + jitter_strategy: Final[JitterStrategyDecl] + comment: Final[Optional[Comment]] + + def __init__( + self, + error_equals: ErrorEqualsDecl, + interval_seconds: Optional[IntervalSecondsDecl] = None, + max_attempts: Optional[MaxAttemptsDecl] = None, + backoff_rate: Optional[BackoffRateDecl] = None, + max_delay_seconds: Optional[MaxDelaySecondsDecl] = None, + jitter_strategy: Optional[JitterStrategyDecl] = None, + comment: Optional[Comment] = None, + ): + self.error_equals = error_equals + self.interval_seconds = interval_seconds or IntervalSecondsDecl() + self.max_attempts = max_attempts or MaxAttemptsDecl() + self.backoff_rate = backoff_rate or BackoffRateDecl() + self.max_delay_seconds = max_delay_seconds or MaxDelaySecondsDecl() + self.jitter_strategy = jitter_strategy or JitterStrategyDecl() + self.comment = comment + + @classmethod + def from_retrier_props(cls, props: RetrierProps) -> RetrierDecl: + return cls( + error_equals=props.get( + typ=ErrorEqualsDecl, + raise_on_missing=ValueError( + f"Missing ErrorEquals declaration for Retrier declaration, in props '{props}'." + ), + ), + interval_seconds=props.get(IntervalSecondsDecl), + max_attempts=props.get(MaxAttemptsDecl), + backoff_rate=props.get(BackoffRateDecl), + max_delay_seconds=props.get(MaxDelaySecondsDecl), + jitter_strategy=props.get(JitterStrategyDecl), + comment=props.get(Comment), + ) + + def _eval_body(self, env: Environment) -> None: + # When a state reports an error, the interpreter scans through the Retriers and, when the Error Name appears + # in the value of a Retrier’s "ErrorEquals" field, implements the retry policy described in that Retrier. + + self.error_equals.eval(env) + res: bool = env.stack.pop() + + # This Retrier does not match + if not res: + env.stack.append(RetrierOutcome.Skipped) + return + + # Request another attempt. + self.max_attempts.eval(env=env) + max_attempts_outcome = env.stack.pop() + if max_attempts_outcome == MaxAttemptsOutcome.EXHAUSTED: + env.stack.append(RetrierOutcome.Failed) + return + + # Compute the next interval. + self.interval_seconds.eval(env=env) + self.backoff_rate.eval(env=env) + self.max_delay_seconds.eval(env=env) + self.jitter_strategy.eval(env=env) + + # Execute wait. 
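+        # The stack now holds the interval produced by the IntervalSeconds ->
+        # BackoffRate -> MaxDelaySeconds -> JitterStrategy pipeline, each stage
+        # popping the previous value and pushing its adjustment.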
+        interval_seconds: float = env.stack.pop()
+        time.sleep(interval_seconds)
+
+        env.stack.append(RetrierOutcome.Executed)
diff --git a/localstack/services/stepfunctions/asl/component/common/retry/retrier_outcome.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retrier_outcome.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/retry/retrier_outcome.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retrier_outcome.py
diff --git a/localstack/services/stepfunctions/asl/component/common/retry/retrier_props.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retrier_props.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/retry/retrier_props.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retrier_props.py
diff --git a/localstack/services/stepfunctions/asl/component/common/retry/retry_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retry_decl.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/retry/retry_decl.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retry_decl.py
diff --git a/localstack/services/stepfunctions/asl/component/common/retry/retry_outcome.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retry_outcome.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/common/retry/retry_outcome.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/retry/retry_outcome.py
diff --git a/tests/unit/utils/testing/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/string/__init__.py
similarity index 100%
rename from tests/unit/utils/testing/__init__.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/common/string/__init__.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/string/string_expression.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/string/string_expression.py
new file mode 100644
index 0000000000000..3f4be28c7e14c
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/string/string_expression.py
@@ -0,0 +1,209 @@
+import abc
+import copy
+from typing import Any, Final, Optional
+
+from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails
+from localstack.services.events.utils import to_json_str
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguageMode
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.component.intrinsic.jsonata import (
+    get_intrinsic_functions_declarations,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+from localstack.services.stepfunctions.asl.jsonata.jsonata import (
+    JSONataExpression,
+    VariableDeclarations,
+    VariableReference,
+    compose_jsonata_expression,
+    eval_jsonata_expression,
+    extract_jsonata_variable_references,
+)
+from localstack.services.stepfunctions.asl.jsonata.validations import (
+    validate_jsonata_expression_output,
+)
+from localstack.services.stepfunctions.asl.utils.json_path import (
+    NoSuchJsonPathError,
+    extract_json,
+)
+
+JSONPATH_ROOT_PATH: Final[str] = "$"
+
+
+class StringExpression(EvalComponent, abc.ABC):
+    literal_value: Final[str]
+
+    def __init__(self, literal_value: str):
+        self.literal_value = literal_value
+
+    def _field_name(self) -> Optional[str]:
+        return None
+
+
+class StringExpressionSimple(StringExpression, abc.ABC): ...
+
+
+class StringSampler(StringExpressionSimple, abc.ABC): ...
+
+
+class StringLiteral(StringExpression):
+    def _eval_body(self, env: Environment) -> None:
+        env.stack.append(self.literal_value)
+
+
+class StringJsonPath(StringSampler):
+    json_path: Final[str]
+
+    def __init__(self, json_path: str):
+        super().__init__(literal_value=json_path)
+        self.json_path = json_path
+
+    def _eval_body(self, env: Environment) -> None:
+        input_value: Any = env.stack[-1]
+        if self.json_path == JSONPATH_ROOT_PATH:
+            output_value = input_value
+        else:
+            output_value = extract_json(self.json_path, input_value)
+        # TODO: introduce copy on write approach
+        env.stack.append(copy.deepcopy(output_value))
+
+
+class StringContextPath(StringJsonPath):
+    context_object_path: Final[str]
+
+    def __init__(self, context_object_path: str):
+        json_path = context_object_path[1:]
+        super().__init__(json_path=json_path)
+        self.context_object_path = context_object_path
+
+    def _eval_body(self, env: Environment) -> None:
+        input_value = env.states.context_object.context_object_data
+        if self.json_path == JSONPATH_ROOT_PATH:
+            output_value = input_value
+        else:
+            try:
+                output_value = extract_json(self.json_path, input_value)
+            except NoSuchJsonPathError:
+                input_value_json_str = to_json_str(input_value)
+                cause = (
+                    f"The JSONPath '${self.json_path}' specified for the field '{env.next_field_name}' "
+                    f"could not be found in the input '{input_value_json_str}'"
+                )
+                raise FailureEventException(
+                    failure_event=FailureEvent(
+                        env=env,
+                        error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime),
+                        event_type=HistoryEventType.TaskFailed,
+                        event_details=EventDetails(
+                            taskFailedEventDetails=TaskFailedEventDetails(
+                                error=StatesErrorNameType.StatesRuntime.to_name(), cause=cause
+                            )
+                        ),
+                    )
+                )
+        # TODO: introduce copy on write approach
+        env.stack.append(copy.deepcopy(output_value))
+
+
+class StringVariableSample(StringSampler):
+    query_language_mode: Final[QueryLanguageMode]
+    expression: Final[str]
+
+    def __init__(self, query_language_mode: QueryLanguageMode, expression: str):
+        super().__init__(literal_value=expression)
+        self.query_language_mode = query_language_mode
+        self.expression = expression
+
+    def _eval_body(self, env: Environment) -> None:
+        # Get the variables sampled in the jsonata expression.
+        expression_variable_references: set[VariableReference] = (
+            extract_jsonata_variable_references(self.expression)
+        )
+        variable_declarations_list = list()
+        if self.query_language_mode == QueryLanguageMode.JSONata:
+            # Sample $states values into expression.
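+            # The expression is evaluated by the JSONata engine in isolation,
+            # so the $states fields and Variables it references are collected
+            # here and handed to compose_jsonata_expression below as explicit
+            # declarations.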
+            states_variable_declarations: VariableDeclarations = (
+                env.states.to_variable_declarations(
+                    variable_references=expression_variable_references
+                )
+            )
+            variable_declarations_list.append(states_variable_declarations)
+
+        # Sample Variable store values into expression.
+        # TODO: this could be optimised by sampling only those invoked.
+        variable_declarations: VariableDeclarations = env.variable_store.get_variable_declarations()
+        variable_declarations_list.append(variable_declarations)
+
+        rich_jsonata_expression: JSONataExpression = compose_jsonata_expression(
+            final_jsonata_expression=self.expression,
+            variable_declarations_list=variable_declarations_list,
+        )
+        result = eval_jsonata_expression(rich_jsonata_expression)
+        env.stack.append(result)
+
+
+class StringIntrinsicFunction(StringExpressionSimple):
+    intrinsic_function_derivation: Final[str]
+    function: Final[EvalComponent]
+
+    def __init__(self, intrinsic_function_derivation: str, function: EvalComponent) -> None:
+        super().__init__(literal_value=intrinsic_function_derivation)
+        self.intrinsic_function_derivation = intrinsic_function_derivation
+        self.function = function
+
+    def _eval_body(self, env: Environment) -> None:
+        self.function.eval(env=env)
+
+
+class StringJSONata(StringExpression):
+    expression: Final[str]
+
+    def __init__(self, expression: str):
+        super().__init__(literal_value=expression)
+        # TODO: check for illegal functions ($, $$, $eval)
+        self.expression = expression
+
+    def _eval_body(self, env: Environment) -> None:
+        # Get the variables sampled in the jsonata expression.
+        expression_variable_references: set[VariableReference] = (
+            extract_jsonata_variable_references(self.expression)
+        )
+
+        # Sample declarations for used intrinsic functions. Place this at the start allowing users to
+        # override these identifiers with custom variable declarations.
+        functions_variable_declarations: VariableDeclarations = (
+            get_intrinsic_functions_declarations(variable_references=expression_variable_references)
+        )
+
+        # Sample $states values into expression.
+        states_variable_declarations: VariableDeclarations = env.states.to_variable_declarations(
+            variable_references=expression_variable_references
+        )
+
+        # Sample Variable store values into expression.
+        # TODO: this could be optimised by sampling only those invoked.
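+        # The declaration list passed below is ordered: intrinsic function
+        # declarations first, then $states, then user variables, so user
+        # declarations can shadow the earlier identifiers.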
+        variable_declarations: VariableDeclarations = env.variable_store.get_variable_declarations()
+
+        rich_jsonata_expression: JSONataExpression = compose_jsonata_expression(
+            final_jsonata_expression=self.expression,
+            variable_declarations_list=[
+                functions_variable_declarations,
+                states_variable_declarations,
+                variable_declarations,
+            ],
+        )
+        result = eval_jsonata_expression(rich_jsonata_expression)
+
+        validate_jsonata_expression_output(env, self.expression, rich_jsonata_expression, result)
+
+        env.stack.append(result)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/timeouts/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/timeouts/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/timeouts/heartbeat.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/timeouts/heartbeat.py
new file mode 100644
index 0000000000000..c268239346079
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/timeouts/heartbeat.py
@@ -0,0 +1,89 @@
+import abc
+from typing import Final
+
+from localstack.aws.api.stepfunctions import ExecutionFailedEventDetails, HistoryEventType
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringJSONata,
+    StringSampler,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+from localstack.services.stepfunctions.asl.utils.json_path import NoSuchJsonPathError
+
+
+class Heartbeat(EvalComponent, abc.ABC):
+    @abc.abstractmethod
+    def _eval_seconds(self, env: Environment) -> int: ...
+
+    def _eval_body(self, env: Environment) -> None:
+        seconds = self._eval_seconds(env=env)
+        env.stack.append(seconds)
+
+
+class HeartbeatSeconds(Heartbeat):
+    def __init__(self, heartbeat_seconds: int):
+        if not isinstance(heartbeat_seconds, int) or heartbeat_seconds <= 0:
+            raise ValueError(
+                f"Expected positive integer for HeartbeatSeconds, got '{heartbeat_seconds}' instead."
+            )
+        self.heartbeat_seconds: Final[int] = heartbeat_seconds
+
+    def _eval_seconds(self, env: Environment) -> int:
+        return self.heartbeat_seconds
+
+
+class HeartbeatSecondsJSONata(Heartbeat):
+    string_jsonata: Final[StringJSONata]
+
+    def __init__(self, string_jsonata: StringJSONata):
+        super().__init__()
+        self.string_jsonata = string_jsonata
+
+    def _eval_seconds(self, env: Environment) -> int:
+        self.string_jsonata.eval(env=env)
+        # TODO: add snapshot tests to verify AWS's behaviour about non-integer values.
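+        # int() truncates towards zero, so a JSONata expression evaluating to
+        # 2.9 yields a 2-second heartbeat here.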
+        seconds = int(env.stack.pop())
+        return seconds
+
+
+class HeartbeatSecondsPath(Heartbeat):
+    string_sampler: Final[StringSampler]
+
+    def __init__(self, string_sampler: StringSampler):
+        self.string_sampler = string_sampler
+
+    def _eval_seconds(self, env: Environment) -> int:
+        try:
+            self.string_sampler.eval(env=env)
+        except NoSuchJsonPathError as no_such_json_path_error:
+            json_path = no_such_json_path_error.json_path
+            cause = f"Invalid path '{json_path}' : No results for path: $['{json_path[2:]}']"
+            raise FailureEventException(
+                failure_event=FailureEvent(
+                    env=env,
+                    error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime),
+                    event_type=HistoryEventType.ExecutionFailed,
+                    event_details=EventDetails(
+                        executionFailedEventDetails=ExecutionFailedEventDetails(
+                            error=StatesErrorNameType.StatesRuntime.to_name(), cause=cause
+                        )
+                    ),
+                )
+            )
+        seconds = env.stack.pop()
+        if not isinstance(seconds, int) or seconds <= 0:
+            raise ValueError(
+                f"Expected positive integer for HeartbeatSecondsPath, got '{seconds}' instead."
+            )
+        return seconds
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/common/timeouts/timeout.py b/localstack-core/localstack/services/stepfunctions/asl/component/common/timeouts/timeout.py
new file mode 100644
index 0000000000000..03ae1a6ba2e33
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/common/timeouts/timeout.py
@@ -0,0 +1,113 @@
+import abc
+from typing import Final, Optional
+
+from localstack.aws.api.stepfunctions import (
+    ExecutionFailedEventDetails,
+    HistoryEventType,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringJSONata,
+    StringSampler,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+from localstack.services.stepfunctions.asl.utils.json_path import NoSuchJsonPathError
+
+
+class EvalTimeoutError(TimeoutError):
+    pass
+
+
+class Timeout(EvalComponent, abc.ABC):
+    @abc.abstractmethod
+    def is_default_value(self) -> bool: ...
+
+    @abc.abstractmethod
+    def _eval_seconds(self, env: Environment) -> int: ...
+
+    def _eval_body(self, env: Environment) -> None:
+        seconds = self._eval_seconds(env=env)
+        env.stack.append(seconds)
+
+
+class TimeoutSeconds(Timeout):
+    DEFAULT_TIMEOUT_SECONDS: Final[int] = 99999999
+
+    def __init__(self, timeout_seconds: int, is_default: Optional[bool] = None):
+        if not isinstance(timeout_seconds, int) or timeout_seconds <= 0:
+            raise ValueError(
+                f"Expected positive integer for TimeoutSeconds, got '{timeout_seconds}' instead."
+            )
+        self.timeout_seconds: Final[int] = timeout_seconds
+        self.is_default: Optional[bool] = is_default
+
+    def is_default_value(self) -> bool:
+        if self.is_default is not None:
+            return self.is_default
+        return self.timeout_seconds == self.DEFAULT_TIMEOUT_SECONDS
+
+    def _eval_seconds(self, env: Environment) -> int:
+        return self.timeout_seconds
+
+
+class TimeoutSecondsJSONata(Timeout):
+    string_jsonata: Final[StringJSONata]
+
+    def __init__(self, string_jsonata: StringJSONata):
+        super().__init__()
+        self.string_jsonata = string_jsonata
+
+    def is_default_value(self) -> bool:
+        return False
+
+    def _eval_seconds(self, env: Environment) -> int:
+        self.string_jsonata.eval(env=env)
+        # TODO: add snapshot tests to verify AWS's behaviour about non-integer values.
+        seconds = int(env.stack.pop())
+        return seconds
+
+
+class TimeoutSecondsPath(Timeout):
+    string_sampler: Final[StringSampler]
+
+    def __init__(self, string_sampler: StringSampler):
+        self.string_sampler = string_sampler
+
+    def is_default_value(self) -> bool:
+        return False
+
+    def _eval_seconds(self, env: Environment) -> int:
+        try:
+            self.string_sampler.eval(env=env)
+        except NoSuchJsonPathError as no_such_json_path_error:
+            json_path = no_such_json_path_error.json_path
+            cause = f"Invalid path '{json_path}' : No results for path: $['{json_path[2:]}']"
+            raise FailureEventException(
+                failure_event=FailureEvent(
+                    env=env,
+                    error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime),
+                    event_type=HistoryEventType.ExecutionFailed,
+                    event_details=EventDetails(
+                        executionFailedEventDetails=ExecutionFailedEventDetails(
+                            error=StatesErrorNameType.StatesRuntime.to_name(), cause=cause
+                        )
+                    ),
+                )
+            )
+        seconds = env.stack.pop()
+        if not isinstance(seconds, int) or seconds <= 0:
+            raise ValueError(
+                f"Expected positive integer for TimeoutSecondsPath, got '{seconds}' instead."
+            )
+        return seconds
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/component.py b/localstack-core/localstack/services/stepfunctions/asl/component/component.py
new file mode 100644
index 0000000000000..029db9d43bce7
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/component.py
@@ -0,0 +1,9 @@
+import abc
+
+
+class Component(abc.ABC):
+    def __str__(self):
+        return f"({self.__class__.__name__}| {vars(self)})"
+
+    def __repr__(self):
+        return str(self)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/eval_component.py b/localstack-core/localstack/services/stepfunctions/asl/component/eval_component.py
new file mode 100644
index 0000000000000..cd7940208f5cc
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/eval_component.py
@@ -0,0 +1,86 @@
+import abc
+import logging
+from typing import Optional
+
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.component import Component
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
+from localstack.utils.strings import long_uid
+
+LOG = logging.getLogger(__name__)
+
+
+class EvalComponent(Component, abc.ABC):
+    __heap_key: Optional[str] = None
+
+    @property
+    def heap_key(self) -> str:
+        if self.__heap_key is None:
+            self.__heap_key = long_uid()
+        return self.__heap_key
+
+    def _log_evaluation_step(self, subject: str = "Generic") -> None:
+        if LOG.isEnabledFor(logging.DEBUG):
+            LOG.debug(
+                "[ASL] [%s] [%s]: '%s'",
+                subject.lower()[:4],
+                self.__class__.__name__,
+                repr(self),
+            )
+
+    def _log_failure_event_exception(self, failure_event_exception: FailureEventException) -> None:
+        error_log_parts = ["Exception=FailureEventException"]
+
+        error_name = failure_event_exception.failure_event.error_name
+        if error_name:
+            error_log_parts.append(f"Error={error_name.error_name}")
+
+        event_details = failure_event_exception.failure_event.event_details
+        if event_details:
+            error_log_parts.append(f"Details={to_json_str(event_details)}")
+
+        error_log = ", ".join(error_log_parts)
+        component_repr = repr(self)
+        LOG.error("%s at '%s'", error_log, component_repr)
+
+    def _log_exception(self, exception: Exception) -> None:
+        exception_name = exception.__class__.__name__
+
+        error_log_parts = [f"Exception={exception_name}"]
+
+        exception_body = list(exception.args)
+        if exception_body:
+            error_log_parts.append(f"Details={exception_body}")
+        else:
+            error_log_parts.append("Details=None-Available")
+
+        error_log = ", ".join(error_log_parts)
+        component_repr = repr(self)
+        LOG.error("%s at '%s'", error_log, component_repr)
+
+    def eval(self, env: Environment) -> None:
+        if env.is_running():
+            self._log_evaluation_step("Computing")
+            try:
+                field_name = self._field_name()
+                if field_name is not None:
+                    env.next_field_name = field_name
+                self._eval_body(env)
+            except FailureEventException as failure_event_exception:
+                self._log_failure_event_exception(failure_event_exception=failure_event_exception)
+                raise failure_event_exception
+            except Exception as exception:
+                self._log_exception(exception=exception)
+                raise exception
+        else:
+            self._log_evaluation_step("Pruning")
+
+    @abc.abstractmethod
+    def _eval_body(self, env: Environment) -> None:
+        raise NotImplementedError()
+
+    def _field_name(self) -> Optional[str]:
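+        # Defaults to the class name; subclasses such as StringExpression
+        # override this (returning None, or the ASL field name that error
+        # causes report through env.next_field_name).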
+        return self.__class__.__name__
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/argument/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/argument/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/argument/argument.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/argument/argument.py
new file mode 100644
index 0000000000000..6438471c8becb
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/argument/argument.py
@@ -0,0 +1,105 @@
+import abc
+from typing import Any, Final, Optional
+
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringVariableSample,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.utils.json_path import extract_json
+
+
+class Argument(EvalComponent, abc.ABC):
+    """
+    Represents an Intrinsic Function argument that can be evaluated and whose
+    result is pushed onto the stack.
+
+    Subclasses must override `_eval_argument()` to evaluate the specific value
+    of the argument they represent; the base class's `_eval_body` handles the
+    environment plumbing by appending the evaluated result to the
+    environment's stack.
+    """
+
+    @abc.abstractmethod
+    def _eval_argument(self, env: Environment) -> Any: ...
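+    # A minimal concrete subclass only needs _eval_argument; for instance, a
+    # hypothetical constant argument could be written as:
+    #
+    #     class ArgumentConstant(Argument):
+    #         def _eval_argument(self, env: Environment) -> Any:
+    #             return 42
+    #
+    # _eval_body below then pushes the returned value onto env.stack.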
+
+    def _eval_body(self, env: Environment) -> None:
+        argument = self._eval_argument(env=env)
+        env.stack.append(argument)
+
+
+class ArgumentLiteral(Argument):
+    definition_value: Final[Optional[Any]]
+
+    def __init__(self, definition_value: Optional[Any]):
+        self.definition_value = definition_value
+
+    def _eval_argument(self, env: Environment) -> Any:
+        return self.definition_value
+
+
+class ArgumentJsonPath(Argument):
+    json_path: Final[str]
+
+    def __init__(self, json_path: str):
+        self.json_path = json_path
+
+    def _eval_argument(self, env: Environment) -> Any:
+        inp = env.stack[-1]
+        value = extract_json(self.json_path, inp)
+        return value
+
+
+class ArgumentContextPath(ArgumentJsonPath):
+    def __init__(self, context_path: str):
+        json_path = context_path[1:]
+        super().__init__(json_path=json_path)
+
+    def _eval_argument(self, env: Environment) -> Any:
+        value = extract_json(self.json_path, env.states.context_object.context_object_data)
+        return value
+
+
+class ArgumentFunction(Argument):
+    function: Final[EvalComponent]
+
+    def __init__(self, function: EvalComponent):
+        self.function = function
+
+    def _eval_argument(self, env: Environment) -> Any:
+        self.function.eval(env=env)
+        output_value = env.stack.pop()
+        return output_value
+
+
+class ArgumentVar(Argument):
+    string_variable_sample: Final[StringVariableSample]
+
+    def __init__(self, string_variable_sample: StringVariableSample):
+        super().__init__()
+        self.string_variable_sample = string_variable_sample
+
+    def _eval_argument(self, env: Environment) -> Any:
+        self.string_variable_sample.eval(env=env)
+        value = env.stack.pop()
+        return value
+
+
+class ArgumentList(Argument):
+    arguments: Final[list[Argument]]
+    size: Final[int]
+
+    def __init__(self, arguments: list[Argument]):
+        self.arguments = arguments
+        self.size = len(arguments)
+
+    def _eval_argument(self, env: Environment) -> Any:
+        values = list()
+        for argument in self.arguments:
+            argument.eval(env=env)
+            argument_value = env.stack.pop()
+            values.append(argument_value)
+        return values
diff --git a/localstack/services/stepfunctions/asl/component/intrinsic/component.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/component.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/intrinsic/component.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/component.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/function.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/function.py
new file mode 100644
index 0000000000000..dd41bdeab2028
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/function.py
@@ -0,0 +1,17 @@
+import abc
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ArgumentList
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.function_name import (
+    FunctionName,
+)
+
+
+class Function(EvalComponent, abc.ABC):
+    name: FunctionName
+    argument_list: Final[ArgumentList]
+
+    def __init__(self, name: FunctionName, argument_list: ArgumentList):
+        self.name = name
+        self.argument_list = argument_list
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array.py
new file mode 100644
index 0000000000000..1b10fa1e97735
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array.py
@@ -0,0 +1,28 @@
+from typing import Any
+
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class Array(StatesFunction):
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.Array),
+            argument_list=argument_list,
+        )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        values: list[Any] = env.stack.pop()
+        env.stack.append(values)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_contains.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_contains.py
new file mode 100644
index 0000000000000..340fa5ec6d2a9
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_contains.py
@@ -0,0 +1,50 @@
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ArrayContains(StatesFunction):
+    # Determines if a specific value is present in an array.
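+    # Membership is evaluated with Python's `in` operator over the evaluated
+    # argument list, i.e. `value in array`.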
+    #
+    # For example:
+    # With input
+    # {
+    #    "inputArray": [1,2,3,4,5,6,7,8,9],
+    #    "lookingFor": 5
+    # }
+    #
+    # The call:
+    # States.ArrayContains($.inputArray, $.lookingFor)
+    #
+    # Returns:
+    # true
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.ArrayContains),
+            argument_list=argument_list,
+        )
+        if argument_list.size != 2:
+            raise ValueError(
+                f"Expected 2 arguments for function type '{type(self)}', but got: '{argument_list}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        args = env.stack.pop()
+
+        array = args[0]
+        value = args[1]
+        if not isinstance(array, list):
+            raise TypeError(f"Expected an array type as first argument, but got {array}.")
+        contains = value in array
+        env.stack.append(contains)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_get_item.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_get_item.py
new file mode 100644
index 0000000000000..fc9448d28d5a5
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_get_item.py
@@ -0,0 +1,54 @@
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ArrayGetItem(StatesFunction):
+    # Returns a specified index's value.
+    #
+    # For example:
+    # With input
+    # {
+    #    "inputArray": [1,2,3,4,5,6,7,8,9],
+    #    "index": 5
+    # }
+    #
+    # The call
+    # States.ArrayGetItem($.inputArray, $.index)
+    #
+    # Returns
+    # 6
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.ArrayGetItem),
+            argument_list=argument_list,
+        )
+        if argument_list.size != 2:
+            raise ValueError(
+                f"Expected 2 arguments for function type '{type(self)}', but got: '{argument_list}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        args = env.stack.pop()
+
+        index = args.pop()
+        if not isinstance(index, int):
+            raise TypeError(f"Expected an integer index value, but got '{index}'.")
+
+        array = args.pop()
+        if not isinstance(array, list):
+            raise TypeError(f"Expected an array type, but got '{array}'.")
+
+        item = array[index]
+        env.stack.append(item)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_length.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_length.py
new file mode 100644
index 0000000000000..f1050fab9aaf2
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_length.py
@@ -0,0 +1,49 @@
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ArrayLength(StatesFunction):
+    # Returns the length of the array.
+    #
+    # For example:
+    # With input
+    # {
+    #    "inputArray": [1,2,3,4,5,6,7,8,9]
+    # }
+    #
+    # The call
+    # States.ArrayLength($.inputArray)
+    #
+    # Returns
+    # 9
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.ArrayLength),
+            argument_list=argument_list,
+        )
+        if argument_list.size != 1:
+            raise ValueError(
+                f"Expected 1 argument for function type '{type(self)}', but got: '{argument_list}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        args = env.stack.pop()
+
+        array = args.pop()
+        if not isinstance(array, list):
+            raise TypeError(f"Expected an array type, but got '{array}'.")
+
+        length = len(array)
+        env.stack.append(length)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_partition.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_partition.py
new file mode 100644
index 0000000000000..a12b2780c0faf
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_partition.py
@@ -0,0 +1,66 @@
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ArrayPartition(StatesFunction):
+    # Partitions the input array.
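+    # The chunk size is rounded (floats are accepted), and the final chunk may
+    # be shorter than chunk_size, as in the example below.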
+    #
+    # For example:
+    # With input
+    # {
+    #    "inputArray": [1, 2, 3, 4, 5, 6, 7, 8, 9]
+    # }
+    #
+    # The call
+    # States.ArrayPartition($.inputArray,4)
+    #
+    # Returns
+    # [ [1,2,3,4], [5,6,7,8], [9]]
+
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.ArrayPartition),
+            argument_list=argument_list,
+        )
+        if argument_list.size != 2:
+            raise ValueError(
+                f"Expected 2 arguments for function type '{type(self)}', but got: '{argument_list}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        args = env.stack.pop()
+
+        chunk_size = args.pop()
+        if not isinstance(chunk_size, (int, float)):
+            raise TypeError(f"Expected an integer value as chunk_size, but got {chunk_size}.")
+        chunk_size = round(chunk_size)
+        if chunk_size <= 0:
+            raise ValueError(
+                f"Expected a non-zero, positive integer as chunk_size, but got {chunk_size}."
+            )
+
+        array = args.pop()
+        if not isinstance(array, list):
+            raise TypeError(f"Expected an array type as first argument, but got {array}.")
+
+        chunks = self._to_chunks(array=array, chunk_size=chunk_size)
+        env.stack.append(chunks)
+
+    @staticmethod
+    def _to_chunks(array: list, chunk_size: int):
+        chunks = list()
+        for i in range(0, len(array), chunk_size):
+            chunks.append(array[i : i + chunk_size])
+        return chunks
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_range.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_range.py
new file mode 100644
index 0000000000000..5528d62b57159
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_range.py
@@ -0,0 +1,56 @@
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ArrayRange(StatesFunction):
+    # Creates a new array containing a specific range of elements.
+    #
+    # For example:
+    # The call
+    # States.ArrayRange(1, 9, 2)
+    #
+    # Returns
+    # [1,3,5,7,9]
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.ArrayRange),
+            argument_list=argument_list,
+        )
+        if argument_list.size != 3:
+            raise ValueError(
+                f"Expected 3 arguments for function type '{type(self)}', but got: '{argument_list}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        range_vals = env.stack.pop()
+
+        for range_val in range_vals:
+            if not isinstance(range_val, (int, float)):
+                raise TypeError(
+                    f"Expected 3 integer arguments for function type '{type(self)}', but got: '{range_vals}'."
+                )
+        first = round(range_vals[0])
+        last = round(range_vals[1])
+        step = round(range_vals[2])
+
+        if step <= 0:
+            raise ValueError(f"Expected step argument to be positive, but got: '{step}'.")
+
+        array = list(range(first, last + 1, step))
+
+        if len(array) > 1000:
+            raise ValueError(f"Arrays cannot contain more than 1000 items, size: {len(array)}.")
+
+        env.stack.append(array)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_unique.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_unique.py
new file mode 100644
index 0000000000000..93833f686ba41
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/array/array_unique.py
@@ -0,0 +1,54 @@
+from collections import OrderedDict
+
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ArrayUnique(StatesFunction):
+    # Removes duplicate values from an array and returns an array containing only unique elements.
+    #
+    # For example:
+    # With input
+    # {
+    #    "inputArray": [1,2,3,3,3,3,3,3,4]
+    # }
+    #
+    # The call
+    # States.ArrayUnique($.inputArray)
+    #
+    # Returns
+    # [1,2,3,4]
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.ArrayUnique),
+            argument_list=argument_list,
+        )
+        if argument_list.size != 1:
+            raise ValueError(
+                f"Expected 1 argument for function type '{type(self)}', but got: '{argument_list}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        args = env.stack.pop()
+
+        array = args.pop()
+        if not isinstance(array, list):
+            raise TypeError(f"Expected an array type, but got '{array}'.")
+
+        # Remove duplicates through an ordered set; in this
+        # case we consider the key set of an ordered dict.
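+        # e.g. OrderedDict.fromkeys([1, 2, 3, 3, 4]).keys() preserves the
+        # first-seen order and yields [1, 2, 3, 4]; note this requires the
+        # array items to be hashable.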
+        items_odict = OrderedDict.fromkeys(array).keys()
+        unique_array = list(items_odict)
+        env.stack.append(unique_array)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/encoding_decoding/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/encoding_decoding/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/encoding_decoding/base_64_decode.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/encoding_decoding/base_64_decode.py
new file mode 100644
index 0000000000000..8a4ebe8d94835
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/encoding_decoding/base_64_decode.py
@@ -0,0 +1,60 @@
+import base64
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class Base64Decode(StatesFunction):
+    # Decodes data previously encoded with the MIME Base64 encoding scheme.
+    #
+    # For example:
+    # With input
+    # {
+    #    "base64": "RGF0YSB0byBlbmNvZGU="
+    # }
+    #
+    # The call
+    # "data.$": "States.Base64Decode($.base64)"
+    #
+    # Returns
+    # {"data": "Data to encode"}
+
+    MAX_INPUT_CHAR_LEN: Final[int] = 10_000
+
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.Base64Decode),
+            argument_list=argument_list,
+        )
+        if argument_list.size != 1:
+            raise ValueError(
+                f"Expected 1 argument for function type '{type(self)}', but got: '{argument_list}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        args = env.stack.pop()
+
+        base64_string: str = args.pop()
+        if len(base64_string) > self.MAX_INPUT_CHAR_LEN:
+            raise ValueError(
+                f"Maximum input string for function type '{type(self)}' "
+                f"is '{self.MAX_INPUT_CHAR_LEN}', but got '{len(base64_string)}'."
+            )
+
+        base64_string_bytes = base64_string.encode("ascii")
+        string_bytes = base64.b64decode(base64_string_bytes)
+        string = string_bytes.decode("ascii")
+        env.stack.append(string)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/encoding_decoding/base_64_encode.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/encoding_decoding/base_64_encode.py
new file mode 100644
index 0000000000000..33a72f845c0b1
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/encoding_decoding/base_64_encode.py
@@ -0,0 +1,60 @@
+import base64
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class Base64Encode(StatesFunction):
+    # Encodes data with the MIME Base64 encoding scheme.
+    #
+    # For example:
+    # With input
+    # {
+    #    "input": "Data to encode"
+    # }
+    #
+    # The call
+    # "base64.$": "States.Base64Encode($.input)"
+    #
+    # Returns
+    # {"base64": "RGF0YSB0byBlbmNvZGU="}
+
+    MAX_INPUT_CHAR_LEN: Final[int] = 10_000
+
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.Base64Encode),
+            argument_list=argument_list,
+        )
+        if argument_list.size != 1:
+            raise ValueError(
+                f"Expected 1 argument for function type '{type(self)}', but got: '{argument_list}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.argument_list.eval(env=env)
+        args = env.stack.pop()
+
+        string: str = args.pop()
+        if len(string) > self.MAX_INPUT_CHAR_LEN:
+            raise ValueError(
+                f"Maximum input string for function type '{type(self)}' "
+                f"is '{self.MAX_INPUT_CHAR_LEN}', but got '{len(string)}'."
+            )
+
+        string_bytes = string.encode("ascii")
+        string_base64_bytes = base64.b64encode(string_bytes)
+        base64_string = string_base64_bytes.decode("ascii")
+        env.stack.append(base64_string)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/factory.py
new file mode 100644
index 0000000000000..bbfb779802782
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/factory.py
@@ -0,0 +1,106 @@
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ArgumentList
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.array import (
+    array,
+    array_contains,
+    array_get_item,
+    array_length,
+    array_partition,
+    array_range,
+    array_unique,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.encoding_decoding import (
+    base_64_decode,
+    base_64_encode,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.generic import (
+    string_format,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.hash_calculations import (
+    hash_func,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.json_manipulation import (
+    json_merge,
+    json_to_string,
+    string_to_json,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.math_operations import (
+    math_add,
+    math_random,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.string_operations import (
+    string_split,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.unique_id_generation import (
+    uuid,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+
+
+# TODO: could use reflection on StatesFunctionNameType values.
+class StatesFunctionFactory:
+    @staticmethod
+    def from_name(func_name: StatesFunctionName, argument_list: ArgumentList) -> StatesFunction:
+        match func_name.function_type:
+            # Array.
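+            # Each branch maps a parsed States.* function name onto its
+            # implementation class, e.g. States.ArrayPartition(...) dispatches
+            # to array_partition.ArrayPartition.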
+            case StatesFunctionNameType.Array:
+                return array.Array(argument_list=argument_list)
+            case StatesFunctionNameType.ArrayPartition:
+                return array_partition.ArrayPartition(argument_list=argument_list)
+            case StatesFunctionNameType.ArrayContains:
+                return array_contains.ArrayContains(argument_list=argument_list)
+            case StatesFunctionNameType.ArrayRange:
+                return array_range.ArrayRange(argument_list=argument_list)
+            case StatesFunctionNameType.ArrayGetItem:
+                return array_get_item.ArrayGetItem(argument_list=argument_list)
+            case StatesFunctionNameType.ArrayLength:
+                return array_length.ArrayLength(argument_list=argument_list)
+            case StatesFunctionNameType.ArrayUnique:
+                return array_unique.ArrayUnique(argument_list=argument_list)
+
+            # JSON Manipulation
+            case StatesFunctionNameType.JsonToString:
+                return json_to_string.JsonToString(argument_list=argument_list)
+            case StatesFunctionNameType.StringToJson:
+                return string_to_json.StringToJson(argument_list=argument_list)
+            case StatesFunctionNameType.JsonMerge:
+                return json_merge.JsonMerge(argument_list=argument_list)
+
+            # Unique Id Generation.
+            case StatesFunctionNameType.UUID:
+                return uuid.UUID(argument_list=argument_list)
+
+            # String Operations.
+            case StatesFunctionNameType.StringSplit:
+                return string_split.StringSplit(argument_list=argument_list)
+
+            # Hash Calculations.
+            case StatesFunctionNameType.Hash:
+                return hash_func.HashFunc(argument_list=argument_list)
+
+            # Encoding and Decoding.
+            case StatesFunctionNameType.Base64Encode:
+                return base_64_encode.Base64Encode(argument_list=argument_list)
+            case StatesFunctionNameType.Base64Decode:
+                return base_64_decode.Base64Decode(argument_list=argument_list)
+
+            # Math Operations.
+            case StatesFunctionNameType.MathRandom:
+                return math_random.MathRandom(argument_list=argument_list)
+            case StatesFunctionNameType.MathAdd:
+                return math_add.MathAdd(argument_list=argument_list)
+
+            # Generic.
+            case StatesFunctionNameType.Format:
+                return string_format.StringFormat(argument_list=argument_list)
+
+            # Unsupported.
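+            # Any StatesFunctionNameType member without a branch above lands
+            # here and surfaces as a NotImplementedError.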
+            case unsupported:
+                raise NotImplementedError(unsupported)  # noqa
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/generic/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/generic/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/generic/string_format.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/generic/string_format.py
new file mode 100644
index 0000000000000..86e8b50050518
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/generic/string_format.py
@@ -0,0 +1,105 @@
+import json
+from typing import Any, Final
+
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentContextPath,
+    ArgumentJsonPath,
+    ArgumentList,
+    ArgumentLiteral,
+    ArgumentVar,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class StringFormat(StatesFunction):
+    # Constructs a string from both literal and interpolated values. This function takes one or more arguments.
+    # The value of the first argument must be a string, and may include zero or more instances of the character
+    # sequence {}. The interpreter returns the string defined in the first argument with each {} replaced by the value
+    # of the positionally-corresponding argument in the Intrinsic invocation.
+    #
+    # For example:
+    # With input
+    # {
+    #    "name": "Arnav",
+    #    "template": "Hello, my name is {}."
+    # }
+    #
+    # Calls
+    # States.Format('Hello, my name is {}.', $.name)
+    # States.Format($.template, $.name)
+    #
+    # Return
+    # Hello, my name is Arnav.
+    _DELIMITER: Final[str] = "{}"
+
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.Format),
+            argument_list=argument_list,
+        )
+        if argument_list.size == 0:
+            raise ValueError(
+                f"Expected at least 1 argument for function type '{type(self)}', but got: '{argument_list}'."
+            )
+        first_argument = argument_list.arguments[0]
+        if isinstance(first_argument, ArgumentLiteral) and not isinstance(
+            first_argument.definition_value, str
+        ):
+            raise ValueError(
+                f"Expected the first argument for function type '{type(self)}' to be a string, but got: '{first_argument.definition_value}'."
+            )
+        elif not isinstance(
+            first_argument, (ArgumentLiteral, ArgumentVar, ArgumentJsonPath, ArgumentContextPath)
+        ):
+            raise ValueError(
+                f"Expected the first argument for function type '{type(self)}' to be a string, but got: '{first_argument}'."
+            )
+
+    def _eval_body(self, env: Environment) -> None:
+        # TODO: investigate behaviour for incorrect number of arguments in string format.
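+        # With the current implementation, fewer '{}' placeholders than
+        # arguments silently drops the extras, while more placeholders than
+        # arguments raises an IndexError from str.format.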
+        self.argument_list.eval(env=env)
+        args = env.stack.pop()
+
+        string_format: str = args[0]
+        values: list[Any] = args[1:]
+
+        values_str_repr = map(self._to_str_repr, values)
+        string_result = string_format.format(*values_str_repr)
+
+        env.stack.append(string_result)
+
+    @staticmethod
+    def _to_str_repr(value: Any) -> str:
+        # Converts a value or object to a string representation compatible with sfn.
+        # For example:
+        # Input object
+        # {
+        #   "Arg1": 1,
+        #   "Arg2": []
+        # }
+        # Is mapped to the string
+        # {Arg1=1, Arg2=[]}
+
+        if isinstance(value, str):
+            return value
+        elif isinstance(value, list):
+            value_parts: list[str] = list(map(StringFormat._to_str_repr, value))
+            return f"[{', '.join(value_parts)}]"
+        elif isinstance(value, dict):
+            dict_items = list()
+            for d_key, d_value in value.items():
+                d_value_lit = StringFormat._to_str_repr(d_value)
+                dict_items.append(f"{d_key}={d_value_lit}")
+            return f"{{{', '.join(dict_items)}}}"
+        else:
+            # Return json representation of terminal value.
+            return json.dumps(value)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/hash_calculations/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/hash_calculations/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/hash_calculations/hash_algorithm.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/hash_calculations/hash_algorithm.py
new file mode 100644
index 0000000000000..efb4239e14f43
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/hash_calculations/hash_algorithm.py
@@ -0,0 +1,9 @@
+import enum
+
+
+class HashAlgorithm(enum.Enum):
+    MD5 = "MD5"
+    SHA_1 = "SHA-1"
+    SHA_256 = "SHA-256"
+    SHA_384 = "SHA-384"
+    SHA_512 = "SHA-512"
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/hash_calculations/hash_func.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/hash_calculations/hash_func.py
new file mode 100644
index 0000000000000..135f73826f86b
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/hash_calculations/hash_func.py
@@ -0,0 +1,76 @@
+import hashlib
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    ArgumentList,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.hash_calculations.hash_algorithm import (
+    HashAlgorithm,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class HashFunc(StatesFunction):
+    MAX_INPUT_CHAR_LEN: Final[int] = 10_000
+
+    def __init__(self, argument_list: ArgumentList):
+        super().__init__(
+            states_name=StatesFunctionName(function_type=StatesFunctionNameType.Hash),
argument_list=argument_list, + ) + if argument_list.size != 2: + raise ValueError( + f"Expected 2 arguments for function type '{type(self)}', but got: '{argument_list}'." + ) + + @staticmethod + def _hash_inp_with_alg(inp: str, alg: HashAlgorithm) -> str: + inp_enc = inp.encode() + hash_inp = None + match alg: + case HashAlgorithm.MD5: + hash_inp = hashlib.md5(inp_enc) + case HashAlgorithm.SHA_1: + hash_inp = hashlib.sha1(inp_enc) + case HashAlgorithm.SHA_256: + hash_inp = hashlib.sha256(inp_enc) + case HashAlgorithm.SHA_384: + hash_inp = hashlib.sha384(inp_enc) + case HashAlgorithm.SHA_512: + hash_inp = hashlib.sha512(inp_enc) + hash_value: str = hash_inp.hexdigest() + return hash_value + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + args = env.stack.pop() + + algorithm = args.pop() + try: + hash_algorithm = HashAlgorithm(algorithm) + except Exception: + raise ValueError(f"Unknown hash function '{algorithm}'.") + + input_data = args.pop() + if not isinstance(input_data, str): + raise TypeError( + f"Expected string type as input data for function type '{type(self)}', but got: '{input_data}'." + ) + + if len(input_data) > self.MAX_INPUT_CHAR_LEN: + raise ValueError( + f"Maximum character input length for function type '{type(self)}' " + f"is '{self.MAX_INPUT_CHAR_LEN}', but got '{len(input_data)}'." + ) + + res = self._hash_inp_with_alg(input_data, hash_algorithm) + env.stack.append(res) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/json_merge.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/json_merge.py new file mode 100644 index 0000000000000..a6e9221d26c81 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/json_merge.py @@ -0,0 +1,89 @@ +import copy +from typing import Any + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class JsonMerge(StatesFunction): + # Merges two JSON objects into a single object + # + # For example: + # With input + # { + # "json1": { "a": {"a1": 1, "a2": 2}, "b": 2, }, + # "json2": { "a": {"a3": 1, "a4": 2}, "c": 3 } + # } + # + # Call + # "output.$": "States.JsonMerge($.json1, $.json2, false)" + # + # Returns + # { + # "output": { + # "a": {"a3": 1, "a4": 2}, + # "b": 2, + # "c": 3 + # } + # } + + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.JsonMerge), + argument_list=argument_list, + ) + if argument_list.size != 3: + raise
ValueError( + f"Expected 3 arguments for function type '{type(self)}', but got: '{argument_list}'." + ) + + @staticmethod + def _validate_is_deep_merge_argument(is_deep_merge: Any) -> None: + if not isinstance(is_deep_merge, bool): + raise TypeError( + f"Expected boolean value for deep merge mode, but got: '{is_deep_merge}'." + ) + if is_deep_merge: + # This is AWS's limitation, not LocalStack's. + raise NotImplementedError( + "Currently, Step Functions only supports the shallow merging mode; " + "therefore, you must specify the boolean value as false." + ) + + @staticmethod + def _validate_merge_argument(argument: Any, num: int) -> None: + if not isinstance(argument, dict): + raise TypeError(f"Expected a JSON object as argument {num}, but got: '{argument}'.") + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + args = env.stack.pop() + + is_deep_merge = args.pop() + self._validate_is_deep_merge_argument(is_deep_merge) + + snd = args.pop() + self._validate_merge_argument(snd, 2) + + fst = args.pop() + self._validate_merge_argument(fst, 1) + + # Currently, Step Functions only supports the shallow merging mode; therefore, you must specify the boolean + # value as false. In the shallow mode, if the same key exists in both JSON objects, the latter object's key + # overrides the same key in the first object. Additionally, objects nested within a JSON object aren't merged + # when you use shallow merging. + merged = copy.deepcopy(fst) + merged.update(snd) + + env.stack.append(merged) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/json_to_string.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/json_to_string.py new file mode 100644 index 0000000000000..9dfff92d8c449 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/json_to_string.py @@ -0,0 +1,34 @@ +import json + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class JsonToString(StatesFunction): + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.JsonToString), + argument_list=argument_list, + ) + if argument_list.size != 1: + raise ValueError( + f"Expected 1 argument for function type '{type(self)}', but got: '{argument_list}'."
+ ) + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + args = env.stack.pop() + json_obj: json = args.pop() + json_string: str = json.dumps(json_obj, separators=(",", ":")) + env.stack.append(json_string) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/string_to_json.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/string_to_json.py new file mode 100644 index 0000000000000..cc42874cf2baa --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/json_manipulation/string_to_json.py @@ -0,0 +1,39 @@ +import json + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class StringToJson(StatesFunction): + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.StringToJson), + argument_list=argument_list, + ) + if argument_list.size != 1: + raise ValueError( + f"Expected 1 argument for function type '{type(self)}', but got: '{argument_list}'." + ) + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + args = env.stack.pop() + + string_json: str = args.pop() + + if string_json is not None and string_json.strip(): + json_obj: json = json.loads(string_json) + else: + json_obj: json = None + env.stack.append(json_obj) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/math_operations/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/math_operations/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/math_operations/math_add.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/math_operations/math_add.py new file mode 100644 index 0000000000000..c4124f1195159 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/math_operations/math_add.py @@ -0,0 +1,78 @@ +import decimal +from typing import Any + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +def _round_like_java(f: float) -> int: + # This behaves a bit unexpectedly for boundary values: AWS Step Functions is implemented in Java, + # so we need to adjust the rounding mode accordingly (Python rounds half to even by default).
+ if f >= 0: + decimal.getcontext().rounding = decimal.ROUND_HALF_UP + else: + decimal.getcontext().rounding = decimal.ROUND_HALF_DOWN + d = decimal.Decimal(f) + return int(round(d, 0)) + + +class MathAdd(StatesFunction): + # Returns the sum of two numbers. + # + # For example: + # With input + # { + # "value1": 111, + # "step": -1 + # } + # + # Call + # "value1.$": "States.MathAdd($.value1, $.step)" + # + # Returns + # {"value1": 110 } + + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.MathAdd), + argument_list=argument_list, + ) + if argument_list.size != 2: + raise ValueError( + f"Expected 2 arguments for function type '{type(self)}', but got: '{argument_list}'." + ) + + @staticmethod + def _validate_integer_value(value: Any) -> int: + if not isinstance(value, (int, float)): + raise TypeError(f"Expected integer value, but got: '{value}'.") + # If you specify a non-integer value for one or both of the arguments, + # Step Functions will round it off to the nearest integer. + + if isinstance(value, float): + result = _round_like_java(value) + return int(result) + + return value + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + args = env.stack.pop() + + a = self._validate_integer_value(args[0]) + b = self._validate_integer_value(args[1]) + + res = a + b + env.stack.append(res) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/math_operations/math_random.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/math_operations/math_random.py new file mode 100644 index 0000000000000..b50d1dcb4368d --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/math_operations/math_random.py @@ -0,0 +1,67 @@ +import random +from typing import Any + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class MathRandom(StatesFunction): + # Returns a random number between the specified start and end number. + # + # For example: + # With input + # { + # "start": 1, + # "end": 999 + # } + # + # Call + # "random.$": "States.MathRandom($.start, $.end)" + # + # Returns + # {"random": 456 } + + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.MathRandom), + argument_list=argument_list, + ) + if argument_list.size < 2 or argument_list.size > 3: + raise ValueError( + f"Expected 2-3 arguments for function type '{type(self)}', but got: '{argument_list}'."
+ ) + + @staticmethod + def _validate_integer_value(value: Any, argument_name: str) -> int: + if not isinstance(value, (int, float)): + raise TypeError(f"Expected integer value for {argument_name}, but got: '{value}'.") + # If you specify a non-integer value for the start number or end number argument, + # Step Functions will round it off to the nearest integer. + return int(value) + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + args = env.stack.pop() + + seed = None + if self.argument_list.size == 3: + seed = args.pop() + self._validate_integer_value(seed, "seed") + + end = self._validate_integer_value(args.pop(), "end") + start = self._validate_integer_value(args.pop(), "start") + + rand_gen = random.Random(seed) + rand_int = rand_gen.randint(start, end) + env.stack.append(rand_int) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function.py new file mode 100644 index 0000000000000..dfb4b6e420560 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function.py @@ -0,0 +1,16 @@ +import abc + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.function import Function +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) + + +class StatesFunction(Function, abc.ABC): + name: StatesFunctionName + + def __init__(self, states_name: StatesFunctionName, argument_list: ArgumentList): + super().__init__(name=states_name, argument_list=argument_list) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_array.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_array.py new file mode 100644 index 0000000000000..5cce091f0fd85 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_array.py @@ -0,0 +1,31 @@ +from typing import Any + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class StatesFunctionArray(StatesFunction): + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.Array), + argument_list=argument_list, + ) + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + values: list[Any] = list() + for _ in range(self.argument_list.size): + values.append(env.stack.pop()) + values.reverse() + env.stack.append(values) diff --git 
a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_format.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_format.py new file mode 100644 index 0000000000000..8b71a07fbd122 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_format.py @@ -0,0 +1,56 @@ +from typing import Any, Final + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, + ArgumentLiteral, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class StatesFunctionFormat(StatesFunction): + _DELIMITER: Final[str] = "{}" + + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.Format), + argument_list=argument_list, + ) + if argument_list.size == 0: + raise ValueError( + f"Expected at least 1 argument for function type '{type(self)}', but got: '{argument_list}'." + ) + first_argument = argument_list.arguments[0] + if not ( + isinstance(first_argument, ArgumentLiteral) + and isinstance(first_argument.definition_value, str) + ): + raise ValueError( + f"Expected the first argument for function type '{type(self)}' to be a string, but got: '{first_argument}'." + ) + + def _eval_body(self, env: Environment) -> None: + # TODO: investigate behaviour for incorrect number of arguments in string format. 
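+        # Sketch of the intended interpolation, assuming well-formed arguments: the template + # is split on the '{}' delimiter and the remaining values are substituted positionally, + # e.g. 'Hello, my name is {}.' with values ['Arnav'] yields 'Hello, my name is Arnav.'.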
+ self.argument_list.eval(env=env) + + values: list[Any] = list() + for _ in range(self.argument_list.size): + values.append(env.stack.pop()) + string_format: str = values.pop() + values.reverse() + + string_format_parts: list[str] = string_format.split(self._DELIMITER) + string_result: str = string_format_parts[0] + for part in string_format_parts[1:]: + # Interpolate positionally; substitute the empty string if arguments run out. + string_result += values.pop(0) if values else "" + string_result += part + + env.stack.append(string_result) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_json_to_string.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_json_to_string.py new file mode 100644 index 0000000000000..f2a29724dad80 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_json_to_string.py @@ -0,0 +1,33 @@ +import json + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class StatesFunctionJsonToString(StatesFunction): + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.JsonToString), + argument_list=argument_list, + ) + if argument_list.size != 1: + raise ValueError( + f"Expected 1 argument for function type '{type(self)}', but got: '{argument_list}'."
+ ) + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + json_obj: json = env.stack.pop() + json_string: str = json.dumps(json_obj) + env.stack.append(json_string) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_string_to_json.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_string_to_json.py new file mode 100644 index 0000000000000..1dde28d4257e1 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_string_to_json.py @@ -0,0 +1,33 @@ +import json + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class StatesFunctionStringToJson(StatesFunction): + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.StringToJson), + argument_list=argument_list, + ) + if argument_list.size != 1: + raise ValueError( + f"Expected 1 argument for function type '{type(self)}', but got: '{argument_list}'." + ) + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + string_json: str = env.stack.pop() + json_obj: json = json.loads(string_json) + env.stack.append(json_obj) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_uuid.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_uuid.py new file mode 100644 index 0000000000000..34b23541e0b0a --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/states_function_uuid.py @@ -0,0 +1,29 @@ +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.utils.strings import long_uid + + +class StatesFunctionUUID(StatesFunction): + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.UUID), + argument_list=argument_list, + ) + if argument_list.size != 0: + raise ValueError( + f"Expected no arguments for function type '{type(self)}', but got: '{argument_list}'." 
+ ) + + def _eval_body(self, env: Environment) -> None: + env.stack.append(long_uid()) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/string_operations/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/string_operations/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/string_operations/string_split.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/string_operations/string_split.py new file mode 100644 index 0000000000000..a1187e9aa4465 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/string_operations/string_split.py @@ -0,0 +1,69 @@ +import re + +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class StringSplit(StatesFunction): + # Splits a string into an array of values. + # + # For example: + # With input + # { + # "inputString": "This.is+a,test=string", + # "splitter": ".+,=" + # } + # + # The call + # { + # "myStringArray.$": "States.StringSplit($.inputString, $.splitter)" + # } + # + # Returns + # {"myStringArray": [ + # "This", + # "is", + # "a", + # "test", + # "string" + # ]} + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.StringSplit), + argument_list=argument_list, + ) + if argument_list.size != 2: + raise ValueError( + f"Expected 2 arguments for function type '{type(self)}', but got: '{argument_list}'." + ) + + def _eval_body(self, env: Environment) -> None: + self.argument_list.eval(env=env) + args = env.stack.pop() + + del_chars = args.pop() + if not isinstance(del_chars, str): + raise ValueError( + f"Expected string value as delimiting characters, but got '{del_chars}'." 
+ ) + + string = args.pop() + if not isinstance(string, str): + raise ValueError(f"Expected string value, but got '{string}'.") + + pattern = "|".join(re.escape(c) for c in del_chars) + + parts = re.split(pattern, string) + parts_clean = list(filter(bool, parts)) + env.stack.append(parts_clean) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/unique_id_generation/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/unique_id_generation/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/unique_id_generation/uuid.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/unique_id_generation/uuid.py new file mode 100644 index 0000000000000..1a0d6a75f7b09 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/function/statesfunction/unique_id_generation/uuid.py @@ -0,0 +1,29 @@ +from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import ( + ArgumentList, +) +from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import ( + StatesFunction, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import ( + StatesFunctionName, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.utils.strings import long_uid + + +class UUID(StatesFunction): + def __init__(self, argument_list: ArgumentList): + super().__init__( + states_name=StatesFunctionName(function_type=StatesFunctionNameType.UUID), + argument_list=argument_list, + ) + if argument_list.size != 0: + raise ValueError( + f"Expected no arguments for function type '{type(self)}', but got: '{argument_list}'."
+ ) + + def _eval_body(self, env: Environment) -> None: + env.stack.append(long_uid()) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/custom_function_name.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/custom_function_name.py new file mode 100644 index 0000000000000..ba92c7a5837bf --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/custom_function_name.py @@ -0,0 +1,8 @@ +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.function_name import ( + FunctionName, +) + + +class CustomFunctionName(FunctionName): + def __init__(self, name: str): + super().__init__(name=name) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/function_name.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/function_name.py new file mode 100644 index 0000000000000..c90af50033a85 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/function_name.py @@ -0,0 +1,10 @@ +import abc + +from localstack.services.stepfunctions.asl.component.component import Component + + +class FunctionName(Component, abc.ABC): + name: str + + def __init__(self, name: str): + self.name = name diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/state_function_name_types.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/state_function_name_types.py new file mode 100644 index 0000000000000..3c4b142c15ef9 --- /dev/null +++
b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/state_function_name_types.py @@ -0,0 +1,27 @@ +from enum import Enum + +from localstack.services.stepfunctions.asl.antlr.runtime.ASLIntrinsicLexer import ASLIntrinsicLexer + + +class StatesFunctionNameType(Enum): + Format = ASLIntrinsicLexer.Format + StringToJson = ASLIntrinsicLexer.StringToJson + JsonToString = ASLIntrinsicLexer.JsonToString + Array = ASLIntrinsicLexer.Array + ArrayPartition = ASLIntrinsicLexer.ArrayPartition + ArrayContains = ASLIntrinsicLexer.ArrayContains + ArrayRange = ASLIntrinsicLexer.ArrayRange + ArrayGetItem = ASLIntrinsicLexer.ArrayGetItem + ArrayLength = ASLIntrinsicLexer.ArrayLength + ArrayUnique = ASLIntrinsicLexer.ArrayUnique + Base64Encode = ASLIntrinsicLexer.Base64Encode + Base64Decode = ASLIntrinsicLexer.Base64Decode + Hash = ASLIntrinsicLexer.Hash + JsonMerge = ASLIntrinsicLexer.JsonMerge + MathRandom = ASLIntrinsicLexer.MathRandom + MathAdd = ASLIntrinsicLexer.MathAdd + StringSplit = ASLIntrinsicLexer.StringSplit + UUID = ASLIntrinsicLexer.UUID + + def name(self) -> str: + return ASLIntrinsicLexer.symbolicNames[self.value][1:-1] diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/states_function_name.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/states_function_name.py new file mode 100644 index 0000000000000..c9623f8c0c112 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/functionname/states_function_name.py @@ -0,0 +1,12 @@ +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.function_name import ( + FunctionName, +) +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import ( + StatesFunctionNameType, +) + + +class StatesFunctionName(FunctionName): + def __init__(self, function_type: StatesFunctionNameType): + super().__init__(name=function_type.name()) + self.function_type: StatesFunctionNameType = function_type diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/jsonata.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/jsonata.py new file mode 100644 index 0000000000000..8602aed713e63 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/jsonata.py @@ -0,0 +1,85 @@ +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.jsonata.jsonata import ( + VariableDeclarations, + VariableReference, +) + +_VARIABLE_REFERENCE_PARTITION: Final[VariableReference] = "$partition" +_DECLARATION_PARTITION: Final[str] = """ +$partition:=function($array,$chunk_size){ + $chunk_size=0?null: + $chunk_size>=$count($array)?[[$array]]: + $map( + [0..$floor($count($array)/$chunk_size)-(1-$count($array)%$chunk_size)], + function($i){ + $filter($array,function($v,$index){ + $index>=$i*$chunk_size and $index<($i+1)*$chunk_size + }) + } + ) +}; +""".replace("\n", "") + +_VARIABLE_REFERENCE_RANGE: Final[VariableReference] = "$range" +_DECLARATION_RANGE: Final[str] = """ +$range:=function($first,$last,$step){ + $first>$last and $step>0?[]: + $first<$last and $step<0?[]: + $map([0..$floor(($last-$first)/$step)],function($i){ + $first+$i*$step + }) +}; +""".replace("\n", "") + +# TODO: add support for $hash. 
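+# Unsupported intrinsics below follow a placeholder pattern: the variable reference is +# bound to a JSONata lambda that evaluates to an explanatory string rather than a real +# implementation, so expressions that reference it still parse and evaluate.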
+_VARIABLE_REFERENCE_HASH: Final[VariableReference] = "$hash" +_DECLARATION_HASH: Final[str] = """ +$hash:=function($value,$algo){ + "Function $hash is currently not supported" +}; +""".replace("\n", "") + +_VARIABLE_REFERENCE_RANDOMSEEDED: Final[VariableReference] = "$randomSeeded" +_DECLARATION_RANDOMSEEDED: Final[str] = """ +$randomSeeded:=function($seed){ + ($seed*9301+49297)%233280/233280 +}; +""" + +# TODO: add support for $uuid +_VARIABLE_REFERENCE_UUID: Final[VariableReference] = "$uuid" +_DECLARATION_UUID: Final[str] = """ +$uuid:=function(){ + "Function $uuid is currently not supported" +}; +""" + +_VARIABLE_REFERENCE_PARSE: Final[VariableReference] = "$parse" +_DECLARATION_PARSE: Final[str] = """ +$parse:=function($v){ + $eval($v) +}; +""" + +_DECLARATION_BY_VARIABLE_REFERENCE: Final[dict[VariableReference, str]] = { + _VARIABLE_REFERENCE_PARTITION: _DECLARATION_PARTITION, + _VARIABLE_REFERENCE_RANGE: _DECLARATION_RANGE, + _VARIABLE_REFERENCE_HASH: _DECLARATION_HASH, + _VARIABLE_REFERENCE_RANDOMSEEDED: _DECLARATION_RANDOMSEEDED, + _VARIABLE_REFERENCE_UUID: _DECLARATION_UUID, + _VARIABLE_REFERENCE_PARSE: _DECLARATION_PARSE, +} + + +def get_intrinsic_functions_declarations( + variable_references: set[VariableReference], +) -> VariableDeclarations: + declarations: list[str] = list() + for variable_reference in variable_references: + declaration: Optional[VariableDeclarations] = _DECLARATION_BY_VARIABLE_REFERENCE.get( + variable_reference + ) + if declaration: + declarations.append(declaration) + return "".join(declarations) diff --git a/localstack/services/stepfunctions/asl/component/intrinsic/member.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/member.py similarity index 91% rename from localstack/services/stepfunctions/asl/component/intrinsic/member.py rename to localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/member.py index ab8948e2392ce..2e66657a2b59f 100644 --- a/localstack/services/stepfunctions/asl/component/intrinsic/member.py +++ b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/member.py @@ -3,8 +3,7 @@ from localstack.services.stepfunctions.asl.component.intrinsic.component import Component -class Member(Component): - ... +class Member(Component): ...
class IdentifiedMember(Member): diff --git a/localstack/services/stepfunctions/asl/component/intrinsic/member_access.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/member_access.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/intrinsic/member_access.py rename to localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/member_access.py diff --git a/localstack/services/stepfunctions/asl/component/intrinsic/program.py b/localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/program.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/intrinsic/program.py rename to localstack-core/localstack/services/stepfunctions/asl/component/intrinsic/program.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/program/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/program/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/program/program.py b/localstack-core/localstack/services/stepfunctions/asl/component/program/program.py new file mode 100644 index 0000000000000..e86a5cd076620 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/program/program.py @@ -0,0 +1,152 @@ +import logging +import threading +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import ( + ExecutionAbortedEventDetails, + ExecutionFailedEventDetails, + ExecutionSucceededEventDetails, + ExecutionTimedOutEventDetails, + HistoryEventExecutionDataDetails, + HistoryEventType, +) +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt +from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage +from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import TimeoutSeconds +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.component.program.states import States +from localstack.services.stepfunctions.asl.component.program.version import Version +from localstack.services.stepfunctions.asl.component.state.state import CommonStateField +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.eval.program_state import ( + ProgramEnded, + ProgramError, + ProgramState, + ProgramStopped, + ProgramTimedOut, +) +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.utils.collections import select_from_typed_dict +from localstack.utils.threads import TMP_THREADS + +LOG = logging.getLogger(__name__) + + +class Program(EvalComponent): + query_language: Final[QueryLanguage] + start_at: Final[StartAt] + states: Final[States] + timeout_seconds: Final[Optional[TimeoutSeconds]] + comment: 
Final[Optional[Comment]] + version: Final[Optional[Version]] + + def __init__( + self, + query_language: QueryLanguage, + start_at: StartAt, + states: States, + timeout_seconds: Optional[TimeoutSeconds], + comment: Optional[Comment] = None, + version: Optional[Version] = None, + ): + self.query_language = query_language + self.start_at = start_at + self.states = states + self.timeout_seconds = timeout_seconds + self.comment = comment + self.version = version + + def _get_state(self, state_name: str) -> CommonStateField: + state: Optional[CommonStateField] = self.states.states.get(state_name, None) + if state is None: + raise ValueError(f"No such state '{state_name}'.") + return state + + def eval(self, env: Environment) -> None: + timeout = self.timeout_seconds.timeout_seconds if self.timeout_seconds else None + env.next_state_name = self.start_at.start_at_name + worker_thread = threading.Thread(target=super().eval, args=(env,), daemon=True) + TMP_THREADS.append(worker_thread) + worker_thread.start() + worker_thread.join(timeout=timeout) + is_timeout = worker_thread.is_alive() + if is_timeout: + env.set_timed_out() + + def _eval_body(self, env: Environment) -> None: + try: + while env.is_running(): + next_state: CommonStateField = self._get_state(env.next_state_name) + next_state.eval(env) + # Garbage collect hanging values added by this last state. + env.stack.clear() + env.heap.clear() + except FailureEventException as ex: + env.set_error(error=ex.get_execution_failed_event_details()) + except Exception as ex: + cause = f"{type(ex).__name__}({str(ex)})" + LOG.error("Stepfunctions computation ended with exception '%s'.", cause) + env.set_error( + ExecutionFailedEventDetails( + error=StatesErrorName(typ=StatesErrorNameType.StatesRuntime).error_name, + cause=cause, + ) + ) + + # If the program is evaluating within a frame, it is not allowed to produce program termination states. + if env.is_frame(): + return + + program_state: ProgramState = env.program_state() + if isinstance(program_state, ProgramError): + exec_failed_event_details = select_from_typed_dict( + typed_dict=ExecutionFailedEventDetails, obj=program_state.error or dict() + ) + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ExecutionFailed, + event_details=EventDetails(executionFailedEventDetails=exec_failed_event_details), + ) + elif isinstance(program_state, ProgramStopped): + env.event_history_context.source_event_id = 0 + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ExecutionAborted, + event_details=EventDetails( + executionAbortedEventDetails=ExecutionAbortedEventDetails( + error=program_state.error, cause=program_state.cause + ) + ), + ) + elif isinstance(program_state, ProgramTimedOut): + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ExecutionTimedOut, + event_details=EventDetails( + executionTimedOutEventDetails=ExecutionTimedOutEventDetails() + ), + ) + elif isinstance(program_state, ProgramEnded): + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ExecutionSucceeded, + event_details=EventDetails( + executionSucceededEventDetails=ExecutionSucceededEventDetails( + output=to_json_str(env.states.get_input(), separators=(",", ":")), + outputDetails=HistoryEventExecutionDataDetails( + truncated=False # Always False for api calls.
+ ), + ) + ), + ) diff --git a/localstack/services/stepfunctions/asl/component/states.py b/localstack-core/localstack/services/stepfunctions/asl/component/program/states.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/states.py rename to localstack-core/localstack/services/stepfunctions/asl/component/program/states.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/program/version.py b/localstack-core/localstack/services/stepfunctions/asl/component/program/version.py new file mode 100644 index 0000000000000..d951a69a89acf --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/program/version.py @@ -0,0 +1,17 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.component import Component + + +class Version(Component): + _SUPPORTED_VERSIONS: Final[set[str]] = {"1.0"} + + version: Final[str] + + def __init__(self, version: str): + if version not in self._SUPPORTED_VERSIONS: + raise ValueError( + f"Version value '{version}' is not accepted. Supported Versions: {list(self._SUPPORTED_VERSIONS)}" + ) + + self.version = version diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state.py new file mode 100644 index 0000000000000..7e7004b27e31d --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state.py @@ -0,0 +1,260 @@ +from __future__ import annotations + +import abc +import datetime +import json +import logging +from abc import ABC +from typing import Final, Optional, Union + +from localstack.aws.api.stepfunctions import ( + ExecutionFailedEventDetails, + HistoryEventExecutionDataDetails, + HistoryEventType, + StateEnteredEventDetails, + StateExitedEventDetails, + TaskFailedEventDetails, +) +from localstack.services.stepfunctions.asl.component.common.assign.assign_decl import AssignDecl +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.flow.end import End +from localstack.services.stepfunctions.asl.component.common.flow.next import Next +from localstack.services.stepfunctions.asl.component.common.outputdecl import Output +from localstack.services.stepfunctions.asl.component.common.path.input_path import ( + InputPath, +) +from localstack.services.stepfunctions.asl.component.common.path.output_path import OutputPath +from localstack.services.stepfunctions.asl.component.common.query_language import ( + QueryLanguage, + QueryLanguageMode, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + JSONPATH_ROOT_PATH, + StringJsonPath, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from 
localstack.services.stepfunctions.asl.component.state.state_continue_with import ( + ContinueWith, + ContinueWithEnd, + ContinueWithNext, +) +from localstack.services.stepfunctions.asl.component.state.state_props import StateProps +from localstack.services.stepfunctions.asl.component.state.state_type import StateType +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.eval.program_state import ProgramRunning +from localstack.services.stepfunctions.asl.eval.states import StateData +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.services.stepfunctions.asl.utils.json_path import NoSuchJsonPathError +from localstack.services.stepfunctions.quotas import is_within_size_quota + +LOG = logging.getLogger(__name__) + + +class CommonStateField(EvalComponent, ABC): + name: str + + query_language: QueryLanguage + + # The state's type. + state_type: StateType + + # There can be any number of terminal states per state machine. Only one of Next or End can + # be used in a state. Some state types, such as Choice, don't support or use the End field. + continue_with: ContinueWith + + # Holds a human-readable description of the state. + comment: Optional[Comment] + + # A path that selects a portion of the state's input to be passed to the state's state_task for processing. + # If omitted, it has the value $ which designates the entire input. + input_path: Optional[InputPath] + + # A path that selects a portion of the state's output to be passed to the next state. + # If omitted, it has the value $ which designates the entire output. + output_path: Optional[OutputPath] + + assign_decl: Optional[AssignDecl] + + output: Optional[Output] + + state_entered_event_type: Final[HistoryEventType] + state_exited_event_type: Final[Optional[HistoryEventType]] + + def __init__( + self, + state_entered_event_type: HistoryEventType, + state_exited_event_type: Optional[HistoryEventType], + ): + self.state_entered_event_type = state_entered_event_type + self.state_exited_event_type = state_exited_event_type + + def from_state_props(self, state_props: StateProps) -> None: + self.name = state_props.name + self.query_language = state_props.get(QueryLanguage) or QueryLanguage() + self.state_type = state_props.get(StateType) + self.continue_with = ( + ContinueWithEnd() if state_props.get(End) else ContinueWithNext(state_props.get(Next)) + ) + self.comment = state_props.get(Comment) + self.assign_decl = state_props.get(AssignDecl) + # JSONPath sub-productions. + if self.query_language.query_language_mode == QueryLanguageMode.JSONPath: + self.input_path = state_props.get(InputPath) or InputPath( + StringJsonPath(JSONPATH_ROOT_PATH) + ) + self.output_path = state_props.get(OutputPath) or OutputPath( + StringJsonPath(JSONPATH_ROOT_PATH) + ) + self.output = None + # JSONata sub-productions. + else: + self.input_path = None + self.output_path = None + self.output = state_props.get(Output) + + def _set_next(self, env: Environment) -> None: + if env.next_state_name != self.name: + # Next was already overridden. 
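+            # (for example, a Choice state's matched rule may have already set + # env.next_state_name during this state's evaluation)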
+ return + + if isinstance(self.continue_with, ContinueWithNext): + env.next_state_name = self.continue_with.next_state.name + elif isinstance(self.continue_with, ContinueWithEnd): # This includes ContinueWithSuccess + env.set_ended() + else: + LOG.error("Could not handle ContinueWith type of '%s'.", type(self.continue_with)) + + def _is_language_query_jsonpath(self) -> bool: + return self.query_language.query_language_mode == QueryLanguageMode.JSONPath + + def _get_state_entered_event_details(self, env: Environment) -> StateEnteredEventDetails: + return StateEnteredEventDetails( + name=self.name, + input=to_json_str(env.states.get_input(), separators=(",", ":")), + inputDetails=HistoryEventExecutionDataDetails( + truncated=False # Always False for api calls. + ), + ) + + def _get_state_exited_event_details(self, env: Environment) -> StateExitedEventDetails: + event_details = StateExitedEventDetails( + name=self.name, + output=to_json_str(env.states.get_input(), separators=(",", ":")), + outputDetails=HistoryEventExecutionDataDetails( + truncated=False # Always False for api calls. + ), + ) + # TODO add typing when these become available in boto. + assigned_variables = env.variable_store.get_assigned_variables() + env.variable_store.reset_tracing() + if assigned_variables: + event_details["assignedVariables"] = assigned_variables # noqa + event_details["assignedVariablesDetails"] = {"truncated": False} # noqa + return event_details + + def _verify_size_quota(self, env: Environment, value: Union[str, json]) -> None: + is_within: bool = is_within_size_quota(value) + if is_within: + return + error_type = StatesErrorNameType.StatesStatesDataLimitExceeded + cause = ( + f"The state/task '{self.name}' returned a result with a size exceeding " + f"the maximum number of bytes service limit." + ) + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=error_type), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + executionFailedEventDetails=ExecutionFailedEventDetails( + error=error_type.to_name(), + cause=cause, + ) + ), + ) + ) + + def _eval_state_input(self, env: Environment) -> None: + # Filter the input onto the stack. + if self.input_path: + self.input_path.eval(env) + else: + env.stack.append(env.states.get_input()) + + @abc.abstractmethod + def _eval_state(self, env: Environment) -> None: ... + + def _eval_state_output(self, env: Environment) -> None: + # Process output value as next state input. 
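+        # Three cases mirror the input handling above: a JSONPath OutputPath filters the + # current value, a JSONata Output declaration computes it, and otherwise the top of + # the stack becomes the next state's input unchanged.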
+ if self.output_path: + self.output_path.eval(env=env) + elif self.output: + self.output.eval(env=env) + else: + current_output = env.stack.pop() + env.states.reset(input_value=current_output) + + def _eval_body(self, env: Environment) -> None: + env.event_manager.add_event( + context=env.event_history_context, + event_type=self.state_entered_event_type, + event_details=EventDetails( + stateEnteredEventDetails=self._get_state_entered_event_details(env=env) + ), + ) + env.states.context_object.context_object_data["State"] = StateData( + EnteredTime=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), Name=self.name + ) + + self._eval_state_input(env=env) + + try: + self._eval_state(env) + except NoSuchJsonPathError as no_such_json_path_error: + data_json_str = to_json_str(no_such_json_path_error.data) + cause = ( + f"The JSONPath '{no_such_json_path_error.json_path}' specified for the field '{env.next_field_name}' " + f"could not be found in the input '{data_json_str}'" + ) + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=StatesErrorNameType.StatesRuntime.to_name(), cause=cause + ) + ), + ) + ) + + if not isinstance(env.program_state(), ProgramRunning): + return + + self._eval_state_output(env=env) + + self._verify_size_quota(env=env, value=env.states.get_input()) + + self._set_next(env) + + if self.state_exited_event_type is not None: + env.event_manager.add_event( + context=env.event_history_context, + event_type=self.state_exited_event_type, + event_details=EventDetails( + stateExitedEventDetails=self._get_state_exited_event_details(env=env), + ), + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/choice_rule.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/choice_rule.py new file mode 100644 index 0000000000000..a946eec561292 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/choice_rule.py @@ -0,0 +1,43 @@ +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.common.assign.assign_decl import AssignDecl +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.flow.next import Next +from localstack.services.stepfunctions.asl.component.common.outputdecl import Output +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_type import ( + Comparison, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class ChoiceRule(EvalComponent): + comparison: Final[Optional[Comparison]] + next_stmt: Final[Optional[Next]] + comment: Final[Optional[Comment]] + assign: Final[Optional[AssignDecl]] + output: Final[Optional[Output]] + + def __init__( + self, + comparison: Optional[Comparison], + next_stmt: Optional[Next], + comment: Optional[Comment], + assign: Optional[AssignDecl], + output: 
Optional[Output], + ): + self.comparison = comparison + self.next_stmt = next_stmt + self.comment = comment + self.assign = assign + self.output = output + + def _eval_body(self, env: Environment) -> None: + self.comparison.eval(env) + is_condition_true: bool = env.stack[-1] + if not is_condition_true: + return + if self.assign: + self.assign.eval(env=env) + if self.output: + self.output.eval(env=env) diff --git a/localstack/services/stepfunctions/asl/component/state/state_choice/choices_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/choices_decl.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_choice/choices_decl.py rename to localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/choices_decl.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison.py new file mode 100644 index 0000000000000..d70065dc56a92 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +import abc +from enum import Enum +from typing import Any, Final + +from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringJSONata, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.choice_rule import ( + ChoiceRule, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_type import ( + Comparison, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.parse.typed_props import TypedProps + + +class ComparisonCompositeProps(TypedProps): + def add(self, instance: Any) -> None: + inst_type = type(instance) + + if issubclass(inst_type, ComparisonComposite): + super()._add(ComparisonComposite, instance) + return + + super().add(instance) + + +class ConditionJSONataLit(Comparison): + literal: Final[bool] + + def __init__(self, literal: bool): + self.literal = literal + + def _eval_body(self, env: Environment) -> None: + env.stack.append(self.literal) + + +class ConditionStringJSONata(Comparison): + string_jsonata: Final[StringJSONata] + + def __init__(self, string_jsonata: StringJSONata): + super().__init__() + self.string_jsonata = string_jsonata + + def _eval_body(self, env: Environment) -> None: + self.string_jsonata.eval(env=env) + result = env.stack[-1] + if not isinstance(result, bool): + # TODO: add snapshot tests to verify AWS's behaviour about non boolean values. + raise RuntimeError( + f"Expected Condition to produce a boolean result but got result of type '{type(result)}' instead." 
+        )
+
+
+class ComparisonComposite(Comparison, abc.ABC):
+    class ChoiceOp(Enum):
+        And = ASLLexer.AND
+        Or = ASLLexer.OR
+        Not = ASLLexer.NOT
+
+    operator: Final[ComparisonComposite.ChoiceOp]
+
+    def __init__(self, operator: ComparisonComposite.ChoiceOp):
+        self.operator = operator
+
+
+class ComparisonCompositeSingle(ComparisonComposite, abc.ABC):
+    rule: Final[ChoiceRule]
+
+    def __init__(self, operator: ComparisonComposite.ChoiceOp, rule: ChoiceRule):
+        super(ComparisonCompositeSingle, self).__init__(operator=operator)
+        self.rule = rule
+
+
+class ComparisonCompositeMulti(ComparisonComposite, abc.ABC):
+    rules: Final[list[ChoiceRule]]
+
+    def __init__(self, operator: ComparisonComposite.ChoiceOp, rules: list[ChoiceRule]):
+        super(ComparisonCompositeMulti, self).__init__(operator=operator)
+        self.rules = rules
+
+
+class ComparisonCompositeNot(ComparisonCompositeSingle):
+    def __init__(self, rule: ChoiceRule):
+        super(ComparisonCompositeNot, self).__init__(
+            operator=ComparisonComposite.ChoiceOp.Not, rule=rule
+        )
+
+    def _eval_body(self, env: Environment) -> None:
+        self.rule.eval(env)
+        tmp: bool = env.stack.pop()
+        res = tmp is False
+        env.stack.append(res)
+
+
+class ComparisonCompositeAnd(ComparisonCompositeMulti):
+    def __init__(self, rules: list[ChoiceRule]):
+        super(ComparisonCompositeAnd, self).__init__(
+            operator=ComparisonComposite.ChoiceOp.And, rules=rules
+        )
+
+    def _eval_body(self, env: Environment) -> None:
+        res = True
+        for rule in self.rules:
+            rule.eval(env)
+            rule_out = env.stack.pop()
+            if not rule_out:
+                res = False
+                break  # TODO: consider using all() here; it is unclear how rule.eval's side effects would interact.
+        env.stack.append(res)
+
+
+class ComparisonCompositeOr(ComparisonCompositeMulti):
+    def __init__(self, rules: list[ChoiceRule]):
+        super(ComparisonCompositeOr, self).__init__(
+            operator=ComparisonComposite.ChoiceOp.Or, rules=rules
+        )
+
+    def _eval_body(self, env: Environment) -> None:
+        res = False
+        for rule in self.rules:
+            rule.eval(env)
+            rule_out = env.stack.pop()
+            res = res or rule_out
+            if res:
+                break  # TODO: consider using any() here; it is unclear how rule.eval's side effects would interact.
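+        # As in the And case, each rule's boolean outcome has been popped off
+        # the stack; a single aggregate boolean is pushed for the caller to read.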
+        env.stack.append(res)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_func.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_func.py
new file mode 100644
index 0000000000000..cf5d6c9bfb2b1
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_func.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import abc
+from typing import Any, Final
+
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringVariableSample,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_operator_type import (
+    ComparisonOperatorType,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_type import (
+    Comparison,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.factory import (
+    OperatorFactory,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.operator import (
+    Operator,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ComparisonFunc(Comparison, abc.ABC):
+    operator_type: Final[ComparisonOperatorType]
+
+    def __init__(self, operator_type: ComparisonOperatorType):
+        self.operator_type = operator_type
+
+
+class ComparisonFuncValue(ComparisonFunc):
+    value: Final[Any]
+
+    def __init__(self, operator_type: ComparisonOperatorType, value: Any):
+        super().__init__(operator_type=operator_type)
+        self.value = value
+
+    def _eval_body(self, env: Environment) -> None:
+        operator: Operator = OperatorFactory.get(self.operator_type)
+        operator.eval(env=env, value=self.value)
+
+
+class ComparisonFuncStringVariableSample(ComparisonFuncValue):
+    _COMPARISON_FUNC_VAR_VALUE: Final[str] = "$"
+    string_variable_sample: Final[StringVariableSample]
+
+    def __init__(
+        self, operator_type: ComparisonOperatorType, string_variable_sample: StringVariableSample
+    ):
+        super().__init__(operator_type=operator_type, value=self._COMPARISON_FUNC_VAR_VALUE)
+        self.string_variable_sample = string_variable_sample
+
+    def _eval_body(self, env: Environment) -> None:
+        self.string_variable_sample.eval(env=env)
+        super()._eval_body(env=env)
+        # Purge the outcome of the variable sampling from the
+        # stack as operators do not digest the input value.
+ del env.stack[-2] diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_operator_type.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_operator_type.py new file mode 100644 index 0000000000000..3c9722f721ce9 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_operator_type.py @@ -0,0 +1,45 @@ +from enum import Enum + +from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer + + +class ComparisonOperatorType(Enum): + BooleanEquals = ASLLexer.BOOLEANEQUALS + BooleanEqualsPath = ASLLexer.BOOLEANQUALSPATH + IsBoolean = ASLLexer.ISBOOLEAN + IsNull = ASLLexer.ISNULL + IsNumeric = ASLLexer.ISNUMERIC + IsPresent = ASLLexer.ISPRESENT + IsString = ASLLexer.ISSTRING + IsTimestamp = ASLLexer.ISTIMESTAMP + NumericEquals = ASLLexer.NUMERICEQUALS + NumericEqualsPath = ASLLexer.NUMERICEQUALSPATH + NumericGreaterThan = ASLLexer.NUMERICGREATERTHAN + NumericGreaterThanPath = ASLLexer.NUMERICGREATERTHANPATH + NumericGreaterThanEquals = ASLLexer.NUMERICGREATERTHANEQUALS + NumericGreaterThanEqualsPath = ASLLexer.NUMERICGREATERTHANEQUALSPATH + NumericLessThan = ASLLexer.NUMERICLESSTHAN + NumericLessThanPath = ASLLexer.NUMERICLESSTHANPATH + NumericLessThanEquals = ASLLexer.NUMERICLESSTHANEQUALS + NumericLessThanEqualsPath = ASLLexer.NUMERICLESSTHANEQUALSPATH + StringEquals = ASLLexer.STRINGEQUALS + StringEqualsPath = ASLLexer.STRINGEQUALSPATH + StringGreaterThan = ASLLexer.STRINGGREATERTHAN + StringGreaterThanPath = ASLLexer.STRINGGREATERTHANPATH + StringGreaterThanEquals = ASLLexer.STRINGGREATERTHANEQUALS + StringGreaterThanEqualsPath = ASLLexer.STRINGGREATERTHANEQUALSPATH + StringLessThan = ASLLexer.STRINGLESSTHAN + StringLessThanPath = ASLLexer.STRINGLESSTHANPATH + StringLessThanEquals = ASLLexer.STRINGLESSTHANEQUALS + StringLessThanEqualsPath = ASLLexer.STRINGLESSTHANEQUALSPATH + StringMatches = ASLLexer.STRINGMATCHES + TimestampEquals = ASLLexer.TIMESTAMPEQUALS + TimestampEqualsPath = ASLLexer.TIMESTAMPEQUALSPATH + TimestampGreaterThan = ASLLexer.TIMESTAMPGREATERTHAN + TimestampGreaterThanPath = ASLLexer.TIMESTAMPGREATERTHANPATH + TimestampGreaterThanEquals = ASLLexer.TIMESTAMPGREATERTHANEQUALS + TimestampGreaterThanEqualsPath = ASLLexer.TIMESTAMPGREATERTHANEQUALSPATH + TimestampLessThan = ASLLexer.TIMESTAMPLESSTHAN + TimestampLessThanPath = ASLLexer.TIMESTAMPLESSTHANPATH + TimestampLessThanEquals = ASLLexer.TIMESTAMPLESSTHANEQUALS + TimestampLessThanEqualsPath = ASLLexer.TIMESTAMPLESSTHANEQUALSPATH diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_type.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_type.py new file mode 100644 index 0000000000000..e1989a3cc5593 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_type.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from abc import ABC + +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent + + +class Comparison(EvalComponent, ABC): ... 
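+
+# Note: every concrete Comparison implements _eval_body and communicates its
+# boolean outcome by pushing it onto env.stack.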
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_variable.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_variable.py new file mode 100644 index 0000000000000..724fc5de32850 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/comparison_variable.py @@ -0,0 +1,27 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_func import ( + ComparisonFunc, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_type import ( + Comparison, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.variable import ( + Variable, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class ComparisonVariable(Comparison): + variable: Final[Variable] + comparison_function: Final[ComparisonFunc] + + def __init__(self, variable: Variable, func: ComparisonFunc): + self.variable = variable + self.comparison_function = func + + def _eval_body(self, env: Environment) -> None: + variable: Variable = self.variable + variable.eval(env) + comparison_function: ComparisonFunc = self.comparison_function + comparison_function.eval(env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/factory.py new file mode 100644 index 0000000000000..6bad8fb919c45 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/factory.py @@ -0,0 +1,20 @@ +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_operator_type import ( + ComparisonOperatorType, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.implementations.boolean_equals import * # noqa +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.implementations.is_operator import * # noqa +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.implementations.numeric import * # noqa +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.implementations.string_operators import * # noqa +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.implementations.timestamp_operators import * # noqa +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.operator import ( + Operator, +) + + +class OperatorFactory: + @staticmethod + def get(typ: ComparisonOperatorType) -> Operator: + op = Operator.get((str(typ)), raise_if_missing=False) + if op is None: + raise NotImplementedError(f"{typ} is not supported.") + return op diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/__init__.py 
b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/boolean_equals.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/boolean_equals.py new file mode 100644 index 0000000000000..8461ada2c1627 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/boolean_equals.py @@ -0,0 +1,44 @@ +from typing import Any + +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_operator_type import ( + ComparisonOperatorType, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.operator import ( + Operator, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.utils.json_path import extract_json + + +class BooleanEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.BooleanEquals) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = False + if isinstance(variable, bool): + res = variable is value + env.stack.append(res) + + +class BooleanEqualsPath(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.BooleanEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + + inp = env.stack[-1] + comp_value: bool = extract_json(value, inp) + if not isinstance(comp_value, bool): + raise TypeError(f"Expected type bool, but got '{comp_value}' from path '{value}'.") + + res = False + if isinstance(variable, bool): + res = bool(variable) is comp_value + env.stack.append(res) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/is_operator.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/is_operator.py new file mode 100644 index 0000000000000..e998ae1a50a0c --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/is_operator.py @@ -0,0 +1,110 @@ +import datetime +import logging +from typing import Any, Final, Optional + +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_operator_type import ( + ComparisonOperatorType, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.operator import ( + Operator, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.variable import ( + NoSuchVariable, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + +LOG = logging.getLogger(__name__) + + +class IsBoolean(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.IsBoolean) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = isinstance(variable, bool) is value + env.stack.append(res) + + +class IsNull(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.IsNull) + + @staticmethod + def 
eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + is_null = variable is None and not isinstance(variable, NoSuchVariable) + res = is_null is value + env.stack.append(res) + + +class IsNumeric(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.IsNumeric) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = (isinstance(variable, (int, float)) and not isinstance(variable, bool)) is value + env.stack.append(res) + + +class IsPresent(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.IsPresent) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = isinstance(variable, NoSuchVariable) is not value + env.stack.append(res) + + +class IsString(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.IsString) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = isinstance(variable, str) is value + env.stack.append(res) + + +class IsTimestamp(Operator): + # Timestamps are strings which MUST conform to the RFC3339 profile of ISO 8601, with the further restrictions that + # an uppercase "T" character MUST be used to separate date and time, and an uppercase "Z" character MUST be + # present in the absence of a numeric time zone offset, for example "2016-03-14T01:59:00Z". + TIMESTAMP_FORMAT: Final[str] = "%Y-%m-%dT%H:%M:%SZ" + + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.IsTimestamp) + + @staticmethod + def string_to_timestamp(string: str) -> Optional[datetime.datetime]: + try: + return datetime.datetime.strptime(string, IsTimestamp.TIMESTAMP_FORMAT) + except Exception: + return None + + @staticmethod + def is_timestamp(inp: Any) -> bool: + return isinstance(inp, str) and IsTimestamp.string_to_timestamp(inp) is not None + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + LOG.warning( + "State Choice's 'IsTimestamp' operator is not fully supported for input '%s' and target '%s'.", + variable, + value, + ) + res = IsTimestamp.is_timestamp(variable) is value + env.stack.append(res) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/numeric.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/numeric.py new file mode 100644 index 0000000000000..aee40d4d623a0 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/numeric.py @@ -0,0 +1,179 @@ +from typing import Any + +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_operator_type import ( + ComparisonOperatorType, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.operator import ( + Operator, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.utils.json_path import extract_json + + +def _is_numeric(variable: Any) -> bool: + return isinstance(variable, (int, float)) and not isinstance(variable, bool) + + +class NumericEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> 
bool: + res = False + if _is_numeric(variable): + res = variable == comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = NumericEquals._compare(variable, value) + env.stack.append(res) + + +class NumericEqualsPath(NumericEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = NumericEquals._compare(variable, comp_value) + env.stack.append(res) + + +class NumericGreaterThan(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericGreaterThan) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if _is_numeric(variable): + res = variable > comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = NumericGreaterThan._compare(variable, value) + env.stack.append(res) + + +class NumericGreaterThanPath(NumericGreaterThan): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericGreaterThanPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = NumericGreaterThanPath._compare(variable, comp_value) + env.stack.append(res) + + +class NumericGreaterThanEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericGreaterThanEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if _is_numeric(variable): + res = variable >= comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = NumericGreaterThanEquals._compare(variable, value) + env.stack.append(res) + + +class NumericGreaterThanEqualsPath(NumericGreaterThanEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericGreaterThanEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = NumericGreaterThanEqualsPath._compare(variable, comp_value) + env.stack.append(res) + + +class NumericLessThan(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericLessThan) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if _is_numeric(variable): + res = variable < comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = NumericLessThan._compare(variable, value) + env.stack.append(res) + + +class NumericLessThanPath(NumericLessThan): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericLessThanPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = NumericLessThanPath._compare(variable, comp_value) + env.stack.append(res) + + +class NumericLessThanEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericLessThanEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + 
bool:
res = False + if _is_numeric(variable): + res = variable <= comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = NumericLessThanEquals._compare(variable, value) + env.stack.append(res) + + +class NumericLessThanEqualsPath(NumericLessThanEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.NumericLessThanEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = NumericLessThanEqualsPath._compare(variable, comp_value) + env.stack.append(res) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/string_operators.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/string_operators.py new file mode 100644 index 0000000000000..e2fdfa714324f --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/string_operators.py @@ -0,0 +1,195 @@ +import fnmatch +from typing import Any + +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_operator_type import ( + ComparisonOperatorType, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.operator import ( + Operator, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.utils.json_path import extract_json + + +class StringEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + res = variable == comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = StringEquals._compare(variable, value) + env.stack.append(res) + + +class StringEqualsPath(StringEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = StringEqualsPath._compare(variable, comp_value) + env.stack.append(res) + + +class StringGreaterThan(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringGreaterThan) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + res = variable > comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = StringGreaterThan._compare(variable, value) + env.stack.append(res) + + +class StringGreaterThanPath(StringGreaterThan): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringGreaterThanPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = StringGreaterThanPath._compare(variable, comp_value) + env.stack.append(res) + + +class StringGreaterThanEquals(Operator): + @staticmethod + def impl_name() -> str: + return 
str(ComparisonOperatorType.StringGreaterThanEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + res = variable >= comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = StringGreaterThanEquals._compare(variable, value) + env.stack.append(res) + + +class StringGreaterThanEqualsPath(StringGreaterThanEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringGreaterThanEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = StringGreaterThanEqualsPath._compare(variable, comp_value) + env.stack.append(res) + + +class StringLessThan(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringLessThan) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + res = variable < comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = StringLessThan._compare(variable, value) + env.stack.append(res) + + +class StringLessThanPath(StringLessThan): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringLessThanPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = StringLessThanPath._compare(variable, comp_value) + env.stack.append(res) + + +class StringLessThanEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringLessThanEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + res = variable <= comparison_value + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = StringLessThanEquals._compare(variable, value) + env.stack.append(res) + + +class StringLessThanEqualsPath(StringLessThanEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringLessThanEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = StringLessThanEqualsPath._compare(variable, comp_value) + env.stack.append(res) + + +class StringMatches(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.StringMatches) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + res = fnmatch.fnmatch(variable, comparison_value) + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = StringMatches._compare(variable, value) + env.stack.append(res) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/timestamp_operators.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/timestamp_operators.py new file mode 100644 index 0000000000000..d1b9b57fc2c2e --- /dev/null +++ 
b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/implementations/timestamp_operators.py @@ -0,0 +1,193 @@ +from typing import Any + +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_operator_type import ( + ComparisonOperatorType, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.implementations.is_operator import ( + IsTimestamp, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.operator.operator import ( + Operator, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.utils.json_path import extract_json + + +class TimestampEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + a = IsTimestamp.string_to_timestamp(variable) + if a is not None: + b = IsTimestamp.string_to_timestamp(comparison_value) + res = a == b + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = TimestampEquals._compare(variable, value) + env.stack.append(res) + + +class TimestampEqualsPath(TimestampEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = TimestampEqualsPath._compare(variable, comp_value) + env.stack.append(res) + + +class TimestampGreaterThan(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampGreaterThan) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + a = IsTimestamp.string_to_timestamp(variable) + if a is not None: + b = IsTimestamp.string_to_timestamp(comparison_value) + res = a > b + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = TimestampGreaterThan._compare(variable, value) + env.stack.append(res) + + +class TimestampGreaterThanPath(TimestampGreaterThan): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampGreaterThanPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = TimestampGreaterThanPath._compare(variable, comp_value) + env.stack.append(res) + + +class TimestampGreaterThanEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampGreaterThanEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + a = IsTimestamp.string_to_timestamp(variable) + if a is not None: + b = IsTimestamp.string_to_timestamp(comparison_value) + res = a >= b + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = TimestampGreaterThanEquals._compare(variable, value) + env.stack.append(res) + + +class TimestampGreaterThanEqualsPath(TimestampGreaterThanEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampGreaterThanEqualsPath) + + 
@staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = TimestampGreaterThanEqualsPath._compare(variable, comp_value) + env.stack.append(res) + + +class TimestampLessThan(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampLessThan) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + a = IsTimestamp.string_to_timestamp(variable) + if a is not None: + b = IsTimestamp.string_to_timestamp(comparison_value) + res = a < b + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = TimestampLessThan._compare(variable, value) + env.stack.append(res) + + +class TimestampLessThanPath(TimestampLessThan): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampLessThanPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = TimestampLessThanPath._compare(variable, comp_value) + env.stack.append(res) + + +class TimestampLessThanEquals(Operator): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampLessThanEquals) + + @staticmethod + def _compare(variable: Any, comparison_value: Any) -> bool: + res = False + if isinstance(variable, str): + a = IsTimestamp.string_to_timestamp(variable) + if a is not None: + b = IsTimestamp.string_to_timestamp(comparison_value) + res = a <= b + return res + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + res = TimestampLessThanEquals._compare(variable, value) + env.stack.append(res) + + +class TimestampLessThanEqualsPath(TimestampLessThanEquals): + @staticmethod + def impl_name() -> str: + return str(ComparisonOperatorType.TimestampLessThanEqualsPath) + + @staticmethod + def eval(env: Environment, value: Any) -> None: + variable = env.stack.pop() + inp = env.stack[-1] + comp_value = extract_json(value, inp) + res = TimestampLessThanEqualsPath._compare(variable, comp_value) + env.stack.append(res) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/operator.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/operator.py new file mode 100644 index 0000000000000..56c4867fdc6dd --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/operator/operator.py @@ -0,0 +1,12 @@ +import abc +from typing import Any + +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.utils.objects import SubtypesInstanceManager + + +class Operator(abc.ABC, SubtypesInstanceManager): + @staticmethod + @abc.abstractmethod + def eval(env: Environment, value: Any) -> None: + pass diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/variable.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/variable.py new file mode 100644 index 0000000000000..ca49a2bf3bae4 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/comparison/variable.py @@ -0,0 +1,27 @@ +from typing import Final + +from 
localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringSampler, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class NoSuchVariable: + def __init__(self, path: str): + self.path: Final[str] = path + + +class Variable(EvalComponent): + string_sampler: Final[StringSampler] + + def __init__(self, string_sampler: StringSampler): + self.string_sampler = string_sampler + + def _eval_body(self, env: Environment) -> None: + try: + self.string_sampler.eval(env=env) + value = env.stack.pop() + except Exception as ex: + value = NoSuchVariable(f"{self.string_sampler.literal_value}, {ex}") + env.stack.append(value) diff --git a/localstack/services/stepfunctions/asl/component/state/state_choice/default_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/default_decl.py similarity index 100% rename from localstack/services/stepfunctions/asl/component/state/state_choice/default_decl.py rename to localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/default_decl.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/state_choice.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/state_choice.py new file mode 100644 index 0000000000000..99d21029a3fc3 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_choice/state_choice.py @@ -0,0 +1,78 @@ +from typing import Optional + +from localstack.aws.api.stepfunctions import HistoryEventType +from localstack.services.stepfunctions.asl.component.common.flow.end import End +from localstack.services.stepfunctions.asl.component.common.flow.next import Next +from localstack.services.stepfunctions.asl.component.state.state import CommonStateField +from localstack.services.stepfunctions.asl.component.state.state_choice.choices_decl import ( + ChoicesDecl, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.default_decl import ( + DefaultDecl, +) +from localstack.services.stepfunctions.asl.component.state.state_props import StateProps +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class StateChoice(CommonStateField): + choices_decl: ChoicesDecl + default_state: Optional[DefaultDecl] + + def __init__(self): + super(StateChoice, self).__init__( + state_entered_event_type=HistoryEventType.ChoiceStateEntered, + state_exited_event_type=HistoryEventType.ChoiceStateExited, + ) + self.default_state = None + self._next_state_name = None + + def from_state_props(self, state_props: StateProps) -> None: + super(StateChoice, self).from_state_props(state_props) + self.choices_decl = state_props.get(ChoicesDecl) + self.default_state = state_props.get(DefaultDecl) + + if state_props.get(Next) or state_props.get(End): + raise ValueError( + "Choice states don't support the End field. " + "In addition, they use Next only inside their Choices field. " + f"With state '{self}'." + ) + + def _set_next(self, env: Environment) -> None: + pass + + def _eval_state(self, env: Environment) -> None: + for rule in self.choices_decl.rules: + rule.eval(env) + res = env.stack.pop() + if res is True: + if not rule.next_stmt: + raise RuntimeError( + f"Missing Next definition for state_choice rule '{rule}' in choices '{self}'." 
+                )
+            env.stack.append(rule.next_stmt.name)
+            return
+
+        if self.default_state is None:
+            raise RuntimeError(f"No branching option reached in state '{self.name}'.")
+        env.stack.append(self.default_state.state_name)
+
+    def _eval_state_output(self, env: Environment) -> None:
+        next_state_name: str = env.stack.pop()
+
+        # No choice rule matched and the default state was selected: evaluate
+        # the state's Assign and Output declarations here.
+        if self.default_state and self.default_state.state_name == next_state_name:
+            if self.assign_decl:
+                self.assign_decl.eval(env=env)
+            if self.output:
+                self.output.eval(env=env)
+
+        # Handle legacy output sequences if in JSONPath mode.
+        if self._is_language_query_jsonpath():
+            if self.output_path:
+                self.output_path.eval(env=env)
+            else:
+                current_output = env.stack.pop()
+                env.states.reset(input_value=current_output)
+
+        env.next_state_name = next_state_name
diff --git a/localstack/services/stepfunctions/asl/component/state/state_continue_with.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_continue_with.py
similarity index 89%
rename from localstack/services/stepfunctions/asl/component/state/state_continue_with.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/state/state_continue_with.py
index c16b45b49a322..4253f10074126 100644
--- a/localstack/services/stepfunctions/asl/component/state/state_continue_with.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_continue_with.py
@@ -3,8 +3,7 @@
 from localstack.services.stepfunctions.asl.component.common.flow.next import Next
 
 
-class ContinueWith(abc.ABC):
-    ...
+class ContinueWith(abc.ABC): ...
 
 
 class ContinueWithEnd(ContinueWith):
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/execute_state.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/execute_state.py
new file mode 100644
index 0000000000000..c32150cb3eb12
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/execute_state.py
@@ -0,0 +1,277 @@
+import abc
+import copy
+import logging
+import threading
+from threading import Thread
+from typing import Any, Optional
+
+from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails
+from localstack.services.stepfunctions.asl.component.common.catch.catch_decl import CatchDecl
+from localstack.services.stepfunctions.asl.component.common.catch.catch_outcome import CatchOutcome
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.component.common.path.result_path import ResultPath
+from localstack.services.stepfunctions.asl.component.common.result_selector import ResultSelector
+from localstack.services.stepfunctions.asl.component.common.retry.retry_decl import RetryDecl
+from localstack.services.stepfunctions.asl.component.common.retry.retry_outcome import RetryOutcome
+from
localstack.services.stepfunctions.asl.component.common.timeouts.heartbeat import ( + Heartbeat, + HeartbeatSeconds, +) +from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import ( + EvalTimeoutError, + Timeout, + TimeoutSeconds, +) +from localstack.services.stepfunctions.asl.component.state.state import CommonStateField +from localstack.services.stepfunctions.asl.component.state.state_props import StateProps +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.utils.common import TMP_THREADS + +LOG = logging.getLogger(__name__) + + +class ExecutionState(CommonStateField, abc.ABC): + def __init__( + self, + state_entered_event_type: HistoryEventType, + state_exited_event_type: Optional[HistoryEventType], + ): + super().__init__( + state_entered_event_type=state_entered_event_type, + state_exited_event_type=state_exited_event_type, + ) + # ResultPath (Optional) + # Specifies where (in the input) to place the results of executing the state_task that's specified in Resource. + # The input is then filtered as specified by the OutputPath field (if present) before being used as the + # state's output. + self.result_path: Optional[ResultPath] = None + + # ResultSelector (Optional) + # Pass a collection of key value pairs, where the values are static or selected from the result. + self.result_selector: Optional[ResultSelector] = None + + # Retry (Optional) + # An array of objects, called Retriers, that define a retry policy if the state encounters runtime errors. + self.retry: Optional[RetryDecl] = None + + # Catch (Optional) + # An array of objects, called Catchers, that define a fallback state. This state is executed if the state + # encounters runtime errors and its retry policy is exhausted or isn't defined. + self.catch: Optional[CatchDecl] = None + + # TimeoutSeconds (Optional) + # If the state_task runs longer than the specified seconds, this state fails with a States.Timeout error name. + # Must be a positive, non-zero integer. If not provided, the default value is 99999999. The count begins after + # the state_task has been started, for example, when ActivityStarted or LambdaFunctionStarted are logged in the + # Execution event history. + # TimeoutSecondsPath (Optional) + # If you want to provide a timeout value dynamically from the state input using a reference path, use + # TimeoutSecondsPath. When resolved, the reference path must select fields whose values are positive integers. + # A Task state cannot include both TimeoutSeconds and TimeoutSecondsPath + # TimeoutSeconds and TimeoutSecondsPath fields are encoded by the timeout type. + self.timeout: Timeout = TimeoutSeconds( + timeout_seconds=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS + ) + + # HeartbeatSeconds (Optional) + # If more time than the specified seconds elapses between heartbeats from the task, this state fails with a + # States.Timeout error name. Must be a positive, non-zero integer less than the number of seconds specified in + # the TimeoutSeconds field. If not provided, the default value is 99999999. For Activities, the count begins + # when GetActivityTask receives a token and ActivityStarted is logged in the Execution event history. + # HeartbeatSecondsPath (Optional) + # If you want to provide a heartbeat value dynamically from the state input using a reference path, use + # HeartbeatSecondsPath. 
When resolved, the reference path must select fields whose values are positive integers.
+        # A Task state cannot include both HeartbeatSeconds and HeartbeatSecondsPath
+        # HeartbeatSeconds and HeartbeatSecondsPath fields are encoded by the Heartbeat type.
+        self.heartbeat: Optional[Heartbeat] = None
+
+    def from_state_props(self, state_props: StateProps) -> None:
+        super().from_state_props(state_props=state_props)
+        self.result_path = state_props.get(ResultPath) or ResultPath(
+            result_path_src=ResultPath.DEFAULT_PATH
+        )
+        self.result_selector = state_props.get(ResultSelector)
+        self.retry = state_props.get(RetryDecl)
+        self.catch = state_props.get(CatchDecl)
+
+        # If provided, the "HeartbeatSeconds" interval MUST be smaller than the "TimeoutSeconds" value.
+        # If not provided, the default value of "TimeoutSeconds" is 60.
+        timeout = state_props.get(Timeout)
+        heartbeat = state_props.get(Heartbeat)
+        if isinstance(timeout, TimeoutSeconds) and isinstance(heartbeat, HeartbeatSeconds):
+            if timeout.timeout_seconds <= heartbeat.heartbeat_seconds:
+                raise RuntimeError(
+                    f"'HeartbeatSeconds' interval MUST be smaller than the 'TimeoutSeconds' value, "
+                    f"got '{timeout.timeout_seconds}' and '{heartbeat.heartbeat_seconds}' respectively."
+                )
+        if heartbeat is not None and timeout is None:
+            timeout = TimeoutSeconds(timeout_seconds=60, is_default=True)
+
+        if timeout is not None:
+            self.timeout = timeout
+        if heartbeat is not None:
+            self.heartbeat = heartbeat
+
+    def _from_error(self, env: Environment, ex: Exception) -> FailureEvent:
+        if isinstance(ex, FailureEventException):
+            return ex.failure_event
+        LOG.warning(
+            "State Task encountered an unhandled exception that led to a States.Runtime error."
+        )
+        return FailureEvent(
+            env=env,
+            error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime),
+            event_type=HistoryEventType.TaskFailed,
+            event_details=EventDetails(
+                taskFailedEventDetails=TaskFailedEventDetails(
+                    error=StatesErrorNameType.StatesRuntime.to_name(),
+                    cause=str(ex),
+                )
+            ),
+        )
+
+    @abc.abstractmethod
+    def _eval_execution(self, env: Environment) -> None: ...
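+    # Subclasses (e.g. the Task, Parallel and Map states) implement
+    # _eval_execution; _evaluate_with_timeout below runs it on a separate frame
+    # and worker thread so that the state's TimeoutSeconds can be enforced.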
+
+    def _handle_retry(self, env: Environment, failure_event: FailureEvent) -> RetryOutcome:
+        env.stack.append(failure_event.error_name)
+        self.retry.eval(env)
+        res: RetryOutcome = env.stack.pop()
+        if res == RetryOutcome.CanRetry:
+            retry_count = env.states.context_object.context_object_data["State"]["RetryCount"]
+            env.states.context_object.context_object_data["State"]["RetryCount"] = retry_count + 1
+        return res
+
+    def _handle_catch(self, env: Environment, failure_event: FailureEvent) -> None:
+        env.stack.append(failure_event)
+        self.catch.eval(env)
+
+    def _handle_uncaught(self, env: Environment, failure_event: FailureEvent) -> None:
+        self._terminate_with_event(env=env, failure_event=failure_event)
+
+    @staticmethod
+    def _terminate_with_event(env: Environment, failure_event: FailureEvent) -> None:
+        raise FailureEventException(failure_event=failure_event)
+
+    def _evaluate_with_timeout(self, env: Environment) -> None:
+        self.timeout.eval(env=env)
+        timeout_seconds: int = env.stack.pop()
+
+        frame: Environment = env.open_frame()
+        frame.states.reset(input_value=env.states.get_input())
+        frame.stack = copy.deepcopy(env.stack)
+        execution_outputs: list[Any] = list()
+        execution_exceptions: list[Optional[Exception]] = [None]
+        terminated_event = threading.Event()
+
+        def _exec_and_notify():
+            try:
+                self._eval_execution(frame)
+                execution_outputs.extend(frame.stack)
+            except Exception as ex:
+                execution_exceptions.append(ex)
+            terminated_event.set()
+
+        thread = Thread(target=_exec_and_notify, daemon=True)
+        TMP_THREADS.append(thread)
+        thread.start()
+
+        finished_on_time: bool = terminated_event.wait(timeout_seconds)
+        frame.set_ended()
+        env.close_frame(frame)
+
+        execution_exception = execution_exceptions.pop()
+        if execution_exception:
+            raise execution_exception
+
+        if not finished_on_time:
+            raise EvalTimeoutError()
+
+        execution_output = execution_outputs.pop()
+        env.stack.append(execution_output)
+
+        if not self._is_language_query_jsonpath():
+            env.states.set_result(execution_output)
+
+        if self.assign_decl:
+            self.assign_decl.eval(env=env)
+
+        if self.result_selector:
+            self.result_selector.eval(env=env)
+
+        if self.result_path:
+            self.result_path.eval(env)
+        else:
+            res = env.stack.pop()
+            env.states.reset(input_value=res)
+
+    @staticmethod
+    def _construct_error_output_value(failure_event: FailureEvent) -> Any:
+        specs_event_details = list(failure_event.event_details.values())
+        if len(specs_event_details) != 1:
+            raise RuntimeError(
+                f"Internal Error: invalid event details declaration in FailureEvent: '{failure_event}'."
+            )
+        spec_event_details: dict = specs_event_details[0]
+        return {
+            # If no cause or error fields are given, AWS binds an empty string; otherwise it attaches the value.
+            "Error": spec_event_details.get("error", ""),
+            "Cause": spec_event_details.get("cause", ""),
+        }
+
+    def _eval_state(self, env: Environment) -> None:
+        # Initialise the retry counter for execution states.
+        env.states.context_object.context_object_data["State"]["RetryCount"] = 0
+
+        # Attempt to evaluate the state's logic until it succeeds, its error is caught, or its retries are exhausted.
+        while env.is_running():
+            try:
+                self._evaluate_with_timeout(env)
+                break
+            except Exception as ex:
+                failure_event: FailureEvent = self._from_error(env=env, ex=ex)
+                env.event_manager.add_event(
+                    context=env.event_history_context,
+                    event_type=failure_event.event_type,
+                    event_details=failure_event.event_details,
+                )
+                error_output = self._construct_error_output_value(failure_event=failure_event)
+                env.states.set_error_output(error_output)
+                env.states.set_result(error_output)
+
+                if self.retry:
+                    retry_outcome: RetryOutcome = self._handle_retry(
+                        env=env, failure_event=failure_event
+                    )
+                    if retry_outcome == RetryOutcome.CanRetry:
+                        continue
+
+                if self.catch:
+                    self._handle_catch(env=env, failure_event=failure_event)
+                    catch_outcome: CatchOutcome = env.stack[-1]
+                    if catch_outcome == CatchOutcome.Caught:
+                        break
+
+                self._handle_uncaught(env=env, failure_event=failure_event)
+
+    def _eval_state_output(self, env: Environment) -> None:
+        # Obtain a reference to the state output.
+        output = env.stack[-1]
+        # CatcherOutputs (i.e. outputs of Catch blocks) are never subject to output normalisers;
+        # the entire value is instead passed as-is as input to the next state, or as the program output.
+        if not isinstance(output, CatchOutcome):
+            super()._eval_state_output(env=env)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/execution_type.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/execution_type.py
new file mode 100644
index 0000000000000..bc4a90718dc65
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/execution_type.py
@@ -0,0 +1,7 @@
+from enum import Enum
+
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer
+
+
+class ExecutionType(Enum):
+    Standard = ASLLexer.STANDARD
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/item_reader_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/item_reader_decl.py
new file mode 100644
index 0000000000000..ed8e325034c56
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/item_reader_decl.py
@@ -0,0 +1,74 @@
+import copy
+from typing import Final, Optional
+
+from localstack.services.stepfunctions.asl.component.common.parargs import Parargs
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.reader_config_decl import (
+    ReaderConfig,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_eval import (
+    ResourceEval,
+)
+from
localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_eval_factory import ( + resource_eval_for, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_output_transformer.resource_output_transformer import ( + ResourceOutputTransformer, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_output_transformer.resource_output_transformer_factory import ( + resource_output_transformer_for, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + Resource, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class ItemReader(EvalComponent): + resource_eval: Final[ResourceEval] + parargs: Final[Optional[Parargs]] + reader_config: Final[Optional[ReaderConfig]] + resource_output_transformer: Optional[ResourceOutputTransformer] + + def __init__( + self, + resource: Resource, + parargs: Optional[Parargs], + reader_config: Optional[ReaderConfig], + ): + self.resource_eval = resource_eval_for(resource=resource) + self.parargs = parargs + self.reader_config = reader_config + + self.resource_output_transformer = None + if self.reader_config: + self.resource_output_transformer = resource_output_transformer_for( + input_type=self.reader_config.input_type + ) + + @property + def resource(self): + return self.resource_eval.resource + + def __str__(self): + class_dict = copy.deepcopy(self.__dict__) + del class_dict["resource_eval"] + class_dict["resource"] = self.resource + return f"({self.__class__.__name__}| {class_dict})" + + def _eval_body(self, env: Environment) -> None: + resource_config = None + if self.reader_config: + self.reader_config.eval(env=env) + resource_config = env.stack.pop() + + if self.parargs: + self.parargs.eval(env=env) + else: + env.stack.append(dict()) + + self.resource_eval.eval_resource(env=env) + + if self.reader_config: + env.stack.append(resource_config) + self.resource_output_transformer.eval(env=env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/csv_header_location.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/csv_header_location.py new file mode 100644 index 0000000000000..38f32316f35bf --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/csv_header_location.py @@ -0,0 +1,18 @@ +import enum +from typing import Final + +from localstack.services.stepfunctions.asl.component.component import Component + + +class CSVHeaderLocationValue(enum.Enum): + FIRST_ROW = "FIRST_ROW" + GIVEN = "GIVEN" + + +class CSVHeaderLocation(Component): + csv_header_location_value: Final[CSVHeaderLocationValue] + + def __init__(self, csv_header_location_value: str): + self.csv_header_location_value = CSVHeaderLocationValue( + csv_header_location_value + ) # Pass error upstream. 
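Taken together, ItemReader._eval_body above follows a small stack protocol: the reader config output is popped and held aside, the Parargs (or an empty dict) are left on the stack as call parameters for eval_resource, and the transformer finally consumes the resource payload together with the re-pushed config. A minimal sketch of that ordering, with a plain list standing in for the Environment stack (the dict values and bucket name are illustrative only, not real LocalStack objects):

    stack: list = []

    # ReaderConfig.eval pushes its ReaderConfigOutput; ItemReader pops and holds it.
    stack.append({"InputType": "CSV", "CSVHeaderLocation": "FIRST_ROW", "MaxItemsValue": 100})
    resource_config = stack.pop()

    # Parargs (or an empty dict) are left on the stack as the S3 call parameters.
    stack.append({"Bucket": "example-bucket", "Key": "items.csv"})

    # eval_resource pops the parameters and pushes the raw payload (e.g. a GetObject body).
    parameters = stack.pop()
    stack.append("a,b\n1,2\n3,4\n")

    # Finally the reader config is re-pushed, so the transformer pops
    # [reader config, resource payload] in that order.
    stack.append(resource_config)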
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/csv_headers.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/csv_headers.py new file mode 100644 index 0000000000000..1f6c61fadd150 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/csv_headers.py @@ -0,0 +1,10 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.component import Component + + +class CSVHeaders(Component): + header_names: Final[list[str]] + + def __init__(self, header_names: list[str]): + self.header_names = header_names diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/input_type.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/input_type.py new file mode 100644 index 0000000000000..bfe4806ddcead --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/input_type.py @@ -0,0 +1,27 @@ +import enum +from typing import Final + +from localstack.services.stepfunctions.asl.component.component import Component + + +class InputTypeValue(enum.Enum): + """ + Represents the supported InputType values for ItemReader configurations. + """ + + # TODO: add support for MANIFEST InputTypeValue. + CSV = "CSV" + JSON = "JSON" + + +class InputType(Component): + """ + "InputType" Specifies the type of Amazon S3 data source, such as CSV file, object, JSON file, or an + Amazon S3 inventory list. In Workflow Studio, you can select an input type from the Amazon S3 item + source dropdown list under the Item source field. + """ + + input_type_value: Final[InputTypeValue] + + def __init__(self, input_type: str): + self.input_type_value = InputTypeValue(input_type) # Pass error upstream. 
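As the "Pass error upstream." comments note, both CSVHeaderLocation and InputType validate eagerly in their constructors: an unsupported value raises ValueError at parse time rather than at evaluation time. A quick illustration of this standard enum behaviour, mirroring the classes above:

    import enum

    class InputTypeValue(enum.Enum):
        CSV = "CSV"
        JSON = "JSON"

    InputTypeValue("CSV")       # InputTypeValue.CSV
    InputTypeValue("MANIFEST")  # raises ValueError: MANIFEST is noted above as not yet supported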
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/max_items_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/max_items_decl.py
new file mode 100644
index 0000000000000..6c2e109d75f76
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/max_items_decl.py
@@ -0,0 +1,130 @@
+import abc
+from typing import Final
+
+from localstack.aws.api.stepfunctions import ExecutionFailedEventDetails, HistoryEventType
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringJSONata,
+    StringSampler,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+
+
+class MaxItemsDecl(EvalComponent, abc.ABC):
+    """
+    "MaxItems": Limits the number of data items passed to the Map state. For example, suppose that you provide a
+    CSV file that contains 1000 rows and specify a limit of 100. Then, the interpreter passes only 100 rows to the
+    Map state. The Map state processes items in sequential order, starting after the header row.
+    Currently, you can specify a limit of up to 100,000,000.
+    """
+
+    MAX_VALUE: Final[int] = 100_000_000
+
+    def _clip_value(self, value: int) -> int:
+        if value == 0:
+            return self.MAX_VALUE
+        return min(value, self.MAX_VALUE)
+
+    @abc.abstractmethod
+    def _get_value(self, env: Environment) -> int: ...
+
+    def _eval_body(self, env: Environment) -> None:
+        max_items: int = self._get_value(env=env)
+        max_items = self._clip_value(max_items)
+        env.stack.append(max_items)
+
+
+class MaxItemsInt(MaxItemsDecl):
+    max_items: Final[int]
+
+    def __init__(self, max_items: int = MaxItemsDecl.MAX_VALUE):
+        if max_items < 0 or max_items > MaxItemsInt.MAX_VALUE:
+            raise ValueError(
+                f"MaxItems value MUST be a non-negative integer "
+                f"no greater than '{MaxItemsInt.MAX_VALUE}', got '{max_items}'."
+            )
+        self.max_items = max_items
+
+    def _get_value(self, env: Environment) -> int:
+        return self.max_items
+
+
+class MaxItemsStringJSONata(MaxItemsDecl):
+    string_jsonata: Final[StringJSONata]
+
+    def __init__(self, string_jsonata: StringJSONata):
+        super().__init__()
+        self.string_jsonata = string_jsonata
+
+    def _get_value(self, env: Environment) -> int:
+        # TODO: add snapshot tests to verify AWS's behaviour with non-integer values.
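+        # Evaluate the JSONata expression, then coerce with int(); a non-numeric result
+        # raises here and propagates, rather than being validated like MaxItemsPath below.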
+        self.string_jsonata.eval(env=env)
+        max_items: int = int(env.stack.pop())
+        return max_items
+
+
+class MaxItemsPath(MaxItemsDecl):
+    string_sampler: Final[StringSampler]
+
+    def __init__(self, string_sampler: StringSampler):
+        self.string_sampler = string_sampler
+
+    def _validate_value(self, env: Environment, value: int) -> None:
+        if not isinstance(value, int):
+            # TODO: Note, this error appears to be validated at an earlier stage in AWS Step Functions, unlike the
+            #  negative integer check that is validated at this exact depth.
+            error_typ = StatesErrorNameType.StatesItemReaderFailed
+            raise FailureEventException(
+                failure_event=FailureEvent(
+                    env=env,
+                    error_name=StatesErrorName(typ=error_typ),
+                    event_type=HistoryEventType.ExecutionFailed,
+                    event_details=EventDetails(
+                        executionFailedEventDetails=ExecutionFailedEventDetails(
+                            error=error_typ.to_name(),
+                            cause=(
+                                f"The MaxItemsPath field refers to value '{value}' "
+                                f"which is not a valid integer: {self.string_sampler.literal_value}"
+                            ),
+                        )
+                    ),
+                )
+            )
+        if value < 0:
+            error_typ = StatesErrorNameType.StatesItemReaderFailed
+            raise FailureEventException(
+                failure_event=FailureEvent(
+                    env=env,
+                    error_name=StatesErrorName(typ=error_typ),
+                    event_type=HistoryEventType.MapRunFailed,
+                    event_details=EventDetails(
+                        executionFailedEventDetails=ExecutionFailedEventDetails(
+                            error=error_typ.to_name(),
+                            cause="field MaxItems must be positive",
+                        )
+                    ),
+                )
+            )
+
+    def _get_value(self, env: Environment) -> int:
+        self.string_sampler.eval(env=env)
+        max_items = env.stack.pop()
+        if isinstance(max_items, str):
+            try:
+                max_items = int(max_items)
+            except Exception:
+                # Pass the incorrect type forward for validation and error reporting.
+                pass
+        self._validate_value(env=env, value=max_items)
+        return max_items
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/reader_config_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/reader_config_decl.py
new file mode 100644
index 0000000000000..fff888b474b5a
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/reader_config_decl.py
@@ -0,0 +1,76 @@
+from typing import Final, Optional, TypedDict
+
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.csv_header_location import (
+    CSVHeaderLocation,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.csv_headers import (
+    CSVHeaders,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.input_type import (
+    InputType,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.max_items_decl import (
+    MaxItemsDecl,
+    MaxItemsInt,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class InputTypeOutput(str):
+    CSV = "CSV"
+    JSON = "JSON"
+
+
+class CSVHeaderLocationOutput(str):
+    FIRST_ROW = "FIRST_ROW"
+    GIVEN = "GIVEN"
+
+
+CSVHeadersOutput = list[str]
+MaxItemsValueOutput = int
+
+
+class ReaderConfigOutput(TypedDict):
+    InputType: InputTypeOutput
+    CSVHeaderLocation: CSVHeaderLocationOutput
+    CSVHeaders: Optional[CSVHeadersOutput]
+    MaxItemsValue: MaxItemsValueOutput
+
+
+class ReaderConfig(EvalComponent):
+    input_type: Final[InputType]
+    max_items_decl: Final[MaxItemsDecl]
+    csv_header_location: Final[CSVHeaderLocation]
+    csv_headers: Optional[CSVHeaders]
+
+    def __init__(
+        self,
+        input_type: InputType,
+        csv_header_location: CSVHeaderLocation,
+        csv_headers: Optional[CSVHeaders],
+        max_items_decl: Optional[MaxItemsDecl],
+    ):
+        self.input_type = input_type
+        self.max_items_decl = max_items_decl or MaxItemsInt()
+        self.csv_header_location = csv_header_location
+        self.csv_headers = csv_headers
+        # TODO: verify behaviours:
+        #  - csv fields are declared with json input type
+        #  - headers are declared with first_row location set
+
+    def _eval_body(self, env: Environment) -> None:
+        self.max_items_decl.eval(env=env)
+        max_items_value: int = env.stack.pop()
+
+        reader_config_output = ReaderConfigOutput(
+            InputType=InputTypeOutput(self.input_type.input_type_value.value),
+            MaxItemsValue=max_items_value,
+        )
+        if self.csv_header_location:
+            reader_config_output["CSVHeaderLocation"] = (
+                self.csv_header_location.csv_header_location_value.value
+            )
+        if self.csv_headers:
+            reader_config_output["CSVHeaders"] = self.csv_headers.header_names
+        env.stack.append(reader_config_output)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/reader_config_props.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/reader_config_props.py
new file mode 100644
index 0000000000000..f2ec88c87bba6
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/reader_config/reader_config_props.py
@@ -0,0 +1,23 @@
+from typing import Any, Final
+
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.max_items_decl import (
+    MaxItemsDecl,
+)
+from localstack.services.stepfunctions.asl.parse.typed_props import TypedProps
+
+
+class ReaderConfigProps(TypedProps):
+    _UNIQUE_SUB_INSTANCES: Final[set[type]] = {MaxItemsDecl}
+    name: str
+
+    def add(self, instance: Any) -> None:
+        inst_type = type(instance)
+
+        # Subclasses of a unique base type are stored under that base type.
+        for typ in self._UNIQUE_SUB_INSTANCES:
+            if issubclass(inst_type, typ):
+                super()._add(typ, instance)
+                return
+
+        # Base case: delegate to the preprocessor.
+        super().add(instance)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval.py
new file mode 100644
index 0000000000000..3de89f49f0c2a
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval.py
@@ -0,0 +1,17 @@
+import abc
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import (
+    ServiceResource,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class ResourceEval(abc.ABC):
+    resource: Final[ServiceResource]
+
+    def __init__(self, resource: ServiceResource):
+        self.resource = resource
+
+    @abc.abstractmethod
+    def eval_resource(self, env: Environment) -> None: ...
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval_factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval_factory.py
new file mode 100644
index 0000000000000..59edc54d125cf
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval_factory.py
@@ -0,0 +1,20 @@
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_eval import (
+    ResourceEval,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_eval_s3 import (
+    ResourceEvalS3,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import (
+    Resource,
+    ServiceResource,
+)
+
+
+def resource_eval_for(resource: Resource) -> ResourceEval:
+    if isinstance(resource, ServiceResource):
+        match resource.service_name:
+            case "s3":
+                return ResourceEvalS3(resource=resource)
+    raise ValueError(
+        f"ItemReader's Resource field must be a states service resource, instead got '{resource.resource_arn}'."
+ ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval_s3.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval_s3.py new file mode 100644 index 0000000000000..262c4f00ca540 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_eval_s3.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from typing import Callable, Final + +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_eval import ( + ResourceEval, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for +from localstack.utils.strings import camel_to_snake_case, to_str + + +class ResourceEvalS3(ResourceEval): + _HANDLER_REFLECTION_PREFIX: Final[str] = "_handle_" + _API_ACTION_HANDLER_TYPE = Callable[[Environment, ResourceRuntimePart, StateCredentials], None] + + @staticmethod + def _get_s3_client( + resource_runtime_part: ResourceRuntimePart, state_credentials: StateCredentials + ): + return boto_client_for( + region=resource_runtime_part.region, service="s3", state_credentials=state_credentials + ) + + @staticmethod + def _handle_get_object( + env: Environment, + resource_runtime_part: ResourceRuntimePart, + state_credentials: StateCredentials, + ) -> None: + s3_client = ResourceEvalS3._get_s3_client( + resource_runtime_part=resource_runtime_part, state_credentials=state_credentials + ) + parameters = env.stack.pop() + response = s3_client.get_object(**parameters) # noqa + content = to_str(response["Body"].read()) + env.stack.append(content) + + @staticmethod + def _handle_list_objects_v2( + env: Environment, + resource_runtime_part: ResourceRuntimePart, + state_credentials: StateCredentials, + ) -> None: + s3_client = ResourceEvalS3._get_s3_client( + resource_runtime_part=resource_runtime_part, state_credentials=state_credentials + ) + parameters = env.stack.pop() + response = s3_client.list_objects_v2(**parameters) # noqa + contents = response["Contents"] + env.stack.append(contents) + + def _get_api_action_handler(self) -> ResourceEvalS3._API_ACTION_HANDLER_TYPE: + api_action = camel_to_snake_case(self.resource.api_action).strip() + handler_name = ResourceEvalS3._HANDLER_REFLECTION_PREFIX + api_action + resolver_handler = getattr(self, handler_name) + if resolver_handler is None: + raise ValueError(f"Unknown s3 action '{api_action}'.") + return resolver_handler + + def eval_resource(self, env: Environment) -> None: + self.resource.eval(env=env) + resource_runtime_part: ResourceRuntimePart = env.stack.pop() + resolver_handler = self._get_api_action_handler() + state_credentials = StateCredentials(role_arn=env.aws_execution_details.role_arn) + resolver_handler(env, resource_runtime_part, state_credentials) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/__init__.py 
b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer.py new file mode 100644 index 0000000000000..5a97d63aebe57 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer.py @@ -0,0 +1,6 @@ +import abc + +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent + + +class ResourceOutputTransformer(EvalComponent, abc.ABC): ... diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_csv.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_csv.py new file mode 100644 index 0000000000000..065aacdbc56fc --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_csv.py @@ -0,0 +1,75 @@ +import csv +import io +import itertools +from collections import OrderedDict + +from localstack.aws.api.stepfunctions import HistoryEventType, MapRunFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.reader_config_decl import ( + CSVHeaderLocationOutput, + ReaderConfigOutput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_output_transformer.resource_output_transformer import ( + ResourceOutputTransformer, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails + + +class ResourceOutputTransformerCSV(ResourceOutputTransformer): + def _eval_body(self, env: Environment) -> None: + reader_config: ReaderConfigOutput = env.stack.pop() + resource_value: str = env.stack.pop() + + csv_file = io.StringIO(resource_value) + csv_reader = csv.reader(csv_file) + + max_items: int = reader_config["MaxItemsValue"] + csv_reader_slice = itertools.islice(csv_reader, max_items) + + match reader_config["CSVHeaderLocation"]: + case CSVHeaderLocationOutput.FIRST_ROW: + headers = next(csv_reader) + case CSVHeaderLocationOutput.GIVEN: + headers = reader_config["CSVHeaders"] + case unknown: + raise ValueError(f"Unknown CSVHeaderLocation value '{unknown}'.") + + if len(set(headers)) < len(headers): 
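+            # Duplicate header names would make the row-to-dict mapping below ambiguous,
+            # so the whole map run is failed instead.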
+ error_name = StatesErrorName(typ=StatesErrorNameType.StatesItemReaderFailed) + failure_event = FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + mapRunFailedEventDetails=MapRunFailedEventDetails( + error=error_name.error_name, + cause="CSV headers cannot contain duplicates.", + ) + ), + ) + raise FailureEventException(failure_event=failure_event) + + transformed_outputs = list() + for row in csv_reader_slice: + transformed_output = dict() + for i, header in enumerate(headers): + transformed_output[header] = row[i] if i < len(row) else "" + transformed_outputs.append( + OrderedDict( + sorted( + transformed_output.items(), key=lambda item: (item[0].isalpha(), item[0]) + ) + ) + ) + + env.stack.append(transformed_outputs) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_factory.py new file mode 100644 index 0000000000000..f26021ccd4e71 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_factory.py @@ -0,0 +1,23 @@ +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.input_type import ( + InputType, + InputTypeValue, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_output_transformer.resource_output_transformer import ( + ResourceOutputTransformer, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_output_transformer.resource_output_transformer_csv import ( + ResourceOutputTransformerCSV, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_output_transformer.resource_output_transformer_json import ( + ResourceOutputTransformerJson, +) + + +def resource_output_transformer_for(input_type: InputType) -> ResourceOutputTransformer: + match input_type.input_type_value: + case InputTypeValue.CSV: + return ResourceOutputTransformerCSV() + case InputTypeValue.JSON: + return ResourceOutputTransformerJson() + case unknown: + raise ValueError(f"Unknown InputType value: '{unknown}'.") diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_json.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_json.py new file mode 100644 index 0000000000000..02769e8f5a6e0 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_reader/resource_eval/resource_output_transformer/resource_output_transformer_json.py @@ -0,0 +1,48 @@ +import json + +from localstack.aws.api.stepfunctions import HistoryEventType, MapRunFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from 
localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.reader_config_decl import ( + ReaderConfigOutput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.resource_eval.resource_output_transformer.resource_output_transformer import ( + ResourceOutputTransformer, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails + + +class ResourceOutputTransformerJson(ResourceOutputTransformer): + def _eval_body(self, env: Environment) -> None: + reader_config: ReaderConfigOutput = env.stack.pop() + resource_value: str = env.stack.pop() + + json_list = json.loads(resource_value) + + if not isinstance(json_list, list): + error_name = StatesErrorName(typ=StatesErrorNameType.StatesItemReaderFailed) + failure_event = FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + mapRunFailedEventDetails=MapRunFailedEventDetails( + error=error_name.error_name, + cause="Attempting to map over non-iterable node.", + ) + ), + ) + raise FailureEventException(failure_event=failure_event) + + max_items = reader_config["MaxItemsValue"] + json_list_slice = json_list[:max_items] + env.stack.append(json_list_slice) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_selector.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_selector.py new file mode 100644 index 0000000000000..a096c004270c8 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/item_selector.py @@ -0,0 +1,17 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value_object import ( + AssignTemplateValueObject, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class ItemSelector(EvalComponent): + template_value_object: Final[AssignTemplateValueObject] + + def __init__(self, template_value_object: AssignTemplateValueObject): + self.template_value_object = template_value_object + + def _eval_body(self, env: Environment) -> None: + self.template_value_object.eval(env=env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/items/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/items/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/items/items.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/items/items.py new file mode 100644 index 0000000000000..79aa25edb2988 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/items/items.py @@ -0,0 +1,90 @@ +import abc +from typing import Final + +from 
localstack.aws.api.stepfunctions import (
+    EvaluationFailedEventDetails,
+    HistoryEventType,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value_array import (
+    JSONataTemplateValueArray,
+)
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringJSONata,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
+
+
+class Items(EvalComponent, abc.ABC): ...
+
+
+class ItemsArray(Items):
+    jsonata_template_value_array: Final[JSONataTemplateValueArray]
+
+    def __init__(self, jsonata_template_value_array: JSONataTemplateValueArray):
+        super().__init__()
+        self.jsonata_template_value_array = jsonata_template_value_array
+
+    def _eval_body(self, env: Environment) -> None:
+        self.jsonata_template_value_array.eval(env=env)
+
+
+class ItemsJSONata(Items):
+    string_jsonata: Final[StringJSONata]
+
+    def __init__(self, string_jsonata: StringJSONata):
+        self.string_jsonata = string_jsonata
+
+    def _eval_body(self, env: Environment) -> None:
+        self.string_jsonata.eval(env=env)
+        items = env.stack[-1]
+        if not isinstance(items, list):
+            # FIXME: If we pass in a 'function' type, the JSONata lib will return a dict and the
+            #  'unsupported result type' branch won't be reached.
+            def _get_jsonata_value_type_pair(items) -> tuple[str, str]:
+                match items:
+                    case None:
+                        return "null", "null"
+                    case int() | float():
+                        if isinstance(items, bool):
+                            return "true" if items else "false", "boolean"
+                        return items, "number"
+                    case str():
+                        return f'"{items}"', "string"
+                    case dict():
+                        return to_json_str(items, separators=(",", ":")), "object"
+
+            expr = self.string_jsonata.literal_value
+            if jsonata_pair := _get_jsonata_value_type_pair(items):
+                jsonata_value, jsonata_type = jsonata_pair
+                error_cause = (
+                    f"The JSONata expression '{expr}' specified for the field 'Items' returned an unexpected result type. "
+                    f"Expected 'array', but was '{jsonata_type}' for value: {jsonata_value}"
+                )
+            else:
+                error_cause = f"The JSONata expression '{expr}' for the field 'Items' returned an unsupported result type."
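+            # Either way, surface a States.QueryEvaluationError as an EvaluationFailed
+            # history event, pointing at the offending 'Items' field.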
+ + error_name = StatesErrorName(typ=StatesErrorNameType.StatesQueryEvaluationError) + failure_event = FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.EvaluationFailed, + event_details=EventDetails( + evaluationFailedEventDetails=EvaluationFailedEventDetails( + error=error_name.error_name, cause=error_cause, location="Items" + ) + ), + ) + raise FailureEventException(failure_event=failure_event) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/distributed_iteration_component.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/distributed_iteration_component.py new file mode 100644 index 0000000000000..841a9db4f453a --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/distributed_iteration_component.py @@ -0,0 +1,190 @@ +from __future__ import annotations + +import abc +import json +from typing import Any, Final, Optional + +from localstack.aws.api.stepfunctions import ( + HistoryEventType, + MapRunFailedEventDetails, + MapRunStartedEventDetails, + MapRunStatus, +) +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt +from localstack.services.stepfunctions.asl.component.common.parargs import Parameters +from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage +from localstack.services.stepfunctions.asl.component.program.program import Program +from localstack.services.stepfunctions.asl.component.program.states import States +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.item_reader_decl import ( + ItemReader, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_selector import ( + ItemSelector, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.inline_iteration_component import ( + InlineIterationComponent, + InlineIterationComponentEvalInput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.map_run_record import ( + MapRunRecord, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.processor_config import ( + ProcessorConfig, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + JobClosed, + JobPool, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.max_concurrency import ( + DEFAULT_MAX_CONCURRENCY_VALUE, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.eval.event.event_manager import ( + EventManager, +) + + +class 
DistributedIterationComponentEvalInput(InlineIterationComponentEvalInput): + item_reader: Final[Optional[ItemReader]] + label: Final[Optional[str]] + map_run_record: Final[MapRunRecord] + + def __init__( + self, + state_name: str, + max_concurrency: int, + input_items: list[json], + parameters: Optional[Parameters], + item_selector: Optional[ItemSelector], + item_reader: Optional[ItemReader], + tolerated_failure_count: int, + tolerated_failure_percentage: float, + label: Optional[str], + map_run_record: MapRunRecord, + ): + super().__init__( + state_name=state_name, + max_concurrency=max_concurrency, + input_items=input_items, + parameters=parameters, + item_selector=item_selector, + ) + self.item_reader = item_reader + self.tolerated_failure_count = tolerated_failure_count + self.tolerated_failure_percentage = tolerated_failure_percentage + self.label = label + self.map_run_record = map_run_record + + +class DistributedIterationComponent(InlineIterationComponent, abc.ABC): + def __init__( + self, + query_language: QueryLanguage, + start_at: StartAt, + states: States, + comment: Comment, + processor_config: ProcessorConfig, + ): + super().__init__( + query_language=query_language, + start_at=start_at, + states=states, + comment=comment, + processor_config=processor_config, + ) + + def _map_run( + self, env: Environment, eval_input: DistributedIterationComponentEvalInput + ) -> None: + input_items: list[json] = env.stack.pop() + + input_item_program: Final[Program] = self._get_iteration_program() + job_pool = JobPool(job_program=input_item_program, job_inputs=input_items) + + # TODO: add watch on map_run_record update event and adjust the number of running workers accordingly. + max_concurrency = eval_input.map_run_record.max_concurrency + workers_number = ( + len(input_items) + if max_concurrency == DEFAULT_MAX_CONCURRENCY_VALUE + else max_concurrency + ) + for _ in range(workers_number): + self._launch_worker(env=env, eval_input=eval_input, job_pool=job_pool) + + job_pool.await_jobs() + + worker_exception: Optional[Exception] = job_pool.get_worker_exception() + if worker_exception is not None: + raise worker_exception + + closed_jobs: list[JobClosed] = job_pool.get_closed_jobs() + outputs: list[Any] = [closed_job.job_output for closed_job in closed_jobs] + + env.stack.append(outputs) + + def _eval_body(self, env: Environment) -> None: + eval_input: DistributedIterationComponentEvalInput = env.stack.pop() + map_run_record = eval_input.map_run_record + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapRunStarted, + event_details=EventDetails( + mapRunStartedEventDetails=MapRunStartedEventDetails( + mapRunArn=map_run_record.map_run_arn + ) + ), + ) + + parent_event_manager = env.event_manager + try: + if eval_input.item_reader: + eval_input.item_reader.eval(env=env) + else: + env.stack.append(eval_input.input_items) + + env.event_manager = EventManager() + self._map_run(env=env, eval_input=eval_input) + + except FailureEventException as failure_event_ex: + map_run_fail_event_detail = MapRunFailedEventDetails() + + maybe_error_cause_pair = failure_event_ex.extract_error_cause_pair() + if maybe_error_cause_pair: + error, cause = maybe_error_cause_pair + if error: + map_run_fail_event_detail["error"] = error + if cause: + map_run_fail_event_detail["cause"] = cause + + env.event_manager = parent_event_manager + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapRunFailed, + 
event_details=EventDetails(mapRunFailedEventDetails=map_run_fail_event_detail), + ) + map_run_record.set_stop(status=MapRunStatus.FAILED) + raise failure_event_ex + + except Exception as ex: + env.event_manager = parent_event_manager + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapRunFailed, + event_details=EventDetails(mapRunFailedEventDetails=MapRunFailedEventDetails()), + ) + map_run_record.set_stop(status=MapRunStatus.FAILED) + raise ex + finally: + env.event_manager = parent_event_manager + + # TODO: review workflow of program stops and map run stops + env.event_manager.add_event( + context=env.event_history_context, event_type=HistoryEventType.MapRunSucceeded + ) + map_run_record.set_stop(status=MapRunStatus.SUCCEEDED) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/inline_iteration_component.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/inline_iteration_component.py new file mode 100644 index 0000000000000..3eb020678142c --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/inline_iteration_component.py @@ -0,0 +1,115 @@ +from __future__ import annotations + +import abc +import json +import threading +from typing import Any, Final, Optional + +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt +from localstack.services.stepfunctions.asl.component.common.parargs import Parameters +from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage +from localstack.services.stepfunctions.asl.component.program.program import Program +from localstack.services.stepfunctions.asl.component.program.states import States +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_selector import ( + ItemSelector, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.processor_config import ( + ProcessorConfig, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_component import ( + IterationComponent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_worker import ( + IterationWorker, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + JobClosed, + JobPool, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.max_concurrency import ( + DEFAULT_MAX_CONCURRENCY_VALUE, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.utils.threads import TMP_THREADS + + +class InlineIterationComponentEvalInput: + state_name: Final[str] + max_concurrency: Final[int] + input_items: Final[list[json]] + parameters: Final[Optional[Parameters]] + item_selector: Final[Optional[ItemSelector]] + + def __init__( + self, + state_name: str, + max_concurrency: int, + input_items: list[json], + parameters: Optional[Parameters], + item_selector: Optional[ItemSelector], + ): + self.state_name = state_name + self.max_concurrency = max_concurrency + self.input_items = input_items + self.parameters = parameters + self.item_selector = item_selector + + +class 
InlineIterationComponent(IterationComponent, abc.ABC): + _processor_config: Final[ProcessorConfig] + + def __init__( + self, + query_language: QueryLanguage, + start_at: StartAt, + states: States, + processor_config: ProcessorConfig, + comment: Optional[Comment], + ): + super().__init__( + query_language=query_language, start_at=start_at, states=states, comment=comment + ) + self._processor_config = processor_config + + @abc.abstractmethod + def _create_worker( + self, env: Environment, eval_input: InlineIterationComponentEvalInput, job_pool: JobPool + ) -> IterationWorker: ... + + def _launch_worker( + self, env: Environment, eval_input: InlineIterationComponentEvalInput, job_pool: JobPool + ) -> IterationWorker: + worker = self._create_worker(env=env, eval_input=eval_input, job_pool=job_pool) + worker_thread = threading.Thread(target=worker.eval, daemon=True) + TMP_THREADS.append(worker_thread) + worker_thread.start() + return worker + + def _eval_body(self, env: Environment) -> None: + eval_input = env.stack.pop() + + max_concurrency: int = eval_input.max_concurrency + input_items: list[json] = eval_input.input_items + + input_item_program: Final[Program] = self._get_iteration_program() + job_pool = JobPool(job_program=input_item_program, job_inputs=eval_input.input_items) + + number_of_workers = ( + len(input_items) + if max_concurrency == DEFAULT_MAX_CONCURRENCY_VALUE + else max_concurrency + ) + for _ in range(number_of_workers): + self._launch_worker(env=env, eval_input=eval_input, job_pool=job_pool) + + job_pool.await_jobs() + + worker_exception: Optional[Exception] = job_pool.get_worker_exception() + if worker_exception is not None: + raise worker_exception + + closed_jobs: list[JobClosed] = job_pool.get_closed_jobs() + outputs: list[Any] = [closed_job.job_output for closed_job in closed_jobs] + + env.stack.append(outputs) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/distributed_item_processor.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/distributed_item_processor.py new file mode 100644 index 0000000000000..bd669394c8e04 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/distributed_item_processor.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt +from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage +from localstack.services.stepfunctions.asl.component.program.states import States +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.distributed_iteration_component import ( + DistributedIterationComponent, + DistributedIterationComponentEvalInput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.distributed_item_processor_worker import ( + DistributedItemProcessorWorker, +) 
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.processor_config import ( + ProcessorConfig, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.parse.typed_props import TypedProps + + +class DistributedItemProcessorEvalInput(DistributedIterationComponentEvalInput): + pass + + +class DistributedItemProcessor(DistributedIterationComponent): + @classmethod + def from_props(cls, props: TypedProps) -> DistributedItemProcessor: + item_processor = cls( + query_language=props.get(QueryLanguage) or QueryLanguage(), + start_at=props.get( + typ=StartAt, + raise_on_missing=ValueError(f"Missing StartAt declaration in props '{props}'."), + ), + states=props.get( + typ=States, + raise_on_missing=ValueError(f"Missing States declaration in props '{props}'."), + ), + comment=props.get(Comment), + processor_config=props.get(ProcessorConfig) or ProcessorConfig(), + ) + return item_processor + + def _create_worker( + self, env: Environment, eval_input: DistributedItemProcessorEvalInput, job_pool: JobPool + ) -> DistributedItemProcessorWorker: + return DistributedItemProcessorWorker( + work_name=eval_input.state_name, + job_pool=job_pool, + env=env, + item_reader=eval_input.item_reader, + parameters=eval_input.parameters, + item_selector=eval_input.item_selector, + map_run_record=eval_input.map_run_record, + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/distributed_item_processor_worker.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/distributed_item_processor_worker.py new file mode 100644 index 0000000000000..bde4c49bdf073 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/distributed_item_processor_worker.py @@ -0,0 +1,156 @@ +import logging +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.parargs import Parameters +from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import EvalTimeoutError +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.item_reader_decl import ( + ItemReader, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_selector import ( + ItemSelector, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.inline_item_processor_worker import ( + InlineItemProcessorWorker, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.map_run_record import ( + MapRunRecord, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + Job, + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.program_state import ( + ProgramError, + ProgramState, + ProgramStopped, +) +from localstack.services.stepfunctions.asl.eval.states import ItemData, MapData +from 
localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +LOG = logging.getLogger(__name__) + + +class DistributedItemProcessorWorker(InlineItemProcessorWorker): + _item_reader: Final[ItemReader] + _map_run_record: MapRunRecord + + def __init__( + self, + work_name: str, + job_pool: JobPool, + env: Environment, + item_reader: ItemReader, + parameters: Optional[Parameters], + item_selector: Optional[ItemSelector], + map_run_record: MapRunRecord, + ): + super().__init__( + work_name=work_name, + job_pool=job_pool, + env=env, + parameters=parameters, + item_selector=item_selector, + ) + self._item_reader = item_reader + self._map_run_record = map_run_record + + def _eval_job(self, env: Environment, job: Job) -> None: + self._map_run_record.item_counter.total.count() + self._map_run_record.item_counter.running.count() + + self._map_run_record.execution_counter.total.count() + self._map_run_record.execution_counter.running.count() + + job_output = None + try: + env.states.context_object.context_object_data["Map"] = MapData( + Item=ItemData(Index=job.job_index, Value=job.job_input) + ) + + env.states.reset(job.job_input) + env.stack.append(env.states.get_input()) + self._eval_input(env_frame=env) + + job.job_program.eval(env) + + # TODO: verify behaviour with all of these scenarios. + end_program_state: ProgramState = env.program_state() + if isinstance(end_program_state, ProgramError): + self._map_run_record.execution_counter.failed.count() + self._map_run_record.item_counter.failed.count() + job_output = None + elif isinstance(end_program_state, ProgramStopped): + self._map_run_record.execution_counter.aborted.count() + self._map_run_record.item_counter.aborted.count() + else: + self._map_run_record.item_counter.succeeded.count() + self._map_run_record.item_counter.results_written.count() + + self._map_run_record.execution_counter.succeeded.count() + self._map_run_record.execution_counter.results_written.count() + self._map_run_record.execution_counter.running.offset(-1) + + job_output = env.states.get_input() + + except EvalTimeoutError as timeout_error: + LOG.debug( + "MapRun worker Timeout Error '%s' for input '%s'.", + timeout_error, + to_json_str(job.job_input), + ) + self._map_run_record.item_counter.timed_out.count() + + except FailureEventException as failure_event_ex: + LOG.debug( + "MapRun worker Event Exception '%s' for input '%s'.", + to_json_str(failure_event_ex.failure_event), + to_json_str(job.job_input), + ) + self._map_run_record.item_counter.failed.count() + + except Exception as exception: + LOG.debug( + "MapRun worker Error '%s' for input '%s'.", + exception, + to_json_str(job.job_input), + ) + self._map_run_record.item_counter.failed.count() + + finally: + self._map_run_record.item_counter.running.offset(-1) + job.job_output = job_output + + def _eval_pool(self, job: Optional[Job], worker_frame: Environment) -> None: + if job is None: + self._env.delete_frame(worker_frame) + return + + # Evaluate the job. + job_frame = worker_frame.open_inner_frame() + self._eval_job(env=job_frame, job=job) + worker_frame.delete_frame(job_frame) + + # Evaluation terminates here due to exception in job, or worker was stopped. + if isinstance(job.job_output, Exception) or self.stopped(): + self._env.delete_frame(worker_frame) + self._job_pool.close_job(job) + return + + next_job: Job = self._job_pool.next_job() + # Iteration will terminate after this job. 
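+        # next_job() yields None once the job pool is exhausted, which ends this worker's recursion.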
+ if next_job is None: + # The frame has to be closed before the job, to ensure the owner environment is correctly updated + # before the evaluation continues; map states await for job termination not workers termination. + self._env.delete_frame(worker_frame) + self._job_pool.close_job(job) + return + + self._job_pool.close_job(job) + self._eval_pool(job=next_job, worker_frame=worker_frame) + + def eval(self) -> None: + self._eval_pool(job=self._job_pool.next_job(), worker_frame=self._env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/inline_item_processor.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/inline_item_processor.py new file mode 100644 index 0000000000000..8b1d4012ddf5c --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/inline_item_processor.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +import logging + +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt +from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage +from localstack.services.stepfunctions.asl.component.program.states import States +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.inline_iteration_component import ( + InlineIterationComponent, + InlineIterationComponentEvalInput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.inline_item_processor_worker import ( + InlineItemProcessorWorker, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.processor_config import ( + ProcessorConfig, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.parse.typed_props import TypedProps + +LOG = logging.getLogger(__name__) + + +class InlineItemProcessorEvalInput(InlineIterationComponentEvalInput): + pass + + +class InlineItemProcessor(InlineIterationComponent): + @classmethod + def from_props(cls, props: TypedProps) -> InlineItemProcessor: + if not props.get(States): + raise ValueError(f"Missing States declaration in props '{props}'.") + if not props.get(StartAt): + raise ValueError(f"Missing StartAt declaration in props '{props}'.") + item_processor = cls( + query_language=props.get(QueryLanguage) or QueryLanguage(), + start_at=props.get(StartAt), + states=props.get(States), + comment=props.get(Comment), + processor_config=props.get(ProcessorConfig) or ProcessorConfig(), + ) + return item_processor + + def _create_worker( + self, env: Environment, eval_input: InlineItemProcessorEvalInput, job_pool: JobPool + ) -> InlineItemProcessorWorker: + return InlineItemProcessorWorker( + work_name=eval_input.state_name, + job_pool=job_pool, + env=env, + item_selector=eval_input.item_selector, + parameters=eval_input.parameters, + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/inline_item_processor_worker.py 
b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/inline_item_processor_worker.py new file mode 100644 index 0000000000000..2562108ebac80 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/inline_item_processor_worker.py @@ -0,0 +1,49 @@ +import logging +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.common.parargs import Parameters +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_selector import ( + ItemSelector, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_worker import ( + IterationWorker, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + +LOG = logging.getLogger(__name__) + + +class InlineItemProcessorWorker(IterationWorker): + _parameters: Final[Optional[Parameters]] + _item_selector: Final[Optional[ItemSelector]] + + def __init__( + self, + work_name: str, + job_pool: JobPool, + env: Environment, + item_selector: Optional[ItemSelector], + parameters: Optional[Parameters], + ): + super().__init__(work_name=work_name, job_pool=job_pool, env=env) + self._item_selector = item_selector + self._parameters = parameters + + def _eval_input(self, env_frame: Environment) -> None: + if not self._parameters and not self._item_selector: + return + + map_state_input = self._env.stack[-1] + env_frame.states.reset(input_value=map_state_input) + env_frame.stack.append(map_state_input) + + if self._item_selector: + self._item_selector.eval(env_frame) + elif self._parameters: + self._parameters.eval(env_frame) + + output_value = env_frame.stack[-1] + env_frame.states.reset(input_value=output_value) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/item_processor_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/item_processor_decl.py new file mode 100644 index 0000000000000..5f9131cb12191 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/item_processor_decl.py @@ -0,0 +1,7 @@ +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_declaration import ( + IterationDecl, +) + + +class ItemProcessorDecl(IterationDecl): + pass diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/item_processor_factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/item_processor_factory.py new file mode 100644 index 0000000000000..b633903959be7 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/item_processor_factory.py @@ -0,0 +1,37 @@ +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.distributed_item_processor import ( + DistributedItemProcessor, +) +from 
localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.inline_item_processor import ( + InlineItemProcessor, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.item_processor_decl import ( + ItemProcessorDecl, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_component import ( + IterationComponent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.mode import ( + Mode, +) + + +def from_item_processor_decl(item_processor_decl: ItemProcessorDecl) -> IterationComponent: + match item_processor_decl.processor_config.mode: + case Mode.Inline: + return InlineItemProcessor( + query_language=item_processor_decl.query_language, + start_at=item_processor_decl.start_at, + states=item_processor_decl.states, + comment=item_processor_decl.comment, + processor_config=item_processor_decl.processor_config, + ) + case Mode.Distributed: + return DistributedItemProcessor( + query_language=item_processor_decl.query_language, + start_at=item_processor_decl.start_at, + states=item_processor_decl.states, + comment=item_processor_decl.comment, + processor_config=item_processor_decl.processor_config, + ) + case unknown: + raise ValueError(f"Unknown Map state processing mode: '{unknown}'.") diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/map_run_record.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/map_run_record.py new file mode 100644 index 0000000000000..52599e7abf489 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/map_run_record.py @@ -0,0 +1,205 @@ +import abc +import datetime +import threading +from collections import OrderedDict +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import ( + Arn, + DescribeMapRunOutput, + LongArn, + MapRunExecutionCounts, + MapRunItemCounts, + MapRunListItem, + MapRunStatus, + Timestamp, +) +from localstack.utils.strings import long_uid + + +class Counter: + _mutex: Final[threading.Lock] + _count: int + + def __init__(self): + self._mutex = threading.Lock() + self._count = 0 + + def offset(self, offset: int) -> None: + with self._mutex: + self._count = self._count + offset + + def count(self, increment: int = 1) -> None: + with self._mutex: + self._count += increment + + def get(self) -> int: + return self._count + + +class ProgressCounter(abc.ABC): + aborted: Final[Counter] + failed: Final[Counter] + pending: Final[Counter] + results_written: Final[Counter] + running: Final[Counter] + succeeded: Final[Counter] + timed_out: Final[Counter] + total: Final[Counter] + + def __init__(self): + self.aborted = Counter() + self.failed = Counter() + self.pending = Counter() + self.results_written = Counter() + self.running = Counter() + self.succeeded = Counter() + self.timed_out = Counter() + self.total = Counter() + + +class ExecutionCounter(ProgressCounter): + def describe(self) -> MapRunExecutionCounts: + return MapRunExecutionCounts( + aborted=self.aborted.get(), + failed=self.failed.get(), + pending=self.pending.get(), + resultsWritten=self.results_written.get(), + running=self.running.get(), + succeeded=self.succeeded.get(), + timedOut=self.timed_out.get(), + total=self.total.get(), + ) + + +class 
ItemCounter(ProgressCounter): + def describe(self) -> MapRunItemCounts: + return MapRunItemCounts( + aborted=self.aborted.get(), + failed=self.failed.get(), + pending=self.pending.get(), + resultsWritten=self.results_written.get(), + running=self.running.get(), + succeeded=self.succeeded.get(), + timedOut=self.timed_out.get(), + total=self.total.get(), + ) + + +class MapRunRecord: + update_event: Final[threading.Event] + map_state_machine_arn: Final[ + LongArn + ] # This is the original state machine ARN plus the map run ARN postfix. + execution_arn: Final[Arn] + map_run_arn: Final[LongArn] + max_concurrency: int + execution_counter: Final[ExecutionCounter] + item_counter: Final[ItemCounter] + start_date: Timestamp + status: MapRunStatus + stop_date: Optional[Timestamp] + # TODO: add support for failure toleration fields. + tolerated_failure_count: int + tolerated_failure_percentage: float + + def __init__( + self, + state_machine_arn: Arn, + execution_arn: Arn, + max_concurrency: int, + tolerated_failure_count: int, + tolerated_failure_percentage: float, + label: Optional[str], + ): + self.update_event = threading.Event() + ( + map_state_machine_arn, + map_run_arn, + ) = self._generate_map_run_arns(state_machine_arn=state_machine_arn, label=label) + self.map_run_arn = map_run_arn + self.map_state_machine_arn = map_state_machine_arn + self.execution_arn = execution_arn + self.max_concurrency = max_concurrency + self.execution_counter = ExecutionCounter() + self.item_counter = ItemCounter() + self.start_date = datetime.datetime.now(tz=datetime.timezone.utc) + self.status = MapRunStatus.RUNNING + self.stop_date = None + self.tolerated_failure_count = tolerated_failure_count + self.tolerated_failure_percentage = tolerated_failure_percentage + + @staticmethod + def _generate_map_run_arns( + state_machine_arn: Arn, label: Optional[str] + ) -> tuple[LongArn, LongArn]: + # Generate a new MapRunArn given the StateMachineArn, such that: + # inp: arn:aws:states::111111111111:stateMachine:<state_machine_name> + # MRA: arn:aws:states::111111111111:mapRun:<state_machine_name>/<label_or_uuid>:<uuid> + # SMA: arn:aws:states::111111111111:stateMachine:<state_machine_name>/<label_or_uuid> + map_run_arn = state_machine_arn.replace(":stateMachine:", ":mapRun:") + part_1 = long_uid() if label is None else label + map_run_arn = f"{map_run_arn}/{part_1}:{long_uid()}" + return f"{state_machine_arn}/{part_1}", map_run_arn + + def set_stop(self, status: MapRunStatus): + self.status = status + self.stop_date = datetime.datetime.now(tz=datetime.timezone.utc) + + def describe(self) -> DescribeMapRunOutput: + describe_output = DescribeMapRunOutput( + mapRunArn=self.map_run_arn, + executionArn=self.execution_arn, + status=self.status, + startDate=self.start_date, + maxConcurrency=self.max_concurrency, + toleratedFailurePercentage=self.tolerated_failure_percentage, + toleratedFailureCount=self.tolerated_failure_count, + itemCounts=self.item_counter.describe(), + executionCounts=self.execution_counter.describe(), + ) + stop_date = self.stop_date + if stop_date is not None: + describe_output["stopDate"] = self.stop_date + return describe_output + + def list_item(self) -> MapRunListItem: + list_item = MapRunListItem( + executionArn=self.execution_arn, + mapRunArn=self.map_run_arn, + stateMachineArn=self.map_state_machine_arn, + startDate=self.start_date, + ) + if self.stop_date: + list_item["stopDate"] = self.stop_date + return list_item + + def update( + self, + max_concurrency: Optional[int], + tolerated_failure_count: Optional[int], + tolerated_failure_percentage: Optional[float], + ) -> None: + if max_concurrency is
not None: + self.max_concurrency = max_concurrency + if tolerated_failure_count is not None: + self.tolerated_failure_count = tolerated_failure_count + if tolerated_failure_percentage is not None: + self.tolerated_failure_percentage = tolerated_failure_percentage + self.update_event.set() + + +class MapRunRecordPoolManager: + _pool: dict[LongArn, MapRunRecord] + + def __init__(self): + self._pool = OrderedDict() + + def add(self, map_run_record: MapRunRecord) -> None: + self._pool[map_run_record.map_run_arn] = map_run_record + + def get(self, map_run_arn: LongArn) -> Optional[MapRunRecord]: + return self._pool.get(map_run_arn) + + def get_all(self) -> list[MapRunRecord]: + return list(self._pool.values()) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/processor_config.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/processor_config.py new file mode 100644 index 0000000000000..76e804b42cf67 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/itemprocessor/processor_config.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from typing import Final + +from localstack.services.stepfunctions.asl.component.component import Component +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.execution_type import ( + ExecutionType, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.mode import ( + Mode, +) + + +class ProcessorConfig(Component): + DEFAULT_MODE: Final[Mode] = Mode.Inline + DEFAULT_EXECUTION_TYPE: Final[ExecutionType] = ExecutionType.Standard + + mode: Final[Mode] + execution_type: Final[ExecutionType] + + def __init__( + self, mode: Mode = DEFAULT_MODE, execution_type: ExecutionType = DEFAULT_EXECUTION_TYPE + ): + super().__init__() + self.mode = mode + self.execution_type = execution_type diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_component.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_component.py new file mode 100644 index 0000000000000..92e1be15ccd64 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_component.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +import abc +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt +from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.component.program.program import Program +from localstack.services.stepfunctions.asl.component.program.states import States + + +class IterationComponent(EvalComponent, abc.ABC): + # Ensure no member variables are used to keep track of the state of + # iteration components: the evaluation must be stateless as for all + # EvalComponents to ensure they can be reused or used concurrently. 
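The statelessness requirement called out in the comment above is what lets a single parsed iteration component serve many concurrent map runs: declaration data is immutable on the component, and all mutable evaluation state travels in the caller's environment. A toy illustration of the pattern (names are illustrative, not the module's API):

```python
import threading

# Toy model of a stateless eval component: the instance holds only immutable
# declaration data; each evaluation mutates the caller-owned environment.
class AddConstant:
    def __init__(self, increment: int):
        self._increment = increment  # fixed at parse time, never mutated

    def eval(self, env: dict) -> None:
        env["value"] = env.get("value", 0) + self._increment

component = AddConstant(increment=2)
envs = [{"value": i} for i in range(4)]
threads = [threading.Thread(target=component.eval, args=(env,)) for env in envs]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert [env["value"] for env in envs] == [2, 3, 4, 5]
```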
+ _query_language: Final[QueryLanguage] + _start_at: Final[StartAt] + _states: Final[States] + _comment: Final[Optional[Comment]] + + def __init__( + self, + query_language: QueryLanguage, + start_at: StartAt, + states: States, + comment: Optional[Comment], + ): + self._query_language = query_language + self._start_at = start_at + self._states = states + self._comment = comment + + def _get_iteration_program(self) -> Program: + return Program( + query_language=self._query_language, + start_at=self._start_at, + states=self._states, + timeout_seconds=None, + comment=self._comment, + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_declaration.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_declaration.py new file mode 100644 index 0000000000000..b26b87ec1437e --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_declaration.py @@ -0,0 +1,32 @@ +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt +from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage +from localstack.services.stepfunctions.asl.component.component import Component +from localstack.services.stepfunctions.asl.component.program.states import States +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.processor_config import ( + ProcessorConfig, +) + + +class IterationDecl(Component): + comment: Final[Optional[Comment]] + query_language: Final[QueryLanguage] + start_at: Final[StartAt] + states: Final[States] + processor_config: Final[ProcessorConfig] + + def __init__( + self, + comment: Optional[Comment], + query_language: QueryLanguage, + start_at: StartAt, + states: States, + processor_config: ProcessorConfig, + ): + self.comment = comment + self.query_language = query_language + self.start_at = start_at + self.states = states + self.processor_config = processor_config diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_worker.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_worker.py new file mode 100644 index 0000000000000..1603149ca0b57 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iteration_worker.py @@ -0,0 +1,205 @@ +import abc +import logging +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import HistoryEventType, MapIterationEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + Job, + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.eval.program_state import ( + ProgramError, + 
ProgramState, + ProgramStopped, +) +from localstack.services.stepfunctions.asl.eval.states import ItemData, MapData + +LOG = logging.getLogger(__name__) + + +class IterationWorker(abc.ABC): + _work_name: Final[str] + _job_pool: Final[JobPool] + _env: Final[Environment] + _stop_signal_received: bool + + def __init__( + self, + work_name: str, + job_pool: JobPool, + env: Environment, + ): + self._work_name = work_name + self._job_pool = job_pool + self._env = env + self._stop_signal_received = False + + def sig_stop(self): + self._stop_signal_received = True + + def stopped(self): + return self._stop_signal_received + + @abc.abstractmethod + def _eval_input(self, env_frame: Environment) -> None: ... + + def _eval_job(self, env: Environment, job: Job) -> None: + map_iteration_event_details = MapIterationEventDetails( + name=self._work_name, index=job.job_index + ) + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapIterationStarted, + event_details=EventDetails(mapIterationStartedEventDetails=map_iteration_event_details), + ) + + job_output = RuntimeError( + f"Unexpected Runtime Error in ItemProcessor worker for job index '{job.job_index}'." + ) + try: + env.states.context_object.context_object_data["Map"] = MapData( + Item=ItemData(Index=job.job_index, Value=job.job_input) + ) + + env.states.reset(input_value=job.job_input) + self._eval_input(env_frame=env) + + job.job_program.eval(env) + + # Program evaluation suppresses runtime exceptions into an execution error in the program state. + # Hence, the routine extracts this error here and raises a FailureEventException, so that the error at this + # depth is logged appropriately and propagates to parent states. + + # An internal error that led to a failure raises an execution exception, + # which in turn leads to a MapIterationFailed event. + end_program_state: ProgramState = env.program_state() + if isinstance(end_program_state, ProgramError): + error_name = end_program_state.error.get("error") + if error_name is not None: + error_name = CustomErrorName(error_name=error_name) + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.MapIterationFailed, + event_details=EventDetails( + executionFailedEventDetails=end_program_state.error + ), + ) + ) + # If instead the program (the parent state machine) was halted, raise an execution exception. + elif isinstance(end_program_state, ProgramStopped): + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=CustomErrorName(error_name=HistoryEventType.MapIterationAborted), + event_type=HistoryEventType.MapIterationAborted, + event_details=EventDetails( + executionFailedEventDetails=end_program_state.error + ), + ) + ) + + # Otherwise, execution succeeded and the output of this operation is available. + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapIterationSucceeded, + event_details=EventDetails( + mapIterationSucceededEventDetails=map_iteration_event_details + ), + update_source_event_id=False, + ) + # Extract the output. + job_output = env.states.get_input() + + except FailureEventException as failure_event_ex: + # Set the output to this exception: this will trigger the failure workflow in the job pool. + job_output = failure_event_ex + + # At this depth, the next event is either a MapIterationFailed (for any reason) or a MapIterationAborted + # if explicitly indicated.
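The branching above reduces to a small decision table: a terminal ProgramError becomes a MapIterationFailed event, a ProgramStopped becomes MapIterationAborted, and anything else publishes MapIterationSucceeded. A condensed restatement (the string sentinels are illustrative, not the service's types):

```python
# Condensed restatement of the dispatch above: terminal program states map to
# the history event the iteration publishes. Keys are illustrative sentinels.
def iteration_event_for(end_state: str) -> str:
    return {
        "program_error": "MapIterationFailed",
        "program_stopped": "MapIterationAborted",
    }.get(end_state, "MapIterationSucceeded")

assert iteration_event_for("program_error") == "MapIterationFailed"
assert iteration_event_for("completed") == "MapIterationSucceeded"
```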
+ if failure_event_ex.failure_event.event_type == HistoryEventType.MapIterationAborted: + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapIterationAborted, + event_details=EventDetails( + mapIterationAbortedEventDetails=map_iteration_event_details + ), + update_source_event_id=False, + ) + else: + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapIterationFailed, + event_details=EventDetails( + mapIterationFailedEventDetails=map_iteration_event_details + ), + update_source_event_id=False, + ) + + except Exception as ex: + # Error case. + LOG.warning( + "Unhandled termination error in item processor worker for job '%s'.", + job.job_index, + ) + + # Pass the exception upstream, leading to an evaluation halt. + job_output = ex + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapIterationFailed, + event_details=EventDetails( + mapIterationFailedEventDetails=map_iteration_event_details + ), + update_source_event_id=False, + ) + + finally: + job.job_output = job_output + + def _eval_pool(self, job: Optional[Job], worker_frame: Environment) -> None: + # Note: the frame has to be closed before the job, to ensure the owner environment is correctly updated + # before the evaluation continues; Map states await job termination, not worker termination. + if job is None: + self._env.close_frame(worker_frame) + return + + # Evaluate the job. + job_frame = worker_frame.open_inner_frame() + self._eval_job(env=job_frame, job=job) + worker_frame.close_frame(job_frame) + + # Evaluation terminates here due to exception in job, or worker was stopped. + if isinstance(job.job_output, Exception) or self.stopped(): + self._env.close_frame(worker_frame) + self._job_pool.close_job(job) + return + + next_job: Job = self._job_pool.next_job() + # Iteration will terminate after this job. + if next_job is None: + # Non-faulty terminal iteration update events are used as the source events for the states that follow.
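The close-frame-before-close-job ordering that the `_eval_pool` comment above insists on can be modelled with a plain event: `close_job` may mark the pool terminated and wake the awaiting Map state, which must already see the frame's published output. A toy model of the ordering (not the real Environment/JobPool API):

```python
import threading

# Toy ordering model: publish the frame's output first, then signal job
# termination, so the awakened waiter always observes the output.
results: list[int] = []
job_terminated = threading.Event()

def worker() -> None:
    results.append(42)      # close_frame: publish output into the owner env
    job_terminated.set()    # close_job: may wake the awaiting Map state

threading.Thread(target=worker).start()
job_terminated.wait()
assert results == [42]      # the waiter sees the output once woken
```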
+ worker_frame.event_history_context.source_event_id = ( + job_frame.event_history_context.last_published_event_id + ) + self._env.close_frame(worker_frame) + self._job_pool.close_job(job) + return + + self._job_pool.close_job(job) + self._eval_pool(job=next_job, worker_frame=worker_frame) + + def eval(self) -> None: + self._eval_pool(job=self._job_pool.next_job(), worker_frame=self._env.open_frame()) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/distributed_iterator.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/distributed_iterator.py new file mode 100644 index 0000000000000..039007fc31229 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/distributed_iterator.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from localstack.services.stepfunctions.asl.component.common.comment import Comment +from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt +from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage +from localstack.services.stepfunctions.asl.component.program.states import States +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.distributed_iteration_component import ( + DistributedIterationComponent, + DistributedIterationComponentEvalInput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.processor_config import ( + ProcessorConfig, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.distributed_iterator_worker import ( + DistributedIteratorWorker, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.parse.typed_props import TypedProps + + +class DistributedIteratorEvalInput(DistributedIterationComponentEvalInput): + pass + + +class DistributedIterator(DistributedIterationComponent): + @classmethod + def from_props(cls, props: TypedProps) -> DistributedIterator: + item_processor = cls( + query_language=props.get(QueryLanguage) or QueryLanguage(), + start_at=props.get( + typ=StartAt, + raise_on_missing=ValueError(f"Missing StartAt declaration in props '{props}'."), + ), + states=props.get( + typ=States, + raise_on_missing=ValueError(f"Missing States declaration in props '{props}'."), + ), + comment=props.get(Comment), + processor_config=props.get(ProcessorConfig), + ) + return item_processor + + def _create_worker( + self, env: Environment, eval_input: DistributedIteratorEvalInput, job_pool: JobPool + ) -> DistributedIteratorWorker: + return DistributedIteratorWorker( + work_name=eval_input.state_name, + job_pool=job_pool, + env=env, + parameters=eval_input.parameters, + map_run_record=eval_input.map_run_record, + item_selector=eval_input.item_selector, + ) diff --git 
a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/distributed_iterator_worker.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/distributed_iterator_worker.py new file mode 100644 index 0000000000000..583ab6e666473 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/distributed_iterator_worker.py @@ -0,0 +1,131 @@ +from typing import Optional + +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.parargs import Parameters +from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import EvalTimeoutError +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_selector import ( + ItemSelector, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.map_run_record import ( + MapRunRecord, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.inline_iterator_worker import ( + InlineIteratorWorker, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + Job, + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.program_state import ( + ProgramError, + ProgramState, + ProgramStopped, +) +from localstack.services.stepfunctions.asl.eval.states import ItemData, MapData + + +class DistributedIteratorWorker(InlineIteratorWorker): + _map_run_record: MapRunRecord + + def __init__( + self, + work_name: str, + job_pool: JobPool, + env: Environment, + parameters: Optional[Parameters], + map_run_record: MapRunRecord, + item_selector: Optional[ItemSelector], + ): + super().__init__( + work_name=work_name, + job_pool=job_pool, + env=env, + parameters=parameters, + item_selector=item_selector, + ) + self._map_run_record = map_run_record + + def _eval_job(self, env: Environment, job: Job) -> None: + self._map_run_record.item_counter.total.count() + self._map_run_record.item_counter.running.count() + + self._map_run_record.execution_counter.total.count() + self._map_run_record.execution_counter.running.count() + + job_output = None + try: + env.states.context_object.context_object_data["Map"] = MapData( + Item=ItemData(Index=job.job_index, Value=job.job_input) + ) + + env.states.reset(input_value=job.job_input) + env.stack.append(env.states.get_input()) + self._eval_input(env_frame=env) + + job.job_program.eval(env) + + # TODO: verify behaviour with all of these scenarios. 
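`from_props` above leans on the parser's TypedProps container: parsed components are looked up by type, and `raise_on_missing` lets the caller hand in the exception to raise when a mandatory field was never parsed. A simplified stand-in for that contract (assumed behaviour, not the actual class):

```python
from typing import Optional, TypeVar

T = TypeVar("T")

# Simplified stand-in for the TypedProps contract assumed above: values are
# stored by type, and get() can raise a caller-supplied exception when absent.
class TypedPropsSketch:
    def __init__(self) -> None:
        self._by_type: dict[type, object] = {}

    def add(self, value: object) -> None:
        self._by_type[type(value)] = value

    def get(self, typ: type[T], raise_on_missing: Optional[Exception] = None) -> Optional[T]:
        value = self._by_type.get(typ)
        if value is None and raise_on_missing is not None:
            raise raise_on_missing
        return value  # type: ignore[return-value]

props = TypedPropsSketch()
props.add("Start")
assert props.get(str) == "Start"
```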
+ end_program_state: ProgramState = env.program_state() + if isinstance(end_program_state, ProgramError): + self._map_run_record.execution_counter.failed.count() + self._map_run_record.item_counter.failed.count() + job_output = None + elif isinstance(end_program_state, ProgramStopped): + self._map_run_record.execution_counter.aborted.count() + self._map_run_record.item_counter.aborted.count() + else: + self._map_run_record.item_counter.succeeded.count() + self._map_run_record.item_counter.results_written.count() + + self._map_run_record.execution_counter.succeeded.count() + self._map_run_record.execution_counter.results_written.count() + self._map_run_record.execution_counter.running.offset(-1) + + job_output = env.states.get_input() + + except EvalTimeoutError: + self._map_run_record.item_counter.timed_out.count() + + except FailureEventException: + self._map_run_record.item_counter.failed.count() + + except Exception: + self._map_run_record.item_counter.failed.count() + + finally: + self._map_run_record.item_counter.running.offset(-1) + job.job_output = job_output + + def _eval_pool(self, job: Optional[Job], worker_frame: Environment) -> None: + # Note: the frame has to be closed before the job, to ensure the owner environment is correctly updated + # before the evaluation continues; Map states await job termination, not worker termination. + if job is None: + self._env.delete_frame(worker_frame) + return + + # Evaluate the job. + job_frame = worker_frame.open_frame() + self._eval_job(env=job_frame, job=job) + worker_frame.delete_frame(job_frame) + + # Evaluation terminates here due to exception in job, or worker was stopped. + if isinstance(job.job_output, Exception) or self.stopped(): + self._env.delete_frame(worker_frame) + self._job_pool.close_job(job) + return + + next_job: Job = self._job_pool.next_job() + # Iteration will terminate after this job.
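Both counters updated above surface in DescribeMapRun: every job increments `total` and `running` up front, the terminal branch picks one of `succeeded`/`failed`/`aborted`/`timedOut` (successes also bump `resultsWritten`), and the `finally` block decrements `running`. A small aggregation in the same spirit (field names follow MapRunExecutionCounts/MapRunItemCounts; the helper itself is hypothetical):

```python
from collections import Counter as TallyCounter

# Aggregate terminal job outcomes into DescribeMapRun-style count categories.
def tally_outcomes(outcomes: list[str]) -> dict:
    counts = TallyCounter(outcomes)
    return {
        "total": len(outcomes),
        "pending": 0,                      # all jobs are terminal in this sketch
        "running": 0,
        "succeeded": counts["succeeded"],
        "failed": counts["failed"],
        "aborted": counts["aborted"],
        "timedOut": counts["timed_out"],
        "resultsWritten": counts["succeeded"],  # results are written on success
    }

assert tally_outcomes(["succeeded", "failed", "succeeded"])["resultsWritten"] == 2
```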
+ if next_job is None: + self._env.delete_frame(worker_frame) + self._job_pool.close_job(job) + return + + self._job_pool.close_job(job) + self._eval_pool(job=next_job, worker_frame=worker_frame) + + def eval(self) -> None: + self._eval_pool(job=self._job_pool.next_job(), worker_frame=self._env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/inline_iterator.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/inline_iterator.py new file mode 100644 index 0000000000000..6100e412df44c --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/inline_iterator.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import logging +from typing import Optional + +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.inline_iteration_component import ( + InlineIterationComponent, + InlineIterationComponentEvalInput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.inline_iterator_worker import ( + InlineIteratorWorker, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.iterator_decl import ( + IteratorDecl, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + +LOG = logging.getLogger(__name__) + + +class InlineIteratorEvalInput(InlineIterationComponentEvalInput): + pass + + +class InlineIterator(InlineIterationComponent): + _eval_input: Optional[InlineIteratorEvalInput] + + def _create_worker( + self, env: Environment, eval_input: InlineIteratorEvalInput, job_pool: JobPool + ) -> InlineIteratorWorker: + return InlineIteratorWorker( + work_name=eval_input.state_name, + job_pool=job_pool, + env=env, + parameters=eval_input.parameters, + item_selector=eval_input.item_selector, + ) + + @classmethod + def from_declaration(cls, iterator_decl: IteratorDecl): + return cls( + query_language=iterator_decl.query_language, + start_at=iterator_decl.start_at, + states=iterator_decl.states, + comment=iterator_decl.comment, + processor_config=iterator_decl.processor_config, + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/inline_iterator_worker.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/inline_iterator_worker.py new file mode 100644 index 0000000000000..45db68a00e8b1 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/inline_iterator_worker.py @@ -0,0 +1,48 @@ +import logging +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.common.parargs import Parameters +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_selector import ( + ItemSelector, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_worker import ( + IterationWorker, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.job import ( + JobPool, +) +from localstack.services.stepfunctions.asl.eval.environment 
import Environment + +LOG = logging.getLogger(__name__) + + +class InlineIteratorWorker(IterationWorker): + _parameters: Final[Optional[Parameters]] + _item_selector: Final[Optional[ItemSelector]] + + def __init__( + self, + work_name: str, + job_pool: JobPool, + env: Environment, + item_selector: Optional[ItemSelector], + parameters: Optional[Parameters], + ): + super().__init__(work_name=work_name, job_pool=job_pool, env=env) + self._item_selector = item_selector + self._parameters = parameters + + def _eval_input(self, env_frame: Environment) -> None: + if not self._parameters and not self._item_selector: + return + + map_state_input = self._env.stack[-1] + env_frame.states.reset(input_value=map_state_input) + env_frame.stack.append(env_frame.states.get_input()) + + if self._item_selector: + self._item_selector.eval(env_frame) + elif self._parameters: + self._parameters.eval(env_frame) + + env_frame.states.reset(input_value=env_frame.stack[-1]) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/iterator_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/iterator_decl.py new file mode 100644 index 0000000000000..c49bde9a40e64 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/iterator_decl.py @@ -0,0 +1,7 @@ +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_declaration import ( + IterationDecl, +) + + +class IteratorDecl(IterationDecl): + pass diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/iterator_factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/iterator_factory.py new file mode 100644 index 0000000000000..287a82fce6c9b --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/iterator/iterator_factory.py @@ -0,0 +1,37 @@ +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_component import ( + IterationComponent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.distributed_iterator import ( + DistributedIterator, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.inline_iterator import ( + InlineIterator, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.iterator_decl import ( + IteratorDecl, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.mode import ( + Mode, +) + + +def from_iterator_decl(iterator_decl: IteratorDecl) -> IterationComponent: + match iterator_decl.processor_config.mode: + case Mode.Inline: + return InlineIterator( + query_language=iterator_decl.query_language, + start_at=iterator_decl.start_at, + states=iterator_decl.states, + comment=iterator_decl.comment, + processor_config=iterator_decl.processor_config, + ) + case Mode.Distributed: + return DistributedIterator( + query_language=iterator_decl.query_language, + start_at=iterator_decl.start_at, + states=iterator_decl.states, + comment=iterator_decl.comment, + processor_config=iterator_decl.processor_config, + ) + case unknown: + raise 
ValueError(f"Unknown Map state processing mode: '{unknown}'.") diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/job.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/job.py new file mode 100644 index 0000000000000..1ef24a6e17593 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/iteration/job.py @@ -0,0 +1,104 @@ +import copy +import logging +import threading +from typing import Any, Final, Optional + +from localstack.services.stepfunctions.asl.component.program.program import Program +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +LOG = logging.getLogger(__name__) + + +class Job: + job_index: Final[int] + job_program: Final[Program] + job_input: Final[Optional[Any]] + job_output: Optional[Any] + + def __init__(self, job_index: int, job_program: Program, job_input: Optional[Any]): + self.job_index = job_index + self.job_program = job_program + self.job_input = job_input + self.job_output = None + + +class JobClosed: + job_index: Final[int] + job_output: Optional[Any] + + def __init__(self, job_index: int, job_output: Optional[Any]): + self.job_index = job_index + self.job_output = job_output + + def __hash__(self): + return hash(self.job_index) + + +class JobPool: + _mutex: Final[threading.Lock] + _termination_event: Final[threading.Event] + _worker_exception: Optional[Exception] + + _jobs_number: Final[int] + _open_jobs: Final[list[Job]] + _closed_jobs: Final[set[JobClosed]] + + def __init__(self, job_program: Program, job_inputs: list[Any]): + self._mutex = threading.Lock() + self._termination_event = threading.Event() + self._worker_exception = None + + self._jobs_number = len(job_inputs) + self._open_jobs = [ + Job(job_index=job_index, job_program=job_program, job_input=job_input) + for job_index, job_input in enumerate(job_inputs) + ] + self._open_jobs.reverse() + self._closed_jobs = set() + + def next_job(self) -> Optional[Any]: + with self._mutex: + if self._worker_exception is not None: + return None + try: + return self._open_jobs.pop() + except IndexError: + return None + + def _is_terminated(self) -> bool: + return len(self._closed_jobs) == self._jobs_number or self._worker_exception is not None + + def _notify_on_termination(self) -> None: + if self._is_terminated(): + self._termination_event.set() + + def get_worker_exception(self) -> Optional[Exception]: + return self._worker_exception + + def close_job(self, job: Job) -> None: + with self._mutex: + if self._is_terminated(): + return + + if job in self._closed_jobs: + LOG.warning( + "Duplicate execution of Job with index '%s' and input '%s'", + job.job_index, + to_json_str(job.job_input), + ) + + if isinstance(job.job_output, Exception): + self._worker_exception = job.job_output + else: + self._closed_jobs.add(JobClosed(job_index=job.job_index, job_output=job.job_output)) + + self._notify_on_termination() + + def get_closed_jobs(self) -> list[JobClosed]: + with self._mutex: + closed_jobs = copy.deepcopy(self._closed_jobs) + return sorted(closed_jobs, key=lambda closed_job: closed_job.job_index) + + def await_jobs(self) -> None: + if not self._is_terminated(): + self._termination_event.wait() diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/label.py 
b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/label.py new file mode 100644 index 0000000000000..3b9c2bce7be6a --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/label.py @@ -0,0 +1,24 @@ +from itertools import chain +from typing import Final + +from localstack.services.stepfunctions.asl.component.component import Component + + +class Label(Component): + label: Final[str] + + def __init__(self, label: str): + self.label = label.encode().decode("unicode-escape") + + if len(self.label) == 0: + raise ValueError("Label cannot be empty") + + if len(self.label) > 40: + raise ValueError("Label cannot exceed 40 characters") + + for invalid_char in list(' ?*<>{}[]:;,\\|^~$#%&`"') + [ + chr(i) for i in chain(range(0x00, 0x20), range(0x7F, 0xA0)) + ]: + if invalid_char in self.label: + escaped_char = invalid_char.encode("unicode-escape").decode() + raise ValueError(f"Label contains invalid character: '{escaped_char}'") diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/max_concurrency.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/max_concurrency.py new file mode 100644 index 0000000000000..2aa4de3920e1e --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/max_concurrency.py @@ -0,0 +1,104 @@ +import abc +from typing import Final + +from localstack.aws.api.stepfunctions import ExecutionFailedEventDetails, HistoryEventType +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringJSONata, + StringSampler, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +DEFAULT_MAX_CONCURRENCY_VALUE: Final[int] = 0 # No limit. + + +class MaxConcurrencyDecl(EvalComponent, abc.ABC): + @abc.abstractmethod + def _eval_max_concurrency(self, env: Environment) -> int: ... + + def _eval_body(self, env: Environment) -> None: + max_concurrency_value = self._eval_max_concurrency(env=env) + env.stack.append(max_concurrency_value) + + +class MaxConcurrency(MaxConcurrencyDecl): + max_concurrency_value: Final[int] + + def __init__(self, num: int = DEFAULT_MAX_CONCURRENCY_VALUE): + super().__init__() + self.max_concurrency_value = num + + def _eval_max_concurrency(self, env: Environment) -> int: + return self.max_concurrency_value + + +class MaxConcurrencyJSONata(MaxConcurrencyDecl): + string_jsonata: Final[StringJSONata] + + def __init__(self, string_jsonata: StringJSONata): + super().__init__() + self.string_jsonata = string_jsonata + + def _eval_max_concurrency(self, env: Environment) -> int: + self.string_jsonata.eval(env=env) + # TODO: add snapshot tests to verify AWS's behaviour about non integer values. 
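The MaxConcurrency declarations above all evaluate to an integer pushed onto the environment stack, with `0` as the default meaning no concurrency limit. The path-based variant defined just below additionally coerces the sampled value with `int()` where possible and fails the execution with a States.Runtime error for non-integers and negative values; condensed into a standalone checker (illustrative, not the component's API):

```python
# Condensed restatement of the MaxConcurrencyPath validation below: coerce to
# int when possible, then reject values that are not non-negative integers.
def validate_max_concurrency(value) -> int:
    if not isinstance(value, int):
        try:
            value = int(value)
        except (TypeError, ValueError):
            raise ValueError(f'value "{value}" is not a valid integer') from None
    if value < 0:
        raise ValueError(f"expected non-negative integer for MaxConcurrency, got '{value}'")
    return value

assert validate_max_concurrency("3") == 3
assert validate_max_concurrency(0) == 0  # 0 is the default: no limit
```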
+ seconds = int(env.stack.pop()) + return seconds + + +class MaxConcurrencyPath(MaxConcurrency): + string_sampler: Final[StringSampler] + + def __init__(self, string_sampler: StringSampler): + super().__init__() + self.string_sampler = string_sampler + + def _eval_max_concurrency(self, env: Environment) -> int: + self.string_sampler.eval(env=env) + max_concurrency_value = env.stack.pop() + + if not isinstance(max_concurrency_value, int): + try: + max_concurrency_value = int(max_concurrency_value) + except Exception: + # Pass the wrong type forward. + pass + + error_cause = None + if not isinstance(max_concurrency_value, int): + value_str = ( + to_json_str(max_concurrency_value) + if not isinstance(max_concurrency_value, str) + else max_concurrency_value + ) + error_cause = f'The MaxConcurrencyPath field refers to value "{value_str}" which is not a valid integer: {self.string_sampler.literal_value}' + elif max_concurrency_value < 0: + error_cause = f"Expected non-negative integer for MaxConcurrency, got '{max_concurrency_value}' instead." + + if error_cause is not None: + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime), + event_type=HistoryEventType.ExecutionFailed, + event_details=EventDetails( + executionFailedEventDetails=ExecutionFailedEventDetails( + error=StatesErrorNameType.StatesRuntime.to_name(), cause=error_cause + ) + ), + ) + ) + + return max_concurrency_value diff --git a/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/mode.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/mode.py similarity index 79% rename from localstack/services/stepfunctions/asl/component/state/state_execution/state_map/mode.py rename to localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/mode.py index b9eb024ca1952..3baccf195fb74 100644 --- a/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/mode.py +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/mode.py @@ -5,3 +5,4 @@ class Mode(Enum): Inline = ASLLexer.INLINE + Distributed = ASLLexer.DISTRIBUTED diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval.py new file mode 100644 index 0000000000000..3de89f49f0c2a --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval.py @@ -0,0 +1,16 @@ +import abc +from typing import Final + +from 
localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ServiceResource, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class ResourceEval(abc.ABC): + resource: Final[ServiceResource] + + def __init__(self, resource: ServiceResource): + self.resource = resource + + def eval_resource(self, env: Environment) -> None: ... diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval_factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval_factory.py new file mode 100644 index 0000000000000..7695e3ee58bf3 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval_factory.py @@ -0,0 +1,20 @@ +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.result_writer.resource_eval.resource_eval import ( + ResourceEval, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.result_writer.resource_eval.resource_eval_s3 import ( + ResourceEvalS3, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + Resource, + ServiceResource, +) + + +def resource_eval_for(resource: Resource) -> ResourceEval: + if isinstance(resource, ServiceResource): + match resource.service_name: + case "s3": + return ResourceEvalS3(resource=resource) + raise ValueError( + f"ResultWriter's Resource fields must be states service resource, instead got '{resource.resource_arn}'." + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval_s3.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval_s3.py new file mode 100644 index 0000000000000..178c9653c83c6 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/resource_eval/resource_eval_s3.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import json +from typing import Callable, Final + +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.result_writer.resource_eval.resource_eval import ( + ResourceEval, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for +from localstack.utils.strings import camel_to_snake_case + + +class ResourceEvalS3(ResourceEval): + _HANDLER_REFLECTION_PREFIX: Final[str] = "_handle_" + _API_ACTION_HANDLER_TYPE = Callable[[Environment, ResourceRuntimePart, StateCredentials], None] + + @staticmethod + def _get_s3_client( + resource_runtime_part: ResourceRuntimePart, state_credentials: StateCredentials + ): + return boto_client_for( + service="s3", region=resource_runtime_part.region, state_credentials=state_credentials + ) + + @staticmethod + def _handle_put_object( + env: Environment, + 
resource_runtime_part: ResourceRuntimePart, + state_credentials: StateCredentials, + ) -> None: + parameters = env.stack.pop() + env.stack.pop() # TODO: results + + s3_client = ResourceEvalS3._get_s3_client( + resource_runtime_part=resource_runtime_part, state_credentials=state_credentials + ) + map_run_record = env.map_run_record_pool_manager.get_all().pop() + map_run_uuid = map_run_record.map_run_arn.split(":")[-1] + if parameters["Prefix"] != "" and not parameters["Prefix"].endswith("/"): + parameters["Prefix"] += "/" + + # TODO: generate result files and upload them to s3. + body = { + "DestinationBucket": parameters["Bucket"], + "MapRunArn": map_run_record.map_run_arn, + "ResultFiles": {"FAILED": [], "PENDING": [], "SUCCEEDED": []}, + } + key = parameters["Prefix"] + map_run_uuid + "/manifest.json" + s3_client.put_object( + Bucket=parameters["Bucket"], Key=key, Body=json.dumps(body, indent=2).encode("utf8") + ) + env.stack.append( + { + "MapRunArn": map_run_record.map_run_arn, + "ResultWriterDetails": {"Bucket": parameters["Bucket"], "Key": key}, + } + ) + + def _get_api_action_handler(self) -> ResourceEvalS3._API_ACTION_HANDLER_TYPE: + api_action = camel_to_snake_case(self.resource.api_action).strip() + handler_name = ResourceEvalS3._HANDLER_REFLECTION_PREFIX + api_action + resolver_handler = getattr(self, handler_name) + if resolver_handler is None: + raise ValueError(f"Unknown s3 action '{api_action}'.") + return resolver_handler + + def eval_resource(self, env: Environment) -> None: + self.resource.eval(env=env) + resource_runtime_part: ResourceRuntimePart = env.stack.pop() + resolver_handler = self._get_api_action_handler() + state_credentials = StateCredentials(role_arn=env.aws_execution_details.role_arn) + resolver_handler(env, resource_runtime_part, state_credentials) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/result_writer_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/result_writer_decl.py new file mode 100644 index 0000000000000..244c78417aab4 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/result_writer/result_writer_decl.py @@ -0,0 +1,45 @@ +import copy +import logging +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.parargs import Parargs +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.result_writer.resource_eval.resource_eval import ( + ResourceEval, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.result_writer.resource_eval.resource_eval_factory import ( + resource_eval_for, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + Resource, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + +LOG = logging.getLogger(__name__) + + +class ResultWriter(EvalComponent): + resource_eval: Final[ResourceEval] + parargs: Final[Parargs] + + def __init__( + self, + resource: Resource, + parargs: Parargs, + ): + self.resource_eval = resource_eval_for(resource=resource) + self.parargs = parargs + + @property + def resource(self): + return self.resource_eval.resource + + def __str__(self): + class_dict = copy.deepcopy(self.__dict__) + del class_dict["resource_eval"] + 
class_dict["resource"] = self.resource + return f"({self.__class__.__name__}| {class_dict})" + + def _eval_body(self, env: Environment) -> None: + self.parargs.eval(env=env) + self.resource_eval.eval_resource(env=env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/state_map.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/state_map.py new file mode 100644 index 0000000000000..ea0aebac7751d --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/state_map.py @@ -0,0 +1,347 @@ +import copy +from typing import Optional + +from localstack.aws.api.stepfunctions import ( + EvaluationFailedEventDetails, + HistoryEventType, + MapStateStartedEventDetails, +) +from localstack.services.stepfunctions.asl.component.common.catch.catch_decl import CatchDecl +from localstack.services.stepfunctions.asl.component.common.catch.catch_outcome import CatchOutcome +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.parargs import Parameters, Parargs +from localstack.services.stepfunctions.asl.component.common.path.items_path import ItemsPath +from localstack.services.stepfunctions.asl.component.common.path.result_path import ResultPath +from localstack.services.stepfunctions.asl.component.common.result_selector import ResultSelector +from localstack.services.stepfunctions.asl.component.common.retry.retry_decl import RetryDecl +from localstack.services.stepfunctions.asl.component.common.retry.retry_outcome import RetryOutcome +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + JSONPATH_ROOT_PATH, + StringJsonPath, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.execute_state import ( + ExecutionState, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.item_reader_decl import ( + ItemReader, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_selector import ( + ItemSelector, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.items.items import ( + Items, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.distributed_iteration_component import ( + DistributedIterationComponent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.distributed_item_processor import ( + DistributedItemProcessor, + DistributedItemProcessorEvalInput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.inline_item_processor import ( + InlineItemProcessor, + InlineItemProcessorEvalInput, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.item_processor_decl import ( + ItemProcessorDecl, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.item_processor_factory import ( + 
from_item_processor_decl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.map_run_record import (
+    MapRunRecord,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iteration_component import (
+    IterationComponent,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.distributed_iterator import (
+    DistributedIterator,
+    DistributedIteratorEvalInput,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.inline_iterator import (
+    InlineIterator,
+    InlineIteratorEvalInput,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.iterator_decl import (
+    IteratorDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.iterator_factory import (
+    from_iterator_decl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.label import (
+    Label,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.max_concurrency import (
+    MaxConcurrency,
+    MaxConcurrencyDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.result_writer.result_writer_decl import (
+    ResultWriter,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.tolerated_failure import (
+    ToleratedFailureCountDecl,
+    ToleratedFailureCountInt,
+    ToleratedFailurePercentage,
+    ToleratedFailurePercentageDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_props import StateProps
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+
+
+class StateMap(ExecutionState):
+    items: Optional[Items]
+    items_path: Optional[ItemsPath]
+    iteration_component: IterationComponent
+    item_reader: Optional[ItemReader]
+    item_selector: Optional[ItemSelector]
+    parameters: Optional[Parameters]
+    max_concurrency_decl: MaxConcurrencyDecl
+    tolerated_failure_count_decl: ToleratedFailureCountDecl
+    tolerated_failure_percentage_decl: ToleratedFailurePercentageDecl
+    result_path: Optional[ResultPath]
+    result_selector: ResultSelector
+    retry: Optional[RetryDecl]
+    catch: Optional[CatchDecl]
+    label: Optional[Label]
+    result_writer: Optional[ResultWriter]
+
+    def __init__(self):
+        super(StateMap, self).__init__(
+            state_entered_event_type=HistoryEventType.MapStateEntered,
+            state_exited_event_type=HistoryEventType.MapStateExited,
+        )
+
+    def from_state_props(self, state_props: StateProps) -> None:
+        super(StateMap, self).from_state_props(state_props)
+        if self._is_language_query_jsonpath():
+            self.items = None
+            self.items_path = state_props.get(ItemsPath) or ItemsPath(
+                string_sampler=StringJsonPath(JSONPATH_ROOT_PATH)
+            )
+        else:
+            # TODO: add a snapshot test to assert what a missing Items definition means for a Map state.
+            self.items_path = None
+            self.items = state_props.get(Items)
+        self.item_reader = state_props.get(ItemReader)
+        self.item_selector = state_props.get(ItemSelector)
+        self.parameters = state_props.get(Parargs)
+        self.max_concurrency_decl = state_props.get(MaxConcurrencyDecl) or MaxConcurrency()
+        self.tolerated_failure_count_decl = (
+            state_props.get(ToleratedFailureCountDecl) or ToleratedFailureCountInt()
+        )
+        self.tolerated_failure_percentage_decl = (
+            state_props.get(ToleratedFailurePercentageDecl) or ToleratedFailurePercentage()
+        )
+        self.result_path = state_props.get(ResultPath) or ResultPath(
+            result_path_src=ResultPath.DEFAULT_PATH
+        )
+        self.result_selector = state_props.get(ResultSelector)
+        self.retry = state_props.get(RetryDecl)
+        self.catch = state_props.get(CatchDecl)
+        self.label = state_props.get(Label)
+        self.result_writer = state_props.get(ResultWriter)
+
+        iterator_decl = state_props.get(typ=IteratorDecl)
+        item_processor_decl = state_props.get(typ=ItemProcessorDecl)
+
+        if iterator_decl and item_processor_decl:
+            raise ValueError("Cannot define both Iterator and ItemProcessor.")
+
+        iteration_decl = iterator_decl or item_processor_decl
+        if iteration_decl is None:
+            raise ValueError(f"Missing ItemProcessor/Iterator definition in props '{state_props}'.")
+
+        if isinstance(iteration_decl, IteratorDecl):
+            self.iteration_component = from_iterator_decl(iteration_decl)
+        elif isinstance(iteration_decl, ItemProcessorDecl):
+            self.iteration_component = from_item_processor_decl(iteration_decl)
+        else:
+            raise ValueError(f"Unknown value for IteratorDecl '{iteration_decl}'.")
+
+    def _eval_execution(self, env: Environment) -> None:
+        self.max_concurrency_decl.eval(env=env)
+        max_concurrency_num = env.stack.pop()
+        label = self.label.label if self.label else None
+
+        # Although MaxConcurrency and the Tolerated* fields are all state-level fields, AWS Step Functions
+        # evaluates only MaxConcurrency as one. The Tolerated* fields are evaluated only after the state
+        # start event, yet are logged with event IDs coherent with state-level fields. To adhere to this
+        # quirk, an evaluation frame is created at this point for the evaluation of the Tolerated* fields
+        # following the state start event.
+        frame: Environment = env.open_frame()
+        frame.states.reset(input_value=env.states.get_input())
+        frame.stack = copy.deepcopy(env.stack)
+
+        try:
+            # ItemsPath in Distributed Map states is only used if a JSON input is passed from the previous state.
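+            # Illustrative example (hypothetical values): with state input {"detail": {"items": [1, 2, 3]}}
+            # and "ItemsPath": "$.detail.items", the iterator receives [1, 2, 3]. For a Distributed Map
+            # that declares an ItemReader, the guard below skips ItemsPath/Items evaluation, since the
+            # items are sourced through the ItemReader (e.g. from S3) rather than from the state input.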
+ if ( + not isinstance(self.iteration_component, DistributedIterationComponent) + or self.item_reader is None + ): + if self.items_path: + self.items_path.eval(env=env) + + if self.items: + self.items.eval(env=env) + + if self.item_reader: + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapStateStarted, + event_details=EventDetails( + mapStateStartedEventDetails=MapStateStartedEventDetails(length=0) + ), + ) + input_items = None + else: + input_items = env.stack.pop() + # TODO: This should probably be raised within an Items EvalComponent + if not isinstance(input_items, list): + error_name = StatesErrorName(typ=StatesErrorNameType.StatesQueryEvaluationError) + failure_event = FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.EvaluationFailed, + event_details=EventDetails( + evaluationFailedEventDetails=EvaluationFailedEventDetails( + cause=f"Map state input must be an array but was: {type(input_items)}", + error=error_name.error_name, + ) + ), + ) + raise FailureEventException(failure_event=failure_event) + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapStateStarted, + event_details=EventDetails( + mapStateStartedEventDetails=MapStateStartedEventDetails( + length=len(input_items) + ) + ), + ) + + self.tolerated_failure_count_decl.eval(env=frame) + tolerated_failure_count = frame.stack.pop() + self.tolerated_failure_percentage_decl.eval(env=frame) + tolerated_failure_percentage = frame.stack.pop() + finally: + env.close_frame(frame) + + if isinstance(self.iteration_component, InlineIterator): + eval_input = InlineIteratorEvalInput( + state_name=self.name, + max_concurrency=max_concurrency_num, + input_items=input_items, + parameters=self.parameters, + item_selector=self.item_selector, + ) + elif isinstance(self.iteration_component, InlineItemProcessor): + eval_input = InlineItemProcessorEvalInput( + state_name=self.name, + max_concurrency=max_concurrency_num, + input_items=input_items, + item_selector=self.item_selector, + parameters=self.parameters, + ) + else: + map_run_record = MapRunRecord( + state_machine_arn=env.states.context_object.context_object_data["StateMachine"][ + "Id" + ], + execution_arn=env.states.context_object.context_object_data["Execution"]["Id"], + max_concurrency=max_concurrency_num, + tolerated_failure_count=tolerated_failure_count, + tolerated_failure_percentage=tolerated_failure_percentage, + label=label, + ) + env.map_run_record_pool_manager.add(map_run_record) + # Choose the distributed input type depending on whether the definition + # asks for the legacy Iterator component or an ItemProcessor + if isinstance(self.iteration_component, DistributedIterator): + distributed_eval_input_class = DistributedIteratorEvalInput + elif isinstance(self.iteration_component, DistributedItemProcessor): + distributed_eval_input_class = DistributedItemProcessorEvalInput + else: + raise RuntimeError( + f"Unknown iteration component of type '{type(self.iteration_component)}' '{self.iteration_component}'." 
+ ) + eval_input = distributed_eval_input_class( + state_name=self.name, + max_concurrency=max_concurrency_num, + input_items=input_items, + parameters=self.parameters, + item_selector=self.item_selector, + item_reader=self.item_reader, + tolerated_failure_count=tolerated_failure_count, + tolerated_failure_percentage=tolerated_failure_percentage, + label=label, + map_run_record=map_run_record, + ) + + env.stack.append(eval_input) + self.iteration_component.eval(env) + + if self.result_writer: + self.result_writer.eval(env) + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapStateSucceeded, + update_source_event_id=False, + ) + + def _eval_state(self, env: Environment) -> None: + # Initialise the retry counter for execution states. + env.states.context_object.context_object_data["State"]["RetryCount"] = 0 + + # Attempt to evaluate the state's logic through until it's successful, caught, or retries have run out. + while env.is_running(): + try: + self._evaluate_with_timeout(env) + break + except Exception as ex: + failure_event: FailureEvent = self._from_error(env=env, ex=ex) + error_output = self._construct_error_output_value(failure_event=failure_event) + env.states.set_error_output(error_output) + env.states.set_result(error_output) + + if self.retry: + retry_outcome: RetryOutcome = self._handle_retry( + env=env, failure_event=failure_event + ) + if retry_outcome == RetryOutcome.CanRetry: + continue + + if failure_event.event_type != HistoryEventType.ExecutionFailed: + if ( + isinstance(ex, FailureEventException) + and failure_event.event_type == HistoryEventType.EvaluationFailed + ): + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.EvaluationFailed, + event_details=EventDetails( + evaluationFailedEventDetails=ex.get_evaluation_failed_event_details(), + ), + ) + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.MapStateFailed, + ) + + if self.catch: + self._handle_catch(env=env, failure_event=failure_event) + catch_outcome: CatchOutcome = env.stack[-1] + if catch_outcome == CatchOutcome.Caught: + break + + self._handle_uncaught(env=env, failure_event=failure_event) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/tolerated_failure.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/tolerated_failure.py new file mode 100644 index 0000000000000..c4284c388c402 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_map/tolerated_failure.py @@ -0,0 +1,198 @@ +import abc +from typing import Final + +from localstack.aws.api.stepfunctions import ExecutionFailedEventDetails, HistoryEventType +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringJSONata, + StringSampler, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment +from 
localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +TOLERATED_FAILURE_COUNT_MIN: Final[int] = 0 +TOLERATED_FAILURE_COUNT_DEFAULT: Final[int] = 0 +TOLERATED_FAILURE_PERCENTAGE_MIN: Final[float] = 0.0 +TOLERATED_FAILURE_PERCENTAGE_DEFAULT: Final[float] = 0.0 +TOLERATED_FAILURE_PERCENTAGE_MAX: Final[float] = 100.0 + + +class ToleratedFailureCountDecl(EvalComponent, abc.ABC): + @abc.abstractmethod + def _eval_tolerated_failure_count(self, env: Environment) -> int: ... + + def _eval_body(self, env: Environment) -> None: + tolerated_failure_count = self._eval_tolerated_failure_count(env=env) + env.stack.append(tolerated_failure_count) + + +class ToleratedFailureCountInt(ToleratedFailureCountDecl): + tolerated_failure_count: Final[int] + + def __init__(self, tolerated_failure_count: int = TOLERATED_FAILURE_COUNT_DEFAULT): + self.tolerated_failure_count = tolerated_failure_count + + def _eval_tolerated_failure_count(self, env: Environment) -> int: + return self.tolerated_failure_count + + +class ToleratedFailureCountStringJSONata(ToleratedFailureCountDecl): + string_jsonata: Final[StringJSONata] + + def __init__(self, string_jsonata: StringJSONata): + super().__init__() + self.string_jsonata = string_jsonata + + def _eval_tolerated_failure_count(self, env: Environment) -> int: + # TODO: add snapshot tests to verify AWS's behaviour about non integer values. + self.string_jsonata.eval(env=env) + failure_count: int = int(env.stack.pop()) + return failure_count + + +class ToleratedFailureCountPath(ToleratedFailureCountDecl): + string_sampler: Final[StringSampler] + + def __init__(self, string_sampler: StringSampler): + self.string_sampler = string_sampler + + def _eval_tolerated_failure_count(self, env: Environment) -> int: + self.string_sampler.eval(env=env) + tolerated_failure_count = env.stack.pop() + + if isinstance(tolerated_failure_count, str): + try: + tolerated_failure_count = int(tolerated_failure_count) + except Exception: + # Pass the invalid type forward for validation error + pass + + error_cause = None + if not isinstance(tolerated_failure_count, int): + value_str = ( + to_json_str(tolerated_failure_count) + if not isinstance(tolerated_failure_count, str) + else tolerated_failure_count + ) + error_cause = ( + f'The ToleratedFailureCountPath field refers to value "{value_str}" ' + f"which is not a valid integer: {self.string_sampler.literal_value}" + ) + + elif tolerated_failure_count < TOLERATED_FAILURE_COUNT_MIN: + error_cause = "ToleratedFailureCount cannot be negative." + + if error_cause is not None: + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime), + event_type=HistoryEventType.ExecutionFailed, + event_details=EventDetails( + executionFailedEventDetails=ExecutionFailedEventDetails( + error=StatesErrorNameType.StatesRuntime.to_name(), cause=error_cause + ) + ), + ) + ) + + return tolerated_failure_count + + +class ToleratedFailurePercentageDecl(EvalComponent, abc.ABC): + @abc.abstractmethod + def _eval_tolerated_failure_percentage(self, env: Environment) -> float: ... 
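+    # Illustrative ASL bindings (assumed, for exposition): in a Distributed Map state definition,
+    #   "ToleratedFailurePercentage": 5            -> the static ToleratedFailurePercentage below
+    #   "ToleratedFailurePercentagePath": "$.pct"  -> ToleratedFailurePercentagePath (JSONPath)
+    #   a JSONata string expression                -> ToleratedFailurePercentageStringJSONata
+    # Each concrete subclass implements _eval_tolerated_failure_percentage, and _eval_body pushes
+    # the resulting float onto the evaluation stack.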
+
+    def _eval_body(self, env: Environment) -> None:
+        tolerated_failure_percentage = self._eval_tolerated_failure_percentage(env=env)
+        env.stack.append(tolerated_failure_percentage)
+
+
+class ToleratedFailurePercentage(ToleratedFailurePercentageDecl):
+    tolerated_failure_percentage: Final[float]
+
+    def __init__(self, tolerated_failure_percentage: float = TOLERATED_FAILURE_PERCENTAGE_DEFAULT):
+        self.tolerated_failure_percentage = tolerated_failure_percentage
+
+    def _eval_tolerated_failure_percentage(self, env: Environment) -> float:
+        return self.tolerated_failure_percentage
+
+
+class ToleratedFailurePercentageStringJSONata(ToleratedFailurePercentageDecl):
+    string_jsonata: Final[StringJSONata]
+
+    def __init__(self, string_jsonata: StringJSONata):
+        super().__init__()
+        self.string_jsonata = string_jsonata
+
+    def _eval_tolerated_failure_percentage(self, env: Environment) -> float:
+        # TODO: add snapshot tests to verify AWS's behaviour for non-float values.
+        self.string_jsonata.eval(env=env)
+        # Coerce to float rather than int: percentages may be fractional, e.g. 12.5.
+        failure_percentage: float = float(env.stack.pop())
+        return failure_percentage
+
+
+class ToleratedFailurePercentagePath(ToleratedFailurePercentageDecl):
+    string_sampler: Final[StringSampler]
+
+    def __init__(self, string_sampler: StringSampler):
+        self.string_sampler = string_sampler
+
+    def _eval_tolerated_failure_percentage(self, env: Environment) -> float:
+        self.string_sampler.eval(env=env)
+        tolerated_failure_percentage = env.stack.pop()
+
+        if isinstance(tolerated_failure_percentage, str):
+            try:
+                # Parse as float, not int, so fractional strings such as "12.5" are accepted.
+                tolerated_failure_percentage = float(tolerated_failure_percentage)
+            except Exception:
+                # Pass the invalid type forward for the validation error below.
+                pass
+
+        if isinstance(tolerated_failure_percentage, int):
+            tolerated_failure_percentage = float(tolerated_failure_percentage)
+
+        error_cause = None
+        if not isinstance(tolerated_failure_percentage, float):
+            value_str = (
+                to_json_str(tolerated_failure_percentage)
+                if not isinstance(tolerated_failure_percentage, str)
+                else tolerated_failure_percentage
+            )
+            error_cause = (
+                f'The ToleratedFailurePercentagePath field refers to value "{value_str}" '
+                f"which is not a valid float: {self.string_sampler.literal_value}"
+            )
+        elif (
+            not TOLERATED_FAILURE_PERCENTAGE_MIN
+            <= tolerated_failure_percentage
+            <= TOLERATED_FAILURE_PERCENTAGE_MAX
+        ):
+            error_cause = "ToleratedFailurePercentage must be between 0 and 100."
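+            # Worked examples (hypothetical inputs) for the checks above: "12.5" parses to 12.5 and is
+            # accepted; 150 (or "150") falls outside [0, 100] and sets this error_cause; a non-numeric
+            # value such as "high" fails the float coercion and is reported as an invalid float.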
+
+        if error_cause is not None:
+            raise FailureEventException(
+                failure_event=FailureEvent(
+                    env=env,
+                    error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime),
+                    event_type=HistoryEventType.ExecutionFailed,
+                    event_details=EventDetails(
+                        executionFailedEventDetails=ExecutionFailedEventDetails(
+                            error=StatesErrorNameType.StatesRuntime.to_name(), cause=error_cause
+                        )
+                    ),
+                )
+            )
+
+        return tolerated_failure_percentage
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/branch_worker.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/branch_worker.py
new file mode 100644
index 0000000000000..51ef19322cf5e
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/branch_worker.py
@@ -0,0 +1,53 @@
+import abc
+import logging
+import threading
+from typing import Final, Optional
+
+from localstack.aws.api.stepfunctions import Timestamp
+from localstack.services.stepfunctions.asl.component.program.program import Program
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.utils.threads import TMP_THREADS
+
+LOG = logging.getLogger(__name__)
+
+
+class BranchWorker:
+    class BranchWorkerComm(abc.ABC):
+        @abc.abstractmethod
+        def on_terminated(self, env: Environment): ...
+
+    _branch_worker_comm: Final[BranchWorkerComm]
+    _program: Final[Program]
+    _worker_thread: Optional[threading.Thread]
+    env: Final[Environment]
+
+    def __init__(self, branch_worker_comm: BranchWorkerComm, program: Program, env: Environment):
+        self._branch_worker_comm = branch_worker_comm
+        self._program = program
+        self.env = env
+        self._worker_thread = None
+
+    def _thread_routine(self) -> None:
+        LOG.info("[BranchWorker] [launched] [id: %s]", self._worker_thread.native_id)
+        self._program.eval(self.env)
+        LOG.info("[BranchWorker] [terminated] [id: %s]", self._worker_thread.native_id)
+        self._branch_worker_comm.on_terminated(env=self.env)
+
+    def start(self):
+        if self._worker_thread is not None:
+            raise RuntimeError(f"Attempted to rerun BranchWorker for program {self._program}.")
+
+        self._worker_thread = threading.Thread(
+            target=self._thread_routine, name=f"BranchWorker_{self._program}", daemon=True
+        )
+        TMP_THREADS.append(self._worker_thread)
+        self._worker_thread.start()
+
+    def stop(self, stop_date: Timestamp, cause: Optional[str], error: Optional[str]) -> None:
+        env = self.env
+        if env:
+            try:
+                env.set_stop(stop_date, cause, error)
+            except Exception:
+                # Best-effort early release of resources: ignore exceptions raised while stopping.
+                pass
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/branches_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/branches_decl.py
new file mode 100644
index 0000000000000..d9c268e776f66
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/branches_decl.py
@@ -0,0 +1,113 @@
+import datetime
+import threading
+from typing import Final, Optional
+
+from localstack.aws.api.stepfunctions import ExecutionFailedEventDetails, HistoryEventType
+from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import (
+    CustomErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.component.program.program import Program
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_parallel.branch_worker import (
+    BranchWorker,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+from localstack.services.stepfunctions.asl.eval.program_state import ProgramError, ProgramState
+from localstack.utils.collections import select_from_typed_dict
+
+
+class BranchWorkerPool(BranchWorker.BranchWorkerComm):
+    _mutex: Final[threading.Lock]
+    _termination_event: Final[threading.Event]
+    _active_workers_num: int
+
+    _terminated_with_error: Optional[ExecutionFailedEventDetails]
+
+    def __init__(self, workers_num: int):
+        self._mutex = threading.Lock()
+        self._termination_event = threading.Event()
+        self._active_workers_num = workers_num
+
+        self._terminated_with_error = None
+
+    def on_terminated(self, env: Environment):
+        if self._termination_event.is_set():
+            return
+        with self._mutex:
+            end_program_state: ProgramState = env.program_state()
+            if isinstance(end_program_state, ProgramError):
+                self._terminated_with_error = select_from_typed_dict(
+                    typed_dict=ExecutionFailedEventDetails, obj=end_program_state.error or dict()
+                )
+                self._termination_event.set()
+            else:
+                self._active_workers_num -= 1
+                if self._active_workers_num == 0:
+                    self._termination_event.set()
+
+    def wait(self):
+        self._termination_event.wait()
+
+    def get_exit_event_details(self) -> Optional[ExecutionFailedEventDetails]:
+        return self._terminated_with_error
+
+
+class BranchesDecl(EvalComponent):
+    def __init__(self, programs: list[Program]):
+        self.programs: Final[list[Program]] = programs
+
+    def _eval_body(self, env: Environment) -> None:
+        # Input value for every parallel branch.
+        input_val = env.stack.pop()
+
+        branch_worker_pool = BranchWorkerPool(workers_num=len(self.programs))
+
+        branch_workers: list[BranchWorker] = list()
+        for program in self.programs:
+            # Environment frame for this sub process.
+            env_frame: Environment = env.open_inner_frame()
+            env_frame.states.reset(input_value=input_val)
+
+            # Launch the worker.
+            worker = BranchWorker(
+                branch_worker_comm=branch_worker_pool, program=program, env=env_frame
+            )
+            branch_workers.append(worker)
+
+            worker.start()
+
+        branch_worker_pool.wait()
+
+        # Propagate the exception if a parallel branch failed.
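+        # For illustration: if one of three branches raises an error such as States.TaskFailed,
+        # on_terminated records that branch's ExecutionFailedEventDetails and releases the pool
+        # immediately; the surviving branches are stopped below and their frames closed before
+        # the failure is re-raised as a FailureEvent.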
+        exit_event_details: Optional[ExecutionFailedEventDetails] = (
+            branch_worker_pool.get_exit_event_details()
+        )
+        if exit_event_details is not None:
+            for branch_worker in branch_workers:
+                branch_worker.stop(stop_date=datetime.datetime.now(), cause=None, error=None)
+                env.close_frame(branch_worker.env)
+
+            exit_error_name = exit_event_details.get("error")
+            raise FailureEventException(
+                failure_event=FailureEvent(
+                    env=env,
+                    error_name=CustomErrorName(error_name=exit_error_name),
+                    event_type=HistoryEventType.ExecutionFailed,
+                    event_details=EventDetails(executionFailedEventDetails=exit_event_details),
+                )
+            )
+
+        # Collect the results and return.
+        result_list = list()
+
+        for worker in branch_workers:
+            env_frame = worker.env
+            result_list.append(env_frame.states.get_input())
+            env.close_frame(env_frame)
+
+        env.stack.append(result_list)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/state_parallel.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/state_parallel.py
new file mode 100644
index 0000000000000..ce7c5c42d4109
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_parallel/state_parallel.py
@@ -0,0 +1,97 @@
+import copy
+from typing import Optional
+
+from localstack.aws.api.stepfunctions import HistoryEventType
+from localstack.services.stepfunctions.asl.component.common.catch.catch_outcome import CatchOutcome
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.parargs import Parargs
+from localstack.services.stepfunctions.asl.component.common.retry.retry_outcome import RetryOutcome
+from localstack.services.stepfunctions.asl.component.state.state_execution.execute_state import (
+    ExecutionState,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_parallel.branches_decl import (
+    BranchesDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_props import StateProps
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class StateParallel(ExecutionState):
+    # Branches (Required)
+    # An array of objects that specify state machines to execute in parallel. Each such state
+    # machine object must have fields named States and StartAt, whose meanings are exactly
+    # like those in the top level of a state machine.
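+    # Illustrative ASL sketch (hypothetical definition):
+    #   {
+    #     "Type": "Parallel",
+    #     "Branches": [
+    #       {"StartAt": "A", "States": {"A": {"Type": "Pass", "End": true}}},
+    #       {"StartAt": "B", "States": {"B": {"Type": "Pass", "End": true}}}
+    #     ],
+    #     "End": true
+    #   }
+    # Both branches receive a copy of the state input, and their outputs are collected into an array.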
+ branches: BranchesDecl + parargs: Optional[Parargs] + + def __init__(self): + super().__init__( + state_entered_event_type=HistoryEventType.ParallelStateEntered, + state_exited_event_type=HistoryEventType.ParallelStateExited, + ) + + def from_state_props(self, state_props: StateProps) -> None: + super(StateParallel, self).from_state_props(state_props) + self.branches = state_props.get( + typ=BranchesDecl, + raise_on_missing=ValueError(f"Missing Branches definition in props '{state_props}'."), + ) + self.parargs = state_props.get(Parargs) + + def _eval_execution(self, env: Environment) -> None: + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ParallelStateStarted, + ) + self.branches.eval(env) + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ParallelStateSucceeded, + update_source_event_id=False, + ) + + def _eval_state(self, env: Environment) -> None: + # Initialise the retry counter for execution states. + env.states.context_object.context_object_data["State"]["RetryCount"] = 0 + + # Compute the branches' input: if declared this is the parameters, else the current memory state. + if self.parargs is not None: + self.parargs.eval(env=env) + # In both cases, the inputs are copied by value to the branches, to avoid cross branch state manipulation, and + # cached to allow them to be resubmitted in case of failure. + input_value = copy.deepcopy(env.stack.pop()) + + # Attempt to evaluate the state's logic through until it's successful, caught, or retries have run out. + while env.is_running(): + try: + env.stack.append(input_value) + self._evaluate_with_timeout(env) + break + except FailureEventException as failure_event_ex: + failure_event: FailureEvent = self._from_error(env=env, ex=failure_event_ex) + error_output = self._construct_error_output_value(failure_event=failure_event) + env.states.set_error_output(error_output) + env.states.set_result(error_output) + + if self.retry is not None: + retry_outcome: RetryOutcome = self._handle_retry( + env=env, failure_event=failure_event + ) + if retry_outcome == RetryOutcome.CanRetry: + continue + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ParallelStateFailed, + ) + + if self.catch is not None: + self._handle_catch(env=env, failure_event=failure_event) + catch_outcome: CatchOutcome = env.stack[-1] + if catch_outcome == CatchOutcome.Caught: + break + + self._handle_uncaught(env=env, failure_event=failure_event) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/credentials.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/credentials.py new file mode 100644 index 0000000000000..6839dc1c64a97 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/credentials.py @@ -0,0 +1,36 @@ +from dataclasses import dataclass +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringExpression, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent 
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+@dataclass
+class StateCredentials:
+    role_arn: str
+
+
+class RoleArn(EvalComponent):
+    string_expression: Final[StringExpression]
+
+    def __init__(self, string_expression: StringExpression):
+        self.string_expression = string_expression
+
+    def _eval_body(self, env: Environment) -> None:
+        self.string_expression.eval(env=env)
+
+
+class Credentials(EvalComponent):
+    role_arn: Final[RoleArn]
+
+    def __init__(self, role_arn: RoleArn):
+        self.role_arn = role_arn
+
+    def _eval_body(self, env: Environment) -> None:
+        self.role_arn.eval(env=env)
+        role_arn = env.stack.pop()
+        computed_credentials = StateCredentials(role_arn=role_arn)
+        env.stack.append(computed_credentials)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py
new file mode 100644
index 0000000000000..9f59414b844ab
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/lambda_eval_utils.py
@@ -0,0 +1,102 @@
+import json
+from json import JSONDecodeError
+from typing import IO, Any, Final, Optional, Union
+
+from localstack.aws.api.lambda_ import InvocationResponse
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import (
+    StateCredentials,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.mock_eval_utils import (
+    eval_mocked_response,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for
+from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
+from localstack.services.stepfunctions.mocking.mock_config import MockedResponse
+from localstack.utils.collections import select_from_typed_dict
+from localstack.utils.strings import to_bytes
+
+
+class LambdaFunctionErrorException(Exception):
+    function_error: Final[Optional[str]]
+    payload: Final[str]
+
+    def __init__(self, function_error: Optional[str], payload: str):
+        self.function_error = function_error
+        self.payload = payload
+
+
+def _from_payload(payload_streaming_body: IO[bytes]) -> Union[Any, str]:
+    """
+    Extracts the Lambda payload. The payload may be a JSON stringified object or a plain string.
+    This function first attempts to parse the payload as JSON; if parsing fails, it returns
+    the payload decoded as a UTF-8 string.
+ """ + + payload_bytes: bytes = payload_streaming_body.read() + decoded_data: str = payload_bytes.decode("utf-8") + try: + json_data: json = json.loads(decoded_data) + return json_data + except (UnicodeDecodeError, json.JSONDecodeError): + return decoded_data + + +def _mocked_invoke_lambda_function(env: Environment) -> InvocationResponse: + mocked_response: MockedResponse = env.get_current_mocked_response() + eval_mocked_response(env=env, mocked_response=mocked_response) + invocation_resp: InvocationResponse = env.stack.pop() + return invocation_resp + + +def _invoke_lambda_function( + parameters: dict, region: str, state_credentials: StateCredentials +) -> InvocationResponse: + lambda_client = boto_client_for( + service="lambda", region=region, state_credentials=state_credentials + ) + + invocation_response: InvocationResponse = lambda_client.invoke(**parameters) + + payload = invocation_response["Payload"] + payload_json = _from_payload(payload) + invocation_response["Payload"] = payload_json + + return invocation_response + + +def execute_lambda_function_integration( + env: Environment, parameters: dict, region: str, state_credentials: StateCredentials +) -> None: + if env.is_mocked_mode(): + invocation_response: InvocationResponse = _mocked_invoke_lambda_function(env=env) + else: + invocation_response: InvocationResponse = _invoke_lambda_function( + parameters=parameters, region=region, state_credentials=state_credentials + ) + + function_error: Optional[str] = invocation_response.get("FunctionError") + if function_error: + payload_json = invocation_response["Payload"] + payload_str = json.dumps(payload_json, separators=(",", ":")) + raise LambdaFunctionErrorException(function_error, payload_str) + + response = select_from_typed_dict(typed_dict=InvocationResponse, obj=invocation_response) # noqa + env.stack.append(response) + + +def to_payload_type(payload: Any) -> Optional[bytes]: + if isinstance(payload, bytes): + return payload + + if payload is None: + str_value = to_json_str(dict()) + elif isinstance(payload, str): + try: + json.loads(payload) + str_value = payload + except JSONDecodeError: + str_value = to_json_str(payload) + else: + str_value = to_json_str(payload) + return to_bytes(str_value) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/mock_eval_utils.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/mock_eval_utils.py new file mode 100644 index 0000000000000..aa8a9c423f433 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/mock_eval_utils.py @@ -0,0 +1,45 @@ +import copy + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.mocking.mock_config import ( + MockedResponse, + MockedResponseReturn, + MockedResponseThrow, +) + + +def _eval_mocked_response_throw(env: Environment, mocked_response: MockedResponseThrow) -> None: + task_failed_event_details = TaskFailedEventDetails( + error=mocked_response.error, 
cause=mocked_response.cause + ) + error_name = CustomErrorName(mocked_response.error) + failure_event = FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails(taskFailedEventDetails=task_failed_event_details), + ) + raise FailureEventException(failure_event=failure_event) + + +def _eval_mocked_response_return(env: Environment, mocked_response: MockedResponseReturn) -> None: + payload_copy = copy.deepcopy(mocked_response.payload) + env.stack.append(payload_copy) + + +def eval_mocked_response(env: Environment, mocked_response: MockedResponse) -> None: + if isinstance(mocked_response, MockedResponseReturn): + _eval_mocked_response_return(env=env, mocked_response=mocked_response) + elif isinstance(mocked_response, MockedResponseThrow): + _eval_mocked_response_throw(env=env, mocked_response=mocked_response) + else: + raise RuntimeError(f"Invalid MockedResponse type '{type(mocked_response)}'") diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/resource.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/resource.py new file mode 100644 index 0000000000000..ce1d4288d5a5c --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/resource.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import abc +from itertools import takewhile +from typing import Final, Optional + +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class ResourceCondition(str): + WaitForTaskToken = "waitForTaskToken" + Sync2 = "sync:2" + Sync = "sync" + + +class ResourceARN: + arn: str + partition: str + service: str + region: str + account: str + task_type: str + name: str + option: str + + def __init__( + self, + arn: str, + partition: str, + service: str, + region: str, + account: str, + task_type: str, + name: str, + option: Optional[str], + ): + self.arn = arn + self.partition = partition + self.service = service + self.region = region + self.account = account + self.task_type = task_type + self.name = name + self.option = option + + @staticmethod + def _consume_until(text: str, symbol: str) -> tuple[str, str]: + value = "".join(takewhile(lambda c: c != symbol, text)) + tail_idx = len(value) + 1 + return value, text[tail_idx:] + + @classmethod + def from_arn(cls, arn: str) -> ResourceARN: + _, arn_tail = ResourceARN._consume_until(arn, ":") + partition, arn_tail = ResourceARN._consume_until(arn_tail, ":") + service, arn_tail = ResourceARN._consume_until(arn_tail, ":") + region, arn_tail = ResourceARN._consume_until(arn_tail, ":") + account, arn_tail = ResourceARN._consume_until(arn_tail, ":") + task_type, arn_tail = ResourceARN._consume_until(arn_tail, ":") + name, arn_tail = ResourceARN._consume_until(arn_tail, ".") + option = arn_tail + return cls( + arn=arn, + partition=partition, + service=service, + region=region, + account=account, + task_type=task_type, + name=name, + option=option, + ) + + +class ResourceRuntimePart: + account: 
Final[str] + region: Final[str] + + def __init__(self, account: str, region: str): + self.region = region + self.account = account + + +class Resource(EvalComponent, abc.ABC): + _region: Final[str] + _account: Final[str] + resource_arn: Final[str] + partition: Final[str] + + def __init__(self, resource_arn: ResourceARN): + self._region = resource_arn.region + self._account = resource_arn.account + self.resource_arn = resource_arn.arn + self.partition = resource_arn.partition + + @staticmethod + def from_resource_arn(arn: str) -> Resource: + resource_arn = ResourceARN.from_arn(arn) + match resource_arn.service, resource_arn.task_type: + case "lambda", "function": + return LambdaResource(resource_arn=resource_arn) + case "states", "activity": + return ActivityResource(resource_arn=resource_arn) + case "states", _: + return ServiceResource(resource_arn=resource_arn) + + def _eval_runtime_part(self, env: Environment) -> ResourceRuntimePart: + region = self._region if self._region else env.aws_execution_details.region + account = self._account if self._account else env.aws_execution_details.account + return ResourceRuntimePart( + account=account, + region=region, + ) + + def _eval_body(self, env: Environment) -> None: + runtime_part = self._eval_runtime_part(env=env) + env.stack.append(runtime_part) + + +class ActivityResource(Resource): + name: Final[str] + + def __init__(self, resource_arn: ResourceARN): + super().__init__(resource_arn=resource_arn) + self.name = resource_arn.name + + +class LambdaResource(Resource): + function_name: Final[str] + + def __init__(self, resource_arn: ResourceARN): + super().__init__(resource_arn=resource_arn) + self.function_name = resource_arn.name + + +class ServiceResource(Resource): + service_name: Final[str] + api_name: Final[str] + api_action: Final[str] + condition: Final[Optional[str]] + + def __init__(self, resource_arn: ResourceARN): + super().__init__(resource_arn=resource_arn) + self.service_name = resource_arn.task_type + + name_parts = resource_arn.name.split(":") + if len(name_parts) == 1: + self.api_name = self.service_name + self.api_action = resource_arn.name + elif len(name_parts) > 1: + self.api_name = name_parts[0] + self.api_action = name_parts[1] + else: + raise RuntimeError(f"Incorrect definition of ResourceArn.name: '{resource_arn.name}'.") + + self.condition = None + option = resource_arn.option + if option: + match option: + case ResourceCondition.WaitForTaskToken: + self.condition = ResourceCondition.WaitForTaskToken + case "sync": + self.condition = ResourceCondition.Sync + case "sync:2": + self.condition = ResourceCondition.Sync2 + case unsupported: + raise RuntimeError(f"Unsupported condition '{unsupported}'.") diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py new file mode 100644 index 0000000000000..c385368c25dc2 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service.py @@ -0,0 +1,378 @@ +from __future__ import annotations + +import abc +import copy +import json +import logging +from typing import Any, Final, Optional, Union + +from botocore.model import ListShape, OperationModel, Shape, StringShape, StructureShape +from botocore.response import StreamingBody + +from localstack.aws.api.stepfunctions import ( + 
HistoryEventExecutionDataDetails, + HistoryEventType, + TaskCredentials, + TaskFailedEventDetails, + TaskScheduledEventDetails, + TaskStartedEventDetails, + TaskSucceededEventDetails, + TaskTimedOutEventDetails, +) +from localstack.aws.spec import load_service +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.mock_eval_utils import ( + eval_mocked_response, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceRuntimePart, + ServiceResource, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task import ( + StateTask, +) +from localstack.services.stepfunctions.asl.component.state.state_props import StateProps +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.services.stepfunctions.mocking.mock_config import MockedResponse +from localstack.services.stepfunctions.quotas import is_within_size_quota +from localstack.utils.strings import camel_to_snake_case, snake_to_camel_case, to_bytes, to_str + +LOG = logging.getLogger(__name__) + + +class StateTaskService(StateTask, abc.ABC): + resource: ServiceResource + + _SERVICE_NAME_SFN_TO_BOTO_OVERRIDES: Final[dict[str, str]] = { + "sfn": "stepfunctions", + "states": "stepfunctions", + } + + def from_state_props(self, state_props: StateProps) -> None: + super().from_state_props(state_props=state_props) + # Validate the service integration is supported on program creation. + self._validate_service_integration_is_supported() + + def _validate_service_integration_is_supported(self): + # Validate the service integration is supported. + supported_parameters = self._get_supported_parameters() + if supported_parameters is None: + raise ValueError( + f"The resource provided {self.resource.resource_arn} not recognized. " + "The value is not a valid resource ARN, or the resource is not available in this region." 
+ ) + + def _get_sfn_resource(self) -> str: + return self.resource.api_action + + def _get_sfn_resource_type(self) -> str: + return self.resource.service_name + + def _get_timed_out_failure_event(self, env: Environment) -> FailureEvent: + return FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesTimeout), + event_type=HistoryEventType.TaskTimedOut, + event_details=EventDetails( + taskTimedOutEventDetails=TaskTimedOutEventDetails( + resourceType=self._get_sfn_resource_type(), + resource=self._get_sfn_resource(), + error=StatesErrorNameType.StatesTimeout.to_name(), + ) + ), + ) + + @staticmethod + def _get_boto_operation_model( + boto_service_name: str, service_action_name: str + ) -> OperationModel: + norm_service_action_name = camel_to_snake_case(service_action_name) + + service = load_service(service=boto_service_name) + + boto_operation_names = { + camel_to_snake_case(operation_name): operation_name + for operation_name in service.operation_names + } # noqa + boto_operation_name = boto_operation_names.get(norm_service_action_name) + if boto_operation_name is None: + raise RuntimeError( + f"No api action named '{service_action_name}' available for service '{boto_service_name}'." + ) + + operation_model = service.operation_model(boto_operation_name) + return operation_model + + def _to_boto_request_value(self, request_value: Any, value_shape: Shape) -> Any: + boto_request_value = request_value + if isinstance(value_shape, StructureShape): + self._to_boto_request(request_value, value_shape) + elif isinstance(value_shape, ListShape) and isinstance(request_value, list): + for request_list_value in request_value: + self._to_boto_request_value(request_list_value, value_shape.member) # noqa + elif isinstance(value_shape, StringShape) and not isinstance(request_value, str): + boto_request_value = to_json_str(request_value) + elif value_shape.type_name == "blob" and not isinstance(boto_request_value, bytes): + boto_request_value = to_json_str(request_value, separators=(",", ":")) + boto_request_value = to_bytes(boto_request_value) + return boto_request_value + + def _to_boto_request(self, parameters: dict, structure_shape: StructureShape) -> None: + if not isinstance(structure_shape, StructureShape): + LOG.warning( + "Step Functions could not normalise the request for integration '%s' due to the unexpected request template value of type '%s'", + self.resource.resource_arn, + type(structure_shape), + ) + return + shape_members = structure_shape.members + norm_member_binds: dict[str, tuple[str, StructureShape]] = { + camel_to_snake_case(member_key): (member_key, member_value) + for member_key, member_value in shape_members.items() + } + parameters_bind_keys: list[str] = list(parameters.keys()) + for parameter_key in parameters_bind_keys: + norm_parameter_key = camel_to_snake_case(parameter_key) + norm_member_bind: Optional[tuple[str, Optional[StructureShape]]] = ( + norm_member_binds.get(norm_parameter_key) + ) + if norm_member_bind is not None: + norm_member_bind_key, norm_member_bind_shape = norm_member_bind + parameter_value = parameters.pop(parameter_key) + parameter_value = self._to_boto_request_value( + parameter_value, norm_member_bind_shape + ) + parameters[norm_member_bind_key] = parameter_value + + @staticmethod + def _to_sfn_cased(member_key: str) -> str: + # Normalise the string to snake case, e.g. "HelloWorld_hello__world" -> "hello_world_hello_world" + norm_member_key = camel_to_snake_case(member_key) + # Normalise the snake case to camel case, e.g. 
"hello_world_hello_world" -> "HelloWorldHelloWorld" + norm_member_key = snake_to_camel_case(norm_member_key) + return norm_member_key + + @staticmethod + def _from_boto_response_value(response_value: Any) -> Any: + if isinstance(response_value, StreamingBody): + body_str = to_str(response_value.read()) + return body_str + return response_value + + def _from_boto_response(self, response: Any, structure_shape: StructureShape) -> None: + if not isinstance(response, dict): + return + + if not isinstance(structure_shape, StructureShape): + LOG.warning( + "Step Functions could not normalise the response of integration '%s' due to the unexpected request template value of type '%s'", + self.resource.resource_arn, + type(structure_shape), + ) + return + + shape_members = structure_shape.members + response_bind_keys: list[str] = list(response.keys()) + for response_key in response_bind_keys: + norm_response_key = self._to_sfn_cased(response_key) + if response_key in shape_members: + shape_member = shape_members[response_key] + + response_value = response.pop(response_key) + response_value = self._from_boto_response_value(response_value) + + if isinstance(shape_member, StructureShape): + self._from_boto_response(response_value, shape_member) + elif isinstance(shape_member, ListShape) and isinstance(response_value, list): + for response_value_member in response_value: + self._from_boto_response(response_value_member, shape_member.member) # noqa + + response[norm_response_key] = response_value + + def _get_boto_service_name(self, boto_service_name: Optional[str] = None) -> str: + api_name = boto_service_name or self.resource.api_name + return self._SERVICE_NAME_SFN_TO_BOTO_OVERRIDES.get(api_name, api_name) + + def _get_boto_service_action(self, service_action_name: Optional[str] = None) -> str: + api_action = service_action_name or self.resource.api_action + return camel_to_snake_case(api_action) + + def _normalise_parameters( + self, + parameters: dict, + boto_service_name: Optional[str] = None, + service_action_name: Optional[str] = None, + ) -> None: + boto_service_name = self._get_boto_service_name(boto_service_name=boto_service_name) + service_action_name = self._get_boto_service_action(service_action_name=service_action_name) + input_shape = self._get_boto_operation_model( + boto_service_name=boto_service_name, service_action_name=service_action_name + ).input_shape + if input_shape is not None: + self._to_boto_request(parameters, input_shape) # noqa + + def _normalise_response( + self, + response: Any, + boto_service_name: Optional[str] = None, + service_action_name: Optional[str] = None, + ) -> None: + boto_service_name = self._get_boto_service_name(boto_service_name=boto_service_name) + service_action_name = self._get_boto_service_action(service_action_name=service_action_name) + output_shape = self._get_boto_operation_model( + boto_service_name=boto_service_name, service_action_name=service_action_name + ).output_shape + if output_shape is not None: + self._from_boto_response(response, output_shape) # noqa + + def _verify_size_quota(self, env: Environment, value: Union[str, json]) -> None: + is_within: bool = is_within_size_quota(value) + if is_within: + return + resource_type = self._get_sfn_resource_type() + resource = self._get_sfn_resource() + cause = ( + f"The state/task '{resource_type}' returned a result with a size " + "exceeding the maximum number of bytes service limit." 
+ ) + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesStatesDataLimitExceeded), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=StatesErrorNameType.StatesStatesDataLimitExceeded.to_name(), + cause=cause, + resourceType=resource_type, + resource=resource, + ) + ), + ) + ) + + @abc.abstractmethod + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): ... + + def _before_eval_execution( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + raw_parameters: dict, + state_credentials: StateCredentials, + ) -> None: + parameters_str = to_json_str(raw_parameters) + + scheduled_event_details = TaskScheduledEventDetails( + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + region=resource_runtime_part.region, + parameters=parameters_str, + ) + if not self.timeout.is_default_value(): + self.timeout.eval(env=env) + timeout_seconds = env.stack.pop() + scheduled_event_details["timeoutInSeconds"] = timeout_seconds + if self.heartbeat is not None: + self.heartbeat.eval(env=env) + heartbeat_seconds = env.stack.pop() + scheduled_event_details["heartbeatInSeconds"] = heartbeat_seconds + if self.credentials: + scheduled_event_details["taskCredentials"] = TaskCredentials( + roleArn=state_credentials.role_arn + ) + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.TaskScheduled, + event_details=EventDetails(taskScheduledEventDetails=scheduled_event_details), + ) + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.TaskStarted, + event_details=EventDetails( + taskStartedEventDetails=TaskStartedEventDetails( + resource=self._get_sfn_resource(), resourceType=self._get_sfn_resource_type() + ) + ), + ) + + def _after_eval_execution( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ) -> None: + output = env.stack[-1] + self._verify_size_quota(env=env, value=output) + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.TaskSucceeded, + event_details=EventDetails( + taskSucceededEventDetails=TaskSucceededEventDetails( + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + output=to_json_str(output), + outputDetails=HistoryEventExecutionDataDetails(truncated=False), + ) + ), + ) + + def _eval_execution(self, env: Environment) -> None: + self.resource.eval(env=env) + resource_runtime_part: ResourceRuntimePart = env.stack.pop() + + raw_parameters = self._eval_parameters(env=env) + state_credentials = self._eval_state_credentials(env=env) + + self._before_eval_execution( + env=env, + resource_runtime_part=resource_runtime_part, + raw_parameters=raw_parameters, + state_credentials=state_credentials, + ) + + normalised_parameters = copy.deepcopy(raw_parameters) + self._normalise_parameters(normalised_parameters) + + if env.is_mocked_mode(): + mocked_response: MockedResponse = env.get_current_mocked_response() + eval_mocked_response(env=env, mocked_response=mocked_response) + else: + self._eval_service_task( + env=env, + resource_runtime_part=resource_runtime_part, + normalised_parameters=normalised_parameters, + 
state_credentials=state_credentials, + ) + + output_value = env.stack[-1] + self._normalise_response(output_value) + + self._after_eval_execution( + env=env, + resource_runtime_part=resource_runtime_part, + normalised_parameters=normalised_parameters, + state_credentials=state_credentials, + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_api_gateway.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_api_gateway.py new file mode 100644 index 0000000000000..b4d8c660a8f81 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_api_gateway.py @@ -0,0 +1,322 @@ +from __future__ import annotations + +import http +import json +import logging +from json import JSONDecodeError +from typing import Any, Final, Optional, TypedDict +from urllib.parse import urlencode, urljoin, urlparse + +import requests +from requests import Response + +from localstack import config +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.constants import ( + APPLICATION_JSON, + HEADER_CONTENT_TYPE, + PATH_USER_REQUEST, +) +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.utils.collections import select_from_typed_dict +from localstack.utils.strings import long_uid +from localstack.utils.urls import localstack_host + +LOG = logging.getLogger(__name__) + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, +} + +ApiEndpoint = str +Headers = dict +Stage = str +Path = str +QueryParameters = dict +RequestBody = dict | str +ResponseBody = dict | str +StatusCode = int +StatusText = str +AllowNullValues = bool + + +class Method(str): + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + PATCH = "PATCH" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + + +class AuthType(str): + NO_AUTH = "NO_AUTH" + IAM_ROLE = "IAM_ROLE" + RESOURCE_POLICY = "RESOURCE_POLICY" + + +class TaskParameters(TypedDict): + ApiEndpoint: ApiEndpoint + Method: Method + Headers: Optional[Headers] + Stage: Optional[Stage] + Path: Optional[Path] + QueryParameters: Optional[QueryParameters] + RequestBody: Optional[RequestBody] + AllowNullValues: Optional[AllowNullValues] + AuthType: Optional[AuthType] + + +class InvokeOutput(TypedDict): + Headers: Headers + ResponseBody: ResponseBody + StatusCode: StatusCode + StatusText: StatusText + + +class SupportedApiCalls(str): + invoke = "invoke" + + +class SfnGatewayException(Exception): + parameters: Final[TaskParameters] + response: Final[Response] + + 
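+    # Carries the original task parameters and the raw HTTP response; _from_error below uses the
+    # response to derive an "ApiGateway.<status_code>" error name with the response text as cause.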
    def __init__(self, parameters: TaskParameters, response: Response):
+        self.parameters = parameters
+        self.response = response
+
+
+class StateTaskServiceApiGateway(StateTaskServiceCallback):
+    _SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = {
+        SupportedApiCalls.invoke: set(TaskParameters.__required_keys__)  # noqa
+    }
+
+    _FORBIDDEN_HTTP_HEADERS_PREFIX: Final[set[str]] = {"X-Forwarded", "X-Amz", "X-Amzn"}
+    _FORBIDDEN_HTTP_HEADERS: Final[set[str]] = {
+        "Authorization",
+        "Connection",
+        "Content-md5",
+        "Expect",
+        "Host",
+        "Max-Forwards",
+        "Proxy-Authenticate",
+        "Server",
+        "TE",
+        "Transfer-Encoding",
+        "Trailer",
+        "Upgrade",
+        "Via",
+        "Www-Authenticate",
+    }
+
+    def __init__(self):
+        super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS)
+
+    def _get_supported_parameters(self) -> Optional[set[str]]:
+        return self._SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower())
+
+    def _normalise_parameters(
+        self,
+        parameters: dict,
+        boto_service_name: Optional[str] = None,
+        service_action_name: Optional[str] = None,
+    ) -> None:
+        # ApiGateway does not support botocore request relay.
+        pass
+
+    def _normalise_response(
+        self,
+        response: Any,
+        boto_service_name: Optional[str] = None,
+        service_action_name: Optional[str] = None,
+    ) -> None:
+        # ApiGateway does not support botocore request relay.
+        pass
+
+    @staticmethod
+    def _query_parameters_of(parameters: TaskParameters) -> Optional[str]:
+        query_str = None
+        query_parameters = parameters.get("QueryParameters")
+        # TODO: add support for AllowNullValues.
+        if query_parameters is not None:
+            for key, value in list(query_parameters.items()):
+                if value:
+                    query_parameters[key] = value[-1]
+                else:
+                    query_parameters[key] = ""
+            query_str = f"?{urlencode(query_parameters)}"
+        return query_str
+
+    @staticmethod
+    def _headers_of(parameters: TaskParameters) -> Optional[dict]:
+        headers = parameters.get("Headers", dict())
+        if headers:
+            for key in headers.keys():
+                # TODO: the following check takes place at parse time.
+                if key in StateTaskServiceApiGateway._FORBIDDEN_HTTP_HEADERS:
+                    raise ValueError(f"The 'Headers' field contains unsupported values: {key}")
+                for forbidden_prefix in StateTaskServiceApiGateway._FORBIDDEN_HTTP_HEADERS_PREFIX:
+                    if key.startswith(forbidden_prefix):
+                        raise ValueError(f"The 'Headers' field contains unsupported values: {key}")
+
+                value = headers.get(key)
+                if isinstance(value, list):
+                    headers[key] = f"[{','.join(value)}]"
+
+        if "RequestBody" in parameters:
+            headers[HEADER_CONTENT_TYPE] = APPLICATION_JSON
+            headers["Accept"] = APPLICATION_JSON
+        return headers
+
+    @staticmethod
+    def _path_based_url_of(api_endpoint: ApiEndpoint) -> ApiEndpoint:
+        # Attempts to convert a URL-based API endpoint:
+        #   <api-id>.execute-api.<region>.localhost.localstack.cloud
+        # to a path-based one:
+        #   http://localhost:4566/restapis/<api-id>
+        # TODO: this heavily normalises URL-based API endpoints to path-based endpoints;
+        #  there's an argument to be made that this may mask implementation mistakes: investigate further.
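+        # Illustrative example (hypothetical identifiers): an endpoint such as
+        #   "abc123.execute-api.us-east-1.localhost.localstack.cloud"
+        # is rewritten to
+        #   "http://localhost:4566/restapis/abc123"
+        # while endpoints not ending with the localstack host are returned unchanged.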
+        url_spec = urlparse(api_endpoint)
+        url_path = url_spec.path
+        if not url_path.endswith(localstack_host().host):
+            return api_endpoint
+        path_parts = url_path.split(".")
+        api_id = path_parts[0]
+        path_based_api_endpoint = f"{config.internal_service_url()}/restapis/{api_id}"
+        return path_based_api_endpoint
+
+    @staticmethod
+    def _invoke_url_of(parameters: TaskParameters) -> str:
+        given_api_endpoint = parameters["ApiEndpoint"]
+        api_endpoint = StateTaskServiceApiGateway._path_based_url_of(given_api_endpoint)
+        if given_api_endpoint != api_endpoint:
+            LOG.warning(
+                "ApiEndpoint '%s' ignored in favour of %s",
+                given_api_endpoint,
+                api_endpoint,
+            )
+
+        url_base = api_endpoint + "/"
+        # http://localhost:4566/restapis/<api-id>/<stage>/_user_request_/<path>(<query-parameters>?)?
+        url_tail = "/".join(
+            [
+                parameters.get("Stage", ""),
+                PATH_USER_REQUEST,
+                parameters.get("Path", ""),
+                StateTaskServiceApiGateway._query_parameters_of(parameters) or "",
+            ]
+        )
+        invoke_url = urljoin(url_base, url_tail)
+        return invoke_url
+
+    @staticmethod
+    def _invoke_output_of(response: Response) -> InvokeOutput:
+        status_code = response.status_code
+        status_text = http.HTTPStatus(status_code).phrase
+
+        headers = dict(response.headers)
+
+        try:
+            response_body = response.json()
+        except JSONDecodeError:
+            response_body = response.text
+            if response_body == json.dumps(dict()):
+                response_body = dict()
+
+        # Since we are not using a case-insensitive dict, and we want to remove a header, for server
+        # compatibility we should consider both casing variants.
+        headers.pop("server", None)
+        headers.pop("Server", None)
+        if "date" in headers:
+            headers["Date"] = [headers.pop("date")]
+        headers[HEADER_CONTENT_TYPE] = [APPLICATION_JSON]
+        headers["Content-Length"] = [headers["Content-Length"]]
+        # TODO: add support for the following generated fields.
+        headers["Connection"] = ["keep-alive"]
+        headers["x-amz-apigw-id"] = [long_uid()]
+        headers["X-Amz-Cf-Id"] = [long_uid()]
+        headers["X-Amz-Cf-Pop"] = [long_uid()]
+        headers["x-amzn-RequestId"] = [long_uid()]
+        headers["X-Amzn-Trace-Id"] = [long_uid()]
+        headers["X-Cache"] = ["Miss from cloudfront"]
+        headers["Via"] = ["UNSUPPORTED"]
+
+        return InvokeOutput(
+            Headers=headers,
+            ResponseBody=response_body,
+            StatusCode=status_code,
+            StatusText=status_text,
+        )
+
+    def _from_error(self, env: Environment, ex: Exception) -> FailureEvent:
+        if isinstance(ex, SfnGatewayException):
+            error_name = f"ApiGateway.{ex.response.status_code}"
+            cause = ex.response.text
+        else:
+            ex_name = ex.__class__.__name__
+            error_name = f"ApiGateway.{ex_name}"
+            cause = str(ex)
+        return FailureEvent(
+            env=env,
+            error_name=CustomErrorName(error_name),
+            event_type=HistoryEventType.TaskFailed,
+            event_details=EventDetails(
+                taskFailedEventDetails=TaskFailedEventDetails(
+                    error=error_name,
+                    cause=cause,  # TODO: add support for cause decoration.
+ resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + # TODO: add support for task credentials + + task_parameters: TaskParameters = select_from_typed_dict( + typed_dict=TaskParameters, obj=normalised_parameters + ) + + method = task_parameters["Method"] + invoke_url = self._invoke_url_of(task_parameters) + headers = self._headers_of(task_parameters) + json_data = task_parameters.get("RequestBody") + + # RequestBody is only supported for PATCH, POST, and PUT + if json_data is not None and method not in {Method.PATCH, Method.POST, Method.PUT}: + raise ValueError() # TODO + + response: Response = getattr(requests, method.lower())( + invoke_url, headers=headers, json=json_data + ) + + if response.status_code != 200: + raise SfnGatewayException(parameters=task_parameters, response=response) + + invoke_output = self._invoke_output_of(response) + env.stack.append(invoke_output) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_aws_sdk.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_aws_sdk.py new file mode 100644 index 0000000000000..aff2642e29710 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_aws_sdk.py @@ -0,0 +1,138 @@ +import logging +from typing import Final + +from botocore.exceptions import ClientError, UnknownServiceError + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.aws.spec import get_service_catalog +from localstack.services.stepfunctions.asl.component.common.error_name.error_name import ErrorName +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for + +LOG = logging.getLogger(__name__) + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, +} + +# Defines bindings of lower-cased service names to the StepFunctions service name included in error messages. +_SERVICE_ERROR_NAMES = {"dynamodb": "DynamoDb", "sfn": "Sfn"} + + +class StateTaskServiceAwsSdk(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _validate_service_integration_is_supported(self): + # As no aws-sdk support catalog is available, allow invalid aws-sdk integration to fail at runtime. 
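+        # E.g. a task naming a non-existent aws-sdk service or action surfaces as a runtime
+        # TaskFailed event rather than a parse-time validation error.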
+ pass + + def _get_sfn_resource_type(self) -> str: + return f"{self.resource.service_name}:{self.resource.api_name}" + + @staticmethod + def _normalise_service_error_name(service_name: str) -> str: + # Computes the normalised service error name for the given service. + + # Return the explicit binding if one exists. + service_name_lower = service_name.lower() + if service_name_lower in _SERVICE_ERROR_NAMES: + return _SERVICE_ERROR_NAMES[service_name_lower] + + # Attempt to retrieve the service name from the catalog. + try: + service_model = get_service_catalog().get(service_name) + if service_model is not None: + sfn_normalised_service_name = service_model.service_id.replace(" ", "") + return sfn_normalised_service_name + except UnknownServiceError: + LOG.warning( + "No service for name '%s' when building aws-sdk service error name.", + service_name, + ) + + # Revert to returning the resource's service name and log the missing binding. + LOG.error( + "No normalised service error name for aws-sdk integration was found for service: '%s'", + service_name, + ) + return service_name + + @staticmethod + def _normalise_exception_name(norm_service_name: str, ex: Exception) -> str: + ex_name = ex.__class__.__name__ + norm_ex_name = ( + f"{norm_service_name}.{norm_service_name if ex_name == 'ClientError' else ex_name}" + ) + if not norm_ex_name.endswith("Exception"): + norm_ex_name += "Exception" + return norm_ex_name + + def _get_task_failure_event(self, env: Environment, error: str, cause: str) -> FailureEvent: + return FailureEvent( + env=env, + error_name=ErrorName(error_name=error), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + error=error, + cause=cause, + ) + ), + ) + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, ClientError): + norm_service_name: str = self._normalise_service_error_name(self.resource.api_name) + error: str = self._normalise_exception_name(norm_service_name, ex) + + error_message: str = ex.response["Error"]["Message"] + cause_details = [ + f"Service: {norm_service_name}", + f"Status Code: {ex.response['ResponseMetadata']['HTTPStatusCode']}", + f"Request ID: {ex.response['ResponseMetadata']['RequestId']}", + ] + if "HostId" in ex.response["ResponseMetadata"]: + cause_details.append( + f"Extended Request ID: {ex.response['ResponseMetadata']['HostId']}" + ) + + cause: str = f"{error_message} ({', '.join(cause_details)})" + failure_event = self._get_task_failure_event(env=env, error=error, cause=cause) + return failure_event + return super()._from_error(env=env, ex=ex) + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + service_name = self._get_boto_service_name() + api_action = self._get_boto_service_action() + api_client = boto_client_for( + service=service_name, + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + response = getattr(api_client, api_action)(**normalised_parameters) or dict() + if response: + response.pop("ResponseMetadata", None) + env.stack.append(response) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_batch.py 
b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_batch.py new file mode 100644 index 0000000000000..bc83e1f327121 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_batch.py @@ -0,0 +1,200 @@ +from typing import Any, Callable, Final, Optional + +from botocore.exceptions import ClientError +from moto.batch.utils import JobStatus + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, + ResourceCondition.Sync, +} + +_BATCH_JOB_TERMINATION_STATUS_SET: Final[set[JobStatus]] = {JobStatus.SUCCEEDED, JobStatus.FAILED} + +_ENVIRONMENT_VARIABLE_MANAGED_BY_AWS: Final[str] = "MANAGED_BY_AWS" +_ENVIRONMENT_VARIABLE_MANAGED_BY_AWS_VALUE: Final[str] = "STARTED_BY_STEP_FUNCTIONS" + +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = { + "submitjob": { + "ArrayProperties", + "ContainerOverrides", + "DependsOn", + "JobDefinition", + "JobName", + "JobQueue", + "Parameters", + "RetryStrategy", + "Timeout", + "Tags", + } +} + + +class StateTaskServiceBatch(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + @staticmethod + def _attach_aws_environment_variables(parameters: dict) -> None: + # Attaches to the ContainerOverrides environment variables the AWS managed flags. 
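+        # E.g. (hypothetical input) ContainerOverrides.Environment gains the entry
+        #   {"name": "MANAGED_BY_AWS", "value": "STARTED_BY_STEP_FUNCTIONS"};
+        # missing ContainerOverrides or Environment containers are created along the way.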
+ container_overrides = parameters.get("ContainerOverrides") + if container_overrides is None: + container_overrides = dict() + parameters["ContainerOverrides"] = container_overrides + + environment = container_overrides.get("Environment") + if environment is None: + environment = list() + container_overrides["Environment"] = environment + + environment.append( + { + "name": _ENVIRONMENT_VARIABLE_MANAGED_BY_AWS, + "value": _ENVIRONMENT_VARIABLE_MANAGED_BY_AWS_VALUE, + } + ) + + def _before_eval_execution( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + raw_parameters: dict, + state_credentials: StateCredentials, + ) -> None: + if self.resource.condition == ResourceCondition.Sync: + self._attach_aws_environment_variables(parameters=raw_parameters) + super()._before_eval_execution( + env=env, + resource_runtime_part=resource_runtime_part, + raw_parameters=raw_parameters, + state_credentials=state_credentials, + ) + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, ClientError): + error_code = ex.response["Error"]["Code"] + error_name = f"Batch.{error_code}" + status_code = ex.response["ResponseMetadata"]["HTTPStatusCode"] + error_message = ex.response["Error"]["Message"] + request_id = ex.response["ResponseMetadata"]["RequestId"] + response_details = "; ".join( + [ + "Service: AWSBatch", + f"Status Code: {status_code}", + f"Error Code: {error_code}", + f"Request ID: {request_id}", + "Proxy: null", + ] + ) + cause = f"Error executing request, Exception : {error_message}, RequestId: {request_id} ({response_details})" + return FailureEvent( + env=env, + error_name=CustomErrorName(error_name), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=error_name, + cause=cause, + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + return super()._from_error(env=env, ex=ex) + + def _build_sync_resolver( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ) -> Callable[[], Optional[Any]]: + batch_client = boto_client_for( + service="batch", + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + submission_output: dict = env.stack.pop() + job_id = submission_output["JobId"] + + def _sync_resolver() -> Optional[dict]: + describe_jobs_response = batch_client.describe_jobs(jobs=[job_id]) + describe_jobs = describe_jobs_response["jobs"] + if describe_jobs: + describe_job = describe_jobs[0] + describe_job_status: JobStatus = describe_job["status"] + # Add raise error if abnormal state + if describe_job_status in _BATCH_JOB_TERMINATION_STATUS_SET: + self._normalise_response( + response=describe_jobs_response, service_action_name="describe_jobs" + ) + if describe_job_status == JobStatus.SUCCEEDED: + return describe_job + + raise FailureEventException( + FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesTaskFailed), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + error=StatesErrorNameType.StatesTaskFailed.to_name(), + cause=to_json_str(describe_job), + ) + ), + ) + ) + return None + + return _sync_resolver + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + 
normalised_parameters: dict, + state_credentials: StateCredentials, + ): + service_name = self._get_boto_service_name() + api_action = self._get_boto_service_action() + batch_client = boto_client_for( + region=resource_runtime_part.region, + service=service_name, + state_credentials=state_credentials, + ) + response = getattr(batch_client, api_action)(**normalised_parameters) + response.pop("ResponseMetadata", None) + env.stack.append(response) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py new file mode 100644 index 0000000000000..bed6e8b78fdd5 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_callback.py @@ -0,0 +1,360 @@ +import abc +import json +import threading +import time +from typing import Any, Callable, Final, Optional + +from localstack.aws.api.stepfunctions import ( + HistoryEventExecutionDataDetails, + HistoryEventType, + TaskFailedEventDetails, + TaskSubmittedEventDetails, +) +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service import ( + StateTaskService, +) +from localstack.services.stepfunctions.asl.eval.callback.callback import ( + CallbackEndpoint, + CallbackOutcome, + CallbackOutcomeFailure, + CallbackOutcomeFailureError, + CallbackOutcomeSuccess, + CallbackOutcomeTimedOut, + CallbackTimeoutError, + HeartbeatEndpoint, + HeartbeatTimedOut, + HeartbeatTimeoutError, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.utils.threads import TMP_THREADS + +# TODO: consider implementing a polling pattern similar to that observable from AWS: +# https://repost.aws/questions/QUFFlHcbvIQFe-bS3RAi7TWA/a-glue-job-in-a-step-function-is-taking-so-long-to-continue-the-next-step +_DELAY_SECONDS_SYNC_CONDITION_CHECK: Final[float] = 0.5 + + +class StateTaskServiceCallback(StateTaskService, abc.ABC): + _supported_integration_patterns: Final[set[ResourceCondition]] + + def __init__(self, supported_integration_patterns: set[ResourceCondition]): + super().__init__() + self._supported_integration_patterns = supported_integration_patterns + + def _get_sfn_resource(self) -> str: + resource = super()._get_sfn_resource() + if self.resource.condition is not None: + resource += f".{self.resource.condition}" + return resource + + def _build_sync_resolver( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ) -> Callable[[], Optional[Any]]: + raise RuntimeError( + f"Unsupported .sync callback 
procedure in resource {self.resource.resource_arn}"
+        )
+
+    def _build_sync2_resolver(
+        self,
+        env: Environment,
+        resource_runtime_part: ResourceRuntimePart,
+        normalised_parameters: dict,
+        state_credentials: StateCredentials,
+    ) -> Callable[[], Optional[Any]]:
+        raise RuntimeError(
+            f"Unsupported .sync2 callback procedure in resource {self.resource.resource_arn}"
+        )
+
+    def _eval_wait_for_task_token(
+        self,
+        env: Environment,
+        timeout_seconds: int,
+        callback_endpoint: CallbackEndpoint,
+        heartbeat_endpoint: Optional[HeartbeatEndpoint],
+    ) -> CallbackOutcome:
+        outcome: Optional[CallbackOutcome]
+        if heartbeat_endpoint is not None:
+            outcome = self._wait_for_task_token_heartbeat(
+                env, callback_endpoint, heartbeat_endpoint
+            )
+        else:
+            outcome = self._wait_for_task_token_timeout(timeout_seconds, callback_endpoint)
+        if outcome is None:
+            return CallbackOutcomeTimedOut(callback_id=callback_endpoint.callback_id)
+        return outcome
+
+    def _eval_sync(
+        self,
+        env: Environment,
+        sync_resolver: Callable[[], Optional[Any]],
+        timeout_seconds: Optional[int],
+        callback_endpoint: Optional[CallbackEndpoint],
+        heartbeat_endpoint: Optional[HeartbeatEndpoint],
+    ) -> CallbackOutcome | Any:
+        callback_output: Optional[CallbackOutcome] = None
+
+        # Listen for WaitForTaskToken signals if an endpoint is provided.
+        if callback_endpoint is not None:
+
+            def _local_update_wait_for_task_token():
+                nonlocal callback_output
+                callback_output = self._eval_wait_for_task_token(
+                    env=env,
+                    timeout_seconds=timeout_seconds,
+                    callback_endpoint=callback_endpoint,
+                    heartbeat_endpoint=heartbeat_endpoint,
+                )
+
+            thread_wait_for_task_token = threading.Thread(
+                target=_local_update_wait_for_task_token,
+                name=f"WaitForTaskToken_SyncTask_{self.resource.resource_arn}",
+                daemon=True,
+            )
+            TMP_THREADS.append(thread_wait_for_task_token)
+            thread_wait_for_task_token.start()
+            # Note: stopping this worker thread is handled indirectly through the state of env:
+            # an exception in this thread invalidates env, and therefore the worker thread,
+            # hence there is no explicit stopping logic for thread_wait_for_task_token here.
+
+        sync_result: Optional[Any] = None
+        while env.is_running():
+            sync_result = sync_resolver()
+            if callback_output or sync_result:
+                break
+            else:
+                time.sleep(_DELAY_SECONDS_SYNC_CONDITION_CHECK)
+
+        return callback_output or sync_result
+
+    def _eval_integration_pattern(
+        self,
+        env: Environment,
+        resource_runtime_part: ResourceRuntimePart,
+        normalised_parameters: dict,
+        state_credentials: StateCredentials,
+    ) -> None:
+        task_output = env.stack.pop()
+
+        # Initialise the waitForTaskToken callback endpoint for this task if supported.
+        callback_endpoint: Optional[CallbackEndpoint] = None
+        if ResourceCondition.WaitForTaskToken in self._supported_integration_patterns:
+            callback_id = env.states.context_object.context_object_data["Task"]["Token"]
+            callback_endpoint = env.callback_pool_manager.get(callback_id)
+
+        # Set up resources for timeout control.
+        self.timeout.eval(env=env)
+        timeout_seconds = env.stack.pop()
+
+        # Set up resources for heartbeat workloads if necessary.
+        heartbeat_endpoint: Optional[HeartbeatEndpoint] = None
+        if self.heartbeat:
+            self.heartbeat.eval(env=env)
+            heartbeat_seconds = env.stack.pop()
+            heartbeat_endpoint: HeartbeatEndpoint = callback_endpoint.setup_heartbeat_endpoint(
+                heartbeat_seconds=heartbeat_seconds
+            )
+
+        # Collect the output of the integration pattern.
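+        # The outcome below is whichever resolution path finishes first: the waitForTaskToken
+        # callback endpoint or the polling sync resolver; exceptions raised while resolving are
+        # captured and re-dispatched after the finally block so error handling stays uniform.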
+        outcome: CallbackOutcome | Any
+        try:
+            if self.resource.condition == ResourceCondition.WaitForTaskToken:
+                outcome = self._eval_wait_for_task_token(
+                    env=env,
+                    timeout_seconds=timeout_seconds,
+                    callback_endpoint=callback_endpoint,
+                    heartbeat_endpoint=heartbeat_endpoint,
+                )
+            else:
+                # Sync operations require the task output as input.
+                env.stack.append(task_output)
+                if self.resource.condition == ResourceCondition.Sync:
+                    sync_resolver = self._build_sync_resolver(
+                        env=env,
+                        resource_runtime_part=resource_runtime_part,
+                        normalised_parameters=normalised_parameters,
+                        state_credentials=state_credentials,
+                    )
+                else:
+                    # The checks on the resource's condition are exhaustive, leaving
+                    # only the Sync2 ResourceCondition type for this block.
+                    sync_resolver = self._build_sync2_resolver(
+                        env=env,
+                        resource_runtime_part=resource_runtime_part,
+                        normalised_parameters=normalised_parameters,
+                        state_credentials=state_credentials,
+                    )
+
+                outcome = self._eval_sync(
+                    env=env,
+                    timeout_seconds=timeout_seconds,
+                    callback_endpoint=callback_endpoint,
+                    heartbeat_endpoint=heartbeat_endpoint,
+                    sync_resolver=sync_resolver,
+                )
+        except Exception as integration_exception:
+            outcome = integration_exception
+        finally:
+            # Now that the outcome is collected, or the exception is about to be passed upstream,
+            # and the process has finished, ensure all threads waiting on this endpoint (or task)
+            # are stopped. This is an effort to release resources sooner than when these would
+            # eventually synchronise with the updated environment state of this task.
+            callback_endpoint.interrupt_all()
+
+        # Handle Callback outcome types.
+        if isinstance(outcome, CallbackOutcomeTimedOut):
+            raise CallbackTimeoutError()
+        elif isinstance(outcome, HeartbeatTimedOut):
+            raise HeartbeatTimeoutError()
+        elif isinstance(outcome, CallbackOutcomeFailure):
+            raise CallbackOutcomeFailureError(callback_outcome_failure=outcome)
+        elif isinstance(outcome, CallbackOutcomeSuccess):
+            outcome_output = json.loads(outcome.output)
+            env.stack.append(outcome_output)
+        # Pass evaluation exceptions upstream for error handling.
+        elif isinstance(outcome, Exception):
+            raise outcome
+        # Otherwise the outcome is the result of the integration pattern (sync, sync2);
+        # therefore push it onto the evaluation stack for the next operations.
+        else:
+            env.stack.append(outcome)
+
+    def _wait_for_task_token_timeout(  # noqa
+        self,
+        timeout_seconds: int,
+        callback_endpoint: CallbackEndpoint,
+    ) -> Optional[CallbackOutcome]:
+        # Awaits a callback notification and returns the outcome received.
+        # If the operation times out or is interrupted it returns None.
+
+        # Although the timeout is already handled by the superclass (ExecutionState),
+        # the timeout value is specified here too, to allow this child process to terminate earlier even if
+        # discarded by the main process.
+        # Note: although this is the same timeout value, it can only elapse strictly after the first timeout
+        # has started, as it is invoked strictly later.
+        outcome: Optional[CallbackOutcome] = callback_endpoint.wait(timeout=timeout_seconds)
+        return outcome
+
+    def _wait_for_task_token_heartbeat(  # noqa
+        self,
+        env: Environment,
+        callback_endpoint: CallbackEndpoint,
+        heartbeat_endpoint: HeartbeatEndpoint,
+    ) -> Optional[CallbackOutcome]:
+        outcome = None
+        while (
+            env.is_running()
+            and outcome
+            is None  # Note: the lifetime of this environment is this task's, not the entire state machine program's.
+        ):  # Loop until the subprocess times out or a result is received.
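+            # Each iteration waits out one heartbeat window: a missed heartbeat while the
+            # environment is still running resolves to HeartbeatTimedOut; otherwise the callback
+            # endpoint is checked for a delivered outcome.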
+            received = heartbeat_endpoint.clear_and_wait()
+            if not received and env.is_running():  # Heartbeat timed out.
+                outcome = HeartbeatTimedOut()
+            else:
+                outcome = callback_endpoint.get_outcome()
+        return outcome
+
+    def _assert_integration_pattern_is_supported(self):
+        integration_pattern = self.resource.condition
+        if integration_pattern not in self._supported_integration_patterns:
+            raise RuntimeError(
+                f"Unsupported {integration_pattern} callback procedure in resource {self.resource.resource_arn}"
+            )
+
+    def _is_integration_pattern(self):
+        return self.resource.condition is not None
+
+    def _get_callback_outcome_failure_event(
+        self, env: Environment, ex: CallbackOutcomeFailureError
+    ) -> FailureEvent:
+        callback_outcome_failure: CallbackOutcomeFailure = ex.callback_outcome_failure
+        error: Optional[str] = callback_outcome_failure.error
+        return FailureEvent(
+            env=env,
+            error_name=CustomErrorName(error_name=error),
+            event_type=HistoryEventType.TaskFailed,
+            event_details=EventDetails(
+                taskFailedEventDetails=TaskFailedEventDetails(
+                    resourceType=self._get_sfn_resource_type(),
+                    resource=self._get_sfn_resource(),
+                    error=error,
+                    cause=callback_outcome_failure.cause,
+                )
+            ),
+        )
+
+    def _from_error(self, env: Environment, ex: Exception) -> FailureEvent:
+        if isinstance(ex, CallbackOutcomeFailureError):
+            return self._get_callback_outcome_failure_event(env=env, ex=ex)
+        return super()._from_error(env=env, ex=ex)
+
+    def _eval_body(self, env: Environment) -> None:
+        # Generate a TaskToken uuid within the context object, if this task's resource has a callback condition.
+        # https://docs.aws.amazon.com/step-functions/latest/dg/connect-to-resource.html#connect-wait-token
+        if (
+            self._is_integration_pattern()
+            and ResourceCondition.WaitForTaskToken in self._supported_integration_patterns
+        ):
+            self._assert_integration_pattern_is_supported()
+            task_token = env.states.context_object.update_task_token()
+            env.callback_pool_manager.add(task_token)
+
+        super()._eval_body(env=env)
+
+        # Ensure the TaskToken field is reset, as this is only available during waitForTaskToken task evaluations.
+        env.states.context_object.context_object_data.pop("Task", None)
+
+    def _after_eval_execution(
+        self,
+        env: Environment,
+        resource_runtime_part: ResourceRuntimePart,
+        normalised_parameters: dict,
+        state_credentials: StateCredentials,
+    ) -> None:
+        # TODO: In Mock mode, when simulating a failure, the mock response is handled by
+        #  super()._eval_execution, so this block is never executed. Consequently, the
+        #  "TaskSubmitted" event isn't recorded in the event history.
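+        # The ordering below is intended to mirror AWS: TaskSubmitted is recorded first, then the
+        # integration pattern (callback/sync) is resolved, and only then does the super call record
+        # TaskSucceeded.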
+ if self._is_integration_pattern(): + output = env.stack[-1] + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.TaskSubmitted, + event_details=EventDetails( + taskSubmittedEventDetails=TaskSubmittedEventDetails( + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + output=to_json_str(output), + outputDetails=HistoryEventExecutionDataDetails(truncated=False), + ) + ), + ) + if not env.is_mocked_mode(): + self._eval_integration_pattern( + env=env, + resource_runtime_part=resource_runtime_part, + normalised_parameters=normalised_parameters, + state_credentials=state_credentials, + ) + super()._after_eval_execution( + env=env, + resource_runtime_part=resource_runtime_part, + normalised_parameters=normalised_parameters, + state_credentials=state_credentials, + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_dynamodb.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_dynamodb.py new file mode 100644 index 0000000000000..9fb484abc6362 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_dynamodb.py @@ -0,0 +1,147 @@ +from typing import Final, Optional + +from botocore.exceptions import ClientError + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service import ( + StateTaskService, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for + +_ERROR_NAME_AWS: Final[str] = "DynamoDB.AmazonDynamoDBException" + +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = { + "getitem": { + "Key", + "TableName", + "AttributesToGet", + "ConsistentRead", + "ExpressionAttributeNames", + "ProjectionExpression", + "ReturnConsumedCapacity", + }, + "putitem": { + "Item", + "TableName", + "ConditionalOperator", + "ConditionExpression", + "Expected", + "ExpressionAttributeNames", + "ExpressionAttributeValues", + "ReturnConsumedCapacity", + "ReturnItemCollectionMetrics", + "ReturnValues", + }, + "deleteitem": { + "Key", + "TableName", + "ConditionalOperator", + "ConditionExpression", + "Expected", + "ExpressionAttributeNames", + "ExpressionAttributeValues", + "ReturnConsumedCapacity", + "ReturnItemCollectionMetrics", + "ReturnValues", + }, + "updateitem": { + "Key", + "TableName", + "AttributeUpdates", + "ConditionalOperator", + "ConditionExpression", + "Expected", + "ExpressionAttributeNames", + "ExpressionAttributeValues", + "ReturnConsumedCapacity", + "ReturnItemCollectionMetrics", + "ReturnValues", + "UpdateExpression", + }, +} + + +class 
StateTaskServiceDynamoDB(StateTaskService): + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + @staticmethod + def _error_cause_from_client_error(client_error: ClientError) -> tuple[str, str]: + error_code: str = client_error.response["Error"]["Code"] + error_msg: str = client_error.response["Error"]["Message"] + response_details = "; ".join( + [ + "Service: AmazonDynamoDBv2", + f"Status Code: {client_error.response['ResponseMetadata']['HTTPStatusCode']}", + f"Error Code: {error_code}", + f"Request ID: {client_error.response['ResponseMetadata']['RequestId']}", + "Proxy: null", + ] + ) + error = f"DynamoDB.{error_code}" + cause = f"{error_msg} ({response_details})" + return error, cause + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, ClientError): + error, cause = self._error_cause_from_client_error(ex) + error_name = CustomErrorName(error) + return FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=error, + cause=cause, + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + else: + return FailureEvent( + env=env, + error_name=CustomErrorName(_ERROR_NAME_AWS), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=_ERROR_NAME_AWS, + cause=str(ex), # TODO: update to report expected cause. + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + service_name = self._get_boto_service_name() + api_action = self._get_boto_service_action() + dynamodb_client = boto_client_for( + service=service_name, + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + response = getattr(dynamodb_client, api_action)(**normalised_parameters) + response.pop("ResponseMetadata", None) + env.stack.append(response) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_ecs.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_ecs.py new file mode 100644 index 0000000000000..3b3473aaa848c --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_ecs.py @@ -0,0 +1,127 @@ +from typing import Any, Callable, Final, Optional + +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, + ResourceCondition.Sync, 
+} + +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = { + "runtask": { + "Cluster", + "Group", + "LaunchType", + "NetworkConfiguration", + "Overrides", + "PlacementConstraints", + "PlacementStrategy", + "PlatformVersion", + "PropagateTags", + "TaskDefinition", + "EnableExecuteCommand", + } +} + +_STARTED_BY_PARAMETER_RAW_KEY: Final[str] = "StartedBy" +_STARTED_BY_PARAMETER_VALUE: Final[str] = "AWS Step Functions" + + +class StateTaskServiceEcs(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + def _before_eval_execution( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + raw_parameters: dict, + state_credentials: StateCredentials, + ) -> None: + if self.resource.condition == ResourceCondition.Sync: + raw_parameters[_STARTED_BY_PARAMETER_RAW_KEY] = _STARTED_BY_PARAMETER_VALUE + super()._before_eval_execution( + env=env, + resource_runtime_part=resource_runtime_part, + raw_parameters=raw_parameters, + state_credentials=state_credentials, + ) + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + service_name = self._get_boto_service_name() + api_action = self._get_boto_service_action() + ecs_client = boto_client_for( + region=resource_runtime_part.region, + service=service_name, + state_credentials=state_credentials, + ) + response = getattr(ecs_client, api_action)(**normalised_parameters) + response.pop("ResponseMetadata", None) + + # AWS outputs the description of the task, not the output of run_task. 
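+        # Illustrative flow (hypothetical ARNs): run_task returns {"Tasks": [{"ClusterArn": ..., "TaskArn": ...}]}
+        # after normalisation; those ARNs feed a describe_tasks call whose normalised output is what
+        # lands on the evaluation stack.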
+ match self._get_boto_service_action(): + case "run_task": + self._normalise_response(response=response, service_action_name="run_task") + cluster_arn: str = response["Tasks"][0]["ClusterArn"] + task_arn: str = response["Tasks"][0]["TaskArn"] + describe_tasks_output = ecs_client.describe_tasks( + cluster=cluster_arn, tasks=[task_arn] + ) + describe_tasks_output.pop("ResponseMetadata", None) + self._normalise_response( + response=describe_tasks_output, service_action_name="describe_tasks" + ) + env.stack.append(describe_tasks_output) + return + + env.stack.append(response) + + def _build_sync_resolver( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ) -> Callable[[], Optional[Any]]: + ecs_client = boto_client_for( + service="ecs", + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + submission_output: dict = env.stack.pop() + task_arn: str = submission_output["Tasks"][0]["TaskArn"] + cluster_arn: str = submission_output["Tasks"][0]["ClusterArn"] + + def _sync_resolver() -> Optional[dict]: + describe_tasks_output = ecs_client.describe_tasks(cluster=cluster_arn, tasks=[task_arn]) + last_status: str = describe_tasks_output["tasks"][0]["lastStatus"] + + if last_status == "STOPPED": + self._normalise_response( + response=describe_tasks_output, service_action_name="describe_tasks" + ) + return describe_tasks_output["Tasks"][0] # noqa + + return None + + return _sync_resolver diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_events.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_events.py new file mode 100644 index 0000000000000..19640f84ab02f --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_events.py @@ -0,0 +1,112 @@ +import json +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.error_name import ErrorName +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, +} +_FAILED_ENTRY_ERROR_NAME: Final[ErrorName] = CustomErrorName(error_name="EventBridge.FailedEntry") + +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, 
set[str]]] = {"putevents": {"Entries"}} + + +class SfnFailedEntryCountException(RuntimeError): + cause: Final[Optional[dict]] + + def __init__(self, cause: Optional[dict]): + super().__init__(json.dumps(cause)) + self.cause = cause + + +class StateTaskServiceEvents(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, SfnFailedEntryCountException): + return FailureEvent( + env=env, + error_name=_FAILED_ENTRY_ERROR_NAME, + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=_FAILED_ENTRY_ERROR_NAME.error_name, + cause=ex.cause, + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + return super()._from_error(env=env, ex=ex) + + @staticmethod + def _normalised_request_parameters(env: Environment, parameters: dict): + entries = parameters.get("Entries", []) + for entry in entries: + # Optimised integration for events automatically stringifies "Entries.Detail" if this is not a string, + # and only if these are json objects. + if "Detail" in entry: + detail = entry.get("Detail") + if isinstance(detail, dict): + entry["Detail"] = to_json_str(detail) # Pass runtime error upstream. + + # The execution ARN and the state machine ARN are automatically appended to the Resources + # field of each PutEventsRequestEntry. + resources = entry.get("Resources", []) + resources.append(env.states.context_object.context_object_data["StateMachine"]["Id"]) + resources.append(env.states.context_object.context_object_data["Execution"]["Id"]) + entry["Resources"] = resources + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + self._normalised_request_parameters(env=env, parameters=normalised_parameters) + service_name = self._get_boto_service_name() + api_action = self._get_boto_service_action() + events_client = boto_client_for( + service=service_name, + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + response = getattr(events_client, api_action)(**normalised_parameters) + response.pop("ResponseMetadata", None) + + # If the response from PutEvents contains a non-zero FailedEntryCount then the + # Task state fails with the error EventBridge.FailedEntry. + if self.resource.api_action == "putEvents": + failed_entry_count = response.get("FailedEntryCount", 0) + if failed_entry_count > 0: + # TODO: pipe events' cause in the exception object. At them moment + # LS events does not update this field. 
+ raise SfnFailedEntryCountException(cause=response) + + env.stack.append(response) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_factory.py new file mode 100644 index 0000000000000..bd89c6ccc61ea --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_factory.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from typing import Final + +from antlr4 import RecognitionException + +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service import ( + StateTaskService, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_api_gateway import ( + StateTaskServiceApiGateway, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_aws_sdk import ( + StateTaskServiceAwsSdk, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_batch import ( + StateTaskServiceBatch, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_dynamodb import ( + StateTaskServiceDynamoDB, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_ecs import ( + StateTaskServiceEcs, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_events import ( + StateTaskServiceEvents, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_glue import ( + StateTaskServiceGlue, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_lambda import ( + StateTaskServiceLambda, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_sfn import ( + StateTaskServiceSfn, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_sns import ( + StateTaskServiceSns, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_sqs import ( + StateTaskServiceSqs, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_unsupported import ( + StateTaskServiceUnsupported, +) + +_UNSUPPORTED_SERVICE_NAMES: Final[set[str]] = { + "athena", + "bedrock", + "codebuild", + "eks", + "elasticmapreduce", + "emr-containers", + "emr-serverless", + "databrew", + "mediaconvert", + "sagemaker", +} + + +# TODO: improve on factory constructor (don't use SubtypeManager: cannot reuse state task instances). 
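+# Sketch of the dispatch below (assumed call site): parsing a resource like
+# "arn:aws:states:::dynamodb:getItem" yields service_name "dynamodb", so
+# state_task_service_for("dynamodb") returns a fresh StateTaskServiceDynamoDB;
+# unknown services raise a RecognitionException at parse time.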
+def state_task_service_for(service_name: str) -> StateTaskService: + match service_name: + case "aws-sdk": + return StateTaskServiceAwsSdk() + case "lambda": + return StateTaskServiceLambda() + case "sqs": + return StateTaskServiceSqs() + case "states": + return StateTaskServiceSfn() + case "dynamodb": + return StateTaskServiceDynamoDB() + case "apigateway": + return StateTaskServiceApiGateway() + case "sns": + return StateTaskServiceSns() + case "events": + return StateTaskServiceEvents() + case "ecs": + return StateTaskServiceEcs() + case "glue": + return StateTaskServiceGlue() + case "batch": + return StateTaskServiceBatch() + case _ if service_name in _UNSUPPORTED_SERVICE_NAMES: + return StateTaskServiceUnsupported() + case unknown: + raise RecognitionException(f"Unknown service '{unknown}'") diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_glue.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_glue.py new file mode 100644 index 0000000000000..f66a00e26d4ef --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_glue.py @@ -0,0 +1,240 @@ +from typing import Any, Callable, Final, Optional + +import boto3 +from botocore.exceptions import ClientError + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.Sync, +} + +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = { + "startjobrun": { + "JobName", + "JobRunQueuingEnabled", + "JobRunId", + "Arguments", + "AllocatedCapacity", + "Timeout", + "MaxCapacity", + "SecurityConfiguration", + "NotificationProperty", + "WorkerType", + "NumberOfWorkers", + "ExecutionClass", + } +} + +# Set of JobRunState value that indicate the JobRun had terminated in an abnormal state. +_JOB_RUN_STATE_ABNORMAL_TERMINAL_VALUE: Final[set[str]] = {"FAILED", "TIMEOUT", "ERROR"} + +# Set of JobRunState values that indicate the JobRun has terminated. 
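+# (E.g. a hypothetical run still in "RUNNING" keeps the sync resolver polling; "TIMEOUT" or
+# "FAILED" ends polling and is surfaced as a task failure.)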
+_JOB_RUN_STATE_TERMINAL_VALUES: Final[set[str]] = { + "STOPPED", + "SUCCEEDED", + *_JOB_RUN_STATE_ABNORMAL_TERMINAL_VALUE, +} + +# The handler function name prefix for StateTaskServiceGlue objects. +_HANDLER_REFLECTION_PREFIX: Final[str] = "_handle_" +# The sync handler function name prefix for StateTaskServiceGlue objects. +_SYNC_HANDLER_REFLECTION_PREFIX: Final[str] = "_sync_to_" +# The type of (sync) handler function for StateTaskServiceGlue objects. +_API_ACTION_HANDLER_TYPE = Callable[ + [Environment, ResourceRuntimePart, dict, StateCredentials], None +] +# The type of (sync) handler builder function for StateTaskServiceGlue objects. +_API_ACTION_HANDLER_BUILDER_TYPE = Callable[ + [Environment, ResourceRuntimePart, dict, StateCredentials], Callable[[], Optional[Any]] +] + + +class StateTaskServiceGlue(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + def _get_api_action_handler(self) -> _API_ACTION_HANDLER_TYPE: + api_action = self._get_boto_service_action() + handler_name = _HANDLER_REFLECTION_PREFIX + api_action + # Use a None default so a missing handler surfaces as the ValueError below, not an AttributeError. + resolver_handler = getattr(self, handler_name, None) + if resolver_handler is None: + raise ValueError(f"Unknown or unsupported glue action '{api_action}'.") + return resolver_handler + + def _get_api_action_sync_builder_handler(self) -> _API_ACTION_HANDLER_BUILDER_TYPE: + api_action = self._get_boto_service_action() + handler_name = _SYNC_HANDLER_REFLECTION_PREFIX + api_action + resolver_handler = getattr(self, handler_name, None) + if resolver_handler is None: + raise ValueError(f"Unknown or unsupported glue action '{api_action}'.") + return resolver_handler + + @staticmethod + def _get_glue_client( + resource_runtime_part: ResourceRuntimePart, state_credentials: StateCredentials + ) -> boto3.client: + return boto_client_for( + service="glue", + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, ClientError): + error_code = ex.response["Error"]["Code"] + error_name: str = f"Glue.{error_code}" + return FailureEvent( + env=env, + error_name=CustomErrorName(error_name), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=error_name, + cause=ex.response["Error"]["Message"], + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + return super()._from_error(env=env, ex=ex) + + def _wait_for_task_token( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + ) -> None: + raise RuntimeError( + f"Unsupported .waitForTaskToken callback procedure in resource {self.resource.resource_arn}" + ) + + def _handle_start_job_run( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + computed_credentials: StateCredentials, + ): + glue_client = self._get_glue_client( + resource_runtime_part=resource_runtime_part, state_credentials=computed_credentials + ) + response = glue_client.start_job_run(**normalised_parameters) + response.pop("ResponseMetadata", None) + # AWS StepFunctions extracts the JobName from the request and inserts it into the response, which + # normally only contains JobRunId; as this is a required field for start_job_run,
the access at + # this depth is safe. + response["JobName"] = normalised_parameters.get("JobName") + env.stack.append(response) + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + # Source the action handler and delegate the evaluation. + api_action_handler = self._get_api_action_handler() + api_action_handler(env, resource_runtime_part, normalised_parameters, state_credentials) + + def _sync_to_start_job_run( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ) -> Callable[[], Optional[Any]]: + # Poll the job run state from Glue using GetJobRun until the job has terminated, then append the + # output of GetJobRun to the state. + + # Access the JobName and the JobRunId from the output of the StartJobRun call that must + # have occurred before this point. + start_job_run_output: dict = env.stack.pop() + job_name: str = start_job_run_output["JobName"] + job_run_id: str = start_job_run_output["JobRunId"] + + glue_client = self._get_glue_client( + resource_runtime_part=resource_runtime_part, state_credentials=state_credentials + ) + + def _sync_resolver() -> Optional[Any]: + # Sample GetJobRun until completion. + get_job_run_response: dict = glue_client.get_job_run(JobName=job_name, RunId=job_run_id) + job_run: dict = get_job_run_response["JobRun"] + job_run_state: str = job_run["JobRunState"] + + # If the job run has not terminated, continue and check later. + is_terminated: bool = job_run_state in _JOB_RUN_STATE_TERMINAL_VALUES + if not is_terminated: + return None + + # AWS StepFunctions appears to attach the JobName to the output in both the error and success cases. + job_run["JobName"] = job_name + + # If the job run terminated in a normal state, return the result. + is_abnormal_termination = job_run_state in _JOB_RUN_STATE_ABNORMAL_TERMINAL_VALUE + if not is_abnormal_termination: + return job_run + + # If the job run has terminated with an abnormal state, raise the error in stepfunctions.
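+ # For example, a JobRunState of "FAILED" surfaces as a States.TaskFailed failure event whose
+ # cause is the JSON-serialised JobRun description returned by GetJobRun.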
+ raise FailureEventException( + FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesTaskFailed), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + error=StatesErrorNameType.StatesTaskFailed.to_name(), + cause=to_json_str(job_run), + ) + ), + ) + ) + + return _sync_resolver + + def _build_sync_resolver( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ) -> Callable[[], Optional[Any]]: + sync_resolver_builder = self._get_api_action_sync_builder_handler() + sync_resolver = sync_resolver_builder( + env, resource_runtime_part, normalised_parameters, state_credentials + ) + return sync_resolver diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_lambda.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_lambda.py new file mode 100644 index 0000000000000..8feebfa1cdc29 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_lambda.py @@ -0,0 +1,132 @@ +import json +import logging +from typing import Final, Optional + +from botocore.exceptions import ClientError + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task import ( + lambda_eval_utils, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails + +LOG = logging.getLogger(__name__) + + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, +} +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = { + "invoke": { + "ClientContext", + "FunctionName", + "InvocationType", + "Qualifier", + "Payload", + # Outside the specification, but supported in practice: + "LogType", + } +} + + +class StateTaskServiceLambda(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + @staticmethod + def _error_cause_from_client_error(client_error: ClientError) -> tuple[str, str]: + error_code: str = client_error.response["Error"]["Code"] + error_msg: str = client_error.response["Error"]["Message"] + response_details = "; ".join( + [ + "Service: AWSLambda", + 
f"Status Code: {client_error.response['ResponseMetadata']['HTTPStatusCode']}", + f"Error Code: {error_code}", + f"Request ID: {client_error.response['ResponseMetadata']['RequestId']}", + "Proxy: null", + ] + ) + error = f"Lambda.{error_code}" + cause = f"{error_msg} ({response_details})" + return error, cause + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, lambda_eval_utils.LambdaFunctionErrorException): + cause = ex.payload + try: + cause_object = json.loads(cause) + error = cause_object["errorType"] + except Exception as ex: + LOG.warning( + "Could not retrieve 'errorType' field from LambdaFunctionErrorException object: %s", + ex, + ) + error = "Exception" + error_name = CustomErrorName(error) + elif isinstance(ex, ClientError): + error, cause = self._error_cause_from_client_error(ex) + error_name = CustomErrorName(error) + else: + return super()._from_error(env=env, ex=ex) + return FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=error, + cause=cause, + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + + def _normalise_parameters( + self, + parameters: dict, + boto_service_name: Optional[str] = None, + service_action_name: Optional[str] = None, + ) -> None: + # Run Payload value casting before normalisation. + if "Payload" in parameters: + parameters["Payload"] = lambda_eval_utils.to_payload_type(parameters["Payload"]) + super()._normalise_parameters( + parameters=parameters, + boto_service_name=boto_service_name, + service_action_name=service_action_name, + ) + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + lambda_eval_utils.execute_lambda_function_integration( + env=env, + parameters=normalised_parameters, + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sfn.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sfn.py new file mode 100644 index 0000000000000..33bafc723a00e --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sfn.py @@ -0,0 +1,239 @@ +import json +from typing import Any, Callable, Final, Optional + +from botocore.exceptions import ClientError + +from localstack.aws.api.stepfunctions import ( + DescribeExecutionOutput, + ExecutionStatus, + HistoryEventType, + TaskFailedEventDetails, +) +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from 
localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.utils.collections import select_from_typed_dict + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, + ResourceCondition.Sync, + ResourceCondition.Sync2, +} +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = { + "startexecution": {"Input", "Name", "StateMachineArn"} +} + + +class StateTaskServiceSfn(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, ClientError): + error_code = ex.response["Error"]["Code"] + error_name: str = f"StepFunctions.{error_code}Exception" + error_cause_details = [ + "Service: AWSStepFunctions", + f"Status Code: {ex.response['ResponseMetadata']['HTTPStatusCode']}", + f"Error Code: {error_code}", + f"Request ID: {ex.response['ResponseMetadata']['RequestId']}", + "Proxy: null", # TODO: investigate this proxy value. + ] + if "HostId" in ex.response["ResponseMetadata"]: + error_cause_details.append( + f"Extended Request ID: {ex.response['ResponseMetadata']['HostId']}" + ) + error_cause: str = ( + f"{ex.response['Error']['Message']} ({'; '.join(error_cause_details)})" + ) + return FailureEvent( + env=env, + error_name=CustomErrorName(error_name), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=error_name, + cause=error_cause, + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + return super()._from_error(env=env, ex=ex) + + def _normalise_parameters( + self, + parameters: dict, + boto_service_name: Optional[str] = None, + service_action_name: Optional[str] = None, + ) -> None: + if service_action_name is None: + if self._get_boto_service_action() == "start_execution": + optional_input = parameters.get("Input") + if not isinstance(optional_input, str): + # AWS Sfn's documentation states: + # If you don't include any JSON input data, you still must include the two braces. 
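+ # That is, an omitted Input field is forwarded to StartExecution as the JSON string '{}'.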
+ if optional_input is None: + optional_input = {} + parameters["Input"] = to_json_str(optional_input, separators=(",", ":")) + super()._normalise_parameters( + parameters=parameters, + boto_service_name=boto_service_name, + service_action_name=service_action_name, + ) + + def _build_sync_resolver( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ) -> Callable[[], Optional[Any]]: + sfn_client = boto_client_for( + service="stepfunctions", + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + submission_output: dict = env.stack.pop() + execution_arn: str = submission_output["ExecutionArn"] + + def _sync_resolver() -> Optional[Any]: + describe_execution_output = sfn_client.describe_execution(executionArn=execution_arn) + describe_execution_output: DescribeExecutionOutput = select_from_typed_dict( + DescribeExecutionOutput, describe_execution_output + ) + execution_status: ExecutionStatus = describe_execution_output["status"] + + if execution_status == ExecutionStatus.RUNNING: + return None + + self._normalise_response( + response=describe_execution_output, service_action_name="describe_execution" + ) + if execution_status == ExecutionStatus.SUCCEEDED: + return describe_execution_output + else: + raise FailureEventException( + FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesTaskFailed), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + error=StatesErrorNameType.StatesTaskFailed.to_name(), + cause=to_json_str(describe_execution_output), + ) + ), + ) + ) + + return _sync_resolver + + @staticmethod + def _sync2_api_output_of(typ: type, value: json) -> None: + def _replace_with_json_if_str(key: str) -> None: + inner_value = value.get(key) + if isinstance(inner_value, str): + value[key] = json.loads(inner_value) + + match typ: + case DescribeExecutionOutput: # noqa + _replace_with_json_if_str("input") + _replace_with_json_if_str("output") + + def _build_sync2_resolver( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ) -> Callable[[], Optional[Any]]: + sfn_client = boto_client_for( + region=resource_runtime_part.region, + service="stepfunctions", + state_credentials=state_credentials, + ) + submission_output: dict = env.stack.pop() + execution_arn: str = submission_output["ExecutionArn"] + + def _sync2_resolver() -> Optional[Any]: + describe_execution_output = sfn_client.describe_execution(executionArn=execution_arn) + describe_execution_output: DescribeExecutionOutput = select_from_typed_dict( + DescribeExecutionOutput, describe_execution_output + ) + execution_status: ExecutionStatus = describe_execution_output["status"] + + if execution_status == ExecutionStatus.RUNNING: + return None + + self._sync2_api_output_of(typ=DescribeExecutionOutput, value=describe_execution_output) + self._normalise_response( + response=describe_execution_output, service_action_name="describe_execution" + ) + if execution_status == ExecutionStatus.SUCCEEDED: + return describe_execution_output + else: + raise FailureEventException( + FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesTaskFailed), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + 
taskFailedEventDetails=TaskFailedEventDetails( + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + error=StatesErrorNameType.StatesTaskFailed.to_name(), + cause=to_json_str(describe_execution_output), + ) + ), + ) + ) + + return _sync2_resolver + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + service_name = self._get_boto_service_name() + api_action = self._get_boto_service_action() + sfn_client = boto_client_for( + region=resource_runtime_part.region, + service=service_name, + state_credentials=state_credentials, + ) + response = getattr(sfn_client, api_action)(**normalised_parameters) + response.pop("ResponseMetadata", None) + env.stack.append(response) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sns.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sns.py new file mode 100644 index 0000000000000..45c6693d0dafd --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sns.py @@ -0,0 +1,111 @@ +from typing import Final, Optional + +from botocore.exceptions import ClientError + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, +} +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = { + "publish": { + "Message", + "MessageAttributes", + "MessageStructure", + "MessageDeduplicationId", + "MessageGroupId", + "PhoneNumber", + "Subject", + "TargetArn", + "TopicArn", + } +} + + +class StateTaskServiceSns(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, ClientError): + error_code = ex.response["Error"]["Code"] + + exception_name = error_code + if not exception_name.endswith("Exception"): + exception_name += "Exception" + error_name = f"SNS.{exception_name}" + + error_message = ex.response["Error"]["Message"] + status_code 
= ex.response["ResponseMetadata"]["HTTPStatusCode"] + request_id = ex.response["ResponseMetadata"]["RequestId"] + error_cause = ( + f"{error_message} " + f"(Service: AmazonSNS; " + f"Status Code: {status_code}; " + f"Error Code: {error_code}; " + f"Request ID: {request_id}; " + f"Proxy: null)" + ) + + return FailureEvent( + env=env, + error_name=CustomErrorName(error_name=error_name), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=error_name, + cause=error_cause, + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + return super()._from_error(env=env, ex=ex) + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + service_name = self._get_boto_service_name() + api_action = self._get_boto_service_action() + sns_client = boto_client_for( + service=service_name, + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + + # The optimised integration automatically stringifies non-string Message values. + if "Message" in normalised_parameters and not isinstance( + message := normalised_parameters["Message"], str + ): + normalised_parameters["Message"] = to_json_str(message) + + response = getattr(sns_client, api_action)(**normalised_parameters) + response.pop("ResponseMetadata", None) + env.stack.append(response) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sqs.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sqs.py new file mode 100644 index 0000000000000..836cb8ad1b95b --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_sqs.py @@ -0,0 +1,113 @@ +from typing import Any, Final, Optional + +from botocore.exceptions import ClientError + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, +} +_ERROR_NAME_CLIENT: Final[str] = "SQS.SdkClientException" +_ERROR_NAME_AWS: Final[str] = "SQS.AmazonSQSException" +_SUPPORTED_API_PARAM_BINDINGS: Final[dict[str, set[str]]] = { + "sendmessage": { + "DelaySeconds", + "MessageAttributes", + "MessageBody",
"MessageDeduplicationId", + "MessageGroupId", + "QueueUrl", + } +} + + +class StateTaskServiceSqs(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _get_supported_parameters(self) -> Optional[set[str]]: + return _SUPPORTED_API_PARAM_BINDINGS.get(self.resource.api_action.lower()) + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, ClientError): + return FailureEvent( + env=env, + error_name=CustomErrorName(_ERROR_NAME_CLIENT), + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails( + taskFailedEventDetails=TaskFailedEventDetails( + error=_ERROR_NAME_CLIENT, + cause=ex.response["Error"][ + "Message" + ], # TODO: update to report expected cause. + resource=self._get_sfn_resource(), + resourceType=self._get_sfn_resource_type(), + ) + ), + ) + return super()._from_error(env=env, ex=ex) + + def _normalise_response( + self, + response: Any, + boto_service_name: Optional[str] = None, + service_action_name: Optional[str] = None, + ) -> None: + super()._normalise_response( + response=response, + boto_service_name=boto_service_name, + service_action_name=service_action_name, + ) + # Normalise output value keys to SFN standard for Md5OfMessageBody and Md5OfMessageAttributes + if response and "Md5OfMessageBody" in response: + md5_message_body = response.pop("Md5OfMessageBody") + response["MD5OfMessageBody"] = md5_message_body + + if response and "Md5OfMessageAttributes" in response: + md5_message_attributes = response.pop("Md5OfMessageAttributes") + response["MD5OfMessageAttributes"] = md5_message_attributes + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + # TODO: Stepfunctions automatically dumps to json MessageBody's definitions. + # Are these other similar scenarios? 
+ if "MessageBody" in normalised_parameters: + message_body = normalised_parameters["MessageBody"] + if message_body is not None and not isinstance(message_body, str): + normalised_parameters["MessageBody"] = to_json_str(message_body) + + service_name = self._get_boto_service_name() + api_action = self._get_boto_service_action() + sqs_client = boto_client_for( + service=service_name, + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + response = getattr(sqs_client, api_action)(**normalised_parameters) + response.pop("ResponseMetadata", None) + env.stack.append(response) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_unsupported.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_unsupported.py new file mode 100644 index 0000000000000..0719c6d2e73a3 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/service/state_task_service_unsupported.py @@ -0,0 +1,63 @@ +import logging +from typing import Final + +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ResourceCondition, + ResourceRuntimePart, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_callback import ( + StateTaskServiceCallback, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.utils.boto_client import boto_client_for + +LOG = logging.getLogger(__name__) + +_SUPPORTED_INTEGRATION_PATTERNS: Final[set[ResourceCondition]] = { + ResourceCondition.WaitForTaskToken, +} + + +class StateTaskServiceUnsupported(StateTaskServiceCallback): + def __init__(self): + super().__init__(supported_integration_patterns=_SUPPORTED_INTEGRATION_PATTERNS) + + def _validate_service_integration_is_supported(self): + # Attempts to execute any derivation; logging this incident on creation. + self._log_unsupported_warning() + + def _log_unsupported_warning(self): + # Logs that the optimised service integration is not supported, + # however the request is being forwarded to the service. + service_name = self._get_boto_service_name() + resource_arn = self.resource.resource_arn + LOG.warning( + "Unsupported Optimised service integration for service_name '%s' in resource: '%s'. " + "Attempting to forward request to service.", + service_name, + resource_arn, + ) + + def _eval_service_task( + self, + env: Environment, + resource_runtime_part: ResourceRuntimePart, + normalised_parameters: dict, + state_credentials: StateCredentials, + ): + # Logs that the evaluation of this optimised service integration is not supported + # and relays the call to the target service with the computed parameters. 
+ self._log_unsupported_warning() + service_name = self._get_boto_service_name() + boto_action = self._get_boto_service_action() + boto_client = boto_client_for( + service=service_name, + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + response = getattr(boto_client, boto_action)(**normalised_parameters) + response.pop("ResponseMetadata", None) + env.stack.append(response) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task.py new file mode 100644 index 0000000000000..79c5f496d7bf8 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import abc +from typing import Optional + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskTimedOutEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.parargs import Parargs +from localstack.services.stepfunctions.asl.component.state.state_execution.execute_state import ( + ExecutionState, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + Credentials, + StateCredentials, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + Resource, +) +from localstack.services.stepfunctions.asl.component.state.state_props import StateProps +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails + + +class StateTask(ExecutionState, abc.ABC): + resource: Resource + parargs: Optional[Parargs] + credentials: Optional[Credentials] + + def __init__(self): + super(StateTask, self).__init__( + state_entered_event_type=HistoryEventType.TaskStateEntered, + state_exited_event_type=HistoryEventType.TaskStateExited, + ) + + def from_state_props(self, state_props: StateProps) -> None: + super(StateTask, self).from_state_props(state_props) + self.resource = state_props.get(Resource) + self.parargs = state_props.get(Parargs) + self.credentials = state_props.get(Credentials) + + def _get_supported_parameters(self) -> Optional[set[str]]: # noqa + return None + + def _eval_parameters(self, env: Environment) -> dict: + # Eval raw parameters. + parameters = dict() + if self.parargs is not None: + self.parargs.eval(env=env) + parameters = env.stack.pop() + + # Handle supported parameters. 
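+ # Parameters outside a service's binding set are silently dropped rather than rejected; for
+ # example, an sqs:sendMessage Task keeps only the keys listed under its "sendmessage" binding.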
+ supported_parameters = self._get_supported_parameters() + if supported_parameters: + unsupported_parameters: list[str] = [ + parameter + for parameter in parameters.keys() + if parameter not in supported_parameters + ] + for unsupported_parameter in unsupported_parameters: + parameters.pop(unsupported_parameter, None) + + return parameters + + def _eval_state_credentials(self, env: Environment) -> StateCredentials: + if not self.credentials: + state_credentials = StateCredentials(role_arn=env.aws_execution_details.role_arn) + else: + self.credentials.eval(env=env) + state_credentials = env.stack.pop() + return state_credentials + + def _get_timed_out_failure_event(self, env: Environment) -> FailureEvent: + return FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesTimeout), + event_type=HistoryEventType.TaskTimedOut, + event_details=EventDetails( + taskTimedOutEventDetails=TaskTimedOutEventDetails( + error=StatesErrorNameType.StatesTimeout.to_name(), + ) + ), + ) + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, TimeoutError): + return self._get_timed_out_failure_event(env) + return super()._from_error(env=env, ex=ex) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_activitiy.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_activitiy.py new file mode 100644 index 0000000000000..bfff9c4855e70 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_activitiy.py @@ -0,0 +1,207 @@ +import json + +from botocore.exceptions import ClientError + +from localstack.aws.api.stepfunctions import ( + ActivityDoesNotExist, + ActivityFailedEventDetails, + ActivityScheduledEventDetails, + ActivityStartedEventDetails, + ActivitySucceededEventDetails, + ActivityTimedOutEventDetails, + ExecutionFailedEventDetails, + HistoryEventExecutionDataDetails, + HistoryEventType, +) +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import ( + EvalTimeoutError, + TimeoutSeconds, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ActivityResource, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task import ( + StateTask, +) +from localstack.services.stepfunctions.asl.eval.callback.callback import ( + ActivityTaskStartOutcome, + CallbackOutcomeFailure, + CallbackOutcomeFailureError, + CallbackOutcomeSuccess, + CallbackTimeoutError, + HeartbeatTimeoutError, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + + +class StateTaskActivity(StateTask): + resource: ActivityResource + + def _from_error(self, env: 
Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, TimeoutError): + return FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesTimeout), + event_type=HistoryEventType.ActivityTimedOut, + event_details=EventDetails( + activityTimedOutEventDetails=ActivityTimedOutEventDetails( + error=StatesErrorNameType.StatesTimeout.to_name(), + ) + ), + ) + + if isinstance(ex, FailureEventException): + raise ex + + if isinstance(ex, CallbackOutcomeFailureError): + error = ex.callback_outcome_failure.error + error_name = CustomErrorName(error) + cause = ex.callback_outcome_failure.cause + else: + error_name = StatesErrorName(typ=StatesErrorNameType.StatesRuntime) + error = error_name.error_name + cause = ex.response["Error"]["Message"] if isinstance(ex, ClientError) else str(ex) + return FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.ActivityFailed, + event_details=EventDetails( + activityFailedEventDetails=ActivityFailedEventDetails(error=error, cause=cause) + ), + ) + + def _eval_parameters(self, env: Environment) -> dict: + if self.parargs: + self.parargs.eval(env=env) + activity_input = env.stack.pop() + return activity_input + + def _eval_execution(self, env: Environment) -> None: + # Compute the task input. + activity_task_input = self._eval_parameters(env=env) + if not isinstance(activity_task_input, str): + activity_task_input = to_json_str(activity_task_input) + + # Compute the timeout and heartbeat for this task. + timeout_seconds = TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS + + if not self.timeout.is_default_value(): + self.timeout.eval(env=env) + timeout_seconds = env.stack.pop() + + heartbeat_seconds = None + if self.heartbeat: + self.heartbeat.eval(env=env) + heartbeat_seconds = env.stack.pop() + + # Publish the activity task on the callback manager. + task_token = env.states.context_object.update_task_token() + try: + callback_endpoint = env.callback_pool_manager.add_activity_task( + callback_id=task_token, + activity_arn=self.resource.resource_arn, + activity_input=activity_task_input, + ) + except ActivityDoesNotExist: + failure_event = FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime), + event_type=HistoryEventType.ExecutionFailed, + event_details=EventDetails( + executionFailedEventDetails=ExecutionFailedEventDetails( + error=StatesErrorNameType.StatesRuntime.to_name(), + cause=f"The activity {self.resource.resource_arn} does not exist.", + ) + ), + ) + raise FailureEventException(failure_event=failure_event) + + # Log that the task is scheduled. + scheduled_event_details = ActivityScheduledEventDetails( + resource=self.resource.resource_arn, + input=activity_task_input, + inputDetails=HistoryEventExecutionDataDetails( + truncated=False # Always False for api calls. + ), + ) + if timeout_seconds != TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS: + scheduled_event_details["timeoutInSeconds"] = timeout_seconds + if heartbeat_seconds is not None: + scheduled_event_details["heartbeatInSeconds"] = heartbeat_seconds + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ActivityScheduled, + event_details=EventDetails(activityScheduledEventDetails=scheduled_event_details), + ) + + # Wait for the task to be sampled, with a timeout. + activity_task_start_endpoint = callback_endpoint.get_activity_task_start_endpoint() + task_start_outcome = activity_task_start_endpoint.wait(timeout_seconds=timeout_seconds) + # Log that the task was sampled, or raise a timeout error if it was not.
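+ # A worker that polled the task (e.g. through the GetActivityTask API) produces an
+ # ActivityTaskStartOutcome, optionally carrying its worker name; any other outcome is
+ # treated as a scheduling timeout below.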
+ if isinstance(task_start_outcome, ActivityTaskStartOutcome): + started_event_details = ActivityStartedEventDetails() + if task_start_outcome.worker_name is not None: + started_event_details["workerName"] = task_start_outcome.worker_name + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ActivityStarted, + event_details=EventDetails(activityStartedEventDetails=started_event_details), + ) + else: + raise EvalTimeoutError() + + # Wait for the task outcome, with a heartbeat or timeout strategy. + outcome = None + if heartbeat_seconds is None: + # Total timeout is already handled upstream. Here we specify a timeout to allow this child operation to + # terminate gracefully sooner. This is why we don't compute the residual outcome. + outcome = callback_endpoint.wait(timeout=timeout_seconds) + else: + heartbeat_endpoint = callback_endpoint.setup_heartbeat_endpoint( + heartbeat_seconds=heartbeat_seconds + ) + while ( + env.is_running() and outcome is None + ): # Until the execution stops or an outcome is received. + received = heartbeat_endpoint.clear_and_wait() + if not received and env.is_running(): # Heartbeat timed out. + raise HeartbeatTimeoutError() + outcome = callback_endpoint.get_outcome() + + if outcome is None: + raise CallbackTimeoutError() + if isinstance(outcome, CallbackOutcomeSuccess): + outcome_output = json.loads(outcome.output) + env.stack.append(outcome_output) + elif isinstance(outcome, CallbackOutcomeFailure): + raise CallbackOutcomeFailureError(callback_outcome_failure=outcome) + else: + raise NotImplementedError(f"Unsupported CallbackOutcome type '{type(outcome)}'.") + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.ActivitySucceeded, + event_details=EventDetails( + activitySucceededEventDetails=ActivitySucceededEventDetails( + output=outcome.output, + outputDetails=HistoryEventExecutionDataDetails( + truncated=False # Always False for api calls.
+ ), + ) + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_factory.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_factory.py new file mode 100644 index 0000000000000..4f474499d95dd --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_factory.py @@ -0,0 +1,34 @@ +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ActivityResource, + LambdaResource, + Resource, + ServiceResource, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_factory import ( + state_task_service_for, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task import ( + StateTask, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task_activitiy import ( + StateTaskActivity, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task_lambda import ( + StateTaskLambda, +) + + +def state_task_for(resource: Resource) -> StateTask: + if not resource: + raise ValueError("No Resource declaration in State Task.") + if isinstance(resource, ServiceResource): + state = state_task_service_for(service_name=resource.service_name) + elif isinstance(resource, LambdaResource): + state = StateTaskLambda() + elif isinstance(resource, ActivityResource): + state = StateTaskActivity() + else: + raise NotImplementedError( + f"Resources of type '{type(resource)}' are not supported: '{resource}'." + ) + return state diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_lambda.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_lambda.py new file mode 100644 index 0000000000000..d33fc290b611e --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_execution/state_task/state_task_lambda.py @@ -0,0 +1,192 @@ +import json +import logging +from typing import Union + +from botocore.exceptions import ClientError + +from localstack.aws.api.lambda_ import InvocationRequest, InvocationType +from localstack.aws.api.stepfunctions import ( + HistoryEventExecutionDataDetails, + HistoryEventType, + LambdaFunctionFailedEventDetails, + LambdaFunctionScheduledEventDetails, + LambdaFunctionSucceededEventDetails, + LambdaFunctionTimedOutEventDetails, + TaskCredentials, +) +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task import ( + lambda_eval_utils, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + LambdaResource, + ResourceRuntimePart, +) +from
localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task import ( + StateTask, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.services.stepfunctions.quotas import is_within_size_quota + +LOG = logging.getLogger(__name__) + + +class StateTaskLambda(StateTask): + resource: LambdaResource + + def _from_error(self, env: Environment, ex: Exception) -> FailureEvent: + if isinstance(ex, TimeoutError): + return FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesTimeout), + event_type=HistoryEventType.LambdaFunctionTimedOut, + event_details=EventDetails( + lambdaFunctionTimedOutEventDetails=LambdaFunctionTimedOutEventDetails( + error=StatesErrorNameType.StatesTimeout.to_name(), + ) + ), + ) + if isinstance(ex, FailureEventException): + return ex.failure_event + + error = "Exception" + if isinstance(ex, ClientError): + error_name = CustomErrorName(error) + cause = ex.response["Error"]["Message"] + elif isinstance(ex, lambda_eval_utils.LambdaFunctionErrorException): + cause = ex.payload + try: + cause_object = json.loads(cause) + error = cause_object["errorType"] + except Exception as ex: + LOG.warning( + "Could not retrieve 'errorType' field from LambdaFunctionErrorException object: %s", + ex, + ) + error_name = CustomErrorName(error) + else: + error_name = StatesErrorName(StatesErrorNameType.StatesTaskFailed) + cause = str(ex) + + return FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.LambdaFunctionFailed, + event_details=EventDetails( + lambdaFunctionFailedEventDetails=LambdaFunctionFailedEventDetails( + error=error, + cause=cause, + ) + ), + ) + + def _verify_size_quota(self, env: Environment, value: Union[str, json]) -> None: + is_within: bool = is_within_size_quota(value=value) + if is_within: + return + error_type = StatesErrorNameType.StatesStatesDataLimitExceeded + cause = ( + f"The state/task '{self.resource.resource_arn}' returned a result " + "with a size exceeding the maximum number of bytes service limit." + ) + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=error_type), + event_type=HistoryEventType.LambdaFunctionFailed, + event_details=EventDetails( + lambdaFunctionFailedEventDetails=LambdaFunctionFailedEventDetails( + error=error_type.to_name(), + cause=cause, + ) + ), + ) + ) + + def _eval_parameters(self, env: Environment) -> dict: + if self.parargs: + self.parargs.eval(env=env) + + payload = env.stack.pop() + parameters = InvocationRequest( + FunctionName=self.resource.resource_arn, + InvocationType=InvocationType.RequestResponse, + Payload=payload, + ) + return parameters + + def _eval_execution(self, env: Environment) -> None: + parameters = self._eval_parameters(env=env) + state_credentials = self._eval_state_credentials(env=env) + payload = parameters["Payload"] + + scheduled_event_details = LambdaFunctionScheduledEventDetails( + resource=self.resource.resource_arn, + input=to_json_str(payload), + inputDetails=HistoryEventExecutionDataDetails( + truncated=False # Always False for api calls. 
+ ), + ) + if not self.timeout.is_default_value(): + self.timeout.eval(env=env) + timeout_seconds = env.stack.pop() + scheduled_event_details["timeoutInSeconds"] = timeout_seconds + if self.credentials: + scheduled_event_details["taskCredentials"] = TaskCredentials( + roleArn=state_credentials.role_arn + ) + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.LambdaFunctionScheduled, + event_details=EventDetails(lambdaFunctionScheduledEventDetails=scheduled_event_details), + ) + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.LambdaFunctionStarted, + ) + + self.resource.eval(env=env) + resource_runtime_part: ResourceRuntimePart = env.stack.pop() + + parameters["Payload"] = lambda_eval_utils.to_payload_type(parameters["Payload"]) + lambda_eval_utils.execute_lambda_function_integration( + env=env, + parameters=parameters, + region=resource_runtime_part.region, + state_credentials=state_credentials, + ) + + # In lambda invocations, only payload is passed on as output. + output = env.stack.pop() + self._verify_size_quota(env=env, value=output) + + output_payload = output["Payload"] + env.stack.append(output_payload) + + env.event_manager.add_event( + context=env.event_history_context, + event_type=HistoryEventType.LambdaFunctionSucceeded, + event_details=EventDetails( + lambdaFunctionSucceededEventDetails=LambdaFunctionSucceededEventDetails( + output=to_json_str(output_payload), + outputDetails=HistoryEventExecutionDataDetails( + truncated=False # Always False for api calls. + ), + ) + ), + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/cause_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/cause_decl.py new file mode 100644 index 0000000000000..60dda85944d7a --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/cause_decl.py @@ -0,0 +1,48 @@ +import abc +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringExpression, + StringIntrinsicFunction, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_fuinction_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + +_STRING_RETURN_FUNCTIONS: Final[set[str]] = { + typ.name() + for typ in [ + StatesFunctionNameType.Format, + StatesFunctionNameType.JsonToString, + StatesFunctionNameType.ArrayGetItem, + StatesFunctionNameType.Base64Decode, + StatesFunctionNameType.Base64Encode, + StatesFunctionNameType.Hash, + StatesFunctionNameType.UUID, + ] +} + + +class CauseDecl(EvalComponent, abc.ABC): ... 
+ + +class Cause(CauseDecl): + string_expression: Final[StringExpression] + + def __init__(self, string_expression: StringExpression): + self.string_expression = string_expression + + def _eval_body(self, env: Environment) -> None: + self.string_expression.eval(env=env) + + +class CausePath(Cause): + def __init__(self, string_expression: StringExpression): + super().__init__(string_expression=string_expression) + if isinstance(string_expression, StringIntrinsicFunction): + if string_expression.function.name.name not in _STRING_RETURN_FUNCTIONS: + raise ValueError( + f"Unsupported Intrinsic Function for CausePath declaration: '{string_expression.intrinsic_function_derivation}'." + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/error_decl.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/error_decl.py new file mode 100644 index 0000000000000..a5a7ba89c2648 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/error_decl.py @@ -0,0 +1,48 @@ +import abc +from typing import Final + +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringExpression, + StringIntrinsicFunction, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_fuinction_name_types import ( + StatesFunctionNameType, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment + +_STRING_RETURN_FUNCTIONS: Final[set[str]] = { + typ.name() + for typ in [ + StatesFunctionNameType.Format, + StatesFunctionNameType.JsonToString, + StatesFunctionNameType.ArrayGetItem, + StatesFunctionNameType.Base64Decode, + StatesFunctionNameType.Base64Encode, + StatesFunctionNameType.Hash, + StatesFunctionNameType.UUID, + ] +} + + +class ErrorDecl(EvalComponent, abc.ABC): ... + + +class Error(ErrorDecl): + string_expression: Final[StringExpression] + + def __init__(self, string_expression: StringExpression): + self.string_expression = string_expression + + def _eval_body(self, env: Environment) -> None: + self.string_expression.eval(env=env) + + +class ErrorPath(Error): + def __init__(self, string_expression: StringExpression): + super().__init__(string_expression=string_expression) + if isinstance(string_expression, StringIntrinsicFunction): + if string_expression.function.name.name not in _STRING_RETURN_FUNCTIONS: + raise ValueError( + f"Unsupported Intrinsic Function for ErrorPath declaration: '{string_expression.intrinsic_function_derivation}'." 
+ ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/state_fail.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/state_fail.py new file mode 100644 index 0000000000000..608b27f2044fc --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_fail/state_fail.py @@ -0,0 +1,54 @@ +from typing import Optional + +from localstack.aws.api.stepfunctions import HistoryEventType, TaskFailedEventDetails +from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import ( + CustomErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.state.state import CommonStateField +from localstack.services.stepfunctions.asl.component.state.state_fail.cause_decl import CauseDecl +from localstack.services.stepfunctions.asl.component.state.state_fail.error_decl import ErrorDecl +from localstack.services.stepfunctions.asl.component.state.state_props import StateProps +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails + + +class StateFail(CommonStateField): + def __init__(self): + super().__init__( + state_entered_event_type=HistoryEventType.FailStateEntered, + state_exited_event_type=None, + ) + self.cause: Optional[CauseDecl] = None + self.error: Optional[ErrorDecl] = None + + def from_state_props(self, state_props: StateProps) -> None: + super(StateFail, self).from_state_props(state_props) + self.cause = state_props.get(CauseDecl) + self.error = state_props.get(ErrorDecl) + + def _eval_state(self, env: Environment) -> None: + task_failed_event_details = TaskFailedEventDetails() + + error_value = None + if self.error: + self.error.eval(env=env) + error_value = env.stack.pop() + task_failed_event_details["error"] = error_value + + if self.cause: + self.cause.eval(env=env) + cause_value = env.stack.pop() + task_failed_event_details["cause"] = cause_value + + error_name = CustomErrorName(error_value) if error_value else None + failure_event = FailureEvent( + env=env, + error_name=error_name, + event_type=HistoryEventType.TaskFailed, + event_details=EventDetails(taskFailedEventDetails=task_failed_event_details), + ) + raise FailureEventException(failure_event=failure_event) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_pass/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_pass/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_pass/result.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_pass/result.py new file mode 100644 index 0000000000000..11e86ed536654 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_pass/result.py @@ -0,0 +1,14 @@ +import json + +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment + + +class Result(EvalComponent): + result_obj: json + + def __init__(self, result_obj: json): + self.result_obj = result_obj + + def _eval_body(self, env: Environment) -> None: + env.stack.append(self.result_obj) diff --git 
a/localstack/services/stepfunctions/asl/component/state/state_pass/state_pass.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_pass/state_pass.py similarity index 76% rename from localstack/services/stepfunctions/asl/component/state/state_pass/state_pass.py rename to localstack-core/localstack/services/stepfunctions/asl/component/state/state_pass/state_pass.py index 12f32675a2033..3a13b935b73ac 100644 --- a/localstack/services/stepfunctions/asl/component/state/state_pass/state_pass.py +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_pass/state_pass.py @@ -1,7 +1,9 @@ from typing import Optional -from localstack.aws.api.stepfunctions import HistoryEventType -from localstack.services.stepfunctions.asl.component.common.parameters import Parameters +from localstack.aws.api.stepfunctions import ( + HistoryEventType, +) +from localstack.services.stepfunctions.asl.component.common.parargs import Parameters, Parargs from localstack.services.stepfunctions.asl.component.common.path.result_path import ResultPath from localstack.services.stepfunctions.asl.component.state.state import CommonStateField from localstack.services.stepfunctions.asl.component.state.state_pass.result import Result @@ -34,17 +36,24 @@ def __init__(self): def from_state_props(self, state_props: StateProps) -> None: super(StatePass, self).from_state_props(state_props) self.result = state_props.get(Result) - self.result_path = state_props.get(ResultPath) - self.parameters = state_props.get(Parameters) - - if self.result is not None and self.result_path is None: - self.result_path = ResultPath(result_path_src=ResultPath.DEFAULT_PATH) + self.result_path = state_props.get(ResultPath) or ResultPath( + result_path_src=ResultPath.DEFAULT_PATH + ) + self.parameters = state_props.get(Parargs) def _eval_state(self, env: Environment) -> None: + if self.parameters: + self.parameters.eval(env=env) + if self.result: - env.stack.append(self.result.result_obj) - else: - env.stack.append(env.inp) + self.result.eval(env=env) + + if not self._is_language_query_jsonpath(): + output_value = env.stack[-1] + env.states.set_result(output_value) + + if self.assign_decl: + self.assign_decl.eval(env=env) if self.result_path: self.result_path.eval(env) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_props.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_props.py new file mode 100644 index 0000000000000..8c56165ce58c3 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_props.py @@ -0,0 +1,74 @@ +from typing import Any, Final + +from localstack.services.stepfunctions.asl.component.common.flow.end import End +from localstack.services.stepfunctions.asl.component.common.flow.next import Next +from localstack.services.stepfunctions.asl.component.common.parargs import Parargs +from localstack.services.stepfunctions.asl.component.common.timeouts.heartbeat import Heartbeat +from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import Timeout +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_type import ( + Comparison, +) +from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.variable import ( + Variable, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.max_items_decl import ( + MaxItemsDecl, +) +from 
localstack.services.stepfunctions.asl.component.state.state_execution.state_map.items.items import ( + Items, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.max_concurrency import ( + MaxConcurrencyDecl, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.tolerated_failure import ( + ToleratedFailureCountDecl, + ToleratedFailurePercentageDecl, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + Resource, +) +from localstack.services.stepfunctions.asl.component.state.state_fail.cause_decl import CauseDecl +from localstack.services.stepfunctions.asl.component.state.state_fail.error_decl import ErrorDecl +from localstack.services.stepfunctions.asl.component.state.state_wait.wait_function.wait_function import ( + WaitFunction, +) +from localstack.services.stepfunctions.asl.parse.typed_props import TypedProps + +UNIQUE_SUBINSTANCES: Final[set[type]] = { + Items, + Resource, + WaitFunction, + Timeout, + Heartbeat, + MaxItemsDecl, + MaxConcurrencyDecl, + ToleratedFailureCountDecl, + ToleratedFailurePercentageDecl, + ErrorDecl, + CauseDecl, + Variable, + Parargs, + Comparison, +} + + +class StateProps(TypedProps): + name: str + + def add(self, instance: Any) -> None: + inst_type = type(instance) + + # End-Next conflicts: + if inst_type == End and Next in self._instance_by_type: + raise ValueError(f"End redefines Next, from '{self.get(Next)}' to '{instance}'.") + if inst_type == Next and End in self._instance_by_type: + raise ValueError(f"Next redefines End, from '{self.get(End)}' to '{instance}'.") + + # Subclasses + for typ in UNIQUE_SUBINSTANCES: + if issubclass(inst_type, typ): + super()._add(typ, instance) + return + + # Base and delegate to preprocessor. 
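# Aside: a minimal sketch of the keying rule implemented just above, using
# stand-in classes (Base/Sub are hypothetical, not part of this change). An
# instance of any UNIQUE_SUBINSTANCES subclass is stored under its base type,
# so e.g. an ErrorPath can later be retrieved via state_props.get(ErrorDecl).
class Base: ...
class Sub(Base): ...

unique_bases: set[type] = {Base}
instance_by_type: dict[type, object] = {}

def add(instance: object) -> None:
    for typ in unique_bases:
        if isinstance(instance, typ):
            instance_by_type[typ] = instance  # keyed by base, not concrete type
            return
    instance_by_type[type(instance)] = instance

add(Sub())
assert type(instance_by_type[Base]) is Sub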
+        super().add(instance)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_succeed/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_succeed/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack/services/stepfunctions/asl/component/state/state_succeed/state_succeed.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_succeed/state_succeed.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/state/state_succeed/state_succeed.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/state/state_succeed/state_succeed.py
diff --git a/localstack/services/stepfunctions/asl/component/state/state_type.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_type.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/component/state/state_type.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/state/state_type.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack/services/stepfunctions/asl/component/state/state_wait/state_wait.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/state_wait.py
similarity index 94%
rename from localstack/services/stepfunctions/asl/component/state/state_wait/state_wait.py
rename to localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/state_wait.py
index aa13464c5f101..958377cbcc7e8 100644
--- a/localstack/services/stepfunctions/asl/component/state/state_wait/state_wait.py
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/state_wait.py
@@ -27,3 +27,5 @@ def from_state_props(self, state_props: StateProps) -> None:
 
     def _eval_state(self, env: Environment) -> None:
         self.wait_function.eval(env)
+        if self.assign_decl:
+            self.assign_decl.eval(env=env)
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/seconds.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/seconds.py
new file mode 100644
index 0000000000000..d7a3fc79b8731
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/seconds.py
@@ -0,0 +1,35 @@
+from typing import Final
+
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringJSONata,
+)
+from localstack.services.stepfunctions.asl.component.state.state_wait.wait_function.wait_function import (
+    WaitFunction,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+
+class Seconds(WaitFunction):
+    # Seconds
+    # A time, in seconds, to wait before beginning the state specified in the Next
+    # field. You must specify time as a positive, integer value.
+
+    def __init__(self, seconds: int):
+        self.seconds: Final[int] = seconds
+
+    def _get_wait_seconds(self, env: Environment) -> int:
+        return self.seconds
+
+
+class SecondsJSONata(WaitFunction):
+    string_jsonata: Final[StringJSONata]
+
+    def __init__(self, string_jsonata: StringJSONata):
+        super().__init__()
+        self.string_jsonata = string_jsonata
+
+    def _get_wait_seconds(self, env: Environment) -> int:
+        # TODO: add snapshot tests to verify AWS's behaviour with non-integer values.
+        self.string_jsonata.eval(env=env)
+        seconds: int = int(env.stack.pop())
+        return seconds
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/seconds_path.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/seconds_path.py
new file mode 100644
index 0000000000000..af840602c5133
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/seconds_path.py
@@ -0,0 +1,83 @@
+from typing import Any, Final
+
+from localstack.aws.api.stepfunctions import (
+    ExecutionFailedEventDetails,
+    HistoryEventType,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringSampler,
+)
+from localstack.services.stepfunctions.asl.component.state.state_wait.wait_function.wait_function import (
+    WaitFunction,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+from localstack.services.stepfunctions.asl.utils.json_path import NoSuchJsonPathError
+
+
+class SecondsPath(WaitFunction):
+    # SecondsPath
+    # A time, in seconds, to wait before beginning the state specified in the Next
+    # field, specified using a path from the state's input data.
+    # You must specify an integer value for this field.
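# Aside: the contract SecondsPath implements, reduced to plain JSONPath
# sampling. jsonpath_ng stands in here purely for illustration; the class
# above goes through StringSampler instead, and all names are assumptions.
from jsonpath_ng import parse

def sample_wait_seconds(state_input: dict, path: str) -> int:
    matches = parse(path).find(state_input)
    if not matches:
        raise ValueError(f"SecondsPath does not reference an input value: {path}")
    seconds = matches[0].value
    if not isinstance(seconds, int) or seconds < 0:
        raise ValueError(f"SecondsPath must be a non-negative integer, got: {seconds!r}")
    return seconds

assert sample_wait_seconds({"waitFor": 10}, "$.waitFor") == 10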
+ string_sampler: Final[StringSampler] + + def __init__(self, string_sampler: StringSampler): + self.string_sampler = string_sampler + + def _validate_seconds_value(self, env: Environment, seconds: Any): + if isinstance(seconds, int) and seconds >= 0: + return + error_type = StatesErrorNameType.StatesRuntime + + assignment_description = f"{self.string_sampler.literal_value} == {seconds}" + if not isinstance(seconds, int): + cause = f"The SecondsPath parameter cannot be parsed as a long value: {assignment_description}" + else: # seconds < 0 + cause = ( + f"The SecondsPath parameter references a negative value: {assignment_description}" + ) + + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=error_type), + event_type=HistoryEventType.ExecutionFailed, + event_details=EventDetails( + executionFailedEventDetails=ExecutionFailedEventDetails( + error=error_type.to_name(), cause=cause + ) + ), + ) + ) + + def _get_wait_seconds(self, env: Environment) -> int: + try: + self.string_sampler.eval(env=env) + except NoSuchJsonPathError as no_such_json_path_error: + cause = f"The SecondsPath parameter does not reference an input value: {no_such_json_path_error.json_path}" + raise FailureEventException( + failure_event=FailureEvent( + env=env, + error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime), + event_type=HistoryEventType.ExecutionFailed, + event_details=EventDetails( + executionFailedEventDetails=ExecutionFailedEventDetails( + error=StatesErrorNameType.StatesRuntime.to_name(), cause=cause + ) + ), + ) + ) + seconds = env.stack.pop() + self._validate_seconds_value(env=env, seconds=seconds) + return seconds diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/timestamp.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/timestamp.py new file mode 100644 index 0000000000000..f26583bf77d10 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/timestamp.py @@ -0,0 +1,102 @@ +import datetime +import re +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import ExecutionFailedEventDetails, HistoryEventType +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEvent, + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.common.string.string_expression import ( + StringExpression, + StringLiteral, +) +from localstack.services.stepfunctions.asl.component.state.state_wait.wait_function.wait_function import ( + WaitFunction, +) +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails + +TIMESTAMP_FORMAT: Final[str] = "%Y-%m-%dT%H:%M:%SZ" +# TODO: could be a bit more exact (e.g. 90 shouldn't be a valid minute) +TIMESTAMP_PATTERN: Final[str] = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?Z$" + + +class Timestamp(WaitFunction): + string: Final[StringExpression] + + def __init__(self, string: StringExpression): + self.string = string + # If a string literal, assert it encodes a valid timestamp. 
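# Aside: what the TIMESTAMP_PATTERN above accepts and rejects, shown on
# illustrative values.
import re

PATTERN = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?Z$"
assert re.match(PATTERN, "2024-01-01T00:00:00Z")
assert re.match(PATTERN, "2024-01-01T00:00:00.123Z")       # fractional seconds allowed
assert not re.match(PATTERN, "2024-01-01T00:00:00+02:00")  # numeric offsets rejected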
+
+        if isinstance(string, StringLiteral):
+            timestamp = string.literal_value
+            if self._from_timestamp_string(timestamp) is None:
+                raise ValueError(
+                    "The Timestamp value does not reference a valid ISO-8601 "
+                    f"extended offset date-time format string: '{timestamp}'"
+                )
+
+    @staticmethod
+    def _is_valid_timestamp_pattern(timestamp: str) -> bool:
+        return re.match(TIMESTAMP_PATTERN, timestamp) is not None
+
+    @staticmethod
+    def _from_timestamp_string(timestamp: str) -> Optional[datetime.datetime]:
+        if not Timestamp._is_valid_timestamp_pattern(timestamp):
+            return None
+        try:
+            # anything lower than seconds is truncated
+            processed_timestamp = timestamp.rsplit(".", 2)[0]
+            # add back the "Z" suffix if we removed it
+            if not processed_timestamp.endswith("Z"):
+                processed_timestamp = f"{processed_timestamp}Z"
+            datetime_timestamp = datetime.datetime.strptime(processed_timestamp, TIMESTAMP_FORMAT)
+            return datetime_timestamp
+        except Exception:
+            return None
+
+    def _create_failure_event(self, env: Environment, timestamp_str: str) -> FailureEvent:
+        return FailureEvent(
+            env=env,
+            error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime),
+            event_type=HistoryEventType.ExecutionFailed,
+            event_details=EventDetails(
+                executionFailedEventDetails=ExecutionFailedEventDetails(
+                    error=StatesErrorNameType.StatesRuntime.to_name(),
+                    cause="The Timestamp parameter does not reference a valid ISO-8601 "
+                    f"extended offset date-time format string: {self.string.literal_value} == {timestamp_str}",
+                )
+            ),
+        )
+
+    def _get_wait_seconds(self, env: Environment) -> int:
+        self.string.eval(env=env)
+        timestamp_str: str = env.stack.pop()
+        timestamp = self._from_timestamp_string(timestamp=timestamp_str)
+        if timestamp is None:
+            raise FailureEventException(self._create_failure_event(env, timestamp_str))
+        delta = timestamp - datetime.datetime.now()
+        delta_sec = int(delta.total_seconds())
+        return delta_sec
+
+
+class TimestampPath(Timestamp):
+    def _create_failure_event(self, env: Environment, timestamp_str: str) -> FailureEvent:
+        return FailureEvent(
+            env=env,
+            error_name=StatesErrorName(typ=StatesErrorNameType.StatesRuntime),
+            event_type=HistoryEventType.ExecutionFailed,
+            event_details=EventDetails(
+                executionFailedEventDetails=ExecutionFailedEventDetails(
+                    error=StatesErrorNameType.StatesRuntime.to_name(),
+                    cause="The TimestampPath parameter does not reference a valid ISO-8601 "
+                    f"extended offset date-time format string: {self.string.literal_value} == {timestamp_str}",
+                )
+            ),
+        )
diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/wait_function.py b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/wait_function.py
new file mode 100644
index 0000000000000..48611e897c532
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/component/state/state_wait/wait_function/wait_function.py
@@ -0,0 +1,41 @@
+import abc
+import logging
+import time
+
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+
+LOG = logging.getLogger(__name__)
+
+
+class WaitFunction(EvalComponent, abc.ABC):
+    @abc.abstractmethod
+    def _get_wait_seconds(self, env: Environment) -> int: ...
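# Aside: why _wait_interval (next hunk) waits on the program-state Event
# rather than calling time.sleep — an Event-based wait can be interrupted as
# soon as the program stops running, and resumed for the remaining delta
# otherwise. A generic, self-contained version of that idea:
import threading
import time

def interruptible_sleep(stop_event: threading.Event, seconds: float) -> None:
    deadline = time.time() + seconds
    while not stop_event.is_set():
        remaining = deadline - time.time()
        if remaining <= 0:
            return
        stop_event.wait(timeout=remaining)

event = threading.Event()
start = time.time()
interruptible_sleep(event, 0.05)   # never interrupted: waits the full delay
assert time.time() - start >= 0.05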
+ + def _wait_interval(self, env: Environment, wait_seconds: int) -> None: + t0 = time.time() + if wait_seconds > 0: + env.program_state_event.wait(wait_seconds) + t1 = time.time() + round_sec_waited = int(t1 - t0) + wait_seconds_delta = wait_seconds - round_sec_waited + if wait_seconds_delta <= 0: + return + elif env.is_running(): + # Unrelated interrupt: continue waiting. + LOG.warning( + "Wait function '%s' successfully reentered waiting for another '%s' seconds.", + self, + wait_seconds_delta, + ) + return self._wait_interval(env=env, wait_seconds=wait_seconds_delta) + else: + LOG.info( + "Wait function '%s' successfully interrupted after '%s' seconds.", + self, + round_sec_waited, + ) + + def _eval_body(self, env: Environment) -> None: + w_sec = self._get_wait_seconds(env=env) + self._wait_interval(env=env, wait_seconds=w_sec) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/test_state/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/test_state/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/test_state/program/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/test_state/program/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/test_state/program/test_state_program.py b/localstack-core/localstack/services/stepfunctions/asl/component/test_state/program/test_state_program.py new file mode 100644 index 0000000000000..a89aa948605d7 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/test_state/program/test_state_program.py @@ -0,0 +1,61 @@ +import logging +import threading +from typing import Final + +from localstack.aws.api.stepfunctions import ( + ExecutionFailedEventDetails, +) +from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import ( + FailureEventException, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import ( + StatesErrorName, +) +from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import ( + StatesErrorNameType, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.component.state.state import CommonStateField +from localstack.services.stepfunctions.asl.eval.test_state.environment import TestStateEnvironment +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.utils.threads import TMP_THREADS + +LOG = logging.getLogger(__name__) + +TEST_CASE_EXECUTION_TIMEOUT_SECONDS: Final[int] = 300 # 5 minutes. 
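# Aside: the bounded-join pattern TestStateProgram.eval (below) uses to
# enforce TEST_CASE_EXECUTION_TIMEOUT_SECONDS, shown in isolation.
import threading

def run_with_timeout(target, timeout_seconds: float) -> bool:
    """Returns True if target finished within the deadline, False on timeout."""
    worker = threading.Thread(target=target, daemon=True)
    worker.start()
    worker.join(timeout=timeout_seconds)
    return not worker.is_alive()

assert run_with_timeout(lambda: None, timeout_seconds=1.0) is True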
+ + +class TestStateProgram(EvalComponent): + test_state: Final[CommonStateField] + + def __init__( + self, + test_state: CommonStateField, + ): + self.test_state = test_state + + def eval(self, env: TestStateEnvironment) -> None: + env.next_state_name = self.test_state.name + worker_thread = threading.Thread(target=super().eval, args=(env,), daemon=True) + TMP_THREADS.append(worker_thread) + worker_thread.start() + worker_thread.join(timeout=TEST_CASE_EXECUTION_TIMEOUT_SECONDS) + is_timeout = worker_thread.is_alive() + if is_timeout: + env.set_timed_out() + + def _eval_body(self, env: TestStateEnvironment) -> None: + try: + env.inspection_data["input"] = to_json_str(env.states.get_input()) + self.test_state.eval(env=env) + except FailureEventException as ex: + env.set_error(error=ex.get_execution_failed_event_details()) + except Exception as ex: + cause = f"{type(ex).__name__}({str(ex)})" + LOG.error("Stepfunctions computation ended with exception '%s'.", cause) + env.set_error( + ExecutionFailedEventDetails( + error=StatesErrorName(typ=StatesErrorNameType.StatesRuntime).error_name, + cause=cause, + ) + ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/test_state/state/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/component/test_state/state/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/component/test_state/state/test_state_state_props.py b/localstack-core/localstack/services/stepfunctions/asl/component/test_state/state/test_state_state_props.py new file mode 100644 index 0000000000000..00d65036f0653 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/component/test_state/state/test_state_state_props.py @@ -0,0 +1,21 @@ +from typing import Any, Final + +from localstack.services.stepfunctions.asl.component.common.parargs import Parargs +from localstack.services.stepfunctions.asl.component.common.path.input_path import InputPath +from localstack.services.stepfunctions.asl.component.common.path.result_path import ResultPath +from localstack.services.stepfunctions.asl.component.common.result_selector import ResultSelector +from localstack.services.stepfunctions.asl.component.state.state_pass.result import Result +from localstack.services.stepfunctions.asl.component.state.state_props import StateProps + +EQUAL_SUBTYPES: Final[list[type]] = [InputPath, Parargs, ResultSelector, ResultPath, Result] + + +class TestStateStateProps(StateProps): + def add(self, instance: Any) -> None: + inst_type = type(instance) + # Subclasses + for typ in EQUAL_SUBTYPES: + if issubclass(inst_type, typ): + self._add(typ, instance) + return + super().add(instance=instance) diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/eval/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/callback/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/eval/callback/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/callback/callback.py b/localstack-core/localstack/services/stepfunctions/asl/eval/callback/callback.py new file mode 100644 index 0000000000000..c5c27a05f4723 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/eval/callback/callback.py @@ -0,0 +1,251 @@ +import 
abc
+from collections import OrderedDict
+from threading import Event, Lock
+from typing import Final, Optional
+
+from localstack.aws.api.stepfunctions import ActivityDoesNotExist, Arn
+from localstack.services.stepfunctions.backend.activity import Activity, ActivityTask
+from localstack.utils.strings import long_uid
+
+CallbackId = str
+
+
+class CallbackOutcome(abc.ABC):
+    callback_id: Final[CallbackId]
+
+    def __init__(self, callback_id: str):
+        self.callback_id = callback_id
+
+
+class CallbackOutcomeSuccess(CallbackOutcome):
+    output: Final[str]
+
+    def __init__(self, callback_id: CallbackId, output: str):
+        super().__init__(callback_id=callback_id)
+        self.output = output
+
+
+class CallbackOutcomeFailure(CallbackOutcome):
+    error: Final[Optional[str]]
+    cause: Final[Optional[str]]
+
+    def __init__(self, callback_id: CallbackId, error: Optional[str], cause: Optional[str]):
+        super().__init__(callback_id=callback_id)
+        self.error = error
+        self.cause = cause
+
+
+class CallbackOutcomeTimedOut(CallbackOutcome):
+    pass
+
+
+class CallbackTimeoutError(TimeoutError):
+    pass
+
+
+class CallbackConsumerError(abc.ABC): ...
+
+
+class CallbackConsumerTimeout(CallbackConsumerError):
+    pass
+
+
+class CallbackConsumerLeft(CallbackConsumerError):
+    pass
+
+
+class HeartbeatEndpoint:
+    _mutex: Final[Lock]
+    _next_heartbeat_event: Final[Event]
+    _heartbeat_seconds: Final[int]
+
+    def __init__(self, heartbeat_seconds: int):
+        self._mutex = Lock()
+        self._next_heartbeat_event = Event()
+        self._heartbeat_seconds = heartbeat_seconds
+
+    def clear_and_wait(self) -> bool:
+        with self._mutex:
+            if self._next_heartbeat_event.is_set():
+                self._next_heartbeat_event.clear()
+                return True
+        return self._next_heartbeat_event.wait(timeout=self._heartbeat_seconds)
+
+    def notify(self):
+        with self._mutex:
+            self._next_heartbeat_event.set()
+
+
+class HeartbeatTimeoutError(TimeoutError):
+    pass
+
+
+class HeartbeatTimedOut(CallbackConsumerError):
+    pass
+
+
+class ActivityTaskStartOutcome:
+    worker_name: Optional[str]
+
+    def __init__(self, worker_name: Optional[str] = None):
+        self.worker_name = worker_name
+
+
+class ActivityTaskStartEndpoint:
+    _next_activity_task_start_event: Final[Event]
+    _outcome: Optional[ActivityTaskStartOutcome]
+
+    def __init__(self):
+        self._next_activity_task_start_event = Event()
+        # Initialize the outcome so a timed-out wait() observes None instead
+        # of raising AttributeError.
+        self._outcome = None
+
+    def wait(self, timeout_seconds: float) -> Optional[ActivityTaskStartOutcome]:
+        self._next_activity_task_start_event.wait(timeout=timeout_seconds)
+        return self._outcome
+
+    def notify(self, activity_task: ActivityTaskStartOutcome) -> None:
+        self._outcome = activity_task
+        self._next_activity_task_start_event.set()
+
+
+class CallbackEndpoint:
+    callback_id: Final[CallbackId]
+    _notify_event: Final[Event]
+    _outcome: Optional[CallbackOutcome]
+    consumer_error: Optional[CallbackConsumerError]
+    _heartbeat_endpoint: Optional[HeartbeatEndpoint]
+
+    def __init__(self, callback_id: CallbackId):
+        self.callback_id = callback_id
+        self._notify_event = Event()
+        self._outcome = None
+        self.consumer_error = None
+        self._heartbeat_endpoint = None
+
+    def setup_heartbeat_endpoint(self, heartbeat_seconds: int) -> HeartbeatEndpoint:
+        self._heartbeat_endpoint = HeartbeatEndpoint(heartbeat_seconds=heartbeat_seconds)
+        return self._heartbeat_endpoint
+
+    def interrupt_all(self) -> None:
+        # Interrupts all waiting processes on this endpoint.
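# Aside: the rendezvous CallbackEndpoint implements for callback-style
# (.waitForTaskToken) integrations — one thread blocks in wait() (defined
# below) while another delivers the outcome via notify(). Import path taken
# from this diff; the token value is illustrative only.
import threading
from localstack.services.stepfunctions.asl.eval.callback.callback import (
    CallbackEndpoint,
    CallbackOutcomeSuccess,
)

endpoint = CallbackEndpoint(callback_id="token-1")
threading.Timer(
    0.01, endpoint.notify, args=(CallbackOutcomeSuccess("token-1", output="{}"),)
).start()
outcome = endpoint.wait(timeout=1.0)
assert isinstance(outcome, CallbackOutcomeSuccess)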
+ self._notify_event.set() + heartbeat_endpoint = self._heartbeat_endpoint + if heartbeat_endpoint is not None: + heartbeat_endpoint.notify() + + def notify(self, outcome: CallbackOutcome): + self._outcome = outcome + self._notify_event.set() + if self._heartbeat_endpoint: + self._heartbeat_endpoint.notify() + + def notify_heartbeat(self) -> bool: + if not self._heartbeat_endpoint: + return False + self._heartbeat_endpoint.notify() + return True + + def wait(self, timeout: Optional[float] = None) -> Optional[CallbackOutcome]: + self._notify_event.wait(timeout=timeout) + return self._outcome + + def get_outcome(self) -> Optional[CallbackOutcome]: + return self._outcome + + def report(self, consumer_error: CallbackConsumerError) -> None: + self.consumer_error = consumer_error + + +class ActivityCallbackEndpoint(CallbackEndpoint): + _activity_task_start_endpoint: Final[ActivityTaskStartEndpoint] + _activity_input: Final[str] + + def __init__(self, callback_id: str, activity_input: str): + super().__init__(callback_id=callback_id) + self._activity_input = activity_input + self._activity_task_start_endpoint = ActivityTaskStartEndpoint() + + def get_activity_input(self) -> str: + return self._activity_input + + def get_activity_task_start_endpoint(self) -> ActivityTaskStartEndpoint: + return self._activity_task_start_endpoint + + def notify_activity_task_start(self, worker_name: Optional[str]) -> None: + self._activity_task_start_endpoint.notify(ActivityTaskStartOutcome(worker_name=worker_name)) + + +class CallbackNotifyConsumerError(RuntimeError): + callback_consumer_error: CallbackConsumerError + + def __init__(self, callback_consumer_error: CallbackConsumerError): + self.callback_consumer_error = callback_consumer_error + + +class CallbackOutcomeFailureError(RuntimeError): + callback_outcome_failure: CallbackOutcomeFailure + + def __init__(self, callback_outcome_failure: CallbackOutcomeFailure): + self.callback_outcome_failure = callback_outcome_failure + + +class CallbackPoolManager: + _activity_store: Final[dict[CallbackId, Activity]] + _pool: Final[dict[CallbackId, CallbackEndpoint]] + + def __init__(self, activity_store: dict[Arn, Activity]): + self._activity_store = activity_store + self._pool = OrderedDict() + + def get(self, callback_id: CallbackId) -> Optional[CallbackEndpoint]: + return self._pool.get(callback_id) + + def add(self, callback_id: CallbackId) -> CallbackEndpoint: + if callback_id in self._pool: + raise ValueError("Duplicate callback token id value.") + callback_endpoint = CallbackEndpoint(callback_id=callback_id) + self._pool[callback_id] = callback_endpoint + return callback_endpoint + + def add_activity_task( + self, callback_id: CallbackId, activity_arn: Arn, activity_input: str + ) -> ActivityCallbackEndpoint: + if callback_id in self._pool: + raise ValueError("Duplicate callback token id value.") + + maybe_activity: Optional[Activity] = self._activity_store.get(activity_arn) + if maybe_activity is None: + raise ActivityDoesNotExist() + + maybe_activity.add_task(ActivityTask(task_token=callback_id, task_input=activity_input)) + + callback_endpoint = ActivityCallbackEndpoint( + callback_id=callback_id, activity_input=activity_input + ) + self._pool[callback_id] = callback_endpoint + return callback_endpoint + + def generate(self) -> CallbackEndpoint: + return self.add(long_uid()) + + def notify(self, callback_id: CallbackId, outcome: CallbackOutcome) -> bool: + callback_endpoint = self._pool.get(callback_id, None) + if callback_endpoint is None: + return False + + 
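# Aside: the token lifecycle CallbackPoolManager (above) supports — generate()
# mints an endpoint keyed by a fresh opaque token, the token travels to the
# external caller, and notify() later resolves it by id. Illustrative use of
# the classes defined above:
from localstack.services.stepfunctions.asl.eval.callback.callback import (
    CallbackOutcomeSuccess,
    CallbackPoolManager,
)

pool = CallbackPoolManager(activity_store={})
endpoint = pool.generate()              # token is a fresh long_uid
token = endpoint.callback_id
assert pool.notify(token, CallbackOutcomeSuccess(token, output='"ok"')) is True
assert pool.notify("unknown-token", CallbackOutcomeSuccess("unknown-token", "{}")) is False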
consumer_error: Optional[CallbackConsumerError] = callback_endpoint.consumer_error + if consumer_error is not None: + raise CallbackNotifyConsumerError(callback_consumer_error=consumer_error) + + callback_endpoint.notify(outcome=outcome) + return True + + def heartbeat(self, callback_id: CallbackId) -> bool: + callback_endpoint = self._pool.get(callback_id, None) + if callback_endpoint is None: + return False + + consumer_error: Optional[CallbackConsumerError] = callback_endpoint.consumer_error + if consumer_error is not None: + raise CallbackNotifyConsumerError(callback_consumer_error=consumer_error) + + return callback_endpoint.notify_heartbeat() diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/contex_object.py b/localstack-core/localstack/services/stepfunctions/asl/eval/contex_object.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack/services/stepfunctions/asl/eval/count_down_latch.py b/localstack-core/localstack/services/stepfunctions/asl/eval/count_down_latch.py similarity index 100% rename from localstack/services/stepfunctions/asl/eval/count_down_latch.py rename to localstack-core/localstack/services/stepfunctions/asl/eval/count_down_latch.py diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/environment.py b/localstack-core/localstack/services/stepfunctions/asl/eval/environment.py new file mode 100644 index 0000000000000..ecb90be5b8d07 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/eval/environment.py @@ -0,0 +1,300 @@ +from __future__ import annotations + +import copy +import logging +import threading +from typing import Any, Final, Optional + +from localstack.aws.api.stepfunctions import ( + Arn, + ExecutionFailedEventDetails, + StateMachineType, + Timestamp, +) +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.map_run_record import ( + MapRunRecordPoolManager, +) +from localstack.services.stepfunctions.asl.eval.callback.callback import CallbackPoolManager +from localstack.services.stepfunctions.asl.eval.evaluation_details import AWSExecutionDetails +from localstack.services.stepfunctions.asl.eval.event.event_manager import ( + EventHistoryContext, + EventManager, +) +from localstack.services.stepfunctions.asl.eval.event.logging import ( + CloudWatchLoggingSession, +) +from localstack.services.stepfunctions.asl.eval.program_state import ( + ProgramEnded, + ProgramError, + ProgramRunning, + ProgramState, + ProgramStopped, + ProgramTimedOut, +) +from localstack.services.stepfunctions.asl.eval.states import ContextObjectData, States +from localstack.services.stepfunctions.asl.eval.variable_store import VariableStore +from localstack.services.stepfunctions.backend.activity import Activity +from localstack.services.stepfunctions.mocking.mock_config import MockedResponse, MockTestCase + +LOG = logging.getLogger(__name__) + + +class Environment: + _state_mutex: Final[threading.RLock()] + _program_state: Optional[ProgramState] + program_state_event: Final[threading.Event()] + + event_manager: EventManager + event_history_context: Final[EventHistoryContext] + cloud_watch_logging_session: Final[Optional[CloudWatchLoggingSession]] + aws_execution_details: Final[AWSExecutionDetails] + execution_type: Final[StateMachineType] + callback_pool_manager: CallbackPoolManager + map_run_record_pool_manager: MapRunRecordPoolManager + activity_store: Final[dict[Arn, Activity]] + mock_test_case: Optional[MockTestCase] = None + + _frames: 
Final[list[Environment]] + _is_frame: bool = False + + heap: dict[str, Any] = dict() + stack: list[Any] = list() + states: Final[States] + variable_store: Final[VariableStore] + + def __init__( + self, + aws_execution_details: AWSExecutionDetails, + execution_type: StateMachineType, + context: ContextObjectData, + event_history_context: EventHistoryContext, + cloud_watch_logging_session: Optional[CloudWatchLoggingSession], + activity_store: dict[Arn, Activity], + variable_store: Optional[VariableStore] = None, + mock_test_case: Optional[MockTestCase] = None, + ): + super(Environment, self).__init__() + self._state_mutex = threading.RLock() + self._program_state = None + self.program_state_event = threading.Event() + + self.cloud_watch_logging_session = cloud_watch_logging_session + self.event_manager = EventManager(cloud_watch_logging_session=cloud_watch_logging_session) + self.event_history_context = event_history_context + + self.aws_execution_details = aws_execution_details + self.execution_type = execution_type + self.callback_pool_manager = CallbackPoolManager(activity_store=activity_store) + self.map_run_record_pool_manager = MapRunRecordPoolManager() + + self.activity_store = activity_store + + self.mock_test_case = mock_test_case + + self._frames = list() + self._is_frame = False + + self.heap = dict() + self.stack = list() + self.states = States(context=context) + self.variable_store = variable_store or VariableStore() + + @classmethod + def as_frame_of( + cls, env: Environment, event_history_frame_cache: Optional[EventHistoryContext] = None + ) -> Environment: + return Environment.as_inner_frame_of( + env=env, + variable_store=env.variable_store, + event_history_frame_cache=event_history_frame_cache, + ) + + @classmethod + def as_inner_frame_of( + cls, + env: Environment, + variable_store: VariableStore, + event_history_frame_cache: Optional[EventHistoryContext] = None, + ) -> Environment: + # Construct the frame's context object data. + context = ContextObjectData( + Execution=env.states.context_object.context_object_data["Execution"], + StateMachine=env.states.context_object.context_object_data["StateMachine"], + ) + if "Task" in env.states.context_object.context_object_data: + context["Task"] = env.states.context_object.context_object_data["Task"] + + # The default logic provisions for child frame to extend the source frame event id. 
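# Aside: the parent/child bookkeeping that open_frame/close_frame (further
# below) implement, reduced to a generic stand-in type. Closing a frame folds
# the child's last event id back into the parent, which is what the event-id
# extension mentioned in the comment above relies on.
class Ctx:
    def __init__(self, last_event_id: int = 0) -> None:
        self.children: list["Ctx"] = []
        self.last_event_id = last_event_id

    def open_frame(self) -> "Ctx":
        child = Ctx(last_event_id=self.last_event_id)  # child extends parent's id
        self.children.append(child)
        return child

    def close_frame(self, child: "Ctx") -> None:
        self.children.remove(child)
        self.last_event_id = max(self.last_event_id, child.last_event_id)

root = Ctx()
frame = root.open_frame()
frame.last_event_id = 7     # the frame published events 1..7
root.close_frame(frame)
assert root.last_event_id == 7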
+ if event_history_frame_cache is None: + event_history_frame_cache = EventHistoryContext( + previous_event_id=env.event_history_context.source_event_id + ) + + frame = cls( + aws_execution_details=env.aws_execution_details, + execution_type=env.execution_type, + context=context, + event_history_context=event_history_frame_cache, + cloud_watch_logging_session=env.cloud_watch_logging_session, + activity_store=env.activity_store, + variable_store=variable_store, + mock_test_case=env.mock_test_case, + ) + frame._is_frame = True + frame.event_manager = env.event_manager + if "State" in env.states.context_object.context_object_data: + frame.states.context_object.context_object_data["State"] = copy.deepcopy( + env.states.context_object.context_object_data["State"] + ) + frame.callback_pool_manager = env.callback_pool_manager + frame.map_run_record_pool_manager = env.map_run_record_pool_manager + frame.heap = dict() + frame._program_state = copy.deepcopy(env._program_state) + return frame + + @property + def next_state_name(self) -> Optional[str]: + next_state_name: Optional[str] = None + program_state = self._program_state + if isinstance(program_state, ProgramRunning): + next_state_name = program_state.next_state_name + return next_state_name + + @next_state_name.setter + def next_state_name(self, next_state_name: str) -> None: + if self._program_state is None: + self._program_state = ProgramRunning() + + if isinstance(self._program_state, ProgramRunning): + self._program_state.next_state_name = next_state_name + else: + raise RuntimeError( + f"Could not set NextState value when in state '{type(self._program_state)}'." + ) + + @property + def next_field_name(self) -> Optional[str]: + next_field_name: Optional[str] = None + program_state = self._program_state + if isinstance(program_state, ProgramRunning): + next_field_name = program_state.next_field_name + return next_field_name + + @next_field_name.setter + def next_field_name(self, next_field_name: str) -> None: + if isinstance(self._program_state, ProgramRunning): + self._program_state.next_field_name = next_field_name + else: + raise RuntimeError( + f"Could not set NextField value when in state '{type(self._program_state)}'." 
+ ) + + def program_state(self) -> ProgramState: + return copy.deepcopy(self._program_state) + + def is_running(self) -> bool: + return isinstance(self._program_state, ProgramRunning) + + def set_ended(self) -> None: + with self._state_mutex: + if isinstance(self._program_state, ProgramRunning): + self._program_state = ProgramEnded() + for frame in self._frames: + frame.set_ended() + self.program_state_event.set() + self.program_state_event.clear() + + def set_error(self, error: ExecutionFailedEventDetails) -> None: + with self._state_mutex: + self._program_state = ProgramError(error=error) + for frame in self._frames: + frame.set_error(error=error) + self.program_state_event.set() + self.program_state_event.clear() + + def set_timed_out(self) -> None: + with self._state_mutex: + self._program_state = ProgramTimedOut() + for frame in self._frames: + frame.set_timed_out() + self.program_state_event.set() + self.program_state_event.clear() + + def set_stop(self, stop_date: Timestamp, cause: Optional[str], error: Optional[str]) -> None: + with self._state_mutex: + if isinstance(self._program_state, ProgramRunning): + self._program_state = ProgramStopped(stop_date=stop_date, cause=cause, error=error) + for frame in self._frames: + frame.set_stop(stop_date=stop_date, cause=cause, error=error) + self.program_state_event.set() + self.program_state_event.clear() + + def open_frame( + self, event_history_context: Optional[EventHistoryContext] = None + ) -> Environment: + with self._state_mutex: + frame = self.as_frame_of(env=self, event_history_frame_cache=event_history_context) + self._frames.append(frame) + return frame + + def open_inner_frame( + self, event_history_context: Optional[EventHistoryContext] = None + ) -> Environment: + with self._state_mutex: + variable_store = VariableStore.as_inner_scope_of( + outer_variable_store=self.variable_store + ) + frame = self.as_inner_frame_of( + env=self, + variable_store=variable_store, + event_history_frame_cache=event_history_context, + ) + self._frames.append(frame) + return frame + + def close_frame(self, frame: Environment) -> None: + with self._state_mutex: + if frame in self._frames: + self._frames.remove(frame) + self.event_history_context.integrate(frame.event_history_context) + + def delete_frame(self, frame: Environment) -> None: + with self._state_mutex: + if frame in self._frames: + self._frames.remove(frame) + + def is_frame(self) -> bool: + return self._is_frame + + def is_standard_workflow(self) -> bool: + return self.execution_type == StateMachineType.STANDARD + + def is_mocked_mode(self) -> bool: + """ + Returns True if the state machine is running in mock mode and the current + state has a defined mock configuration in the target environment or frame; + otherwise, returns False. 
+ """ + return ( + self.mock_test_case is not None + and self.next_state_name in self.mock_test_case.state_mocked_responses + ) + + def get_current_mocked_response(self) -> MockedResponse: + if not self.is_mocked_mode(): + raise RuntimeError( + "Cannot retrieve mocked response: execution is not operating in mocked mode" + ) + state_name = self.next_state_name + state_mocked_responses: Optional = self.mock_test_case.state_mocked_responses.get( + state_name + ) + if state_mocked_responses is None: + raise RuntimeError(f"No mocked response definition for state '{state_name}'") + retry_count = self.states.context_object.context_object_data["State"]["RetryCount"] + if len(state_mocked_responses.mocked_responses) <= retry_count: + raise RuntimeError( + f"No mocked response definition for state '{state_name}' " + f"and retry number '{retry_count}'" + ) + return state_mocked_responses.mocked_responses[retry_count] diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/evaluation_details.py b/localstack-core/localstack/services/stepfunctions/asl/eval/evaluation_details.py new file mode 100644 index 0000000000000..d053ae70e2187 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/eval/evaluation_details.py @@ -0,0 +1,60 @@ +from typing import Any, Final, Optional + +from localstack.aws.api.stepfunctions import Arn, Definition, LongArn, StateMachineType + + +class AWSExecutionDetails: + account: Final[str] + region: Final[str] + role_arn: Final[str] + + def __init__(self, account: str, region: str, role_arn: str): + self.account = account + self.region = region + self.role_arn = role_arn + + +class ExecutionDetails: + arn: Final[LongArn] + name: Final[str] + role_arn: Final[Arn] + inpt: Final[Optional[Any]] + start_time: Final[str] + + def __init__( + self, arn: LongArn, name: str, role_arn: Arn, inpt: Optional[Any], start_time: str + ): + self.arn = arn + self.name = name + self.role_arn = role_arn + self.inpt = inpt + self.start_time = start_time + + +class StateMachineDetails: + arn: Final[Arn] + name: Final[str] + typ: Final[StateMachineType] + definition: Final[Definition] + + def __init__(self, arn: Arn, name: str, typ: StateMachineType, definition: str): + self.arn = arn + self.name = name + self.typ = typ + self.definition = definition + + +class EvaluationDetails: + aws_execution_details: Final[AWSExecutionDetails] + execution_details: Final[ExecutionDetails] + state_machine_details: Final[StateMachineDetails] + + def __init__( + self, + aws_execution_details: AWSExecutionDetails, + execution_details: ExecutionDetails, + state_machine_details: StateMachineDetails, + ): + self.aws_execution_details = aws_execution_details + self.execution_details = execution_details + self.state_machine_details = state_machine_details diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/event/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/eval/event/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/event/event_detail.py b/localstack-core/localstack/services/stepfunctions/asl/eval/event/event_detail.py new file mode 100644 index 0000000000000..c096a8d3f9556 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/eval/event/event_detail.py @@ -0,0 +1,74 @@ +from typing import NotRequired, TypedDict + +from localstack.aws.api.stepfunctions import ( + ActivityFailedEventDetails, + ActivityScheduledEventDetails, + 
ActivityScheduleFailedEventDetails, + ActivityStartedEventDetails, + ActivitySucceededEventDetails, + ActivityTimedOutEventDetails, + EvaluationFailedEventDetails, + ExecutionAbortedEventDetails, + ExecutionFailedEventDetails, + ExecutionStartedEventDetails, + ExecutionSucceededEventDetails, + ExecutionTimedOutEventDetails, + LambdaFunctionFailedEventDetails, + LambdaFunctionScheduledEventDetails, + LambdaFunctionScheduleFailedEventDetails, + LambdaFunctionStartFailedEventDetails, + LambdaFunctionSucceededEventDetails, + LambdaFunctionTimedOutEventDetails, + MapIterationEventDetails, + MapRunFailedEventDetails, + MapRunStartedEventDetails, + MapStateStartedEventDetails, + StateEnteredEventDetails, + StateExitedEventDetails, + TaskFailedEventDetails, + TaskScheduledEventDetails, + TaskStartedEventDetails, + TaskStartFailedEventDetails, + TaskSubmitFailedEventDetails, + TaskSubmittedEventDetails, + TaskSucceededEventDetails, + TaskTimedOutEventDetails, +) + + +class EventDetails(TypedDict): + activityFailedEventDetails: NotRequired[ActivityFailedEventDetails] + activityScheduleFailedEventDetails: NotRequired[ActivityScheduleFailedEventDetails] + activityScheduledEventDetails: NotRequired[ActivityScheduledEventDetails] + activityStartedEventDetails: NotRequired[ActivityStartedEventDetails] + activitySucceededEventDetails: NotRequired[ActivitySucceededEventDetails] + activityTimedOutEventDetails: NotRequired[ActivityTimedOutEventDetails] + taskFailedEventDetails: NotRequired[TaskFailedEventDetails] + taskScheduledEventDetails: NotRequired[TaskScheduledEventDetails] + taskStartFailedEventDetails: NotRequired[TaskStartFailedEventDetails] + taskStartedEventDetails: NotRequired[TaskStartedEventDetails] + taskSubmitFailedEventDetails: NotRequired[TaskSubmitFailedEventDetails] + taskSubmittedEventDetails: NotRequired[TaskSubmittedEventDetails] + taskSucceededEventDetails: NotRequired[TaskSucceededEventDetails] + taskTimedOutEventDetails: NotRequired[TaskTimedOutEventDetails] + evaluationFailedEventDetails: NotRequired[EvaluationFailedEventDetails] + executionFailedEventDetails: NotRequired[ExecutionFailedEventDetails] + executionStartedEventDetails: NotRequired[ExecutionStartedEventDetails] + executionSucceededEventDetails: NotRequired[ExecutionSucceededEventDetails] + executionAbortedEventDetails: NotRequired[ExecutionAbortedEventDetails] + executionTimedOutEventDetails: NotRequired[ExecutionTimedOutEventDetails] + mapStateStartedEventDetails: NotRequired[MapStateStartedEventDetails] + mapIterationStartedEventDetails: NotRequired[MapIterationEventDetails] + mapIterationSucceededEventDetails: NotRequired[MapIterationEventDetails] + mapIterationFailedEventDetails: NotRequired[MapIterationEventDetails] + mapIterationAbortedEventDetails: NotRequired[MapIterationEventDetails] + lambdaFunctionFailedEventDetails: NotRequired[LambdaFunctionFailedEventDetails] + lambdaFunctionScheduleFailedEventDetails: NotRequired[LambdaFunctionScheduleFailedEventDetails] + lambdaFunctionScheduledEventDetails: NotRequired[LambdaFunctionScheduledEventDetails] + lambdaFunctionStartFailedEventDetails: NotRequired[LambdaFunctionStartFailedEventDetails] + lambdaFunctionSucceededEventDetails: NotRequired[LambdaFunctionSucceededEventDetails] + lambdaFunctionTimedOutEventDetails: NotRequired[LambdaFunctionTimedOutEventDetails] + stateEnteredEventDetails: NotRequired[StateEnteredEventDetails] + stateExitedEventDetails: NotRequired[StateExitedEventDetails] + mapRunStartedEventDetails: NotRequired[MapRunStartedEventDetails] + 
mapRunFailedEventDetails: NotRequired[MapRunFailedEventDetails] diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/event/event_manager.py b/localstack-core/localstack/services/stepfunctions/asl/eval/event/event_manager.py new file mode 100644 index 0000000000000..8a9ea31a47287 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/eval/event/event_manager.py @@ -0,0 +1,210 @@ +from __future__ import annotations + +import copy +import datetime +import logging +import threading +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import ( + HistoryEvent, + HistoryEventList, + HistoryEventType, + LongArn, + Timestamp, +) +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.eval.event.logging import ( + CloudWatchLoggingSession, + HistoryLog, +) +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +LOG = logging.getLogger(__name__) + + +class EventHistoryContext: + # The '0' event is the source event id of the program execution. + _PROGRAM_START_EVENT_ID: Final[int] = 0 + + source_event_id: int + last_published_event_id: int + + def __init__(self, previous_event_id: int): + self.source_event_id = previous_event_id + self.last_published_event_id = previous_event_id + + @classmethod + def of_program_start(cls) -> EventHistoryContext: + return cls(previous_event_id=cls._PROGRAM_START_EVENT_ID) + + def integrate(self, other: EventHistoryContext) -> None: + self.source_event_id = max(self.source_event_id, other.source_event_id) + self.last_published_event_id = max( + self.last_published_event_id, other.last_published_event_id + ) + + +class EventIdGenerator: + _next_id: int + + def __init__(self): + self._next_id = 1 + + def get(self) -> int: + next_id = self._next_id + self._next_id += 1 + return next_id + + +class EventManager: + _mutex: Final[threading.Lock] + _event_id_gen: EventIdGenerator + _history_event_list: Final[HistoryEventList] + _cloud_watch_logging_session: Final[Optional[CloudWatchLoggingSession]] + + def __init__(self, cloud_watch_logging_session: Optional[CloudWatchLoggingSession] = None): + self._mutex = threading.Lock() + self._event_id_gen = EventIdGenerator() + self._history_event_list = list() + self._cloud_watch_logging_session = cloud_watch_logging_session + + def add_event( + self, + context: EventHistoryContext, + event_type: HistoryEventType, + event_details: Optional[EventDetails] = None, + timestamp: Timestamp = None, + update_source_event_id: bool = True, + ) -> int: + with self._mutex: + event_id: int = self._event_id_gen.get() + source_event_id: int = context.source_event_id + timestamp = timestamp or self._get_current_timestamp() + + context.last_published_event_id = event_id + if update_source_event_id: + context.source_event_id = event_id + + self._publish_history_event( + event_id=event_id, + source_event_id=source_event_id, + event_type=event_type, + timestamp=timestamp, + event_details=event_details, + ) + self._publish_history_log( + event_id=event_id, + source_event_id=source_event_id, + event_type=event_type, + timestamp=timestamp, + event_details=event_details, + ) + + return event_id + + @staticmethod + def _get_current_timestamp() -> datetime.datetime: + return datetime.datetime.now(tz=datetime.timezone.utc) + + @staticmethod + def _create_history_event( + event_id: int, + source_event_id: int, + event_type: HistoryEventType, + timestamp: datetime.datetime, + event_details: 
Optional[EventDetails], + ) -> HistoryEvent: + history_event = HistoryEvent() + if event_details is not None: + history_event.update(event_details) + history_event["id"] = event_id + history_event["previousEventId"] = source_event_id + history_event["type"] = event_type + history_event["timestamp"] = timestamp + return history_event + + def _publish_history_event( + self, + event_id: int, + source_event_id: int, + event_type: HistoryEventType, + timestamp: datetime.datetime, + event_details: Optional[EventDetails], + ): + history_event = self._create_history_event( + event_id=event_id, + source_event_id=source_event_id, + event_type=event_type, + timestamp=timestamp, + event_details=event_details, + ) + self._history_event_list.append(history_event) + + @staticmethod + def _remove_data_from_history_log(details_body: dict) -> None: + remove_keys = ["input", "inputDetails", "output", "outputDetails"] + for remove_key in remove_keys: + details_body.pop(remove_key, None) + + @staticmethod + def _create_history_log( + event_id: int, + source_event_id: int, + event_type: HistoryEventType, + timestamp: datetime.datetime, + execution_arn: LongArn, + event_details: Optional[EventDetails], + include_execution_data: bool, + ) -> HistoryLog: + log = HistoryLog( + id=str(event_id), + previous_event_id=str(source_event_id), + event_timestamp=timestamp, + type=event_type, + execution_arn=execution_arn, + ) + if event_details: + if len(event_details) > 1: + LOG.warning( + "Event details with multiple bindings: %s", + to_json_str(event_details), + ) + details_body = next(iter(event_details.values())) + if not include_execution_data: + # clone the object before modifying it as the change is limited to the history log value. + details_body = copy.deepcopy(details_body) + EventManager._remove_data_from_history_log(details_body=details_body) + log["details"] = details_body + return log + + def _publish_history_log( + self, + event_id: int, + source_event_id: int, + event_type: HistoryEventType, + timestamp: datetime.datetime, + event_details: Optional[EventDetails], + ): + # No logging session for this execution. + if self._cloud_watch_logging_session is None: + return + + # This event is not recorded by this execution's logging configuration. 
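# Aside: how event ids chain through EventHistoryContext/EventManager above —
# each published event becomes the previousEventId of the next event recorded
# through the same context. Without a CloudWatch session only the in-memory
# history is written.
from localstack.aws.api.stepfunctions import HistoryEventType
from localstack.services.stepfunctions.asl.eval.event.event_manager import (
    EventHistoryContext,
    EventManager,
)

manager = EventManager()
context = EventHistoryContext.of_program_start()
first = manager.add_event(context, HistoryEventType.ExecutionStarted)
second = manager.add_event(context, HistoryEventType.PassStateEntered)
events = manager.get_event_history()
assert events[0]["previousEventId"] == 0 and events[1]["previousEventId"] == first
assert second == first + 1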
+ if not self._cloud_watch_logging_session.log_level_filter(history_event_type=event_type): + return + + history_log = self._create_history_log( + event_id=event_id, + source_event_id=source_event_id, + event_type=event_type, + timestamp=timestamp, + execution_arn=self._cloud_watch_logging_session.execution_arn, + event_details=event_details, + include_execution_data=self._cloud_watch_logging_session.configuration.include_execution_data, + ) + self._cloud_watch_logging_session.publish_history_log(history_log=history_log) + + def get_event_history(self) -> HistoryEventList: + with self._mutex: + return copy.deepcopy(self._history_event_list) diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/event/logging.py b/localstack-core/localstack/services/stepfunctions/asl/eval/event/logging.py new file mode 100644 index 0000000000000..de504ad2a8255 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/eval/event/logging.py @@ -0,0 +1,294 @@ +from __future__ import annotations + +import logging +from datetime import datetime +from typing import Final, NotRequired, Optional, TypedDict + +from botocore.client import BaseClient +from botocore.exceptions import ClientError +from botocore.utils import InvalidArnException + +from localstack.aws.api.logs import InputLogEvent +from localstack.aws.api.stepfunctions import ( + HistoryEventType, + InvalidLoggingConfiguration, + LoggingConfiguration, + LogLevel, + LongArn, +) +from localstack.aws.connect import connect_to +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.utils.aws.arns import ( + ArnData, + parse_arn, +) + +LOG = logging.getLogger(__name__) + +ExecutionEventLogDetails = dict + +# The following event type sets are compiled according to AWS's +# log level definitions: https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html +_ERROR_LOG_EVENT_TYPES: Final[set[HistoryEventType]] = { + HistoryEventType.ExecutionAborted, + HistoryEventType.ExecutionFailed, + HistoryEventType.ExecutionTimedOut, + HistoryEventType.FailStateEntered, + HistoryEventType.LambdaFunctionFailed, + HistoryEventType.LambdaFunctionScheduleFailed, + HistoryEventType.LambdaFunctionStartFailed, + HistoryEventType.LambdaFunctionTimedOut, + HistoryEventType.MapStateAborted, + HistoryEventType.MapStateFailed, + HistoryEventType.MapIterationAborted, + HistoryEventType.MapIterationFailed, + HistoryEventType.MapRunAborted, + HistoryEventType.MapRunFailed, + HistoryEventType.ParallelStateAborted, + HistoryEventType.ParallelStateFailed, + HistoryEventType.TaskFailed, + HistoryEventType.TaskStartFailed, + HistoryEventType.TaskStateAborted, + HistoryEventType.TaskSubmitFailed, + HistoryEventType.TaskTimedOut, + HistoryEventType.WaitStateAborted, +} +_FATAL_LOG_EVENT_TYPES: Final[set[HistoryEventType]] = { + HistoryEventType.ExecutionAborted, + HistoryEventType.ExecutionFailed, + HistoryEventType.ExecutionTimedOut, +} + + +# The LogStreamName used when creating the empty Log Stream when validating the logging configuration. +VALIDATION_LOG_STREAM_NAME: Final[str] = ( + "log_stream_created_by_aws_to_validate_log_delivery_subscriptions" +) + + +def is_logging_enabled_for(log_level: LogLevel, history_event_type: HistoryEventType) -> bool: + # Checks whether the history event type is in the context of a give LogLevel. 
+    if log_level == LogLevel.ALL:
+        return True
+    elif log_level == LogLevel.OFF:
+        return False
+    elif log_level == LogLevel.ERROR:
+        return history_event_type in _ERROR_LOG_EVENT_TYPES
+    elif log_level == LogLevel.FATAL:
+        return history_event_type in _FATAL_LOG_EVENT_TYPES
+    else:
+        LOG.error("Unknown LogLevel '%s'", log_level)
+        return False
+
+
+class CloudWatchLoggingConfiguration:
+    state_machine_arn: Final[LongArn]
+    log_level: Final[LogLevel]
+    log_account_id: Final[str]
+    log_region: Final[str]
+    log_group_name: Final[str]
+    log_stream_name: Final[str]
+    include_execution_data: Final[bool]
+
+    def __init__(
+        self,
+        state_machine_arn: LongArn,
+        log_account_id: str,
+        log_region: str,
+        log_group_name: str,
+        log_level: LogLevel,
+        include_execution_data: bool,
+    ):
+        self.state_machine_arn = state_machine_arn
+        self.log_level = log_level
+        self.log_group_name = log_group_name
+        self.log_account_id = log_account_id
+        self.log_region = log_region
+        # TODO: AWS appears to append a date and a serial number to the log
+        # stream name: more investigations are needed in this area.
+        self.log_stream_name = f"states/{state_machine_arn}"
+        self.include_execution_data = include_execution_data
+
+    @staticmethod
+    def extract_log_arn_parts_from(
+        logging_configuration: LoggingConfiguration,
+    ) -> Optional[tuple[str, str, str]]:
+        # Returns a tuple with: account_id, region, and log group name if the logging configuration
+        # specifies a valid CloudWatch log group ARN, None otherwise.
+
+        destinations = logging_configuration.get("destinations")
+        if not destinations or len(destinations) > 1:  # Only one destination can be defined.
+            return None
+
+        log_group = destinations[0].get("cloudWatchLogsLogGroup")
+        if not log_group:
+            return None
+
+        log_group_arn = log_group.get("logGroupArn")
+        if not log_group_arn:
+            return None
+
+        try:
+            arn_data: ArnData = parse_arn(log_group_arn)
+        except InvalidArnException:
+            return None
+
+        log_region = arn_data.get("region")
+        if log_region is None:
+            return None
+
+        log_account_id = arn_data.get("account")
+        if log_account_id is None:
+            return None
+
+        log_resource = arn_data.get("resource")
+        if log_resource is None:
+            return None
+
+        log_resource_parts = log_resource.split("log-group:")
+        if not log_resource_parts:
+            return None
+
+        log_group_name = log_resource_parts[-1].split(":")[0]
+        return log_account_id, log_region, log_group_name
+
+    @staticmethod
+    def from_logging_configuration(
+        state_machine_arn: LongArn,
+        logging_configuration: LoggingConfiguration,
+    ) -> Optional[CloudWatchLoggingConfiguration]:
+        log_level = logging_configuration.get("level", LogLevel.OFF)
+        if log_level == LogLevel.OFF:
+            return None
+
+        log_arn_parts = CloudWatchLoggingConfiguration.extract_log_arn_parts_from(
+            logging_configuration=logging_configuration
+        )
+        if not log_arn_parts:
+            return None
+        log_account_id, log_region, log_group_name = log_arn_parts
+
+        include_execution_data = logging_configuration["includeExecutionData"]
+
+        return CloudWatchLoggingConfiguration(
+            state_machine_arn=state_machine_arn,
+            log_account_id=log_account_id,
+            log_region=log_region,
+            log_group_name=log_group_name,
+            log_level=log_level,
+            include_execution_data=include_execution_data,
+        )
+
+    def validate(self) -> None:
+        # Asserts that the logging configuration can be used for logging.
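+        # Sketch of the validation approach (editor's note): mirror AWS by creating
+        # the empty validation log stream in the destination group; a stream left
+        # over from an earlier validation run is tolerated below.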
+        logs_client = connect_to(
+            aws_access_key_id=self.log_account_id, region_name=self.log_region
+        ).logs
+        try:
+            logs_client.create_log_stream(
+                logGroupName=self.log_group_name, logStreamName=VALIDATION_LOG_STREAM_NAME
+            )
+        except ClientError as error:
+            error_code = error.response["Error"]["Code"]
+            if error_code != "ResourceAlreadyExistsException":
+                raise InvalidLoggingConfiguration(
+                    "Invalid Logging Configuration: Log Destination not found."
+                )
+
+
+class HistoryLog(TypedDict):
+    id: str
+    previous_event_id: str
+    event_timestamp: datetime
+    type: HistoryEventType
+    execution_arn: LongArn
+    details: NotRequired[ExecutionEventLogDetails]
+
+
+class CloudWatchLoggingSession:
+    execution_arn: Final[LongArn]
+    configuration: Final[CloudWatchLoggingConfiguration]
+    _logs_client: Final[BaseClient]
+    _is_log_stream_available: bool
+
+    def __init__(self, execution_arn: LongArn, configuration: CloudWatchLoggingConfiguration):
+        self.execution_arn = execution_arn
+        self.configuration = configuration
+        self._logs_client = connect_to(
+            aws_access_key_id=self.configuration.log_account_id,
+            region_name=self.configuration.log_region,
+        ).logs
+
+    def log_level_filter(self, history_event_type: HistoryEventType) -> bool:
+        # Checks whether the history event type should be logged in this session.
+        return is_logging_enabled_for(
+            log_level=self.configuration.log_level, history_event_type=history_event_type
+        )
+
+    def publish_history_log(self, history_log: HistoryLog) -> None:
+        timestamp_value = int(history_log["event_timestamp"].timestamp() * 1000)
+        message = to_json_str(history_log)
+        log_events = [InputLogEvent(timestamp=timestamp_value, message=message)]
+        LOG.debug(
+            "New CloudWatch Log for execution '%s' with message: '%s'",
+            self.execution_arn,
+            message,
+        )
+        self._publish_history_log_or_setup(log_events=log_events)
+
+    def _publish_history_log_or_setup(self, log_events: list[InputLogEvent]):
+        # Attempts to put the events into the given log group and stream, and attempts
+        # to create the stream if it does not already exist.
+        is_events_put = self._put_events(log_events=log_events)
+        if is_events_put:
+            return
+
+        is_setup = self._setup()
+        if not is_setup:
+            LOG.debug(
+                "CloudWatch Log was not published due to setup errors encountered "
+                "while creating the LogStream for execution '%s'.",
+                self.execution_arn,
+            )
+            return
+
+        self._put_events(log_events=log_events)
+
+    def _put_events(self, log_events: list[InputLogEvent]) -> bool:
+        # Puts the events to the target log group and stream, and returns False if the
+        # LogGroup or LogStream could not be found, True otherwise.
+        try:
+            self._logs_client.put_log_events(
+                logGroupName=self.configuration.log_group_name,
+                logStreamName=self.configuration.log_stream_name,
+                logEvents=log_events,
+            )
+        except ClientError as error:
+            error_code = error.response["Error"]["Code"]
+            if error_code == "ResourceNotFoundException":
+                return False
+        except Exception as ignored:
+            LOG.warning(
+                "State Machine execution log event could not be published due to an error: '%s'",
+                ignored,
+            )
+        return True
+
+    def _setup(self) -> bool:
+        # Create the log stream if one does not exist already.
+        # TODO: enhance the verification logic to match AWS's logic to ensure IAM features work as expected.
+        # https://docs.aws.amazon.com/step-functions/latest/dg/cw-logs.html#cloudwatch-iam-policy
+        try:
+            self._logs_client.create_log_stream(
+                logGroupName=self.configuration.log_group_name,
+                logStreamName=self.configuration.log_stream_name,
+            )
+        except ClientError as error:
+            error_code = error.response["Error"]["Code"]
+            if error_code != "ResourceAlreadyExistsException":
+                LOG.error(
+                    "Could not create execution log stream for execution '%s' due to %s",
+                    self.execution_arn,
+                    error,
+                )
+                return False
+        return True
diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/program_state.py b/localstack-core/localstack/services/stepfunctions/asl/eval/program_state.py
new file mode 100644
index 0000000000000..00f3af00cb82f
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/eval/program_state.py
@@ -0,0 +1,64 @@
+import abc
+from typing import Final, Optional
+
+from localstack.aws.api.stepfunctions import ExecutionFailedEventDetails, Timestamp
+
+
+class ProgramState(abc.ABC): ...
+
+
+class ProgramEnded(ProgramState):
+    pass
+
+
+class ProgramStopped(ProgramState):
+    def __init__(self, stop_date: Timestamp, error: Optional[str], cause: Optional[str]):
+        super().__init__()
+        self.stop_date: Timestamp = stop_date
+        self.error: Optional[str] = error
+        self.cause: Optional[str] = cause
+
+
+class ProgramRunning(ProgramState):
+    _next_state_name: Optional[str]
+    _next_field_name: Optional[str]
+
+    def __init__(self):
+        super().__init__()
+        self._next_state_name = None
+        self._next_field_name = None
+
+    @property
+    def next_state_name(self) -> str:
+        next_state_name = self._next_state_name
+        if next_state_name is None:
+            raise RuntimeError("Could not retrieve NextState from uninitialised ProgramState.")
+        return next_state_name
+
+    @next_state_name.setter
+    def next_state_name(self, next_state_name) -> None:
+        self._next_state_name = next_state_name
+        self._next_field_name = None
+
+    @property
+    def next_field_name(self) -> str:
+        return self._next_field_name
+
+    @next_field_name.setter
+    def next_field_name(self, next_field_name) -> None:
+        next_state_name = self._next_state_name
+        if next_state_name is None:
+            raise RuntimeError("Could not set NextField from uninitialised ProgramState.")
+        self._next_field_name = next_field_name
+
+
+class ProgramError(ProgramState):
+    error: Final[Optional[ExecutionFailedEventDetails]]
+
+    def __init__(self, error: Optional[ExecutionFailedEventDetails]):
+        super().__init__()
+        self.error = error
+
+
+class ProgramTimedOut(ProgramState):
+    pass
diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/states.py b/localstack-core/localstack/services/stepfunctions/asl/eval/states.py
new file mode 100644
index 0000000000000..295e4149344e7
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/eval/states.py
@@ -0,0 +1,155 @@
+import copy
+from typing import Any, Final, NotRequired, Optional, TypedDict
+
+from localstack.services.stepfunctions.asl.jsonata.jsonata import (
+    VariableDeclarations,
+    VariableReference,
+    encode_jsonata_variable_declarations,
+)
+from localstack.services.stepfunctions.asl.utils.json_path import extract_json
+from localstack.utils.strings import long_uid
+
+_STATES_PREFIX: Final[str] = "$states"
+_STATES_INPUT_PREFIX: Final[str] = "$states.input"
+_STATES_CONTEXT_PREFIX: Final[str] = "$states.context"
+_STATES_RESULT_PREFIX: Final[str] = "$states.result"
+_STATES_ERROR_OUTPUT_PREFIX: Final[str] = "$states.errorOutput"
+
+
+class ExecutionData(TypedDict):
+    Id: str
+    Input: Optional[Any]
+    Name: str
+    RoleArn: str
+    StartTime: str  # Format: ISO 8601.
+
+
+class StateData(TypedDict):
+    EnteredTime: str  # Format: ISO 8601.
+    Name: str
+    RetryCount: int
+
+
+class StateMachineData(TypedDict):
+    Id: str
+    Name: str
+
+
+class TaskData(TypedDict):
+    Token: str
+
+
+class ItemData(TypedDict):
+    # Contains the index number for the array item currently being processed.
+    Index: int
+    # Contains the array item being processed.
+    Value: Optional[Any]
+
+
+class MapData(TypedDict):
+    Item: ItemData
+
+
+class ContextObjectData(TypedDict):
+    Execution: ExecutionData
+    State: NotRequired[StateData]
+    StateMachine: StateMachineData
+    Task: NotRequired[TaskData]  # Null if the Parameters field is outside a task state.
+    Map: NotRequired[MapData]  # Only available when processing a Map state.
+
+
+class ContextObject:
+    context_object_data: Final[ContextObjectData]
+
+    def __init__(self, context_object: ContextObjectData):
+        self.context_object_data = context_object
+
+    def update_task_token(self) -> str:
+        new_token = long_uid()
+        self.context_object_data["Task"] = TaskData(Token=new_token)
+        return new_token
+
+
+class StatesData(TypedDict):
+    input: Any
+    context: ContextObjectData
+    result: NotRequired[Optional[Any]]
+    errorOutput: NotRequired[Optional[Any]]
+
+
+class States:
+    _states_data: Final[StatesData]
+    context_object: Final[ContextObject]
+
+    def __init__(self, context: ContextObjectData):
+        input_value = context["Execution"]["Input"]
+        self._states_data = StatesData(input=input_value, context=context)
+        self.context_object = ContextObject(context_object=context)
+
+    @staticmethod
+    def _extract(query: Optional[str], data: Any) -> Any:
+        if query is None:
+            result = data
+        else:
+            result = extract_json(query, data)
+        return copy.deepcopy(result)
+
+    def extract(self, query: str) -> Any:
+        if not query.startswith(_STATES_PREFIX):
+            raise RuntimeError(f"No such variable {query} in $states")
+        jsonpath_states_query = "$." + query[1:]
+        return self._extract(jsonpath_states_query, self._states_data)
+
+    def get_input(self, query: Optional[str] = None) -> Any:
+        return self._extract(query, self._states_data["input"])
+
+    def reset(self, input_value: Any) -> None:
+        clone_input_value = copy.deepcopy(input_value)
+        self._states_data["input"] = clone_input_value
+        self._states_data["result"] = None
+        self._states_data["errorOutput"] = None
+
+    def get_context(self, query: Optional[str] = None) -> Any:
+        return self._extract(query, self._states_data["context"])
+
+    def get_result(self, query: Optional[str] = None) -> Any:
+        if "result" not in self._states_data:
+            raise RuntimeError("Illegal access to $states.result")
+        return self._extract(query, self._states_data["result"])
+
+    def set_result(self, result: Any) -> Any:
+        clone_result = copy.deepcopy(result)
+        self._states_data["result"] = clone_result
+
+    def get_error_output(self, query: Optional[str] = None) -> Any:
+        if "errorOutput" not in self._states_data:
+            raise RuntimeError("Illegal access to $states.errorOutput")
+        return self._extract(query, self._states_data["errorOutput"])
+
+    def set_error_output(self, error_output: Any) -> None:
+        clone_error_output = copy.deepcopy(error_output)
+        self._states_data["errorOutput"] = clone_error_output
+
+    def to_variable_declarations(
+        self, variable_references: Optional[set[VariableReference]] = None
+    ) -> VariableDeclarations:
+        if not variable_references or _STATES_PREFIX in variable_references:
+            return encode_jsonata_variable_declarations(
+                bindings={_STATES_PREFIX: self._states_data}
+            )
+        candidate_sub_states = {
+            "input": _STATES_INPUT_PREFIX,
+            "context": _STATES_CONTEXT_PREFIX,
+            "result": _STATES_RESULT_PREFIX,
+            "errorOutput": _STATES_ERROR_OUTPUT_PREFIX,
+        }
+        sub_states = dict()
+        for variable_reference in variable_references:
+            if not candidate_sub_states:
+                break
+            for sub_states_key, sub_states_prefix in candidate_sub_states.items():
+                if variable_reference.startswith(sub_states_prefix):
+                    sub_states[sub_states_key] = self._states_data[sub_states_key]  # noqa
+                    del candidate_sub_states[sub_states_key]
+                    break
+        return encode_jsonata_variable_declarations(bindings={_STATES_PREFIX: sub_states})
diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/test_state/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/eval/test_state/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/test_state/environment.py b/localstack-core/localstack/services/stepfunctions/asl/eval/test_state/environment.py
new file mode 100644
index 0000000000000..8db4b0e427cac
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/eval/test_state/environment.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+from typing import Optional
+
+from localstack.aws.api.stepfunctions import Arn, InspectionData, StateMachineType
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.evaluation_details import AWSExecutionDetails
+from localstack.services.stepfunctions.asl.eval.event.event_manager import (
+    EventHistoryContext,
+)
+from localstack.services.stepfunctions.asl.eval.event.logging import (
+    CloudWatchLoggingSession,
+)
+from localstack.services.stepfunctions.asl.eval.program_state import (
+    ProgramRunning,
+)
+from localstack.services.stepfunctions.asl.eval.states import ContextObjectData
+from localstack.services.stepfunctions.asl.eval.test_state.program_state import (
+    ProgramChoiceSelected,
+)
+from localstack.services.stepfunctions.asl.eval.variable_store import VariableStore
+from localstack.services.stepfunctions.backend.activity import Activity
+
+
+class TestStateEnvironment(Environment):
+    inspection_data: InspectionData
+
+    def __init__(
+        self,
+        aws_execution_details: AWSExecutionDetails,
+        execution_type: StateMachineType,
+        context: ContextObjectData,
+        event_history_context: EventHistoryContext,
+        activity_store: dict[Arn, Activity],
+        cloud_watch_logging_session: Optional[CloudWatchLoggingSession] = None,
+    ):
+        super().__init__(
+            aws_execution_details=aws_execution_details,
+            execution_type=execution_type,
+            context=context,
+            event_history_context=event_history_context,
+            cloud_watch_logging_session=cloud_watch_logging_session,
+            activity_store=activity_store,
+        )
+        self.inspection_data = InspectionData()
+
+    @classmethod
+    def as_frame_of(
+        cls,
+        env: TestStateEnvironment,
+        event_history_frame_cache: Optional[EventHistoryContext] = None,
+    ) -> Environment:
+        frame = super().as_frame_of(env=env, event_history_frame_cache=event_history_frame_cache)
+        frame.inspection_data = env.inspection_data
+        return frame
+
+    @classmethod
+    def as_inner_frame_of(
+        cls,
+        env: TestStateEnvironment,
+        variable_store: VariableStore,
+        event_history_frame_cache: Optional[EventHistoryContext] = None,
+    ) -> Environment:
+        frame = super().as_inner_frame_of(
+            env=env,
+            event_history_frame_cache=event_history_frame_cache,
+            variable_store=variable_store,
+        )
+        frame.inspection_data = env.inspection_data
+        return frame
+
+    def set_choice_selected(self, next_state_name: str) -> None:
+        with self._state_mutex:
+            if isinstance(self._program_state, ProgramRunning):
+                self._program_state = ProgramChoiceSelected(next_state_name=next_state_name)
+                self.program_state_event.set()
+                self.program_state_event.clear()
+            else:
+                raise RuntimeError("Cannot set choice selected for a non-running ProgramState.")
diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/test_state/program_state.py b/localstack-core/localstack/services/stepfunctions/asl/eval/test_state/program_state.py
new file mode 100644
index 0000000000000..d9576ceda285b
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/eval/test_state/program_state.py
@@ -0,0 +1,11 @@
+from typing import Final
+
+from localstack.services.stepfunctions.asl.eval.program_state import ProgramState
+
+
+class ProgramChoiceSelected(ProgramState):
+    next_state_name: Final[str]
+
+    def __init__(self, next_state_name: str):
+        super().__init__()
+        self.next_state_name = next_state_name
diff --git a/localstack-core/localstack/services/stepfunctions/asl/eval/variable_store.py b/localstack-core/localstack/services/stepfunctions/asl/eval/variable_store.py
new file mode 100644
index 0000000000000..055fb9355ca5c
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/eval/variable_store.py
@@ -0,0 +1,121 @@
+from __future__ import annotations
+
+from typing import Any, Final, Optional
+
+from localstack.services.stepfunctions.asl.jsonata.jsonata import (
+    VariableDeclarations,
+    encode_jsonata_variable_declarations,
+)
+from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
+
+VariableIdentifier = str
+VariableValue = Any
+
+
+class VariableStoreError(RuntimeError):
+    message: Final[str]
+
+    def __init__(self, message: str):
+        self.message = message
+
+    def __str__(self):
+        return f"{self.__class__.__name__} {self.message}"
{self.message}" + + def __repr__(self): + return str(self) + + +class NoSuchVariable(VariableStoreError): + variable_identifier: Final[VariableIdentifier] + + def __init__(self, variable_identifier: VariableIdentifier): + super().__init__(message=f"No such variable '{variable_identifier}' in scope") + self.variable_identifier = variable_identifier + + +class IllegalOuterScopeWrite(VariableStoreError): + variable_identifier: Final[VariableIdentifier] + variable_value: Final[VariableValue] + + def __init__(self, variable_identifier: VariableIdentifier, variable_value: VariableValue): + super().__init__( + message=f"Cannot bind value '{variable_value}' to variable '{variable_identifier}' as it belongs to an outer scope." + ) + self.variable_identifier = variable_identifier + self.variable_value = variable_value + + +class VariableStore: + _outer_scope: Final[dict] + _inner_scope: Final[dict] + + _declaration_tracing: Final[set[str]] + + _outer_variable_declaration_cache: Optional[VariableDeclarations] + _variable_declarations_cache: Optional[VariableDeclarations] + + def __init__(self): + self._outer_scope = dict() + self._inner_scope = dict() + self._declaration_tracing = set() + self._outer_variable_declaration_cache = None + self._variable_declarations_cache = None + + @classmethod + def as_inner_scope_of(cls, outer_variable_store: VariableStore) -> VariableStore: + inner_variable_store = cls() + inner_variable_store._outer_scope.update(outer_variable_store._outer_scope) + inner_variable_store._outer_scope.update(outer_variable_store._inner_scope) + return inner_variable_store + + def reset_tracing(self) -> None: + self._declaration_tracing.clear() + + # TODO: add typing when this available in service init. + def get_assigned_variables(self) -> dict[str, str]: + assigned_variables: dict[str, str] = dict() + for traced_declaration_identifier in self._declaration_tracing: + traced_declaration_value = self.get(traced_declaration_identifier) + if isinstance(traced_declaration_value, str): + traced_declaration_value_json_str = f'"{traced_declaration_value}"' + else: + traced_declaration_value_json_str: str = to_json_str( + traced_declaration_value, separators=(",", ":") + ) + assigned_variables[traced_declaration_identifier] = traced_declaration_value_json_str + return assigned_variables + + def get(self, variable_identifier: VariableIdentifier) -> VariableValue: + if variable_identifier in self._inner_scope: + return self._inner_scope[variable_identifier] + if variable_identifier in self._outer_scope: + return self._outer_scope[variable_identifier] + raise NoSuchVariable(variable_identifier=variable_identifier) + + def set(self, variable_identifier: VariableIdentifier, variable_value: VariableValue) -> None: + if variable_identifier in self._outer_scope: + raise IllegalOuterScopeWrite( + variable_identifier=variable_identifier, variable_value=variable_value + ) + self._declaration_tracing.add(variable_identifier) + self._inner_scope[variable_identifier] = variable_value + self._variable_declarations_cache = None + + @staticmethod + def _to_variable_declarations(bindings: dict[str, Any]) -> VariableDeclarations: + variables = {f"${key}": value for key, value in bindings.items()} + encoded = encode_jsonata_variable_declarations(variables) + return encoded + + def get_variable_declarations(self) -> VariableDeclarations: + if self._variable_declarations_cache is not None: + return self._variable_declarations_cache + if self._outer_variable_declaration_cache is None: + 
+            self._outer_variable_declaration_cache = self._to_variable_declarations(
+                self._outer_scope
+            )
+        inner_variable_declarations_cache = self._to_variable_declarations(self._inner_scope)
+        self._variable_declarations_cache = "".join(
+            [self._outer_variable_declaration_cache, inner_variable_declarations_cache]
+        )
+        return self._variable_declarations_cache
diff --git a/localstack-core/localstack/services/stepfunctions/asl/jsonata/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/jsonata/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/jsonata/jsonata.py b/localstack-core/localstack/services/stepfunctions/asl/jsonata/jsonata.py
new file mode 100644
index 0000000000000..1fa837f68815e
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/jsonata/jsonata.py
@@ -0,0 +1,156 @@
+from __future__ import annotations
+
+import json
+import re
+from pathlib import Path
+from typing import Any, Callable, Final, Optional
+
+import jpype
+import jpype.imports
+
+from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
+from localstack.services.stepfunctions.packages import jpype_jsonata_package
+from localstack.utils.objects import singleton_factory
+
+JSONataExpression = str
+VariableReference = str
+VariableDeclarations = str
+
+_PATTERN_VARIABLE_REFERENCE: Final[re.Pattern] = re.compile(
+    r"\$\$|\$[a-zA-Z0-9_$]+(?:\.[a-zA-Z0-9_][a-zA-Z0-9_$]*)*|\$"
+)
+_ILLEGAL_VARIABLE_REFERENCES: Final[set[str]] = {"$", "$$"}
+_VARIABLE_REFERENCE_ASSIGNMENT_OPERATOR: Final[str] = ":="
+_VARIABLE_REFERENCE_ASSIGNMENT_STOP_SYMBOL: Final[str] = ";"
+_EXPRESSION_OPEN_SYMBOL: Final[str] = "("
+_EXPRESSION_CLOSE_SYMBOL: Final[str] = ")"
+
+
+class JSONataException(Exception):
+    error: Final[str]
+    details: Optional[str]
+
+    def __init__(self, error: str, details: Optional[str]):
+        self.error = error
+        self.details = details
+
+
+class _JSONataJVMBridge:
+    _java_OBJECT_MAPPER: "com.fasterxml.jackson.databind.ObjectMapper"  # noqa
+    _java_JSONATA: "com.dashjoin.jsonata.Jsonata.jsonata"  # noqa
+
+    def __init__(self):
+        installer = jpype_jsonata_package.get_installer()
+        installer.install()
+
+        from jpype import config as jpype_config
+
+        jpype_config.destroy_jvm = False
+
+        # Limitation: We can only start one JVM instance within LocalStack and using JPype for another purpose
+        # (e.g., event-ruler) fails unless we change the way we load/reload the classpath.
+        jvm_path = installer.get_java_lib_path()
+        jsonata_libs_path = Path(installer.get_installed_dir())
+        jsonata_libs_pattern = jsonata_libs_path.joinpath("*")
+        jpype.startJVM(jvm_path, classpath=[jsonata_libs_pattern], interrupt=False)
+
+        from com.fasterxml.jackson.databind import ObjectMapper  # noqa
+        from com.dashjoin.jsonata.Jsonata import jsonata  # noqa
+
+        self._java_OBJECT_MAPPER = ObjectMapper()
+        self._java_JSONATA = jsonata
+
+    @staticmethod
+    @singleton_factory
+    def get() -> _JSONataJVMBridge:
+        return _JSONataJVMBridge()
+
+    def eval_jsonata(self, jsonata_expression: JSONataExpression) -> Any:
+        try:
+            # Evaluate the JSONata expression with the JVM.
+            # TODO: Investigate whether it is worth moving this chain of statements (java_*) to a
+            # Java program to reduce i/o between the JVM and this runtime.
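+            # Editor's note on the flow below: compile the expression, evaluate it
+            # against a null input, then serialize the resulting Java value to a JSON
+            # string via Jackson's ObjectMapper before crossing back into Python.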
+            java_expression = self._java_JSONATA(jsonata_expression)
+            java_output = java_expression.evaluate(None)
+            java_output_string = self._java_OBJECT_MAPPER.writeValueAsString(java_output)
+
+            # Compute a Python json object from the java string; this is to:
+            # 1. Ensure we fully end interactions with the JVM about this value here;
+            # 2. The output object may undergo operations that are not compatible
+            #    with jpype objects (such as json.dumps, equality, instanceof, etc.).
+            result_str: str = str(java_output_string)
+            result_json = json.loads(result_str)
+
+            return result_json
+        except Exception as ex:
+            raise JSONataException("UNKNOWN", str(ex))
+
+
+# Lazy initialization of the `eval_jsonata` function pointer.
+# This ensures the JVM is only started when JSONata functionality is needed.
+_eval_jsonata: Optional[Callable[[JSONataExpression], Any]] = None
+
+
+def eval_jsonata_expression(jsonata_expression: JSONataExpression) -> Any:
+    global _eval_jsonata
+    if _eval_jsonata is None:
+        # Initialize _eval_jsonata only when invoked for the first time using the Singleton pattern.
+        _eval_jsonata = _JSONataJVMBridge.get().eval_jsonata
+    return _eval_jsonata(jsonata_expression)
+
+
+class IllegalJSONataVariableReference(ValueError):
+    variable_reference: Final[VariableReference]
+
+    def __init__(self, variable_reference: VariableReference):
+        self.variable_reference = variable_reference
+
+
+def extract_jsonata_variable_references(
+    jsonata_expression: JSONataExpression,
+) -> set[VariableReference]:
+    if not jsonata_expression:
+        return set()
+    variable_references: list[VariableReference] = _PATTERN_VARIABLE_REFERENCE.findall(
+        jsonata_expression
+    )
+    for variable_reference in variable_references:
+        if variable_reference in _ILLEGAL_VARIABLE_REFERENCES:
+            raise IllegalJSONataVariableReference(variable_reference=variable_reference)
+    return set(variable_references)
+
+
+def encode_jsonata_variable_declarations(
+    bindings: dict[VariableReference, Any],
+) -> VariableDeclarations:
+    declarations_parts: list[str] = list()
+    for variable_reference, value in bindings.items():
+        if isinstance(value, str):
+            value_str_lit = f'"{value}"'
+        else:
+            value_str_lit = to_json_str(value, separators=(",", ":"))
+        declarations_parts.extend(
+            [
+                variable_reference,
+                _VARIABLE_REFERENCE_ASSIGNMENT_OPERATOR,
+                value_str_lit,
+                _VARIABLE_REFERENCE_ASSIGNMENT_STOP_SYMBOL,
+            ]
+        )
+    return "".join(declarations_parts)
+
+
+def compose_jsonata_expression(
+    final_jsonata_expression: JSONataExpression,
+    variable_declarations_list: list[VariableDeclarations],
+) -> JSONataExpression:
+    variable_declarations = "".join(variable_declarations_list)
+    expression = "".join(
+        [
+            _EXPRESSION_OPEN_SYMBOL,
+            variable_declarations,
+            final_jsonata_expression,
+            _EXPRESSION_CLOSE_SYMBOL,
+        ]
+    )
+    return expression
diff --git a/localstack-core/localstack/services/stepfunctions/asl/jsonata/validations.py b/localstack-core/localstack/services/stepfunctions/asl/jsonata/validations.py
new file mode 100644
index 0000000000000..defc6bfe08517
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/jsonata/validations.py
@@ -0,0 +1,91 @@
+from typing import Final
+
+from localstack.aws.api.stepfunctions import (
+    EvaluationFailedEventDetails,
+    HistoryEventType,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
+    FailureEvent,
+    FailureEventException,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.eval.environment import Environment
+from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
+from localstack.services.stepfunctions.asl.jsonata.jsonata import (
+    eval_jsonata_expression,
+)
+
+_SUPPORTED_JSONATA_TYPES: Final[set[str]] = {
+    "null",
+    "number",
+    "string",
+    "boolean",
+    "array",
+    "object",
+}
+
+
+def _validate_null_output(env: Environment, expression: str, rich_jsonata_expression: str) -> None:
+    exists: bool = eval_jsonata_expression(f"$exists({rich_jsonata_expression})")
+    if exists:
+        return
+    error_name = StatesErrorName(typ=StatesErrorNameType.StatesQueryEvaluationError)
+    failure_event = FailureEvent(
+        env=env,
+        error_name=error_name,
+        event_type=HistoryEventType.EvaluationFailed,
+        event_details=EventDetails(
+            evaluationFailedEventDetails=EvaluationFailedEventDetails(
+                # TODO: Add snapshot test to investigate behaviour for field string
+                cause=f"The JSONata expression '{expression}' returned nothing (undefined).",
+                error=error_name.error_name,
+            )
+        ),
+    )
+    raise FailureEventException(failure_event=failure_event)
+
+
+def _validate_string_output(
+    env: Environment, expression: str, rich_jsonata_expression: str
+) -> None:
+    jsonata_type: str = eval_jsonata_expression(f"$type({rich_jsonata_expression})")
+    if jsonata_type in _SUPPORTED_JSONATA_TYPES:
+        return
+    error_name = StatesErrorName(typ=StatesErrorNameType.StatesQueryEvaluationError)
+    failure_event = FailureEvent(
+        env=env,
+        error_name=error_name,
+        event_type=HistoryEventType.EvaluationFailed,
+        event_details=EventDetails(
+            evaluationFailedEventDetails=EvaluationFailedEventDetails(
+                # TODO: Add snapshot test to investigate behaviour for field string
+                cause=f"The JSONata expression '{expression}' returned an unsupported result type.",
+                error=error_name.error_name,
+            )
+        ),
+    )
+    raise FailureEventException(failure_event=failure_event)
+
+
+def validate_jsonata_expression_output(
+    env: Environment, expression: str, rich_jsonata_expression: str, jsonata_result: str
+) -> None:
+    try:
+        if jsonata_result is None:
+            _validate_null_output(env, expression, rich_jsonata_expression)
+        if isinstance(jsonata_result, str):
+            _validate_string_output(env, expression, rich_jsonata_expression)
+    except FailureEventException as ex:
+        env.event_manager.add_event(
+            context=env.event_history_context,
+            event_type=HistoryEventType.EvaluationFailed,
+            event_details=EventDetails(
+                evaluationFailedEventDetails=ex.get_evaluation_failed_event_details()
+            ),
+        )
+        raise ex
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/parse/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/asl_parser.py b/localstack-core/localstack/services/stepfunctions/asl/parse/asl_parser.py
new file mode 100644
index 0000000000000..29c9c93f53bf5
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/parse/asl_parser.py
@@ -0,0 +1,71 @@
+import abc
+from typing import Final
+
+from antlr4 import CommonTokenStream, InputStream, ParserRuleContext
+from antlr4.error.ErrorListener import ErrorListener
+
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.parse.preprocessor import Preprocessor
+
+
+class SyntaxErrorListener(ErrorListener):
+    errors: Final[list[str]]
+
+    def __init__(self):
+        super().__init__()
+        self.errors = list()
+
+    def syntaxError(self, recognizer, offending_symbol, line, column, message, exception):
+        log_parts = [f"line {line}:{column}"]
+        if offending_symbol is not None and offending_symbol.text:
+            log_parts.append(f"at {offending_symbol.text}")
+        if message:
+            log_parts.append(message)
+        error_log = ", ".join(log_parts)
+        self.errors.append(error_log)
+
+
+class ASLParserException(Exception):
+    errors: Final[list[str]]
+
+    def __init__(self, errors: list[str]):
+        self.errors = errors
+
+    def __str__(self):
+        return repr(self)
+
+    def __repr__(self):
+        if not self.errors:
+            error_str = "No error details available"
+        elif len(self.errors) == 1:
+            error_str = self.errors[0]
+        else:
+            error_str = str(self.errors)
+        return f"ASLParserException {error_str}"
+
+
+class AmazonStateLanguageParser(abc.ABC):
+    @staticmethod
+    def parse(definition: str) -> tuple[EvalComponent, ParserRuleContext]:
+        # Attempt to build the AST and look out for syntax errors.
+        syntax_error_listener = SyntaxErrorListener()
+
+        input_stream = InputStream(definition)
+        lexer = ASLLexer(input_stream)
+        stream = CommonTokenStream(lexer)
+        parser = ASLParser(stream)
+        parser.removeErrorListeners()
+        parser.addErrorListener(syntax_error_listener)
+        tree = parser.state_machine()
+
+        errors = syntax_error_listener.errors
+        if errors:
+            raise ASLParserException(errors=errors)
+
+        # Attempt to preprocess the AST into evaluation components.
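+        # Editor's note: the Preprocessor is an ASLParserVisitor that lowers the ANTLR
+        # parse tree into EvalComponent instances; 'program' below is the root
+        # component, returned to the caller together with the raw parse tree.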
+        preprocessor = Preprocessor()
+        program = preprocessor.visit(tree)
+
+        return program, tree
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/intrinsic/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/parse/intrinsic/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/intrinsic/intrinsic_parser.py b/localstack-core/localstack/services/stepfunctions/asl/parse/intrinsic/intrinsic_parser.py
new file mode 100644
index 0000000000000..b72696298cb19
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/parse/intrinsic/intrinsic_parser.py
@@ -0,0 +1,24 @@
+import abc
+
+from antlr4 import CommonTokenStream, InputStream
+from antlr4.ParserRuleContext import ParserRuleContext
+
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLIntrinsicLexer import ASLIntrinsicLexer
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLIntrinsicParser import (
+    ASLIntrinsicParser,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.function import Function
+from localstack.services.stepfunctions.asl.parse.intrinsic.preprocessor import Preprocessor
+
+
+class IntrinsicParser(abc.ABC):
+    @staticmethod
+    def parse(src: str) -> tuple[Function, ParserRuleContext]:
+        input_stream = InputStream(src)
+        lexer = ASLIntrinsicLexer(input_stream)
+        stream = CommonTokenStream(lexer)
+        parser = ASLIntrinsicParser(stream)
+        tree = parser.func_decl()
+        preprocessor = Preprocessor()
+        function: Function = preprocessor.visit(tree)
+        return function, tree
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/intrinsic/preprocessor.py b/localstack-core/localstack/services/stepfunctions/asl/parse/intrinsic/preprocessor.py
new file mode 100644
index 0000000000000..c25f0345b1b0d
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/parse/intrinsic/preprocessor.py
@@ -0,0 +1,139 @@
+import re
+from typing import Optional
+
+from antlr4.tree.Tree import ParseTree, TerminalNodeImpl
+
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLIntrinsicLexer import ASLIntrinsicLexer
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLIntrinsicParser import (
+    ASLIntrinsicParser,
+)
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLIntrinsicParserVisitor import (
+    ASLIntrinsicParserVisitor,
+)
+from localstack.services.stepfunctions.asl.antlt4utils.antlr4utils import (
+    is_production,
+    is_terminal,
+)
+from localstack.services.stepfunctions.asl.component.common.query_language import (
+    QueryLanguageMode,
+)
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringVariableSample,
+)
+from localstack.services.stepfunctions.asl.component.component import Component
+from localstack.services.stepfunctions.asl.component.intrinsic.argument.argument import (
+    Argument,
+    ArgumentContextPath,
+    ArgumentFunction,
+    ArgumentJsonPath,
+    ArgumentList,
+    ArgumentLiteral,
+    ArgumentVar,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.function import Function
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.factory import (
+    StatesFunctionFactory,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.function.statesfunction.states_function import (
+    StatesFunction,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.state_function_name_types import (
+    StatesFunctionNameType,
+)
+from localstack.services.stepfunctions.asl.component.intrinsic.functionname.states_function_name import (
+    StatesFunctionName,
+)
+
+
+class Preprocessor(ASLIntrinsicParserVisitor):
+    @staticmethod
+    def _replace_escaped_characters(match):
+        escaped_char = match.group(1)
+        if escaped_char.isalpha():
+            replacements = {"n": "\n", "t": "\t", "r": "\r"}
+            return replacements.get(escaped_char, escaped_char)
+        elif escaped_char == '"':
+            return '"'
+        else:
+            return match.group(0)
+
+    @staticmethod
+    def _text_of_str(parse_tree: ParseTree) -> str:
+        pt = is_production(parse_tree) or is_terminal(parse_tree)
+        inner_str = pt.getText()
+        inner_str = inner_str[1:-1]
+        inner_str = re.sub(r"\\(.)", Preprocessor._replace_escaped_characters, inner_str)
+        return inner_str
+
+    def visitFunc_arg_int(self, ctx: ASLIntrinsicParser.Func_arg_intContext) -> ArgumentLiteral:
+        integer = int(ctx.INT().getText())
+        return ArgumentLiteral(definition_value=integer)
+
+    def visitFunc_arg_float(self, ctx: ASLIntrinsicParser.Func_arg_floatContext) -> ArgumentLiteral:
+        number = float(ctx.NUMBER().getText())
+        return ArgumentLiteral(definition_value=number)
+
+    def visitFunc_arg_string(
+        self, ctx: ASLIntrinsicParser.Func_arg_stringContext
+    ) -> ArgumentLiteral:
+        text: str = self._text_of_str(ctx.STRING())
+        return ArgumentLiteral(definition_value=text)
+
+    def visitFunc_arg_bool(self, ctx: ASLIntrinsicParser.Func_arg_boolContext) -> ArgumentLiteral:
+        bool_term: TerminalNodeImpl = ctx.children[0]
+        bool_term_rule: int = bool_term.getSymbol().type
+        bool_val: bool = bool_term_rule == ASLIntrinsicLexer.TRUE
+        return ArgumentLiteral(definition_value=bool_val)
+
+    def visitFunc_arg_list(self, ctx: ASLIntrinsicParser.Func_arg_listContext) -> ArgumentList:
+        arguments: list[Argument] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, Argument):
+                arguments.append(cmp)
+        return ArgumentList(arguments=arguments)
+
+    def visitFunc_arg_context_path(
+        self, ctx: ASLIntrinsicParser.Func_arg_context_pathContext
+    ) -> ArgumentContextPath:
+        context_path: str = ctx.CONTEXT_PATH_STRING().getText()
+        return ArgumentContextPath(context_path=context_path)
+
+    def visitFunc_arg_json_path(
+        self, ctx: ASLIntrinsicParser.Func_arg_json_pathContext
+    ) -> ArgumentJsonPath:
+        json_path: str = ctx.JSON_PATH_STRING().getText()
+        return ArgumentJsonPath(json_path=json_path)
+
+    def visitFunc_arg_var(self, ctx: ASLIntrinsicParser.Func_arg_varContext) -> ArgumentVar:
+        expression: str = ctx.STRING_VARIABLE().getText()
+        string_variable_sample = StringVariableSample(
+            query_language_mode=QueryLanguageMode.JSONPath, expression=expression
+        )
+        return ArgumentVar(string_variable_sample=string_variable_sample)
+
+    def visitFunc_arg_func_decl(
+        self, ctx: ASLIntrinsicParser.Func_arg_func_declContext
+    ) -> ArgumentFunction:
+        function: Function = self.visit(ctx.states_func_decl())
+        return ArgumentFunction(function=function)
+
+    def visitState_fun_name(
+        self, ctx: ASLIntrinsicParser.State_fun_nameContext
+    ) -> StatesFunctionName:
+        tok_typ: int = ctx.children[0].symbol.type
+        name_typ = StatesFunctionNameType(tok_typ)
+        return StatesFunctionName(function_type=name_typ)
+
+    def visitStates_func_decl(
+        self, ctx: ASLIntrinsicParser.States_func_declContext
+    ) -> StatesFunction:
+        func_name: StatesFunctionName = self.visit(ctx.state_fun_name())
+        argument_list: ArgumentList = self.visit(ctx.func_arg_list())
+        func: StatesFunction = StatesFunctionFactory.from_name(
+            func_name=func_name, argument_list=argument_list
+        )
+        return func
+
+    def visitFunc_decl(self, ctx: ASLIntrinsicParser.Func_declContext) -> Function:
+        return self.visit(ctx.children[0])
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/preprocessor.py b/localstack-core/localstack/services/stepfunctions/asl/parse/preprocessor.py
new file mode 100644
index 0000000000000..93132888e920b
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/parse/preprocessor.py
@@ -0,0 +1,1511 @@
+import json
+import logging
+from typing import Any, Optional
+
+from antlr4 import ParserRuleContext
+from antlr4.tree.Tree import ParseTree, TerminalNodeImpl
+
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLParserVisitor import ASLParserVisitor
+from localstack.services.stepfunctions.asl.antlt4utils.antlr4utils import (
+    from_string_literal,
+    is_production,
+    is_terminal,
+)
+from localstack.services.stepfunctions.asl.component.common.assign.assign_decl import AssignDecl
+from localstack.services.stepfunctions.asl.component.common.assign.assign_decl_binding import (
+    AssignDeclBinding,
+)
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_binding import (
+    AssignTemplateBinding,
+    AssignTemplateBindingStringExpressionSimple,
+    AssignTemplateBindingValue,
+)
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value import (
+    AssignTemplateValue,
+)
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value_array import (
+    AssignTemplateValueArray,
+)
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value_object import (
+    AssignTemplateValueObject,
+)
+from localstack.services.stepfunctions.asl.component.common.assign.assign_template_value_terminal import (
+    AssignTemplateValueTerminal,
+    AssignTemplateValueTerminalLit,
+    AssignTemplateValueTerminalStringJSONata,
+)
+from localstack.services.stepfunctions.asl.component.common.catch.catch_decl import CatchDecl
+from localstack.services.stepfunctions.asl.component.common.catch.catcher_decl import CatcherDecl
+from localstack.services.stepfunctions.asl.component.common.catch.catcher_props import CatcherProps
+from localstack.services.stepfunctions.asl.component.common.comment import Comment
+from localstack.services.stepfunctions.asl.component.common.error_name.custom_error_name import (
+    CustomErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.error_equals_decl import (
+    ErrorEqualsDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.error_name import ErrorName
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
+    StatesErrorName,
+)
+from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
+    StatesErrorNameType,
+)
+from localstack.services.stepfunctions.asl.component.common.flow.end import End
+from localstack.services.stepfunctions.asl.component.common.flow.next import Next
+from localstack.services.stepfunctions.asl.component.common.flow.start_at import StartAt
+from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_binding import (
+    JSONataTemplateBinding,
+)
+from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value import (
+    JSONataTemplateValue,
+)
+from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value_array import (
+    JSONataTemplateValueArray,
+)
+from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value_object import (
+    JSONataTemplateValueObject,
+)
+from localstack.services.stepfunctions.asl.component.common.jsonata.jsonata_template_value_terminal import (
+    JSONataTemplateValueTerminalLit,
+    JSONataTemplateValueTerminalStringJSONata,
+)
+from localstack.services.stepfunctions.asl.component.common.outputdecl import Output
+from localstack.services.stepfunctions.asl.component.common.parargs import (
+    ArgumentsJSONataTemplateValueObject,
+    ArgumentsStringJSONata,
+    Parameters,
+    Parargs,
+)
+from localstack.services.stepfunctions.asl.component.common.path.input_path import InputPath
+from localstack.services.stepfunctions.asl.component.common.path.items_path import ItemsPath
+from localstack.services.stepfunctions.asl.component.common.path.output_path import OutputPath
+from localstack.services.stepfunctions.asl.component.common.path.result_path import ResultPath
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payload_value import (
+    PayloadValue,
+)
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadarr.payload_arr import (
+    PayloadArr,
+)
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadbinding.payload_binding import (
+    PayloadBinding,
+    PayloadBindingStringExpressionSimple,
+    PayloadBindingValue,
+)
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadtmpl.payload_tmpl import (
+    PayloadTmpl,
+)
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadvaluelit.payload_value_bool import (
+    PayloadValueBool,
+)
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadvaluelit.payload_value_float import (
+    PayloadValueFloat,
+)
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadvaluelit.payload_value_int import (
+    PayloadValueInt,
+)
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadvaluelit.payload_value_null import (
+    PayloadValueNull,
+)
+from localstack.services.stepfunctions.asl.component.common.payload.payloadvalue.payloadvaluelit.payload_value_str import (
+    PayloadValueStr,
+)
+from localstack.services.stepfunctions.asl.component.common.query_language import (
+    QueryLanguage,
+    QueryLanguageMode,
+)
+from localstack.services.stepfunctions.asl.component.common.result_selector import ResultSelector
+from localstack.services.stepfunctions.asl.component.common.retry.backoff_rate_decl import (
+    BackoffRateDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.interval_seconds_decl import (
+    IntervalSecondsDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.jitter_strategy_decl import (
+    JitterStrategy,
+    JitterStrategyDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.max_attempts_decl import (
+    MaxAttemptsDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.max_delay_seconds_decl import (
+    MaxDelaySecondsDecl,
+)
+from localstack.services.stepfunctions.asl.component.common.retry.retrier_decl import RetrierDecl
+from localstack.services.stepfunctions.asl.component.common.retry.retrier_props import RetrierProps
+from localstack.services.stepfunctions.asl.component.common.retry.retry_decl import RetryDecl
+from localstack.services.stepfunctions.asl.component.common.string.string_expression import (
+    StringContextPath,
+    StringExpression,
+    StringExpressionSimple,
+    StringIntrinsicFunction,
+    StringJSONata,
+    StringJsonPath,
+    StringLiteral,
+    StringSampler,
+    StringVariableSample,
+)
+from localstack.services.stepfunctions.asl.component.common.timeouts.heartbeat import (
+    HeartbeatSeconds,
+    HeartbeatSecondsJSONata,
+    HeartbeatSecondsPath,
+)
+from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import (
+    TimeoutSeconds,
+    TimeoutSecondsJSONata,
+    TimeoutSecondsPath,
+)
+from localstack.services.stepfunctions.asl.component.component import Component
+from localstack.services.stepfunctions.asl.component.program.program import Program
+from localstack.services.stepfunctions.asl.component.program.states import States
+from localstack.services.stepfunctions.asl.component.program.version import Version
+from localstack.services.stepfunctions.asl.component.state.state import CommonStateField
+from localstack.services.stepfunctions.asl.component.state.state_choice.choice_rule import (
+    ChoiceRule,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.choices_decl import (
+    ChoicesDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison import (
+    ComparisonComposite,
+    ComparisonCompositeAnd,
+    ComparisonCompositeNot,
+    ComparisonCompositeOr,
+    ComparisonCompositeProps,
+    ConditionJSONataLit,
+    ConditionStringJSONata,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_func import (
+    ComparisonFunc,
+    ComparisonFuncStringVariableSample,
+    ComparisonFuncValue,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_operator_type import (
+    ComparisonOperatorType,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_type import (
+    Comparison,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.comparison_variable import (
+    ComparisonVariable,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.comparison.variable import (
+    Variable,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.default_decl import (
+    DefaultDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_choice.state_choice import (
+    StateChoice,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.execution_type import (
+    ExecutionType,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.item_reader_decl import (
+    ItemReader,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.csv_header_location import (
+    CSVHeaderLocation,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.csv_headers import (
+    CSVHeaders,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.input_type import (
+    InputType,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.max_items_decl import (
+    MaxItemsDecl,
+    MaxItemsInt,
+    MaxItemsPath,
+    MaxItemsStringJSONata,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.reader_config_decl import (
+    ReaderConfig,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_reader.reader_config.reader_config_props import (
+    ReaderConfigProps,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.item_selector import (
+    ItemSelector,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.items.items import (
+    ItemsArray,
+    ItemsJSONata,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.item_processor_decl import (
+    ItemProcessorDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.processor_config import (
+    ProcessorConfig,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.iterator.iterator_decl import (
+    IteratorDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.label import (
+    Label,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.max_concurrency import (
+    MaxConcurrency,
+    MaxConcurrencyJSONata,
+    MaxConcurrencyPath,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.mode import (
+    Mode,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.result_writer.result_writer_decl import (
+    ResultWriter,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.state_map import (
+    StateMap,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.tolerated_failure import (
+    ToleratedFailureCountInt,
+    ToleratedFailureCountPath,
+    ToleratedFailurePercentage,
+    ToleratedFailurePercentagePath,
+    ToleratedFailurePercentageStringJSONata,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_parallel.branches_decl import (
+    BranchesDecl,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_parallel.state_parallel import (
+    StateParallel,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import (
+    Credentials,
+    RoleArn,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import (
+    Resource,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task_factory import (
+    state_task_for,
+)
+from localstack.services.stepfunctions.asl.component.state.state_fail.cause_decl import (
+    Cause,
+    CausePath,
+)
+from localstack.services.stepfunctions.asl.component.state.state_fail.error_decl import (
+    Error,
+    ErrorPath,
+)
+from localstack.services.stepfunctions.asl.component.state.state_fail.state_fail import StateFail
+from localstack.services.stepfunctions.asl.component.state.state_pass.result import Result
+from localstack.services.stepfunctions.asl.component.state.state_pass.state_pass import StatePass
+from localstack.services.stepfunctions.asl.component.state.state_props import StateProps
+from localstack.services.stepfunctions.asl.component.state.state_succeed.state_succeed import (
+    StateSucceed,
+)
+from localstack.services.stepfunctions.asl.component.state.state_type import StateType
+from localstack.services.stepfunctions.asl.component.state.state_wait.state_wait import StateWait
+from localstack.services.stepfunctions.asl.component.state.state_wait.wait_function.seconds import (
+    Seconds,
+    SecondsJSONata,
+)
+from localstack.services.stepfunctions.asl.component.state.state_wait.wait_function.seconds_path import (
+    SecondsPath,
+)
+from localstack.services.stepfunctions.asl.component.state.state_wait.wait_function.timestamp import (
+    Timestamp,
+    TimestampPath,
+)
+from localstack.services.stepfunctions.asl.parse.intrinsic.intrinsic_parser import IntrinsicParser
+from localstack.services.stepfunctions.asl.parse.typed_props import TypedProps
+
+LOG = logging.getLogger(__name__)
+
+
+class Preprocessor(ASLParserVisitor):
+    _query_language_per_scope: list[QueryLanguage] = list()
+
+    def _get_current_query_language(self) -> QueryLanguage:
+        return self._query_language_per_scope[-1]
+
+    def _open_query_language_scope(self, parse_tree: ParseTree) -> None:
+        production = is_production(parse_tree)
+        if production is None:
+            raise RuntimeError(f"Cannot expect QueryLanguage definition at depth: {parse_tree}")
+
+        # Extract the QueryLanguage declaration at this ParseTree level, if any.
+        query_language = None
+        for child in production.children:
+            sub_production = is_production(child, ASLParser.RULE_top_layer_stmt) or is_production(
+                child, ASLParser.RULE_state_stmt
+            )
+            if sub_production is not None:
+                child = sub_production.children[0]
+            sub_production = is_production(child, ASLParser.RULE_query_language_decl)
+            if sub_production is not None:
+                query_language = self.visit(sub_production)
+                break
+
+        # Check whether this is the initial scope; if so, set the initial value to the declaration or the default.
+        if not self._query_language_per_scope:
+            if query_language is None:
+                query_language = QueryLanguage()
+        # Otherwise, check for logical conflicts and add the latest or inherited value as the next scope.
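+        # Illustrative example (editor's note): a JSONPath top-level scope may open a
+        # nested JSONata state scope, but the reverse transition is rejected below.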
+
+    def _close_query_language_scope(self) -> None:
+        self._query_language_per_scope.pop()
+
+    def _is_query_language(self, query_language_mode: QueryLanguageMode) -> bool:
+        current_query_language = self._get_current_query_language()
+        return current_query_language.query_language_mode == query_language_mode
+
+    def _raise_if_query_language_is_not(
+        self, query_language_mode: QueryLanguageMode, ctx: ParserRuleContext
+    ) -> None:
+        if not self._is_query_language(query_language_mode=query_language_mode):
+            raise ValueError(
+                f"Unsupported declaration in QueryLanguage={query_language_mode} block: {ctx.getText()}"
+            )
+
+    @staticmethod
+    def _inner_string_of(parser_rule_context: ParserRuleContext) -> Optional[str]:
+        if is_terminal(parser_rule_context, ASLLexer.NULL):
+            return None
+        inner_str = parser_rule_context.getText()
+        if inner_str.startswith('"') and inner_str.endswith('"'):
+            inner_str = inner_str[1:-1]
+        return inner_str
+
+    def _inner_jsonata_expr(self, ctx: ParserRuleContext) -> str:
+        self._raise_if_query_language_is_not(query_language_mode=QueryLanguageMode.JSONata, ctx=ctx)
+        inner_string_value = from_string_literal(parser_rule_context=ctx)
+        # Strip the opening and closing JSONata delimiters "{%" and "%}".
+        expression_body = inner_string_value[2:-2]
+        # Leading and trailing spaces are commonly used around the body: remove them.
+        expression = expression_body.strip()
+        return expression
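
A worked sketch of the slicing _inner_jsonata_expr performs; the sample expression is made up, but the [2:-2] strip and .strip() mirror the method above exactly.

inner_string_value = "{% $states.input.items[0] %}"
expression_body = inner_string_value[2:-2]  # drop the "{%" and "%}" delimiters
expression = expression_body.strip()  # remove the customary padding spaces
assert expression == "$states.input.items[0]"
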
+
+    def visitComment_decl(self, ctx: ASLParser.Comment_declContext) -> Comment:
+        inner_str = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        return Comment(comment=inner_str)
+
+    def visitVersion_decl(self, ctx: ASLParser.Version_declContext) -> Version:
+        version_str = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        return Version(version=version_str)
+
+    def visitStartat_decl(self, ctx: ASLParser.Startat_declContext) -> StartAt:
+        inner_str = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        return StartAt(start_at_name=inner_str)
+
+    def visitStates_decl(self, ctx: ASLParser.States_declContext) -> States:
+        states = States()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, CommonStateField):
+                # TODO: move this check to a setter or checker layer?
+                if cmp.name in states.states:
+                    raise ValueError(f"State redefinition {child.getText()}")
+                states.states[cmp.name] = cmp
+        return states
+
+    def visitType_decl(self, ctx: ASLParser.Type_declContext) -> StateType:
+        return self.visit(ctx.state_type())
+
+    def visitState_type(self, ctx: ASLParser.State_typeContext) -> StateType:
+        state_type: int = ctx.children[0].symbol.type
+        return StateType(state_type)
+
+    def visitResource_decl(self, ctx: ASLParser.Resource_declContext) -> Resource:
+        inner_str = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        return Resource.from_resource_arn(inner_str)
+
+    def visitEnd_decl(self, ctx: ASLParser.End_declContext) -> End:
+        bool_child: ParseTree = ctx.children[-1]
+        bool_term: Optional[TerminalNodeImpl] = is_terminal(bool_child)
+        if bool_term is None:
+            raise ValueError(f"Could not derive End from declaration context '{ctx.getText()}'")
+        bool_term_rule: int = bool_term.getSymbol().type
+        is_end = bool_term_rule == ASLLexer.TRUE
+        return End(is_end=is_end)
+
+    def visitNext_decl(self, ctx: ASLParser.Next_declContext) -> Next:
+        inner_str = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        return Next(name=inner_str)
+
+    def visitResult_path_decl(self, ctx: ASLParser.Result_path_declContext) -> ResultPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        inner_str = self._inner_string_of(parser_rule_context=ctx.children[-1])
+        return ResultPath(result_path_src=inner_str)
+
+    def visitInput_path_decl(self, ctx: ASLParser.Input_path_declContext) -> InputPath:
+        string_sampler: Optional[StringSampler] = None
+        if not is_terminal(pt=ctx.children[-1], token_type=ASLLexer.NULL):
+            string_sampler: StringSampler = self.visitString_sampler(ctx.string_sampler())
+        return InputPath(string_sampler=string_sampler)
+
+    def visitOutput_path_decl(self, ctx: ASLParser.Output_path_declContext) -> OutputPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: Optional[StringSampler] = None
+        if is_production(ctx.children[-1], ASLParser.RULE_string_sampler):
+            string_sampler: StringSampler = self.visitString_sampler(ctx.children[-1])
+        return OutputPath(string_sampler=string_sampler)
+
+    def visitResult_decl(self, ctx: ASLParser.Result_declContext) -> Result:
+        json_decl = ctx.json_value_decl()
+        json_str: str = json_decl.getText()
+        json_obj: Any = json.loads(json_str)
+        return Result(result_obj=json_obj)
+
+    def visitParameters_decl(self, ctx: ASLParser.Parameters_declContext) -> Parameters:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        payload_tmpl: PayloadTmpl = self.visit(ctx.payload_tmpl_decl())
+        return Parameters(payload_tmpl=payload_tmpl)
+
+    def visitTimeout_seconds_int(self, ctx: ASLParser.Timeout_seconds_intContext) -> TimeoutSeconds:
+        seconds = int(ctx.INT().getText())
+        return TimeoutSeconds(timeout_seconds=seconds)
+
+    def visitTimeout_seconds_jsonata(
+        self, ctx: ASLParser.Timeout_seconds_jsonataContext
+    ) -> TimeoutSecondsJSONata:
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+        return TimeoutSecondsJSONata(string_jsonata=string_jsonata)
+
+    def visitTimeout_seconds_path(
+        self, ctx: ASLParser.Timeout_seconds_pathContext
+    ) -> TimeoutSecondsPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx.string_sampler())
+        return TimeoutSecondsPath(string_sampler=string_sampler)
+
+    def visitHeartbeat_seconds_int(
+        self, ctx: ASLParser.Heartbeat_seconds_intContext
+    ) -> HeartbeatSeconds:
+        seconds = int(ctx.INT().getText())
+        return HeartbeatSeconds(heartbeat_seconds=seconds)
+
+    def visitHeartbeat_seconds_jsonata(
+        self, ctx: ASLParser.Heartbeat_seconds_jsonataContext
+    ) -> HeartbeatSecondsJSONata:
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+        return HeartbeatSecondsJSONata(string_jsonata=string_jsonata)
+
+    def visitHeartbeat_seconds_path(
+        self, ctx: ASLParser.Heartbeat_seconds_pathContext
+    ) -> HeartbeatSecondsPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx.string_sampler())
+        return HeartbeatSecondsPath(string_sampler=string_sampler)
+
+    def visitResult_selector_decl(
+        self, ctx: ASLParser.Result_selector_declContext
+    ) -> ResultSelector:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        payload_tmpl: PayloadTmpl = self.visit(ctx.payload_tmpl_decl())
+        return ResultSelector(payload_tmpl=payload_tmpl)
+
+    def visitBranches_decl(self, ctx: ASLParser.Branches_declContext) -> BranchesDecl:
+        programs: list[Program] = []
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, Program):
+                programs.append(cmp)
+        return BranchesDecl(programs=programs)
+
+    def visitState_decl_body(self, ctx: ASLParser.State_decl_bodyContext) -> StateProps:
+        self._open_query_language_scope(ctx)
+        state_props = StateProps()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            state_props.add(cmp)
+        if state_props.get(QueryLanguage) is None:
+            state_props.add(self._get_current_query_language())
+        self._close_query_language_scope()
+        return state_props
+
+    def visitState_decl(self, ctx: ASLParser.State_declContext) -> CommonStateField:
+        state_name = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        state_props: StateProps = self.visit(ctx.state_decl_body())
+        state_props.name = state_name
+        common_state_field = self._common_state_field_of(state_props=state_props)
+        return common_state_field
+
+    @staticmethod
+    def _common_state_field_of(state_props: StateProps) -> CommonStateField:
+        # TODO: use subtype loading strategy.
+        match state_props.get(StateType):
+            case StateType.Task:
+                resource: Resource = state_props.get(Resource)
+                state = state_task_for(resource)
+            case StateType.Pass:
+                state = StatePass()
+            case StateType.Choice:
+                state = StateChoice()
+            case StateType.Fail:
+                state = StateFail()
+            case StateType.Succeed:
+                state = StateSucceed()
+            case StateType.Wait:
+                state = StateWait()
+            case StateType.Map:
+                state = StateMap()
+            case StateType.Parallel:
+                state = StateParallel()
+            case None:
+                raise TypeError("No Type declaration for State in context.")
+            case unknown:
+                raise TypeError(
+                    f"Unknown StateType value '{unknown}' in StateProps object in context."  # noqa
+                )
+        state.from_state_props(state_props)
+        return state
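
For orientation, a hedged sketch of the dispatch: a StateProps that carries StateType.Pass selects the StatePass branch above, after which from_state_props populates the instance (a complete definition would also carry, for example, an End or Next declaration). The direct StateProps construction below is illustrative only.

from localstack.services.stepfunctions.asl.component.state.state_props import StateProps
from localstack.services.stepfunctions.asl.component.state.state_type import StateType

props = StateProps()
props.name = "ExamplePass"  # hypothetical state name
props.add(StateType.Pass)
# _common_state_field_of(props) would instantiate StatePass() for this props
# object and then call state.from_state_props(props) on it.
assert props.get(StateType) is StateType.Pass
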
+
+    def visitCondition_lit(self, ctx: ASLParser.Condition_litContext) -> ConditionJSONataLit:
+        self._raise_if_query_language_is_not(query_language_mode=QueryLanguageMode.JSONata, ctx=ctx)
+        bool_child: ParseTree = ctx.children[-1]
+        bool_term: Optional[TerminalNodeImpl] = is_terminal(bool_child)
+        if bool_term is None:
+            raise ValueError(
+                f"Could not derive boolean literal from declaration context '{ctx.getText()}'."
+            )
+        bool_term_rule: int = bool_term.getSymbol().type
+        bool_val: bool = bool_term_rule == ASLLexer.TRUE
+        return ConditionJSONataLit(literal=bool_val)
+
+    def visitCondition_string_jsonata(
+        self, ctx: ASLParser.Condition_string_jsonataContext
+    ) -> ConditionStringJSONata:
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx=ctx.string_jsonata())
+        return ConditionStringJSONata(string_jsonata=string_jsonata)
+
+    def visitVariable_decl(self, ctx: ASLParser.Variable_declContext) -> Variable:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx=ctx.string_sampler())
+        return Variable(string_sampler=string_sampler)
+
+    def visitComparison_op(self, ctx: ASLParser.Comparison_opContext) -> ComparisonOperatorType:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        try:
+            operator_type: int = ctx.children[0].symbol.type
+            return ComparisonOperatorType(operator_type)
+        except Exception:
+            raise ValueError(f"Could not derive ComparisonOperator from context '{ctx.getText()}'.")
+
+    def visitComparison_func_value(
+        self, ctx: ASLParser.Comparison_func_valueContext
+    ) -> ComparisonFuncValue:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        comparison_op: ComparisonOperatorType = self.visit(ctx.comparison_op())
+        json_decl = ctx.json_value_decl()
+        json_str: str = json_decl.getText()
+        json_obj: Any = json.loads(json_str)
+        return ComparisonFuncValue(operator_type=comparison_op, value=json_obj)
+
+    def visitComparison_func_string_variable_sample(
+        self, ctx: ASLParser.Comparison_func_string_variable_sampleContext
+    ) -> ComparisonFuncStringVariableSample:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        comparison_op: ComparisonOperatorType = self.visit(ctx.comparison_op())
+        string_variable_sample: StringVariableSample = self.visitString_variable_sample(
+            ctx.string_variable_sample()
+        )
+        return ComparisonFuncStringVariableSample(
+            operator_type=comparison_op, string_variable_sample=string_variable_sample
+        )
+
+    def visitDefault_decl(self, ctx: ASLParser.Default_declContext) -> DefaultDecl:
+        state_name = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        return DefaultDecl(state_name=state_name)
+
+    def visitChoice_operator(
+        self, ctx: ASLParser.Choice_operatorContext
+    ) -> ComparisonComposite.ChoiceOp:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        pt: Optional[TerminalNodeImpl] = is_terminal(ctx.children[0])
+        if not pt:
+            raise ValueError(f"Could not derive ChoiceOperator in block '{ctx.getText()}'.")
+        return ComparisonComposite.ChoiceOp(pt.symbol.type)
+
+    def visitComparison_composite(
+        self, ctx: ASLParser.Comparison_compositeContext
+    ) -> ComparisonComposite:
+        choice_op: ComparisonComposite.ChoiceOp = self.visit(ctx.choice_operator())
+        rules: list[ChoiceRule] = list()
+        for child in ctx.children[1:]:
+            cmp: Optional[Component] = self.visit(child)
+            if not cmp:
+                continue
+            elif isinstance(cmp, ChoiceRule):
+                rules.append(cmp)
+
+        match choice_op:
+            case ComparisonComposite.ChoiceOp.Not:
+                if len(rules) != 1:
+                    raise ValueError(
+                        f"ComparisonCompositeNot must carry only one ComparisonCompositeStmt in: '{ctx.getText()}'."
+                    )
+                return ComparisonCompositeNot(rule=rules[0])
+            case ComparisonComposite.ChoiceOp.And:
+                return ComparisonCompositeAnd(rules=rules)
+            case ComparisonComposite.ChoiceOp.Or:
+                return ComparisonCompositeOr(rules=rules)
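
Illustrative JSONPath-mode Choice rules that exercise the composite handling above, written as Python dicts for brevity (field values and target states are hypothetical): And/Or take a list of nested rules, while the length check restricts Not to exactly one.

choice_rule_not = {
    "Not": {"Variable": "$.type", "StringEquals": "Private"},  # exactly one nested rule
    "Next": "PublicRoute",
}
choice_rule_or = {
    "Or": [
        {"Variable": "$.value", "NumericLessThan": 0},
        {"Variable": "$.value", "NumericGreaterThan": 100},
    ],
    "Next": "OutOfRange",
}
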
+
+    def visitChoice_rule_comparison_composite(
+        self, ctx: ASLParser.Choice_rule_comparison_compositeContext
+    ) -> ChoiceRule:
+        composite_stmts = ComparisonCompositeProps()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            composite_stmts.add(cmp)
+        return ChoiceRule(
+            comparison=composite_stmts.get(
+                typ=ComparisonComposite,
+                raise_on_missing=ValueError(
+                    f"Expecting a 'ComparisonComposite' definition at '{ctx.getText()}'."
+                ),
+            ),
+            next_stmt=composite_stmts.get(Next),
+            comment=composite_stmts.get(Comment),
+            assign=composite_stmts.get(AssignDecl),
+            output=composite_stmts.get(Output),
+        )
+
+    def visitChoice_rule_comparison_variable(
+        self, ctx: ASLParser.Choice_rule_comparison_variableContext
+    ) -> ChoiceRule:
+        comparison_stmts = StateProps()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            comparison_stmts.add(cmp)
+        if self._is_query_language(query_language_mode=QueryLanguageMode.JSONPath):
+            variable: Variable = comparison_stmts.get(
+                typ=Variable,
+                raise_on_missing=ValueError(
+                    f"Expected a Variable declaration in '{ctx.getText()}'."
+                ),
+            )
+            comparison_func: Comparison = comparison_stmts.get(
+                typ=Comparison,
+                raise_on_missing=ValueError(
+                    f"Expected a ComparisonFunction declaration in '{ctx.getText()}'."
+                ),
+            )
+            if not isinstance(comparison_func, ComparisonFunc):
+                raise ValueError(f"Expected a ComparisonFunction declaration in '{ctx.getText()}'")
+            comparison_variable = ComparisonVariable(variable=variable, func=comparison_func)
+            return ChoiceRule(
+                comparison=comparison_variable,
+                next_stmt=comparison_stmts.get(Next),
+                comment=comparison_stmts.get(Comment),
+                assign=None,
+                output=None,
+            )
+        else:
+            condition: Comparison = comparison_stmts.get(
+                typ=Comparison,
+                raise_on_missing=ValueError(
+                    f"Expected a Condition declaration in '{ctx.getText()}'"
+                ),
+            )
+            return ChoiceRule(
+                comparison=condition,
+                next_stmt=comparison_stmts.get(Next),
+                comment=comparison_stmts.get(Comment),
+                assign=comparison_stmts.get(AssignDecl),
+                output=comparison_stmts.get(Output),
+            )
+
+    def visitChoices_decl(self, ctx: ASLParser.Choices_declContext) -> ChoicesDecl:
+        rules: list[ChoiceRule] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if not cmp:
+                continue
+            elif isinstance(cmp, ChoiceRule):
+                rules.append(cmp)
+        return ChoicesDecl(rules=rules)
+
+    def visitError(self, ctx: ASLParser.ErrorContext) -> Error:
+        string_expression: StringExpression = self.visit(ctx.children[-1])
+        return Error(string_expression=string_expression)
+
+    def visitError_path(self, ctx: ASLParser.Error_pathContext) -> ErrorPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_expression: StringExpression = self.visit(ctx.children[-1])
+        return ErrorPath(string_expression=string_expression)
+
+    def visitCause(self, ctx: ASLParser.CauseContext) -> Cause:
+        string_expression: StringExpression = self.visit(ctx.children[-1])
+        return Cause(string_expression=string_expression)
+
+    def visitCause_path(self, ctx: ASLParser.Cause_pathContext) -> CausePath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_expression: StringExpression = self.visit(ctx.children[-1])
+        return CausePath(string_expression=string_expression)
+
+    def visitRole_arn(self, ctx: ASLParser.Role_arnContext) -> RoleArn:
+        string_expression: StringExpression = self.visit(ctx.children[-1])
+        return RoleArn(string_expression=string_expression)
+
+    def visitRole_path(self, ctx: ASLParser.Role_pathContext) -> RoleArn:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_expression_simple: StringExpressionSimple = self.visitString_expression_simple(
+            ctx=ctx.string_expression_simple()
+        )
+        return RoleArn(string_expression=string_expression_simple)
+
+    def visitCredentials_decl(self, ctx: ASLParser.Credentials_declContext) -> Credentials:
+        role_arn: RoleArn = self.visit(ctx.role_arn_decl())
+        return Credentials(role_arn=role_arn)
+
+    def visitSeconds_int(self, ctx: ASLParser.Seconds_intContext) -> Seconds:
+        return Seconds(seconds=int(ctx.INT().getText()))
+
+    def visitSeconds_jsonata(self, ctx: ASLParser.Seconds_jsonataContext) -> SecondsJSONata:
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+        return SecondsJSONata(string_jsonata=string_jsonata)
+
+    def visitSeconds_path(self, ctx: ASLParser.Seconds_pathContext) -> SecondsPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx=ctx.string_sampler())
+        return SecondsPath(string_sampler=string_sampler)
+
+    def visitItems_path_decl(self, ctx: ASLParser.Items_path_declContext) -> ItemsPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx.string_sampler())
+        return ItemsPath(string_sampler=string_sampler)
+
+    def visitMax_concurrency_int(self, ctx: ASLParser.Max_concurrency_intContext) -> MaxConcurrency:
+        return MaxConcurrency(num=int(ctx.INT().getText()))
+
+    def visitMax_concurrency_jsonata(
+        self, ctx: ASLParser.Max_concurrency_jsonataContext
+    ) -> MaxConcurrencyJSONata:
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+        return MaxConcurrencyJSONata(string_jsonata=string_jsonata)
+
+    def visitMax_concurrency_path(
+        self, ctx: ASLParser.Max_concurrency_pathContext
+    ) -> MaxConcurrencyPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx.string_sampler())
+        return MaxConcurrencyPath(string_sampler=string_sampler)
+
+    def visitMode_decl(self, ctx: ASLParser.Mode_declContext) -> Mode:
+        mode_type: int = self.visit(ctx.mode_type())
+        return Mode(mode_type)
+
+    def visitMode_type(self, ctx: ASLParser.Mode_typeContext) -> int:
+        return ctx.children[0].symbol.type
+
+    def visitExecution_decl(self, ctx: ASLParser.Execution_declContext) -> ExecutionType:
+        execution_type: int = self.visit(ctx.execution_type())
+        return ExecutionType(execution_type)
+
+    def visitExecution_type(self, ctx: ASLParser.Execution_typeContext) -> int:
+        return ctx.children[0].symbol.type
+
+    def visitTimestamp(self, ctx: ASLParser.TimestampContext) -> Timestamp:
+        string: StringExpression = self.visit(ctx.children[-1])
+        return Timestamp(string=string)
+
+    def visitTimestamp_path(self, ctx: ASLParser.Timestamp_pathContext) -> TimestampPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx.string_sampler())
+        return TimestampPath(string=string_sampler)
+
+    def visitProcessor_config_decl(
+        self, ctx: ASLParser.Processor_config_declContext
+    ) -> ProcessorConfig:
+        props = TypedProps()
+        for child in ctx.children:
+            cmp = self.visit(child)
+            props.add(cmp)
+        return ProcessorConfig(
+            mode=props.get(typ=Mode) or ProcessorConfig.DEFAULT_MODE,
+            execution_type=props.get(typ=ExecutionType) or ProcessorConfig.DEFAULT_EXECUTION_TYPE,
+        )
+
+    def visitItem_processor_item(self, ctx: ASLParser.Item_processor_itemContext) -> Component:
+        return self.visit(ctx.children[0])
+
+    def visitItem_processor_decl(
+        self, ctx: ASLParser.Item_processor_declContext
+    ) -> ItemProcessorDecl:
+        props = TypedProps()
+        for child in ctx.children:
+            cmp = self.visit(child)
+            props.add(cmp)
+        return ItemProcessorDecl(
+            query_language=props.get(QueryLanguage) or QueryLanguage(),
+            start_at=props.get(
+                typ=StartAt,
+                raise_on_missing=ValueError(
+                    f"Expected a StartAt declaration at '{ctx.getText()}'."
+                ),
+            ),
+            states=props.get(
+                typ=States,
+                raise_on_missing=ValueError(f"Expected a States declaration at '{ctx.getText()}'."),
+            ),
+            comment=props.get(typ=Comment),
+            processor_config=props.get(typ=ProcessorConfig) or ProcessorConfig(),
+        )
+
+    def visitIterator_decl(self, ctx: ASLParser.Iterator_declContext) -> IteratorDecl:
+        props = TypedProps()
+        for child in ctx.children:
+            cmp = self.visit(child)
+            props.add(cmp)
+        return IteratorDecl(
+            comment=props.get(typ=Comment),
+            query_language=self._get_current_query_language(),
+            start_at=props.get(
+                typ=StartAt,
+                raise_on_missing=ValueError(
+                    f"Expected a StartAt declaration at '{ctx.getText()}'."
+                ),
+            ),
+            states=props.get(
+                typ=States,
+                raise_on_missing=ValueError(f"Expected a States declaration at '{ctx.getText()}'."),
+            ),
+            processor_config=props.get(typ=ProcessorConfig) or ProcessorConfig(),
+        )
+
+    def visitItem_selector_decl(self, ctx: ASLParser.Item_selector_declContext) -> ItemSelector:
+        template_value_object = self.visitAssign_template_value_object(
+            ctx=ctx.assign_template_value_object()
+        )
+        return ItemSelector(template_value_object=template_value_object)
+
+    def visitItem_reader_decl(self, ctx: ASLParser.Item_reader_declContext) -> ItemReader:
+        props = StateProps()
+        for child in ctx.children[3:-1]:
+            cmp = self.visit(child)
+            props.add(cmp)
+        resource: Resource = props.get(
+            typ=Resource,
+            raise_on_missing=ValueError(f"Expected a Resource declaration at '{ctx.getText()}'."),
+        )
+        return ItemReader(
+            resource=resource,
+            parargs=props.get(Parargs),
+            reader_config=props.get(ReaderConfig),
+        )
+
+    def visitReader_config_decl(self, ctx: ASLParser.Reader_config_declContext) -> ReaderConfig:
+        props = ReaderConfigProps()
+        for child in ctx.children:
+            cmp = self.visit(child)
+            props.add(cmp)
+        return ReaderConfig(
+            input_type=props.get(
+                typ=InputType,
+                raise_on_missing=ValueError(
+                    f"Expected an InputType declaration at '{ctx.getText()}'."
+                ),
+            ),
+            max_items_decl=props.get(typ=MaxItemsDecl),
+            csv_header_location=props.get(CSVHeaderLocation),
+            csv_headers=props.get(CSVHeaders),
+        )
+
+    def visitInput_type_decl(self, ctx: ASLParser.Input_type_declContext) -> InputType:
+        input_type = self._inner_string_of(ctx.string_literal())
+        return InputType(input_type=input_type)
+
+    def visitCsv_header_location_decl(
+        self, ctx: ASLParser.Csv_header_location_declContext
+    ) -> CSVHeaderLocation:
+        value = self._inner_string_of(ctx.string_literal())
+        return CSVHeaderLocation(csv_header_location_value=value)
+
+    def visitCsv_headers_decl(self, ctx: ASLParser.Csv_headers_declContext) -> CSVHeaders:
+        csv_headers: list[str] = list()
+        for child in ctx.children[3:-1]:
+            maybe_str = is_production(pt=child, rule_index=ASLParser.RULE_string_literal)
+            if maybe_str is not None:
+                csv_headers.append(self._inner_string_of(maybe_str))
+        # TODO: check for empty headers behaviour.
+        return CSVHeaders(header_names=csv_headers)
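
For reference, an ItemReader fragment of the kind the visitors above assemble, expressed as a Python dict (bucket, key, and limits are hypothetical): the Resource becomes the ItemReader's resource, Parameters lands in parargs, and the ReaderConfig block supplies the InputType, the CSV header declarations, and a MaxItems bound.

item_reader = {
    "Resource": "arn:aws:states:::s3:getObject",
    "Parameters": {"Bucket": "example-bucket", "Key": "items.csv"},
    "ReaderConfig": {
        "InputType": "CSV",
        "CSVHeaderLocation": "FIRST_ROW",
        "MaxItems": 100,
    },
}
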
+
+    def visitMax_items_path(self, ctx: ASLParser.Max_items_pathContext) -> MaxItemsPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx=ctx.string_sampler())
+        return MaxItemsPath(string_sampler=string_sampler)
+
+    def visitMax_items_int(self, ctx: ASLParser.Max_items_intContext) -> MaxItemsInt:
+        return MaxItemsInt(max_items=int(ctx.INT().getText()))
+
+    def visitMax_items_string_jsonata(
+        self, ctx: ASLParser.Max_items_string_jsonataContext
+    ) -> MaxItemsStringJSONata:
+        self._raise_if_query_language_is_not(query_language_mode=QueryLanguageMode.JSONata, ctx=ctx)
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+        return MaxItemsStringJSONata(string_jsonata=string_jsonata)
+
+    def visitTolerated_failure_count_int(
+        self, ctx: ASLParser.Tolerated_failure_count_intContext
+    ) -> ToleratedFailureCountInt:
+        LOG.warning(
+            "ToleratedFailureCount declarations currently have no effect on the program evaluation."
+        )
+        count = int(ctx.INT().getText())
+        return ToleratedFailureCountInt(tolerated_failure_count=count)
+
+    def visitTolerated_failure_count_string_jsonata(
+        self, ctx: ASLParser.Tolerated_failure_count_string_jsonataContext
+    ) -> ToleratedFailurePercentageStringJSONata:
+        LOG.warning(
+            "ToleratedFailureCount declarations currently have no effect on the program evaluation."
+        )
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx=ctx.string_jsonata())
+        return ToleratedFailurePercentageStringJSONata(string_jsonata=string_jsonata)
+
+    def visitTolerated_failure_count_path(
+        self, ctx: ASLParser.Tolerated_failure_count_pathContext
+    ) -> ToleratedFailureCountPath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        LOG.warning(
+            "ToleratedFailureCountPath declarations currently have no effect on the program evaluation."
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx.string_sampler())
+        return ToleratedFailureCountPath(string_sampler=string_sampler)
+
+    def visitTolerated_failure_percentage_number(
+        self, ctx: ASLParser.Tolerated_failure_percentage_numberContext
+    ) -> ToleratedFailurePercentage:
+        LOG.warning(
+            "ToleratedFailurePercentage declarations currently have no effect on the program evaluation."
+        )
+        percentage = float(ctx.NUMBER().getText())
+        return ToleratedFailurePercentage(tolerated_failure_percentage=percentage)
+
+    def visitTolerated_failure_percentage_string_jsonata(
+        self, ctx: ASLParser.Tolerated_failure_percentage_string_jsonataContext
+    ) -> ToleratedFailurePercentageStringJSONata:
+        LOG.warning(
+            "ToleratedFailurePercentage declarations currently have no effect on the program evaluation."
+        )
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx=ctx.string_jsonata())
+        return ToleratedFailurePercentageStringJSONata(string_jsonata=string_jsonata)
+
+    def visitTolerated_failure_percentage_path(
+        self, ctx: ASLParser.Tolerated_failure_percentage_pathContext
+    ) -> ToleratedFailurePercentagePath:
+        self._raise_if_query_language_is_not(
+            query_language_mode=QueryLanguageMode.JSONPath, ctx=ctx
+        )
+        LOG.warning(
+            "ToleratedFailurePercentagePath declarations currently have no effect on the program evaluation."
+        )
+        string_sampler: StringSampler = self.visitString_sampler(ctx.string_sampler())
+        return ToleratedFailurePercentagePath(string_sampler=string_sampler)
+
+    def visitLabel_decl(self, ctx: ASLParser.Label_declContext) -> Label:
+        label = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        return Label(label=label)
+
+    def visitResult_writer_decl(self, ctx: ASLParser.Result_writer_declContext) -> ResultWriter:
+        props = StateProps()
+        for child in ctx.children[3:-1]:
+            cmp = self.visit(child)
+            props.add(cmp)
+        resource: Resource = props.get(
+            typ=Resource,
+            raise_on_missing=ValueError(f"Expected a Resource declaration at '{ctx.getText()}'."),
+        )
+        # TODO: add tests for arguments in jsonata blocks using result writer
+        parargs: Parargs = props.get(
+            typ=Parargs,
+            raise_on_missing=ValueError(
+                f"Expected a Parameters/Arguments declaration at '{ctx.getText()}'."
+            ),
+        )
+        return ResultWriter(resource=resource, parargs=parargs)
+
+    def visitRetry_decl(self, ctx: ASLParser.Retry_declContext) -> RetryDecl:
+        retriers: list[RetrierDecl] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, RetrierDecl):
+                retriers.append(cmp)
+        return RetryDecl(retriers=retriers)
+
+    def visitRetrier_decl(self, ctx: ASLParser.Retrier_declContext) -> RetrierDecl:
+        props = RetrierProps()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            props.add(cmp)
+        return RetrierDecl.from_retrier_props(props=props)
+
+    def visitRetrier_stmt(self, ctx: ASLParser.Retrier_stmtContext):
+        return self.visit(ctx.children[0])
+
+    def visitError_equals_decl(self, ctx: ASLParser.Error_equals_declContext) -> ErrorEqualsDecl:
+        error_names: list[ErrorName] = list()
+        for child in ctx.children:
+            cmp = self.visit(child)
+            if isinstance(cmp, ErrorName):
+                error_names.append(cmp)
+        return ErrorEqualsDecl(error_names=error_names)
+
+    def visitError_name(self, ctx: ASLParser.Error_nameContext) -> ErrorName:
+        pt = ctx.children[0]
+
+        # Case: StatesErrorName.
+        prc: Optional[ParserRuleContext] = is_production(
+            pt=pt, rule_index=ASLParser.RULE_states_error_name
+        )
+        if prc:
+            return self.visit(prc)
+
+        # Case: CustomErrorName.
+        error_name = self._inner_string_of(parser_rule_context=ctx.string_literal())
+        return CustomErrorName(error_name=error_name)
+
+    def visitStates_error_name(self, ctx: ASLParser.States_error_nameContext) -> StatesErrorName:
+        pt: Optional[TerminalNodeImpl] = is_terminal(ctx.children[0])
+        if not pt:
+            raise ValueError(f"Could not derive ErrorName in block '{ctx.getText()}'.")
+        states_error_name_type = StatesErrorNameType(pt.symbol.type)
+        return StatesErrorName(states_error_name_type)
+
+    def visitInterval_seconds_decl(
+        self, ctx: ASLParser.Interval_seconds_declContext
+    ) -> IntervalSecondsDecl:
+        return IntervalSecondsDecl(seconds=int(ctx.INT().getText()))
+
+    def visitMax_attempts_decl(self, ctx: ASLParser.Max_attempts_declContext) -> MaxAttemptsDecl:
+        return MaxAttemptsDecl(attempts=int(ctx.INT().getText()))
+
+    def visitBackoff_rate_decl(self, ctx: ASLParser.Backoff_rate_declContext) -> BackoffRateDecl:
+        return BackoffRateDecl(rate=float(ctx.children[-1].getText()))
+
+    def visitMax_delay_seconds_decl(
+        self, ctx: ASLParser.Max_delay_seconds_declContext
+    ) -> MaxDelaySecondsDecl:
+        return MaxDelaySecondsDecl(max_delays_seconds=int(ctx.INT().getText()))
+
+    def visitJitter_strategy_decl(
+        self, ctx: ASLParser.Jitter_strategy_declContext
+    ) -> JitterStrategyDecl:
+        last_child: ParseTree = ctx.children[-1]
+        strategy_child: Optional[TerminalNodeImpl] = is_terminal(last_child)
+        strategy_value = strategy_child.getSymbol().type
+        jitter_strategy = JitterStrategy(strategy_value)
+        return JitterStrategyDecl(jitter_strategy=jitter_strategy)
+
+    def visitCatch_decl(self, ctx: ASLParser.Catch_declContext) -> CatchDecl:
+        catchers: list[CatcherDecl] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, CatcherDecl):
+                catchers.append(cmp)
+        return CatchDecl(catchers=catchers)
+
+    def visitCatcher_decl(self, ctx: ASLParser.Catcher_declContext) -> CatcherDecl:
+        props = CatcherProps()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            props.add(cmp)
+        if self._is_query_language(QueryLanguageMode.JSONPath) and not props.get(ResultPath):
+            props.add(CatcherDecl.DEFAULT_RESULT_PATH)
+        return CatcherDecl.from_catcher_props(props=props)
+
+    def visitPayload_value_float(
+        self, ctx: ASLParser.Payload_value_floatContext
+    ) -> PayloadValueFloat:
+        return PayloadValueFloat(val=float(ctx.NUMBER().getText()))
+
+    def visitPayload_value_int(self, ctx: ASLParser.Payload_value_intContext) -> PayloadValueInt:
+        return PayloadValueInt(val=int(ctx.INT().getText()))
+
+    def visitPayload_value_bool(self, ctx: ASLParser.Payload_value_boolContext) -> PayloadValueBool:
+        bool_child: ParseTree = ctx.children[0]
+        bool_term: Optional[TerminalNodeImpl] = is_terminal(bool_child)
+        if bool_term is None:
+            raise ValueError(
+                f"Could not derive PayloadValueBool from declaration context '{ctx.getText()}'."
+            )
+        bool_term_rule: int = bool_term.getSymbol().type
+        bool_val: bool = bool_term_rule == ASLLexer.TRUE
+        return PayloadValueBool(val=bool_val)
+
+    def visitPayload_value_null(self, ctx: ASLParser.Payload_value_nullContext) -> PayloadValueNull:
+        return PayloadValueNull()
+
+    def visitPayload_value_str(self, ctx: ASLParser.Payload_value_strContext) -> PayloadValueStr:
+        string_literal: StringLiteral = self.visitString_literal(ctx=ctx.string_literal())
+        return PayloadValueStr(val=string_literal.literal_value)
+
+    def visitPayload_binding_sample(
+        self, ctx: ASLParser.Payload_binding_sampleContext
+    ) -> PayloadBindingStringExpressionSimple:
+        string_dollar: str = self._inner_string_of(parser_rule_context=ctx.STRINGDOLLAR())
+        field = string_dollar[:-2]
+        string_expression_simple: StringExpressionSimple = self.visitString_expression_simple(
+            ctx.string_expression_simple()
+        )
+        return PayloadBindingStringExpressionSimple(
+            field=field, string_expression_simple=string_expression_simple
+        )
+
+    def visitPayload_binding_value(
+        self, ctx: ASLParser.Payload_binding_valueContext
+    ) -> PayloadBindingValue:
+        string_literal: StringLiteral = self.visitString_literal(ctx=ctx.string_literal())
+        payload_value: PayloadValue = self.visit(ctx.payload_value_decl())
+        return PayloadBindingValue(field=string_literal.literal_value, payload_value=payload_value)
+
+    def visitPayload_arr_decl(self, ctx: ASLParser.Payload_arr_declContext) -> PayloadArr:
+        payload_values: list[PayloadValue] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, PayloadValue):
+                payload_values.append(cmp)
+        return PayloadArr(payload_values=payload_values)
+
+    def visitPayload_tmpl_decl(self, ctx: ASLParser.Payload_tmpl_declContext) -> PayloadTmpl:
+        payload_bindings: list[PayloadBinding] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, PayloadBinding):
+                payload_bindings.append(cmp)
+        return PayloadTmpl(payload_bindings=payload_bindings)
+
+    def visitPayload_value_decl(self, ctx: ASLParser.Payload_value_declContext) -> PayloadValue:
+        value = ctx.children[0]
+        return self.visit(value)
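
A worked sketch of the ".$" field handling in visitPayload_binding_sample: keys ending in ".$" bind a runtime-sampled value, and the suffix is removed before the binding is stored. The key and path below are made up; the slicing matches the method above.

string_dollar = "Result.$"  # as written in a Parameters/ResultSelector template
field = string_dollar[:-2]  # drop the ".$" marker
assert field == "Result"
# So {"Result.$": "$.output"} binds the field "Result" to the value sampled at "$.output".
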
+
+    def visitProgram_decl(self, ctx: ASLParser.Program_declContext) -> Program:
+        self._open_query_language_scope(ctx)
+        props = TypedProps()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            props.add(cmp)
+        if props.get(QueryLanguage) is None:
+            props.add(self._get_current_query_language())
+        program = Program(
+            query_language=props.get(typ=QueryLanguage) or QueryLanguage(),
+            start_at=props.get(
+                typ=StartAt,
+                raise_on_missing=ValueError(
+                    f"No '{StartAt}' definition for Program in context: '{ctx.getText()}'."
+                ),
+            ),
+            states=props.get(
+                typ=States,
+                raise_on_missing=ValueError(
+                    f"No '{States}' definition for Program in context: '{ctx.getText()}'."
+                ),
+            ),
+            timeout_seconds=props.get(TimeoutSeconds),
+            comment=props.get(typ=Comment),
+            version=props.get(typ=Version),
+        )
+        self._close_query_language_scope()
+        return program
+
+    def visitState_machine(self, ctx: ASLParser.State_machineContext) -> Program:
+        return self.visit(ctx.program_decl())
+
+    def visitQuery_language_decl(self, ctx: ASLParser.Query_language_declContext) -> QueryLanguage:
+        query_language_mode_int = ctx.children[-1].getSymbol().type
+        query_language_mode = QueryLanguageMode(value=query_language_mode_int)
+        return QueryLanguage(query_language_mode=query_language_mode)
+
+    def visitAssign_template_value_terminal_float(
+        self, ctx: ASLParser.Assign_template_value_terminal_floatContext
+    ) -> AssignTemplateValueTerminalLit:
+        float_value = float(ctx.NUMBER().getText())
+        return AssignTemplateValueTerminalLit(value=float_value)
+
+    def visitAssign_template_value_terminal_int(
+        self, ctx: ASLParser.Assign_template_value_terminal_intContext
+    ) -> AssignTemplateValueTerminalLit:
+        int_value = int(ctx.INT().getText())
+        return AssignTemplateValueTerminalLit(value=int_value)
+
+    def visitAssign_template_value_terminal_bool(
+        self, ctx: ASLParser.Assign_template_value_terminal_boolContext
+    ) -> AssignTemplateValueTerminalLit:
+        bool_term_rule: int = ctx.children[0].getSymbol().type
+        bool_value: bool = bool_term_rule == ASLLexer.TRUE
+        return AssignTemplateValueTerminalLit(value=bool_value)
+
+    def visitAssign_template_value_terminal_null(
+        self, ctx: ASLParser.Assign_template_value_terminal_nullContext
+    ) -> AssignTemplateValueTerminalLit:
+        return AssignTemplateValueTerminalLit(value=None)
+
+    def visitAssign_template_value_terminal_string_jsonata(
+        self, ctx: ASLParser.Assign_template_value_terminal_string_jsonataContext
+    ) -> AssignTemplateValueTerminal:
+        # Return a JSONata expression resolver or a plain literal, depending on the current language mode.
+        current_query_language = self._get_current_query_language()
+        if current_query_language.query_language_mode == QueryLanguageMode.JSONata:
+            string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+            return AssignTemplateValueTerminalStringJSONata(string_jsonata=string_jsonata)
+        else:
+            inner_string_value = self._inner_string_of(parser_rule_context=ctx.string_jsonata())
+            return AssignTemplateValueTerminalLit(value=inner_string_value)
+
+    def visitAssign_template_value_terminal_string_literal(
+        self, ctx: ASLParser.Assign_template_value_terminal_string_literalContext
+    ) -> AssignTemplateValueTerminal:
+        string_literal = self._inner_string_of(ctx.string_literal())
+        return AssignTemplateValueTerminalLit(value=string_literal)
+
+    def visitAssign_template_value(self, ctx: ASLParser.Assign_template_valueContext):
+        return self.visit(ctx.children[0])
+
+    def visitAssign_template_value_array(
+        self, ctx: ASLParser.Assign_template_value_arrayContext
+    ) -> AssignTemplateValueArray:
+        values: list[AssignTemplateValue] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, AssignTemplateValue):
+                values.append(cmp)
+        return AssignTemplateValueArray(values=values)
+
+    def visitAssign_template_value_object(
+        self, ctx: ASLParser.Assign_template_value_objectContext
+    ) -> AssignTemplateValueObject:
+        bindings: list[AssignTemplateBinding] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, AssignTemplateBinding):
+                bindings.append(cmp)
+        return AssignTemplateValueObject(bindings=bindings)
+
+    def visitAssign_template_binding_value(
+        self, ctx: ASLParser.Assign_template_binding_valueContext
+    ) -> AssignTemplateBindingValue:
+        string_literal: StringLiteral = self.visitString_literal(ctx=ctx.string_literal())
+        assign_value: AssignTemplateValue = self.visit(ctx.assign_template_value())
+        return AssignTemplateBindingValue(
+            identifier=string_literal.literal_value, assign_value=assign_value
+        )
+
+    def visitAssign_template_binding_string_expression_simple(
+        self, ctx: ASLParser.Assign_template_binding_string_expression_simpleContext
+    ) -> AssignTemplateBindingStringExpressionSimple:
+        identifier: str = self._inner_string_of(ctx.STRINGDOLLAR())
+        identifier = identifier[:-2]
+        string_expression_simple: StringExpressionSimple = self.visitString_expression_simple(
+            ctx.string_expression_simple()
+        )
+        return AssignTemplateBindingStringExpressionSimple(
+            identifier=identifier, string_expression_simple=string_expression_simple
+        )
+
+    def visitAssign_decl_binding(
+        self, ctx: ASLParser.Assign_decl_bindingContext
+    ) -> AssignDeclBinding:
+        binding: AssignTemplateBinding = self.visit(ctx.assign_template_binding())
+        return AssignDeclBinding(binding=binding)
+
+    def visitAssign_decl_body(
+        self, ctx: ASLParser.Assign_decl_bodyContext
+    ) -> list[AssignDeclBinding]:
+        bindings: list[AssignDeclBinding] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, AssignDeclBinding):
+                bindings.append(cmp)
+        return bindings
+
+    def visitAssign_decl(self, ctx: ASLParser.Assign_declContext) -> AssignDecl:
+        declaration_bindings: list[AssignDeclBinding] = self.visit(ctx.assign_decl_body())
+        return AssignDecl(declaration_bindings=declaration_bindings)
+
+    def visitJsonata_template_value_terminal_float(
+        self, ctx: ASLParser.Jsonata_template_value_terminal_floatContext
+    ) -> JSONataTemplateValueTerminalLit:
+        float_value = float(ctx.NUMBER().getText())
+        return JSONataTemplateValueTerminalLit(value=float_value)
+
+    def visitJsonata_template_value_terminal_int(
+        self, ctx: ASLParser.Jsonata_template_value_terminal_intContext
+    ) -> JSONataTemplateValueTerminalLit:
+        int_value = int(ctx.INT().getText())
+        return JSONataTemplateValueTerminalLit(value=int_value)
+
+    def visitJsonata_template_value_terminal_bool(
+        self, ctx: ASLParser.Jsonata_template_value_terminal_boolContext
+    ) -> JSONataTemplateValueTerminalLit:
+        bool_term_rule: int = ctx.children[0].getSymbol().type
+        bool_value: bool = bool_term_rule == ASLLexer.TRUE
+        return JSONataTemplateValueTerminalLit(value=bool_value)
+
+    def visitJsonata_template_value_terminal_null(
+        self, ctx: ASLParser.Jsonata_template_value_terminal_nullContext
+    ) -> JSONataTemplateValueTerminalLit:
+        return JSONataTemplateValueTerminalLit(value=None)
+
+    def visitJsonata_template_value_terminal_string_jsonata(
+        self, ctx: ASLParser.Jsonata_template_value_terminal_string_jsonataContext
+    ) -> JSONataTemplateValueTerminalStringJSONata:
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+        return JSONataTemplateValueTerminalStringJSONata(string_jsonata=string_jsonata)
+
+    def visitJsonata_template_value_terminal_string_literal(
+        self, ctx: ASLParser.Jsonata_template_value_terminal_string_literalContext
+    ) -> JSONataTemplateValueTerminalLit:
+        string = from_string_literal(ctx.string_literal())
+        return JSONataTemplateValueTerminalLit(value=string)
+
+    def visitJsonata_template_value(
+        self, ctx: ASLParser.Jsonata_template_valueContext
+    ) -> JSONataTemplateValue:
+        return self.visit(ctx.children[0])
+
+    def visitJsonata_template_value_array(
+        self, ctx: ASLParser.Jsonata_template_value_arrayContext
+    ) -> JSONataTemplateValueArray:
+        values: list[JSONataTemplateValue] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, JSONataTemplateValue):
+                values.append(cmp)
+        return JSONataTemplateValueArray(values=values)
+
+    def visitJsonata_template_value_object(
+        self, ctx: ASLParser.Jsonata_template_value_objectContext
+    ) -> JSONataTemplateValueObject:
+        bindings: list[JSONataTemplateBinding] = list()
+        for child in ctx.children:
+            cmp: Optional[Component] = self.visit(child)
+            if isinstance(cmp, JSONataTemplateBinding):
+                bindings.append(cmp)
+        return JSONataTemplateValueObject(bindings=bindings)
+
+    def visitJsonata_template_binding(
+        self, ctx: ASLParser.Jsonata_template_bindingContext
+    ) -> JSONataTemplateBinding:
+        identifier: str = self._inner_string_of(ctx.string_literal())
+        value: JSONataTemplateValue = self.visit(ctx.jsonata_template_value())
+        return JSONataTemplateBinding(identifier=identifier, value=value)
+
+    def visitArguments_string_jsonata(
+        self, ctx: ASLParser.Arguments_string_jsonataContext
+    ) -> ArgumentsStringJSONata:
+        self._raise_if_query_language_is_not(query_language_mode=QueryLanguageMode.JSONata, ctx=ctx)
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+        return ArgumentsStringJSONata(string_jsonata=string_jsonata)
+
+    def visitArguments_jsonata_template_value_object(
+        self, ctx: ASLParser.Arguments_jsonata_template_value_objectContext
+    ) -> ArgumentsJSONataTemplateValueObject:
+        self._raise_if_query_language_is_not(query_language_mode=QueryLanguageMode.JSONata, ctx=ctx)
+        jsonata_template_value_object: JSONataTemplateValueObject = self.visit(
+            ctx.jsonata_template_value_object()
+        )
+        return ArgumentsJSONataTemplateValueObject(
+            jsonata_template_value_object=jsonata_template_value_object
+        )
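
Both Arguments shapes accepted above, shown as illustrative JSONata-mode fragments (the queue URL and field names are hypothetical): a whole-string JSONata expression, or a template object whose string values may themselves be "{% ... %}" expressions.

arguments_as_string = "{% $states.input.request %}"
arguments_as_object = {
    "QueueUrl": "{% $states.input.queueUrl %}",
    "MessageBody": {"attempt": 1},
}
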
+
+    def visitOutput_decl(self, ctx: ASLParser.Output_declContext) -> Output:
+        jsonata_template_value: JSONataTemplateValue = self.visit(ctx.jsonata_template_value())
+        return Output(jsonata_template_value=jsonata_template_value)
+
+    def visitItems_array(self, ctx: ASLParser.Items_arrayContext) -> ItemsArray:
+        jsonata_template_value_array: JSONataTemplateValueArray = self.visit(
+            ctx.jsonata_template_value_array()
+        )
+        return ItemsArray(jsonata_template_value_array=jsonata_template_value_array)
+
+    def visitItems_jsonata(self, ctx: ASLParser.Items_jsonataContext) -> ItemsJSONata:
+        string_jsonata: StringJSONata = self.visitString_jsonata(ctx.string_jsonata())
+        return ItemsJSONata(string_jsonata=string_jsonata)
+
+    def visitString_sampler(self, ctx: ASLParser.String_samplerContext) -> StringSampler:
+        return self.visit(ctx.children[0])
+
+    def visitString_literal(self, ctx: ASLParser.String_literalContext) -> StringLiteral:
+        string_literal = from_string_literal(parser_rule_context=ctx)
+        return StringLiteral(literal_value=string_literal)
+
+    def visitString_jsonpath(self, ctx: ASLParser.String_jsonpathContext) -> StringJsonPath:
+        json_path: str = self._inner_string_of(parser_rule_context=ctx)
+        return StringJsonPath(json_path=json_path)
+
+    def visitString_context_path(
+        self, ctx: ASLParser.String_context_pathContext
+    ) -> StringContextPath:
+        context_object_path: str = self._inner_string_of(parser_rule_context=ctx)
+        return StringContextPath(context_object_path=context_object_path)
+
+    def visitString_variable_sample(
+        self, ctx: ASLParser.String_variable_sampleContext
+    ) -> StringVariableSample:
+        query_language_mode: QueryLanguageMode = (
+            self._get_current_query_language().query_language_mode
+        )
+        expression: str = self._inner_string_of(parser_rule_context=ctx)
+        return StringVariableSample(query_language_mode=query_language_mode, expression=expression)
+
+    def visitString_jsonata(self, ctx: ASLParser.String_jsonataContext) -> StringJSONata:
+        self._raise_if_query_language_is_not(query_language_mode=QueryLanguageMode.JSONata, ctx=ctx)
+        expression = self._inner_jsonata_expr(ctx=ctx)
+        return StringJSONata(expression=expression)
+
+    def visitString_intrinsic_function(
+        self, ctx: ASLParser.String_intrinsic_functionContext
+    ) -> StringIntrinsicFunction:
+        intrinsic_function_derivation = ctx.STRINGINTRINSICFUNC().getText()[1:-1]
+        function, _ = IntrinsicParser.parse(intrinsic_function_derivation)
+        return StringIntrinsicFunction(
+            intrinsic_function_derivation=intrinsic_function_derivation, function=function
+        )
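
An end-to-end sketch of driving this Preprocessor through the module's public entry point; that AmazonStateLanguageParser.parse returns a (program, parse tree) pair is inferred from its uses elsewhere in this change, and the definition is a minimal hypothetical machine.

from localstack.services.stepfunctions.asl.parse.asl_parser import AmazonStateLanguageParser

definition = '{"StartAt": "Done", "States": {"Done": {"Type": "Succeed"}}}'
program, tree = AmazonStateLanguageParser.parse(definition)
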
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/test_state/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/parse/test_state/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/test_state/asl_parser.py b/localstack-core/localstack/services/stepfunctions/asl/parse/test_state/asl_parser.py
new file mode 100644
index 0000000000000..d4c4b8b3ef582
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/parse/test_state/asl_parser.py
@@ -0,0 +1,39 @@
+from antlr4 import CommonTokenStream, InputStream, ParserRuleContext
+
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLLexer import ASLLexer
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser
+from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent
+from localstack.services.stepfunctions.asl.parse.asl_parser import (
+    AmazonStateLanguageParser,
+    ASLParserException,
+    SyntaxErrorListener,
+)
+from localstack.services.stepfunctions.asl.parse.test_state.preprocessor import (
+    TestStatePreprocessor,
+)
+
+
+class TestStateAmazonStateLanguageParser(AmazonStateLanguageParser):
+    @staticmethod
+    def parse(definition: str) -> tuple[EvalComponent, ParserRuleContext]:
+        # Attempt to build the AST and look out for syntax errors.
+        syntax_error_listener = SyntaxErrorListener()
+
+        input_stream = InputStream(definition)
+        lexer = ASLLexer(input_stream)
+        stream = CommonTokenStream(lexer)
+        parser = ASLParser(stream)
+        parser.removeErrorListeners()
+        parser.addErrorListener(syntax_error_listener)
+        # Unlike the main Program parser, TestState parsing occurs at a state declaration level.
+        tree = parser.state_decl_body()
+
+        errors = syntax_error_listener.errors
+        if errors:
+            raise ASLParserException(errors=errors)
+
+        # Attempt to preprocess the AST into evaluation components.
+        preprocessor = TestStatePreprocessor()
+        test_state_program = preprocessor.visit(tree)
+
+        return test_state_program, tree
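
Usage sketch for the TestState parser above: unlike the Program parser it accepts a bare state body, so a single Pass state (its content here is hypothetical) parses without StartAt/States wrappers.

from localstack.services.stepfunctions.asl.parse.test_state.asl_parser import (
    TestStateAmazonStateLanguageParser,
)

test_definition = '{"Type": "Pass", "Result": {"ok": true}, "End": true}'
test_state_program, tree = TestStateAmazonStateLanguageParser.parse(test_definition)
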
diff --git a/localstack-core/localstack/services/stepfunctions/asl/parse/test_state/preprocessor.py b/localstack-core/localstack/services/stepfunctions/asl/parse/test_state/preprocessor.py
new file mode 100644
index 0000000000000..0565f74a67a55
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/parse/test_state/preprocessor.py
@@ -0,0 +1,131 @@
+import enum
+from typing import Final
+
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser
+from localstack.services.stepfunctions.asl.component.common.parargs import Parameters
+from localstack.services.stepfunctions.asl.component.common.path.input_path import InputPath
+from localstack.services.stepfunctions.asl.component.common.path.result_path import ResultPath
+from localstack.services.stepfunctions.asl.component.common.query_language import QueryLanguage
+from localstack.services.stepfunctions.asl.component.common.result_selector import ResultSelector
+from localstack.services.stepfunctions.asl.component.state.state import CommonStateField
+from localstack.services.stepfunctions.asl.component.state.state_choice.state_choice import (
+    StateChoice,
+)
+from localstack.services.stepfunctions.asl.component.state.state_execution.execute_state import (
+    ExecutionState,
+)
+from localstack.services.stepfunctions.asl.component.state.state_pass.result import Result
+from localstack.services.stepfunctions.asl.component.test_state.program.test_state_program import (
+    TestStateProgram,
+)
+from localstack.services.stepfunctions.asl.component.test_state.state.test_state_state_props import (
+    TestStateStateProps,
+)
+from localstack.services.stepfunctions.asl.eval.test_state.environment import TestStateEnvironment
+from localstack.services.stepfunctions.asl.parse.preprocessor import Preprocessor
+from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
+
+
+class InspectionDataKey(enum.Enum):
+    INPUT = "input"
+    AFTER_INPUT_PATH = "afterInputPath"
+    AFTER_PARAMETERS = "afterParameters"
+    RESULT = "result"
+    AFTER_RESULT_SELECTOR = "afterResultSelector"
+    AFTER_RESULT_PATH = "afterResultPath"
+    REQUEST = "request"
+    RESPONSE = "response"
+
+
+def _decorated_updated_choice_inspection_data(method):
+    def wrapper(env: TestStateEnvironment, *args, **kwargs):
+        method(env, *args, **kwargs)
+        env.set_choice_selected(env.next_state_name)
+
+    return wrapper
+
+
+def _decorated_updates_inspection_data(method, inspection_data_key: InspectionDataKey):
+    def wrapper(env: TestStateEnvironment, *args, **kwargs):
+        method(env, *args, **kwargs)
+        result = to_json_str(env.stack[-1])
+        # We know that the enum value used here corresponds to a supported inspection data field by design.
+        env.inspection_data[inspection_data_key.value] = result  # noqa
+
+    return wrapper
+
+
+def _decorate_state_field(state_field: CommonStateField) -> None:
+    if isinstance(state_field, ExecutionState):
+        state_field._eval_execution = _decorated_updates_inspection_data(
+            # As part of the decoration process, we intentionally access this protected member
+            # to facilitate the decorator's functionality.
+            method=state_field._eval_execution,  # noqa
+            inspection_data_key=InspectionDataKey.RESULT,
+        )
+    elif isinstance(state_field, StateChoice):
+        state_field._eval_body = _decorated_updated_choice_inspection_data(
+            # As part of the decoration process, we intentionally access this protected member
+            # to facilitate the decorator's functionality.
+            method=state_field._eval_body  # noqa
+        )
+
+
+class TestStatePreprocessor(Preprocessor):
+    STATE_NAME: Final[str] = "TestState"
+
+    def visitState_decl_body(self, ctx: ASLParser.State_decl_bodyContext) -> TestStateProgram:
+        self._open_query_language_scope(ctx)
+        state_props = TestStateStateProps()
+        state_props.name = self.STATE_NAME
+        for child in ctx.children:
+            cmp = self.visit(child)
+            state_props.add(cmp)
+        state_field = self._common_state_field_of(state_props=state_props)
+        if state_props.get(QueryLanguage) is None:
+            state_props.add(self._get_current_query_language())
+        _decorate_state_field(state_field)
+        self._close_query_language_scope()
+        return TestStateProgram(state_field)
+
+    def visitInput_path_decl(self, ctx: ASLParser.Input_path_declContext) -> InputPath:
+        input_path: InputPath = super().visitInput_path_decl(ctx=ctx)
+        input_path._eval_body = _decorated_updates_inspection_data(
+            method=input_path._eval_body,  # noqa
+            inspection_data_key=InspectionDataKey.AFTER_INPUT_PATH,
+        )
+        return input_path
+
+    def visitParameters_decl(self, ctx: ASLParser.Parameters_declContext) -> Parameters:
+        parameters: Parameters = super().visitParameters_decl(ctx=ctx)
+        parameters._eval_body = _decorated_updates_inspection_data(
+            method=parameters._eval_body,  # noqa
+            inspection_data_key=InspectionDataKey.AFTER_PARAMETERS,
+        )
+        return parameters
+
+    def visitResult_selector_decl(
+        self, ctx: ASLParser.Result_selector_declContext
+    ) -> ResultSelector:
+        result_selector: ResultSelector = super().visitResult_selector_decl(ctx=ctx)
+        result_selector._eval_body = _decorated_updates_inspection_data(
+            method=result_selector._eval_body,  # noqa
+            inspection_data_key=InspectionDataKey.AFTER_RESULT_SELECTOR,
+        )
+        return result_selector
+
+    def visitResult_path_decl(self, ctx: ASLParser.Result_path_declContext) -> ResultPath:
+        result_path: ResultPath = super().visitResult_path_decl(ctx=ctx)
+        result_path._eval_body = _decorated_updates_inspection_data(
+            method=result_path._eval_body,  # noqa
+            inspection_data_key=InspectionDataKey.AFTER_RESULT_PATH,
+        )
+        return result_path
+
+    def visitResult_decl(self, ctx: ASLParser.Result_declContext) -> Result:
+        result: Result = super().visitResult_decl(ctx=ctx)
+        result._eval_body = _decorated_updates_inspection_data(
+            method=result._eval_body,
+            inspection_data_key=InspectionDataKey.RESULT,  # noqa
+        )
+        return result
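
The decoration above is plain function wrapping; a reduced sketch of the pattern with hypothetical names (the real wrappers snapshot env.stack[-1] into the environment's inspection data after the wrapped evaluation step runs):

def updates_inspection_data(method, key: str, inspection_data: dict):
    def wrapper(env, *args, **kwargs):
        method(env, *args, **kwargs)  # run the original evaluation step first
        inspection_data[key] = env.stack[-1]  # then record what it produced
    return wrapper
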
diff --git a/localstack/services/stepfunctions/asl/parse/typed_props.py b/localstack-core/localstack/services/stepfunctions/asl/parse/typed_props.py
similarity index 100%
rename from localstack/services/stepfunctions/asl/parse/typed_props.py
rename to localstack-core/localstack/services/stepfunctions/asl/parse/typed_props.py
diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/express_static_analyser.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/express_static_analyser.py
new file mode 100644
index 0000000000000..9242215e23d0d
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/express_static_analyser.py
@@ -0,0 +1,34 @@
+from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser
+from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import (
+    ActivityResource,
+    Resource,
+    ResourceCondition,
+    ServiceResource,
+)
+from localstack.services.stepfunctions.asl.static_analyser.static_analyser import StaticAnalyser
+
+
+class ExpressStaticAnalyser(StaticAnalyser):
+    def visitResource_decl(self, ctx: ASLParser.Resource_declContext) -> None:
+        # TODO: add the resource path to the error messages.
+
+        resource_str: str = ctx.string_literal().getText()[1:-1]
+        resource = Resource.from_resource_arn(resource_str)
+
+        if isinstance(resource, ActivityResource):
+            raise ValueError(
+                "Invalid State Machine Definition: 'SCHEMA_VALIDATION_FAILED: "
+                "Express state machine does not support Activity ARN'"
+            )
+
+        if isinstance(resource, ServiceResource):
+            if resource.condition == ResourceCondition.WaitForTaskToken:
+                raise ValueError(
+                    "Invalid State Machine Definition: 'SCHEMA_VALIDATION_FAILED: "
+                    "Express state machine does not support '.waitForTaskToken' service integration.'"
+                )
+            if resource.condition is not None:
+                raise ValueError(
+                    "Invalid State Machine Definition: 'SCHEMA_VALIDATION_FAILED: "
+                    f"Express state machine does not support '.{resource.condition}' service integration.'"
+                )
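
Usage sketch for the Express analyser above; the ARN and state machine are hypothetical, and the analyser raises during static analysis, before any execution is attempted.

from localstack.services.stepfunctions.asl.static_analyser.express_static_analyser import (
    ExpressStaticAnalyser,
)

definition = (
    '{"StartAt": "A", "States": {"A": {"Type": "Task", "End": true, '
    '"Resource": "arn:aws:states:us-east-1:000000000000:activity:example"}}}'
)
try:
    ExpressStaticAnalyser().analyse(definition=definition)
except ValueError as error:
    print(error)  # ... Express state machine does not support Activity ARN
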
+ ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/intrinsic/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/intrinsic/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/intrinsic/intrinsic_static_analyser.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/intrinsic/intrinsic_static_analyser.py new file mode 100644 index 0000000000000..b3d11c27d0646 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/intrinsic/intrinsic_static_analyser.py @@ -0,0 +1,12 @@ +import abc + +from localstack.services.stepfunctions.asl.antlr.runtime.ASLIntrinsicParserVisitor import ( + ASLIntrinsicParserVisitor, +) +from localstack.services.stepfunctions.asl.parse.intrinsic.intrinsic_parser import IntrinsicParser + + +class IntrinsicStaticAnalyser(ASLIntrinsicParserVisitor, abc.ABC): + def analyse(self, definition: str) -> None: + _, parser_rule_context = IntrinsicParser.parse(definition) + self.visit(parser_rule_context) diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/intrinsic/variable_names_intrinsic_static_analyser.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/intrinsic/variable_names_intrinsic_static_analyser.py new file mode 100644 index 0000000000000..6c4514183bfa3 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/intrinsic/variable_names_intrinsic_static_analyser.py @@ -0,0 +1,41 @@ +from localstack.aws.api.stepfunctions import VariableName, VariableNameList +from localstack.services.stepfunctions.asl.antlr.runtime.ASLIntrinsicParser import ( + ASLIntrinsicParser, +) +from localstack.services.stepfunctions.asl.jsonata.jsonata import ( + VariableReference, + extract_jsonata_variable_references, +) +from localstack.services.stepfunctions.asl.static_analyser.intrinsic.intrinsic_static_analyser import ( + IntrinsicStaticAnalyser, +) + + +class VariableNamesIntrinsicStaticAnalyser(IntrinsicStaticAnalyser): + _variable_names: VariableNameList + + def __init__(self): + super().__init__() + self._variable_names = list() + + @staticmethod + def process_and_get(definition: str) -> VariableNameList: + analyser = VariableNamesIntrinsicStaticAnalyser() + analyser.analyse(definition=definition) + return analyser.get_variable_name_list() + + def get_variable_name_list(self) -> VariableNameList: + return self._variable_names + + def visitFunc_arg_list(self, ctx: ASLIntrinsicParser.Func_arg_listContext) -> None: + # TODO: the extraction logic is not always in the same order as AWS's + for child in ctx.children[::-1]: + self.visit(child) + + def visitFunc_arg_var(self, ctx: ASLIntrinsicParser.Func_arg_varContext) -> None: + variable_references: set[VariableReference] = extract_jsonata_variable_references( + ctx.STRING_VARIABLE().getText() + ) + for variable_reference in variable_references: + variable_name: VariableName = variable_reference[1:] + self._variable_names.append(variable_name) diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/static_analyser.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/static_analyser.py new file mode 100644 index 0000000000000..81b8c576953fe --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/static_analyser.py @@ -0,0 +1,10 @@ +import abc + +from 
localstack.services.stepfunctions.asl.antlr.runtime.ASLParserVisitor import ASLParserVisitor +from localstack.services.stepfunctions.asl.parse.asl_parser import AmazonStateLanguageParser + + +class StaticAnalyser(ASLParserVisitor, abc.ABC): + def analyse(self, definition: str) -> None: + _, parser_rule_context = AmazonStateLanguageParser.parse(definition) + self.visit(parser_rule_context) diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/test_state/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/test_state/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/test_state/test_state_analyser.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/test_state/test_state_analyser.py new file mode 100644 index 0000000000000..79cb80196b54d --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/test_state/test_state_analyser.py @@ -0,0 +1,49 @@ +from typing import Final + +from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import ( + ActivityResource, + Resource, + ServiceResource, +) +from localstack.services.stepfunctions.asl.component.state.state_type import StateType +from localstack.services.stepfunctions.asl.parse.test_state.asl_parser import ( + TestStateAmazonStateLanguageParser, +) +from localstack.services.stepfunctions.asl.static_analyser.static_analyser import StaticAnalyser + + +class TestStateStaticAnalyser(StaticAnalyser): + _SUPPORTED_STATE_TYPES: Final[set[StateType]] = { + StateType.Task, + StateType.Pass, + StateType.Wait, + StateType.Choice, + StateType.Succeed, + StateType.Fail, + } + + def analyse(self, definition) -> None: + _, parser_rule_context = TestStateAmazonStateLanguageParser.parse(definition) + self.visit(parser_rule_context) + + def visitState_type(self, ctx: ASLParser.State_typeContext) -> None: + state_type_value: int = ctx.children[0].symbol.type + state_type = StateType(state_type_value) + if state_type not in self._SUPPORTED_STATE_TYPES: + raise ValueError(f"Unsupported state type for TestState runs '{state_type}'.") + + def visitResource_decl(self, ctx: ASLParser.Resource_declContext) -> None: + resource_str: str = ctx.string_literal().getText()[1:-1] + resource = Resource.from_resource_arn(resource_str) + + if isinstance(resource, ActivityResource): + raise ValueError( + f"ActivityResources are not supported for TestState runs {resource_str}." + ) + + if isinstance(resource, ServiceResource): + if resource.condition is not None: + raise ValueError( + f"Service integration patterns are not supported for TestState runs {resource_str}." 
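+ # Presumably because TestState evaluates a single state synchronously and cannot service callback-based integration patterns.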
+ ) diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/usage_metrics_static_analyser.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/usage_metrics_static_analyser.py new file mode 100644 index 0000000000000..b19fd0d4bf420 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/usage_metrics_static_analyser.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +import logging +from typing import Final + +import localstack.services.stepfunctions.usage as UsageMetrics +from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser +from localstack.services.stepfunctions.asl.component.common.query_language import ( + QueryLanguageMode, +) +from localstack.services.stepfunctions.asl.static_analyser.static_analyser import StaticAnalyser + +LOG = logging.getLogger(__name__) + + +class QueryLanguage(str): + JSONPath = QueryLanguageMode.JSONPath.name + JSONata = QueryLanguageMode.JSONata.name + Both = "JSONPath+JSONata" + + +class UsageMetricsStaticAnalyser(StaticAnalyser): + @staticmethod + def process(definition: str) -> UsageMetricsStaticAnalyser: + analyser = UsageMetricsStaticAnalyser() + try: + # Run the static analyser. + analyser.analyse(definition=definition) + + # Determine which query language is being used in this state machine. + query_modes = analyser.query_language_modes + if len(query_modes) == 2: + language_used = QueryLanguage.Both + elif QueryLanguageMode.JSONata in query_modes: + language_used = QueryLanguage.JSONata + else: + language_used = QueryLanguage.JSONPath + + # Determine whether the state machine uses the variables feature. + uses_variables = analyser.uses_variables + + # Count. + UsageMetrics.language_features_counter.labels( + query_language=language_used, uses_variables=uses_variables + ).increment() + except Exception as e: + LOG.warning( + "Failed to record Step Functions metrics from static analysis", + exc_info=e, + ) + return analyser + + query_language_modes: Final[set[QueryLanguageMode]] + uses_variables: bool + + def __init__(self): + super().__init__() + self.query_language_modes = set() + self.uses_variables = False + + def visitQuery_language_decl(self, ctx: ASLParser.Query_language_declContext): + if len(self.query_language_modes) == 2: + # Both query language modes have been confirmed to be in use. + return + query_language_mode_int = ctx.children[-1].getSymbol().type + query_language_mode = QueryLanguageMode(value=query_language_mode_int) + self.query_language_modes.add(query_language_mode) + + def visitState_decl(self, ctx: ASLParser.State_declContext): + # If, before entering a state, no query language was explicitly enforced, then we know + # this is the first state operating under the default mode (JSONPath). + if not self.query_language_modes: + self.query_language_modes.add(QueryLanguageMode.JSONPath) + super().visitState_decl(ctx=ctx) + + def visitString_literal(self, ctx: ASLParser.String_literalContext): + # Prune everything parsed as a string literal.
+ return + + def visitString_variable_sample(self, ctx: ASLParser.String_variable_sampleContext): + self.uses_variables = True + + def visitAssign_decl(self, ctx: ASLParser.Assign_declContext): + self.uses_variables = True diff --git a/localstack-core/localstack/services/stepfunctions/asl/static_analyser/variable_references_static_analyser.py b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/variable_references_static_analyser.py new file mode 100644 index 0000000000000..93edc9a06a97f --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/static_analyser/variable_references_static_analyser.py @@ -0,0 +1,82 @@ +from collections import OrderedDict +from typing import Final + +from localstack.aws.api.stepfunctions import ( + StateName, + VariableName, + VariableNameList, + VariableReferences, +) +from localstack.services.stepfunctions.asl.antlr.runtime.ASLParser import ASLParser +from localstack.services.stepfunctions.asl.jsonata.jsonata import ( + VariableReference, + extract_jsonata_variable_references, +) +from localstack.services.stepfunctions.asl.static_analyser.intrinsic.variable_names_intrinsic_static_analyser import ( + VariableNamesIntrinsicStaticAnalyser, +) +from localstack.services.stepfunctions.asl.static_analyser.static_analyser import StaticAnalyser + + +class VariableReferencesStaticAnalyser(StaticAnalyser): + @staticmethod + def process_and_get(definition: str) -> VariableReferences: + analyser = VariableReferencesStaticAnalyser() + analyser.analyse(definition=definition) + return analyser.get_variable_references() + + _fringe_state_names: Final[list[StateName]] + _variable_references: Final[VariableReferences] + + def __init__(self): + super().__init__() + self._fringe_state_names = list() + self._variable_references = OrderedDict() + + def get_variable_references(self) -> VariableReferences: + return self._variable_references + + def _enter_state(self, state_name: StateName) -> None: + self._fringe_state_names.append(state_name) + + def _exit_state(self) -> None: + self._fringe_state_names.pop() + + def visitState_decl(self, ctx: ASLParser.State_declContext) -> None: + state_name: str = ctx.string_literal().getText()[1:-1] + self._enter_state(state_name=state_name) + super().visitState_decl(ctx=ctx) + self._exit_state() + + def _put_variable_reference(self, variable_reference: VariableReference) -> None: + variable_name: VariableName = variable_reference[1:] + self._put_variable_name(variable_name) + + def _put_variable_name(self, variable_name: VariableName) -> None: + state_name = self._fringe_state_names[-1] + variable_name_list: VariableNameList = self._variable_references.get(state_name, list()) + if variable_name in variable_name_list: + return + variable_name_list.append(variable_name) + if state_name not in self._variable_references: + self._variable_references[state_name] = variable_name_list + + def visitString_variable_sample(self, ctx: ASLParser.String_variable_sampleContext): + reference_body = ctx.getText()[1:-1] + variable_references: set[VariableReference] = extract_jsonata_variable_references( + reference_body + ) + for variable_reference in variable_references: + self._put_variable_reference(variable_reference) + + def visitString_intrinsic_function(self, ctx: ASLParser.String_intrinsic_functionContext): + definition_body = ctx.getText()[1:-1] + variable_name_list: VariableNameList = VariableNamesIntrinsicStaticAnalyser.process_and_get( + definition_body + ) + for variable_name in variable_name_list: + 
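+ # e.g. an intrinsic such as States.Format('{}', $x) contributes the variable name 'x'.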
self._put_variable_name(variable_name) + + def visitString_literal(self, ctx: ASLParser.String_literalContext): + # Prune everything parsed as a string literal. + return diff --git a/localstack-core/localstack/services/stepfunctions/asl/utils/__init__.py b/localstack-core/localstack/services/stepfunctions/asl/utils/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/asl/utils/boto_client.py b/localstack-core/localstack/services/stepfunctions/asl/utils/boto_client.py new file mode 100644 index 0000000000000..c7facf1bb532c --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/utils/boto_client.py @@ -0,0 +1,27 @@ +from botocore.client import BaseClient +from botocore.config import Config + +from localstack.aws.connect import connect_to +from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import TimeoutSeconds +from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.credentials import ( + StateCredentials, +) +from localstack.utils.aws.client_types import ServicePrincipal + +_BOTO_CLIENT_CONFIG = config = Config( + parameter_validation=False, + retries={"total_max_attempts": 1}, + connect_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS, + read_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS, + tcp_keepalive=True, +) + + +def boto_client_for(service: str, region: str, state_credentials: StateCredentials) -> BaseClient: + client_factory = connect_to.with_assumed_role( + role_arn=state_credentials.role_arn, + service_principal=ServicePrincipal.states, + region_name=region, + config=_BOTO_CLIENT_CONFIG, + ) + return client_factory.get_client(service=service) diff --git a/localstack-core/localstack/services/stepfunctions/asl/utils/encoding.py b/localstack-core/localstack/services/stepfunctions/asl/utils/encoding.py new file mode 100644 index 0000000000000..893db6cc28f44 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/utils/encoding.py @@ -0,0 +1,16 @@ +import datetime +import json +from json import JSONEncoder +from typing import Any, Optional + + +class _DateTimeEncoder(JSONEncoder): + def default(self, obj): + if isinstance(obj, (datetime.date, datetime.datetime)): + return obj.isoformat() + else: + return str(obj) + + +def to_json_str(obj: Any, separators: Optional[tuple[str, str]] = None) -> str: + return json.dumps(obj, cls=_DateTimeEncoder, separators=separators) diff --git a/localstack-core/localstack/services/stepfunctions/asl/utils/json_path.py b/localstack-core/localstack/services/stepfunctions/asl/utils/json_path.py new file mode 100644 index 0000000000000..2447458683daf --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/asl/utils/json_path.py @@ -0,0 +1,66 @@ +import re +from typing import Any, Final, Optional + +from jsonpath_ng.ext import parse +from jsonpath_ng.jsonpath import Index + +from localstack.services.events.utils import to_json_str + +_PATTERN_SINGLETON_ARRAY_ACCESS_OUTPUT: Final[str] = r"\[\d+\]$" +_PATTERN_SLICE_OR_WILDCARD_ACCESS = r"\$(?:\.[^[]+\[(?:\*|\d*:\d*)\]|\[\*\])(?:\.[^[]+)*$" + + +def _is_singleton_array_access(path: str) -> bool: + # Returns true if the json path terminates with a literal singleton array access. + return bool(re.search(_PATTERN_SINGLETON_ARRAY_ACCESS_OUTPUT, path)) + + +def _contains_slice_or_wildcard_array(path: str) -> bool: + # Returns true if the json path contains a slice or wildcard in the array. 
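+ # e.g. '$.items[*].name' and '$.items[1:3]' yield a match, while a plain index access such as '$.items[0]' does not.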
+ # Slices at the root are discarded, but wildcard at the root is allowed. + return bool(re.search(_PATTERN_SLICE_OR_WILDCARD_ACCESS, path)) + + +class NoSuchJsonPathError(Exception): + json_path: Final[str] + data: Final[Any] + _message: Optional[str] + + def __init__(self, json_path: str, data: Any): + self.json_path = json_path + self.data = data + self._message = None + + @property + def message(self) -> str: + if self._message is None: + data_json_str = to_json_str(self.data) + self._message = ( + f"The JSONPath '{self.json_path}' could not be found in the input '{data_json_str}'" + ) + return self._message + + def __str__(self): + return self.message + + +def extract_json(path: str, data: Any) -> Any: + input_expr = parse(path) + + matches = input_expr.find(data) + if not matches: + if _contains_slice_or_wildcard_array(path): + return [] + raise NoSuchJsonPathError(json_path=path, data=data) + + if len(matches) > 1 or isinstance(matches[0].path, Index): + value = [match.value for match in matches] + + # AWS StepFunctions breaks jsonpath specifications and instead + # unpacks literal singleton array accesses. + if _is_singleton_array_access(path=path) and len(value) == 1: + value = value[0] + else: + value = matches[0].value + + return value diff --git a/localstack-core/localstack/services/stepfunctions/backend/__init__.py b/localstack-core/localstack/services/stepfunctions/backend/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/backend/activity.py b/localstack-core/localstack/services/stepfunctions/backend/activity.py new file mode 100644 index 0000000000000..8800dbd3fa122 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/backend/activity.py @@ -0,0 +1,49 @@ +import datetime +from collections import deque +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import ( + ActivityListItem, + Arn, + DescribeActivityOutput, + Name, + Timestamp, +) + + +class ActivityTask: + task_input: Final[str] + task_token: Final[str] + + def __init__(self, task_token: str, task_input: str): + self.task_token = task_token + self.task_input = task_input + + +class Activity: + arn: Final[Arn] + name: Final[Name] + creation_date: Final[Timestamp] + _tasks: Final[deque[ActivityTask]] + + def __init__(self, arn: Arn, name: Name, creation_date: Optional[Timestamp] = None): + self.arn = arn + self.name = name + self.creation_date = creation_date or datetime.datetime.now(tz=datetime.timezone.utc) + self._tasks = deque() + + def add_task(self, task: ActivityTask): + self._tasks.append(task) + + def get_task(self) -> Optional[ActivityTask]: + return self._tasks.popleft() + + def to_describe_activity_output(self) -> DescribeActivityOutput: + return DescribeActivityOutput( + activityArn=self.arn, name=self.name, creationDate=self.creation_date + ) + + def to_activity_list_item(self) -> ActivityListItem: + return ActivityListItem( + activityArn=self.arn, name=self.name, creationDate=self.creation_date + ) diff --git a/localstack-core/localstack/services/stepfunctions/backend/alias.py b/localstack-core/localstack/services/stepfunctions/backend/alias.py new file mode 100644 index 0000000000000..155890abf4cb3 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/backend/alias.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +import copy +import datetime +import random +import threading +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import 
( + AliasDescription, + Arn, + CharacterRestrictedName, + DescribeStateMachineAliasOutput, + PageToken, + RoutingConfigurationList, + StateMachineAliasListItem, +) +from localstack.utils.strings import token_generator + + +class Alias: + _mutex: Final[threading.Lock] + update_date: Optional[datetime.datetime] + name: Final[CharacterRestrictedName] + _description: Optional[AliasDescription] + _routing_configuration_list: RoutingConfigurationList + _state_machine_version_arns: list[Arn] + _execution_probability_distribution: list[int] + state_machine_alias_arn: Final[Arn] + tokenized_state_machine_alias_arn: Final[PageToken] + create_date: datetime.datetime + + def __init__( + self, + state_machine_arn: Arn, + name: CharacterRestrictedName, + description: Optional[AliasDescription], + routing_configuration_list: RoutingConfigurationList, + ): + self._mutex = threading.Lock() + self.update_date = None + self.name = name + self._description = None + self.state_machine_alias_arn = f"{state_machine_arn}:{name}" + self.tokenized_state_machine_alias_arn = token_generator(self.state_machine_alias_arn) + self.update(description=description, routing_configuration_list=routing_configuration_list) + self.create_date = self._get_mutex_date() + + def __hash__(self): + return hash(self.state_machine_alias_arn) + + def __eq__(self, other): + if isinstance(other, Alias): + return self.is_idempotent(other=other) + return False + + def is_idempotent(self, other: Alias) -> bool: + return all( + [ + self.state_machine_alias_arn == other.state_machine_alias_arn, + self.name == other.name, + self._description == other._description, + self._routing_configuration_list == other._routing_configuration_list, + ] + ) + + @staticmethod + def _get_mutex_date() -> datetime.datetime: + return datetime.datetime.now(tz=datetime.timezone.utc) + + def get_routing_configuration_list(self) -> RoutingConfigurationList: + return copy.deepcopy(self._routing_configuration_list) + + def is_router_for(self, state_machine_version_arn: Arn) -> bool: + with self._mutex: + return state_machine_version_arn in self._state_machine_version_arns + + def update( + self, + description: Optional[AliasDescription], + routing_configuration_list: RoutingConfigurationList, + ) -> None: + with self._mutex: + self.update_date = self._get_mutex_date() + + if description is not None: + self._description = description + + if routing_configuration_list: + self._routing_configuration_list = routing_configuration_list + self._state_machine_version_arns = list() + self._execution_probability_distribution = list() + for routing_configuration in routing_configuration_list: + self._state_machine_version_arns.append( + routing_configuration["stateMachineVersionArn"] + ) + self._execution_probability_distribution.append(routing_configuration["weight"]) + + def sample(self): + with self._mutex: + samples = random.choices( + self._state_machine_version_arns, + weights=self._execution_probability_distribution, + k=1, + ) + state_machine_version_arn = samples[0] + return state_machine_version_arn + + def to_description(self) -> DescribeStateMachineAliasOutput: + with self._mutex: + description = DescribeStateMachineAliasOutput( + creationDate=self.create_date, + name=self.name, + description=self._description, + routingConfiguration=self._routing_configuration_list, + stateMachineAliasArn=self.state_machine_alias_arn, + ) + if self.update_date is not None: + description["updateDate"] = self.update_date + return description + + def to_item(self) -> 
StateMachineAliasListItem: + return StateMachineAliasListItem( + stateMachineAliasArn=self.state_machine_alias_arn, creationDate=self.create_date + ) diff --git a/localstack-core/localstack/services/stepfunctions/backend/execution.py b/localstack-core/localstack/services/stepfunctions/backend/execution.py new file mode 100644 index 0000000000000..76090c7981944 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/backend/execution.py @@ -0,0 +1,420 @@ +from __future__ import annotations + +import datetime +import json +import logging +from typing import Final, Optional + +from localstack.aws.api.events import PutEventsRequestEntry +from localstack.aws.api.stepfunctions import ( + Arn, + CloudWatchEventsExecutionDataDetails, + DescribeExecutionOutput, + DescribeStateMachineForExecutionOutput, + ExecutionListItem, + ExecutionStatus, + GetExecutionHistoryOutput, + HistoryEventList, + InvalidName, + SensitiveCause, + SensitiveError, + StartExecutionOutput, + StartSyncExecutionOutput, + StateMachineType, + SyncExecutionStatus, + Timestamp, + TraceHeader, + VariableReferences, +) +from localstack.aws.connect import connect_to +from localstack.services.stepfunctions.asl.eval.evaluation_details import ( + AWSExecutionDetails, + EvaluationDetails, + ExecutionDetails, + StateMachineDetails, +) +from localstack.services.stepfunctions.asl.eval.event.logging import ( + CloudWatchLoggingSession, +) +from localstack.services.stepfunctions.asl.eval.program_state import ( + ProgramEnded, + ProgramError, + ProgramState, + ProgramStopped, + ProgramTimedOut, +) +from localstack.services.stepfunctions.asl.static_analyser.variable_references_static_analyser import ( + VariableReferencesStaticAnalyser, +) +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.services.stepfunctions.backend.activity import Activity +from localstack.services.stepfunctions.backend.execution_worker import ( + ExecutionWorker, + SyncExecutionWorker, +) +from localstack.services.stepfunctions.backend.execution_worker_comm import ( + ExecutionWorkerCommunication, +) +from localstack.services.stepfunctions.backend.state_machine import ( + StateMachineInstance, + StateMachineVersion, +) +from localstack.services.stepfunctions.mocking.mock_config import MockTestCase + +LOG = logging.getLogger(__name__) + + +class BaseExecutionWorkerCommunication(ExecutionWorkerCommunication): + execution: Final[Execution] + + def __init__(self, execution: Execution): + self.execution = execution + + def _reflect_execution_status(self): + exit_program_state: ProgramState = self.execution.exec_worker.env.program_state() + self.execution.stop_date = datetime.datetime.now(tz=datetime.timezone.utc) + if isinstance(exit_program_state, ProgramEnded): + self.execution.exec_status = ExecutionStatus.SUCCEEDED + self.execution.output = self.execution.exec_worker.env.states.get_input() + elif isinstance(exit_program_state, ProgramStopped): + self.execution.exec_status = ExecutionStatus.ABORTED + elif isinstance(exit_program_state, ProgramError): + self.execution.exec_status = ExecutionStatus.FAILED + self.execution.error = exit_program_state.error.get("error") + self.execution.cause = exit_program_state.error.get("cause") + elif isinstance(exit_program_state, ProgramTimedOut): + self.execution.exec_status = ExecutionStatus.TIMED_OUT + else: + raise RuntimeWarning( + f"Execution ended with unsupported ProgramState type '{type(exit_program_state)}'." 
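+ # Note that RuntimeWarning is raised as an exception here rather than emitted via the warnings module.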
+ ) + + def terminated(self) -> None: + self._reflect_execution_status() + self.execution.publish_execution_status_change_event() + + +class Execution: + name: Final[str] + sm_type: Final[StateMachineType] + role_arn: Final[Arn] + exec_arn: Final[Arn] + + account_id: str + region_name: str + + state_machine: Final[StateMachineInstance] + state_machine_arn: Final[Arn] + state_machine_version_arn: Final[Optional[Arn]] + state_machine_alias_arn: Final[Optional[Arn]] + + mock_test_case: Final[Optional[MockTestCase]] + + start_date: Final[Timestamp] + input_data: Final[Optional[json]] + input_details: Final[Optional[CloudWatchEventsExecutionDataDetails]] + trace_header: Final[Optional[TraceHeader]] + _cloud_watch_logging_session: Final[Optional[CloudWatchLoggingSession]] + + exec_status: Optional[ExecutionStatus] + stop_date: Optional[Timestamp] + + output: Optional[json] + output_details: Optional[CloudWatchEventsExecutionDataDetails] + + error: Optional[SensitiveError] + cause: Optional[SensitiveCause] + + exec_worker: Optional[ExecutionWorker] + + _activity_store: dict[Arn, Activity] + + def __init__( + self, + name: str, + sm_type: StateMachineType, + role_arn: Arn, + exec_arn: Arn, + account_id: str, + region_name: str, + state_machine: StateMachineInstance, + start_date: Timestamp, + cloud_watch_logging_session: Optional[CloudWatchLoggingSession], + activity_store: dict[Arn, Activity], + input_data: Optional[json] = None, + trace_header: Optional[TraceHeader] = None, + state_machine_alias_arn: Optional[Arn] = None, + mock_test_case: Optional[MockTestCase] = None, + ): + self.name = name + self.sm_type = sm_type + self.role_arn = role_arn + self.exec_arn = exec_arn + self.account_id = account_id + self.region_name = region_name + self.state_machine = state_machine + if isinstance(state_machine, StateMachineVersion): + self.state_machine_arn = state_machine.source_arn + self.state_machine_version_arn = state_machine.arn + else: + self.state_machine_arn = state_machine.arn + self.state_machine_version_arn = None + self.state_machine_alias_arn = state_machine_alias_arn + self.start_date = start_date + self._cloud_watch_logging_session = cloud_watch_logging_session + self.input_data = input_data + self.input_details = CloudWatchEventsExecutionDataDetails(included=True) + self.trace_header = trace_header + self.exec_status = None + self.stop_date = None + self.output = None + self.output_details = CloudWatchEventsExecutionDataDetails(included=True) + self.exec_worker = None + self.error = None + self.cause = None + self._activity_store = activity_store + self.mock_test_case = mock_test_case + + def _get_events_client(self): + return connect_to(aws_access_key_id=self.account_id, region_name=self.region_name).events + + def to_start_output(self) -> StartExecutionOutput: + return StartExecutionOutput(executionArn=self.exec_arn, startDate=self.start_date) + + def to_describe_output(self) -> DescribeExecutionOutput: + describe_output = DescribeExecutionOutput( + executionArn=self.exec_arn, + stateMachineArn=self.state_machine_arn, + name=self.name, + status=self.exec_status, + startDate=self.start_date, + stopDate=self.stop_date, + input=to_json_str(self.input_data, separators=(",", ":")), + inputDetails=self.input_details, + traceHeader=self.trace_header, + ) + if describe_output["status"] == ExecutionStatus.SUCCEEDED: + describe_output["output"] = to_json_str(self.output, separators=(",", ":")) + describe_output["outputDetails"] = self.output_details + if self.error is not None: + 
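+ # Optional fields are serialized only when set, so absent values are omitted from the describe output.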
describe_output["error"] = self.error + if self.cause is not None: + describe_output["cause"] = self.cause + if self.state_machine_version_arn is not None: + describe_output["stateMachineVersionArn"] = self.state_machine_version_arn + if self.state_machine_alias_arn is not None: + describe_output["stateMachineAliasArn"] = self.state_machine_alias_arn + return describe_output + + def to_describe_state_machine_for_execution_output( + self, + ) -> DescribeStateMachineForExecutionOutput: + state_machine: StateMachineInstance = self.state_machine + state_machine_arn = ( + state_machine.source_arn + if isinstance(state_machine, StateMachineVersion) + else state_machine.arn + ) + out = DescribeStateMachineForExecutionOutput( + stateMachineArn=state_machine_arn, + name=state_machine.name, + definition=state_machine.definition, + roleArn=self.role_arn, + # The date and time the state machine associated with an execution was updated. + updateDate=state_machine.create_date, + loggingConfiguration=state_machine.logging_config, + ) + revision_id = self.state_machine.revision_id + if self.state_machine.revision_id: + out["revisionId"] = revision_id + variable_references: VariableReferences = VariableReferencesStaticAnalyser.process_and_get( + definition=self.state_machine.definition + ) + if variable_references: + out["variableReferences"] = variable_references + return out + + def to_execution_list_item(self) -> ExecutionListItem: + if isinstance(self.state_machine, StateMachineVersion): + state_machine_arn = self.state_machine.source_arn + state_machine_version_arn = self.state_machine.arn + else: + state_machine_arn = self.state_machine.arn + state_machine_version_arn = None + + item = ExecutionListItem( + executionArn=self.exec_arn, + stateMachineArn=state_machine_arn, + name=self.name, + status=self.exec_status, + startDate=self.start_date, + stopDate=self.stop_date, + ) + if state_machine_version_arn is not None: + item["stateMachineVersionArn"] = state_machine_version_arn + if self.state_machine_alias_arn is not None: + item["stateMachineAliasArn"] = self.state_machine_alias_arn + return item + + def to_history_output(self) -> GetExecutionHistoryOutput: + env = self.exec_worker.env + event_history: HistoryEventList = list() + if env is not None: + # The execution has not started yet. 
+ event_history: HistoryEventList = env.event_manager.get_event_history() + return GetExecutionHistoryOutput(events=event_history) + + @staticmethod + def _to_serialized_date(timestamp: datetime.datetime) -> str: + """See test in tests.aws.services.stepfunctions.v2.base.test_base.TestSnfBase.test_execution_dateformat""" + return ( + f"{timestamp.astimezone(datetime.timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]}Z" + ) + + def _get_start_execution_worker_comm(self) -> BaseExecutionWorkerCommunication: + return BaseExecutionWorkerCommunication(self) + + def _get_start_aws_execution_details(self) -> AWSExecutionDetails: + return AWSExecutionDetails( + account=self.account_id, region=self.region_name, role_arn=self.role_arn + ) + + def get_start_execution_details(self) -> ExecutionDetails: + return ExecutionDetails( + arn=self.exec_arn, + name=self.name, + role_arn=self.role_arn, + inpt=self.input_data, + start_time=self._to_serialized_date(self.start_date), + ) + + def get_start_state_machine_details(self) -> StateMachineDetails: + return StateMachineDetails( + arn=self.state_machine.arn, + name=self.state_machine.name, + typ=self.state_machine.sm_type, + definition=self.state_machine.definition, + ) + + def _get_start_execution_worker(self) -> ExecutionWorker: + return ExecutionWorker( + evaluation_details=EvaluationDetails( + aws_execution_details=self._get_start_aws_execution_details(), + execution_details=self.get_start_execution_details(), + state_machine_details=self.get_start_state_machine_details(), + ), + exec_comm=self._get_start_execution_worker_comm(), + cloud_watch_logging_session=self._cloud_watch_logging_session, + activity_store=self._activity_store, + mock_test_case=self.mock_test_case, + ) + + def start(self) -> None: + # TODO: check that exec_worker does not exist already. + if self.exec_worker: + raise InvalidName() # TODO. + self.exec_worker = self._get_start_execution_worker() + self.exec_status = ExecutionStatus.RUNNING + self.publish_execution_status_change_event() + self.exec_worker.start() + + def stop(self, stop_date: datetime.datetime, error: Optional[str], cause: Optional[str]): + exec_worker: Optional[ExecutionWorker] = self.exec_worker + if exec_worker: + exec_worker.stop(stop_date=stop_date, cause=cause, error=error) + + def publish_execution_status_change_event(self): + input_value = ( + dict() if not self.input_data else to_json_str(self.input_data, separators=(",", ":")) + ) + output_value = ( + None if self.output is None else to_json_str(self.output, separators=(",", ":")) + ) + output_details = None if output_value is None else self.output_details + entry = PutEventsRequestEntry( + Source="aws.states", + Resources=[self.exec_arn], + DetailType="Step Functions Execution Status Change", + Detail=to_json_str( + # Note: this operation carries significant changes from a describe_execution request.
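+ # For example, unset optional fields such as the alias and version ARNs are serialized as None here, rather than omitted as in to_describe_output.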
+ DescribeExecutionOutput( + executionArn=self.exec_arn, + stateMachineArn=self.state_machine.arn, + stateMachineAliasArn=None, + stateMachineVersionArn=None, + name=self.name, + status=self.exec_status, + startDate=self.start_date, + stopDate=self.stop_date, + input=input_value, + inputDetails=self.input_details, + output=output_value, + outputDetails=output_details, + error=self.error, + cause=self.cause, + ) + ), + ) + try: + self._get_events_client().put_events(Entries=[entry]) + except Exception: + LOG.exception( + "Unable to send notification of Entry='%s' for Step Function execution with Arn='%s' to EventBridge.", + entry, + self.exec_arn, + ) + + +class SyncExecutionWorkerCommunication(BaseExecutionWorkerCommunication): + execution: Final[SyncExecution] + + def _reflect_execution_status(self) -> None: + super()._reflect_execution_status() + exit_status: ExecutionStatus = self.execution.exec_status + if exit_status == ExecutionStatus.SUCCEEDED: + self.execution.sync_execution_status = SyncExecutionStatus.SUCCEEDED + elif exit_status == ExecutionStatus.TIMED_OUT: + self.execution.sync_execution_status = SyncExecutionStatus.TIMED_OUT + else: + self.execution.sync_execution_status = SyncExecutionStatus.FAILED + + +class SyncExecution(Execution): + sync_execution_status: Optional[SyncExecutionStatus] = None + + def _get_start_execution_worker(self) -> SyncExecutionWorker: + return SyncExecutionWorker( + evaluation_details=EvaluationDetails( + aws_execution_details=self._get_start_aws_execution_details(), + execution_details=self.get_start_execution_details(), + state_machine_details=self.get_start_state_machine_details(), + ), + exec_comm=self._get_start_execution_worker_comm(), + cloud_watch_logging_session=self._cloud_watch_logging_session, + activity_store=self._activity_store, + ) + + def _get_start_execution_worker_comm(self) -> BaseExecutionWorkerCommunication: + return SyncExecutionWorkerCommunication(self) + + def to_start_sync_execution_output(self) -> StartSyncExecutionOutput: + start_output = StartSyncExecutionOutput( + executionArn=self.exec_arn, + stateMachineArn=self.state_machine.arn, + name=self.name, + status=self.sync_execution_status, + startDate=self.start_date, + stopDate=self.stop_date, + input=to_json_str(self.input_data, separators=(",", ":")), + inputDetails=self.input_details, + traceHeader=self.trace_header, + ) + if self.sync_execution_status == SyncExecutionStatus.SUCCEEDED: + start_output["output"] = to_json_str(self.output, separators=(",", ":")) + if self.output_details: + start_output["outputDetails"] = self.output_details + if self.error is not None: + start_output["error"] = self.error + if self.cause is not None: + start_output["cause"] = self.cause + return start_output diff --git a/localstack-core/localstack/services/stepfunctions/backend/execution_worker.py b/localstack-core/localstack/services/stepfunctions/backend/execution_worker.py new file mode 100644 index 0000000000000..c2d14c2085295 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/backend/execution_worker.py @@ -0,0 +1,123 @@ +import datetime +from threading import Thread +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import ( + Arn, + ExecutionStartedEventDetails, + HistoryEventExecutionDataDetails, + HistoryEventType, +) +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment +from 
localstack.services.stepfunctions.asl.eval.evaluation_details import EvaluationDetails +from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails +from localstack.services.stepfunctions.asl.eval.event.event_manager import ( + EventHistoryContext, +) +from localstack.services.stepfunctions.asl.eval.event.logging import ( + CloudWatchLoggingSession, +) +from localstack.services.stepfunctions.asl.eval.states import ( + ContextObjectData, + ExecutionData, + StateMachineData, +) +from localstack.services.stepfunctions.asl.parse.asl_parser import AmazonStateLanguageParser +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.services.stepfunctions.backend.activity import Activity +from localstack.services.stepfunctions.backend.execution_worker_comm import ( + ExecutionWorkerCommunication, +) +from localstack.services.stepfunctions.mocking.mock_config import MockTestCase +from localstack.utils.common import TMP_THREADS + + +class ExecutionWorker: + _evaluation_details: Final[EvaluationDetails] + _execution_communication: Final[ExecutionWorkerCommunication] + _cloud_watch_logging_session: Final[Optional[CloudWatchLoggingSession]] + _mock_test_case: Final[Optional[MockTestCase]] + _activity_store: dict[Arn, Activity] + + env: Optional[Environment] + + def __init__( + self, + evaluation_details: EvaluationDetails, + exec_comm: ExecutionWorkerCommunication, + cloud_watch_logging_session: Optional[CloudWatchLoggingSession], + activity_store: dict[Arn, Activity], + mock_test_case: Optional[MockTestCase] = None, + ): + self._evaluation_details = evaluation_details + self._execution_communication = exec_comm + self._cloud_watch_logging_session = cloud_watch_logging_session + self._mock_test_case = mock_test_case + self._activity_store = activity_store + self.env = None + + def _get_evaluation_entrypoint(self) -> EvalComponent: + return AmazonStateLanguageParser.parse( + self._evaluation_details.state_machine_details.definition + )[0] + + def _get_evaluation_environment(self) -> Environment: + return Environment( + aws_execution_details=self._evaluation_details.aws_execution_details, + execution_type=self._evaluation_details.state_machine_details.typ, + context=ContextObjectData( + Execution=ExecutionData( + Id=self._evaluation_details.execution_details.arn, + Input=self._evaluation_details.execution_details.inpt, + Name=self._evaluation_details.execution_details.name, + RoleArn=self._evaluation_details.execution_details.role_arn, + StartTime=self._evaluation_details.execution_details.start_time, + ), + StateMachine=StateMachineData( + Id=self._evaluation_details.state_machine_details.arn, + Name=self._evaluation_details.state_machine_details.name, + ), + ), + event_history_context=EventHistoryContext.of_program_start(), + cloud_watch_logging_session=self._cloud_watch_logging_session, + activity_store=self._activity_store, + mock_test_case=self._mock_test_case, + ) + + def _execution_logic(self): + program = self._get_evaluation_entrypoint() + self.env = self._get_evaluation_environment() + + self.env.event_manager.add_event( + context=self.env.event_history_context, + event_type=HistoryEventType.ExecutionStarted, + event_details=EventDetails( + executionStartedEventDetails=ExecutionStartedEventDetails( + input=to_json_str(self._evaluation_details.execution_details.inpt), + inputDetails=HistoryEventExecutionDataDetails( + truncated=False + ), # Always False for api calls. 
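+ # The started event is recorded before program evaluation begins, so it is the first entry in the execution history.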
+ roleArn=self._evaluation_details.aws_execution_details.role_arn, + ) + ), + update_source_event_id=False, + ) + + program.eval(self.env) + + self._execution_communication.terminated() + + def start(self): + execution_logic_thread = Thread(target=self._execution_logic, daemon=True) + TMP_THREADS.append(execution_logic_thread) + execution_logic_thread.start() + + def stop(self, stop_date: datetime.datetime, error: Optional[str], cause: Optional[str]): + self.env.set_stop(stop_date=stop_date, cause=cause, error=error) + + +class SyncExecutionWorker(ExecutionWorker): + def start(self): + # bypass the native async execution of ASL programs. + self._execution_logic() diff --git a/localstack/services/stepfunctions/backend/execution_worker_comm.py b/localstack-core/localstack/services/stepfunctions/backend/execution_worker_comm.py similarity index 78% rename from localstack/services/stepfunctions/backend/execution_worker_comm.py rename to localstack-core/localstack/services/stepfunctions/backend/execution_worker_comm.py index 4602d13394690..c2e1d74849bbe 100644 --- a/localstack/services/stepfunctions/backend/execution_worker_comm.py +++ b/localstack-core/localstack/services/stepfunctions/backend/execution_worker_comm.py @@ -1,7 +1,7 @@ import abc -class ExecutionWorkerComm(abc.ABC): +class ExecutionWorkerCommunication(abc.ABC): """ Defines abstract callbacks for Execution's workers to report their progress, such as termination. Execution instances define custom callbacks routines to update their state according to the latest @@ -9,5 +9,4 @@ class ExecutionWorkerComm(abc.ABC): """ @abc.abstractmethod - def terminated(self) -> None: - ... + def terminated(self) -> None: ... diff --git a/localstack-core/localstack/services/stepfunctions/backend/state_machine.py b/localstack-core/localstack/services/stepfunctions/backend/state_machine.py new file mode 100644 index 0000000000000..71c82f55c881c --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/backend/state_machine.py @@ -0,0 +1,294 @@ +from __future__ import annotations + +import abc +import datetime +import json +from collections import OrderedDict +from typing import Final, Optional + +from localstack.aws.api.stepfunctions import ( + Arn, + Definition, + DescribeStateMachineOutput, + LoggingConfiguration, + Name, + RevisionId, + StateMachineListItem, + StateMachineStatus, + StateMachineType, + StateMachineVersionListItem, + Tag, + TagKeyList, + TagList, + TracingConfiguration, + ValidationException, + VariableReferences, +) +from localstack.services.stepfunctions.asl.eval.event.logging import ( + CloudWatchLoggingConfiguration, +) +from localstack.services.stepfunctions.asl.static_analyser.variable_references_static_analyser import ( + VariableReferencesStaticAnalyser, +) +from localstack.services.stepfunctions.backend.alias import Alias +from localstack.utils.strings import long_uid + + +class StateMachineInstance: + name: Name + arn: Arn + revision_id: Optional[RevisionId] + definition: Definition + role_arn: Arn + create_date: datetime.datetime + sm_type: StateMachineType + logging_config: LoggingConfiguration + cloud_watch_logging_configuration: Optional[CloudWatchLoggingConfiguration] + tags: Optional[TagList] + tracing_config: Optional[TracingConfiguration] + + def __init__( + self, + name: Name, + arn: Arn, + definition: Definition, + role_arn: Arn, + logging_config: LoggingConfiguration, + cloud_watch_logging_configuration: Optional[CloudWatchLoggingConfiguration] = None, + create_date: Optional[datetime.datetime] = 
None, + sm_type: Optional[StateMachineType] = None, + tags: Optional[TagList] = None, + tracing_config: Optional[TracingConfiguration] = None, + ): + self.name = name + self.arn = arn + self.revision_id = None + self.definition = definition + self.role_arn = role_arn + self.create_date = create_date or datetime.datetime.now(tz=datetime.timezone.utc) + self.sm_type = sm_type or StateMachineType.STANDARD + self.logging_config = logging_config + self.cloud_watch_logging_configuration = cloud_watch_logging_configuration + self.tags = tags + self.tracing_config = tracing_config + + def describe(self) -> DescribeStateMachineOutput: + describe_output = DescribeStateMachineOutput( + stateMachineArn=self.arn, + name=self.name, + status=StateMachineStatus.ACTIVE, + definition=self.definition, + roleArn=self.role_arn, + type=self.sm_type, + creationDate=self.create_date, + loggingConfiguration=self.logging_config, + ) + + if self.revision_id: + describe_output["revisionId"] = self.revision_id + + variable_references: VariableReferences = VariableReferencesStaticAnalyser.process_and_get( + definition=self.definition + ) + if variable_references: + describe_output["variableReferences"] = variable_references + + return describe_output + + @abc.abstractmethod + def itemise(self): ... + + +class TestStateMachine(StateMachineInstance): + def __init__( + self, + name: Name, + arn: Arn, + definition: Definition, + role_arn: Arn, + create_date: Optional[datetime.datetime] = None, + ): + super().__init__( + name=name, + arn=arn, + definition=definition, + role_arn=role_arn, + logging_config=None, + cloud_watch_logging_configuration=None, + create_date=create_date, + sm_type=StateMachineType.STANDARD, + ) + + def itemise(self): + raise NotImplementedError("TestStateMachine does not support itemise.") + + +class TagManager: + _tags: Final[dict[str, Optional[str]]] + + def __init__(self): + self._tags = OrderedDict() + + @staticmethod + def _validate_key_value(key: str) -> None: + if not key: + raise ValidationException() + + @staticmethod + def _validate_tag_value(value: str) -> None: + if value is None: + raise ValidationException() + + def add_all(self, tags: TagList) -> None: + for tag in tags: + tag_key = tag["key"] + tag_value = tag["value"] + self._validate_key_value(key=tag_key) + self._validate_tag_value(value=tag_value) + self._tags[tag_key] = tag_value + + def remove_all(self, keys: TagKeyList): + for key in keys: + self._validate_key_value(key=key) + self._tags.pop(key, None) + + def to_tag_list(self) -> TagList: + tag_list = list() + for key, value in self._tags.items(): + tag_list.append(Tag(key=key, value=value)) + return tag_list + + +class StateMachineRevision(StateMachineInstance): + _version_number: int + versions: Final[dict[RevisionId, Arn]] + tag_manager: Final[TagManager] + aliases: Final[set[Alias]] + + def __init__( + self, + name: Name, + arn: Arn, + definition: Definition, + role_arn: Arn, + logging_config: LoggingConfiguration, + cloud_watch_logging_configuration: Optional[CloudWatchLoggingConfiguration], + create_date: Optional[datetime.datetime] = None, + sm_type: Optional[StateMachineType] = None, + tags: Optional[TagList] = None, + tracing_config: Optional[TracingConfiguration] = None, + ): + super().__init__( + name, + arn, + definition, + role_arn, + logging_config, + cloud_watch_logging_configuration, + create_date, + sm_type, + tags, + tracing_config, + ) + self.versions = dict() + self._version_number = 0 + self.tag_manager = TagManager() + if tags: + self.tag_manager.add_all(tags) + self.aliases = set() + + def create_revision( + self, + definition:
Optional[str], + role_arn: Optional[Arn], + logging_configuration: Optional[LoggingConfiguration], + ) -> Optional[RevisionId]: + update_definition = definition and json.loads(definition) != json.loads(self.definition) + if update_definition: + self.definition = definition + + update_role_arn = role_arn and role_arn != self.role_arn + if update_role_arn: + self.role_arn = role_arn + + update_logging_configuration = ( + logging_configuration and logging_configuration != self.logging_config + ) + if update_logging_configuration: + self.logging_config = logging_configuration + self.cloud_watch_logging_configuration = ( + CloudWatchLoggingConfiguration.from_logging_configuration( + state_machine_arn=self.arn, logging_configuration=self.logging_config + ) + ) + + if any([update_definition, update_role_arn, update_logging_configuration]): + self.revision_id = long_uid() + + return self.revision_id + + def create_version(self, description: Optional[str]) -> Optional[StateMachineVersion]: + if self.revision_id not in self.versions: + self._version_number += 1 + version = StateMachineVersion( + self, version=self._version_number, description=description + ) + self.versions[self.revision_id] = version.arn + + return version + return None + + def delete_version(self, state_machine_version_arn: Arn) -> None: + source_revision_id = None + for revision_id, version_arn in self.versions.items(): + if version_arn == state_machine_version_arn: + source_revision_id = revision_id + break + self.versions.pop(source_revision_id, None) + + def itemise(self) -> StateMachineListItem: + return StateMachineListItem( + stateMachineArn=self.arn, + name=self.name, + type=self.sm_type, + creationDate=self.create_date, + ) + + +class StateMachineVersion(StateMachineInstance): + source_arn: Arn + version: int + description: Optional[str] + + def __init__( + self, state_machine_revision: StateMachineRevision, version: int, description: Optional[str] + ): + version_arn = f"{state_machine_revision.arn}:{version}" + super().__init__( + name=state_machine_revision.name, + arn=version_arn, + definition=state_machine_revision.definition, + role_arn=state_machine_revision.role_arn, + create_date=datetime.datetime.now(tz=datetime.timezone.utc), + sm_type=state_machine_revision.sm_type, + logging_config=state_machine_revision.logging_config, + cloud_watch_logging_configuration=state_machine_revision.cloud_watch_logging_configuration, + tags=state_machine_revision.tags, + tracing_config=state_machine_revision.tracing_config, + ) + self.source_arn = state_machine_revision.arn + self.revision_id = state_machine_revision.revision_id + self.version = version + self.description = description + + def describe(self) -> DescribeStateMachineOutput: + describe_output: DescribeStateMachineOutput = super().describe() + if self.description: + describe_output["description"] = self.description + return describe_output + + def itemise(self) -> StateMachineVersionListItem: + return StateMachineVersionListItem( + stateMachineVersionArn=self.arn, creationDate=self.create_date + ) diff --git a/localstack-core/localstack/services/stepfunctions/backend/store.py b/localstack-core/localstack/services/stepfunctions/backend/store.py new file mode 100644 index 0000000000000..825fb2b630c83 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/backend/store.py @@ -0,0 +1,24 @@ +from collections import OrderedDict +from typing import Final + +from localstack.aws.api.stepfunctions import Arn +from localstack.services.stepfunctions.backend.activity 
import Activity +from localstack.services.stepfunctions.backend.alias import Alias +from localstack.services.stepfunctions.backend.execution import Execution +from localstack.services.stepfunctions.backend.state_machine import StateMachineInstance +from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute + + +class SFNStore(BaseStore): + # Maps ARNs to state machines. + state_machines: Final[dict[Arn, StateMachineInstance]] = LocalAttribute(default=dict) + # Maps alias ARNs to state machine aliases. + aliases: Final[dict[Arn, Alias]] = LocalAttribute(default=dict) + # Maps execution ARNs to executions. + executions: Final[dict[Arn, Execution]] = LocalAttribute( + default=OrderedDict + ) # TODO: when snapshot to pods stop execution(?) + activities: Final[OrderedDict[Arn, Activity]] = LocalAttribute(default=dict) + + +sfn_stores: Final[AccountRegionBundle] = AccountRegionBundle("stepfunctions", SFNStore) diff --git a/localstack-core/localstack/services/stepfunctions/backend/test_state/__init__.py b/localstack-core/localstack/services/stepfunctions/backend/test_state/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/stepfunctions/backend/test_state/execution.py b/localstack-core/localstack/services/stepfunctions/backend/test_state/execution.py new file mode 100644 index 0000000000000..cc200f09b29c6 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/backend/test_state/execution.py @@ -0,0 +1,138 @@ +from __future__ import annotations + +import logging +import threading +from typing import Optional + +from localstack.aws.api.stepfunctions import ( + Arn, + ExecutionStatus, + InspectionLevel, + StateMachineType, + TestExecutionStatus, + TestStateOutput, + Timestamp, +) +from localstack.services.stepfunctions.asl.eval.evaluation_details import EvaluationDetails +from localstack.services.stepfunctions.asl.eval.program_state import ( + ProgramEnded, + ProgramError, + ProgramState, +) +from localstack.services.stepfunctions.asl.eval.test_state.program_state import ( + ProgramChoiceSelected, +) +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.services.stepfunctions.backend.activity import Activity +from localstack.services.stepfunctions.backend.execution import ( + BaseExecutionWorkerCommunication, + Execution, +) +from localstack.services.stepfunctions.backend.state_machine import StateMachineInstance +from localstack.services.stepfunctions.backend.test_state.execution_worker import ( + TestStateExecutionWorker, +) + +LOG = logging.getLogger(__name__) + + +class TestStateExecution(Execution): + exec_worker: Optional[TestStateExecutionWorker] + next_state: Optional[str] + + class TestCaseExecutionWorkerCommunication(BaseExecutionWorkerCommunication): + execution: TestStateExecution + + def terminated(self) -> None: + exit_program_state: ProgramState = self.execution.exec_worker.env.program_state() + if isinstance(exit_program_state, ProgramChoiceSelected): + self.execution.exec_status = ExecutionStatus.SUCCEEDED + self.execution.output = self.execution.exec_worker.env.states.get_input() + self.execution.next_state = exit_program_state.next_state_name + else: + self._reflect_execution_status() + + def __init__( + self, + name: str, + role_arn: Arn, + exec_arn: Arn, + account_id: str, + region_name: str, + state_machine: StateMachineInstance, + start_date: Timestamp, + activity_store: dict[Arn, Activity], + input_data: Optional[dict] = None,
+ ): + super().__init__( + name=name, + sm_type=StateMachineType.STANDARD, + role_arn=role_arn, + exec_arn=exec_arn, + account_id=account_id, + region_name=region_name, + state_machine=state_machine, + start_date=start_date, + activity_store=activity_store, + input_data=input_data, + cloud_watch_logging_session=None, + trace_header=None, + ) + self._execution_terminated_event = threading.Event() + self.next_state = None + + def _get_start_execution_worker_comm(self) -> BaseExecutionWorkerCommunication: + return self.TestCaseExecutionWorkerCommunication(self) + + def _get_start_execution_worker(self) -> TestStateExecutionWorker: + return TestStateExecutionWorker( + evaluation_details=EvaluationDetails( + aws_execution_details=self._get_start_aws_execution_details(), + execution_details=self.get_start_execution_details(), + state_machine_details=self.get_start_state_machine_details(), + ), + exec_comm=self._get_start_execution_worker_comm(), + cloud_watch_logging_session=self._cloud_watch_logging_session, + activity_store=self._activity_store, + ) + + def publish_execution_status_change_event(self): + # Do not publish execution status change events during test state execution. + pass + + def to_test_state_output(self, inspection_level: InspectionLevel) -> TestStateOutput: + exit_program_state: ProgramState = self.exec_worker.env.program_state() + if isinstance(exit_program_state, ProgramEnded): + output_str = to_json_str(self.output) + test_state_output = TestStateOutput( + status=TestExecutionStatus.SUCCEEDED, output=output_str + ) + elif isinstance(exit_program_state, ProgramError): + test_state_output = TestStateOutput( + status=TestExecutionStatus.FAILED, + error=exit_program_state.error["error"], + cause=exit_program_state.error["cause"], + ) + elif isinstance(exit_program_state, ProgramChoiceSelected): + output_str = to_json_str(self.output) + test_state_output = TestStateOutput( + status=TestExecutionStatus.SUCCEEDED, nextState=self.next_state, output=output_str + ) + else: + # TODO: handle other statuses + LOG.warning( + "Unsupported StateMachine exit type for TestState '%s'", + type(exit_program_state), + ) + output_str = to_json_str(self.output) + test_state_output = TestStateOutput( + status=TestExecutionStatus.FAILED, output=output_str + ) + + match inspection_level: + case InspectionLevel.TRACE: + test_state_output["inspectionData"] = self.exec_worker.env.inspection_data + case InspectionLevel.DEBUG: + test_state_output["inspectionData"] = self.exec_worker.env.inspection_data + + return test_state_output diff --git a/localstack-core/localstack/services/stepfunctions/backend/test_state/execution_worker.py b/localstack-core/localstack/services/stepfunctions/backend/test_state/execution_worker.py new file mode 100644 index 0000000000000..b70c7d41bd6a3 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/backend/test_state/execution_worker.py @@ -0,0 +1,48 @@ +from typing import Optional + +from localstack.services.stepfunctions.asl.component.eval_component import EvalComponent +from localstack.services.stepfunctions.asl.eval.environment import Environment +from localstack.services.stepfunctions.asl.eval.event.event_manager import ( + EventHistoryContext, +) +from localstack.services.stepfunctions.asl.eval.states import ( + ContextObjectData, + ExecutionData, + StateMachineData, +) +from localstack.services.stepfunctions.asl.eval.test_state.environment import TestStateEnvironment +from localstack.services.stepfunctions.asl.parse.test_state.asl_parser import ( + 
TestStateAmazonStateLanguageParser,
+)
+from localstack.services.stepfunctions.backend.execution_worker import SyncExecutionWorker
+
+
+class TestStateExecutionWorker(SyncExecutionWorker):
+    env: Optional[TestStateEnvironment]
+
+    def _get_evaluation_entrypoint(self) -> EvalComponent:
+        return TestStateAmazonStateLanguageParser.parse(
+            self._evaluation_details.state_machine_details.definition
+        )[0]
+
+    def _get_evaluation_environment(self) -> Environment:
+        return TestStateEnvironment(
+            aws_execution_details=self._evaluation_details.aws_execution_details,
+            execution_type=self._evaluation_details.state_machine_details.typ,
+            context=ContextObjectData(
+                Execution=ExecutionData(
+                    Id=self._evaluation_details.execution_details.arn,
+                    Input=self._evaluation_details.execution_details.inpt,
+                    Name=self._evaluation_details.execution_details.name,
+                    RoleArn=self._evaluation_details.execution_details.role_arn,
+                    StartTime=self._evaluation_details.execution_details.start_time,
+                ),
+                StateMachine=StateMachineData(
+                    Id=self._evaluation_details.state_machine_details.arn,
+                    Name=self._evaluation_details.state_machine_details.name,
+                ),
+            ),
+            event_history_context=EventHistoryContext.of_program_start(),
+            cloud_watch_logging_session=self._cloud_watch_logging_session,
+            activity_store=self._activity_store,
+        )
diff --git a/localstack-core/localstack/services/stepfunctions/mocking/__init__.py b/localstack-core/localstack/services/stepfunctions/mocking/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/mocking/mock_config.py b/localstack-core/localstack/services/stepfunctions/mocking/mock_config.py
new file mode 100644
index 0000000000000..25f71acee35d5
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/mocking/mock_config.py
@@ -0,0 +1,214 @@
+import abc
+from typing import Any, Final, Optional
+
+from localstack.services.stepfunctions.mocking.mock_config_file import (
+    RawMockConfig,
+    RawResponseModel,
+    RawTestCase,
+    _load_sfn_raw_mock_config,
+)
+
+
+class MockedResponse(abc.ABC):
+    range_start: Final[int]
+    range_end: Final[int]
+
+    def __init__(self, range_start: int, range_end: int):
+        super().__init__()
+        if range_start < 0 or range_end < 0:
+            raise ValueError(
+                f"Invalid range: both '{range_start}' and '{range_end}' must be non-negative integers."
+            )
+        if range_end < range_start:
+            raise ValueError(
+                f"Invalid range: values must be equal or '{range_end}' "
+                f"must be at least one greater than '{range_start}'."
+            )
+        self.range_start = range_start
+        self.range_end = range_end
+
+
+class MockedResponseReturn(MockedResponse):
+    payload: Final[Any]
+
+    def __init__(self, range_start: int, range_end: int, payload: Any):
+        super().__init__(range_start=range_start, range_end=range_end)
+        self.payload = payload
+
+
+class MockedResponseThrow(MockedResponse):
+    error: Final[str]
+    cause: Final[str]
+
+    def __init__(self, range_start: int, range_end: int, error: str, cause: str):
+        super().__init__(range_start=range_start, range_end=range_end)
+        self.error = error
+        self.cause = cause
+
+
+class StateMockedResponses:
+    state_name: Final[str]
+    mocked_response_name: Final[str]
+    mocked_responses: Final[list[MockedResponse]]
+
+    def __init__(
+        self, state_name: str, mocked_response_name: str, mocked_responses: list[MockedResponse]
+    ):
+        self.state_name = state_name
+        self.mocked_response_name = mocked_response_name
+        self.mocked_responses = list()
+        last_range_end: int = -1
+        mocked_responses_sorted = sorted(mocked_responses, key=lambda mr: mr.range_start)
+        for mocked_response in mocked_responses_sorted:
+            if mocked_response.range_start - last_range_end != 1:
+                raise RuntimeError(
+                    f"Inconsistent event numbering detected for state '{state_name}': "
+                    f"the previous mocked response ended at event '{last_range_end}' "
+                    f"while the next response '{mocked_response_name}' "
+                    f"starts at event '{mocked_response.range_start}'. "
+                    "Mock responses must be consecutively numbered. "
+                    f"Expected the next response to begin at event {last_range_end + 1}."
+                )
+            repeats = mocked_response.range_end - mocked_response.range_start + 1
+            self.mocked_responses.extend([mocked_response] * repeats)
+            last_range_end = mocked_response.range_end
+
+
+class MockTestCase:
+    state_machine_name: Final[str]
+    test_case_name: Final[str]
+    state_mocked_responses: Final[dict[str, StateMockedResponses]]
+
+    def __init__(
+        self,
+        state_machine_name: str,
+        test_case_name: str,
+        state_mocked_responses_list: list[StateMockedResponses],
+    ):
+        self.state_machine_name = state_machine_name
+        self.test_case_name = test_case_name
+        self.state_mocked_responses = dict()
+        for state_mocked_response in state_mocked_responses_list:
+            state_name = state_mocked_response.state_name
+            if state_name in self.state_mocked_responses:
+                raise RuntimeError(
+                    f"Duplicate definition of state '{state_name}' for test case '{test_case_name}'"
+                )
+            self.state_mocked_responses[state_name] = state_mocked_response
+
+
+def _parse_mocked_response_range(string_definition: str) -> tuple[int, int]:
+    definition_parts = string_definition.strip().split("-")
+    if len(definition_parts) == 1:
+        range_part = definition_parts[0]
+        try:
+            range_value = int(range_part)
+            return range_value, range_value
+        except Exception:
+            raise RuntimeError(
+                f"Unknown mocked response retry range value '{range_part}', not a valid integer"
+            )
+    elif len(definition_parts) == 2:
+        range_part_start = definition_parts[0]
+        range_part_end = definition_parts[1]
+        try:
+            return int(range_part_start), int(range_part_end)
+        except Exception:
+            raise RuntimeError(
+                f"Unknown mocked response retry range value '{range_part_start}-{range_part_end}', "
+                "not valid integer values"
+            )
+    else:
+        raise RuntimeError(
+            f"Unknown mocked response retry range definition '{string_definition}', "
+            "a range definition should consist of one integer (e.g. '0') or an integer range (e.g. '1-2')."
+ ) + + +def _mocked_response_from_raw( + raw_response_model_range: str, raw_response_model: RawResponseModel +) -> MockedResponse: + range_start, range_end = _parse_mocked_response_range(raw_response_model_range) + if raw_response_model.Return: + payload = raw_response_model.Return.model_dump() + return MockedResponseReturn(range_start=range_start, range_end=range_end, payload=payload) + throw_definition = raw_response_model.Throw + return MockedResponseThrow( + range_start=range_start, + range_end=range_end, + error=throw_definition.Error, + cause=throw_definition.Cause, + ) + + +def _mocked_responses_from_raw( + mocked_response_name: str, raw_mock_config: RawMockConfig +) -> list[MockedResponse]: + raw_response_models: Optional[dict[str, RawResponseModel]] = ( + raw_mock_config.MockedResponses.get(mocked_response_name) + ) + if not raw_response_models: + raise RuntimeError( + f"No definitions for mocked response '{mocked_response_name}' in the mock configuration file." + ) + mocked_responses: list[MockedResponse] = list() + for raw_response_model_range, raw_response_model in raw_response_models.items(): + mocked_response: MockedResponse = _mocked_response_from_raw( + raw_response_model_range=raw_response_model_range, raw_response_model=raw_response_model + ) + mocked_responses.append(mocked_response) + return mocked_responses + + +def _state_mocked_responses_from_raw( + state_name: str, mocked_response_name: str, raw_mock_config: RawMockConfig +) -> StateMockedResponses: + mocked_responses = _mocked_responses_from_raw( + mocked_response_name=mocked_response_name, raw_mock_config=raw_mock_config + ) + return StateMockedResponses( + state_name=state_name, + mocked_response_name=mocked_response_name, + mocked_responses=mocked_responses, + ) + + +def _mock_test_case_from_raw( + state_machine_name: str, test_case_name: str, raw_mock_config: RawMockConfig +) -> MockTestCase: + state_machine = raw_mock_config.StateMachines.get(state_machine_name) + if not state_machine: + raise RuntimeError( + f"No definitions for state machine '{state_machine_name}' in the mock configuration file." + ) + test_case: RawTestCase = state_machine.TestCases.get(test_case_name) + if not test_case: + raise RuntimeError( + f"No definitions for test case '{test_case_name}' and " + f"state machine '{state_machine_name}' in the mock configuration file." 
+ ) + state_mocked_responses_list: list[StateMockedResponses] = list() + for state_name, mocked_response_name in test_case.root.items(): + state_mocked_responses = _state_mocked_responses_from_raw( + state_name=state_name, + mocked_response_name=mocked_response_name, + raw_mock_config=raw_mock_config, + ) + state_mocked_responses_list.append(state_mocked_responses) + return MockTestCase( + state_machine_name=state_machine_name, + test_case_name=test_case_name, + state_mocked_responses_list=state_mocked_responses_list, + ) + + +def load_mock_test_case_for(state_machine_name: str, test_case_name: str) -> Optional[MockTestCase]: + raw_mock_config: Optional[RawMockConfig] = _load_sfn_raw_mock_config() + if raw_mock_config is None: + return None + mock_test_case: MockTestCase = _mock_test_case_from_raw( + state_machine_name=state_machine_name, + test_case_name=test_case_name, + raw_mock_config=raw_mock_config, + ) + return mock_test_case diff --git a/localstack-core/localstack/services/stepfunctions/mocking/mock_config_file.py b/localstack-core/localstack/services/stepfunctions/mocking/mock_config_file.py new file mode 100644 index 0000000000000..145ffd20750a2 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/mocking/mock_config_file.py @@ -0,0 +1,187 @@ +import logging +import os +from functools import lru_cache +from json import JSONDecodeError +from typing import Any, Dict, Final, Optional + +from pydantic import BaseModel, RootModel, ValidationError, model_validator + +from localstack import config + +LOG = logging.getLogger(__name__) + +_RETURN_KEY: Final[str] = "Return" +_THROW_KEY: Final[str] = "Throw" + + +class RawReturnResponse(RootModel[Any]): + """ + Represents a return response. + Accepts any fields. + """ + + model_config = {"frozen": True} + + +class RawThrowResponse(BaseModel): + """ + Represents an error response. + Both 'Error' and 'Cause' are required. + """ + + model_config = {"frozen": True} + + Error: str + Cause: str + + +class RawResponseModel(BaseModel): + """ + A response step must include exactly one of: + - 'Return': a ReturnResponse object. + - 'Throw': a ThrowResponse object. + """ + + model_config = {"frozen": True} + + Return: Optional[RawReturnResponse] = None + Throw: Optional[RawThrowResponse] = None + + @model_validator(mode="before") + def validate_response(cls, data: dict) -> dict: + if _RETURN_KEY in data and _THROW_KEY in data: + raise ValueError(f"Response cannot contain both '{_RETURN_KEY}' and '{_THROW_KEY}'") + if _RETURN_KEY not in data and _THROW_KEY not in data: + raise ValueError(f"Response must contain one of '{_RETURN_KEY}' or '{_THROW_KEY}'") + return data + + +class RawTestCase(RootModel[Dict[str, str]]): + """ + Represents an individual test case. + The keys are state names (e.g., 'LambdaState', 'SQSState') + and the values are the names of the mocked response configurations. + """ + + model_config = {"frozen": True} + + +class RawStateMachine(BaseModel): + """ + Represents a state machine configuration containing multiple test cases. + """ + + model_config = {"frozen": True} + + TestCases: Dict[str, RawTestCase] + + +class RawMockConfig(BaseModel): + """ + The root configuration that contains: + - StateMachines: mapping state machine names to their configuration. + - MockedResponses: mapping response configuration names to response steps. + Each response step is keyed (e.g. "0", "1-2") and maps to a ResponseModel. 
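+
+    Illustrative shape (all names and payloads below are hypothetical and shown for
+    orientation only; the authoritative schema is defined by the models above):
+
+        {
+          "StateMachines": {
+            "MyStateMachine": {
+              "TestCases": {
+                "HappyPath": {"LambdaState": "MockedLambdaSuccess"}
+              }
+            }
+          },
+          "MockedResponses": {
+            "MockedLambdaSuccess": {
+              "0": {"Return": {"StatusCode": 200, "Payload": {}}},
+              "1-2": {"Throw": {"Error": "Lambda.Unknown", "Cause": "Simulated failure"}}
+            }
+          }
+        }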
+ """ + + model_config = {"frozen": True} + + StateMachines: Dict[str, RawStateMachine] + MockedResponses: Dict[str, Dict[str, RawResponseModel]] + + +@lru_cache(maxsize=1) +def _read_sfn_raw_mock_config(file_path: str, modified_epoch: int) -> Optional[RawMockConfig]: # noqa + """ + Load and cache the Step Functions mock configuration from a JSON file. + + This function is memoized using `functools.lru_cache` to avoid re-reading the file + from disk unless it has changed. The `modified_epoch` parameter is used solely to + trigger cache invalidation when the file is updated. If either the file path or the + modified timestamp changes, the cached result is discarded and the file is reloaded. + + Parameters: + file_path (str): + The absolute path to the JSON configuration file. + + modified_epoch (int): + The last modified time of the file, in epoch seconds. This value is used + as part of the cache key to ensure the cache is refreshed when the file is updated. + + Returns: + Optional[dict]: + The parsed configuration as a dictionary if the file is successfully loaded, + or `None` if an error occurs during reading or parsing. + + Notes: + - The `modified_epoch` argument is not used inside the function logic, but is + necessary to ensure cache correctness via `lru_cache`. + - Logging is used to capture warnings if file access or parsing fails. + """ + try: + with open(file_path, "r") as df: + mock_config_str = df.read() + mock_config: RawMockConfig = RawMockConfig.model_validate_json(mock_config_str) + return mock_config + except (OSError, IOError) as file_error: + LOG.error("Failed to open mock configuration file '%s'. Error: %s", file_path, file_error) + return None + except ValidationError as validation_error: + errors = validation_error.errors() + if not errors: + # No detailed errors provided by Pydantic + LOG.error( + "Validation failed for mock configuration file at '%s'. " + "The file must contain a valid mock configuration.", + file_path, + ) + else: + for err in errors: + location = ".".join(str(loc) for loc in err["loc"]) + message = err["msg"] + error_type = err["type"] + LOG.error( + "Mock configuration file error at '%s': %s (%s)", + location, + message, + error_type, + ) + # TODO: add tests to ensure the hot-reloading of the mock configuration + # file works as expected, and inform the user with the info below: + # LOG.info( + # "Changes to the mock configuration file will be applied at the " + # "next mock execution without requiring a LocalStack restart." + # ) + return None + except JSONDecodeError as json_error: + LOG.error( + "Malformed JSON in mock configuration file at '%s'. Error: %s", + file_path, + json_error, + ) + # TODO: add tests to ensure the hot-reloading of the mock configuration + # file works as expected, and inform the user with the info below: + # LOG.info( + # "Changes to the mock configuration file will be applied at the " + # "next mock execution without requiring a LocalStack restart." 
+ # ) + return None + + +def _load_sfn_raw_mock_config() -> Optional[RawMockConfig]: + configuration_file_path = config.SFN_MOCK_CONFIG + if not configuration_file_path: + return None + + try: + modified_time = int(os.path.getmtime(configuration_file_path)) + except Exception as ex: + LOG.warning( + "Unable to access the step functions mock configuration file at '%s' due to %s", + configuration_file_path, + ex, + ) + return None + + mock_config = _read_sfn_raw_mock_config(configuration_file_path, modified_time) + return mock_config diff --git a/localstack-core/localstack/services/stepfunctions/packages.py b/localstack-core/localstack/services/stepfunctions/packages.py new file mode 100644 index 0000000000000..b96f7a8d775f0 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/packages.py @@ -0,0 +1,39 @@ +from localstack.packages import Package, PackageInstaller +from localstack.packages.core import MavenPackageInstaller +from localstack.packages.java import JavaInstallerMixin + +JSONATA_DEFAULT_VERSION = "0.9.7" +JACKSON_DEFAULT_VERSION = "2.16.2" + +JSONATA_JACKSON_VERSION_STORE = {JSONATA_DEFAULT_VERSION: JACKSON_DEFAULT_VERSION} + + +class JSONataPackage(Package): + def __init__(self): + super().__init__("JSONataLibs", JSONATA_DEFAULT_VERSION) + + # Match the dynamodb-local JRE version to reduce the LocalStack image size by sharing the same JRE version + self.java_version = "21" + + def get_versions(self) -> list[str]: + return list(JSONATA_JACKSON_VERSION_STORE.keys()) + + def _get_installer(self, version: str) -> PackageInstaller: + return JSONataPackageInstaller(version) + + +class JSONataPackageInstaller(JavaInstallerMixin, MavenPackageInstaller): + def __init__(self, version: str): + jackson_version = JSONATA_JACKSON_VERSION_STORE[version] + super().__init__( + f"pkg:maven/com.dashjoin/jsonata@{version}", + # jackson-databind is imported in jsonata.py as "from com.fasterxml.jackson.databind import ObjectMapper" + # jackson-annotations and jackson-core are dependencies of jackson-databind: + # https://central.sonatype.com/artifact/com.fasterxml.jackson.core/jackson-databind/dependencies + f"pkg:maven/com.fasterxml.jackson.core/jackson-core@{jackson_version}", + f"pkg:maven/com.fasterxml.jackson.core/jackson-annotations@{jackson_version}", + f"pkg:maven/com.fasterxml.jackson.core/jackson-databind@{jackson_version}", + ) + + +jpype_jsonata_package = JSONataPackage() diff --git a/localstack-core/localstack/services/stepfunctions/plugins.py b/localstack-core/localstack/services/stepfunctions/plugins.py new file mode 100644 index 0000000000000..b407ee2875396 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/plugins.py @@ -0,0 +1,9 @@ +from localstack.packages import Package, package + + +@package(name="jpype-jsonata") +def jpype_jsonata_package() -> Package: + """The Java-based jsonata library uses JPype and depends on a JVM installation.""" + from localstack.services.stepfunctions.packages import jpype_jsonata_package + + return jpype_jsonata_package diff --git a/localstack-core/localstack/services/stepfunctions/provider.py b/localstack-core/localstack/services/stepfunctions/provider.py new file mode 100644 index 0000000000000..c43fd396c9a8f --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/provider.py @@ -0,0 +1,1660 @@ +import copy +import datetime +import json +import logging +import re +import time +from typing import Final, Optional + +from localstack.aws.api import CommonServiceException, RequestContext +from 
localstack.aws.api.stepfunctions import ( + ActivityDoesNotExist, + AliasDescription, + Arn, + CharacterRestrictedName, + ConflictException, + CreateActivityOutput, + CreateStateMachineAliasOutput, + CreateStateMachineInput, + CreateStateMachineOutput, + Definition, + DeleteActivityOutput, + DeleteStateMachineAliasOutput, + DeleteStateMachineOutput, + DeleteStateMachineVersionOutput, + DescribeActivityOutput, + DescribeExecutionOutput, + DescribeMapRunOutput, + DescribeStateMachineAliasOutput, + DescribeStateMachineForExecutionOutput, + DescribeStateMachineOutput, + EncryptionConfiguration, + ExecutionDoesNotExist, + ExecutionList, + ExecutionRedriveFilter, + ExecutionStatus, + GetActivityTaskOutput, + GetExecutionHistoryOutput, + IncludedData, + IncludeExecutionDataGetExecutionHistory, + InspectionLevel, + InvalidArn, + InvalidDefinition, + InvalidExecutionInput, + InvalidLoggingConfiguration, + InvalidName, + InvalidToken, + ListActivitiesOutput, + ListExecutionsOutput, + ListExecutionsPageToken, + ListMapRunsOutput, + ListStateMachineAliasesOutput, + ListStateMachinesOutput, + ListStateMachineVersionsOutput, + ListTagsForResourceOutput, + LoggingConfiguration, + LogLevel, + LongArn, + MaxConcurrency, + MissingRequiredParameter, + Name, + PageSize, + PageToken, + Publish, + PublishStateMachineVersionOutput, + ResourceNotFound, + RevealSecrets, + ReverseOrder, + RevisionId, + RoutingConfigurationList, + SendTaskFailureOutput, + SendTaskHeartbeatOutput, + SendTaskSuccessOutput, + SensitiveCause, + SensitiveData, + SensitiveError, + StartExecutionOutput, + StartSyncExecutionOutput, + StateMachineAliasList, + StateMachineAlreadyExists, + StateMachineDoesNotExist, + StateMachineList, + StateMachineType, + StateMachineTypeNotSupported, + StepfunctionsApi, + StopExecutionOutput, + TagKeyList, + TagList, + TagResourceOutput, + TaskDoesNotExist, + TaskTimedOut, + TaskToken, + TestStateOutput, + ToleratedFailureCount, + ToleratedFailurePercentage, + TraceHeader, + TracingConfiguration, + UntagResourceOutput, + UpdateMapRunOutput, + UpdateStateMachineAliasOutput, + UpdateStateMachineOutput, + ValidateStateMachineDefinitionDiagnostic, + ValidateStateMachineDefinitionDiagnosticList, + ValidateStateMachineDefinitionInput, + ValidateStateMachineDefinitionOutput, + ValidateStateMachineDefinitionResultCode, + ValidateStateMachineDefinitionSeverity, + ValidationException, + VersionDescription, +) +from localstack.services.plugins import ServiceLifecycleHook +from localstack.services.stepfunctions.asl.component.state.state_execution.state_map.iteration.itemprocessor.map_run_record import ( + MapRunRecord, +) +from localstack.services.stepfunctions.asl.eval.callback.callback import ( + ActivityCallbackEndpoint, + CallbackConsumerTimeout, + CallbackNotifyConsumerError, + CallbackOutcomeFailure, + CallbackOutcomeSuccess, +) +from localstack.services.stepfunctions.asl.eval.event.logging import ( + CloudWatchLoggingConfiguration, + CloudWatchLoggingSession, +) +from localstack.services.stepfunctions.asl.parse.asl_parser import ( + ASLParserException, +) +from localstack.services.stepfunctions.asl.static_analyser.express_static_analyser import ( + ExpressStaticAnalyser, +) +from localstack.services.stepfunctions.asl.static_analyser.static_analyser import ( + StaticAnalyser, +) +from localstack.services.stepfunctions.asl.static_analyser.test_state.test_state_analyser import ( + TestStateStaticAnalyser, +) +from localstack.services.stepfunctions.asl.static_analyser.usage_metrics_static_analyser import ( + 
UsageMetricsStaticAnalyser,
+)
+from localstack.services.stepfunctions.backend.activity import Activity, ActivityTask
+from localstack.services.stepfunctions.backend.alias import Alias
+from localstack.services.stepfunctions.backend.execution import Execution, SyncExecution
+from localstack.services.stepfunctions.backend.state_machine import (
+    StateMachineInstance,
+    StateMachineRevision,
+    StateMachineVersion,
+    TestStateMachine,
+)
+from localstack.services.stepfunctions.backend.store import SFNStore, sfn_stores
+from localstack.services.stepfunctions.backend.test_state.execution import (
+    TestStateExecution,
+)
+from localstack.services.stepfunctions.mocking.mock_config import (
+    MockTestCase,
+    load_mock_test_case_for,
+)
+from localstack.services.stepfunctions.stepfunctions_utils import (
+    assert_pagination_parameters_valid,
+    get_next_page_token_from_arn,
+    normalise_max_results,
+)
+from localstack.state import StateVisitor
+from localstack.utils.aws.arns import (
+    ARN_PARTITION_REGEX,
+    stepfunctions_activity_arn,
+    stepfunctions_express_execution_arn,
+    stepfunctions_standard_execution_arn,
+    stepfunctions_state_machine_arn,
+)
+from localstack.utils.collections import PaginatedList
+from localstack.utils.strings import long_uid, short_uid
+
+LOG = logging.getLogger(__name__)
+
+
+class StepFunctionsProvider(StepfunctionsApi, ServiceLifecycleHook):
+    _TEST_STATE_MAX_TIMEOUT_SECONDS: Final[int] = 300  # 5 minutes.
+
+    @staticmethod
+    def get_store(context: RequestContext) -> SFNStore:
+        return sfn_stores[context.account_id][context.region]
+
+    def accept_state_visitor(self, visitor: StateVisitor):
+        visitor.visit(sfn_stores)
+
+    _STATE_MACHINE_ARN_REGEX: Final[re.Pattern] = re.compile(
+        rf"{ARN_PARTITION_REGEX}:states:[a-z0-9-]+:[0-9]{{12}}:stateMachine:[a-zA-Z0-9-_.]+(:\d+)?(:[a-zA-Z0-9-_.]+)*(?:#[a-zA-Z0-9-_]+)?$"
+    )
+
+    _STATE_MACHINE_EXECUTION_ARN_REGEX: Final[re.Pattern] = re.compile(
+        rf"{ARN_PARTITION_REGEX}:states:[a-z0-9-]+:[0-9]{{12}}:(stateMachine|execution|express):[a-zA-Z0-9-_.]+(:\d+)?(:[a-zA-Z0-9-_.]+)*$"
+    )
+
+    _ACTIVITY_ARN_REGEX: Final[re.Pattern] = re.compile(
+        rf"{ARN_PARTITION_REGEX}:states:[a-z0-9-]+:[0-9]{{12}}:activity:[a-zA-Z0-9-_\.]{{1,80}}$"
+    )
+
+    _ALIAS_ARN_REGEX: Final[re.Pattern] = re.compile(
+        rf"{ARN_PARTITION_REGEX}:states:[a-z0-9-]+:[0-9]{{12}}:stateMachine:[A-Za-z0-9_.-]+:[A-Za-z_.-]+[A-Za-z0-9_.-]{{0,80}}$"
+    )
+
+    _ALIAS_NAME_REGEX: Final[re.Pattern] = re.compile(r"^(?=.*[a-zA-Z_\-\.])[a-zA-Z0-9_\-\.]+$")
+
+    @staticmethod
+    def _validate_state_machine_arn(state_machine_arn: str) -> None:
+        # TODO: InvalidArn exception messages do not communicate which part of the ARN is incorrect.
+        if not StepFunctionsProvider._STATE_MACHINE_ARN_REGEX.match(state_machine_arn):
+            raise InvalidArn(f"Invalid arn: '{state_machine_arn}'")
+
+    @staticmethod
+    def _raise_state_machine_does_not_exist(state_machine_arn: str) -> None:
+        raise StateMachineDoesNotExist(f"State Machine Does Not Exist: '{state_machine_arn}'")
+
+    @staticmethod
+    def _validate_state_machine_execution_arn(execution_arn: str) -> None:
+        # TODO: InvalidArn exception messages do not communicate which part of the ARN is incorrect.
+        if not StepFunctionsProvider._STATE_MACHINE_EXECUTION_ARN_REGEX.match(execution_arn):
+            raise InvalidArn(f"Invalid arn: '{execution_arn}'")
+
+    @staticmethod
+    def _validate_activity_arn(activity_arn: str) -> None:
+        # TODO: InvalidArn exception messages do not communicate which part of the ARN is incorrect.
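+        # Illustrative ARN accepted by the pattern below (account id and activity
+        # name are placeholders):
+        #   arn:aws:states:us-east-1:000000000000:activity:my-activity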
+ if not StepFunctionsProvider._ACTIVITY_ARN_REGEX.match(activity_arn): + raise InvalidArn(f"Invalid arn: '{activity_arn}'") + + @staticmethod + def _validate_state_machine_alias_arn(state_machine_alias_arn: Arn) -> None: + if not StepFunctionsProvider._ALIAS_ARN_REGEX.match(state_machine_alias_arn): + raise InvalidArn(f"Invalid arn: '{state_machine_alias_arn}'") + + def _raise_state_machine_type_not_supported(self): + raise StateMachineTypeNotSupported( + "This operation is not supported by this type of state machine" + ) + + @staticmethod + def _raise_resource_type_not_in_context(resource_type: str) -> None: + lower_resource_type = resource_type.lower() + raise InvalidArn( + f"Invalid Arn: 'Resource type not valid in this context: {lower_resource_type}'" + ) + + @staticmethod + def _validate_activity_name(name: str) -> None: + # The activity name is validated according to the AWS StepFunctions documentation, the name should not contain: + # - white space + # - brackets < > { } [ ] + # - wildcard characters ? * + # - special characters " # % \ ^ | ~ ` $ & , ; : / + # - control characters (U+0000-001F, U+007F-009F) + # https://docs.aws.amazon.com/step-functions/latest/apireference/API_CreateActivity.html#API_CreateActivity_RequestSyntax + if not (1 <= len(name) <= 80): + raise InvalidName(f"Invalid Name: '{name}'") + invalid_chars = set(' <>{}[]?*"#%\\^|~`$&,;:/') + control_chars = {chr(i) for i in range(32)} | {chr(i) for i in range(127, 160)} + invalid_chars |= control_chars + for char in name: + if char in invalid_chars: + raise InvalidName(f"Invalid Name: '{name}'") + + @staticmethod + def _validate_state_machine_alias_name(name: CharacterRestrictedName) -> None: + len_name = len(name) + if len_name > 80: + raise ValidationException( + f"1 validation error detected: Value '{name}' at 'name' failed to satisfy constraint: " + f"Member must have length less than or equal to 80" + ) + if not StepFunctionsProvider._ALIAS_NAME_REGEX.match(name): + raise ValidationException( + # TODO: explore more error cases in which more than one validation error may occur which results + # in the counter below being greater than 1. 
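+                # Illustrative: a purely numeric name such as "123" fails the lookahead in
+                # _ALIAS_NAME_REGEX, which requires at least one letter, underscore, hyphen, or dot.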
+ f"1 validation error detected: Value '{name}' at 'name' failed to satisfy constraint: " + f"Member must satisfy regular expression pattern: ^(?=.*[a-zA-Z_\\-\\.])[a-zA-Z0-9_\\-\\.]+$" + ) + + def _get_execution(self, context: RequestContext, execution_arn: Arn) -> Execution: + execution: Optional[Execution] = self.get_store(context).executions.get(execution_arn) + if not execution: + raise ExecutionDoesNotExist(f"Execution Does Not Exist: '{execution_arn}'") + return execution + + def _get_executions( + self, + context: RequestContext, + execution_status: Optional[ExecutionStatus] = None, + ): + store = self.get_store(context) + execution: list[Execution] = list(store.executions.values()) + if execution_status: + execution = list( + filter( + lambda e: e.exec_status == execution_status, + store.executions.values(), + ) + ) + return execution + + def _get_activity(self, context: RequestContext, activity_arn: Arn) -> Activity: + maybe_activity: Optional[Activity] = self.get_store(context).activities.get( + activity_arn, None + ) + if maybe_activity is None: + raise ActivityDoesNotExist(f"Activity Does Not Exist: '{activity_arn}'") + return maybe_activity + + def _idempotent_revision( + self, + context: RequestContext, + name: str, + definition: Definition, + state_machine_type: StateMachineType, + logging_configuration: LoggingConfiguration, + tracing_configuration: TracingConfiguration, + ) -> Optional[StateMachineRevision]: + # CreateStateMachine's idempotency check is based on the state machine name, definition, type, + # LoggingConfiguration and TracingConfiguration. + # If a following request has a different roleArn or tags, Step Functions will ignore these differences and + # treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even + # if they are different. + state_machines: list[StateMachineInstance] = list( + self.get_store(context).state_machines.values() + ) + revisions = filter(lambda sm: isinstance(sm, StateMachineRevision), state_machines) + for state_machine in revisions: + check = all( + [ + state_machine.name == name, + state_machine.definition == definition, + state_machine.sm_type == state_machine_type, + state_machine.logging_config == logging_configuration, + state_machine.tracing_config == tracing_configuration, + ] + ) + if check: + return state_machine + return None + + def _idempotent_start_execution( + self, + execution: Optional[Execution], + state_machine: StateMachineInstance, + name: Name, + input_data: SensitiveData, + ) -> Optional[Execution]: + # StartExecution is idempotent for STANDARD workflows. For a STANDARD workflow, + # if you call StartExecution with the same name and input as a running execution, + # the call succeeds and return the same response as the original request. + # If the execution is closed or if the input is different, + # it returns a 400 ExecutionAlreadyExists error. You can reuse names after 90 days. 
+ + if not execution: + return None + + match (name, input_data, execution.exec_status, state_machine.sm_type): + case ( + execution.name, + execution.input_data, + ExecutionStatus.RUNNING, + StateMachineType.STANDARD, + ): + return execution + + raise CommonServiceException( + code="ExecutionAlreadyExists", + message=f"Execution Already Exists: '{execution.exec_arn}'", + ) + + def _revision_by_name( + self, context: RequestContext, name: str + ) -> Optional[StateMachineInstance]: + state_machines: list[StateMachineInstance] = list( + self.get_store(context).state_machines.values() + ) + for state_machine in state_machines: + if isinstance(state_machine, StateMachineRevision) and state_machine.name == name: + return state_machine + return None + + @staticmethod + def _validate_definition(definition: str, static_analysers: list[StaticAnalyser]) -> None: + try: + for static_analyser in static_analysers: + static_analyser.analyse(definition) + except ASLParserException as asl_parser_exception: + invalid_definition = InvalidDefinition() + invalid_definition.message = repr(asl_parser_exception) + raise invalid_definition + except Exception as exception: + exception_name = exception.__class__.__name__ + exception_args = list(exception.args) + invalid_definition = InvalidDefinition() + invalid_definition.message = ( + f"Error={exception_name} Args={exception_args} in definition '{definition}'." + ) + raise invalid_definition + + @staticmethod + def _sanitise_logging_configuration( + logging_configuration: LoggingConfiguration, + ) -> None: + level = logging_configuration.get("level") + destinations = logging_configuration.get("destinations") + + if destinations is not None and len(destinations) > 1: + raise InvalidLoggingConfiguration( + "Invalid Logging Configuration: Must specify exactly one Log Destination." + ) + + # A LogLevel that is not OFF, should have a destination. + if level is not None and level != LogLevel.OFF and not destinations: + raise InvalidLoggingConfiguration( + "Invalid Logging Configuration: Must specify exactly one Log Destination." + ) + + # Default for level is OFF. + level = level or LogLevel.OFF + + # Default for includeExecutionData is False. + include_flag = logging_configuration.get("includeExecutionData", False) + + # Update configuration object. + logging_configuration["level"] = level + logging_configuration["includeExecutionData"] = include_flag + + def create_state_machine( + self, context: RequestContext, request: CreateStateMachineInput, **kwargs + ) -> CreateStateMachineOutput: + if not request.get("publish", False) and request.get("versionDescription"): + raise ValidationException("Version description can only be set when publish is true") + + # Extract parameters and set defaults. + state_machine_name = request["name"] + state_machine_role_arn = request["roleArn"] + state_machine_definition = request["definition"] + state_machine_type = request.get("type") or StateMachineType.STANDARD + state_machine_tracing_configuration = request.get("tracingConfiguration") + state_machine_tags = request.get("tags") + state_machine_logging_configuration = request.get( + "loggingConfiguration", LoggingConfiguration() + ) + self._sanitise_logging_configuration( + logging_configuration=state_machine_logging_configuration + ) + + # CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was + # already created. 
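+        # For example, repeating an identical call but with a different roleArn or
+        # different tags still returns the existing machine's ARN and creation date
+        # unchanged (see _idempotent_revision above).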
+ idem_state_machine: Optional[StateMachineRevision] = self._idempotent_revision( + context=context, + name=state_machine_name, + definition=state_machine_definition, + state_machine_type=state_machine_type, + logging_configuration=state_machine_logging_configuration, + tracing_configuration=state_machine_tracing_configuration, + ) + if idem_state_machine is not None: + return CreateStateMachineOutput( + stateMachineArn=idem_state_machine.arn, + creationDate=idem_state_machine.create_date, + ) + + # Assert this state machine name is unique. + state_machine_with_name: Optional[StateMachineRevision] = self._revision_by_name( + context=context, name=state_machine_name + ) + if state_machine_with_name is not None: + raise StateMachineAlreadyExists( + f"State Machine Already Exists: '{state_machine_with_name.arn}'" + ) + + # Compute the state machine's Arn. + state_machine_arn = stepfunctions_state_machine_arn( + name=state_machine_name, + account_id=context.account_id, + region_name=context.region, + ) + state_machines = self.get_store(context).state_machines + + # Reduce the logging configuration to a usable cloud watch representation, and validate the destinations + # if any were given. + cloud_watch_logging_configuration = ( + CloudWatchLoggingConfiguration.from_logging_configuration( + state_machine_arn=state_machine_arn, + logging_configuration=state_machine_logging_configuration, + ) + ) + if cloud_watch_logging_configuration is not None: + cloud_watch_logging_configuration.validate() + + # Run static analysers on the definition given. + if state_machine_type == StateMachineType.EXPRESS: + StepFunctionsProvider._validate_definition( + definition=state_machine_definition, + static_analysers=[ExpressStaticAnalyser()], + ) + else: + StepFunctionsProvider._validate_definition( + definition=state_machine_definition, static_analysers=[StaticAnalyser()] + ) + + # Create the state machine and add it to the store. + state_machine = StateMachineRevision( + name=state_machine_name, + arn=state_machine_arn, + role_arn=state_machine_role_arn, + definition=state_machine_definition, + sm_type=state_machine_type, + logging_config=state_machine_logging_configuration, + cloud_watch_logging_configuration=cloud_watch_logging_configuration, + tracing_config=state_machine_tracing_configuration, + tags=state_machine_tags, + ) + state_machines[state_machine_arn] = state_machine + + create_output = CreateStateMachineOutput( + stateMachineArn=state_machine.arn, creationDate=state_machine.create_date + ) + + # Create the first version if the 'publish' flag is used. + if request.get("publish", False): + version_description = request.get("versionDescription") + state_machine_version = state_machine.create_version(description=version_description) + if state_machine_version is not None: + state_machine_version_arn = state_machine_version.arn + state_machines[state_machine_version_arn] = state_machine_version + create_output["stateMachineVersionArn"] = state_machine_version_arn + + # Run static analyser on definition and collect usage metrics + UsageMetricsStaticAnalyser.process(state_machine_definition) + + return create_output + + def _validate_state_machine_alias_routing_configuration( + self, context: RequestContext, routing_configuration_list: RoutingConfigurationList + ) -> None: + # TODO: to match AWS's approach best validation exceptions could be + # built in a process decoupled from the provider. 
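+        #
+        # A configuration that passes all checks below (version ARNs are hypothetical):
+        #   [{"stateMachineVersionArn": "...:stateMachine:MyMachine:1", "weight": 60},
+        #    {"stateMachineVersionArn": "...:stateMachine:MyMachine:2", "weight": 40}]
+        # i.e. one or two distinct version ARNs of the same revision, each weight in
+        # [0, 100], and weights summing to exactly 100.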
+ + routing_configuration_list_len = len(routing_configuration_list) + if not (1 <= routing_configuration_list_len <= 2): + # Replicate the object string dump format: + # [RoutingConfigurationListItem(stateMachineVersionArn=arn_no_quotes, weight=int), ...] + routing_configuration_serialization_parts = [] + for routing_configuration in routing_configuration_list: + routing_configuration_serialization_parts.append( + "".join( + [ + "RoutingConfigurationListItem(stateMachineVersionArn=", + routing_configuration["stateMachineVersionArn"], + ", weight=", + str(routing_configuration["weight"]), + ")", + ] + ) + ) + routing_configuration_serialization_list = ( + f"[{', '.join(routing_configuration_serialization_parts)}]" + ) + raise ValidationException( + f"1 validation error detected: Value '{routing_configuration_serialization_list}' " + "at 'routingConfiguration' failed to " + "satisfy constraint: Member must have length less than or equal to 2" + ) + + routing_configuration_arn_list = [ + routing_configuration["stateMachineVersionArn"] + for routing_configuration in routing_configuration_list + ] + if len(set(routing_configuration_arn_list)) < routing_configuration_list_len: + arn_list_string = f"[{', '.join(routing_configuration_arn_list)}]" + raise ValidationException( + "Routing configuration must contain distinct state machine version ARNs. " + f"Received: {arn_list_string}" + ) + + routing_weights = [ + routing_configuration["weight"] for routing_configuration in routing_configuration_list + ] + for i, weight in enumerate(routing_weights): + # TODO: check for weight type. + if weight < 0: + raise ValidationException( + f"Invalid value for parameter routingConfiguration[{i + 1}].weight, value: {weight}, valid min value: 0" + ) + if weight > 100: + raise ValidationException( + f"1 validation error detected: Value '{weight}' at 'routingConfiguration.{i + 1}.member.weight' " + "failed to satisfy constraint: Member must have value less than or equal to 100" + ) + routing_weights_sum = sum(routing_weights) + if not routing_weights_sum == 100: + raise ValidationException( + f"Sum of routing configuration weights must equal 100. Received: {json.dumps(routing_weights)}" + ) + + store = self.get_store(context=context) + state_machines = store.state_machines + + first_routing_qualified_arn = routing_configuration_arn_list[0] + shared_state_machine_revision_arn = self._get_state_machine_arn_from_qualified_arn( + qualified_arn=first_routing_qualified_arn + ) + for routing_configuration_arn in routing_configuration_arn_list: + maybe_state_machine_version = state_machines.get(routing_configuration_arn) + if not isinstance(maybe_state_machine_version, StateMachineVersion): + arn_list_string = f"[{', '.join(routing_configuration_arn_list)}]" + raise ValidationException( + f"Routing configuration must contain state machine version ARNs. 
Received: {arn_list_string}" + ) + state_machine_revision_arn = self._get_state_machine_arn_from_qualified_arn( + qualified_arn=routing_configuration_arn + ) + if state_machine_revision_arn != shared_state_machine_revision_arn: + raise ValidationException("TODO") + + @staticmethod + def _get_state_machine_arn_from_qualified_arn(qualified_arn: Arn) -> Arn: + last_colon_index = qualified_arn.rfind(":") + base_arn = qualified_arn[:last_colon_index] + return base_arn + + def create_state_machine_alias( + self, + context: RequestContext, + name: CharacterRestrictedName, + routing_configuration: RoutingConfigurationList, + description: AliasDescription = None, + **kwargs, + ) -> CreateStateMachineAliasOutput: + # Validate the inputs. + self._validate_state_machine_alias_name(name=name) + self._validate_state_machine_alias_routing_configuration( + context=context, routing_configuration_list=routing_configuration + ) + + # Determine the state machine arn this alias maps to, + # do so unsafely as validation already took place before initialisation. + first_routing_qualified_arn = routing_configuration[0]["stateMachineVersionArn"] + state_machine_revision_arn = self._get_state_machine_arn_from_qualified_arn( + qualified_arn=first_routing_qualified_arn + ) + alias = Alias( + state_machine_arn=state_machine_revision_arn, + name=name, + description=description, + routing_configuration_list=routing_configuration, + ) + state_machine_alias_arn = alias.state_machine_alias_arn + + store = self.get_store(context=context) + + aliases = store.aliases + if maybe_idempotent_alias := aliases.get(state_machine_alias_arn): + if alias.is_idempotent(maybe_idempotent_alias): + return CreateStateMachineAliasOutput( + stateMachineAliasArn=state_machine_alias_arn, creationDate=alias.create_date + ) + else: + # CreateStateMachineAlias is an idempotent API. Idempotent requests won’t create duplicate resources. + raise ConflictException( + "Failed to create alias because an alias with the same name and a " + "different routing configuration already exists." + ) + aliases[state_machine_alias_arn] = alias + + state_machine_revision = store.state_machines.get(state_machine_revision_arn) + if not isinstance(state_machine_revision, StateMachineRevision): + # The state machine was deleted but not the version referenced in this context. 
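+            # Illustrative: the alias routes to version "...:MyMachine:1" while the
+            # revision "...:MyMachine" has been removed from the store (names are
+            # placeholders).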
+ raise RuntimeError(f"No state machine revision for arn '{state_machine_revision_arn}'") + state_machine_revision.aliases.add(alias) + + return CreateStateMachineAliasOutput( + stateMachineAliasArn=state_machine_alias_arn, creationDate=alias.create_date + ) + + def describe_state_machine( + self, + context: RequestContext, + state_machine_arn: Arn, + included_data: IncludedData = None, + **kwargs, + ) -> DescribeStateMachineOutput: + self._validate_state_machine_arn(state_machine_arn) + state_machine = self.get_store(context).state_machines.get(state_machine_arn) + if state_machine is None: + self._raise_state_machine_does_not_exist(state_machine_arn) + return state_machine.describe() + + def describe_state_machine_alias( + self, context: RequestContext, state_machine_alias_arn: Arn, **kwargs + ) -> DescribeStateMachineAliasOutput: + self._validate_state_machine_alias_arn(state_machine_alias_arn=state_machine_alias_arn) + alias: Optional[Alias] = self.get_store(context=context).aliases.get( + state_machine_alias_arn + ) + if alias is None: + # TODO: assemble the correct exception + raise ValidationException() + description = alias.to_description() + return description + + def describe_state_machine_for_execution( + self, + context: RequestContext, + execution_arn: Arn, + included_data: IncludedData = None, + **kwargs, + ) -> DescribeStateMachineForExecutionOutput: + self._validate_state_machine_execution_arn(execution_arn) + execution: Execution = self._get_execution(context=context, execution_arn=execution_arn) + return execution.to_describe_state_machine_for_execution_output() + + def send_task_heartbeat( + self, context: RequestContext, task_token: TaskToken, **kwargs + ) -> SendTaskHeartbeatOutput: + running_executions: list[Execution] = self._get_executions(context, ExecutionStatus.RUNNING) + for execution in running_executions: + try: + if execution.exec_worker.env.callback_pool_manager.heartbeat( + callback_id=task_token + ): + return SendTaskHeartbeatOutput() + except CallbackNotifyConsumerError as consumer_error: + if isinstance(consumer_error, CallbackConsumerTimeout): + raise TaskTimedOut() + else: + raise TaskDoesNotExist() + raise InvalidToken() + + def send_task_success( + self, + context: RequestContext, + task_token: TaskToken, + output: SensitiveData, + **kwargs, + ) -> SendTaskSuccessOutput: + outcome = CallbackOutcomeSuccess(callback_id=task_token, output=output) + running_executions: list[Execution] = self._get_executions(context, ExecutionStatus.RUNNING) + for execution in running_executions: + try: + if execution.exec_worker.env.callback_pool_manager.notify( + callback_id=task_token, outcome=outcome + ): + return SendTaskSuccessOutput() + except CallbackNotifyConsumerError as consumer_error: + if isinstance(consumer_error, CallbackConsumerTimeout): + raise TaskTimedOut() + else: + raise TaskDoesNotExist() + raise InvalidToken("Invalid token") + + def send_task_failure( + self, + context: RequestContext, + task_token: TaskToken, + error: SensitiveError = None, + cause: SensitiveCause = None, + **kwargs, + ) -> SendTaskFailureOutput: + outcome = CallbackOutcomeFailure(callback_id=task_token, error=error, cause=cause) + store = self.get_store(context) + for execution in store.executions.values(): + try: + if execution.exec_worker.env.callback_pool_manager.notify( + callback_id=task_token, outcome=outcome + ): + return SendTaskFailureOutput() + except CallbackNotifyConsumerError as consumer_error: + if isinstance(consumer_error, CallbackConsumerTimeout): + raise 
TaskTimedOut()
+                else:
+                    raise TaskDoesNotExist()
+        raise InvalidToken("Invalid token")
+
+    def start_execution(
+        self,
+        context: RequestContext,
+        state_machine_arn: Arn,
+        name: Name = None,
+        input: SensitiveData = None,
+        trace_header: TraceHeader = None,
+        **kwargs,
+    ) -> StartExecutionOutput:
+        self._validate_state_machine_arn(state_machine_arn)
+
+        state_machine_arn_parts = state_machine_arn.split("#")
+        state_machine_arn = state_machine_arn_parts[0]
+        mock_test_case_name = (
+            state_machine_arn_parts[1] if len(state_machine_arn_parts) == 2 else None
+        )
+
+        store = self.get_store(context=context)
+
+        alias: Optional[Alias] = store.aliases.get(state_machine_arn)
+        alias_sample_state_machine_version_arn = alias.sample() if alias is not None else None
+        unsafe_state_machine: Optional[StateMachineInstance] = store.state_machines.get(
+            alias_sample_state_machine_version_arn or state_machine_arn
+        )
+        if not unsafe_state_machine:
+            self._raise_state_machine_does_not_exist(state_machine_arn)
+
+        # Work on a deep copy: update events may change parameters of the state machine
+        # and should not affect those of this execution.
+        state_machine_clone = copy.deepcopy(unsafe_state_machine)
+
+        if input is None:
+            input_data = dict()
+        else:
+            try:
+                input_data = json.loads(input)
+            except Exception as ex:
+                raise InvalidExecutionInput(str(ex))  # TODO: report parsing error like AWS.
+
+        normalised_state_machine_arn = (
+            state_machine_clone.source_arn
+            if isinstance(state_machine_clone, StateMachineVersion)
+            else state_machine_clone.arn
+        )
+        exec_name = name or long_uid()  # TODO: validate name format
+        if state_machine_clone.sm_type == StateMachineType.STANDARD:
+            exec_arn = stepfunctions_standard_execution_arn(normalised_state_machine_arn, exec_name)
+        else:
+            # Exhaustive check on STANDARD and EXPRESS type, validated on creation.
+            exec_arn = stepfunctions_express_execution_arn(normalised_state_machine_arn, exec_name)
+
+        if execution := store.executions.get(exec_arn):
+            # Return the already-running execution if name and input match.
+            existing_execution = self._idempotent_start_execution(
+                execution=execution,
+                state_machine=state_machine_clone,
+                name=name,
+                input_data=input_data,
+            )
+
+            if existing_execution:
+                return existing_execution.to_start_output()
+
+        # Create the execution logging session, if logging is configured.
+        cloud_watch_logging_session = None
+        if state_machine_clone.cloud_watch_logging_configuration is not None:
+            cloud_watch_logging_session = CloudWatchLoggingSession(
+                execution_arn=exec_arn,
+                configuration=state_machine_clone.cloud_watch_logging_configuration,
+            )
+
+        mock_test_case: Optional[MockTestCase] = None
+        if mock_test_case_name is not None:
+            state_machine_name = state_machine_clone.name
+            mock_test_case = load_mock_test_case_for(
+                state_machine_name=state_machine_name, test_case_name=mock_test_case_name
+            )
+            if mock_test_case is None:
+                raise InvalidName(
+                    f"Invalid mock test case name '{mock_test_case_name}' "
+                    f"for state machine '{state_machine_name}'. "
+                    "Either the test case is not defined or the mock configuration file "
+                    "could not be loaded. See logs for details."
+                )
+
+        execution = Execution(
+            name=exec_name,
+            sm_type=state_machine_clone.sm_type,
+            role_arn=state_machine_clone.role_arn,
+            exec_arn=exec_arn,
+            account_id=context.account_id,
+            region_name=context.region,
+            state_machine=state_machine_clone,
+            state_machine_alias_arn=alias.state_machine_alias_arn if alias is not None else None,
+            start_date=datetime.datetime.now(tz=datetime.timezone.utc),
+            cloud_watch_logging_session=cloud_watch_logging_session,
+            input_data=input_data,
+            trace_header=trace_header,
+            activity_store=self.get_store(context).activities,
+            mock_test_case=mock_test_case,
+        )
+
+        store.executions[exec_arn] = execution
+
+        execution.start()
+        return execution.to_start_output()
+
+    def start_sync_execution(
+        self,
+        context: RequestContext,
+        state_machine_arn: Arn,
+        name: Name = None,
+        input: SensitiveData = None,
+        trace_header: TraceHeader = None,
+        included_data: IncludedData = None,
+        **kwargs,
+    ) -> StartSyncExecutionOutput:
+        self._validate_state_machine_arn(state_machine_arn)
+        unsafe_state_machine: Optional[StateMachineInstance] = self.get_store(
+            context
+        ).state_machines.get(state_machine_arn)
+        if not unsafe_state_machine:
+            self._raise_state_machine_does_not_exist(state_machine_arn)
+
+        if unsafe_state_machine.sm_type == StateMachineType.STANDARD:
+            self._raise_state_machine_type_not_supported()
+
+        # Work on a deep copy: update events may change parameters of the state machine
+        # and should not affect those of this execution.
+        state_machine_clone = copy.deepcopy(unsafe_state_machine)
+
+        if input is None:
+            input_data = dict()
+        else:
+            try:
+                input_data = json.loads(input)
+            except Exception as ex:
+                raise InvalidExecutionInput(str(ex))  # TODO: report parsing error like AWS.
+
+        normalised_state_machine_arn = (
+            state_machine_clone.source_arn
+            if isinstance(state_machine_clone, StateMachineVersion)
+            else state_machine_clone.arn
+        )
+        exec_name = name or long_uid()  # TODO: validate name format
+        exec_arn = stepfunctions_express_execution_arn(normalised_state_machine_arn, exec_name)
+
+        if exec_arn in self.get_store(context).executions:
+            raise InvalidName()  # TODO
+
+        # Create the execution logging session, if logging is configured.
+        cloud_watch_logging_session = None
+        if state_machine_clone.cloud_watch_logging_configuration is not None:
+            cloud_watch_logging_session = CloudWatchLoggingSession(
+                execution_arn=exec_arn,
+                configuration=state_machine_clone.cloud_watch_logging_configuration,
+            )
+
+        execution = SyncExecution(
+            name=exec_name,
+            sm_type=state_machine_clone.sm_type,
+            role_arn=state_machine_clone.role_arn,
+            exec_arn=exec_arn,
+            account_id=context.account_id,
+            region_name=context.region,
+            state_machine=state_machine_clone,
+            start_date=datetime.datetime.now(tz=datetime.timezone.utc),
+            cloud_watch_logging_session=cloud_watch_logging_session,
+            input_data=input_data,
+            trace_header=trace_header,
+            activity_store=self.get_store(context).activities,
+        )
+        self.get_store(context).executions[exec_arn] = execution
+
+        execution.start()
+        return execution.to_start_sync_execution_output()
+
+    def describe_execution(
+        self,
+        context: RequestContext,
+        execution_arn: Arn,
+        included_data: IncludedData = None,
+        **kwargs,
+    ) -> DescribeExecutionOutput:
+        self._validate_state_machine_execution_arn(execution_arn)
+        execution: Execution = self._get_execution(context=context, execution_arn=execution_arn)
+
+        # Action only compatible with STANDARD workflows.
+ if execution.sm_type != StateMachineType.STANDARD: + self._raise_resource_type_not_in_context(resource_type=execution.sm_type) + + return execution.to_describe_output() + + @staticmethod + def _list_execution_filter( + ex: Execution, state_machine_arn: str, status_filter: Optional[str] + ) -> bool: + state_machine_reference_arn_set = {ex.state_machine_arn, ex.state_machine_version_arn} + if state_machine_arn not in state_machine_reference_arn_set: + return False + + if not status_filter: + return True + return ex.exec_status == status_filter + + def list_executions( + self, + context: RequestContext, + state_machine_arn: Arn = None, + status_filter: ExecutionStatus = None, + max_results: PageSize = None, + next_token: ListExecutionsPageToken = None, + map_run_arn: LongArn = None, + redrive_filter: ExecutionRedriveFilter = None, + **kwargs, + ) -> ListExecutionsOutput: + self._validate_state_machine_arn(state_machine_arn) + assert_pagination_parameters_valid( + max_results=max_results, + next_token=next_token, + next_token_length_limit=3096, + ) + max_results = normalise_max_results(max_results) + + state_machine = self.get_store(context).state_machines.get(state_machine_arn) + if state_machine is None: + self._raise_state_machine_does_not_exist(state_machine_arn) + + if state_machine.sm_type != StateMachineType.STANDARD: + self._raise_state_machine_type_not_supported() + + # TODO: add support for paging + + allowed_execution_status = [ + ExecutionStatus.SUCCEEDED, + ExecutionStatus.TIMED_OUT, + ExecutionStatus.PENDING_REDRIVE, + ExecutionStatus.ABORTED, + ExecutionStatus.FAILED, + ExecutionStatus.RUNNING, + ] + + validation_errors = [] + + if status_filter and status_filter not in allowed_execution_status: + validation_errors.append( + f"Value '{status_filter}' at 'statusFilter' failed to satisfy constraint: Member must satisfy enum value set: [{', '.join(allowed_execution_status)}]" + ) + + if not state_machine_arn and not map_run_arn: + validation_errors.append("Must provide a StateMachine ARN or MapRun ARN") + + if validation_errors: + errors_message = "; ".join(validation_errors) + message = f"{len(validation_errors)} validation {'errors' if len(validation_errors) > 1 else 'error'} detected: {errors_message}" + raise CommonServiceException(message=message, code="ValidationException") + + executions: ExecutionList = [ + execution.to_execution_list_item() + for execution in self.get_store(context).executions.values() + if self._list_execution_filter( + execution, + state_machine_arn=state_machine_arn, + status_filter=status_filter, + ) + ] + + executions.sort(key=lambda item: item["startDate"], reverse=True) + + paginated_executions = PaginatedList(executions) + page, token_for_next_page = paginated_executions.get_page( + token_generator=lambda item: get_next_page_token_from_arn(item.get("executionArn")), + page_size=max_results, + next_token=next_token, + ) + + return ListExecutionsOutput(executions=page, nextToken=token_for_next_page) + + def list_state_machines( + self, + context: RequestContext, + max_results: PageSize = None, + next_token: PageToken = None, + **kwargs, + ) -> ListStateMachinesOutput: + assert_pagination_parameters_valid(max_results, next_token) + max_results = normalise_max_results(max_results) + + state_machines: StateMachineList = [ + sm.itemise() + for sm in self.get_store(context).state_machines.values() + if isinstance(sm, StateMachineRevision) + ] + state_machines.sort(key=lambda item: item["name"]) + + paginated_state_machines = 
PaginatedList(state_machines) + page, token_for_next_page = paginated_state_machines.get_page( + token_generator=lambda item: get_next_page_token_from_arn(item.get("stateMachineArn")), + page_size=max_results, + next_token=next_token, + ) + + return ListStateMachinesOutput(stateMachines=page, nextToken=token_for_next_page) + + def list_state_machine_aliases( + self, + context: RequestContext, + state_machine_arn: Arn, + next_token: PageToken = None, + max_results: PageSize = None, + **kwargs, + ) -> ListStateMachineAliasesOutput: + assert_pagination_parameters_valid(max_results, next_token) + + self._validate_state_machine_arn(state_machine_arn) + state_machines = self.get_store(context).state_machines + state_machine_revision = state_machines.get(state_machine_arn) + if not isinstance(state_machine_revision, StateMachineRevision): + raise InvalidArn(f"Invalid arn: {state_machine_arn}") + + state_machine_aliases: StateMachineAliasList = list() + valid_token_found = next_token is None + + for alias in state_machine_revision.aliases: + state_machine_aliases.append(alias.to_item()) + if alias.tokenized_state_machine_alias_arn == next_token: + valid_token_found = True + + if not valid_token_found: + raise InvalidToken("Invalid Token: 'Invalid token'") + + state_machine_aliases.sort(key=lambda item: item["creationDate"]) + + paginated_list = PaginatedList(state_machine_aliases) + + paginated_aliases, next_token = paginated_list.get_page( + token_generator=lambda item: get_next_page_token_from_arn( + item.get("stateMachineAliasArn") + ), + next_token=next_token, + page_size=100 if max_results == 0 or max_results is None else max_results, + ) + + return ListStateMachineAliasesOutput( + stateMachineAliases=paginated_aliases, nextToken=next_token + ) + + def list_state_machine_versions( + self, + context: RequestContext, + state_machine_arn: Arn, + next_token: PageToken = None, + max_results: PageSize = None, + **kwargs, + ) -> ListStateMachineVersionsOutput: + self._validate_state_machine_arn(state_machine_arn) + assert_pagination_parameters_valid(max_results, next_token) + max_results = normalise_max_results(max_results) + + state_machines = self.get_store(context).state_machines + state_machine_revision = state_machines.get(state_machine_arn) + if not isinstance(state_machine_revision, StateMachineRevision): + raise InvalidArn(f"Invalid arn: {state_machine_arn}") + + state_machine_version_items = list() + for version_arn in state_machine_revision.versions.values(): + state_machine_version = state_machines[version_arn] + if isinstance(state_machine_version, StateMachineVersion): + state_machine_version_items.append(state_machine_version.itemise()) + else: + raise RuntimeError( + f"Expected {version_arn} to be a StateMachine Version, but got '{type(state_machine_version)}'." 
+                )
+
+        state_machine_version_items.sort(key=lambda item: item["creationDate"], reverse=True)
+
+        paginated_state_machine_versions = PaginatedList(state_machine_version_items)
+        page, token_for_next_page = paginated_state_machine_versions.get_page(
+            token_generator=lambda item: get_next_page_token_from_arn(
+                item.get("stateMachineVersionArn")
+            ),
+            page_size=max_results,
+            next_token=next_token,
+        )
+
+        return ListStateMachineVersionsOutput(
+            stateMachineVersions=page, nextToken=token_for_next_page
+        )
+
+    def get_execution_history(
+        self,
+        context: RequestContext,
+        execution_arn: Arn,
+        max_results: PageSize = None,
+        reverse_order: ReverseOrder = None,
+        next_token: PageToken = None,
+        include_execution_data: IncludeExecutionDataGetExecutionHistory = None,
+        **kwargs,
+    ) -> GetExecutionHistoryOutput:
+        # TODO: add support for paging, ordering, and other manipulations.
+        self._validate_state_machine_execution_arn(execution_arn)
+        execution: Execution = self._get_execution(context=context, execution_arn=execution_arn)
+
+        # Action only compatible with STANDARD workflows.
+        if execution.sm_type != StateMachineType.STANDARD:
+            self._raise_resource_type_not_in_context(resource_type=execution.sm_type)
+
+        history: GetExecutionHistoryOutput = execution.to_history_output()
+        if reverse_order:
+            history["events"].reverse()
+        return history
+
+    def delete_state_machine(
+        self, context: RequestContext, state_machine_arn: Arn, **kwargs
+    ) -> DeleteStateMachineOutput:
+        # TODO: halt executions?
+        self._validate_state_machine_arn(state_machine_arn)
+        state_machines = self.get_store(context).state_machines
+        state_machine = state_machines.get(state_machine_arn)
+        if isinstance(state_machine, StateMachineRevision):
+            state_machines.pop(state_machine_arn)
+            for version_arn in state_machine.versions.values():
+                state_machines.pop(version_arn, None)
+        return DeleteStateMachineOutput()
+
+    def delete_state_machine_alias(
+        self, context: RequestContext, state_machine_alias_arn: Arn, **kwargs
+    ) -> DeleteStateMachineAliasOutput:
+        self._validate_state_machine_alias_arn(state_machine_alias_arn=state_machine_alias_arn)
+        store = self.get_store(context=context)
+        aliases = store.aliases
+        if (alias := aliases.pop(state_machine_alias_arn, None)) is not None:
+            state_machines = store.state_machines
+            for routing_configuration in alias.get_routing_configuration_list():
+                state_machine_version_arn = routing_configuration["stateMachineVersionArn"]
+                if (
+                    state_machine_version := state_machines.get(state_machine_version_arn)
+                ) is None or not isinstance(state_machine_version, StateMachineVersion):
+                    continue
+                if (
+                    state_machine_revision := state_machines.get(state_machine_version.source_arn)
+                ) is None or not isinstance(state_machine_revision, StateMachineRevision):
+                    continue
+                state_machine_revision.aliases.discard(alias)
+        return DeleteStateMachineAliasOutput()
+
+    def delete_state_machine_version(
+        self, context: RequestContext, state_machine_version_arn: LongArn, **kwargs
+    ) -> DeleteStateMachineVersionOutput:
+        self._validate_state_machine_arn(state_machine_version_arn)
+        state_machines = self.get_store(context).state_machines
+
+        if not (
+            state_machine_version := state_machines.get(state_machine_version_arn)
+        ) or not isinstance(state_machine_version, StateMachineVersion):
+            return DeleteStateMachineVersionOutput()
+
+        if (
+            state_machine_revision := state_machines.get(state_machine_version.source_arn)
+        ) and isinstance(state_machine_revision, StateMachineRevision):
+
referencing_alias_names: list[str] = list() + for alias in state_machine_revision.aliases: + if alias.is_router_for(state_machine_version_arn=state_machine_version_arn): + referencing_alias_names.append(alias.name) + if referencing_alias_names: + referencing_alias_names_list_body = ", ".join(referencing_alias_names) + raise ConflictException( + "Version to be deleted must not be referenced by an alias. " + f"Current list of aliases referencing this version: [{referencing_alias_names_list_body}]" + ) + state_machine_revision.delete_version(state_machine_version_arn) + + state_machines.pop(state_machine_version.arn, None) + return DeleteStateMachineVersionOutput() + + def stop_execution( + self, + context: RequestContext, + execution_arn: Arn, + error: SensitiveError = None, + cause: SensitiveCause = None, + **kwargs, + ) -> StopExecutionOutput: + self._validate_state_machine_execution_arn(execution_arn) + execution: Execution = self._get_execution(context=context, execution_arn=execution_arn) + + # Action only compatible with STANDARD workflows. + if execution.sm_type != StateMachineType.STANDARD: + self._raise_resource_type_not_in_context(resource_type=execution.sm_type) + + stop_date = datetime.datetime.now(tz=datetime.timezone.utc) + execution.stop(stop_date=stop_date, cause=cause, error=error) + return StopExecutionOutput(stopDate=stop_date) + + def update_state_machine( + self, + context: RequestContext, + state_machine_arn: Arn, + definition: Definition = None, + role_arn: Arn = None, + logging_configuration: LoggingConfiguration = None, + tracing_configuration: TracingConfiguration = None, + publish: Publish = None, + version_description: VersionDescription = None, + encryption_configuration: EncryptionConfiguration = None, + **kwargs, + ) -> UpdateStateMachineOutput: + self._validate_state_machine_arn(state_machine_arn) + state_machines = self.get_store(context).state_machines + + state_machine = state_machines.get(state_machine_arn) + if not isinstance(state_machine, StateMachineRevision): + self._raise_state_machine_does_not_exist(state_machine_arn) + + # TODO: Add logic to handle metrics for when SFN definitions update + if not any([definition, role_arn, logging_configuration]): + raise MissingRequiredParameter( + "Either the definition, the role ARN, the LoggingConfiguration, " + "or the TracingConfiguration must be specified" + ) + + if definition is not None: + self._validate_definition(definition=definition, static_analysers=[StaticAnalyser()]) + + if logging_configuration is not None: + self._sanitise_logging_configuration(logging_configuration=logging_configuration) + + revision_id = state_machine.create_revision( + definition=definition, + role_arn=role_arn, + logging_configuration=logging_configuration, + ) + + version_arn = None + if publish: + version = state_machine.create_version(description=version_description) + if version is not None: + version_arn = version.arn + state_machines[version_arn] = version + else: + target_revision_id = revision_id or state_machine.revision_id + version_arn = state_machine.versions[target_revision_id] + + update_output = UpdateStateMachineOutput( + updateDate=datetime.datetime.now(tz=datetime.timezone.utc) + ) + if revision_id is not None: + update_output["revisionId"] = revision_id + if version_arn is not None: + update_output["stateMachineVersionArn"] = version_arn + return update_output + + def update_state_machine_alias( + self, + context: RequestContext, + state_machine_alias_arn: Arn, + description: AliasDescription = None, + 
routing_configuration: RoutingConfigurationList = None, + **kwargs, + ) -> UpdateStateMachineAliasOutput: + self._validate_state_machine_alias_arn(state_machine_alias_arn=state_machine_alias_arn) + if not any([description, routing_configuration]): + raise MissingRequiredParameter( + "Either the description or the RoutingConfiguration must be specified" + ) + if routing_configuration is not None: + self._validate_state_machine_alias_routing_configuration( + context=context, routing_configuration_list=routing_configuration + ) + store = self.get_store(context=context) + alias = store.aliases.get(state_machine_alias_arn) + if alias is None: + raise ResourceNotFound("Request references a resource that does not exist.") + + alias.update(description=description, routing_configuration_list=routing_configuration) + return UpdateStateMachineAliasOutput(updateDate=alias.update_date) + + def publish_state_machine_version( + self, + context: RequestContext, + state_machine_arn: Arn, + revision_id: RevisionId = None, + description: VersionDescription = None, + **kwargs, + ) -> PublishStateMachineVersionOutput: + self._validate_state_machine_arn(state_machine_arn) + state_machines = self.get_store(context).state_machines + + state_machine_revision = state_machines.get(state_machine_arn) + if not isinstance(state_machine_revision, StateMachineRevision): + self._raise_state_machine_does_not_exist(state_machine_arn) + + if revision_id is not None and state_machine_revision.revision_id != revision_id: + raise ConflictException( + f"Failed to publish the State Machine version for revision {revision_id}. " + f"The current State Machine revision is {state_machine_revision.revision_id}." + ) + + state_machine_version = state_machine_revision.create_version(description=description) + if state_machine_version is not None: + state_machines[state_machine_version.arn] = state_machine_version + else: + target_revision_id = revision_id or state_machine_revision.revision_id + state_machine_version_arn = state_machine_revision.versions.get(target_revision_id) + state_machine_version = state_machines[state_machine_version_arn] + + return PublishStateMachineVersionOutput( + creationDate=state_machine_version.create_date, + stateMachineVersionArn=state_machine_version.arn, + ) + + def tag_resource( + self, context: RequestContext, resource_arn: Arn, tags: TagList, **kwargs + ) -> TagResourceOutput: + # TODO: add tagging for activities. + state_machines = self.get_store(context).state_machines + state_machine = state_machines.get(resource_arn) + if not isinstance(state_machine, StateMachineRevision): + raise ResourceNotFound(f"Resource not found: '{resource_arn}'") + + state_machine.tag_manager.add_all(tags) + return TagResourceOutput() + + def untag_resource( + self, context: RequestContext, resource_arn: Arn, tag_keys: TagKeyList, **kwargs + ) -> UntagResourceOutput: + # TODO: add untagging for activities. + state_machines = self.get_store(context).state_machines + state_machine = state_machines.get(resource_arn) + if not isinstance(state_machine, StateMachineRevision): + raise ResourceNotFound(f"Resource not found: '{resource_arn}'") + + state_machine.tag_manager.remove_all(tag_keys) + return UntagResourceOutput() + + def list_tags_for_resource( + self, context: RequestContext, resource_arn: Arn, **kwargs + ) -> ListTagsForResourceOutput: + # TODO: add untagging for activities. 
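+        # Round-trip sketch over the three tagging actions above (the client setup
+        # and state machine ARN are illustrative assumptions):
+        #
+        #     sfn.tag_resource(resourceArn=sm_arn, tags=[{"key": "team", "value": "platform"}])
+        #     sfn.list_tags_for_resource(resourceArn=sm_arn)["tags"]
+        #     # -> [{"key": "team", "value": "platform"}]
+        #     sfn.untag_resource(resourceArn=sm_arn, tagKeys=["team"])
+        #
+        # Only state-machine revision ARNs are accepted; version, alias, and activity
+        # ARNs yield ResourceNotFound below.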
+ state_machines = self.get_store(context).state_machines + state_machine = state_machines.get(resource_arn) + if not isinstance(state_machine, StateMachineRevision): + raise ResourceNotFound(f"Resource not found: '{resource_arn}'") + + tags: TagList = state_machine.tag_manager.to_tag_list() + return ListTagsForResourceOutput(tags=tags) + + def describe_map_run( + self, context: RequestContext, map_run_arn: LongArn, **kwargs + ) -> DescribeMapRunOutput: + store = self.get_store(context) + for execution in store.executions.values(): + map_run_record: Optional[MapRunRecord] = ( + execution.exec_worker.env.map_run_record_pool_manager.get(map_run_arn) + ) + if map_run_record is not None: + return map_run_record.describe() + raise ResourceNotFound() + + def list_map_runs( + self, + context: RequestContext, + execution_arn: Arn, + max_results: PageSize = None, + next_token: PageToken = None, + **kwargs, + ) -> ListMapRunsOutput: + # TODO: add support for paging. + execution = self._get_execution(context=context, execution_arn=execution_arn) + map_run_records: list[MapRunRecord] = ( + execution.exec_worker.env.map_run_record_pool_manager.get_all() + ) + return ListMapRunsOutput( + mapRuns=[map_run_record.list_item() for map_run_record in map_run_records] + ) + + def update_map_run( + self, + context: RequestContext, + map_run_arn: LongArn, + max_concurrency: MaxConcurrency = None, + tolerated_failure_percentage: ToleratedFailurePercentage = None, + tolerated_failure_count: ToleratedFailureCount = None, + **kwargs, + ) -> UpdateMapRunOutput: + if tolerated_failure_percentage is not None or tolerated_failure_count is not None: + raise NotImplementedError( + "Updating of ToleratedFailureCount and ToleratedFailurePercentage is currently unsupported." + ) + # TODO: investigate behaviour of empty requests. + store = self.get_store(context) + for execution in store.executions.values(): + map_run_record: Optional[MapRunRecord] = ( + execution.exec_worker.env.map_run_record_pool_manager.get(map_run_arn) + ) + if map_run_record is not None: + map_run_record.update( + max_concurrency=max_concurrency, + tolerated_failure_count=tolerated_failure_count, + tolerated_failure_percentage=tolerated_failure_percentage, + ) + LOG.warning( + "StepFunctions UpdateMapRun changes are currently not being reflected in the MapRun instances." 
+ ) + return UpdateMapRunOutput() + raise ResourceNotFound() + + def test_state( + self, + context: RequestContext, + definition: Definition, + role_arn: Arn = None, + input: SensitiveData = None, + inspection_level: InspectionLevel = None, + reveal_secrets: RevealSecrets = None, + variables: SensitiveData = None, + **kwargs, + ) -> TestStateOutput: + StepFunctionsProvider._validate_definition( + definition=definition, static_analysers=[TestStateStaticAnalyser()] + ) + + name: Optional[Name] = f"TestState-{short_uid()}" + arn = stepfunctions_state_machine_arn( + name=name, account_id=context.account_id, region_name=context.region + ) + state_machine = TestStateMachine( + name=name, + arn=arn, + role_arn=role_arn, + definition=definition, + ) + exec_arn = stepfunctions_standard_execution_arn(state_machine.arn, name) + + input_json = json.loads(input) + execution = TestStateExecution( + name=name, + role_arn=role_arn, + exec_arn=exec_arn, + account_id=context.account_id, + region_name=context.region, + state_machine=state_machine, + start_date=datetime.datetime.now(tz=datetime.timezone.utc), + input_data=input_json, + activity_store=self.get_store(context).activities, + ) + execution.start() + + test_state_output = execution.to_test_state_output( + inspection_level=inspection_level or InspectionLevel.INFO + ) + + return test_state_output + + def create_activity( + self, + context: RequestContext, + name: Name, + tags: TagList = None, + encryption_configuration: EncryptionConfiguration = None, + **kwargs, + ) -> CreateActivityOutput: + self._validate_activity_name(name=name) + + activity_arn = stepfunctions_activity_arn( + name=name, account_id=context.account_id, region_name=context.region + ) + activities = self.get_store(context).activities + if activity_arn not in activities: + activity = Activity(arn=activity_arn, name=name) + activities[activity_arn] = activity + else: + activity = activities[activity_arn] + + return CreateActivityOutput(activityArn=activity.arn, creationDate=activity.creation_date) + + def delete_activity( + self, context: RequestContext, activity_arn: Arn, **kwargs + ) -> DeleteActivityOutput: + self._validate_activity_arn(activity_arn) + self.get_store(context).activities.pop(activity_arn, None) + return DeleteActivityOutput() + + def describe_activity( + self, context: RequestContext, activity_arn: Arn, **kwargs + ) -> DescribeActivityOutput: + self._validate_activity_arn(activity_arn) + activity = self._get_activity(context=context, activity_arn=activity_arn) + return activity.to_describe_activity_output() + + def list_activities( + self, + context: RequestContext, + max_results: PageSize = None, + next_token: PageToken = None, + **kwargs, + ) -> ListActivitiesOutput: + activities: list[Activity] = list(self.get_store(context).activities.values()) + return ListActivitiesOutput( + activities=[activity.to_activity_list_item() for activity in activities] + ) + + def _send_activity_task_started( + self, + context: RequestContext, + task_token: TaskToken, + worker_name: Optional[Name], + ) -> None: + executions: list[Execution] = self._get_executions(context) + for execution in executions: + callback_endpoint = execution.exec_worker.env.callback_pool_manager.get( + callback_id=task_token + ) + if isinstance(callback_endpoint, ActivityCallbackEndpoint): + callback_endpoint.notify_activity_task_start(worker_name=worker_name) + return + raise InvalidToken() + + @staticmethod + def _pull_activity_task(activity: Activity) -> Optional[ActivityTask]: + seconds_left = 60 + 
while seconds_left > 0: + try: + return activity.get_task() + except IndexError: + time.sleep(1) + seconds_left -= 1 + return None + + def get_activity_task( + self, + context: RequestContext, + activity_arn: Arn, + worker_name: Name = None, + **kwargs, + ) -> GetActivityTaskOutput: + self._validate_activity_arn(activity_arn) + + activity = self._get_activity(context=context, activity_arn=activity_arn) + maybe_task: Optional[ActivityTask] = self._pull_activity_task(activity=activity) + if maybe_task is not None: + self._send_activity_task_started( + context, maybe_task.task_token, worker_name=worker_name + ) + return GetActivityTaskOutput( + taskToken=maybe_task.task_token, input=maybe_task.task_input + ) + + return GetActivityTaskOutput(taskToken=None, input=None) + + def validate_state_machine_definition( + self, context: RequestContext, request: ValidateStateMachineDefinitionInput, **kwargs + ) -> ValidateStateMachineDefinitionOutput: + # TODO: increase parity of static analysers, current implementation is an unblocker for this API action. + # TODO: add support for ValidateStateMachineDefinitionSeverity + # TODO: add support for ValidateStateMachineDefinitionMaxResult + + state_machine_type: StateMachineType = request.get("type", StateMachineType.STANDARD) + definition: str = request["definition"] + + static_analysers = list() + if state_machine_type == StateMachineType.STANDARD: + static_analysers.append(StaticAnalyser()) + else: + static_analysers.append(ExpressStaticAnalyser()) + + diagnostics: ValidateStateMachineDefinitionDiagnosticList = list() + try: + StepFunctionsProvider._validate_definition( + definition=definition, static_analysers=static_analysers + ) + validation_result = ValidateStateMachineDefinitionResultCode.OK + except InvalidDefinition as invalid_definition: + validation_result = ValidateStateMachineDefinitionResultCode.FAIL + diagnostics.append( + ValidateStateMachineDefinitionDiagnostic( + severity=ValidateStateMachineDefinitionSeverity.ERROR, + code="SCHEMA_VALIDATION_FAILED", + message=invalid_definition.message, + ) + ) + except Exception as ex: + validation_result = ValidateStateMachineDefinitionResultCode.FAIL + LOG.error("Unknown error during validation %s", ex) + + return ValidateStateMachineDefinitionOutput( + result=validation_result, diagnostics=diagnostics, truncated=False + ) diff --git a/localstack-core/localstack/services/stepfunctions/quotas.py b/localstack-core/localstack/services/stepfunctions/quotas.py new file mode 100644 index 0000000000000..bf55f8256f51a --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/quotas.py @@ -0,0 +1,13 @@ +import json +from typing import Final, Union + +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str + +MAX_STATE_SIZE_UTF8_BYTES: Final[int] = 256 * 1024 # 256 KB of data as a UTF-8 encoded string. 
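+# The helper below compares UTF-8 byte length, not character count, against this
+# limit, so multi-byte payloads reach the quota sooner than len() alone would
+# suggest. Illustration (values chosen for the arithmetic only):
+#
+#     payload = "ü" * (128 * 1024)          # "ü" encodes to two bytes in UTF-8
+#     len(payload)                          # 131072 characters
+#     len(payload.encode("utf-8"))          # 262144 bytes == 256 KiB, at the limit
+#     is_within_size_quota(payload)         # False: the comparison is a strict "<"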
+
+
+def is_within_size_quota(value: Union[str, dict, list]) -> bool:
+    item_str = value if isinstance(value, str) else to_json_str(value)
+    item_bytes = item_str.encode("utf-8")
+    len_item_bytes = len(item_bytes)
+    return len_item_bytes < MAX_STATE_SIZE_UTF8_BYTES
diff --git a/localstack-core/localstack/services/stepfunctions/resource_providers/__init__.py b/localstack-core/localstack/services/stepfunctions/resource_providers/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity.py b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity.py
new file mode 100644
index 0000000000000..bea92e160ec03
--- /dev/null
+++ b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity.py
@@ -0,0 +1,114 @@
+# LocalStack Resource Provider Scaffolding v2
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Optional, TypedDict
+
+import localstack.services.cloudformation.provider_utils as util
+from localstack.services.cloudformation.resource_provider import (
+    OperationStatus,
+    ProgressEvent,
+    ResourceProvider,
+    ResourceRequest,
+)
+
+
+class StepFunctionsActivityProperties(TypedDict):
+    Name: Optional[str]
+    Arn: Optional[str]
+    Tags: Optional[list[TagsEntry]]
+
+
+class TagsEntry(TypedDict):
+    Key: Optional[str]
+    Value: Optional[str]
+
+
+REPEATED_INVOCATION = "repeated_invocation"
+
+
+class StepFunctionsActivityProvider(ResourceProvider[StepFunctionsActivityProperties]):
+    TYPE = "AWS::StepFunctions::Activity"  # Autogenerated. Don't change
+    SCHEMA = util.get_schema_path(Path(__file__))  # Autogenerated. Don't change
+
+    def create(
+        self,
+        request: ResourceRequest[StepFunctionsActivityProperties],
+    ) -> ProgressEvent[StepFunctionsActivityProperties]:
+        """
+        Create a new resource.
+ + Primary identifier fields: + - /properties/Arn + + Required properties: + - Name + + Create-only properties: + - /properties/Name + + Read-only properties: + - /properties/Arn + + IAM permissions required: + - states:CreateActivity + + """ + model = request.desired_state + step_functions = request.aws_client_factory.stepfunctions + response = step_functions.create_activity(**model) + model["Arn"] = response["activityArn"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def read( + self, + request: ResourceRequest[StepFunctionsActivityProperties], + ) -> ProgressEvent[StepFunctionsActivityProperties]: + """ + Fetch resource information + + IAM permissions required: + - states:DescribeActivity + - states:ListTagsForResource + """ + raise NotImplementedError + + def delete( + self, + request: ResourceRequest[StepFunctionsActivityProperties], + ) -> ProgressEvent[StepFunctionsActivityProperties]: + """ + Delete a resource + + IAM permissions required: + - states:DeleteActivity + """ + model = request.desired_state + step_functions = request.aws_client_factory.stepfunctions + + step_functions.delete_activity(activityArn=model["Arn"]) + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def update( + self, + request: ResourceRequest[StepFunctionsActivityProperties], + ) -> ProgressEvent[StepFunctionsActivityProperties]: + """ + Update a resource + + IAM permissions required: + - states:ListTagsForResource + - states:TagResource + - states:UntagResource + """ + raise NotImplementedError diff --git a/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity.schema.json b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity.schema.json new file mode 100644 index 0000000000000..9a1f2bb156ca3 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity.schema.json @@ -0,0 +1,92 @@ +{ + "typeName": "AWS::StepFunctions::Activity", + "description": "Resource schema for Activity", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-stepfunctions.git", + "definitions": { + "TagsEntry": { + "type": "object", + "properties": { + "Key": { + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "minLength": 1, + "maxLength": 256 + } + }, + "additionalProperties": false, + "required": [ + "Key", + "Value" + ] + } + }, + "properties": { + "Arn": { + "type": "string", + "minLength": 1, + "maxLength": 2048 + }, + "Name": { + "type": "string", + "minLength": 1, + "maxLength": 80 + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/TagsEntry" + } + } + }, + "additionalProperties": false, + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "required": [ + "Name" + ], + "primaryIdentifier": [ + "/properties/Arn" + ], + "readOnlyProperties": [ + "/properties/Arn" + ], + "createOnlyProperties": [ + "/properties/Name" + ], + "handlers": { + "create": { + "permissions": [ + "states:CreateActivity" + ] + }, + "read": { + "permissions": [ + "states:DescribeActivity", + "states:ListTagsForResource" + ] + }, + "update": { + "permissions": [ + 
"states:ListTagsForResource", + "states:TagResource", + "states:UntagResource" + ] + }, + "delete": { + "permissions": [ + "states:DeleteActivity" + ] + } + } +} diff --git a/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity_plugin.py b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity_plugin.py new file mode 100644 index 0000000000000..b8f8891464a39 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_activity_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class StepFunctionsActivityProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::StepFunctions::Activity" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.stepfunctions.resource_providers.aws_stepfunctions_activity import ( + StepFunctionsActivityProvider, + ) + + self.factory = StepFunctionsActivityProvider diff --git a/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py new file mode 100644 index 0000000000000..a1dd521ab5d4a --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.py @@ -0,0 +1,250 @@ +# LocalStack Resource Provider Scaffolding v2 +from __future__ import annotations + +import json +import re +from pathlib import Path +from typing import Optional, TypedDict + +import localstack.services.cloudformation.provider_utils as util +from localstack.services.cloudformation.resource_provider import ( + LOG, + OperationStatus, + ProgressEvent, + ResourceProvider, + ResourceRequest, +) +from localstack.utils.strings import to_str + + +class StepFunctionsStateMachineProperties(TypedDict): + RoleArn: Optional[str] + Arn: Optional[str] + Definition: Optional[dict] + DefinitionS3Location: Optional[S3Location] + DefinitionString: Optional[str] + DefinitionSubstitutions: Optional[dict] + LoggingConfiguration: Optional[LoggingConfiguration] + Name: Optional[str] + StateMachineName: Optional[str] + StateMachineRevisionId: Optional[str] + StateMachineType: Optional[str] + Tags: Optional[list[TagsEntry]] + TracingConfiguration: Optional[TracingConfiguration] + + +class CloudWatchLogsLogGroup(TypedDict): + LogGroupArn: Optional[str] + + +class LogDestination(TypedDict): + CloudWatchLogsLogGroup: Optional[CloudWatchLogsLogGroup] + + +class LoggingConfiguration(TypedDict): + Destinations: Optional[list[LogDestination]] + IncludeExecutionData: Optional[bool] + Level: Optional[str] + + +class TracingConfiguration(TypedDict): + Enabled: Optional[bool] + + +class S3Location(TypedDict): + Bucket: Optional[str] + Key: Optional[str] + Version: Optional[str] + + +class TagsEntry(TypedDict): + Key: Optional[str] + Value: Optional[str] + + +REPEATED_INVOCATION = "repeated_invocation" + + +class StepFunctionsStateMachineProvider(ResourceProvider[StepFunctionsStateMachineProperties]): + TYPE = "AWS::StepFunctions::StateMachine" # Autogenerated. Don't change + SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change + + def create( + self, + request: ResourceRequest[StepFunctionsStateMachineProperties], + ) -> ProgressEvent[StepFunctionsStateMachineProperties]: + """ + Create a new resource. + + Primary identifier fields: + - /properties/Arn + + Required properties: + - RoleArn + + Create-only properties: + - /properties/StateMachineName + - /properties/StateMachineType + + Read-only properties: + - /properties/Arn + - /properties/Name + - /properties/StateMachineRevisionId + + IAM permissions required: + - states:CreateStateMachine + - iam:PassRole + - s3:GetObject + + """ + model = request.desired_state + step_function = request.aws_client_factory.stepfunctions + + if not model.get("StateMachineName"): + model["StateMachineName"] = util.generate_default_name( + stack_name=request.stack_name, logical_resource_id=request.logical_resource_id + ) + + params = { + "name": model.get("StateMachineName"), + "roleArn": model.get("RoleArn"), + "type": model.get("StateMachineType", "STANDARD"), + } + logging_configuration = model.get("LoggingConfiguration") + if logging_configuration is not None: + params["loggingConfiguration"] = logging_configuration + + # get definition + s3_client = request.aws_client_factory.s3 + + definition_str = self._get_definition(model, s3_client) + + params["definition"] = definition_str + + response = step_function.create_state_machine(**params) + + model["Arn"] = response["stateMachineArn"] + model["Name"] = model["StateMachineName"] + + return ProgressEvent( + status=OperationStatus.SUCCESS, + resource_model=model, + custom_context=request.custom_context, + ) + + def _get_definition(self, model, s3_client): + if "DefinitionString" in model: + definition_str = model.get("DefinitionString") + elif "DefinitionS3Location" in model: + # TODO: currently not covered by tests - add a test to mimick the behavior of "sam deploy ..." 
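+            # DefinitionString takes precedence over DefinitionS3Location, which in
+            # turn takes precedence over an inline Definition dict; whichever source
+            # is used, DefinitionSubstitutions are then applied by _apply_substitutions
+            # at the end of this module, e.g. (illustrative values):
+            #
+            #     _apply_substitutions(
+            #         '{"Resource": "${lambdaArn}"}',
+            #         {"lambdaArn": "arn:aws:lambda:us-east-1:000000000000:function:demo"},
+            #     )
+            #     # -> '{"Resource": "arn:aws:lambda:us-east-1:000000000000:function:demo"}'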
+            s3_location = model.get("DefinitionS3Location")
+            LOG.debug("Fetching state machine definition from S3: %s", s3_location)
+            result = s3_client.get_object(Bucket=s3_location["Bucket"], Key=s3_location["Key"])
+            definition_str = to_str(result["Body"].read())
+        elif "Definition" in model:
+            definition = model.get("Definition")
+            definition_str = json.dumps(definition)
+        else:
+            definition_str = None
+
+        substitutions = model.get("DefinitionSubstitutions")
+        if substitutions is not None:
+            definition_str = _apply_substitutions(definition_str, substitutions)
+        return definition_str
+
+    def read(
+        self,
+        request: ResourceRequest[StepFunctionsStateMachineProperties],
+    ) -> ProgressEvent[StepFunctionsStateMachineProperties]:
+        """
+        Fetch resource information
+
+        IAM permissions required:
+        - states:DescribeStateMachine
+        - states:ListTagsForResource
+        """
+        raise NotImplementedError
+
+    def list(
+        self, request: ResourceRequest[StepFunctionsStateMachineProperties]
+    ) -> ProgressEvent[StepFunctionsStateMachineProperties]:
+        resources = request.aws_client_factory.stepfunctions.list_state_machines()["stateMachines"]
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_models=[
+                StepFunctionsStateMachineProperties(Arn=resource["stateMachineArn"])
+                for resource in resources
+            ],
+        )
+
+    def delete(
+        self,
+        request: ResourceRequest[StepFunctionsStateMachineProperties],
+    ) -> ProgressEvent[StepFunctionsStateMachineProperties]:
+        """
+        Delete a resource
+
+        IAM permissions required:
+        - states:DeleteStateMachine
+        - states:DescribeStateMachine
+        """
+        model = request.desired_state
+        step_function = request.aws_client_factory.stepfunctions
+
+        step_function.delete_state_machine(stateMachineArn=model["Arn"])
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+    def update(
+        self,
+        request: ResourceRequest[StepFunctionsStateMachineProperties],
+    ) -> ProgressEvent[StepFunctionsStateMachineProperties]:
+        """
+        Update a resource
+
+        IAM permissions required:
+        - states:UpdateStateMachine
+        - states:TagResource
+        - states:UntagResource
+        - states:ListTagsForResource
+        - iam:PassRole
+        """
+        model = request.desired_state
+        step_function = request.aws_client_factory.stepfunctions
+
+        if not model.get("Arn"):
+            model["Arn"] = request.previous_state["Arn"]
+
+        definition_str = self._get_definition(model, request.aws_client_factory.s3)
+        params = {
+            "stateMachineArn": model["Arn"],
+            "definition": definition_str,
+        }
+        logging_configuration = model.get("LoggingConfiguration")
+        if logging_configuration is not None:
+            params["loggingConfiguration"] = logging_configuration
+
+        step_function.update_state_machine(**params)
+
+        return ProgressEvent(
+            status=OperationStatus.SUCCESS,
+            resource_model=model,
+            custom_context=request.custom_context,
+        )
+
+
+def _apply_substitutions(definition: str, substitutions: dict[str, str]) -> str:
+    substitution_regex = re.compile("\\${[a-zA-Z0-9_]+}")  # might be a bit too strict in some cases
+    tokens = substitution_regex.findall(definition)
+    result = definition
+    for token in tokens:
+        raw_token = token[2:-1]  # strip ${ and }
+        if raw_token not in substitutions:
+            raise ValueError(f"Missing value for DefinitionSubstitutions placeholder {token}")
+        result = result.replace(token, substitutions[raw_token])
+
+    return result
diff --git a/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.schema.json
b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.schema.json new file mode 100644 index 0000000000000..607e1a9bccdab --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine.schema.json @@ -0,0 +1,250 @@ +{ + "typeName": "AWS::StepFunctions::StateMachine", + "description": "Resource schema for StateMachine", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-stepfunctions.git", + "definitions": { + "TagsEntry": { + "type": "object", + "properties": { + "Key": { + "type": "string", + "minLength": 1, + "maxLength": 128 + }, + "Value": { + "type": "string", + "minLength": 1, + "maxLength": 256 + } + }, + "additionalProperties": false, + "required": [ + "Key", + "Value" + ] + }, + "CloudWatchLogsLogGroup": { + "type": "object", + "additionalProperties": false, + "properties": { + "LogGroupArn": { + "type": "string", + "minLength": 1, + "maxLength": 256 + } + } + }, + "LogDestination": { + "type": "object", + "additionalProperties": false, + "properties": { + "CloudWatchLogsLogGroup": { + "$ref": "#/definitions/CloudWatchLogsLogGroup" + } + } + }, + "LoggingConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Level": { + "type": "string", + "enum": [ + "ALL", + "ERROR", + "FATAL", + "OFF" + ] + }, + "IncludeExecutionData": { + "type": "boolean" + }, + "Destinations": { + "type": "array", + "minItems": 1, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/LogDestination" + } + } + } + }, + "TracingConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "Enabled": { + "type": "boolean" + } + } + }, + "S3Location": { + "type": "object", + "additionalProperties": false, + "properties": { + "Bucket": { + "type": "string" + }, + "Key": { + "type": "string" + }, + "Version": { + "type": "string" + } + }, + "required": [ + "Bucket", + "Key" + ] + }, + "DefinitionSubstitutions": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + ".*": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "boolean" + } + ] + } + }, + "minProperties": 1 + }, + "Definition": { + "type": "object", + "minProperties": 1 + } + }, + "properties": { + "Arn": { + "type": "string", + "minLength": 1, + "maxLength": 2048 + }, + "Name": { + "type": "string", + "minLength": 1, + "maxLength": 80 + }, + "DefinitionString": { + "type": "string", + "minLength": 1, + "maxLength": 1048576 + }, + "RoleArn": { + "type": "string", + "minLength": 1, + "maxLength": 256 + }, + "StateMachineName": { + "type": "string", + "minLength": 1, + "maxLength": 80 + }, + "StateMachineType": { + "type": "string", + "enum": [ + "STANDARD", + "EXPRESS" + ] + }, + "StateMachineRevisionId": { + "type": "string", + "minLength": 1, + "maxLength": 256 + }, + "LoggingConfiguration": { + "$ref": "#/definitions/LoggingConfiguration" + }, + "TracingConfiguration": { + "$ref": "#/definitions/TracingConfiguration" + }, + "DefinitionS3Location": { + "$ref": "#/definitions/S3Location" + }, + "DefinitionSubstitutions": { + "$ref": "#/definitions/DefinitionSubstitutions" + }, + "Definition": { + "$ref": "#/definitions/Definition" + }, + "Tags": { + "type": "array", + "uniqueItems": false, + "insertionOrder": false, + "items": { + "$ref": "#/definitions/TagsEntry" + } + } + }, + "required": [ + "RoleArn" + ], + "tagging": { + "taggable": true, + "tagOnCreate": true, + 
"tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags" + }, + "additionalProperties": false, + "readOnlyProperties": [ + "/properties/Arn", + "/properties/Name", + "/properties/StateMachineRevisionId" + ], + "createOnlyProperties": [ + "/properties/StateMachineName", + "/properties/StateMachineType" + ], + "writeOnlyProperties": [ + "/properties/Definition", + "/properties/DefinitionS3Location", + "/properties/DefinitionSubstitutions" + ], + "primaryIdentifier": [ + "/properties/Arn" + ], + "handlers": { + "create": { + "permissions": [ + "states:CreateStateMachine", + "iam:PassRole", + "s3:GetObject" + ] + }, + "read": { + "permissions": [ + "states:DescribeStateMachine", + "states:ListTagsForResource" + ] + }, + "update": { + "permissions": [ + "states:UpdateStateMachine", + "states:TagResource", + "states:UntagResource", + "states:ListTagsForResource", + "iam:PassRole" + ] + }, + "delete": { + "permissions": [ + "states:DeleteStateMachine", + "states:DescribeStateMachine" + ] + }, + "list": { + "permissions": [ + "states:ListStateMachines" + ] + } + } +} diff --git a/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine_plugin.py b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine_plugin.py new file mode 100644 index 0000000000000..744ff8120e5f6 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/resource_providers/aws_stepfunctions_statemachine_plugin.py @@ -0,0 +1,20 @@ +from typing import Optional, Type + +from localstack.services.cloudformation.resource_provider import ( + CloudFormationResourceProviderPlugin, + ResourceProvider, +) + + +class StepFunctionsStateMachineProviderPlugin(CloudFormationResourceProviderPlugin): + name = "AWS::StepFunctions::StateMachine" + + def __init__(self): + self.factory: Optional[Type[ResourceProvider]] = None + + def load(self): + from localstack.services.stepfunctions.resource_providers.aws_stepfunctions_statemachine import ( + StepFunctionsStateMachineProvider, + ) + + self.factory = StepFunctionsStateMachineProvider diff --git a/localstack-core/localstack/services/stepfunctions/stepfunctions_utils.py b/localstack-core/localstack/services/stepfunctions/stepfunctions_utils.py new file mode 100644 index 0000000000000..95133b4ed47e8 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/stepfunctions_utils.py @@ -0,0 +1,69 @@ +import base64 +import logging +from typing import Dict + +from localstack.aws.api.stepfunctions import ValidationException +from localstack.aws.connect import connect_to +from localstack.utils.aws.arns import parse_arn +from localstack.utils.common import retry +from localstack.utils.strings import to_bytes, to_str + +LOG = logging.getLogger(__name__) + + +def await_sfn_execution_result(execution_arn: str, timeout_secs: int = 60) -> Dict: + """Wait until the given SFN execution ARN is no longer in RUNNING status, then return execution result.""" + + arn_data = parse_arn(execution_arn) + + client = connect_to( + aws_access_key_id=arn_data["account"], region_name=arn_data["region"] + ).stepfunctions + + def _get_result(): + result = client.describe_execution(executionArn=execution_arn) + assert result["status"] != "RUNNING" + return result + + return retry(_get_result, sleep=2, retries=timeout_secs / 2) + + +def get_next_page_token_from_arn(resource_arn: str) -> str: + return to_str(base64.b64encode(to_bytes(resource_arn))) + + +_DEFAULT_SFN_MAX_RESULTS: int = 100 
+ + +def normalise_max_results(max_results: int = 100) -> int: + if not max_results: + return _DEFAULT_SFN_MAX_RESULTS + return max_results + + +def assert_pagination_parameters_valid( + max_results: int, + next_token: str, + next_token_length_limit: int = 1024, + max_results_upper_limit: int = 1000, +) -> None: + validation_errors = [] + + match max_results: + case int() if max_results > max_results_upper_limit: + validation_errors.append( + f"Value '{max_results}' at 'maxResults' failed to satisfy constraint: " + f"Member must have value less than or equal to {max_results_upper_limit}" + ) + + match next_token: + case str() if len(next_token) > next_token_length_limit: + validation_errors.append( + f"Value '{next_token}' at 'nextToken' failed to satisfy constraint: " + f"Member must have length less than or equal to {next_token_length_limit}" + ) + + if validation_errors: + errors_message = "; ".join(validation_errors) + message = f"{len(validation_errors)} validation {'errors' if len(validation_errors) > 1 else 'error'} detected: {errors_message}" + raise ValidationException(message) diff --git a/localstack-core/localstack/services/stepfunctions/usage.py b/localstack-core/localstack/services/stepfunctions/usage.py new file mode 100644 index 0000000000000..63c5c90411b40 --- /dev/null +++ b/localstack-core/localstack/services/stepfunctions/usage.py @@ -0,0 +1,12 @@ +""" +Usage reporting for StepFunctions service +""" + +from localstack.utils.analytics.metrics import Counter + +# Initialize a counter to record the usage of language features for each state machine. +language_features_counter = Counter( + namespace="stepfunctions", + name="language_features_used", + labels=["query_language", "uses_variables"], +) diff --git a/localstack-core/localstack/services/stores.py b/localstack-core/localstack/services/stores.py new file mode 100644 index 0000000000000..af4d7d1b8b068 --- /dev/null +++ b/localstack-core/localstack/services/stores.py @@ -0,0 +1,346 @@ +""" +Base class and utilities for provider stores. + +Stores provide storage for AWS service providers and are analogous to Moto's BackendDict. + +By convention, Stores are to be defined in `models` submodule of the service +by subclassing BaseStore e.g. `localstack.services.sqs.models.SqsStore` +Also by convention, cross-region and cross-account attributes are declared in CAPITAL_CASE + + class SqsStore(BaseStore): + queues: dict[str, SqsQueue] = LocalAttribute(default=dict) + DELETED: dict[str, float] = CrossRegionAttribute(default=dict) + +Stores are then wrapped in AccountRegionBundle + + sqs_stores = AccountRegionBundle('sqs', SqsStore) + +Access patterns are as follows + + account_id = '001122334455' + sqs_stores[account_id] # -> RegionBundle + sqs_stores[account_id]['ap-south-1'] # -> SqsStore + sqs_stores[account_id]['ap-south-1'].queues # -> {} + +There should be a single declaration of a Store for a given service. If a service +has both Community and Pro providers, it must be declared as in Community codebase. +All Pro attributes must be declared within. + +While not recommended, store classes may define member helper functions and properties. 
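+
+For illustration, attributes declared with CrossRegionAttribute (like DELETED in
+the SqsStore example above) are visible from every regional store of the same
+account:
+
+    sqs_stores[account_id]['ap-south-1'].DELETED['queue-url'] = 1700000000.0
+    sqs_stores[account_id]['eu-west-1'].DELETED['queue-url']  # -> 1700000000.0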
+""" + +import re +from collections.abc import Callable +from threading import RLock +from typing import Any, Generic, Iterator, Type, TypeVar, Union + +from localstack import config +from localstack.utils.aws.aws_stack import get_valid_regions_for_service + +LOCAL_ATTR_PREFIX = "attr_" + +BaseStoreType = TypeVar("BaseStoreType") + + +# +# Descriptor protocol classes +# + + +class LocalAttribute: + """ + Descriptor protocol for marking store attributes as local to a region. + """ + + def __init__(self, default: Union[Callable, int, float, str, bool, None]): + """ + :param default: Default value assigned to the local attribute. Must be a scalar + or a callable. + """ + self.default = default + + def __set_name__(self, owner, name): + self.name = LOCAL_ATTR_PREFIX + name + + def __get__(self, obj: BaseStoreType, objtype=None) -> Any: + if not hasattr(obj, self.name): + if isinstance(self.default, Callable): + value = self.default() + else: + value = self.default + setattr(obj, self.name, value) + + return getattr(obj, self.name) + + def __set__(self, obj: BaseStoreType, value: Any): + setattr(obj, self.name, value) + + +class CrossRegionAttribute: + """ + Descriptor protocol for marking store attributes as shared across all regions. + """ + + def __init__(self, default: Union[Callable, int, float, str, bool, None]): + """ + :param default: The default value assigned to the cross-region attribute. + This must be a scalar or a callable. + """ + self.default = default + + def __set_name__(self, owner, name): + self.name = name + + def __get__(self, obj: BaseStoreType, objtype=None) -> Any: + self._check_region_store_association(obj) + + if self.name not in obj._global: + if isinstance(self.default, Callable): + obj._global[self.name] = self.default() + else: + obj._global[self.name] = self.default + + return obj._global[self.name] + + def __set__(self, obj: BaseStoreType, value: Any): + self._check_region_store_association(obj) + + obj._global[self.name] = value + + def _check_region_store_association(self, obj): + if not hasattr(obj, "_global"): + # Raise if a Store is instantiated outside of a RegionBundle + raise AttributeError( + "Could not resolve cross-region attribute because there is no associated RegionBundle" + ) + + +class CrossAccountAttribute: + """ + Descriptor protocol for marking a store attributes as shared across all regions and accounts. + + This should be used for resources that are identified by ARNs. + """ + + def __init__(self, default: Union[Callable, int, float, str, bool, None]): + """ + :param default: The default value assigned to the cross-account attribute. + This must be a scalar or a callable. 
+ """ + self.default = default + + def __set_name__(self, owner, name): + self.name = name + + def __get__(self, obj: BaseStoreType, objtype=None) -> Any: + self._check_account_store_association(obj) + + if self.name not in obj._universal: + if isinstance(self.default, Callable): + obj._universal[self.name] = self.default() + else: + obj._universal[self.name] = self.default + + return obj._universal[self.name] + + def __set__(self, obj: BaseStoreType, value: Any): + self._check_account_store_association(obj) + + obj._universal[self.name] = value + + def _check_account_store_association(self, obj): + if not hasattr(obj, "_universal"): + # Raise if a Store is instantiated outside an AccountRegionBundle + raise AttributeError( + "Could not resolve cross-account attribute because there is no associated AccountRegionBundle" + ) + + +# +# Base models +# + + +class BaseStore: + """ + Base class for defining stores for LocalStack providers. + """ + + _service_name: str + _account_id: str + _region_name: str + _global: dict + _universal: dict + + def __repr__(self): + try: + repr_templ = "<{name} object for {service_name} at {account_id}/{region_name}>" + return repr_templ.format( + name=self.__class__.__name__, + service_name=self._service_name, + account_id=self._account_id, + region_name=self._region_name, + ) + except AttributeError: + return super().__repr__() + + +# +# Encapsulations +# + + +class RegionBundle(dict, Generic[BaseStoreType]): + """ + Encapsulation for stores across all regions for a specific AWS account ID. + """ + + def __init__( + self, + service_name: str, + store: Type[BaseStoreType], + account_id: str, + validate: bool = True, + lock: RLock = None, + universal: dict = None, + ): + self.store = store + self.account_id = account_id + self.service_name = service_name + self.validate = validate + self.lock = lock or RLock() + + self.valid_regions = get_valid_regions_for_service(service_name) + + # Keeps track of all cross-region attributes. This dict is maintained at + # a region level (hence in RegionBundle). A ref is passed to every store + # intialised in this region so that backref is possible. + self._global = {} + + # Keeps track of all cross-account attributes. This dict is maintained at + # the account level (ie. AccountRegionBundle). A ref is passed down from + # AccountRegionBundle to RegionBundle to individual stores to enable backref. + self._universal = universal + + def __getitem__(self, region_name) -> BaseStoreType: + if ( + not config.ALLOW_NONSTANDARD_REGIONS + and self.validate + and region_name not in self.valid_regions + ): + raise ValueError( + f"'{region_name}' is not a valid AWS region name for {self.service_name}" + ) + + with self.lock: + if region_name not in self.keys(): + store_obj = self.store() + + store_obj._global = self._global + store_obj._universal = self._universal + store_obj.service_name = self.service_name + store_obj._account_id = self.account_id + store_obj._region_name = region_name + + self[region_name] = store_obj + + return super().__getitem__(region_name) + + def reset(self, _reset_universal: bool = False): + """ + Clear all store data. + + This only deletes the data held in the stores. All instantiated stores + are retained. This includes data shared by all stores in this account + and marked by the CrossRegionAttribute descriptor. + + Data marked by CrossAccountAttribute descriptor is only cleared when + `_reset_universal` is set. 
Note that this escapes the logical boundary of + the account associated with this RegionBundle and affects *all* accounts. + Hence this argument is not intended for public use and is only used when + invoking this method from AccountRegionBundle. + """ + # For safety, clear data in all referenced store instances, if any + for store_inst in self.values(): + attrs = list(store_inst.__dict__.keys()) + for attr in attrs: + # reset the cross-region attributes + if attr == "_global": + store_inst._global.clear() + + if attr == "_universal" and _reset_universal: + store_inst._universal.clear() + + # reset the local attributes + elif attr.startswith(LOCAL_ATTR_PREFIX): + delattr(store_inst, attr) + + self._global.clear() + + with self.lock: + self.clear() + + +class AccountRegionBundle(dict, Generic[BaseStoreType]): + """ + Encapsulation for all stores for all AWS account IDs. + """ + + def __init__(self, service_name: str, store: Type[BaseStoreType], validate: bool = True): + """ + :param service_name: Name of the service. Must be a valid service defined in botocore. + :param store: Class definition of the Store + :param validate: Whether to raise if invalid region names or account IDs are used during subscription + """ + self.service_name = service_name + self.store = store + self.validate = validate + self.lock = RLock() + + # Keeps track of all cross-account attributes. This dict is maintained at + # the account level (hence in AccountRegionBundle). A ref is passed to + # every region bundle, which in turn passes it to every store in it. + self._universal = {} + + def __getitem__(self, account_id: str) -> RegionBundle[BaseStoreType]: + if self.validate and not re.match(r"\d{12}", account_id): + raise ValueError(f"'{account_id}' is not a valid AWS account ID") + + with self.lock: + if account_id not in self.keys(): + self[account_id] = RegionBundle( + service_name=self.service_name, + store=self.store, + account_id=account_id, + validate=self.validate, + lock=self.lock, + universal=self._universal, + ) + + return super().__getitem__(account_id) + + def reset(self): + """ + Clear all store data. + + This only deletes the data held in the stores. All instantiated stores are retained. + """ + # For safety, clear all referenced region bundles, if any + for region_bundle in self.values(): + region_bundle.reset(_reset_universal=True) + + self._universal.clear() + + with self.lock: + self.clear() + + def iter_stores(self) -> Iterator[tuple[str, str, BaseStoreType]]: + """ + Iterate over a flattened view of all stores in this AccountRegionBundle, where each record is a + tuple of account id, region name, and the store within that account and region. 
        Example::

            for account_id, region_name, store in sqs_stores.iter_stores():
                ...

+        :return: an iterator
+        """
+        for account_id, region_stores in self.items():
+            for region_name, store in region_stores.items():
+                yield account_id, region_name, store
diff --git a/localstack-core/localstack/services/sts/__init__.py b/localstack-core/localstack/services/sts/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/services/sts/models.py b/localstack-core/localstack/services/sts/models.py
new file mode 100644
index 0000000000000..67a8665dbb76f
--- /dev/null
+++ b/localstack-core/localstack/services/sts/models.py
@@ -0,0 +1,19 @@
+from typing import TypedDict
+
+from localstack.aws.api.sts import Tag
+from localstack.services.stores import AccountRegionBundle, BaseStore, CrossRegionAttribute
+
+
+class SessionTaggingConfig(TypedDict):
+    # maps lowercase tag key => {"Key": <original key>, "Value": <value>}
+    tags: dict[str, Tag]
+    # list of lowercase transitive tag keys
+    transitive_tags: list[str]
+
+
+class STSStore(BaseStore):
+    # maps access key ids to tagging config for the session they belong to
+    session_tags: dict[str, SessionTaggingConfig] = CrossRegionAttribute(default=dict)
+
+
+sts_stores = AccountRegionBundle("sts", STSStore)
diff --git a/localstack-core/localstack/services/sts/provider.py b/localstack-core/localstack/services/sts/provider.py
new file mode 100644
index 0000000000000..14807869ea9cb
--- /dev/null
+++ b/localstack-core/localstack/services/sts/provider.py
@@ -0,0 +1,107 @@
+import logging
+
+from localstack.aws.api import RequestContext, ServiceException
+from localstack.aws.api.sts import (
+    AssumeRoleResponse,
+    GetCallerIdentityResponse,
+    ProvidedContextsListType,
+    StsApi,
+    arnType,
+    externalIdType,
+    policyDescriptorListType,
+    roleDurationSecondsType,
+    roleSessionNameType,
+    serialNumberType,
+    sourceIdentityType,
+    tagKeyListType,
+    tagListType,
+    tokenCodeType,
+    unrestrictedSessionPolicyDocumentType,
+)
+from localstack.services.iam.iam_patches import apply_iam_patches
+from localstack.services.moto import call_moto
+from localstack.services.plugins import ServiceLifecycleHook
+from localstack.services.sts.models import SessionTaggingConfig, sts_stores
+from localstack.utils.aws.arns import extract_account_id_from_arn
+from localstack.utils.aws.request_context import extract_access_key_id_from_auth_header
+
+LOG = logging.getLogger(__name__)
+
+
+class InvalidParameterValueError(ServiceException):
+    code = "InvalidParameterValue"
+    status_code = 400
+    sender_fault = True
+
+
+class StsProvider(StsApi, ServiceLifecycleHook):
+    def __init__(self):
+        apply_iam_patches()
+
+    def get_caller_identity(self, context: RequestContext, **kwargs) -> GetCallerIdentityResponse:
+        response = call_moto(context)
+        if "user/moto" in response["Arn"] and "sts" in response["Arn"]:
+            response["Arn"] = f"arn:{context.partition}:iam::{response['Account']}:root"
+        return response
+
+    def assume_role(
+        self,
+        context: RequestContext,
+        role_arn: arnType,
+        role_session_name: roleSessionNameType,
+        policy_arns: policyDescriptorListType = None,
+        policy: unrestrictedSessionPolicyDocumentType = None,
+        duration_seconds: roleDurationSecondsType = None,
+        tags: tagListType = None,
+        transitive_tag_keys: tagKeyListType = None,
+        external_id: externalIdType = None,
+        serial_number: serialNumberType = None,
+        token_code: tokenCodeType = None,
+        source_identity: sourceIdentityType = None,
+        provided_contexts: ProvidedContextsListType = None,
+        **kwargs,
+    ) -> AssumeRoleResponse:
+        target_account_id = extract_account_id_from_arn(role_arn)
+
access_key_id = extract_access_key_id_from_auth_header(context.request.headers) + store = sts_stores[target_account_id]["us-east-1"] + existing_tagging_config = store.session_tags.get(access_key_id, {}) + + if tags: + tag_keys = {tag["Key"].lower() for tag in tags} + # if the lower-cased set is smaller than the number of keys, there have to be some duplicates. + if len(tag_keys) < len(tags): + raise InvalidParameterValueError( + "Duplicate tag keys found. Please note that Tag keys are case insensitive." + ) + + # prevent transitive tags from being overridden + if existing_tagging_config: + if set(existing_tagging_config["transitive_tags"]).intersection(tag_keys): + raise InvalidParameterValueError( + "One of the specified transitive tag keys can't be set because it conflicts with a transitive tag key from the calling session." + ) + if transitive_tag_keys: + transitive_tag_key_set = {key.lower() for key in transitive_tag_keys} + if not transitive_tag_key_set <= tag_keys: + raise InvalidParameterValueError( + "The specified transitive tag key must be included in the requested tags." + ) + + response: AssumeRoleResponse = call_moto(context) + + transitive_tag_keys = transitive_tag_keys or [] + tags = tags or [] + transformed_tags = {tag["Key"].lower(): tag for tag in tags} + # propagate transitive tags + if existing_tagging_config: + for tag in existing_tagging_config["transitive_tags"]: + transformed_tags[tag] = existing_tagging_config["tags"][tag] + transitive_tag_keys += existing_tagging_config["transitive_tags"] + if transformed_tags: + # store session tagging config + access_key_id = response["Credentials"]["AccessKeyId"] + store.session_tags[access_key_id] = SessionTaggingConfig( + tags=transformed_tags, + transitive_tags=[key.lower() for key in transitive_tag_keys], + ) + return response diff --git a/localstack-core/localstack/services/support/__init__.py b/localstack-core/localstack/services/support/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack/services/support/provider.py b/localstack-core/localstack/services/support/provider.py similarity index 100% rename from localstack/services/support/provider.py rename to localstack-core/localstack/services/support/provider.py diff --git a/localstack-core/localstack/services/swf/__init__.py b/localstack-core/localstack/services/swf/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack/services/swf/provider.py b/localstack-core/localstack/services/swf/provider.py similarity index 100% rename from localstack/services/swf/provider.py rename to localstack-core/localstack/services/swf/provider.py diff --git a/localstack-core/localstack/services/transcribe/__init__.py b/localstack-core/localstack/services/transcribe/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/services/transcribe/models.py b/localstack-core/localstack/services/transcribe/models.py new file mode 100644 index 0000000000000..4f9935a310501 --- /dev/null +++ b/localstack-core/localstack/services/transcribe/models.py @@ -0,0 +1,9 @@ +from localstack.aws.api.transcribe import TranscriptionJob, TranscriptionJobName +from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute + + +class TranscribeStore(BaseStore): + transcription_jobs: dict[TranscriptionJobName, TranscriptionJob] = LocalAttribute(default=dict) # type: ignore[assignment] + + +transcribe_stores = AccountRegionBundle("transcribe", TranscribeStore) diff --git 
a/localstack-core/localstack/services/transcribe/packages.py b/localstack-core/localstack/services/transcribe/packages.py new file mode 100644 index 0000000000000..14faf968c2159 --- /dev/null +++ b/localstack-core/localstack/services/transcribe/packages.py @@ -0,0 +1,25 @@ +from typing import List + +from localstack.packages import Package +from localstack.packages.core import PythonPackageInstaller + +_VOSK_DEFAULT_VERSION = "0.3.43" + + +class VoskPackage(Package[PythonPackageInstaller]): + def __init__(self, default_version: str = _VOSK_DEFAULT_VERSION): + super().__init__(name="Vosk", default_version=default_version) + + def _get_installer(self, version: str) -> PythonPackageInstaller: + return VoskPackageInstaller(version) + + def get_versions(self) -> List[str]: + return [_VOSK_DEFAULT_VERSION] + + +class VoskPackageInstaller(PythonPackageInstaller): + def __init__(self, version: str): + super().__init__("vosk", version) + + +vosk_package = VoskPackage() diff --git a/localstack-core/localstack/services/transcribe/plugins.py b/localstack-core/localstack/services/transcribe/plugins.py new file mode 100644 index 0000000000000..78cc12751894d --- /dev/null +++ b/localstack-core/localstack/services/transcribe/plugins.py @@ -0,0 +1,9 @@ +from localstack.packages import Package, package +from localstack.packages.core import PythonPackageInstaller + + +@package(name="vosk") +def vosk_package() -> Package[PythonPackageInstaller]: + from localstack.services.transcribe.packages import vosk_package + + return vosk_package diff --git a/localstack-core/localstack/services/transcribe/provider.py b/localstack-core/localstack/services/transcribe/provider.py new file mode 100644 index 0000000000000..b0d1f62d458ed --- /dev/null +++ b/localstack-core/localstack/services/transcribe/provider.py @@ -0,0 +1,415 @@ +import datetime +import json +import logging +import threading +import wave +from functools import cache +from pathlib import Path +from typing import Any, Tuple +from zipfile import ZipFile + +from localstack import config +from localstack.aws.api import RequestContext, handler +from localstack.aws.api.transcribe import ( + BadRequestException, + ConflictException, + GetTranscriptionJobResponse, + LanguageCode, + ListTranscriptionJobsResponse, + MaxResults, + MediaFormat, + NextToken, + NotFoundException, + StartTranscriptionJobRequest, + StartTranscriptionJobResponse, + TranscribeApi, + Transcript, + TranscriptionJob, + TranscriptionJobName, + TranscriptionJobStatus, + TranscriptionJobSummary, +) +from localstack.aws.connect import connect_to +from localstack.constants import HUGGING_FACE_ENDPOINT +from localstack.packages.ffmpeg import ffmpeg_package +from localstack.services.s3.utils import ( + get_bucket_and_key_from_presign_url, + get_bucket_and_key_from_s3_uri, +) +from localstack.services.transcribe.models import TranscribeStore, transcribe_stores +from localstack.services.transcribe.packages import vosk_package +from localstack.utils.files import new_tmp_file +from localstack.utils.http import download +from localstack.utils.run import run +from localstack.utils.threads import start_thread + +# Amazon Transcribe service calls are limited to four hours (or 2 GB) per API call for our batch service. +# The streaming service can accommodate open connections up to four hours long. 
+# See https://aws.amazon.com/transcribe/faqs/
+MAX_AUDIO_DURATION_SECONDS = 60 * 60 * 4
+
+LOG = logging.getLogger(__name__)
+
+VOSK_MODELS_URL = f"{HUGGING_FACE_ENDPOINT}/vosk-models/resolve/main/"
+
+# Map of language codes to Vosk language models
+# See https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html
+LANGUAGE_MODELS = {
+    LanguageCode.ca_ES: "vosk-model-small-ca-0.4",
+    LanguageCode.cs_CZ: "vosk-model-small-cs-0.4-rhasspy",
+    LanguageCode.en_GB: "vosk-model-small-en-gb-0.15",
+    LanguageCode.en_IN: "vosk-model-small-en-in-0.4",
+    LanguageCode.en_US: "vosk-model-small-en-us-0.15",
+    LanguageCode.fa_IR: "vosk-model-small-fa-0.42",
+    LanguageCode.fr_FR: "vosk-model-small-fr-0.22",
+    LanguageCode.de_DE: "vosk-model-small-de-0.15",
+    LanguageCode.es_ES: "vosk-model-small-es-0.42",
+    LanguageCode.gu_IN: "vosk-model-small-gu-0.42",
+    LanguageCode.hi_IN: "vosk-model-small-hi-0.22",
+    LanguageCode.it_IT: "vosk-model-small-it-0.22",
+    LanguageCode.ja_JP: "vosk-model-small-ja-0.22",
+    LanguageCode.kk_KZ: "vosk-model-small-kz-0.15",
+    LanguageCode.ko_KR: "vosk-model-small-ko-0.22",
+    LanguageCode.nl_NL: "vosk-model-small-nl-0.22",
+    LanguageCode.pl_PL: "vosk-model-small-pl-0.22",
+    LanguageCode.pt_BR: "vosk-model-small-pt-0.3",
+    LanguageCode.ru_RU: "vosk-model-small-ru-0.22",
+    LanguageCode.te_IN: "vosk-model-small-te-0.42",
+    LanguageCode.tr_TR: "vosk-model-small-tr-0.3",
+    LanguageCode.uk_UA: "vosk-model-small-uk-v3-nano",
+    LanguageCode.uz_UZ: "vosk-model-small-uz-0.22",
+    LanguageCode.vi_VN: "vosk-model-small-vn-0.4",
+    LanguageCode.zh_CN: "vosk-model-small-cn-0.22",
+}
+
+LANGUAGE_MODEL_DIR = Path(config.dirs.cache) / "vosk"
+
+# List of ffmpeg format names that correspond to the formats supported by AWS
+# See https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html
+SUPPORTED_FORMAT_NAMES = {
+    "amr": MediaFormat.amr,
+    "flac": MediaFormat.flac,
+    "mp3": MediaFormat.mp3,
+    "mov,mp4,m4a,3gp,3g2,mj2": MediaFormat.mp4,
+    "ogg": MediaFormat.ogg,
+    "matroska,webm": MediaFormat.webm,
+    "wav": MediaFormat.wav,
+}
+
+# Mutex for model downloads
+_DL_LOCK = threading.Lock()
+
+
+class TranscribeProvider(TranscribeApi):
+    def get_transcription_job(
+        self, context: RequestContext, transcription_job_name: TranscriptionJobName, **kwargs: Any
+    ) -> GetTranscriptionJobResponse:
+        store = transcribe_stores[context.account_id][context.region]
+
+        if job := store.transcription_jobs.get(transcription_job_name):
+            # fetch output key and output bucket
+            output_bucket, output_key = get_bucket_and_key_from_presign_url(
+                job["Transcript"]["TranscriptFileUri"]  # type: ignore[index,arg-type]
+            )
+            job["Transcript"]["TranscriptFileUri"] = connect_to().s3.generate_presigned_url(  # type: ignore[index]
+                "get_object",
+                Params={"Bucket": output_bucket, "Key": output_key},
+                ExpiresIn=60 * 15,
+            )
+            return GetTranscriptionJobResponse(TranscriptionJob=job)
+
+        raise NotFoundException(
+            "The requested job couldn't be found. Check the job name and try your request again."
+ ) + + @staticmethod + @cache + def _setup_vosk() -> None: + # Install and configure vosk + vosk_package.install() + + from vosk import SetLogLevel # type: ignore[import-not-found] # noqa + + # Suppress Vosk logging + SetLogLevel(-1) + + @handler("StartTranscriptionJob", expand=False) + def start_transcription_job( # type: ignore[override] + self, + context: RequestContext, + request: StartTranscriptionJobRequest, + ) -> StartTranscriptionJobResponse: + job_name = request["TranscriptionJobName"] + media = request["Media"] + language_code = request.get("LanguageCode") + + if not language_code: + raise BadRequestException("Language code is missing") + + if language_code not in LANGUAGE_MODELS: + raise BadRequestException(f"Language code must be one of {LANGUAGE_MODELS.keys()}") + + store = transcribe_stores[context.account_id][context.region] + + if job_name in store.transcription_jobs: + raise ConflictException( + "The requested job name already exists. Use a different job name." + ) + + s3_path = request["Media"]["MediaFileUri"] + output_bucket = request.get("OutputBucketName", get_bucket_and_key_from_s3_uri(s3_path)[0]) # type: ignore[arg-type] + output_key = request.get("OutputKey") + + if not output_key: + output_key = f"{job_name}.json" + + s3_client = connect_to().s3 + + # the presign url is valid for 15 minutes + presign_url = s3_client.generate_presigned_url( + "get_object", + Params={"Bucket": output_bucket, "Key": output_key}, + ExpiresIn=60 * 15, + ) + + transcript = Transcript(TranscriptFileUri=presign_url) + + job = TranscriptionJob( + TranscriptionJobName=job_name, + LanguageCode=language_code, + Media=media, + CreationTime=datetime.datetime.utcnow(), + StartTime=datetime.datetime.utcnow(), + TranscriptionJobStatus=TranscriptionJobStatus.QUEUED, + Transcript=transcript, + ) + store.transcription_jobs[job_name] = job + + start_thread(self._run_transcription_job, (store, job_name)) + + return StartTranscriptionJobResponse(TranscriptionJob=job) + + def list_transcription_jobs( + self, + context: RequestContext, + status: TranscriptionJobStatus | None = None, + job_name_contains: TranscriptionJobName | None = None, + next_token: NextToken | None = None, + max_results: MaxResults | None = None, + **kwargs: Any, + ) -> ListTranscriptionJobsResponse: + store = transcribe_stores[context.account_id][context.region] + summaries = [] + for job in store.transcription_jobs.values(): + summaries.append( + TranscriptionJobSummary( + TranscriptionJobName=job["TranscriptionJobName"], + LanguageCode=job["LanguageCode"], + CreationTime=job["CreationTime"], + StartTime=job["StartTime"], + TranscriptionJobStatus=job["TranscriptionJobStatus"], + CompletionTime=job.get("CompletionTime"), + FailureReason=job.get("FailureReason"), + ) + ) + + return ListTranscriptionJobsResponse(TranscriptionJobSummaries=summaries) + + def delete_transcription_job( + self, context: RequestContext, transcription_job_name: TranscriptionJobName, **kwargs: Any + ) -> None: + store = transcribe_stores[context.account_id][context.region] + + if transcription_job_name not in store.transcription_jobs: + raise NotFoundException( + "The requested job couldn't be found. Check the job name and try your request again." + ) + + store.transcription_jobs.pop(transcription_job_name) + + # + # Utils + # + + @staticmethod + def download_model(name: str) -> str: + """ + Download a Vosk language model to LocalStack cache directory. Do nothing if model is already downloaded. 
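+        Concurrent downloads are serialized via the module-level ``_DL_LOCK`` mutex.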
+
+        While Vosk can also download a model if not available locally, it saves it to a
+        non-configurable location ~/.cache/vosk.
+        """
+        model_path = LANGUAGE_MODEL_DIR / name
+
+        with _DL_LOCK:
+            # check if model path exists and is not empty
+            if model_path.exists() and any(model_path.iterdir()):
+                LOG.debug("Using a pre-downloaded language model: %s", model_path)
+                return str(model_path)
+            else:
+                model_path.mkdir(parents=True)
+
+            model_zip_path = str(model_path) + ".zip"
+
+            LOG.debug("Downloading language model: %s", model_path.name)
+
+            from vosk import MODEL_PRE_URL  # noqa
+
+            download_urls = [MODEL_PRE_URL, VOSK_MODELS_URL]
+
+            for url in download_urls:
+                try:
+                    download(url + str(model_path.name) + ".zip", model_zip_path, verify_ssl=False)
+                except Exception as e:
+                    LOG.warning("Failed to download model from %s: %s", url, e)
+                    continue
+                break
+
+            LOG.debug("Extracting language model: %s", model_path.name)
+            with ZipFile(model_zip_path, "r") as model_ref:
+                model_ref.extractall(model_path.parent)
+
+            Path(model_zip_path).unlink()
+
+        return str(model_path)
+
+    #
+    # Threads
+    #
+
+    def _run_transcription_job(self, args: Tuple[TranscribeStore, str]) -> None:
+        store, job_name = args
+
+        job = store.transcription_jobs[job_name]
+        job["StartTime"] = datetime.datetime.utcnow()
+        job["TranscriptionJobStatus"] = TranscriptionJobStatus.IN_PROGRESS
+
+        failure_reason = None
+
+        try:
+            LOG.debug("Starting transcription: %s", job_name)
+
+            # Get file from S3
+            file_path = new_tmp_file()
+            s3_client = connect_to().s3
+            s3_path: str = job["Media"]["MediaFileUri"]  # type: ignore[index,assignment]
+            bucket, _, key = s3_path.removeprefix("s3://").partition("/")
+            s3_client.download_file(Bucket=bucket, Key=key, Filename=file_path)
+
+            ffmpeg_package.install()
+            ffmpeg_bin = ffmpeg_package.get_installer().get_ffmpeg_path()
+            ffprobe_bin = ffmpeg_package.get_installer().get_ffprobe_path()
+
+            LOG.debug("Determining media format")
+            # TODO set correct failure_reason if ffprobe execution fails
+            ffprobe_output = json.loads(
+                run(  # type: ignore[arg-type]
+                    f"{ffprobe_bin} -show_streams -show_format -print_format json -hide_banner -v error {file_path}"
+                )
+            )
+            format = ffprobe_output["format"]["format_name"]
+            LOG.debug("Media format detected as: %s", format)
+            job["MediaFormat"] = SUPPORTED_FORMAT_NAMES[format]
+            duration = ffprobe_output["format"]["duration"]
+
+            if float(duration) >= MAX_AUDIO_DURATION_SECONDS:
+                failure_reason = "Invalid file size: file size too large. Maximum audio duration is 4.000000 hours.Check the length of the file and try your request again."
+                raise RuntimeError()
+
+            # Determine the sample rate of input audio if possible
+            for stream in ffprobe_output["streams"]:
+                if stream["codec_type"] == "audio":
+                    job["MediaSampleRateHertz"] = int(stream["sample_rate"])
+
+            if format in SUPPORTED_FORMAT_NAMES:
+                wav_path = new_tmp_file(suffix=".wav")
+                LOG.debug("Transcoding media to wav")
+                # TODO set correct failure_reason if ffmpeg execution fails
+                run(
+                    f"{ffmpeg_bin} -y -nostdin -loglevel quiet -i '{file_path}' -ar 16000 -ac 1 '{wav_path}'"
+                )
+            else:
+                failure_reason = f"Unsupported media format: {format}"
+                raise RuntimeError()
+
+            # Check if file is valid wav
+            audio = wave.open(wav_path, "rb")
+            if (
+                audio.getnchannels() != 1
+                or audio.getsampwidth() != 2
+                or audio.getcomptype() != "NONE"
+            ):
+                # Fail job
+                failure_reason = (
+                    "Audio file must be mono PCM WAV format. Transcoding may have failed. 
" + ) + raise RuntimeError() + + # Prepare transcriber + language_code: str = job["LanguageCode"] # type: ignore[assignment] + model_name = LANGUAGE_MODELS[language_code] # type: ignore[index] + self._setup_vosk() + model_path = self.download_model(model_name) + from vosk import KaldiRecognizer, Model # noqa + + model = Model(model_path=model_path, model_name=model_name) + + tc = KaldiRecognizer(model, audio.getframerate()) + tc.SetWords(True) + tc.SetPartialWords(True) + + # Start transcription + while True: + data = audio.readframes(4000) + if len(data) == 0: + break + tc.AcceptWaveform(data) + + tc_result = json.loads(tc.FinalResult()) + + # Convert to AWS format + items = [] + for unigram in tc_result["result"]: + items.append( + { + "start_time": unigram["start"], + "end_time": unigram["end"], + "type": "pronunciation", + "alternatives": [ + { + "confidence": unigram["conf"], + "content": unigram["word"], + } + ], + } + ) + output = { + "jobName": job_name, + "status": TranscriptionJobStatus.COMPLETED, + "results": { + "transcripts": [ + { + "transcript": tc_result["text"], + } + ], + "items": items, + }, + } + + # Save to S3 + output_s3_path: str = job["Transcript"]["TranscriptFileUri"] # type: ignore[index,assignment] + output_bucket, output_key = get_bucket_and_key_from_presign_url(output_s3_path) + s3_client.put_object(Bucket=output_bucket, Key=output_key, Body=json.dumps(output)) + + # Update job details + job["CompletionTime"] = datetime.datetime.utcnow() + job["TranscriptionJobStatus"] = TranscriptionJobStatus.COMPLETED + job["MediaFormat"] = MediaFormat.wav + + LOG.info("Transcription job completed: %s", job_name) + + except Exception as exc: + job["FailureReason"] = failure_reason or str(exc) + job["TranscriptionJobStatus"] = TranscriptionJobStatus.FAILED + + LOG.exception("Transcription job %s failed: %s", job_name, job["FailureReason"]) diff --git a/localstack-core/localstack/state/__init__.py b/localstack-core/localstack/state/__init__.py new file mode 100644 index 0000000000000..e7e0401f97113 --- /dev/null +++ b/localstack-core/localstack/state/__init__.py @@ -0,0 +1,19 @@ +from .core import ( + AssetDirectory, + Decoder, + Encoder, + StateContainer, + StateLifecycleHook, + StateVisitable, + StateVisitor, +) + +__all__ = [ + "StateVisitable", + "StateVisitor", + "StateLifecycleHook", + "AssetDirectory", + "StateContainer", + "Encoder", + "Decoder", +] diff --git a/localstack-core/localstack/state/core.py b/localstack-core/localstack/state/core.py new file mode 100644 index 0000000000000..ae41f47b17469 --- /dev/null +++ b/localstack-core/localstack/state/core.py @@ -0,0 +1,138 @@ +"""Core concepts of the persistence API.""" + +import io +import os +import pathlib +from typing import IO, Any, Protocol, runtime_checkable + + +class StateContainer(Protocol): + """While a StateContainer can in principle be anything, localstack currently supports by default the following + containers: + + - BackendDict (moto backend state) + - AccountRegionBundle (localstack stores) + - AssetDirectory (folders on disk) + """ + + service_name: str + + +class StateLifecycleHook: + """ + There are three well-known state manipulation operations for a service provider: + + - reset: the state within the service provider is reset, stores cleared, directories removed + - save: the state of the service provider is extracted and stored into some format (on disk, pods, ...) 
+ - load: the state is injected into the service, or state directories on disk are restored + """ + + def on_before_state_reset(self) -> None: + """Hook triggered before the provider's state containers are reset/cleared.""" + pass + + def on_after_state_reset(self) -> None: + """Hook triggered after the provider's state containers have been reset/cleared.""" + pass + + def on_before_state_save(self) -> None: + """Hook triggered before the provider's state containers are saved.""" + pass + + def on_after_state_save(self) -> None: + """Hook triggered after the provider's state containers have been saved.""" + pass + + def on_before_state_load(self) -> None: + """Hook triggered before a previously serialized state is loaded into the provider's state containers.""" + pass + + def on_after_state_load(self) -> None: + """Hook triggered after a previously serialized state has been loaded into the provider's state containers.""" + pass + + +class StateVisitor: + def visit(self, state_container: StateContainer): + """ + Visit (=do something with) a given state container. A state container can be anything that holds service state. + An AccountRegionBundle, a moto BackendDict, or a directory containing assets. + """ + raise NotImplementedError + + +@runtime_checkable +class StateVisitable(Protocol): + def accept_state_visitor(self, visitor: StateVisitor): + """ + Accept a StateVisitor. The implementing method should call visit not necessarily on itself, but can also call + the visit method on the state container it holds. The common case is calling visit on the stores of a provider. + :param visitor: the StateVisitor + """ + + +class AssetDirectory: + """ + A state container manifested as a directory on the file system. + """ + + service_name: str + path: pathlib.Path + + def __init__(self, service_name: str, path: str | os.PathLike): + if not service_name: + raise ValueError("service name must be set") + + if not path: + raise ValueError("path must be set") + + if not isinstance(path, os.PathLike): + path = pathlib.Path(path) + + self.service_name = service_name + self.path = path + + def __str__(self) -> str: + return str(self.path) + + +class Encoder: + def encodes(self, obj: Any) -> bytes: + """ + Encode an object into bytes. + + :param obj: the object to encode + :return: the encoded object + """ + b = io.BytesIO() + self.encode(obj, b) + return b.getvalue() + + def encode(self, obj: Any, file: IO[bytes]): + """ + Encode an object into bytes. + + :param obj: the object to encode + :param file: the file to write the encoded data into + """ + raise NotImplementedError + + +class Decoder: + def decodes(self, data: bytes) -> Any: + """ + Decode a previously encoded object. + + :param data: the encoded object to decode + :return: the decoded object + """ + return self.decode(io.BytesIO(data)) + + def decode(self, file: IO[bytes]) -> Any: + """ + Decode a previously encoded object. 
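+        Subclasses provide the actual decoding; for example, ``PickleDecoder`` in
+        ``localstack.state.pickle`` implements it on top of dill.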
+
+        :param file: the io object containing the object to decode
+        :return: the decoded object
+        """
+        raise NotImplementedError
diff --git a/localstack-core/localstack/state/inspect.py b/localstack-core/localstack/state/inspect.py
new file mode 100644
index 0000000000000..f5b10c6e3e2e4
--- /dev/null
+++ b/localstack-core/localstack/state/inspect.py
@@ -0,0 +1,112 @@
+"""Utilities to inspect services and their state containers."""
+
+import importlib
+import logging
+from functools import singledispatchmethod
+from typing import Any, Dict, Optional, TypedDict
+
+from moto.core.base_backend import BackendDict
+
+from localstack.services.stores import AccountRegionBundle
+from localstack.state.core import StateVisitor
+
+LOG = logging.getLogger(__name__)
+
+
+class ServiceBackend(TypedDict, total=False):
+    """Wrapper of the possible types of backends that a service can use."""
+
+    localstack: AccountRegionBundle | None
+    moto: BackendDict | Dict | None
+
+
+class ServiceBackendCollectorVisitor(StateVisitor):
+    """Implementation of StateVisitor meant to collect the backends that a given service uses to hold its state."""
+
+    store: AccountRegionBundle | None
+    backend_dict: BackendDict | Dict | None
+
+    def __init__(self) -> None:
+        self.store = None
+        self.backend_dict = None
+
+    @singledispatchmethod
+    def visit(self, state_container: Any):
+        raise NotImplementedError(f"Can't visit state container of type {type(state_container)}")
+
+    @visit.register(AccountRegionBundle)
+    def _(self, state_container: AccountRegionBundle):
+        self.store = state_container
+
+    @visit.register(BackendDict)
+    def _(self, state_container: BackendDict):
+        self.backend_dict = state_container
+
+    def collect(self) -> ServiceBackend:
+        service_backend = ServiceBackend()
+        if self.store:
+            service_backend.update({"localstack": self.store})
+        if self.backend_dict:
+            service_backend.update({"moto": self.backend_dict})
+        return service_backend
+
+
+class ReflectionStateLocator:
+    """
+    Implementation of the StateVisitable protocol that uses reflection to visit and collect anything that holds state
+    for a service, based on the assumption that AccountRegionBundle and BackendDict are stored in a predictable
+    location with predictable naming.
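+
+    For example, for the "sqs" service this locator looks for ``localstack.services.sqs.models.sqs_stores``
+    (preferring a ``localstack.pro.core`` variant, if present) and for ``moto.sqs.models.sqs_backends``.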
+    """
+
+    provider: Any
+
+    def __init__(self, provider: Optional[Any] = None, service: Optional[str] = None):
+        self.provider = provider
+        self.service = service or provider.service
+
+    def accept_state_visitor(self, visitor: StateVisitor):
+        # needed for services like cognito-idp
+        service_name: str = self.service.replace("-", "_")
+        LOG.debug("Visit stores for %s", service_name)
+
+        # try to load AccountRegionBundle from predictable location
+        attribute_name = f"{service_name}_stores"
+        module_name = f"localstack.pro.core.services.{service_name}.models"
+
+        # it first looks for a module in ext; if that is absent, it falls back to community
+        attribute = _load_attribute_from_module(module_name, attribute_name)
+        if attribute is None:
+            module_name = f"localstack.services.{service_name}.models"
+            attribute = _load_attribute_from_module(module_name, attribute_name)
+
+        if attribute is not None:
+            visitor.visit(attribute)
+
+        # try to load BackendDict from predictable location
+        module_name = f"moto.{service_name}.models"
+        attribute_name = f"{service_name}_backends"
+        attribute = _load_attribute_from_module(module_name, attribute_name)
+
+        if attribute is None and "_" in attribute_name:
+            # some services like application_autoscaling do have a backend without the underscore
+            service_name_tmp = service_name.replace("_", "")
+            module_name = f"moto.{service_name_tmp}.models"
+            attribute_name = f"{service_name_tmp}_backends"
+            attribute = _load_attribute_from_module(module_name, attribute_name)
+
+        if attribute is not None:
+            visitor.visit(attribute)
+
+
+def _load_attribute_from_module(module_name: str, attribute_name: str) -> Any | None:
+    """
+    Attempts to get an attribute from a given module.
+    :return: the attribute, or None if the attribute can't be found
+    """
+    try:
+        module = importlib.import_module(module_name)
+        attr = getattr(module, attribute_name)
+        LOG.debug("Found attribute %s in module %s", attribute_name, module_name)
+        return attr
+    except (ModuleNotFoundError, AttributeError):
+        return None
diff --git a/localstack-core/localstack/state/pickle.py b/localstack-core/localstack/state/pickle.py
new file mode 100644
index 0000000000000..1b4535a5f5ca3
--- /dev/null
+++ b/localstack-core/localstack/state/pickle.py
@@ -0,0 +1,348 @@
+"""
+A small wrapper around dill that integrates with our state API, and allows registering custom serializer methods for
+class hierarchies.
+
+For your convenience, you can simply call ``dumps`` or ``loads`` as you would pickle or dill::
+
+    from localstack.state import pickle
+    foo = pickle.loads(pickle.dumps(Foo()))
+
+
+You can register custom state serializers and deserializers to dill's dispatch table, but can also apply them to the
+entire subclass hierarchy::
+
+    @register(PriorityQueue, subclasses=True)
+    def my_queue_pickler(pickler, obj):
+        pickler.save_reduce(_recreate, (type(obj), obj.queue,), obj=obj)
+
+    def _recreate(obj_type, obj_queue):
+        # this method will be called when the object is de-serialized. you won't be able to reach it with the
+        # debugger though, it's saved into the pickle! Make sure it's outside the actual reduce hook, otherwise a new
+        # function is created every time for every serialized object of that type.
+
+        q = obj_type()
+        q.queue = obj_queue
+        return q
+
+To learn more about this mechanism, read https://docs.python.org/3/library/copyreg.html and
+https://dill.readthedocs.io/en/latest/index.html?highlight=register#dill.Pickler.dispatch.
+"""
+
+import inspect
+from typing import Any, BinaryIO, Callable, Generic, Type, TypeVar
+
+import dill
+from dill._dill import MetaCatchingDict
+
+from .core import Decoder, Encoder
+
+_T = TypeVar("_T")
+
+PythonPickler = Any
+"""Type placeholder for pickle._Pickler (which has for instance the save_reduce method)"""
+
+
+def register(cls: Type = None, subclasses: bool = False):
+    """
+    Decorator to register a custom type or type tree into the dill pickling dispatcher table.
+
+    :param cls: the type
+    :param subclasses: whether to dispatch all subclasses to this function as well
+    :return:
+    """
+
+    def _wrapper(fn: Any | Callable[[PythonPickler, Any], None]):
+        if inspect.isclass(fn) and issubclass(fn, ObjectStateReducer):
+            if cls is not None:
+                raise ValueError("superfluous cls attribute for registering classes")
+            obj = fn.create()
+            add_dispatch_entry(obj.cls, obj._pickle, subclasses)
+        elif callable(fn):
+            add_dispatch_entry(cls, fn, subclasses=subclasses)
+        else:
+            raise ValueError("cannot register %s" % fn)
+
+        return fn
+
+    return _wrapper
+
+
+def reducer(cls: Type, restore: Callable = None, subclasses: bool = False):
+    """
+    Convenience decorator to simplify the following pattern::
+
+        def _create_something(attr1, attr2):
+            return Something(attr1, attr2)
+
+        @register(Something)
+        def pickle_something(pickler, obj):
+            attr1 = obj.attr1
+            attr2 = obj.attr2
+            return pickler.save_reduce(_create_something, (attr1, attr2), obj=obj)
+
+    into::
+
+        def _create_something(attr1, attr2):
+            return Something(attr1, attr2)
+
+        @reducer(Something, _create_something)
+        def pickle_something(pickler, obj):
+            return obj.attr1, obj.attr2
+
+    in some cases, if your constructor matches the arguments you return, into::
+
+        @reducer(Something)
+        def pickle_something(pickler, obj):
+            return obj.attr1, obj.attr2
+
+    Note that this option creates larger pickles than the previous option, since this option also needs to store the
+    ``Something`` class into the pickle.
+
+    :param cls:
+    :param restore:
+    :param subclasses:
+    :return:
+    """
+
+    def _wrapper(fn):
+        def _reducer(pickler, obj):
+            return pickler.save_reduce(restore or cls, fn(obj), obj=obj)
+
+        add_dispatch_entry(cls, _reducer, subclasses)
+        return fn
+
+    return _wrapper
+
+
+def add_dispatch_entry(
+    cls: Type, fn: Callable[[PythonPickler, Any], None], subclasses: bool = False
+):
+    Pickler.dispatch_overwrite[cls] = fn
+    if subclasses:
+        Pickler.match_subclasses_of.add(cls)
+
+
+def remove_dispatch_entry(cls: Type):
+    try:
+        del Pickler.dispatch_overwrite[cls]
+    except KeyError:
+        pass
+
+    try:
+        Pickler.match_subclasses_of.remove(cls)
+    except KeyError:
+        pass
+
+
+def dumps(obj: Any) -> bytes:
+    """
+    Pickle an object into bytes using a ``PickleEncoder``.
+
+    :param obj: the object to pickle
+    :return: the pickled object
+    """
+    return PickleEncoder().encodes(obj)
+
+
+def dump(obj: Any, file: BinaryIO):
+    """
+    Pickle an object into a buffer using a ``PickleEncoder``.
+
+    :param obj: the object to pickle
+    :param file: the IO buffer
+    """
+    return PickleEncoder().encode(obj, file)
+
+
+def loads(data: bytes) -> Any:
+    """
+    Unpickle an object from bytes using a ``PickleDecoder``.
+
+    :param data: the pickled object
+    :return: the unpickled object
+    """
+    return PickleDecoder().decodes(data)
+
+
+def load(file: BinaryIO) -> Any:
+    """
+    Unpickle an object from a buffer using a ``PickleDecoder``.
+
+    :param file: the buffer containing the pickled object
+    :return: the unpickled object
+    """
+    return PickleDecoder().decode(file)
+
+
+class _SuperclassMatchingTypeDict(MetaCatchingDict):
+    """
+    A special dictionary where keys are types, and keys are also optionally matched on their subclasses. Types where
+    subclass matching should happen can be registered through the ``match_subclasses_of`` property. Example::
+
+        d = _SuperclassMatchingTypeDict()
+        d[dict] = "a dict"
+        d[defaultdict]  # raises key error
+        d.match_subclasses_of.add(dict)
+        d[defaultdict]  # returns "a dict"
+
+    """
+
+    def __init__(self, seq=None, match_subclasses_of: set[Type] = None):
+        if seq is not None:
+            super().__init__(seq)
+        else:
+            super().__init__()
+
+        self.match_subclasses_of = match_subclasses_of or set()
+
+    def __missing__(self, key):
+        for c in key.__mro__[1:]:
+            # traverse the superclasses upwards until a dispatcher is found
+            if c not in self.match_subclasses_of:
+                continue
+
+            if fn := super().get(c):
+                return fn
+
+        return super().__missing__(key)
+
+
+class Pickler(dill.Pickler):
+    """
+    Custom dill pickler that considers dispatchers and subclass dispatchers registered via ``register``.
+    """
+
+    match_subclasses_of: set[Type] = set()
+    dispatch_overwrite: dict[Type, Callable] = {}
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # create the dispatch table (inherit the dill dispatchers)
+        dispatch = _SuperclassMatchingTypeDict(dill.Pickler.dispatch.copy())
+        dispatch.update(Pickler.dispatch_overwrite.copy())  # makes sure ours take precedence
+        dispatch.match_subclasses_of.update(Pickler.match_subclasses_of.copy())
+        self.dispatch = dispatch
+
+
+class PickleEncoder(Encoder):
+    """
+    An Encoder that uses dill pickling under the hood, and by default uses the custom ``Pickler`` that can be
+    extended with custom serializers.
+    """
+
+    pickler_class: Type[dill.Pickler]
+
+    def __init__(self, pickler_class: Type[dill.Pickler] = None):
+        self.pickler_class = pickler_class or Pickler
+
+    def encode(self, obj: Any, file: BinaryIO):
+        return self.pickler_class(file).dump(obj)
+
+
+class PickleDecoder(Decoder):
+    """
+    A Decoder that uses dill pickling under the hood; by default it uses dill's ``Unpickler``, which can be
+    swapped for a custom implementation.
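+
+    Round-trip example::
+
+        obj = PickleDecoder().decodes(PickleEncoder().encodes({"foo": "bar"}))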
+    """
+
+    unpickler_class: Type[dill.Unpickler]
+
+    def __init__(self, unpickler_class: Type[dill.Unpickler] = None):
+        self.unpickler_class = unpickler_class or dill.Unpickler
+
+    def decode(self, file: BinaryIO) -> Any:
+        return self.unpickler_class(file).load()
+
+
+class ObjectStateReducer(Generic[_T]):
+    """
+    A generalization of the following pattern::
+
+        def _create_something(cls: Type, state: dict):
+            obj = cls.__new__(self.cls)
+
+            # do stuff on the state (perhaps re-create some attributes)
+            state["this_one_doesnt_serialize"] = restore(state["this_one_serialized"])
+
+            obj.__dict__.update(state)
+            return obj
+
+        @register(Something)
+        def pickle_something(pickler, obj):
+            state = obj.__dict__.copy()
+            state.pop("this_one_doesnt_serialize")
+            return pickler.save_reduce(_create_something, (state,), obj=obj)
+
+    With the ObjectStateReducer, this can now be expressed as::
+
+        @register()
+        class SomethingPickler(ObjectStateReducer):
+            cls = Something
+
+            def prepare(state: dict):
+                state.pop("this_one_doesnt_serialize")
+
+            def restore(state: dict):
+                state["this_one_doesnt_serialize"] = restore(state["this_one_serialized"])
+    """
+
+    cls: _T
+
+    @classmethod
+    def create(cls):
+        return cls()
+
+    def register(self, subclasses=False):
+        """
+        Registers this ObjectStateReducer's reducer function. See ``pickle.register``.
+        """
+        add_dispatch_entry(self.cls, self._pickle, subclasses=subclasses)
+
+    def _pickle(self, pickler, obj: _T):
+        state = self.get_state(obj)
+        self.prepare(obj, state)
+        return pickler.save_reduce(self._unpickle, (state,), obj=obj)
+
+    def _unpickle(self, state: dict) -> dict:
+        obj = self.cls.__new__(self.cls)
+        self.restore(obj, state)
+        self.set_state(obj, state)
+        return obj
+
+    def get_state(self, obj: _T) -> Any:
+        """
+        Return the object's state. Can be overridden by subclasses to return custom state.
+
+        :param obj: the object
+        :return: the unprepared state
+        """
+        return obj.__dict__.copy()
+
+    def set_state(self, obj: _T, state: Any):
+        """
+        Set the state of the object. Can be overridden by subclasses to set custom state.
+
+        :param obj: the object
+        :param state: the restored object state.
+        """
+        obj.__dict__.update(state)
+
+    def prepare(self, obj: _T, state: Any):
+        """
+        Can be overridden by subclasses to prepare the object state for pickling.
+
+        :param obj: the object
+        :param state: the object state to serialize
+        """
+        pass
+
+    def restore(self, obj: _T, state: Any):
+        """
+        Can be overridden by subclasses to modify the object state to restore any previously removed attributes.
+
+        :param obj: the object
+        :param state: the object's state to restore
+        """
+        pass
diff --git a/localstack-core/localstack/state/snapshot.py b/localstack-core/localstack/state/snapshot.py
new file mode 100644
index 0000000000000..9f936fb280dda
--- /dev/null
+++ b/localstack-core/localstack/state/snapshot.py
@@ -0,0 +1,22 @@
+from plux import Plugin
+
+from .core import StateVisitor
+
+
+class SnapshotPersistencePlugin(Plugin):
+    """
+    A plugin for the snapshot persistence mechanism, which allows you to return custom visitors for saving or loading
+    state, if the service requires custom logic.
+    """
+
+    namespace: str = "localstack.persistence.snapshot"
+    """Plugin namespace"""
+
+    name: str
+    """Name of the plugin; corresponds to the name of the service this plugin is loaded for.
To be set by the Plugin."""
+
+    def create_load_snapshot_visitor(self, service: str, data_dir: str) -> StateVisitor:
+        raise NotImplementedError
+
+    def create_save_snapshot_visitor(self, service: str, data_dir: str) -> StateVisitor:
+        raise NotImplementedError
diff --git a/localstack-core/localstack/testing/__init__.py b/localstack-core/localstack/testing/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/testing/aws/__init__.py b/localstack-core/localstack/testing/aws/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/testing/aws/asf_utils.py b/localstack-core/localstack/testing/aws/asf_utils.py
new file mode 100644
index 0000000000000..33035496ebf2f
--- /dev/null
+++ b/localstack-core/localstack/testing/aws/asf_utils.py
@@ -0,0 +1,177 @@
+import importlib
+import importlib.util
+import inspect
+import pkgutil
+import re
+from types import FunctionType, ModuleType, NoneType, UnionType
+from typing import Optional, Pattern, Union, get_args, get_origin
+
+
+def _import_submodules(
+    package_name: str, module_regex: Optional[Pattern] = None, recursive: bool = True
+) -> dict[str, ModuleType]:
+    """
+    Imports all submodules of the given package whose names match the (optional) module_regex.
+
+    :param package_name: package to start the loading / importing at
+    :param module_regex: Optional regex to filter the module names for
+    :param recursive: True if the package should be loaded recursively
+    :return:
+    """
+    package = importlib.import_module(package_name)
+    results = {}
+    for loader, name, is_pkg in pkgutil.walk_packages(package.__path__, package.__name__ + "."):
+        if not module_regex or module_regex.match(name):
+            results[name] = importlib.import_module(name)
+            if recursive and is_pkg:
+                results.update(_import_submodules(name, module_regex, recursive))
+    return results
+
+
+def _collect_provider_classes(
+    provider_module: str, provider_module_regex: Pattern, provider_class_regex: Pattern
+) -> list[type]:
+    """
+    Collects all provider implementation classes which should be tested.
+    :param provider_module: module to start collecting in
+    :param provider_module_regex: Regex to filter the module names for
+    :param provider_class_regex: Regex to filter the provider class names for
+    :return: list of classes to check the operation signatures of
+    """
+    provider_classes = []
+    provider_modules = _import_submodules(provider_module, provider_module_regex)
+    # check that all these files don't import any encrypted code
+    for _, mod in provider_modules.items():
+        # get all classes of the module which end with "Provider"
+        classes = [
+            cls_obj
+            for cls_name, cls_obj in inspect.getmembers(mod)
+            if inspect.isclass(cls_obj) and provider_class_regex.match(cls_name)
+        ]
+        provider_classes.extend(classes)
+    return provider_classes
+
+
+def collect_implemented_provider_operations(
+    provider_module: str = "localstack.services",
+    provider_module_regex: Pattern = re.compile(r".*\.provider[A-Za-z_0-9]*$"),
+    provider_class_regex: Pattern = re.compile(r".*Provider$"),
+    asf_api_module: str = "localstack.aws.api",
+) -> list[tuple[type, type, str]]:
+    """
+    Collects all implemented operations on all provider classes together with their base classes (generated API classes).
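+    An operation counts as implemented when the provider class overrides a method that is also defined on its
+    generated ASF base class (dunder methods are excluded).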
+    :param provider_module: module to start collecting in
+    :param provider_module_regex: Regex to filter the module names for
+    :param provider_class_regex: Regex to filter the provider class names for
+    :param asf_api_module: module which contains the generated ASF APIs
+    :return: list of tuples, where each tuple is (provider_class: type, base_class: type, provider_function: str)
+    """
+    results = []
+    provider_classes = _collect_provider_classes(
+        provider_module, provider_module_regex, provider_class_regex
+    )
+    for provider_class in provider_classes:
+        for base_class in provider_class.__bases__:
+            base_parent_module = ".".join(base_class.__module__.split(".")[:-1])
+            if base_parent_module == asf_api_module:
+                # find all functions on the provider class which are also defined in the super class and are not dunder functions
+                provider_functions = [
+                    method
+                    for method in dir(provider_class)
+                    if hasattr(base_class, method)
+                    and isinstance(getattr(base_class, method), FunctionType)
+                    and method.startswith("__") is False
+                ]
+                for provider_function in provider_functions:
+                    results.append((provider_class, base_class, provider_function))
+    return results
+
+
+def check_provider_signature(sub_class: type, base_class: type, method_name: str) -> None:
+    """
+    Checks if the signature of a given provider method is equal to the signature of the function with the same name on the base class.
+
+    :param sub_class: provider class to check the given method's signature of
+    :param base_class: API class to check the given method's signature against
+    :param method_name: name of the method on the sub_class and base_class to compare
+    :raise: AssertionError if the two signatures are not equal
+    """
+    try:
+        sub_function = getattr(sub_class, method_name)
+    except AttributeError:
+        raise AttributeError(
+            f"Given method name ('{method_name}') is not a method of the sub class ('{sub_class.__name__}')."
+        )
+
+    if not isinstance(sub_function, FunctionType):
+        raise AttributeError(
+            f"Given method name ('{method_name}') is not a method of the sub class ('{sub_class.__name__}')."
+        )
+
+    if not getattr(sub_function, "expand_parameters", True):
+        # if the operation on the subclass has the "expand_parameters" attribute (set by a handler decorator) set to False, we don't care
+        return
+
+    if wrapped := getattr(sub_function, "__wrapped__", False):
+        # if the operation on the subclass has a decorator, unwrap it
+        sub_function = wrapped
+
+    try:
+        base_function = getattr(base_class, method_name)
+        # unwrap from the handler decorator
+        base_function = base_function.__wrapped__
+
+        sub_spec = inspect.getfullargspec(sub_function)
+        base_spec = inspect.getfullargspec(base_function)
+
+        error_msg = f"{sub_class.__name__}#{method_name} breaks with {base_class.__name__}#{method_name}. This can also be caused by 'from __future__ import annotations' in a provider file!"
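+        # (`from __future__ import annotations` stringifies all annotations per PEP 563, so
+        # inspect.getfullargspec() would compare plain strings against evaluated types and fail.)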
+
+        # Assert that the signature is correct
+        assert sub_spec.args == base_spec.args, error_msg
+        assert sub_spec.varargs == base_spec.varargs, error_msg
+        assert sub_spec.varkw == base_spec.varkw, error_msg
+        assert sub_spec.defaults == base_spec.defaults, (
+            error_msg + f"\n{sub_spec.defaults} != {base_spec.defaults}"
+        )
+        assert sub_spec.kwonlyargs == base_spec.kwonlyargs, error_msg
+        assert sub_spec.kwonlydefaults == base_spec.kwonlydefaults, error_msg
+
+        # Assert that the typing of the implementation is equal to the base
+        for kwarg in sub_spec.annotations:
+            if kwarg == "return":
+                assert sub_spec.annotations[kwarg] == base_spec.annotations[kwarg]
+            else:
+                # The API currently marks everything as required, and optional args are configured as:
+                #   arg: ArgType = None
+                # which is obviously incorrect.
+                # Implementations sometimes do this correctly:
+                #   arg: ArgType | None = None
+                # These should be considered equal, so until the API is fixed, we remove any Optionals
+                # This also gives us the flexibility to correct the API without fixing all implementations at the same time
+
+                if kwarg not in base_spec.annotations:
+                    # Typically happens when the implementation uses '**kwargs: Any'
+                    # This parameter is not part of the base spec, so we can't compare types
+                    continue
+
+                sub_type = _remove_optional(sub_spec.annotations[kwarg])
+                base_type = _remove_optional(base_spec.annotations[kwarg])
+                assert sub_type == base_type, (
+                    f"Types for {kwarg} are different - {sub_type} instead of {base_type}"
+                )
+
+    except AttributeError:
+        # the function is not defined in the superclass
+        pass
+
+
+def _remove_optional(_type: type) -> list[type]:
+    if get_origin(_type) in [Union, UnionType]:
+        union_types = list(get_args(_type))
+        try:
+            union_types.remove(NoneType)
+        except ValueError:
+            # Union of some other kind, like 'str | int'
+            pass
+        return union_types
+    return [_type]
diff --git a/localstack/testing/aws/cloudformation_utils.py b/localstack-core/localstack/testing/aws/cloudformation_utils.py
similarity index 100%
rename from localstack/testing/aws/cloudformation_utils.py
rename to localstack-core/localstack/testing/aws/cloudformation_utils.py
diff --git a/localstack-core/localstack/testing/aws/eventbus_utils.py b/localstack-core/localstack/testing/aws/eventbus_utils.py
new file mode 100644
index 0000000000000..5da33a9354fbf
--- /dev/null
+++ b/localstack-core/localstack/testing/aws/eventbus_utils.py
@@ -0,0 +1,54 @@
+import json
+
+import requests
+
+from localstack import config
+from localstack.testing.aws.util import is_aws_cloud
+from localstack.utils.aws.client_types import TypedServiceClientFactory
+
+
+def trigger_scheduled_rule(rule_arn: str):
+    """
+    Call the internal /_aws/events/rules/<rule_arn>/trigger endpoint to expire the deadline of a rule and
+    trigger it ASAP.
+
+    :param rule_arn: the rule to run
+    :raises ValueError: if the response returns a >=400 code
+    """
+    if is_aws_cloud():
+        return
+
+    url = config.internal_service_url() + f"/_aws/events/rules/{rule_arn}/trigger"
+    response = requests.get(url)
+    if not response.ok:
+        raise ValueError(
+            f"Error triggering rule {rule_arn}: {response.status_code},{response.text}"
+        )
+
+
+def allow_event_rule_to_sqs_queue(
+    aws_client: TypedServiceClientFactory,
+    sqs_queue_url: str,
+    sqs_queue_arn: str,
+    event_rule_arn: str,
+):
+    """Creates an SQS queue policy that allows the given EventBridge rule to write to the given SQS queue."""
+    return aws_client.sqs.set_queue_attributes(
+        QueueUrl=sqs_queue_url,
+        Attributes={
+            "Policy": json.dumps(
+                {
+                    "Statement": [
+                        {
+                            "Sid": "AllowEventsToQueue",
+                            "Effect": "Allow",
+                            "Principal": {"Service": "events.amazonaws.com"},
+                            "Action": "sqs:SendMessage",
+                            "Resource": sqs_queue_arn,
+                            "Condition": {"ArnEquals": {"aws:SourceArn": event_rule_arn}},
+                        }
+                    ]
+                }
+            )
+        },
+    )
diff --git a/localstack-core/localstack/testing/aws/lambda_utils.py b/localstack-core/localstack/testing/aws/lambda_utils.py
new file mode 100644
index 0000000000000..764605f46962a
--- /dev/null
+++ b/localstack-core/localstack/testing/aws/lambda_utils.py
@@ -0,0 +1,342 @@
+import itertools
+import json
+import logging
+import os
+import subprocess
+import zipfile
+from pathlib import Path
+from typing import TYPE_CHECKING, Literal, Mapping, Optional, Sequence, overload
+
+from localstack import config
+from localstack.services.lambda_.runtimes import RUNTIMES_AGGREGATED
+from localstack.utils.files import load_file
+from localstack.utils.platform import Arch, get_arch
+from localstack.utils.strings import short_uid
+from localstack.utils.sync import ShortCircuitWaitException, retry
+from localstack.utils.testutil import get_lambda_log_events
+
+if TYPE_CHECKING:
+    from mypy_boto3_lambda import LambdaClient
+    from mypy_boto3_lambda.literals import ArchitectureType, PackageTypeType, RuntimeType
+    from mypy_boto3_lambda.type_defs import (
+        DeadLetterConfigTypeDef,
+        EnvironmentTypeDef,
+        EphemeralStorageTypeDef,
+        FileSystemConfigTypeDef,
+        FunctionCodeTypeDef,
+        FunctionConfigurationResponseMetadataTypeDef,
+        ImageConfigTypeDef,
+        TracingConfigTypeDef,
+        VpcConfigTypeDef,
+    )
+
+LOG = logging.getLogger(__name__)
+
+HANDLERS = {
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("nodejs"), "index.handler"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("python"), "handler.handler"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("java"), "echo.Handler"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("ruby"), "function.handler"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("dotnet"), "dotnet::Dotnet.Function::FunctionHandler"),
+    # The handler value does not matter unless the custom runtime reads it in some way, but it is a required field.
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("provided"), "function.handler"),
+}
+
+PACKAGE_FOR_RUNTIME = {
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("nodejs"), "nodejs"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("python"), "python"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("java"), "java"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("ruby"), "ruby"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("dotnet"), "dotnet"),
+    **dict.fromkeys(RUNTIMES_AGGREGATED.get("provided"), "provided"),
+}
+
+
+def generate_tests(metafunc):
+    i = next(metafunc.definition.iter_markers("multiruntime"), None)
+    if not i:
+        return
+    if i.args:
+        raise ValueError("The multiruntime marker only accepts keyword arguments")
+
+    scenario = i.kwargs["scenario"]
+    runtimes = i.kwargs.get("runtimes")
+    if not runtimes:
+        runtimes = list(RUNTIMES_AGGREGATED.keys())
+    ids = list(
+        itertools.chain.from_iterable(
+            RUNTIMES_AGGREGATED.get(runtime) or [runtime] for runtime in runtimes
+        )
+    )
+    arg_values = [(scenario, runtime, HANDLERS[runtime]) for runtime in ids]
+
+    metafunc.parametrize(
+        argvalues=arg_values,
+        argnames="multiruntime_lambda",
+        indirect=True,
+        ids=ids,
+    )
+
+
+def package_for_lang(scenario: str, runtime: str, root_folder: Path) -> str:
+    """
+    :param scenario: which scenario to run
+    :param runtime: which runtime to build
+    :param root_folder: The root folder for the scenarios
+    :return: path to built zip file
+    """
+    runtime_folder = PACKAGE_FOR_RUNTIME[runtime]
+
+    common_dir = root_folder / "functions" / "common"
+    scenario_dir = common_dir / scenario
+    runtime_dir_candidate = scenario_dir / runtime
+    generic_runtime_dir_candidate = scenario_dir / runtime_folder
+
+    # if a more specific folder exists, use that one
+    # otherwise: try to fall back to generic runtime (e.g. python for python3.12)
+    if runtime_dir_candidate.exists() and runtime_dir_candidate.is_dir():
+        runtime_dir = runtime_dir_candidate
+    else:
+        runtime_dir = generic_runtime_dir_candidate
+
+    build_dir = runtime_dir / "build"
+    package_path = runtime_dir / "handler.zip"
+
+    # caching step
+    # TODO: add invalidation (e.g. via storing a hash of all files in src alongside the zip)
+    if os.path.exists(package_path) and os.path.isfile(package_path):
+        return package_path
+
+    # packaging
+    # Use the default Lambda architecture x86_64 unless the ignore architecture flag is configured.
+    # This enables local testing of both architectures on multi-architecture platforms such as Apple Silicon machines.
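+    # The chosen value is handed to the scenario's Makefile via the ARCHITECTURE variable below.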
+ architecture = "x86_64" + if config.LAMBDA_IGNORE_ARCHITECTURE: + architecture = "arm64" if get_arch() == Arch.arm64 else "x86_64" + build_cmd = ["make", "build", f"ARCHITECTURE={architecture}"] + LOG.debug( + "Building Lambda function for scenario %s and runtime %s using %s.", + scenario, + runtime, + " ".join(build_cmd), + ) + result = subprocess.run(build_cmd, cwd=runtime_dir) + if result.returncode != 0: + raise Exception( + f"Failed to build multiruntime {scenario=} for {runtime=} with error code: {result.returncode}" + ) + + # check again if the zip file is now present + if os.path.exists(package_path) and os.path.isfile(package_path): + return package_path + + # check something is in build now + target_empty = len(os.listdir(build_dir)) <= 0 + if target_empty: + raise Exception(f"Failed to build multiruntime {scenario=} for {runtime=} ") + + with zipfile.ZipFile(package_path, "w", strict_timestamps=True) as zf: + for root, dirs, files in os.walk(build_dir): + rel_dir = os.path.relpath(root, build_dir) + for f in files: + zf.write(os.path.join(root, f), arcname=os.path.join(rel_dir, f)) + + # make sure package file has been generated + assert package_path.exists() and package_path.is_file() + return package_path + + +class ParametrizedLambda: + lambda_client: "LambdaClient" + function_names: list[str] + scenario: str + runtime: str + handler: str + zip_file_path: str + role: str + + def __init__( + self, + lambda_client: "LambdaClient", + scenario: str, + runtime: str, + handler: str, + zip_file_path: str, + role: str, + ): + self.function_names = [] + self.lambda_client = lambda_client + self.scenario = scenario + self.runtime = runtime + self.handler = handler + self.zip_file_path = zip_file_path + self.role = role + + @overload + def create_function( + self, + *, + FunctionName: Optional[str] = None, + Role: Optional[str] = None, + Code: Optional["FunctionCodeTypeDef"] = None, + Runtime: Optional["RuntimeType"] = None, + Handler: Optional[str] = None, + Description: Optional[str] = None, + Timeout: Optional[int] = None, + MemorySize: Optional[int] = None, + Publish: Optional[bool] = None, + VpcConfig: Optional["VpcConfigTypeDef"] = None, + PackageType: Optional["PackageTypeType"] = None, + DeadLetterConfig: Optional["DeadLetterConfigTypeDef"] = None, + Environment: Optional["EnvironmentTypeDef"] = None, + KMSKeyArn: Optional[str] = None, + TracingConfig: Optional["TracingConfigTypeDef"] = None, + Tags: Optional[Mapping[str, str]] = None, + Layers: Optional[Sequence[str]] = None, + FileSystemConfigs: Optional[Sequence["FileSystemConfigTypeDef"]] = None, + ImageConfig: Optional["ImageConfigTypeDef"] = None, + CodeSigningConfigArn: Optional[str] = None, + Architectures: Optional[Sequence["ArchitectureType"]] = None, + EphemeralStorage: Optional["EphemeralStorageTypeDef"] = None, + ) -> "FunctionConfigurationResponseMetadataTypeDef": ... 
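+
+    # The typed @overload above only serves IDE completion; the implementation below fills in
+    # scenario defaults, so e.g. `parametrized_lambda.create_function(MemorySize=1024)` creates a
+    # function named "<scenario>-<short_uid>" using the prepackaged zip for the selected runtime.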
+ + def create_function(self, **kwargs): + kwargs.setdefault("FunctionName", f"{self.scenario}-{short_uid()}") + kwargs.setdefault("Runtime", self.runtime) + kwargs.setdefault("Handler", self.handler) + kwargs.setdefault("Role", self.role) + kwargs.setdefault("Code", {"ZipFile": load_file(self.zip_file_path, mode="rb")}) + + def _create_function(): + return self.lambda_client.create_function(**kwargs) + + # @AWS, takes about 10s until the role/policy is "active", until then it will fail + # localstack should normally not require the retries and will just continue here + result = retry(_create_function, retries=3, sleep=4) + self.function_names.append(result["FunctionArn"]) + self.lambda_client.get_waiter("function_active_v2").wait( + FunctionName=kwargs.get("FunctionName") + ) + + return result + + def destroy(self): + for function_name in self.function_names: + try: + self.lambda_client.delete_function(FunctionName=function_name) + except Exception as e: + LOG.debug("Error deleting function %s: %s", function_name, e) + + +def update_done(client, function_name): + """wait fn for checking 'LastUpdateStatus' of lambda""" + + def _update_done(): + last_update_status = client.get_function_configuration(FunctionName=function_name)[ + "LastUpdateStatus" + ] + if last_update_status == "Failed": + raise ShortCircuitWaitException(f"Lambda Config update failed: {last_update_status=}") + else: + return last_update_status == "Successful" + + return _update_done + + +def concurrency_update_done(client, function_name, qualifier): + """wait fn for ProvisionedConcurrencyConfig 'Status'""" + + def _concurrency_update_done(): + status = client.get_provisioned_concurrency_config( + FunctionName=function_name, Qualifier=qualifier + )["Status"] + if status == "FAILED": + raise ShortCircuitWaitException(f"Concurrency update failed: {status=}") + else: + return status == "READY" + + return _concurrency_update_done + + +def get_invoke_init_type( + client, function_name, qualifier +) -> Literal["on-demand", "provisioned-concurrency"]: + """check the environment in the lambda for AWS_LAMBDA_INITIALIZATION_TYPE indicating ondemand/provisioned""" + invoke_result = client.invoke(FunctionName=function_name, Qualifier=qualifier) + return json.load(invoke_result["Payload"]) + + +lambda_role = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"Service": "lambda.amazonaws.com"}, + "Action": "sts:AssumeRole", + } + ], +} +esm_lambda_permission = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sqs:*", + "sns:*", + "dynamodb:DescribeStream", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:ListStreams", + "kinesis:DescribeStream", + "kinesis:DescribeStreamSummary", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards", + "kinesis:ListStreams", + "kinesis:SubscribeToShard", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "s3:ListBucket", + "s3:PutObject", + ], + "Resource": ["*"], + } + ], +} + + +def _await_event_source_mapping_state(lambda_client, uuid, state, retries=30): + def assert_mapping_disabled(): + assert lambda_client.get_event_source_mapping(UUID=uuid)["State"] == state + + retry(assert_mapping_disabled, sleep_before=2, retries=retries) + + +def _await_event_source_mapping_enabled(lambda_client, uuid, retries=30): + return _await_event_source_mapping_state( + lambda_client=lambda_client, uuid=uuid, retries=retries, state="Enabled" + ) + + +def 
_await_dynamodb_table_active(dynamodb_client, table_name, retries=6): + def assert_table_active(): + assert ( + dynamodb_client.describe_table(TableName=table_name)["Table"]["TableStatus"] == "ACTIVE" + ) + + retry(assert_table_active, retries=retries, sleep_before=2) + + +def _get_lambda_invocation_events(logs_client, function_name, expected_num_events, retries=30): + def get_events(): + events = get_lambda_log_events(function_name, logs_client=logs_client) + assert len(events) == expected_num_events + return events + + return retry(get_events, retries=retries, sleep_before=5, sleep=5) + + +def is_docker_runtime_executor(): + return config.LAMBDA_RUNTIME_EXECUTOR in ["docker", ""] diff --git a/localstack-core/localstack/testing/aws/util.py b/localstack-core/localstack/testing/aws/util.py new file mode 100644 index 0000000000000..2fadd02b9b257 --- /dev/null +++ b/localstack-core/localstack/testing/aws/util.py @@ -0,0 +1,247 @@ +import functools +import os +from typing import Callable, Dict, TypeVar + +import boto3 +import botocore +from botocore.awsrequest import AWSPreparedRequest, AWSResponse +from botocore.client import BaseClient +from botocore.compat import HTTPHeaders +from botocore.config import Config +from botocore.exceptions import ClientError + +from localstack import config +from localstack.aws.api import RequestContext +from localstack.aws.connect import ( + ClientFactory, + ExternalAwsClientFactory, + ExternalClientFactory, + ServiceLevelClientFactory, +) +from localstack.aws.forwarder import create_http_request +from localstack.aws.protocol.parser import create_parser +from localstack.aws.spec import LOCALSTACK_BUILTIN_DATA_PATH, load_service +from localstack.config import is_env_true +from localstack.testing.config import ( + SECONDARY_TEST_AWS_ACCESS_KEY_ID, + SECONDARY_TEST_AWS_PROFILE, + SECONDARY_TEST_AWS_SECRET_ACCESS_KEY, + SECONDARY_TEST_AWS_SESSION_TOKEN, + TEST_AWS_ACCESS_KEY_ID, + TEST_AWS_REGION_NAME, + TEST_AWS_SECRET_ACCESS_KEY, +) +from localstack.utils.aws.arns import get_partition +from localstack.utils.aws.request_context import get_account_id_from_request +from localstack.utils.sync import poll_condition + + +def is_aws_cloud() -> bool: + return os.environ.get("TEST_TARGET", "") == "AWS_CLOUD" + + +def in_default_partition() -> bool: + return is_aws_cloud() or get_partition(TEST_AWS_REGION_NAME) == "aws" + + +def get_lambda_logs(func_name, logs_client): + log_group_name = f"/aws/lambda/{func_name}" + streams = logs_client.describe_log_streams(logGroupName=log_group_name)["logStreams"] + streams = sorted(streams, key=lambda x: x["creationTime"], reverse=True) + log_events = logs_client.get_log_events( + logGroupName=log_group_name, logStreamName=streams[0]["logStreamName"] + )["events"] + return log_events + + +def bucket_exists(client, bucket_name: str) -> bool: + buckets = client.list_buckets() + for bucket in buckets["Buckets"]: + if bucket["Name"] == bucket_name: + return True + return False + + +def wait_for_user(keys, region_name: str): + sts_client = create_client_with_keys(service="sts", keys=keys, region_name=region_name) + + def is_user_ready(): + try: + sts_client.get_caller_identity() + return True + except ClientError as e: + if e.response["Error"]["Code"] == "InvalidClientTokenId": + return False + return True + + # wait until the given user is ready, takes AWS IAM a while... 
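+    # Illustrative call site (sketch only; assumes an access key was just created for the user):
+    #
+    #   keys = iam_client.create_access_key(UserName=user_name)["AccessKey"]
+    #   wait_for_user(keys, region_name=TEST_AWS_REGION_NAME)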
+ poll_condition(is_user_ready, interval=5, timeout=20) + + +def create_client_with_keys( + service: str, + keys: Dict[str, str], + region_name: str, + client_config: Config = None, +): + """ + Create a boto client with the given access key, targeted against LS per default, but to AWS if TEST_TARGET is set + accordingly. + + :param service: Service to create the Client for + :param keys: Access Keys + :param region_name: Region for the client + :param client_config: + :return: + """ + return boto3.client( + service, + region_name=region_name, + aws_access_key_id=keys["AccessKeyId"], + aws_secret_access_key=keys["SecretAccessKey"], + aws_session_token=keys.get("SessionToken"), + config=client_config, + endpoint_url=config.internal_service_url() if not is_aws_cloud() else None, + ) + + +def create_request_context( + service_name: str, operation_name: str, region: str, aws_request: AWSPreparedRequest +) -> RequestContext: + if hasattr(aws_request.body, "read"): + aws_request.body = aws_request.body.read() + request = create_http_request(aws_request) + + context = RequestContext(request=request) + context.service = load_service(service_name) + context.operation = context.service.operation_model(operation_name=operation_name) + context.region = region + parser = create_parser(context.service) + _, instance = parser.parse(context.request) + context.service_request = instance + context.account_id = get_account_id_from_request(context.request) + return context + + +class _RequestContextClient: + _client: BaseClient + + def __init__(self, client: BaseClient): + self._client = client + + def __getattr__(self, item): + target = getattr(self._client, item) + if not isinstance(target, Callable): + return target + + @functools.wraps(target) + def wrapper_method(*args, **kwargs): + service_name = self._client.meta.service_model.service_name + operation_name = self._client.meta.method_to_api_mapping[item] + region = self._client.meta.region_name + prepared_request = None + + def event_handler(request: AWSPreparedRequest, **_): + nonlocal prepared_request + prepared_request = request + # we need to return an AWS Response here + aws_response = AWSResponse( + url=request.url, status_code=200, headers=HTTPHeaders(), raw=None + ) + aws_response._content = b"" + return aws_response + + self._client.meta.events.register( + f"before-send.{service_name}.{operation_name}", handler=event_handler + ) + try: + target(*args, **kwargs) + except Exception: + pass + self._client.meta.events.unregister( + f"before-send.{service_name}.{operation_name}", handler=event_handler + ) + + return create_request_context( + service_name=service_name, + operation_name=operation_name, + region=region, + aws_request=prepared_request, + ) + + return wrapper_method + + +T = TypeVar("T", bound=BaseClient) + + +def RequestContextClient(client: T) -> T: + return _RequestContextClient(client) # noqa + + +# Used for the aws_session, aws_client_factory and aws_client pytest fixtures +# Supports test executions against both LocalStack and production AWS + +# TODO: Add the ability to use config profiles for primary and secondary clients +# See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-a-configuration-file + + +def base_aws_session() -> boto3.Session: + # When running against AWS, initial credentials must be read from environment or config file + if is_aws_cloud(): + return boto3.Session() + + # Otherwise, when running against LS, use primary test credentials to start with + # This set here in the 
session so that both `aws_client` and `aws_client_factory` can work without explicit creds. + session = boto3.Session( + aws_access_key_id=TEST_AWS_ACCESS_KEY_ID, + aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY, + ) + # make sure we consider our custom data paths for legacy specs (like SQS query protocol) + session._loader.search_paths.insert(0, LOCALSTACK_BUILTIN_DATA_PATH) + return session + + +def secondary_aws_session() -> boto3.Session: + if is_aws_cloud() and SECONDARY_TEST_AWS_PROFILE: + return boto3.Session(profile_name=SECONDARY_TEST_AWS_PROFILE) + + # Otherwise, when running against LS or AWS, but have no profile set for the secondary account, + # we use secondary test credentials to initialize the session. + # This set here in the session so that both `secondary_aws_client` and `secondary_aws_client_factory` can work + # without explicit creds. + session = boto3.Session( + aws_access_key_id=SECONDARY_TEST_AWS_ACCESS_KEY_ID, + aws_secret_access_key=SECONDARY_TEST_AWS_SECRET_ACCESS_KEY, + aws_session_token=SECONDARY_TEST_AWS_SESSION_TOKEN, + ) + if not is_aws_cloud(): + # make sure we consider our custom data paths for legacy specs (like SQS query protocol), only if we run against + # LocalStack + session._loader.search_paths.append(LOCALSTACK_BUILTIN_DATA_PATH) + return session + + +def base_aws_client_factory(session: boto3.Session) -> ClientFactory: + config = None + if is_env_true("TEST_DISABLE_RETRIES_AND_TIMEOUTS"): + config = botocore.config.Config( + connect_timeout=1_000, + read_timeout=1_000, + retries={"total_max_attempts": 1}, + ) + + if is_aws_cloud(): + return ExternalAwsClientFactory(session=session, config=config) + else: + if not config: + config = botocore.config.Config() + + # Prevent this fixture from using the region configured in system config + config = config.merge(botocore.config.Config(region_name=TEST_AWS_REGION_NAME)) + return ExternalClientFactory(session=session, config=config) + + +def base_testing_aws_client(client_factory: ClientFactory) -> ServiceLevelClientFactory: + # Primary test credentials are already set in the boto3 session, so they're not set here again + return client_factory() diff --git a/localstack-core/localstack/testing/config.py b/localstack-core/localstack/testing/config.py new file mode 100644 index 0000000000000..f6191e9faa977 --- /dev/null +++ b/localstack-core/localstack/testing/config.py @@ -0,0 +1,20 @@ +import os + +from localstack.constants import DEFAULT_AWS_ACCOUNT_ID + +# Credentials used in the test suite +# These can be overridden if the tests are being run against AWS +TEST_AWS_ACCOUNT_ID = os.getenv("TEST_AWS_ACCOUNT_ID") or DEFAULT_AWS_ACCOUNT_ID +# If a structured access key ID is used, it must correspond to the account ID +TEST_AWS_ACCESS_KEY_ID = os.getenv("TEST_AWS_ACCESS_KEY_ID") or "test" +TEST_AWS_SECRET_ACCESS_KEY = os.getenv("TEST_AWS_SECRET_ACCESS_KEY") or "test" +TEST_AWS_REGION_NAME = os.getenv("TEST_AWS_REGION_NAME") or "us-east-1" + +# Secondary test AWS profile - only used for testing against AWS +SECONDARY_TEST_AWS_PROFILE = os.getenv("SECONDARY_TEST_AWS_PROFILE") +# Additional credentials used in the test suite (when running cross-account tests) +SECONDARY_TEST_AWS_ACCOUNT_ID = os.getenv("SECONDARY_TEST_AWS_ACCOUNT_ID") or "000000000002" +SECONDARY_TEST_AWS_ACCESS_KEY_ID = os.getenv("SECONDARY_TEST_AWS_ACCESS_KEY_ID") or "000000000002" +SECONDARY_TEST_AWS_SECRET_ACCESS_KEY = os.getenv("SECONDARY_TEST_AWS_SECRET_ACCESS_KEY") or "test2" +SECONDARY_TEST_AWS_SESSION_TOKEN = 
os.getenv("SECONDARY_TEST_AWS_SESSION_TOKEN") +SECONDARY_TEST_AWS_REGION_NAME = os.getenv("SECONDARY_TEST_AWS_REGION_NAME") or "ap-southeast-1" diff --git a/localstack-core/localstack/testing/pytest/__init__.py b/localstack-core/localstack/testing/pytest/__init__.py new file mode 100644 index 0000000000000..a92cec4e13581 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/__init__.py @@ -0,0 +1,3 @@ +from localstack.testing.pytest.marking import Markers + +markers = Markers diff --git a/localstack-core/localstack/testing/pytest/bootstrap.py b/localstack-core/localstack/testing/pytest/bootstrap.py new file mode 100644 index 0000000000000..f9ead8e2d2fc0 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/bootstrap.py @@ -0,0 +1,8 @@ +import pytest + +from localstack import config + + +@pytest.fixture(scope="session", autouse=True) +def setup_host_config_dirs(): + config.dirs.mkdirs() diff --git a/localstack-core/localstack/testing/pytest/cloudformation/__init__.py b/localstack-core/localstack/testing/pytest/cloudformation/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/testing/pytest/cloudformation/fixtures.py b/localstack-core/localstack/testing/pytest/cloudformation/fixtures.py new file mode 100644 index 0000000000000..99ce1673259a5 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/cloudformation/fixtures.py @@ -0,0 +1,181 @@ +import json +from collections import defaultdict +from typing import Callable + +import pytest + +from localstack.aws.api.cloudformation import DescribeChangeSetOutput, StackEvent +from localstack.aws.connect import ServiceLevelClientFactory +from localstack.utils.functions import call_safe +from localstack.utils.strings import short_uid + +PerResourceStackEvents = dict[str, list[StackEvent]] + + +@pytest.fixture +def capture_per_resource_events( + aws_client: ServiceLevelClientFactory, +) -> Callable[[str], PerResourceStackEvents]: + def capture(stack_name: str) -> PerResourceStackEvents: + events = aws_client.cloudformation.describe_stack_events(StackName=stack_name)[ + "StackEvents" + ] + per_resource_events = defaultdict(list) + for event in events: + if logical_resource_id := event.get("LogicalResourceId"): + per_resource_events[logical_resource_id].append(event) + return per_resource_events + + return capture + + +def _normalise_describe_change_set_output(value: DescribeChangeSetOutput) -> None: + value.get("Changes", list()).sort( + key=lambda change: change.get("ResourceChange", dict()).get("LogicalResourceId", str()) + ) + + +@pytest.fixture +def capture_update_process(aws_client_no_retry, cleanups, capture_per_resource_events): + """ + Fixture to deploy a new stack (via creating and executing a change set), then updating the + stack with a second template (via creating and executing a change set). 
+ """ + + stack_name = f"stack-{short_uid()}" + change_set_name = f"cs-{short_uid()}" + + def inner( + snapshot, t1: dict | str, t2: dict | str, p1: dict | None = None, p2: dict | None = None + ): + snapshot.add_transformer(snapshot.transform.cloudformation_api()) + + if isinstance(t1, dict): + t1 = json.dumps(t1) + elif isinstance(t1, str): + with open(t1) as infile: + t1 = infile.read() + if isinstance(t2, dict): + t2 = json.dumps(t2) + elif isinstance(t2, str): + with open(t2) as infile: + t2 = infile.read() + + p1 = p1 or {} + p2 = p2 or {} + + # deploy original stack + change_set_details = aws_client_no_retry.cloudformation.create_change_set( + StackName=stack_name, + ChangeSetName=change_set_name, + TemplateBody=t1, + ChangeSetType="CREATE", + Parameters=[{"ParameterKey": k, "ParameterValue": v} for (k, v) in p1.items()], + ) + snapshot.match("create-change-set-1", change_set_details) + stack_id = change_set_details["StackId"] + change_set_id = change_set_details["Id"] + aws_client_no_retry.cloudformation.get_waiter("change_set_create_complete").wait( + ChangeSetName=change_set_id + ) + cleanups.append( + lambda: call_safe( + aws_client_no_retry.cloudformation.delete_change_set, + kwargs=dict(ChangeSetName=change_set_id), + ) + ) + + describe_change_set_with_prop_values = ( + aws_client_no_retry.cloudformation.describe_change_set( + ChangeSetName=change_set_id, IncludePropertyValues=True + ) + ) + _normalise_describe_change_set_output(describe_change_set_with_prop_values) + snapshot.match("describe-change-set-1-prop-values", describe_change_set_with_prop_values) + + describe_change_set_without_prop_values = ( + aws_client_no_retry.cloudformation.describe_change_set( + ChangeSetName=change_set_id, IncludePropertyValues=False + ) + ) + _normalise_describe_change_set_output(describe_change_set_without_prop_values) + snapshot.match("describe-change-set-1", describe_change_set_without_prop_values) + + execute_results = aws_client_no_retry.cloudformation.execute_change_set( + ChangeSetName=change_set_id + ) + snapshot.match("execute-change-set-1", execute_results) + aws_client_no_retry.cloudformation.get_waiter("stack_create_complete").wait( + StackName=stack_id + ) + + # ensure stack deletion + cleanups.append( + lambda: call_safe( + aws_client_no_retry.cloudformation.delete_stack, kwargs=dict(StackName=stack_id) + ) + ) + + describe = aws_client_no_retry.cloudformation.describe_stacks(StackName=stack_id)["Stacks"][ + 0 + ] + snapshot.match("post-create-1-describe", describe) + + # update stack + change_set_details = aws_client_no_retry.cloudformation.create_change_set( + StackName=stack_name, + ChangeSetName=change_set_name, + TemplateBody=t2, + ChangeSetType="UPDATE", + Parameters=[{"ParameterKey": k, "ParameterValue": v} for (k, v) in p2.items()], + ) + snapshot.match("create-change-set-2", change_set_details) + stack_id = change_set_details["StackId"] + change_set_id = change_set_details["Id"] + aws_client_no_retry.cloudformation.get_waiter("change_set_create_complete").wait( + ChangeSetName=change_set_id + ) + + describe_change_set_with_prop_values = ( + aws_client_no_retry.cloudformation.describe_change_set( + ChangeSetName=change_set_id, IncludePropertyValues=True + ) + ) + _normalise_describe_change_set_output(describe_change_set_with_prop_values) + snapshot.match("describe-change-set-2-prop-values", describe_change_set_with_prop_values) + + describe_change_set_without_prop_values = ( + aws_client_no_retry.cloudformation.describe_change_set( + ChangeSetName=change_set_id, 
IncludePropertyValues=False + ) + ) + _normalise_describe_change_set_output(describe_change_set_without_prop_values) + snapshot.match("describe-change-set-2", describe_change_set_without_prop_values) + + execute_results = aws_client_no_retry.cloudformation.execute_change_set( + ChangeSetName=change_set_id + ) + snapshot.match("execute-change-set-2", execute_results) + aws_client_no_retry.cloudformation.get_waiter("stack_update_complete").wait( + StackName=stack_id + ) + + describe = aws_client_no_retry.cloudformation.describe_stacks(StackName=stack_id)["Stacks"][ + 0 + ] + snapshot.match("post-create-2-describe", describe) + + events = capture_per_resource_events(stack_name) + snapshot.match("per-resource-events", events) + + # delete stack + aws_client_no_retry.cloudformation.delete_stack(StackName=stack_id) + aws_client_no_retry.cloudformation.get_waiter("stack_delete_complete").wait( + StackName=stack_id + ) + describe = aws_client_no_retry.cloudformation.describe_stacks(StackName=stack_id)["Stacks"][ + 0 + ] + snapshot.match("delete-describe", describe) + + yield inner diff --git a/localstack-core/localstack/testing/pytest/container.py b/localstack-core/localstack/testing/pytest/container.py new file mode 100644 index 0000000000000..fd904f6a86233 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/container.py @@ -0,0 +1,278 @@ +import logging +import os +import shlex +import threading +from typing import Callable, Generator, List, Optional + +import pytest + +from localstack import constants +from localstack.utils.bootstrap import Container, RunningContainer, get_docker_image_to_start +from localstack.utils.container_utils.container_client import ( + CancellableStream, + ContainerConfiguration, + ContainerConfigurator, + NoSuchNetwork, + PortMappings, + VolumeMappings, +) +from localstack.utils.docker_utils import DOCKER_CLIENT +from localstack.utils.strings import short_uid +from localstack.utils.sync import poll_condition + +LOG = logging.getLogger(__name__) + +ENV_TEST_CONTAINER_MOUNT_SOURCES = "TEST_CONTAINER_MOUNT_SOURCES" +"""Environment variable used to indicate that we should mount LocalStack source files into the container.""" + +ENV_TEST_CONTAINER_MOUNT_DEPENDENCIES = "TEST_CONTAINER_MOUNT_DEPENDENCIES" +"""Environment variable used to indicate that we should mount dependencies into the container.""" + + +class ContainerFactory: + def __init__(self): + self._containers: List[Container] = [] + + def __call__( + self, + # convenience properties + pro: bool = False, + publish: Optional[List[int]] = None, + configurators: Optional[List[ContainerConfigurator]] = None, + # ContainerConfig properties + **kwargs, + ) -> Container: + port_configuration = PortMappings() + if publish: + for port in publish: + port_configuration.add(port) + + container_configuration = ContainerConfiguration( + image_name=get_docker_image_to_start(), + name=None, + volumes=VolumeMappings(), + remove=True, + ports=port_configuration, + entrypoint=os.environ.get("ENTRYPOINT"), + command=shlex.split(os.environ.get("CMD", "")) or None, + env_vars={}, + ) + + # handle the convenience options + if pro: + container_configuration.env_vars["GATEWAY_LISTEN"] = "0.0.0.0:4566,0.0.0.0:443" + container_configuration.env_vars["LOCALSTACK_AUTH_TOKEN"] = os.environ.get( + "LOCALSTACK_AUTH_TOKEN", "test" + ) + + # override values from kwargs + for key, value in kwargs.items(): + setattr(container_configuration, key, value) + + container = Container(container_configuration) + + if configurators: + 
container.configure(configurators) + + # track the container so we can remove it later + self._containers.append(container) + return container + + def remove_all_containers(self): + failures = [] + for container in self._containers: + if not container.running_container: + # container is not running + continue + + try: + container.running_container.shutdown() + except Exception as e: + failures.append((container, e)) + + if failures: + for container, ex in failures: + LOG.error( + "Failed to remove container %s", + container.running_container.id, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + + +class LogStreamFactory: + def __init__(self): + self.streams: list[CancellableStream] = [] + self.stop_events: list[threading.Event] = [] + self.mutex = threading.RLock() + + def __call__(self, container: Container, callback: Callable[[str], None] = None) -> None: + """ + Create and start a new log stream thread. The thread starts immediately and waits for the container + to move into a running state. Once it's running, it will attempt to stream the container logs. If + the container is already closed by then, an exception will be raised in the thread and it will + terminate. + + :param container: the container to stream the logs from + :param callback: an optional callback called on each log line. + """ + stop = threading.Event() + self.stop_events.append(stop) + + def _can_continue(): + if stop.is_set(): + return True + if not container.running_container: + return False + return container.running_container.is_running() + + def _run_stream_container_logs(): + # wait until either the container is running or the test was terminated + poll_condition(_can_continue) + with self.mutex: + if stop.is_set(): + return + + stream = container.running_container.stream_logs() + self.streams.append(stream) + + # create a default logger + if callback is None: + log = logging.getLogger(f"container.{container.running_container.name}") + log.setLevel(level=logging.DEBUG) + _callback = log.debug + else: + _callback = callback + + for line in stream: + _callback(line.decode("utf-8").rstrip(os.linesep)) + + t = threading.Thread( + target=_run_stream_container_logs, + name=threading._newname("log-stream-%d"), + daemon=True, + ) + t.start() + + def close(self): + with self.mutex: + for _event in self.stop_events: + _event.set() + + for _stream in self.streams: + _stream.close() + + +@pytest.fixture +def container_factory() -> Generator[ContainerFactory, None, None]: + factory = ContainerFactory() + yield factory + factory.remove_all_containers() + + +@pytest.fixture(scope="session") +def wait_for_localstack_ready(): + def _wait_for(container: RunningContainer, timeout: Optional[float] = None): + container.wait_until_ready(timeout) + + poll_condition( + lambda: constants.READY_MARKER_OUTPUT in container.get_logs().splitlines(), + timeout=timeout, + ) + + return _wait_for + + +@pytest.fixture +def ensure_network(): + networks = [] + + def _ensure_network(name: str): + try: + DOCKER_CLIENT.inspect_network(name) + except NoSuchNetwork: + DOCKER_CLIENT.create_network(name) + networks.append(name) + + yield _ensure_network + + for network_name in networks: + # detach attached containers + details = DOCKER_CLIENT.inspect_network(network_name) + for container_id in details.get("Containers", []): + DOCKER_CLIENT.disconnect_container_from_network( + network_name=network_name, container_name_or_id=container_id + ) + DOCKER_CLIENT.delete_network(network_name) + + +@pytest.fixture +def docker_network(ensure_network): + network_name 
= f"net-{short_uid()}" + ensure_network(network_name) + return network_name + + +@pytest.fixture +def dns_query_from_container(container_factory: ContainerFactory, monkeypatch): + """ + Run the LocalStack container after installing dig + """ + containers: list[RunningContainer] = [] + + def query(name: str, ip_address: str, port: int = 53, **kwargs) -> tuple[bytes, bytes]: + container = container_factory( + image_name="localstack/localstack", + command=["infinity"], + entrypoint="sleep", + **kwargs, + ) + running_container = container.start() + containers.append(running_container) + + command = [ + "bash", + "-c", + f"apt-get install -y --no-install-recommends dnsutils >/dev/null && dig +short @{ip_address} -p {port} {name}", + ] + # The CmdDockerClient has its output set to a logfile. We must patch + # the client to ensure the output of the command goes to stdout. We use + # a monkeypatch.context here to make sure the scope of the patching is + # minimal. + with monkeypatch.context() as m: + m.setattr(running_container.container_client, "default_run_outfile", None) + stdout, stderr = running_container.exec_in_container(command=command) + return stdout, stderr + + yield query + + for container in containers: + container.shutdown() + + +@pytest.fixture +def stream_container_logs() -> Generator[LogStreamFactory, None, None]: + """ + Factory fixture for streaming logs of containers in the background. Invoke as follows:: + + def test_container(container_factory, stream_container_logs): + container: Container = container_factory(...) + + with container.start() as running_container: + stream_container_logs(container) + + This will start a background thread that streams the container logs to a python logger + ``containers.``. You can find it in the logs as:: + + 2023-09-03T18:49:06.236 DEBUG --- [log-stream-1] container.localstack-5a4c3678 : foobar + 2023-09-03T18:49:06.236 DEBUG --- [log-stream-1] container.localstack-5a4c3678 : hello world + + The function ``stream_container_logs`` also accepts a ``callback`` argument that can be used to + overwrite the default logging mechanism. 
For example, to print every log line directly to stdout, call:: + + stream_container_logs(container, callback=print) + + :return: a factory to start log streams + """ + factory = LogStreamFactory() + yield factory + factory.close() diff --git a/localstack/testing/pytest/detect_thread_leakage.py b/localstack-core/localstack/testing/pytest/detect_thread_leakage.py similarity index 89% rename from localstack/testing/pytest/detect_thread_leakage.py rename to localstack-core/localstack/testing/pytest/detect_thread_leakage.py index a2a3d380a9ce1..b36ffc7260e63 100644 --- a/localstack/testing/pytest/detect_thread_leakage.py +++ b/localstack-core/localstack/testing/pytest/detect_thread_leakage.py @@ -23,8 +23,10 @@ def pytest_unconfigure(config): "line_no": frame.f_code.co_firstlineno, "frame_traceback": traceback.format_stack(frame), "thread_name": thread.name, - "thread_target": repr(thread._target), - "thread_target_file": inspect.getfile(thread._target) if thread._target else None, + "thread_target": repr(thread._target) if hasattr(thread, "_target") else None, + "thread_target_file": inspect.getfile(thread._target) + if hasattr(thread, "_target") and thread._target + else None, } for frame, thread in thread_frames if frame diff --git a/localstack/testing/pytest/filters.py b/localstack-core/localstack/testing/pytest/filters.py similarity index 81% rename from localstack/testing/pytest/filters.py rename to localstack-core/localstack/testing/pytest/filters.py index 9c005a1c014c3..2e7f0a8d0a780 100644 --- a/localstack/testing/pytest/filters.py +++ b/localstack-core/localstack/testing/pytest/filters.py @@ -1,3 +1,5 @@ +from typing import List + import pytest from _pytest.config import Config, PytestPluginManager from _pytest.config.argparsing import Parser @@ -11,12 +13,12 @@ def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager): @pytest.hookimpl -def pytest_collection_modifyitems(session: Session, config: Config, items: list[Item]): - ff = config.getoption("--filter-fixtures") - if ff: +def pytest_collection_modifyitems(session: Session, config: Config, items: List[Item]): + filter_fixtures_option = config.getoption("--filter-fixtures") + if filter_fixtures_option: # TODO: add more sophisticated combinations (=> like pytest -m and -k) # currently this is implemented in a way that any overlap between the fixture names will lead to selection - filter_fixtures = set(ff.split(",")) + filter_fixtures = set(filter_fixtures_option.split(",")) selected = [] deselected = [] for item in items: diff --git a/localstack/testing/pytest/find_orphaned_snapshots.py b/localstack-core/localstack/testing/pytest/find_orphaned_snapshots.py similarity index 99% rename from localstack/testing/pytest/find_orphaned_snapshots.py rename to localstack-core/localstack/testing/pytest/find_orphaned_snapshots.py index 98322d604ccef..349f70edfc1cc 100644 --- a/localstack/testing/pytest/find_orphaned_snapshots.py +++ b/localstack-core/localstack/testing/pytest/find_orphaned_snapshots.py @@ -21,7 +21,6 @@ def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager): @pytest.hookimpl def pytest_collection_modifyitems(session: Session, config: Config, items: list[Item]): - # for each file load the corresponding snapshot file ff = config.getoption("--filter-fixtures") diff --git a/localstack/testing/pytest/fixture_conflicts.py b/localstack-core/localstack/testing/pytest/fixture_conflicts.py similarity index 98% rename from localstack/testing/pytest/fixture_conflicts.py rename to 
localstack-core/localstack/testing/pytest/fixture_conflicts.py index 7a5533318d521..8ecb880d28110 100644 --- a/localstack/testing/pytest/fixture_conflicts.py +++ b/localstack-core/localstack/testing/pytest/fixture_conflicts.py @@ -32,6 +32,6 @@ def pytest_runtest_setup(item: Item): # the fixture names only include a single entry even when multiple definitions are found # so we need to check the internal name2fixturedefs dict instead defs = item._fixtureinfo.name2fixturedefs - multi_defs = [k for k, v in defs.items() if len(v) > 1] + multi_defs = [k for k, v in defs.items() if len(v) > 1 and "snapshot" not in k] if multi_defs: pytest.exit(f"Aborting. Detected multiple defs for fixtures: {multi_defs}") diff --git a/localstack-core/localstack/testing/pytest/fixtures.py b/localstack-core/localstack/testing/pytest/fixtures.py new file mode 100644 index 0000000000000..5c282ea8fcbc5 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/fixtures.py @@ -0,0 +1,2639 @@ +import contextlib +import dataclasses +import json +import logging +import os +import re +import textwrap +import time +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple + +import botocore.auth +import botocore.config +import botocore.credentials +import botocore.session +import pytest +from _pytest.config import Config +from _pytest.nodes import Item +from botocore.exceptions import ClientError +from botocore.regions import EndpointResolver +from pytest_httpserver import HTTPServer +from werkzeug import Request, Response + +from localstack import config +from localstack.aws.api.ec2 import CreateSecurityGroupRequest +from localstack.aws.connect import ServiceLevelClientFactory +from localstack.services.stores import ( + AccountRegionBundle, + BaseStore, + CrossAccountAttribute, + CrossRegionAttribute, + LocalAttribute, +) +from localstack.testing.aws.cloudformation_utils import load_template_file, render_template +from localstack.testing.aws.util import get_lambda_logs, is_aws_cloud +from localstack.testing.config import ( + SECONDARY_TEST_AWS_ACCOUNT_ID, + SECONDARY_TEST_AWS_REGION_NAME, + TEST_AWS_ACCOUNT_ID, + TEST_AWS_REGION_NAME, +) +from localstack.utils import testutil +from localstack.utils.aws.arns import get_partition +from localstack.utils.aws.client import SigningHttpClient +from localstack.utils.aws.resources import create_dynamodb_table +from localstack.utils.bootstrap import is_api_enabled +from localstack.utils.collections import ensure_list, select_from_typed_dict +from localstack.utils.functions import call_safe, run_safe +from localstack.utils.http import safe_requests as requests +from localstack.utils.id_generator import ResourceIdentifier, localstack_id_manager +from localstack.utils.json import CustomEncoder, json_safe +from localstack.utils.net import wait_for_port_open +from localstack.utils.strings import short_uid, to_str +from localstack.utils.sync import ShortCircuitWaitException, poll_condition, retry, wait_until + +LOG = logging.getLogger(__name__) + +# URL of public HTTP echo server, used primarily for AWS parity/snapshot testing +PUBLIC_HTTP_ECHO_SERVER_URL = "http://httpbin.org" + +WAITER_CHANGE_SET_CREATE_COMPLETE = "change_set_create_complete" +WAITER_STACK_CREATE_COMPLETE = "stack_create_complete" +WAITER_STACK_UPDATE_COMPLETE = "stack_update_complete" +WAITER_STACK_DELETE_COMPLETE = "stack_delete_complete" + + +if TYPE_CHECKING: + from mypy_boto3_sqs import SQSClient + from mypy_boto3_sqs.type_defs import MessageTypeDef + + +@pytest.fixture(scope="session") +def 
aws_client_no_retry(aws_client_factory):
+    """
+    This fixture can be used to obtain Boto clients with disabled retries for testing.
+    botocore docs: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#configuring-a-retry-mode
+
+    Use this client when testing exceptions (e.g., with pytest.raises(...)) or expected errors (e.g., status code 500)
+    to avoid unnecessary retries and mitigate test flakiness if the tested error condition is time-bound.
+
+    This client is needed for the following errors, exceptions, and HTTP status codes defined by the legacy retry mode:
+    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#legacy-retry-mode
+    General socket/connection errors:
+    * ConnectionError
+    * ConnectionClosedError
+    * ReadTimeoutError
+    * EndpointConnectionError
+
+    Service-side throttling/limit errors and exceptions:
+    * Throttling
+    * ThrottlingException
+    * ThrottledException
+    * RequestThrottledException
+    * ProvisionedThroughputExceededException
+
+    HTTP status codes: 429, 500, 502, 503, 504, and 509
+
+    Hence, this client is not needed for a `ResourceNotFound` error (but it does no harm).
+    """
+    no_retry_config = botocore.config.Config(retries={"max_attempts": 1})
+    return aws_client_factory(config=no_retry_config)
+
+
+@pytest.fixture(scope="class")
+def aws_http_client_factory(aws_session):
+    """
+    Returns a factory for creating new ``SigningHttpClient`` instances using a configurable botocore request signer.
+    The default signer is a SigV4QueryAuth. The credentials are extracted from the ``aws_session`` fixture that
+    transparently uses your global profile when TEST_TARGET=AWS_CLOUD, or test credentials when running against
+    LocalStack.
+
+    Example invocations
+
+    client = aws_http_client_factory("sqs")
+    client.get("http://localhost:4566/000000000000/my-queue")
+
+    or
+    client = aws_http_client_factory("dynamodb", signer_factory=SigV4Auth)
+    client.post("...")
+    """
+
+    def factory(
+        service: str,
+        region: str = None,
+        signer_factory: Callable[
+            [botocore.credentials.Credentials, str, str], botocore.auth.BaseSigner
+        ] = botocore.auth.SigV4QueryAuth,
+        endpoint_url: str = None,
+        aws_access_key_id: str = None,
+        aws_secret_access_key: str = None,
+    ):
+        region = region or TEST_AWS_REGION_NAME
+
+        if aws_access_key_id or aws_secret_access_key:
+            credentials = botocore.credentials.Credentials(
+                access_key=aws_access_key_id, secret_key=aws_secret_access_key
+            )
+        else:
+            credentials = aws_session.get_credentials()
+
+        creds = credentials.get_frozen_credentials()
+
+        if not endpoint_url:
+            if is_aws_cloud():
+                # FIXME: this is a bit raw.
we should probably re-use boto in a better way + resolver: EndpointResolver = aws_session._session.get_component("endpoint_resolver") + endpoint_url = "https://" + resolver.construct_endpoint(service, region)["hostname"] + else: + endpoint_url = config.internal_service_url() + + return SigningHttpClient(signer_factory(creds, service, region), endpoint_url=endpoint_url) + + return factory + + +@pytest.fixture(scope="class") +def s3_vhost_client(aws_client_factory, region_name): + return aws_client_factory( + config=botocore.config.Config(s3={"addressing_style": "virtual"}), region_name=region_name + ).s3 + + +@pytest.fixture +def dynamodb_wait_for_table_active(aws_client): + def wait_for_table_active(table_name: str, client=None): + def wait(): + return (client or aws_client.dynamodb).describe_table(TableName=table_name)["Table"][ + "TableStatus" + ] == "ACTIVE" + + poll_condition(wait, timeout=30) + + return wait_for_table_active + + +@pytest.fixture +def dynamodb_create_table_with_parameters(dynamodb_wait_for_table_active, aws_client): + tables = [] + + def factory(**kwargs): + if "TableName" not in kwargs: + kwargs["TableName"] = f"test-table-{short_uid()}" + + tables.append(kwargs["TableName"]) + response = aws_client.dynamodb.create_table(**kwargs) + dynamodb_wait_for_table_active(kwargs["TableName"]) + return response + + yield factory + + # cleanup + for table in tables: + try: + # table has to be in ACTIVE state before deletion + dynamodb_wait_for_table_active(table) + aws_client.dynamodb.delete_table(TableName=table) + except Exception as e: + LOG.debug("error cleaning up table %s: %s", table, e) + + +@pytest.fixture +def dynamodb_create_table(dynamodb_wait_for_table_active, aws_client): + # beware, this swallows exception in create_dynamodb_table utility function + tables = [] + + def factory(**kwargs): + kwargs["client"] = aws_client.dynamodb + if "table_name" not in kwargs: + kwargs["table_name"] = f"test-table-{short_uid()}" + if "partition_key" not in kwargs: + kwargs["partition_key"] = "id" + + tables.append(kwargs["table_name"]) + + return create_dynamodb_table(**kwargs) + + yield factory + + # cleanup + for table in tables: + try: + # table has to be in ACTIVE state before deletion + dynamodb_wait_for_table_active(table) + aws_client.dynamodb.delete_table(TableName=table) + except Exception as e: + LOG.debug("error cleaning up table %s: %s", table, e) + + +@pytest.fixture +def s3_create_bucket(s3_empty_bucket, aws_client): + buckets = [] + + def factory(**kwargs) -> str: + if "Bucket" not in kwargs: + kwargs["Bucket"] = "test-bucket-%s" % short_uid() + + if ( + "CreateBucketConfiguration" not in kwargs + and aws_client.s3.meta.region_name != "us-east-1" + ): + kwargs["CreateBucketConfiguration"] = { + "LocationConstraint": aws_client.s3.meta.region_name + } + + aws_client.s3.create_bucket(**kwargs) + buckets.append(kwargs["Bucket"]) + return kwargs["Bucket"] + + yield factory + + # cleanup + for bucket in buckets: + try: + s3_empty_bucket(bucket) + aws_client.s3.delete_bucket(Bucket=bucket) + except Exception as e: + LOG.debug("error cleaning up bucket %s: %s", bucket, e) + + +@pytest.fixture +def s3_create_bucket_with_client(s3_empty_bucket, aws_client): + buckets = [] + + def factory(s3_client, **kwargs) -> str: + if "Bucket" not in kwargs: + kwargs["Bucket"] = f"test-bucket-{short_uid()}" + + response = s3_client.create_bucket(**kwargs) + buckets.append(kwargs["Bucket"]) + return response + + yield factory + + # cleanup + for bucket in buckets: + try: + 
s3_empty_bucket(bucket) + aws_client.s3.delete_bucket(Bucket=bucket) + except Exception as e: + LOG.debug("error cleaning up bucket %s: %s", bucket, e) + + +@pytest.fixture +def s3_bucket(s3_create_bucket, aws_client) -> str: + region = aws_client.s3.meta.region_name + kwargs = {} + if region != "us-east-1": + kwargs["CreateBucketConfiguration"] = {"LocationConstraint": region} + return s3_create_bucket(**kwargs) + + +@pytest.fixture +def s3_empty_bucket(aws_client): + """ + Returns a factory that given a bucket name, deletes all objects and deletes all object versions + """ + + # Boto resource would make this a straightforward task, but our internal client does not support Boto resource + # FIXME: this won't work when bucket has more than 1000 objects + def factory(bucket_name: str): + kwargs = {} + try: + aws_client.s3.get_object_lock_configuration(Bucket=bucket_name) + kwargs["BypassGovernanceRetention"] = True + except ClientError: + pass + + response = aws_client.s3.list_objects_v2(Bucket=bucket_name) + objects = [{"Key": obj["Key"]} for obj in response.get("Contents", [])] + if objects: + aws_client.s3.delete_objects( + Bucket=bucket_name, + Delete={"Objects": objects}, + **kwargs, + ) + + response = aws_client.s3.list_object_versions(Bucket=bucket_name) + versions = response.get("Versions", []) + versions.extend(response.get("DeleteMarkers", [])) + + object_versions = [{"Key": obj["Key"], "VersionId": obj["VersionId"]} for obj in versions] + if object_versions: + aws_client.s3.delete_objects( + Bucket=bucket_name, + Delete={"Objects": object_versions}, + **kwargs, + ) + + yield factory + + +@pytest.fixture +def sqs_create_queue(aws_client): + queue_urls = [] + + def factory(**kwargs): + if "QueueName" not in kwargs: + kwargs["QueueName"] = "test-queue-%s" % short_uid() + + response = aws_client.sqs.create_queue(**kwargs) + url = response["QueueUrl"] + queue_urls.append(url) + + return url + + yield factory + + # cleanup + for queue_url in queue_urls: + try: + aws_client.sqs.delete_queue(QueueUrl=queue_url) + except Exception as e: + LOG.debug("error cleaning up queue %s: %s", queue_url, e) + + +@pytest.fixture +def sqs_receive_messages_delete(aws_client): + def factory( + queue_url: str, + expected_messages: Optional[int] = None, + wait_time: Optional[int] = 5, + ): + response = aws_client.sqs.receive_message( + QueueUrl=queue_url, + MessageAttributeNames=["All"], + VisibilityTimeout=0, + WaitTimeSeconds=wait_time, + ) + messages = [] + for m in response["Messages"]: + message = json.loads(to_str(m["Body"])) + messages.append(message) + + if expected_messages is not None: + assert len(messages) == expected_messages + + for message in response["Messages"]: + aws_client.sqs.delete_message( + QueueUrl=queue_url, ReceiptHandle=message["ReceiptHandle"] + ) + + return messages + + return factory + + +@pytest.fixture +def sqs_receive_num_messages(sqs_receive_messages_delete): + def factory(queue_url: str, expected_messages: int, max_iterations: int = 3): + all_messages = [] + for _ in range(max_iterations): + try: + messages = sqs_receive_messages_delete(queue_url, wait_time=5) + except KeyError: + # there were no messages + continue + all_messages.extend(messages) + + if len(all_messages) >= expected_messages: + return all_messages[:expected_messages] + + raise AssertionError(f"max iterations reached with {len(all_messages)} messages received") + + return factory + + +@pytest.fixture +def sqs_collect_messages(aws_client): + """Collects SQS messages from a given queue_url and deletes them 
by default.
+    Example usage:
+        messages = sqs_collect_messages(
+            my_queue_url,
+            expected=2,
+            timeout=10,
+            attribute_names=["All"],
+            message_attribute_names=["All"],
+        )
+    """
+
+    def factory(
+        queue_url: str,
+        expected: int,
+        timeout: int,
+        delete: bool = True,
+        attribute_names: list[str] = None,
+        message_attribute_names: list[str] = None,
+        max_number_of_messages: int = 1,
+        wait_time_seconds: int = 5,
+        sqs_client: "SQSClient | None" = None,
+    ) -> list["MessageTypeDef"]:
+        sqs_client = sqs_client or aws_client.sqs
+        collected = []
+
+        def _receive():
+            response = sqs_client.receive_message(
+                QueueUrl=queue_url,
+                # Maximum is 20 seconds. Performs long polling.
+                WaitTimeSeconds=wait_time_seconds,
+                # Maximum 10 messages
+                MaxNumberOfMessages=max_number_of_messages,
+                AttributeNames=attribute_names or [],
+                MessageAttributeNames=message_attribute_names or [],
+            )
+
+            if messages := response.get("Messages"):
+                collected.extend(messages)
+
+                if delete:
+                    for m in messages:
+                        sqs_client.delete_message(
+                            QueueUrl=queue_url, ReceiptHandle=m["ReceiptHandle"]
+                        )
+
+            return len(collected) >= expected
+
+        if not poll_condition(_receive, timeout=timeout):
+            raise TimeoutError(
+                f"gave up waiting for messages (expected={expected}, actual={len(collected)})"
+            )
+
+        return collected
+
+    yield factory
+
+
+@pytest.fixture
+def sqs_queue(sqs_create_queue):
+    return sqs_create_queue()
+
+
+@pytest.fixture
+def sqs_get_queue_arn(aws_client) -> Callable:
+    def _get_queue_arn(queue_url: str) -> str:
+        return aws_client.sqs.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["QueueArn"])[
+            "Attributes"
+        ]["QueueArn"]
+
+    return _get_queue_arn
+
+
+@pytest.fixture
+def sqs_queue_exists(aws_client):
+    def _queue_exists(queue_url: str) -> bool:
+        """
+        Checks whether a queue with the given queue URL exists.
+ :param queue_url: the queue URL + :return: true if the queue exists, false otherwise + """ + try: + result = aws_client.sqs.get_queue_url(QueueName=queue_url.split("/")[-1]) + return result.get("QueueUrl") == queue_url + except ClientError as e: + if "NonExistentQueue" in e.response["Error"]["Code"]: + return False + raise + + yield _queue_exists + + +@pytest.fixture +def sns_create_topic(aws_client): + topic_arns = [] + + def _create_topic(**kwargs): + if "Name" not in kwargs: + kwargs["Name"] = "test-topic-%s" % short_uid() + response = aws_client.sns.create_topic(**kwargs) + topic_arns.append(response["TopicArn"]) + return response + + yield _create_topic + + for topic_arn in topic_arns: + try: + aws_client.sns.delete_topic(TopicArn=topic_arn) + except Exception as e: + LOG.debug("error cleaning up topic %s: %s", topic_arn, e) + + +@pytest.fixture +def sns_wait_for_topic_delete(aws_client): + def wait_for_topic_delete(topic_arn: str) -> None: + def wait(): + try: + aws_client.sns.get_topic_attributes(TopicArn=topic_arn) + return False + except Exception as e: + if "NotFound" in e.response["Error"]["Code"]: + return True + + raise + + poll_condition(wait, timeout=30) + + return wait_for_topic_delete + + +@pytest.fixture +def sns_subscription(aws_client): + sub_arns = [] + + def _create_sub(**kwargs): + if kwargs.get("ReturnSubscriptionArn") is None: + kwargs["ReturnSubscriptionArn"] = True + + # requires 'TopicArn', 'Protocol', and 'Endpoint' + response = aws_client.sns.subscribe(**kwargs) + sub_arn = response["SubscriptionArn"] + sub_arns.append(sub_arn) + return response + + yield _create_sub + + for sub_arn in sub_arns: + try: + aws_client.sns.unsubscribe(SubscriptionArn=sub_arn) + except Exception as e: + LOG.debug("error cleaning up subscription %s: %s", sub_arn, e) + + +@pytest.fixture +def sns_topic(sns_create_topic, aws_client): + topic_arn = sns_create_topic()["TopicArn"] + return aws_client.sns.get_topic_attributes(TopicArn=topic_arn) + + +@pytest.fixture +def sns_allow_topic_sqs_queue(aws_client): + def _allow_sns_topic(sqs_queue_url, sqs_queue_arn, sns_topic_arn) -> None: + # allow topic to write to sqs queue + aws_client.sqs.set_queue_attributes( + QueueUrl=sqs_queue_url, + Attributes={ + "Policy": json.dumps( + { + "Statement": [ + { + "Effect": "Allow", + "Principal": {"Service": "sns.amazonaws.com"}, + "Action": "sqs:SendMessage", + "Resource": sqs_queue_arn, + "Condition": {"ArnEquals": {"aws:SourceArn": sns_topic_arn}}, + } + ] + } + ) + }, + ) + + return _allow_sns_topic + + +@pytest.fixture +def sns_create_sqs_subscription(sns_allow_topic_sqs_queue, sqs_get_queue_arn, aws_client): + subscriptions = [] + + def _factory(topic_arn: str, queue_url: str, **kwargs) -> Dict[str, str]: + queue_arn = sqs_get_queue_arn(queue_url) + + # connect sns topic to sqs + subscription = aws_client.sns.subscribe( + TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn, **kwargs + ) + subscription_arn = subscription["SubscriptionArn"] + + # allow topic to write to sqs queue + sns_allow_topic_sqs_queue( + sqs_queue_url=queue_url, sqs_queue_arn=queue_arn, sns_topic_arn=topic_arn + ) + + subscriptions.append(subscription_arn) + return aws_client.sns.get_subscription_attributes(SubscriptionArn=subscription_arn)[ + "Attributes" + ] + + yield _factory + + for arn in subscriptions: + try: + aws_client.sns.unsubscribe(SubscriptionArn=arn) + except Exception as e: + LOG.error("error cleaning up subscription %s: %s", arn, e) + + +@pytest.fixture +def sns_create_http_endpoint(sns_create_topic, 
sns_subscription, aws_client):
+    # This fixture can be used with manual setup to expose the HTTPServer fixture to AWS. One example is to use
+    # a service like localhost.run, set up a specific port to start the `HTTPServer(port=40000)` on, and
+    # tunnel `localhost:40000` to a specific domain that you can manually return from this fixture.
+    http_servers = []
+
+    def _create_http_endpoint(
+        raw_message_delivery: bool = False,
+    ) -> Tuple[str, str, str, HTTPServer]:
+        server = HTTPServer()
+        server.start()
+        http_servers.append(server)
+        server.expect_request("/sns-endpoint").respond_with_data(status=200)
+        endpoint_url = server.url_for("/sns-endpoint")
+        wait_for_port_open(endpoint_url)
+
+        topic_arn = sns_create_topic()["TopicArn"]
+        subscription = sns_subscription(TopicArn=topic_arn, Protocol="http", Endpoint=endpoint_url)
+        subscription_arn = subscription["SubscriptionArn"]
+        delivery_policy = {
+            "healthyRetryPolicy": {
+                "minDelayTarget": 1,
+                "maxDelayTarget": 1,
+                "numRetries": 0,
+                "numNoDelayRetries": 0,
+                "numMinDelayRetries": 0,
+                "numMaxDelayRetries": 0,
+                "backoffFunction": "linear",
+            },
+            "sicklyRetryPolicy": None,
+            "throttlePolicy": {"maxReceivesPerSecond": 1000},
+            "guaranteed": False,
+        }
+        aws_client.sns.set_subscription_attributes(
+            SubscriptionArn=subscription_arn,
+            AttributeName="DeliveryPolicy",
+            AttributeValue=json.dumps(delivery_policy),
+        )
+
+        if raw_message_delivery:
+            aws_client.sns.set_subscription_attributes(
+                SubscriptionArn=subscription_arn,
+                AttributeName="RawMessageDelivery",
+                AttributeValue="true",
+            )
+
+        return topic_arn, subscription_arn, endpoint_url, server
+
+    yield _create_http_endpoint
+
+    for http_server in http_servers:
+        if http_server.is_running():
+            http_server.stop()
+
+
+@pytest.fixture
+def route53_hosted_zone(aws_client):
+    hosted_zones = []
+
+    def factory(**kwargs):
+        if "Name" not in kwargs:
+            kwargs["Name"] = f"www.{short_uid()}.com."
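+            # the trailing dot marks a fully qualified domain name; Route 53 normalizes zone
+            # names to this form, so e.g. "www.abc123.com." and "www.abc123.com" are equivalent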
+ if "CallerReference" not in kwargs: + kwargs["CallerReference"] = f"caller-ref-{short_uid()}" + response = aws_client.route53.create_hosted_zone( + Name=kwargs["Name"], CallerReference=kwargs["CallerReference"] + ) + hosted_zones.append(response["HostedZone"]["Id"]) + return response + + yield factory + + for zone in hosted_zones: + try: + aws_client.route53.delete_hosted_zone(Id=zone) + except Exception as e: + LOG.debug("error cleaning up route53 HostedZone %s: %s", zone, e) + + +@pytest.fixture +def transcribe_create_job(s3_bucket, aws_client): + job_names = [] + + def _create_job(audio_file: str, params: Optional[dict[str, Any]] = None) -> str: + s3_key = "test-clip.wav" + + if not params: + params = {} + + if "TranscriptionJobName" not in params: + params["TranscriptionJobName"] = f"test-transcribe-{short_uid()}" + + if "LanguageCode" not in params: + params["LanguageCode"] = "en-GB" + + if "Media" not in params: + params["Media"] = {"MediaFileUri": f"s3://{s3_bucket}/{s3_key}"} + + # upload test wav to a s3 bucket + with open(audio_file, "rb") as f: + aws_client.s3.upload_fileobj(f, s3_bucket, s3_key) + + response = aws_client.transcribe.start_transcription_job(**params) + + job_name = response["TranscriptionJob"]["TranscriptionJobName"] + job_names.append(job_name) + + return job_name + + yield _create_job + + for job_name in job_names: + with contextlib.suppress(ClientError): + aws_client.transcribe.delete_transcription_job(TranscriptionJobName=job_name) + + +@pytest.fixture +def kinesis_create_stream(aws_client): + stream_names = [] + + def _create_stream(**kwargs): + if "StreamName" not in kwargs: + kwargs["StreamName"] = f"test-stream-{short_uid()}" + aws_client.kinesis.create_stream(**kwargs) + stream_names.append(kwargs["StreamName"]) + return kwargs["StreamName"] + + yield _create_stream + + for stream_name in stream_names: + try: + aws_client.kinesis.delete_stream(StreamName=stream_name, EnforceConsumerDeletion=True) + except Exception as e: + LOG.debug("error cleaning up kinesis stream %s: %s", stream_name, e) + + +@pytest.fixture +def wait_for_stream_ready(aws_client): + def _wait_for_stream_ready(stream_name: str): + def is_stream_ready(): + describe_stream_response = aws_client.kinesis.describe_stream(StreamName=stream_name) + return describe_stream_response["StreamDescription"]["StreamStatus"] in [ + "ACTIVE", + "UPDATING", + ] + + return poll_condition(is_stream_ready) + + return _wait_for_stream_ready + + +@pytest.fixture +def wait_for_delivery_stream_ready(aws_client): + def _wait_for_stream_ready(delivery_stream_name: str): + def is_stream_ready(): + describe_stream_response = aws_client.firehose.describe_delivery_stream( + DeliveryStreamName=delivery_stream_name + ) + return ( + describe_stream_response["DeliveryStreamDescription"]["DeliveryStreamStatus"] + == "ACTIVE" + ) + + poll_condition(is_stream_ready) + + return _wait_for_stream_ready + + +@pytest.fixture +def wait_for_dynamodb_stream_ready(aws_client): + def _wait_for_stream_ready(stream_arn: str, client=None): + def is_stream_ready(): + ddb_client = client or aws_client.dynamodbstreams + describe_stream_response = ddb_client.describe_stream(StreamArn=stream_arn) + return describe_stream_response["StreamDescription"]["StreamStatus"] == "ENABLED" + + return poll_condition(is_stream_ready) + + return _wait_for_stream_ready + + +@pytest.fixture() +def kms_create_key(aws_client_factory): + key_ids = [] + + def _create_key(region_name: str = None, **kwargs): + if "Description" not in kwargs: + 
kwargs["Description"] = f"test description - {short_uid()}" + key_metadata = aws_client_factory(region_name=region_name).kms.create_key(**kwargs)[ + "KeyMetadata" + ] + key_ids.append((region_name, key_metadata["KeyId"])) + return key_metadata + + yield _create_key + + for region_name, key_id in key_ids: + try: + # shortest amount of time you can schedule the deletion + aws_client_factory(region_name=region_name).kms.schedule_key_deletion( + KeyId=key_id, PendingWindowInDays=7 + ) + except Exception as e: + exception_message = str(e) + # Some tests schedule their keys for deletion themselves. + if ( + "KMSInvalidStateException" not in exception_message + or "is pending deletion" not in exception_message + ): + LOG.debug("error cleaning up KMS key %s: %s", key_id, e) + + +@pytest.fixture() +def kms_replicate_key(aws_client_factory): + key_ids = [] + + def _replicate_key(region_from=None, **kwargs): + region_to = kwargs.get("ReplicaRegion") + key_ids.append((region_to, kwargs.get("KeyId"))) + return aws_client_factory(region_name=region_from).kms.replicate_key(**kwargs) + + yield _replicate_key + + for region_to, key_id in key_ids: + try: + # shortest amount of time you can schedule the deletion + aws_client_factory(region_name=region_to).kms.schedule_key_deletion( + KeyId=key_id, PendingWindowInDays=7 + ) + except Exception as e: + LOG.debug("error cleaning up KMS key %s: %s", key_id, e) + + +# kms_create_key fixture is used here not just to be able to create aliases without a key specified, +# but also to make sure that kms_create_key gets executed before and teared down after kms_create_alias - +# to make sure that we clean up aliases before keys get cleaned up. +@pytest.fixture() +def kms_create_alias(kms_create_key, aws_client): + aliases = [] + + def _create_alias(**kwargs): + if "AliasName" not in kwargs: + kwargs["AliasName"] = f"alias/{short_uid()}" + if "TargetKeyId" not in kwargs: + kwargs["TargetKeyId"] = kms_create_key()["KeyId"] + + aws_client.kms.create_alias(**kwargs) + aliases.append(kwargs["AliasName"]) + return kwargs["AliasName"] + + yield _create_alias + + for alias in aliases: + try: + aws_client.kms.delete_alias(AliasName=alias) + except Exception as e: + LOG.debug("error cleaning up KMS alias %s: %s", alias, e) + + +@pytest.fixture() +def kms_create_grant(kms_create_key, aws_client): + grants = [] + + def _create_grant(**kwargs): + # Just a random ARN, since KMS in LocalStack currently doesn't validate GranteePrincipal, + # but some GranteePrincipal is required to create a grant. 
+ GRANTEE_PRINCIPAL_ARN = ( + "arn:aws:kms:eu-central-1:123456789876:key/198a5a78-52c3-489f-ac70-b06a4d11027a" + ) + + if "Operations" not in kwargs: + kwargs["Operations"] = ["Decrypt", "Encrypt"] + if "GranteePrincipal" not in kwargs: + kwargs["GranteePrincipal"] = GRANTEE_PRINCIPAL_ARN + if "KeyId" not in kwargs: + kwargs["KeyId"] = kms_create_key()["KeyId"] + + grant_id = aws_client.kms.create_grant(**kwargs)["GrantId"] + grants.append((grant_id, kwargs["KeyId"])) + return grant_id, kwargs["KeyId"] + + yield _create_grant + + for grant_id, key_id in grants: + try: + aws_client.kms.retire_grant(GrantId=grant_id, KeyId=key_id) + except Exception as e: + LOG.debug("error cleaning up KMS grant %s: %s", grant_id, e) + + +@pytest.fixture +def kms_key(kms_create_key): + return kms_create_key() + + +@pytest.fixture +def kms_grant_and_key(kms_key, aws_client): + user_arn = aws_client.sts.get_caller_identity()["Arn"] + + return [ + aws_client.kms.create_grant( + KeyId=kms_key["KeyId"], + GranteePrincipal=user_arn, + Operations=["Decrypt", "Encrypt"], + ), + kms_key, + ] + + +@pytest.fixture +def opensearch_wait_for_cluster(aws_client): + def _wait_for_cluster(domain_name: str): + def finished_processing(): + status = aws_client.opensearch.describe_domain(DomainName=domain_name)["DomainStatus"] + return status["Processing"] is False and "Endpoint" in status + + assert poll_condition( + finished_processing, timeout=25 * 60, **({"interval": 10} if is_aws_cloud() else {}) + ), f"could not start domain: {domain_name}" + + return _wait_for_cluster + + +@pytest.fixture +def opensearch_create_domain(opensearch_wait_for_cluster, aws_client): + domains = [] + + def factory(**kwargs) -> str: + if "DomainName" not in kwargs: + kwargs["DomainName"] = f"test-domain-{short_uid()}" + + aws_client.opensearch.create_domain(**kwargs) + + opensearch_wait_for_cluster(domain_name=kwargs["DomainName"]) + + domains.append(kwargs["DomainName"]) + return kwargs["DomainName"] + + yield factory + + # cleanup + for domain in domains: + try: + aws_client.opensearch.delete_domain(DomainName=domain) + except Exception as e: + LOG.debug("error cleaning up domain %s: %s", domain, e) + + +@pytest.fixture +def opensearch_domain(opensearch_create_domain) -> str: + return opensearch_create_domain() + + +@pytest.fixture +def opensearch_endpoint(opensearch_domain, aws_client) -> str: + status = aws_client.opensearch.describe_domain(DomainName=opensearch_domain)["DomainStatus"] + assert "Endpoint" in status + return f"https://{status['Endpoint']}" + + +@pytest.fixture +def opensearch_document_path(opensearch_endpoint, aws_client): + document = { + "first_name": "Boba", + "last_name": "Fett", + "age": 41, + "about": "I'm just a simple man, trying to make my way in the universe.", + "interests": ["mandalorian armor", "tusken culture"], + } + document_path = f"{opensearch_endpoint}/bountyhunters/_doc/1" + response = requests.put( + document_path, + data=json.dumps(document), + headers={"content-type": "application/json", "Accept-encoding": "identity"}, + ) + assert response.status_code == 201, f"could not create document at: {document_path}" + return document_path + + +# Cleanup fixtures +@pytest.fixture +def cleanup_stacks(aws_client): + def _cleanup_stacks(stacks: List[str]) -> None: + stacks = ensure_list(stacks) + for stack in stacks: + try: + aws_client.cloudformation.delete_stack(StackName=stack) + aws_client.cloudformation.get_waiter("stack_delete_complete").wait(StackName=stack) + except Exception: + LOG.debug("Failed to cleanup 
stack '%s'", stack) + + return _cleanup_stacks + + +@pytest.fixture +def cleanup_changesets(aws_client): + def _cleanup_changesets(changesets: List[str]) -> None: + changesets = ensure_list(changesets) + for cs in changesets: + try: + aws_client.cloudformation.delete_change_set(ChangeSetName=cs) + except Exception: + LOG.debug("Failed to cleanup changeset '%s'", cs) + + return _cleanup_changesets + + +# Helpers for Cfn + + +# TODO: exports(!) +@dataclasses.dataclass(frozen=True) +class DeployResult: + change_set_id: str + stack_id: str + stack_name: str + change_set_name: str + outputs: Dict[str, str] + + destroy: Callable[[], None] + + +class StackDeployError(Exception): + def __init__(self, describe_res: dict, events: list[dict]): + self.describe_result = describe_res + self.events = events + + encoded_describe_output = json.dumps(self.describe_result, cls=CustomEncoder) + if config.CFN_VERBOSE_ERRORS: + msg = f"Describe output:\n{encoded_describe_output}\nEvents:\n{self.format_events(events)}" + else: + msg = f"Describe output:\n{encoded_describe_output}\nFailing resources:\n{self.format_events(events)}" + + super().__init__(msg) + + def format_events(self, events: list[dict]) -> str: + formatted_events = [] + + chronological_events = sorted(events, key=lambda event: event["Timestamp"]) + for event in chronological_events: + if event["ResourceStatus"].endswith("FAILED") or config.CFN_VERBOSE_ERRORS: + formatted_events.append(self.format_event(event)) + + return "\n".join(formatted_events) + + @staticmethod + def format_event(event: dict) -> str: + if reason := event.get("ResourceStatusReason"): + reason = reason.replace("\n", "; ") + return f"- {event['LogicalResourceId']} ({event['ResourceType']}) -> {event['ResourceStatus']} ({reason})" + else: + return f"- {event['LogicalResourceId']} ({event['ResourceType']}) -> {event['ResourceStatus']}" + + +@pytest.fixture +def deploy_cfn_template( + aws_client: ServiceLevelClientFactory, +): + state: list[tuple[str, Callable]] = [] + + def _deploy( + *, + is_update: Optional[bool] = False, + stack_name: Optional[str] = None, + change_set_name: Optional[str] = None, + template: Optional[str] = None, + template_path: Optional[str | os.PathLike] = None, + template_mapping: Optional[Dict[str, Any]] = None, + parameters: Optional[Dict[str, str]] = None, + role_arn: Optional[str] = None, + max_wait: Optional[int] = None, + delay_between_polls: Optional[int] = 2, + custom_aws_client: Optional[ServiceLevelClientFactory] = None, + ) -> DeployResult: + if is_update: + assert stack_name + stack_name = stack_name or f"stack-{short_uid()}" + change_set_name = change_set_name or f"change-set-{short_uid()}" + + if max_wait is None: + max_wait = 1800 if is_aws_cloud() else 180 + + if template_path is not None: + template = load_template_file(template_path) + template_rendered = render_template(template, **(template_mapping or {})) + + kwargs = dict( + StackName=stack_name, + ChangeSetName=change_set_name, + TemplateBody=template_rendered, + Capabilities=["CAPABILITY_AUTO_EXPAND", "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"], + ChangeSetType=("UPDATE" if is_update else "CREATE"), + Parameters=[ + { + "ParameterKey": k, + "ParameterValue": v, + } + for (k, v) in (parameters or {}).items() + ], + ) + if role_arn is not None: + kwargs["RoleARN"] = role_arn + + cfn_aws_client = custom_aws_client if custom_aws_client is not None else aws_client + + response = cfn_aws_client.cloudformation.create_change_set(**kwargs) + + change_set_id = response["Id"] + stack_id = 
response["StackId"] + + cfn_aws_client.cloudformation.get_waiter(WAITER_CHANGE_SET_CREATE_COMPLETE).wait( + ChangeSetName=change_set_id + ) + cfn_aws_client.cloudformation.execute_change_set(ChangeSetName=change_set_id) + stack_waiter = cfn_aws_client.cloudformation.get_waiter( + WAITER_STACK_UPDATE_COMPLETE if is_update else WAITER_STACK_CREATE_COMPLETE + ) + + try: + stack_waiter.wait( + StackName=stack_id, + WaiterConfig={ + "Delay": delay_between_polls, + "MaxAttempts": max_wait / delay_between_polls, + }, + ) + except botocore.exceptions.WaiterError as e: + raise StackDeployError( + cfn_aws_client.cloudformation.describe_stacks(StackName=stack_id)["Stacks"][0], + cfn_aws_client.cloudformation.describe_stack_events(StackName=stack_id)[ + "StackEvents" + ], + ) from e + + describe_stack_res = cfn_aws_client.cloudformation.describe_stacks(StackName=stack_id)[ + "Stacks" + ][0] + outputs = describe_stack_res.get("Outputs", []) + + mapped_outputs = {o["OutputKey"]: o.get("OutputValue") for o in outputs} + + def _destroy_stack(): + cfn_aws_client.cloudformation.delete_stack(StackName=stack_id) + cfn_aws_client.cloudformation.get_waiter(WAITER_STACK_DELETE_COMPLETE).wait( + StackName=stack_id, + WaiterConfig={ + "Delay": delay_between_polls, + "MaxAttempts": max_wait / delay_between_polls, + }, + ) + + state.append((stack_id, _destroy_stack)) + + return DeployResult( + change_set_id, stack_id, stack_name, change_set_name, mapped_outputs, _destroy_stack + ) + + yield _deploy + + # delete the stacks in the reverse order they were created in case of inter-stack dependencies + for stack_id, teardown in state[::-1]: + try: + teardown() + except Exception as e: + LOG.debug("Failed cleaning up stack stack_id=%s: %s", stack_id, e) + + +@pytest.fixture +def is_change_set_created_and_available(aws_client): + def _is_change_set_created_and_available(change_set_id: str): + def _inner(): + change_set = aws_client.cloudformation.describe_change_set(ChangeSetName=change_set_id) + return ( + # TODO: CREATE_FAILED should also not lead to further retries + change_set.get("Status") == "CREATE_COMPLETE" + and change_set.get("ExecutionStatus") == "AVAILABLE" + ) + + return _inner + + return _is_change_set_created_and_available + + +@pytest.fixture +def is_change_set_failed_and_unavailable(aws_client): + def _is_change_set_created_and_available(change_set_id: str): + def _inner(): + change_set = aws_client.cloudformation.describe_change_set(ChangeSetName=change_set_id) + return ( + # TODO: CREATE_FAILED should also not lead to further retries + change_set.get("Status") == "FAILED" + and change_set.get("ExecutionStatus") == "UNAVAILABLE" + ) + + return _inner + + return _is_change_set_created_and_available + + +@pytest.fixture +def is_stack_created(aws_client): + return _has_stack_status(aws_client.cloudformation, ["CREATE_COMPLETE", "CREATE_FAILED"]) + + +@pytest.fixture +def is_stack_updated(aws_client): + return _has_stack_status(aws_client.cloudformation, ["UPDATE_COMPLETE", "UPDATE_FAILED"]) + + +@pytest.fixture +def is_stack_deleted(aws_client): + return _has_stack_status(aws_client.cloudformation, ["DELETE_COMPLETE"]) + + +def _has_stack_status(cfn_client, statuses: List[str]): + def _has_status(stack_id: str): + def _inner(): + resp = cfn_client.describe_stacks(StackName=stack_id) + s = resp["Stacks"][0] # since the lookup uses the id we can only get a single response + return s.get("StackStatus") in statuses + + return _inner + + return _has_status + + +@pytest.fixture +def 
is_change_set_finished(aws_client): + def _is_change_set_finished(change_set_id: str, stack_name: Optional[str] = None): + def _inner(): + kwargs = {"ChangeSetName": change_set_id} + if stack_name: + kwargs["StackName"] = stack_name + + check_set = aws_client.cloudformation.describe_change_set(**kwargs) + + if check_set.get("ExecutionStatus") == "EXECUTE_FAILED": + LOG.warning("Change set failed") + raise ShortCircuitWaitException() + + return check_set.get("ExecutionStatus") == "EXECUTE_COMPLETE" + + return _inner + + return _is_change_set_finished + + +@pytest.fixture +def wait_until_lambda_ready(aws_client): + def _wait_until_ready(function_name: str, qualifier: str = None, client=None): + client = client or aws_client.lambda_ + + def _is_not_pending(): + kwargs = {} + if qualifier: + kwargs["Qualifier"] = qualifier + try: + result = ( + client.get_function(FunctionName=function_name)["Configuration"]["State"] + != "Pending" + ) + LOG.debug("lambda state result: result=%s", result) + return result + except Exception as e: + LOG.error(e) + raise + + wait_until(_is_not_pending) + + return _wait_until_ready + + +role_assume_policy = """ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} +""".strip() + +role_policy = """ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Resource": [ + "*" + ] + } + ] +} +""".strip() + + +@pytest.fixture +def create_lambda_function_aws(aws_client): + lambda_arns = [] + + def _create_lambda_function(**kwargs): + def _create_function(): + resp = aws_client.lambda_.create_function(**kwargs) + lambda_arns.append(resp["FunctionArn"]) + + def _is_not_pending(): + try: + result = ( + aws_client.lambda_.get_function(FunctionName=resp["FunctionName"])[ + "Configuration" + ]["State"] + != "Pending" + ) + return result + except Exception as e: + LOG.error(e) + raise + + wait_until(_is_not_pending) + return resp + + # @AWS, takes about 10s until the role/policy is "active", until then it will fail + # localstack should normally not require the retries and will just continue here + return retry(_create_function, retries=3, sleep=4) + + yield _create_lambda_function + + for arn in lambda_arns: + try: + aws_client.lambda_.delete_function(FunctionName=arn) + except Exception: + LOG.debug("Unable to delete function arn=%s in cleanup", arn) + + +@pytest.fixture +def create_lambda_function(aws_client, wait_until_lambda_ready, lambda_su_role): + lambda_arns_and_clients = [] + log_groups = [] + lambda_client = aws_client.lambda_ + logs_client = aws_client.logs + s3_client = aws_client.s3 + + def _create_lambda_function(*args, **kwargs): + client = kwargs.get("client") or lambda_client + kwargs["client"] = client + kwargs["s3_client"] = s3_client + func_name = kwargs.get("func_name") + assert func_name + del kwargs["func_name"] + + if not kwargs.get("role"): + kwargs["role"] = lambda_su_role + + def _create_function(): + resp = testutil.create_lambda_function(func_name, **kwargs) + lambda_arns_and_clients.append((resp["CreateFunctionResponse"]["FunctionArn"], client)) + wait_until_lambda_ready(function_name=func_name, client=client) + log_group_name = f"/aws/lambda/{func_name}" + log_groups.append(log_group_name) + return resp + + # @AWS, takes about 10s until the role/policy is "active", until then it will fail + # localstack should normally not 
require the retries and will just continue here + return retry(_create_function, retries=3, sleep=4) + + yield _create_lambda_function + + for arn, client in lambda_arns_and_clients: + try: + client.delete_function(FunctionName=arn) + except Exception: + LOG.debug("Unable to delete function arn=%s in cleanup", arn) + + for log_group_name in log_groups: + try: + logs_client.delete_log_group(logGroupName=log_group_name) + except Exception: + LOG.debug("Unable to delete log group %s in cleanup", log_group_name) + + +@pytest.fixture +def create_echo_http_server(aws_client, create_lambda_function): + from localstack.aws.api.lambda_ import Runtime + + lambda_client = aws_client.lambda_ + handler_code = textwrap.dedent( + """ + import json + import os + + + def make_response(body: dict, status_code: int = 200): + return { + "statusCode": status_code, + "headers": {"Content-Type": "application/json"}, + "body": body, + } + + + def trim_headers(headers): + if not int(os.getenv("TRIM_X_HEADERS", 0)): + return headers + return { + key: value for key, value in headers.items() + if not (key.startswith("x-amzn") or key.startswith("x-forwarded-")) + } + + + def handler(event, context): + print(json.dumps(event)) + response = { + "args": event.get("queryStringParameters", {}), + "data": event.get("body", ""), + "domain": event["requestContext"].get("domainName", ""), + "headers": trim_headers(event.get("headers", {})), + "method": event["requestContext"]["http"].get("method", ""), + "origin": event["requestContext"]["http"].get("sourceIp", ""), + "path": event["requestContext"]["http"].get("path", ""), + } + return make_response(response)""" + ) + + def _create_echo_http_server(trim_x_headers: bool = False) -> str: + """Creates a server that will echo any request. Any request will be returned with the + following format. Any unset values will have those defaults. + `trim_x_headers` can be set to True to trim some headers that are automatically added by lambda in + order to create easier Snapshot testing. 
Default: `False` + { + "args": {}, + "headers": {}, + "data": "", + "method": "", + "domain": "", + "origin": "", + "path": "" + }""" + zip_file = testutil.create_lambda_archive(handler_code, get_content=True) + func_name = f"echo-http-{short_uid()}" + create_lambda_function( + func_name=func_name, + zip_file=zip_file, + runtime=Runtime.python3_12, + envvars={"TRIM_X_HEADERS": "1" if trim_x_headers else "0"}, + ) + url_response = lambda_client.create_function_url_config( + FunctionName=func_name, AuthType="NONE" + ) + aws_client.lambda_.add_permission( + FunctionName=func_name, + StatementId="urlPermission", + Action="lambda:InvokeFunctionUrl", + Principal="*", + FunctionUrlAuthType="NONE", + ) + return url_response["FunctionUrl"] + + yield _create_echo_http_server + + +@pytest.fixture +def create_event_source_mapping(aws_client): + uuids = [] + + def _create_event_source_mapping(*args, **kwargs): + response = aws_client.lambda_.create_event_source_mapping(*args, **kwargs) + uuids.append(response["UUID"]) + return response + + yield _create_event_source_mapping + + for uuid in uuids: + try: + aws_client.lambda_.delete_event_source_mapping(UUID=uuid) + except Exception: + LOG.debug("Unable to delete event source mapping %s in cleanup", uuid) + + +@pytest.fixture +def check_lambda_logs(aws_client): + def _check_logs(func_name: str, expected_lines: List[str] = None) -> List[str]: + if not expected_lines: + expected_lines = [] + log_events = get_lambda_logs(func_name, logs_client=aws_client.logs) + log_messages = [e["message"] for e in log_events] + for line in expected_lines: + if ".*" in line: + found = [re.match(line, m, flags=re.DOTALL) for m in log_messages] + if any(found): + continue + assert line in log_messages + return log_messages + + return _check_logs + + +@pytest.fixture +def create_policy(aws_client): + policy_arns = [] + + def _create_policy(*args, iam_client=None, **kwargs): + iam_client = iam_client or aws_client.iam + if "PolicyName" not in kwargs: + kwargs["PolicyName"] = f"policy-{short_uid()}" + response = iam_client.create_policy(*args, **kwargs) + policy_arn = response["Policy"]["Arn"] + policy_arns.append((policy_arn, iam_client)) + return response + + yield _create_policy + + for policy_arn, iam_client in policy_arns: + try: + iam_client.delete_policy(PolicyArn=policy_arn) + except Exception: + LOG.debug("Could not delete policy '%s' during test cleanup", policy_arn) + + +@pytest.fixture +def create_user(aws_client): + usernames = [] + + def _create_user(**kwargs): + if "UserName" not in kwargs: + kwargs["UserName"] = f"user-{short_uid()}" + response = aws_client.iam.create_user(**kwargs) + usernames.append(response["User"]["UserName"]) + return response + + yield _create_user + + for username in usernames: + try: + inline_policies = aws_client.iam.list_user_policies(UserName=username)["PolicyNames"] + except ClientError as e: + LOG.debug( + "Cannot list user policies: %s. 
User %s probably already deleted...", e, username + ) + continue + + for inline_policy in inline_policies: + try: + aws_client.iam.delete_user_policy(UserName=username, PolicyName=inline_policy) + except Exception: + LOG.debug( + "Could not delete user policy '%s' from '%s' during cleanup", + inline_policy, + username, + ) + attached_policies = aws_client.iam.list_attached_user_policies(UserName=username)[ + "AttachedPolicies" + ] + for attached_policy in attached_policies: + try: + aws_client.iam.detach_user_policy( + UserName=username, PolicyArn=attached_policy["PolicyArn"] + ) + except Exception: + LOG.debug( + "Error detaching policy '%s' from user '%s'", + attached_policy["PolicyArn"], + username, + ) + access_keys = aws_client.iam.list_access_keys(UserName=username)["AccessKeyMetadata"] + for access_key in access_keys: + try: + aws_client.iam.delete_access_key( + UserName=username, AccessKeyId=access_key["AccessKeyId"] + ) + except Exception: + LOG.debug( + "Error deleting access key '%s' from user '%s'", + access_key["AccessKeyId"], + username, + ) + + try: + aws_client.iam.delete_user(UserName=username) + except Exception as e: + LOG.debug("Error deleting user '%s' during test cleanup: %s", username, e) + + +@pytest.fixture +def wait_and_assume_role(aws_client): + def _wait_and_assume_role(role_arn: str, session_name: str = None, **kwargs): + if not session_name: + session_name = f"session-{short_uid()}" + + def assume_role(): + return aws_client.sts.assume_role( + RoleArn=role_arn, RoleSessionName=session_name, **kwargs + )["Credentials"] + + # need to retry a couple of times before we are allowed to assume this role in AWS + keys = retry(assume_role, sleep=5, retries=4) + return keys + + return _wait_and_assume_role + + +@pytest.fixture +def create_role(aws_client): + role_names = [] + + def _create_role(iam_client=None, **kwargs): + if not kwargs.get("RoleName"): + kwargs["RoleName"] = f"role-{short_uid()}" + iam_client = iam_client or aws_client.iam + result = iam_client.create_role(**kwargs) + role_names.append((result["Role"]["RoleName"], iam_client)) + return result + + yield _create_role + + for role_name, iam_client in role_names: + # detach policies + try: + attached_policies = iam_client.list_attached_role_policies(RoleName=role_name)[ + "AttachedPolicies" + ] + except ClientError as e: + LOG.debug( + "Cannot list attached role policies: %s. 
Role %s probably already deleted...",
+                e,
+                role_name,
+            )
+            continue
+        for attached_policy in attached_policies:
+            try:
+                iam_client.detach_role_policy(
+                    RoleName=role_name, PolicyArn=attached_policy["PolicyArn"]
+                )
+            except Exception:
+                LOG.debug(
+                    "Could not detach role policy '%s' from '%s' during cleanup",
+                    attached_policy["PolicyArn"],
+                    role_name,
+                )
+        role_policies = iam_client.list_role_policies(RoleName=role_name)["PolicyNames"]
+        for role_policy in role_policies:
+            try:
+                iam_client.delete_role_policy(RoleName=role_name, PolicyName=role_policy)
+            except Exception:
+                LOG.debug(
+                    "Could not delete role policy '%s' from '%s' during cleanup",
+                    role_policy,
+                    role_name,
+                )
+        try:
+            iam_client.delete_role(RoleName=role_name)
+        except Exception:
+            LOG.debug("Could not delete role '%s' during cleanup", role_name)
+
+
+@pytest.fixture
+def create_parameter(aws_client):
+    params = []
+
+    def _create_parameter(**kwargs):
+        params.append(kwargs["Name"])
+        return aws_client.ssm.put_parameter(**kwargs)
+
+    yield _create_parameter
+
+    for param in params:
+        aws_client.ssm.delete_parameter(Name=param)
+
+
+@pytest.fixture
+def create_secret(aws_client):
+    items = []
+
+    def _create_secret(**kwargs):
+        create_response = aws_client.secretsmanager.create_secret(**kwargs)
+        items.append(create_response["ARN"])
+        return create_response
+
+    yield _create_secret
+
+    for item in items:
+        aws_client.secretsmanager.delete_secret(SecretId=item, ForceDeleteWithoutRecovery=True)
+
+
+# TODO Figure out how to make cert creation tests pass against AWS.
+#
+# We would like our tests to pass not just against LocalStack, but also against AWS, to make sure
+# our emulation is correct. Unfortunately, certificate creation poses some issues.
+#
+# In AWS, newly created ACM certificates have to be validated either by email or by DNS. The latter is done
+# by adding the CNAME records requested by AWS in response to a certificate request.
+# For testing purposes the DNS option seems to be easier, at least as long as DNS is handled by the Route53 AWS DNS service.
+#
+# Another possible option is to use IAM certificates instead of ACM ones. Those just have to be uploaded from files
+# created by openssl etc. Not sure if there are other issues after that.
+#
+# The third option might be having some certificates created in AWS in advance - so they do not require validation
+# and can be easily used in tests. The issue with such an approach is that for AppSync, for example, in order to
+# register a domain name (https://docs.aws.amazon.com/appsync/latest/APIReference/API_CreateDomainName.html),
+# the domain name in the API request has to match the domain name used in certificate creation. Which means that with
+# pre-created certificates we would have to use specific domain names instead of random ones.
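+#
+# As a rough illustration of the DNS option above (a hypothetical helper, not part of this change),
+# automating validation could look roughly like this when the domain's hosted zone is in Route53:
+#
+#   def wait_for_dns_validation(acm_client, route53_client, certificate_arn, hosted_zone_id):
+#       cert = acm_client.describe_certificate(CertificateArn=certificate_arn)["Certificate"]
+#       # AWS answers the certificate request with a CNAME record that proves domain ownership
+#       record = cert["DomainValidationOptions"][0]["ResourceRecord"]
+#       route53_client.change_resource_record_sets(
+#           HostedZoneId=hosted_zone_id,
+#           ChangeBatch={
+#               "Changes": [
+#                   {
+#                       "Action": "UPSERT",
+#                       "ResourceRecordSet": {
+#                           "Name": record["Name"],
+#                           "Type": record["Type"],
+#                           "TTL": 300,
+#                           "ResourceRecords": [{"Value": record["Value"]}],
+#                       },
+#                   }
+#               ]
+#           },
+#       )
+#       acm_client.get_waiter("certificate_validated").wait(CertificateArn=certificate_arn)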
+@pytest.fixture
+def acm_request_certificate(aws_client_factory):
+    certificate_arns = []
+
+    def factory(**kwargs) -> str:
+        if "DomainName" not in kwargs:
+            kwargs["DomainName"] = f"test-domain-{short_uid()}.localhost.localstack.cloud"
+
+        region_name = kwargs.pop("region_name", None)
+        acm_client = aws_client_factory(region_name=region_name).acm
+
+        response = acm_client.request_certificate(**kwargs)
+        created_certificate_arn = response["CertificateArn"]
+        certificate_arns.append((created_certificate_arn, region_name))
+        return response
+
+    yield factory
+
+    # cleanup
+    for certificate_arn, region_name in certificate_arns:
+        try:
+            acm_client = aws_client_factory(region_name=region_name).acm
+            acm_client.delete_certificate(CertificateArn=certificate_arn)
+        except Exception as e:
+            LOG.debug("error cleaning up certificate %s: %s", certificate_arn, e)
+
+
+role_policy_su = {
+    "Version": "2012-10-17",
+    "Statement": [{"Effect": "Allow", "Action": ["*"], "Resource": ["*"]}],
+}
+
+
+@pytest.fixture(scope="session")
+def lambda_su_role(aws_client):
+    role_name = f"lambda-autogenerated-{short_uid()}"
+    role = aws_client.iam.create_role(
+        RoleName=role_name, AssumeRolePolicyDocument=role_assume_policy
+    )["Role"]
+    policy_name = f"lambda-autogenerated-{short_uid()}"
+    policy_arn = aws_client.iam.create_policy(
+        PolicyName=policy_name, PolicyDocument=json.dumps(role_policy_su)
+    )["Policy"]["Arn"]
+    aws_client.iam.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn)
+
+    if is_aws_cloud():  # dirty but necessary
+        time.sleep(10)
+
+    yield role["Arn"]
+
+    # pass the callables to run_safe - calling them directly would bypass its error handling
+    run_safe(aws_client.iam.detach_role_policy, RoleName=role_name, PolicyArn=policy_arn)
+    run_safe(aws_client.iam.delete_role, RoleName=role_name)
+    run_safe(aws_client.iam.delete_policy, PolicyArn=policy_arn)
+
+
+@pytest.fixture
+def create_iam_role_and_attach_policy(aws_client):
+    """
+    Fixture that creates an IAM role with the given role definition and a predefined policy ARN.
+
+    Use this fixture with AWS managed policies like 'AmazonS3ReadOnlyAccess' or 'AmazonKinesisFullAccess'.
+    """
+    roles = []
+
+    def _inner(**kwargs: dict[str, Any]) -> str:
+        """
+        :param dict RoleDefinition: role definition document
+        :param str PolicyArn: policy ARN
+        :param str RoleName: role name (autogenerated if omitted)
+        :return: role ARN
+        """
+        if "RoleName" not in kwargs:
+            kwargs["RoleName"] = f"test-role-{short_uid()}"
+
+        role = kwargs["RoleName"]
+        role_policy = json.dumps(kwargs["RoleDefinition"])
+
+        result = aws_client.iam.create_role(RoleName=role, AssumeRolePolicyDocument=role_policy)
+        role_arn = result["Role"]["Arn"]
+
+        policy_arn = kwargs["PolicyArn"]
+        aws_client.iam.attach_role_policy(PolicyArn=policy_arn, RoleName=role)
+
+        roles.append(role)
+        return role_arn
+
+    yield _inner
+
+    for role in roles:
+        try:
+            # NOTE: on real AWS this delete fails with a DeleteConflict while the managed
+            # policy is still attached; detaching it first would be required there.
+            aws_client.iam.delete_role(RoleName=role)
+        except Exception as exc:
+            LOG.debug("Error deleting IAM role '%s': %s", role, exc)
+
+
+@pytest.fixture
+def create_iam_role_with_policy(aws_client):
+    """
+    Fixture that creates an IAM role with the given role definition and policy definition.
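+
+    A sketch of typical usage (values are illustrative only):
+
+        role_arn = create_iam_role_with_policy(
+            RoleDefinition=json.loads(role_assume_policy),
+            PolicyDefinition={
+                "Version": "2012-10-17",
+                "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}],
+            },
+        )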
+ """ + roles = {} + + def _create_role_and_policy(**kwargs: dict[str, any]) -> str: + """ + :param dict RoleDefinition: role definition document + :param dict PolicyDefinition: policy definition document + :param str PolicyName: policy name (autogenerated if omitted) + :param str RoleName: role name (autogenerated if omitted) + :return: role ARN + """ + if "RoleName" not in kwargs: + kwargs["RoleName"] = f"test-role-{short_uid()}" + role = kwargs["RoleName"] + if "PolicyName" not in kwargs: + kwargs["PolicyName"] = f"test-policy-{short_uid()}" + policy = kwargs["PolicyName"] + role_policy = json.dumps(kwargs["RoleDefinition"]) + + result = aws_client.iam.create_role(RoleName=role, AssumeRolePolicyDocument=role_policy) + role_arn = result["Role"]["Arn"] + + policy_document = json.dumps(kwargs["PolicyDefinition"]) + aws_client.iam.put_role_policy( + RoleName=role, PolicyName=policy, PolicyDocument=policy_document + ) + roles[role] = policy + return role_arn + + yield _create_role_and_policy + + for role_name, policy_name in roles.items(): + try: + aws_client.iam.delete_role_policy(RoleName=role_name, PolicyName=policy_name) + except Exception as exc: + LOG.debug("Error deleting IAM role policy '%s' '%s': %s", role_name, policy_name, exc) + try: + aws_client.iam.delete_role(RoleName=role_name) + except Exception as exc: + LOG.debug("Error deleting IAM role '%s': %s", role_name, exc) + + +@pytest.fixture +def firehose_create_delivery_stream(wait_for_delivery_stream_ready, aws_client): + delivery_stream_names = [] + + def _create_delivery_stream(**kwargs): + if "DeliveryStreamName" not in kwargs: + kwargs["DeliveryStreamName"] = f"test-delivery-stream-{short_uid()}" + delivery_stream_name = kwargs["DeliveryStreamName"] + response = aws_client.firehose.create_delivery_stream(**kwargs) + delivery_stream_names.append(delivery_stream_name) + wait_for_delivery_stream_ready(delivery_stream_name) + return response + + yield _create_delivery_stream + + for delivery_stream_name in delivery_stream_names: + try: + aws_client.firehose.delete_delivery_stream(DeliveryStreamName=delivery_stream_name) + except Exception: + LOG.info("Failed to delete delivery stream %s", delivery_stream_name) + + +@pytest.fixture +def ses_configuration_set(aws_client): + configuration_set_names = [] + + def factory(name: str) -> None: + aws_client.ses.create_configuration_set( + ConfigurationSet={ + "Name": name, + }, + ) + configuration_set_names.append(name) + + yield factory + + for configuration_set_name in configuration_set_names: + aws_client.ses.delete_configuration_set(ConfigurationSetName=configuration_set_name) + + +@pytest.fixture +def ses_configuration_set_sns_event_destination(aws_client): + event_destinations = [] + + def factory(config_set_name: str, event_destination_name: str, topic_arn: str) -> None: + aws_client.ses.create_configuration_set_event_destination( + ConfigurationSetName=config_set_name, + EventDestination={ + "Name": event_destination_name, + "Enabled": True, + "MatchingEventTypes": ["send", "bounce", "delivery", "open", "click"], + "SNSDestination": { + "TopicARN": topic_arn, + }, + }, + ) + event_destinations.append((config_set_name, event_destination_name)) + + yield factory + + for created_config_set_name, created_event_destination_name in event_destinations: + aws_client.ses.delete_configuration_set_event_destination( + ConfigurationSetName=created_config_set_name, + EventDestinationName=created_event_destination_name, + ) + + +@pytest.fixture +def ses_email_template(aws_client): + 
template_names = []
+
+    def factory(name: str, contents: str, subject: str = f"Email template {short_uid()}"):
+        aws_client.ses.create_template(
+            Template={
+                "TemplateName": name,
+                "SubjectPart": subject,
+                "TextPart": contents,
+            }
+        )
+        template_names.append(name)
+
+    yield factory
+
+    for template_name in template_names:
+        aws_client.ses.delete_template(TemplateName=template_name)
+
+
+@pytest.fixture
+def ses_verify_identity(aws_client):
+    identities = []
+
+    def factory(email_address: str) -> None:
+        aws_client.ses.verify_email_identity(EmailAddress=email_address)
+        # track the identity so it gets cleaned up after the test
+        identities.append(email_address)
+
+    yield factory
+
+    for identity in identities:
+        aws_client.ses.delete_identity(Identity=identity)
+
+
+@pytest.fixture
+def setup_sender_email_address(ses_verify_identity):
+    """
+    If the test is running against AWS, assume the email address passed in is already
+    verified and pass it through. Otherwise, generate a random email address
+    and verify it.
+    """
+
+    def inner(sender_email_address: Optional[str] = None) -> str:
+        if is_aws_cloud():
+            if sender_email_address is None:
+                raise ValueError(
+                    "sender_email_address must be specified to run this test against AWS"
+                )
+        else:
+            # overwrite the given parameter with a LocalStack-specific one
+            sender_email_address = f"sender-{short_uid()}@example.com"
+            ses_verify_identity(sender_email_address)
+
+        return sender_email_address
+
+    return inner
+
+
+@pytest.fixture
+def ec2_create_security_group(aws_client):
+    ec2_sgs = []
+
+    def factory(ports=None, ip_protocol: str = "tcp", **kwargs):
+        """
+        Create a security group and authorize its ingress rules.
+        :param ports: list of ports to be authorized for the ingress rule.
+        :param ip_protocol: the IP protocol for the permissions ("tcp" by default)
+        """
+        if "GroupName" not in kwargs:
+            # FIXME: This will fail against AWS since the sg prefix is not valid for GroupName
+            #  > "Group names may not be in the format sg-*".
+            kwargs["GroupName"] = f"sg-{short_uid()}"
+        # Making sure the call to CreateSecurityGroup gets the right arguments
+        _args = select_from_typed_dict(CreateSecurityGroupRequest, kwargs)
+        security_group = aws_client.ec2.create_security_group(**_args)
+        security_group_id = security_group["GroupId"]
+
+        # FIXME: If 'ports' is None or an empty list, authorize_security_group_ingress will fail due to missing IpPermissions.
+        #  Must ensure ports are explicitly provided or skip the authorization entirely if not required.
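+        #  (e.g., guarding the authorize_security_group_ingress call below with `if ports:`)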
+ permissions = [ + { + "FromPort": port, + "IpProtocol": ip_protocol, + "IpRanges": [{"CidrIp": "0.0.0.0/0"}], + "ToPort": port, + } + for port in ports or [] + ] + aws_client.ec2.authorize_security_group_ingress( + GroupId=security_group_id, + IpPermissions=permissions, + ) + + ec2_sgs.append(security_group_id) + return security_group + + yield factory + + for sg_group_id in ec2_sgs: + try: + aws_client.ec2.delete_security_group(GroupId=sg_group_id) + except Exception as e: + LOG.debug("Error cleaning up EC2 security group: %s, %s", sg_group_id, e) + + +@pytest.fixture +def cleanups(): + cleanup_fns = [] + + yield cleanup_fns + + for cleanup_callback in cleanup_fns[::-1]: + try: + cleanup_callback() + except Exception as e: + LOG.warning("Failed to execute cleanup", exc_info=e) + + +@pytest.fixture(scope="session") +def account_id(aws_client): + if is_aws_cloud() or is_api_enabled("sts"): + return aws_client.sts.get_caller_identity()["Account"] + else: + return TEST_AWS_ACCOUNT_ID + + +@pytest.fixture(scope="session") +def region_name(aws_client): + if is_aws_cloud() or is_api_enabled("sts"): + return aws_client.sts.meta.region_name + else: + return TEST_AWS_REGION_NAME + + +@pytest.fixture(scope="session") +def partition(region_name): + return get_partition(region_name) + + +@pytest.fixture(scope="session") +def secondary_account_id(secondary_aws_client): + if is_aws_cloud() or is_api_enabled("sts"): + return secondary_aws_client.sts.get_caller_identity()["Account"] + else: + return SECONDARY_TEST_AWS_ACCOUNT_ID + + +@pytest.fixture(scope="session") +def secondary_region_name(): + return SECONDARY_TEST_AWS_REGION_NAME + + +@pytest.hookimpl +def pytest_collection_modifyitems(config: Config, items: list[Item]): + only_localstack = pytest.mark.skipif( + is_aws_cloud(), + reason="test only applicable if run against localstack", + ) + for item in items: + for mark in item.iter_markers(): + if mark.name.endswith("only_localstack"): + item.add_marker(only_localstack) + if hasattr(item, "fixturenames") and "snapshot" in item.fixturenames: + # add a marker that indicates that this test is snapshot validated + # if it uses the snapshot fixture -> allows selecting only snapshot + # validated tests in order to capture new snapshots for a whole + # test file with "-m snapshot_validated" + item.add_marker("snapshot_validated") + + +@pytest.fixture +def sample_stores() -> AccountRegionBundle: + class SampleStore(BaseStore): + CROSS_ACCOUNT_ATTR = CrossAccountAttribute(default=list) + CROSS_REGION_ATTR = CrossRegionAttribute(default=list) + region_specific_attr = LocalAttribute(default=list) + + return AccountRegionBundle("zzz", SampleStore, validate=False) + + +@pytest.fixture +def create_rest_apigw(aws_client_factory): + rest_apis = [] + retry_boto_config = None + if is_aws_cloud(): + retry_boto_config = botocore.config.Config( + # Api gateway can throttle requests pretty heavily. 
Leading to potentially undeleted apis + retries={"max_attempts": 10, "mode": "adaptive"} + ) + + def _create_apigateway_function(**kwargs): + client_region_name = kwargs.pop("region_name", None) + apigateway_client = aws_client_factory( + region_name=client_region_name, config=retry_boto_config + ).apigateway + kwargs.setdefault("name", f"api-{short_uid()}") + + response = apigateway_client.create_rest_api(**kwargs) + api_id = response.get("id") + rest_apis.append((api_id, client_region_name)) + + return api_id, response.get("name"), response.get("rootResourceId") + + yield _create_apigateway_function + + for rest_api_id, _client_region_name in rest_apis: + apigateway_client = aws_client_factory( + region_name=_client_region_name, + config=retry_boto_config, + ).apigateway + # First, retrieve the usage plans associated with the REST API + usage_plan_ids = [] + usage_plans = apigateway_client.get_usage_plans() + for item in usage_plans.get("items", []): + api_stages = item.get("apiStages", []) + usage_plan_ids.extend( + item.get("id") for api_stage in api_stages if api_stage.get("apiId") == rest_api_id + ) + + # Then delete the API, as you can't delete the UsagePlan if a stage is associated with it + with contextlib.suppress(Exception): + apigateway_client.delete_rest_api(restApiId=rest_api_id) + + # finally delete the usage plans and the API Keys linked to it + for usage_plan_id in usage_plan_ids: + usage_plan_keys = apigateway_client.get_usage_plan_keys(usagePlanId=usage_plan_id) + for key in usage_plan_keys.get("items", []): + apigateway_client.delete_api_key(apiKey=key["id"]) + apigateway_client.delete_usage_plan(usagePlanId=usage_plan_id) + + +@pytest.fixture +def create_rest_apigw_openapi(aws_client_factory): + rest_apis = [] + + def _create_apigateway_function(**kwargs): + region_name = kwargs.pop("region_name", None) + apigateway_client = aws_client_factory(region_name=region_name).apigateway + + response = apigateway_client.import_rest_api(**kwargs) + api_id = response.get("id") + rest_apis.append((api_id, region_name)) + return api_id, response + + yield _create_apigateway_function + + for rest_api_id, region_name in rest_apis: + with contextlib.suppress(Exception): + apigateway_client = aws_client_factory(region_name=region_name).apigateway + apigateway_client.delete_rest_api(restApiId=rest_api_id) + + +@pytest.fixture +def appsync_create_api(aws_client): + graphql_apis = [] + + def factory(**kwargs): + if "name" not in kwargs: + kwargs["name"] = f"graphql-api-testing-name-{short_uid()}" + if not kwargs.get("authenticationType"): + kwargs["authenticationType"] = "API_KEY" + + result = aws_client.appsync.create_graphql_api(**kwargs)["graphqlApi"] + graphql_apis.append(result["apiId"]) + return result + + yield factory + + for api in graphql_apis: + try: + aws_client.appsync.delete_graphql_api(apiId=api) + except Exception as e: + LOG.debug("Error cleaning up AppSync API: %s, %s", api, e) + + +@pytest.fixture +def assert_host_customisation(monkeypatch): + localstack_host = "foo.bar" + monkeypatch.setattr( + config, "LOCALSTACK_HOST", config.HostAndPort(host=localstack_host, port=8888) + ) + + def asserter( + url: str, + *, + custom_host: Optional[str] = None, + ): + if custom_host is not None: + assert custom_host in url, f"Could not find `{custom_host}` in `{url}`" + + assert localstack_host not in url + else: + assert localstack_host in url, f"Could not find `{localstack_host}` in `{url}`" + + yield asserter + + +@pytest.fixture +def echo_http_server(httpserver: HTTPServer): + 
"""Spins up a local HTTP echo server and returns the endpoint URL""" + + def _echo(request: Request) -> Response: + request_json = None + if request.is_json: + with contextlib.suppress(ValueError): + request_json = json.loads(request.data) + result = { + "data": request.data or "{}", + "headers": dict(request.headers), + "url": request.url, + "method": request.method, + "json": request_json, + } + response_body = json.dumps(json_safe(result)) + return Response(response_body, status=200) + + httpserver.expect_request("").respond_with_handler(_echo) + http_endpoint = httpserver.url_for("/") + + return http_endpoint + + +@pytest.fixture +def echo_http_server_post(echo_http_server): + """ + Returns an HTTP echo server URL for POST requests that work both locally and for parity tests (against real AWS) + """ + if is_aws_cloud(): + return f"{PUBLIC_HTTP_ECHO_SERVER_URL}/post" + + return f"{echo_http_server}post" + + +def create_policy_doc(effect: str, actions: List, resource=None) -> Dict: + actions = ensure_list(actions) + resource = resource or "*" + return { + "Version": "2012-10-17", + "Statement": [ + { + # TODO statement ids have to be alphanumeric [0-9A-Za-z], write a test for it + "Sid": f"s{short_uid()}", + "Effect": effect, + "Action": actions, + "Resource": resource, + } + ], + } + + +@pytest.fixture +def create_policy_generated_document(create_policy): + def _create_policy_with_doc(effect, actions, policy_name=None, resource=None, iam_client=None): + policy_name = policy_name or f"p-{short_uid()}" + policy = create_policy_doc(effect, actions, resource=resource) + response = create_policy( + PolicyName=policy_name, PolicyDocument=json.dumps(policy), iam_client=iam_client + ) + policy_arn = response["Policy"]["Arn"] + return policy_arn + + return _create_policy_with_doc + + +@pytest.fixture +def create_role_with_policy(create_role, create_policy_generated_document, aws_client): + def _create_role_with_policy( + effect, actions, assume_policy_doc, resource=None, attach=True, iam_client=None + ): + iam_client = iam_client or aws_client.iam + + role_name = f"role-{short_uid()}" + result = create_role( + RoleName=role_name, AssumeRolePolicyDocument=assume_policy_doc, iam_client=iam_client + ) + role_arn = result["Role"]["Arn"] + policy_name = f"p-{short_uid()}" + + if attach: + # create role and attach role policy + policy_arn = create_policy_generated_document( + effect, actions, policy_name=policy_name, resource=resource, iam_client=iam_client + ) + iam_client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn) + else: + # put role policy + policy_document = create_policy_doc(effect, actions, resource=resource) + policy_document = json.dumps(policy_document) + iam_client.put_role_policy( + RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy_document + ) + + return role_name, role_arn + + return _create_role_with_policy + + +@pytest.fixture +def create_user_with_policy(create_policy_generated_document, create_user, aws_client): + def _create_user_with_policy(effect, actions, resource=None): + policy_arn = create_policy_generated_document(effect, actions, resource=resource) + username = f"user-{short_uid()}" + create_user(UserName=username) + aws_client.iam.attach_user_policy(UserName=username, PolicyArn=policy_arn) + keys = aws_client.iam.create_access_key(UserName=username)["AccessKey"] + return username, keys + + return _create_user_with_policy + + +@pytest.fixture() +def register_extension(s3_bucket, aws_client): + cfn_client = aws_client.cloudformation + 
extensions_arns = [] + + def _register(extension_name, extension_type, artifact_path): + bucket = s3_bucket + key = f"artifact-{short_uid()}" + + aws_client.s3.upload_file(artifact_path, bucket, key) + + register_response = cfn_client.register_type( + Type=extension_type, + TypeName=extension_name, + SchemaHandlerPackage=f"s3://{bucket}/{key}", + ) + + registration_token = register_response["RegistrationToken"] + cfn_client.get_waiter("type_registration_complete").wait( + RegistrationToken=registration_token + ) + + describe_response = cfn_client.describe_type_registration( + RegistrationToken=registration_token + ) + + extensions_arns.append(describe_response["TypeArn"]) + cfn_client.set_type_default_version(Arn=describe_response["TypeVersionArn"]) + + return describe_response + + yield _register + + for arn in extensions_arns: + versions = cfn_client.list_type_versions(Arn=arn)["TypeVersionSummaries"] + for v in versions: + try: + cfn_client.deregister_type(Arn=v["Arn"]) + except Exception: + continue + cfn_client.deregister_type(Arn=arn) + + +@pytest.fixture +def hosted_zone(aws_client): + zone_ids = [] + + def factory(**kwargs): + if "CallerReference" not in kwargs: + kwargs["CallerReference"] = f"ref-{short_uid()}" + response = aws_client.route53.create_hosted_zone(**kwargs) + zone_id = response["HostedZone"]["Id"] + zone_ids.append(zone_id) + return response + + yield factory + + for zone_id in zone_ids[::-1]: + aws_client.route53.delete_hosted_zone(Id=zone_id) + + +@pytest.fixture +def openapi_validate(monkeypatch): + monkeypatch.setattr(config, "OPENAPI_VALIDATE_RESPONSE", "true") + monkeypatch.setattr(config, "OPENAPI_VALIDATE_REQUEST", "true") + + +@pytest.fixture +def set_resource_custom_id(): + set_ids = [] + + def _set_custom_id(resource_identifier: ResourceIdentifier, custom_id): + localstack_id_manager.set_custom_id( + resource_identifier=resource_identifier, custom_id=custom_id + ) + set_ids.append(resource_identifier) + + yield _set_custom_id + + for resource_identifier in set_ids: + localstack_id_manager.unset_custom_id(resource_identifier) + + +############################### +# Events (EventBridge) fixtures +############################### + + +@pytest.fixture +def events_create_event_bus(aws_client, region_name, account_id): + event_bus_names = [] + + def _create_event_bus(**kwargs): + if "Name" not in kwargs: + kwargs["Name"] = f"test-event-bus-{short_uid()}" + + response = aws_client.events.create_event_bus(**kwargs) + event_bus_names.append(kwargs["Name"]) + return response + + yield _create_event_bus + + for event_bus_name in event_bus_names: + try: + response = aws_client.events.list_rules(EventBusName=event_bus_name) + rules = [rule["Name"] for rule in response["Rules"]] + + # Delete all rules for the current event bus + for rule in rules: + try: + response = aws_client.events.list_targets_by_rule( + Rule=rule, EventBusName=event_bus_name + ) + targets = [target["Id"] for target in response["Targets"]] + + # Remove all targets for the current rule + if targets: + for target in targets: + aws_client.events.remove_targets( + Rule=rule, EventBusName=event_bus_name, Ids=[target] + ) + + aws_client.events.delete_rule(Name=rule, EventBusName=event_bus_name) + except Exception as e: + LOG.warning("Failed to delete rule %s: %s", rule, e) + + # Delete archives for event bus + event_source_arn = ( + f"arn:aws:events:{region_name}:{account_id}:event-bus/{event_bus_name}" + ) + response = aws_client.events.list_archives(EventSourceArn=event_source_arn) + archives = 
[archive["ArchiveName"] for archive in response["Archives"]] + for archive in archives: + try: + aws_client.events.delete_archive(ArchiveName=archive) + except Exception as e: + LOG.warning("Failed to delete archive %s: %s", archive, e) + + aws_client.events.delete_event_bus(Name=event_bus_name) + except Exception as e: + LOG.warning("Failed to delete event bus %s: %s", event_bus_name, e) + + +@pytest.fixture +def events_put_rule(aws_client): + rules = [] + + def _put_rule(**kwargs): + if "Name" not in kwargs: + kwargs["Name"] = f"rule-{short_uid()}" + + response = aws_client.events.put_rule(**kwargs) + rules.append((kwargs["Name"], kwargs.get("EventBusName", "default"))) + return response + + yield _put_rule + + for rule, event_bus_name in rules: + try: + response = aws_client.events.list_targets_by_rule( + Rule=rule, EventBusName=event_bus_name + ) + targets = [target["Id"] for target in response["Targets"]] + + # Remove all targets for the current rule + if targets: + for target in targets: + aws_client.events.remove_targets( + Rule=rule, EventBusName=event_bus_name, Ids=[target] + ) + + aws_client.events.delete_rule(Name=rule, EventBusName=event_bus_name) + except Exception as e: + LOG.warning("Failed to delete rule %s: %s", rule, e) + + +@pytest.fixture +def events_create_rule(aws_client): + rules = [] + + def _create_rule(**kwargs): + rule_name = kwargs["Name"] + bus_name = kwargs.get("EventBusName", "") + pattern = kwargs.get("EventPattern", {}) + schedule = kwargs.get("ScheduleExpression", "") + rule_arn = aws_client.events.put_rule( + Name=rule_name, + EventBusName=bus_name, + EventPattern=json.dumps(pattern), + ScheduleExpression=schedule, + )["RuleArn"] + rules.append({"name": rule_name, "bus": bus_name}) + return rule_arn + + yield _create_rule + + for rule in rules: + targets = aws_client.events.list_targets_by_rule( + Rule=rule["name"], EventBusName=rule["bus"] + )["Targets"] + + targetIds = [target["Id"] for target in targets] + if len(targetIds) > 0: + aws_client.events.remove_targets( + Rule=rule["name"], EventBusName=rule["bus"], Ids=targetIds + ) + + aws_client.events.delete_rule(Name=rule["name"], EventBusName=rule["bus"]) + + +@pytest.fixture +def sqs_as_events_target(aws_client, sqs_get_queue_arn): + queue_urls = [] + + def _sqs_as_events_target(queue_name: str | None = None) -> tuple[str, str]: + if not queue_name: + queue_name = f"tests-queue-{short_uid()}" + sqs_client = aws_client.sqs + queue_url = sqs_client.create_queue(QueueName=queue_name)["QueueUrl"] + queue_urls.append(queue_url) + queue_arn = sqs_get_queue_arn(queue_url) + policy = { + "Version": "2012-10-17", + "Id": f"sqs-eventbridge-{short_uid()}", + "Statement": [ + { + "Sid": f"SendMessage-{short_uid()}", + "Effect": "Allow", + "Principal": {"Service": "events.amazonaws.com"}, + "Action": "sqs:SendMessage", + "Resource": queue_arn, + } + ], + } + sqs_client.set_queue_attributes( + QueueUrl=queue_url, Attributes={"Policy": json.dumps(policy)} + ) + return queue_url, queue_arn + + yield _sqs_as_events_target + + for queue_url in queue_urls: + try: + aws_client.sqs.delete_queue(QueueUrl=queue_url) + except Exception as e: + LOG.debug("error cleaning up queue %s: %s", queue_url, e) + + +@pytest.fixture +def clean_up( + aws_client, +): # TODO: legacy clean up fixtures for eventbridge - remove and use individual fixtures for creating resources instead + def _clean_up( + bus_name=None, + rule_name=None, + target_ids=None, + queue_url=None, + log_group_name=None, + ): + events_client = aws_client.events + 
kwargs = {"EventBusName": bus_name} if bus_name else {} + if target_ids: + target_ids = target_ids if isinstance(target_ids, list) else [target_ids] + call_safe( + events_client.remove_targets, + kwargs=dict(Rule=rule_name, Ids=target_ids, Force=True, **kwargs), + ) + if rule_name: + call_safe(events_client.delete_rule, kwargs=dict(Name=rule_name, Force=True, **kwargs)) + if bus_name: + call_safe(events_client.delete_event_bus, kwargs=dict(Name=bus_name)) + if queue_url: + sqs_client = aws_client.sqs + call_safe(sqs_client.delete_queue, kwargs=dict(QueueUrl=queue_url)) + if log_group_name: + logs_client = aws_client.logs + + def _delete_log_group(): + log_streams = logs_client.describe_log_streams(logGroupName=log_group_name) + for log_stream in log_streams["logStreams"]: + logs_client.delete_log_stream( + logGroupName=log_group_name, logStreamName=log_stream["logStreamName"] + ) + logs_client.delete_log_group(logGroupName=log_group_name) + + call_safe(_delete_log_group) + + yield _clean_up diff --git a/localstack-core/localstack/testing/pytest/in_memory_localstack.py b/localstack-core/localstack/testing/pytest/in_memory_localstack.py new file mode 100644 index 0000000000000..d31a570ac4b30 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/in_memory_localstack.py @@ -0,0 +1,107 @@ +"""Pytest plugin that spins up a single localstack instance in the current interpreter that is shared +across the current test session. + +Use in your module as follows:: + + pytest_plugins = "localstack.testing.pytest.in_memory_localstack" + + @pytest.hookimpl() + def pytest_configure(config): + config.option.start_localstack = True + +You can explicitly disable starting localstack by setting ``TEST_SKIP_LOCALSTACK_START=1`` or +``TEST_TARGET=AWS_CLOUD``.""" + +import logging +import os +import threading + +import pytest +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.main import Session + +from localstack import config as localstack_config +from localstack.config import is_env_true +from localstack.constants import ENV_INTERNAL_TEST_RUN + +LOG = logging.getLogger(__name__) +LOG.info("Pytest plugin for in-memory-localstack session loaded.") + +if localstack_config.is_collect_metrics_mode(): + pytest_plugins = "localstack.testing.pytest.metric_collection" + +_started = threading.Event() + + +def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager): + parser.addoption( + "--start-localstack", + action="store_true", + default=False, + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_runtestloop(session: Session): + # avoid starting up localstack if we only collect the tests (-co / --collect-only) + if session.config.option.collectonly: + return + + if not session.config.option.start_localstack: + return + + from localstack.testing.aws.util import is_aws_cloud + + if is_env_true("TEST_SKIP_LOCALSTACK_START"): + LOG.info("TEST_SKIP_LOCALSTACK_START is set, not starting localstack") + return + + if is_aws_cloud(): + if not is_env_true("TEST_FORCE_LOCALSTACK_START"): + LOG.info("Test running against aws, not starting localstack") + return + LOG.info("TEST_FORCE_LOCALSTACK_START is set, a Localstack instance will be created.") + + from localstack.utils.common import safe_requests + + if is_aws_cloud(): + localstack_config.DEFAULT_DELAY = 5 + localstack_config.DEFAULT_MAX_ATTEMPTS = 60 + + # configure + os.environ[ENV_INTERNAL_TEST_RUN] = "1" + safe_requests.verify_ssl = False + + from localstack.runtime import current + + 
_started.set()
+    runtime = current.initialize_runtime()
+    # start runtime asynchronously
+    threading.Thread(target=runtime.run).start()
+
+    # wait for runtime to be ready
+    if not runtime.ready.wait(timeout=120):
+        raise TimeoutError("gave up waiting for runtime to be ready")
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_sessionfinish(session: Session):
+    # last pytest lifecycle hook (before pytest exits)
+    if not _started.is_set():
+        return
+
+    from localstack.runtime import get_current_runtime
+
+    try:
+        get_current_runtime()
+    except ValueError:
+        LOG.warning("Could not access the current runtime in a pytest sessionfinish hook.")
+        return
+
+    get_current_runtime().shutdown()
+    LOG.info("waiting for runtime to stop")
+
+    # wait for runtime to shut down
+    if not get_current_runtime().stopped.wait(timeout=20):
+        LOG.warning("gave up waiting for runtime to stop, returning anyway")
diff --git a/localstack-core/localstack/testing/pytest/marker_report.py b/localstack-core/localstack/testing/pytest/marker_report.py
new file mode 100644
index 0000000000000..03b8bc28f87d2
--- /dev/null
+++ b/localstack-core/localstack/testing/pytest/marker_report.py
@@ -0,0 +1,157 @@
+import dataclasses
+import datetime
+import json
+import os.path
+import time
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+import pytest
+import requests
+
+if TYPE_CHECKING:
+    from _pytest.config import Config, PytestPluginManager
+    from _pytest.config.argparsing import Parser
+
+
+@dataclasses.dataclass
+class MarkerReportEntry:
+    node_id: str
+    file_path: str
+    markers: "list[str]"
+
+
+@dataclasses.dataclass
+class TinybirdReportRow:
+    timestamp: str
+    node_id: str
+    project_name: str
+    # code_owners: str  # comma separated
+    file_path: str  # TODO: recreate data source at some point to remove this?
+    service: str
+    markers: str  # comma separated list
+    aws_marker: str  # TODO: this is a bit redundant but easier for now to process
+    commit_sha: str
+
+
+@dataclasses.dataclass
+class MarkerReport:
+    prefix_filter: str
+    entries: "list[MarkerReportEntry]" = dataclasses.field(default_factory=list)
+    aggregated_report: "dict[str, int]" = dataclasses.field(default_factory=dict)
+
+    def create_aggregated_report(self):
+        for entry in self.entries:
+            for marker in entry.markers:
+                self.aggregated_report.setdefault(marker, 0)
+                self.aggregated_report[marker] += 1
+
+
+@pytest.hookimpl
+def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager"):
+    """
+    Standard usage. Will create a report for all markers under ./target/marker-report-<timestamp>.json
+    $ python -m pytest tests/aws/ --marker-report
+
+    Advanced usage. Will create a report for all markers under ./target2/marker-report-<timestamp>.json
+    $ python -m pytest tests/aws/ --marker-report --marker-report-path target2/
+
+    Advanced usage. Only includes markers with `aws_` prefix in the report.
+ $ python -m pytest tests/aws/ --marker-report --marker-report-prefix "aws_" + """ + # TODO: --marker-report-* flags should imply --marker-report + parser.addoption("--marker-report", action="store_true") + parser.addoption("--marker-report-prefix", action="store") + parser.addoption("--marker-report-path", action="store") + parser.addoption("--marker-report-summary", action="store_true") + parser.addoption("--marker-report-tinybird-upload", action="store_true") + + +def _get_svc_from_node_id(node_id: str) -> str: + if node_id.startswith("tests/aws/services/"): + parts = node_id.split("/") + return parts[3] + return "" + + +def _get_aws_marker_from_markers(markers: "list[str]") -> str: + for marker in markers: + if marker.startswith("aws_"): + return marker + return "" + + +@pytest.hookimpl +def pytest_collection_modifyitems( + session: pytest.Session, config: "Config", items: "list[pytest.Item]" +) -> None: + """Generate a report about the pytest markers used""" + + if not config.option.marker_report: + return + + report = MarkerReport(prefix_filter=config.option.marker_report_prefix or "") + + # go through collected items to collect their markers + for item in items: + markers = set() + for mark in item.iter_markers(): + if mark.name.startswith(report.prefix_filter): + markers.add(mark.name) + + report_entry = MarkerReportEntry( + node_id=item.nodeid, file_path=item.fspath.strpath, markers=list(markers) + ) + report.entries.append(report_entry) + + report.create_aggregated_report() + + if config.option.marker_report_path: + report_directory = Path(config.option.marker_report_path) + if not report_directory.is_absolute(): + report_directory = config.rootpath / report_directory + report_directory.mkdir(parents=True, exist_ok=True) + report_path = report_directory / f"marker-report-{time.time_ns()}.json" + + with open(report_path, "w") as fd: + json.dump(dataclasses.asdict(report), fd, indent=2, sort_keys=True) + + if config.option.marker_report_tinybird_upload: + project_name = os.environ.get("MARKER_REPORT_PROJECT_NAME", "localstack") + datasource_name = "pytest_markers__v0" + token = os.environ.get("MARKER_REPORT_TINYBIRD_TOKEN") + url = f"https://api.tinybird.co/v0/events?name={datasource_name}&token={token}" + + timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + tinybird_data = [ + dataclasses.asdict( + TinybirdReportRow( + timestamp=timestamp, + node_id=x.node_id, + project_name=project_name, + file_path=x.file_path, + service=_get_svc_from_node_id(x.node_id), + markers=",".join(sorted(x.markers)), + aws_marker=_get_aws_marker_from_markers(x.markers), + commit_sha=os.environ.get("MARKER_REPORT_COMMIT_SHA", ""), + ) + ) + for x in report.entries + ] + + data = "\n".join(json.dumps(x) for x in tinybird_data) + + response = requests.post(url, data=data, timeout=20) + + if response.status_code != 202: + print(f"Error while uploading marker report to tinybird: {response.status_code}.") + else: + print("Successfully uploaded marker report to tinybird.") + + if config.option.marker_report_summary: + print("\n=========================") + print("MARKER REPORT (SUMMARY)") + print("=========================") + for k, v in report.aggregated_report.items(): + print(f"{k}: {v}") + print("=========================\n") diff --git a/localstack-core/localstack/testing/pytest/marking.py b/localstack-core/localstack/testing/pytest/marking.py new file mode 100644 index 0000000000000..5afcca6cdc24f --- /dev/null +++ b/localstack-core/localstack/testing/pytest/marking.py @@ -0,0 
+1,210 @@ +""" +Custom pytest mark typings +""" + +import os +from typing import TYPE_CHECKING, Callable, List, Optional + +import pytest +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser + + +class AwsCompatibilityMarkers: + # test has been successfully run against AWS, ideally multiple times + validated = pytest.mark.aws_validated + + # implies aws_validated. test needs additional setup, configuration or some other steps not included in the test setup itself + manual_setup_required = pytest.mark.aws_manual_setup_required + + # fails against AWS but should be made runnable against AWS in the future, basically a TODO + needs_fixing = pytest.mark.aws_needs_fixing + + # only runnable against localstack by design + only_localstack = pytest.mark.aws_only_localstack + + # it's unknown if the test works (reliably) against AWS or not + unknown = pytest.mark.aws_unknown + + +class ParityMarkers: + aws_validated = pytest.mark.aws_validated + only_localstack = pytest.mark.only_localstack + + +class SkipSnapshotVerifyMarker: + def __call__( + self, + *, + paths: "Optional[List[str]]" = None, + condition: "Optional[Callable[..., bool]]" = None, + ): ... + + +class MultiRuntimeMarker: + def __call__(self, *, scenario: str, runtimes: Optional[List[str]] = None): ... + + +class SnapshotMarkers: + skip_snapshot_verify: SkipSnapshotVerifyMarker = pytest.mark.skip_snapshot_verify + + +class Markers: + aws = AwsCompatibilityMarkers + parity = ParityMarkers # TODO: in here for compatibility's sake. Remove when -ext has been refactored to use @markers.aws.* + snapshot = SnapshotMarkers + + multiruntime: MultiRuntimeMarker = pytest.mark.multiruntime + + # test selection + acceptance_test = pytest.mark.acceptance_test + skip_offline = pytest.mark.skip_offline + only_on_amd64 = pytest.mark.only_on_amd64 + only_on_arm64 = pytest.mark.only_on_arm64 + resource_heavy = pytest.mark.resource_heavy + only_in_docker = pytest.mark.only_in_docker + # Tests to execute when updating snapshots for a new Lambda runtime + lambda_runtime_update = pytest.mark.lambda_runtime_update + + +# pytest plugin +if TYPE_CHECKING: + from _pytest.config import Config + + +@pytest.hookimpl +def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager): + parser.addoption( + "--offline", + action="store_true", + default=False, + help="test run will not have an internet connection", + ) + + +def enforce_single_aws_marker(items: List[pytest.Item]): + """Enforce that each test has exactly one aws compatibility marker""" + marker_errors = [] + + for item in items: + # we should only concern ourselves with tests in tests/aws/ + if "tests/aws" not in item.fspath.dirname: + continue + + aws_markers = list() + for mark in item.iter_markers(): + if mark.name.startswith("aws_"): + aws_markers.append(mark.name) + + if len(aws_markers) > 1: + marker_errors.append(f"{item.nodeid}: Too many aws markers specified: {aws_markers}") + elif len(aws_markers) == 0: + marker_errors.append( + f"{item.nodeid}: Missing aws marker. Specify at least one marker, e.g. 
@markers.aws.validated" + ) + + if marker_errors: + raise pytest.UsageError(*marker_errors) + + +def filter_by_markers(config: "Config", items: List[pytest.Item]): + """Filter tests by markers.""" + from localstack import config as localstack_config + from localstack.utils.bootstrap import in_ci + from localstack.utils.platform import Arch, get_arch + + is_offline = config.getoption("--offline") + is_in_docker = localstack_config.is_in_docker + is_in_ci = in_ci() + is_amd64 = get_arch() == Arch.amd64 + is_arm64 = get_arch() == Arch.arm64 + # Inlining `is_aws_cloud()` here because localstack.testing.aws.util imports boto3, + # which is not installed for the CLI tests + is_real_aws = os.environ.get("TEST_TARGET", "") == "AWS_CLOUD" + + if is_real_aws: + # Do not skip any tests if they are executed against real AWS + return + + skip_offline = pytest.mark.skip( + reason="Test cannot be executed offline / in a restricted network environment. " + "Add network connectivity and remove the --offline option when running " + "the test." + ) + only_in_docker = pytest.mark.skip( + reason="Test requires execution inside Docker (e.g., to install system packages)" + ) + only_on_amd64 = pytest.mark.skip( + reason="Test uses features that are currently only supported for AMD64. Skipping in CI." + ) + only_on_arm64 = pytest.mark.skip( + reason="Test uses features that are currently only supported for ARM64. Skipping in CI." + ) + + for item in items: + if is_offline and "skip_offline" in item.keywords: + item.add_marker(skip_offline) + if not is_in_docker and "only_in_docker" in item.keywords: + item.add_marker(only_in_docker) + if is_in_ci and not is_amd64 and "only_on_amd64" in item.keywords: + item.add_marker(only_on_amd64) + if is_in_ci and not is_arm64 and "only_on_arm64" in item.keywords: + item.add_marker(only_on_arm64) + + +@pytest.hookimpl +def pytest_collection_modifyitems( + session: pytest.Session, config: "Config", items: List[pytest.Item] +) -> None: + enforce_single_aws_marker(items) + filter_by_markers(config, items) + + +@pytest.hookimpl +def pytest_configure(config): + config.addinivalue_line( + "markers", + "skip_offline: mark the test to be skipped when the tests are run offline " + "(this test explicitly / semantically needs an internet connection)", + ) + config.addinivalue_line( + "markers", + "only_on_amd64: mark the test as running only in an amd64 (i.e., x86_64) environment", + ) + config.addinivalue_line( + "markers", + "only_on_arm64: mark the test as running only in an arm64 environment", + ) + config.addinivalue_line( + "markers", + "only_in_docker: mark the test as running only in Docker (e.g., requires installation of system packages)", + ) + config.addinivalue_line( + "markers", + "resource_heavy: mark the test as resource-heavy, e.g., downloading very large external dependencies, " + "or requiring high amount of RAM/CPU (can be systematically sampled/optimized in the future)", + ) + config.addinivalue_line( + "markers", + "aws_validated: mark the test as validated / verified against real AWS", + ) + config.addinivalue_line( + "markers", + "aws_only_localstack: mark the test as inherently incompatible with AWS, e.g. when testing localstack-specific features", + ) + config.addinivalue_line( + "markers", + "aws_needs_fixing: test fails against AWS but it shouldn't. Might need refactoring, additional permissions, etc.", + ) + config.addinivalue_line( + "markers", + "aws_manual_setup_required: validated against real AWS but needs additional setup or account configuration (e.g. 
increased service quotas)", + ) + config.addinivalue_line( + "markers", + "aws_unknown: it's unknown if the test works (reliably) against AWS or not", + ) + config.addinivalue_line( + "markers", + "multiruntime: parametrize test against multiple Lambda runtimes", + ) diff --git a/localstack/testing/pytest/metric_collection.py b/localstack-core/localstack/testing/pytest/metric_collection.py similarity index 94% rename from localstack/testing/pytest/metric_collection.py rename to localstack-core/localstack/testing/pytest/metric_collection.py index 1e1bfec35b3f5..c480a5330ac29 100644 --- a/localstack/testing/pytest/metric_collection.py +++ b/localstack-core/localstack/testing/pytest/metric_collection.py @@ -12,7 +12,7 @@ from localstack.aws.handlers.metric_handler import Metric, MetricHandler from localstack.utils.strings import short_uid -BASE_PATH = os.path.join(os.path.dirname(__file__), "../../../target/metric_reports") +BASE_PATH = os.path.join(os.path.dirname(__file__), "../../../../target/metric_reports") FNAME_RAW_DATA_CSV = os.path.join( BASE_PATH, f"metric-report-raw-data-{datetime.utcnow().strftime('%Y-%m-%d__%H_%M_%S')}-{short_uid()}.csv", @@ -38,7 +38,7 @@ def pytest_sessionstart(session: "Session") -> None: writer.writerow(Metric.RAW_DATA_HEADER) -@pytest.hookimpl() +@pytest.hookimpl(trylast=True) def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None: node_id = item.nodeid xfail = False diff --git a/localstack-core/localstack/testing/pytest/path_filter.py b/localstack-core/localstack/testing/pytest/path_filter.py new file mode 100644 index 0000000000000..d3e13c0016143 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/path_filter.py @@ -0,0 +1,91 @@ +""" +A pytest plugin that limits test selection based on an input file. +The input file is a plaintext file with one subpath entry per line. +After gathering all potential tests, the candidates are filtered by matching with these entries. +At least one entry has to match for the test to be included in the test run. + +Example usage: `pytest --path-filter=test_selection.txt` + +File content of `test_selection.txt`: + +``` +tests/mymodule/ +tests/myothermodule/test_concrete_thing.py +``` + +There are also special values that represent +a) SENTINEL_ALL_TESTS: change is not classified (=> run everything) +b) SENTINEL_NO_TEST: change that is explicitly classified but doesn't require running a test + +If all detected changes are in category b) there will be NO tests executed (!). +If any change in category a) is detected, ALL tests will be executed. 
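For illustration, a selection file that mixes a concrete path entry with a sentinel could look as follows (the sentinel is shown symbolically here; at runtime the line contains whatever literal value `localstack.testing.testselection.matching` defines for it, and the concrete path is an invented example):

```
tests/aws/services/s3/
SENTINEL_NO_TEST
```

Since a concrete path entry is present, every test whose path contains `tests/aws/services/s3/` is selected; the run only ends up empty when all lines are the no-test sentinel.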
+ +""" + +import os + +import pytest +from _pytest.main import Session + +from localstack.testing.testselection.matching import SENTINEL_ALL_TESTS, SENTINEL_NO_TEST + + +def pytest_addoption(parser): + parser.addoption( + "--path-filter", + action="store", + help="Path to the file containing path substrings for test selection", + ) + + +# tryfirst would IMO make the most sense since I don't see a reason why other plugins should operate on the other tests at all +# the pytest-split plugin is executed with trylast=True, so it should come after this one +@pytest.hookimpl(tryfirst=True) +def pytest_collection_modifyitems(config, items): + pathfilter_file = config.getoption("--path-filter") + if not pathfilter_file: + return + + if not os.path.exists(pathfilter_file): + raise ValueError(f"Pathfilter file does not exist: {pathfilter_file}") + + with open(pathfilter_file, "r") as f: + pathfilter_substrings = [line.strip() for line in f.readlines() if line.strip()] + + if not pathfilter_substrings: + return # No filtering if the list is empty => full test suite + + # this is technically redundant since we can just add "tests/" instead as a line item. still prefer to be explicit here + if any(p == SENTINEL_ALL_TESTS for p in pathfilter_substrings): + return # at least one change should lead to a full run + + # technically doesn't even need to be checked since the loop below will take care of it + if all(p == SENTINEL_NO_TEST for p in pathfilter_substrings): + items[:] = [] + # we only got sentinal values that signal a change that doesn't need to be tested, so delesect all + config.hook.pytest_deselected(items=items) + return + + # Filter tests based on the path substrings + selected = [] + deselected = [] + for item in items: + if any(substr in item.fspath.strpath for substr in pathfilter_substrings): + selected.append(item) + else: + deselected.append(item) + + # Update list of test items to only those selected + items[:] = selected + config.hook.pytest_deselected(items=deselected) + + +def pytest_sessionfinish(session: Session, exitstatus): + """ + Tests might be split and thus there can be splits which don't select any tests right now + + This is only applied if we're actually using the plugin + """ + pathfilter_file = session.config.getoption("--path-filter") + if pathfilter_file and exitstatus == 5: + session.exitstatus = 0 diff --git a/localstack-core/localstack/testing/pytest/stepfunctions/__init__.py b/localstack-core/localstack/testing/pytest/stepfunctions/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/localstack-core/localstack/testing/pytest/stepfunctions/fixtures.py b/localstack-core/localstack/testing/pytest/stepfunctions/fixtures.py new file mode 100644 index 0000000000000..13a134d269e85 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/stepfunctions/fixtures.py @@ -0,0 +1,869 @@ +import json +import logging +import os +import shutil +import tempfile +from typing import Final + +import pytest +from botocore.config import Config +from localstack_snapshot.snapshots.transformer import ( + JsonpathTransformer, + RegexTransformer, +) + +from localstack.aws.api.stepfunctions import StateMachineType +from localstack.testing.aws.util import is_aws_cloud +from localstack.testing.pytest.stepfunctions.utils import await_execution_success +from localstack.utils.strings import short_uid + +LOG = logging.getLogger(__name__) + + +@pytest.fixture +def sfn_snapshot(snapshot): + snapshot.add_transformers_list(snapshot.transform.stepfunctions_api()) + return 
snapshot + + +@pytest.fixture +def sfn_batch_snapshot(sfn_snapshot): + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..JobDefinition", replacement="job-definition") + ) + sfn_snapshot.add_transformer(JsonpathTransformer(jsonpath="$..JobName", replacement="job-name")) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..JobQueue", replacement="job-queue") + ) + sfn_snapshot.add_transformer(JsonpathTransformer(jsonpath="$..roleArn", replacement="role-arn")) + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..x-amz-apigw-id", replacement="x-amz-apigw-id", replace_reference=False + ) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..X-Amzn-Trace-Id", replacement="X-Amzn-Trace-Id", replace_reference=False + ) + ) + sfn_snapshot.add_transformer(JsonpathTransformer(jsonpath="$..TaskArn", replacement="task-arn")) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..ExecutionRoleArn", replacement="execution-role-arn") + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..LogStreamName", replacement="log-stream-name") + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..StartedAt", replacement="time", replace_reference=False) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..StoppedAt", replacement="time", replace_reference=False) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..CreatedAt", replacement="time", replace_reference=False) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..PrivateIpv4Address", + replacement="private-ipv4-address", + replace_reference=False, + ) + ) + return sfn_snapshot + + +@pytest.fixture +def sfn_ecs_snapshot(sfn_snapshot): + sfn_snapshot.add_transformer(JsonpathTransformer(jsonpath="$..TaskArn", replacement="task_arn")) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..ContainerArn", replacement="container_arn") + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..PrivateIpv4Address", replacement="private_ipv4_address") + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..RuntimeId", replacement="runtime_id") + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..ImageDigest", replacement="image_digest") + ) + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..PullStartedAt", replacement="time", replace_reference=False + ) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..PullStoppedAt", replacement="time", replace_reference=False + ) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..StartedAt", replacement="time", replace_reference=False) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..StoppedAt", replacement="time", replace_reference=False) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..StoppingAt", replacement="time", replace_reference=False) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer(jsonpath="$..CreatedAt", replacement="time", replace_reference=False) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..ExecutionStoppedAt", replacement="time", replace_reference=False + ) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..ConnectivityAt", replacement="time", replace_reference=False + ) + ) + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..PullStartedAt", replacement="time", replace_reference=False + ) + ) + 
sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..PullStoppedAt", replacement="time", replace_reference=False + ) + ) + sfn_snapshot.add_transformer(RegexTransformer("subnet-[0-9a-zA-Z]+", "subnet_value")) + sfn_snapshot.add_transformer(RegexTransformer("sg-[0-9a-zA-Z]+", "sg_value")) + sfn_snapshot.add_transformer(RegexTransformer("eni-[0-9a-zA-Z]+", "eni_value")) + sfn_snapshot.add_transformer(RegexTransformer("ip-[0-9-]+", "ip_value")) + sfn_snapshot.add_transformer( + RegexTransformer(":".join(["[0-9a-z][0-9a-z]?[0-9a-z]?"] * 4), "ip_value") + ) + sfn_snapshot.add_transformer(RegexTransformer(":".join(["[0-9a-z][0-9a-z]+"] * 6), "mac_value")) + return sfn_snapshot + + +@pytest.fixture +def aws_client_no_sync_prefix(aws_client_factory): + # For StartSyncExecution and TestState calls, boto will prepend "sync-" to the endpoint string. + # As we operate on localhost, this function creates a new stepfunctions client with that functionality disabled. + return aws_client_factory(config=Config(inject_host_prefix=is_aws_cloud())) + + +@pytest.fixture +def mock_config_file(): + tmp_dir = tempfile.mkdtemp() + file_path = os.path.join(tmp_dir, "MockConfigFile.json") + + def write_json_to_mock_file(mock_config): + with open(file_path, "w") as df: + json.dump(mock_config, df) # noqa + df.flush() + return file_path + + try: + yield write_json_to_mock_file + finally: + try: + os.remove(file_path) + except Exception as ex: + LOG.error("Error removing temporary MockConfigFile.json: %s", ex) + finally: + shutil.rmtree( + tmp_dir, + ignore_errors=True, + onerror=lambda _, path, exc_info: LOG.error( + "Error removing temporary MockConfigFile.json: %s, %s", path, exc_info + ), + ) + + +@pytest.fixture +def create_state_machine_iam_role(cleanups, create_state_machine): + def _create(target_aws_client): + iam_client = target_aws_client.iam + stepfunctions_client = target_aws_client.stepfunctions + + role_name = f"test-sfn-role-{short_uid()}" + policy_name = f"test-sfn-policy-{short_uid()}" + role = iam_client.create_role( + RoleName=role_name, + AssumeRolePolicyDocument=json.dumps( + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"Service": ["states.amazonaws.com"]}, + "Action": ["sts:AssumeRole"], + } + ], + } + ), + ) + cleanups.append(lambda: iam_client.delete_role(RoleName=role_name)) + role_arn = role["Role"]["Arn"] + + policy = iam_client.create_policy( + PolicyName=policy_name, + PolicyDocument=json.dumps( + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["*"], + "Resource": ["*"], + } + ], + } + ), + ) + cleanups.append(lambda: iam_client.delete_policy(PolicyArn=policy["Policy"]["Arn"])) + iam_client.attach_role_policy(RoleName=role_name, PolicyArn=policy["Policy"]["Arn"]) + cleanups.append( + lambda: iam_client.detach_role_policy( + RoleName=role_name, PolicyArn=policy["Policy"]["Arn"] + ) + ) + + def _wait_sfn_can_assume_role(): + sm_name = f"test-wait-sfn-can-assume-role-{short_uid()}" + sm_def = { + "Comment": "_wait_sfn_can_assume_role", + "StartAt": "PullAssumeRole", + "States": { + "PullAssumeRole": { + "Type": "Task", + "Parameters": {}, + "Resource": "arn:aws:states:::aws-sdk:s3:listBuckets", + "Catch": [ + { + "ErrorEquals": ["States.TaskFailed"], + "Next": "WaitAndPull", + } + ], + "End": True, + }, + "WaitAndPull": {"Type": "Wait", "Seconds": 5, "Next": "PullAssumeRole"}, + }, + } + creation_resp = create_state_machine( + target_aws_client, name=sm_name, definition=json.dumps(sm_def), 
roleArn=role_arn + ) + state_machine_arn = creation_resp["stateMachineArn"] + + exec_resp = stepfunctions_client.start_execution( + stateMachineArn=state_machine_arn, input="{}" + ) + execution_arn = exec_resp["executionArn"] + + await_execution_success( + stepfunctions_client=stepfunctions_client, execution_arn=execution_arn + ) + + stepfunctions_client.delete_state_machine(stateMachineArn=state_machine_arn) + + if is_aws_cloud(): + _wait_sfn_can_assume_role() + + return role_arn + + return _create + + +@pytest.fixture +def create_state_machine(): + created_state_machine_references = list() + + def _create_state_machine(target_aws_client, **kwargs): + sfn_client = target_aws_client.stepfunctions + create_output = sfn_client.create_state_machine(**kwargs) + create_output_arn = create_output["stateMachineArn"] + created_state_machine_references.append( + (create_output_arn, kwargs.get("type", StateMachineType.STANDARD), sfn_client) + ) + return create_output + + yield _create_state_machine + + # Delete all state machines, attempting to stop all running executions of STANDARD state machines, + # as other types, such as EXPRESS, cannot be manually stopped. + for arn, typ, client in created_state_machine_references: + try: + if typ == StateMachineType.STANDARD: + executions = client.list_executions(stateMachineArn=arn) + for execution in executions["executions"]: + client.stop_execution(executionArn=execution["executionArn"]) + client.delete_state_machine(stateMachineArn=arn) + except Exception as ex: + LOG.debug("Unable to delete state machine '%s' during cleanup: %s", arn, ex) + + +@pytest.fixture +def create_state_machine_alias(): + state_machine_alias_arn_and_client = list() + + def _create_state_machine_alias(target_aws_client, **kwargs): + step_functions_client = target_aws_client.stepfunctions + create_state_machine_response = step_functions_client.create_state_machine_alias(**kwargs) + state_machine_alias_arn_and_client.append( + (create_state_machine_response["stateMachineAliasArn"], step_functions_client) + ) + return create_state_machine_response + + yield _create_state_machine_alias + + for state_machine_alias_arn, sfn_client in state_machine_alias_arn_and_client: + try: + sfn_client.delete_state_machine_alias(stateMachineAliasArn=state_machine_alias_arn) + except Exception as ex: + LOG.debug( + "Unable to delete the state machine alias '%s' during cleanup due to '%s'", + state_machine_alias_arn, + ex, + ) + + +@pytest.fixture +def create_activity(aws_client): + activities_arns: Final[list[str]] = list() + + def _create_activity(**kwargs): + create_output = aws_client.stepfunctions.create_activity(**kwargs) + create_output_arn = create_output["activityArn"] + activities_arns.append(create_output_arn) + return create_output + + yield _create_activity + + for activity_arn in activities_arns: + try: + aws_client.stepfunctions.delete_activity(activityArn=activity_arn) + except Exception: + LOG.debug("Unable to delete Activity '%s' during cleanup.", activity_arn) + + +@pytest.fixture +def sqs_send_task_success_state_machine( + aws_client, create_state_machine, create_state_machine_iam_role +): + def _create_state_machine(sqs_queue_url): + snf_role_arn = create_state_machine_iam_role(aws_client) + sm_name: str = f"sqs_send_task_success_state_machine_{short_uid()}" + + template = { + "Comment": "sqs_success_on_task_token", + "StartAt": "Iterate", + "States": { + "Iterate": { + "Type": "Pass", + "Parameters": {"Count.$": "States.MathAdd($.Iterator.Count, -1)"}, + "ResultPath": "$.Iterator", + 
"Next": "IterateStep", + }, + "IterateStep": { + "Type": "Choice", + "Choices": [ + { + "Variable": "$.Iterator.Count", + "NumericLessThanEquals": 0, + "Next": "NoMoreCycles", + } + ], + "Default": "WaitAndReceive", + }, + "WaitAndReceive": {"Type": "Wait", "Seconds": 1, "Next": "Receive"}, + "Receive": { + "Type": "Task", + "Parameters": {"QueueUrl.$": "$.QueueUrl"}, + "Resource": "arn:aws:states:::aws-sdk:sqs:receiveMessage", + "ResultPath": "$.SQSOutput", + "Next": "CheckMessages", + }, + "CheckMessages": { + "Type": "Choice", + "Choices": [ + { + "Variable": "$.SQSOutput.Messages", + "IsPresent": True, + "Next": "SendSuccesses", + } + ], + "Default": "Iterate", + }, + "SendSuccesses": { + "Type": "Map", + "InputPath": "$.SQSOutput.Messages", + "ItemProcessor": { + "ProcessorConfig": {"Mode": "INLINE"}, + "StartAt": "ParseBody", + "States": { + "ParseBody": { + "Type": "Pass", + "Parameters": {"Body.$": "States.StringToJson($.Body)"}, + "Next": "Send", + }, + "Send": { + "Type": "Task", + "Resource": "arn:aws:states:::aws-sdk:sfn:sendTaskSuccess", + "Parameters": { + "Output.$": "States.JsonToString($.Body.Message)", + "TaskToken.$": "$.Body.TaskToken", + }, + "End": True, + }, + }, + }, + "ResultPath": None, + "Next": "Iterate", + }, + "NoMoreCycles": {"Type": "Pass", "End": True}, + }, + } + + creation_resp = create_state_machine( + aws_client, name=sm_name, definition=json.dumps(template), roleArn=snf_role_arn + ) + state_machine_arn = creation_resp["stateMachineArn"] + + aws_client.stepfunctions.start_execution( + stateMachineArn=state_machine_arn, + input=json.dumps({"QueueUrl": sqs_queue_url, "Iterator": {"Count": 300}}), + ) + + return _create_state_machine + + +@pytest.fixture +def sqs_send_task_failure_state_machine( + aws_client, create_state_machine, create_state_machine_iam_role +): + def _create_state_machine(sqs_queue_url): + snf_role_arn = create_state_machine_iam_role(aws_client) + sm_name: str = f"sqs_send_task_failure_state_machine_{short_uid()}" + + template = { + "Comment": "sqs_failure_on_task_token", + "StartAt": "Iterate", + "States": { + "Iterate": { + "Type": "Pass", + "Parameters": {"Count.$": "States.MathAdd($.Iterator.Count, -1)"}, + "ResultPath": "$.Iterator", + "Next": "IterateStep", + }, + "IterateStep": { + "Type": "Choice", + "Choices": [ + { + "Variable": "$.Iterator.Count", + "NumericLessThanEquals": 0, + "Next": "NoMoreCycles", + } + ], + "Default": "WaitAndReceive", + }, + "WaitAndReceive": {"Type": "Wait", "Seconds": 1, "Next": "Receive"}, + "Receive": { + "Type": "Task", + "Parameters": {"QueueUrl.$": "$.QueueUrl"}, + "Resource": "arn:aws:states:::aws-sdk:sqs:receiveMessage", + "ResultPath": "$.SQSOutput", + "Next": "CheckMessages", + }, + "CheckMessages": { + "Type": "Choice", + "Choices": [ + { + "Variable": "$.SQSOutput.Messages", + "IsPresent": True, + "Next": "SendFailure", + } + ], + "Default": "Iterate", + }, + "SendFailure": { + "Type": "Map", + "InputPath": "$.SQSOutput.Messages", + "ItemProcessor": { + "ProcessorConfig": {"Mode": "INLINE"}, + "StartAt": "ParseBody", + "States": { + "ParseBody": { + "Type": "Pass", + "Parameters": {"Body.$": "States.StringToJson($.Body)"}, + "Next": "Send", + }, + "Send": { + "Type": "Task", + "Resource": "arn:aws:states:::aws-sdk:sfn:sendTaskFailure", + "Parameters": { + "Error": "Failure error", + "Cause": "Failure cause", + "TaskToken.$": "$.Body.TaskToken", + }, + "End": True, + }, + }, + }, + "ResultPath": None, + "Next": "Iterate", + }, + "NoMoreCycles": {"Type": "Pass", "End": True}, + }, + } + + 
creation_resp = create_state_machine( + aws_client, name=sm_name, definition=json.dumps(template), roleArn=snf_role_arn + ) + state_machine_arn = creation_resp["stateMachineArn"] + + aws_client.stepfunctions.start_execution( + stateMachineArn=state_machine_arn, + input=json.dumps({"QueueUrl": sqs_queue_url, "Iterator": {"Count": 300}}), + ) + + return _create_state_machine + + +@pytest.fixture +def sqs_send_heartbeat_and_task_success_state_machine( + aws_client, create_state_machine, create_state_machine_iam_role +): + def _create_state_machine(sqs_queue_url): + snf_role_arn = create_state_machine_iam_role(aws_client) + sm_name: str = f"sqs_send_heartbeat_and_task_success_state_machine_{short_uid()}" + + template = { + "Comment": "SQS_HEARTBEAT_SUCCESS_ON_TASK_TOKEN", + "StartAt": "Iterate", + "States": { + "Iterate": { + "Type": "Pass", + "Parameters": {"Count.$": "States.MathAdd($.Iterator.Count, -1)"}, + "ResultPath": "$.Iterator", + "Next": "IterateStep", + }, + "IterateStep": { + "Type": "Choice", + "Choices": [ + { + "Variable": "$.Iterator.Count", + "NumericLessThanEquals": 0, + "Next": "NoMoreCycles", + } + ], + "Default": "WaitAndReceive", + }, + "WaitAndReceive": {"Type": "Wait", "Seconds": 1, "Next": "Receive"}, + "Receive": { + "Type": "Task", + "Parameters": {"QueueUrl.$": "$.QueueUrl"}, + "Resource": "arn:aws:states:::aws-sdk:sqs:receiveMessage", + "ResultPath": "$.SQSOutput", + "Next": "CheckMessages", + }, + "CheckMessages": { + "Type": "Choice", + "Choices": [ + { + "Variable": "$.SQSOutput.Messages", + "IsPresent": True, + "Next": "SendSuccesses", + } + ], + "Default": "Iterate", + }, + "SendSuccesses": { + "Type": "Map", + "InputPath": "$.SQSOutput.Messages", + "ItemProcessor": { + "ProcessorConfig": {"Mode": "INLINE"}, + "StartAt": "ParseBody", + "States": { + "ParseBody": { + "Type": "Pass", + "Parameters": {"Body.$": "States.StringToJson($.Body)"}, + "Next": "WaitBeforeHeartbeat", + }, + "WaitBeforeHeartbeat": { + "Type": "Wait", + "Seconds": 5, + "Next": "SendHeartbeat", + }, + "SendHeartbeat": { + "Type": "Task", + "Resource": "arn:aws:states:::aws-sdk:sfn:sendTaskHeartbeat", + "Parameters": {"TaskToken.$": "$.Body.TaskToken"}, + "ResultPath": None, + "Next": "SendSuccess", + }, + "SendSuccess": { + "Type": "Task", + "Resource": "arn:aws:states:::aws-sdk:sfn:sendTaskSuccess", + "Parameters": { + "Output.$": "States.JsonToString($.Body.Message)", + "TaskToken.$": "$.Body.TaskToken", + }, + "End": True, + }, + }, + }, + "ResultPath": None, + "Next": "Iterate", + }, + "NoMoreCycles": {"Type": "Pass", "End": True}, + }, + } + + creation_resp = create_state_machine( + aws_client, name=sm_name, definition=json.dumps(template), roleArn=snf_role_arn + ) + state_machine_arn = creation_resp["stateMachineArn"] + + aws_client.stepfunctions.start_execution( + stateMachineArn=state_machine_arn, + input=json.dumps({"QueueUrl": sqs_queue_url, "Iterator": {"Count": 300}}), + ) + + return _create_state_machine + + +@pytest.fixture +def sfn_activity_consumer(aws_client, create_state_machine, create_state_machine_iam_role): + def _create_state_machine(template, activity_arn): + snf_role_arn = create_state_machine_iam_role(aws_client) + sm_name: str = f"activity_send_task_failure_on_task_{short_uid()}" + definition = json.dumps(template) + + creation_resp = create_state_machine( + aws_client, name=sm_name, definition=definition, roleArn=snf_role_arn + ) + state_machine_arn = creation_resp["stateMachineArn"] + + aws_client.stepfunctions.start_execution( + 
stateMachineArn=state_machine_arn, + input=json.dumps({"ActivityArn": activity_arn}), + ) + + return _create_state_machine + + +@pytest.fixture +def events_to_sqs_queue(events_create_rule, sqs_create_queue, sqs_get_queue_arn, aws_client): + def _setup(event_pattern): + queue_name = f"test-queue-{short_uid()}" + rule_name = f"test-rule-{short_uid()}" + target_id = f"test-target-{short_uid()}" + + rule_arn = events_create_rule( + Name=rule_name, EventBusName="default", EventPattern=event_pattern + ) + + queue_url = sqs_create_queue(QueueName=queue_name) + queue_arn = sqs_get_queue_arn(queue_url) + queue_policy = { + "Statement": [ + { + "Sid": "StepFunctionsEventRule", + "Resource": queue_arn, + "Action": "sqs:SendMessage", + "Principal": {"Service": "events.amazonaws.com"}, + "Condition": {"ArnEquals": {"aws:SourceArn": rule_arn}}, + "Effect": "Allow", + } + ] + } + aws_client.sqs.set_queue_attributes( + QueueUrl=queue_url, + Attributes={"Policy": json.dumps(queue_policy), "ReceiveMessageWaitTimeSeconds": "1"}, + ) + + aws_client.events.put_targets(Rule=rule_name, Targets=[{"Id": target_id, "Arn": queue_arn}]) + + return queue_url + + return _setup + + +@pytest.fixture +def sfn_events_to_sqs_queue(events_to_sqs_queue): + def _create(state_machine_arn: str) -> str: + event_pattern = { + "source": ["aws.states"], + "detail": { + "stateMachineArn": [state_machine_arn], + }, + } + return events_to_sqs_queue(event_pattern=event_pattern) + + return _create + + +@pytest.fixture +def sfn_glue_create_job(aws_client, create_role, create_policy, wait_and_assume_role): + job_names = [] + + def _execute(**kwargs): + job_name = f"glue-job-{short_uid()}" + + assume_role_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"AWS": "*"}, + "Action": "sts:AssumeRole", + } + ], + } + policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["*"], + "Resource": "*", + }, + ], + } + + role = create_role(AssumeRolePolicyDocument=json.dumps(assume_role_policy_document)) + role_name = role["Role"]["RoleName"] + role_arn = role["Role"]["Arn"] + + policy = create_policy(PolicyDocument=json.dumps(policy_document)) + policy_arn = policy["Policy"]["Arn"] + + aws_client.iam.attach_role_policy( + RoleName=role_name, + PolicyArn=policy_arn, + ) + + wait_and_assume_role(role_arn) + + aws_client.glue.create_job(Name=job_name, Role=role_arn, **kwargs) + + job_names.append(job_name) + return job_name + + yield _execute + + for job_name in job_names: + try: + aws_client.glue.delete_job(JobName=job_name) + except Exception as ex: + # TODO: the glue provider should not fail on deletion of deleted job, however this is currently the case. 
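The `events_to_sqs_queue` fixture above is the generic observation channel for EventBridge assertions: it wires a rule on the default bus to a disposable queue with the matching resource policy. A hedged usage sketch (fixture and client names as defined in this file; the event pattern and assertions are illustrative):

```
import json


def test_capture_sfn_events(events_to_sqs_queue, aws_client):
    # route all Step Functions status-change events into a throwaway queue
    queue_url = events_to_sqs_queue(event_pattern={"source": ["aws.states"]})
    response = aws_client.sqs.receive_message(QueueUrl=queue_url, WaitTimeSeconds=5)
    bodies = [json.loads(m["Body"]) for m in response.get("Messages", [])]
    # each body is an EventBridge envelope; the execution details live under "detail"
```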
+ LOG.warning("Could not delete job '%s': %s", job_name, ex) + + +@pytest.fixture +def sfn_create_log_group(aws_client, snapshot): + log_group_names = [] + + def _create() -> str: + log_group_name = f"/aws/vendedlogs/states/sfn-test-group-{short_uid()}" + snapshot.add_transformer(RegexTransformer(log_group_name, "log_group_name")) + aws_client.logs.create_log_group(logGroupName=log_group_name) + log_group_names.append(log_group_name) + + return log_group_name + + yield _create + + for log_group_name in log_group_names: + try: + aws_client.logs.delete_log_group(logGroupName=log_group_name) + except Exception: + LOG.debug("Cannot delete log group %s", log_group_name) + + +@pytest.fixture +def create_cross_account_admin_role_and_policy(create_state_machine, create_state_machine_iam_role): + created = list() + + def _create_role_and_policy(trusting_aws_client, trusted_aws_client, trusted_account_id) -> str: + trusting_iam_client = trusting_aws_client.iam + + role_name = f"admin-test-role-cross-account-{short_uid()}" + policy_name = f"admin-test-policy-cross-account-{short_uid()}" + + trust_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"AWS": f"arn:aws:iam::{trusted_account_id}:root"}, + "Action": "sts:AssumeRole", + } + ], + } + + create_role_response = trusting_iam_client.create_role( + RoleName=role_name, + AssumeRolePolicyDocument=json.dumps(trust_policy), + ) + role_arn = create_role_response["Role"]["Arn"] + + policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "*", + "Resource": "*", + } + ], + } + + trusting_iam_client.put_role_policy( + RoleName=role_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) + ) + + def _wait_sfn_can_assume_admin_role(): + trusted_stepfunctions_client = trusted_aws_client.stepfunctions + sm_name = f"test-wait-sfn-can-assume-cross-account-admin-role-{short_uid()}" + sm_role = create_state_machine_iam_role(trusted_aws_client) + sm_def = { + "StartAt": "PullAssumeRole", + "States": { + "PullAssumeRole": { + "Type": "Task", + "Parameters": {}, + "Resource": "arn:aws:states:::aws-sdk:s3:listBuckets", + "Credentials": {"RoleArn": role_arn}, + "Retry": [ + { + "ErrorEquals": ["States.ALL"], + "IntervalSeconds": 2, + "MaxAttempts": 60, + } + ], + "End": True, + } + }, + } + creation_response = create_state_machine( + trusted_aws_client, name=sm_name, definition=json.dumps(sm_def), roleArn=sm_role + ) + state_machine_arn = creation_response["stateMachineArn"] + + exec_resp = trusted_stepfunctions_client.start_execution( + stateMachineArn=state_machine_arn, input="{}" + ) + execution_arn = exec_resp["executionArn"] + + await_execution_success( + stepfunctions_client=trusted_stepfunctions_client, execution_arn=execution_arn + ) + + trusted_stepfunctions_client.delete_state_machine(stateMachineArn=state_machine_arn) + + if is_aws_cloud(): + _wait_sfn_can_assume_admin_role() + + return role_arn + + yield _create_role_and_policy + + for aws_client, role_name, policy_name in created: + aws_client.iam.delete_role_policy(RoleName=role_name, PolicyName=policy_name) + aws_client.iam.delete_role(RoleName=role_name) diff --git a/localstack-core/localstack/testing/pytest/stepfunctions/utils.py b/localstack-core/localstack/testing/pytest/stepfunctions/utils.py new file mode 100644 index 0000000000000..3b2925e5a9353 --- /dev/null +++ b/localstack-core/localstack/testing/pytest/stepfunctions/utils.py @@ -0,0 +1,923 @@ +import json +import logging +from typing import 
Callable, Final, Optional + +from botocore.exceptions import ClientError +from jsonpath_ng.ext import parse +from localstack_snapshot.snapshots.transformer import ( + JsonpathTransformer, + RegexTransformer, + TransformContext, +) + +from localstack import config +from localstack.aws.api.stepfunctions import ( + Arn, + CloudWatchLogsLogGroup, + CreateStateMachineOutput, + Definition, + ExecutionStatus, + HistoryEventList, + HistoryEventType, + LogDestination, + LoggingConfiguration, + LogLevel, + LongArn, + StateMachineType, +) +from localstack.services.stepfunctions.asl.eval.event.logging import is_logging_enabled_for +from localstack.services.stepfunctions.asl.utils.encoding import to_json_str +from localstack.services.stepfunctions.asl.utils.json_path import NoSuchJsonPathError, extract_json +from localstack.testing.aws.util import is_aws_cloud +from localstack.utils.strings import short_uid +from localstack.utils.sync import poll_condition + +LOG = logging.getLogger(__name__) + + +# For EXPRESS state machines, the deletion will happen eventually (usually less than a minute). +# Running executions may emit logs after DeleteStateMachine API is called. +_DELETION_TIMEOUT_SECS: Final[int] = 120 +_SAMPLING_INTERVAL_SECONDS_AWS_CLOUD: Final[int] = 1 +_SAMPLING_INTERVAL_SECONDS_LOCALSTACK: Final[float] = 0.2 + + +def _get_sampling_interval_seconds() -> int | float: + return ( + _SAMPLING_INTERVAL_SECONDS_AWS_CLOUD + if is_aws_cloud() + else _SAMPLING_INTERVAL_SECONDS_LOCALSTACK + ) + + +def await_no_state_machines_listed(stepfunctions_client): + def _is_empty_state_machine_list(): + lst_resp = stepfunctions_client.list_state_machines() + state_machines = lst_resp["stateMachines"] + return not bool(state_machines) + + success = poll_condition( + condition=_is_empty_state_machine_list, + timeout=_DELETION_TIMEOUT_SECS, + interval=_get_sampling_interval_seconds(), + ) + if not success: + LOG.warning("Timed out whilst awaiting for listing to be empty.") + + +def _is_state_machine_alias_listed( + stepfunctions_client, state_machine_arn: Arn, state_machine_alias_arn: Arn +): + list_state_machine_aliases_list = stepfunctions_client.list_state_machine_aliases( + stateMachineArn=state_machine_arn + ) + state_machine_aliases = list_state_machine_aliases_list["stateMachineAliases"] + for state_machine_alias in state_machine_aliases: + if state_machine_alias["stateMachineAliasArn"] == state_machine_alias_arn: + return True + return False + + +def await_state_machine_alias_is_created( + stepfunctions_client, state_machine_arn: Arn, state_machine_alias_arn: Arn +): + success = poll_condition( + condition=lambda: _is_state_machine_alias_listed( + stepfunctions_client=stepfunctions_client, + state_machine_arn=state_machine_arn, + state_machine_alias_arn=state_machine_alias_arn, + ), + timeout=_DELETION_TIMEOUT_SECS, + interval=_get_sampling_interval_seconds(), + ) + if not success: + LOG.warning("Timed out whilst awaiting for alias '%s' to appear in the listing.", state_machine_alias_arn) + + +def await_state_machine_alias_is_deleted( + stepfunctions_client, state_machine_arn: Arn, state_machine_alias_arn: Arn +): + success = poll_condition( + condition=lambda: not _is_state_machine_alias_listed( + stepfunctions_client=stepfunctions_client, + state_machine_arn=state_machine_arn, + state_machine_alias_arn=state_machine_alias_arn, + ), + timeout=_DELETION_TIMEOUT_SECS, + interval=_get_sampling_interval_seconds(), + ) + if not success: + LOG.warning("Timed out whilst awaiting for alias '%s' to be removed from the listing.", state_machine_alias_arn) + + +def 
_is_state_machine_listed(stepfunctions_client, state_machine_arn: str) -> bool: + lst_resp = stepfunctions_client.list_state_machines() + state_machines = lst_resp["stateMachines"] + for state_machine in state_machines: + if state_machine["stateMachineArn"] == state_machine_arn: + return True + return False + + +def _is_state_machine_version_listed( + stepfunctions_client, state_machine_arn: str, state_machine_version_arn: str +) -> bool: + lst_resp = stepfunctions_client.list_state_machine_versions(stateMachineArn=state_machine_arn) + versions = lst_resp["stateMachineVersions"] + for version in versions: + if version["stateMachineVersionArn"] == state_machine_version_arn: + return True + return False + + +def await_state_machine_not_listed(stepfunctions_client, state_machine_arn: str): + success = poll_condition( + condition=lambda: not _is_state_machine_listed(stepfunctions_client, state_machine_arn), + timeout=_DELETION_TIMEOUT_SECS, + interval=_get_sampling_interval_seconds(), + ) + if not success: + LOG.warning("Timed out whilst awaiting for listing to exclude '%s'.", state_machine_arn) + + +def await_state_machine_listed(stepfunctions_client, state_machine_arn: str): + success = poll_condition( + condition=lambda: _is_state_machine_listed(stepfunctions_client, state_machine_arn), + timeout=_DELETION_TIMEOUT_SECS, + interval=_get_sampling_interval_seconds(), + ) + if not success: + LOG.warning("Timed out whilst awaiting for listing to include '%s'.", state_machine_arn) + + +def await_state_machine_version_not_listed( + stepfunctions_client, state_machine_arn: str, state_machine_version_arn: str +): + success = poll_condition( + condition=lambda: not _is_state_machine_version_listed( + stepfunctions_client, state_machine_arn, state_machine_version_arn + ), + timeout=_DELETION_TIMEOUT_SECS, + interval=_get_sampling_interval_seconds(), + ) + if not success: + LOG.warning( + "Timed out whilst awaiting for version of %s to exclude '%s'.", + state_machine_arn, + state_machine_version_arn, + ) + + +def await_state_machine_version_listed( + stepfunctions_client, state_machine_arn: str, state_machine_version_arn: str +): + success = poll_condition( + condition=lambda: _is_state_machine_version_listed( + stepfunctions_client, state_machine_arn, state_machine_version_arn + ), + timeout=_DELETION_TIMEOUT_SECS, + interval=_get_sampling_interval_seconds(), + ) + if not success: + LOG.warning( + "Timed out whilst awaiting for version of %s to include '%s'.", + state_machine_arn, + state_machine_version_arn, + ) + + +def await_on_execution_events( + stepfunctions_client, execution_arn: str, check_func: Callable[[HistoryEventList], bool] +) -> HistoryEventList: + events: HistoryEventList = list() + + def _run_check(): + nonlocal events + events.clear() + try: + hist_resp = stepfunctions_client.get_execution_history(executionArn=execution_arn) + except ClientError: + return False + events.extend(sorted(hist_resp.get("events", []), key=lambda event: event.get("timestamp"))) + res: bool = check_func(events) + return res + + assert poll_condition( + condition=_run_check, timeout=120, interval=_get_sampling_interval_seconds() + ) + return events + + +def await_execution_success(stepfunctions_client, execution_arn: str) -> HistoryEventList: + def _check_last_is_success(events: HistoryEventList) -> bool: + if len(events) > 0: + last_event = events[-1] + return "executionSucceededEventDetails" in last_event + return False + + return await_on_execution_events( + stepfunctions_client=stepfunctions_client, + 
execution_arn=execution_arn, + check_func=_check_last_is_success, + ) + + +def await_list_execution_status( + stepfunctions_client, state_machine_arn: str, execution_arn: str, status: str +): + """required as there is some eventual consistency in list_executions vs describe_execution and get_execution_history""" + + def _run_check(): + list_resp = stepfunctions_client.list_executions( + stateMachineArn=state_machine_arn, statusFilter=status + ) + for execution in list_resp.get("executions", []): + if execution["executionArn"] != execution_arn or execution["status"] != status: + continue + return True + return False + + success = poll_condition( + condition=_run_check, timeout=120, interval=_get_sampling_interval_seconds() + ) + if not success: + LOG.warning( + "Timed out whilst awaiting for execution status %s to satisfy condition for execution '%s'.", + status, + execution_arn, + ) + + +def _is_last_history_event_terminal(events: HistoryEventList) -> bool: + if len(events) > 0: + last_event = events[-1] + last_event_type = last_event.get("type") + return last_event_type is None or last_event_type in { + HistoryEventType.ExecutionFailed, + HistoryEventType.ExecutionAborted, + HistoryEventType.ExecutionTimedOut, + HistoryEventType.ExecutionSucceeded, + } + return False + + +def await_execution_terminated(stepfunctions_client, execution_arn: str) -> HistoryEventList: + return await_on_execution_events( + stepfunctions_client=stepfunctions_client, + execution_arn=execution_arn, + check_func=_is_last_history_event_terminal, + ) + + +def await_execution_lists_terminated( + stepfunctions_client, state_machine_arn: str, execution_arn: str +): + def _check_last_is_terminal() -> bool: + list_output = stepfunctions_client.list_executions(stateMachineArn=state_machine_arn) + executions = list_output["executions"] + for execution in executions: + if execution["executionArn"] == execution_arn: + return execution["status"] != ExecutionStatus.RUNNING + return False + + success = poll_condition( + condition=_check_last_is_terminal, timeout=120, interval=_get_sampling_interval_seconds() + ) + if not success: + LOG.warning( + "Timed out whilst awaiting for execution events to satisfy condition for execution '%s'.", + execution_arn, + ) + + +def await_execution_started(stepfunctions_client, execution_arn: str) -> HistoryEventList: + def _check_started_exists(events: HistoryEventList) -> bool: + for event in events: + return "executionStartedEventDetails" in event + return False + + return await_on_execution_events( + stepfunctions_client=stepfunctions_client, + execution_arn=execution_arn, + check_func=_check_started_exists, + ) + + +def await_execution_aborted(stepfunctions_client, execution_arn: str): + def _run_check(): + desc_res = stepfunctions_client.describe_execution(executionArn=execution_arn) + status: ExecutionStatus = desc_res["status"] + return status == ExecutionStatus.ABORTED + + success = poll_condition( + condition=_run_check, timeout=120, interval=_get_sampling_interval_seconds() + ) + if not success: + LOG.warning("Timed out whilst awaiting for execution '%s' to abort.", execution_arn) + + +def get_expected_execution_logs( + stepfunctions_client, log_level: LogLevel, execution_arn: LongArn +) -> HistoryEventList: + execution_history = stepfunctions_client.get_execution_history(executionArn=execution_arn) + execution_history_events = execution_history["events"] + expected_events = [ + event + for event in execution_history_events + if is_logging_enabled_for(log_level=log_level, 
history_event_type=event["type"]) + ] + return expected_events + + +def is_execution_logs_list_complete( + expected_events: HistoryEventList, +) -> Callable[[HistoryEventList], bool]: + def _validation_function(log_events: list) -> bool: + if not expected_events: + return True + return len(expected_events) == len(log_events) + + return _validation_function + + +def _await_on_execution_log_stream_created(target_aws_client, log_group_name: str) -> str: + logs_client = target_aws_client.logs + log_stream_name = str() + + def _run_check(): + nonlocal log_stream_name + try: + log_streams = logs_client.describe_log_streams(logGroupName=log_group_name)[ + "logStreams" + ] + if not log_streams: + return False + + log_stream_name = log_streams[-1]["logStreamName"] + if ( + log_stream_name + == "log_stream_created_by_aws_to_validate_log_delivery_subscriptions" + ): + # SFN has not yet created the log stream for the execution, only the validation stream. + return False + return True + except ClientError: + return False + + assert poll_condition(condition=_run_check) + return log_stream_name + + +def await_on_execution_logs( + target_aws_client, + log_group_name: str, + validation_function: Callable[[HistoryEventList], bool] = None, +) -> HistoryEventList: + log_stream_name = _await_on_execution_log_stream_created(target_aws_client, log_group_name) + + logs_client = target_aws_client.logs + events: HistoryEventList = list() + + def _run_check(): + nonlocal events + events.clear() + try: + log_events = logs_client.get_log_events( + logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True + )["events"] + events.extend([json.loads(e["message"]) for e in log_events]) + except ClientError: + return False + + res = validation_function(events) + return res + + assert poll_condition(condition=_run_check) + return events + + +def create_state_machine_with_iam_role( + target_aws_client, + create_state_machine_iam_role, + create_state_machine, + snapshot, + definition: Definition, + logging_configuration: Optional[LoggingConfiguration] = None, + state_machine_name: Optional[str] = None, +): + snf_role_arn = create_state_machine_iam_role(target_aws_client=target_aws_client) + snapshot.add_transformer(RegexTransformer(snf_role_arn, "snf_role_arn")) + snapshot.add_transformer( + RegexTransformer( + "Extended Request ID: [a-zA-Z0-9-/=+]+", + "Extended Request ID: <extended_request_id>", + ) + ) + snapshot.add_transformer( + RegexTransformer("Request ID: [a-zA-Z0-9-]+", "Request ID: <request_id>") + ) + + sm_name: str = state_machine_name or f"statemachine_create_and_record_execution_{short_uid()}" + create_arguments = { + "name": sm_name, + "definition": definition, + "roleArn": snf_role_arn, + } + if logging_configuration is not None: + create_arguments["loggingConfiguration"] = logging_configuration + creation_resp = create_state_machine(target_aws_client, **create_arguments) + snapshot.add_transformer(snapshot.transform.sfn_sm_create_arn(creation_resp, 0)) + state_machine_arn = creation_resp["stateMachineArn"] + return state_machine_arn + + +def launch_and_record_execution( + target_aws_client, + sfn_snapshot, + state_machine_arn, + execution_input, + verify_execution_description=False, +) -> LongArn: + stepfunctions_client = target_aws_client.stepfunctions + exec_resp = stepfunctions_client.start_execution( + stateMachineArn=state_machine_arn, input=execution_input + ) + sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_sm_exec_arn(exec_resp, 0)) + execution_arn = exec_resp["executionArn"] + + 
await_execution_terminated( + stepfunctions_client=stepfunctions_client, execution_arn=execution_arn + ) + + if verify_execution_description: + describe_execution = stepfunctions_client.describe_execution(executionArn=execution_arn) + sfn_snapshot.match("describe_execution", describe_execution) + + get_execution_history = stepfunctions_client.get_execution_history(executionArn=execution_arn) + + # Transform all map runs if any. + try: + map_run_arns = extract_json("$..mapRunArn", get_execution_history) + if isinstance(map_run_arns, str): + map_run_arns = [map_run_arns] + for i, map_run_arn in enumerate(list(set(map_run_arns))): + sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_map_run_arn(map_run_arn, i)) + except NoSuchJsonPathError: + # No mapRunArns + pass + + sfn_snapshot.match("get_execution_history", get_execution_history) + + return execution_arn + + +def launch_and_record_mocked_execution( + target_aws_client, + sfn_snapshot, + state_machine_arn, + execution_input, + test_name, +) -> LongArn: + stepfunctions_client = target_aws_client.stepfunctions + exec_resp = stepfunctions_client.start_execution( + stateMachineArn=f"{state_machine_arn}#{test_name}", input=execution_input + ) + sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_sm_exec_arn(exec_resp, 0)) + execution_arn = exec_resp["executionArn"] + + await_execution_terminated( + stepfunctions_client=stepfunctions_client, execution_arn=execution_arn + ) + + get_execution_history = stepfunctions_client.get_execution_history(executionArn=execution_arn) + + # Transform all map runs if any. + try: + map_run_arns = extract_json("$..mapRunArn", get_execution_history) + if isinstance(map_run_arns, str): + map_run_arns = [map_run_arns] + for i, map_run_arn in enumerate(list(set(map_run_arns))): + sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_map_run_arn(map_run_arn, i)) + except NoSuchJsonPathError: + # No mapRunArns + pass + + sfn_snapshot.match("get_execution_history", get_execution_history) + + return execution_arn + + +def launch_and_record_logs( + target_aws_client, + state_machine_arn, + execution_input, + log_level, + log_group_name, + sfn_snapshot, +): + execution_arn = launch_and_record_execution( + target_aws_client, + sfn_snapshot, + state_machine_arn, + execution_input, + ) + expected_events = get_expected_execution_logs( + target_aws_client.stepfunctions, log_level, execution_arn + ) + + if log_level == LogLevel.OFF or not expected_events: + # The test should terminate here, as no log streams for this execution would have been created. 
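The `f"{state_machine_arn}#{test_name}"` addressing used in `launch_and_record_mocked_execution` above selects a test case from the mock configuration file that `create_and_run_mock` below wires in via `SFN_MOCK_CONFIG`. The file follows the mocked-service-integration convention known from AWS Step Functions Local; a sketch of the shape such a `mock_config` dict might take (state and response names are illustrative assumptions, and the exact schema accepted here is not spelled out in this diff):

```
mock_config = {
    "StateMachines": {
        "mocked_statemachine": {
            "TestCases": {
                # maps a state in the definition to a canned response
                "happy_path": {"LambdaState": "MockedLambdaSuccess"}
            }
        }
    },
    "MockedResponses": {
        # "0" = response returned on the first invocation of that state
        "MockedLambdaSuccess": {"0": {"Return": {"StatusCode": 200, "Payload": {"ok": True}}}}
    },
}
```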
+ return + + logs_validation_function = is_execution_logs_list_complete(expected_events) + logged_execution_events = await_on_execution_logs( + target_aws_client, log_group_name, logs_validation_function + ) + + sfn_snapshot.add_transformer( + JsonpathTransformer( + jsonpath="$..event_timestamp", + replacement="timestamp", + replace_reference=False, + ) + ) + sfn_snapshot.match("logged_execution_events", logged_execution_events) + + +def create_and_record_execution( + target_aws_client, + create_state_machine_iam_role, + create_state_machine, + sfn_snapshot, + definition, + execution_input, + verify_execution_description=False, +) -> LongArn: + state_machine_arn = create_state_machine_with_iam_role( + target_aws_client, + create_state_machine_iam_role, + create_state_machine, + sfn_snapshot, + definition, + ) + execution_arn = launch_and_record_execution( + target_aws_client, + sfn_snapshot, + state_machine_arn, + execution_input, + verify_execution_description, + ) + return execution_arn + + +def create_and_record_mocked_execution( + target_aws_client, + create_state_machine_iam_role, + create_state_machine, + sfn_snapshot, + definition, + execution_input, + state_machine_name, + test_name, +) -> LongArn: + state_machine_arn = create_state_machine_with_iam_role( + target_aws_client, + create_state_machine_iam_role, + create_state_machine, + sfn_snapshot, + definition, + state_machine_name=state_machine_name, + ) + execution_arn = launch_and_record_mocked_execution( + target_aws_client, sfn_snapshot, state_machine_arn, execution_input, test_name + ) + return execution_arn + + +def create_and_run_mock( + target_aws_client, + monkeypatch, + mock_config_file, + mock_config: dict, + state_machine_name: str, + definition_template: dict, + execution_input: str, + test_name: str, +): + mock_config_file_path = mock_config_file(mock_config) + monkeypatch.setattr(config, "SFN_MOCK_CONFIG", mock_config_file_path) + + sfn_client = target_aws_client.stepfunctions + + state_machine_name: str = state_machine_name or f"mocked_statemachine_{short_uid()}" + definition = json.dumps(definition_template) + creation_response = sfn_client.create_state_machine( + name=state_machine_name, + definition=definition, + roleArn="arn:aws:iam::111111111111:role/mock-role/mocked-run", + ) + state_machine_arn = creation_response["stateMachineArn"] + + test_case_arn = f"{state_machine_arn}#{test_name}" + execution = sfn_client.start_execution(stateMachineArn=test_case_arn, input=execution_input) + execution_arn = execution["executionArn"] + + await_execution_terminated(stepfunctions_client=sfn_client, execution_arn=execution_arn) + sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) + + return execution_arn + + +def create_and_record_logs( + target_aws_client, + create_state_machine_iam_role, + create_state_machine, + sfn_create_log_group, + sfn_snapshot, + definition, + execution_input, + log_level: LogLevel, + include_execution_data: bool, +): + state_machine_arn = create_state_machine_with_iam_role( + target_aws_client, + create_state_machine_iam_role, + create_state_machine, + sfn_snapshot, + definition, + ) + + log_group_name = sfn_create_log_group() + log_group_arn = target_aws_client.logs.describe_log_groups(logGroupNamePrefix=log_group_name)[ + "logGroups" + ][0]["arn"] + logging_configuration = LoggingConfiguration( + level=log_level, + includeExecutionData=include_execution_data, + destinations=[ + LogDestination( + cloudWatchLogsLogGroup=CloudWatchLogsLogGroup(logGroupArn=log_group_arn) + ), + ], + 
+    )
+    target_aws_client.stepfunctions.update_state_machine(
+        stateMachineArn=state_machine_arn, loggingConfiguration=logging_configuration
+    )
+
+    launch_and_record_logs(
+        target_aws_client,
+        state_machine_arn,
+        execution_input,
+        log_level,
+        log_group_name,
+        sfn_snapshot,
+    )
+
+
+def launch_and_record_sync_execution(
+    target_aws_client,
+    sfn_snapshot,
+    state_machine_arn,
+    execution_input,
+):
+    exec_resp = target_aws_client.stepfunctions.start_sync_execution(
+        stateMachineArn=state_machine_arn,
+        input=execution_input,
+    )
+    sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_sm_sync_exec_arn(exec_resp, 0))
+    sfn_snapshot.match("start_execution_sync_response", exec_resp)
+
+
+def create_and_record_express_sync_execution(
+    target_aws_client,
+    create_state_machine_iam_role,
+    create_state_machine,
+    sfn_snapshot,
+    definition,
+    execution_input,
+):
+    snf_role_arn = create_state_machine_iam_role(target_aws_client=target_aws_client)
+    sfn_snapshot.add_transformer(RegexTransformer(snf_role_arn, "sfn_role_arn"))
+
+    creation_response = create_state_machine(
+        target_aws_client,
+        name=f"express_statemachine_{short_uid()}",
+        definition=definition,
+        roleArn=snf_role_arn,
+        type=StateMachineType.EXPRESS,
+    )
+    state_machine_arn = creation_response["stateMachineArn"]
+    sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_sm_create_arn(creation_response, 0))
+    sfn_snapshot.match("creation_response", creation_response)
+
+    launch_and_record_sync_execution(
+        target_aws_client,
+        sfn_snapshot,
+        state_machine_arn,
+        execution_input,
+    )
+
+
+def launch_and_record_express_async_execution(
+    target_aws_client,
+    sfn_snapshot,
+    state_machine_arn,
+    log_group_name,
+    execution_input,
+):
+    start_execution = target_aws_client.stepfunctions.start_execution(
+        stateMachineArn=state_machine_arn, input=execution_input
+    )
+    sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_sm_express_exec_arn(start_execution, 0))
+    execution_arn = start_execution["executionArn"]
+
+    event_list = await_on_execution_logs(
+        target_aws_client, log_group_name, validation_function=_is_last_history_event_terminal
+    )
+    # Snapshot only the end event, as AWS StepFunctions implements a flaky approach to logging previous events.
+    end_event = event_list[-1]
+    sfn_snapshot.match("end_event", end_event)
+
+    return execution_arn
+
+
+def create_and_record_express_async_execution(
+    target_aws_client,
+    create_state_machine_iam_role,
+    create_state_machine,
+    sfn_create_log_group,
+    sfn_snapshot,
+    definition,
+    execution_input,
+    include_execution_data: bool = True,
+) -> tuple[LongArn, LongArn]:
+    snf_role_arn = create_state_machine_iam_role(target_aws_client)
+    sfn_snapshot.add_transformer(RegexTransformer(snf_role_arn, "sfn_role_arn"))
+
+    log_group_name = sfn_create_log_group()
+    log_group_arn = target_aws_client.logs.describe_log_groups(logGroupNamePrefix=log_group_name)[
+        "logGroups"
+    ][0]["arn"]
+    logging_configuration = LoggingConfiguration(
+        level=LogLevel.ALL,
+        includeExecutionData=include_execution_data,
+        destinations=[
+            LogDestination(
+                cloudWatchLogsLogGroup=CloudWatchLogsLogGroup(logGroupArn=log_group_arn)
+            ),
+        ],
+    )
+
+    creation_response = create_state_machine(
+        target_aws_client,
+        name=f"express_statemachine_{short_uid()}",
+        definition=definition,
+        roleArn=snf_role_arn,
+        type=StateMachineType.EXPRESS,
+        loggingConfiguration=logging_configuration,
+    )
+    state_machine_arn = creation_response["stateMachineArn"]
+    sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_sm_create_arn(creation_response, 0))
+    sfn_snapshot.match("creation_response", creation_response)
+
+    execution_arn = launch_and_record_express_async_execution(
+        target_aws_client,
+        sfn_snapshot,
+        state_machine_arn,
+        log_group_name,
+        execution_input,
+    )
+    return state_machine_arn, execution_arn
+
+
+def create_and_record_events(
+    create_state_machine_iam_role,
+    create_state_machine,
+    sfn_events_to_sqs_queue,
+    target_aws_client,
+    sfn_snapshot,
+    definition,
+    execution_input,
+):
+    sfn_snapshot.add_transformer(sfn_snapshot.transform.sfn_sqs_integration())
+    sfn_snapshot.add_transformers_list(
+        [
+            JsonpathTransformer(
+                jsonpath="$..detail.startDate",
+                replacement="start-date",
+                replace_reference=False,
+            ),
+            JsonpathTransformer(
+                jsonpath="$..detail.stopDate",
+                replacement="stop-date",
+                replace_reference=False,
+            ),
+            JsonpathTransformer(
+                jsonpath="$..detail.name",
+                replacement="test_event_bridge_events-{short_uid()}",
+                replace_reference=False,
+            ),
+        ]
+    )
+
+    snf_role_arn = create_state_machine_iam_role(target_aws_client)
+    create_output: CreateStateMachineOutput = create_state_machine(
+        target_aws_client,
+        name=f"test_event_bridge_events-{short_uid()}",
+        definition=definition,
+        roleArn=snf_role_arn,
+    )
+    state_machine_arn = create_output["stateMachineArn"]
+
+    queue_url = sfn_events_to_sqs_queue(state_machine_arn=state_machine_arn)
+
+    start_execution = target_aws_client.stepfunctions.start_execution(
+        stateMachineArn=state_machine_arn, input=execution_input
+    )
+    execution_arn = start_execution["executionArn"]
+    await_execution_terminated(
+        stepfunctions_client=target_aws_client.stepfunctions, execution_arn=execution_arn
+    )
+
+    stepfunctions_events = list()
+
+    def _get_events():
+        received = target_aws_client.sqs.receive_message(QueueUrl=queue_url)
+        for message in received.get("Messages", []):
+            body = json.loads(message["Body"])
+            stepfunctions_events.append(body)
+        stepfunctions_events.sort(key=lambda e: e["time"])
+        return stepfunctions_events and stepfunctions_events[-1]["detail"]["status"] != "RUNNING"
+
+    poll_condition(_get_events, timeout=60)
+
+    sfn_snapshot.match("stepfunctions_events", stepfunctions_events)
+
+
+def record_sqs_events(target_aws_client, queue_url, sfn_snapshot, num_events):
+    stepfunctions_events = list()
+
+    def _get_events():
+        received = target_aws_client.sqs.receive_message(QueueUrl=queue_url)
+        for message in received.get("Messages", []):
+            body = json.loads(message["Body"])
+            stepfunctions_events.append(body)
+        stepfunctions_events.sort(key=lambda e: e["time"])
+        return len(stepfunctions_events) == num_events
+
+    poll_condition(_get_events, timeout=60)
+    stepfunctions_events.sort(key=lambda e: json.dumps(e.get("detail", dict())))
+    sfn_snapshot.match("stepfunctions_events", stepfunctions_events)
+
+
+class SfnNoneRecursiveParallelTransformer:
+    """
+    Normalises the sublist of events triggered by a Parallel state to be order-independent.
+    """
+
+    def __init__(self, events_jsonpath: str = "$..events"):
+        self.events_jsonpath: str = events_jsonpath
+
+    @staticmethod
+    def _normalise_events(events: list[dict]) -> None:
+        start_idx = None
+        sublist = list()
+        in_sublist = False
+        for i, event in enumerate(events):
+            event_type = event.get("type")
+            if event_type is None:
+                LOG.debug(
+                    "No 'type' in event item '%s'.",
+                    event,
+                )
+                in_sublist = False
+
+            elif event_type in {
+                None,
+                HistoryEventType.ParallelStateSucceeded,
+                HistoryEventType.ParallelStateAborted,
+                HistoryEventType.ParallelStateExited,
+                HistoryEventType.ParallelStateFailed,
+            }:
+                events[start_idx:i] = sorted(sublist, key=lambda e: to_json_str(e))
+                in_sublist = False
+            elif event_type == HistoryEventType.ParallelStateStarted:
+                in_sublist = True
+                sublist = []
+                start_idx = i + 1
+            elif in_sublist:
+                # normalise ids inside the parallel sublist so event ordering does not matter
+                event["id"] = 0
+                event["previousEventId"] = 0
+                sublist.append(event)
+
+    def transform(self, input_data: dict, *, ctx: TransformContext) -> dict:
+        pattern = parse(self.events_jsonpath)
+        events = pattern.find(input_data)
+        if not events:
+            LOG.debug("No Stepfunctions 'events' for jsonpath '%s'.", self.events_jsonpath)
+            return input_data
+
+        for events_data in events:
+            self._normalise_events(events_data.value)
+
+        return input_data
diff --git a/localstack/testing/pytest/util.py b/localstack-core/localstack/testing/pytest/util.py
similarity index 100%
rename from localstack/testing/pytest/util.py
rename to localstack-core/localstack/testing/pytest/util.py
diff --git a/localstack-core/localstack/testing/pytest/validation_tracking.py b/localstack-core/localstack/testing/pytest/validation_tracking.py
new file mode 100644
index 0000000000000..cb3fd9eb48dae
--- /dev/null
+++ b/localstack-core/localstack/testing/pytest/validation_tracking.py
@@ -0,0 +1,164 @@
+"""
+When a test (in tests/aws) is executed against AWS, we want to track the date of the last successful run.
+
+By keeping a record of how long ago a test was last validated,
+we can periodically re-validate ALL AWS-targeting tests (and therefore not just snapshot-using tests).
+"""
+
+import datetime
+import json
+import os
+from pathlib import Path
+from typing import Dict, Optional
+
+import pytest
+from pluggy import Result
+from pytest import StashKey, TestReport
+
+from localstack.testing.aws.util import is_aws_cloud
+
+durations_key = StashKey[Dict[str, float]]()
+"""
+Stores phase durations on the test node between execution phases.
+See https://docs.pytest.org/en/latest/reference/reference.html#pytest.Stash
+"""
+test_failed_key = StashKey[bool]()
+"""
+Stores information from the call execution phase about whether the test failed.
+"""
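+
+
+# For illustration, a hypothetical tests/aws/test_foo.py would get a sibling
+# test_foo.validation.json shaped roughly like this (node ID and values made up):
+#
+#   {
+#     "tests/aws/test_foo.py::test_bar": {
+#       "last_validated_date": "2024-01-01T00:00:00+00:00",
+#       "durations_in_seconds": {"setup": 1.23, "call": 4.56, "teardown": 0.78, "total": 6.57}
+#     }
+#   }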
+""" + + +def find_validation_data_for_item(item: pytest.Item) -> Optional[dict]: + base_path = os.path.join(item.fspath.dirname, item.fspath.purebasename) + snapshot_path = f"{base_path}.validation.json" + + if not os.path.exists(snapshot_path): + return None + + with open(snapshot_path, "r") as fd: + file_content = json.load(fd) + return file_content.get(item.nodeid) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo): + """ + This hook is called after each test execution phase (setup, call, teardown). + """ + result: Result = yield + report: TestReport = result.get_result() + + if call.when == "setup": + _makereport_setup(item, call) + elif call.when == "call": + _makereport_call(item, call) + elif call.when == "teardown": + _makereport_teardown(item, call) + + return report + + +def _stash_phase_duration(call, item): + durations_by_phase = item.stash.setdefault(durations_key, {}) + durations_by_phase[call.when] = round(call.duration, 2) + + +def _makereport_setup(item: pytest.Item, call: pytest.CallInfo): + _stash_phase_duration(call, item) + + +def _makereport_call(item: pytest.Item, call: pytest.CallInfo): + _stash_phase_duration(call, item) + item.stash[test_failed_key] = call.excinfo is not None + + +def _makereport_teardown(item: pytest.Item, call: pytest.CallInfo): + _stash_phase_duration(call, item) + + # only update the file when running against AWS and the test finishes successfully + if not is_aws_cloud() or item.stash.get(test_failed_key, True): + return + + base_path = os.path.join(item.fspath.dirname, item.fspath.purebasename) + file_path = Path(f"{base_path}.validation.json") + file_path.touch() + with file_path.open(mode="r+") as fd: + # read existing state from file + try: + content = json.load(fd) + except json.JSONDecodeError: # expected on the first try (empty file) + content = {} + + test_execution_data = content.setdefault(item.nodeid, {}) + + timestamp = datetime.datetime.now(tz=datetime.timezone.utc) + test_execution_data["last_validated_date"] = timestamp.isoformat(timespec="seconds") + + durations_by_phase = item.stash[durations_key] + test_execution_data["durations_in_seconds"] = durations_by_phase + + total_duration = sum(durations_by_phase.values()) + durations_by_phase["total"] = round(total_duration, 2) + + # For json.dump sorted test entries enable consistent diffs. + # But test execution data is more readable in insert order for each step (setup, call, teardown). + # Hence, not using global sort_keys=True for json.dump but rather additionally sorting top-level dict only. + content = dict(sorted(content.items())) + + # save updates + fd.truncate(0) # clear existing content + fd.seek(0) + json.dump(content, fd, indent=2) + fd.write("\n") # add trailing newline for linter and Git compliance + + +@pytest.hookimpl +def pytest_addoption(parser: pytest.Parser, pluginmanager: pytest.PytestPluginManager): + parser.addoption("--validation-date-limit-days", action="store") + parser.addoption("--validation-date-limit-timestamp", action="store") + + +@pytest.hookimpl(trylast=True) +def pytest_collection_modifyitems( + session: pytest.Session, config: pytest.Config, items: list[pytest.Item] +): + """ + Collect only items that have a validation timestamp earlier than the user-provided reference timestamp + + Example usage: + - pytest ... --validation-date-limit-days=10 + - pytest ... --validation-date-limit-timestamp="2023-12-01T00:00:00" + + """ + # handle two potential config options (relative vs. 
+    if config.option.validation_date_limit_days is not None:
+        reference_date = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(
+            days=int(config.option.validation_date_limit_days)
+        )
+    elif config.option.validation_date_limit_timestamp is not None:
+        reference_date = datetime.datetime.fromisoformat(
+            config.option.validation_date_limit_timestamp
+        )
+    else:
+        return
+
+    selected = []  # items to collect
+    deselected = []  # items to drop
+
+    for item in items:
+        validation_data = find_validation_data_for_item(item)
+        if not validation_data:
+            deselected.append(item)
+            continue
+
+        last_validated_date = datetime.datetime.fromisoformat(
+            validation_data["last_validated_date"]
+        )
+
+        if last_validated_date < reference_date:
+            selected.append(item)
+        else:
+            deselected.append(item)
+
+    items[:] = selected
+    config.hook.pytest_deselected(items=deselected)
diff --git a/localstack-core/localstack/testing/scenario/__init__.py b/localstack-core/localstack/testing/scenario/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/testing/scenario/cdk_lambda_helper.py b/localstack-core/localstack/testing/scenario/cdk_lambda_helper.py
new file mode 100644
index 0000000000000..18233edcdf6e8
--- /dev/null
+++ b/localstack-core/localstack/testing/scenario/cdk_lambda_helper.py
@@ -0,0 +1,229 @@
+import base64
+import os
+import shutil
+import tempfile
+import zipfile
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from botocore.exceptions import ClientError
+
+from localstack.utils.aws.resources import create_s3_bucket
+from localstack.utils.docker_utils import DOCKER_CLIENT
+from localstack.utils.run import LOG, run
+
+if TYPE_CHECKING:
+    from mypy_boto3_ecr import ECRClient
+    from mypy_boto3_s3 import S3Client
+
+
+def load_python_lambda_to_s3(
+    s3_client: "S3Client",
+    bucket_name: str,
+    key_name: str,
+    code_path: str,
+    additional_python_packages: list[str] = None,
+):
+    """
+    Helper function to set up Lambdas that need additional Python libs.
+    Creates a temporary zip and uploads it to the S3 bucket.
+    Additional libs are installed and packaged into the zip.
+
+    :param s3_client: client for S3
+    :param bucket_name: bucket name (bucket will be created)
+    :param key_name: key name for the uploaded zip file
+    :param code_path: the path to the source code that should be included
+    :param additional_python_packages: a list of strings with python packages that are required to run the lambda
+    :return: None
+    """
+    # create the temp resources outside the try-block, so the cleanup in the
+    # finally-block never references unbound names
+    temp_dir = tempfile.mkdtemp()
+    tmp_zip_path = os.path.join(tempfile.gettempdir(), "helper.zip")
+    try:
+        # install python packages
+        if additional_python_packages:
+            try:
+                run(f"cd {temp_dir} && pip install {' '.join(additional_python_packages)} -t .")
+            except Exception as e:
+                LOG.error(
+                    "Could not install additional packages %s: %s", additional_python_packages, e
+                )
+        # add the lambda to the directory
+        _zip_lambda_resources(
+            lambda_code_path=code_path,
+            handler_file_name="index.py",
+            resources_dir=temp_dir,
+            zip_path=tmp_zip_path,
+        )
+        _upload_to_s3(s3_client, bucket_name=bucket_name, key_name=key_name, file=tmp_zip_path)
+
+    finally:
+        if temp_dir:
+            shutil.rmtree(temp_dir)
+        if tmp_zip_path and os.path.exists(tmp_zip_path):
+            os.remove(tmp_zip_path)
+
+
+def load_nodejs_lambda_to_s3(
+    s3_client: "S3Client",
+    bucket_name: str,
+    key_name: str,
+    code_path: str,
+    additional_nodjs_packages: list[str] = None,
+    additional_nodejs_packages: list[str] = None,
+    additional_resources: list[str] = None,
+):
+    """
+    Helper function to set up Node.js Lambdas that need additional libs.
+    Creates a temporary zip and uploads it to the S3 bucket.
+    Additional libs are installed and packaged into the zip.
+
+    :param s3_client: client for S3
+    :param bucket_name: bucket name (bucket will be created)
+    :param key_name: key name for the uploaded zip file
+    :param code_path: the path to the source code that should be included
+    :param additional_nodjs_packages: deprecated, misspelled alias of additional_nodejs_packages
+    :param additional_nodejs_packages: a list of strings with Node.js packages that are required to run the lambda
+    :param additional_resources: list of path-strings to resources or internal libs that should be packaged into the lambda
+    :return: None
+    """
+    additional_resources = additional_resources or []
+
+    # merge packages passed via the deprecated, misspelled parameter
+    if additional_nodjs_packages:
+        additional_nodejs_packages = additional_nodejs_packages or []
+        additional_nodejs_packages.extend(additional_nodjs_packages)
+
+    temp_dir = tempfile.mkdtemp()
+    tmp_zip_path = os.path.join(tempfile.gettempdir(), "helper.zip")
+    try:
+        # Install Node.js packages
+        if additional_nodejs_packages:
+            try:
+                os.mkdir(os.path.join(temp_dir, "node_modules"))
+                run(f"cd {temp_dir} && npm install {' '.join(additional_nodejs_packages)} ")
+            except Exception as e:
+                LOG.error(
+                    "Could not install additional packages %s: %s", additional_nodejs_packages, e
+                )
+
+        for r in additional_resources:
+            try:
+                path = Path(r)
+                if path.is_dir():
+                    dir_name = os.path.basename(path)
+                    dest_dir = os.path.join(temp_dir, dir_name)
+                    shutil.copytree(path, dest_dir)
+                elif path.is_file():
+                    new_resource_temp_path = os.path.join(temp_dir, os.path.basename(path))
+                    shutil.copy2(path, new_resource_temp_path)
+            except Exception as e:
+                LOG.error("Could not copy additional resources %s: %s", r, e)
+
+        _zip_lambda_resources(
+            lambda_code_path=code_path,
+            handler_file_name="index.js",
+            resources_dir=temp_dir,
+            zip_path=tmp_zip_path,
+        )
+        _upload_to_s3(s3_client, bucket_name=bucket_name, key_name=key_name, file=tmp_zip_path)
+    finally:
+        if temp_dir:
+            shutil.rmtree(temp_dir)
+        if tmp_zip_path and os.path.exists(tmp_zip_path):
+            os.remove(tmp_zip_path)
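+
+
+# Illustrative usage only (bucket, key, and paths are hypothetical):
+#   load_python_lambda_to_s3(
+#       aws_client.s3,
+#       bucket_name="my-test-bucket",
+#       key_name="my-handler.zip",
+#       code_path="./lambdas/my_handler/index.py",
+#       additional_python_packages=["requests"],
+#   )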
+
+
+def _zip_lambda_resources(
+    lambda_code_path: str, handler_file_name: str, resources_dir: str, zip_path: str
+):
+    # add the lambda handler to the directory
+    new_resource_temp_path = os.path.join(resources_dir, handler_file_name)
+    shutil.copy2(lambda_code_path, new_resource_temp_path)
+
+    with zipfile.ZipFile(zip_path, "w") as temp_zip:
+        # add the contents of the resources directory to the ZIP file
+        for root, _, files in os.walk(resources_dir):
+            for file in files:
+                file_path = os.path.join(root, file)
+                archive_name = os.path.relpath(file_path, resources_dir)
+                temp_zip.write(file_path, archive_name)
+
+
+def generate_ecr_image_from_dockerfile(
+    ecr_client: "ECRClient",
+    repository_name: str,
+    file_path: str,
+    build_in_place: bool = False,
+):
+    """
+    Helper function to generate an ECR image from a dockerfile.
+
+    :param ecr_client: client for ECR
+    :param repository_name: name for the repository to be created
+    :param file_path: path of the Dockerfile to be used
+    :param build_in_place: build the container image in place rather than copying to a temporary location.
+        This is useful if the build context has other files.
+    :return: None
+    """
+    repository_uri = ecr_client.create_repository(
+        repositoryName=repository_name,
+    )["repository"]["repositoryUri"]
+
+    auth_response = ecr_client.get_authorization_token()
+    auth_token = auth_response["authorizationData"][0]["authorizationToken"].encode()
+    username, password = base64.b64decode(auth_token).decode().split(":")
+    registry = auth_response["authorizationData"][0]["proxyEndpoint"]
+    DOCKER_CLIENT.login(username, password, registry=registry)
+
+    if build_in_place:
+        destination_file = file_path
+    else:
+        temp_dir = tempfile.mkdtemp()
+        destination_file = os.path.join(temp_dir, "Dockerfile")
+        shutil.copy2(file_path, destination_file)
+    DOCKER_CLIENT.build_image(dockerfile_path=destination_file, image_name=repository_uri)
+    DOCKER_CLIENT.push_image(repository_uri)
+
+
+def generate_ecr_image_from_docker_image(
+    ecr_client: "ECRClient", repository_name: str, image_name: str, platform: str = "linux/amd64"
+):
+    """
+    Helper function to mirror an existing Docker image into a newly created ECR repository.
+
+    :param ecr_client: client for ECR
+    :param repository_name: name for the repository to be created
+    :param image_name: name of the Docker image to pull, tag, and push
+    :param platform: target platform to pull the image for
+    :return: None
+    """
+
+    DOCKER_CLIENT.pull_image(image_name, platform=platform)
+
+    repository_uri = ecr_client.create_repository(
+        repositoryName=repository_name,
+    )["repository"]["repositoryUri"]
+
+    auth_response = ecr_client.get_authorization_token()
+    auth_token = auth_response["authorizationData"][0]["authorizationToken"].encode()
+    username, password = base64.b64decode(auth_token).decode().split(":")
+    registry = auth_response["authorizationData"][0]["proxyEndpoint"]
+    DOCKER_CLIENT.login(username, password, registry=registry)
+
+    DOCKER_CLIENT.tag_image(image_name, repository_uri)
+    DOCKER_CLIENT.push_image(repository_uri)
+
+
+def _upload_to_s3(s3_client: "S3Client", bucket_name: str, key_name: str, file: str):
+    try:
+        create_s3_bucket(bucket_name, s3_client)
+    except ClientError as exc:
+        # when creating an already existing bucket, regions differ in their behavior:
+        # us-east-1 will silently pass (idempotent),
+        # any other region will return a `BucketAlreadyOwnedByYou` exception.
+        if exc.response["Error"]["Code"] != "BucketAlreadyOwnedByYou":
+            raise exc
+    s3_client.upload_file(Filename=file, Bucket=bucket_name, Key=key_name)
diff --git a/localstack-core/localstack/testing/scenario/provisioning.py b/localstack-core/localstack/testing/scenario/provisioning.py
new file mode 100644
index 0000000000000..cc384d3046c65
--- /dev/null
+++ b/localstack-core/localstack/testing/scenario/provisioning.py
@@ -0,0 +1,426 @@
+import json
+import logging
+import warnings
+from contextlib import contextmanager
+from pathlib import Path
+from typing import TYPE_CHECKING, Callable, ContextManager, Optional
+
+import aws_cdk as cdk
+from botocore.exceptions import ClientError, WaiterError
+
+from localstack.config import is_env_true
+from localstack.testing.aws.util import is_aws_cloud
+from localstack.testing.pytest.fixtures import StackDeployError
+from localstack.utils.aws.resources import create_s3_bucket
+from localstack.utils.files import load_file
+from localstack.utils.functions import call_safe
+from localstack.utils.strings import short_uid
+
+if TYPE_CHECKING:
+    from mypy_boto3_s3 import S3Client
+
+from localstack.aws.api.cloudformation import Capability
+from localstack.aws.connect import ServiceLevelClientFactory
+
+LOG = logging.getLogger(__name__)
+CDK_BOOTSTRAP_PARAM = "/cdk-bootstrap/hnb659fds/version"
+WAITER_CONFIG_AWS = {
+    "Delay": 6,
+    "MaxAttempts": 600,
+}  # total timeout ~1 hour (6 * 600 = 3_600 seconds)
+# total timeout ~10 minutes
+WAITER_CONFIG_LS = {"Delay": 1, "MaxAttempts": 600}
+CFN_MAX_TEMPLATE_SIZE = 51_200
+
+
+# TODO: move/unify with utils
+def cleanup_s3_bucket(s3_client: "S3Client", bucket_name: str, delete_bucket: bool = False):
+    LOG.debug("Cleaning provisioned S3 Bucket %s", bucket_name)
+    try:
+        objs = s3_client.list_objects_v2(Bucket=bucket_name)
+        objs_num = objs["KeyCount"]
+        if objs_num > 0:
+            LOG.debug("Deleting %s objects from bucket_name=%s", objs_num, bucket_name)
+            obj_keys = [{"Key": o["Key"]} for o in objs["Contents"]]
+            s3_client.delete_objects(Bucket=bucket_name, Delete={"Objects": obj_keys})
+        if delete_bucket:
+            s3_client.delete_bucket(Bucket=bucket_name)
+    except Exception:
+        LOG.warning(
+            "Failed to clean provisioned S3 Bucket bucket_name=%s",
+            bucket_name,
+            exc_info=LOG.isEnabledFor(logging.DEBUG),
+        )
+
+
+# TODO: cross-account tests
+# TODO: cross-region references
+# TODO: explore asset handling
+# TODO: use CDK App as central construct instead of individual stacks
+class InfraProvisioner:
+    """
+    An InfraProvisioner encapsulates logic around the setup and teardown of multiple CDK stacks and custom provisioning steps.
+    Use it to set up your infrastructure against which you can then execute individual or multiple integration tests.
+    """
+
+    cloudformation_stacks: dict[str, dict]
+    custom_cleanup_steps: list[Callable]
+    custom_setup_steps: list[Callable]
+    aws_client: ServiceLevelClientFactory
+    namespace: str
+    base_path: str | None
+    cdk_app: cdk.App
+    persist_output: bool
+    force_synth: bool
+
+    def __init__(
+        self,
+        aws_client: ServiceLevelClientFactory,
+        namespace: str,
+        base_path: Optional[str] = None,
+        force_synth: Optional[bool] = False,
+        persist_output: Optional[bool] = False,
+    ):
+        """
+        :param namespace: repo-unique identifier for this CDK app.
+            A directory with this name will be created at `tests/aws/cdk_templates/<namespace>/`
+        :param base_path: absolute path to `tests/aws/cdk_templates` where synthesized artifacts are stored
+        :param aws_client: an aws client factory
+        :param force_synth: set to True to always re-synth the CDK app
+        :param persist_output: set to True to persist the synthesized templates to disk
+        :return: an instantiated CDK InfraProvisioner which can be used to deploy a CDK app
+        """
+        self.namespace = namespace
+        self.base_path = base_path
+        self.cloudformation_stacks = {}
+        self.custom_cleanup_steps = []
+        self.custom_setup_steps = []
+        self.aws_client = aws_client
+        self.force_synth = force_synth
+        self.persist_output = persist_output
+        if self.base_path is None:
+            self.persist_output = False
+        self.cdk_app = cdk.App(default_stack_synthesizer=cdk.BootstraplessSynthesizer())
+
+    def get_asset_bucket(self):
+        account = self.aws_client.sts.get_caller_identity()["Account"]
+        region = self.aws_client.sts.meta.region_name
+        return f"localstack-testing-{account}-{region}"
+
+    @contextmanager
+    def provisioner(
+        self, skip_deployment: Optional[bool] = False, skip_teardown: Optional[bool] = False
+    ) -> ContextManager["InfraProvisioner"]:
+        """
+        :param skip_deployment: Set to True to skip stack creation and re-use existing stack without modifications.
+            Also skips custom setup steps.
+            Use-case: When you only want to regenerate the synthesized template without actually deploying.
+        :param skip_teardown: Set to True to skip deleting any previously created stacks.
+            Also skips custom teardown steps.
+            Use-case: When you're dealing with resource-heavy stacks that take a long time to provision.
+            The provisioner will perform a stack update instead of a create, should the stack still exist.
+
+        Example usage:
+            def my_fixture(infrastructure_setup):
+                ...
+                infra = infrastructure_setup(namespace="...")
+                with infra.provisioner() as prov:
+                    yield prov
+        """
+        try:
+            self.provision(skip_deployment=skip_deployment)
+            # TODO: return "sub-view" on InfraProvisioner here for clearer API
+            yield self
+        finally:
+            if not skip_teardown:
+                self.teardown()
+            else:
+                LOG.debug("Skipping teardown. Resources and stacks are not deleted.")
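+
+    # Illustrative only: when iterating on tests against an already deployed stack,
+    # both skip flags can be combined (hypothetical wiring):
+    #   with infra.provisioner(skip_deployment=True, skip_teardown=True) as prov:
+    #       ...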
+
+    def provision(self, skip_deployment: Optional[bool] = False):
+        """
+        Execute all previously added custom provisioning steps and deploy added CDK stacks via CloudFormation.
+
+        Already deployed stacks will be updated instead.
+        """
+        self._synth()
+        if skip_deployment:
+            LOG.debug("Skipping deployment. Assuming stacks have already been created")
+            return
+
+        is_update = False
+
+        if all(
+            self._is_stack_deployed(stack_name, stack)
+            for stack_name, stack in self.cloudformation_stacks.items()
+        ):
+            LOG.debug("All stacks are already deployed. Performing an update instead.")
+            # TODO: in localstack we might want to do a delete/create
+            #   but generally this won't be a common use case when developing against LocalStack
+            is_update = True
+
+        self._bootstrap()
+        self._run_manual_setup_tasks()
+        for stack_name, stack in self.cloudformation_stacks.items():
+            change_set_name = f"test-cs-{short_uid()}"
+            if len(stack["Template"]) > CFN_MAX_TEMPLATE_SIZE:
+                # if the template size is too big, we need to upload it to s3 first
+                # and use TemplateURL instead to point to the template in s3
+                template_bucket_name = self._template_bucket_name()
+                self._create_bucket_if_not_exists(template_bucket_name)
+                key = f"{stack_name}.yaml"
+                self.aws_client.s3.put_object(
+                    Bucket=template_bucket_name, Key=key, Body=stack["Template"]
+                )
+                url = self.aws_client.s3.generate_presigned_url(
+                    ClientMethod="get_object",
+                    Params={"Bucket": template_bucket_name, "Key": key},
+                    ExpiresIn=10,
+                )
+
+                change_set = self.aws_client.cloudformation.create_change_set(
+                    StackName=stack_name,
+                    ChangeSetName=change_set_name,
+                    TemplateURL=url,
+                    ChangeSetType="UPDATE" if is_update else "CREATE",
+                    Capabilities=[
+                        Capability.CAPABILITY_AUTO_EXPAND,
+                        Capability.CAPABILITY_IAM,
+                        Capability.CAPABILITY_NAMED_IAM,
+                    ],
+                )
+            else:
+                change_set = self.aws_client.cloudformation.create_change_set(
+                    StackName=stack_name,
+                    ChangeSetName=change_set_name,
+                    TemplateBody=stack["Template"],
+                    ChangeSetType="UPDATE" if is_update else "CREATE",
+                    Capabilities=[
+                        Capability.CAPABILITY_AUTO_EXPAND,
+                        Capability.CAPABILITY_IAM,
+                        Capability.CAPABILITY_NAMED_IAM,
+                    ],
+                )
+            stack_id = self.cloudformation_stacks[stack_name]["StackId"] = change_set["StackId"]
+            try:
+                self.aws_client.cloudformation.get_waiter("change_set_create_complete").wait(
+                    ChangeSetName=change_set["Id"],
+                    WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
+                )
+            except WaiterError:
+                # it's OK if we don't have any updates to perform here (!)
+                # there is no specific error code unfortunately
+                if not is_update:
+                    raise
+                else:
+                    LOG.warning(
+                        "Creation of change set %s failed. Assuming no changes detected.",
+                        change_set_name,
+                    )
+            else:
+                self.aws_client.cloudformation.execute_change_set(ChangeSetName=change_set["Id"])
+                try:
+                    self.aws_client.cloudformation.get_waiter(
+                        "stack_update_complete" if is_update else "stack_create_complete"
+                    ).wait(
+                        StackName=stack_id,
+                        WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
+                    )
+
+                except WaiterError as e:
+                    raise StackDeployError(
+                        self.aws_client.cloudformation.describe_stacks(StackName=stack_id)[
+                            "Stacks"
+                        ][0],
+                        self.aws_client.cloudformation.describe_stack_events(StackName=stack_id)[
+                            "StackEvents"
+                        ],
+                    ) from e
+
+            if stack["AutoCleanS3"]:
+                stack_resources = self.aws_client.cloudformation.describe_stack_resources(
+                    StackName=stack_id
+                )["StackResources"]
+                s3_buckets = [
+                    r["PhysicalResourceId"]
+                    for r in stack_resources
+                    if r["ResourceType"] == "AWS::S3::Bucket"
+                ]
+
+                for s3_bucket in s3_buckets:
+                    self.custom_cleanup_steps.append(
+                        lambda bucket=s3_bucket: cleanup_s3_bucket(
+                            self.aws_client.s3, bucket, delete_bucket=False
+                        )
+                    )
+
+    # TODO: move this to a CFn testing utility
+    def get_stack_outputs(self, stack_name: str) -> dict[str, str]:
+        """
+        A simple helper function to extract outputs of a deployed stack in a simple `<key>: <value>` format.
+        """
+        describe_stack = self.aws_client.cloudformation.describe_stacks(StackName=stack_name)
+        raw_outputs = describe_stack["Stacks"][0].get("Outputs", {})
+        outputs = {o["OutputKey"]: o["OutputValue"] for o in raw_outputs}
+        return outputs
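+
+    # Illustrative end-to-end sketch (stack class and names are hypothetical):
+    #   infra = InfraProvisioner(aws_client=aws_client, namespace="MyScenario")
+    #   infra.add_cdk_stack(MyStack(infra.cdk_app, "MyStack"))
+    #   with infra.provisioner() as prov:
+    #       outputs = prov.get_stack_outputs("MyStack")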
+ """ + describe_stack = self.aws_client.cloudformation.describe_stacks(StackName=stack_name) + raw_outputs = describe_stack["Stacks"][0].get("Outputs", {}) + outputs = {o["OutputKey"]: o["OutputValue"] for o in raw_outputs} + return outputs + + def teardown(self): + """ + Reverse operation of `InfraProvisioner.provision`. + First performs any registered clean-up tasks in reverse order and afterwards deletes any previously created CloudFormation stacks + """ + for fn in self.custom_cleanup_steps[::-1]: # traverse in reverse order + call_safe(fn) + + # TODO: dependency detection (coming with proper synth support) + for stack_name, stack in reversed(self.cloudformation_stacks.items()): + try: + stack_id = stack.get("StackId", stack_name) + self.aws_client.cloudformation.delete_stack(StackName=stack_id) + self.aws_client.cloudformation.get_waiter("stack_delete_complete").wait( + StackName=stack_id, + WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS, + ) + except Exception: + LOG.warning( + "Failed to delete stack %s", + stack_name, + exc_info=LOG.isEnabledFor(logging.DEBUG), + ) + # TODO log-groups created by lambda are not automatically cleaned up by CDK + + if not is_aws_cloud(): + # TODO: also clean up s3 bucket on localstack? + # does it even make sense to do a general "de-bootstrapping" after each test? + try: + self.aws_client.ssm.delete_parameter(Name=CDK_BOOTSTRAP_PARAM) + except Exception: + pass + + # clean & delete asset bucket + cleanup_s3_bucket(self.aws_client.s3, self.get_asset_bucket(), delete_bucket=True) + + def add_cdk_stack( + self, + cdk_stack: cdk.Stack, + autoclean_buckets: Optional[bool] = True, + ): + """ + Register a CDK stack to be deployed in a later `InfraProvisioner.provision` call. + Custom tasks registered via `InfraProvisioner.add_custom_setup` will be executed before any stack deployments. + + CAVEAT: `InfraProvisioner` currently does not support CDK-generated assets. + If you need any assets, such as zip files uploaded to s3, please use `InfraProvisioner.add_custom_setup`. + """ + # TODO: unify this after refactoring existing usage + if self.persist_output: + dir_path = self._get_template_path() + dir_path.mkdir(exist_ok=True, parents=True) + template_path = dir_path / f"{cdk_stack.stack_name}.json" + + should_update_template = ( + is_env_true("TEST_CDK_FORCE_SYNTH") or self.force_synth + ) # EXPERIMENTAL / API subject to change + if not template_path.exists() or should_update_template: + with open(template_path, "wt") as fd: + template_json = cdk.assertions.Template.from_stack(cdk_stack).to_json() + json.dump(template_json, fd, indent=2) + # add trailing newline for linter and Git compliance + fd.write("\n") + + self.cloudformation_stacks[cdk_stack.stack_name] = { + "StackName": cdk_stack.stack_name, + "Template": load_file(template_path), + "AutoCleanS3": autoclean_buckets, + } + else: + template_json = cdk.assertions.Template.from_stack(cdk_stack).to_json() + template_str = json.dumps(template_json) + self.cloudformation_stacks[cdk_stack.stack_name] = { + "StackName": cdk_stack.stack_name, + "Template": template_str, + "AutoCleanS3": autoclean_buckets, + } + + def add_custom_teardown(self, cleanup_task: Callable): + """ + Register a custom teardown task. + Anything registered here will be executed on InfraProvisioner.teardown BEFORE any stack deletions. + """ + self.custom_cleanup_steps.append(cleanup_task) + + def add_custom_setup(self, setup_task: Callable): + """ + Register a custom setup task. 
+        Anything registered here will be executed on InfraProvisioner.provision BEFORE any stack operations.
+        """
+        self.custom_setup_steps.append(setup_task)
+
+    # TODO: remove after removing any usage
+    def add_custom_setup_provisioning_step(self, setup_task: Callable):
+        """
+        DEPRECATED. Use add_custom_setup instead.
+
+        Register a custom setup task.
+        Anything registered here will be executed on InfraProvisioner.provision BEFORE any stack operations.
+        """
+        warnings.warn(
+            "`add_custom_setup_provisioning_step` is deprecated. Use `add_custom_setup`",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self.add_custom_setup(setup_task)
+
+    def _bootstrap(self):
+        # TODO: add proper bootstrap template to deploy here if there's no parameter yet
+        self._create_bucket_if_not_exists(self.get_asset_bucket())
+
+        try:
+            self.aws_client.ssm.get_parameter(Name=CDK_BOOTSTRAP_PARAM)
+        except self.aws_client.ssm.exceptions.ParameterNotFound:
+            self.aws_client.ssm.put_parameter(Name=CDK_BOOTSTRAP_PARAM, Type="String", Value="10")
+
+    def _run_manual_setup_tasks(self):
+        for fn in self.custom_setup_steps:
+            fn()
+
+    def _is_stack_deployed(self, stack_name: str, stack: dict) -> bool:
+        try:
+            describe_stack = self.aws_client.cloudformation.describe_stacks(StackName=stack_name)
+            if outputs := describe_stack["Stacks"][0].get("Outputs"):
+                stack["Outputs"] = {o["OutputKey"]: o["OutputValue"] for o in outputs}
+        except Exception:
+            return False
+        # TODO: should we try to run teardown first, if the status is not "CREATE_COMPLETE"?
+        return describe_stack["Stacks"][0]["StackStatus"] in [
+            "CREATE_COMPLETE",
+            "UPDATE_COMPLETE",
+            "UPDATE_ROLLBACK_COMPLETE",
+        ]
+
+    def _get_template_path(self) -> Path:
+        return Path(self.base_path) / self.namespace
+
+    def _template_bucket_name(self):
+        # TODO: unify this when we use the proper bootstrap template for wider asset support
+        account_id = self.aws_client.sts.get_caller_identity()["Account"]
+        region = self.aws_client.sts.meta.region_name
+        return f"localstack-testing-assets-{account_id}-{region}"
+
+    def _create_bucket_if_not_exists(self, template_bucket_name: str):
+        try:
+            self.aws_client.s3.head_bucket(Bucket=template_bucket_name)
+        except ClientError as exc:
+            if exc.response["Error"]["Code"] != "404":
+                raise
+            create_s3_bucket(template_bucket_name, s3_client=self.aws_client.s3)
+
+    def _synth(self):
+        # TODO: this doesn't actually synth a CloudAssembly yet
+        stacks = self.cdk_app.node.children
+        if not stacks:
+            return
+
+        for stack in self.cdk_app.node.children:
+            self.add_cdk_stack(cdk_stack=stack)
+
+    # TODO: move to a util class/module
+    @staticmethod
+    def get_asset_bucket_cdk(stack: cdk.Stack):
+        return cdk.Fn.join("-", ["localstack", "testing", stack.account, stack.region])
diff --git a/localstack-core/localstack/testing/snapshots/__init__.py b/localstack-core/localstack/testing/snapshots/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack-core/localstack/testing/snapshots/transformer_utility.py b/localstack-core/localstack/testing/snapshots/transformer_utility.py
new file mode 100644
index 0000000000000..562cc9e097646
--- /dev/null
+++ b/localstack-core/localstack/testing/snapshots/transformer_utility.py
@@ -0,0 +1,908 @@
+import json
+import logging
+import re
+from datetime import datetime
+from json import JSONDecodeError
+from typing import Optional, Pattern
+
+from localstack_snapshot.snapshots.transformer import (
+    PATTERN_ISO8601,
+    JsonpathTransformer,
+    KeyValueBasedTransformer,
+    RegexTransformer,
+    ResponseMetaDataTransformer,
+    SortingTransformer,
+    TimestampTransformer,
+)
+
+from localstack.aws.api.secretsmanager import CreateSecretResponse
+from localstack.aws.api.stepfunctions import (
+    CreateStateMachineOutput,
+    LongArn,
+    StartExecutionOutput,
+    StartSyncExecutionOutput,
+)
+from localstack.utils.net import IP_REGEX
+
+LOG = logging.getLogger(__name__)
+
+
+PATTERN_UUID = re.compile(
+    r"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}"
+)
+
+PATTERN_ARN = re.compile(r"arn:(aws[a-zA-Z-]*)?:([a-zA-Z0-9-_.]+)?:([^:]+)?:(\d{12})?:(.*)")
+PATTERN_ARN_CHANGESET = re.compile(
+    r"arn:(aws[a-zA-Z-]*)?:([a-zA-Z0-9-_.]+)?:([^:]+)?:(\d{12})?:changeSet/([^/]+)"
+)
+PATTERN_LOGSTREAM_ID: Pattern[str] = re.compile(
+    # r"\d{4}/\d{2}/\d{2}/\[((\$LATEST)|\d+)\][0-9a-f]{32}"  # TODO - this was originally included
+    # but some responses from LS look like this: 2022/5/30/[$LATEST]20b0964ab88b01c1 -> might not be correct on LS?
+    r"\d{4}/\d{1,2}/\d{1,2}/\[((\$LATEST)|\d+)\][0-9a-f]{8,32}"
+)
+PATTERN_KEY_ARN = re.compile(
+    r"arn:(aws[a-zA-Z-]*)?:([a-zA-Z0-9-_.]+)?:([^:]+)?:(\d{12})?:key/[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}"
+)
+
+
+# TODO: split into generic/aws and put into lib
+class TransformerUtility:
+    @staticmethod
+    def key_value(
+        key: str, value_replacement: Optional[str] = None, reference_replacement: bool = True
+    ):
+        """Creates a new KeyValueBasedTransformer. If the key matches, the value will be replaced.
+
+        :param key: the name of the key which should be replaced
+        :param value_replacement: the value which will replace the original value.
+            By default, it is the key name in lowercase, separated with hyphens
+        :param reference_replacement: if False, only the original value for this key will be replaced.
+            If True, all references of this value will be replaced (using a regex pattern) for the entire test case.
+            In this case, the replaced values will be enumerated as well.
+            Default: True
+
+        :return: KeyValueBasedTransformer
+        """
+        return KeyValueBasedTransformer(
+            lambda k, v: v if k == key and (v is not None and v != "") else None,
+            replacement=value_replacement or _replace_camel_string_with_hyphen(key),
+            replace_reference=reference_replacement,
+        )
+
+    @staticmethod
+    def resource_name(replacement_name: str = "resource"):
+        """Creates a new KeyValueBasedTransformer for the resource name.
+
+        :param replacement_name: the placeholder that replaces the resource name extracted from an ARN
+        :return: KeyValueBasedTransformer
+        """
+        return KeyValueBasedTransformer(_resource_name_transformer, replacement_name)
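+
+    # Illustrative usage only (hypothetical snapshot fixture): replace every
+    # "RoleName" value, and all references to it, with an enumerated placeholder:
+    #   snapshot.add_transformer(TransformerUtility.key_value("RoleName"))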
+
+    @staticmethod
+    def jsonpath(jsonpath: str, value_replacement: str, reference_replacement: bool = True):
+        """Creates a new JsonpathTransformer. If the jsonpath matches, the value will be replaced.
+
+        :param jsonpath: the jsonpath that should be matched
+        :param value_replacement: the value which will replace the original value.
+            By default, it is the key name in lowercase, separated with hyphens
+        :param reference_replacement: if False, only the original value for this key will be replaced.
+            If True, all references of this value will be replaced (using a regex pattern) for the entire test case.
+            In this case, the replaced values will be enumerated as well.
+            Default: True
+
+        :return: JsonpathTransformer
+        """
+        return JsonpathTransformer(
+            jsonpath=jsonpath,
+            replacement=value_replacement,
+            replace_reference=reference_replacement,
+        )
+
+    @staticmethod
+    def regex(regex: str | Pattern[str], replacement: str):
+        """Creates a new RegexTransformer. All matches in the string-converted dict will be replaced.
+
+        :param regex: the regex that should be matched
+        :param replacement: the value which will replace the original value.
+
+        :return: RegexTransformer
+        """
+        return RegexTransformer(regex, replacement)
+
+    # TODO add more utility functions? e.g. key_value with function as parameter?
+
+    @staticmethod
+    def lambda_api():
+        """
+        :return: array with Transformers, for lambda api.
+        """
+        return [
+            TransformerUtility.key_value("FunctionName"),
+            TransformerUtility.key_value(
+                "CodeSize", value_replacement="<code-size>", reference_replacement=False
+            ),
+            TransformerUtility.jsonpath(
+                jsonpath="$..Code.Location",
+                value_replacement="<location>",
+                reference_replacement=False,
+            ),
+            TransformerUtility.jsonpath(
+                jsonpath="$..Content.Location",
+                value_replacement="<location>",
+                reference_replacement=False,
+            ),
+            KeyValueBasedTransformer(_resource_name_transformer, "resource"),
+            KeyValueBasedTransformer(
+                _log_stream_name_transformer, "log-stream-name", replace_reference=True
+            ),
+        ]
+
+    @staticmethod
+    def lambda_report_logs():
+        """Transformers for Lambda REPORT logs replacing dynamic metrics including:
+        * Duration
+        * Billed Duration
+        * Max Memory Used
+        * Init Duration
+
+        Excluding:
+        * Memory Size
+        """
+        return [
+            TransformerUtility.regex(
+                re.compile(r"Duration: \d+(\.\d{2})? ms"), "Duration: <duration> ms"
+            ),
+            TransformerUtility.regex(re.compile(r"Used: \d+ MB"), "Used: <memory> MB"),
+        ]
+
+    @staticmethod
+    def apigateway_api():
+        return [
+            TransformerUtility.key_value("id"),
+            TransformerUtility.key_value("name"),
+            TransformerUtility.key_value("parentId"),
+            TransformerUtility.key_value("rootResourceId"),
+        ]
+
+    @staticmethod
+    def apigateway_proxy_event():
+        return [
+            TransformerUtility.key_value("extendedRequestId"),
+            TransformerUtility.key_value("resourceId"),
+            TransformerUtility.key_value("sourceIp"),
+            TransformerUtility.jsonpath("$..headers.X-Amz-Cf-Id", value_replacement="cf-id"),
+            TransformerUtility.jsonpath(
+                "$..headers.CloudFront-Viewer-ASN", value_replacement="cloudfront-asn"
+            ),
+            TransformerUtility.jsonpath(
+                "$..headers.CloudFront-Viewer-Country", value_replacement="cloudfront-country"
+            ),
+            TransformerUtility.jsonpath("$..headers.Via", value_replacement="via"),
+            TransformerUtility.jsonpath("$..headers.X-Amzn-Trace-Id", value_replacement="trace-id"),
+            TransformerUtility.jsonpath(
+                "$..requestContext.requestTime",
+                value_replacement="<request-time>",
+                reference_replacement=False,
+            ),
+            KeyValueBasedTransformer(
+                lambda k, v: str(v) if k == "requestTimeEpoch" else None,
+                "<request-time-epoch>",
+                replace_reference=False,
+            ),
+            TransformerUtility.regex(IP_REGEX.strip("^$"), "<ip-address>"),
+        ]
+
+    @staticmethod
+    def apigateway_invocation_headers():
+        return [
+            TransformerUtility.key_value("apigw-id"),
+            TransformerUtility.key_value("Via"),
+            TransformerUtility.key_value(
+                "Date", value_replacement="<date>", reference_replacement=False
+            ),
+            TransformerUtility.key_value(
+                "x-amz-apigw-id",
+                value_replacement="<x-amz-apigw-id>",
+                reference_replacement=False,
+            ),
+            TransformerUtility.key_value(
+                "x-amzn-Remapped-Date",
+                value_replacement="<x-amzn-remapped-date>",
+                reference_replacement=False,
+            ),
+            TransformerUtility.key_value(
+                "X-Amzn-Trace-Id",
+                value_replacement="<x-amzn-trace-id>",
+                reference_replacement=False,
+            ),
TransformerUtility.key_value("X-Amzn-Apigateway-Api-Id"), + TransformerUtility.key_value("X-Forwarded-For"), + TransformerUtility.key_value( + "X-Forwarded-Port", + value_replacement="", + reference_replacement=False, + ), + TransformerUtility.key_value( + "X-Forwarded-Proto", + value_replacement="", + reference_replacement=False, + ), + ] + + @staticmethod + def apigatewayv2_jwt_authorizer_event(): + return [ + TransformerUtility.jsonpath("$..claims.auth_time", "claims-auth-time"), + TransformerUtility.jsonpath("$..claims.client_id", "claims-client-id"), + TransformerUtility.jsonpath("$..claims.exp", "claims-exp"), + TransformerUtility.jsonpath("$..claims.iat", "claims-iat"), + TransformerUtility.jsonpath("$..claims.jti", "claims-jti"), + TransformerUtility.jsonpath("$..claims.sub", "claims-sub"), + ] + + @staticmethod + def apigatewayv2_lambda_proxy_event(): + return [ + TransformerUtility.key_value("resourceId"), + TransformerUtility.key_value("sourceIp"), + TransformerUtility.jsonpath("$..requestContext.accountId", "account-id"), + TransformerUtility.jsonpath("$..requestContext.apiId", "api-id"), + TransformerUtility.jsonpath("$..requestContext.domainName", "domain-name"), + TransformerUtility.jsonpath("$..requestContext.domainPrefix", "domain-prefix"), + TransformerUtility.jsonpath( + "$..requestContext.extendedRequestId", "extended-request-id" + ), + TransformerUtility.jsonpath("$..requestContext.requestId", "request-id"), + TransformerUtility.jsonpath( + "$..requestContext.requestTime", + value_replacement="", + reference_replacement=False, + ), + KeyValueBasedTransformer( + lambda k, v: str(v) if k == "requestTimeEpoch" else None, + "", + replace_reference=False, + ), + TransformerUtility.key_value("time"), + KeyValueBasedTransformer( + lambda k, v: str(v) if k == "timeEpoch" else None, + "", + replace_reference=False, + ), + TransformerUtility.jsonpath("$..multiValueHeaders.Host[*]", "host"), + TransformerUtility.jsonpath( + "$..multiValueHeaders.X-Forwarded-For[*]", "x-forwarded-for" + ), + TransformerUtility.jsonpath( + "$..multiValueHeaders.X-Forwarded-Port[*]", "x-forwarded-port" + ), + TransformerUtility.jsonpath( + "$..multiValueHeaders.X-Forwarded-Proto[*]", "x-forwarded-proto" + ), + TransformerUtility.jsonpath( + "$..multiValueHeaders.X-Amzn-Trace-Id[*]", "x-amzn-trace-id" + ), + TransformerUtility.jsonpath("$..multiValueHeaders.authorization[*]", "authorization"), + TransformerUtility.jsonpath("$..multiValueHeaders.User-Agent[*]", "user-agent"), + TransformerUtility.regex(r"python-requests/\d+\.\d+(\.\d+)?", "python-requests/x.x.x"), + ] + + @staticmethod + def cloudformation_api(): + """ + :return: array with Transformers, for cloudformation api. 
+ """ + return [ + KeyValueBasedTransformer(_resource_name_transformer, "resource"), + KeyValueBasedTransformer(_change_set_id_transformer, "change-set-id"), + TransformerUtility.key_value("ChangeSetName"), + TransformerUtility.key_value("ChangeSetId"), + TransformerUtility.key_value("StackName"), + ] + + @staticmethod + def cfn_stack_resource(): + """ + :return: array with Transformers, for cloudformation stack resource description; + recommended for verifying the stack resources deployed for scenario tests + """ + return [ + KeyValueBasedTransformer(_resource_name_transformer, "resource"), + KeyValueBasedTransformer(_change_set_id_transformer, "change-set-id"), + TransformerUtility.key_value("LogicalResourceId"), + TransformerUtility.key_value("PhysicalResourceId", reference_replacement=False), + ] + + @staticmethod + def dynamodb_api(): + """ + :return: array with Transformers, for dynamodb api. + """ + return [ + RegexTransformer( + r"([a-zA-Z0-9-_.]*)?test_table_([a-zA-Z0-9-_.]*)?", replacement="" + ), + ] + + @staticmethod + def dynamodb_streams_api(): + return [ + RegexTransformer( + r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}$", replacement="" + ), + TransformerUtility.key_value("TableName"), + TransformerUtility.key_value("TableStatus"), + TransformerUtility.key_value("LatestStreamLabel"), + TransformerUtility.key_value("StartingSequenceNumber", reference_replacement=False), + TransformerUtility.key_value("ShardId"), + TransformerUtility.key_value("StreamLabel"), + TransformerUtility.key_value("SequenceNumber"), + TransformerUtility.key_value("eventID"), + ] + + @staticmethod + def iam_api(): + """ + :return: array with Transformers, for iam api. + """ + return [ + TransformerUtility.key_value("UserName"), + TransformerUtility.key_value("UserId"), + TransformerUtility.key_value("RoleId"), + TransformerUtility.key_value("RoleName"), + TransformerUtility.key_value("PolicyName"), + TransformerUtility.key_value("PolicyId"), + TransformerUtility.key_value("GroupName"), + ] + + @staticmethod + def transcribe_api(): + """ + :return: array with Transformers, for iam api. + """ + return [ + RegexTransformer( + r"([a-zA-Z0-9-_.]*)?\/test-bucket-([a-zA-Z0-9-_.]*)?", replacement="" + ), + TransformerUtility.key_value("TranscriptionJobName", "transcription-job"), + TransformerUtility.key_value("jobName", "job-name"), + TransformerUtility.jsonpath( + jsonpath="$..Transcript..TranscriptFileUri", + value_replacement="", + reference_replacement=False, + ), + TransformerUtility.key_value("NextToken", "token", reference_replacement=False), + ] + + @staticmethod + def s3_api(): + """ + :return: array with Transformers, for s3 api. 
+ """ + + s3 = [ + TransformerUtility.key_value("Name", value_replacement="bucket-name"), + TransformerUtility.key_value("BucketName"), + TransformerUtility.key_value("VersionId"), + TransformerUtility.jsonpath( + jsonpath="$..Owner.DisplayName", + value_replacement="", + reference_replacement=False, + ), + TransformerUtility.jsonpath( + jsonpath="$..Owner.ID", value_replacement="", reference_replacement=False + ), + ] + # for s3 notifications: + s3.extend(TransformerUtility.s3_notifications_transformer()) + return s3 + + @staticmethod + def s3_notifications_transformer(): + return [ + TransformerUtility.jsonpath( + "$..responseElements.x-amz-id-2", "amz-id", reference_replacement=False + ), + TransformerUtility.jsonpath( + "$..responseElements.x-amz-request-id", + "amz-request-id", + reference_replacement=False, + ), + TransformerUtility.jsonpath("$..s3.configurationId", "config-id"), + TransformerUtility.jsonpath( + "$..s3.object.sequencer", "sequencer", reference_replacement=False + ), + TransformerUtility.jsonpath("$..s3.bucket.ownerIdentity.principalId", "principal-id"), + TransformerUtility.jsonpath("$..userIdentity.principalId", "principal-id"), + TransformerUtility.jsonpath("$..requestParameters.sourceIPAddress", "ip-address"), + TransformerUtility.jsonpath( + "$..s3.object.versionId", + "version-id", + reference_replacement=False, + ), + ] + + @staticmethod + def s3_dynamodb_notifications(): + return [ + TransformerUtility.jsonpath("$..uuid.S", "uuid"), + TransformerUtility.jsonpath("$..M.requestParameters.M.sourceIPAddress.S", "ip-address"), + TransformerUtility.jsonpath( + "$..M.responseElements.M.x-amz-id-2.S", "amz-id", reference_replacement=False + ), + TransformerUtility.jsonpath( + "$..M.responseElements.M.x-amz-request-id.S", + "amz-request-id", + reference_replacement=False, + ), + TransformerUtility.jsonpath("$..M.s3.M.bucket.M.name.S", "bucket-name"), + TransformerUtility.jsonpath("$..M.s3.M.bucket.M.arn.S", "bucket-arn"), + TransformerUtility.jsonpath( + "$..M.s3.M.bucket.M.ownerIdentity.M.principalId.S", "principal-id" + ), + TransformerUtility.jsonpath("$..M.s3.M.configurationId.S", "config-id"), + TransformerUtility.jsonpath("$..M.s3.M.object.M.key.S", "object-key"), + TransformerUtility.jsonpath( + "$..M.s3.M.object.M.sequencer.S", "sequencer", reference_replacement=False + ), + TransformerUtility.jsonpath("$..M.userIdentity.M.principalId.S", "principal-id"), + ] + + @staticmethod + def kinesis_api(): + """ + :return: array with Transformers, for kinesis api. 
+ """ + return [ + JsonpathTransformer( + jsonpath="$..Records..SequenceNumber", + replacement="sequence_number", + replace_reference=True, + ), + TransformerUtility.key_value("SequenceNumber", "sequence_number"), + TransformerUtility.key_value("StartingSequenceNumber", "starting_sequence_number"), + TransformerUtility.key_value("ShardId", "shard_id"), + TransformerUtility.key_value("NextShardIterator", "next_shard_iterator"), + TransformerUtility.key_value( + "EndingHashKey", "ending_hash", reference_replacement=False + ), + TransformerUtility.key_value( + "StartingHashKey", "starting_hash", reference_replacement=False + ), + TransformerUtility.key_value(_resource_name_transformer, "ConsumerARN"), + RegexTransformer( + r"([a-zA-Z0-9-_.]*)?\/consumer:([0-9-_.]*)?", + replacement="", + ), + RegexTransformer( + r"([a-zA-Z0-9-_.]*)?\/test-stream-([a-zA-Z0-9-_.]*)?", + replacement="", + ), + TransformerUtility.key_value( + "ContinuationSequenceNumber", "" + ), + ] + + @staticmethod + def route53resolver_api(): + """ + :return: array with Transformers, for route53resolver api. + """ + return [ + TransformerUtility.key_value( + "SecurityGroupIds", value_replacement="sg-ids", reference_replacement=False + ), + TransformerUtility.key_value("Id"), + TransformerUtility.key_value("HostVPCId", "host-vpc-id"), + KeyValueBasedTransformer(_resource_name_transformer, "Arn"), + TransformerUtility.key_value("CreatorRequestId"), + TransformerUtility.key_value("StatusMessage", reference_replacement=False), + ] + + @staticmethod + def route53_api(): + return [ + TransformerUtility.jsonpath("$..HostedZone.CallerReference", "caller-reference"), + TransformerUtility.jsonpath( + jsonpath="$..DelegationSet.NameServers", + value_replacement="", + reference_replacement=False, + ), + TransformerUtility.jsonpath( + jsonpath="$..ChangeInfo.Status", value_replacement="status" + ), + KeyValueBasedTransformer(_route53_hosted_zone_id_transformer, "zone-id"), + TransformerUtility.regex(r"/change/[A-Za-z0-9]+", "/change/"), + TransformerUtility.jsonpath( + jsonpath="$..HostedZone.Name", value_replacement="zone_name" + ), + ] + + @staticmethod + def sqs_api(): + """ + :return: array with Transformers, for sqs api. + """ + return [ + TransformerUtility.key_value("ReceiptHandle"), + TransformerUtility.key_value("TaskHandle"), + TransformerUtility.key_value( + "SenderId" + ), # TODO: flaky against AWS (e.g. /Attributes/SenderId '' → '' ... (expected → actual)) + TransformerUtility.key_value("SequenceNumber"), + TransformerUtility.jsonpath("$..MessageAttributes.RequestID.StringValue", "request-id"), + KeyValueBasedTransformer(_resource_name_transformer, "resource"), + ] + + @staticmethod + def kms_api(): + """ + :return: array with Transformers, for kms api. + """ + return [ + TransformerUtility.key_value("KeyId"), + TransformerUtility.jsonpath( + jsonpath="$..Signature", + value_replacement="", + reference_replacement=False, + ), + TransformerUtility.jsonpath( + jsonpath="$..Mac", value_replacement="", reference_replacement=False + ), + TransformerUtility.key_value("CiphertextBlob", reference_replacement=False), + TransformerUtility.key_value("Plaintext", reference_replacement=False), + RegexTransformer(PATTERN_KEY_ARN, replacement=""), + ] + + @staticmethod + def sns_api(): + """ + :return: array with Transformers, for sns api. 
+ """ + return [ + TransformerUtility.key_value("ReceiptHandle"), + TransformerUtility.key_value("SequenceNumber"), # this might need to be in SQS + TransformerUtility.key_value( + "Signature", value_replacement="", reference_replacement=False + ), + # the body of SNS messages contains a timestamp, need to ignore the hash + TransformerUtility.key_value("MD5OfBody", "", reference_replacement=False), + # this can interfere in ARN with the accountID + TransformerUtility.key_value( + "SenderId", value_replacement="", reference_replacement=False + ), + KeyValueBasedTransformer( + _sns_pem_file_token_transformer, + replacement="signing-cert-file", + ), + # replaces the domain in "SigningCertURL" URL (KeyValue won't work as it replaces reference, and if + # replace_reference is False, then it replaces the whole key + RegexTransformer( + r"(?i)(?<=SigningCertURL[\"|']:\s[\"|'])(https?.*?)(?=/\SimpleNotificationService-)", + replacement="", + ), + # replaces the domain in "UnsubscribeURL" URL (KeyValue won't work as it replaces reference, and if + # replace_reference is False, then it replaces the whole key + RegexTransformer( + r"(?i)(?<=UnsubscribeURL[\"|']:\s[\"|'])(https?.*?)(?=/\?Action=Unsubscribe)", + replacement="", + ), + KeyValueBasedTransformer(_resource_name_transformer, "resource"), + # add a special transformer with 'resource' replacement for SubscriptionARN in UnsubscribeURL + KeyValueBasedTransformer( + _sns_unsubscribe_url_subscription_arn_transformer, replacement="resource" + ), + ] + + @staticmethod + def cloudwatch_api(): + """ + :return: array with Transformers, for cloudwatch api. + """ + return [ + TransformerUtility.key_value("AlarmName"), + TransformerUtility.key_value("Namespace"), + KeyValueBasedTransformer(_resource_name_transformer, "SubscriptionArn"), + TransformerUtility.key_value("Region", "region-name-full"), + ] + + @staticmethod + def logs_api(): + """ + :return: array with Transformers, for logs api + """ + return [ + TransformerUtility.key_value("logGroupName"), + TransformerUtility.key_value("logStreamName"), + TransformerUtility.key_value("creationTime", "